| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
import functools
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._internal.exceptions import InstallationError
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.utils.misc import dist_is_editable
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .base import Constraint
from .factory import Factory
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional, Set, Tuple
from pip._vendor.resolvelib.resolvers import Result
from pip._vendor.resolvelib.structs import Graph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
lazy_wheel=False, # type: bool
):
super(Resolver, self).__init__()
if lazy_wheel:
logger.warning(
'pip is using lazily downloaded wheels using HTTP '
'range requests to obtain dependency information. '
'This experimental feature is enabled through '
'--use-feature=fast-deps and it is not ready for production.'
)
assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
finder=finder,
preparer=preparer,
make_install_req=make_install_req,
wheel_cache=wheel_cache,
use_user_site=use_user_site,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
py_version_info=py_version_info,
lazy_wheel=lazy_wheel,
)
self.ignore_dependencies = ignore_dependencies
self.upgrade_strategy = upgrade_strategy
self._result = None # type: Optional[Result]
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
constraints = {} # type: Dict[str, Constraint]
user_requested = set() # type: Set[str]
requirements = []
for req in root_reqs:
if req.constraint:
# Ensure we only accept valid constraints
problem = check_invalid_constraint_type(req)
if problem:
raise InstallationError(problem)
if not req.match_markers():
continue
name = canonicalize_name(req.name)
if name in constraints:
constraints[name] &= req
else:
constraints[name] = Constraint.from_ireq(req)
else:
if req.user_supplied and req.name:
user_requested.add(canonicalize_name(req.name))
r = self.factory.make_requirement_from_install_req(
req, requested_extras=(),
)
if r is not None:
requirements.append(r)
provider = PipProvider(
factory=self.factory,
constraints=constraints,
ignore_dependencies=self.ignore_dependencies,
upgrade_strategy=self.upgrade_strategy,
user_requested=user_requested,
)
reporter = BaseReporter()
resolver = RLResolver(provider, reporter)
try:
try_to_avoid_resolution_too_deep = 2000000
self._result = resolver.resolve(
requirements, max_rounds=try_to_avoid_resolution_too_deep,
)
except ResolutionImpossible as e:
error = self.factory.get_installation_error(e)
six.raise_from(error, e)
req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
for candidate in self._result.mapping.values():
ireq = candidate.get_install_requirement()
if ireq is None:
continue
# Check if there is already an installation under the same name,
# and set a flag for later stages to uninstall it, if needed.
            # * There isn't, good -- no uninstallation needed.
# * The --force-reinstall flag is set. Always reinstall.
# * The installation is different in version or editable-ness, so
# we need to uninstall it to install the new distribution.
# * The installed version is the same as the pending distribution.
            #   Skip this distribution altogether to save work.
installed_dist = self.factory.get_dist_to_uninstall(candidate)
if installed_dist is None:
ireq.should_reinstall = False
elif self.factory.force_reinstall:
ireq.should_reinstall = True
elif installed_dist.parsed_version != candidate.version:
ireq.should_reinstall = True
elif dist_is_editable(installed_dist) != candidate.is_editable:
ireq.should_reinstall = True
else:
continue
link = candidate.source_link
if link and link.is_yanked:
# The reason can contain non-ASCII characters, Unicode
# is required for Python 2.
msg = (
u'The candidate selected for download or install is a '
u'yanked version: {name!r} candidate (version {version} '
u'at {link})\nReason for being yanked: {reason}'
).format(
name=candidate.name,
version=candidate.version,
link=link,
reason=link.yanked_reason or u'<none given>',
)
logger.warning(msg)
req_set.add_named_requirement(ireq)
return req_set
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Get order for installation of requirements in RequirementSet.
The returned list contains a requirement before another that depends on
it. This helps ensure that the environment is kept consistent as they
get installed one-by-one.
The current implementation creates a topological ordering of the
    dependency graph, while breaking any cycles in the graph at arbitrary
    points. We make no guarantees about where the cycles are broken, other
    than that they will be broken.
"""
assert self._result is not None, "must call resolve() first"
graph = self._result.graph
weights = get_topological_weights(graph)
sorted_items = sorted(
req_set.requirements.items(),
key=functools.partial(_req_set_item_sorter, weights=weights),
reverse=True,
)
return [ireq for _, ireq in sorted_items]
def get_topological_weights(graph):
# type: (Graph) -> Dict[Optional[str], int]
"""Assign weights to each node based on how "deep" they are.
This implementation may change at any point in the future without prior
notice.
    We take the length of the longest path to any node from the root,
    ignoring any paths that contain a single node twice (i.e. cycles). This
    is done through a depth-first search through the graph, while keeping
    track of the path to the node.
    A cycle in the graph would result in a node being revisited while it is
    already on its own path. In that case, take no action. This helps
    ensure we don't get stuck in a cycle.
    When assigning the weight, the longer path (i.e. larger length) is
    preferred.
"""
path = set() # type: Set[Optional[str]]
weights = {} # type: Dict[Optional[str], int]
def visit(node):
# type: (Optional[str]) -> None
if node in path:
# We hit a cycle, so we'll break it here.
return
# Time to visit the children!
path.add(node)
for child in graph.iter_children(node):
visit(child)
path.remove(node)
last_known_parent_count = weights.get(node, 0)
weights[node] = max(last_known_parent_count, len(path))
# `None` is guaranteed to be the root node by resolvelib.
visit(None)
# Sanity checks
assert weights[None] == 0
assert len(weights) == len(graph)
return weights
def _req_set_item_sorter(
item, # type: Tuple[str, InstallRequirement]
weights, # type: Dict[Optional[str], int]
):
# type: (...) -> Tuple[int, str]
"""Key function used to sort install requirements for installation.
Based on the "weight" mapping calculated in ``get_installation_order()``.
The canonical package name is returned as the second member as a tie-
breaker to ensure the result is predictable, which is useful in tests.
"""
name = canonicalize_name(item[0])
return weights[name], name
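# Editor's sketch (not part of pip): a toy run of get_topological_weights().
# It assumes resolvelib's DirectedGraph API (add/connect/iter_children); the
# import path and the expected output below are stated assumptions, not
# verified against any particular resolvelib version.
if __name__ == "__main__":
    from pip._vendor.resolvelib.structs import DirectedGraph

    g = DirectedGraph()
    for node in (None, "a", "b", "c"):
        g.add(node)
    g.connect(None, "a")  # root -> a
    g.connect(None, "b")  # root -> b (short path)
    g.connect("a", "b")   # a -> b (the longer path wins for b)
    g.connect("b", "c")
    # Expected: {None: 0, 'a': 1, 'b': 2, 'c': 3}, since the longest
    # root-to-node path determines each weight.
    print(get_topological_weights(g))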
| RalfBarkow/Zettelkasten | venv/lib/python3.9/site-packages/pip/_internal/resolution/resolvelib/resolver.py | Python | gpl-3.0 | 10,097 | ["VisIt"] | d583fed68398a73316eab13f1325bf918259db2c4fe4bf2a8b4ba659e85cede0 |
# -*- coding:utf-8 -*-
## src/common/optparser.py
##
## Copyright (C) 2003-2005 Vincent Hanquez <tab AT snarc.org>
## Copyright (C) 2003-2014 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2005-2006 Dimitur Kirov <dkirov AT gmail.com>
## Nikos Kouremenos <kourem AT gmail.com>
## Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org>
## Copyright (C) 2007 James Newton <redshodan AT gmail.com>
## Brendan Taylor <whateley AT gmail.com>
## Tomasz Melcer <liori AT exroot.org>
## Stephan Erb <steve-e AT h3c.de>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import os
import sys
import locale
import re
from time import time
from common import gajim
from common import helpers
from common import caps_cache
import sqlite3 as sqlite
from common import logger
class OptionsParser:
def __init__(self, filename):
self.__filename = filename
        self.old_values = {} # values saved in the file that may
        # no longer be valid
def read(self):
try:
fd = open(self.__filename)
except Exception:
if os.path.exists(self.__filename):
                # the file exists but could not be opened for reading
print(_('Error: cannot open %s for reading') % self.__filename,
file=sys.stderr)
return False
new_version = gajim.config.get('version')
new_version = new_version.split('-', 1)[0]
seen = set()
regex = re.compile(r"(?P<optname>[^.]+)(?:(?:\.(?P<key>.+))?\.(?P<subname>[^.]+))?\s=\s(?P<value>.*)")
for line in fd:
optname, key, subname, value = regex.match(line).groups()
if key is None:
self.old_values[optname] = value
gajim.config.set(optname, value)
else:
if (optname, key) not in seen:
gajim.config.add_per(optname, key)
seen.add((optname, key))
gajim.config.set_per(optname, key, subname, value)
old_version = gajim.config.get('version')
old_version = old_version.split('-', 1)[0]
self.update_config(old_version, new_version)
self.old_values = {} # clean mem
fd.close()
return True
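    # Editor's sketch (not part of Gajim): what the option regex in read()
    # captures, using hypothetical option names:
    #
    #   >>> regex.match("show_offline = False").groups()
    #   ('show_offline', None, None, 'False')
    #   >>> regex.match("accounts.Work.password = secret").groups()
    #   ('accounts', 'Work', 'password', 'secret')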
def write_line(self, fd, opt, parents, value):
if value is None:
return
# convert to utf8 before writing to file if needed
value = str(value)
s = ''
if parents:
if len(parents) == 1:
return
for p in parents:
s += p + '.'
s += opt
fd.write(s + ' = ' + value + '\n')
def write(self):
(base_dir, filename) = os.path.split(self.__filename)
self.__tempfile = os.path.join(base_dir, '.' + filename)
try:
f = open(self.__tempfile, 'w')
except IOError as e:
return str(e)
try:
gajim.config.foreach(self.write_line, f)
except IOError as e:
return str(e)
f.flush()
os.fsync(f.fileno())
f.close()
if os.path.exists(self.__filename):
if os.name == 'nt':
# win32 needs this
try:
os.remove(self.__filename)
except Exception:
pass
try:
os.rename(self.__tempfile, self.__filename)
except IOError as e:
return str(e)
os.chmod(self.__filename, 0o600)
def update_config(self, old_version, new_version):
        old = [int(i) for i in old_version.split('.')] # convert '0.x.y' to [0, x, y]
        new = [int(i) for i in new_version.split('.')]
if old < [0, 9] and new >= [0, 9]:
self.update_config_x_to_09()
if old < [0, 10] and new >= [0, 10]:
self.update_config_09_to_010()
if old < [0, 10, 1, 1] and new >= [0, 10, 1, 1]:
self.update_config_to_01011()
if old < [0, 10, 1, 2] and new >= [0, 10, 1, 2]:
self.update_config_to_01012()
if old < [0, 10, 1, 3] and new >= [0, 10, 1, 3]:
self.update_config_to_01013()
if old < [0, 10, 1, 4] and new >= [0, 10, 1, 4]:
self.update_config_to_01014()
if old < [0, 10, 1, 5] and new >= [0, 10, 1, 5]:
self.update_config_to_01015()
if old < [0, 10, 1, 6] and new >= [0, 10, 1, 6]:
self.update_config_to_01016()
if old < [0, 10, 1, 7] and new >= [0, 10, 1, 7]:
self.update_config_to_01017()
if old < [0, 10, 1, 8] and new >= [0, 10, 1, 8]:
self.update_config_to_01018()
if old < [0, 11, 0, 1] and new >= [0, 11, 0, 1]:
self.update_config_to_01101()
if old < [0, 11, 0, 2] and new >= [0, 11, 0, 2]:
self.update_config_to_01102()
if old < [0, 11, 1, 1] and new >= [0, 11, 1, 1]:
self.update_config_to_01111()
if old < [0, 11, 1, 2] and new >= [0, 11, 1, 2]:
self.update_config_to_01112()
if old < [0, 11, 1, 3] and new >= [0, 11, 1, 3]:
self.update_config_to_01113()
if old < [0, 11, 1, 4] and new >= [0, 11, 1, 4]:
self.update_config_to_01114()
if old < [0, 11, 1, 5] and new >= [0, 11, 1, 5]:
self.update_config_to_01115()
if old < [0, 11, 2, 1] and new >= [0, 11, 2, 1]:
self.update_config_to_01121()
if old < [0, 11, 4, 1] and new >= [0, 11, 4, 1]:
self.update_config_to_01141()
if old < [0, 11, 4, 2] and new >= [0, 11, 4, 2]:
self.update_config_to_01142()
if old < [0, 11, 4, 3] and new >= [0, 11, 4, 3]:
self.update_config_to_01143()
if old < [0, 11, 4, 4] and new >= [0, 11, 4, 4]:
self.update_config_to_01144()
if old < [0, 12, 0, 1] and new >= [0, 12, 0, 1]:
self.update_config_to_01201()
if old < [0, 12, 1, 1] and new >= [0, 12, 1, 1]:
self.update_config_to_01211()
if old < [0, 12, 1, 2] and new >= [0, 12, 1, 2]:
self.update_config_to_01212()
if old < [0, 12, 1, 3] and new >= [0, 12, 1, 3]:
self.update_config_to_01213()
if old < [0, 12, 1, 4] and new >= [0, 12, 1, 4]:
self.update_config_to_01214()
if old < [0, 12, 1, 5] and new >= [0, 12, 1, 5]:
self.update_config_to_01215()
if old < [0, 12, 3, 1] and new >= [0, 12, 3, 1]:
self.update_config_to_01231()
if old < [0, 12, 5, 1] and new >= [0, 12, 5, 1]:
self.update_config_from_0125()
self.update_config_to_01251()
if old < [0, 12, 5, 2] and new >= [0, 12, 5, 2]:
self.update_config_to_01252()
if old < [0, 12, 5, 3] and new >= [0, 12, 5, 3]:
self.update_config_to_01253()
if old < [0, 12, 5, 4] and new >= [0, 12, 5, 4]:
self.update_config_to_01254()
if old < [0, 12, 5, 5] and new >= [0, 12, 5, 5]:
self.update_config_to_01255()
if old < [0, 12, 5, 6] and new >= [0, 12, 5, 6]:
self.update_config_to_01256()
if old < [0, 12, 5, 7] and new >= [0, 12, 5, 7]:
self.update_config_to_01257()
if old < [0, 12, 5, 8] and new >= [0, 12, 5, 8]:
self.update_config_to_01258()
if old < [0, 13, 10, 0] and new >= [0, 13, 10, 0]:
self.update_config_to_013100()
if old < [0, 13, 10, 1] and new >= [0, 13, 10, 1]:
self.update_config_to_013101()
if old < [0, 13, 90, 1] and new >= [0, 13, 90, 1]:
self.update_config_to_013901()
if old < [0, 14, 0, 1] and new >= [0, 14, 0, 1]:
self.update_config_to_01401()
if old < [0, 14, 90, 0] and new >= [0, 14, 90, 0]:
self.update_config_to_014900()
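        # Editor's note (sketch, not Gajim code): every branch above follows
        # the same threshold pattern, which could also be written data-driven:
        #
        #   migrations = [([0, 9], self.update_config_x_to_09),
        #                 ([0, 10], self.update_config_09_to_010),
        #                 ...]
        #   for threshold, migrate in migrations:
        #       if old < threshold and new >= threshold:
        #           migrate()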
gajim.logger.init_vars()
gajim.logger.attach_cache_database()
gajim.config.set('version', new_version)
caps_cache.capscache.initialize_from_db()
def assert_unread_msgs_table_exists(self):
"""
Create table unread_messages if there is no such table
"""
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE unread_messages (
message_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
jid_id INTEGER
);
'''
)
con.commit()
gajim.logger.init_vars()
except sqlite.OperationalError:
pass
con.close()
def update_ft_proxies(self, to_remove=[], to_add=[]):
for account in gajim.config.get_per('accounts'):
proxies_str = gajim.config.get_per('accounts', account,
'file_transfer_proxies')
proxies = [p.strip() for p in proxies_str.split(',')]
for wrong_proxy in to_remove:
if wrong_proxy in proxies:
proxies.remove(wrong_proxy)
for new_proxy in to_add:
if new_proxy not in proxies:
proxies.append(new_proxy)
proxies_str = ', '.join(proxies)
gajim.config.set_per('accounts', account, 'file_transfer_proxies',
proxies_str)
def update_config_x_to_09(self):
# Var name that changed:
# avatar_width /height -> chat_avatar_width / height
if 'avatar_width' in self.old_values:
gajim.config.set('chat_avatar_width', self.old_values['avatar_width'])
if 'avatar_height' in self.old_values:
gajim.config.set('chat_avatar_height', self.old_values['avatar_height'])
if 'use_dbus' in self.old_values:
gajim.config.set('remote_control', self.old_values['use_dbus'])
# always_compact_view -> always_compact_view_chat / _gc
if 'always_compact_view' in self.old_values:
gajim.config.set('always_compact_view_chat',
self.old_values['always_compact_view'])
gajim.config.set('always_compact_view_gc',
self.old_values['always_compact_view'])
# new theme: grocery, plain
d = ['accounttextcolor', 'accountbgcolor', 'accountfont',
'accountfontattrs', 'grouptextcolor', 'groupbgcolor', 'groupfont',
'groupfontattrs', 'contacttextcolor', 'contactbgcolor', 'contactfont',
'contactfontattrs', 'bannertextcolor', 'bannerbgcolor', 'bannerfont',
'bannerfontattrs']
for theme_name in (_('grocery'), _('default')):
if theme_name not in gajim.config.get_per('themes'):
gajim.config.add_per('themes', theme_name)
theme = gajim.config.themes_default[theme_name]
for o in d:
gajim.config.set_per('themes', theme_name, o, theme[d.index(o)])
# Remove cyan theme if it's not the current theme
if 'cyan' in gajim.config.get_per('themes'):
gajim.config.del_per('themes', 'cyan')
if _('cyan') in gajim.config.get_per('themes'):
gajim.config.del_per('themes', _('cyan'))
        # If we removed our roster_theme, choose the default green one or
        # another one if it doesn't exist in the config
if gajim.config.get('roster_theme') not in gajim.config.get_per('themes'):
theme = _('green')
if theme not in gajim.config.get_per('themes'):
theme = gajim.config.get_per('themes')[0]
gajim.config.set('roster_theme', theme)
# new proxies in accounts.name.file_transfer_proxies
self.update_ft_proxies(to_add=['proxy.netlab.cz'])
gajim.config.set('version', '0.9')
def update_config_09_to_010(self):
if 'usetabbedchat' in self.old_values and not \
self.old_values['usetabbedchat']:
gajim.config.set('one_message_window', 'never')
if 'autodetect_browser_mailer' in self.old_values and \
self.old_values['autodetect_browser_mailer'] is True:
gajim.config.set('autodetect_browser_mailer', False)
if 'useemoticons' in self.old_values and \
not self.old_values['useemoticons']:
gajim.config.set('emoticons_theme', '')
if 'always_compact_view_chat' in self.old_values and \
self.old_values['always_compact_view_chat'] != 'False':
gajim.config.set('always_hide_chat_buttons', True)
if 'always_compact_view_gc' in self.old_values and \
self.old_values['always_compact_view_gc'] != 'False':
gajim.config.set('always_hide_groupchat_buttons', True)
self.update_ft_proxies(to_remove=['proxy65.jabber.autocom.pl',
'proxy65.jabber.ccc.de'], to_add=['transfer.jabber.freenet.de'])
# create unread_messages table if needed
self.assert_unread_msgs_table_exists()
gajim.config.set('version', '0.10')
def update_config_to_01011(self):
if 'print_status_in_muc' in self.old_values and \
self.old_values['print_status_in_muc'] in (True, False):
gajim.config.set('print_status_in_muc', 'in_and_out')
gajim.config.set('version', '0.10.1.1')
def update_config_to_01012(self):
# See [6456]
if 'emoticons_theme' in self.old_values and \
self.old_values['emoticons_theme'] == 'Disabled':
gajim.config.set('emoticons_theme', '')
gajim.config.set('version', '0.10.1.2')
def update_config_to_01013(self):
"""
Create table transports_cache if there is no such table
"""
# FIXME see #2812
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE transports_cache (
transport TEXT UNIQUE,
type INTEGER
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.10.1.3')
def update_config_to_01014(self):
"""
        Apply indices to the logs database
"""
print(_('migrating logs database to indices'))
# FIXME see #2812
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
        # apply indices
try:
cur.executescript(
'''
CREATE INDEX idx_logs_jid_id_kind ON logs (jid_id, kind);
CREATE INDEX idx_unread_messages_jid_id ON unread_messages (jid_id);
'''
)
con.commit()
except Exception:
pass
con.close()
gajim.config.set('version', '0.10.1.4')
def update_config_to_01015(self):
"""
Clean show values in logs database
"""
#FIXME see #2812
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
status = dict((i[5:].lower(), logger.constants.__dict__[i]) for i in \
logger.constants.__dict__.keys() if i.startswith('SHOW_'))
for show in status:
cur.execute('update logs set show = ? where show = ?;', (status[show],
show))
cur.execute('update logs set show = NULL where show not in (0, 1, 2, 3, 4, 5);')
con.commit()
cur.close() # remove this in 2007 [pysqlite old versions need this]
con.close()
gajim.config.set('version', '0.10.1.5')
def update_config_to_01016(self):
"""
#2494 : Now we play gc_received_message sound even if
notify_on_all_muc_messages is false. Keep precedent behaviour
"""
if 'notify_on_all_muc_messages' in self.old_values and \
self.old_values['notify_on_all_muc_messages'] == 'False' and \
gajim.config.get_per('soundevents', 'muc_message_received', 'enabled'):
gajim.config.set_per('soundevents',\
'muc_message_received', 'enabled', False)
gajim.config.set('version', '0.10.1.6')
def update_config_to_01017(self):
"""
trayicon_notification_on_new_messages -> trayicon_notification_on_events
"""
if 'trayicon_notification_on_new_messages' in self.old_values:
gajim.config.set('trayicon_notification_on_events',
self.old_values['trayicon_notification_on_new_messages'])
gajim.config.set('version', '0.10.1.7')
def update_config_to_01018(self):
"""
chat_state_notifications -> outgoing_chat_state_notifications
"""
if 'chat_state_notifications' in self.old_values:
gajim.config.set('outgoing_chat_state_notifications',
self.old_values['chat_state_notifications'])
gajim.config.set('version', '0.10.1.8')
def update_config_to_01101(self):
"""
Fill time_stamp from before_time and after_time
"""
if 'before_time' in self.old_values:
gajim.config.set('time_stamp', '%s%%X%s ' % (
self.old_values['before_time'], self.old_values['after_time']))
gajim.config.set('version', '0.11.0.1')
def update_config_to_01102(self):
"""
Fill time_stamp from before_time and after_time
"""
if 'ft_override_host_to_send' in self.old_values:
gajim.config.set('ft_add_hosts_to_send',
self.old_values['ft_override_host_to_send'])
gajim.config.set('version', '0.11.0.2')
def update_config_to_01111(self):
"""
Always_hide_chatbuttons -> compact_view
"""
if 'always_hide_groupchat_buttons' in self.old_values and \
'always_hide_chat_buttons' in self.old_values:
gajim.config.set('compact_view', self.old_values['always_hide_groupchat_buttons'] and \
self.old_values['always_hide_chat_buttons'])
gajim.config.set('version', '0.11.1.1')
def update_config_to_01112(self):
"""
GTK+ theme is renamed to default
"""
if 'roster_theme' in self.old_values and \
self.old_values['roster_theme'] == 'gtk+':
gajim.config.set('roster_theme', _('default'))
gajim.config.set('version', '0.11.1.2')
def update_config_to_01113(self):
# copy&pasted from update_config_to_01013, possibly 'FIXME see #2812' applies too
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE caps_cache (
node TEXT,
ver TEXT,
ext TEXT,
data BLOB
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.11.1.3')
def update_config_to_01114(self):
# add default theme if it doesn't exist
d = ['accounttextcolor', 'accountbgcolor', 'accountfont',
'accountfontattrs', 'grouptextcolor', 'groupbgcolor', 'groupfont',
'groupfontattrs', 'contacttextcolor', 'contactbgcolor', 'contactfont',
'contactfontattrs', 'bannertextcolor', 'bannerbgcolor', 'bannerfont',
'bannerfontattrs']
theme_name = _('default')
if theme_name not in gajim.config.get_per('themes'):
gajim.config.add_per('themes', theme_name)
if gajim.config.get_per('themes', 'gtk+'):
# copy from old gtk+ theme
for o in d:
val = gajim.config.get_per('themes', 'gtk+', o)
gajim.config.set_per('themes', theme_name, o, val)
gajim.config.del_per('themes', 'gtk+')
else:
# copy from default theme
theme = gajim.config.themes_default[theme_name]
for o in d:
gajim.config.set_per('themes', theme_name, o, theme[d.index(o)])
gajim.config.set('version', '0.11.1.4')
def update_config_to_01115(self):
# copy&pasted from update_config_to_01013, possibly 'FIXME see #2812' applies too
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
DELETE FROM caps_cache;
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.11.1.5')
def update_config_to_01121(self):
# remove old unencrypted secrets file
from common.configpaths import gajimpaths
new_file = gajimpaths['SECRETS_FILE']
old_file = os.path.dirname(new_file) + '/secrets'
if os.path.exists(old_file):
os.remove(old_file)
gajim.config.set('version', '0.11.2.1')
def update_config_to_01141(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE IF NOT EXISTS caps_cache (
node TEXT,
ver TEXT,
ext TEXT,
data BLOB
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.11.4.1')
def update_config_to_01142(self):
"""
        next_message_received sound event is split into 2 events
"""
gajim.config.add_per('soundevents', 'next_message_received_focused')
gajim.config.add_per('soundevents', 'next_message_received_unfocused')
if gajim.config.get_per('soundevents', 'next_message_received'):
enabled = gajim.config.get_per('soundevents', 'next_message_received',
'enabled')
path = gajim.config.get_per('soundevents', 'next_message_received',
'path')
gajim.config.del_per('soundevents', 'next_message_received')
gajim.config.set_per('soundevents', 'next_message_received_focused',
'enabled', enabled)
gajim.config.set_per('soundevents', 'next_message_received_focused',
'path', path)
        gajim.config.set('version', '0.11.4.2')
def update_config_to_01143(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE IF NOT EXISTS rooms_last_message_time(
jid_id INTEGER PRIMARY KEY UNIQUE,
time INTEGER
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.11.4.3')
def update_config_to_01144(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript('DROP TABLE caps_cache;')
con.commit()
except sqlite.OperationalError:
pass
try:
cur.executescript(
'''
CREATE TABLE caps_cache (
hash_method TEXT,
hash TEXT,
data BLOB
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.11.4.4')
def update_config_to_01201(self):
if 'uri_schemes' in self.old_values:
new_values = self.old_values['uri_schemes'].replace(' mailto', '').\
replace(' xmpp', '')
gajim.config.set('uri_schemes', new_values)
gajim.config.set('version', '0.12.0.1')
def update_config_to_01211(self):
if 'trayicon' in self.old_values:
if self.old_values['trayicon'] == 'False':
gajim.config.set('trayicon', 'never')
else:
gajim.config.set('trayicon', 'always')
gajim.config.set('version', '0.12.1.1')
def update_config_to_01212(self):
for opt in ('ignore_unknown_contacts', 'send_os_info',
'log_encrypted_sessions'):
if opt in self.old_values:
val = self.old_values[opt]
for account in gajim.config.get_per('accounts'):
gajim.config.set_per('accounts', account, opt, val)
gajim.config.set('version', '0.12.1.2')
def update_config_to_01213(self):
msgs = gajim.config.statusmsg_default
for msg_name in gajim.config.get_per('statusmsg'):
if msg_name in msgs:
gajim.config.set_per('statusmsg', msg_name, 'activity',
msgs[msg_name][1])
gajim.config.set_per('statusmsg', msg_name, 'subactivity',
msgs[msg_name][2])
gajim.config.set_per('statusmsg', msg_name, 'activity_text',
msgs[msg_name][3])
gajim.config.set_per('statusmsg', msg_name, 'mood',
msgs[msg_name][4])
gajim.config.set_per('statusmsg', msg_name, 'mood_text',
msgs[msg_name][5])
gajim.config.set('version', '0.12.1.3')
def update_config_to_01214(self):
for status in ['online', 'chat', 'away', 'xa', 'dnd', 'invisible',
'offline']:
if 'last_status_msg_' + status in self.old_values:
gajim.config.add_per('statusmsg', '_last_' + status)
gajim.config.set_per('statusmsg', '_last_' + status, 'message',
self.old_values['last_status_msg_' + status])
gajim.config.set('version', '0.12.1.4')
def update_config_to_01215(self):
"""
Remove hardcoded ../data/sounds from config
"""
dirs = ['../data', gajim.gajimpaths.data_root, gajim.DATA_DIR]
if os.name != 'nt':
dirs.append(os.path.expanduser('~/.gajim'))
for evt in gajim.config.get_per('soundevents'):
path = gajim.config.get_per('soundevents', evt, 'path')
            # both absolute and relative paths need to be stripped
path = helpers.strip_soundfile_path(path, dirs, abs=False)
path = helpers.strip_soundfile_path(path, dirs, abs=True)
gajim.config.set_per('soundevents', evt, 'path', path)
gajim.config.set('version', '0.12.1.5')
def update_config_to_01231(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
CREATE TABLE IF NOT EXISTS roster_entry(
account_jid_id INTEGER,
jid_id INTEGER,
name TEXT,
subscription INTEGER,
ask BOOLEAN,
PRIMARY KEY (account_jid_id, jid_id)
);
CREATE TABLE IF NOT EXISTS roster_group(
account_jid_id INTEGER,
jid_id INTEGER,
group_name TEXT,
PRIMARY KEY (account_jid_id, jid_id, group_name)
);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.12.3.1')
def update_config_from_0125(self):
# All those functions need to be called for 0.12.5 to 0.13 transition
self.update_config_to_01211()
self.update_config_to_01213()
self.update_config_to_01214()
self.update_config_to_01215()
self.update_config_to_01231()
def update_config_to_01251(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
ALTER TABLE unread_messages
ADD shown BOOLEAN default 0;
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.12.5.1')
def update_config_to_01252(self):
if 'alwaysauth' in self.old_values:
val = self.old_values['alwaysauth']
for account in gajim.config.get_per('accounts'):
gajim.config.set_per('accounts', account, 'autoauth', val)
gajim.config.set('version', '0.12.5.2')
def update_config_to_01253(self):
if 'enable_zeroconf' in self.old_values:
val = self.old_values['enable_zeroconf']
for account in gajim.config.get_per('accounts'):
if gajim.config.get_per('accounts', account, 'is_zeroconf'):
gajim.config.set_per('accounts', account, 'active', val)
else:
gajim.config.set_per('accounts', account, 'active', True)
gajim.config.set('version', '0.12.5.3')
def update_config_to_01254(self):
vals = {'inmsgcolor': ['#a34526', '#a40000'],
'outmsgcolor': ['#164e6f', '#3465a4'],
'restored_messages_color': ['grey', '#555753'],
'statusmsgcolor': ['#1eaa1e', '#73d216'],
'urlmsgcolor': ['#0000ff', '#204a87'],
'gc_nicknames_colors': ['#a34526:#c000ff:#0012ff:#388a99:#045723:#7c7c7c:#ff8a00:#94452d:#244b5a:#32645a', '#4e9a06:#f57900:#ce5c00:#3465a4:#204a87:#75507b:#5c3566:#c17d11:#8f5902:#ef2929:#cc0000:#a40000']}
for c in vals:
if c not in self.old_values:
continue
val = self.old_values[c]
if val == vals[c][0]:
# We didn't change default value, so update it with new default
gajim.config.set(c, vals[c][1])
gajim.config.set('version', '0.12.5.4')
def update_config_to_01255(self):
vals = {'statusmsgcolor': ['#73d216', '#4e9a06'],
'outmsgtxtcolor': ['#a2a2a2', '#555753']}
for c in vals:
if c not in self.old_values:
continue
val = self.old_values[c]
if val == vals[c][0]:
# We didn't change default value, so update it with new default
gajim.config.set(c, vals[c][1])
gajim.config.set('version', '0.12.5.5')
def update_config_to_01256(self):
vals = {'gc_nicknames_colors': ['#4e9a06:#f57900:#ce5c00:#3465a4:#204a87:#75507b:#5c3566:#c17d11:#8f5902:#ef2929:#cc0000:#a40000', '#f57900:#ce5c00:#204a87:#75507b:#5c3566:#c17d11:#8f5902:#ef2929:#cc0000:#a40000']}
for c in vals:
if c not in self.old_values:
continue
val = self.old_values[c]
if val == vals[c][0]:
# We didn't change default value, so update it with new default
gajim.config.set(c, vals[c][1])
gajim.config.set('version', '0.12.5.6')
def update_config_to_01257(self):
if 'iconset' in self.old_values:
if self.old_values['iconset'] in ('nuvola', 'crystal', 'gossip',
'simplebulb', 'stellar'):
gajim.config.set('iconset', gajim.config.DEFAULT_ICONSET)
gajim.config.set('version', '0.12.5.7')
def update_config_to_01258(self):
self.update_ft_proxies(to_remove=['proxy65.talkonaut.com',
'proxy.jabber.org', 'proxy.netlab.cz', 'transfer.jabber.freenet.de',
'proxy.jabber.cd.chalmers.se'], to_add=['proxy.eu.jabber.org',
'proxy.jabber.ru', 'proxy.jabbim.cz'])
gajim.config.set('version', '0.12.5.8')
def update_config_to_013100(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
ALTER TABLE caps_cache
ADD last_seen INTEGER default %d;
''' % int(time())
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.13.10.0')
def update_config_to_013101(self):
back = os.getcwd()
os.chdir(logger.LOG_DB_FOLDER)
con = sqlite.connect(logger.LOG_DB_FILE)
os.chdir(back)
cur = con.cursor()
try:
cur.executescript(
'''
DROP INDEX IF EXISTS idx_logs_jid_id_kind;
CREATE INDEX IF NOT EXISTS
idx_logs_jid_id_time ON logs (jid_id, time DESC);
'''
)
con.commit()
except sqlite.OperationalError:
pass
con.close()
gajim.config.set('version', '0.13.10.1')
def update_config_to_013901(self):
schemes = 'aaa:// aaas:// acap:// cap:// cid: crid:// data: dav: dict:// dns: fax: file:/ ftp:// geo: go: gopher:// h323: http:// https:// iax: icap:// im: imap:// info: ipp:// iris: iris.beep: iris.xpc: iris.xpcs: iris.lwz: ldap:// mid: modem: msrp:// msrps:// mtqp:// mupdate:// news: nfs:// nntp:// opaquelocktoken: pop:// pres: prospero:// rtsp:// service: shttp:// sip: sips: sms: snmp:// soap.beep:// soap.beeps:// tag: tel: telnet:// tftp:// thismessage:/ tip:// tv: urn:// vemmi:// xmlrpc.beep:// xmlrpc.beeps:// z39.50r:// z39.50s:// about: apt: cvs:// daap:// ed2k:// feed: fish:// git:// iax2: irc:// ircs:// ldaps:// magnet: mms:// rsync:// ssh:// svn:// sftp:// smb:// webcal://'
gajim.config.set('uri_schemes', schemes)
gajim.config.set('version', '0.13.90.1')
def update_config_to_01401(self):
if 'autodetect_browser_mailer' not in self.old_values or 'openwith' \
not in self.old_values or \
(self.old_values['autodetect_browser_mailer'] == False and \
self.old_values['openwith'] != 'custom'):
gajim.config.set('autodetect_browser_mailer', True)
gajim.config.set('openwith', gajim.config.DEFAULT_OPENWITH)
gajim.config.set('version', '0.14.0.1')
def update_config_to_014900(self):
if 'use_stun_server' in self.old_values and self.old_values[
'use_stun_server'] and not self.old_values['stun_server']:
gajim.config.set('use_stun_server', False)
if os.name == 'nt':
gajim.config.set('autodetect_browser_mailer', True)
| irl/gajim | src/common/optparser.py | Python | gpl-3.0 | 37,112 | ["CRYSTAL"] | b81b782878608ed9a1bea47f3c6194767ba3263a8950ec823d9853ce0a441210 |
from abipy.abilab import Structure
from abiflows.fireworks.workflows.abinit_workflows import DfptFWWorkflow
from abiflows.database.mongoengine.utils import DatabaseData
from abiflows.database.mongoengine.abinit_results import RelaxResult
# data for the database where the relaxed structures were stored
source_db = DatabaseData(host='database_address', port=27017, collection='collection_name',
database='database_name', username='username', password='password')
# data for the database where the phonon results will be stored.
# Note that these can be in the same database or in different ones,
# but the collections should be different.
db = DatabaseData(host='database_address', port=27017, collection='another_collection_name',
database='database_name', username='username', password='password')
# Open the connection to the database
source_db.connect_mongoengine()
# in case you are using multiple workers for the same fireworks db (i.e. different clusters or queues)
# it may be a good idea to set the worker explicitly. One can just get the name from the configuration:
# fworker = FWorker.from_file(os.path.join(os.getenv("HOME"), ".fireworks", "my_fworker.yaml"))
# or you can also just write the name of the fworker explicitly
fworker_name = 'name_of_the_fworker'
mp_id = 'mp-149'
# This context manager is required to use the collection name selected in source_db
# By default mongoengine uses the name of the class (in this case RelaxResult) as
# name of the collection to query.
with source_db.switch_collection(RelaxResult) as RelaxResult:
# download from the database the relaxed structure
# This relies on mongoengine (http://mongoengine.org/) to interact with the database.
# See the module abiflows.database.mongoengine.abinit_results for the objects used to store the results
relaxed_results = RelaxResult.objects(mp_id=mp_id)
# Assume that there is one and only one result matching the query. In real cases you might want to check this.
# At this point it is an instance of a RelaxResult object
relaxed = relaxed_results[0]
# load the relaxed Structure
structure = Structure.from_dict(relaxed.abinit_output.structure)
# use the same k-point sampling as the one of the relax
kppa = relaxed.abinit_input.kppa
ngkpt = relaxed.abinit_input.ngkpt
# The AbinitInput object used for the relax is stored in the database.
# We get it to use the same approximations used during the relaxation.
relax_input = relaxed.abinit_input.last_input.to_mgobj()
# We use the same k and q point grid
qppa = kppa
extra_abivars = dict(chkprim=1, nstep=100, chksymbreak=1)
# As for the relax workflow, this is information that will be stored in the database for the calculation,
# in particular information about the source structure.
initialization_info = dict(kppa=kppa, mp_id=mp_id,
relax_db=source_db.as_dict_no_credentials(), relax_id=relaxed.id,
relax_tol_val=1e-6, qppa=qppa)
# In this case the base is the input file of the relax workflow.
# Use the DfptFWWorkflow, which allows calculating the different kinds of DFPT perturbations
# with abinit in a single workflow. In this case only the phonons.
gen = DfptFWWorkflow.from_gs_input(structure=structure, gs_input=relax_input, extra_abivars=extra_abivars, autoparal=True,
initialization_info=initialization_info, do_ddk=True, do_dde=True, ph_ngqpt=[1,1,1],
do_strain=False)
# add to the workflow a step that automatically adds the results to the database in the collection specified above.
gen.add_mongoengine_db_insertion(db)
# add a step to the workflow that cleans up files with these extensions once the other calculations are completed.
# The list of extensions is customizable and these are usually files that won't be needed again.
# Here we do not delete the DDB files.
gen.add_final_cleanup(["WFK", "1WF", "WFQ", "1POT", "1DEN"])
# This will specify that all the steps will be forced to be executed on the same worker
# and will set the worker to the one chosen before for the existing fireworks. This step is not mandatory.
gen.fix_fworker(fworker_name)
# adds the workflow to the fireworks database. It will use the fireworks LaunchPad that has been set by default.
# If a different one should be used it can be passed as an argument.
gen.add_to_db()
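# Editor's note: once add_to_db() has stored the workflow in the LaunchPad,
# it is executed with the standard FireWorks launchers (e.g. "rlaunch
# rapidfire" directly on a worker, or "qlaunch" through a queue). The exact
# invocation depends on your FireWorks setup; see its documentation.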
| gmatteo/abiflows | abiflows/fireworks/examples/phonon_wf.py | Python | gpl-2.0 | 4,532 | ["ABINIT"] | 2f54324dbd1398df9812a981d1dea2872ee231a72c6814b1d7d784f38c089e8b |
#!/usr/bin/env python
from galaxy import eggs
import sys, string
from rpy import *
import numpy
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
def sscombs(s):
if len(s) == 1:
return [s]
else:
ssc = sscombs(s[1:])
return [s[0]] + [s[0]+comb for comb in ssc] + ssc
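# Editor's note: sscombs enumerates every non-empty ordered subset of the
# characters of s, e.g. sscombs('012') returns
# ['0', '01', '012', '02', '1', '12', '2']; it is used below to build every
# reduced regression model.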
infile = sys.argv[1]
y_col = int(sys.argv[2])-1
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]
print "Predictor columns: %s; Response column: %d" %(x_cols,y_col+1)
fout = open(outfile,'w')
for i, line in enumerate( file ( infile )):
line = line.rstrip('\r\n')
if len( line )>0 and not line.startswith( '#' ):
elems = line.split( '\t' )
break
if i == 30:
break # Hopefully we'll never get here...
if len( elems )<1:
stop_err( "The data in your input dataset is either missing or not formatted properly." )
y_vals = []
x_vals = []
for k,col in enumerate(x_cols):
x_cols[k] = int(col)-1
x_vals.append([])
"""
try:
float( elems[x_cols[k]] )
except:
try:
msg = "This operation cannot be performed on non-numeric column %d containing value '%s'." %( col, elems[x_cols[k]] )
except:
msg = "This operation cannot be performed on non-numeric data."
stop_err( msg )
"""
NA = 'NA'
for ind,line in enumerate( file( infile )):
if line and not line.startswith( '#' ):
try:
fields = line.split("\t")
try:
yval = float(fields[y_col])
except Exception, ey:
yval = r('NA')
#print >>sys.stderr, "ey = %s" %ey
y_vals.append(yval)
for k,col in enumerate(x_cols):
try:
xval = float(fields[col])
except Exception, ex:
xval = r('NA')
#print >>sys.stderr, "ex = %s" %ex
x_vals[k].append(xval)
except:
pass
x_vals1 = numpy.asarray(x_vals).transpose()
dat= r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
try:
full = r.lm(r("y ~ x"), data= r.na_exclude(dat)) #full model includes all the predictor variables specified by the user
except RException, rex:
stop_err("Error performing linear regression on the input data.\nEither the response column or one of the predictor columns contain no numeric values.")
set_default_mode(BASIC_CONVERSION)
summary = r.summary(full)
fullr2 = summary.get('r.squared','NA')
if fullr2 == 'NA':
    stop_err("Error in linear regression")
if len(x_vals) < 10:
s = ""
for ch in range(len(x_vals)):
s += str(ch)
else:
stop_err("This tool only works with less than 10 predictors.")
print >>fout, "#Model\tR-sq\tRCVE_Terms\tRCVE_Value"
all_combos = sorted(sscombs(s), key=len)
all_combos.reverse()
for j,cols in enumerate(all_combos):
#if len(cols) == len(s): #Same as the full model above
# continue
if len(cols) == 1:
x_vals1 = x_vals[int(cols)]
else:
x_v = []
for col in cols:
x_v.append(x_vals[int(col)])
x_vals1 = numpy.asarray(x_v).transpose()
dat= r.list(x=array(x_vals1), y=y_vals)
set_default_mode(NO_CONVERSION)
red = r.lm(r("y ~ x"), data= dat) #Reduced model
set_default_mode(BASIC_CONVERSION)
summary = r.summary(red)
redr2 = summary.get('r.squared','NA')
try:
rcve = (float(fullr2)-float(redr2))/float(fullr2)
except:
rcve = 'NA'
col_str = ""
for col in cols:
col_str = col_str + str(int(x_cols[int(col)]) + 1) + " "
    col_str = col_str.strip()
rcve_col_str = ""
for col in s:
if col not in cols:
rcve_col_str = rcve_col_str + str(int(x_cols[int(col)]) + 1) + " "
    rcve_col_str = rcve_col_str.strip()
if len(cols) == len(s): #full model
rcve_col_str = "-"
rcve = "-"
try:
redr2 = "%.4f" %(float(redr2))
except:
pass
try:
rcve = "%.4f" %(float(rcve))
except:
pass
print >>fout, "%s\t%s\t%s\t%s" %(col_str,redr2,rcve_col_str,rcve)
| volpino/Yeps-EURAC | tools/regVariation/rcve.py | Python | mit | 4,111 | ["Galaxy"] | f0b67f8998c71bcae80b4d9c9f592238529665924380fc8eb01b342156e64306 |
#!/bin/env python
"""
Module simtk.unit.quantity
Physical quantities with units, intended to produce similar functionality
to Boost.Units package in C++ (but with a runtime cost).
Uses similar API as Scientific.Physics.PhysicalQuantities
but different internals to satisfy our local requirements.
In particular, there is no underlying set of 'canonical' base
units, whereas in Scientific.Physics.PhysicalQuantities all
units are secretly in terms of SI units. Also, it is easier
to add new fundamental dimensions to simtk.dimensions. You
might want to make new dimensions for, say, "currency" or
"information".
Some features of this implementation:
* Quantities are a combination of a value and a unit. The value
part can be any python type, including numbers, lists, numpy
arrays, and anything else. The unit part must be a simtk.unit.Unit.
* Operations like adding incompatible units raises an error.
* Multiplying or dividing units/quantities creates new units.
* Users can create new Units and Dimensions, but most of the useful
ones are predefined.
* Conversion factors between units are applied transitively, so all
possible conversions are available.
* I want dimensioned Quantities that are compatible with numpy arrays,
but do not necessarily require the python numpy package. In other
words, Quantities can be based on either numpy arrays or on built in
python types.
* Units are NOT necessarily stored in terms of SI units internally.
This is very important for me, because one important application
area for us is at the molecular scale. Using SI units internally
can lead to exponent overflow in commonly used molecular force
calculations. Internally, all unit systems are equally fundamental
in SimTK.
Two possible enhancements that have not been implemented are
1) Include uncertainties with propagation of errors
2) Incorporate offsets for celsius <-> kelvin conversion
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Peter Eastman
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
__author__ = "Christopher M. Bruns"
__version__ = "0.5"
import math
import copy
from .standard_dimensions import *
from .unit import Unit, is_unit, dimensionless
class Quantity(object):
"""Physical quantity, such as 1.3 meters per second.
Quantities contain both a value, such as 1.3; and a unit,
such as 'meters per second'.
Supported value types include:
1 - numbers (float, int, long)
2 - lists of numbers, e.g. [1,2,3]
3 - tuples of numbers, e.g. (1,2,3)
Note - unit conversions will cause tuples to be converted to lists
4 - lists of tuples of numbers, lists of lists of ... etc. of numbers
5 - numpy.arrays
Create numpy.arrays with units using the Quantity constructor, not the
multiply operator. e.g.
Quantity(numpy.array([1,2,3]), centimeters) # correct
*NOT*
numpy.array([1,2,3]) * centimeters # won't work
because numpy.arrays already overload the multiply operator for EVERYTHING.
"""
def __init__(self, value=None, unit=None):
"""
Create a new Quantity from a value and a unit.
Parameters
- value: (any type, usually a number) Measure of this quantity
- unit: (Unit) the physical unit, e.g. simtk.unit.meters.
"""
# When no unit is specified, bend over backwards to handle all one-argument possibilities
        if unit is None: # one argument version, copied from UList
if is_unit(value):
# Unit argument creates an empty list with that unit attached
unit = value
value = []
elif is_quantity(value):
            # A Quantity argument is just the Quantity itself
unit = value.unit
value = value._value
elif _is_string(value):
unit = dimensionless
else:
# Is value a container?
is_container = True
try:
i = iter(value)
except TypeError:
is_container = False
if is_container:
if len(value) < 1:
unit = dimensionless
else:
first_item = iter(value).next()
# Avoid infinite recursion for string, because a one-character
# string is its own first element
if value == first_item:
unit = dimensionless
else:
unit = Quantity(first_item).unit
# Notice that tuples, lists, and numpy.arrays can all be initialized with a list
new_container = Quantity([], unit)
for item in value:
new_container.append(Quantity(item)) # Strips off units into list new_container._value
# __class__ trick does not work for numpy.arrays
try:
import numpy
if isinstance(value, numpy.ndarray):
value = numpy.array(new_container._value)
else:
                        # delegate construction to container class from list
value = value.__class__(new_container._value)
except ImportError:
                    # delegate construction to container class from list
value = value.__class__(new_container._value)
else:
# Non-Quantity, non container
# Wrap in a dimensionless Quantity
unit = dimensionless
# Accept simple scalar quantities as units
if is_quantity(unit):
value = value * unit._value
unit = unit.unit
# Use empty list for unspecified values
        if value is None:
value = []
self._value = value
self.unit = unit
def __getstate__(self):
state = dict()
state['_value'] = self._value
state['unit'] = self.unit
return state
def __setstate__(self, state):
self._value = state['_value']
self.unit = state['unit']
return
def __copy__(self):
"""
Shallow copy produces a new Quantity with the shallow copy of value and the same unit.
Because we want copy operations to work just the same way they would on the underlying value.
"""
return Quantity(copy.copy(self._value), self.unit)
def __deepcopy__(self, memo):
"""
Deep copy produces a new Quantity with a deep copy of the value, and the same unit.
Because we want copy operations to work just the same way they would on the underlying value.
"""
return Quantity(copy.deepcopy(self._value, memo), self.unit)
def __getattr__(self, attribute):
"""
Delegate unrecognized attribute calls to the underlying value type.
"""
ret_val = getattr(self._value, attribute)
return ret_val
def __str__(self):
"""Printable string version of this Quantity.
Returns a string consisting of quantity number followed by unit abbreviation.
"""
return str(self._value) + ' ' + str(self.unit.get_symbol())
def __repr__(self):
"""
"""
return (Quantity.__name__ + '(value=' + repr(self._value) + ', unit=' +
str(self.unit) + ')')
def format(self, format_spec):
return format_spec % self._value + ' ' + str(self.unit.get_symbol())
def __add__(self, other):
"""Add two Quantities.
Only Quantities with the same dimensions (e.g. length)
can be added. Raises TypeError otherwise.
Parameters
- self: left hand member of sum
- other: right hand member of sum
Returns a new Quantity that is the sum of the two arguments.
"""
# can only add using like units
if not self.unit.is_compatible(other.unit):
raise TypeError('Cannot add two quantities with incompatible units "%s" and "%s".' % (self.unit, other.unit))
value = self._value + other.value_in_unit(self.unit)
unit = self.unit
return Quantity(value, unit)
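    # Editor's sketch (assumes the meters/centimeters units defined elsewhere
    # in simtk.unit):
    #   Quantity(1.0, meters) + Quantity(30.0, centimeters)
    # converts the right-hand side into meters and yields
    # Quantity(value=1.3, unit=meter).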
def __sub__(self, other):
"""Subtract two Quantities.
Only Quantities with the same dimensions (e.g. length)
can be subtracted. Raises TypeError otherwise.
Parameters
- self: left hand member (a) of a - b.
- other: right hand member (b) of a - b.
Returns a new Quantity that is the difference of the two arguments.
"""
if not self.unit.is_compatible(other.unit):
raise TypeError('Cannot subtract two quantities with incompatible units "%s" and "%s".' % (self.unit, other.unit))
value = self._value - other.value_in_unit(self.unit)
unit = self.unit
return Quantity(value, unit)
def __eq__(self, other):
"""
"""
if not is_quantity(other):
return False
if not self.unit.is_compatible(other.unit):
return False
return self.value_in_unit(other.unit) == other._value
def __ne__(self, other):
"""
"""
return not self.__eq__(other)
def __lt__(self, other):
"""Compares two quantities.
Raises TypeError if the Quantities are of different dimension (e.g. length vs. mass)
Returns True if self < other, False otherwise.
"""
return self._value < other.value_in_unit(self.unit)
def __ge__(self, other):
return self._value >= (other.value_in_unit(self.unit))
def __gt__(self, other):
return self._value > (other.value_in_unit(self.unit))
def __le__(self, other):
return self._value <= (other.value_in_unit(self.unit))
_reduce_cache = {}
def reduce_unit(self, guide_unit=None):
"""
Combine similar component units and scale, to form an
equal Quantity in simpler units.
Returns underlying value type if unit is dimensionless.
"""
key = (self.unit, guide_unit)
if key in Quantity._reduce_cache:
(unit, value_factor) = Quantity._reduce_cache[key]
else:
value_factor = 1.0
canonical_units = {} # dict of dimensionTuple: (Base/ScaledUnit, exponent)
# Bias result toward guide units
            if guide_unit is not None:
for u, exponent in guide_unit.iter_base_or_scaled_units():
d = u.get_dimension_tuple()
if d not in canonical_units:
canonical_units[d] = [u, 0]
for u, exponent in self.unit.iter_base_or_scaled_units():
d = u.get_dimension_tuple()
# Take first unit found in a dimension as canonical
if d not in canonical_units:
canonical_units[d] = [u, exponent]
else:
value_factor *= (u.conversion_factor_to(canonical_units[d][0])**exponent)
canonical_units[d][1] += exponent
new_base_units = {}
for d in canonical_units:
u, exponent = canonical_units[d]
if exponent != 0:
assert u not in new_base_units
new_base_units[u] = exponent
# Create new unit
if len(new_base_units) == 0:
unit = dimensionless
else:
unit = Unit(new_base_units)
# There might be a factor due to unit conversion, even though unit is dimensionless
# e.g. suppose unit is meter/centimeter
if unit.is_dimensionless():
unit_factor = unit.conversion_factor_to(dimensionless)
if unit_factor != 1.0:
value_factor *= unit_factor
# print "value_factor = %s" % value_factor
unit = dimensionless
Quantity._reduce_cache[key] = (unit, value_factor)
# Create Quantity, then scale (in case value is a container)
# That's why we don't just scale the value.
result = Quantity(self._value, unit)
if value_factor != 1.0:
# __mul__ strips off dimensionless, if appropriate
result = result * value_factor
if unit.is_dimensionless():
assert unit is dimensionless # should have been set earlier in this method
if is_quantity(result):
result = result._value
return result
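    # Editor's sketch (assumes the meter/centimeter units defined elsewhere
    # in simtk.unit): reduce_unit() folds redundant dimensions and strips a
    # fully cancelled unit down to the bare value, e.g.
    #   Quantity(2.0, meter / centimeter).reduce_unit()  # -> 200.0
    # because meter/centimeter is dimensionless with a conversion factor of 100.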
def __mul__(self, other):
"""Multiply a quantity by another object
Returns a new Quantity that is the product of the self * other,
unless the resulting unit is dimensionless, in which case the
underlying value type is returned, instead of a Quantity.
"""
if is_unit(other):
# print "quantity * unit"
# Many other mul/div operations delegate to here because I was debugging
# a dimensionless unit conversion problem, which I ended up fixing within
# the reduce_unit() method.
unit = self.unit * other
return Quantity(self._value, unit).reduce_unit(self.unit)
elif is_quantity(other):
# print "quantity * quantity"
# Situations where the units cancel can result in scale factors from the unit cancellation.
# To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit
return (self * other._value) * other.unit
else:
# print "quantity * scalar"
return self._change_units_with_factor(self.unit, other, post_multiply=False)
# value type might not be commutative for multiplication
def __rmul__(self, other):
"""Multiply a scalar by a Quantity
Returns a new Quantity with the same units as self, but with the value
multiplied by other.
"""
if is_unit(other):
raise NotImplementedError('programmer is surprised __rmul__ was called instead of __mul__')
# print "R unit * quantity"
elif is_quantity(other):
# print "R quantity * quantity"
raise NotImplementedError('programmer is surprised __rmul__ was called instead of __mul__')
else:
# print "scalar * quantity"
return self._change_units_with_factor(self.unit, other, post_multiply=True)
# return Quantity(other * self._value, self.unit)
def __truediv__(self, other):
"""Divide a Quantity by another object
Returns a new Quantity, unless the resulting unit type is dimensionless,
in which case the underlying value type is returned.
"""
if is_unit(other):
# print "quantity / unit"
return self * pow(other, -1.0)
# unit = self.unit / other
# return Quantity(self._value, unit).reduce_unit(self.unit)
elif is_quantity(other):
# print "quantity / quantity"
# Delegate quantity/quantity to (quantity/scalar)/unit
return (self/other._value) / other.unit
else:
# print "quantity / scalar"
return self * pow(other, -1.0)
# return Quantity(self._value / other, self.unit)
__div__ = __truediv__
def __rtruediv__(self, other):
"""Divide a scalar by a quantity.
Returns a new Quantity. The resulting units are the inverse of the self argument units.
"""
if is_unit(other):
# print "R unit / quantity"
raise NotImplementedError('programmer is surprised __rtruediv__ was called instead of __truediv__')
elif is_quantity(other):
raise NotImplementedError('programmer is surprised __rtruediv__ was called instead of __truediv__')
else:
# print "R scalar / quantity"
return other * pow(self, -1.0)
# return Quantity(other / self._value, pow(self.unit, -1.0))
__rdiv__ = __rtruediv__
def __pow__(self, exponent):
"""Raise a Quantity to a power.
Generally both the value and the unit of the Quantity are affected by this operation.
Returns a new Quantity equal to self**exponent.
"""
return Quantity(pow(self._value, exponent), pow(self.unit, exponent))
def sqrt(self):
"""
Returns square root of a Quantity.
Raises ArithmeticError if component exponents are not even.
This behavior can be changed if you present a reasonable real life case to me.
"""
# There might be a conversion factor from taking the square root of the unit
new_value = math.sqrt(self._value)
new_unit = self.unit.sqrt()
unit_factor = self.unit.conversion_factor_to(new_unit*new_unit)
if unit_factor != 1.0:
new_value *= math.sqrt(unit_factor)
return Quantity(value=new_value, unit=new_unit)
def __abs__(self):
"""
Return absolute value of a Quantity.
The unit is unchanged. A negative value of self will result in a positive value
in the result.
"""
return Quantity(abs(self._value), self.unit)
def __pos__(self):
"""
        Returns a new Quantity with the same value and units (unary plus).
"""
return Quantity(+(self._value), self.unit)
def __neg__(self):
"""Negate a Quantity.
Returns a new Quantity with a different sign on the value.
"""
return Quantity(-(self._value), self.unit)
def __nonzero__(self):
"""Returns True if value underlying Quantity is zero, False otherwise.
"""
return bool(self._value)
    # NOTE: these conversion hooks return a Quantity with a converted
    # underlying value rather than a plain number, so the builtins
    # complex()/float()/int() do not work directly on a Quantity.
    def __complex__(self):
return Quantity(complex(self._value), self.unit)
def __float__(self):
return Quantity(float(self._value), self.unit)
def __int__(self):
return Quantity(int(self._value), self.unit)
def __long__(self):
return Quantity(int(self._value), self.unit)
def value_in_unit(self, unit):
"""
Returns underlying value, in the specified units.
"""
val = self.in_units_of(unit)
if is_quantity(val):
return val._value
else: # naked dimensionless
return val
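    # Sketch, not in the original: value_in_unit() converts and then strips
    # the Quantity wrapper.  Assuming meter and centimeter units exist in
    # this package:
    #
    #     Quantity(1.0, meter).value_in_unit(centimeter)   # -> 100.0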
def value_in_unit_system(self, system):
"""
Returns the underlying value type, after conversion to a particular unit system.
"""
result = self.in_unit_system(system)
if is_quantity(result):
return result._value
else:
return result # dimensionless
def in_unit_system(self, system):
"""
Returns a new Quantity equal to this one, expressed in a particular unit system.
"""
new_units = system.express_unit(self.unit)
f = self.unit.conversion_factor_to(new_units)
return self._change_units_with_factor(new_units, f)
def in_units_of(self, other_unit):
"""
Returns an equal Quantity expressed in different units.
If the units are the same as those in self, a reference to self is returned.
Raises a TypeError if the new unit is not compatible with the original unit.
"""
if not self.unit.is_compatible(other_unit):
raise TypeError('Unit "%s" is not compatible with Unit "%s".' % (self.unit, other_unit))
f = self.unit.conversion_factor_to(other_unit)
return self._change_units_with_factor(other_unit, f)
def _change_units_with_factor(self, new_unit, factor, post_multiply=True):
# numpy arrays cannot be compared with 1.0, so just "try"
factor_is_identity = False
try:
if (factor == 1.0):
factor_is_identity = True
except ValueError:
pass
if factor_is_identity:
# No multiplication required
if (self.unit is new_unit):
result = self
else:
result = Quantity(self._value, new_unit)
else:
try:
# multiply operator, if it exists, is preferred
if post_multiply:
value = self._value * factor # works for number, numpy.array, or vec3, e.g.
else:
value = factor * self._value # works for number, numpy.array, or vec3, e.g.
result = Quantity(value, new_unit)
except TypeError:
                # list * float raises TypeError, so self._value is
                # presumably a sequence type; take a shallow copy
                value = self._value[:]
                # Probe mutability: immutable sequences such as tuples
                # raise TypeError on item assignment
                try:
                    value[0] = value[0]
                except TypeError:
                    # Immutable; convert to a mutable list
                    value = list(self._value)
result = Quantity(self._scale_sequence(value, factor, post_multiply), new_unit)
if (new_unit.is_dimensionless()):
return result._value
else:
return result
def _scale_sequence(self, value, factor, post_multiply):
try:
if post_multiply:
if isinstance(self._value, tuple):
value = tuple([x*factor for x in value])
else:
for i in range(len(value)):
value[i] = value[i]*factor
else:
if isinstance(self._value, tuple):
value = tuple([factor*x for x in value])
else:
for i in range(len(value)):
value[i] = factor*value[i]
except TypeError as ex:
for i in range(len(value)):
value[i] = self._scale_sequence(value[i], factor, post_multiply)
return value
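    # Sketch, not in the original: _scale_sequence() recurses through nested
    # sequences, so scaling [[1, 2], [3, 4]] by 2.0 with post_multiply=True
    # mutates the inner lists in place to [[2.0, 4.0], [6.0, 8.0]].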
####################################
### Sequence methods of Quantity ###
### in case value is a sequence ###
####################################
def __len__(self):
"""
Return size of internal value type.
"""
return len(self._value)
def __getitem__(self, key):
"""
Keep the same units on contained elements.
"""
assert not is_quantity(self._value[key])
return Quantity(self._value[key], self.unit)
def __setitem__(self, key, value):
# Delegate slices to one-at-a time ___setitem___
if isinstance(key, slice): # slice
indices = key.indices(len(self))
for i in range(*indices):
self[i] = value[i]
else: # single index
# Check unit compatibility
if self.unit.is_dimensionless() and is_dimensionless(value):
pass # OK
elif not self.unit.is_compatible(value.unit):
raise TypeError('Unit "%s" is not compatible with Unit "%s".' % (self.unit, value.unit))
self._value[key] = value / self.unit
assert not is_quantity(self._value[key])
def __delitem__(self, key):
del(self._value[key])
def __contains__(self, item):
return self._value.__contains__(item.value_in_unit(self.unit))
def __iter__(self):
for item in self._value:
yield Quantity(item, self.unit)
def count(self, item):
return self._value.count(item.value_in_unit(self.unit))
def index(self, item):
return self._value.index(item.value_in_unit(self.unit))
def append(self, item):
if is_quantity(item):
return self._value.append(item.value_in_unit(self.unit))
elif is_dimensionless(self.unit):
return self._value.append(item)
else:
raise TypeError("Cannot append item without units into list with units")
def extend(self, rhs):
self._value.extend(rhs.value_in_unit(self.unit))
def insert(self, index, item):
self._value.insert(index, item.value_in_unit(self.unit))
def remove(self, item):
self._value.remove(item)
def pop(self, *args):
return self._value.pop(*args) * self.unit
# list.reverse will automatically delegate correctly
# list.sort with no arguments will delegate correctly
# list.sort with a comparison function cannot be done correctly
def is_quantity(x):
"""
Returns True if x is a Quantity, False otherwise.
"""
return isinstance(x, Quantity)
def is_dimensionless(x):
"""
"""
if is_unit(x):
return x.is_dimensionless()
elif is_quantity(x):
return x.unit.is_dimensionless()
else:
# everything else in the universe is dimensionless
return True
# Strings can cause trouble
# as can any container that has infinite levels of containment
def _is_string(x):
    # A string is a container whose items are themselves containers:
    # each item of a string is a one-character string.
if isinstance(x, str):
return True
try:
first_item = iter(x).next()
inner_item = iter(first_item).next()
if first_item is inner_item:
return True
else:
return False
except TypeError:
return False
except StopIteration:
return False
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
marscher/mdtraj
|
MDTraj/utils/unit/quantity.py
|
Python
|
lgpl-2.1
| 27,812
|
[
"OpenMM"
] |
898e1e8fe346f0b46caea780734ed1c741669550cd9f107a75252fcba25e4c8f
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Patch the sequences of one assembly using sequences from another assembly. This
is tested on merging the medicago WGS assembly with the clone-by-clone assembly.
There are a few techniques used in curating the medicago assembly:
1. Split chimeric scaffolds based on genetic map and then refine breakpoints
2. Create patchers by mix-and-match guided by optical map
3. Find gaps and fill N's using alternative assembly
4. Add telomeric sequences
5. Find gaps in optical map
6. Insert unplaced scaffolds using mates
"""
import sys
import math
import logging
from itertools import groupby
from collections import defaultdict
from jcvi.formats.bed import Bed, BedLine, complementBed, mergeBed, \
fastaFromBed, summary
from jcvi.formats.blast import BlastSlow
from jcvi.formats.sizes import Sizes
from jcvi.utils.range import range_parse, range_distance, ranges_depth, \
range_minmax, range_overlap, range_merge, range_closest, \
range_interleave
from jcvi.utils.iter import roundrobin
from jcvi.formats.base import FileMerger, FileShredder
from jcvi.apps.base import OptionParser, ActionDispatcher, sh
def main():
actions = (
# OM guided approach
('refine', 'find gaps within or near breakpoint regions'),
('patcher', 'given om alignment, prepare the patchers'),
# Gap filling through sequence matching
('fill', 'perform gap filling using one assembly vs the other'),
('install', 'install patches into backbone'),
# Placement through mates and manual insertions and deletions
('bambus', 'find candidate scaffolds to insert based on mates'),
('insert', 'insert scaffolds into assembly'),
('eject', 'eject scaffolds from assembly'),
('closest', 'find the nearest gaps flanking suggested regions'),
# Misc
('tips', 'append telomeric sequences based on patchers and complements'),
('gaps', 'create patches around OM gaps'),
# Touch-up
('pasteprepare', 'prepare sequences for paste'),
('paste', 'paste in good sequences in the final assembly'),
('pastegenes', 'paste in zero or low coverage genes'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
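# Usage sketch (an assumption, based on the ActionDispatcher pattern above):
#
#     python -m jcvi.assembly.patch refine breakpoints.bed gaps.bed
#     python -m jcvi.assembly.patch fill gaps.bed bad.fasta --extend=2000
#
# Each action's docstring below documents its own %prog arguments.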
def pastegenes(args):
"""
%prog pastegenes coverage.list old.genes.bed new.genes.bed old.assembly
Paste in zero or low coverage genes. For a set of neighboring genes
missing, add the whole cassette as unplaced scaffolds. For singletons the
program will try to make a patch.
"""
from jcvi.formats.base import DictFile
from jcvi.utils.cbook import gene_name
p = OptionParser(pastegenes.__doc__)
p.add_option("--cutoff", default=90, type="int",
help="Coverage cutoff to call gene missing [default: %default]")
p.add_option("--flank", default=2000, type="int",
help="Get the seq of size on two ends [default: %default]")
p.add_option("--maxsize", default=50000, type="int",
help="Maximum size of patchers to be replaced [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
coveragefile, oldbed, newbed, oldassembly = args
cutoff = opts.cutoff
flank = opts.flank
maxsize = opts.maxsize
coverage = DictFile(coveragefile, valuepos=2, cast=float)
obed = Bed(oldbed)
order = obed.order
bed = [x for x in obed if x.accn in coverage]
key = lambda x: coverage[x.accn] >= cutoff
extrabed = "extra.bed"
extendbed = "extend.bed"
pastebed = "paste.bed"
fw = open(extrabed, "w")
fwe = open(extendbed, "w")
fwp = open(pastebed, "w")
fw_ids = open(extendbed + ".ids", "w")
singletons, large, large_genes = 0, 0, 0
for chr, chrbed in groupby(bed, key=lambda x: x.seqid):
chrbed = list(chrbed)
for good, beds in groupby(chrbed, key=key):
if good:
continue
beds = list(beds)
blocksize = len(set([gene_name(x.accn) for x in beds]))
if blocksize == 1:
singletons += 1
accn = beds[0].accn
gi, gb = order[accn]
leftb = obed[gi - 1]
rightb = obed[gi + 1]
leftr = leftb.range
rightr = rightb.range
cur = gb.range
distance_to_left, oo = range_distance(leftr, cur)
distance_to_right, oo = range_distance(cur, rightr)
span, oo = range_distance(leftr, rightr)
if distance_to_left <= distance_to_right and \
distance_to_left > 0:
label = "LEFT"
else:
label = "RIGHT"
if 0 < span <= maxsize:
print >> fwp, "\t".join(str(x) for x in \
(chr, leftb.start, rightb.end, gb.accn))
print >> fwe, leftb
print >> fwe, gb
print >> fwe, rightb
print >> fwe, "L:{0} R:{1} [{2}]".format(distance_to_left, \
distance_to_right, label)
print >> fw_ids, gb.accn
continue
large += 1
large_genes += blocksize
ranges = [(x.start, x.end) for x in beds]
rmin, rmax = range_minmax(ranges)
rmin -= flank
rmax += flank
name = "-".join((beds[0].accn, beds[-1].accn))
print >> fw, "\t".join(str(x) for x in (chr, rmin - 1, rmax, name))
fw.close()
fwe.close()
extrabed = mergeBed(extrabed, d=flank, nms=True)
fastaFromBed(extrabed, oldassembly, name=True)
summary([extrabed])
logging.debug("Singleton blocks : {0}".format(singletons))
logging.debug("Large blocks : {0} ({1} genes)".format(large, large_genes))
def pasteprepare(args):
"""
%prog pasteprepare bacs.fasta
Prepare sequences for paste.
"""
p = OptionParser(pasteprepare.__doc__)
p.add_option("--flank", default=5000, type="int",
help="Get the seq of size on two ends [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
goodfasta, = args
flank = opts.flank
pf = goodfasta.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
sizes = Sizes(goodfasta)
fw = open(extbed, "w")
for bac, size in sizes.iter_sizes():
print >> fw, "\t".join(str(x) for x in \
(bac, 0, min(flank, size), bac + "L"))
print >> fw, "\t".join(str(x) for x in \
(bac, max(size - flank, 0), size, bac + "R"))
fw.close()
fastaFromBed(extbed, goodfasta, name=True)
def paste(args):
"""
%prog paste flanks.bed flanks_vs_assembly.blast backbone.fasta
Paste in good sequences in the final assembly.
"""
from jcvi.formats.bed import uniq
p = OptionParser(paste.__doc__)
p.add_option("--maxsize", default=300000, type="int",
help="Maximum size of patchers to be replaced [default: %default]")
p.add_option("--prefix", help="Prefix of the new object [default: %default]")
p.set_rclip(rclip=1)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
pbed, blastfile, bbfasta = args
maxsize = opts.maxsize # Max DNA size to replace gap
order = Bed(pbed).order
beforebed, afterbed = blast_to_twobeds(blastfile, order, log=True,
rclip=opts.rclip, maxsize=maxsize,
flipbeds=True)
beforebed = uniq([beforebed])
afbed = Bed(beforebed)
bfbed = Bed(afterbed)
shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
def eject(args):
"""
%prog eject candidates.bed chr.fasta
Eject scaffolds from assembly, using the range identified by closest().
"""
p = OptionParser(eject.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
candidates, chrfasta = args
sizesfile = Sizes(chrfasta).filename
cbedfile = complementBed(candidates, sizesfile)
cbed = Bed(cbedfile)
for b in cbed:
b.accn = b.seqid
b.score = 1000
b.strand = '+'
cbed.print_to_file()
def closest(args):
"""
%prog closest candidates.bed gaps.bed fastafile
Identify the nearest gaps flanking suggested regions.
"""
p = OptionParser(closest.__doc__)
p.add_option("--om", default=False, action="store_true",
help="The bedfile is OM blocks [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
candidates, gapsbed, fastafile = args
sizes = Sizes(fastafile).mapping
bed = Bed(candidates)
ranges = []
for b in bed:
r = range_parse(b.accn) if opts.om else b
ranges.append([r.seqid, r.start, r.end])
gapsbed = Bed(gapsbed)
granges = [(x.seqid, x.start, x.end) for x in gapsbed]
ranges = range_merge(ranges)
for r in ranges:
a = range_closest(granges, r)
b = range_closest(granges, r, left=False)
seqid = r[0]
if a is not None and a[0] != seqid:
a = None
if b is not None and b[0] != seqid:
b = None
mmin = 1 if a is None else a[1]
mmax = sizes[seqid] if b is None else b[2]
print "\t".join(str(x) for x in (seqid, mmin - 1, mmax))
def insert(args):
"""
%prog insert candidates.bed gaps.bed chrs.fasta unplaced.fasta
Insert scaffolds into assembly.
"""
from jcvi.formats.agp import mask, bed
from jcvi.formats.sizes import agp
p = OptionParser(insert.__doc__)
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
candidates, gapsbed, chrfasta, unplacedfasta = args
refinedbed = refine([candidates, gapsbed])
sizes = Sizes(unplacedfasta).mapping
cbed = Bed(candidates)
corder = cbed.order
gbed = Bed(gapsbed)
gorder = gbed.order
gpbed = Bed()
gappositions = {} # (chr, start, end) => gapid
fp = open(refinedbed)
gap_to_scf = defaultdict(list)
seen = set()
for row in fp:
atoms = row.split()
unplaced = atoms[3]
strand = atoms[5]
gapid = atoms[9]
if gapid not in seen:
seen.add(gapid)
gi, gb = gorder[gapid]
gpbed.append(gb)
gappositions[(gb.seqid, gb.start, gb.end)] = gapid
gap_to_scf[gapid].append((unplaced, strand))
gpbedfile = "candidate.gaps.bed"
gpbed.print_to_file(gpbedfile, sorted=True)
agpfile = agp([chrfasta])
maskedagpfile = mask([agpfile, gpbedfile])
maskedbedfile = maskedagpfile.rsplit(".", 1)[0] + ".bed"
bed([maskedagpfile, "--outfile={0}".format(maskedbedfile)])
mbed = Bed(maskedbedfile)
beds = []
for b in mbed:
sid = b.seqid
key = (sid, b.start, b.end)
if key not in gappositions:
beds.append(b)
continue
gapid = gappositions[key]
scfs = gap_to_scf[gapid]
# For scaffolds placed in the same gap, sort according to positions
scfs.sort(key=lambda x: corder[x[0]][1].start + corder[x[0]][1].end)
for scf, strand in scfs:
size = sizes[scf]
beds.append(BedLine("\t".join(str(x) for x in \
(scf, 0, size, sid, 1000, strand))))
finalbed = Bed()
finalbed.extend(beds)
finalbedfile = "final.bed"
finalbed.print_to_file(finalbedfile)
# Clean-up
toclean = [gpbedfile, agpfile, maskedagpfile, maskedbedfile]
FileShredder(toclean)
def bambus(args):
"""
%prog bambus bambus.bed bambus.mates total.fasta
Insert unplaced scaffolds based on mates.
"""
from jcvi.utils.iter import pairwise
from jcvi.formats.posmap import MatesFile
p = OptionParser(bambus.__doc__)
p.add_option("--prefix", default="scaffold",
help="Prefix of the unplaced scaffolds [default: %default]")
p.add_option("--minlinks", default=3, type="int",
help="Minimum number of links to place [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
bedfile, matesfile, fastafile = args
pf = matesfile.rsplit(".", 1)[0]
logfile = pf + ".log"
log = open(logfile, "w")
mf = MatesFile(matesfile)
maxdist = max(x.max for x in mf.libraries.values())
logging.debug("Max separation: {0}".format(maxdist))
prefix = opts.prefix
minlinks = opts.minlinks
is_unplaced = lambda x: x.startswith(prefix)
bed = Bed(bedfile, sorted=False)
beds = []
unplaced = defaultdict(list)
for a, b in pairwise(bed):
aname, bname = a.accn, b.accn
aseqid, bseqid = a.seqid, b.seqid
if aname not in mf:
continue
pa, la = mf[aname]
if pa != bname:
continue
ia = is_unplaced(aseqid)
ib = is_unplaced(bseqid)
if ia == ib:
continue
if ia:
a, b = b, a
unplaced[b.seqid].append((a, b))
beds.extend([a, b])
sizes = Sizes(fastafile)
candidatebed = Bed()
cbeds = []
# For each unplaced scaffold, find most likely placement and orientation
for scf, beds in sorted(unplaced.items()):
print >> log
ranges = []
for a, b in beds:
aname, astrand = a.accn, a.strand
bname, bstrand = b.accn, b.strand
aseqid, bseqid = a.seqid, b.seqid
pa, lib = mf[aname]
print >> log, a
print >> log, b
flip_b = (astrand == bstrand)
fbstrand = '-' if flip_b else '+'
if flip_b:
b.reverse_complement(sizes)
lmin, lmax = lib.min, lib.max
L = sizes.get_size(scf)
assert astrand in ('+', '-')
if astrand == '+':
offset = a.start - b.end
sstart, sstop = offset + lmin, offset + lmax
else:
offset = a.end - b.start + L
sstart, sstop = offset - lmax, offset - lmin
# Prevent out of range error
size = sizes.get_size(aseqid)
sstart = max(0, sstart)
sstop = max(0, sstop)
sstart = min(size - 1, sstart)
sstop = min(size - 1, sstop)
start_range = (aseqid, sstart, sstop, scf, 1, fbstrand)
print >> log, "*" + "\t".join(str(x) for x in start_range)
ranges.append(start_range)
mranges = [x[:3] for x in ranges]
# Determine placement by finding the interval with the most support
rd = ranges_depth(mranges, sizes.mapping, verbose=False)
alldepths = []
for depth in rd:
alldepths.extend(depth)
print >> log, alldepths
maxdepth = max(alldepths, key=lambda x: x[-1])[-1]
if maxdepth < minlinks:
print >> log, "Insufficient links ({0} < {1})".format(maxdepth, minlinks)
continue
candidates = [x for x in alldepths if x[-1] == maxdepth]
nseqids = len(set(x[0] for x in candidates))
msg = "Multiple conflicting candidates found"
if nseqids != 1:
print >> log, msg
continue
seqid, mmin, mmax, depth = candidates[0]
mmin, mmax = range_minmax([x[1:3] for x in candidates])
if (mmax - mmin) > maxdist:
print >> log, msg
continue
# Determine orientation by voting
nplus, nminus = 0, 0
arange = (seqid, mmin, mmax)
for sid, start, end, sf, sc, fbstrand in ranges:
brange = (sid, start, end)
if range_overlap(arange, brange):
if fbstrand == '+':
nplus += 1
else:
nminus += 1
fbstrand = '+' if nplus >= nminus else '-'
candidate = (seqid, mmin, mmax, scf, depth, fbstrand)
bedline = BedLine("\t".join((str(x) for x in candidate)))
cbeds.append(bedline)
print >> log, "Plus: {0}, Minus: {1}".format(nplus, nminus)
print >> log, candidate
candidatebed.extend(cbeds)
logging.debug("A total of {0} scaffolds can be placed.".\
format(len(candidatebed)))
log.close()
candidatebedfile = pf + ".candidate.bed"
candidatebed.print_to_file(candidatebedfile, sorted=True)
def gaps(args):
"""
%prog gaps OM.bed fastafile
Create patches around OM gaps.
"""
from jcvi.formats.bed import uniq
from jcvi.utils.iter import pairwise
p = OptionParser(gaps.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ombed, fastafile = args
ombed = uniq([ombed])
bed = Bed(ombed)
for a, b in pairwise(bed):
om_a = (a.seqid, a.start, a.end, "+")
om_b = (b.seqid, b.start, b.end, "+")
ch_a = range_parse(a.accn)
ch_b = range_parse(b.accn)
ch_a = (ch_a.seqid, ch_a.start, ch_a.end, "+")
ch_b = (ch_b.seqid, ch_b.start, ch_b.end, "+")
om_dist, x = range_distance(om_a, om_b, distmode="ee")
ch_dist, x = range_distance(ch_a, ch_b, distmode="ee")
if om_dist <= 0 and ch_dist <= 0:
continue
print a
print b
print om_dist, ch_dist
def tips(args):
"""
%prog tips patchers.bed complements.bed original.fasta backbone.fasta
Append telomeric sequences based on patchers and complements.
"""
p = OptionParser(tips.__doc__)
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
pbedfile, cbedfile, sizesfile, bbfasta = args
pbed = Bed(pbedfile, sorted=False)
cbed = Bed(cbedfile, sorted=False)
complements = dict()
for object, beds in groupby(cbed, key=lambda x: x.seqid):
beds = list(beds)
complements[object] = beds
sizes = Sizes(sizesfile).mapping
bbsizes = Sizes(bbfasta).mapping
tbeds = []
for object, beds in groupby(pbed, key=lambda x: x.accn):
beds = list(beds)
startbed, endbed = beds[0], beds[-1]
start_id, end_id = startbed.seqid, endbed.seqid
if startbed.start == 1:
start_id = None
if endbed.end == sizes[end_id]:
end_id = None
print >> sys.stderr, object, start_id, end_id
if start_id:
b = complements[start_id][0]
b.accn = object
tbeds.append(b)
tbeds.append(BedLine("\t".join(str(x) for x in \
(object, 0, bbsizes[object], object, 1000, "+"))))
if end_id:
b = complements[end_id][-1]
b.accn = object
tbeds.append(b)
tbed = Bed()
tbed.extend(tbeds)
tbedfile = "tips.bed"
tbed.print_to_file(tbedfile)
def fill(args):
"""
%prog fill gaps.bed bad.fasta
Perform gap filling of one assembly (bad) using sequences from another.
"""
p = OptionParser(fill.__doc__)
p.add_option("--extend", default=2000, type="int",
help="Extend seq flanking the gaps [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gapsbed, badfasta = args
Ext = opts.extend
    gapdist = 2 * Ext + 1  # Prevent adjacent replacement ranges from intersecting
gapsbed = mergeBed(gapsbed, d=gapdist, nms=True)
bed = Bed(gapsbed)
sizes = Sizes(badfasta).mapping
pf = gapsbed.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
fw = open(extbed, "w")
for b in bed:
gapname = b.accn
start, end = max(0, b.start - Ext - 1), b.start - 1
print >> fw, "\t".join(str(x) for x in \
(b.seqid, start, end, gapname + "L"))
start, end = b.end, min(sizes[b.seqid], b.end + Ext)
print >> fw, "\t".join(str(x) for x in \
(b.seqid, start, end, gapname + "R"))
fw.close()
fastaFromBed(extbed, badfasta, name=True)
def blast_to_twobeds(blastfile, order, log=False,
rclip=1, maxsize=300000, flipbeds=False):
abed, bbed = "before.bed", "after.bed"
beforebed, afterbed = abed, bbed
if flipbeds:
beforebed, afterbed = afterbed, beforebed
fwa = open(beforebed, "w")
fwb = open(afterbed, "w")
if log:
logfile = "problems.log"
log = open(logfile, "w")
key1 = lambda x: x.query
    key2 = (lambda x: x.query[:-rclip]) if rclip else key1
data = BlastSlow(blastfile)
OK = "OK"
seen = set()
for pe, lines in groupby(data, key=key2):
label = OK
lines = list(lines)
if len(lines) != 2:
label = "Singleton"
else:
a, b = lines
aquery, bquery = a.query, b.query
asubject, bsubject = a.subject, b.subject
if asubject != bsubject:
label = "Different chr {0}|{1}".format(asubject, bsubject)
else:
astrand, bstrand = a.orientation, b.orientation
assert aquery[-1] == 'L' and bquery[-1] == 'R', str((aquery, bquery))
ai, ax = order[aquery]
bi, bx = order[bquery]
qstart, qstop = ax.start + a.qstart - 1, bx.start + b.qstop - 1
if astrand == '+' and bstrand == '+':
sstart, sstop = a.sstart, b.sstop
elif astrand == '-' and bstrand == '-':
sstart, sstop = b.sstart, a.sstop
                else:
                    sstart = sstop = None
                    label = "Strand {0}|{1}".format(astrand, bstrand)
                if sstart is not None and sstart > sstop:
                    label = "Start beyond stop"
                if sstart is not None and sstop > sstart + maxsize:
                    label = "Stop beyond start plus {0}".format(maxsize)
aquery = lines[0].query
bac_name = aquery[:-1]
seen.add(bac_name)
name = bac_name + "LR"
if label != OK:
if log:
print >> log, "\t".join((name, label))
continue
print >> fwa, "\t".join(str(x) for x in \
(ax.seqid, qstart - 1, qstop, name, 1000, "+"))
print >> fwb, "\t".join(str(x) for x in \
(asubject, sstart - 1, sstop, name, 1000, astrand))
# Missing
if log:
label = "Missing"
for k in order.keys():
k = k[:-1]
if k not in seen:
seen.add(k)
k += "LR"
print >> log, "\t".join((k, label))
log.close()
fwa.close()
fwb.close()
return abed, bbed
def shuffle_twobeds(afbed, bfbed, bbfasta, prefix=None):
# Shuffle the two bedfiles together
sz = Sizes(bbfasta)
sizes = sz.mapping
shuffled = "shuffled.bed"
border = bfbed.order
all = []
afbed.sort(key=afbed.nullkey)
totalids = len(sizes)
pad = int(math.log10(totalids)) + 1
cj = 0
seen = set()
accn = lambda x: "{0}{1:0{2}d}".format(prefix, x, pad)
for seqid, aa in afbed.sub_beds():
cj += 1
abeds, bbeds, beds = [], [], []
size = sizes[seqid]
ranges = [(x.seqid, x.start, x.end) for x in aa]
cranges = range_interleave(ranges, sizes={seqid: size}, empty=True)
for crange in cranges:
if crange:
seqid, start, end = crange
bedline = "\t".join(str(x) for x in (seqid, start - 1, end))
abeds.append(BedLine(bedline))
else:
abeds.append(None)
for a in aa:
gapid = a.accn
bi, b = border[gapid]
if a.strand == '-':
b.extra[1] = b.strand = ('-' if b.strand == '+' else '+')
bbeds.append(b)
n_abeds = len(abeds)
n_bbeds = len(bbeds)
assert n_abeds - n_bbeds == 1, \
"abeds: {0}, bbeds: {1}".format(n_abeds, n_bbeds)
beds = [x for x in roundrobin(abeds, bbeds) if x]
if prefix:
for b in beds:
b.accn = accn(cj)
all.extend(beds)
seen.add(seqid)
# Singletons
for seqid, size in sz.iter_sizes():
if seqid in seen:
continue
bedline = "\t".join(str(x) for x in (seqid, 0, size, accn(cj)))
b = BedLine(bedline)
cj += 1
if prefix:
b.accn = accn(cj)
all.append(b)
shuffledbed = Bed()
shuffledbed.extend(all)
shuffledbed.print_to_file(shuffled)
return shuffledbed
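# Sketch, not in the original: shuffle_twobeds() round-robins the backbone
# intervals (abeds) with the replacement intervals (bbeds); the assertion
# above guarantees the interleaving a0, b0, a1, b1, ..., aN is well formed.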
def install(args):
"""
%prog install patchers.bed patchers.fasta backbone.fasta alt.fasta
Install patches into backbone, using sequences from alternative assembly.
The patches sequences are generated via jcvi.assembly.patch.fill().
The output is a bedfile that can be converted to AGP using
jcvi.formats.agp.frombed().
"""
from jcvi.apps.align import blast
from jcvi.formats.fasta import SeqIO
p = OptionParser(install.__doc__)
p.set_rclip(rclip=1)
p.add_option("--maxsize", default=300000, type="int",
help="Maximum size of patchers to be replaced [default: %default]")
p.add_option("--prefix", help="Prefix of the new object [default: %default]")
p.add_option("--strict", default=False, action="store_true",
help="Only update if replacement has no gaps [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 4:
sys.exit(not p.print_help())
pbed, pfasta, bbfasta, altfasta = args
maxsize = opts.maxsize # Max DNA size to replace gap
rclip = opts.rclip
    blastfile = blast([altfasta, pfasta, "--wordsize=100", "--pctid=99"])
order = Bed(pbed).order
beforebed, afterbed = blast_to_twobeds(blastfile, order, rclip=rclip,
maxsize=maxsize)
beforefasta = fastaFromBed(beforebed, bbfasta, name=True, stranded=True)
afterfasta = fastaFromBed(afterbed, altfasta, name=True, stranded=True)
# Exclude the replacements that contain more Ns than before
ah = SeqIO.parse(beforefasta, "fasta")
bh = SeqIO.parse(afterfasta, "fasta")
count_Ns = lambda x: x.seq.count('n') + x.seq.count('N')
exclude = set()
for arec, brec in zip(ah, bh):
an = count_Ns(arec)
bn = count_Ns(brec)
if opts.strict:
if bn == 0:
continue
elif bn < an:
continue
id = arec.id
exclude.add(id)
logging.debug("Ignore {0} updates because of decreasing quality."\
.format(len(exclude)))
abed = Bed(beforebed, sorted=False)
bbed = Bed(afterbed, sorted=False)
abed = [x for x in abed if x.accn not in exclude]
bbed = [x for x in bbed if x.accn not in exclude]
abedfile = "before.filtered.bed"
bbedfile = "after.filtered.bed"
afbed = Bed()
afbed.extend(abed)
bfbed = Bed()
bfbed.extend(bbed)
afbed.print_to_file(abedfile)
bfbed.print_to_file(bbedfile)
shuffle_twobeds(afbed, bfbed, bbfasta, prefix=opts.prefix)
def refine(args):
"""
%prog refine breakpoints.bed gaps.bed
Find gaps within or near breakpoint region.
"""
p = OptionParser(refine.__doc__)
p.add_option("--closest", default=False, action="store_true",
help="In case of no gaps, use closest [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
breakpointsbed, gapsbed = args
ncols = len(open(breakpointsbed).next().split())
logging.debug("File {0} contains {1} columns.".format(breakpointsbed, ncols))
cmd = "intersectBed -wao -a {0} -b {1}".format(breakpointsbed, gapsbed)
pf = "{0}.{1}".format(breakpointsbed.split(".")[0], gapsbed.split(".")[0])
ingapsbed = pf + ".bed"
sh(cmd, outfile=ingapsbed)
fp = open(ingapsbed)
data = [x.split() for x in fp]
nogapsbed = pf + ".nogaps.bed"
largestgapsbed = pf + ".largestgaps.bed"
nogapsfw = open(nogapsbed, "w")
largestgapsfw = open(largestgapsbed, "w")
for b, gaps in groupby(data, key=lambda x: x[:ncols]):
gaps = list(gaps)
gap = gaps[0]
if len(gaps) == 1 and gap[-1] == "0":
assert gap[-3] == "."
print >> nogapsfw, "\t".join(b)
continue
gaps = [(int(x[-1]), x) for x in gaps]
maxgap = max(gaps)[1]
print >> largestgapsfw, "\t".join(maxgap)
nogapsfw.close()
largestgapsfw.close()
beds = [largestgapsbed]
toclean = [nogapsbed, largestgapsbed]
if opts.closest:
closestgapsbed = pf + ".closestgaps.bed"
cmd = "closestBed -a {0} -b {1} -d".format(nogapsbed, gapsbed)
sh(cmd, outfile=closestgapsbed)
beds += [closestgapsbed]
toclean += [closestgapsbed]
refinedbed = pf + ".refined.bed"
FileMerger(beds, outfile=refinedbed).merge()
# Clean-up
FileShredder(toclean)
return refinedbed
def merge_ranges(beds):
m = [x.accn for x in beds]
mr = [range_parse(x) for x in m]
mc = set(x.seqid for x in mr)
if len(mc) != 1:
logging.error("Multiple seqid found in pocket. Aborted.")
return
mc = list(mc)[0]
ms = min(x.start for x in mr)
me = max(x.end for x in mr)
neg_strands = sum(1 for x in beds if x.strand == '-')
pos_strands = len(beds) - neg_strands
strand = '-' if neg_strands > pos_strands else '+'
return mc, ms, me, strand
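# Sketch, not in the original: merge_ranges() expects BedLines whose accn
# fields parse as ranges (e.g. "chr3:100-200"); three such lines on chr3
# with strands +, +, - merge to (chr3, min_start, max_end, '+') by majority
# strand vote.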
def patcher(args):
"""
%prog patcher backbone.bed other.bed
Given optical map alignment, prepare the patchers. Use --backbone to suggest
which assembly is the major one, and the patchers will be extracted from
another assembly.
"""
from jcvi.formats.bed import uniq
p = OptionParser(patcher.__doc__)
p.add_option("--backbone", default="OM",
help="Prefix of the backbone assembly [default: %default]")
p.add_option("--object", default="object",
help="New object name [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
backbonebed, otherbed = args
backbonebed = uniq([backbonebed])
otherbed = uniq([otherbed])
pf = backbonebed.split(".")[0]
key = lambda x: (x.seqid, x.start, x.end)
# Make a uniq bed keeping backbone at redundant intervals
cmd = "intersectBed -v -wa"
cmd += " -a {0} -b {1}".format(otherbed, backbonebed)
outfile = otherbed.rsplit(".", 1)[0] + ".not." + backbonebed
sh(cmd, outfile=outfile)
uniqbed = Bed()
uniqbedfile = pf + ".merged.bed"
uniqbed.extend(Bed(backbonebed))
uniqbed.extend(Bed(outfile))
uniqbed.print_to_file(uniqbedfile, sorted=True)
# Condense adjacent intervals, allow some chaining
bed = uniqbed
key = lambda x: range_parse(x.accn).seqid
bed_fn = pf + ".patchers.bed"
bed_fw = open(bed_fn, "w")
for k, sb in groupby(bed, key=key):
sb = list(sb)
chr, start, end, strand = merge_ranges(sb)
print >> bed_fw, "\t".join(str(x) for x in \
(chr, start, end, opts.object, 1000, strand))
bed_fw.close()
if __name__ == '__main__':
main()
|
sgordon007/jcvi_062915
|
assembly/patch.py
|
Python
|
bsd-2-clause
| 31,696
|
[
"BLAST"
] |
f09051d1678f0137d93e6a3bcfab3dac73de2c13352385eeec944bea9eeb97a1
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
default_license_tier:
description:
- Specifies the default license tier which would be used by new clouds.
- Enum options - ENTERPRISE_16, ENTERPRISE_18.
- Field introduced in 17.2.5.
- Default value when not specified in API or module is interpreted by Avi Controller as ENTERPRISE_18.
version_added: "2.5"
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
admin_auth_configuration=dict(type='dict',),
default_license_tier=dict(type='str',),
dns_configuration=dict(type='dict',),
dns_virtualservice_refs=dict(type='list',),
docker_mode=dict(type='bool',),
email_configuration=dict(type='dict',),
global_tenant_config=dict(type='dict',),
linux_configuration=dict(type='dict',),
mgmt_ip_access_control=dict(type='dict',),
ntp_configuration=dict(type='dict',),
portal_configuration=dict(type='dict',),
proxy_configuration=dict(type='dict',),
snmp_configuration=dict(type='dict',),
ssh_ciphers=dict(type='list',),
ssh_hmacs=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'systemconfiguration',
set([]))
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/network/avi/avi_systemconfiguration.py
|
Python
|
gpl-3.0
| 6,594
|
[
"VisIt"
] |
50d276a1097e68f53cc48945c0e11afbda04bef222c6cc0b0503bf26fff21a0f
|
from gpaw.transport.analysor import Transport_Plotter
import numpy as np
from pylab import *
import sys
if '*' in sys.argv[1]:
fd=0
nbias = int(sys.argv[1].split('*')[0])
else:
fd=1
nbias = int(sys.argv[1])
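# Usage sketch (an assumption, inferred from the parsing above):
#   python iv.py 5    -> I-V curve over 5 bias points (fd=1)
#   python iv.py 5*   -> same bias count, but fd=0 is passed to the plotter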
plotter = Transport_Plotter(fd)
dense_level=1
plotter.plot_setup()
# sys.argv[1] is required above, so a bias count is always available
bias, current = plotter.iv(nbias)
bias=np.abs(bias)
plot(bias, current, 'r-o')
if dense_level>1:
from scipy import interpolate
tck = interpolate.splrep(bias, current, s=0)
numb = len(bias)
newbias = np.linspace(bias[0], bias[-1], numb * (dense_level))
newcurrent = interpolate.splev(newbias, tck, der=0)
bias=newbias
current = newcurrent
plot(np.abs(bias), current, 'b-o')
xlabel('Bias (V)')
ylabel(r'Current ($\mu$A)')
show()
|
qsnake/gpaw
|
doc/documentation/transport/transport_analysis_scripts/iv.py
|
Python
|
gpl-3.0
| 810
|
[
"GPAW"
] |
439efaa3f1c136e14ed98e021eabd49783b8881e6a16be81033ab6123c34328e
|
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
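# Illustrative sketch, not part of the original module: the kernels defined
# below support kernel-engineering via operators, with scalars promoted to
# ConstantKernel automatically (see Kernel.__add__/__mul__/__pow__), e.g.
#
#     from sklearn.gaussian_process.kernels import RBF
#     k = 2.0 * RBF(length_scale=1.5) + 1.0   # Product and Sum with constants
#     k = k ** 2                               # Exponentiation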
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
from inspect import signature
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..base import clone
from ..utils.validation import _num_samples
import warnings
from sklearn.exceptions import ConvergenceWarning
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : str
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : str
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default=None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
Examples
--------
>>> from sklearn.gaussian_process.kernels import ConstantKernel
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import Hyperparameter
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ConstantKernel(constant_value=1.0,
... constant_value_bounds=(0.0, 10.0))
We can access each hyperparameter:
>>> for hyperparameter in kernel.hyperparameters:
... print(hyperparameter)
Hyperparameter(name='constant_value', value_type='numeric',
bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
>>> params = kernel.get_params()
>>> for key in sorted(params): print(f"{key} : {params[key]}")
constant_value : 1.0
constant_value_bounds : (0.0, 10.0)
"""
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce an __init__ method
    # would reintroduce the __dict__ on the instance, so instead we tell the
    # Python interpreter that this subclass uses static __slots__ rather than
    # dynamic attributes. Furthermore we don't need any additional slot in
    # the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, str) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, str) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
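    # Sketch, not in the original: a single (low, high) pair is broadcast
    # across vector-valued parameters, e.g.
    #     Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)
    # stores bounds of shape (3, 2).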
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
class Kernel(metaclass=ABCMeta):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : ndarray of shape (n_dims,)
The hyperparameters
"""
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = [getattr(self, attr) for attr in dir(self)
if attr.startswith("hyperparameter_")]
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
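    # Sketch, not in the original: theta round-trips through log-space, e.g.
    #     k = RBF(length_scale=1.0)
    #     k.theta                    # -> array([0.])  (== np.log([1.0]))
    #     k.theta = np.log([2.0])    # updates length_scale to 2.0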
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = [hyperparameter.bounds
for hyperparameter in self.hyperparameters
if not hyperparameter.fixed]
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples,)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on fixed-length feature
vectors or generic objects. Defaults to True for backward
compatibility."""
return True
def _check_bounds_params(self):
"""Called after fitting to warn if bounds may have been too tight."""
list_close = np.isclose(self.bounds,
np.atleast_2d(self.theta).T)
idx = 0
for hyp in self.hyperparameters:
if hyp.fixed:
continue
for dim in range(hyp.n_elements):
if list_close[idx, 0]:
warnings.warn("The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified lower "
"bound %s. Decreasing the bound and"
" calling fit again may find a "
"better value." %
(dim, hyp.name, hyp.bounds[dim][0]),
ConvergenceWarning)
elif list_close[idx, 1]:
warnings.warn("The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified upper "
"bound %s. Increasing the bound and"
" calling fit again may find a "
"better value." %
(dim, hyp.name, hyp.bounds[dim][1]),
ConvergenceWarning)
idx += 1
class NormalizedKernelMixin:
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin:
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class GenericKernelMixin:
"""Mixin for kernels which operate on generic objects such as variable-
length sequences, trees, and graphs.
.. versionadded:: 0.22
"""
@property
def requires_vector_input(self):
"""Whether the kernel works only on fixed-length feature vectors."""
return False
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
Parameters
----------
kernels : list of Kernels
The other kernels
Examples
--------
>>> from sklearn.gaussian_process.kernels import WhiteKernel
>>> from sklearn.gaussian_process.kernels import RBF
>>> from sklearn.gaussian_process.kernels import CompoundKernel
>>> kernel = CompoundKernel(
... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
>>> print(kernel.bounds)
[[-11.51292546 11.51292546]
[-11.51292546 11.51292546]]
>>> print(kernel.n_dims)
2
>>> print(kernel.theta)
[1.09861229 0.69314718]
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        k_dims = self.kernels[0].n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple kernels
        stacked along an additional axis.
Parameters
----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_Y, n_features) or list of object, \
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of the
kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : ndarray of shape \
(n_samples_X, n_samples_X, n_dims, n_kernels), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
            return np.dstack(K), np.concatenate(K_grad, axis=3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on discrete structures. """
return np.any([kernel.requires_vector_input
for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
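# Editor's note: a hypothetical demo (not in the original source) showing that
# CompoundKernel stacks each sub-kernel's k(X, X) along a trailing axis of size
# n_kernels, as documented in __call__ above.
def _demo_compound_stacking():
    import numpy as np
    X = np.random.RandomState(0).rand(4, 3)
    kernel = CompoundKernel([WhiteKernel(noise_level=2.0), RBF(length_scale=1.0)])
    K = kernel(X)
    assert K.shape == (4, 4, 2)  # (n_samples_X, n_samples_Y, n_kernels)
    # Each slice along the last axis is one sub-kernel's Gram matrix.
    assert np.allclose(K[..., 1], RBF(length_scale=1.0)(X))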
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = [Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds, hyperparameter.n_elements)
for hyperparameter in self.k1.hyperparameters]
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
@property
def requires_vector_input(self):
"""Returns whether the kernel is stationary. """
return (self.k1.requires_vector_input or
self.k2.requires_vector_input)
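# Editor's note: an illustrative sketch (hypothetical helper, not in the original
# file) of the 'k1__'/'k2__' parameter namespacing that get_params(deep=True)
# produces for kernel operators; Sum is defined immediately below.
def _demo_operator_params():
    kernel = RBF(length_scale=2.0) + WhiteKernel(noise_level=0.5)
    params = kernel.get_params(deep=True)
    # Sub-kernel parameters are addressable under the k1__/k2__ prefixes.
    assert params['k1__length_scale'] == 2.0
    assert params['k2__noise_level'] == 0.5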
class Sum(KernelOperator):
"""The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
and combines them via
.. math::
k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)
Note that the `__add__` magic method is overridden, so
`Sum(RBF(), RBF())` is equivalent to using the + operator
with `RBF() + RBF()`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel
The first base-kernel of the sum-kernel
k2 : Kernel
The second base-kernel of the sum-kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Sum(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 + RBF(length_scale=1)
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
and combines them via
.. math::
k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)
Note that the `__mul__` magic method is overridden, so
`Product(RBF(), RBF())` is equivalent to using the * operator
with `RBF() * RBF()`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel
The first base-kernel of the product-kernel
k2 : Kernel
The second base-kernel of the product-kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RBF, Product,
... ConstantKernel)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Product(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 * RBF(length_scale=1)
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""The Exponentiation kernel takes one base kernel and a scalar parameter
:math:`p` and combines them via
.. math::
        k_{exp}(X, Y) = k(X, Y)^p
Note that the `__pow__` magic method is overridden, so
`Exponentiation(RBF(), 2)` is equivalent to using the ** operator
with `RBF() ** 2`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel
The base kernel
exponent : float
The exponent for the base kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
... Exponentiation)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.419...
>>> gpr.predict(X[:1,:], return_std=True)
(array([635.5...]), array([0.559...]))
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on discrete structures. """
return self.kernel.requires_vector_input
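# Editor's note: an illustrative check (hypothetical helper, not in the original
# module) of the chain rule applied in Exponentiation.__call__ above:
# d(K**p)/dtheta = p * K**(p-1) * dK/dtheta.
def _demo_exponentiation_chain_rule():
    import numpy as np
    X = np.random.RandomState(0).rand(5, 2)
    base = RBF(length_scale=1.5)
    K, K_grad = (base ** 2)(X, eval_gradient=True)
    K_base, K_base_grad = base(X, eval_gradient=True)
    assert np.allclose(K, K_base ** 2)
    assert np.allclose(K_grad, 2 * K_base[..., np.newaxis] * K_base_grad)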
class ConstantKernel(StationaryKernelMixin, GenericKernelMixin,
Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
.. math::
k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2
Adding a constant kernel is equivalent to adding a constant::
kernel = RBF() + ConstantKernel(constant_value=2)
is the same as::
kernel = RBF() + 2
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default=1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on `constant_value`.
If set to "fixed", `constant_value` cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = RBF() + ConstantKernel(constant_value=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3696...
>>> gpr.predict(X[:1,:], return_std=True)
(array([606.1...]), array([0.24...]))
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_Y, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full((_num_samples(X), _num_samples(Y)), self.constant_value,
dtype=np.array(self.constant_value).dtype)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, np.full((_num_samples(X), _num_samples(X), 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype))
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(_num_samples(X), self.constant_value,
dtype=np.array(self.constant_value).dtype)
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, GenericKernelMixin,
Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise of the signal as independently and identically
normally-distributed. The parameter noise_level equals the variance of this
noise.
.. math::
        k(x_i, x_j) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default=1.0
Parameter controlling the noise level (variance)
noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'noise_level'.
If set to "fixed", 'noise_level' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(_num_samples(X))
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(_num_samples(X))[:, :, np.newaxis])
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
else:
return np.zeros((_num_samples(X), _num_samples(Y)))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(_num_samples(X), self.noise_level,
dtype=np.array(self.noise_level).dtype)
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
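# Editor's note: a hypothetical demo (not in the original source) of the
# WhiteKernel behaviour implemented above: noise appears only on the diagonal
# of k(X, X), while cross-covariances k(X, Y) are identically zero.
def _demo_white_kernel_cross():
    import numpy as np
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 2), rng.rand(3, 2)
    kernel = WhiteKernel(noise_level=0.5)
    assert np.allclose(kernel(X), 0.5 * np.eye(4))
    assert np.allclose(kernel(X, Y), np.zeros((4, 3)))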
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length scale
parameter :math:`l>0`, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
.. math::
k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)
where :math:`l` is the length scale of the kernel and
:math:`d(\\cdot,\\cdot)` is the Euclidean distance.
For advice on how to set the length scale parameter, see e.g. [1]_.
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
.. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.8354..., 0.03228..., 0.1322...],
[0.7906..., 0.0652..., 0.1441...]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
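# Editor's note: an illustrative sketch (hypothetical helper, not in the original
# module) contrasting the isotropic and anisotropic RBF variants described in
# the docstring above.
def _demo_rbf_anisotropy():
    import numpy as np
    X = np.random.RandomState(0).rand(5, 2)
    iso = RBF(length_scale=1.0)
    aniso = RBF(length_scale=[1.0, 1.0])
    # Equal per-dimension length scales reproduce the isotropic kernel, but the
    # anisotropic variant exposes one hyperparameter dimension per feature.
    assert np.allclose(iso(X), aniso(X))
    assert iso.n_dims == 1 and aniso.n_dims == 2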
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the :class:`RBF`.
It has an additional parameter :math:`\\nu` which controls the
smoothness of the resulting function. The smaller :math:`\\nu`,
the less smooth the approximated function is.
As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
becomes identical to the absolute exponential kernel.
Important intermediate values are
:math:`\\nu=1.5` (once differentiable functions)
and :math:`\\nu=2.5` (twice differentiable functions).
The kernel is given by:
.. math::
k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
\\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )
\\Bigg)^\\nu K_\\nu\\Bigg(
\\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg)
where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
:math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
:math:`\\Gamma(\\cdot)` is the gamma function.
See [1]_, Chapter 4, Section 4.2, for details regarding the different
variants of the Matern kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
nu : float, default=1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import Matern
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.8513..., 0.0368..., 0.1117...],
[0.8086..., 0.0693..., 0.1220...]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super().__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-dists ** 2 / 2.0)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
K_gradient = K[..., np.newaxis] * \
np.divide(D, denominator, where=denominator != 0)
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
                return K, K_gradient.sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
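# Editor's note: a minimal check (not part of the original file) of the limiting
# cases named in the Matern docstring: nu=inf recovers the RBF kernel and
# nu=0.5 the absolute exponential kernel.
def _demo_matern_limits():
    import numpy as np
    from scipy.spatial.distance import pdist, squareform
    X = np.random.RandomState(0).rand(5, 2)
    assert np.allclose(Matern(nu=np.inf)(X), RBF()(X))
    d = squareform(pdist(X, metric='euclidean'))
    assert np.allclose(Matern(nu=0.5)(X), np.exp(-d))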
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length scales. It is
parameterized by a length scale parameter :math:`l>0` and a scale
mixture parameter :math:`\\alpha>0`. Only the isotropic variant
where length_scale :math:`l` is a scalar is supported at the moment.
The kernel is given by:
.. math::
k(x_i, x_j) = \\left(
1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha}
where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
Euclidean distance.
For advice on how to set the parameters, see e.g. [1]_.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default=1.0
The length scale of the kernel.
alpha : float > 0, default=1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'alpha'.
If set to "fixed", 'alpha' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RationalQuadratic
>>> X, y = load_iris(return_X_y=True)
>>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733...
>>> gpc.predict_proba(X[:2,:])
array([[0.8881..., 0.0566..., 0.05518...],
[0.8678..., 0.0707... , 0.0614...]])
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if len(np.atleast_1d(self.length_scale)) > 1:
raise AttributeError(
"RationalQuadratic kernel only supports isotropic version, "
"please use a single scalar for length_scale")
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
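# Editor's note: a hypothetical sketch (not in the original source) of the scale
# mixture interpretation above: as alpha grows, RationalQuadratic converges to
# the RBF kernel with the same length scale.
def _demo_rq_limit():
    import numpy as np
    X = np.random.RandomState(0).rand(5, 2)
    K_rq = RationalQuadratic(length_scale=1.0, alpha=1e8)(X)
    assert np.allclose(K_rq, RBF(length_scale=1.0)(X), atol=1e-6)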
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
r"""Exp-Sine-Squared kernel (aka periodic kernel).
The ExpSineSquared kernel allows one to model functions which repeat
themselves exactly. It is parameterized by a length scale
parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
Only the isotropic variant where :math:`l` is a scalar is
supported at the moment. The kernel is given by:
.. math::
        k(x_i, x_j) = \text{exp}\left(-
            \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^2 } \right)
    where :math:`l` is the length scale of the kernel, :math:`p` the
    periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
Euclidean distance.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default=1.0
The length scale of the kernel.
periodicity : float > 0, default=1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'periodicity'.
If set to "fixed", 'periodicity' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import ExpSineSquared
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.0144...
>>> gpr.predict(X[:2,:], return_std=True)
(array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
"""Returns the length scale"""
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
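# Editor's note: an illustrative demo (hypothetical helper, not in the original
# module) of the exact periodicity described above: points separated by an
# integer multiple of the period are perfectly correlated.
def _demo_periodicity():
    import numpy as np
    x = np.array([[0.0], [1.0], [2.0]])
    kernel = ExpSineSquared(length_scale=1.0, periodicity=1.0)
    # sin(pi * d / p) vanishes for d = 1, 2, ..., so k = exp(0) = 1.
    assert np.allclose(kernel(x), np.ones((3, 3)))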
class DotProduct(Kernel):
r"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting :math:`N(0, 1)` priors on the coefficients
of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
on the bias. The DotProduct kernel is invariant to a rotation of
the coordinates about the origin, but not translations.
    It is parameterized by a parameter sigma_0 :math:`\sigma_0`
    which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 = 0`,
    the kernel is called the homogeneous linear kernel, otherwise
    it is inhomogeneous. The kernel is given by
    .. math::
        k(x_i, x_j) = \sigma_0^2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
See [1]_, Chapter 4, Section 4.2, for further details regarding the
DotProduct kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default=1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'sigma_0'.
If set to "fixed", 'sigma_0' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y).
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X).
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
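# Editor's note: a minimal sketch (not part of the original file) showing how
# _approx_fprime can validate an analytic kernel gradient; clone_with_theta is
# the standard Kernel helper for rebuilding a kernel from log-hyperparameters.
def _demo_gradient_check():
    import numpy as np
    X = np.random.RandomState(0).rand(5, 2)
    kernel = RBF(length_scale=1.5)
    _, K_grad = kernel(X, eval_gradient=True)

    def f(theta):  # Gram matrix as a function of the log-hyperparameters
        return kernel.clone_with_theta(theta)(X)

    K_grad_numeric = _approx_fprime(kernel.theta, f, 1e-8)
    assert np.allclose(K_grad, K_grad_numeric, atol=1e-5)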
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
    Note: The gradient (eval_gradient) is computed numerically rather than
        analytically, and all kernels support only isotropic distances. The
        parameter gamma is considered to be a hyperparameter and may be
        optimized. The other kernel parameters are set directly at
        initialization and are kept fixed.
.. versionadded:: 0.18
Parameters
----------
gamma : float, default=1.0
Parameter gamma of the pairwise kernel specified by metric. It should
be positive.
gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'gamma'.
If set to "fixed", 'gamma' cannot be changed during
hyperparameter tuning.
metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
"rbf", "laplacian", "sigmoid", "cosine"} or callable, \
default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default=None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import PairwiseKernel
>>> X, y = load_iris(return_X_y=True)
>>> kernel = PairwiseKernel(metric='rbf')
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733...
>>> gpc.predict_proba(X[:2,:])
array([[0.8880..., 0.05663..., 0.05532...],
[0.8676..., 0.07073..., 0.06165...]])
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
|
anntzer/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
Python
|
bsd-3-clause
| 84,532
|
[
"Gaussian"
] |
82818a5855b0b9af55c76e7fc5a71de6add56b89f3952d7c599dd6a005d48bcc
|
import json
from .common import CatmaidApiTestCase
class LogsApiTests(CatmaidApiTestCase):
log_rows = [
[
'test2',
'create_neuron',
'2012-07-22T16:50:57.758000+00:00',
5290.0,
3930.0,
279.0,
'Create neuron 2434 and skeleton 2433'],
[
'test2',
'create_neuron',
'2012-07-22T19:12:54.541000+00:00',
4470.0,
2110.0,
180.0,
'Create neuron 2441 and skeleton 2440'],
[
'test2',
'create_neuron',
'2012-07-22T19:15:24.010000+00:00',
3680.0,
2530.0,
180.0,
'Create neuron 2452 and skeleton 2451']
]
def test_list_logs_user_param(self):
self.fake_authentication()
response = self.client.post(
'/%d/logs/list' % self.test_project_id, {'user_id': 1})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = {
'iTotalDisplayRecords': 0,
'iTotalRecords': 0,
'aaData': []
}
self.assertEqual(expected_result, parsed_response)
def test_list_logs_sort(self):
self.fake_authentication()
response = self.client.post(
'/%d/logs/list' % self.test_project_id, {
'iSortingCols': 2,
'iSortCol_0': 5, # z
'iSortDir_0': 'ASC',
'iSortCol_1': 3, # x
'iSortDir_1': 'DESC'
})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
expected_result = {
'iTotalDisplayRecords': 3,
'iTotalRecords': 3,
'aaData': [
self.log_rows[0], self.log_rows[1], self.log_rows[2]
]
}
self.assertEqual(expected_result, parsed_response)
def test_list_logs_subset(self):
self.fake_authentication()
response = self.client.post(
'/%d/logs/list' % self.test_project_id, {
'iDisplayStart': 1,
'iDisplayLength': 2
})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertEqual(2, parsed_response['iTotalDisplayRecords'])
self.assertEqual(2, parsed_response['iTotalRecords'])
def test_list_logs_no_params(self):
self.fake_authentication()
response = self.client.post(
'/%d/logs/list' % self.test_project_id, {})
self.assertEqual(response.status_code, 200)
parsed_response = json.loads(response.content)
self.assertEqual(3, parsed_response['iTotalDisplayRecords'])
self.assertEqual(3, parsed_response['iTotalRecords'])
self.assertTrue(self.log_rows[0] in parsed_response['aaData'])
self.assertTrue(self.log_rows[1] in parsed_response['aaData'])
self.assertTrue(self.log_rows[2] in parsed_response['aaData'])
|
catsop/CATMAID
|
django/applications/catmaid/tests/apis/test_logs.py
|
Python
|
gpl-3.0
| 3,523
|
[
"NEURON"
] |
84f4e5506aaca3206967e62f549091a30c9b748a4aa63c7a6b84ac415a6d88fe
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pytest
def _plugin_import(plug):
import sys
if sys.version_info >= (3, 4):
from importlib import util
plug_spec = util.find_spec(plug)
else:
import pkgutil
plug_spec = pkgutil.find_loader(plug)
    return plug_spec is not None
def is_psi4_new_enough(version_feature_introduced):
if not _plugin_import('psi4'):
return False
import psi4
from pkg_resources import parse_version
return parse_version(psi4.__version__) >= parse_version(version_feature_introduced)
#def is_numpy_new_enough(version_feature_introduced):
# if not _plugin_import('numpy'):
# return False
# import numpy
# from pkg_resources import parse_version
# return parse_version(numpy.version.version) >= parse_version(version_feature_introduced)
#
#
#using_scipy = pytest.mark.skipif(_plugin_import('scipy') is False,
# reason='Not detecting module scipy. Install package if necessary and add to envvar PYTHONPATH')
using_psi4 = pytest.mark.skipif(_plugin_import('psi4') is False,
reason='Not detecting module psi4. Install package and add to envvar PYTHONPATH')
#using_psi4_libxc = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev100") is False,
# reason="Psi4 does not include DFT rewrite to use Libxc. Update to development head")
#
#using_psi4_efpmints = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev507") is False,
# reason="Psi4 does not include EFP integrals in mints. Update to development head")
#
#using_psi4_python_integral_deriv = pytest.mark.skipif(is_psi4_new_enough("1000") is False,
# reason="Psi4 does not include derivatives of integrals exported to python. Update to development head")
using_psi4_molrec = pytest.mark.skipif(is_psi4_new_enough("1.2a1.dev999") is False,
reason="Psi4 does not use the new Molecule parsing. Update to development head")
#using_numpy_113 = pytest.mark.skipif(is_numpy_new_enough("1.13.0") is False,
# reason='NumPy does not include 1.13 features. Update package and add to envvar PYTHONPATH')
#
#using_matplotlib = pytest.mark.skipif(_plugin_import('matplotlib') is False,
# reason='Not detecting module matplotlib. Install package if necessary and add to envvar PYTHONPATH')
using_pylibefp = pytest.mark.skipif(_plugin_import('pylibefp') is False,
reason='Not detecting module pylibefp. Install package if necessary and add to envvar PYTHONPATH')
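# Usage sketch (hypothetical test, not part of this module): applying a marker
# defined above makes pytest skip the test when the dependency is absent.
#
#     @using_psi4
#     def test_requires_psi4():
#         import psi4
#         assert psi4.__version__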
|
psi4/psi4
|
psi4/driver/qcdb/pytest/addons.py
|
Python
|
lgpl-3.0
| 3,612
|
[
"Psi4"
] |
12de26b8b2b8f8a4a927820730587ab168c7ebd20b75a53747f79f2b15861f7e
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
import numpy
from functools import reduce  # reduce is no longer a builtin in Python 3
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
        if isinstance(dataframe[column][0], str):
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
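# Usage sketch (hypothetical frame): string columns are overwritten with
# integer codes; numeric columns pass through unchanged.
#
#     df = pd.DataFrame({'city': ['a', 'b', 'a'], 'count': [1, 2, 3]})
#     label_encode_frame(df)  # 'city' becomes [0, 1, 0]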
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    print('--------- For Model : ', trained_model_name, ' ---------------\n')
    predicted_values = trained_model.predict(X_test)
    print(metrics.classification_report(y_test, predicted_values))
    print('Accuracy Score : ', metrics.accuracy_score(y_test, predicted_values))
    print('---------------------------------------\n')
train_filename = 'train.csv'
resource_filename = 'resource_type.csv'
event_type_filename = 'event_type.csv'
log_feature_filename = 'log_feature.csv'
train_frame = pd.read_csv(train_filename)
resource_frame = pd.read_csv(resource_filename)
event_frame = pd.read_csv(event_type_filename)
log_frame = pd.read_csv(log_feature_filename)
df_list = [train_frame, resource_frame, event_frame, log_frame]
merged_frame = reduce(lambda left,right: pd.merge(left,right,on='id'), df_list)
encoded_frame = label_encode_frame(merged_frame)
del encoded_frame['id']
class_labels = list(encoded_frame['fault_severity'].values)
del encoded_frame['fault_severity']
X_train,X_test,y_train,y_test = train_test_split(encoded_frame.values,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
|
rupakc/Kaggle-Compendium
|
Telestra Network Disruption Challenge/telestra-baseline.py
|
Python
|
mit
| 3,316
|
[
"Gaussian"
] |
3314a0ec5b970b4d0611ae9c5591277bce22b8223e539001ddc394f6666ac830
|
# -*- coding: utf-8 -*-
__author__ = 'amarch'
import itertools
import copy
from utils import strutils as infoutils
from scipy.integrate import *
from RotationCurve import *
from Galaxy import *
class RadialToAzimuthalRatioHandler():
def __init__(self, galaxy):
self.galaxy = galaxy
def eval_sigPhi_to_sigR(self):
self.sigPhi_to_sigR_expscale = 0
self.sigPhi_to_sigR_nullp = 0
self.poly_star = self.galaxy.star_rc.poly_fit
(Rmin, Rmax) = self.galaxy.rc_handler.rcs_radii_intersection(self.galaxy.star_rc, self.galaxy.gas_rc)
step = (Rmax - Rmin)/1000
self.xx = arange(Rmin, Rmax, step)
self.minxx = min(self.xx)
self.maxxx = max(self.xx)
        xxx = [x for x in self.xx if 1 < x < (self.minxx + (self.maxxx - self.minxx) / 3)]
        yy = [self.sigPhi_to_sigR_real(x) for x in xxx]
        maxyy = max(y for y in yy if y < 1)
        self.maxyyy = (maxyy - 0.5) / math.exp(1) + 0.5
        maxyy_x = yy.index(maxyy)
        maxyy_x = xxx[maxyy_x]
        yy = list(zip(xxx, yy))
self.intersect_list = []
inters = 0
for y in enumerate(yy):
if inters == 2:
break
else:
                if y[0] == len(yy) - 1:
break
if y[1][1] <= self.maxyyy and yy[y[0] + 1][1] > self.maxyyy:
self.intersect_list.append(y[1][0])
self.sigPhi_to_sigR_expscale += y[1][0]
inters += 1
if y[1][1] > self.maxyyy and yy[y[0] + 1][1] <= self.maxyyy:
self.intersect_list.append(y[1][0])
self.sigPhi_to_sigR_expscale += y[1][0]
inters += 1
if inters > 0:
self.sigPhi_to_sigR_expscale = self.sigPhi_to_sigR_expscale / inters - maxyy_x
self.sigPhi_to_sigR_nullp = (maxyy-0.5)*math.exp(maxyy_x/self.sigPhi_to_sigR_expscale)
else:
            # TODO: NOT VERIFIED!
            expfit = poly1d(polyfit(xxx, [math.log(po[1]) for po in yy], deg=1))
self.sigPhi_to_sigR_expscale = (-1 / expfit.coeffs[0])
self.sigPhi_to_sigR_nullp = math.exp(expfit.coeffs[1])
def plot_sigPhi2_to_sigR2(self, color='red'):
for y in self.intersect_list:
plt.axvline(x=y, ls='--')
plt.plot(self.xx, [self.sigPhi2_to_sigR2(x) for x in self.xx], '.-', color=color)
plt.plot(self.xx, [self.sigPhi_to_sigR_real(x) for x in self.xx], '.-')
plt.axvline(x=(self.minxx+(self.maxxx-self.minxx) / 3), ls='-')
plt.axhline(y=(self.maxyyy), ls='-.')
plt.axhline(y=0)
plt.axhline(y=0.5)
plt.axhline(y=1)
plt.xlabel("$R,\ arcsec$")
plt.ylabel(r"$\sigma_{\varphi}^2/\sigma_{R}^2$")
def sigPhi_to_sigR_real(self, R):
return 0.5 * (1 + R * self.poly_star.deriv()(R) / self.poly_star(R))
def sigPhi2_to_sigR2(self, R):
return 0.5 + self.sigPhi_to_sigR_nullp * math.exp(-R/self.sigPhi_to_sigR_expscale)
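# Usage sketch (assumes a fully initialised Galaxy object with star and gas
# rotation curves, built elsewhere in this repository):
#
#     handler = RadialToAzimuthalRatioHandler(galaxy)
#     handler.eval_sigPhi_to_sigR()
#     handler.plot_sigPhi2_to_sigR2(color='blue')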
|
Amarchuk/2FInstability
|
core/RadialToAzimuthalRatioHandler.py
|
Python
|
gpl-3.0
| 3,132
|
[
"Galaxy"
] |
f2cf1f2cb72285627e7943d328db8cf9c7421624fa7381bcca6cb40dd0245bf5
|
#!/usr/bin/env python
import sys
import os
import numpy as np
from datetime import datetime
"""
Obtain a tight-binding Hamiltonian of the Haldane model in Wannier90 format.
How to run:
python haldane_hr_gen.py
This will generate the tight-binding Hamiltonian file Haldane_hr.dat.
LATTICE
Angstrom
2.1377110 -1.2342080 0.0000000
0.0000000 2.4684160 0.0000000
0.0000000 0.0000000 10.000000
ATOM_POSITIONS
2 ! number of atoms for projectors
Direct ! Direct or Cartesian coordinate
C 0.333333 0.666667 0.500000
C 0.666667 0.333333 0.500000
"""
# Define tight-binding parameters
# You can find the phase diagram in PRL 61, 2015 (1988)
# Chern = 0
m=0.2; phi= np.pi/2.0; t1=1.0; t2=0.0;
# Gapless phase
#m=0.2; phi= np.pi/2.0; t1=1.0; t2=m/3.0/np.sqrt(3);
# Chern = 1
#m=0.2; phi= np.pi/2.0; t1=1.0; t2=m/3.0/np.sqrt(3)*2.0;
# maximum dimension for hr matrix
ndim = 2
nrpts = 7
num_patom=2
# hr matrix
norbs = num_patom*1
hmnr= np.zeros((norbs,norbs,nrpts),dtype = np.complex128)
# WS points
irvec = np.zeros((3,nrpts),dtype = np.int32)
# degeneracy
dege = np.zeros((nrpts),dtype = np.int32)+1
# complex unit
zi=1j
ir= 0
irvec[0, ir]= 0
irvec[1, ir]= 0
hmnr[0, 0, ir]= m
hmnr[1, 1, ir]= -m
hmnr[0, 1, ir]= t1
hmnr[1, 0, ir]= t1
# 1 0
ir= ir+1
irvec[0, ir]= 1
irvec[1, ir]= 0
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[0, 1, ir]= t1
# 0 1
ir= ir+1
irvec[0, ir]= 0
irvec[1, ir]= 1
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 0, ir]= t1
# 1 1
ir= ir+1
irvec[0, ir]= 1
irvec[1, ir]= 1
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
#-1 0
ir= ir+1
irvec[0, ir]=-1
irvec[1, ir]= 0
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 0, ir]= t1
# 0-1
ir= ir+1
irvec[0, ir]= 0
irvec[1, ir]=-1
hmnr[0, 0, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[0, 1, ir]= t1
#-1-1
ir= ir+1
irvec[0, ir]=-1
irvec[1, ir]=-1
hmnr[0, 0, ir]= (np.cos(phi)-zi*np.sin(phi)) *t2
hmnr[1, 1, ir]= (np.cos(phi)+zi*np.sin(phi)) *t2
#print "dump hr.dat..."
with open('Haldane_hr.dat','w') as f:
line="Haldane model with m="+str(m)+", phi="+str(phi/np.pi)+"pi, t1="+str(t1)+", t2="+str(t2)+"Ref:Physical Review Letters 61, 18(1988)"+'\n'
f.write(line)
nl = np.int32(np.ceil(nrpts/15.0))
f.write(str(norbs)+'\n')
f.write(str(nrpts)+'\n')
for l in range(nl):
line=" "+' '.join([str(np.int32(i)) for i in dege[l*15:(l+1)*15]])
f.write(line)
f.write('\n')
for irpt in range(nrpts):
rx = irvec[0,irpt];ry = irvec[1,irpt];rz = irvec[2,irpt]
for jatomorb in range(norbs):
for iatomorb in range(norbs):
rp =hmnr[iatomorb,jatomorb,irpt].real
ip =hmnr[iatomorb,jatomorb,irpt].imag
line="{:8d}{:8d}{:8d}{:8d}{:8d}{:20.10f}{:20.10f}\n".format(rx,ry,rz,jatomorb+1,iatomorb+1,rp,ip)
f.write(line)
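# Read-back sketch (mirrors the writer above): the first three lines of
# Haldane_hr.dat hold the comment, norbs and nrpts; the degeneracy list
# follows in rows of up to 15 integers.
#
#     with open('Haldane_hr.dat') as fin:
#         comment = fin.readline()
#         norbs_read = int(fin.readline())
#         nrpts_read = int(fin.readline())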
|
quanshengwu/wannier_tools
|
examples/Haldane_model/haldane_hr_gen-trivial-insulator.py
|
Python
|
gpl-3.0
| 3,153
|
[
"Wannier90"
] |
26187be41090ef17f5d2289609c68614690e6814bc91011de924851f070380cd
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
from functools import reduce
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf import mp
from pyscf import cc
from pyscf import ao2mo
from pyscf.cc import uccsd
from pyscf.cc import gccsd
from pyscf.cc import addons
from pyscf.cc import uccsd_rdm
from pyscf.fci import direct_uhf
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
rhf = scf.RHF(mol)
rhf.conv_tol_grad = 1e-8
rhf.kernel()
mf = scf.addons.convert_to_uhf(rhf)
myucc = cc.UCCSD(mf).run(conv_tol=1e-10)
mol_s2 = gto.Mole()
mol_s2.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol_s2.basis = '631g'
mol_s2.spin = 2
mol_s2.verbose = 5
mol_s2.output = '/dev/null'
mol_s2.build()
mf_s2 = scf.UHF(mol_s2).run()
eris = uccsd.UCCSD(mf_s2).ao2mo()
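# Note: myucc above is the converged UCCSD reference for closed-shell water
# (its e_corr, t1 and t2 attributes are reused by several tests below), while
# eris caches the integrals of the triplet (spin=2) molecule.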
def tearDownModule():
global mol, rhf, mf, myucc, mol_s2, mf_s2, eris
mol.stdout.close()
mol_s2.stdout.close()
del mol, rhf, mf, myucc, mol_s2, mf_s2, eris
class KnownValues(unittest.TestCase):
# def test_with_df(self):
# mf = scf.UHF(mol).density_fit(auxbasis='weigend').run()
# mycc = cc.UCCSD(mf).run()
# self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7)
def test_ERIS(self):
ucc1 = cc.UCCSD(mf)
nao,nmo = mf.mo_coeff[0].shape
numpy.random.seed(1)
mo_coeff = numpy.random.random((2,nao,nmo))
eris = cc.uccsd._make_eris_incore(ucc1, mo_coeff)
self.assertAlmostEqual(lib.finger(eris.oooo), 4.9638849382825754, 11)
self.assertAlmostEqual(lib.finger(eris.ovoo),-1.3623681896983584, 11)
self.assertAlmostEqual(lib.finger(eris.ovov), 125.81550684442163, 11)
self.assertAlmostEqual(lib.finger(eris.oovv), 55.123681017639598, 11)
self.assertAlmostEqual(lib.finger(eris.ovvo), 133.48083527898248, 11)
self.assertAlmostEqual(lib.finger(eris.ovvv), 59.421927525288183, 11)
self.assertAlmostEqual(lib.finger(eris.vvvv), 43.556602622204778, 11)
self.assertAlmostEqual(lib.finger(eris.OOOO),-407.05319440524585, 11)
self.assertAlmostEqual(lib.finger(eris.OVOO), 56.284299937160796, 11)
self.assertAlmostEqual(lib.finger(eris.OVOV),-287.72899895597448, 11)
self.assertAlmostEqual(lib.finger(eris.OOVV),-85.484299959144522, 11)
self.assertAlmostEqual(lib.finger(eris.OVVO),-228.18996145476956, 11)
self.assertAlmostEqual(lib.finger(eris.OVVV),-10.715902258877399, 11)
self.assertAlmostEqual(lib.finger(eris.VVVV),-89.908425473958303, 11)
self.assertAlmostEqual(lib.finger(eris.ooOO),-336.65979260175226, 11)
self.assertAlmostEqual(lib.finger(eris.ovOO),-16.405125847288176, 11)
self.assertAlmostEqual(lib.finger(eris.ovOV), 231.59042209500075, 11)
self.assertAlmostEqual(lib.finger(eris.ooVV), 20.338077193028354, 11)
self.assertAlmostEqual(lib.finger(eris.ovVO), 206.48662856981386, 11)
self.assertAlmostEqual(lib.finger(eris.ovVV),-71.273249852220516, 11)
self.assertAlmostEqual(lib.finger(eris.vvVV), 172.47130671068496, 11)
self.assertAlmostEqual(lib.finger(eris.OVoo),-19.927660309103977, 11)
self.assertAlmostEqual(lib.finger(eris.OOvv),-27.761433381797019, 11)
self.assertAlmostEqual(lib.finger(eris.OVvo),-140.09648311337384, 11)
self.assertAlmostEqual(lib.finger(eris.OVvv), 40.700983950220547, 11)
uccsd.MEMORYMIN, bak = 0, uccsd.MEMORYMIN
ucc1.max_memory = 0
eris1 = ucc1.ao2mo(mo_coeff)
uccsd.MEMORYMIN = bak
self.assertAlmostEqual(abs(numpy.array(eris1.oooo)-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovoo)-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovov)-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.oovv)-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvo)-eris.ovvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvv)-eris.ovvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvvv)-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOOO)-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOO)-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOV)-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOVV)-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVO)-eris.OVVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVV)-eris.OVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.VVVV)-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooOO)-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOO)-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOV)-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooVV)-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVO)-eris.ovVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVV)-eris.ovVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvVV)-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVoo)-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOvv)-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvo)-eris.OVvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvv)-eris.OVvv).max(), 0, 11)
# Testing the complex MO integrals
def ao2mofn(mos):
if isinstance(mos, numpy.ndarray) and mos.ndim == 2:
mos = [mos]*4
nmos = [mo.shape[1] for mo in mos]
eri_mo = ao2mo.kernel(mf._eri, mos, compact=False).reshape(nmos)
return eri_mo * 1j
eris1 = cc.uccsd._make_eris_incore(ucc1, mo_coeff, ao2mofn=ao2mofn)
self.assertAlmostEqual(abs(eris1.oooo.imag-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovoo.imag-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovov.imag-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.oovv.imag-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovvo.imag-eris.ovvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovvv.imag-eris.ovvv).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvvv.imag-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOOO.imag-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOO.imag-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOV.imag-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOVV.imag-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVVO.imag-eris.OVVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVVV.imag-eris.OVVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.VVVV.imag-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooOO.imag-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOO.imag-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOV.imag-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooVV.imag-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovVO.imag-eris.ovVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovVV.imag-eris.ovVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvVV.imag-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVoo.imag-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOvv.imag-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVvo.imag-eris.OVvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVvv.imag-eris.OVvv).max(), 0, 11)
def test_amplitudes_from_rccsd(self):
e, t1, t2 = cc.RCCSD(rhf).set(conv_tol=1e-10).kernel()
t1, t2 = myucc.amplitudes_from_rccsd(t1, t2)
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 6)
def test_uccsd_frozen(self):
ucc1 = copy.copy(myucc)
ucc1.frozen = 1
self.assertEqual(ucc1.nmo, (12,12))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [0,1]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [[0,1], [0,1]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [1,9]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [[1,9], [1,9]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [9,10,12]
self.assertEqual(ucc1.nmo, (10,10))
self.assertEqual(ucc1.nocc, (5,5))
ucc1.nmo = (13,12)
ucc1.nocc = (5,4)
self.assertEqual(ucc1.nmo, (13,12))
self.assertEqual(ucc1.nocc, (5,4))
    def test_uccsd_frozen_kernel(self):
# Freeze 1s electrons
frozen = [[0,1], [0,1]]
ucc = cc.UCCSD(mf_s2, frozen=frozen)
ucc.diis_start_cycle = 1
ecc, t1, t2 = ucc.kernel()
self.assertAlmostEqual(ecc, -0.07414978284611283, 8)
def test_rdm(self):
nocc = 5
nvir = 7
mol = gto.M()
mf = scf.UHF(mol)
mf.mo_occ = numpy.zeros((2,nocc+nvir))
mf.mo_occ[:,:nocc] = 1
mycc = uccsd.UCCSD(mf)
def antisym(t2):
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,2,3)
return t2
orbspin = numpy.zeros((nocc+nvir)*2, dtype=int)
orbspin[1::2] = 1
numpy.random.seed(1)
t1 = numpy.random.random((2,nocc,nvir))*.1 - .1
t2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
t2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2 = (t2aa,t2ab,t2bb)
l1 = numpy.random.random((2,nocc,nvir))*.1 - .1
l2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
l2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2 = (l2aa,l2ab,l2bb)
dm1a, dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa, dm2ab, dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
ia = orbspin == 0
ib = orbspin == 1
oa = orbspin[:nocc*2] == 0
ob = orbspin[:nocc*2] == 1
va = orbspin[nocc*2:] == 0
vb = orbspin[nocc*2:] == 1
t1 = addons.spatial2spin(t1, orbspin)
t2 = addons.spatial2spin(t2, orbspin)
l1 = addons.spatial2spin(l1, orbspin)
l2 = addons.spatial2spin(l2, orbspin)
mf1 = scf.GHF(mol)
mf1.mo_occ = numpy.zeros((nocc+nvir)*2)
        mf1.mo_occ[:nocc*2] = 1
mycc1 = gccsd.GCCSD(mf1)
dm1 = mycc1.make_rdm1(t1, t2, l1, l2)
dm2 = mycc1.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1[ia][:,ia]-dm1a).max(), 0, 9)
self.assertAlmostEqual(abs(dm1[ib][:,ib]-dm1b).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ia][:,:,:,ia]-dm2aa).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ib][:,:,:,ib]-dm2ab).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ib][:,ib][:,:,ib][:,:,:,ib]-dm2bb).max(), 0, 9)
def test_h2o_rdm(self):
mol = mol_s2
mf = mf_s2
mycc = uccsd.UCCSD(mf)
mycc.frozen = 2
ecc, t1, t2 = mycc.kernel()
l1, l2 = mycc.solve_lambda()
dm1a,dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa,dm2ab,dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
mo_a = mf.mo_coeff[0]
mo_b = mf.mo_coeff[1]
nmoa = mo_a.shape[1]
nmob = mo_b.shape[1]
eriaa = ao2mo.kernel(mf._eri, mo_a, compact=False).reshape([nmoa]*4)
eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4)
eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)
eriab = eriab.reshape([nmoa,nmoa,nmob,nmob])
hcore = mf.get_hcore()
h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2aa) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2ab)
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2bb) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
d1 = uccsd_rdm._gamma1_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2)
mycc.max_memory = 0
d2 = uccsd_rdm._gamma2_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2, True)
dm2 = uccsd_rdm._make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True)
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2[0]) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2[1])
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2[2]) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
def test_h4_rdm(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 2
mol.basis = '6-31g'
mol.build()
mf = scf.UHF(mol).set(init_guess='1e').run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
mycc = uccsd.UCCSD(mf).run()
mycc.solve_lambda()
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
efci, fcivec = direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
dm1ref, dm2ref = direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
t1, t2 = mycc.t1, mycc.t2
l1, l2 = mycc.l1, mycc.l2
rdm1 = mycc.make_rdm1(t1, t2, l1, l2)
rdm2 = mycc.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1ref[0] - rdm1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm1ref[1] - rdm1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[0] - rdm2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[1] - rdm2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[2] - rdm2[2]).max(), 0, 6)
def test_eris_contract_vvvv_t2(self):
mol = gto.Mole()
nocca, noccb, nvira, nvirb = 5, 4, 12, 13
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
numpy.random.seed(9)
t2 = numpy.random.random((nocca,noccb,nvira,nvirb))
eris = uccsd._ChemistsERIs()
eris.vvVV = numpy.random.random((nvira_pair,nvirb_pair))
eris.mol = mol
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_vvVV_t2(myucc, t2, eris.vvVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 12.00904827896089, 11)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
vvVV = eris.vvVV[:,idxb][idxa]
ref = lib.einsum('acbd,ijcd->ijab', vvVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
# _contract_VVVV_t2, testing complex and real mixed contraction
VVVV =(numpy.random.random((nvirb,nvirb,nvirb,nvirb)) +
numpy.random.random((nvirb,nvirb,nvirb,nvirb))*1j - (.5+.5j))
VVVV = VVVV + VVVV.transpose(1,0,3,2).conj()
VVVV = VVVV + VVVV.transpose(2,3,0,1)
eris.VVVV = VVVV
t2 = numpy.random.random((noccb,noccb,nvirb,nvirb))
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,3,2)
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_VVVV_t2(myucc, t2, eris.VVVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 47.903883794299404-50.501573400833429j, 11)
ref = lib.einsum('acbd,ijcd->ijab', eris.VVVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
def test_update_amps1(self):
mf = scf.UHF(mol_s2)
numpy.random.seed(9)
nmo = mf_s2.mo_occ[0].size
mf.mo_coeff = numpy.random.random((2,nmo,nmo)) - 0.5
mf.mo_occ = numpy.zeros((2,nmo))
mf.mo_occ[0,:6] = 1
mf.mo_occ[1,:5] = 1
mycc = uccsd.UCCSD(mf)
nocca, noccb = 6, 5
nvira, nvirb = nmo-nocca, nmo-noccb
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
eris = mycc.ao2mo()
fakeris = uccsd._ChemistsERIs()
fakeris.mo_coeff = eris.mo_coeff
fakeris.vvVV = eris.vvVV
fakeris.mol = mol_s2
t2ab = numpy.random.random((nocca,noccb,nvira,nvirb))
t1a = numpy.zeros((nocca,nvira))
t1b = numpy.zeros((noccb,nvirb))
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
fakeris.vvVV = None
mycc.direct = True
mycc.max_memory = 0
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
t1 = (numpy.random.random((nocca,nvira)), numpy.random.random((noccb,nvirb)))
t2 = (numpy.random.random((nocca,nocca,nvira,nvira)),
numpy.random.random((nocca,noccb,nvira,nvirb)),
numpy.random.random((noccb,noccb,nvirb,nvirb)))
t1, t2 = mycc.vector_to_amplitudes(mycc.amplitudes_to_vector(t1, t2))
t1, t2 = mycc.update_amps(t1, t2, eris)
self.assertAlmostEqual(lib.finger(t1[0]), 49.912690337392938, 10)
self.assertAlmostEqual(lib.finger(t1[1]), 74.596097348134776, 10)
self.assertAlmostEqual(lib.finger(t2[0]), -41.784696524955393, 10)
self.assertAlmostEqual(lib.finger(t2[1]), -9675.7677695314342, 7)
self.assertAlmostEqual(lib.finger(t2[2]), 270.75447826471577, 8)
self.assertAlmostEqual(lib.finger(mycc.amplitudes_to_vector(t1, t2)), 4341.9623137256776, 6)
def test_vector_to_amplitudes(self):
t1, t2 = myucc.vector_to_amplitudes(myucc.amplitudes_to_vector(myucc.t1, myucc.t2))
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 12)
def test_vector_size(self):
self.assertEqual(myucc.vector_size(), 2240)
def test_update_amps2(self): # compare to gccsd.update_amps
mol = mol_s2
mf = mf_s2
myucc = uccsd.UCCSD(mf)
nocca, noccb = 6,4
nmo = mol.nao_nr()
nvira,nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.random.random((nocca,nvira))-.9,
numpy.random.random((noccb,nvirb))-.9]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
mo_a = mf.mo_coeff[0] + numpy.sin(mf.mo_coeff[0]) * .01j
mo_b = mf.mo_coeff[1] + numpy.sin(mf.mo_coeff[1]) * .01j
nao = mo_a.shape[0]
eri = ao2mo.restore(1, mf._eri, nao)
eri0aa = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_a.conj(), mo_a)
eri0ab = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_b.conj(), mo_b)
eri0bb = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_b.conj(), mo_b, mo_b.conj(), mo_b)
eri0ba = eri0ab.transpose(2,3,0,1)
nvira = nao - nocca
nvirb = nao - noccb
eris = uccsd._ChemistsERIs(mol)
eris.oooo = eri0aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.ovoo = eri0aa[:nocca,nocca:,:nocca,:nocca].copy()
eris.oovv = eri0aa[:nocca,:nocca,nocca:,nocca:].copy()
eris.ovvo = eri0aa[:nocca,nocca:,nocca:,:nocca].copy()
eris.ovov = eri0aa[:nocca,nocca:,:nocca,nocca:].copy()
eris.ovvv = eri0aa[:nocca,nocca:,nocca:,nocca:].copy()
eris.vvvv = eri0aa[nocca:,nocca:,nocca:,nocca:].copy()
eris.OOOO = eri0bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.OVOO = eri0bb[:noccb,noccb:,:noccb,:noccb].copy()
eris.OOVV = eri0bb[:noccb,:noccb,noccb:,noccb:].copy()
eris.OVVO = eri0bb[:noccb,noccb:,noccb:,:noccb].copy()
eris.OVOV = eri0bb[:noccb,noccb:,:noccb,noccb:].copy()
eris.OVVV = eri0bb[:noccb,noccb:,noccb:,noccb:].copy()
eris.VVVV = eri0bb[noccb:,noccb:,noccb:,noccb:].copy()
eris.ooOO = eri0ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.ovOO = eri0ab[:nocca,nocca:,:noccb,:noccb].copy()
eris.ooVV = eri0ab[:nocca,:nocca,noccb:,noccb:].copy()
eris.ovVO = eri0ab[:nocca,nocca:,noccb:,:noccb].copy()
eris.ovOV = eri0ab[:nocca,nocca:,:noccb,noccb:].copy()
eris.ovVV = eri0ab[:nocca,nocca:,noccb:,noccb:].copy()
eris.vvVV = eri0ab[nocca:,nocca:,noccb:,noccb:].copy()
eris.OOoo = eri0ba[:noccb,:noccb,:nocca,:nocca].copy()
eris.OVoo = eri0ba[:noccb,noccb:,:nocca,:nocca].copy()
eris.OOvv = eri0ba[:noccb,:noccb,nocca:,nocca:].copy()
eris.OVvo = eri0ba[:noccb,noccb:,nocca:,:nocca].copy()
eris.OVov = eri0ba[:noccb,noccb:,:nocca,nocca:].copy()
eris.OVvv = eri0ba[:noccb,noccb:,nocca:,nocca:].copy()
eris.VVvv = eri0ba[noccb:,noccb:,nocca:,nocca:].copy()
eris.focka = numpy.diag(mf.mo_energy[0])
eris.fockb = numpy.diag(mf.mo_energy[1])
eris.mo_energy = mf.mo_energy
t1[0] = t1[0] + numpy.sin(t1[0]) * .05j
t1[1] = t1[1] + numpy.sin(t1[1]) * .05j
t2[0] = t2[0] + numpy.sin(t2[0]) * .05j
t2[1] = t2[1] + numpy.sin(t2[1]) * .05j
t2[2] = t2[2] + numpy.sin(t2[2]) * .05j
t1new_ref, t2new_ref = uccsd.update_amps(myucc, t1, t2, eris)
nocc = nocca + noccb
orbspin = numpy.zeros(nao*2, dtype=int)
orbspin[1::2] = 1
orbspin[nocc-1] = 0
orbspin[nocc ] = 1
        eri1 = numpy.zeros([nao*2]*4, dtype=numpy.complex128)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
eri1[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] = eri0aa
eri1[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] = eri0ab
eri1[idxb[:,None,None,None],idxb[:,None,None],idxa[:,None],idxa] = eri0ba
eri1[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] = eri0bb
eri1 = eri1.transpose(0,2,1,3) - eri1.transpose(0,2,3,1)
erig = gccsd._PhysicistsERIs()
erig.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
erig.ooov = eri1[:nocc,:nocc,:nocc,nocc:].copy()
erig.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
erig.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
erig.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
erig.ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
erig.vvvv = eri1[nocc:,nocc:,nocc:,nocc:].copy()
mo_e = numpy.empty(nao*2)
mo_e[orbspin==0] = mf.mo_energy[0]
mo_e[orbspin==1] = mf.mo_energy[1]
erig.fock = numpy.diag(mo_e)
erig.mo_energy = mo_e.real
myccg = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
t1 = myccg.spatial2spin(t1, orbspin)
t2 = myccg.spatial2spin(t2, orbspin)
t1new, t2new = gccsd.update_amps(myccg, t1, t2, erig)
t1new = myccg.spin2spatial(t1new, orbspin)
t2new = myccg.spin2spatial(t2new, orbspin)
self.assertAlmostEqual(abs(t1new[0] - t1new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1new[1] - t1new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[0] - t2new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[1] - t2new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[2] - t2new_ref[2]).max(), 0, 12)
def test_mbpt2(self):
myucc = uccsd.UCCSD(mf)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.12886859466216125, 10)
emp2 = mp.MP2(mf).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
myucc = uccsd.UCCSD(mf_s2)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.096257842171487293, 10)
emp2 = mp.MP2(mf_s2).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
def test_uintermediats(self):
from pyscf.cc import uintermediates
self.assertTrue(eris.get_ovvv().ndim == 4)
self.assertTrue(eris.get_ovVV().ndim == 4)
self.assertTrue(eris.get_OVvv().ndim == 4)
self.assertTrue(eris.get_OVVV().ndim == 4)
self.assertTrue(eris.get_ovvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_ovVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(uintermediates._get_vvvv(eris).ndim == 4)
self.assertTrue(uintermediates._get_vvVV(eris).ndim == 4)
self.assertTrue(uintermediates._get_VVVV(eris).ndim == 4)
def test_add_vvvv(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
numpy.zeros((noccb,nvirb))]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
eris1 = copy.copy(eris)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
ref =(lib.einsum('acbd,ijcd->ijab', eris1.vvvv[:,idxa][idxa], t2[0]),
lib.einsum('acbd,ijcd->ijab', eris1.vvVV[:,idxb][idxa], t2[1]),
lib.einsum('acbd,ijcd->ijab', eris1.VVVV[:,idxb][idxb], t2[2]))
t2a = myucc._add_vvvv((t1[0]*0,t1[1]*0), t2, eris, t2sym=False)
self.assertAlmostEqual(abs(ref[0]-t2a[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2a[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2a[2]).max(), 0, 12)
myucc.direct = True
eris1.vvvv = None # == with_ovvv=True in the call below
eris1.VVVV = None
eris1.vvVV = None
t1 = None
myucc.mo_coeff, eris1.mo_coeff = eris1.mo_coeff, None
t2b = myucc._add_vvvv(t1, t2, eris1)
self.assertAlmostEqual(abs(ref[0]-t2b[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2b[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2b[2]).max(), 0, 12)
def test_add_vvVV(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
numpy.zeros((noccb,nvirb))]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
eris1 = copy.copy(eris)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
vvVV = eris1.vvVV[:,idxb][idxa]
ref = lib.einsum('acbd,ijcd->ijab', vvVV, t2[1])
t2a = myucc._add_vvVV((t1[0]*0,t1[1]*0), t2[1], eris)
self.assertAlmostEqual(abs(ref-t2a).max(), 0, 12)
myucc.direct = True
eris1.vvvv = None # == with_ovvv=True in the call below
eris1.VVVV = None
eris1.vvVV = None
t1 = None
myucc.mo_coeff, eris1.mo_coeff = eris1.mo_coeff, None
t2b = myucc._add_vvVV(t1, t2[1], eris1)
self.assertAlmostEqual(abs(ref-t2b).max(), 0, 12)
def test_zero_beta_electrons(self):
mol = gto.M(atom='H', basis=('631g', [[0, (.2, 1)], [0, (.5, 1)]]),
spin=1, verbose=0)
mf = scf.UHF(mol).run()
mycc = uccsd.UCCSD(mf).run()
self.assertAlmostEqual(mycc.e_corr, 0, 9)
mol = gto.M(atom='He', basis=('631g', [[0, (.2, 1)], [0, (.5, 1)]]),
spin=2, verbose=0)
mf = scf.UHF(mol).run()
mycc = uccsd.UCCSD(mf).run()
self.assertAlmostEqual(mycc.e_corr, -2.6906675843462455e-05, 9)
self.assertEqual(mycc.t1[1].size, 0)
self.assertEqual(mycc.t2[1].size, 0)
self.assertEqual(mycc.t2[2].size, 0)
def test_reset(self):
mycc = cc.CCSD(scf.UHF(mol).newton())
mycc.reset(mol_s2)
self.assertTrue(mycc.mol is mol_s2)
self.assertTrue(mycc._scf.mol is mol_s2)
if __name__ == "__main__":
print("Full Tests for UCCSD")
unittest.main()
|
gkc1000/pyscf
|
pyscf/cc/test/test_uccsd.py
|
Python
|
apache-2.0
| 31,315
|
[
"PySCF"
] |
fb3c19bcff361fd38c8fb8d499e5aa4d07496c6fbbcb7d8321cf352b7faee47e
|
import needl
import needl.schedule as schedule
import needl.utils as utils
from needl.adapters.fingerprint import FingerprintAdapter
import requests
import zipfile
from io import BytesIO
import urllib.parse as url
AWS_ROOT = 'https://s3.amazonaws.com'
CSV_NAME = 'top-1m.csv'
TOP1M = '/alexa-static/' + CSV_NAME + '.zip'
AWS_THUMBPRINT = '46516b8e1492af030d2c747a5a3137b57423a843'
def register():
schedule.every(needl.settings['alexa']['update_interval']).days.do(update)
vi = needl.settings['alexa']['visit_interval']
args = map(int, vi.split('..'))
schedule.every(*args).minutes.do(visit)
def get_random_site():
return 'http://' + utils.get_line(needl.args.datadir + '/' + CSV_NAME).split(',')[1]
def visit():
site = get_random_site()
needl.log.info('Visiting %s', site)
browser = utils.get_browser()
browser.get(site)
utils.process_click_depth(browser, needl.settings['alexa']['click_depth'])
def update():
needl.log.info('Downloading Alexa top one million list (%s)', TOP1M)
r = requests.session()
r.mount(AWS_ROOT, FingerprintAdapter(AWS_THUMBPRINT))
file = r.get(url.urljoin(AWS_ROOT, TOP1M))
    with zipfile.ZipFile(BytesIO(file.content)) as archive:  # avoid shadowing the zip builtin
        archive.extractall(needl.args.datadir)
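# Usage sketch: register() is presumably invoked by the Needl task loader at
# startup; after that, update() refreshes the Alexa list and visit() browses a
# random entry on the intervals configured under needl.settings['alexa'].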
|
eth0izzle/Needl
|
needl/tasks/alexa.py
|
Python
|
mit
| 1,254
|
[
"VisIt"
] |
cbbf98d0977d3a4882c981673f579e4884dfd0a8c42739022a122f5754f8b512
|
# Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes and methods for tree construction"""
import itertools
import copy
from Bio.Phylo import BaseTree
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo
from Bio import _py3k
def _is_numeric(x):
return _py3k._is_int_or_long(x) or isinstance(x, (float, complex))
class _Matrix(object):
"""Base class for distance matrix or scoring matrix
Accepts a list of names and a lower triangular matrix.::
matrix = [[0],
[1, 0],
[2, 3, 0],
[4, 5, 6, 0]]
represents the symmetric matrix of
[0,1,2,4]
[1,0,3,5]
[2,3,0,6]
[4,5,6,0]
:Parameters:
names : list
names of elements, used for indexing
matrix : list
nested list of numerical lists in lower triangular format
Example
-------
>>> from Bio.Phylo.TreeConstruction import _Matrix
>>> names = ['Alpha', 'Beta', 'Gamma', 'Delta']
>>> matrix = [[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]]
>>> m = _Matrix(names, matrix)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]])
You can use two indices to get or assign an element in the matrix.
>>> m[1,2]
3
>>> m['Beta','Gamma']
3
>>> m['Beta','Gamma'] = 4
>>> m['Beta','Gamma']
4
    Furthermore, you can use one index to get or assign the list of elements related to that index.
>>> m[0]
[0, 1, 2, 4]
>>> m['Alpha']
[0, 1, 2, 4]
>>> m['Alpha'] = [0, 7, 8, 9]
>>> m[0]
[0, 7, 8, 9]
>>> m[0,1]
7
    You can also delete or insert a column/row of elements by index.
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
>>> del m['Alpha']
>>> m
_Matrix(names=['Beta', 'Gamma', 'Delta'], matrix=[[0], [4, 0], [5, 6, 0]])
>>> m.insert('Alpha', [0, 7, 8, 9] , 0)
>>> m
_Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
"""
def __init__(self, names, matrix=None):
"""Initialize matrix by a list of names and a list of
lower triangular matrix data"""
# check names
if isinstance(names, list) and all(isinstance(s, str) for s in names):
if len(set(names)) == len(names):
self.names = names
else:
raise ValueError("Duplicate names found")
else:
raise TypeError("'names' should be a list of strings")
# check matrix
if matrix is None:
# create a new one with 0 if matrix is not assigned
matrix = [[0] * i for i in range(1, len(self) + 1)]
self.matrix = matrix
else:
# check if all elements are numbers
if (isinstance(matrix, list) and
all(isinstance(l, list) for l in matrix) and
all(_is_numeric(n) for n in [item for sublist in matrix
for item in sublist])):
# check if the same length with names
if len(matrix) == len(names):
# check if is lower triangle format
if [len(m) for m in matrix] == list(range(1, len(self) + 1)):
self.matrix = matrix
else:
raise ValueError(
"'matrix' should be in lower triangle format")
else:
raise ValueError(
"'names' and 'matrix' should be the same size")
else:
raise TypeError("'matrix' should be a list of numerical lists")
def __getitem__(self, item):
"""Access value(s) by the index(s) or name(s).
For a _Matrix object 'dm'::
dm[i] get a value list from the given 'i' to others;
dm[i, j] get the value between 'i' and 'j';
dm['name'] map name to index first
dm['name1', 'name2'] map name to index first
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
return [self.matrix[index][i] for i in range(0, index)] + [self.matrix[i][index] for i in range(index, len(self))]
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
if row_index > col_index:
return self.matrix[row_index][col_index]
else:
return self.matrix[col_index][row_index]
else:
raise TypeError("Invalid index type.")
def __setitem__(self, item, value):
"""Set value by the index(s) or name(s).
Similar to __getitem__::
dm[1] = [1, 0, 3, 4] set values from '1' to others;
dm[i, j] = 2 set the value from 'i' to 'j'
"""
# Handle single indexing
if isinstance(item, (int, str)):
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
if item in self.names:
index = self.names.index(item)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if isinstance(value, list) and all(_is_numeric(n) for n in value):
if len(value) == len(self):
for i in range(0, index):
self.matrix[index][i] = value[i]
for i in range(index, len(self)):
self.matrix[i][index] = value[i]
else:
raise ValueError("Value not the same size.")
else:
raise TypeError("Invalid value type.")
# Handle double indexing
elif len(item) == 2:
row_index = None
col_index = None
if all(isinstance(i, int) for i in item):
row_index, col_index = item
elif all(isinstance(i, str) for i in item):
row_name, col_name = item
if row_name in self.names and col_name in self.names:
row_index = self.names.index(row_name)
col_index = self.names.index(col_name)
else:
raise ValueError("Item not found.")
else:
raise TypeError("Invalid index type.")
# check index
if row_index > len(self) - 1 or col_index > len(self) - 1:
raise IndexError("Index out of range.")
# check and assign value
if _is_numeric(value):
if row_index > col_index:
self.matrix[row_index][col_index] = value
else:
self.matrix[col_index][row_index] = value
else:
raise TypeError("Invalid value type.")
else:
raise TypeError("Invalid index type.")
def __delitem__(self, item):
"""Delete related distances by the index or name"""
index = None
if isinstance(item, int):
index = item
elif isinstance(item, str):
index = self.names.index(item)
else:
raise TypeError("Invalid index type.")
# remove distances related to index
for i in range(index + 1, len(self)):
del self.matrix[i][index]
del self.matrix[index]
# remove name
del self.names[index]
def insert(self, name, value, index=None):
"""Insert distances given the name and value.
:Parameters:
name : str
name of a row/col to be inserted
value : list
a row/col of values to be inserted
"""
if isinstance(name, str):
# insert at the given index or at the end
if index is None:
index = len(self)
if not isinstance(index, int):
raise TypeError("Invalid index type.")
# insert name
self.names.insert(index, name)
# insert elements of 0, to be assigned
self.matrix.insert(index, [0] * index)
for i in range(index, len(self)):
self.matrix[i].insert(index, 0)
# assign value
self[index] = value
else:
raise TypeError("Invalid name type.")
def __len__(self):
"""Matrix length"""
return len(self.names)
def __repr__(self):
return self.__class__.__name__ \
+ "(names=%s, matrix=%s)" \
% tuple(map(repr, (self.names, self.matrix)))
def __str__(self):
"""Get a lower triangular matrix string"""
matrix_string = '\n'.join(
[self.names[i] + "\t" + "\t".join([str(n) for n in self.matrix[i]])
for i in range(0, len(self))])
matrix_string = matrix_string + "\n\t" + "\t".join(self.names)
return matrix_string
class _DistanceMatrix(_Matrix):
"""Distance matrix class that can be used for distance based tree algorithms.
All diagonal elements will be zero no matter what the users provide.
"""
def __init__(self, names, matrix=None):
_Matrix.__init__(self, names, matrix)
self._set_zero_diagonal()
def __setitem__(self, item, value):
_Matrix.__setitem__(self, item, value)
self._set_zero_diagonal()
def _set_zero_diagonal(self):
"""set all diagonal elements to zero"""
for i in range(0, len(self)):
self.matrix[i][i] = 0
class DistanceCalculator(object):
"""Class to calculate the distance matrix from a DNA or Protein
Multiple Sequence Alignment(MSA) and the given name of the
substitution model.
Currently only scoring matrices are used.
:Parameters:
model : str
Name of the model matrix to be used to calculate distance.
The attribute `dna_matrices` contains the available model
names for DNA sequences and `protein_matrices` for protein
sequences.
Example
-------
>>> from Bio.Phylo.TreeConstruction import DistanceCalculator
>>> from Bio import AlignIO
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
    >>> print(aln)
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
DNA calculator with 'identity' model::
>>> calculator = DistanceCalculator('identity')
>>> dm = calculator.get_distance(aln)
    >>> print(dm)
Alpha 0
Beta 0.230769230769 0
Gamma 0.384615384615 0.230769230769 0
Delta 0.538461538462 0.538461538462 0.538461538462 0
Epsilon 0.615384615385 0.384615384615 0.461538461538 0.153846153846 0
Alpha Beta Gamma Delta Epsilon
Protein calculator with 'blosum62' model::
>>> calculator = DistanceCalculator('blosum62')
>>> dm = calculator.get_distance(aln)
    >>> print(dm)
Alpha 0
Beta 0.369047619048 0
Gamma 0.493975903614 0.25 0
Delta 0.585365853659 0.547619047619 0.566265060241 0
Epsilon 0.7 0.355555555556 0.488888888889 0.222222222222 0
Alpha Beta Gamma Delta Epsilon
"""
dna_alphabet = ['A', 'T', 'C', 'G']
# BLAST nucleic acid scoring matrix
blastn = [[5],
[-4, 5],
[-4, -4, 5],
[-4, -4, -4, 5]]
# transition/transversion scoring matrix
trans = [[6],
[-5, 6],
[-5, -1, 6],
[-1, -5, -5, 6]]
protein_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y',
'Z']
# matrices available
dna_matrices = {'blastn': blastn, 'trans': trans}
protein_models = MatrixInfo.available_matrices
protein_matrices = dict((name, getattr(MatrixInfo, name))
for name in protein_models)
dna_models = list(dna_matrices.keys())
models = ['identity'] + dna_models + protein_models
def __init__(self, model='identity'):
"""Initialize with a distance model"""
if model == 'identity':
self.scoring_matrix = None
elif model in self.dna_models:
self.scoring_matrix = _Matrix(self.dna_alphabet,
self.dna_matrices[model])
elif model in self.protein_models:
self.scoring_matrix = self._build_protein_matrix(
self.protein_matrices[model])
else:
raise ValueError("Model not supported. Available models: " +
", ".join(self.models))
def _pairwise(self, seq1, seq2):
"""Calculate pairwise distance from two sequences.
Returns a value between 0 (identical sequences) and 1 (completely
different, or seq1 is an empty string.)
"""
score = 0
max_score = 0
if self.scoring_matrix:
max_score1 = 0
max_score2 = 0
skip_letters = ['-', '*']
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 in skip_letters or l2 in skip_letters:
continue
if l1 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l1, seq1.id, i))
if l2 not in self.scoring_matrix.names:
raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
% (l2, seq2.id, i))
max_score1 += self.scoring_matrix[l1, l1]
max_score2 += self.scoring_matrix[l2, l2]
score += self.scoring_matrix[l1, l2]
# Take the higher score if the matrix is asymmetrical
max_score = max(max_score1, max_score2)
else:
# Score by character identity, not skipping any special letters
for i in range(0, len(seq1)):
l1 = seq1[i]
l2 = seq2[i]
if l1 == l2:
score += 1
max_score = len(seq1)
if max_score == 0:
return 1 # max possible scaled distance
return 1 - (score * 1.0 / max_score)
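    # Worked example for the 'identity' model (scoring_matrix is None): two
    # length-13 sequences matching at 10 positions give 1 - 10/13 = 0.2308,
    # the Alpha/Beta entry shown in the class docstring above.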
def get_distance(self, msa):
"""Return a _DistanceMatrix for MSA object
:Parameters:
msa : MultipleSeqAlignment
DNA or Protein multiple sequence alignment.
"""
if not isinstance(msa, MultipleSeqAlignment):
raise TypeError("Must provide a MultipleSeqAlignment object.")
names = [s.id for s in msa]
dm = _DistanceMatrix(names)
for seq1, seq2 in itertools.combinations(msa, 2):
dm[seq1.id, seq2.id] = self._pairwise(seq1, seq2)
return dm
def _build_protein_matrix(self, subsmat):
"""Convert matrix from SubsMat format to _Matrix object"""
protein_matrix = _Matrix(self.protein_alphabet)
for k, v in subsmat.items():
aa1, aa2 = k
protein_matrix[aa1, aa2] = v
return protein_matrix
class TreeConstructor(object):
"""Base class for all tree constructor."""
def build_tree(self, msa):
"""Caller to built the tree from a MultipleSeqAlignment object.
This should be implemented in subclass"""
raise NotImplementedError("Method not implemented!")
class DistanceTreeConstructor(TreeConstructor):
"""Distance based tree constructor.
:Parameters:
method : str
Distance tree construction method, 'nj'(default) or 'upgma'.
distance_calculator : DistanceCalculator
The distance matrix calculator for multiple sequence alignment.
It must be provided if `build_tree` will be called.
Example
--------
    >>> from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
>>> constructor = DistanceTreeConstructor()
UPGMA Tree:
>>> upgmatree = constructor.upgma(dm)
    >>> print(upgmatree)
Tree(rooted=True)
Clade(name='Inner4')
Clade(branch_length=0.171955155115, name='Inner1')
Clade(branch_length=0.111111111111, name='Epsilon')
Clade(branch_length=0.111111111111, name='Delta')
Clade(branch_length=0.0673103855608, name='Inner3')
Clade(branch_length=0.0907558806655, name='Inner2')
Clade(branch_length=0.125, name='Gamma')
Clade(branch_length=0.125, name='Beta')
Clade(branch_length=0.215755880666, name='Alpha')
NJ Tree:
>>> njtree = constructor.nj(dm)
    >>> print(njtree)
Tree(rooted=False)
Clade(name='Inner3')
Clade(branch_length=0.0142054862889, name='Inner2')
Clade(branch_length=0.239265540676, name='Inner1')
Clade(branch_length=0.0853101915988, name='Epsilon')
Clade(branch_length=0.136912030623, name='Delta')
Clade(branch_length=0.292306275042, name='Alpha')
Clade(branch_length=0.0747705106139, name='Beta')
Clade(branch_length=0.175229489386, name='Gamma')
"""
methods = ['nj', 'upgma']
def __init__(self, distance_calculator=None, method="nj"):
if (distance_calculator is None or
isinstance(distance_calculator, DistanceCalculator)):
self.distance_calculator = distance_calculator
else:
raise TypeError("Must provide a DistanceCalculator object.")
if isinstance(method, str) and method in self.methods:
self.method = method
else:
raise TypeError("Bad method: " + method +
". Available methods: " + ", ".join(self.methods))
def build_tree(self, msa):
if self.distance_calculator:
dm = self.distance_calculator.get_distance(msa)
tree = None
if self.method == 'upgma':
tree = self.upgma(dm)
else:
tree = self.nj(dm)
return tree
else:
raise TypeError("Must provide a DistanceCalculator object.")
def upgma(self, distance_matrix):
"""Construct and return an UPGMA tree.
Constructs and returns an Unweighted Pair Group Method
with Arithmetic mean (UPGMA) tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 1:
min_dist = dm[1, 0]
# find minimum index
for i in range(1, len(dm)):
for j in range(0, i):
if min_dist >= dm[i, j]:
min_dist = dm[i, j]
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
if clade1.is_terminal():
clade1.branch_length = min_dist * 1.0 / 2
else:
clade1.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade1)
if clade2.is_terminal():
clade2.branch_length = min_dist * 1.0 / 2
else:
clade2.branch_length = min_dist * \
1.0 / 2 - self._height_of(clade2)
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]) * 1.0 / 2
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
inner_clade.branch_length = 0
return BaseTree.Tree(inner_clade)
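    # Worked example: for names ['A', 'B', 'C'] and matrix [[0], [2, 0],
    # [4, 4, 0]], A and B merge first at distance 2 (branch_length 1 each);
    # the new node then joins C at (4 + 4)/2 = 4, so C gets branch_length 2
    # and the merged node 2 - 1 = 1 after subtracting its height.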
def nj(self, distance_matrix):
"""Construct and return an Neighbor Joining tree.
:Parameters:
distance_matrix : _DistanceMatrix
The distance matrix for tree construction.
"""
if not isinstance(distance_matrix, _DistanceMatrix):
raise TypeError("Must provide a _DistanceMatrix object.")
# make a copy of the distance matrix to be used
dm = copy.deepcopy(distance_matrix)
# init terminal clades
clades = [BaseTree.Clade(None, name) for name in dm.names]
# init node distance
node_dist = [0] * len(dm)
# init minimum index
min_i = 0
min_j = 0
inner_count = 0
while len(dm) > 2:
# calculate nodeDist
for i in range(0, len(dm)):
node_dist[i] = 0
for j in range(0, len(dm)):
node_dist[i] += dm[i, j]
node_dist[i] = node_dist[i] / (len(dm) - 2)
# find minimum distance pair
min_dist = dm[1, 0] - node_dist[1] - node_dist[0]
min_i = 0
min_j = 1
for i in range(1, len(dm)):
for j in range(0, i):
temp = dm[i, j] - node_dist[i] - node_dist[j]
if min_dist > temp:
min_dist = temp
min_i = i
min_j = j
# create clade
clade1 = clades[min_i]
clade2 = clades[min_j]
inner_count += 1
inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
inner_clade.clades.append(clade1)
inner_clade.clades.append(clade2)
# assign branch length
clade1.branch_length = (dm[min_i, min_j] + node_dist[min_i] -
node_dist[min_j]) / 2.0
clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
# update node list
clades[min_j] = inner_clade
del clades[min_i]
# rebuild distance matrix,
# set the distances of new node at the index of min_j
for k in range(0, len(dm)):
if k != min_i and k != min_j:
dm[min_j, k] = (dm[min_i, k] + dm[min_j, k] -
dm[min_i, min_j]) / 2.0
dm.names[min_j] = "Inner" + str(inner_count)
del dm[min_i]
# set the last clade as one of the child of the inner_clade
root = None
if clades[0] == inner_clade:
clades[0].branch_length = 0
clades[1].branch_length = dm[1, 0]
clades[0].clades.append(clades[1])
root = clades[0]
else:
clades[0].branch_length = dm[1, 0]
clades[1].branch_length = 0
clades[1].clades.append(clades[0])
root = clades[1]
return BaseTree.Tree(root, rooted=False)
def _height_of(self, clade):
"""calculate clade height -- the longest path to any terminal."""
height = 0
if clade.is_terminal():
height = clade.branch_length
else:
height = height + max([self._height_of(c) for c in clade.clades])
return height
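# Illustrative usage sketch (not part of the original module): building a tree
# from a PHYLIP alignment, mirroring the constructor call used later in
# ParsimonyTreeConstructor.build_tree; the file path is a placeholder.
def _example_distance_tree(alignment_path='example.phy'):
    from Bio import AlignIO
    aln = AlignIO.read(alignment_path, 'phylip')
    constructor = DistanceTreeConstructor(DistanceCalculator('identity'), 'nj')
    return constructor.build_tree(aln)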
# #################### Tree Scoring and Searching Classes #####################
class Scorer(object):
"""Base class for all tree scoring methods"""
def get_score(self, tree, alignment):
"""Caller to get the score of a tree for the given alignment.
        This should be implemented in a subclass."""
raise NotImplementedError("Method not implemented!")
class TreeSearcher(object):
"""Base class for all tree searching methods"""
def search(self, starting_tree, alignment):
"""Caller to search the best tree with a starting tree.
        This should be implemented in a subclass."""
raise NotImplementedError("Method not implemented!")
class NNITreeSearcher(TreeSearcher):
"""Tree searching with Nearest Neighbor Interchanges (NNI) algorithm.
:Parameters:
scorer : ParsimonyScorer
parsimony scorer to calculate the parsimony score of
different trees during NNI algorithm.
"""
def __init__(self, scorer):
if isinstance(scorer, Scorer):
self.scorer = scorer
else:
raise TypeError("Must provide a Scorer object.")
def search(self, starting_tree, alignment):
"""Implement the TreeSearcher.search method.
:Parameters:
starting_tree : Tree
starting tree of NNI method.
alignment : MultipleSeqAlignment
multiple sequence alignment used to calculate parsimony
score of different NNI trees.
"""
return self._nni(starting_tree, alignment)
def _nni(self, starting_tree, alignment):
"""Search for the best parsimony tree using the NNI algorithm."""
best_tree = starting_tree
while True:
best_score = self.scorer.get_score(best_tree, alignment)
temp = best_score
for t in self._get_neighbors(best_tree):
score = self.scorer.get_score(t, alignment)
if score < best_score:
best_score = score
best_tree = t
# stop if no smaller score exist
if best_score >= temp:
break
return best_tree
def _get_neighbors(self, tree):
"""Get all neighbor trees of the given tree.
Currently only for binary rooted trees.
"""
# make child to parent dict
parents = {}
for clade in tree.find_clades():
if clade != tree.root:
node_path = tree.get_path(clade)
# cannot get the parent if the parent is root. Bug?
if len(node_path) == 1:
parents[clade] = tree.root
else:
parents[clade] = node_path[-2]
neighbors = []
root_childs = []
for clade in tree.get_nonterminals(order="level"):
if clade == tree.root:
left = clade.clades[0]
right = clade.clades[1]
root_childs.append(left)
root_childs.append(right)
if not left.is_terminal() and not right.is_terminal():
# make changes around the left_left clade
# left_left = left.clades[0]
left_right = left.clades[1]
right_left = right.clades[0]
right_right = right.clades[1]
                    # neighbor 1 (left_left + right_right)
del left.clades[1]
del right.clades[1]
left.clades.append(right_right)
right.clades.append(left_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (left_left + right_left)
del left.clades[1]
del right.clades[0]
left.clades.append(right_left)
right.clades.append(right_right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (left_left + left_right)
del left.clades[1]
del right.clades[0]
left.clades.append(left_right)
right.clades.insert(0, right_left)
elif clade in root_childs:
# skip root child
continue
else:
# method for other clades
# make changes around the parent clade
left = clade.clades[0]
right = clade.clades[1]
parent = parents[clade]
if clade == parent.clades[0]:
sister = parent.clades[1]
# neighbor 1 (parent + right)
del parent.clades[1]
del clade.clades[1]
parent.clades.append(right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[1]
del clade.clades[0]
parent.clades.append(sister)
clade.clades.insert(0, left)
else:
sister = parent.clades[0]
# neighbor 1 (parent + right)
del parent.clades[0]
del clade.clades[1]
parent.clades.insert(0, right)
clade.clades.append(sister)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# neighbor 2 (parent + left)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, left)
clade.clades.append(right)
temp_tree = copy.deepcopy(tree)
neighbors.append(temp_tree)
# change back (parent + sister)
del parent.clades[0]
del clade.clades[0]
parent.clades.insert(0, sister)
clade.clades.insert(0, left)
return neighbors
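# Illustrative sketch (not part of the original module): a minimal parsimony
# search combining the scorer and NNI searcher defined in this module; the
# starting tree and alignment are assumed to be supplied by the caller.
def _example_nni_search(starting_tree, alignment):
    scorer = ParsimonyScorer()           # plain Fitch scoring, no penalty matrix
    searcher = NNITreeSearcher(scorer)
    return searcher.search(starting_tree, alignment)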
# ######################## Parsimony Classes ##########################
class ParsimonyScorer(Scorer):
"""Parsimony scorer with a scoring matrix.
This is a combination of Fitch algorithm and Sankoff algorithm.
See ParsimonyTreeConstructor for usage.
:Parameters:
matrix : _Matrix
scoring matrix used in parsimony score calculation.
"""
def __init__(self, matrix=None):
if not matrix or isinstance(matrix, _Matrix):
self.matrix = matrix
else:
raise TypeError("Must provide a _Matrix object.")
def get_score(self, tree, alignment):
"""Calculate and return the parsimony score given a tree and
the MSA using the Fitch algorithm without the penalty matrix
the Sankoff algorithm with the matrix"""
# make sure the tree is rooted and bifurcating
if not tree.is_bifurcating():
raise ValueError("The tree provided should be bifurcating.")
if not tree.rooted:
tree.root_at_midpoint()
# sort tree terminals and alignment
terms = tree.get_terminals()
terms.sort(key=lambda term: term.name)
alignment.sort()
if not all(t.name == a.id for t, a in zip(terms, alignment)):
raise ValueError(
"Taxon names of the input tree should be the same with the alignment.")
# term_align = dict(zip(terms, alignment))
score = 0
for i in range(len(alignment[0])):
# parsimony score for column_i
score_i = 0
# get column
column_i = alignment[:, i]
# skip non-informative column
if column_i == len(column_i) * column_i[0]:
continue
# start calculating score_i using the tree and column_i
# Fitch algorithm without the penalty matrix
if not self.matrix:
# init by mapping terminal clades and states in column_i
clade_states = dict(zip(terms, [set([c]) for c in column_i]))
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_state = clade_states[clade_childs[0]]
right_state = clade_states[clade_childs[1]]
state = left_state & right_state
if not state:
state = left_state | right_state
score_i = score_i + 1
clade_states[clade] = state
# Sankoff algorithm with the penalty matrix
else:
inf = float('inf')
# init score arrays for terminal clades
alphabet = self.matrix.names
length = len(alphabet)
clade_scores = {}
for j in range(len(column_i)):
array = [inf] * length
index = alphabet.index(column_i[j])
array[index] = 0
clade_scores[terms[j]] = array
# bottom up calculation
for clade in tree.get_nonterminals(order="postorder"):
clade_childs = clade.clades
left_score = clade_scores[clade_childs[0]]
right_score = clade_scores[clade_childs[1]]
array = []
for m in range(length):
min_l = inf
min_r = inf
for n in range(length):
sl = self.matrix[
alphabet[m], alphabet[n]] + left_score[n]
sr = self.matrix[
alphabet[m], alphabet[n]] + right_score[n]
if min_l > sl:
min_l = sl
if min_r > sr:
min_r = sr
array.append(min_l + min_r)
clade_scores[clade] = array
# minimum from root score
score_i = min(array)
# TODO: resolve internal states
score = score + score_i
return score
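# Illustrative sketch (not part of the original module): Sankoff scoring with
# a toy symmetric penalty matrix. The _Matrix(names, matrix) construction
# below is an assumption about this module's matrix class (names plus a
# lower-triangular list of values).
def _example_sankoff_score(tree, alignment):
    penalty = _Matrix(names=['A', 'C', 'G', 'T'],
                      matrix=[[0], [1, 0], [1, 1, 0], [1, 1, 1, 0]])
    return ParsimonyScorer(matrix=penalty).get_score(tree, alignment)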
class ParsimonyTreeConstructor(TreeConstructor):
"""Parsimony tree constructor.
:Parameters:
searcher : TreeSearcher
tree searcher to search the best parsimony tree.
starting_tree : Tree
starting tree provided to the searcher.
Example
--------
>>> from Bio import AlignIO
>>> from TreeConstruction import *
>>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
>>> print aln
SingleLetterAlphabet() alignment with 5 rows and 13 columns
AACGTGGCCACAT Alpha
AAGGTCGCCACAC Beta
GAGATTTCCGCCT Delta
GAGATCTCCGCCC Epsilon
CAGTTCGCCACAA Gamma
    >>> from Bio import Phylo
    >>> starting_tree = Phylo.read('Tests/TreeConstruction/nj.tre', 'newick')
    >>> print starting_tree
Tree(weight=1.0, rooted=False)
Clade(branch_length=0.0, name='Inner3')
Clade(branch_length=0.01421, name='Inner2')
Clade(branch_length=0.23927, name='Inner1')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.29231, name='Alpha')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.17523, name='Gamma')
>>> from TreeConstruction import *
>>> scorer = ParsimonyScorer()
>>> searcher = NNITreeSearcher(scorer)
>>> constructor = ParsimonyTreeConstructor(searcher, starting_tree)
>>> pars_tree = constructor.build_tree(aln)
>>> print pars_tree
Tree(weight=1.0, rooted=True)
Clade(branch_length=0.0)
Clade(branch_length=0.197335, name='Inner1')
Clade(branch_length=0.13691, name='Delta')
Clade(branch_length=0.08531, name='Epsilon')
Clade(branch_length=0.041935, name='Inner2')
Clade(branch_length=0.01421, name='Inner3')
Clade(branch_length=0.17523, name='Gamma')
Clade(branch_length=0.07477, name='Beta')
Clade(branch_length=0.29231, name='Alpha')
"""
def __init__(self, searcher, starting_tree=None):
self.searcher = searcher
self.starting_tree = starting_tree
def build_tree(self, alignment):
"""Build the tree.
:Parameters:
alignment : MultipleSeqAlignment
multiple sequence alignment to calculate parsimony tree.
"""
# if starting_tree is none,
# create a upgma tree with 'identity' scoring matrix
if self.starting_tree is None:
dtc = DistanceTreeConstructor(DistanceCalculator("identity"),
"upgma")
self.starting_tree = dtc.build_tree(alignment)
return self.searcher.search(self.starting_tree, alignment)
|
zjuchenyuan/BioWeb
|
Lib/Bio/Phylo/TreeConstruction.py
|
Python
|
mit
| 39,710
|
[
"BLAST",
"Biopython"
] |
6897a7f85a9c302d74da8c1817b27192b35d6ed9d309653bab3e15c4234b3afb
|
#!/usr/bin/env python
# PySCUBA/src/PySCUBA/Preprocessing.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com; ggiecold@jimmy.harvard.edu
from __future__ import division
from operator import and_
from os import getcwd, makedirs, path
import re
from struct import calcsize, unpack
from sys import exit
from warnings import warn
import numpy as np
from rpy2.rinterface import NULL, TRUE
from rpy2.robjects import numpy2ri
from rpy2.robjects.packages import importr
from sklearn.manifold import TSNE
from . import Tree_classes
__all__ = ['Annotation', 'cytometry_preprocess', 'Cyto_data',
'FCS_handler', 'get_FCS_data', 'infer_pseudotime',
'PCR_preprocess', 'RNASeq_preprocess']
def infer_pseudotime(data, output_directory, tag = '', pcv_method = 'Rprincurve',
anchor_gene = None, markers = None):
assert pcv_method in {'Rprincurve'} # taking into account the possibility of adding
# in future versions other methods
# for principal curve analysis
N_dim = 3
model = TSNE(n_components = N_dim)
TSNE_data = model.fit_transform(data)
if pcv_method == 'Rprincurve':
with open(path.join(output_directory, "{0}_TSNE_d{1}.tsv".format(tag, N_dim)),
'w') as f:
f.write('\t'.join(['T{0}'.format(k) for k in xrange(1, N_dim + 1)]))
f.write('\n')
np.savetxt(f, TSNE_data, fmt = '%.6f', delimiter = '\t')
numpy2ri.activate()
princurve = importr('princurve')
procedure = princurve.principal_curve
fitpc = procedure(TSNE_data, NULL, 0.001, TRUE, 200, 2, 'lowess')
curve_projections_matrix = np.array(fitpc.rx('s')[0])
pseudotime_series = np.array(fitpc.rx('lambda')[0])
with open(path.join(output_directory, "{0}_TSNE_d{1}_pcv.tsv".format(tag,
N_dim)), 'w') as f:
np.savetxt(f, curve_projections_matrix, fmt = '%.6f', delimiter = '\t')
with open(path.join(output_directory, "{0}_TSNE_d{1}_lambda.tsv".format(tag,
N_dim)), 'w') as f:
np.savetxt(f, pseudotime_series, fmt = '%.6f', delimiter = '\t')
else:
print("ERROR: PySCUBA: Preprocessing: infer_pseudotime:\n"
"your choice of method for principal curve analysis is not supported "
"by the present version of PySCUBA.")
exit(1)
if anchor_gene:
assert isinstance(anchor_gene, str)
assert markers is not None
N_cells_anchor = 1000
gene_idx = np.where(markers == anchor_gene)[0]
pseudotime_idx = np.argsort(pseudotime_series)
anchor_gene_avg_beg = np.mean(data[pseudotime_idx[:N_cells_anchor], gene_idx])
anchor_gene_avg_end = np.mean(data[pseudotime_idx[N_cells_anchor:], gene_idx])
if anchor_gene_avg_end > anchor_gene_avg_beg:
pseudotime_series = np.max(pseudotime_series) - pseudotime_series
t_min = np.min(pseudotime_series)
t_max = np.max(pseudotime_series)
t_bins = 8
cell_stages = t_bins * (pseudotime_series - t_min + 0.0001) / (t_max - t_min + 0.0002)
cell_stages = np.ceil(cell_stages).astype(int).astype('str')
return cell_stages
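# Illustrative sketch (not part of the original module): infer_pseudotime
# returns one of 8 pseudotime bins (as strings '1'..'8') per cell. The random
# matrix below stands in for a real expression dataset, and the function still
# requires rpy2 and the R 'princurve' package at runtime.
def _example_infer_pseudotime(output_directory):
    data = np.random.rand(200, 15)  # 200 cells x 15 features (toy data)
    return infer_pseudotime(data, output_directory, tag='demo')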
def parse_pairs(text):
"""Return (key, value) pairs from a string featuring particular delimiters.
Modified from a corresponding function in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
delim = text[0]
if delim == r'|':
delim = '\|'
elif delim == r'\a'[0]:
delim = '\\\\'
if delim != text[-1]:
warn("WARNING: the text does not start and end with the same delimiter!")
regex = re.compile('(?<=[^%s])%s(?!%s)' % (delim, delim, delim))
tmp = text[1:-1].replace('$', '')
tmp = regex.split(tmp)
return dict(zip([x.lower() for x in tmp[::2]], tmp[1::2]))
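# Illustrative example (not part of the original module): parse_pairs applied
# to a toy '/'-delimited FCS text segment.
def _example_parse_pairs():
    text = '/key1/value1/key2/value2/'
    return parse_pairs(text)  # -> {'key1': 'value1', 'key2': 'value2'}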
class Annotation(object):
"""An annotation class instance stores meta-data from the recordings of a
cytometry experiment.
Modified from a corresponding class in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, annotations = None):
        if annotations is None:
annotations = {}
self.__dict__['_mydict'] = annotations
def __getattr__(self, name):
if name in self._mydict.keys():
self.__dict__[name] = self._mydict[name]
return self._mydict[name]
else:
try:
return self._mydict.__getattribute__(name)
except:
                raise AttributeError("'{0}' has no attribute '{1}'".format(str(self.__class__), name))
def __getstate__(self):
return self._mydict
def __setstate__(self, dict):
self.__dict__['_mydict'] = dict
for i in dict.keys():
self.__dict__[i] = dict[i]
def __setattr__(self, name, value):
Annotation.__getattribute__(self, '_mydict')[name] = value
self.__dict__[name] = value
def __setitem__(self, name, value):
self._mydict[name] = value
self.__dict__[name] = self._mydict[name]
def __getitem__(self, name):
return self._mydict[name]
def __repr__(self):
return 'Annotation(' + self._mydict.__repr__() + ')'
    # Note: the pair below overrides the earlier __getstate__/__setstate__
    # definitions above (later definitions win in Python).
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, state):
        self.__dict__ = state
def __getinitargs__(self):
return (self._mydict,)
def copy(self):
return Annotation(self._mydict.copy())
class Cyto_data(object):
"""
A Cyto_data object stores the data from a cytometry experiment.
Modified from a corresponding class in the outdated 'fcm' Python
package by Jacob Frelinger.
Members:
--------
Cyto_data.data_points : numpy.array
The data points.
Cyto_data.channels : list
Records which markers or scatters are in which columns.
Cyto_data.scatters : list
Keeps track of which indexes in Cyto_data.channels are scatters.
"""
def __init__(self, name, data_points, channels, scatters = None, notes = None):
"""
Parameters
----------
name: name of the *.fcs file, barring any extension
data_points: an array of data points
channels: list
Records which markers/scatters are in which columns.
scatters: list
Which channels indexes denote scatters
"""
self.name = name
self.data_points = data_points
self.tree = Tree_classes.Tree(data_points, channels)
self.scatters = scatters
self.markers = []
if self.scatters is not None:
for channel in range(len(channels)):
if channel in self.scatters:
pass
elif self.tree.root.channels[channel] in self.scatters:
pass
else:
self.markers.append(channel)
        if notes is None:
notes = Annotation()
self.notes = notes
def __unicode__(self):
return self.name
def __repr__(self):
return self.name
def __getitem__(self, item):
"""Return the Cyto_data points.
"""
if isinstance(item, tuple):
item = list(item)
if isinstance(item[1], str):
item[1] = self.name_to_index(item[1])
elif isinstance(item[1], tuple) or isinstance(item[1], list):
item[1] = list(item[1])
for i, j in enumerate(item[1]):
if isinstance(j, str):
print('{0} is string {1}'.format(i, j))
item[1][i] = self.name_to_index(j)
item = tuple(item)
return self.tree.view()[item]
@property
def channels(self):
return self.current_node.channels
def __getattr__(self, name):
if name in dir(self.current_node.view()):
return self.current_node.view().__getattribute__(name)
else:
            raise AttributeError("'{0}' has no attribute '{1}'".format(str(self.__class__), name))
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
for i in dict.keys():
self.__dict__[i] = dict[i]
def name_to_index(self, channels):
"""Return the channel indexes for the channels provided as arguments.
"""
if isinstance(channels, str):
return self.channels.index(channels)
idx = []
for i in channels:
try:
idx.append(self.channels.index(i))
except ValueError:
try:
for j in range(1, int(self.notes.text['par']) + 1):
if i == self.notes.text['p%dn' % j]:
idx.append(self.channels.index(self.notes.text['p%ds' % j]))
except ValueError:
raise ValueError('{0} is not in list'.format(i))
if idx:
return idx
else:
raise ValueError('The field {0} was not found'.format(str(channels)))
def get_channel_by_name(self, channels):
"""Return the data associated with specific channel names.
"""
return self.tree.view()[:, self.name_to_index(channels)]
def get_markers(self):
"""Return the data associated with all the markers.
"""
return self.view()[:, self.markers]
def view(self):
"""Return the current view of the data.
"""
return self.tree.view()
def visit(self, name):
"""Switch the current view of the data.
"""
self.tree.visit(name)
@property
def current_node(self):
"""Return the current node.
"""
return self.tree.current
def copy(self):
"""Return a copy of the Cyto_data object.
"""
tname = self.name
tree_data_points = self.tree.root.data
tnotes = self.notes.copy()
tchannels = self.channels[:]
tscchannels = self.scatters[:]
tmp = Cyto_data(tname, tree_data_points, tchannels, tscchannels, tnotes)
from copy import deepcopy
tmp.tree = deepcopy(self.tree)
return tmp
def gate(self, g, chan=None):
"""Return a gated region of the cytometry data.
"""
return g.gate(self, chan)
def subsample(self, s):
"""Return subsampled cytometry data.
"""
return s.subsample(self)
def get_current_node(self):
"""Return the current node.
"""
return self.current_node
def add_view(self, node):
"""Add a new node to the visualization tree.
"""
self.tree.add_child(node.name, node)
return self
def summary(self):
"""Provide a summary of current view.
"""
data_points = self.view()
means = data_points.mean(0)
stds = data_points.std(0)
mins = data_points.min(0)
maxs = data_points.max(0)
medians = np.median(data_points, 0)
dim = data_points.shape[1]
summary = ''
for i in range(dim):
summary = summary + self.channels[i] + ":\n"
summary = summary + " max: " + str(maxs[i]) + "\n"
summary = summary + " mean: " + str(means[i]) + "\n"
summary = summary + " median: " + str(medians[i]) + "\n"
summary = summary + " min: " + str(mins[i]) + "\n"
summary = summary + " std: " + str(stds[i]) + "\n"
return summary
def boundary_events(self):
"""Return a dictionary of the percentage of all events that are in the first
and in the last channel for each channel.
"""
boundary_dict = {}
for k, channel in enumerate(self.channels):
col = self.view()[:, k]
boundary_dict[channel] = \
sum((col == min(col)) | (col == max(col))) / len(col)
return boundary_dict
class NotSupportedFCSDataMode(Exception):
"""Exception raised for data modes in a *.fcs file that are not currently supported.
Modified from a corresponding exception in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, mode):
self.mode = mode
self.message = "Unfortunately, the FCS data stored as type {0} is not currently supported.".format(mode)
self.args = (mode,)
def integer_format(b):
"""Return the binary format of an integer.
"""
if b == 8:
return 'B'
elif b == 16:
return 'H'
elif b == 32:
return 'I'
else:
print("Cannot handle integers of bit size {0}.".format(b))
return None
def integer_bit_mask(b, ub):
"""Return the bit-mask of an integer and a bit-witdh.
"""
if b == 8:
return (0xFF >> (b - ub))
elif b == 16:
return (0xFFFF >> (b - ub))
elif b == 32:
return (0xFFFFFFFF >> (b - ub))
else:
print("Cannot handle integers of bit size {0}.".format(b))
return None
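# Illustrative example (not part of the original module): struct format and
# bit mask for a 16-bit field of which only 10 bits are used.
def _example_bit_helpers():
    fmt = integer_format(16)         # -> 'H'
    mask = integer_bit_mask(16, 10)  # -> 0x03FF
    return fmt, mask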
def fluorescent_channel(name):
"""Check if a channel is a fluorescent channel.
"""
name = name.lower()
if name.startswith('cs'):
return False
elif name.startswith('fs'):
return False
elif name.startswith('ss'):
return False
elif name.startswith('ae'):
return False
elif name.startswith('cv'):
return False
elif name.startswith('time'):
return False
else:
return True
class FCS_handler(object):
"""Hold object to read and parse *.fcs files.
Modified from a corresponding class in the outdated 'fcm'
Python package by Jacob Frelinger.
"""
def __init__(self, file_path):
self.file_path = file_path
self.current_offset = 0
def get_next_dataset(self, **kwargs):
"""Return the next cytometry dataset stored in a *.fcs file.
"""
with open(self.file_path, 'rb') as self._f:
header = self.parse_header(self.current_offset)
text = self.parse_text(self.current_offset, header['text_start'],
header['text_stop'])
try:
analysis_beg = text['begin_analysis']
except KeyError:
analysis_beg = header['analysis_start']
try:
analysis_end = text['end_analysis']
except KeyError:
analysis_end = header['analysis_end']
analysis = self.parse_analysis(self.current_offset, analysis_beg,
analysis_end)
try:
data_beg = int(text['begin_data'])
except KeyError:
data_beg = header['data_start']
try:
data_end = int(text['end_data'])
except KeyError:
data_end = header['data_end']
LMD = self.fix_LMD(self.current_offset, header['text_start'],
header['text_stop'])
data_end = data_end + LMD
data = self.parse_data(self.current_offset, data_beg, data_end, text)
channels = []
scchannels = []
scchannel_indexes = []
base_chan_name = []
for i in range(1, int(text['par']) + 1):
base_chan_name.append(text['p%dn' % i])
try:
if text['p%ds' % i] not in ['',' ']:
name = text['p%ds' % i]
else:
name = text['p%dn' % i]
except KeyError:
name = text['p%dn' % i]
channels.append(name)
if not fluorescent_channel(name):
scchannels.append(name)
if name != 'Time':
scchannel_indexes.append(i - 1)
_, name = path.split(self.file_path)
name, _ = path.splitext(name)
cyto_object = Cyto_data(name, data, channels, scchannels,
Annotation({'text': text,
'header': header,
'analysis': analysis,}))
return cyto_object
def read_bytes(self, offset, start, stop):
"""Read bytes from start to stop, included.
"""
self._f.seek(offset + start)
return self._f.read(stop - start + 1)
def parse_header(self, offset):
"""
Parse the cytometry data in a *.fcs file at the specified offset
(accounting for the possibility of several data parts in the said file).
"""
header = {}
header['version'] = float(self.read_bytes(offset, 3, 5))
header['text_start'] = int(self.read_bytes(offset, 10, 17))
header['text_stop'] = int(self.read_bytes(offset, 18, 25))
header['data_start'] = int(self.read_bytes(offset, 26, 33))
header['data_end'] = int(self.read_bytes(offset, 34, 41))
try:
header['analysis_start'] = int(self.read_bytes(offset, 42, 49))
except ValueError:
header['analysis_start'] = -1
try:
header['analysis_end'] = int(self.read_bytes(offset, 50, 57))
except ValueError:
header['analysis_end'] = -1
return header
def parse_text(self, offset, start, stop):
"""Return the parsed text segment of a *.fcs file.
"""
text = self.read_bytes(offset, start, stop)
return parse_pairs(text)
def parse_analysis(self, offset, start, stop):
"""Return the parsed analysis part of the *.fcs file under consideration.
"""
if start == stop:
return {}
else:
text = self.read_bytes(offset, start, stop)
return parse_pairs(text)
def fix_LMD(self, offset, start, stop):
"""Handle the LMD format (embedded FCS format) and the way it counts,
which differs from other FCS formats.
"""
text = self.read_bytes(offset, start, stop)
if text[0] == text[-1]:
return 0
else:
return -1
def parse_data(self, offset, start, stop, text):
"""Return an array holding the data part of *.fcs file at hand.
"""
dtype = text['datatype']
mode = text['mode']
tot = int(text['tot'])
if mode == 'c' or mode == 'u':
raise NotSupportedFCSDataMode(mode)
if text['byteord'] == '1,2,3,4' or text['byteord'] == '1,2':
order = '<'
elif text['byteord'] == '4,3,2,1' or text['byteord'] == '2,1':
order = '>'
else:
warn("WARNING: unsupported byte order {0}; using default @".format(text['byteord']))
order = '@'
bit_width = []
data_range = []
for i in range(1, int(text['par']) + 1):
bit_width.append(int(text['p%db' % i]))
data_range.append(int(text['p%dr' % i]))
if dtype.lower() == 'i':
data = self.parse_int_data(offset, start, stop, bit_width, data_range,
tot, order)
elif dtype.lower() == 'f' or dtype.lower() == 'd':
data = self.parse_float_data(offset, start, stop, dtype.lower(), tot, order)
else:
data = self.parse_ASCII_data(offset, start, stop, bit_width, dtype,
tot, order)
return data
def parse_int_data(self, offset, start, stop, bit_width, data_range, tot, order):
"""Parse *.fcs file and return data as an integer list.
"""
if reduce(and_, [item in [8, 16, 32] for item in bit_width]):
if len(set(bit_width)) == 1:
                num_items = (stop - start + 1) // calcsize(integer_format(bit_width[0]))
tmp = unpack('%s%d%s' % (order, num_items, integer_format(bit_width[0])),
self.read_bytes(offset, start, stop))
else:
unused_bit_widths = map(int, map(np.log2, data_range))
tmp = []
current = start
while current < stop:
for i, current_width in enumerate(bit_width):
bit_mask = integer_bit_mask(current_width, unused_bit_widths[i])
                        N_bytes = current_width // 8
bin_string = self.read_bytes(offset, current, current + N_bytes - 1)
current += N_bytes
val = bit_mask & unpack('%s%s' % (order, integer_format(current_width)), bin_string)[0]
tmp.append(val)
else:
warn('WARNING: non-standard bit widths for the data part.')
return None
return np.array(tmp).reshape((tot, len(bit_width)))
def parse_float_data(self, offset, start, stop, dtype, tot, order):
"""Parse a *.fcs file and return list of float data entries.
"""
        N_items = (stop - start + 1) // calcsize(dtype)
        tmp = unpack('%s%d%s' % (order, N_items, dtype),
                     self.read_bytes(offset, start, stop))
        return np.array(tmp).reshape((tot, len(tmp) // tot))
def parse_ASCII_data(self, offset, start, stop, bit_width, dtype, tot, order):
"""Parse ASCII encoded data from a *.fcs file.
"""
        N_items = (stop - start + 1) // calcsize(dtype)
        tmp = unpack('%s%d%s' % (order, N_items, dtype),
                     self.read_bytes(offset, start, stop))
        return np.array(tmp).reshape((tot, len(tmp) // tot))
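# Illustrative sketch (not part of the original module): reading the first
# dataset of an *.fcs file with the handler above; the path is a placeholder.
def _example_read_fcs(file_path='sample.fcs'):
    handler = FCS_handler(file_path)
    return handler.get_next_dataset()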
def cytometry_preprocess(file_path, log_mode = False, pseudotime_mode = True,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
data_tag, output_directory = create_output_directory(file_path)
cyto_object = get_FCS_data(file_path)
    marker_idx = np.array(cyto_object.markers, dtype = int)
    marker_names = np.array(cyto_object.channels, dtype = str)[marker_idx]
data = cyto_object.data_points
data = data[:, marker_idx]
cell_IDs = np.array(['cell_{0}'.format(i) for i in xrange(1, data.shape[0] + 1)],
dtype = str)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(marker_names == name)[0])
data = np.delete(data, indices, axis = 1)
marker_names = np.delete(marker_names, indices)
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, marker_names)
    write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, marker_names)
return cell_IDs, data, marker_names, cell_stages.astype(float), data_tag, output_directory
def PCR_preprocess(file_path, log_mode = False, pseudotime_mode = False,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
low_gene_fraction_max = 0.8
data_tag, output_directory = create_output_directory(file_path)
cell_IDs, cell_stages, data = get_PCR_or_RNASeq_data(file_path, pseudotime_mode)
with open(file_path, 'r') as f:
markers = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = 1 if pseudotime_mode else 2, usecols = [0])
    markers = markers.reshape(markers.size)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(markers == name)[0])
data = np.delete(data, indices, axis = 1)
markers = np.delete(markers, indices)
if pseudotime_mode:
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, markers)
condition = np.mean(data == 0, axis = 0) < low_gene_fraction_max
data = np.compress(condition, data, 1)
markers = np.compress(condition, markers)
write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers)
return cell_IDs, data, markers, cell_stages.astype(float), data_tag, output_directory
def RNASeq_preprocess(file_path, log_mode = True, pseudotime_mode = False,
pcv_method = 'Rprincurve', anchor_gene = None,
exclude_marker_names = None):
assert isinstance(log_mode, bool)
assert isinstance(pseudotime_mode, bool)
# Threshold value for genes of low expression levels
low_gene_threshold = 1
# Maximum fraction of lowly-expressed cells allowed for each gene
low_gene_fraction_max = 0.7
# Number of highly variable genes selected
N_selected_genes = 1000
data_tag, output_directory = create_output_directory(file_path)
cell_IDs, cell_stages, data = get_PCR_or_RNASeq_data(file_path, pseudotime_mode)
with open(file_path, 'r') as f:
markers = np.loadtxt(f, dtype = str, delimiter = '\t',
skiprows = 1 if pseudotime_mode else 2, usecols = [0])
    markers = markers.reshape(markers.size)
if exclude_marker_names:
indices = np.zeros(0, dtype = int)
for name in exclude_marker_names:
indices = np.append(indices, np.where(markers == name)[0])
data = np.delete(data, indices, axis = 1)
markers = np.delete(markers, indices)
if pseudotime_mode:
cell_stages = infer_pseudotime(data, output_directory, data_tag, pcv_method,
anchor_gene, markers)
condition = np.mean(data < low_gene_threshold, axis = 0) < low_gene_fraction_max
data = np.compress(condition, data, 1)
markers = np.compress(condition, markers)
Fano_factors = np.var(data, axis = 0) / np.mean(data, axis = 0).astype(float)
idx = np.argsort(Fano_factors)[::-1][:N_selected_genes]
data = data[:, idx]
markers = markers[idx]
if log_mode:
np.log2(data + 1, data)
write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers)
return cell_IDs, data, markers, cell_stages.astype(float), data_tag, output_directory
def create_output_directory(file_path):
data_tag = path.basename(path.abspath(file_path)).split('.')[0]
output_directory = path.join(getcwd(), 'SCUBA_analysis_of_{0}'.format(data_tag))
try:
makedirs(output_directory)
except OSError:
if not path.isdir(output_directory):
raise
return data_tag, output_directory
def get_FCS_data(file_path, **kwargs):
"""Return a data object from an *.fcs file"""
cyto_object = FCS_handler(file_path)
data = cyto_object.get_next_dataset(**kwargs)
cyto_object._f.close()
del cyto_object
return data
def get_PCR_or_RNASeq_data(file_path, pseudotime_mode = False):
with open(file_path, 'r') as f:
cell_IDs = f.readline().rstrip('\n').split('\t')
cell_IDs = np.array(cell_IDs[1:], dtype = str)
if pseudotime_mode:
cell_stages = np.empty(0, dtype = float)
else:
cell_stages = f.readline().rstrip('\n').split('\t')
cell_stages = np.array(cell_stages[1:], dtype = str)
data = np.loadtxt(f, dtype = float, delimiter = '\t',
usecols = xrange(1, len(cell_IDs) + 1))
data = data.T
return cell_IDs, cell_stages, data
def write_preprocessed_data(output_directory, cell_IDs, cell_stages, data, markers):
processed_data_path = path.join(output_directory, 'processed_data.tsv')
with open(processed_data_path, 'w') as f:
f.write('\t'.join(cell_IDs))
f.write('\n')
f.write('\t'.join(cell_stages))
f.write('\n')
np.savetxt(f, data.T, fmt = '%.6f', delimiter = '\t')
dataset = np.genfromtxt(processed_data_path, delimiter = '\t', dtype = str)
dataset = np.insert(dataset, 0, np.append(['Cell ID', 'Stage'],
markers), axis = 1)
with open(processed_data_path, 'w') as f:
np.savetxt(f, dataset, fmt = '%s', delimiter = '\t')
|
GGiecold/PySCUBA
|
src/PySCUBA/Preprocessing.py
|
Python
|
mit
| 29,858
|
[
"VisIt"
] |
1b14daebffc84465fcacadd274ece25ecd60899e1f0eb1ba72fe8c4eb895eef4
|
#!/usr/bin/env python
"""
A simple utility to redo the failed/errored tests.
You need to specify the session directory in order for this script to locate the
tests which need to be re-run.
See also dotest.py, the test driver running the test suite.
Type:
./dotest.py -h
for help.
"""
import os, sys, datetime
import re
# If True, redo with no '-t' option for the test driver.
no_trace = False
# To be filled with the filterspecs found in the session logs.
redo_specs = []
# The filename components to match for. Only files with the contained component names
# will be considered for re-run. Examples: ['X86_64', 'clang'].
filename_components = []
do_delay = False
# There is a known bug with respect to comp_specs and arch_specs, in that if we
# encountered "-C clang" and "-C gcc" when visiting the session files, both
# compilers will end up in the invocation of the test driver when rerunning.
# That is: ./dotest -v -C clang^gcc ... -f ...". Ditto for "-A" flags.
# The "-C compiler" for comp_specs.
comp_specs = set()
# The "-A arch" for arch_specs.
arch_specs = set()
def usage():
print"""\
Usage: redo.py [-F filename_component] [-n] [session_dir] [-d]
where options:
-F : only consider the test for re-run if the session filename contains the filename component
for example: -F x86_64
-n : when running the tests, do not turn on trace mode, i.e., no '-t' option
is passed to the test driver (this will run the tests faster)
-d : pass -d down to the test driver (introduces a delay so you can attach with a debugger)
and session_dir specifies the session directory which contains previously
recorded session infos for all the test cases which either failed or errored.
If session_dir is left unspecified, this script uses a heuristic to find
possible session directories with names starting with %Y-%m-%d- (for example,
2012-01-23-) and employs the one with the latest timestamp."""
sys.exit(0)
def where(session_dir, test_dir):
"""Returns the full path to the session directory; None if non-existent."""
abspath = os.path.abspath(session_dir)
if os.path.isdir(abspath):
return abspath
session_dir_path = os.path.join(test_dir, session_dir)
if os.path.isdir(session_dir_path):
return session_dir_path
return None
# This is the pattern for the line from the log file to redo a test.
# We want the filter spec.
filter_pattern = re.compile("^\./dotest\.py.*-f (.*)$")
comp_pattern = re.compile(" -C ([^ ]+) ")
arch_pattern = re.compile(" -A ([^ ]+) ")
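# Illustrative example (not part of the original script): what filter_pattern
# extracts from a sample dotest invocation logged in a session file.
def _example_filter_match():
    line = "./dotest.py -v -t -f TestFoo.test_bar"
    match = filter_pattern.match(line)
    return match.group(1) if match else None  # -> "TestFoo.test_bar"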
def redo(suffix, dir, names):
"""Visitor function for os.path.walk(path, visit, arg)."""
global redo_specs
global comp_specs
global arch_specs
global filter_pattern
global comp_pattern
global arch_pattern
global filename_components
global do_delay
for name in names:
if name.endswith(suffix):
#print "Find a log file:", name
if name.startswith("Error") or name.startswith("Failure"):
if filename_components:
if not all([comp in name for comp in filename_components]):
continue
with open(os.path.join(dir, name), 'r') as log:
content = log.read()
for line in content.splitlines():
match = filter_pattern.match(line)
if match:
filterspec = match.group(1)
print "adding filterspec:", filterspec
redo_specs.append(filterspec)
comp = comp_pattern.search(line)
if comp:
comp_specs.add(comp.group(1))
arch = arch_pattern.search(line)
if arch:
arch_specs.add(arch.group(1))
else:
continue
def main():
"""Read the session directory and run the failed test cases one by one."""
global no_trace
global redo_specs
global filename_components
global do_delay
test_dir = sys.path[0]
if not test_dir:
test_dir = os.getcwd()
if not test_dir.endswith('test'):
print "This script expects to reside in lldb's test directory."
sys.exit(-1)
index = 1
while index < len(sys.argv):
if sys.argv[index].startswith('-h') or sys.argv[index].startswith('--help'):
usage()
if sys.argv[index].startswith('-'):
# We should continue processing...
pass
else:
# End of option processing.
break
if sys.argv[index] == '-F':
# Increment by 1 to fetch the filename component spec.
index += 1
if index >= len(sys.argv) or sys.argv[index].startswith('-'):
usage()
filename_components.append(sys.argv[index])
elif sys.argv[index] == '-n':
no_trace = True
elif sys.argv[index] == '-d':
do_delay = True
index += 1
if index < len(sys.argv):
# Get the specified session directory.
session_dir = sys.argv[index]
else:
# Use heuristic to find the latest session directory.
name = datetime.datetime.now().strftime("%Y-%m-%d-")
dirs = [d for d in os.listdir(os.getcwd()) if d.startswith(name)]
if len(dirs) == 0:
print "No default session directory found, please specify it explicitly."
usage()
session_dir = max(dirs, key=os.path.getmtime)
if not session_dir or not os.path.exists(session_dir):
print "No default session directory found, please specify it explicitly."
usage()
#print "The test directory:", test_dir
session_dir_path = where(session_dir, test_dir)
print "Using session dir path:", session_dir_path
os.chdir(test_dir)
os.path.walk(session_dir_path, redo, ".log")
if not redo_specs:
print "No failures/errors recorded within the session directory, please specify a different session directory.\n"
usage()
filters = " -f ".join(redo_specs)
compilers = ''
for comp in comp_specs:
compilers += " -C %s" % (comp)
archs = ''
for arch in arch_specs:
archs += "--arch %s " % (arch)
command = "./dotest.py %s %s -v %s %s -f " % (compilers, archs, "" if no_trace else "-t", "-d" if do_delay else "")
print "Running %s" % (command + filters)
os.system(command + filters)
if __name__ == '__main__':
main()
|
s20121035/rk3288_android5.1_repo
|
external/lldb/test/redo.py
|
Python
|
gpl-3.0
| 6,653
|
[
"VisIt"
] |
959565f00144c7ccfdba2e2866e7084b224fd1039154aa88feaf04959dd458b5
|
#!/usr/bin/python
"""
Copyright 2013 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import ghShared
class recipeIngredient:
def __init__(self, ingObject=None, ingResource="", ingName="", ingAmount=0, ingType="", ingObjectName="", resQuality=None, resDetails=""):
self.ingredientObject = ingObject
self.ingredientObjectName = ingObjectName
self.ingredientResource = ingResource
self.ingredientName = ingName
self.ingredientAmount = ingAmount
self.ingredientType = ingType
self.resourceQuality = resQuality
self.resourceDetails = resDetails
class schematicRecipe:
def __init__(self):
self.recipeID = 0
self.schematicID = ""
self.schematicImage = "none.jpg"
self.recipeName = ""
self.recipeIngredients = []
# Return style to use for coloring on resource quality status preview
@staticmethod
def getResourceValueColor(resourceValue):
if (resourceValue >= 650):
return "green"
elif (resourceValue >= 350):
return "yellow"
else:
return "red"
def getIngredientSlots(self):
result = '<div id="recipeIngredients" onmousemove="slotHoverCheck(event)">'
for ing in self.recipeIngredients:
# Set up additional html for filled slot
addClass = ''
amountStr = '0'
qualityIndicatorStyle = ''
addProperties = ''
qualityBarStyle = ''
if ing.ingredientResource != None:
addClass = ' ingredientSlotFilled'
amountStr = str(ing.ingredientAmount)
if ing.resourceQuality != None:
qualityIndicatorStyle = ' style="height:' + str(70*ing.resourceQuality/1000) + 'px;background-color:' + self.getResourceValueColor(ing.resourceQuality) + ';"'
addProperties = ' tag="filled" spawnID="' + str(ing.ingredientResource) + '" tt="' + ing.resourceDetails + '"'
else:
qualityBarStyle = ' style="display:none;"'
# Add slot to result
result += '<div class="inlineBlock recipeIngredient" tag="' + ing.ingredientObject + '" title="Requires ' + str(ing.ingredientObjectName) + '">'
result += '<div class="ingredientHeader">' + ing.ingredientName.replace('_',' ') + '</div>'
result += '<div class="ingredientSlot' + addClass + '" style="background-repeat:no-repeat;background-position: 10px 10px;background-image:url(/images/resources/' + str(ing.ingredientType) + '.png)"' + addProperties + '>' + amountStr + '/' + str(ing.ingredientAmount) + '</div>'
result += '<div class="qualityBar"' + qualityBarStyle + '><div class="qualityIndicator"' + qualityIndicatorStyle + '></div></div>'
result += '</div>'
result += '</div>'
return result
def getRow(self, listType='normal', sid=''):
result = ''
# Get average quality of filled ingredient slots
qualityIndicatorStyle = ''
qualityBarInfo = ''
qualityAvg = self.getAverageQuality()
qualityIndicatorStyle = ' style="width:' + str(140*qualityAvg/1000.0) + 'px;background-color:' + self.getResourceValueColor(qualityAvg) + ';"'
qualityBarInfo = str(int(qualityAvg)) + '/1000 average quality.'
# Build row html
result += '<tr id="recipe' + str(self.recipeID) + '" class="recipeRow">'
if listType == 'suggest':
result += ' <td style="width:32px;"><img src="/images/schematics/' + self.schematicImage + '" class="schematicIngredient" /></td>'
result += ' <td title="' + qualityBarInfo + '"><div>' + self.recipeName + '</div>'
else:
linkTarget = ' href="' + ghShared.BASE_SCRIPT_URL + 'recipe.py/' + str(self.recipeID) + '?gh_sid=' + sid + '"'
result += ' <td style="width:32px;"><a' + linkTarget + '><img src="/images/schematics/' + self.schematicImage + '" class="schematicIngredient" /></a></td>'
result += ' <td title="' + qualityBarInfo + '"><div><a class="nameLink"' + linkTarget + '>' + self.recipeName + '</a></div>'
# Quality bar
result += ' <div class="recipeQualityBar"><div class="qualityIndicator"' + qualityIndicatorStyle + '></div></div>'
# Slot filled summary
result += '<div style="float:right;margin:2px;">'
for ing in self.recipeIngredients:
addStyle = ''
addTitle = ''
if ing.ingredientResource != None and ing.ingredientResource != '':
addStyle = ' slotIndicatorFilled'
if ing.resourceQuality != None:
addTitle = ' (quality:%.0f' % ing.resourceQuality + ')'
else:
addTitle = ' (quality:N/A)'
result += '<div class="inlineBlock slotIndicator' + addStyle + '" title="' + (ing.ingredientName + ': ' + ing.ingredientObject).replace('_',' ') + addTitle + '"></div>'
result += '</div>'
result += ' </td>'
if listType == 'suggest':
ingredientString = ''
for ing in self.recipeIngredients:
ingredientString += ing.ingredientName + ':' + ing.ingredientResource + ','
result += ' <td><button type=button value="Save" class="ghButton" onclick="saveSuggestedRecipe(\'' + self.schematicID + '\',\'' + self.recipeName + ' suggested\',\'' + ingredientString + '\');">Save</button></td>'
else:
result += ' <td><a alt="Delete Recipe" style="cursor: pointer;" onclick="deleteRecipe(this, \'' + str(self.recipeID) + '\');">[X]</a></td>'
result += '</tr>'
return result
def getAverageQuality(self):
qualityTotal = 0
qualityIngredients = 0
for ing in self.recipeIngredients:
if ing.ingredientResource != '' and ing.resourceQuality != None:
qualityTotal += ing.resourceQuality
qualityIngredients += 1
if qualityIngredients > 0:
qualityAvg = qualityTotal/qualityIngredients
else:
qualityAvg = 0
return qualityAvg
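# Illustrative sketch (not part of the original module): assembling a recipe
# and computing its average ingredient quality; all values are made up.
def _example_average_quality():
    recipe = schematicRecipe()
    recipe.recipeIngredients = [
        recipeIngredient(ingResource='spawn1', ingName='Metal', resQuality=800),
        recipeIngredient(ingResource='spawn2', ingName='Gas', resQuality=400),
    ]
    return recipe.getAverageQuality()  # -> 600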
|
clreinki/GalaxyHarvester
|
ghObjectRecipe.py
|
Python
|
agpl-3.0
| 6,086
|
[
"Galaxy"
] |
cd2460fced71028693cb16ae6948958f5001e7c4ab8e6f0c796fef9b056cdd6d
|
import os
from fabric.api import task, env, run, local, put, cd, sudo
from fabric.contrib.files import exists
from .utils import die, err, yay, template
from . import django, db
def handle_rq(bundle_name, bundle_root, env):
# RQ forks processes and they load the latest version of the code.
# No need to restart the worker **unless** RQ has been updated (TODO).
for worker_id in range(env.rq['workers']):
env.worker_id = worker_id
template(
'rq.conf', '%s/conf/rq%s.conf' % (bundle_root, worker_id),
)
with cd('/etc/supervisor/conf.d'):
sudo('ln -sf %s/conf/rq%s.conf %s_worker%s.conf' % (
bundle_root, worker_id, bundle_name, worker_id,
))
# Scale down workers if the number decreased
workers = run('ls /etc/supervisor/conf.d/%s_worker*.conf' % bundle_name)
workers_conf = run('ls %s/conf/rq*.conf' % bundle_root)
to_delete = []
for w in workers.split():
if (int(w.split('%s_worker' % bundle_name, 1)[1][:-5]) >=
env.rq['workers']):
to_delete.append(w)
for w in workers_conf.split():
if int(w.split(bundle_name, 1)[1][8:-5]) >= env.rq['workers']:
to_delete.append(w)
if to_delete:
sudo('rm %s' % " ".join(to_delete))
def handle_celery(bundle_name, bundle_root, env):
for worker_id, worker in enumerate(env.celery['workers']):
env.worker_id = worker_id
worker_args = [
'--%s' % (k,)
if isinstance(v, bool) else '--%s=%s' % (k, v)
for k, v in worker.items()]
env.worker_args = ' '.join(worker_args)
template(
'celery.conf', '%s/conf/celery%04i.conf' % (bundle_root, worker_id)
)
with cd('/etc/supervisor/conf.d'):
sudo('ln -sf %s/conf/celery%04i.conf %s_worker%04i.conf' % (
bundle_root, worker_id, bundle_name, worker_id,
))
env.worker_id = None
# Scale down workers if the number decreased
workers = run('ls /etc/supervisor/conf.d/%s_worker*.conf' % bundle_name)
workers_conf = run('ls %s/conf/celery*.conf' % bundle_root)
to_delete = []
# for w in workers.split():
# if (int(w.split('%s_worker' % bundle_name, 1)[1][:-5]) >=
# env.rq['workers']):
# to_delete.append(w)
# for w in workers_conf.split():
# if int(w.split(bundle_name, 1)[1][8:-5]) >= env.rq['workers']:
# to_delete.append(w)
# if to_delete:
# sudo('rm %s' % " ".join(to_delete))
def create_virtualenv():
python_switch = ''
if hasattr(env, 'python'):
python_switch = '--python=%s' % getattr(env, 'python')
if not exists(env.bundle_root + '/env'):
run('virtualenv %s --no-site-packages %s/env' % (
python_switch, env.bundle_root))
run('%s/env/bin/pip install -U pip' % env.bundle_root)
def upload_vendor_packages():
packages_location = env.bundle_root + '/packages'
has_vendor = 'vendor' in os.listdir(os.getcwd())
if has_vendor:
local_files = set(os.listdir(os.path.join(os.getcwd(), 'vendor')))
uploaded = set(run('ls %s' % packages_location).split())
diff = local_files - uploaded
for file_name in diff:
put('vendor/%s' % file_name,
'%s/%s' % (packages_location, file_name))
def install_package(requirement, force_version, packages):
freeze = run('%s/env/bin/pip freeze' % env.bundle_root).split()
if requirement in freeze and force_version is None:
die("%s is already deployed. Increment the version number to deploy "
"a new release." % requirement)
cmd = (
'%s/env/bin/pip install -U %s gunicorn gevent greenlet '
'setproctitle --find-links file://%s') % (
env.bundle_root, requirement, packages
)
if 'index_url' in env:
cmd += ' --index-url %(index_url)s' % env
run(cmd)
env.path = env.bundle_root
python = run('ls %s/env/lib' % env.bundle_root)
template(
'path_extension.pth',
'%s/env/lib/%s/site-packages/_virtualenv_path_extensions.pth' % (
env.bundle_root, python
),
)
def setup_cron():
if 'cron' in env:
template('cron', '%(bundle_root)s/conf/cron' % env, use_sudo=True)
sudo('chown root:root %(bundle_root)s/conf/cron' % env)
sudo('chmod 644 %(bundle_root)s/conf/cron' % env)
sudo('ln -sf %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' % env)
else:
# Make sure to deactivate tasks if the cron section is removed
sudo('rm -f %(bundle_root)s/conf/cron /etc/cron.d/%(app)s' % env)
def setup_nginx():
changed = template('nginx.conf', '%s/conf/nginx.conf' % env.bundle_root)
with cd('/etc/nginx/sites-available'):
sudo('ln -sf %s/conf/nginx.conf %s.conf' % (
env.bundle_root, env.http_host))
with cd('/etc/nginx/sites-enabled'):
sudo('ln -sf ../sites-available/%s.conf' % env.http_host)
if env.get('ssl_cert') and env.get('ssl_key'):
put(env.ssl_cert, '%s/conf/ssl.crt' % env.bundle_root)
put(env.ssl_key, '%s/conf/ssl.key' % env.bundle_root)
if env.get('remote_ssl'):
env.ssl_cert = env.remote_ssl + ".crt"
env.ssl_key = env.remote_ssl + ".key"
if changed: # TODO detect if the certs have changed
sudo('/etc/init.d/nginx reload')
@task()
def deploy(force_version=None):
"""Deploys to the current bundle"""
# Bundle creation
bundle_name = env.http_host
bundle_root = '%s/%s' % (env.get('bundle_root', run('pwd') + '/bundles'),
bundle_name)
env.bundle_root = bundle_root
run('mkdir -p %s/{log,conf,public}' % bundle_root)
# virtualenv, Packages
create_virtualenv()
#####
# Generate local package
local('python setup.py sdist')
dists = [
d for d in os.listdir(os.path.join(os.getcwd(),
'dist')) if d.endswith('.tar.gz')
]
version_string = lambda d: d.rsplit('-', 1)[1][:-7]
def int_or_s(num):
try:
return int(num)
except ValueError:
return num
dist = sorted(dists, key=lambda d: map(int_or_s,
version_string(d).split('.')))[-1]
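    # Example (illustrative): with dists = ['app-0.9.tar.gz', 'app-0.10.tar.gz'],
    # version_string yields '0.9' and '0.10'; the numeric sort compares
    # [0, 9] < [0, 10], so 'app-0.10.tar.gz' is picked as the latest dist.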
version = force_version or version_string(dist)
dist_name = dist.rsplit('-', 1)[0]
requirement = '%s==%s' % (dist_name, version)
print('*' * 120)
print(dist)
print(requirement)
print('*' * 120)
packages = bundle_root + '/packages'
run('mkdir -p %s' % packages)
if not exists('%s/%s' % (packages, dist)):
put('dist/%s' % dist, '%s/%s' % (packages, dist))
# End of local package
#####
upload_vendor_packages()
install_package(requirement, force_version, packages)
django.setup()
# Do we have a DB?
db.creation(bundle_name)
django.database_migration()
django.collectstatic()
# Some things don't like dots
env.app = env.http_host.replace('.', '')
# Cron tasks
setup_cron()
# Log rotation
logrotate = '/etc/logrotate.d/%(app)s' % env
template('logrotate', logrotate, use_sudo=True)
sudo('chown root:root %s' % logrotate)
# Nginx vhost
setup_nginx()
# Supervisor task(s) -- gunicorn + rq
    if 'workers' not in env:
env.workers = 2
changed = template('supervisor.conf',
'%s/conf/supervisor.conf' % bundle_root)
with cd('/etc/supervisor/conf.d'):
sudo('ln -sf %s/conf/supervisor.conf %s.conf' % (bundle_root,
bundle_name))
if 'rq' in env and env.rq:
changed = True # Always supervisorctl update
handle_rq(bundle_name, bundle_root, env)
if 'celery' in env and env.celery:
changed = True
handle_celery(bundle_name, bundle_root, env)
if changed:
sudo('supervisorctl update')
# TODO: don't kill all gunicorn instances
run('kill -HUP `pgrep gunicorn`')
# All set, user feedback
ip = run('curl http://ifconfig.me/')
dns = run('nslookup %s' % env.http_host)
if ip in dns:
proto = 'https' if 'ssl_cert' in env else 'http'
yay("Visit %s://%s" % (proto, env.http_host))
else:
err("Deployment successful but make sure %s points to %s" % (
env.http_host, ip))
@task()
def destroy():
"""Destroys the current bundle"""
pass
|
linovia/fab-bundle
|
fab_bundle/bundle.py
|
Python
|
bsd-3-clause
| 8,545
|
[
"VisIt"
] |
39f1399facf23b7d4ba5f18aecdfc4099700f383f230f46214b05d17b08d27d2
|
# Copyright (C) 2018-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from swh.web.api.apidoc import api_doc, format_docstring
from swh.web.api.apiurls import api_route
from swh.web.auth.utils import privileged_user
from swh.web.common.origin_save import (
create_save_origin_request,
get_save_origin_requests,
)
@api_route(
r"/origin/save/(?P<visit_type>.+)/url/(?P<origin_url>.+)/",
"api-1-save-origin",
methods=["GET", "POST"],
throttle_scope="swh_save_origin",
never_cache=True,
)
@api_doc("/origin/save/")
@format_docstring()
def api_save_origin(request, visit_type, origin_url):
"""
.. http:get:: /api/1/origin/save/(visit_type)/url/(origin_url)/
.. http:post:: /api/1/origin/save/(visit_type)/url/(origin_url)/
Request the saving of a software origin into the archive
or check the status of previously created save requests.
        This endpoint allows the creation of a save task for a software origin
        through a POST request.
        Depending on the provided origin url, the save request can either be:
* immediately **accepted**, for well known code hosting providers
like for instance GitHub or GitLab
* **rejected**, in case the url is blacklisted by Software Heritage
* **put in pending state** until a manual check is done in order to
determine if it can be loaded or not
Once a saving request has been accepted, its associated saving task
status can then be checked through a GET request on the same url.
Returned status can either be:
* **not created**: no saving task has been created
* **not yet scheduled**: saving task has been created but its
execution has not yet been scheduled
* **scheduled**: the task execution has been scheduled
* **succeeded**: the saving task has been successfully executed
* **failed**: the saving task has been executed but it failed
When issuing a POST request an object will be returned while a GET
request will return an array of objects (as multiple save requests
might have been submitted for the same origin).
:param string visit_type: the type of visit to perform
(currently the supported types are ``git``, ``hg`` and ``svn``)
:param string origin_url: the url of the origin to save
{common_headers}
:>json string origin_url: the url of the origin to save
:>json string visit_type: the type of visit to perform
:>json string save_request_date: the date (in iso format) the save
request was issued
:>json string save_request_status: the status of the save request,
either **accepted**, **rejected** or **pending**
:>json string save_task_status: the status of the origin saving task,
either **not created**, **not yet scheduled**, **scheduled**,
**succeeded** or **failed**
:>json string visit_date: the date (in iso format) of the visit if a visit
occurred, null otherwise.
:>json string visit_status: the status of the visit, either **full**,
**partial**, **not_found** or **failed** if a visit occurred, null
otherwise.
:statuscode 200: no error
:statuscode 400: an invalid visit type or origin url has been provided
:statuscode 403: the provided origin url is blacklisted
:statuscode 404: no save requests have been found for a given origin
"""
data = request.data or {}
if request.method == "POST":
sor = create_save_origin_request(
visit_type,
origin_url,
privileged_user(request),
user_id=request.user.id,
**data,
)
del sor["id"]
else:
sor = get_save_origin_requests(visit_type, origin_url)
for s in sor:
del s["id"]
return sor
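# Illustrative sketch (not part of the original module): exercising the save
# endpoint over plain HTTP with the 'requests' library; the instance URL and
# origin are placeholders and 'requests' is assumed to be available.
def _example_save_origin_request():
    import requests
    url = (
        "https://archive.softwareheritage.org/api/1/origin/save/git/url/"
        "https://github.com/python/cpython/"
    )
    response = requests.post(url)
    return response.json().get("save_request_status")  # e.g. "accepted"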
|
SoftwareHeritage/swh-web-ui
|
swh/web/api/views/origin_save.py
|
Python
|
agpl-3.0
| 4,183
|
[
"VisIt"
] |
90bea849dd01020fbefc0868ce7705655cc7597f0398bdc6908cce6f8749d1b2
|
from DIRAC import S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.Types.Pilot import Pilot
from DIRAC.AccountingSystem.private.Plotters.BaseReporter import BaseReporter
class PilotPlotter( BaseReporter ):
_typeName = "Pilot"
_typeKeyFields = [ dF[0] for dF in Pilot().definitionKeyFields ]
def _reportCumulativeNumberOfJobs( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength',
'Jobs'
]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{} )
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
dataDict = self._accumulate( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit( dataDict,
self._getAccumulationMaxValue( dataDict ),
"jobs" )
return S_OK( { 'data' : baseDataDict, 'graphDataDict' : graphDataDict,
'granularity' : granularity, 'unit' : unitName } )
def _plotCumulativeNumberOfJobs( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Cumulative Jobs by %s' % reportRequest[ 'grouping' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'ylabel' : plotInfo[ 'unit' ],
'sort_labels' : 'last_value' }
return self._generateCumulativePlot( filename, plotInfo[ 'graphDataDict' ], metadata )
def _reportNumberOfJobs( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength',
'Jobs'
]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{} )
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict, maxValue = self._divideByFactor( dataDict, granularity )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit( dataDict,
self._getAccumulationMaxValue( dataDict ),
"jobs" )
return S_OK( { 'data' : baseDataDict, 'graphDataDict' : graphDataDict,
'granularity' : granularity, 'unit' : unitName } )
def _plotNumberOfJobs( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Jobs by %s' % reportRequest[ 'grouping' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'ylabel' : plotInfo[ 'unit' ] }
return self._generateTimedStackedBarPlot( filename, plotInfo[ 'graphDataDict' ], metadata )
def _reportCumulativeNumberOfPilots( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength',
'entriesInBucket'
]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{} )
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
dataDict = self._accumulate( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableUnit( dataDict,
self._getAccumulationMaxValue( dataDict ),
"jobs" )
return S_OK( { 'data' : baseDataDict, 'graphDataDict' : graphDataDict,
'granularity' : granularity, 'unit' : unitName } )
def _plotCumulativeNumberOfPilots( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Cumulative Pilots by %s' % reportRequest[ 'grouping' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'ylabel' : plotInfo[ 'unit' ].replace( 'job', 'pilot' ),
'sort_labels' : 'last_value' }
return self._generateCumulativePlot( filename, plotInfo[ 'graphDataDict' ], metadata )
def _reportNumberOfPilots( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength',
'entriesInBucket'
]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{} )
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict, maxValue = self._divideByFactor( dataDict, granularity )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
baseDataDict, graphDataDict, maxValue, unitName = self._findSuitableRateUnit( dataDict,
self._getAccumulationMaxValue( dataDict ),
"jobs" )
return S_OK( { 'data' : baseDataDict, 'graphDataDict' : graphDataDict,
'granularity' : granularity, 'unit' : unitName } )
def _plotNumberOfPilots( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Pilots by %s' % reportRequest[ 'grouping' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'ylabel' : plotInfo[ 'unit' ].replace( 'job', 'pilot' ) }
return self._generateTimedStackedBarPlot( filename, plotInfo[ 'graphDataDict' ], metadata )
def _reportJobsPerPilot( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", %s, %s, SUM(%s), SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'startTime', 'bucketLength',
'Jobs', 'entriesInBucket'
]
)
retVal = self._getTimedData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{ 'checkNone' : True,
'convertToGranularity' : 'sum',
'calculateProportionalGauges' : False,
'consolidationFunction' : self._averageConsolidation } )
if not retVal[ 'OK' ]:
return retVal
dataDict, granularity = retVal[ 'Value' ]
self.stripDataField( dataDict, 0 )
dataDict = self._fillWithZero( granularity, reportRequest[ 'startTime' ], reportRequest[ 'endTime' ], dataDict )
return S_OK( { 'data' : dataDict, 'granularity' : granularity } )
def _plotJobsPerPilot( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Jobs per pilot by %s' % reportRequest[ 'grouping' ],
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ],
'span' : plotInfo[ 'granularity' ],
'ylabel' : "jobs/pilot",
'normalization' : max( x for y in plotInfo[ 'data' ].itervalues() for x in y.itervalues() ) }
return self._generateQualityPlot( filename, plotInfo[ 'data' ], metadata )
def _reportTotalNumberOfPilots( self, reportRequest ):
selectFields = ( self._getSelectStringForGrouping( reportRequest[ 'groupingFields' ] ) + ", SUM(%s)",
reportRequest[ 'groupingFields' ][1] + [ 'entriesInBucket'
]
)
retVal = self._getSummaryData( reportRequest[ 'startTime' ],
reportRequest[ 'endTime' ],
selectFields,
reportRequest[ 'condDict' ],
reportRequest[ 'groupingFields' ],
{} )
if not retVal[ 'OK' ]:
return retVal
dataDict = retVal[ 'Value' ]
return S_OK( { 'data' : dataDict } )
def _plotTotalNumberOfPilots( self, reportRequest, plotInfo, filename ):
metadata = { 'title' : 'Total Number of Pilots by %s' % reportRequest[ 'grouping' ],
'ylabel' : 'Pilots',
'starttime' : reportRequest[ 'startTime' ],
'endtime' : reportRequest[ 'endTime' ]
}
return self._generatePiePlot( filename, plotInfo[ 'data'], metadata )
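# ---------------------------------------------------------------------------
# Illustration (not part of DIRAC): every _report* method above follows the
# same recipe -- fetch per-bucket sums, pad missing buckets with zeros, and,
# for cumulative plots, turn the series into a running total. The helpers
# below are simplified stand-ins for BaseReporter._fillWithZero and
# BaseReporter._accumulate, written only to make that data flow concrete;
# they are not the DIRAC implementations.
def _demo_fill_with_zero( granularity, start, end, series ):
  # Ensure every bucket in [start, end) is present, defaulting to 0.
  return dict( ( t, series.get( t, 0.0 ) ) for t in range( start, end, granularity ) )

def _demo_accumulate( granularity, start, end, series ):
  # Replace each bucket's value with the running total up to that bucket.
  total, out = 0.0, {}
  for t in range( start, end, granularity ):
    total += series.get( t, 0.0 )
    out[ t ] = total
  return out

if __name__ == "__main__":
  # Pilots submitted in three of five hourly buckets.
  raw = { 0 : 4.0, 3600 : 2.0, 10800 : 5.0 }
  padded = _demo_fill_with_zero( 3600, 0, 18000, raw )
  print( _demo_accumulate( 3600, 0, 18000, padded ) )
  # -> { 0 : 4.0, 3600 : 6.0, 7200 : 6.0, 10800 : 11.0, 14400 : 11.0 }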
|
andresailer/DIRAC
|
AccountingSystem/private/Plotters/PilotPlotter.py
|
Python
|
gpl-3.0
| 11,286
|
[
"DIRAC"
] |
19af29e0c4ba293fbffba74329a6c5ac5041112e88b59d148795778c7150d2cd
|
import sys
import os
import warnings        # used by drag_pan()'s OverflowError handler below
from math import log   # used by release_zoom()'s log-scale branch below
import pylab as pl
import Tkinter as Tk
from matplotlib.backend_bases import cursors
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib._pylab_helpers import Gcf
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
class PlotFlag:
"""
(1) Start the internal python interpreter...
and make the 'pylab' module from the main python/casapy namespace
visible inside it. ( done inside TPPlotter ) Note that 'pylab' is the
only module of casapy that is visible from this internal interpreter.
(2) figmanager = pl.get_current_fig_manager()
-> This gets a handle to the current window, canvas, toolbar.
(3) Create the python-C++ call-back module -> PyBind.
( description in tables/implement/TablePlot/PlotterGlobals.cc )
    (4) TablePlotTkAgg.py implements a python class called 'PlotFlag'
which takes an instance of 'PyBind' and 'figmanager' and makes
the connection between the two.
- Additional buttons are placed in the toolbar, and their callbacks
defined to call methods of PyBind.
- The toolbar event-loop is captured - by explicitly disconnecting
previous bindings, and re-defining them for 'pan','zoom','mark-region'
modes. (need to do all three, to get them to interact properly with each other)
- Some Canvas events are also redefined to allow mark-region boxes to
automatically resize and move around, when the window is resized or
when in pan or zoom modes. ( This is needed to allow flagging with
zooming ).
    (5) Back to the internal python interpreter. The following steps are carried out.
-> figmanager = pl.get_current_fig_manager()
-> import PyBind
        -> from TablePlotTkAgg import PlotFlag
-> pf = PlotFlag( PyBind )
-> pf.setup_custom_features( figmanager )
----> All binding is complete at this point.
----> All other logic is to ensure things like... make sure new buttons are
added only when needed... make sure they *are* added when needed... and
this has to keep up with the native TkAgg matplotlib backend's
whimsical decisions of when to create a new figure and when not to.
"""
def __init__(self,PyBind):
#print "Init PlotFlag"
self.PyBind = PyBind;
self.newtoolbar = False;
self.quitted = False;
def sub(self):
#pass
self.quitted = True;
self.PyBind.quit(True);
def setup_custom_features(self,cfigman):
if (rcParams['backend'].lower() == 'agg'):
return
self.toolbar = cfigman.toolbar;
self.canvas = self.toolbar.canvas;
self.window = cfigman.window;
self.figmanager = cfigman;
self.figmanager.window.wm_title("CASA Plotter");
self.figmanager.window.protocol("WM_DELETE_WINDOW", self.sub);
if self.newtoolbar is True:
# Add new buttons
self.add_buttons();
# Reconfigure buttons.
self.configure_buttons();
self.newtoolbar = False;
# Toolbar parameters
self.panel=0;
self.rows=0;
self.cols=0;
# Canvas parameters
self.regionlist=[];
self.panelregionlist=[];
self.axeslist=[];
self.erase_rects();
# Re-Make event bindings
self.canvas.keyvald.update({65307 : 'escape'});
self.canvas.mpl_disconnect(self.toolbar._idDrag)
self.toolbar._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self.canvas._tkcanvas.bind("<KeyRelease>", self.key_release);
self.canvas._tkcanvas.bind("<Configure>", self.resize);
self.canvas._tkcanvas.bind("<Destroy>", self.destroy);
#self.window.bind("<Destroy>", self.destroy);
def plotflag_cleanup(self):
self.canvas._tkcanvas.bind("<Destroy>", None);
def set_cursor(self, cursor):
self.toolbar.set_cursor(cursor);
#self.toolbar.window.configure(cursor=cursord[cursor]);
def _NewButton(self, frame, text, file, command, side=Tk.LEFT):
#file = os.path.join(rcParams['datapath'], 'images', file)
#file = '/opt/casa/stable/darwin/python/2.5' + file;
#im = Tk.PhotoImage(master=frame, file=file)
if(os.uname()[0] == 'Darwin'):
b = Tk.Button(master=frame, text=text, command=command)
else:
b = Tk.Button(master=frame, text=text, padx=2, pady=2, command=command)
#master=frame, text=text, padx=2, pady=2, image=im, command=command)
#b._ntimage = im
b.pack(side=side)
return b
def add_buttons(self):
#self.newframe = Tk.Frame()
self.newframe = Tk.Frame(master=self.window)
bside = Tk.LEFT;
self.toolbar.bMarkRegion = self._NewButton( frame=self.newframe,
text="Mark Region",
file="markregion.ppm",
#file="markregion2.ppm",
command=self.markregion,
side=bside)
self.toolbar.bFlag = self._NewButton(frame=self.newframe,
text="Flag",
file="flag4.ppm",
command=None,
side=bside)
self.toolbar.bUnflag = self._NewButton(frame=self.newframe,
text="Unflag",
file="unflag4.ppm",
command=None,
side=bside)
self.toolbar.bLocate = self._NewButton(frame=self.newframe,
text="Locate",
file="locate4.ppm",
command=None,
side=bside)
self.toolbar.bIterNext = self._NewButton(frame=self.newframe,
text=" Next ",
file="locate4.ppm",
command=None,
side=bside)
self.toolbar.bClear =None;
#self.toolbar.bClear = self._NewButton(frame=self.newframe,
# text=" Clear ",
# file="locate4.ppm",
# command=None,
# side=bside)
self.toolbar.bQuit = self._NewButton(frame=self.newframe,
text=" Quit ",
file="locate4.ppm",
command=None,
side=bside)
self.toolbar.bMarkRegion.config(background='lightblue');
self.toolbar.bFlag.config(background='lightblue');
self.toolbar.bUnflag.config(background='lightblue');
self.toolbar.bLocate.config(background='lightblue');
self.toolbar.bIterNext.config(background='lightblue',state='disabled');
#self.toolbar.bClear.config(background='lightblue');
self.toolbar.bQuit.config(background='lightblue');
self.newframe.pack(side=Tk.BOTTOM,fill=Tk.BOTH);
#self.newframe.pack_propagate();
def configure_buttons(self):
self.toolbar.bHome.config(command=self.home);
self.toolbar.bForward.config(command=self.forward);
self.toolbar.bBack.config(command=self.back);
self.toolbar.bsubplot.config(command=self.configure_subplots);
self.toolbar.bPan.config(command=self.pan);
self.toolbar.bZoom.config(command=self.zoom);
self.toolbar.bMarkRegion.config(command=self.markregion);
self.toolbar.bFlag.config(command=self.flag);
self.toolbar.bUnflag.config(command=self.unflag);
self.toolbar.bLocate.config(command=self.locate);
self.toolbar.bIterNext.config(command=self.iterplotnext);
#self.toolbar.bClear.config(command=self.clearplot);
self.toolbar.bQuit.config(command=self.quit);
#self.toolbar.bIterstop.config(command=self.iterplotstop);
### comment the next line when Wes updates matplotlib.
#self.toolbar.bsave.config(command=self.savefig);
def flag(self, *args):
self.operate(1);
def unflag(self, *args):
self.operate(0);
def locate(self, *args):
self.operate(2);
def operate(self, flag=1):
#print '** Record the following regions'
#for pr in self.canvas.panelregionlist:
#print 'Region on panel [%(r)d,%(c)d,%(p)d] : [%(t1).3f, %(t2).3f, %(t3).3f, %(t4).3f] '%{'r':pr[5],'c':pr[6], 'p':pr[4],'t1':pr[0],'t2':pr[2], 't3':pr[1], 't4':pr[3]};
self.PyBind.markregion(self.panelregionlist);
self.erase_rects();
        if( flag == 1 ):
#print "**Flag !!";
self.PyBind.flagdata();
        if( flag == 0 ):
#print "**UnFlag !!";
self.PyBind.unflagdata();
        if( flag == 2 ):
#print "**Locate !!";
self.PyBind.locatedata();
def iterplotnext(self, *args):
self.PyBind.iterplotnext();
def iterplotstop(self, *args):
self.PyBind.iterplotstop();
def clearplot(self, *args):
#print 'Gui::calling clearplot'
self.PyBind.clearplot();
#print 'Gui::finished clearplot'
def savefig(self, *args):
import time;
fname = 'plot-casapy-'+time.strftime('%Y-%m-%dT%H:%M:%S')+'.png';
print 'Saving figure as ', fname, ' in current working directory.'
self.canvas.figure.savefig(fname);
def enable_iter_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bIterNext is not None ):
# self.toolbar.bIterNext.config(state='normal');
return
def disable_iter_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bIterNext is not None ):
# self.toolbar.bIterNext.config(state='disabled');
return
def enable_markregion_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bMarkRegion is not None ):
# self.toolbar.bMarkRegion.config(state='normal');
return
def disable_markregion_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bMarkRegion is not None ):
# self.toolbar.bMarkRegion.config(state='disabled');
return
def enable_flag_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bFlag is not None ):
# self.toolbar.bFlag.config(state='normal');
return
def disable_flag_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bFlag is not None ):
# self.toolbar.bFlag.config(state='disabled');
return
def enable_unflag_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bUnflag is not None ):
# self.toolbar.bUnflag.config(state='normal');
return
def disable_unflag_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bUnflag is not None ):
# self.toolbar.bUnflag.config(state='disabled');
return
def enable_locate_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bLocate is not None ):
# self.toolbar.bLocate.config(state='normal');
return
def disable_locate_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bLocate is not None ):
# self.toolbar.bLocate.config(state='disabled');
return
def enable_clear_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bClear is not None ):
# self.toolbar.bClear.config(state='normal');
return
def disable_clear_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bClear is not None ):
# self.toolbar.bClear.config(state='disabled');
return
def enable_quit_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bQuit is not None ):
# self.toolbar.bQuit.config(state='normal');
return
def disable_quit_button(self):
#if (rcParams['backend'].lower() == 'agg'):
# return
#if( self.toolbar.bQuit is not None ):
# self.toolbar.bQuit.config(state='disabled');
return
def draw_rubberband(self, event, x0, y0, x1, y1):
if (rcParams['backend'].lower() == 'agg'):
return
### workaround for matplotlib API changes
#height = self.canvas.figure.bbox.height() #0.91.4
#height = self.canvas.figure.bbox.height #>=0.98
height = self.get_bbox_size(self.canvas.figure.bbox,"height") #workaround
y0 = height-y0
y1 = height-y1
try: self.toolbar.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.toolbar.lastrect)
self.toolbar.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1, width=2,outline='black')
def draw_rect(self, x0, y0, x1, y1, x0data, y0data, x1data, y1data,a,panel,rows,cols):
self.panelregionlist.append([x0data,y0data,x1data,y1data,panel+1,rows,cols]);
self.axeslist.append(a);
### workaround for matplotlib API changes
#height = self.canvas.figure.bbox.height() #0.91.4
#height = self.canvas.figure.bbox.height #>=0.98
height = self.get_bbox_size(self.canvas.figure.bbox,"height") #workaround
y0 = height-y0
y1 = height-y1
if(os.uname()[0] == 'Darwin'):
rect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1, width=2,outline='black')
else:
rect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1, width=2,fill='black',stipple='gray50',outline='black')
self.regionlist.append(rect);
def erase_rects(self):
#print "erase rects"
if (rcParams['backend'].lower() == 'agg'):
return
for q in self.regionlist:
self.canvas._tkcanvas.delete(q);
self.regionlist = [];
self.panelregionlist = [];
self.axeslist = [];
def redraw_rects(self):
for q in self.regionlist:
self.canvas._tkcanvas.delete(q);
self.regionlist = [];
for z in range(0,len(self.panelregionlist)):
q = self.panelregionlist[z];
a = self.axeslist[z];
x0=q[0]; y0=q[1]; x1=q[2]; y1=q[3];
# map to new zoom limits (current fig co-ords)
### workaround for matplotlib API changes
#px0,py0 = a.transData.xy_tup( (x0, y0) ) #0.91
#px0,py0 = a.transData.transform( (x0, y0) ) #>=0.98
px0,py0 = self.get_xy(a.transData, (x0, y0) ) #workaround
### workaround for matplotlib API changes
#px1,py1 = a.transData.xy_tup( (x1, y1) ) #0.91
#px1,py1 = a.transData.transform( (x1, y1) ) #>=0.98
px1,py1 = self.get_xy(a.transData, (x1, y1) )
### workaround for matplotlib API changes
#height = self.canvas.figure.bbox.height() #0.91
#height = self.canvas.figure.bbox.height #>=0.98
height = self.get_bbox_size(self.canvas.figure.bbox,"height") #workaround
py0 = height-py0
py1 = height-py1
if(os.uname()[0] == 'Darwin'):
rect = self.canvas._tkcanvas.create_rectangle(px0, py0, px1, py1, width=2,outline='black')
else:
rect = self.canvas._tkcanvas.create_rectangle(px0, py0, px1, py1, width=2,fill='black',stipple='gray50',outline='black')
self.regionlist.append(rect);
def resize(self, event):
#print 'canvas resize'
self.canvas.resize(event);
self.redraw_rects();
def destroy(self,*args):
#print 'Gui::destroy.'
self.erase_rects();
self.newtoolbar = True;
if self.quitted is False:
self.quit(closewin=True);
print " ";
#print "................................................................";
#print "............. Please IGNORE Tkinter error message. .............";
#print "................................................................";
def quit(self, closewin=True):
#print 'quit with close-window : ', closewin;
self.quitted = True;
self.PyBind.quit(closewin);
def key_release(self, event):
#print 'key release'
self.canvas.key_release(event);
key = self.canvas._get_key(event);
if(key=='escape'):
numreg = len(self.regionlist);
if(numreg>0):
self.canvas._tkcanvas.delete(self.regionlist[numreg-1]);
self.regionlist.pop();
self.panelregionlist.pop();
self.axeslist.pop();
def home(self, *args):
'restore the original view'
if (rcParams['backend'].lower() == 'agg'):
return
self.toolbar.home();
self.redraw_rects();
def back(self, *args):
'move back up the view lim stack'
if (rcParams['backend'].lower() == 'agg'):
return
self.toolbar.back();
self.redraw_rects();
def forward(self, *args):
'move forward in the view lim stack'
if (rcParams['backend'].lower() == 'agg'):
return
self.toolbar.forward();
self.redraw_rects();
def configure_subplots(self):
'configure subplots'
if (rcParams['backend'].lower() == 'agg'):
return
self.toolbar.configure_subplots();
self.redraw_rects();
def markregion(self, *args):
'activate mark-region mode'
if (rcParams['backend'].lower() == 'agg'):
return
if self.toolbar._active == 'MARKREGION':
#self.toolbar._active = None
self.erase_rects();
self.update_relief(newmode=None);
else:
#self.toolbar._active = 'MARKREGION'
self.update_relief(newmode='MARKREGION');
if self.toolbar._idPress is not None:
self.toolbar._idPress=self.canvas.mpl_disconnect(self.toolbar._idPress)
self.toolbar.mode = ''
if self.toolbar._idRelease is not None:
self.toolbar._idRelease=self.canvas.mpl_disconnect(self.toolbar._idRelease)
self.toolbar.mode = ''
if self.toolbar._active:
self.toolbar._idPress = self.canvas.mpl_connect('button_press_event', self.press_markregion)
self.toolbar._idRelease = self.canvas.mpl_connect('button_release_event', self.release_markregion)
self.toolbar.mode = 'Mark Region mode'
self.canvas.widgetlock(self.toolbar)
else:
self.canvas.widgetlock.release(self.toolbar)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self.toolbar._active)
self.toolbar.set_message(self.toolbar.mode)
def press_markregion(self, event):
'the press mouse button in mark region mode callback'
if (rcParams['backend'].lower() == 'agg'):
return
if event.button == 1:
self.toolbar._button_pressed=1
elif event.button == 3:
self.toolbar._button_pressed=3
else:
self.toolbar._button_pressed=None
return
# Check that the click is inside the canvas.
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self.toolbar._views.empty(): self.toolbar.push_current()
self.toolbar._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
#if x is not None and y is not None and a.in_axes(x, y) and a.get_navigate():
if x is not None and y is not None and event.inaxes==a and a.get_navigate():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lim = xmin, xmax, ymin, ymax
### workaround for matplotlib API changes
#self.toolbar._xypress.append(( x, y, a, i, lim, a.transData.deepcopy() )) #0.91.4
#self.toolbar._xypress.append(( x, y, a, i, lim, a.transData.frozen() )) #>=0.98
self.toolbar._xypress.append(( x, y, a, i, lim, self.copy_trans(a.transData))) #workaround
one, two, three = event.inaxes.get_geometry()
self.panel = three-1
self.rows = one
self.cols = two
self.toolbar.press(event)
def release_markregion(self, event):
'the release mouse button callback in mark region mode'
if (rcParams['backend'].lower() == 'agg'):
return
if not self.toolbar._xypress: return
for cur_xypress in self.toolbar._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
xmin, ymin, xmax, ymax = lim
# mark rect
### workaround for matplotlib API changes
#lastx, lasty = a.transData.inverse_xy_tup( (lastx, lasty) ) #0.91.4
#lastx, lasty = a.transData.inverted().transform( (lastx, lasty) ) #>=0.98
lastx, lasty = self.get_inverse_xy(a.transData, (lastx, lasty) ) #workaround
### workaround for matplotlib API changes
#x, y = a.transData.inverse_xy_tup( (x, y) ) #0.91.4
#x, y = a.transData.inverted().transform( (x, y) ) #>=0.98
x, y = self.get_inverse_xy(a.transData, (x, y) ) #workaround
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
if Xmin < Xmax:
if x<lastx: xmin, xmax = x, lastx
else: xmin, xmax = lastx, x
if xmin < Xmin: xmin=Xmin
if xmax > Xmax: xmax=Xmax
else:
if x>lastx: xmin, xmax = x, lastx
else: xmin, xmax = lastx, x
if xmin > Xmin: xmin=Xmin
if xmax < Xmax: xmax=Xmax
if Ymin < Ymax:
if y<lasty: ymin, ymax = y, lasty
else: ymin, ymax = lasty, y
if ymin < Ymin: ymin=Ymin
if ymax > Ymax: ymax=Ymax
else:
if y>lasty: ymin, ymax = y, lasty
else: ymin, ymax = lasty, y
if ymin > Ymin: ymin=Ymin
if ymax < Ymax: ymax=Ymax
### workaround for matplotlib API changes
#px1,py1 = a.transData.xy_tup( (xmin, ymin) ) #0.91.4
#px1,py1 = a.transData.transform( (xmin, ymin) ) #>=0.98
px1,py1 = self.get_xy(a.transData, (xmin, ymin) ) #workaround
#px2,py2 = a.transData.xy_tup( (xmax, ymax) ) #0.91.4
#px2,py2 = a.transData.transform( (xmax, ymax) ) #>=0.98
px2,py2 = self.get_xy(a.transData, (xmax, ymax) ) #workaround
self.draw_rect(px1, py1, px2, py2, xmin, ymin, xmax, ymax, a, self.panel, self.rows, self.cols)
#print 'Region on panel [%(r)d,%(c)d,%(p)d] : [%(t1).3f, %(t2).3f, %(t3).3f, %(t4).3f] '%{'r':self.rows,'c':self.cols, 'p':self.panel+1,'t1':xmin,'t2':xmax, 't3':ymin, 't4':ymax};
#self.toolbar.draw()
self.toolbar._xypress = None
self.toolbar._button_pressed = None
self.toolbar.push_current()
self.toolbar.release(event)
def zoom(self, *args):
'activate zoom to rect mode'
if (rcParams['backend'].lower() == 'agg'):
return
if self.toolbar._active == 'ZOOM':
#self.toolbar._active = None
self.update_relief(newmode=None);
else:
#self.toolbar._active = 'ZOOM'
self.update_relief(newmode='ZOOM');
if self.toolbar._idPress is not None:
self.toolbar._idPress=self.canvas.mpl_disconnect(self.toolbar._idPress)
self.toolbar.mode = ''
if self.toolbar._idRelease is not None:
self.toolbar._idRelease=self.canvas.mpl_disconnect(self.toolbar._idRelease)
self.toolbar.mode = ''
if self.toolbar._active:
self.toolbar._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self.toolbar._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.toolbar.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self.toolbar)
else:
self.canvas.widgetlock.release(self.toolbar)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self.toolbar._active)
self.toolbar.set_message(self.toolbar.mode)
def press_zoom(self, event):
'the press mouse button in zoom to rect mode callback'
if (rcParams['backend'].lower() == 'agg'):
return
if event.button == 1:
self.toolbar._button_pressed=1
elif event.button == 3:
self.toolbar._button_pressed=3
else:
self.toolbar._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self.toolbar._views.empty(): self.toolbar.push_current()
self.toolbar._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
#if x is not None and y is not None and a.in_axes(x, y) and a.get_navigate():
if x is not None and y is not None and event.inaxes==a and a.get_navigate():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lim = xmin, xmax, ymin, ymax
### workaround for matplotlib API changes
#self.toolbar._xypress.append(( x, y, a, i, lim, a.transData.deepcopy() )) #0.91.4
#self.toolbar._xypress.append(( x, y, a, i, lim, a.transData.frozen() )) #>=0.98
self.toolbar._xypress.append(( x, y, a, i, lim, self.copy_trans(a.transData))) #workaround
self.toolbar.press(event)
def release_zoom(self, event):
'the release mouse button callback in zoom to rect mode'
if (rcParams['backend'].lower() == 'agg'):
return
if not self.toolbar._xypress: return
for cur_xypress in self.toolbar._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self.toolbar._xypress = None
self.toolbar.release(event)
self.toolbar.draw()
return
xmin, ymin, xmax, ymax = lim
# zoom to rect
### workaround for matplotlib API changes
#lastx, lasty = a.transData.inverse_xy_tup( (lastx, lasty) ) #0.91.4
#lastx, lasty = a.transData.inverted().transform( (lastx, lasty) ) #>=0.98
lastx, lasty = self.get_inverse_xy(a.transData, (lastx, lasty) ) #workaround
### workaround for matplotlib API changes
#x, y = a.transData.inverse_xy_tup( (x, y) ) #0.91.4
#x, y = a.transData.inverted().transform( (x, y) ) #>=0.98
x, y = self.get_inverse_xy(a.transData, (x, y) ) #workaround
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
if Xmin < Xmax:
if x<lastx: xmin, xmax = x, lastx
else: xmin, xmax = lastx, x
if xmin < Xmin: xmin=Xmin
if xmax > Xmax: xmax=Xmax
else:
if x>lastx: xmin, xmax = x, lastx
else: xmin, xmax = lastx, x
if xmin > Xmin: xmin=Xmin
if xmax < Xmax: xmax=Xmax
if Ymin < Ymax:
if y<lasty: ymin, ymax = y, lasty
else: ymin, ymax = lasty, y
if ymin < Ymin: ymin=Ymin
if ymax > Ymax: ymax=Ymax
else:
if y>lasty: ymin, ymax = y, lasty
else: ymin, ymax = lasty, y
if ymin > Ymin: ymin=Ymin
if ymax < Ymax: ymax=Ymax
if self.toolbar._button_pressed == 1:
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
elif self.toolbar._button_pressed == 3:
if a.get_xscale()=='log':
alpha=log(Xmax/Xmin)/log(xmax/xmin)
x1=pow(Xmin/xmin,alpha)*Xmin
x2=pow(Xmax/xmin,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(xmax-xmin)
x1=alpha*(Xmin-xmin)+Xmin
x2=alpha*(Xmax-xmin)+Xmin
if a.get_yscale()=='log':
alpha=log(Ymax/Ymin)/log(ymax/ymin)
y1=pow(Ymin/ymin,alpha)*Ymin
y2=pow(Ymax/ymin,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(ymax-ymin)
y1=alpha*(Ymin-ymin)+Ymin
y2=alpha*(Ymax-ymin)+Ymin
a.set_xlim((x1, x2))
a.set_ylim((y1, y2))
self.toolbar.draw()
self.redraw_rects();
self.toolbar._xypress = None
self.toolbar._button_pressed = None
self.toolbar.push_current()
self.toolbar.release(event)
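    # --- Worked example (annotation only, not upstream code) ---------------
    # For a right-button drag on linear axes, the branch above maps the
    # dragged box [xmin, xmax] back onto the full view [Xmin, Xmax], i.e. it
    # zooms *out*:  alpha = (Xmax-Xmin)/(xmax-xmin),
    #               x1 = alpha*(Xmin-xmin)+Xmin,  x2 = alpha*(Xmax-xmin)+Xmin.
    # With Xmin, Xmax = 0, 10 and a dragged box xmin, xmax = 4, 6:
    #   alpha = 10/2 = 5,  x1 = 5*(0-4)+0 = -20,  x2 = 5*(10-4)+0 = 30
    # so the new view is [-20, 30]; the old view [0, 10] now occupies the
    # fractional span 0.4..0.6 of the window -- exactly where the box was.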
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if (rcParams['backend'].lower() == 'agg'):
return
if self.toolbar._active == 'PAN':
#self.toolbar._active = None
self.update_relief(newmode=None);
else:
#self.toolbar._active = 'PAN'
self.update_relief(newmode='PAN');
if self.toolbar._idPress is not None:
self.toolbar._idPress = self.canvas.mpl_disconnect(self.toolbar._idPress)
self.toolbar.mode = ''
if self.toolbar._idRelease is not None:
self.toolbar._idRelease = self.canvas.mpl_disconnect(self.toolbar._idRelease)
self.toolbar.mode = ''
if self.toolbar._active:
self.toolbar._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self.toolbar._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.toolbar.mode = 'pan/zoom mode'
self.canvas.widgetlock(self.toolbar)
else:
self.canvas.widgetlock.release(self.toolbar)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self.toolbar._active)
self.toolbar.set_message(self.toolbar.mode)
def press_pan(self, event):
'the press mouse button in pan/zoom mode callback'
if (rcParams['backend'].lower() == 'agg'):
return
if event.button == 1:
self.toolbar._button_pressed=1
elif event.button == 3:
self.toolbar._button_pressed=3
else:
self.toolbar._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self.toolbar._views.empty(): self.toolbar.push_current()
self.toolbar._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
#if x is not None and y is not None and a.in_axes(x, y) and a.get_navigate():
if x is not None and y is not None and event.inaxes==a and a.get_navigate():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lim = xmin, xmax, ymin, ymax
### workaround for matplotlib API changes
#self.toolbar._xypress.append((x, y, a, i, lim,a.transData.deepcopy())) #0.91.4
#self.toolbar._xypress.append((x, y, a, i, lim,a.transData.frozen())) #>=0.98
self.toolbar._xypress.append((x, y, a, i, lim,self.copy_trans(a.transData))) #workaround
self.canvas.mpl_disconnect(self.toolbar._idDrag)
self.toolbar._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.toolbar.press(event)
def release_pan(self, event):
'the release mouse button callback in pan/zoom mode'
if (rcParams['backend'].lower() == 'agg'):
return
self.canvas.mpl_disconnect(self.toolbar._idDrag)
self.toolbar._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
if not self.toolbar._xypress: return
self.toolbar._xypress = None
self.toolbar._button_pressed=None
self.toolbar.push_current()
self.toolbar.release(event)
self.toolbar.draw()
self.redraw_rects();
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
if (rcParams['backend'].lower() == 'agg'):
return
def format_deltas(event,dx,dy):
if event.key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif event.key=='x':
dy = 0
elif event.key=='y':
dx = 0
elif event.key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
for cur_xypress in self.toolbar._xypress:
lastx, lasty, a, ind, lim, trans = cur_xypress
xmin, xmax, ymin, ymax = lim
#safer to use the recorded button at the press than current button:
#multiple button can get pressed during motion...
if self.toolbar._button_pressed==1:
### workaround for matplotlib API changes
#lastx, lasty = trans.inverse_xy_tup( (lastx, lasty) ) #0.91.4
#lastx, lasty = trans.inverted().transform( (lastx, lasty) ) #>=0.98
lastx, lasty = self.get_inverse_xy(trans, (lastx, lasty) ) #workaround
### workaround for matplotlib API changes
#x, y = trans.inverse_xy_tup( (event.x, event.y) ) #0.91.4
#x, y = trans.inverted().transform( (event.x, event.y) ) #>=0.98
x, y = self.get_inverse_xy(trans, (event.x, event.y) ) #workaround
if a.get_xscale()=='log':
dx=1-lastx/x
else:
dx=x-lastx
if a.get_yscale()=='log':
dy=1-lasty/y
else:
dy=y-lasty
dx,dy=format_deltas(event,dx,dy)
if a.get_xscale()=='log':
xmin *= 1-dx
xmax *= 1-dx
else:
xmin -= dx
xmax -= dx
if a.get_yscale()=='log':
ymin *= 1-dy
ymax *= 1-dy
else:
ymin -= dy
ymax -= dy
elif self.toolbar._button_pressed==3:
try:
### workaround for matplotlib API changes
#dx=(lastx-event.x)/float(a.bbox.width()) #0.91.4
#dx=(lastx-event.x)/float(a.bbox.width) #>=0.98
dx=(lastx-event.x)/float(self.get_bbox_size(a.bbox,"width")) #workaround
### workaround for matplotlib API changes
#dy=(lasty-event.y)/float(a.bbox.height()) #0.91.4
#dy=(lasty-event.y)/float(a.bbox.height) #>=0.98
dy=(lasty-event.y)/float(self.get_bbox_size(a.bbox,"height")) #workaround
dx,dy=format_deltas(event,dx,dy)
if a.get_aspect() != 'auto':
dx = 0.5*(dx + dy)
dy = dx
alphax = pow(10.0,dx)
                alphay = pow(10.0,dy)  # use log scaling, avoid singularities and smoother scaling...
### workaround for matplotlib API changes
#lastx, lasty = trans.inverse_xy_tup( (lastx, lasty) ) #0.91.4
#lastx, lasty = trans.inverted().transform( (lastx, lasty) ) #>=0.98
lastx, lasty = self.get_inverse_xy(trans, (lastx, lasty) ) #workaround
if a.get_xscale()=='log':
xmin = lastx*(xmin/lastx)**alphax
xmax = lastx*(xmax/lastx)**alphax
else:
xmin = lastx+alphax*(xmin-lastx)
xmax = lastx+alphax*(xmax-lastx)
if a.get_yscale()=='log':
ymin = lasty*(ymin/lasty)**alphay
ymax = lasty*(ymax/lasty)**alphay
else:
ymin = lasty+alphay*(ymin-lasty)
ymax = lasty+alphay*(ymax-lasty)
except OverflowError:
warnings.warn('Overflow while panning')
return
a.set_xlim(xmin, xmax)
a.set_ylim(ymin, ymax)
self.redraw_rects();
self.toolbar.dynamic_update()
def mouse_move(self, event):
#print 'mouse_move', event.button
if (rcParams['backend'].lower() == 'agg'):
return
if not event.inaxes or not self.toolbar._active:
if self.toolbar._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self.toolbar._lastCursor = cursors.POINTER
else:
if self.toolbar._active=='ZOOM':
if self.toolbar._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self.toolbar._lastCursor = cursors.SELECT_REGION
if self.toolbar._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans= self.toolbar._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif self.toolbar._active=='MARKREGION':
if self.toolbar._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self.toolbar._lastCursor = cursors.SELECT_REGION
if self.toolbar._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans= self.toolbar._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self.toolbar._active=='PAN' and
self.toolbar._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self.toolbar._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.toolbar.mode):
self.toolbar.set_message('%s : %s' % (self.toolbar.mode, s))
else:
self.toolbar.set_message(s)
else: self.toolbar.set_message(self.toolbar.mode)
def update_relief(self,newmode):
'activate new mode'
if (rcParams['backend'].lower() == 'agg'):
return
if self.toolbar._active == 'ZOOM':
self.toolbar.bZoom.config(relief='raised');
if self.toolbar._active == 'PAN':
self.toolbar.bPan.config(relief='raised');
if self.toolbar._active == 'MARKREGION':
self.toolbar.bMarkRegion.config(relief='raised');
self.toolbar._active = newmode;
if self.toolbar._active == 'ZOOM':
self.toolbar.bZoom.config(relief='sunken');
if self.toolbar._active == 'PAN':
self.toolbar.bPan.config(relief='sunken');
if self.toolbar._active == 'MARKREGION':
self.toolbar.bMarkRegion.config(relief='sunken');
#### Workarounds for Matplotlib version handling (ugly) ####
def ismatlab_new(self):
verstr=matplotlib.__version__.split(".")
maj=int(verstr[0])
sub=int(verstr[1])
return (maj>0 or sub>=98)
def get_inverse_xy(self,trans,(x,y)):
if hasattr(trans,"inverse_xy_tup"): return trans.inverse_xy_tup((x, y))
elif hasattr(trans,"inverted"): return trans.inverted().transform((x, y))
else: return None
def get_xy(self,trans,(x,y)):
return self.switch_func(trans,["xy_tup","transform"],(x,y))
def copy_trans(self,trans):
return self.switch_func(trans,["deepcopy","frozen"])
def get_bbox_size(self,obj,func=""):
return self.get_called_or_attr(obj,func)
def switch_func(self,obj,funcs=[],*args,**kwargs):
"""
Tries a list of functions and return a result from callable one.
Deals with function name changes but parameters have to be unchanged.
"""
for func in funcs:
called_func=self.get_called_or_attr(obj,func,*args,**kwargs)
if called_func != None: break
return called_func
def get_called_or_attr(self,obj,func="",*args,**kwargs):
"""
Returns a result from function call if it's callable.
If not callable, returns the attribute or False (non-existent).
"""
#if not hasattr(obj,func): return False
#else: called=getattr(obj,func)
try: called=getattr(obj,func)
#except: return False
except: return None
if callable(called): return called(*args,**kwargs)
else: return called
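# ---------------------------------------------------------------------------
# Illustration (not part of the CASA module): switch_func/get_called_or_attr
# above implement a duck-typing shim for matplotlib's API renames -- try a
# list of attribute names, call the first one that exists if it is callable,
# otherwise read it as a plain attribute. The standalone sketch below shows
# the same pattern with invented class names and no matplotlib dependency
# (it also skips the original's try/except subtleties).
class _DemoOldAPI:
    def size_tup(self):          # pre-rename spelling: a method
        return (800, 600)

class _DemoNewAPI:
    size = (800, 600)            # post-rename spelling: a plain attribute

def _demo_first_available(obj, names, *args, **kwargs):
    # Return the result of the first name that resolves on obj.
    for name in names:
        attr = getattr(obj, name, None)
        if attr is None:
            continue
        return attr(*args, **kwargs) if callable(attr) else attr
    return None

if __name__ == "__main__":
    print(_demo_first_available(_DemoOldAPI(), ["size_tup", "size"]))  # (800, 600)
    print(_demo_first_available(_DemoNewAPI(), ["size_tup", "size"]))  # (800, 600)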
|
aardk/jupyter-casa
|
python/casa/TablePlotTkAgg.py
|
Python
|
gpl-2.0
| 41,002
|
[
"FLEUR"
] |
7df64f5592d355626d23af91e867e19dbd4dcf765f12a4c4bb3d6f7aaebea39f
|
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from itertools import izip
from calibre.customize import Plugin as _Plugin
FONT_SIZES = [('xx-small', 1),
('x-small', None),
('small', 2),
('medium', 3),
('large', 4),
('x-large', 5),
('xx-large', 6),
(None, 7)]
class Plugin(_Plugin):
fbase = 12
fsizes = [5, 7, 9, 12, 13.5, 17, 20, 22, 24]
screen_size = (1600, 1200)
dpi = 100
def __init__(self, *args, **kwargs):
_Plugin.__init__(self, *args, **kwargs)
self.width, self.height = self.screen_size
fsizes = list(self.fsizes)
self.fkey = list(self.fsizes)
self.fsizes = []
for (name, num), size in izip(FONT_SIZES, fsizes):
self.fsizes.append((name, num, float(size)))
self.fnames = dict((name, sz) for name, _, sz in self.fsizes if name)
self.fnums = dict((num, sz) for _, num, sz in self.fsizes if num)
self.width_pts = self.width * 72./self.dpi
self.height_pts = self.height * 72./self.dpi
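# Worked example (annotation, not part of calibre): the usable page size in
# points follows from screen_size and dpi via width_pts = width_px * 72/dpi,
# as computed in __init__ above. With the Sony Reader numbers used below
# (584x754 px at 168.451 dpi):
#
#     width_pts  = 584 * 72.0 / 168.451   # ~249.6 pt (~8.8 cm)
#     height_pts = 754 * 72.0 / 168.451   # ~322.3 pt (~11.4 cm)
#
# which is why the device profiles ship smaller font-size ladders (fsizes)
# than the default desktop-oriented profile.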
# Input profiles {{{
class InputProfile(Plugin):
author = 'Kovid Goyal'
supported_platforms = set(['windows', 'osx', 'linux'])
can_be_disabled = False
type = _('Input profile')
name = 'Default Input Profile'
    short_name = 'default' # Used in the CLI, so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful '
'if you know nothing about the input document.')
class SonyReaderInput(InputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (584, 754)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Input(SonyReaderInput):
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS 300.')
dpi = 200
class SonyReader900Input(SonyReaderInput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (584, 978)
class MSReaderInput(InputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketInput(InputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Input(InputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Input(HanlinV3Input):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 200
class CybookG3Input(InputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusInput(InputProfile):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class KindleInput(InputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class IlliadInput(InputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Input(InputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Input(InputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookInput(InputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 800)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
input_profiles = [InputProfile, SonyReaderInput, SonyReader300Input,
SonyReader900Input, MSReaderInput, MobipocketInput, HanlinV3Input,
HanlinV5Input, CybookG3Input, CybookOpusInput, KindleInput, IlliadInput,
IRexDR1000Input, IRexDR800Input, NookInput]
input_profiles.sort(cmp=lambda x,y:cmp(x.name.lower(), y.name.lower()))
# }}}
class OutputProfile(Plugin):
author = 'Kovid Goyal'
supported_platforms = set(['windows', 'osx', 'linux'])
can_be_disabled = False
type = _('Output profile')
name = 'Default Output Profile'
    short_name = 'default' # Used in the CLI, so don't use spaces etc. in it
description = _('This profile tries to provide sane defaults and is useful '
'if you want to produce a document intended to be read at a '
'computer or on a range of devices.')
#: The image size for comics
comic_screen_size = (584, 754)
#: If True the MOBI renderer on the device supports MOBI indexing
supports_mobi_indexing = False
#: If True output should be optimized for a touchscreen interface
touchscreen = False
touchscreen_news_css = ''
#: A list of extra (beyond CSS 2.1) modules supported by the device
#: Format is a cssutils profile dictionary (see iPad for example)
extra_css_modules = []
#: If True, the date is appended to the title of downloaded news
periodical_date_in_title = True
#: Characters used in jackets and catalogs
ratings_char = u'*'
empty_ratings_char = u' '
#: Unsupported unicode characters to be replaced during preprocessing
unsupported_unicode_chars = []
#: Number of ems that the left margin of a blockquote is rendered as
mobi_ems_per_blockquote = 1.0
#: Special periodical formatting needed in EPUB
epub_periodical_format = None
@classmethod
def tags_to_string(cls, tags):
from xml.sax.saxutils import escape
return escape(', '.join(tags))
class iPadOutput(OutputProfile):
name = 'iPad'
short_name = 'ipad'
description = _('Intended for the iPad and similar devices with a '
'resolution of 768x1024')
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 132.0
extra_css_modules = [
{
'name':'webkit',
'props': {'-webkit-border-bottom-left-radius':'{length}',
'-webkit-border-bottom-right-radius':'{length}',
'-webkit-border-top-left-radius':'{length}',
'-webkit-border-top-right-radius':'{length}',
'-webkit-border-radius': r'{border-width}(\s+{border-width}){0,3}|inherit',
},
'macros': {'border-width': '{length}|medium|thick|thin'}
}
]
ratings_char = u'\u2605' # filled star
empty_ratings_char = u'\u2606' # hollow star
touchscreen = True
# touchscreen_news_css {{{
touchscreen_news_css = u'''
/* hr used in articles */
.article_articles_list {
width:18%;
}
.article_link {
color: #593f29;
font-style: italic;
}
.article_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:32%;
}
.article_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:32%;
}
.article_sections_list {
width:18%;
}
.articles_link {
font-weight: bold;
}
.sections_link {
font-weight: bold;
}
.caption_divider {
border:#ccc 1px solid;
}
.touchscreen_navbar {
background:#c3bab2;
border:#ccc 0px solid;
border-collapse:separate;
border-spacing:1px;
margin-left: 5%;
margin-right: 5%;
page-break-inside:avoid;
width: 90%;
-webkit-border-radius:4px;
}
.touchscreen_navbar td {
background:#fff;
font-family:Helvetica;
font-size:80%;
/* UI touchboxes use 8px padding */
padding: 6px;
text-align:center;
}
.touchscreen_navbar td a:link {
color: #593f29;
text-decoration: none;
}
/* Index formatting */
.publish_date {
text-align:center;
}
.divider {
border-bottom:1em solid white;
border-top:1px solid gray;
}
hr.caption_divider {
border-color:black;
border-style:solid;
border-width:1px;
}
/* Feed summary formatting */
.article_summary {
display:inline-block;
padding-bottom:0.5em;
}
.feed {
font-family:sans-serif;
font-weight:bold;
font-size:larger;
}
.feed_link {
font-style: italic;
}
.feed_next {
-webkit-border-top-right-radius:4px;
-webkit-border-bottom-right-radius:4px;
font-style: italic;
width:40%;
}
.feed_prev {
-webkit-border-top-left-radius:4px;
-webkit-border-bottom-left-radius:4px;
font-style: italic;
width:40%;
}
.feed_title {
text-align: center;
font-size: 160%;
}
.feed_up {
font-weight: bold;
width:20%;
}
.summary_headline {
font-weight:bold;
text-align:left;
}
.summary_byline {
text-align:left;
font-family:monospace;
}
.summary_text {
text-align:left;
}
'''
# }}}
class iPad3Output(iPadOutput):
screen_size = comic_screen_size = (2048, 1536)
dpi = 264.0
name = 'iPad 3'
short_name = 'ipad3'
description = _('Intended for the iPad 3 and similar devices with a '
'resolution of 1536x2048')
class TabletOutput(iPadOutput):
name = 'Tablet'
short_name = 'tablet'
description = _('Intended for generic tablet devices, does no resizing of images')
screen_size = (10000, 10000)
comic_screen_size = (10000, 10000)
class SamsungGalaxy(TabletOutput):
name = 'Samsung Galaxy'
short_name = 'galaxy'
description = _('Intended for the Samsung Galaxy and similar tablet devices with '
'a resolution of 600x1280')
screen_size = comic_screen_size = (600, 1280)
class NookHD(TabletOutput):
name = 'Nook HD+'
short_name = 'nook_hd_plus'
description = _('Intended for the Nook HD+ and similar tablet devices with '
'a resolution of 1280x1920')
screen_size = comic_screen_size = (1280, 1920)
class SonyReaderOutput(OutputProfile):
name = 'Sony Reader'
short_name = 'sony'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/600/700 etc.')
screen_size = (590, 775)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
unsupported_unicode_chars = [u'\u201f', u'\u201b']
epub_periodical_format = 'sony'
#periodical_date_in_title = False
class KoboReaderOutput(OutputProfile):
name = 'Kobo Reader'
short_name = 'kobo'
description = _('This profile is intended for the Kobo Reader.')
screen_size = (536, 710)
comic_screen_size = (536, 710)
dpi = 168.451
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class SonyReader300Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 300'
short_name = 'sony300'
description = _('This profile is intended for the SONY PRS-300.')
dpi = 200
class SonyReader900Output(SonyReaderOutput):
author = 'John Schember'
name = 'Sony Reader 900'
short_name = 'sony900'
description = _('This profile is intended for the SONY PRS-900.')
screen_size = (600, 999)
comic_screen_size = screen_size
class SonyReaderT3Output(SonyReaderOutput):
author = 'Kovid Goyal'
name = 'Sony Reader T3'
short_name = 'sonyt3'
description = _('This profile is intended for the SONY PRS-T3.')
screen_size = (758, 934)
comic_screen_size = screen_size
class GenericEink(SonyReaderOutput):
name = 'Generic e-ink'
short_name = 'generic_eink'
description = _('Suitable for use with any e-ink device')
epub_periodical_format = None
class GenericEinkLarge(GenericEink):
name = 'Generic e-ink large'
short_name = 'generic_eink_large'
description = _('Suitable for use with any large screen e-ink device')
screen_size = (600, 999)
comic_screen_size = screen_size
class JetBook5Output(OutputProfile):
name = 'JetBook 5-inch'
short_name = 'jetbook5'
description = _('This profile is intended for the 5-inch JetBook.')
screen_size = (480, 640)
dpi = 168.451
class SonyReaderLandscapeOutput(SonyReaderOutput):
name = 'Sony Reader Landscape'
short_name = 'sony-landscape'
description = _('This profile is intended for the SONY PRS line. '
'The 500/505/700 etc, in landscape mode. Mainly useful '
'for comics.')
screen_size = (784, 1012)
comic_screen_size = (784, 1012)
class MSReaderOutput(OutputProfile):
name = 'Microsoft Reader'
short_name = 'msreader'
description = _('This profile is intended for the Microsoft Reader.')
screen_size = (480, 652)
dpi = 96
fbase = 13
fsizes = [10, 11, 13, 16, 18, 20, 22, 26]
class MobipocketOutput(OutputProfile):
name = 'Mobipocket Books'
short_name = 'mobipocket'
description = _('This profile is intended for the Mobipocket books.')
# Unfortunately MOBI books are not narrowly targeted, so this information is
# quite likely to be spurious
screen_size = (600, 800)
dpi = 96
fbase = 18
fsizes = [14, 14, 16, 18, 20, 22, 24, 26]
class HanlinV3Output(OutputProfile):
name = 'Hanlin V3'
short_name = 'hanlinv3'
description = _('This profile is intended for the Hanlin V3 and its clones.')
# Screen size is a best guess
screen_size = (584, 754)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class HanlinV5Output(HanlinV3Output):
name = 'Hanlin V5'
short_name = 'hanlinv5'
description = _('This profile is intended for the Hanlin V5 and its clones.')
dpi = 200
class CybookG3Output(OutputProfile):
name = 'Cybook G3'
short_name = 'cybookg3'
description = _('This profile is intended for the Cybook G3.')
# Screen size is a best guess
screen_size = (600, 800)
comic_screen_size = (600, 757)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class CybookOpusOutput(SonyReaderOutput):
author = 'John Schember'
name = 'Cybook Opus'
short_name = 'cybook_opus'
description = _('This profile is intended for the Cybook Opus.')
# Screen size is a best guess
dpi = 200
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
epub_periodical_format = None
class KindleOutput(OutputProfile):
name = 'Kindle'
short_name = 'kindle'
description = _('This profile is intended for the Amazon Kindle.')
# Screen size is a best guess
screen_size = (525, 640)
dpi = 168.451
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
@classmethod
def tags_to_string(cls, tags):
return u'%s <br/><span style="color:white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ')
class KindleDXOutput(OutputProfile):
name = 'Kindle DX'
short_name = 'kindle_dx'
description = _('This profile is intended for the Amazon Kindle DX.')
# Screen size is a best guess
screen_size = (744, 1022)
dpi = 150.0
comic_screen_size = (771, 1116)
#comic_screen_size = (741, 1022)
supports_mobi_indexing = True
periodical_date_in_title = False
empty_ratings_char = u'\u2606'
ratings_char = u'\u2605'
mobi_ems_per_blockquote = 2.0
@classmethod
def tags_to_string(cls, tags):
return u'%s <br/><span style="color: white">%s</span>' % (', '.join(tags),
'ttt '.join(tags)+'ttt ')
class KindlePaperWhiteOutput(KindleOutput):
name = 'Kindle PaperWhite'
short_name = 'kindle_pw'
description = _('This profile is intended for the Amazon Kindle PaperWhite')
# Screen size is a best guess
screen_size = (658, 940)
dpi = 212.0
comic_screen_size = screen_size
class KindleFireOutput(KindleDXOutput):
name = 'Kindle Fire'
short_name = 'kindle_fire'
description = _('This profile is intended for the Amazon Kindle Fire.')
screen_size = (570, 1016)
dpi = 169.0
comic_screen_size = (570, 1016)
@classmethod
def tags_to_string(cls, tags):
# The idiotic fire doesn't obey the color:white directive
from xml.sax.saxutils import escape
return escape(', '.join(tags))
class IlliadOutput(OutputProfile):
name = 'Illiad'
short_name = 'illiad'
description = _('This profile is intended for the Irex Illiad.')
screen_size = (760, 925)
comic_screen_size = (760, 925)
dpi = 160.0
fbase = 12
fsizes = [7.5, 9, 10, 12, 15.5, 20, 22, 24]
class IRexDR1000Output(OutputProfile):
author = 'John Schember'
name = 'IRex Digital Reader 1000'
short_name = 'irexdr1000'
description = _('This profile is intended for the IRex Digital Reader 1000.')
# Screen size is a best guess
screen_size = (1024, 1280)
comic_screen_size = (996, 1241)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class IRexDR800Output(OutputProfile):
author = 'Eric Cronin'
name = 'IRex Digital Reader 800'
short_name = 'irexdr800'
description = _('This profile is intended for the IRex Digital Reader 800.')
# Screen size is a best guess
screen_size = (768, 1024)
comic_screen_size = (768, 1024)
dpi = 160
fbase = 16
fsizes = [12, 14, 16, 18, 20, 22, 24]
class NookOutput(OutputProfile):
author = 'John Schember'
name = 'Nook'
short_name = 'nook'
description = _('This profile is intended for the B&N Nook.')
# Screen size is a best guess
screen_size = (600, 730)
comic_screen_size = (584, 730)
dpi = 167
fbase = 16
fsizes = [12, 12, 14, 16, 18, 20, 22, 24]
class NookColorOutput(NookOutput):
name = 'Nook Color'
short_name = 'nook_color'
description = _('This profile is intended for the B&N Nook Color.')
screen_size = (600, 900)
comic_screen_size = (594, 900)
dpi = 169
class BambookOutput(OutputProfile):
author = 'Li Fanxi'
name = 'Sanda Bambook'
short_name = 'bambook'
description = _('This profile is intended for the Sanda Bambook.')
# Screen size is for full screen display
screen_size = (580, 780)
# Comic size is for normal display
comic_screen_size = (540, 700)
dpi = 168.451
fbase = 12
fsizes = [10, 12, 14, 16]
class PocketBook900Output(OutputProfile):
author = 'Chris Lockfort'
name = 'PocketBook Pro 900'
short_name = 'pocketbook_900'
description = _('This profile is intended for the PocketBook Pro 900 series of devices.')
screen_size = (810, 1180)
dpi = 150.0
comic_screen_size = screen_size
class PocketBookPro912Output(OutputProfile):
author = 'Daniele Pizzolli'
name = 'PocketBook Pro 912'
short_name = 'pocketbook_pro_912'
description = _('This profile is intended for the PocketBook Pro 912 series of devices.')
# According to http://download.pocketbook-int.com/user-guides/E_Ink/912/User_Guide_PocketBook_912(EN).pdf
screen_size = (825, 1200)
dpi = 155.0
comic_screen_size = screen_size
output_profiles = [OutputProfile, SonyReaderOutput, SonyReader300Output,
SonyReader900Output, SonyReaderT3Output, MSReaderOutput, MobipocketOutput, HanlinV3Output,
HanlinV5Output, CybookG3Output, CybookOpusOutput, KindleOutput,
iPadOutput, iPad3Output, KoboReaderOutput, TabletOutput, SamsungGalaxy,
SonyReaderLandscapeOutput, KindleDXOutput, IlliadOutput, NookHD,
IRexDR1000Output, IRexDR800Output, JetBook5Output, NookOutput,
BambookOutput, NookColorOutput, PocketBook900Output, PocketBookPro912Output,
GenericEink, GenericEinkLarge, KindleFireOutput, KindlePaperWhiteOutput]
output_profiles.sort(key=lambda x: x.name.lower())
|
insomnia-lab/calibre
|
src/calibre/customize/profiles.py
|
Python
|
gpl-3.0
| 26,072
|
[
"Galaxy"
] |
0b1b5a210897b36a5ba6df23c0d47830ff710f73d70c0d0e657c408d1b0dab03
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
_EXCLUDED_PATHS = (
r"^breakpad[\\\/].*",
r"^native_client_sdk[\\\/].*",
r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
r"^skia[\\\/].*",
r"^v8[\\\/].*",
r".*MakeFile$",
r".+_autogen\.h$",
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.\n'
'Email joi@chromium.org if you have questions.')
_BANNED_OBJC_FUNCTIONS = (
(
'addTrackingRect:',
(
        'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is '
'prohibited. Please use CrTrackingArea instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'NSTrackingArea',
(
'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
'instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'convertPointFromBase:',
(
'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertPointToBase:',
(
'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectFromBase:',
(
'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
'Please use |convertRect:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectToBase:',
(
'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
'Please use |convertRect:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeFromBase:',
(
'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
'Please use |convertSize:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeToBase:',
(
'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
'Please use |convertSize:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
)
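# Note on the table layout (an editorial gloss, not from the original file):
# each entry in _BANNED_OBJC_FUNCTIONS above and _BANNED_CPP_FUNCTIONS below
# is a 3-tuple of (substring to search changed lines for, tuple of message
# lines to emit, error flag). _CheckNoBannedFunctions() reports a match as a
# presubmit error when the flag is True and as a warning when it is False.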
_BANNED_CPP_FUNCTIONS = (
# Make sure that gtest's FRIEND_TEST() macro is not used; the
# FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
# used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes.
(
'FRIEND_TEST(',
(
'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
),
False,
),
(
'ScopedAllowIO',
(
'New code should not use ScopedAllowIO. Post a task to the blocking',
'pool or the FILE thread instead.',
),
True,
),
(
'FilePathWatcher::Delegate',
(
'New code should not use FilePathWatcher::Delegate. Use the callback',
'interface instead.',
),
False,
),
(
'browser::FindLastActiveWithProfile',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindBrowserWithProfile',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindAnyBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindOrCreateTabbedBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindTabbedBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files and the like, as the declaration of
# for-testing functions in header files are hard to distinguish from
# calls to such functions without a proper C++ parser.
platform_specifiers = r'(_(android|chromeos|gtk|mac|posix|win))?'
source_extensions = r'\.(cc|cpp|cxx|mm)$'
file_inclusion_pattern = r'.+%s' % source_extensions
file_exclusion_patterns = (
r'.*[/\\](test_|mock_).+%s' % source_extensions,
r'.+_test_(base|support|util)%s' % source_extensions,
r'.+_(api|browser|perf|unit|ui)?test%s%s' % (platform_specifiers,
source_extensions),
r'.+profile_sync_service_harness%s' % source_extensions,
)
path_exclusion_patterns = (
r'.*[/\\](test|tool(s)?)[/\\].*',
# At request of folks maintaining this folder.
r'chrome[/\\]browser[/\\]automation[/\\].*',
)
base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
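  # Illustrative behaviour of the two patterns (a hedged gloss, not part of
  # the original check): inclusion_pattern flags a call site such as
  #   value = GetValueForTesting(x)
  # while exclusion_pattern suppresses qualified names and definitions like
  #   void MyClass::ResetForTesting() {
  # so declaring or defining a for-testing helper is not itself reported.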
def FilterFile(affected_file):
black_list = (file_exclusion_patterns + path_exclusion_patterns +
_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
lines = input_api.ReadFile(f).splitlines()
    for line_number, line in enumerate(lines, start=1):
      if (inclusion_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
            '%s:%d\n    %s' % (local_path, line_number, line.strip()))
if problems:
if not input_api.is_committing:
return [output_api.PresubmitPromptWarning(_TEST_ONLY_WARNING, problems)]
else:
# We don't warn on commit, to avoid stopping commits going through CQ.
return [output_api.PresubmitNotifyResult(_TEST_ONLY_WARNING, problems)]
else:
return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
  if files:
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST"""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith('test.cc')):
continue
for line_num, line in f.ChangedContents():
if 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling an API that accepts a wstring, fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoBannedFunctions(input_api, output_api):
"""Make sure that banned functions are not used."""
warnings = []
errors = []
file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
if func_name in line:
          problems = warnings
          if error:
            problems = errors
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_CPP_FUNCTIONS:
if func_name in line:
          problems = warnings
          if error:
            problems = errors
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
result = []
  if warnings:
result.append(output_api.PresubmitPromptWarning(
'Banned functions were used.\n' + '\n'.join(warnings)))
  if errors:
result.append(output_api.PresubmitError(
'Banned functions were used.\n' + '\n'.join(errors)))
return result
def _CheckNoPragmaOnce(input_api, output_api):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
return [output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
return []
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
results.extend(_CheckAuthorizedAuthor(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
results.extend(_CheckNoNewWStrings(input_api, output_api))
results.extend(_CheckNoDEPSGIT(input_api, output_api))
results.extend(_CheckNoBannedFunctions(input_api, output_api))
results.extend(_CheckNoPragmaOnce(input_api, output_api))
return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
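  # For illustration (hedged; the address is a placeholder): an AUTHORS line
  # such as
  #   Some Contributor <some.contributor@example.com>
  # matches the regex above and group(1) yields the email address, which is
  # lower-cased for the fnmatch comparison below.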
if input_api.verbose:
print 'Valid authors are %s' % ', '.join(valid_authors)
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
# TODO(thestig) temporarily disabled, doesn't work in third_party/
#results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
# input_api, output_api, sources))
# Make sure the tree is 'open'.
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://chromium-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
output_api, 'http://codereview.chromium.org',
('win_rel', 'linux_rel', 'mac_rel, win:compile'),
'tryserver@chromium.org'))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasTestField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_CheckSubversionConfig(input_api, output_api))
return results
def GetPreferredTrySlaves(project, change):
files = change.LocalPaths()
if not files:
return []
  if all(re.search(r'\.(m|mm)$|[/_]mac[/_.]', f) for f in files):
return ['mac_rel']
if all(re.search('[/_]win[/_.]', f) for f in files):
return ['win_rel']
if all(re.search('[/_]android[/_.]', f) for f in files):
return ['android']
trybots = ['win_rel', 'linux_rel', 'mac_rel', 'linux_clang:compile',
'android']
# match things like aurax11.cc or aura_oak.cc
if any(re.search('[/_]aura', f) for f in files):
trybots.append('linux_chromeos')
return trybots
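# Illustrative behaviour (a hedged gloss; file names are hypothetical): a
# change touching only foo_mac.mm matches the first pattern and returns
# ['mac_rel']; a change spanning several platforms falls through to the
# default trybot list, with 'linux_chromeos' appended when any path
# mentions aura.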
|
keishi/chromium
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 18,140
|
[
"VisIt"
] |
1f385e0b320245e82c8d05d2b38e879c57bb9e283d70b82553d3af6a7e1d91bf
|
""" IdProvider based on OAuth2 protocol
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from authlib.oauth2.rfc6749.util import scope_to_list
from DIRAC import S_OK
from DIRAC.Resources.IdProvider.OAuth2IdProvider import OAuth2IdProvider
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup, getGroupOption
__RCSID__ = "$Id$"
class IAMIdProvider(OAuth2IdProvider):
def getGroupScopes(self, group):
"""Get group scopes
:param str group: DIRAC group
:return: list
"""
idPScope = getGroupOption(group, "IdPRole")
if not idPScope:
idPScope = "wlcg.groups:/%s/%s" % (getVOForGroup(group), group.split("_")[1])
return S_OK(scope_to_list(idPScope))
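# Illustrative example (hedged; the group and VO names are hypothetical): for
# a DIRAC group "biomed_user" with no explicit IdPRole option, and a VO of
# "biomed", the scope built above is "wlcg.groups:/biomed/user" and the
# method returns S_OK(["wlcg.groups:/biomed/user"]).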
|
ic-hep/DIRAC
|
src/DIRAC/Resources/IdProvider/IAMIdProvider.py
|
Python
|
gpl-3.0
| 809
|
[
"DIRAC"
] |
d8c6f91e1c8b099bac2b083513819ebbe051a353a246f4839caae1bb96870b3a
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Li Li <aiki.nogard@gmail.com>
# License: BSD 3 clause
from numbers import Integral
import numpy as np
import warnings
from ..externals import six
from ..utils.validation import check_is_fitted
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
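# A quick illustrative check (an editorial addition, not part of the module):
# with s=0.75 and v=0.9 as above, the first hue (h=25) works out to roughly
#     _color_brew(3)[0] == [229, 129, 57]
# i.e. every entry is an [R, G, B] list of ints in the 0-255 range.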
class Sentinel(object):
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False, precision=3):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
        returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, optional (default=3)
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] -
sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
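    # Illustrative example for get_color (hedged, not from the original
    # source): for a two-class node with value == [0.8, 0.2], the alpha
    # channel is int(np.round(255 * (0.8 - 0.2) / (1 - 0.2))) == 191, so
    # nodes where one class clearly dominates are painted more opaquely.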
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
precision),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], precision)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, precision)
elif proportion:
# Classification
value_text = np.around(value, precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
check_is_fitted(decision_tree, 'tree_')
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_ in the decision_tree
if feature_names is not None:
if len(feature_names) != decision_tree.n_features_:
raise ValueError("Length of feature_names, %d "
"does not match number of features, %d"
% (len(feature_names),
decision_tree.n_features_))
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
|
zorroblue/scikit-learn
|
sklearn/tree/export.py
|
Python
|
bsd-3-clause
| 18,326
|
[
"Brian"
] |
f0eb201201ecf84524c72ea3a4f834aebe29e8d9bb5b1c0f9fd1dff39ca5b1d9
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
import copy
import warnings
import numpy as np
from ..io.pick import pick_channels_cov
from ..forward import apply_forward
from ..utils import check_random_state, verbose, _time_mask
@verbose
def simulate_evoked(fwd, stc, info, cov, snr=3., tmin=None, tmax=None,
iir_filter=None, random_state=None, verbose=None):
"""Generate noisy evoked data
.. note:: No projections from ``info`` will be present in the
output ``evoked``. You can use e.g.
:func:`evoked.add_proj <mne.Evoked.add_proj>` or
:func:`evoked.add_eeg_average_proj
<mne.Evoked.add_eeg_average_proj>`
to add them afterward as necessary.
Parameters
----------
fwd : Forward
a forward solution.
stc : SourceEstimate object
The source time courses.
info : dict
Measurement info to generate the evoked.
cov : Covariance object
The noise covariance.
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) ).
tmin : float | None
start of time interval to estimate SNR. If None first time point
is used.
    tmax : float | None
        end of time interval to estimate SNR. If None last time point
        is used.
iir_filter : None | array
IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
random_state : None | int | np.random.RandomState
To specify the random generator state.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : Evoked object
The simulated evoked data
See Also
--------
simulate_raw
simulate_stc
simulate_sparse_stc
Notes
-----
.. versionadded:: 0.10.0
"""
evoked = apply_forward(fwd, stc, info)
if snr < np.inf:
noise = simulate_noise_evoked(evoked, cov, iir_filter, random_state)
evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin,
tmax=tmax)
else:
evoked_noise = evoked
return evoked_noise
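# A minimal usage sketch (hedged; ``fwd``, ``stc``, ``info`` and ``cov`` must
# come from an actual MNE-Python pipeline and are not constructed here):
#     evoked = simulate_evoked(fwd, stc, info, cov, snr=6.,
#                              iir_filter=[1, -1, 0.2], random_state=42)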
def simulate_noise_evoked(evoked, cov, iir_filter=None, random_state=None):
"""Creates noise as a multivariate Gaussian
The spatial covariance of the noise is given from the cov matrix.
Parameters
----------
evoked : evoked object
an instance of evoked used as template
cov : Covariance object
The noise covariance
iir_filter : None | array
IIR filter coefficients (denominator)
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
noise : evoked object
an instance of evoked
Notes
-----
.. versionadded:: 0.10.0
"""
noise = evoked.copy()
noise.data = _generate_noise(evoked.info, cov, iir_filter, random_state,
evoked.data.shape[1])[0]
return noise
def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None):
"""Helper to create spatially colored and temporally IIR-filtered noise"""
from scipy.signal import lfilter
noise_cov = pick_channels_cov(cov, include=info['ch_names'], exclude=[])
if set(info['ch_names']) != set(noise_cov.ch_names):
raise ValueError('Evoked and covariance channel names are not '
'identical. Cannot generate the noise matrix. '
'Channels missing in covariance %s.' %
np.setdiff1d(info['ch_names'], noise_cov.ch_names))
rng = check_random_state(random_state)
c = np.diag(noise_cov.data) if noise_cov['diag'] else noise_cov.data
mu_channels = np.zeros(len(c))
# we almost always get a positive semidefinite warning here, so squash it
with warnings.catch_warnings(record=True):
noise = rng.multivariate_normal(mu_channels, c, n_samples).T
if iir_filter is not None:
if zi is None:
zi = np.zeros((len(c), len(iir_filter) - 1))
noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
else:
zf = None
return noise, zf
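# A hedged sketch of what _generate_noise does, in isolation (the variable
# names below are illustrative only):
#     rng = np.random.RandomState(0)
#     white = rng.multivariate_normal(np.zeros(n_ch), cov_matrix, n_samples).T
#     colored, _ = lfilter([1], [1, -1, 0.2], white, axis=-1,
#                          zi=np.zeros((n_ch, 2)))
# Spatial structure comes from the covariance draw; temporal structure from
# the AR (IIR) filter applied along the time axis.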
def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
"""Adds noise to evoked object with specified SNR.
SNR is computed in the interval from tmin to tmax.
Parameters
----------
evoked : Evoked object
An instance of evoked with signal
noise : Evoked object
An instance of evoked with noise
snr : float
signal to noise ratio in dB. It corresponds to
10 * log10( var(signal) / var(noise) )
tmin : float
start time before event
tmax : float
end time after event
Returns
-------
evoked_noise : Evoked object
An instance of evoked corrupted by noise
"""
evoked = copy.deepcopy(evoked)
tmask = _time_mask(evoked.times, tmin, tmax, sfreq=evoked.info['sfreq'])
tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
np.mean((noise.data ** 2).ravel()))
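    # tmp is the current SNR in dB, computed from a power ratio:
    #     tmp = 10 * log10(var(signal) / var(noise))
    # Scaling the noise *amplitude* by 10 ** ((tmp - snr) / 20) divides its
    # power by 10 ** ((tmp - snr) / 10), so the resulting ratio equals the
    # requested ``snr``.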
noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
evoked.data += noise.data
return evoked
|
jniediek/mne-python
|
mne/simulation/evoked.py
|
Python
|
bsd-3-clause
| 5,459
|
[
"Gaussian"
] |
0c60d98baad2e47676eac56932cd9918ea5bd16eda109dbcdc9ca275c7a7044a
|
import sys
from birdfeeder.client import write_spider, write_walker, clear, feed_from_thredds, feed_from_walker, feed_from_spider
import logging
logging.basicConfig(format='%(message)s', level=logging.WARN)
logger = logging.getLogger(__name__)
def create_parser():
import argparse
parser = argparse.ArgumentParser(
prog="birdfeeder",
usage='''birdfeeder [<options>] <command> [<args>]''',
description="Feeds Solr with Datasets (NetCDF Format) from Thredds Catalogs and File System.",
)
parser.add_argument("-v",
dest="verbose",
help="enable verbose mode",
action="store_true")
parser.add_argument("--service",
dest='service',
required=False,
type=type(''),
default='http://localhost:8983/solr/birdhouse',
help="Solr URL. Default: http://localhost:8983/solr/birdhouse",
action="store")
parser.add_argument("--maxrecords",
dest='maxrecords',
required=False,
type=type(-1),
default=-1,
help="Maximum number of records to publish. Default: -1 (unlimited)",
action="store")
parser.add_argument("--batch-size",
dest='batch_size',
required=False,
type=type(1),
default=50000,
help="Batch size of records to publish. Default: 50000",
action="store")
subparsers = parser.add_subparsers(
dest='command',
title='command',
description='List of available commands',
help='Run "birdfeeder <command> -h" to get additional help.'
)
# spider command
subparser = subparsers.add_parser(
'spider',
prog="birdfeeder spider",
help="Runs spider to crawl NetCDF files on a HTTP file service and writes the path list to a CSV file."
)
subparser.add_argument("--url",
dest='url',
required=True,
type=type(''),
help="HTTP file service URL",
action="store")
subparser.add_argument("--depth",
dest='depth',
required=False,
type=type(0),
default=100,
help="Depth level for crawler. Default: 100",
action="store")
subparser.add_argument("-o",
dest='output',
required=False,
type=type(''),
default='out.csv',
help="Filename of the output CSV file. Default: out.csv",
action="store")
# walker command
subparser = subparsers.add_parser(
'walker',
prog="birdfeeder walker",
help="Runs walker to crawl NetCDF files from filesystem and writes the path list to a CSV file."
)
subparser.add_argument("--start-dir",
dest='start_dir',
required=True,
type=type(''),
help="Start directory",
action="store")
subparser.add_argument("-o",
dest='output',
required=False,
type=type(''),
default='out.csv',
help="Filename of the output CSV file. Default: out.csv",
action="store")
# clear command
subparser = subparsers.add_parser(
'clear',
prog="birdfeeder clear",
help="Clears the complete solr index. Use with caution!"
)
# from-thredds command
subparser = subparsers.add_parser(
'from-thredds',
prog="birdfeeder from-thredds",
help="Publish datasets from Thredds Catalog to Solr."
)
subparser.add_argument("--catalog-url",
dest='catalog_url',
required=True,
type=type(''),
help="Thredds Catalog URL",
action="store")
subparser.add_argument("--depth",
dest='depth',
required=False,
type=type(0),
default=1,
help="Depth level for Thredds catalog crawler. Default: 1",
action="store")
# from-walker command
subparser = subparsers.add_parser(
'from-walker',
prog="birdfeeder from-walker",
help="Publish NetCDF files from directory to Solr."
)
subparser.add_argument("--start-dir",
dest='start_dir',
required=True,
type=type(''),
help="Start directory",
action="store")
# from-spider command
subparser = subparsers.add_parser(
'from-spider',
prog="birdfeeder from-spider",
help="Runs spider to crawl NetCDF files on a HTTP file service and publishes them to Solr."
)
subparser.add_argument("--url",
dest='url',
required=True,
type=type(''),
help="HTTP file service URL",
action="store")
subparser.add_argument("--depth",
dest='depth',
required=False,
type=type(0),
default=100,
help="Depth level for crawler. Default: 100",
action="store")
return parser
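# Example invocations (an editorial sketch; URLs are placeholders):
#   birdfeeder spider --url http://example.org/data --depth 50 -o paths.csv
#   birdfeeder --service http://localhost:8983/solr/birdhouse \
#       from-thredds --catalog-url http://example.org/thredds/catalog.xml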
def execute(args):
if args.verbose:
logger.setLevel(logging.DEBUG)
if args.command == 'spider':
write_spider(url=args.url, depth=args.depth, filename=args.output)
    elif args.command == 'walker':
write_walker(start_dir=args.start_dir, filename=args.output)
elif args.command == 'clear':
clear(service=args.service)
elif args.command == 'from-thredds':
feed_from_thredds(service=args.service, catalog_url=args.catalog_url, depth=args.depth,
maxrecords=args.maxrecords, batch_size=args.batch_size)
elif args.command == 'from-walker':
feed_from_walker(service=args.service, start_dir=args.start_dir,
maxrecords=args.maxrecords,
batch_size=args.batch_size)
elif args.command == 'from-spider':
feed_from_spider(service=args.service, url=args.url, depth=args.depth,
maxrecords=args.maxrecords,
batch_size=args.batch_size)
logger.info('Done.')
def main():
import argcomplete
logger.setLevel(logging.INFO)
parser = create_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args()
execute(args)
if __name__ == '__main__':
sys.exit(main())
|
bird-house/bird-feeder
|
birdfeeder/__init__.py
|
Python
|
apache-2.0
| 7,360
|
[
"NetCDF"
] |
fd1c349a0fd7502d9d99b4eab3be2009377f8194f2d5d69497593eb16ecfdfb3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
NavWebReport - main class that produces the report. Entry point to produce
the report is write_report
NavWebOptions - class that defines the options and provides the handling
interface
"""
#------------------------------------------------
# python modules
#------------------------------------------------
import logging
from functools import partial
import os
import sys
import time
import shutil
import tarfile
from io import BytesIO, TextIOWrapper
from collections import defaultdict
from decimal import getcontext
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (EventType, Name,
Person,
Family, Event, Place, Source,
Citation, Media, Repository, Note, Tag)
from gramps.gen.plug.menu import (PersonOption, NumberOption, StringOption,
BooleanOption, EnumeratedListOption,
FilterOption, NoteOption, MediaOption,
DestinationOption)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.constfunc import win, get_curr_dir
from gramps.gen.config import config
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.proxy import CacheProxyDb
from gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS
from gramps.gen.relationship import get_relationship_calculator
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.person import PersonPages
from gramps.plugins.webreport.family import FamilyPages
from gramps.plugins.webreport.event import EventPages
from gramps.plugins.webreport.media import MediaPages
from gramps.plugins.webreport.place import PlacePages
from gramps.plugins.webreport.source import SourcePages
from gramps.plugins.webreport.repository import RepositoryPages
from gramps.plugins.webreport.citation import CitationPages
from gramps.plugins.webreport.surnamelist import SurnameListPage
from gramps.plugins.webreport.surname import SurnamePage
from gramps.plugins.webreport.thumbnail import ThumbnailPreviewPage
from gramps.plugins.webreport.statistics import StatisticsPage
from gramps.plugins.webreport.home import HomePage
from gramps.plugins.webreport.contact import ContactPage
from gramps.plugins.webreport.download import DownloadPage
from gramps.plugins.webreport.introduction import IntroductionPage
from gramps.plugins.webreport.addressbook import AddressBookPage
from gramps.plugins.webreport.addressbooklist import AddressBookListPage
from gramps.plugins.webreport.common import (get_gendex_data,
HTTP, HTTPS, _WEB_EXT, CSS,
_NARRATIVESCREEN, _NARRATIVEPRINT,
_WRONGMEDIAPATH, sort_people)
LOG = logging.getLogger(".NarrativeWeb")
_ = glocale.translation.sgettext
getcontext().prec = 8
#------------------------------------------------
# constants
#------------------------------------------------
_DEFAULT_MAX_IMG_WIDTH = 800 # resize images that are wider than this
_DEFAULT_MAX_IMG_HEIGHT = 600 # resize images that are taller than this
# The two values above are settable in options.
class NavWebReport(Report):
"""
Create WebReport object that produces the report.
"""
def __init__(self, database, options, user):
"""
@param: database -- The Gramps database instance
@param: options -- Instance of the Options class for this report
@param: user -- Instance of a gen.user.User()
"""
Report.__init__(self, database, options, user)
self.user = user
menu = options.menu
self.link_prefix_up = True
self.options = {}
for optname in menu.get_all_option_names():
menuopt = menu.get_option_by_name(optname)
self.options[optname] = menuopt.get_value()
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, menu)
self.rlocale = self._locale
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu)
self.database = CacheProxyDb(self.database)
self._db = self.database
filters_option = menu.get_option_by_name('filter')
self.filter = filters_option.get_filter()
self.copyright = self.options['cright']
self.target_path = self.options['target']
self.ext = self.options['ext']
self.css = self.options['css']
self.navigation = self.options["navigation"]
self.citationreferents = self.options['citationreferents']
self.title = self.options['title']
self.inc_gallery = self.options['gallery']
self.inc_unused_gallery = self.options['unused']
self.create_thumbs_only = self.options['create_thumbs_only']
self.opts = self.options
self.inc_contact = self.opts['contactnote'] or self.opts['contactimg']
# name format options
self.name_format = self.options['name_format']
# include families or not?
self.inc_families = self.options['inc_families']
# create an event pages or not?
self.inc_events = self.options['inc_events']
# include repository page or not?
self.inc_repository = self.options['inc_repository']
# include GENDEX page or not?
self.inc_gendex = self.options['inc_gendex']
# Download Options Tab
self.inc_download = self.options['incdownload']
self.dl_fname1 = self.options['down_fname1']
self.dl_descr1 = self.options['dl_descr1']
self.dl_fname2 = self.options['down_fname2']
self.dl_descr2 = self.options['dl_descr2']
self.encoding = self.options['encoding']
self.use_archive = self.options['archive']
self.use_intro = self.options['intronote'] or self.options['introimg']
self.use_home = self.options['homenote'] or self.options['homeimg']
self.use_contact = self.opts['contactnote'] or self.opts['contactimg']
        # Do we need to include this in a CMS?
self.usecms = self.options['usecms']
self.target_uri = self.options['cmsuri']
        # Do we need to include the web calendar?
self.usecal = self.options['usecal']
self.target_cal_uri = self.options['caluri']
# either include the gender graphics or not?
self.ancestortree = self.options['ancestortree']
# whether to display children in birthorder or entry order?
self.birthorder = self.options['birthorder']
# get option for Internet Address Book
self.inc_addressbook = self.options["inc_addressbook"]
# Place Map tab options
self.placemappages = self.options['placemappages']
self.familymappages = self.options['familymappages']
self.mapservice = self.options['mapservice']
self.googleopts = self.options['googleopts']
self.googlemapkey = self.options['googlemapkey']
if self.use_home:
self.index_fname = "index"
self.surname_fname = "surnames"
self.intro_fname = "introduction"
elif self.use_intro:
self.index_fname = None
self.surname_fname = "surnames"
self.intro_fname = "index"
else:
self.index_fname = None
self.surname_fname = "index"
self.intro_fname = None
self.archive = None
self.cur_fname = None # Internal use. The name of the output file,
# to be used for the tar archive.
self.string_io = None
if self.use_archive:
self.html_dir = None
else:
self.html_dir = self.target_path
self.warn_dir = True # Only give warning once.
self.obj_dict = None
self.visited = None
self.bkref_dict = None
self.rel_class = None
self.tab = None
if self.options['securesite']:
self.secure_mode = HTTPS
else:
self.secure_mode = HTTP
def write_report(self):
"""
The first method called to write the Narrative Web after loading options
"""
global _WRONGMEDIAPATH
_WRONGMEDIAPATH = []
if not self.use_archive:
dir_name = self.target_path
if dir_name is None:
dir_name = get_curr_dir()
elif not os.path.isdir(dir_name):
parent_dir = os.path.dirname(dir_name)
if not os.path.isdir(parent_dir):
msg = _("Neither %(current)s nor %(parent)s "
"are directories") % {
'current': dir_name, 'parent': parent_dir}
self.user.notify_error(msg)
return
else:
try:
os.mkdir(dir_name)
except IOError as value:
msg = _("Could not create the directory: %s"
) % dir_name + "\n" + value[1]
self.user.notify_error(msg)
return
except:
msg = _("Could not create the directory: %s") % dir_name
self.user.notify_error(msg)
return
try:
image_dir_name = os.path.join(dir_name, 'images')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
image_dir_name = os.path.join(dir_name, 'thumb')
if not os.path.isdir(image_dir_name):
os.mkdir(image_dir_name)
except IOError as value:
msg = _("Could not create the directory: %s"
) % image_dir_name + "\n" + value[1]
self.user.notify_error(msg)
return
except:
msg = _("Could not create the directory: %s"
) % image_dir_name + "\n" + value[1]
self.user.notify_error(msg)
return
else:
if os.path.isdir(self.target_path):
self.user.notify_error(
_('Invalid file name'),
_('The archive file must be a file, not a directory'))
return
try:
self.archive = tarfile.open(self.target_path, "w:gz")
except (OSError, IOError) as value:
self.user.notify_error(
_("Could not create %s") % self.target_path,
str(value))
return
config.set('paths.website-directory',
os.path.dirname(self.target_path) + os.sep)
if self.usecms:
config.set('paths.website-cms-uri',
os.path.dirname(self.target_uri))
if self.usecal:
config.set('paths.website-cal-uri',
os.path.dirname(self.target_cal_uri))
        # used when discovering biological, half, and step siblings for
        # display in display_ind_parents()...
self.rel_class = get_relationship_calculator(reinit=True,
clocale=self.rlocale)
#################################################
#
# Pass 0 Initialise the plug-ins
#
#################################################
# FIXME: The whole of this section of code should be implemented by the
# registration process for the Web Page plugins.
# Note that by use of a dictionary we ensure that at most one Web Page
# plugin is provided for any object class
self.tab = {}
# FIXME: Initialising self.tab in this way means that this code has to
# run before the Web Page registration - I am not sure whether this is
        # possible, in which case an alternative approach to providing the
# mapping of object class to Web Page plugin will be needed.
for obj_class in ("Person", "Family", "Source", "Citation", "Place",
"Event", "Media", "Repository"):
# FIXME: Would it be better if the Web Page plugins used a different
# base class rather than BasePage, which is really just for each web
# page
self.tab[obj_class] = BasePage(report=self, title="")
# Note that by not initialising any Web Page plugins that are not going
        # to generate pages, we ensure that there is no performance implication
# for such plugins.
self.tab["Person"] = PersonPages(self)
if self.inc_families:
self.tab["Family"] = FamilyPages(self)
if self.inc_events:
self.tab["Event"] = EventPages(self)
if self.inc_gallery:
self.tab["Media"] = MediaPages(self)
self.tab["Place"] = PlacePages(self)
self.tab["Source"] = SourcePages(self)
self.tab["Repository"] = RepositoryPages(self)
self.tab["Citation"] = CitationPages(self)
# FIXME: The following routines that are not run in two passes have not
# yet been converted to a form suitable for separation into Web Page
# plugins: SurnamePage, SurnameListPage, IntroductionPage, HomePage,
        # ThumbnailPreviewPage, DownloadPage, ContactPage, AddressBookListPage,
# AddressBookPage
#################################################
#
# Pass 1 Build the lists of objects to be output
#
#################################################
self._build_obj_dict()
#################################################
#
# Pass 2 Generate the web pages
#
#################################################
self.base_pages()
self.visited = []
# build classes IndividualListPage and IndividualPage
self.tab["Person"].display_pages(self.title)
self.build_gendex(self.obj_dict[Person])
# build classes SurnameListPage and SurnamePage
self.surname_pages(self.obj_dict[Person])
# build classes FamilyListPage and FamilyPage
if self.inc_families:
self.tab["Family"].display_pages(self.title)
# build classes EventListPage and EventPage
if self.inc_events:
self.tab["Event"].display_pages(self.title)
# build classes PlaceListPage and PlacePage
self.tab["Place"].display_pages(self.title)
# build classes RepositoryListPage and RepositoryPage
if self.inc_repository:
self.tab["Repository"].display_pages(self.title)
# build classes MediaListPage and MediaPage
if self.inc_gallery:
if not self.create_thumbs_only:
self.tab["Media"].display_pages(self.title)
# build Thumbnail Preview Page...
self.thumbnail_preview_page()
# build classes AddressBookListPage and AddressBookPage
if self.inc_addressbook:
self.addressbook_pages(self.obj_dict[Person])
# build classes SourceListPage and SourcePage
self.tab["Source"].display_pages(self.title)
# build classes StatisticsPage
self.statistics_preview_page(self.title)
        # copy all of the necessary files
self.copy_narrated_files()
        # if an archive is being used, close it
if self.archive:
self.archive.close()
if len(_WRONGMEDIAPATH) > 0:
error = '\n'.join([
_('ID=%(grampsid)s, path=%(dir)s') % {
'grampsid' : x[0],
'dir' : x[1]} for x in _WRONGMEDIAPATH[:10]])
if len(_WRONGMEDIAPATH) > 10:
error += '\n ...'
self.user.warn(_("Missing media objects:"), error)
def _build_obj_dict(self):
"""
Construct the dictionaries of objects to be included in the reports.
There are two dictionaries, which have the same structure: they are two
        level dictionaries, the first key is the class of object
(e.g. gen.lib.Person).
The second key is the handle of the object.
For the obj_dict, the value is a tuple containing the gramps_id,
the text name for the object, and the file name for the display.
For the bkref_dict, the value is a tuple containing the class of object
and the handle for the object that refers to the 'key' object.
"""
_obj_class_list = (Person, Family, Event, Place, Source, Citation,
Media, Repository, Note, Tag)
# setup a dictionary of the required structure
self.obj_dict = defaultdict(lambda: defaultdict(set))
self.bkref_dict = defaultdict(lambda: defaultdict(set))
# initialise the dictionary to empty in case no objects of any
# particular class are included in the web report
for obj_class in _obj_class_list:
self.obj_dict[obj_class] = defaultdict(set)
ind_list = self._db.iter_person_handles()
ind_list = self.filter.apply(self._db, ind_list, user=self.user)
with self.user.progress(_("Narrated Web Site Report"),
_('Constructing list of other objects...'),
sum(1 for _ in ind_list)) as step:
for handle in ind_list:
step()
self._add_person(handle, "", "")
LOG.debug("final object dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.obj_dict.items()))
LOG.debug("final backref dictionary \n" +
"".join(("%s: %s\n" % item)
for item in self.bkref_dict.items()))
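# Illustrative sketch only (not part of the report logic): after
# _build_obj_dict() has run, the two dictionaries have roughly this
# shape, using a hypothetical handle and file name:
#
#   obj_dict[Person]["a1b2c3"] == ("ppl/3/c/a1b2c3.html",
#                                  "Doe, John", "I0001")
#   bkref_dict[Person]["a1b2c3"] == {(Family, "f9e8d7", "")}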
def _add_person(self, person_handle, bkref_class, bkref_handle):
"""
Add person_handle to the obj_dict, and recursively all referenced
objects
@param: person_handle -- The handle for the person to add
@param: bkref_class -- The class associated to this handle (person)
@param: bkref_handle -- The handle associated to this person
"""
person = self._db.get_person_from_handle(person_handle)
if person:
person_name = self.get_person_name(person)
person_fname = self.build_url_fname(person_handle, "ppl",
False) + self.ext
self.obj_dict[Person][person_handle] = (person_fname, person_name,
person.gramps_id)
self.bkref_dict[Person][person_handle].add((bkref_class,
bkref_handle,
""))
############### Header section ##############
for citation_handle in person.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Name section ##############
for name in [person.get_primary_name()
] + person.get_alternate_names():
for citation_handle in name.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Events section ##############
# Now tell the events tab to display the individual events
evt_ref_list = person.get_event_ref_list()
if evt_ref_list:
for evt_ref in evt_ref_list:
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person, person_handle,
role)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
# If event pages are not being output, then tell the
# media tab to display the person's event media. If
# events are being displayed, then the media are linked
# from the event tab
if not self.inc_events:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Person,
person_handle)
############### Families section ##############
# Tell the families tab to display this individual's families
family_handle_list = person.get_family_handle_list()
if family_handle_list:
for family_handle in person.get_family_handle_list():
self._add_family(family_handle, Person, person_handle)
# Tell the events tab to display the family events which
# are referenced from the individual page.
family = self._db.get_family_from_handle(family_handle)
if family:
family_evt_ref_list = family.get_event_ref_list()
if family_evt_ref_list:
for evt_ref in family_evt_ref_list:
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(
evt_ref.ref)
if event:
self._add_event(evt_ref.ref, Person,
person_handle, "Primary")
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Person,
person_handle, event)
for cite_hdl in event.get_citation_list():
self._add_citation(cite_hdl, Person,
person_handle)
# add the family media and the family event media if the
# families page is not being displayed (If it is displayed,
# the media are linked from the families page)
if not self.inc_families:
for m_ref in event.get_media_list():
m_hdl = m_ref.get_reference_handle()
self._add_media(m_hdl, Person,
person_handle)
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle,
Person, person_handle)
if not self.inc_families:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person,
person_handle)
############### LDS Ordinance section ##############
for lds_ord in person.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Attribute section ##############
for attr in person.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Person, person_handle)
############### Address section ##############
for addr in person.get_address_list():
for addr_handle in addr.get_citation_list():
self._add_citation(addr_handle, Person, person_handle)
############### Media section ##############
# Now tell the Media tab which media objects to display
# First the person's media objects
for media_ref in person.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Person, person_handle)
def get_person_name(self, person):
"""
Return a string containing the person's primary name in the name
format chosen in the web report options
@param: person -- person object from database
"""
name_format = self.options['name_format']
primary_name = person.get_primary_name()
name = Name(primary_name)
name.set_display_as(name_format)
return _nd.display_name(name)
def _add_family(self, family_handle, bkref_class, bkref_handle):
"""
Add family to the Family object list
@param: family_handle -- The handle for the family to add
@param: bkref_class -- The class associated to this handle (family)
@param: bkref_handle -- The handle associated to this family
"""
family = self._db.get_family_from_handle(family_handle)
family_name = self.get_family_name(family)
if self.inc_families:
family_fname = self.build_url_fname(family_handle, "fam",
False) + self.ext
else:
family_fname = ""
self.obj_dict[Family][family_handle] = (family_fname, family_name,
family.gramps_id)
self.bkref_dict[Family][family_handle].add((bkref_class,
bkref_handle,
""))
if self.inc_gallery:
for media_ref in family.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### Events section ##############
for evt_ref in family.get_event_ref_list():
role = evt_ref.get_role().xml_str()
event = self._db.get_event_from_handle(evt_ref.ref)
place_handle = event.get_place_handle()
if place_handle:
self._add_place(place_handle, Family, family_handle, event)
if self.inc_events:
# details of family events are displayed on the events pages as
# well as on this family page
self._add_event(evt_ref.ref, Family, family_handle, role)
else:
# There is no event page. Family events are displayed on the
# family page, but the associated family event media may need to
# be displayed on the media page
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Family, family_handle)
############### LDS Ordinance section ##############
for lds_ord in family.get_lds_ord_list():
for citation_handle in lds_ord.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Attributes section ##############
for attr in family.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
############### Sources section ##############
for citation_handle in family.get_citation_list():
self._add_citation(citation_handle, Family, family_handle)
def get_family_name(self, family):
"""
Return a string containing the name of the family (e.g. 'Family of John
Doe and Jane Doe')
@param: family -- family object from database
"""
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
husband = self._db.get_person_from_handle(husband_handle)
else:
husband = None
if spouse_handle:
spouse = self._db.get_person_from_handle(spouse_handle)
else:
spouse = None
if husband and spouse:
husband_name = self.get_person_name(husband)
spouse_name = self.get_person_name(spouse)
title_str = self._("Family of %(husband)s and %(spouse)s"
) % {'husband' : husband_name,
'spouse' : spouse_name}
elif husband:
husband_name = self.get_person_name(husband)
# Only the name of the husband is known
title_str = self._("Family of %s") % husband_name
elif spouse:
spouse_name = self.get_person_name(spouse)
# Only the name of the wife is known
title_str = self._("Family of %s") % spouse_name
else:
title_str = ''
return title_str
def _add_event(self, event_handle, bkref_class, bkref_handle, role):
"""
Add event to the Event object list
@param: event_handle -- The handle for the event to add
@param: bkref_class -- The class associated to this handle (event)
@param: bkref_handle -- The handle associated to this event
"""
event = self._db.get_event_from_handle(event_handle)
event_name = event.get_description()
# The event description can be Y on import from GEDCOM. See the
# following quote from the GEDCOM spec: "The occurrence of an event is
# asserted by the presence of either a DATE tag and value or a PLACe tag
# and value in the event structure. When neither the date value nor the
# place value are known then a Y(es) value on the parent event tag line
# is required to assert that the event happened."
if event_name == "" or event_name is None or event_name == 'Y':
event_name = str(event.get_type())
# begin add generated descriptions to media pages
# (request 7074 : acrider)
ref_name = ""
for reference in self._db.find_backlink_handles(event_handle):
ref_class, ref_handle = reference
if ref_class == 'Person':
person = self._db.get_person_from_handle(ref_handle)
ref_name = self.get_person_name(person)
elif ref_class == 'Family':
family = self._db.get_family_from_handle(ref_handle)
ref_name = self.get_family_name(family)
if ref_name != "":
# TODO for Arabic, should the next line's comma be translated?
event_name += ", " + ref_name
# end descriptions to media pages
if self.inc_events:
event_fname = self.build_url_fname(event_handle, "evt",
False) + self.ext
else:
event_fname = ""
self.obj_dict[Event][event_handle] = (event_fname, event_name,
event.gramps_id)
self.bkref_dict[Event][event_handle].add((bkref_class, bkref_handle,
role))
############### Attribute section ##############
for attr in event.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Source section ##############
for citation_handle in event.get_citation_list():
self._add_citation(citation_handle, Event, event_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in event.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Event, event_handle)
def _add_place(self, place_handle, bkref_class, bkref_handle, event):
"""
Add place to the Place object list
@param: place_handle -- The handle for the place to add
@param: bkref_class -- The class associated to this handle (place)
@param: bkref_handle -- The handle associated to this place
"""
place = self._db.get_place_from_handle(place_handle)
if place is None:
return
if config.get('preferences.place-auto'):
place_name = _pd.display_event(self._db, event)
else:
place_name = place.get_title()
place_fname = self.build_url_fname(place_handle, "plc",
False) + self.ext
self.obj_dict[Place][place_handle] = (place_fname, place_name,
place.gramps_id, event)
self.bkref_dict[Place][place_handle].add((bkref_class, bkref_handle,
"" # no role for a place
))
############### Media section ##############
if self.inc_gallery:
for media_ref in place.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Place, place_handle)
############### Sources section ##############
for citation_handle in place.get_citation_list():
self._add_citation(citation_handle, Place, place_handle)
def _add_source(self, source_handle, bkref_class, bkref_handle):
"""
Add source to the Source object list
@param: source_handle -- The handle for the source to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
if len(self.obj_dict[Source][source_handle]) > 0:
for bkref in self.bkref_dict[Source][source_handle]:
if bkref_handle == bkref[1]:
return
source = self._db.get_source_from_handle(source_handle)
source_name = source.get_title()
#if isinstance(source_name, bytes):
# print("source name :", source_name)
source_fname = self.build_url_fname(source_handle, "src",
False) + self.ext
self.obj_dict[Source][source_handle] = (source_fname, source_name,
source.gramps_id)
self.bkref_dict[Source][source_handle].add((bkref_class,
bkref_handle,
"" # no role
))
############### Media section ##############
if self.inc_gallery:
for media_ref in source.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Source, source_handle)
############### Repository section ##############
if self.inc_repository:
for repo_ref in source.get_reporef_list():
repo_handle = repo_ref.get_reference_handle()
self._add_repository(repo_handle, Source, source_handle)
def _add_citation(self, citation_handle, bkref_class, bkref_handle):
"""
Add citation to the Citation object list
@param: citation_handle -- The handle for the citation to add
@param: bkref_class -- The class associated to this handle
@param: bkref_handle -- The handle associated to this citation
"""
if len(self.obj_dict[Citation][citation_handle]) > 0:
for bkref in self.bkref_dict[Citation][citation_handle]:
if bkref_handle == bkref[1]:
return
citation = self._db.get_citation_from_handle(citation_handle)
# If the citation page is None or empty, we still want a tuple to be
# generated for the source backreference
citation_name = citation.get_page() or ""
source_handle = citation.get_reference_handle()
self.obj_dict[Citation][citation_handle] = ("", citation_name,
citation.gramps_id)
self.bkref_dict[Citation][citation_handle].add((bkref_class,
bkref_handle,
"" # no role
))
############### Source section ##############
self._add_source(source_handle, Citation, citation_handle)
############### Media section ##############
if self.inc_gallery:
for media_ref in citation.get_media_list():
media_handle = media_ref.get_reference_handle()
self._add_media(media_handle, Citation, citation_handle)
def _add_media(self, media_handle, bkref_class, bkref_handle):
"""
Add media to the Media object list
@param: media_handle -- The handle for the media to add
@param: bkref_class -- The class associated to this handle (media)
@param: bkref_handle -- The handle associated to this media
"""
if len(self.obj_dict[Media][media_handle]) > 0:
for bkref in self.bkref_dict[Media][media_handle]:
if bkref_handle == bkref[1]:
return
media_refs = self.bkref_dict[Media].get(media_handle)
# bkref entries are (class, handle, role) triples; media have no role
if media_refs and (bkref_class, bkref_handle, "") in media_refs:
return
media = self._db.get_media_from_handle(media_handle)
# use media title (request 7074 acrider)
media_name = media.get_description()
if media_name is None or media_name == "":
media_name = "Media"
#end media title
if self.inc_gallery:
media_fname = self.build_url_fname(media_handle, "img",
False) + self.ext
else:
media_fname = ""
self.obj_dict[Media][media_handle] = (media_fname, media_name,
media.gramps_id)
self.bkref_dict[Media][media_handle].add((bkref_class, bkref_handle,
"" # no role for a media
))
############### Attribute section ##############
for attr in media.get_attribute_list():
for citation_handle in attr.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
############### Sources section ##############
for citation_handle in media.get_citation_list():
self._add_citation(citation_handle, Media, media_handle)
def _add_repository(self, repos_handle, bkref_class, bkref_handle):
"""
Add repository to the Repository object list
@param: repos_handle -- The handle for the repository to add
@param: bkref_class -- The class associated to this handle (source)
@param: bkref_handle -- The handle associated to this source
"""
if len(self.obj_dict[Repository][repos_handle]) > 0:
for bkref in self.bkref_dict[Repository][repos_handle]:
if bkref_handle == bkref[1]:
return
repos = self._db.get_repository_from_handle(repos_handle)
repos_name = repos.name
if self.inc_repository:
repos_fname = self.build_url_fname(repos_handle, "repo",
False) + self.ext
else:
repos_fname = ""
self.obj_dict[Repository][repos_handle] = (repos_fname, repos_name,
repos.gramps_id)
self.bkref_dict[Repository][repos_handle].add((bkref_class,
bkref_handle,
"" # no role
))
def copy_narrated_files(self):
"""
Copy all of the CSS, image, and javascript files for Narrated Web
"""
imgs = []
# copy screen style sheet
if CSS[self.css]["filename"]:
fname = CSS[self.css]["filename"]
self.copy_file(fname, _NARRATIVESCREEN, "css")
# copy printer style sheet
fname = CSS["Print-Default"]["filename"]
self.copy_file(fname, _NARRATIVEPRINT, "css")
# copy ancestor tree style sheet if a tree is being created
if self.ancestortree:
fname = CSS["ancestortree"]["filename"]
self.copy_file(fname, "ancestortree.css", "css")
# copy behaviour style sheet
fname = CSS["behaviour"]["filename"]
self.copy_file(fname, "behaviour.css", "css")
# copy Menu Layout Style Sheet if Blue or Visually is being
# used as the stylesheet
if CSS[self.css]["navigation"]:
if self.navigation == "Horizontal":
fname = CSS["Horizontal-Menus"]["filename"]
elif self.navigation == "Vertical":
fname = CSS["Vertical-Menus"]["filename"]
elif self.navigation == "Fade":
fname = CSS["Fade-Menus"]["filename"]
elif self.navigation == "dropdown":
fname = CSS["DropDown-Menus"]["filename"]
self.copy_file(fname, "narrative-menus.css", "css")
# copy narrative-maps Style Sheet if Place or Family Map pages
# are being created
if self.placemappages or self.familymappages:
fname = CSS["NarrativeMaps"]["filename"]
self.copy_file(fname, "narrative-maps.css", "css")
# Copy the Creative Commons icon if the Creative Commons
# license is requested
if 0 < self.copyright <= len(_CC):
imgs += [CSS["Copyright"]["filename"]]
# copy Gramps favorite icon #2
imgs += [CSS["favicon2"]["filename"]]
# we need the blank image gif needed by behaviour.css
# add the document.png file for media other than photos
imgs += CSS["All Images"]["images"]
# copy Ancestor Tree graphics if needed
if self.ancestortree:
imgs += CSS["ancestortree"]["images"]
# Anything css-specific:
imgs += CSS[self.css]["images"]
# copy all to images subdir:
for from_path in imgs:
fdir, fname = os.path.split(from_path)
self.copy_file(from_path, fname, "images")
# copy Gramps marker icon for openstreetmap
fname = CSS["marker"]["filename"]
self.copy_file(fname, "marker.png", "images")
def build_gendex(self, ind_list):
"""
Create a gendex file
@param: ind_list -- The list of person to use
"""
if self.inc_gendex:
with self.user.progress(_("Narrated Web Site Report"),
_('Creating GENDEX file'),
len(ind_list)) as step:
fp_gendex, gendex_io = self.create_file("gendex", ext=".txt")
date = 0
for person_handle in ind_list:
step()
person = self._db.get_person_from_handle(person_handle)
datex = person.get_change_time()
if datex > date:
date = datex
if self.archive:
self.write_gendex(gendex_io, person)
else:
self.write_gendex(fp_gendex, person)
self.close_file(fp_gendex, gendex_io, date)
def write_gendex(self, filep, person):
"""
Reference|SURNAME|given name /SURNAME/|date of birth|place of birth|
date of death|place of death|
* field 1: file name of web page referring to the individual
* field 2: surname of the individual
* field 3: full name of the individual
* field 4: date of birth or christening (optional)
* field 5: place of birth or christening (optional)
* field 6: date of death or burial (optional)
* field 7: place of death or burial (optional)
@param: filep -- The gendex output file name
@param: person -- The person to use for gendex file
"""
url = self.build_url_fname_html(person.handle, "ppl")
surname = person.get_primary_name().get_surname()
fullname = person.get_primary_name().get_gedcom_name()
# get birth info:
dob, pob = get_gendex_data(self._db, person.get_birth_ref())
# get death info:
dod, pod = get_gendex_data(self._db, person.get_death_ref())
linew = '|'.join((url, surname, fullname, dob, pob, dod, pod)) + '|\n'
if self.archive:
filep.write(bytes(linew, "utf8"))
else:
filep.write(linew)
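# A hypothetical example of a single GENDEX line as produced above,
# with the seven '|'-separated fields in the documented order:
#
#   ppl/3/c/a1b2c3.html|Doe|John /Doe/|1 JAN 1900|London|5 MAY 1980|York|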
def surname_pages(self, ind_list):
"""
Generate the surname-related pages from the list of individual
people.
@param: ind_list -- The list of person to use
"""
local_list = sort_people(self._db, ind_list, self.rlocale)
with self.user.progress(_("Narrated Web Site Report"),
_("Creating surname pages"),
len(local_list)) as step:
SurnameListPage(self, self.title, ind_list,
SurnameListPage.ORDER_BY_NAME,
self.surname_fname)
SurnameListPage(self, self.title, ind_list,
SurnameListPage.ORDER_BY_COUNT,
"surnames_count")
for (surname, handle_list) in local_list:
SurnamePage(self, self.title, surname, sorted(handle_list))
step()
def thumbnail_preview_page(self):
"""
creates the thumbnail preview page
"""
with self.user.progress(_("Narrated Web Site Report"),
_("Creating thumbnail preview page..."),
len(self.obj_dict[Media])) as step:
ThumbnailPreviewPage(self, self.title, step)
def statistics_preview_page(self, title):
"""
creates the statistics preview page
"""
with self.user.progress(_("Narrated Web Site Report"),
_("Creating statistics page..."),
len(self.obj_dict[Media])) as step:
StatisticsPage(self, title, step)
def addressbook_pages(self, ind_list):
"""
Create a webpage with a list of address availability for each person
and the associated individual address pages.
@param: ind_list -- The list of person to use
"""
url_addr_res = []
for person_handle in ind_list:
person = self._db.get_person_from_handle(person_handle)
addrlist = person.get_address_list()
evt_ref_list = person.get_event_ref_list()
urllist = person.get_url_list()
add = addrlist or None
url = urllist or None
res = []
for event_ref in evt_ref_list:
event = self._db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.RESIDENCE:
res.append(event)
if add or res or url:
primary_name = person.get_primary_name()
sort_name = ''.join([primary_name.get_surname(), ", ",
primary_name.get_first_name()])
url_addr_res.append((sort_name, person_handle, add, res, url))
url_addr_res.sort()
AddressBookListPage(self, self.title, url_addr_res)
# begin Address Book pages
addr_size = len(url_addr_res)
with self.user.progress(_("Narrated Web Site Report"),
_("Creating address book pages ..."),
addr_size) as step:
for (sort_name, person_handle, add, res, url) in url_addr_res:
AddressBookPage(self, self.title, person_handle, add, res, url)
step()
def base_pages(self):
"""
creates HomePage, ContactPage, DownloadPage, and IntroductionPage
if requested by options in plugin
"""
if self.use_home:
HomePage(self, self.title)
if self.inc_contact:
ContactPage(self, self.title)
if self.inc_download:
DownloadPage(self, self.title)
if self.use_intro:
IntroductionPage(self, self.title)
def build_subdirs(self, subdir, fname, uplink=False):
"""
If subdir is given, then two extra levels of subdirectory are inserted
between 'subdir' and the filename. The reason is to prevent directories
with too many entries.
For example, for subdir "ppl" this may return ["ppl", "8", "1"],
giving the path prefix "ppl/8/1/" in front of the filename.
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
If uplink is None, then "./" is prepended instead (used by
EventListPage).
"""
subdirs = []
if subdir:
subdirs.append(subdir)
subdirs.append(fname[-1].lower())
subdirs.append(fname[-2].lower())
if self.usecms:
if self.target_uri not in subdirs:
subdirs = [self.target_uri] + subdirs
else:
if uplink is True:
subdirs = ['..']*3 + subdirs
# added for use in EventListPage
elif uplink is None:
subdirs = ['.'] + subdirs
return subdirs
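# A minimal sketch of the return value, assuming a hypothetical handle
# "aec934857df74d36618" and usecms disabled:
#
#   >>> self.build_subdirs("ppl", "aec934857df74d36618")
#   ['ppl', '8', '1']
#   >>> self.build_subdirs("ppl", "aec934857df74d36618", uplink=True)
#   ['..', '..', '..', 'ppl', '8', '1']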
def build_path(self, subdir, fname, uplink=False):
"""
Return the name of the subdirectory.
Notice that we DO use os.path.join() here.
@param: subdir -- The subdirectory name to use
@param: fname -- The file name for which we need to build the path
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
return os.path.join(*self.build_subdirs(subdir, fname, uplink))
def build_url_image(self, fname, subdir=None, uplink=False):
"""
builds a url for an image
@param: fname -- The file name for which we need to build the path
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
subdirs = []
if subdir:
subdirs.append(subdir)
if self.usecms:
if self.target_uri not in subdirs:
subdirs = [self.target_uri] + subdirs
else:
if uplink:
subdirs = ['..']*3 + subdirs
nname = "/".join(subdirs + [fname])
if win():
nname = nname.replace('\\', "/")
return nname
def build_url_fname_html(self, fname, subdir=None, uplink=False):
"""
builds a url filename and adds the html extension
@param: fname -- The file name to create
@param: subdir -- The subdirectory name to use
@param: uplink -- If True, then "../../../" is inserted in front of the
result.
"""
return self.build_url_fname(fname, subdir, uplink) + self.ext
def build_link(self, prop, handle, obj_class):
"""
Build a link to an item.
@param: prop      -- Property to use for the lookup: "handle" or
"gramps_id"
@param: handle -- The handle for which we need to build a link
@param: obj_class -- The class of the related object.
"""
if prop == "gramps_id":
if obj_class in self._db.get_table_names():
obj = self._db.get_table_metadata(obj_class)[
"gramps_id_func"](handle)
if obj:
handle = obj.handle
else:
raise AttributeError("gramps_id '%s' not found in '%s'" %
handle, obj_class)
else:
raise AttributeError("invalid gramps_id lookup "
"in table name '%s'" % obj_class)
uplink = self.link_prefix_up
# handle, ppl
if obj_class == "Person":
if self.person_in_webreport(handle):
return self.build_url_fname(handle, "ppl", uplink) + self.ext
else:
return None
elif obj_class == "Source":
subdir = "src"
elif obj_class == "Place":
subdir = "plc"
elif obj_class == "Event":
subdir = "evt"
elif obj_class == "Media":
subdir = "img"
elif obj_class == "Repository":
subdir = "repo"
elif obj_class == "Family":
subdir = "fam"
else:
print("NarrativeWeb ignoring link type '%s'" % obj_class)
return None
return self.build_url_fname(handle, subdir, uplink) + self.ext
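# Hedged usage sketch, with the same hypothetical handle as above and
# assuming self.link_prefix_up is False; a Person link falls back to
# None if no page was generated for that person:
#
#   >>> self.build_link("handle", "aec934857df74d36618", "Event")
#   'evt/8/1/aec934857df74d36618.html'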
def build_url_fname(self, fname, subdir=None, uplink=False):
"""
Create part of the URL given the filename and optionally the
subdirectory. If the subdirectory is given, then two extra levels of
subdirectory are inserted between 'subdir' and the filename.
The reason is to prevent directories with too many entries.
@param: fname -- The file name to create
@param: subdir -- The subdirectory name to use
@param: uplink -- if True, then "../../../" is inserted in front of the
result.
Note that the extension is not added here; build_url_fname_html()
adds it.
Notice that we do NOT use os.path.join() because we're creating a URL.
Imagine we run gramps on Windows (heaven forbid), we don't want to
see backslashes in the URL.
"""
if not fname:
return ""
if win():
fname = fname.replace('\\', "/")
fname = fname.replace(self.target_uri + "/", "")
if self.usecms:
subdirs = self.build_subdirs(subdir, fname, False)
else:
subdirs = self.build_subdirs(subdir, fname, uplink)
return "/".join(subdirs + [fname])
def create_file(self, fname, subdir=None, ext=None):
"""
will create the file for the given filename
@param: fname -- File name to be created
@param: subdir -- A subdir to be added to filename
@param: ext -- An extension to be added to filename
"""
if ext is None:
ext = self.ext
if self.usecms and subdir is None:
self.cur_fname = os.path.join(self.target_uri, fname) + ext
else:
if subdir:
subdir = self.build_path(subdir, fname)
self.cur_fname = os.path.join(subdir, fname) + ext
else:
self.cur_fname = fname + ext
if self.archive:
string_io = BytesIO()
output_file = TextIOWrapper(string_io, encoding=self.encoding,
errors='xmlcharrefreplace')
else:
string_io = None
if subdir:
subdir = os.path.join(self.html_dir, subdir)
if not os.path.isdir(subdir):
os.makedirs(subdir)
fname = os.path.join(self.html_dir, self.cur_fname)
output_file = open(fname, 'w', encoding=self.encoding,
errors='xmlcharrefreplace')
return (output_file, string_io)
def close_file(self, output_file, string_io, date):
"""
will close any file passed to it
@param: output_file -- The output file to flush
@param: string_io -- The string IO used when we are in archive mode
@param: date -- The last modification date for this object
If we have "zero", we use the current time.
This is related to bug 8950 and very useful
when we use rsync.
"""
if self.archive:
output_file.flush()
tarinfo = tarfile.TarInfo(self.cur_fname)
tarinfo.size = len(string_io.getvalue())
tarinfo.mtime = date if date != 0 else time.time()
if not win():
tarinfo.uid = os.getuid()
tarinfo.gid = os.getgid()
string_io.seek(0)
self.archive.addfile(tarinfo, string_io)
output_file.close()
else:
output_file.close()
if date > 0:
os.utime(output_file.name, (date, date))
def prepare_copy_media(self, photo):
"""
prepares a media object to copy
@param: photo -- The photo for which we need a real path
and a thumbnail path
"""
handle = photo.get_handle()
ext = os.path.splitext(photo.get_path())[1]
real_path = os.path.join(self.build_path('images', handle),
handle + ext)
thumb_path = os.path.join(self.build_path('thumb', handle),
handle + '.png')
return real_path, thumb_path
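# Hypothetical example: for a photo with handle "b9c8d7" stored as a
# ".jpg", the two returned paths look like (os.path.join separators):
#
#   images/7/d/b9c8d7.jpg   and   thumb/7/d/b9c8d7.png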
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination.
If to_dir is not present and if the target is not an archive,
then the destination directory will be created.
@param: from_fname -- The path of the file to copy.
@param: to_fname -- Will be just a filename, without directory path.
@param: to_dir -- Is the relative path name in the destination root.
It will be prepended before 'to_fname'.
"""
if self.usecms:
to_dir = "/" + self.target_uri + "/" + to_dir
# LOG.debug("copying '%s' to '%s/%s'" % (from_fname, to_dir, to_fname))
mtime = os.stat(from_fname).st_mtime
if self.archive:
def set_mtime(tarinfo):
"""
For each file, we set the last modification time.
We could also set uid, gid, uname, gname and mode
#tarinfo.uid = os.getuid()
#tarinfo.mode = 0660
#tarinfo.uname = tarinfo.gname = "www-data"
"""
tarinfo.mtime = mtime
return tarinfo
dest = os.path.join(to_dir, to_fname)
self.archive.add(from_fname, dest, filter=set_mtime)
else:
dest = os.path.join(self.html_dir, to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
try:
shutil.copyfile(from_fname, dest)
os.utime(dest, (mtime, mtime))
except Exception:
print("Copying error: %s" % sys.exc_info()[1])
print("Continuing...")
elif self.warn_dir:
self.user.warn(
_("Possible destination error") + "\n" +
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def person_in_webreport(self, person_handle):
"""
Return the handle if we created a page for this person.
@param: person_handle -- The person we are looking for
"""
return person_handle in self.obj_dict[Person]
#################################################
#
# Creates the NarrativeWeb Report Menu Options
#
#################################################
class NavWebOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
"""
@param: name -- The name of the report
@param: dbase -- The Gramps database instance
"""
self.__db = dbase
self.__archive = None
self.__target = None
self.__target_uri = None
self.__pid = None
self.__filter = None
self.__graph = None
self.__graphgens = None
self.__living = None
self.__yearsafterdeath = None
self.__usecms = None
self.__cms_uri = None
self.__usecal = None
self.__calendar_uri = None
self.__create_thumbs_only = None
self.__mapservice = None
self.__maxinitialimageheight = None
self.__maxinitialimagewidth = None
self.__citationreferents = None
self.__incdownload = None
self.__placemappages = None
self.__familymappages = None
self.__googleopts = None
self.__googlemapkey = None
self.__ancestortree = None
self.__css = None
self.__dl_descr1 = None
self.__dl_descr2 = None
self.__down_fname2 = None
self.__gallery = None
self.__unused = None
self.__down_fname1 = None
self.__navigation = None
self.__target_cal_uri = None
self.__securesite = False
db_options = name + ' ' + dbase.get_dbname()
MenuReportOptions.__init__(self, db_options, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the web site.
@param: menu -- The menu for which we add options
"""
self.__add_report_options(menu)
self.__add_report_html(menu)
self.__add_report_display(menu)
self.__add_page_generation_options(menu)
self.__add_images_generation_options(menu)
self.__add_download_options(menu)
self.__add_advanced_options(menu)
self.__add_advanced_options_2(menu)
self.__add_place_map_options(menu)
self.__add_others_options(menu)
def __add_report_options(self, menu):
"""
Options on the "Report Options" tab.
"""
category_name = _("Report Options")
addopt = partial(menu.add_option, category_name)
self.__archive = BooleanOption(_('Store web pages in .tar.gz archive'),
False)
self.__archive.set_help(_('Whether to store the web pages in an '
'archive file'))
addopt("archive", self.__archive)
self.__archive.connect('value-changed', self.__archive_changed)
dbname = self.__db.get_dbname()
default_dir = dbname + "_" + "NAVWEB"
self.__target = DestinationOption(
_("Destination"),
os.path.join(config.get('paths.website-directory'),
default_dir))
self.__target.set_help(_("The destination directory for the web "
"files"))
addopt("target", self.__target)
self.__archive_changed()
title = StringOption(_("Web site title"), _('My Family Tree'))
title.set_help(_("The title of the web site"))
addopt("title", title)
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on web site"))
addopt("filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
addopt("pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name, default=False)
addopt = partial(menu.add_option, category_name)
def __add_report_html(self, menu):
"""
Html Options for the Report.
"""
category_name = _("Html options")
addopt = partial(menu.add_option, category_name)
ext = EnumeratedListOption(_("File extension"), ".html")
for etype in _WEB_EXT:
ext.add_item(etype, etype)
ext.set_help(_("The extension to be used for the web files"))
addopt("ext", ext)
cright = EnumeratedListOption(_('Copyright'), 0)
for index, copt in enumerate(_COPY_OPTIONS):
cright.add_item(index, copt)
cright.set_help(_("The copyright to be used for the web files"))
addopt("cright", cright)
self.__css = EnumeratedListOption(_('StyleSheet'), CSS["default"]["id"])
for (fname, gid) in sorted([(CSS[key]["translation"], CSS[key]["id"])
for key in list(CSS.keys())]):
if CSS[gid]["user"]:
self.__css.add_item(CSS[gid]["id"], CSS[gid]["translation"])
self.__css.set_help(_('The stylesheet to be used for the web pages'))
addopt("css", self.__css)
self.__css.connect("value-changed", self.__stylesheet_changed)
_nav_opts = [
(_("Horizontal -- Default"), "Horizontal"),
(_("Vertical -- Left Side"), "Vertical"),
(_("Fade -- WebKit Browsers Only"), "Fade"),
(_("Drop-Down -- WebKit Browsers Only"), "dropdown")
]
self.__navigation = EnumeratedListOption(_("Navigation Menu Layout"),
_nav_opts[0][1])
for layout in _nav_opts:
self.__navigation.add_item(layout[1], layout[0])
self.__navigation.set_help(_("Choose which layout "
"for the Navigation Menus."))
addopt("navigation", self.__navigation)
self.__stylesheet_changed()
_cit_opts = [
(_("Normal Outline Style"), "Outline"),
(_("Drop-Down -- WebKit Browsers Only"), "DropDown")
]
self.__citationreferents = EnumeratedListOption(
_("Citation Referents Layout"), _cit_opts[0][1])
for layout in _cit_opts:
self.__citationreferents.add_item(layout[1], layout[0])
self.__citationreferents.set_help(
_("Determine the default layout for the "
"Source Page's Citation Referents section"))
addopt("citationreferents", self.__citationreferents)
self.__ancestortree = BooleanOption(_("Include ancestor's tree"), True)
self.__ancestortree.set_help(_('Whether to include an ancestor '
'graph on each individual page'))
addopt("ancestortree", self.__ancestortree)
self.__ancestortree.connect('value-changed', self.__graph_changed)
self.__graphgens = NumberOption(_("Graph generations"), 4, 2, 5)
self.__graphgens.set_help(_("The number of generations to include in "
"the ancestor graph"))
addopt("graphgens", self.__graphgens)
self.__graph_changed()
self.__securesite = BooleanOption(_("This is a secure site (https)"),
False)
self.__securesite.set_help(_('Whether to use http:// or https://'))
addopt("securesite", self.__securesite)
def __add_report_display(self, menu):
"""
How to display names, dates, ...
"""
category_name = _("Display")
addopt = partial(menu.add_option, category_name)
stdoptions.add_name_format_option(menu, category_name)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
nogid = BooleanOption(_('Suppress Gramps ID'), False)
nogid.set_help(_('Whether to include the Gramps ID of objects'))
addopt("nogid", nogid)
addopt = partial(menu.add_option, category_name)
birthorder = BooleanOption(
_('Sort all children in birth order'), False)
birthorder.set_help(
_('Whether to display children in birth order or in entry order'))
addopt("birthorder", birthorder)
def __add_page_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
"""
category_name = _("Page Generation")
addopt = partial(menu.add_option, category_name)
homenote = NoteOption(_('Home page note'))
homenote.set_help(_("A note to be used on the home page"))
addopt("homenote", homenote)
homeimg = MediaOption(_('Home page image'))
homeimg.set_help(_("An image to be used on the home page"))
addopt("homeimg", homeimg)
intronote = NoteOption(_('Introduction note'))
intronote.set_help(_("A note to be used as the introduction"))
addopt("intronote", intronote)
introimg = MediaOption(_('Introduction image'))
introimg.set_help(_("An image to be used as the introduction"))
addopt("introimg", introimg)
contactnote = NoteOption(_("Publisher contact note"))
contactnote.set_help(_("A note to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactnote", contactnote)
contactimg = MediaOption(_("Publisher contact image"))
contactimg.set_help(_("An image to be used as the publisher contact."
"\nIf no publisher information is given,"
"\nno contact page will be created")
)
addopt("contactimg", contactimg)
headernote = NoteOption(_('HTML user header'))
headernote.set_help(_("A note to be used as the page header"))
addopt("headernote", headernote)
footernote = NoteOption(_('HTML user footer'))
footernote.set_help(_("A note to be used as the page footer"))
addopt("footernote", footernote)
def __add_images_generation_options(self, menu):
"""
Options on the "Page Generation" tab.
"""
category_name = _("Images Generation")
addopt = partial(menu.add_option, category_name)
self.__gallery = BooleanOption(_("Include images and media objects"),
True)
self.__gallery.set_help(_('Whether to include '
'a gallery of media objects'))
addopt("gallery", self.__gallery)
self.__gallery.connect('value-changed', self.__gallery_changed)
self.__unused = BooleanOption(
_("Include unused images and media objects"), True)
self.__unused.set_help(_('Whether to include unused or unreferenced'
' media objects'))
addopt("unused", self.__unused)
self.__create_thumbs_only = BooleanOption(
_("Create and only use thumbnail- sized images"), False)
self.__create_thumbs_only.set_help(
_("This option allows you to create only thumbnail images "
"instead of the full-sized images on the Media Page. "
"This will allow you to have a much "
"smaller total upload size to your web hosting site."))
addopt("create_thumbs_only", self.__create_thumbs_only)
self.__create_thumbs_only.connect("value-changed",
self.__gallery_changed)
self.__maxinitialimagewidth = NumberOption(
_("Max width of initial image"), _DEFAULT_MAX_IMG_WIDTH, 0, 2000)
self.__maxinitialimagewidth.set_help(
_("This allows you to set the maximum width "
"of the image shown on the media page. Set to 0 for no limit."))
addopt("maxinitialimagewidth", self.__maxinitialimagewidth)
self.__maxinitialimageheight = NumberOption(
_("Max height of initial image"), _DEFAULT_MAX_IMG_HEIGHT, 0, 2000)
self.__maxinitialimageheight.set_help(
_("This allows you to set the maximum height "
"of the image shown on the media page. Set to 0 for no limit."))
addopt("maxinitialimageheight", self.__maxinitialimageheight)
self.__gallery_changed()
def __add_download_options(self, menu):
"""
Options for the download tab ...
"""
category_name = _("Download")
addopt = partial(menu.add_option, category_name)
self.__incdownload = BooleanOption(_("Include download page"), False)
self.__incdownload.set_help(
_('Whether to include a database download option'))
addopt("incdownload", self.__incdownload)
self.__incdownload.connect('value-changed', self.__download_changed)
self.__down_fname1 = DestinationOption(
_("Download Filename"),
os.path.join(config.get('paths.website-directory'), ""))
self.__down_fname1.set_help(
_("File to be used for downloading of database"))
addopt("down_fname1", self.__down_fname1)
self.__dl_descr1 = StringOption(_("Description for download"),
_('Smith Family Tree'))
self.__dl_descr1.set_help(_('Give a description for this file.'))
addopt("dl_descr1", self.__dl_descr1)
self.__down_fname2 = DestinationOption(
_("Download Filename"),
os.path.join(config.get('paths.website-directory'), ""))
self.__down_fname2.set_help(
_("File to be used for downloading of database"))
addopt("down_fname2", self.__down_fname2)
self.__dl_descr2 = StringOption(_("Description for download"),
_('Johnson Family Tree'))
self.__dl_descr2.set_help(_('Give a description for this file.'))
addopt("dl_descr2", self.__dl_descr2)
self.__download_changed()
def __add_advanced_options(self, menu):
"""
Options on the "Advanced" tab.
"""
category_name = _("Advanced Options")
addopt = partial(menu.add_option, category_name)
encoding = EnumeratedListOption(_('Character set encoding'),
_CHARACTER_SETS[0][1])
for eopt in _CHARACTER_SETS:
encoding.add_item(eopt[1], eopt[0])
encoding.set_help(_("The encoding to be used for the web files"))
addopt("encoding", encoding)
linkhome = BooleanOption(
_('Include link to active person on every page'), False)
linkhome.set_help(
_('Include a link to the active person (if they have a webpage)'))
addopt("linkhome", linkhome)
showbirth = BooleanOption(
_("Include a column for birth dates on the index pages"), True)
showbirth.set_help(_('Whether to include a birth column'))
addopt("showbirth", showbirth)
showdeath = BooleanOption(
_("Include a column for death dates on the index pages"), False)
showdeath.set_help(_('Whether to include a death column'))
addopt("showdeath", showdeath)
showpartner = BooleanOption(_("Include a column for partners on the "
"index pages"), False)
showpartner.set_help(_('Whether to include a partners column'))
menu.add_option(category_name, 'showpartner', showpartner)
showparents = BooleanOption(_("Include a column for parents on the "
"index pages"), False)
showparents.set_help(_('Whether to include a parents column'))
addopt("showparents", showparents)
showallsiblings = BooleanOption(
_("Include half and/ or step-siblings on the individual pages"),
False)
showallsiblings.set_help(
_("Whether to include half and/ or "
"step-siblings with the parents and siblings"))
addopt('showhalfsiblings', showallsiblings)
def __add_advanced_options_2(self, menu):
"""
Continue options on the "Advanced" tab.
"""
category_name = _("Include")
addopt = partial(menu.add_option, category_name)
inc_families = BooleanOption(_("Include family pages"), False)
inc_families.set_help(_("Whether or not to include family pages."))
addopt("inc_families", inc_families)
inc_events = BooleanOption(_('Include event pages'), False)
inc_events.set_help(
_('Add a complete events list and relevant pages or not'))
addopt("inc_events", inc_events)
inc_repository = BooleanOption(_('Include repository pages'), False)
inc_repository.set_help(
_('Whether or not to include the Repository Pages.'))
addopt("inc_repository", inc_repository)
inc_gendex = BooleanOption(
_('Include GENDEX file (/gendex.txt)'), False)
inc_gendex.set_help(_('Whether to include a GENDEX file or not'))
addopt("inc_gendex", inc_gendex)
inc_addressbook = BooleanOption(_("Include address book pages"), False)
inc_addressbook.set_help(_("Whether or not to add Address Book pages, "
"which can include e-mail and website "
"addresses and personal address/residence "
"events."))
addopt("inc_addressbook", inc_addressbook)
def __add_place_map_options(self, menu):
"""
options for the Place Map tab.
"""
category_name = _("Place Map Options")
addopt = partial(menu.add_option, category_name)
mapopts = [
[_("OpenStreetMap"), "OpenStreetMap"],
[_("Google"), "Google"]]
self.__mapservice = EnumeratedListOption(_("Map Service"),
mapopts[0][1])
for trans, opt in mapopts:
self.__mapservice.add_item(opt, trans)
self.__mapservice.set_help(_("Choose your choice of map service for "
"creating the Place Map Pages."))
self.__mapservice.connect("value-changed", self.__placemap_options)
addopt("mapservice", self.__mapservice)
self.__placemappages = BooleanOption(
_("Include Place map on Place Pages"), False)
self.__placemappages.set_help(
_("Whether to include a place map on the Place Pages, "
"where Latitude/ Longitude are available."))
self.__placemappages.connect("value-changed", self.__placemap_options)
addopt("placemappages", self.__placemappages)
self.__familymappages = BooleanOption(_("Include Family Map Pages with "
"all places shown on the map"),
False)
self.__familymappages.set_help(
_("Whether or not to add an individual page map "
"showing all the places on this page. "
"This will allow you to see how your family "
"traveled around the country."))
self.__familymappages.connect("value-changed", self.__placemap_options)
addopt("familymappages", self.__familymappages)
googleopts = [
(_("Family Links"), "FamilyLinks"),
(_("Drop"), "Drop"),
(_("Markers"), "Markers")]
self.__googleopts = EnumeratedListOption(_("Google/FamilyMap Option"),
googleopts[0][1])
for trans, opt in googleopts:
self.__googleopts.add_item(opt, trans)
self.__googleopts.set_help(
_("Select which option that you would like "
"to have for the Google Maps Family Map pages..."))
addopt("googleopts", self.__googleopts)
self.__googlemapkey = StringOption(_("Google maps API key"), "")
self.__googlemapkey.set_help(_("The API key used for the Google maps"))
addopt("googlemapkey", self.__googlemapkey)
self.__placemap_options()
def __add_others_options(self, menu):
"""
Options for the cms tab, web calendar inclusion, php ...
"""
category_name = _("Other inclusion (CMS, Web Calendar, Php)")
addopt = partial(menu.add_option, category_name)
self.__usecms = BooleanOption(
_("Do we include these pages in a cms web ?"), False)
addopt("usecms", self.__usecms)
default_dir = "/NAVWEB"
self.__cms_uri = DestinationOption(_("URI"),
os.path.join(
config.get(
'paths.website-cms-uri'),
default_dir))
self.__cms_uri.set_help(
_("Where do you place your web site ? default = /NAVWEB"))
self.__cms_uri.connect('value-changed', self.__cms_uri_changed)
addopt("cmsuri", self.__cms_uri)
self.__cms_uri_changed()
self.__usecal = BooleanOption(
_("Do we include the web calendar ?"), False)
addopt("usecal", self.__usecal)
default_calendar = "/WEBCAL"
self.__calendar_uri = DestinationOption(_("URI"),
os.path.join(
config.get('paths.website'
'-cal-uri'),
default_calendar))
self.__calendar_uri.set_help(
_("Where do you place your web site ? default = /WEBCAL"))
self.__calendar_uri.connect('value-changed',
self.__calendar_uri_changed)
addopt("caluri", self.__calendar_uri)
self.__calendar_uri_changed()
def __cms_uri_changed(self):
"""
Update the change of storage: archive or directory
"""
self.__target_uri = self.__cms_uri.get_value()
def __calendar_uri_changed(self):
"""
Update the change of storage: Where is the web calendar?
Possible cases :
1 - /WEBCAL (relative URI to the navweb site)
2 - http://mysite.org/WEBCAL (URL is on another website)
3 - //mysite.org/WEBCAL (URL depends on the protocol used)
"""
self.__target_cal_uri = self.__calendar_uri.get_value()
def __archive_changed(self):
"""
Update the change of storage: archive or directory
"""
if self.__archive.get_value() is True:
self.__target.set_extension(".tar.gz")
self.__target.set_directory_entry(False)
else:
self.__target.set_directory_entry(True)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
filter_list = utils.get_person_filters(person, include_single=False)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value == 0: # "Entire Database" (as "include_single=False")
self.__pid.set_available(False)
else:
# The other filters need a center person (assume custom ones too)
self.__pid.set_available(True)
def __stylesheet_changed(self):
"""
Handles the changing nature of the stylesheet
"""
css_opts = self.__css.get_value()
if CSS[css_opts]["navigation"]:
self.__navigation.set_available(True)
else:
self.__navigation.set_available(False)
self.__navigation.set_value("Horizontal")
def __graph_changed(self):
"""
Handle enabling or disabling the ancestor graph
"""
self.__graphgens.set_available(self.__ancestortree.get_value())
def __gallery_changed(self):
"""
Handles the changing nature of gallery
"""
_gallery_option = self.__gallery.get_value()
_create_thumbs_only_option = self.__create_thumbs_only.get_value()
# images and media objects to be used, make all options available...
if _gallery_option:
self.__create_thumbs_only.set_available(True)
self.__maxinitialimagewidth.set_available(True)
self.__maxinitialimageheight.set_available(True)
# thumbnail-sized images only...
if _create_thumbs_only_option:
self.__maxinitialimagewidth.set_available(False)
self.__maxinitialimageheight.set_available(False)
# full-sized images and Media Pages will be created...
else:
self.__maxinitialimagewidth.set_available(True)
self.__maxinitialimageheight.set_available(True)
# no images or media objects are to be used...
else:
self.__create_thumbs_only.set_available(False)
self.__maxinitialimagewidth.set_available(False)
self.__maxinitialimageheight.set_available(False)
def __download_changed(self):
"""
Handles the changing nature of include download page
"""
if self.__incdownload.get_value():
self.__down_fname1.set_available(True)
self.__dl_descr1.set_available(True)
self.__down_fname2.set_available(True)
self.__dl_descr2.set_available(True)
else:
self.__down_fname1.set_available(False)
self.__dl_descr1.set_available(False)
self.__down_fname2.set_available(False)
self.__dl_descr2.set_available(False)
def __placemap_options(self):
"""
Handles the changing nature of the place map Options
"""
# get values for all Place Map Options tab...
place_active = self.__placemappages.get_value()
family_active = self.__familymappages.get_value()
mapservice_opts = self.__mapservice.get_value()
#google_opts = self.__googleopts.get_value()
if place_active or family_active:
self.__mapservice.set_available(True)
else:
self.__mapservice.set_available(False)
if family_active and mapservice_opts == "Google":
self.__googleopts.set_available(True)
else:
self.__googleopts.set_available(False)
if (place_active or family_active) and mapservice_opts == "Google":
self.__googlemapkey.set_available(True)
else:
self.__googlemapkey.set_available(False)
# See : http://www.gramps-project.org/bugs/view.php?id=4423
# Contraction data taken from CLDR 22.1. Only the default variant is considered.
# The languages included below are, by no means, all the languages that have
# contractions - just a sample of languages that have been supported.
# At the time of writing (Feb 2013), the following languages have greater than
# 50% coverage of translation of Gramps: bg Bulgarian, ca Catalan, cs Czech, da
# Danish, de German, el Greek, en_GB, es Spanish, fi Finnish, fr French, he
# Hebrew, hr Croatian, hu Hungarian, it Italian, ja Japanese, lt Lithuanian, nb
# Norwegian Bokmål, nn Norwegian Nynorsk, nl Dutch, pl Polish, pt_BR Portuguese
# (Brazil), pt_PT Portuguese (Portugal), ru Russian, sk Slovak, sl Slovenian, sv
# Swedish, vi Vietnamese, zh_CN Chinese.
# Key is the language (or language and country), Value is a list of
# contractions. Each contraction consists of a tuple. First element of the
# tuple is the list of characters, second element is the string to use as the
# index entry.
# The DUCET contractions (e.g. LATIN CAPITAL LETTER L, MIDDLE DOT) are ignored,
# as are the suppressed contractions in some locales.
CONTRACTIONS_DICT = {
# bg Bulgarian validSubLocales="bg_BG" no contractions
# ca Catalan validSubLocales="ca_AD ca_ES"
"ca" : [(("l·", "L·"), "L")],
# Czech, validSubLocales="cs_CZ" Czech_Czech Republic
"cs" : [(("ch", "cH", "Ch", "CH"), "CH")],
# Danish validSubLocales="da_DK" Danish_Denmark
"da" : [(("aa", "Aa", "AA"), "Å")],
# de German validSubLocales="de_AT de_BE de_CH de_DE de_LI de_LU" no
# contractions in standard collation.
# el Greek validSubLocales="el_CY el_GR" no contractions.
# es Spanish validSubLocales="es_419 es_AR es_BO es_CL es_CO es_CR es_CU
# es_DO es_EA es_EC es_ES es_GQ es_GT es_HN es_IC es_MX es_NI es_PA es_PE
# es_PH es_PR es_PY es_SV es_US es_UY es_VE" no contractions in standard
# collation.
# fi Finnish validSubLocales="fi_FI" no contractions in default (phonebook)
# collation.
# fr French no collation data.
# he Hebrew validSubLocales="he_IL" no contractions
# hr Croatian validSubLocales="hr_BA hr_HR"
"hr" : [(("dž", "Dž"), "dž"),
(("lj", "Lj", 'LJ'), "LJ"),
(("Nj", "NJ", "nj"), "NJ")],
# Hungarian hu_HU for two and three character contractions.
"hu" : [(("cs", "Cs", "CS"), "CS"),
(("dzs", "Dzs", "DZS"), "DZS"), # order is important
(("dz", "Dz", "DZ"), "DZ"),
(("gy", "Gy", "GY"), "GY"),
(("ly", "Ly", "LY"), "LY"),
(("ny", "Ny", "NY"), "NY"),
(("sz", "Sz", "SZ"), "SZ"),
(("ty", "Ty", "TY"), "TY"),
(("zs", "Zs", "ZS"), "ZS")
],
# it Italian no collation data.
# ja Japanese unable to process the data as it is too complex.
# lt Lithuanian no contractions.
# Norwegian Bokmål
"nb" : [(("aa", "Aa", "AA"), "Å")],
# nn Norwegian Nynorsk validSubLocales="nn_NO"
"nn" : [(("aa", "Aa", "AA"), "Å")],
# nl Dutch no collation data.
# pl Polish validSubLocales="pl_PL" no contractions
# pt Portuguese no collation data.
# ru Russian validSubLocales="ru_BY ru_KG ru_KZ ru_MD ru_RU ru_UA" no
# contractions
# Slovak, validSubLocales="sk_SK" Slovak_Slovakia
# having DZ in Slovak as a contraction was rejected in
# http://unicode.org/cldr/trac/ticket/2968
"sk" : [(("ch", "cH", "Ch", "CH"), "Ch")],
# sl Slovenian validSubLocales="sl_SI" no contractions
# sv Swedish validSubLocales="sv_AX sv_FI sv_SE" default collation is
# "reformed" no contractions.
# vi Vietnamese validSubLocales="vi_VN" no contractions.
# zh Chinese validSubLocales="zh_Hans zh_Hans_CN zh_Hans_SG" no contractions
# in Latin characters the others are too complex.
}
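# A minimal sketch (an assumption, not code used by the plugin) of how an
# entry of CONTRACTIONS_DICT could drive the choice of an index letter
# for a surname list; the helper name index_letter() is hypothetical:
#
#   def index_letter(surname, lang):
#       for chars, letter in CONTRACTIONS_DICT.get(lang, []):
#           if surname.startswith(tuple(chars)):
#               return letter
#       return surname[:1].upper()
#
#   index_letter("Csokonai", "hu")  # -> 'CS'
#   index_letter("Chalupka", "sk")  # -> 'Ch'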
# The comment below from the glibc locale sv_SE in
# localedata/locales/sv_SE :
#
# % The letter w is normally not present in the Swedish alphabet. It
# % exists in some names in Swedish and foreign words, but is accounted
# % for as a variant of 'v'. Words and names with 'w' are in Swedish
# % ordered alphabetically among the words and names with 'v'. If two
# % words or names are only to be distinguished by 'v' or 'w', 'v' is
# % placed before 'w'.
#
# See : http://www.gramps-project.org/bugs/view.php?id=2933
#
# HOWEVER: the characters V and W in Swedish are not considered as a special
# case for several reasons. (1) The default collation for Swedish (called the
# 'reformed' collation type) regards the difference between 'v' and 'w' as a
# primary difference. (2) 'v' and 'w' in the 'standard' (non-default) collation
# type are not a contraction, just a case where the difference is secondary
# rather than primary. (3) There are plenty of other languages where a
# difference that is primary in other languages is secondary, and those are not
# specially handled.
|
jralls/gramps
|
gramps/plugins/webreport/narrativeweb.py
|
Python
|
gpl-2.0
| 96,157
|
[
"Brian"
] |
324b35e4fce8ef42c53d6e832089111c6824522b57ad0396e625d16123d04110
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various equations of state.
Note: Most of the code was initially adapted from ASE and deltafactor by
@gmatteo but has since undergone major refactoring.
"""
import logging
import warnings
from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from scipy.optimize import leastsq, minimize
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt, pretty_plot
__author__ = "Kiran Mathew, gmatteo"
__credits__ = "Cormac Toher"
logger = logging.getLogger(__file__)
class EOSBase(metaclass=ABCMeta):
"""
    Abstract class that must be subclassed by all equation of state
implementations.
"""
def __init__(self, volumes, energies):
"""
Args:
volumes (list/numpy.array): volumes in Ang^3
energies (list/numpy.array): energy in eV
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
        # minimum energy(e0), bulk modulus(b0),
# derivative of bulk modulus wrt pressure(b1), minimum volume(v0)
self._params = None
# the eos function parameters. It is the same as _params except for
# equation of states that uses polynomial fits(deltafactor and
# numerical_eos)
self.eos_params = None
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b / (2 * a)
e0 = a * (v0 ** 2) + b * v0 + c
b0 = 2 * a * v0
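        # (For the quadratic fit E(V) = a*V**2 + b*V + c: B0 = V0 * E''(V0) = 2*a*V0.)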
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
        if not vmin < v0 < vmax:
            raise EOSError("The minimum volume of a fitted parabola is not in the input volumes.\n")
return e0, b0, b1, v0
def fit(self):
"""
Do the fitting. Does least square fitting. If you want to use custom
fitting, must override this.
"""
# the objective function that will be minimized in the least square
# fitting
self._params = self._initial_guess()
self.eos_params, ierr = leastsq(
lambda pars, x, y: y - self._func(x, pars),
self._params,
args=(self.volumes, self.energies),
)
# e0, b0, b1, v0
self._params = self.eos_params
if ierr not in [1, 2, 3, 4]:
raise EOSError("Optimal parameters not found")
@abstractmethod
def _func(self, volume, params):
"""
The equation of state function. This must be implemented by all classes
that derive from this abstract class.
Args:
volume (float/numpy.array)
params (list/tuple): values for the parameters other than the
volume used by the eos.
"""
pass
def func(self, volume):
"""
        The equation of state function with the parameters other than volume set
to the ones obtained from fitting.
Args:
volume (list/numpy.array)
Returns:
numpy.array
"""
return self._func(np.array(volume), self.eos_params)
def __call__(self, volume):
"""
Args:
volume (): Volume
Returns:
Compute EOS with this volume.
"""
return self.func(volume)
@property
def e0(self):
"""
Returns the min energy.
"""
return self._params[0]
@property
def b0(self):
"""
Returns the bulk modulus.
        Note: the unit of the bulk modulus is energy/volume (e.g. eV/Ang^3).
"""
return self._params[1]
@property
def b0_GPa(self):
"""
Returns the bulk modulus in GPa.
Note: This assumes that the energy and volumes are in eV and Ang^3
respectively
"""
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
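        # (1 eV/Ang^3 = 160.21766 GPa, so b0_GPa is ~160.2 * b0 for eV/Ang^3 inputs.)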
@property
def b1(self):
"""
Returns the derivative of bulk modulus wrt pressure(dimensionless)
"""
return self._params[2]
@property
def v0(self):
"""
Returns the minimum or the reference volume in Ang^3.
"""
return self._params[3]
@property
def results(self):
"""
Returns a summary dict.
Returns:
dict
"""
return dict(e0=self.e0, b0=self.b0, b1=self.b1, v0=self.v0)
def plot(self, width=8, height=None, plt=None, dpi=None, **kwargs):
"""
Plot the equation of state.
Args:
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width *
golden ratio.
plt (matplotlib.pyplot): If plt is supplied, changes will be made
to an existing plot. Otherwise, a new plot will be created.
            dpi (int): resolution of the plot in dots per inch.
kwargs (dict): additional args fed to pyplot.plot.
supported keys: style, color, text, label
Returns:
Matplotlib plot object.
"""
# pylint: disable=E1307
plt = pretty_plot(width=width, height=height, plt=plt, dpi=dpi)
color = kwargs.get("color", "r")
label = kwargs.get("label", f"{self.__class__.__name__} fit")
lines = [
"Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
f"Bulk modulus = {self.b0:1.2f} eV/Ang^3 = {self.b0_GPa:1.2f} GPa",
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1,
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
plt.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
plt.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
plt.grid(True)
plt.xlabel("Volume $\\AA^3$")
plt.ylabel("Energy (eV)")
plt.legend(loc="best", shadow=True)
# Add text with fit parameters.
plt.text(0.4, 0.5, text, transform=plt.gca().transAxes)
return plt
@add_fig_kwargs
def plot_ax(self, ax=None, fontsize=12, **kwargs):
"""
Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
            text (str): Legend text (optional)
Returns:
Matplotlib figure object.
"""
# pylint: disable=E1307
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", f"{self.__class__.__name__} fit")
lines = [
"Equation of State: %s" % self.__class__.__name__,
"Minimum energy = %1.2f eV" % self.e0,
"Minimum or reference volume = %1.2f Ang^3" % self.v0,
f"Bulk modulus = {self.b0:1.2f} eV/Ang^3 = {self.b0_GPa:1.2f} GPa",
"Derivative of bulk modulus wrt pressure = %1.2f" % self.b1,
]
text = "\n".join(lines)
text = kwargs.get("text", text)
# Plot input data.
ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color)
# Plot eos fit.
vmin, vmax = min(self.volumes), max(self.volumes)
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
vfit = np.linspace(vmin, vmax, 100)
ax.plot(vfit, self.func(vfit), linestyle="dashed", color=color, label=label)
ax.grid(True)
ax.set_xlabel("Volume $\\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(loc="best", shadow=True)
# Add text with fit parameters.
ax.text(
0.5,
0.5,
text,
fontsize=fontsize,
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
)
return fig
class Murnaghan(EOSBase):
"""
Murnaghan EOS.
"""
def _func(self, volume, params):
"""
From PRB 28,5480 (1983)
"""
e0, b0, b1, v0 = tuple(params)
return e0 + b0 * volume / b1 * (((v0 / volume) ** b1) / (b1 - 1.0) + 1.0) - v0 * b0 / (b1 - 1.0)
class Birch(EOSBase):
"""
Birch EOS.
"""
def _func(self, volume, params):
"""
From Intermetallic compounds: Principles and Practice, Vol. I:
Principles Chapter 9 pages 195-210 by M. Mehl. B. Klein,
D. Papaconstantopoulos.
case where n=0
"""
e0, b0, b1, v0 = tuple(params)
return (
e0
+ 9.0 / 8.0 * b0 * v0 * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 2
+ 9.0 / 16.0 * b0 * v0 * (b1 - 4.0) * ((v0 / volume) ** (2.0 / 3.0) - 1.0) ** 3
)
class BirchMurnaghan(EOSBase):
"""
BirchMurnaghan EOS
"""
def _func(self, volume, params):
"""
BirchMurnaghan equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (v0 / volume) ** (1.0 / 3.0)
return e0 + 9.0 * b0 * v0 / 16.0 * (eta ** 2 - 1) ** 2 * (6 + b1 * (eta ** 2 - 1.0) - 4.0 * eta ** 2)
class PourierTarantola(EOSBase):
"""
PourierTarantola EOS
"""
def _func(self, volume, params):
"""
Pourier-Tarantola equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
squiggle = -3.0 * np.log(eta)
return e0 + b0 * v0 * squiggle ** 2 / 6.0 * (3.0 + squiggle * (b1 - 2))
class Vinet(EOSBase):
"""
Vinet EOS.
"""
def _func(self, volume, params):
"""
Vinet equation from PRB 70, 224107
"""
e0, b0, b1, v0 = tuple(params)
eta = (volume / v0) ** (1.0 / 3.0)
return e0 + 2.0 * b0 * v0 / (b1 - 1.0) ** 2 * (
2.0 - (5.0 + 3.0 * b1 * (eta - 1.0) - 3.0 * eta) * np.exp(-3.0 * (b1 - 1.0) * (eta - 1.0) / 2.0)
)
class PolynomialEOS(EOSBase):
"""
Derives from EOSBase. Polynomial based equations of states must subclass
this.
"""
def _func(self, volume, params):
return np.poly1d(list(params))(volume)
def fit(self, order):
"""
Do polynomial fitting and set the parameters. Uses numpy polyfit.
Args:
order (int): order of the fit polynomial
"""
self.eos_params = np.polyfit(self.volumes, self.energies, order)
self._set_params()
def _set_params(self):
"""
Use the fit polynomial to compute the parameter e0, b0, b1 and v0
and set to the _params attribute.
"""
fit_poly = np.poly1d(self.eos_params)
        # the volume at min energy, used as the initial guess for the
# optimization wrt volume.
v_e_min = self.volumes[np.argmin(self.energies)]
# evaluate e0, v0, b0 and b1
min_wrt_v = minimize(fit_poly, v_e_min)
e0, v0 = min_wrt_v.fun, min_wrt_v.x[0]
pderiv2 = np.polyder(fit_poly, 2)
pderiv3 = np.polyder(fit_poly, 3)
b0 = v0 * np.poly1d(pderiv2)(v0)
db0dv = np.poly1d(pderiv2)(v0) + v0 * np.poly1d(pderiv3)(v0)
        # b1 = dB/dP = (dB/dV)/(dP/dV) = -(v0/b0) * dB/dV at v0, since dP/dV = -B/V
b1 = -v0 * db0dv / b0
self._params = [e0, b0, b1, v0]
class DeltaFactor(PolynomialEOS):
"""
Fitting a polynomial EOS using delta factor.
"""
def _func(self, volume, params):
x = volume ** (-2.0 / 3.0)
return np.poly1d(list(params))(x)
def fit(self, order=3):
"""
        Overridden since this eos works with x = volume**(-2/3) instead of volume.
"""
x = self.volumes ** (-2.0 / 3.0)
self.eos_params = np.polyfit(x, self.energies, order)
self._set_params()
def _set_params(self):
"""
        Overridden to account for the fact that the fit is performed with
        x = volume**(-2/3) instead of volume.
"""
deriv0 = np.poly1d(self.eos_params)
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x ** (-3.0 / 2.0)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4.0 / 9.0 * x ** 5.0 * deriv2(x)
derivV3 = -20.0 / 9.0 * x ** (13.0 / 2.0) * deriv2(x) - 8.0 / 27.0 * x ** (15.0 / 2.0) * deriv3(x)
b0 = derivV2 / x ** (3.0 / 2.0)
b1 = -1 - x ** (-3.0 / 2.0) * derivV3 / derivV2
# e0, b0, b1, v0
self._params = [deriv0(v0 ** (-2.0 / 3.0)), b0, b1, v0]
class NumericalEOS(PolynomialEOS):
"""
A numerical EOS.
"""
def fit(self, min_ndata_factor=3, max_poly_order_factor=5, min_poly_order=2):
"""
Fit the input data to the 'numerical eos', the equation of state employed
in the quasiharmonic Debye model described in the paper:
10.1103/PhysRevB.90.174107.
credits: Cormac Toher
Args:
min_ndata_factor (int): parameter that controls the minimum number
of data points that will be used for fitting.
minimum number of data points =
total data points-2*min_ndata_factor
max_poly_order_factor (int): parameter that limits the max order
of the polynomial used for fitting.
max_poly_order = number of data points used for fitting -
max_poly_order_factor
min_poly_order (int): minimum order of the polynomial to be
considered for fitting.
"""
warnings.simplefilter("ignore", np.RankWarning)
def get_rms(x, y):
return np.sqrt(np.sum((np.array(x) - np.array(y)) ** 2) / len(x))
# list of (energy, volume) tuples
e_v = list(zip(self.energies, self.volumes))
ndata = len(e_v)
# minimum number of data points used for fitting
ndata_min = max(ndata - 2 * min_ndata_factor, min_poly_order + 1)
rms_min = np.inf
# number of data points available for fit in each iteration
ndata_fit = ndata
# store the fit polynomial coefficients and the rms in a dict,
# where the key=(polynomial order, number of data points used for
# fitting)
all_coeffs = {}
# sort by energy
e_v = sorted(e_v, key=lambda x: x[0])
# minimum energy tuple
e_min = e_v[0]
# sort by volume
e_v = sorted(e_v, key=lambda x: x[1])
# index of minimum energy tuple in the volume sorted list
emin_idx = e_v.index(e_min)
# the volume lower than the volume corresponding to minimum energy
v_before = e_v[emin_idx - 1][1]
# the volume higher than the volume corresponding to minimum energy
v_after = e_v[emin_idx + 1][1]
e_v_work = deepcopy(e_v)
# loop over the data points.
while (ndata_fit >= ndata_min) and (e_min in e_v_work):
max_poly_order = ndata_fit - max_poly_order_factor
e = [ei[0] for ei in e_v_work]
v = [ei[1] for ei in e_v_work]
# loop over polynomial order
for i in range(min_poly_order, max_poly_order + 1):
coeffs = np.polyfit(v, e, i)
pder = np.polyder(coeffs)
a = np.poly1d(pder)(v_before)
b = np.poly1d(pder)(v_after)
if a * b < 0:
rms = get_rms(e, np.poly1d(coeffs)(v))
rms_min = min(rms_min, rms * i / ndata_fit)
all_coeffs[(i, ndata_fit)] = [coeffs.tolist(), rms]
# store the fit coefficients small to large,
# i.e a0, a1, .. an
all_coeffs[(i, ndata_fit)][0].reverse()
# remove 1 data point from each end.
e_v_work.pop()
e_v_work.pop(0)
ndata_fit = len(e_v_work)
logger.info(f"total number of polynomials: {len(all_coeffs)}")
norm = 0.0
fit_poly_order = ndata
# weight average polynomial coefficients.
weighted_avg_coeffs = np.zeros((fit_poly_order,))
# combine all the filtered polynomial candidates to get the final fit.
for k, v in all_coeffs.items():
# weighted rms = rms * polynomial order / rms_min / ndata_fit
weighted_rms = v[1] * k[0] / rms_min / k[1]
weight = np.exp(-(weighted_rms ** 2))
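            # Smaller weighted rms -> weight near 1; poorer fits are
            # suppressed exponentially before the coefficients are averaged.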
norm += weight
coeffs = np.array(v[0])
# pad the coefficient array with zeros
coeffs = np.lib.pad(coeffs, (0, max(fit_poly_order - len(coeffs), 0)), "constant")
weighted_avg_coeffs += weight * coeffs
# normalization
weighted_avg_coeffs /= norm
weighted_avg_coeffs = weighted_avg_coeffs.tolist()
# large to small(an, an-1, ..., a1, a0) as expected by np.poly1d
weighted_avg_coeffs.reverse()
self.eos_params = weighted_avg_coeffs
self._set_params()
class EOS:
"""
Convenient wrapper. Retained in its original state to ensure backward
compatibility.
Fit equation of state for bulk systems.
The following equations are supported::
murnaghan: PRB 28, 5480 (1983)
birch: Intermetallic compounds: Principles and Practice, Vol I:
Principles. pages 195-210
birch_murnaghan: PRB 70, 224107
pourier_tarantola: PRB 70, 224107
vinet: PRB 70, 224107
deltafactor
numerical_eos: 10.1103/PhysRevB.90.174107.
Usage::
eos = EOS(eos_name='murnaghan')
eos_fit = eos.fit(volumes, energies)
eos_fit.plot()
"""
MODELS = {
"murnaghan": Murnaghan,
"birch": Birch,
"birch_murnaghan": BirchMurnaghan,
"pourier_tarantola": PourierTarantola,
"vinet": Vinet,
"deltafactor": DeltaFactor,
"numerical_eos": NumericalEOS,
}
def __init__(self, eos_name="murnaghan"):
"""
Args:
eos_name (str): Type of EOS to fit.
"""
if eos_name not in self.MODELS:
raise EOSError(
"The equation of state '{}' is not supported. "
"Please choose one from the following list: {}".format(eos_name, list(self.MODELS.keys()))
)
self._eos_name = eos_name
self.model = self.MODELS[eos_name]
def fit(self, volumes, energies):
"""
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
"""
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
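# Illustrative usage sketch (the volume/energy values below are made up):
#
#     volumes = [13.7, 14.2, 14.7, 15.2, 15.7]               # Ang^3
#     energies = [-56.20, -56.29, -56.33, -56.32, -56.27]    # eV
#     eos_fit = EOS(eos_name="birch_murnaghan").fit(volumes, energies)
#     print(eos_fit.results)   # dict with keys e0, b0, b1, v0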
class EOSError(Exception):
"""
Error class for EOS fitting.
"""
pass
|
vorwerkc/pymatgen
|
pymatgen/analysis/eos.py
|
Python
|
mit
| 19,652
|
[
"ASE",
"pymatgen"
] |
d743109f79f506c65a0161af4c4fef98d7b651308cbdaa118f16fa214389c883
|
"""
The GLEAM REsidual Source IDentifier program
Created by:
Robin Cook
March 24 2016
Modifications by:
Robin Cook
"""
# Imports
import sys
import os
import numpy as np
import math
import time
# Other Imports
import scipy
import astropy
from astropy.table import Table
import ntpath
from optparse import OptionParser
from Tkinter import Tk
from tkFileDialog import askopenfilename, askdirectory
from progressbar import ProgressBar, Bar, Percentage
# Imports for get_gleam()
from gleam_vo_example import GleamVoProxy, download_file
import pyvo
# Hardcoding
IDR_version = "4"
def choose(files):
"""
Given a list of filenames, this function will list all filenames in a formatted order and prompt the user to select a file
<param: filenames> - a list of filenames
<return: filename> - the chosen filename
"""
print " ** Multiple ({0}) files found ** ".format(len(files))
for kk in range(0,len(files)): print " {0} - {1}".format(kk+1,files[kk])
while True:
choice = raw_input("\n >> Choose file: ")
try:
choice = int(choice)
if (choice >= 1 and choice <= len(files)):
                return files[choice-1]
else:
print " ** ERROR: input out of bounds ** "
except ValueError:
print " ** ERROR: invalid input ** "
def find_file(search_path):
"""
Find .fits file in directory.
<param: search_path> - The name of the fits file (and path) to be searched for
<return: file path>
"""
dir, in_filename = ntpath.split(search_path)
if (verbose): print "\n <Searching for .fits file>\n ** Searching for '{0}' in .../{1[0]}/{1[1]} **".format(filename,dir.split("\\")[-2:])
found_files = []
for filename in os.listdir(dir):
if (in_filename in filename):
found_files.append(filename)
if (len(found_files) == 1): # if only one appropriate file found
if (verbose): print " ** Found .fits file: {0} **".format(found_files[0])
file = "{0}\\{1}".format(dir,found_files[0])
elif (len(found_files) > 1): # if multiple appropriate files found
file = "{0}\\{1}".format(dir,choose(found_files))
else: # if no appropriate files found
if (verbose): print " ** \"{0}\" not found in .../{1[0]}/{1[1]} **".format(in_filename,dir.split("\\")[-2:])
file = ""
return file
def get_position(ra,dec,ang_diam):
"""
get position parameters from user.
<param: ra> - right ascension
<param: dec> - declination
<param: ang_diam> - angular diameter
<return: [ra,dec,ang_diam]> - right ascension, declination and angular diameter after validity checks
"""
if (verbose): print "\n <Checking positional parameters>\n"
# Check if RA, DEC, and Angular diameter are given, if not: enter now
if (ra == None): # no RA given
while (True):
try:
ra = float(raw_input("\n>> Enter Right Ascension: "))
if (ra >= 0.0 and ra < 360.0): break
else: print "\n ** WARNING: RA out of bounds (0 < RA < 360)**"
except ValueError: print "\n ** ERROR: invalid input **"
if (dec == None): # no DEC given
while (True):
try:
dec = float(raw_input("\n>> Enter Declination: "))
if (dec >= -90.0 and dec <= +90.0): break
else: print "\n ** WARNING: DEC out of bounds (-90 < DEC < +90) **"
except ValueError: print "\n ** ERROR: invalid input **"
if (ang_diam == None): # no angular diameter given
while (True):
try:
ang_diam = float(raw_input("\n>> Enter Angular Diameter: "))
if (ang_diam > 0.0 and ang_diam < 5.0): break
else: print "\n ** WARNING: Angular diameter out of bounds (ang. diameter < 5 degrees) **"
except ValueError: print "\n ** ERROR: invalid input **"
return (ra,dec,ang_diam)
def find_gal_file(gals_dir,galaxy,ra,dec,ang_diam,freq):
"""
Firstly, find whether or not the given galaxy name exists within the galaxies directory - if it does not exists, ask the user whether they wish to create a new galaxy directory and set this as the directory to look for files in.
If the galaxy's directory does exist, set this as the directory to look for files within. Secondly search for a .fits file with the key words 'cutout' and the specified frequency. If it does exist, return the path.
    If the GLEAM cutout file does not exist (as is the case when a new galaxy directory has been created), prompt the user whether they wish to download the cutout. If yes, use the RA, DEC, angular diameter and frequency specified in ReSId.py options, else ask the user for RA, DEC and angular diameter; note, the default frequency is 'wide'.
<param: gals_dir> - The directory in which to look for galaxy folders
<param: galaxy> - The name of the fits file to be searched for
<param: RA> - right ascension of the map
<param: DEC> - declination of the map
<param: ang_diam> - angular diameter of the fits image
<param: freq> - the frequency of the image
:return: The .fits file name and path
"""
if (verbose): print "\n <Searching for .fits file>\n\n ** Searching for galaxy: '{0}' **".format(galaxy)
# dir = "C:\\Users\\user\\OneDrive\\Documents\\Uni\\2016 - Semester 1\\Physics Dissertation\\Dwarf Spheroidal Galaxies\\Images\\" # root directory for DSph galaxy images
# look for directories with the name of the galaxy given
catch = False
for dir_name in os.listdir(gals_dir): # iterate over all galaxy directories
if (catch): break
if (dir_name == galaxy):
if (verbose): print " ** Found directory: '{0}' ** \n".format(galaxy)
dir = "{0}\\{1}".format(gals_dir,galaxy)
catch = True; break
if (catch == False):
print "\n ** WARNING: no directory \"{0}\" found in \"{1}\"**\n ".format(galaxy,gals_dir)
while True:
choice = str(raw_input("\n >> Make new galaxy directory \"{0}\" (y/n)?: ".format(galaxy)))
if ("y" in choice.lower()): # make new directory for this folder
os.system("mkdir \"{0}\"".format(dir+"\\"+galaxy))
if (verbose): print " ** Directory: '{0}' has been created. **".format(galaxy)
dir = "{0}\\{1}".format(gals_dir,galaxy)
catch = True; break
elif ("n" in choice.lower()): # don't make new directory -> ABORT
if (verbose): print "\n -- ABORTING -- "; exit()
# look for files in galaxy directory with 'cutout' in their name
if (verbose): print " ** Searching for appropriate 'GLEAM_cutout' in '.../{0}' ** ".format(dir.split("\\")[-1])
found_files = []
for filename in os.listdir(dir):
if ("cutout" in filename and freq in filename):
found_files.append(filename)
if (len(found_files) == 1): # if only one appropriate file found
if (verbose): print " ** Found .fits file: {0} **".format(found_files[0])
file = "{0}\\{1}".format(dir,found_files[0])
elif (len(found_files) > 1): # if multiple appropriate files found
print " ** Multiple ({0}) files found ** ".format(len(found_files))
file = "{0}\\{1}".format(dir,choose(found_files))
else: # if no appropriate files found
print " ** WARNING: No GLEAM_cutout_.fits files found in '{0}' directory **".format(galaxy)
while True:
choice = str(raw_input(">> Download cutout(y/n)?: "))
if ("y" in choice.lower()): # download cutout
ra, dec, ang_diam = get_position(ra,dec,ang_diam)
if (verbose): print " ** Downloading GLEAM cutout for \"{0}\" using parameters: **\n - RA: {1}\n - DEC: {2}\n - Angular diameter: {3}\n - Frequency: {4} MHz".format(galaxy,ra,dec,ang_diam,freq)
DL_file = get_cutout(ra, dec, freq, ang_diam, download_dir=dir, listf=False)
file = "{0}\\GLEAM_cutout_{1}_{2}.fits".format(dir,freq,galaxy)
os.system("rename \"{0}\" \"GLEAM_cutout_{1}_{2}.fits\"".format(DL_file,freq,galaxy))
break
elif ("n" in choice.lower()): # don't download cutout, return None
file = ""; break
else:
print "\n ** ERROR: invalid input ** "
return file
def check_for_file(dir, RA, DEC, ang_diam, in_freq="N/A"):
"""
Check whether a file already exists in current directory
:param dir: path for where to search for pre existing data tables
:param RA: right ascension of the map
:param DEC: declination of the map
:param ang_diam: angular diameter of the fits image
:param in_freq: required frequency of the map
:return filename: the filename and path of the found file
"""
if (verbose): print "\n <Searching for pre-existing {0} data file> ".format("chunk" if in_freq=="N/A" else "snippet")
# Searching for filenames of form "GLEAM_[chunk/snippet]_{RA}_{DEC}_{ang_diam}_{freq?}.fits" .
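    # e.g. (illustrative names) "GLEAM_chunk_210.0_-27.5_4.0.fits" or
    # "GLEAM_snippet_210.0_-27.5_4.0_151.fits"; the parsing below splits on "_".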
found_filenames = []
for filename in os.listdir(dir):
if (in_freq=="N/A"): # user is looking for a GLEAM_chunk
if (".fits" in filename and "chunk" in filename):
(RA_file,DEC_file,ang_diam_file) = filename.replace(".fits","").split("_")[2:]
RA_file = float(RA_file); DEC_file = float(DEC_file); ang_diam_file = float(ang_diam_file)
if (RA - ang_diam >= RA_file - ang_diam_file and RA + ang_diam <= RA_file + ang_diam_file and DEC - ang_diam >= DEC_file - ang_diam_file and DEC + ang_diam <= DEC_file + ang_diam_file):
found_filenames.append(filename)
else: # user is looking for a GLEAM_snippet
if (".fits" in filename and "snippet" in filename):
(RA_file,DEC_file,ang_diam_file,freq_file) = filename.replace(".fits","").split("_")[2:6]
RA_file = float(RA_file); DEC_file = float(DEC_file); ang_diam_file = float(ang_diam_file); freq_file = str(freq_file)
if (RA - ang_diam >= RA_file - ang_diam_file and RA + ang_diam <= RA_file + ang_diam_file and DEC - ang_diam >= DEC_file - ang_diam_file and DEC + ang_diam <= DEC_file + ang_diam_file and freq_file == in_freq):
found_filenames.append(filename)
if (len(found_filenames) == 1):
if (verbose): print " ** Found pre-existing file '{0}' ** ".format(found_filenames[0])
return dir + "\\" + found_filenames[0]
    elif (len(found_filenames) > 1):
        print " ** Multiple ({0}) pre-existing files found ** ".format(len(found_filenames))
        filename = dir + "\\" + choose(found_filenames)
        return filename
    else:
        print " ** WARNING: no appropriate files found - Returning 'None' ** "
        return None
# run chunk creation process
def get_frequency(freq):
"""
Standardizes the input frequency to one that ReSId can understand. Accepts many different input formats for frequency.
:param freq: frequency input by the user
:return:
freq: for numerical comparison
"""
if (verbose): print "\n <Finding frequency range>"
if ('mhz' in freq.lower()): freq = freq.lower().replace('mhz','')
if (len(freq) == 2): # check if this is a valid 2 digit input, if so add a zero prefix
try:
if (int(freq) < 72): print " ** WARNING: input frequency out of range **\n -- ABORTING -- "; exit()
else: freq = '0'+freq
except ValueError:
print " ** WARNING: input frequency not a number **"
if (freq.lower() in ['red','r']): freq = 'red'
if (freq.lower() in ['green','g']): freq = 'green'
if (freq.lower() in ['blue','b']): freq = 'blue'
if (freq.lower() in ['white','deep','wide','w']): freq = 'wide'
return freq
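# Illustrative behaviour of get_frequency under the rules above:
#   get_frequency("151MHz") -> "151"
#   get_frequency("95")     -> "095"  (two-digit inputs are zero-padded)
#   get_frequency("W")      -> "wide"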
def log(last_fits_file,last_gal_dir,last_gal_name,last_catalogue_file,last_Aegean_dir):
"""
Log usage history information to a file for later reference
<param: last_fits_file> - most recently used .fits file.
<param: last_gal_dir> - most recently used directory for accessing galaxies.
<param: last_gal_name> - most recently used galaxy.
    <param: last_catalogue_file> - most recently used directory for accessing the catalogue of sources.
<param: last_Aegean_dir> - most recently used directory for reference Aegean programs.
<return: None>
"""
if (verbose): print "\n <Logging usage to \"ReSId_log.txt\">"
log_file = open("ReSId_log.txt",'w')
log_file.write("########################################\n## Residual Source Identifier (ReSId) ##\n########################################\n")
# date-time
log_file.write("\n** last used ReSId.py **\n{0}\n".format(time.strftime("%c")))
# most recents directories
log_file.write("\n** last .fits file path **\n{0}\n".format(last_fits_file))
log_file.write("\n** last dSph galaxies directory **\n{0}\n".format(last_gal_dir))
log_file.write("\n** last dSph galaxy name **\n{0}\n".format(last_gal_name))
log_file.write("\n** last catalogue file path **\n{0}\n".format(last_catalogue_file))
log_file.write("\n** last AEGEAN directory **\n{0}\n".format(last_Aegean_dir))
# print #EOF
log_file.write("\n#EOF")
log_file.close()
def read_log():
"""
Reads usage history information from ReSId_log.txt
<return: [last_fit,last_gal_dir,last_gal_name,last_catalogue]>
"""
try:
file = open("ReSId_log.txt",'r')
line = file.readline()
while ("#EOF" not in line):
line = file.readline()
if ("last .fits file path" in line): last_fits_file = file.readline().replace('\n','')
elif ("last dSph galaxies directory" in line): last_gal_dir = file.readline().replace('\n','')
elif ("last dSph galaxy name" in line): last_gal_name = file.readline().replace('\n','')
elif ("last catalogue file path" in line): last_catalogue_file = file.readline().replace('\n','')
elif ("last AEGEAN directory" in line): last_Aegean_dir = file.readline().replace('\n','')
        file.close()
        return [last_fits_file,last_gal_dir,last_gal_name,last_catalogue_file,last_Aegean_dir]
except IOError:
if (verbose): print "\n ** No previous ReSId_log.txt file found **"
return [""]*5
def read_data(filename, RA, DEC, ang_diam, head): # ** Now REDUNDANT! **
"""
Read data from file.
Take input data of sources and convert to a table
data format: [[col1,col2,col3,...],[col1,col2,col3,...],[col1,col2,col3,...],...,n_rows]
dict format: {0:Names,1:Background_deep,...,34:int_flux_84,...,n_columns}
:param filename: name of the input filename
:param RA: right ascension of the map
:param DEC: declination of the map
:param ang_diam: angular diameter of the fits image
:param head: path for where to search for pre existing data tables
:return: The data given from the input table in an astropy Table
"""
# function should check for existing filename outside of this function, i.e. in main()
if (verbose): print '\n <Reading in data>'
# this needs some cleaning up
found_filename = check_for_file(head,RA,DEC,ang_diam)
if (found_filename != None): filename = found_filename
if verbose: print "\n ** Using input data file: **\n "+ filename
in_data = Table.read(filename)
return in_data
def extract_sources(catalogue_file, in_RA, in_DEC, ang_diam, freq, dir, base):
"""
Find sources that are positioned with the dimensions of the image specified.
<param: catalogue_file> - path to the catalogue file
<param: in_RA> - right ascension of the image
<param: in_DEC> - declination of the image
<param: ang_diam> - angular diameter of the image
<param: freq> - the central frequency of the image
<param: dir> - path to destination folder
<param: base> - base name for output file
<return> - the name of the catalogue file produced.
"""
if (verbose): print "\n <Extracting sources> \n"
# calculate the position bounds
try:
RA_min = in_RA - (0.5*ang_diam*math.sqrt(2.0))/(abs(math.cos(math.radians(in_DEC))))
RA_max = in_RA + (0.5*ang_diam*math.sqrt(2.0))/(abs(math.cos(math.radians(in_DEC))))
except ZeroDivisionError:
RA_min = 0.0
RA_max = 360.0
DEC_min = in_DEC - 0.5*ang_diam*math.sqrt(2)
DEC_max = in_DEC + 0.5*ang_diam*math.sqrt(2)
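    # The 0.5*ang_diam*sqrt(2) half-width covers the image diagonal; the RA
    # half-width is further stretched by 1/|cos(DEC)| so the window keeps a
    # fixed on-sky size (e.g. at DEC = 60 deg the RA window doubles).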
RA_underflow = False; RA_overflow = False
if (RA_min < 0.0): # i.e. RA overlaps 0h
RA_min_over = 360.0 + RA_min
RA_max_over = 360.0
RA_min = 0.0
RA_underflow = True
elif (RA_max > 360.0): # i.e. RA overlaps 24h
RA_min_over = 0.0
RA_max_over = RA_max - 360.0
RA_max = 360.0
RA_overflow = True
else:
RA_min_over = 0.0
RA_max_over = 0.0
#stilts_path = "C:\\Users\\user\\Documents\\TopCat\\stilts.jar"
stilts_path = "/Users/rcook/bin/stilts.jar"
input_fmt = "fits"
output_fmt = "csv"
out_file="{0}\\GLEAM_catalogue_{1}.{2}".format(dir,base,output_fmt)
# use stilts tpipe to extract all sources from GLEAM catalogue that fall within RA and DEC constraints + rename columns for Aegean
if (verbose): print "\n <Using 'stilts tpipe' to extract sources from GLEAMIDR{0}.fits>".format(IDR_version)
os.system("java -jar {1} tpipe ifmt={2} omode=out out=sources_temp.txt ofmt={4} "
"cmd='select \"(DEJ2000 > {5} && DEJ2000 < {6} && ((RAJ2000 >= {7} && RAJ2000 <= {8}) || (RAJ2000 >= {9} && RAJ2000 <= {10})))\"' "
"cmd='replacecol -name background background_{0} (background_{0})' "
"cmd='replacecol -name local_rms local_rms_{0} (local_rms_{0})' "
"cmd='replacecol -name ra_str ra_str (ra_str)' "
"cmd='replacecol -name dec_str dec_str (dec_str)' "
"cmd='replacecol -name ra RAJ2000 (RAJ2000)' "
"cmd='replacecol -name err_ra err_RAJ2000 (err_RAJ2000)' "
"cmd='replacecol -name dec DEJ2000 (DEJ2000)' "
"cmd='replacecol -name err_dec err_DEJ2000 (err_DEJ2000)' "
"cmd='replacecol -name peak_flux peak_flux_{0} (peak_flux_{0})' "
"cmd='replacecol -name err_peak_flux err_peak_flux_{0} (err_peak_flux_{0})' "
"cmd='replacecol -name int_flux int_flux_{0} (int_flux_{0})' "
"cmd='replacecol -name err_int_flux err_int_flux_{0} (err_int_flux_{0})' "
"cmd='replacecol -name a a_{0} (a_{0})' "
#"cmd='replacecol -name err_a _{0} (_{0})' "
"cmd='replacecol -name b b_{0} (b_{0})' "
#"cmd='replacecol -name err_b_{0} (_{0})' "
"cmd='replacecol -name pa pa_{0} (pa_{0})' "
#"cmd='replacecol -name err_pa _{0} (_{0})' "
# "cmd='replacecol -name flags _{0} (_{0})' "
"cmd='replacecol -name residual_mean residual_mean_{0} (residual_mean_{0})' "
"cmd='replacecol -name residual_std residual_std_{0} (residual_std_{0})' "
#"cmd='replacecol -name uuid _{0} (_{0})' "
"cmd='replacecol -name psf_a psf_a_{0} (psf_a_{0})' "
"cmd='replacecol -name psf_b psf_b_{0} (psf_b_{0})' "
"cmd='replacecol -name psf_pa psf_pa_{0} (psf_pa_{0})' "
"{11}".format(freq, stilts_path, input_fmt, out_file, output_fmt, DEC_min, DEC_max, RA_min, RA_max, RA_min_over, RA_max_over, catalogue_file))
# create a table using Astropy which contains the missing columns in GLEAMIDRn.fits, i.e. island, source, err_a, err_b, err_pa, flags, uuid
if (verbose): print " <Creating temporary island table>"
num_rows = len(Table.read("sources_temp.txt",format='csv'))
Table([[kk for kk in range(1,num_rows+1)]]+[[0 for kk in range(num_rows)]]*6,names=("island","source","err_a","err_b","err_pa","flags","uuid")).write("islands_temp.txt",format='csv')
# join the GLEAM data and the false columns
if (verbose): print " <Joining islands table to source data table>"
os.system("java -jar {0} tjoin nin=2 ifmt1={1} in1=sources_temp.txt ifmt2=csv in2=islands_temp.txt ofmt=csv out=joined_temp.txt".format(stilts_path,output_fmt))
# output catalogue with Aegean appropriate column names and ordering
if (verbose): print " <Rearranging columns>"
os.system("java -jar {0} tpipe ifmt={1} omode=out ofmt={1} out={2} "
"cmd='keepcols \"island source background local_rms ra_str dec_str ra err_ra dec err_dec peak_flux err_peak_flux int_flux err_int_flux a err_a b err_b pa err_pa flags residual_mean residual_std psf_a psf_b psf_pa\"' "
"joined_temp.txt".format(stilts_path,output_fmt,out_file))
if (verbose): print " <Deleting temporary files>"
os.system("rm sources_temp.txt islands_temp.txt joined_temp.txt") # remove temporary files
if (verbose):
        if (RA_underflow): print "\n ** Position bounds: \n - RA: {0} -> {1}\n - DEC: {2} -> {3}\n ** Number of sources found: {4}".format(RA_min_over,RA_max,DEC_min,DEC_max,num_rows)
        elif (RA_overflow): print "\n ** Position bounds: \n - RA: {0} -> {1}\n - DEC: {2} -> {3}\n ** Number of sources found: {4}".format(RA_min,RA_max_over,DEC_min,DEC_max,num_rows)
        else: print "\n ** Position bounds: \n - RA: {0} -> {1}\n - DEC: {2} -> {3}\n ** Number of sources found: {4}".format(RA_min,RA_max,DEC_min,DEC_max,num_rows)
return out_file
def calc_peak_flux (a,b,psf_a,psf_b,int_flux,err_int_flux):
"""
    Calculates the peak flux of the source using the equation: peak_flux = int_flux x (psf_a x psf_b)/(a x b)
:param a: source FWHM semi-major axis
:param b: source FWHM semi-minor axis
    :param psf_a: synthesized beam FWHM semi-major axis
    :param psf_b: synthesized beam FWHM semi-minor axis
:param int_flux: integrated flux of the source
:param err_int_flux: error in the integrated flux of the source
:return: peak_flux and err_peak_flux
"""
# now redundant -> IDR4 has peak fluxes given.
peak_flux = ((psf_a*psf_b)/(a*b))*int_flux
err_peak_flux = (peak_flux/int_flux)*err_int_flux
return [peak_flux, err_peak_flux]
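# Worked example for calc_peak_flux (illustrative numbers): a = 150", b = 120",
# psf_a = 135", psf_b = 110", int_flux = 1.0 Jy gives
# peak_flux = (135*110)/(150*120) * 1.0 = 0.825 Jy, and err_peak_flux is
# scaled by the same 0.825 factor.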
def to_Aegean_table(in_data, c_freq, RA, DEC, ang_diam, head):
"""
configures source data into a format that Aegean/AeRes can understand.
<param: source_data> - data arrays for sources constrained by RA and DEC values.
<param: c_freq> - central frequency of the data
<param: RA> - right ascension of the image
<param: DEC> - declination of the image
<param: ang_diam> - angular diameter of the image
<param: head> - The path for the data 'snippet'; i.e. the Aegean formatted single frequency column table of the in_data
<return: catalogue_csv_file> - the path to the csv catalogue file
"""
# now redundant -> STILTS will rename and format names
num_sources = len(in_data)
out_data = Table()
# Aegean requirement information
out_data['island'] = [kk for kk in range(1,num_sources+1)] #island: 1 -> num_sources
out_data['source'] = [0]*num_sources # source = 0
# Background rms information
out_data['background'] = in_data['background_'+c_freq] # background noise for this frequency band
out_data['local_rms'] = in_data['local_rms_'+c_freq] # local rms for this frequency band
# Positional information
out_data['ra_str'] = in_data['ra_str']; out_data['dec_str'] = in_data['dec_str'] # RA/DEC Strings
out_data['ra'] = in_data['RAJ2000']; out_data['err_ra'] = in_data['err_RAJ2000'] # RA + RA_err
out_data['dec'] = in_data['DEJ2000']; out_data['err_dec'] = in_data['err_DEJ2000'] # DEC + DEC_err
# Peak and integrated flux data
# peak_flux_arr = [0]*num_sources; err_peak_flux_arr = [0]*num_sources
# for ii in range(0,num_sources):
# (peak_flux_arr[ii], err_peak_flux_arr[ii]) = calc_peak_flux(in_data['a_'+c_freq][ii],in_data['b_'+c_freq][ii],in_data['psf_a_'+c_freq][ii],in_data['psf_b_'+c_freq][ii],in_data['int_flux_'+c_freq][ii],in_data['err_fit_flux_'+c_freq][ii])
out_data['peak_flux'] = in_data['peak_flux_'+c_freq]
out_data['err_peak_flux'] = in_data['err_peak_flux_'+c_freq]
out_data['int_flux'] = in_data['int_flux_'+c_freq]
out_data['err_int_flux'] = in_data['err_int_flux_'+c_freq]
# Source shape information
out_data['a'] = in_data['a_'+c_freq]
out_data['err_a'] = 0.0 # no a_err (8GHz) given in GLEAMIDR4.fits
out_data['b'] = in_data['b_'+c_freq]
out_data['err_b'] = 0.0 # no b_err (8GHz) given in GLEAMIDR4.fits
out_data['pa'] = in_data['pa_'+c_freq]
out_data['err_pa'] = 0.0 # no pa_err (8GHz) given in GLEAMIDR4.fits
# Flags information -> not in IDR4
out_data['flags'] = 0
# Residuals information
out_data['residual_mean'] = in_data['residual_mean_'+c_freq]
out_data['residual_std'] = in_data['residual_std_'+c_freq]
# uuid information
out_data['uuid'] = ['None']*num_sources
# psf information
out_data['psf_a'] = in_data['psf_a_'+c_freq]
out_data['psf_b'] = in_data['psf_b_'+c_freq]
out_data['psf_pa'] = in_data['psf_pa_'+c_freq]
filename = head+"\\"+"GLEAM_snippet_"+str(RA)+"_"+str(DEC)+"_"+str(ang_diam)+'_'+c_freq+'.fits'
if (verbose): print "\n ** Writing source data snippet to file: ** \n ", filename
out_data.write(filename,format='fits')
return filename
def to_catalogue_table(filename):
"""
Writes Aegean table to catalogue format (.csv) for plotting
<param: filename> - the filename of the catalogue
<return: N/A>
"""
# now redundant -> STILTS creates .csv table
catalogue_filename = filename.replace(".fits","_catalogue.csv")
if (verbose): print "\n <Writing catalogue to '.../{0[0]}/{0[1]}'>".format(catalogue_filename.split("\\")[-2:])
data = Table.read(filename)
data.write(catalogue_filename,format='ascii.csv')
def run_Aegean(fits_name, wide_catalogue_file, ra, dec, ang_diam, freq, path, Aegean_path):
"""
Runs Aegean.py source finding program by Paul Hancock. Outputs tables associated with the priorized source finding of the input .fits image
<param: fits_name> - the input .fits filename to have sources detected with
<param: wide_catalogue_file> - the input table of sources to be used in the priorized fitting
<param: ra> - right ascension of the image
<param: dec> - declination of the image
<param: ang_diam> - angular diameter of the image
<param: freq> - central frequency of the data
<param: path> - the path to the input source table
<param: Aegean_path> - the path to the directory with Aegean programs within
<return: None>
"""
if (verbose): print "\n <Running Aegean.py>\n"
out_filename = "GLEAM_catalogue_{0}_{1}_{2}_{3}".format(ra,dec,ang_diam,freq)
input_filename = wide_catalogue_file.replace(".fits","")
if (verbose): print "python \"{0}\\Aegean.py\" --input=\"{1}\" --priorized=1 --table=\"{2}\\{3}.fits\",\"{2}\\{3}.reg\" --telescope=MWA \"{4}\" ".format(Aegean_path,wide_catalogue_file,path,out_filename,fits_name)
os.system("python \"{0}\\Aegean.py\" --input=\"{1}\" --priorized=1 --table=\"{2}\\{3}.fits\",\"{2}\\{3}.reg\" --telescope=MWA \"{4}\" ".format(Aegean_path,wide_catalogue_file,path,out_filename,fits_name))
#os.system('python ' + '"'+'C:\\Users\\user\\OneDrive\Documents\\Uni\\2016 - Semester 1\\Physics Dissertation\\Aegean\\Aegean-master\\Aegean.py'+'"' + ' --input='+'"'+input_table_name+'"' + ' --priorized=1' + ' --table='+'"'+head+"\\"+out_filename+'.fits'+'","'+head+"\\"+out_filename+'.reg'+'"' + ' --telescope=MWA ' + '"'+input_fits_name+'"')
def run_AeRes(fits_file, catalogue_file, path, base, Aegean_path):
"""
Runs AeRes.py source subtracting program by Paul Hancock. Outputs a .fits image of the original image with the specified sources subtracted
<param: fits_file> - the fits file to have sources subtracted from
<param: catalogue_file> - the source catalogue of relevant extracted sources in Aegean format
<param: path> - the path to the input source table
<param: base> - the base name of the file #not implemented
<param: Aegean_path> - the path to the directory with Aegean programs within
<return: None>
"""
if (verbose): print "\n <Running AeRes.py>\n"
if (verbose): print "python \"{0}\\Aeres.py\" -c \"{1}\" -f \"{2}\" -r \"{3}\\GLEAM_residual_{4}.fits\"".format(Aegean_path,catalogue_file,fits_file,path,base)
os.system("python \"{0}\\Aeres.py\" -c \"{1}\" -f \"{2}\" -r \"{3}\\GLEAM_residual_{4}.fits\"".format(Aegean_path,catalogue_file,fits_file,path,base))
#os.system('python ' + '"'+'C:\\Users\\user\\OneDrive\Documents\\Uni\\2016 - Semester 1\\Physics Dissertation\\Aegean\\Aegean-master\\AeRes.py'+'"' + ' -c ' + '"'+catalogue_filename+'"' + ' -f ' + '"'+fits_filename+'"' + ' -r ' + '"'+path+'\\GLEAM_residual_'+c_freq+'_'+base+'.fits"')
def run_BANE(fits_filename,Aegean_path):
"""
Runs Bane.py background and rms generator program by Paul Hancock. Outputs {bkg,rms}.fits files from the input .fits image.
<param: fits_filename> - the file name of the fits file to have sources subtracted from
<param: Aegean_path> - the path to the directory with Aegean programs within
<return: None>
"""
if (verbose): print "\n <Running BANE.py>"
print "python \"{0}\\BANE.py\" \"{1}\"".format(Aegean_path,fits_filename)
os.system("python \"{0}\\BANE.py\" \"{1}\"".format(Aegean_path,fits_filename))
#os.system('python ' + '"'+'C:\\Users\\user\\OneDrive\Documents\\Uni\\2016 - Semester 1\\Physics Dissertation\\Aegean\\Aegean-master\\BANE.py'+'"' + ' ' + '"'+fits_file+'"')
def get_cutout(ra, dec, freq, size=4.0, download_dir=None, listf=False):
"""
Automatically download GLEAM images from the postage stamp server using the template code that Chen has written.
This function was written in majority by Paul Hancock, Aug-2015.
<param: ra> - the centre RA of the map
<param: dec> - the centre DEC of the map
<param: freq> - central frequency of map; usage in file rename
<param: size> - the angular diameter of the map
<param: download_dir> - Directory for which to save .fits image to
<param: listf> - True/False depending on whether one wishes to print frequency list or not.
    <return: filename> - the file name of the downloaded .fits file
"""
if (verbose): print "\n <Downloading .fits file>"
    freq_ref = {'076': '072-080', '084': '080-088', '092': '088-095',
                '099': '095-103', '107': '103-111', '115': '111-118',
                '122': '118-126', '130': '126-134', '143': '139-147',
                '151': '147-154', '158': '154-162', '166': '162-170',
                '174': '170-177', '181': '177-185', '189': '185-193',
                '197': '193-200', '204': '200-208', '212': '208-216',
                '220': '216-223', '227': '223-231',
                'red': '072-103', 'green': '103-134',
                'blue': '139-170', 'wide': '170-231'}
try:
freq_band = freq_ref[freq]
except KeyError: # this should actually be handled by get_frequency()
print " ** WARNING: no frequency '{0}' found **\n Available frequencies: ".format(freq)
for ii in freq_ref: print " - {0}".format(ii)
while True:
choice = str(raw_input("\n >> Choose frequency: "))
            if (choice in freq_ref): freq_band = freq_ref[choice]; break
else: print "\n ** ERROR: invalid choice ** "
gvp = GleamVoProxy() # start the gleam proxy // gvp = GleamVoProxy(p_port=7799)
gvp.start()
# check if downloads file exists, if not -> create it
if (os.path.exists(download_dir) == False):
os.system("md \"{0}\"".format(download_dir))
if (download_dir and (not os.path.exists(download_dir))):
print "Invalid download dir: {0}".format(download_dir)
return
from pyvo.dal import sia
svc = sia.SIAService(gvp.access_url) #start Simple Image Access service
pos = (ra, dec) # position
images = svc.search(pos, size)
if listf:
print "Available freq ranges are:"
for img in images:
print img.get('freq')
return
for img in images:
        # for each matched image, download or print its frequency and access url
freq = img.get('freq')
# only process the frequencies of interest
if not freq in freq_band:
continue
print ' ** Downloading **'
url = img.acref
if (download_dir):
download_file(url, ra, dec, freq, download_dir)
else:
print freq, url
if (verbose): print "rename \"{0}\\{1}_{2}_{3}.fits\" \"GLEAM_cutout_{1}_{2}_{4}_{3}.fits\"".format(download_dir,ra,dec,freq_band,size)
os.system("rename \"{0}\\{1}_{2}_{3}.fits\" \"GLEAM_cutout_{1}_{2}_{4}_{3}.fits\"".format(download_dir,ra,dec,freq_band,size))
return "{0}\\GLEAM_cutout_{1}_{2}_{4}_{3}.fits".format(download_dir,ra,dec,freq_band,size)
gvp.stop()
def main():
usage = "usage: %prog [options] "
parser = OptionParser(usage=usage)
parser.add_option('-n', '--fits_file',
action='store',type='string',dest='fits_file',default=None,
help=".fits file path", metavar="FITS_FILE")
parser.add_option('-g','--galaxy',
action='store', type='string', dest='galaxy_name',default=None,
help="The name of the Dwarf galaxy",metavar="GALAXY_NAME")
parser.add_option('-q', '--quiet',
action='store_false', dest='verbose', default=True,
help="don't print status messages to stdout")
parser.add_option('-v','--verbose',
action='store_true', dest='verbose',default=False,
help="print status messages to stdout")
parser.add_option('-c','--catalogue',
action='store_true', dest='catalogue',default=False,
help="write to catalogue file")
parser.add_option('-s','--source_data',
action='store', dest='data_file', default=None,
help="destination of input table for source data",metavar="SOURCE_DATA")
parser.add_option('-f', '--freq',
action='store',type='string',dest='freq', default="wide",
help="provide central frequency (MHz)", metavar="FREQUENCY")
parser.add_option('-r','--ra',
action='store', type='float', dest='RA',default=None,
help="right ascension of the image",metavar="RA")
parser.add_option('-d','--dec',
action='store', type='float', dest='DEC',default=None,
help="declination of the image",metavar="DEC")
parser.add_option('-a','--angular_diameter',
action='store', type='float', dest='ang_diameter',default=2.0,
help="angular diameter of the sides of the image",metavar="ANGULAR_DIAMETER")
parser.add_option('-b','--base',
action='store', type='string', dest='base_name',default=None,
help="The base name for the output Aegean formatted table")
#parser.add_option("-","--",
# action="", dest="",default=,
# help="")
(options, args) = parser.parse_args()
global verbose; verbose = options.verbose
options.freq = get_frequency(options.freq)
if (verbose): print "\n ** Using frequency: {0}**".format(options.freq)
# read log file, returns None(s) if no log file exists.
last_fits_file, last_gal_dir, last_gal_name, last_catalogue_file, last_Aegean_dir = read_log()
if(verbose):
print "\n ** Last used files: **"
for kk in read_log(): print " - {0}".format(kk) # print last usages
Tk().withdraw() # keeps the root window from appearing
# if no .fits file or galaxy has been specified
if (options.galaxy_name == None and options.fits_file == None):
if (last_fits_file != ''): print "\n Select an option:\n 1. Select a .fits file\n 2. Download .fits file from GLEAM server\n 3. Use previous .fits file: \"{0}\"".format(last_fits_file); num_choices = 3
else: print "\n Select an option:\n 1. Select a .fits file\n 2. Download .fits file from GLEAM server\n"; num_choices = 2
while True:
try:
choice = int(raw_input(">>"))
if (choice>=1 and choice<=num_choices): break
else: print " ** ERROR: input out of bounds **"
except ValueError:
print " ** ERROR: Invalid input **"
if (choice == 1): # set fits_file to selected file
while True:
fits_file = askopenfilename(initialdir='\\'.join(last_fits_file.split('/')[0:-1])).replace('/','\\') # open dialog box and return the path to the selected file
# AutoDownload option required
if (fits_file != ''): break
else: print "\n ** ERROR: invalid selection **"
if (choice == 2): # set fits_file to downloaded file
if (options.RA == None or options.DEC == None): options.RA, options.DEC, options.ang_diameter = get_position(options.RA,options.DEC,options.ang_diameter)
if (verbose): print " ** Downloading GLEAM cutout for \"{0}\" using parameters: **\n - RA: {1}\n - DEC: {2}\n - Angular diameter: {3}\n - Frequency: {4} MHz".format(galaxy,options.RA,options.DEC,options.ang_diameter,options.freq)
if (os.path.exists("Downloads") == False): os.system("mkdir Downloads")
################################# HERE ########################################
os.mkdir("Downloads\\RA_{0}-DEC_{1}-FREQ_{2}-DIAM_{3}.fits".format(option.RA,options.DEC,options.freq,options.ang_diam))
DL_file = get_cutout(options.RA, options.DEC, options.freq, options.ang_diameter, download_dir="Downloads\\RA_{0}-DEC_{1}-FREQ_{2}-DIAM_{3}.fits".format(option.RA,options.DEC,options.freq,options.ang_diam), listf=False)
os.system("rename \"{0}\" \"GLEAM_cutout_{1}_{2}.fits\"".format(DL_file,freq,galaxy))
if (choice == 3): # set fits_file to previous file
fits_file = last_fits_file
if (options.galaxy_name == None and options.fits_file == None): # *** Neither a 'fits_file', nor a 'galaxy_name' have been given
if (last_fits_file != ''): # last_fits_file does exist
print "\n ** WARNING: No .fits file specified **\n previous .fits file used:\n \"{0}\"".format(last_fits_file)
catch = False
while True:
if (catch): break
choice = str(raw_input("\n>> Use this file (y/n)?:"))
if (choice.lower() == 'y'):
fits_file = last_fits_file; catch = True # set fits_file to last selected
elif (choice.lower() == 'n'):
print " ** Select a GLEAM cutout .fits file ** "
while True:
fits_file = askopenfilename(initialdir='\\'.join(last_fits_file.split('/')[0:-1])).replace('/','\\') # open dialog box and return the path to the selected file
# AutoDownload option required
if (fits_file != ''): catch = True; break
else: print "\n ** ERROR: invalid selection **"
else: # last_fits_file does not exist
print " Select an option:\n 1. Select a .fits file\n 2. Download .fits file from GLEAM server"
while True:
try:
choice = int(raw_input(">>"))
break
except ValueError:
print " ** ERROR: Invalid input **"
            if (choice == 1):
                print " ** Select a GLEAM cutout .fits file ** "
                while True:
                    fits_file = askopenfilename().replace('/','\\') # open dialog box and return the path to the selected file
                    if (fits_file != ''): break
                    else: print "\n ** ERROR: invalid selection **"
            elif (choice == 2):
                if (options.RA == None or options.DEC == None):
                    options.RA, options.DEC, options.ang_diameter = get_position(options.RA,options.DEC,options.ang_diameter)
                if (verbose): print " ** Downloading GLEAM cutout using parameters: **\n - RA: {0}\n - DEC: {1}\n - Angular diameter: {2}\n - Frequency: {3} MHz".format(options.RA,options.DEC,options.ang_diameter,options.freq)
                DL_file = get_cutout(options.RA, options.DEC, options.freq, options.ang_diameter, download_dir="Downloads", listf=False) # get_cutout creates the folder if needed
                fits_file = DL_file # use the downloaded cutout from here on
# if both galaxy and .fits filename specified -> keep galaxy
if (options.fits_file != None and options.galaxy_name != None): # *** 'fits_file' and 'galaxy_name' have been given.
print "\n ** WARNING: '--fits_file' and '--galaxy' have been specified -> using '--galaxy' only **"
options.fits_file = None
# when galaxy name has been specified
if (options.galaxy_name != None):
last_gal_name = options.galaxy_name
if (last_gal_dir != ""): # look in previous galaxy directory
fits_file = find_gal_file(last_gal_dir,options.galaxy_name, options.RA, options.DEC, options.ang_diameter, options.freq)
if (fits_file == ""):
print " ** WARNING: No GLEAM cutout was found for \"{0}\" **\n ** Select a GLEAM cutout .fits file ** ".format(options.galaxy_name)
while True:
fits_file = askopenfilename(initialdir=last_gal_dir).replace('/','\\') # open dialog box and return the path to the selected file
if (fits_file != ""): break
else: print "\n ** ERROR: invalid selection **"
last_gal_dir = '\\'.join(fits_file.split('\\')[0:-2]) # update latest last_gal_dir
last_gal_name = fits_file.split('\\')[-2]
else:
print " ** Select the GLEAM cutout .fits file for the galaxy ** "
fits_file = askopenfilename().replace('/','\\') # open dialog box and return the path to the selected file
last_gal_dir = '\\'.join(fits_file.split('\\')[0:-2]) # update latest last_gal_dir
last_gal_name = fits_file.split('\\')[-2]
# if .fits filename has been specified
if (options.fits_file != None):
        fits_file = find_file(options.fits_file)
if (fits_file == ""):
print " ** WARNING: No .fits file was found for \"{0}\" **\n ** Select a GLEAM cutout .fits file ** ".format(options.fits_file)
while True:
fits_file = askopenfilename().replace('/','\\') # open dialog box and return the path to the selected file
if (fits_file != ""): break
else: print "\n ** ERROR: invalid selection **"
# specifying catalogue filename
if (options.data_file != None): # if catalogue has been specified
catalogue_file = options.data_file
else: # if no catalogue has been specified
if (last_catalogue_file != ""):
print "\n ** WARNING: No catalogue file specified **\n previous catalogue file used:\n \"{0}\" ".format(last_catalogue_file)
catch = False
while True:
if (catch): break
choice = str(raw_input("\n>> Use this file (y/n)?:"))
if (choice.lower() == 'y'):
catalogue_file = last_catalogue_file; catch = True # set catalgoue file to last used
elif (choice.lower() == 'n'):
print " ** Select the input file for source data ** "
while True:
catalogue_file = askopenfilename(initialdir='\\'.join(last_catalogue_file.split('/')[0:-1])).replace('/','\\')
if (catalogue_file != ""): catch = True; break
else: print " ** ERROR: invalid selection **"
else:
print " ** Select the input file for source data ** "
while True:
catalogue_file = askopenfilename().replace('/','\\')
if (catalogue_file != ""): break
else: print " ** ERROR: invalid selection **"
# dir = path to directory, filename = name of the file only.
dir, filename = '\\'.join(fits_file.split('\\')[:-1]), fits_file.split('\\')[-1]
if (verbose): print "\n ** Using .fits file: '.../{0[0]}/{0[1]}/{0[2]}/{1}' ** ".format(dir.split("\\")[-3:],filename)
if (options.RA == None or options.DEC == None):
if (verbose): print " ** WARNING: No RA or DEC specified -> using RA and DEC from \".../{0}\"".format(filename)
# make a base name for output files
if (options.base_name == None):
base = filename.replace(".fits","").replace("GLEAM_","").replace("Gleam_","").replace("cutout_","") if (options.galaxy_name==None) else "{0}_{1}".format(options.freq,options.galaxy_name)
else:
base = options.base_name
# check for last Aegean directory
if (last_Aegean_dir == ""):
last_Aegean_dir = askdirectory(initialdir="~/",title="Select directory of 'Aegean' Tools").replace('/','\\')
if (last_Aegean_dir == ""): print "\n ** ERROR: AEGEAN directory was not given **\n -- ABORTING -- "; exit() # exit if no directory given
log(fits_file,last_gal_dir,last_gal_name,catalogue_file,last_Aegean_dir)
# check if c_freq is RGB freq band -> in which case, need to generate table of found sources in .fits image by running Aegean
if ((options.freq).lower() in ["red","r","green","g","blue","b"]):
if (verbose): print "\n ** Stacked frequency band: '{0}' chosen ** ".format(options.freq)
run_BANE(fits_file,last_Aegean_dir) # *BANE.py does not work on windows ~Paul Hancock
wide_catalogue_file = extract_sources(catalogue_file,options.RA,options.DEC,options.ang_diameter,"wide",dir,base)
snippet_file = run_Aegean(fits_file,wide_catalogue_file,options.RA,options.DEC,options.ang_diameter,options.freq,dir,Aegean_dir)
os.system("rename \"{0}\\GLEAM_snippet_{1}_{2}_{3}_{4}_comp.fits\" \"GLEAM_snippet_{1}_{2}_{3}_{4}.fits\"".format(dir,options.RA,options.DEC,options.ang_diameter,options.freq))
os.system("rename \"{0}\\GLEAM_snippet_{1}_{2}_{3}_{4}_comp.reg\" \"GLEAM_snippet_{1}_{2}_{3}_{4}.reg\"".format(dir,options.RA,options.DEC,options.ang_diameter,options.freq))
else: # user has not specified an RGB frequency band
snippet_file = extract_sources(catalogue_file,options.RA,options.DEC,options.ang_diameter,options.freq,dir,base)
# read data from input table
#in_data = read_data(options.data_filename,float(options.RA),float(options.DEC),float(options.ang_diameter),dir)
# Extract sources which are constrained by input RA/DEC and ang_diam
# Convert source data to Aegean format table
# catalogue_csv_file = to_Aegean_table(source_data,options.central_freq,options.ra_map,options.dec_map,options.ang_diameter,dir)
# run AeRes.py
run_AeRes(fits_file, snippet_file, options.central_freq, dir, base, Aegean_dir)
main()
|
AstroRobin/GLEAM-RESID
|
ReSId.py
|
Python
|
mit
| 44,533
|
[
"Galaxy"
] |
8be25541b4fd895d90bb201c6e067b25cd0e81a1396150ff978de45849bdaedc
|
# Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""classes checker for Python code
"""
from __future__ import generators
import sys
from collections import defaultdict
import six
import astroid
from astroid.bases import Generator, BUILTINS
from astroid.exceptions import InconsistentMroError, DuplicateBasesError
from astroid import objects
from astroid.scoped_nodes import function_to_method
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
PYMETHODS, SPECIAL_METHODS_PARAMS,
overrides_a_method, check_messages, is_attr_private,
is_attr_protected, node_frame_class, is_builtin_object,
decorated_with_property, unimplemented_abstract_methods,
decorated_with, class_is_abstract,
safe_infer, has_known_bases)
from pylint.utils import deprecated_option, get_global_option
if sys.version_info >= (3, 0):
NEXT_METHOD = '__next__'
else:
NEXT_METHOD = 'next'
ITER_METHODS = ('__iter__', '__getitem__')
INVALID_BASE_CLASSES = {'bool', 'range', 'slice', 'memoryview'}
def _get_method_args(method):
args = method.args.args
if method.type in ('classmethod', 'method'):
return len(args) - 1
return len(args)
def _is_invalid_base_class(cls):
return cls.name in INVALID_BASE_CLASSES and is_builtin_object(cls)
def _has_data_descriptor(cls, attr):
attributes = cls.getattr(attr)
for attribute in attributes:
try:
for inferred in attribute.infer():
if isinstance(inferred, astroid.Instance):
try:
inferred.getattr('__get__')
inferred.getattr('__set__')
except astroid.NotFoundError:
continue
else:
return True
except astroid.InferenceError:
# Can't infer, avoid emitting a false positive in this case.
return True
return False
def _called_in_methods(func, klass, methods):
""" Check if the func was called in any of the given methods,
belonging to the *klass*. Returns True if so, False otherwise.
"""
if not isinstance(func, astroid.FunctionDef):
return False
for method in methods:
try:
infered = klass.getattr(method)
except astroid.NotFoundError:
continue
for infer_method in infered:
for callfunc in infer_method.nodes_of_class(astroid.Call):
try:
bound = next(callfunc.func.infer())
except (astroid.InferenceError, StopIteration):
continue
if not isinstance(bound, astroid.BoundMethod):
continue
func_obj = bound._proxied
if isinstance(func_obj, astroid.UnboundMethod):
func_obj = func_obj._proxied
if func_obj.name == func.name:
return True
return False
def _is_attribute_property(name, klass):
""" Check if the given attribute *name* is a property
in the given *klass*.
It will look for `property` calls or for functions
with the given name, decorated by `property` or `property`
subclasses.
Returns ``True`` if the name is a property in the given klass,
``False`` otherwise.
"""
try:
attributes = klass.getattr(name)
except astroid.NotFoundError:
return False
property_name = "{0}.property".format(BUILTINS)
for attr in attributes:
try:
infered = next(attr.infer())
except astroid.InferenceError:
continue
if (isinstance(infered, astroid.FunctionDef) and
decorated_with_property(infered)):
return True
if infered.pytype() == property_name:
return True
return False
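# A minimal sketch (illustrative only) of the two shapes detected above:
#
#     class Klass(object):
#         @property
#         def name(self):                        # FunctionDef decorated with property
#             return self._name
#         area = property(lambda self: self._a)  # infers to BUILTINS + '.property'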
def _has_bare_super_call(fundef_node):
for call in fundef_node.nodes_of_class(astroid.Call):
func = call.func
if (isinstance(func, astroid.Name) and
func.name == 'super' and
not call.args):
return True
return False
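# _has_bare_super_call() only matches the zero-argument form, e.g.:
#
#     def save(self):
#         super().save()          # Name 'super' called with no args -> True
#
# super(Klass, self).save() is not a "bare" call, since call.args is non-empty.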
def _safe_infer_call_result(node, caller, context=None):
"""
Safely infer the return value of a function.
Returns None if inference failed or if there is some ambiguity (more than
one node has been inferred). Otherwise returns the inferred value.
"""
try:
inferit = node.infer_call_result(caller, context=context)
value = next(inferit)
except astroid.InferenceError:
return # inference failed
except StopIteration:
return # no values infered
try:
next(inferit)
return # there is ambiguity on the inferred node
except astroid.InferenceError:
return # there is some kind of ambiguity
except StopIteration:
return value
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
'method-check-failed',
'Used when Pylint has been unable to check methods signature '
'compatibility for an unexpected reason. Please report this kind '
'of error if you can\'t make sense of it.'),
'E0202': ('An attribute defined in %s line %s hides this method',
'method-hidden',
'Used when a class defines a method which is hidden by an '
'instance attribute from an ancestor class or set by some '
'client code.'),
'E0203': ('Access to member %r before its definition line %s',
'access-member-before-definition',
'Used when an instance member is accessed before it\'s actually '
'assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'attribute-defined-outside-init',
'Used when an instance attribute is defined outside the __init__ '
'method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'protected-access',
'Used when a protected member (i.e. class member with a name '
'beginning with an underscore) is accessed outside the class or a '
'descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'no-method-argument',
'Used when a method which should have the bound instance as '
'first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
'no-self-argument',
'Used when a method has a first argument different from "self". '
'This is considered an error since this is '
'such a common convention that you shouldn\'t break it!'),
'C0202': ('Class method %s should have %s as first argument',
'bad-classmethod-argument',
'Used when a class method has a first argument named differently '
'than the value specified in valid-classmethod-first-arg option '
'(default to "cls"), recommended to easily differentiate them '
'from regular instance methods.'),
'C0203': ('Metaclass method %s should have %s as first argument',
'bad-mcs-method-argument',
'Used when a metaclass method has a first argument named '
'differently than the value specified in valid-classmethod-first'
'-arg option (default to "cls"), recommended to easily '
'differentiate them from regular instance methods.'),
'C0204': ('Metaclass class method %s should have %s as first argument',
'bad-mcs-classmethod-argument',
'Used when a metaclass class method has a first argument named '
'differently than the value specified in valid-metaclass-'
'classmethod-first-arg option (default to "mcs"), recommended to '
'easily differentiate them from regular instance methods.'),
'W0211': ('Static method with %r as first argument',
'bad-staticmethod-argument',
'Used when a static method has "self" or a value specified in '
'valid-classmethod-first-arg option or '
'valid-metaclass-classmethod-first-arg option as first argument.'
),
'R0201': ('Method could be a function',
'no-self-use',
'Used when a method doesn\'t use its bound instance, and so could '
'be written as a function.'
),
'W0221': ('Arguments number differs from %s %r method',
'arguments-differ',
'Used when a method has a different number of arguments than in '
'the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s %r method',
'signature-differs',
'Used when a method signature is different than in the '
'implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'abstract-method',
'Used when an abstract method (i.e. raise NotImplementedError) is '
'not overridden in concrete class.'
),
'W0231': ('__init__ method from base class %r is not called',
'super-init-not-called',
'Used when an ancestor class method has an __init__ method '
'which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'no-init',
'Used when a class has no __init__ method, nor do any of its '
'parent classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'non-parent-init-called',
'Used when an __init__ method is called on a class which is not '
'in the direct ancestors for the analysed class.'),
'E0236': ('Invalid object %r in __slots__, must contain '
'only non-empty strings',
'invalid-slots-object',
'Used when an invalid (non-string) object occurs in __slots__.'),
'E0237': ('Assigning to attribute %r not defined in class slots',
'assigning-non-slot',
'Used when assigning to an attribute not defined '
'in the class slots.'),
'E0238': ('Invalid __slots__ object',
'invalid-slots',
'Used when an invalid __slots__ is found in class. '
'Only a string, an iterable or a sequence is permitted.'),
'E0239': ('Inheriting %r, which is not a class.',
'inherit-non-class',
'Used when a class inherits from something which is not a '
'class.'),
'E0240': ('Inconsistent method resolution order for class %r',
'inconsistent-mro',
'Used when a class has an inconsistent method resolution order.'),
'E0241': ('Duplicate bases for class %r',
'duplicate-bases',
'Used when a class has duplicate bases.'),
'R0202': ('Consider using a decorator instead of calling classmethod',
'no-classmethod-decorator',
'Used when a class method is defined without using the decorator '
'syntax.'),
'R0203': ('Consider using a decorator instead of calling staticmethod',
'no-staticmethod-decorator',
'Used when a static method is defined without using the decorator '
'syntax.'),
}
class ClassChecker(BaseChecker):
"""checks for :
* methods without self as first argument
* overridden methods signature
* access only to existent members via self
* attributes not defined in the __init__ method
* unreachable code
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = 'classes'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('ignore-iface-methods',
# TODO(cpopa): remove this in Pylint 1.6.
deprecated_option(opt_type="csv",
help_msg="This is deprecated, because "
"it is not used anymore.")
),
('defining-attr-methods',
{'default' : ('__init__', '__new__', 'setUp'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
),
('valid-classmethod-first-arg',
{'default' : ('cls',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a class method.'}
),
('valid-metaclass-classmethod-first-arg',
{'default' : ('mcs',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a metaclass class method.'}
),
('exclude-protected',
{
'default': (
# namedtuple public API.
'_asdict', '_fields', '_replace', '_source', '_make'),
'type': 'csv',
'metavar': '<protected access exclusions>',
'help': ('List of member names, which should be excluded '
'from the protected access warning.')}
))
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = []
self._first_attrs = []
self._meth_could_be_func = None
def visit_classdef(self, node):
"""init visit variable _accessed
"""
self._accessed.append(defaultdict(list))
self._check_bases_classes(node)
# if not an exception or a metaclass
if node.type == 'class' and has_known_bases(node):
try:
node.local_attr('__init__')
except astroid.NotFoundError:
self.add_message('no-init', args=node, node=node)
self._check_slots(node)
self._check_proper_bases(node)
self._check_consistent_mro(node)
def _check_consistent_mro(self, node):
"""Detect that a class has a consistent mro or duplicate bases."""
try:
node.mro()
except InconsistentMroError:
self.add_message('inconsistent-mro', args=node.name, node=node)
except DuplicateBasesError:
self.add_message('duplicate-bases', args=node.name, node=node)
except NotImplementedError:
# Old style class, there's no mro so don't do anything.
pass
def _check_proper_bases(self, node):
"""
Detect that a class inherits something which is not
a class or a type.
"""
for base in node.bases:
ancestor = safe_infer(base)
if ancestor in (astroid.YES, None):
continue
if (isinstance(ancestor, astroid.Instance) and
ancestor.is_subtype_of('%s.type' % (BUILTINS,))):
continue
if (not isinstance(ancestor, astroid.ClassDef) or
_is_invalid_base_class(ancestor)):
self.add_message('inherit-non-class',
args=base.as_string(), node=node)
def leave_classdef(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
ignore_mixins = get_global_option(self, 'ignore-mixin-members',
default=True)
if ignore_mixins and cnode.name[-5:].lower() == 'mixin':
# We are in a mixin class. No need to try to figure out if
# something is missing, since it is most likely that it will
# miss.
return
accessed = self._accessed.pop()
if cnode.type != 'metaclass':
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if not self.linter.is_message_enabled('attribute-defined-outside-init'):
return
defining_methods = self.config.defining_attr_methods
current_module = cnode.root()
for attr, nodes in six.iteritems(cnode.instance_attrs):
# skip nodes which are not in the current module; they may screw up
# the output and are not worth checking
nodes = [n for n in nodes if not
isinstance(n.statement(), (astroid.Delete, astroid.AugAssign))
and n.root() is current_module]
if not nodes:
continue # error detected by typechecking
# check if any method the attribute is defined in is a defining method
if any(node.frame().name in defining_methods
for node in nodes):
continue
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
# check if any parent method the attribute is defined in is a defining method
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astroid.NotFoundError:
for node in nodes:
if node.frame().name not in defining_methods:
# If the attribute was set by a callfunc in any
# of the defining methods, then don't emit
# the warning.
if _called_in_methods(node.frame(), cnode,
defining_methods):
continue
self.add_message('attribute-defined-outside-init',
args=attr, node=node)
def visit_functiondef(self, node):
"""check method arguments, overriding"""
# ignore actual functions
if not node.is_method():
return
klass = node.parent.frame()
self._meth_could_be_func = True
# check first argument is self if this is actually a method
self._check_first_arg_for_type(node, klass.type == 'metaclass')
if node.name == '__init__':
self._check_init(node)
return
# check signature if the method overloads inherited method
for overridden in klass.local_attr_ancestors(node.name):
# get astroid for the searched method
try:
meth_node = overridden[node.name]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astroid built from living objects
continue
if not isinstance(meth_node, astroid.FunctionDef):
continue
self._check_signature(node, meth_node, 'overridden', klass)
break
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Attribute) and \
decorator.attrname in ('getter', 'setter', 'deleter'):
# attribute assignment will call this method, not hiding it
return
if isinstance(decorator, astroid.Name) and decorator.name == 'property':
# attribute assignment will either call a setter or raise
# an attribute error; either way the function is not hidden
return
# check if the method is hidden by an attribute
try:
overridden = klass.instance_attr(node.name)[0] # XXX
overridden_frame = overridden.frame()
if (isinstance(overridden_frame, astroid.FunctionDef)
and overridden_frame.type == 'method'):
overridden_frame = overridden_frame.parent.frame()
if (isinstance(overridden_frame, astroid.ClassDef)
and klass.is_subtype_of(overridden_frame.qname())):
args = (overridden.root().name, overridden.fromlineno)
self.add_message('method-hidden', args=args, node=node)
except astroid.NotFoundError:
pass
visit_asyncfunctiondef = visit_functiondef
def _check_slots(self, node):
if '__slots__' not in node.locals:
return
for slots in node.igetattr('__slots__'):
# check if __slots__ is a valid type
for meth in ITER_METHODS:
try:
slots.getattr(meth)
break
except astroid.NotFoundError:
continue
else:
self.add_message('invalid-slots', node=node)
continue
if isinstance(slots, astroid.Const):
# a string, ignore the following checks
continue
if not hasattr(slots, 'itered'):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, astroid.Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is astroid.YES:
return
for elt in values:
try:
self._check_slots_elt(elt)
except astroid.InferenceError:
continue
def _check_slots_elt(self, elt):
for infered in elt.infer():
if infered is astroid.YES:
continue
if (not isinstance(infered, astroid.Const) or
not isinstance(infered.value, six.string_types)):
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
continue
if not infered.value:
self.add_message('invalid-slots-object',
args=infered.as_string(),
node=elt)
def leave_functiondef(self, node):
"""on method node, check if this method couldn't be a function
ignore class, static and abstract methods, initializer,
methods overridden from a parent class.
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if not self.linter.is_message_enabled('no-self-use'):
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
and node.name not in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name) or
decorated_with_property(node) or
(six.PY3 and _has_bare_super_call(node)))):
self.add_message('no-self-use', node=node)
def visit_attribute(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
# Check self
if self.is_first_attr(node):
self._accessed[-1][attrname].append(node)
return
if not self.linter.is_message_enabled('protected-access'):
return
self._check_protected_attribute_access(node)
def visit_assignattr(self, node):
if isinstance(node.assign_type(), astroid.AugAssign) and self.is_first_attr(node):
self._accessed[-1][node.attrname].append(node)
self._check_in_slots(node)
def _check_in_slots(self, node):
""" Check that the given assattr node
is defined in the class slots.
"""
infered = safe_infer(node.expr)
if infered and isinstance(infered, astroid.Instance):
klass = infered._proxied
if '__slots__' not in klass.locals or not klass.newstyle:
return
slots = klass.slots()
if slots is None:
return
# If any ancestor doesn't use slots, the slots
# defined for this class are superfluous.
if any('__slots__' not in ancestor.locals and
ancestor.name != 'object'
for ancestor in klass.ancestors()):
return
if not any(slot.value == node.attrname for slot in slots):
# If we have a '__dict__' in slots, then
# assigning any name is valid.
if not any(slot.value == '__dict__' for slot in slots):
if _is_attribute_property(node.attrname, klass):
# Properties circumvent the slots mechanism,
# so we should not emit a warning for them.
return
if (node.attrname in klass.locals
and _has_data_descriptor(klass, node.attrname)):
# Descriptors circumvent the slots mechanism as well.
return
self.add_message('assigning-non-slot',
args=(node.attrname, ), node=node)
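# An illustrative case for 'assigning-non-slot' (assuming no data descriptor
# or property named 'z' on the class):
#
#     class Point(object):
#         __slots__ = ('x', 'y')
#         def move(self):
#             self.z = 1          # 'z' is neither in __slots__ nor '__dict__'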
@check_messages('protected-access', 'no-classmethod-decorator',
'no-staticmethod-decorator')
def visit_assign(self, assign_node):
self._check_classmethod_declaration(assign_node)
node = assign_node.targets[0]
if not isinstance(node, astroid.AssignAttr):
return
if self.is_first_attr(node):
return
self._check_protected_attribute_access(node)
def _check_classmethod_declaration(self, node):
"""Checks for uses of classmethod() or staticmethod()
When a @classmethod or @staticmethod decorator should be used instead.
A message will be emitted only if the assignment is at a class scope
and only if the classmethod's argument belongs to the class where it
is defined.
`node` is an assign node.
"""
if not isinstance(node.value, astroid.Call):
return
# check the function called is "classmethod" or "staticmethod"
func = node.value.func
if (not isinstance(func, astroid.Name) or
func.name not in ('classmethod', 'staticmethod')):
return
msg = ('no-classmethod-decorator' if func.name == 'classmethod' else
'no-staticmethod-decorator')
# assignment must be at a class scope
parent_class = node.scope()
if not isinstance(parent_class, astroid.ClassDef):
return
# Check if the arg passed to classmethod is a class member
classmeth_arg = node.value.args[0]
if not isinstance(classmeth_arg, astroid.Name):
return
method_name = classmeth_arg.name
if any(method_name == member.name
for member in parent_class.mymethods()):
self.add_message(msg, node=node.targets[0])
def _check_protected_attribute_access(self, node):
'''Given an attribute access node (set or get), check if attribute
access is legitimate. Call _check_first_attr with node before calling
this method. Valid cases are:
* self._attr in a method or cls._attr in a classmethod. Checked by
_check_first_attr.
* Klass._attr inside "Klass" class.
* Klass2._attr inside "Klass" class when Klass2 is a base class of
Klass.
'''
attrname = node.attrname
if (is_attr_protected(attrname) and
attrname not in self.config.exclude_protected):
klass = node_frame_class(node)
# XXX infer to be more safe and less dirty ??
# in classes, check we are not getting a parent method
# through the class object or through super
callee = node.expr.as_string()
# We are not in a class, no remaining valid case
if klass is None:
self.add_message('protected-access', node=node, args=attrname)
return
# If the expression begins with a call to super, that's ok.
if isinstance(node.expr, astroid.Call) and \
isinstance(node.expr.func, astroid.Name) and \
node.expr.func.name == 'super':
return
# We are in a class, one remaining valid case: Klass._attr inside
# Klass
if not (callee == klass.name or callee in klass.basenames):
# Detect property assignments in the body of the class.
# This is acceptable:
#
# class A:
# b = property(lambda: self._b)
stmt = node.parent.statement()
if (isinstance(stmt, astroid.Assign)
and len(stmt.targets) == 1
and isinstance(stmt.targets[0], astroid.AssignName)):
name = stmt.targets[0].name
if _is_attribute_property(name, klass):
return
self.add_message('protected-access', node=node, args=attrname)
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
excs = ('AttributeError', 'Exception', 'BaseException')
for attr, nodes in six.iteritems(accessed):
try:
# is it a class attribute ?
node.local_attr(attr)
# yes, stop here
continue
except astroid.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
next(node.instance_attr_ancestors(attr))
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astroid.NotFoundError:
pass
else:
# filter out augment assignment nodes
defstmts = [stmt for stmt in defstmts if stmt not in nodes]
if not defstmts:
# only augment assignment for this node, no-member should be
# triggered by the typecheck checker
continue
# filter defstmts to only pick the first one when there are
# several assignments in the same scope
scope = defstmts[0].scope()
defstmts = [stmt for i, stmt in enumerate(defstmts)
if i == 0 or stmt.scope() is not scope]
# if there are still more than one, don't attempt to be smarter
# than we can be
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if _node.frame() is frame and _node.fromlineno < lno \
and not astroid.are_exclusive(_node.statement(), defstmt, excs):
self.add_message('access-member-before-definition',
node=_node, args=(attr, lno))
def _check_first_arg_for_type(self, node, metaclass=0):
"""check the name of first argument, expect:
* 'self' for a regular method
* 'cls' for a class method or a metaclass regular method (actually
valid-classmethod-first-arg value)
* 'mcs' for a metaclass class method (actually
valid-metaclass-classmethod-first-arg)
* not one of the above for a static method
"""
# don't care about functions with unknown argument (builtins)
if node.args.args is None:
return
first_arg = node.args.args and node.argnames()[0]
self._first_attrs.append(first_arg)
first = self._first_attrs[-1]
# static method
if node.type == 'staticmethod':
if (first_arg == 'self' or
first_arg in self.config.valid_classmethod_first_arg or
first_arg in self.config.valid_metaclass_classmethod_first_arg):
self.add_message('bad-staticmethod-argument', args=first, node=node)
return
self._first_attrs[-1] = None
# class / regular method with no args
elif not node.args.args:
self.add_message('no-method-argument', node=node)
# metaclass
elif metaclass:
# metaclass __new__ or classmethod
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_metaclass_classmethod_first_arg, node,
'bad-mcs-classmethod-argument', node.name)
# metaclass regular method
else:
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-mcs-method-argument',
node.name)
# regular class
else:
# class method
if node.type == 'classmethod':
self._check_first_arg_config(
first,
self.config.valid_classmethod_first_arg, node,
'bad-classmethod-argument',
node.name)
# regular method without self as argument
elif first != 'self':
self.add_message('no-self-argument', node=node)
def _check_first_arg_config(self, first, config, node, message,
method_name):
if first not in config:
if len(config) == 1:
valid = repr(config[0])
else:
valid = ', '.join(repr(v) for v in config[:-1])
valid = '%s or %r' % (valid, config[-1])
self.add_message(message, args=(method_name, valid), node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
def is_abstract(method):
return method.is_abstract(pass_is_abstract=False)
# check if this class is abstract
if class_is_abstract(node):
return
methods = sorted(
unimplemented_abstract_methods(node, is_abstract).items(),
key=lambda item: item[0],
)
for name, method in methods:
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if name in node.locals:
# it is redefined as an attribute or with a descriptor
continue
self.add_message('abstract-method', node=node,
args=(name, owner.name))
def _check_init(self, node):
"""check that the __init__ method call super or ancestors'__init__
method
"""
if (not self.linter.is_message_enabled('super-init-not-called') and
not self.linter.is_message_enabled('non-parent-init-called')):
return
klass_node = node.parent.frame()
to_call = _ancestors_to_call(klass_node)
not_called_yet = dict(to_call)
for stmt in node.nodes_of_class(astroid.Call):
expr = stmt.func
if not isinstance(expr, astroid.Attribute) \
or expr.attrname != '__init__':
continue
# skip the test if using super
if isinstance(expr.expr, astroid.Call) and \
isinstance(expr.expr.func, astroid.Name) and \
expr.expr.func.name == 'super':
return
try:
for klass in expr.expr.infer():
if klass is astroid.YES:
continue
# The inferred klass can be super(), which was
# assigned to a variable and the `__init__`
# was called later.
#
# base = super()
# base.__init__(...)
if (isinstance(klass, astroid.Instance) and
isinstance(klass._proxied, astroid.ClassDef) and
is_builtin_object(klass._proxied) and
klass._proxied.name == 'super'):
return
elif isinstance(klass, objects.Super):
return
try:
del not_called_yet[klass]
except KeyError:
if klass not in to_call:
self.add_message('non-parent-init-called',
node=expr, args=klass.name)
except astroid.InferenceError:
continue
for klass, method in six.iteritems(not_called_yet):
if klass.name == 'object' or method.parent.name == 'object':
continue
self.add_message('super-init-not-called', args=klass.name, node=node)
def _check_signature(self, method1, refmethod, class_type, cls):
"""check that the signature of the two given methods match
"""
if not (isinstance(method1, astroid.FunctionDef)
and isinstance(refmethod, astroid.FunctionDef)):
self.add_message('method-check-failed',
args=(method1, refmethod), node=method1)
return
instance = cls.instanciate_class()
method1 = function_to_method(method1, instance)
refmethod = function_to_method(refmethod, instance)
# Don't care about functions with unknown argument (builtins).
if method1.args.args is None or refmethod.args.args is None:
return
# If we use *args, **kwargs, skip the below checks.
if method1.args.vararg or method1.args.kwarg:
return
# Ignore private to class methods.
if is_attr_private(method1.name):
return
# Ignore setters, they have an implicit extra argument,
# which shouldn't be taken into consideration.
if method1.decorators:
for decorator in method1.decorators.nodes:
if (isinstance(decorator, astroid.Attribute) and
decorator.attrname == 'setter'):
return
method1_args = _get_method_args(method1)
refmethod_args = _get_method_args(refmethod)
if method1_args != refmethod_args:
self.add_message('arguments-differ',
args=(class_type, method1.name),
node=method1)
elif len(method1.args.defaults) < len(refmethod.args.defaults):
self.add_message('signature-differs',
args=(class_type, method1.name),
node=method1)
def is_first_attr(self, node):
"""Check that attribute lookup name use first attribute variable name
(self for method, cls for classmethod and mcs for metaclass).
"""
return self._first_attrs and isinstance(node.expr, astroid.Name) and \
node.expr.name == self._first_attrs[-1]
class SpecialMethodsChecker(BaseChecker):
"""Checker which verifies that special methods
are implemented correctly.
"""
__implements__ = (IAstroidChecker, )
name = 'classes'
msgs = {
'E0301': ('__iter__ returns non-iterator',
'non-iterator-returned',
'Used when an __iter__ method returns something which is not an '
'iterable (i.e. has no `%s` method)' % NEXT_METHOD,
{'old_names': [('W0234', 'non-iterator-returned'),
('E0234', 'non-iterator-returned')]}),
'E0302': ('The special method %r expects %s param(s), %d %s given',
'unexpected-special-method-signature',
'Emitted when a special method was defined with an '
'invalid number of parameters. If it has too few or '
'too many, it might not work at all.',
{'old_names': [('E0235', 'bad-context-manager')]}),
}
priority = -2
@check_messages('unexpected-special-method-signature',
'non-iterator-returned')
def visit_functiondef(self, node):
if not node.is_method():
return
if node.name == '__iter__':
self._check_iter(node)
if node.name in PYMETHODS:
self._check_unexpected_method_signature(node)
visit_asyncfunctiondef = visit_functiondef
def _check_unexpected_method_signature(self, node):
expected_params = SPECIAL_METHODS_PARAMS[node.name]
if expected_params is None:
# This can support a variable number of parameters.
return
if not len(node.args.args) and not node.args.vararg:
# Method has no parameter, will be caught
# by no-method-argument.
return
if decorated_with(node, [BUILTINS + ".staticmethod"]):
# A staticmethod has no implicit self, so count every argument.
all_args = node.args.args
else:
all_args = node.args.args[1:]
mandatory = len(all_args) - len(node.args.defaults)
optional = len(node.args.defaults)
current_params = mandatory + optional
if isinstance(expected_params, tuple):
# The expected number of parameters can be any value from this
# tuple, although the user should implement the method
# to take all of them into consideration.
emit = mandatory not in expected_params
expected_params = "between %d or %d" % expected_params
else:
# If the number of mandatory parameters doesn't
# suffice, the expected parameters for this
# function will be deduced from the optional
# parameters.
rest = expected_params - mandatory
if rest == 0:
emit = False
elif rest < 0:
emit = True
elif rest > 0:
emit = not ((optional - rest) >= 0 or node.args.vararg)
if emit:
verb = "was" if current_params <= 1 else "were"
self.add_message('unexpected-special-method-signature',
args=(node.name, expected_params, current_params, verb),
node=node)
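# Worked example of the arithmetic above, assuming SPECIAL_METHODS_PARAMS
# maps '__exit__' to 3: for 'def __exit__(self, exc_type):' we get
# mandatory = 1, optional = 0, so rest = 3 - 1 = 2 > 0 and neither defaults
# nor *args can cover it, hence "expects 3 param(s), 1 was given" is emitted.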
@staticmethod
def _is_iterator(node):
if node is astroid.YES:
# Just ignore YES objects.
return True
if isinstance(node, Generator):
# Generators can be itered.
return True
if isinstance(node, astroid.Instance):
try:
node.local_attr(NEXT_METHOD)
return True
except astroid.NotFoundError:
pass
elif isinstance(node, astroid.ClassDef):
metaclass = node.metaclass()
if metaclass and isinstance(metaclass, astroid.ClassDef):
try:
metaclass.local_attr(NEXT_METHOD)
return True
except astroid.NotFoundError:
pass
return False
def _check_iter(self, node):
infered = _safe_infer_call_result(node, node)
if infered is not None:
if not self._is_iterator(infered):
self.add_message('non-iterator-returned', node=node)
def _ancestors_to_call(klass_node, method='__init__'):
"""return a dictionary where keys are the list of base classes providing
the queried method, and so that should/may be called from the method node
"""
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
to_call[base_node] = next(base_node.igetattr(method))
except astroid.InferenceError:
continue
return to_call
def node_method(node, method_name):
"""get astroid for <method_name> on the given class node, ensuring it
is a Function node
"""
for node_attr in node.local_attr(method_name):
if isinstance(node_attr, astroid.FunctionDef):
return node_attr
raise astroid.NotFoundError(method_name)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ClassChecker(linter))
linter.register_checker(SpecialMethodsChecker(linter))
|
spirrello/spirrello-pynet-work
|
applied_python/lib/python2.7/site-packages/pylint/checkers/classes.py
|
Python
|
gpl-3.0
| 47,374
|
[
"VisIt"
] |
f360953da376b1c5088189765183b6b16d0796523c0854b2a0afaa56f0c57dc3
|
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""StreetLearn level for the curriculum-based courier task.
This is a version of the courier task that increases the distance to the
goal with each episode using the given annealing rate.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import time
from streetlearn.python.environment import courier_game
_SECONDS_IN_HOUR = 3600
class CurriculumCourierGame(courier_game.CourierGame):
"""Coin game that gives extra reward for finding the goal pano. A courier goal
is randomly selected from panos in the graph according to a curriculum that
starts with panos within a maximum distance from the current agent position,
then anneals it with time. On success or timeout, a new goal is chosen. The
episode ends after a fixed episode length.
"""
def __init__(self, config):
"""Creates an instance of the RandomTaxiCurriculum level.
This coin game gives extra reward for finding the goal pano, and resets the
goal once the goal has been found (or on timeout). Panos can be assigned
rewards (coins) randomly and the agent will receive the reward the first
time they visit these panos. Goal panos are assigned within a circle whose
radius grows in time from min_goal_distance to max_goal_distance.
Args:
config: config dict of various settings.
"""
super(CurriculumCourierGame, self).__init__(config)
self._timestamp_start = config['timestamp_start_curriculum']
self._annealing_rate = config['annealing_rate_curriculum']
self._hours_curriculum_part_1 = config['hours_curriculum_part_1']
self._hours_curriculum_part_2 = config['hours_curriculum_part_2']
self._min_goal_distance = config['min_goal_distance_curriculum']
self._max_goal_distance = config['max_goal_distance_curriculum']
self._allowed_goal_distance = self._min_goal_distance
assert self._timestamp_start <= time.time()
assert self._annealing_rate > 0
assert self._hours_curriculum_part_1 >= 0
assert self._hours_curriculum_part_2 > 0
assert self._min_goal_distance < self._max_goal_distance
logging.info(
'Curriculum: starts at t=%d, dist <= %f in P1 (%f h)',
self._timestamp_start, self._min_goal_distance,
self._hours_curriculum_part_1)
logging.info(
'Curriculum: then %f < dist <= %f in P2 (%f h)',
self._min_goal_distance, self._max_goal_distance,
self._hours_curriculum_part_2)
logging.info('Curriculum: annealing rate: %f', self._annealing_rate)
def _update_curriculum_goal_distance(self):
"""Updates the allowed distance to the goal according to the curriculum."""
hours_train = max(0,
(time.time() - self._timestamp_start) / _SECONDS_IN_HOUR)
if hours_train <= self._hours_curriculum_part_1:
# During part 1 of the curriculum, sample goals within a minimal distance.
self._allowed_goal_distance = self._min_goal_distance
else:
# During part 2 of the curriculum, sample goals within a distance
# that grows from a minimum value to a maximum value.
numerator = hours_train - self._hours_curriculum_part_1
denom = self._hours_curriculum_part_2
time_factor = pow(min(1, max(0, numerator / denom)), self._annealing_rate)
self._allowed_goal_distance = (
(self._max_goal_distance - self._min_goal_distance
) * time_factor + self._min_goal_distance)
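# Worked example with hypothetical settings: part_1 = 10 h, part_2 = 100 h,
# annealing_rate = 1, min = 100 m, max = 3500 m. At hours_train = 60:
#   time_factor = ((60 - 10) / 100) ** 1 = 0.5
#   allowed_goal_distance = (3500 - 100) * 0.5 + 100 = 1800 m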
def on_reset(self, streetlearn):
"""Gets called after StreetLearn:reset().
Selects a random pano as goal destination.
If there are any coins, clears the set of touched panos and randomly
generates reward-yielding coins and populates pano_id_to_color.
Args:
streetlearn: a streetlearn instance.
Returns:
A newly populated pano_id_to_color dictionary.
"""
# Update the allowed distance to the goal according to the curriculum.
self._update_curriculum_goal_distance()
# Populate the list of panos and assign optional coins to panos.
# Assign the goal location to one of the panos.
return super(CurriculumCourierGame, self).on_reset(streetlearn)
def get_info(self, streetlearn):
""""Returns current information about the state of the environment.
Args:
streetlearn: a StreetLearn instance.
Returns:
info: information from the environment at the last step.
"""
info = super(CurriculumCourierGame, self).get_info(streetlearn)
info['allowed_goal_distance'] = self._allowed_goal_distance
return info
def _sample_random_goal(self, streetlearn):
"""Randomly sets a new pano for the current goal according to a curriculum.
Args:
streetlearn: The StreetLearn environment.
"""
# Sample a goal among the pano ids that is within that distance.
goals = [goal for goal in streetlearn.graph
if ((goal != self._current_goal_id) and
(goal != streetlearn.current_pano_id))]
self._initial_distance_to_goal = float('inf')
while self._initial_distance_to_goal > self._allowed_goal_distance:
self._current_goal_id = np.random.choice(goals)
self._min_distance_reached = streetlearn.engine.GetPanoDistance(
streetlearn.current_pano_id, self._current_goal_id)
self._initial_distance_to_goal = self._min_distance_reached
logging.info(
'seed %d, frame %d: distance to goal: %f (max allowed: %f)',
streetlearn.seed, streetlearn.frame_count,
self._initial_distance_to_goal, self._allowed_goal_distance)
|
deepmind/streetlearn
|
streetlearn/python/environment/curriculum_courier_game.py
|
Python
|
apache-2.0
| 6,183
|
[
"VisIt"
] |
9be3c466b246c2b7c83bdf4aba5c26cdfe08cca1bab49df6e183cc5c3134066d
|
"""
Tools to process galaxy spectra .fits files from SDSS-II Legacy survey.
Authored by Grace Telford 02/13/16
"""
# TODO: add parameter descriptions to SpecProcessor, normalize, and process_fits
from __future__ import absolute_import, print_function, division
import numpy as np
from scipy import interp
import time
import sys
from .io import FitsData
class SpecProcessor(object):
"""
Perform basic processing of raw spectra.
Attributes
----------
loglam_grid: ndarray
Nsamples: integer
galaxy_params: numpy record array
filenames: string, list, or ndarray
spectra_directory: string
Nspectra: integer
"""
def __init__(self, filenames, galaxy_params, spectra_directory=None, n_samples=5000, loglam_grid=None):
if len(galaxy_params) != len(filenames):
sys.exit('filenames and galaxy_params must be same length')
self.galaxy_params = galaxy_params
self.filenames = filenames
self.Nspectra = len(self.filenames)
self.spectra_directory = spectra_directory
if loglam_grid is not None:
self.loglam_grid = loglam_grid
self.Nsamples = len(loglam_grid)
else:
self.loglam_grid = 3.5 + 0.0001 * np.arange(n_samples)
self.Nsamples = n_samples
@staticmethod
def k(wavelength, r_v=3.1):
"""
Calculate A_wavelength/A_V using CCM 1989 extincton law.
Parameters
----------
wavelength: float or ndarray
Wavelength(s) at which to compute the reddening correction.
r_v: float (default=3.1)
R_V value assumed in reddening law.
Returns
-------
k: float or ndarray
Value(s) of k(lambda) at the specified wavelength(s).
"""
x = 1. / (wavelength / 10000.)
"""
Valid for 1.1 < x < 3.3 - all wavelengths in this code are between 1.35 and 2.7.
"""
y = x - 1.82
a = 1. + 0.17699 * y - 0.50447 * (y ** 2) - 0.02427 * (y ** 3) + 0.72085 * (y ** 4) + 0.01979 * (
y ** 5) - 0.77530 * (y ** 6) + 0.32999 * (y ** 7)
b = 1.41338 * y + 2.28305 * (y ** 2) + 1.07233 * (y ** 3) - 5.38434 * (y ** 4) - 0.62251 * (
y ** 5) + 5.30260 * (y ** 6) - 2.09002 * (y ** 7)
return a + b / r_v
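# Sanity check (a property of the CCM law, not extra functionality): at the
# V band, wavelength = 5500 A gives x ~ 1.82, y ~ 0, a ~ 1 and b ~ 0, so
# k ~ 1, i.e. A_5500 / A_V ~ 1 as expected from the normalisation at V.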
def deredden(self, log_wavelength, flux, ebv):
"""
Correct flux at specified wavelength(s) for reddening using CCM 1989 extinction law.
Parameters
----------
log_wavelength: float or ndarray
Wavelength(s) at which to compute the reddening correction.
flux: float or array-like
Uncorrected flux(es).
ebv: float
Value of E(B-V).
Returns
-------
flux_corr: float or ndarray
Flux(es) corrected for reddening.
"""
return flux * 10 ** (0.4 * self.k(10 ** log_wavelength) * ebv)
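# Worked example with hypothetical values: at 5500 A (k ~ 1) and E(B-V) = 0.05,
# the correction factor is 10 ** (0.4 * 1 * 0.05) ~ 1.047, so the dereddened
# flux is about 5 per cent higher than the observed flux.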
def normalize(self, spectra, weights):
"""
Normalize the array of spectra to mean value of each spectrum between 4400 and 4450 A
Multiply inverse variances by the square of the normalization
Parameters
----------
spectra: ndarray
weights: ndarray
Returns
-------
spectra: ndarray
weights: ndarray
"""
# TODO: check that mean flux in this window is nonzero!
norm = np.mean(spectra[:, (10 ** self.loglam_grid > 4400.) * (10 ** self.loglam_grid < 4450.)], axis=1)
spectra /= norm[:, None]
weights *= norm[:, None] ** 2
return spectra, weights
def process_fits(self, normalize=False, mask=False, return_id=False, indices=None):
"""
Iterate over all .fits filenames, read in and process spectra.
Check that redshift in header matches redshift in parameters file.
Parameters
----------
normalize: boolean (default=False)
mask: boolean (default=False)
indices: integer, list, or ndarray (default=None)
return_id: boolean (default=False)
Returns
-------
spectra: ndarray
weights: ndarray
id_dict: dictionary
Only returned if return_id=True.
"""
start_time = time.time()
counter = 0
spectra = np.zeros((self.Nspectra, self.Nsamples))
weights = np.zeros((self.Nspectra, self.Nsamples))
redshifts = []
plates = []
mjds = []
fibers = []
if indices is not None:
index_list = indices
else:
index_list = np.arange(self.Nspectra)
for ind in index_list:
data = FitsData(self.filenames[ind], spectra_directory=self.spectra_directory)
redshifts.append(data.z)
plates.append(data.plate)
mjds.append(data.mjd)
fibers.append(data.fiber)
if mask:
data.ivars[data.andmask > 0] = np.nan
# Shift to restframe, apply mask, correct for reddening
loglam = np.log10(data.wavelengths / (1. + data.z))
ebv = self.galaxy_params['EBV'][ind]
data.fluxes = self.deredden(loglam, data.fluxes, ebv)
# Interpolate spectrum/ivars & resample to common grid; set all NaNs in ivar array to 0 (masked)
spectra[ind, :] = interp(self.loglam_grid, loglam, data.fluxes, left=0., right=0.)
weights[ind, :] = interp(self.loglam_grid, loglam, data.ivars, left=0., right=0.)
weights[ind, np.isnan(weights[ind, :])] = 0.
# Progress report
if counter % 10 == 0:
current_time = time.time()
print('Time to iteration %d: %g' % (counter, current_time - start_time))
counter += 1
if normalize:
spectra, weights = self.normalize(spectra, weights)
end_time = time.time()
print('Total time:', end_time - start_time)
if return_id:
id_dict = {'redshifts': redshifts, 'plates': plates, 'mjds': mjds, 'fibers': fibers}
return spectra, weights, id_dict
else:
return spectra, weights
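# Minimal usage sketch (file names, parameter table and directory are
# placeholders, not part of this module):
#
#     sp = SpecProcessor(filenames, galaxy_params, spectra_directory='spectra/')
#     spectra, weights, ids = sp.process_fits(normalize=True, mask=True,
#                                             return_id=True)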
|
ogtelford/specprep
|
specprep/processing.py
|
Python
|
bsd-3-clause
| 6,212
|
[
"Galaxy"
] |
6df932628de39ca72f661079010bf7b88e105972f1ef0cf0dff4f112229aabba
|
import sys, itertools, optparse
optParser = optparse.OptionParser(
usage = "python %prog [options] <flattened_gff_file> <sam_file> <output_file>",
description=
"This script counts how many reads in <sam_file> fall onto each exonic " +
"part given in <flattened_gff_file> and outputs a list of counts in " +
"<output_file>, for further analysis with the DEXSeq Bioconductor package. " +
"(Notes: The <flattened_gff_file> should be produced with the script " +
"dexseq_prepare_annotation.py). <sam_file> may be '-' to indicate standard input.",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
"Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
"Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-p", "--paired", type="choice", dest="paired",
choices = ( "no", "yes" ), default = "no",
help = "'yes' or 'no'. Indicates whether the data is paired-end (default: no)" )
optParser.add_option( "-s", "--stranded", type="choice", dest="stranded",
choices = ( "yes", "no", "reverse" ), default = "yes",
help = "'yes', 'no', or 'reverse'. Indicates whether the data is " +
"from a strand-specific assay (default: yes ). " +
"Be sure to switch to 'no' if you use a non strand-specific RNA-Seq library " +
"preparation protocol. 'reverse' inverts strands and is neede for certain " +
"protocols, e.g. paired-end with circularization." )
optParser.add_option( "-a", "--minaqual", type="int", dest="minaqual",
default = 10,
help = "skip all reads with alignment quality lower than the given " +
"minimum value (default: 10)" )
if len( sys.argv ) == 1:
optParser.print_help()
sys.exit(1)
(opts, args) = optParser.parse_args()
if len( args ) != 3:
sys.stderr.write( sys.argv[0] + ": Error: Please provide three arguments.\n" )
sys.stderr.write( " Call with '-h' to get usage information.\n" )
sys.exit( 1 )
try:
import HTSeq
except ImportError:
sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
sys.exit(1)
gff_file = args[0]
sam_file = args[1]
out_file = args[2]
stranded = opts.stranded == "yes" or opts.stranded == "reverse"
reverse = opts.stranded == "reverse"
is_PE = opts.paired == "yes"
minaqual = opts.minaqual
if sam_file == "-":
sam_file = sys.stdin
# Step 1: Read in the GFF file as generated by aggregate_genes.py
# and put everything into a GenomicArrayOfSets
features = HTSeq.GenomicArrayOfSets( "auto", stranded=stranded )
for f in HTSeq.GFF_Reader( gff_file ):
if f.type == "exonic_part":
f.name = f.attr['gene_id'] + ":" + f.attr['exonic_part_number']
features[f.iv] += f
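# e.g. an exonic_part line whose attributes hold gene_id "ENSG00000000003" and
# exonic_part_number "001" (illustrative values) is stored as
# "ENSG00000000003:001"; the counting loops below group names by the gene id
# before the ":" to detect ambiguous reads.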
# initialise counters
num_reads = 0
counts = {}
counts[ '_empty' ] = 0
counts[ '_ambiguous' ] = 0
counts[ '_lowaqual' ] = 0
counts[ '_notaligned' ] = 0
# put a zero for each feature ID
for iv, s in features.steps():
for f in s:
counts[ f.name ] = 0
# We need this little helper below:
def reverse_strand( s ):
if s == "+":
return "-"
elif s == "-":
return "+"
else:
raise SystemError, "illegal strand"
# Now go through the aligned reads
if not is_PE:
num_reads = 0
for a in HTSeq.SAM_Reader( sam_file ):
if not a.aligned:
counts[ '_notaligned' ] += 1
continue
if a.aQual < minaqual:
counts[ '_lowaqual' ] += 1
continue
rs = set()
for cigop in a.cigar:
if cigop.type != "M":
continue
if reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps( ):
rs = rs.union( s )
set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
if len( set_of_gene_names ) == 0:
counts[ '_empty' ] += 1
elif len( set_of_gene_names ) > 1:
counts[ '_ambiguous' ] +=1
else:
for f in rs:
counts[ f.name ] += 1
num_reads += 1
if num_reads % 100000 == 0:
sys.stdout.write( "%d reads processed.\n" % num_reads )
else: # paired-end
num_reads = 0
for af, ar in HTSeq.pair_SAM_alignments( HTSeq.SAM_Reader( sam_file ) ):
rs = set()
if af and ar and not af.aligned and not ar.aligned:
counts[ '_notaligned' ] += 1
continue
if af and ar and af.aQual < minaqual and ar.aQual < minaqual:
counts[ '_lowaqual' ] += 1
continue
if af and af.aligned and af.aQual >= minaqual and af.iv.chrom in features.chrom_vectors.keys():
for cigop in af.cigar:
if cigop.type != "M":
continue
if reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps():
rs = rs.union( s )
if ar and ar.aligned and ar.aQual >= minaqual and ar.iv.chrom in features.chrom_vectors.keys():
for cigop in ar.cigar:
if cigop.type != "M":
continue
if not reverse:
cigop.ref_iv.strand = reverse_strand( cigop.ref_iv.strand )
for iv, s in features[cigop.ref_iv].steps():
rs = rs.union( s )
set_of_gene_names = set( [ f.name.split(":")[0] for f in rs ] )
if len( set_of_gene_names ) == 0:
counts[ '_empty' ] += 1
elif len( set_of_gene_names ) > 1:
counts[ '_ambiguous' ] += 1
else:
for f in rs:
counts[ f.name ] += 1
num_reads += 1
if num_reads % 100000 == 0:
sys.stderr.write( "%d reads processed.\n" % num_reads )
# Step 3: Write out the results
fout = open( out_file, "w" )
for fn in sorted( counts.keys() ):
fout.write( "%s\t%d\n" % ( fn, counts[fn] ) )
fout.close()
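# Typical invocation (all three paths are placeholders):
#     python dexseq_count.py -p yes -s reverse flattened.gff aligned.sam counts.txt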
|
vipints/oqtans
|
oqtans_tools/DEXSeq/1.6/src/dexseq_count.py
|
Python
|
bsd-3-clause
| 6,000
|
[
"Bioconductor",
"HTSeq"
] |
97d045709a234bcb321ceb68ccf305a6ffb5e727c87c9bf8b475bea803e51431
|
#!/usr/bin/env python
# Copyright (c) 2002-2008 ActiveState Software.
# License: MIT License.
# Author: Trent Mick (trentm at google's mail thing)
"""
Quick directory changing.
Usage:
go <shortcut>[/sub/dir/path] # change directories
# same as "go -c ..."
go -c|-o|-a|-d|-s ... # cd, open, add, delete, set
go --list [<pattern>] # list matching shortcuts
Options:
-h, --help print this help and exit
-V, --version print version info and exit
-c, --cd <path> cd to shortcut path in shell
-s, --set <shortcut> <dir> set a shortcut to <dir>
-a, --add-current <shortcut> add shortcut to current directory
-d, --delete <shortcut> delete the named shortcut
-o, --open <path> open the given shortcut path in
explorer (Windows only)
-l, --list [<pattern>] list current shortcuts
Generally you have a set of directories that you commonly visit.
Typing these paths in full can be a pain. This script allows one to
define a set of directory shortcuts to be able to quickly change to
them. For example, I could define 'ko' to represent
"D:\\trentm\\main\\Apps\\Komodo-devel", then
C:\\> go ko
D:\\trentm\\main\\Apps\\Komodo-devel>
and
C:\\> go ko/test
D:\\trentm\\main\\Apps\\Komodo-devel\\test>
As well, you can always use some standard shortcuts, such as '~'
(home) and '...' (up two dirs).
See <http://code.google.com/p/go-tool/> for more information.
"""
# Dev Notes:
# - Shortcuts are stored in an XML file in your AppData folder.
# On Windows this is typically:
# <AppDataDir>\TrentMick\go\shortcuts.xml
# On Linux (or other UN*X systems) this is typically:
# ~/.go/shortcuts.xml
__version_info__ = (1, 2, 1)
__version__ = '.'.join(map(str, __version_info__))
import os
from os.path import splitext, expanduser, join, exists
import sys
import getopt
import re
import pprint
import codecs
import xml.dom.minidom
#---- exceptions
class GoError(Exception):
pass
class InternalGoError(GoError):
def __str__(self):
return GoError.__str__(self) + """
* * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Please log a bug at *
* http://code.google.com/p/go-tool/issues/list *
* to report this error. Thanks! *
* -- Trent *
* * * * * * * * * * * * * * * * * * * * * * * * * * * *"""
#---- globals
_envvar = "GO_SHELL_SCRIPT"
# On Windows, "console" or "windows" controls how some things behave.
_subsystem = "console"
if sys.platform.startswith("win") and\
os.path.splitext(sys.executable)[0][-1] == 'w':
_subsystem = "windows"
_gDriverFromShell = {
"cmd": """\
@echo off
rem Windows shell driver for 'go' (http://code.google.com/p/go-tool/).
set GO_SHELL_SCRIPT=%TEMP%\__tmp_go.bat
call python -m go %1 %2 %3 %4 %5 %6 %7 %8 %9
if exist %GO_SHELL_SCRIPT% call %GO_SHELL_SCRIPT%
set GO_SHELL_SCRIPT=""",
"sh": """\
# Bash shell driver for 'go' (http://code.google.com/p/go-tool/).
function go {
export GO_SHELL_SCRIPT=$HOME/.__tmp_go.sh
python -m go $*
if [ -f $GO_SHELL_SCRIPT ] ; then
source $GO_SHELL_SCRIPT
fi
unset GO_SHELL_SCRIPT
}""",
}
#---- public module interface
def getShortcutsFile():
"""Return the path to the shortcuts file."""
fname = "shortcuts.xml"
if sys.platform.startswith("win"):
# Favour ~/.go if shortcuts.xml already exists there, otherwise
# favour CSIDL_APPDATA/... if have win32com to *find* that dir.
dname = os.path.expanduser("~/.go")
shortcutsFile = os.path.join(dname, fname)
if not os.path.isfile(shortcutsFile):
try:
from win32com.shell import shellcon, shell
dname = os.path.join(
shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0),
"TrentMick", "Go")
shortcutsFile = os.path.join(dname, fname)
except ImportError:
pass
else:
dname = os.path.expanduser("~/.go")
shortcutsFile = os.path.join(dname, fname)
return shortcutsFile
def getDefaultShortcuts():
"""Return the dictionary of default shortcuts."""
if sys.platform == "win32" and sys.version.startswith("2.3."):
import warnings
warnings.filterwarnings("ignore", module="fcntl", lineno=7)
import tempfile
shortcuts = {
'.': os.curdir,
'..': os.pardir,
'...': os.path.join(os.pardir, os.pardir),
'tmp': tempfile.gettempdir(),
}
try:
shortcuts['~'] = os.environ['HOME']
except KeyError:
pass
return shortcuts
def setShortcut(name, value):
"""Add the given shortcut mapping to the XML database.
<shortcuts version="...">
<shortcut name="..." value="..."/>
</shortcuts>
A value of None deletes the named shortcut.
"""
shortcutsXml = getShortcutsFile()
if os.path.isfile(shortcutsXml):
dom = xml.dom.minidom.parse(shortcutsXml)
else:
dom = xml.dom.minidom.parseString(
'<shortcuts version="1.0"></shortcuts>')
shortcuts = dom.getElementsByTagName("shortcuts")[0]
for s in shortcuts.getElementsByTagName("shortcut"):
if s.getAttribute("name") == name:
if value:
s.setAttribute("value", value)
else:
shortcuts.removeChild(s)
break
else:
if value:
s = dom.createElement("shortcut")
s.setAttribute("name", name)
s.setAttribute("value", value)
shortcuts.appendChild(s)
else:
raise GoError("shortcut '%s' does not exist" % name)
if not os.path.isdir(os.path.dirname(shortcutsXml)):
os.makedirs(os.path.dirname(shortcutsXml))
fout = open(shortcutsXml, 'w')
fout.write(dom.toxml())
fout.close()
def getShortcuts():
"""Return the shortcut dictionary."""
shortcuts = getDefaultShortcuts()
shortcutsXml = getShortcutsFile()
if os.path.isfile(shortcutsXml):
dom = xml.dom.minidom.parse(shortcutsXml)
shortcutsNode = dom.getElementsByTagName("shortcuts")[0]
for shortcutNode in shortcutsNode.getElementsByTagName("shortcut"):
name = shortcutNode.getAttribute("name")
value = shortcutNode.getAttribute("value")
shortcuts[name] = value
return shortcuts
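# Illustrative round-trip sketch (assumes a writable shortcuts file;
# the names are made up):
#   setShortcut("proj", "/home/me/project")
#   getShortcuts()["proj"]    -> "/home/me/project"
#   setShortcut("proj", None) # deletes the entry again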
def resolvePath(path):
"""Return a dir for the given <shortcut>[/<subpath>].
Raises a GoError if the shortcut does not exist.
"""
shortcuts = getShortcuts()
if path:
tagend = path.find('/')
if tagend == -1:
tagend = path.find('\\')
if tagend == -1:
tag, suffix = path, None
else:
tag, suffix = path[:tagend], path[tagend+1:]
try:
target = shortcuts[tag]
except KeyError:
# Bash will expand ~ (used as a shortcut) into the user's
# actual home directory. We still want to support '~' as a
# shortcut in Bash so try to determine if it is likely that
# the user typed it and act accordingly.
home = os.path.expanduser('~')
if path.startswith(home):
tag, suffix = '~', path[len(home)+1:]
target = shortcuts[tag]
elif os.path.isdir(path):
target = ""
suffix = path
else:
raise
if suffix:
target = os.path.join(target, os.path.normpath(suffix))
else:
raise GoError("no path was given")
return target
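# Illustrative sketch (not part of the original tool): how a
# "<shortcut>[/<subpath>]" string is split and joined, mirroring the
# core of resolvePath() with a hypothetical shortcut table.
def _resolve_example():
    shortcuts = {"ko": r"D:\trentm\main\Apps\Komodo-devel"}  # hypothetical
    path = "ko/test"
    tag, _sep, suffix = path.partition("/")
    target = shortcuts[tag]
    if suffix:
        target = os.path.join(target, os.path.normpath(suffix))
    return target  # -> D:\trentm\main\Apps\Komodo-devel\test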
def generateShellScript(scriptName, path=None):
    """Generate a shell script with the given name to change to the
    given shortcut path.
    "scriptName" is the path to the script to create.
    "path" is the shortcut path, i.e. <shortcut>[/<subpath>]. If path is
    None (the default) a no-op script is written.
    """
shortcuts = getShortcuts()
if path is None:
target = None
else:
target = resolvePath(path)
if sys.platform.startswith("win"):
fbat = open(scriptName, 'w')
fbat.write('@echo off\n')
if target:
drive, tail = os.path.splitdrive(target)
if drive:
fbat.write('call %s\n' % drive)
fbat.write('call cd "%s"\n' % target)
fbat.write('title "%s"\n' % target)
fbat.close()
else:
fsh = open(scriptName, 'w')
fsh.write('#!/bin/sh\n')
if target:
fsh.write('cd "%s"\n' % target)
fsh.close()
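# Illustrative sketch of what generateShellScript() writes for a
# shortcut resolving to /home/me/project (sh case):
#   #!/bin/sh
#   cd "/home/me/project"
# The cmd.exe case also switches drive and sets the console title;
# with path=None only the no-op header line is written.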
def printShortcuts(shortcuts, subheader=None):
# Organize the shortcuts into groups.
defaults = [re.escape(s) for s in getDefaultShortcuts().keys()]
groupMap = { # mapping of group regex to group order and title
"^(%s)$" % '|'.join(defaults): (0, "Default shortcuts"),
None: (1, "Custom shortcuts"),
}
grouped = {
# <group title>: [<member shortcuts>...]
}
for shortcut in shortcuts:
for pattern, (order, title) in groupMap.items():
if pattern and re.search(pattern, shortcut):
if title in grouped:
grouped[title].append(shortcut)
else:
grouped[title] = [shortcut]
break
else:
title = "Custom shortcuts"
if title in grouped:
grouped[title].append(shortcut)
else:
grouped[title] = [shortcut]
for memberList in grouped.values(): memberList.sort()
groups = []
    titles = sorted(groupMap.values())
# Construct the table.
table = ""
header = "Go Shortcuts"
if subheader:
header += ": " + subheader
table += ' '*20 + header + '\n'
table += ' '*20 + '='*len(header) + '\n'
for order, title in titles:
if title not in grouped: continue
table += '\n' + title + ":\n"
for shortcut in grouped[title]:
dir = shortcuts[shortcut]
#TODO: Might want to prettily shorten long names.
#if len(dir) > 53:
# dir = dir[:50] + "..."
table += " %-20s %s\n" % (shortcut, dir)
# Display the table.
if _subsystem == "windows":
import win32ui
import win32con
win32ui.MessageBox(table, "Go Shortcuts",
win32con.MB_OK | win32con.MB_ICONINFORMATION)
else:
sys.stdout.write(table)
def error(msg):
if _subsystem == "console":
sys.stderr.write("go: error: %s\n" % msg)
elif _subsystem == "windows" and sys.platform.startswith("win"):
import win32ui
import win32con
win32ui.MessageBox(msg, "Go Error",
win32con.MB_OK | win32con.MB_ICONERROR)
else:
raise ValueError("internal error: unrecognized subsystem, '%s', and "
"platform, '%s'." % (_subsystem, sys.platform))
def _getShell():
if sys.platform == "win32":
#assert "cmd.exe" in os.environ["ComSpec"]
return "cmd"
elif "SHELL" in os.environ:
shell_path = os.environ["SHELL"]
if "/bash" in shell_path or "/sh" in shell_path:
return "sh"
elif "/tcsh" in shell_path or "/csh" in shell_path:
return "csh"
else:
raise InternalGoError("couldn't determine your shell (SHELL=%r)"
% os.environ.get("SHELL"))
def setup():
from os.path import normcase, normpath, join
shell = _getShell()
try:
driver = _gDriverFromShell[shell]
except KeyError:
        raise InternalGoError("don't know how to set up for your shell: %s"
% shell)
# Knowing the user's HOME dir will help later.
nhome = None
if "HOME" in os.environ:
nhome = _normpath(os.environ["HOME"])
elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ:
nhome = _normpath(
os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"])
print("* * *")
if shell == "cmd":
        # Need an install candidate dir for "go.bat".
nprefix = _normpath(sys.prefix)
ncandidates = set()
candidates = []
for dir in os.environ["PATH"].split(os.path.pathsep):
ndir = _normpath(dir)
if ndir.startswith(nprefix):
if ndir not in ncandidates:
ncandidates.add(ndir)
candidates.append(dir)
elif nhome and ndir.startswith(nhome) \
and ndir[len(nhome)+1:].count(os.path.sep) < 2:
if ndir not in ncandidates:
ncandidates.add(ndir)
candidates.append(dir)
#print candidates
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling `go.bat' somewhere on your PATH and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
You'll need a file "go.bat" with the following contents in a directory on
your PATH:
%s""" % _indent(driver))
if candidates:
print("\nCandidate directories are:\n")
for i, dir in enumerate(candidates):
print(" [%s] %s" % (i+1, dir))
print()
answer = _query_custom_answers(
"If you would like this script to create `go.bat' for you in\n"
"one of these directories, enter the number of that\n"
"directory. Otherwise, enter 'no' to not create `go.bat'.",
[str(i+1) for i in range(len(candidates))] + ["&no"],
default="no",
)
if answer == "no":
pass
else:
dir = candidates[int(answer)-1]
path = join(dir, "go.bat")
print("\nCreating `%s'." % path)
print("You should now be able to run `go --help'.")
open(path, 'w').write(driver)
elif shell == "sh":
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling the Bash function `go' and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
You'll need to have the following function in your shell startup script
(e.g. `.bashrc' or `.profile'):
%s
To just play around in your current shell, simply cut and paste this
function.""" % _indent(driver))
candidates = ["~/.bashrc", "~/.bash_profile", "~/.bash_login",
"~/.profile"]
candidates = [c for c in candidates if exists(expanduser(c))]
if candidates:
q = """\
Would you like this script to append `function go' to one of the following
Bash initialization scripts? If so, enter the number of the listed file.
Otherwise, enter `no'."""
for i, path in enumerate(candidates):
q += "\n (%d) %s" % (i+1, path)
answers = [str(i+1) for i in range(len(candidates))] + ["&no"]
print()
answer = _query_custom_answers(q, answers, default="no")
if answer == "no":
pass
else:
path = candidates[int(answer)-1]
xpath = expanduser(path)
f = codecs.open(xpath, 'a', 'utf-8')
try:
f.write('\n\n'+driver)
finally:
f.close()
print()
print("`function go' appended to `%s'." % path)
print("Run `source %s` to enable this for this shell." % path)
print("You should then be able to run `go --help'.")
else:
print("""\
It appears that `go' is not set up properly in your environment. Typing
`go' must end up calling the shell function `go' and *not* `go.py'
directly. This is how `go' can change the directory in your current shell.
The appropriate function for the *Bash* shell is this:
%s
If you know the appropriate translation for your shell (%s) I'd appreciate
your feedback on that so I can update this script. Please add an issue here:
http://code.google.com/p/go-tool/issues/list
Thanks!""" % (_indent(_gDriverFromShell["sh"]), shell))
print("* * *")
# Recipe: query_custom_answers (1.0)
def _query_custom_answers(question, answers, default=None):
"""Ask a question via raw_input() and return the chosen answer.
@param question {str} Printed on stdout before querying the user.
@param answers {list} A list of acceptable string answers. Particular
answers can include '&' before one of its letters to allow a
single letter to indicate that answer. E.g., ["&yes", "&no",
"&quit"]. All answer strings should be lowercase.
@param default {str, optional} A default answer. If no default is
given, then the user must provide an answer. With a default,
just hitting <Enter> is sufficient to choose.
"""
prompt_bits = []
answer_from_valid_choice = {
# <valid-choice>: <answer-without-&>
}
clean_answers = []
for answer in answers:
if '&' in answer and not answer.index('&') == len(answer)-1:
head, tail = answer.split('&', 1)
prompt_bits.append(head.lower()+tail.lower().capitalize())
clean_answer = head+tail
shortcut = tail[0].lower()
else:
prompt_bits.append(answer.lower())
clean_answer = answer
shortcut = None
if default is not None and clean_answer.lower() == default.lower():
prompt_bits[-1] += " (default)"
answer_from_valid_choice[clean_answer.lower()] = clean_answer
if shortcut:
answer_from_valid_choice[shortcut] = clean_answer
clean_answers.append(clean_answer.lower())
# This is what it will look like:
# Frob nots the zids? [Yes (default), No, quit] _
# Possible alternatives:
# Frob nots the zids -- Yes, No, quit? [y] _
# Frob nots the zids? [*Yes*, No, quit] _
# Frob nots the zids? [_Yes_, No, quit] _
# Frob nots the zids -- (y)es, (n)o, quit? [y] _
prompt = " [%s] " % ", ".join(prompt_bits)
leader = question + prompt
if len(leader) + max(len(c) for c in answer_from_valid_choice) > 78:
leader = question + '\n' + prompt.lstrip()
leader = leader.lstrip()
valid_choices = answer_from_valid_choice.keys()
admonishment = "*** Please respond with '%s' or '%s'. ***" \
% ("', '".join(clean_answers[:-1]), clean_answers[-1])
while 1:
sys.stdout.write(leader)
        choice = input().lower()
if default is not None and choice == '':
return default
elif choice in answer_from_valid_choice:
return answer_from_valid_choice[choice]
else:
sys.stdout.write("\n"+admonishment+"\n\n\n")
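# Illustrative usage sketch (hypothetical prompt):
#   answer = _query_custom_answers("Overwrite file?",
#                                  ["&yes", "&no", "&quit"], default="no")
# renders "Overwrite file? [Yes, No (default), Quit] " and accepts a
# full answer, its '&'-marked first letter (y/n/q), or <Enter> for
# the default.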
# Recipe: indent (0.2.1)
def _indent(s, width=4, skip_first_line=False):
"""_indent(s, [width=4]) -> 's' indented by 'width' spaces
The optional "skip_first_line" argument is a boolean (default False)
indicating if the first line should NOT be indented.
"""
lines = s.splitlines(1)
indentstr = ' '*width
if skip_first_line:
return indentstr.join(lines)
else:
return indentstr + indentstr.join(lines)
def _normpath(path):
from os.path import normcase, normpath
n = normcase(normpath(path))
if n.endswith(os.path.sep):
n = n[:-1]
elif os.path.altsep and n.endswith(os.path.altsep):
n = n[:-1]
return n
#---- mainline
def main(argv):
    # Must write out a no-op shell script before any error can happen,
    # otherwise the script from the previous run could be executed again.
try:
shellScript = os.environ[_envvar]
except KeyError:
if _subsystem == "windows":
pass # Don't complain about missing console setup.
return setup()
else:
generateShellScript(shellScript) # no-op, overwrite old one
# Parse options
try:
shortopts = "hVcsadl"
longopts = ['help', 'version', 'cd', 'set', 'add-current',
'delete', 'list']
if sys.platform.startswith("win"):
shortopts += "o"
longopts.append("open")
optlist, args = getopt.getopt(argv[1:], shortopts, longopts)
except getopt.GetoptError as ex:
msg = ex.msg
if ex.opt in ('d', 'dump'):
msg += ": old -d|--dump option is now -l|--list"
sys.stderr.write("go: error: %s.\n" % msg)
sys.stderr.write("See 'go --help'.\n")
return 1
action = "cd"
for opt, optarg in optlist:
if opt in ('-h', '--help'):
sys.stdout.write(__doc__)
return 0
elif opt in ('-V', '--version'):
sys.stdout.write("go %s\n" % __version__)
return 0
elif opt in ('-c', '--cd'):
action = "cd"
elif opt in ('-s', '--set'):
action = "set"
elif opt in ('-a', '--add-current'):
action = "add"
elif opt in ('-d', '--delete'):
action = "delete"
elif opt in ('-l', '--list'):
action = "list"
elif opt in ("-o", "--open"):
action = "open"
# Parse arguments and do specified action.
if action == "add":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args[0], os.getcwd()
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "delete":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args[0], None
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "set":
if len(args) != 2:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
name, value = args
try:
setShortcut(name, value)
except GoError as ex:
error(str(ex))
return 1
elif action == "cd":
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
#error("Usage: go [options...] shortcut[/subpath]")
return 1
path = args[0]
if _subsystem == "console":
try:
generateShellScript(shellScript, path)
except KeyError as ex:
error("Unrecognized shortcut: '%s'" % str(ex))
return 1
except GoError as ex:
error(str(ex))
return 1
elif _subsystem == "windows" and sys.platform.startswith("win"):
try:
dir = resolvePath(path)
except GoError as ex:
error("Error resolving '%s': %s" % (path, ex))
return 1
try:
comspec = os.environ["COMSPEC"]
except KeyError:
error("Could not determine shell. No COMSPEC environment "
"variable.")
return 1
argv = [comspec, "/k", # Does command.com support '/k'?
"cd", "/D", '"%s"' % dir]
if os.path.basename(comspec).lower() == "cmd.exe":
argv += ["&&", "title", '%s' % dir]
os.spawnv(os.P_NOWAIT, comspec, argv)
else:
error("Internal error: subsystem is 'windows' and platform is "
"not win32")
return 1
elif action == "list":
if len(args) == 0:
printShortcuts(getShortcuts())
elif len(args) == 1:
pattern = args[0].lower()
shortcuts = getShortcuts()
s = {}
for name, value in shortcuts.items():
if name.lower().find(pattern) != -1:
s[name] = value
printShortcuts(s, "Matching '%s'" % pattern)
else:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
elif action == "open" and sys.platform.startswith("win"):
if len(args) != 1:
error("Incorrect number of arguments. argv: %s" % argv)
return 1
path = args[0]
try:
dir = resolvePath(path)
except GoError as ex:
error("Error resolving '%s': %s" % (path, ex))
return 1
import win32api
try:
explorerExe, offset = win32api.SearchPath(None, "explorer.exe")
except win32api.error as ex:
error("Could not find 'explorer.exe': %s" % ex)
return 1
os.spawnv(os.P_NOWAIT, explorerExe, [explorerExe, '/E,"%s"' % dir])
else:
        error("Internal Error: unknown action: '%s'" % action)
return 1
if __name__ == "__main__":
if _subsystem == "windows":
try:
retval = main(sys.argv)
except:
import traceback
tb = ''.join(traceback.format_exception(*sys.exc_info()))
error(tb)
else:
retval = main(sys.argv)
sys.exit(retval)
|
trentm/go-tool
|
lib/go.py
|
Python
|
mit
| 25,855
|
[
"VisIt"
] |
8ced7defcf796b22bba8d5762a7d1a94fdb1fad5110be9c1e5f20c58a13587d9
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager.  If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import decimal
import logging
import os
import shutil
import tempfile
# Django
from django.conf import settings
from django.core.cache import cache
from django.test import TestCase
from django.urls import (
NoReverseMatch,
reverse
)
from django.utils.translation import activate
# wger
from wger.utils.constants import TWOPLACES
STATUS_CODES_FAIL = (302, 403, 404)
def get_reverse(url, kwargs={}):
"""
Helper function to get the reverse URL
"""
try:
url = reverse(url, kwargs=kwargs)
except NoReverseMatch:
# URL needs special care and doesn't need to be reversed here,
# everything was already done in the individual test case
url = url
return str(url)
def get_user_list(users):
"""
Helper function that returns a list with users to test
"""
if isinstance(users, tuple):
return users
else:
return [users]
def delete_testcase_add_methods(cls):
"""
Helper function that dynamically adds test methods.
This is a bit of a hack, but it's the easiest way of making sure that
all the setup and teardown work is performed for each test user (and,
    most importantly for us, that the database is reset every time).
This must be called if the testcase has more than one success user
"""
    for user in get_user_list(cls.user_fail):
        def test_unauthorized(self, user=user):  # default arg binds user per iteration
            self.user_login(user)
            self.delete_object(fail=True)
        setattr(cls, f'test_unauthorized_{user}', test_unauthorized)
    for user in get_user_list(cls.user_success):
        def test_authorized(self, user=user):  # default arg binds user per iteration
            self.user_login(user)
            self.delete_object(fail=False)
        setattr(cls, f'test_authorized_{user}', test_authorized)
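# Illustrative sketch (hypothetical testcase) of how the helper is used
# when a delete testcase has more than one success user:
#
# class WorkoutDeleteTestCase(WgerDeleteTestCase):
#     object_class = Workout               # hypothetical model
#     url = 'manager:workout:delete'       # hypothetical URL name
#     pk = 3
#     user_success = ('admin', 'manager')  # several users -> use the helper
#     user_fail = ('test', 'member1')
#
# delete_testcase_add_methods(WorkoutDeleteTestCase)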
class BaseTestCase(object):
"""
Base test case.
Generic base testcase that is used for both the regular tests and the
REST API tests
"""
fixtures = ('days_of_week',
'gym_config',
'groups',
'setting_repetition_units',
'setting_weight_units',
'test-languages',
'test-licenses',
'test-gyms',
'test-gymsconfig',
'test-user-data',
'test-gym-adminconfig.json',
'test-gym-userconfig.json',
'test-admin-user-notes',
'test-gym-user-documents',
'test-contracts',
'test-apikeys',
'test-weight-data',
'test-equipment',
'test-exercises',
'test-exercise-images',
'test-weight-units',
'test-ingredients',
'test-nutrition-data',
'test-nutrition-diary',
'test-workout-data',
'test-workout-session',
'test-schedules')
current_user = 'anonymous'
current_password = ''
def setUp(self):
"""
Overwrite some of Django's settings here
"""
# Don't check reCaptcha's entries
os.environ['RECAPTCHA_TESTING'] = 'True'
        # Explicitly set the locale to en, otherwise the CI might cause problems
activate('en')
# Set logging level
logging.disable(logging.INFO)
# Set MEDIA_ROOT
self.media_root = tempfile.mkdtemp()
settings.MEDIA_ROOT = self.media_root
def tearDown(self):
"""
Reset settings
"""
del os.environ['RECAPTCHA_TESTING']
cache.clear()
# Clear MEDIA_ROOT folder
shutil.rmtree(self.media_root)
class WgerTestCase(BaseTestCase, TestCase):
"""
Testcase to use with the regular website
"""
user_success = 'admin'
"""
A list of users to test for success. For convenience, a string can be used
as well if there is only one user.
"""
user_fail = 'test'
"""
A list of users to test for failure. For convenience, a string can be used
as well if there is only one user.
"""
def user_login(self, user='admin'):
"""
Login the user, by default as 'admin'
"""
password = f'{user}{user}'
self.client.login(username=user, password=password)
self.current_user = user
self.current_password = password
def user_logout(self):
"""
Visit the logout page
"""
self.client.logout()
self.current_user = 'anonymous'
def compare_fields(self, field, value):
current_field_class = field.__class__.__name__
# Standard types, simply compare
if current_field_class in ('unicode', 'str', 'int', 'float', 'time', 'date'):
self.assertEqual(field, value)
# boolean, convert
elif current_field_class == 'bool':
self.assertEqual(field, bool(value))
# decimal, convert
elif current_field_class == 'Decimal':
# TODO: use FOURPLACES when routine branch is merged
self.assertEqual(field.quantize(TWOPLACES), decimal.Decimal(value).quantize(TWOPLACES))
# Related manager and SortedManyToMany, iterate
elif current_field_class in ('ManyRelatedManager', 'SortedRelatedManager'):
for j in field.all():
self.assertIn(j.id, value)
# Uploaded image or file, compare the filename
elif current_field_class in ('ImageFieldFile', 'FieldFile'):
self.assertEqual(os.path.basename(field.name), os.path.basename(value.name))
# Other objects (from foreign keys), check the ID
else:
self.assertEqual(field.id, value)
def post_test_hook(self):
"""
Hook to add some more specific tests after the basic add or delete
operations are finished
"""
pass
class WgerDeleteTestCase(WgerTestCase):
"""
    Tests deleting an object as an authorized user, a different one and a logged-out
one. This assumes the delete action is only triggered with a POST request and
GET will only show a confirmation dialog.
"""
pk = None
url = ''
object_class = ''
def delete_object(self, fail=False):
"""
Helper function to test deleting a workout
"""
# Only perform the checks on derived classes
if self.__class__.__name__ == 'WgerDeleteTestCase':
return
# Fetch the delete page
count_before = self.object_class.objects.count()
response = self.client.get(get_reverse(self.url, kwargs={'pk': self.pk}))
count_after = self.object_class.objects.count()
self.assertEqual(count_before, count_after)
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
else:
self.assertEqual(response.status_code, 200)
# Try deleting the object
response = self.client.post(get_reverse(self.url, kwargs={'pk': self.pk}))
count_after = self.object_class.objects.count()
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertEqual(count_before, count_after)
else:
self.assertEqual(response.status_code, 302)
self.assertEqual(count_before - 1, count_after)
self.assertRaises(self.object_class.DoesNotExist,
self.object_class.objects.get,
pk=self.pk)
# TODO: the redirection page might not have a language prefix (e.g. /user/login
# instead of /en/user/login) so there is an additional redirect
# # The page we are redirected to doesn't trigger an error
# response = self.client.get(response['Location'])
# self.assertEqual(response.status_code, 200)
self.post_test_hook()
def test_delete_object_anonymous(self):
"""
Tests deleting the object as an anonymous user
"""
self.delete_object(fail=True)
def test_delete_object_authorized(self):
"""
Tests deleting the object as the authorized user
"""
if not isinstance(self.user_success, tuple):
self.user_login(self.user_success)
self.delete_object(fail=False)
def test_delete_object_other(self):
"""
Tests deleting the object as the unauthorized, logged in users
"""
if self.user_fail and not isinstance(self.user_success, tuple):
for user in get_user_list(self.user_fail):
self.user_login(user)
self.delete_object(fail=True)
class WgerEditTestCase(WgerTestCase):
"""
Tests editing an object as an authorized user, a different one and a logged out
one.
"""
object_class = ''
url = ''
pk = None
data = {}
data_ignore = ()
fileupload = None
"""
If the form requires a file upload, specify the field name and the file path
here in a list or tuple:
    ['fieldname', 'path']
"""
def edit_object(self, fail=False):
"""
Helper function to test editing an object
"""
# Only perform the checks on derived classes
if self.__class__.__name__ == 'WgerEditTestCase':
return
# Fetch the edit page
response = self.client.get(get_reverse(self.url, kwargs={'pk': self.pk}))
entry_before = self.object_class.objects.get(pk=self.pk)
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertTemplateUsed('login.html')
else:
self.assertEqual(response.status_code, 200)
# Try to edit the object
# Special care if there are any file uploads
if self.fileupload:
field_name = self.fileupload[0]
filepath = self.fileupload[1]
with open(filepath, 'rb') as testfile:
self.data[field_name] = testfile
url = get_reverse(self.url, kwargs={'pk': self.pk})
response = self.client.post(url, self.data)
else:
response = self.client.post(get_reverse(self.url, kwargs={'pk': self.pk}), self.data)
entry_after = self.object_class.objects.get(pk=self.pk)
# Check the results
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertTemplateUsed('login.html')
self.assertEqual(entry_before, entry_after)
else:
self.assertEqual(response.status_code, 302)
# Check that the data is correct
for i in [j for j in self.data if j not in self.data_ignore]:
current_field = getattr(entry_after, i)
self.compare_fields(current_field, self.data[i])
# TODO: the redirection page might not have a language prefix (e.g. /user/login
# instead of /en/user/login) so there is an additional redirect
# # The page we are redirected to doesn't trigger an error
# response = self.client.get(response['Location'])
# self.assertEqual(response.status_code, 200)
self.post_test_hook()
def test_edit_object_anonymous(self):
"""
Tests editing the object as an anonymous user
"""
self.edit_object(fail=True)
def test_edit_object_authorized(self):
"""
Tests editing the object as the authorized users
"""
for user in get_user_list(self.user_success):
self.user_login(user)
self.edit_object(fail=False)
def test_edit_object_other(self):
"""
Tests editing the object as the unauthorized, logged in users
"""
if self.user_fail:
for user in get_user_list(self.user_fail):
self.user_login(user)
self.edit_object(fail=True)
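# Illustrative sketch (hypothetical subclass): a concrete edit test only
# fills in the class attributes; the test_edit_* methods above do the
# actual work.
#
# class WeightUnitEditTestCase(WgerEditTestCase):
#     object_class = WeightUnit        # hypothetical model
#     url = 'core:weight-unit:edit'    # hypothetical URL name
#     pk = 1
#     data = {'name': 'kg'}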
class WgerAddTestCase(WgerTestCase):
"""
Tests adding an object as an authorized user, a different one and a logged out
one.
"""
object_class = ''
url = ''
pk_before = None
pk_after = None
anonymous_fail = True
data = {}
data_ignore = ()
fileupload = None
"""
If the form requires a file upload, specify the field name and the file path
here in a list or tuple:
    ['fieldname', 'path']
"""
def add_object(self, fail=False):
"""
Helper function to test adding an object
"""
# Only perform the checks on derived classes
if self.__class__.__name__ == 'WgerAddTestCase':
return
# Fetch the add page
response = self.client.get(get_reverse(self.url))
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
else:
self.assertEqual(response.status_code, 200)
# Enter the data
count_before = self.object_class.objects.count()
self.pk_before = self.object_class.objects.all().order_by('id').last().pk
# Special care if there are any file uploads
if self.fileupload:
field_name = self.fileupload[0]
filepath = self.fileupload[1]
with open(filepath, 'rb') as testfile:
self.data[field_name] = testfile
response = self.client.post(get_reverse(self.url), self.data)
else:
response = self.client.post(get_reverse(self.url), self.data)
count_after = self.object_class.objects.count()
self.pk_after = self.object_class.objects.all().order_by('id').last().pk
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertEqual(self.pk_before, self.pk_after)
self.assertEqual(count_before, count_after)
else:
self.assertEqual(response.status_code, 302)
self.assertGreater(self.pk_after, self.pk_before)
entry = self.object_class.objects.get(pk=self.pk_after)
# Check that the data is correct
for i in [j for j in self.data if j not in self.data_ignore]:
current_field = getattr(entry, i)
self.compare_fields(current_field, self.data[i])
self.assertEqual(count_before + 1, count_after)
# TODO: the redirection page might not have a language prefix (e.g. /user/login
# instead of /en/user/login) so there is an additional redirect
# # The page we are redirected to doesn't trigger an error
# response = self.client.get(response['Location'])
# self.assertEqual(response.status_code, 200)
self.post_test_hook()
def test_add_object_anonymous(self):
"""
Tests adding the object as an anonymous user
"""
if self.user_fail:
self.add_object(fail=self.anonymous_fail)
def test_add_object_authorized(self):
"""
Tests adding the object as the authorized users
"""
for user in get_user_list(self.user_success):
self.user_login(user)
self.add_object(fail=False)
def test_add_object_other(self):
"""
Tests adding the object as the unauthorized, logged in users
"""
if self.user_fail:
for user in get_user_list(self.user_fail):
                self.user_login(user)
self.add_object(fail=True)
class WgerAccessTestCase(WgerTestCase):
"""
Tests accessing a URL per GET as an authorized user, an unauthorized one and
a logged out one.
"""
url = ''
anonymous_fail = True
def access(self, fail=True):
# Only perform the checks on derived classes
if self.__class__.__name__ == 'WgerAccessTestCase':
return
response = self.client.get(get_reverse(self.url))
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
# TODO: the redirection page might not have a language prefix (e.g. /user/login
# instead of /en/user/login) so there is an additional redirect
# if response.status_code == 302:
# # The page we are redirected to doesn't trigger an error
# response = self.client.get(response['Location'])
# self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 200)
def test_access_anonymous(self):
"""
Tests accessing the URL as an anonymous user
"""
self.user_logout()
self.access(fail=self.anonymous_fail)
def test_access_authorized(self):
"""
Tests accessing the URL as the authorized users
"""
for user in get_user_list(self.user_success):
self.user_login(user)
self.access(fail=False)
def test_access_other(self):
"""
Tests accessing the URL as the unauthorized, logged in users
"""
if self.user_fail:
for user in get_user_list(self.user_fail):
self.user_login(user)
self.access(fail=True)
self.user_logout()
|
rolandgeider/wger
|
wger/core/tests/base_testcase.py
|
Python
|
agpl-3.0
| 17,904
|
[
"VisIt"
] |
9eaeff44bd839dfa93465356ba0a14e645408bb0fc878a3017a54f228eb804dd
|
"""
Fuzzy Logic Base Class
E. Jucovy, 2005
based on fuzzy.py by D.S. Blank, 2001
"""
__author__ = "E. Jucovy, Douglas Blank <dblank@brynmawr.edu>"
__version__ = "$Revision: 2458 $"
from math import exp
class FuzzyOperators:
def Union(self,a,b):
pass
def Intersection(self,a,b):
pass
def Complement(self,a):
pass
def __str__(self):
return self.__class__.__name__
class StandardFuzzyOperators(FuzzyOperators):
def Union(self,a,b):
return max(a,b)
def Intersection(self,a,b):
return min(a,b)
def Complement(self,a):
return 1.0 - a
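# Illustrative sketch: with the standard (Zadeh) operators,
#   Union(0.3, 0.8)        -> 0.8  (max)
#   Intersection(0.3, 0.8) -> 0.3  (min)
#   Complement(0.3)        -> 0.7  (1 - a)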
class FuzzyError(TypeError):
def __init__(self, st=""):
TypeError.__init__(self, st)
class FuzzyValue:
"""
Fuzzy value class
Contains a floating-point value between 0 and 1
"""
def __init__(self, val, ops = StandardFuzzyOperators()):
"""
Initialize the fuzzy value
If val is less than zero or greater than one, limit val to those bounds
"""
self.Ops = ops
if val < 0:
self.Value = 0.0
elif val > 1:
self.Value = 1.0
else:
self.Value = float(val)
def __and__(self, other):
"""
Return the intersection of self and other
"""
return FuzzyValue(self.Ops.Intersection(self.Value, float(other)), self.Ops)
def __or__(self, other):
"""
Return the union of self and other
"""
return FuzzyValue(self.Ops.Union(self.Value, float(other)), self.Ops)
def __neg__(self):
"""
Return the complement of self
"""
return FuzzyValue(self.Ops.Complement(self.Value), self.Ops)
__invert__ = __neg__
def __add__(self, other):
return FuzzyValue(self.Value + float(other), self.Ops)
__radd__ = __add__
def __sub__(self, other):
return FuzzyValue(self.Value - float(other), self.Ops)
def __rsub__(self, other):
return FuzzyValue(float(other) - self.Value, self.Ops)
def __mul__(self, other):
return FuzzyValue(self.Value * float(other), self.Ops)
__rmul__ = __mul__
def __div__(self, other):
return FuzzyValue(self.Value / float(other), self.Ops)
def __rdiv__(self, other):
return FuzzyValue(float(other) / self.Value, self.Ops)
def __cmp__(self, other):
return self.Value - float(other)
def __float__(self):
return self.Value
defuzzify = __float__
def __str__(self):
return "<Fuzzy value " + str(self.Value) + ">"
# def alphaCut(self, alpha):
# return self.Value >= alpha
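# Illustrative usage sketch:
#   a, b = FuzzyValue(0.3), FuzzyValue(0.8)
#   float(a & b)  -> 0.3  # intersection via Ops (min)
#   float(a | b)  -> 0.8  # union via Ops (max)
#   float(-a)     -> 0.7  # complement (1 - a)
#   FuzzyValue(1.7).Value -> 1.0  # out-of-range values are clamped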
class FuzzyClassifier:
"""
Fuzzy classifier class with a membership function and parameters.
Membership function can be set on initialization or with
setMembershipFunction(function). The membership function should
return a value between 0 and 1 (values outside that range will be
automatically set to either 0 or 1).
All relevant parameters used by the membership function can be set
on initialization or by setParams()
"""
def __init__(self, func=None, fName=None, ops=StandardFuzzyOperators(), **kwargs):
"""
Initialize the FuzzyClassifier
First argument is a reference to the membership function
Second argument is the name of the membership function
Remaining arguments are parameter names and values
"""
self.myParams = {}
if func.__class__ is FuzzyClassifier:
self.Function = func.Function
self.myParams = func.myParams
elif not func is None:
self.Function = func
else:
def Halfway():
return 0.5
self.Function = Halfway
if func.__class__ is FuzzyClassifier:
self.FunctionName = func.FunctionName
elif not fName is None:
self.FunctionName = fName
else:
self.FunctionName = self.Function.__name__
self.__name__ = "FuzzyClassifier:%s" % self.FunctionName
self.Ops = ops
for i in kwargs:
self.myParams[i] = kwargs[i]
def __call__(self, *args):
"""
Apply the fuzzy classifier to a set of values
Return a FuzzyValue with value Function(args)
"""
# get params and function arguments
mydict = {}
args = list(args)
funcargs = list(self.Function.func_code.co_varnames
[:self.Function.func_code.co_argcount])
for i in funcargs:
try:
mydict[i] = self.myParams[i]
except KeyError:
try:
mydict[i] = args.pop(0)
except IndexError:
raise TypeError("Too few arguments to FuzzyClassifier %s()" \
% (self.FunctionName))
x = len(mydict) - self.Function.func_code.co_argcount
if x == -1:
raise FuzzyError("1 undefined parameter to FuzzyClassifier %s" \
% self.FunctionName)
elif x < 0:
raise FuzzyError("%d undefined parameters to FuzzyClassifier %s" \
% (-x, self.FunctionName))
return FuzzyValue(self.Function(**mydict), self.Ops)
def safesetParams(self, **kwargs):
"""
Set one or more of the classifier's parameters
without overwriting any predefined parameters.
If a parameter is already defined safesetParams
will not overwrite it.
"""
keys = kwargs.keys()
for key in keys:
if not self.myParams.has_key(key):
self.myParams[key] = kwargs[key]
def setParams(self, **kwargs):
"""
Set one or more of the classifier's parameters
without deleting predefined parameters; but will
overwrite parameters.
"""
keys = kwargs.keys()
for key in keys:
self.myParams[key] = kwargs[key]
def resetParams(self, **kwargs):
"""
Set all the classifier's parameters at once and
delete all parameters that might already exist
"""
self.myParams = kwargs
def getParam(self, *names):
"""
Return one or more of the classifier's parameters
"""
retlist = []
for name in names:
try:
retlist.append(self.myParams[name])
except KeyError:
retlist.append(None)
return retlist
def setFunction(self, func, fName = None):
"""
Set the classifier's membership function
First (required) parameter is the membership function itself.
Second (optional) parameter is a name for the function, recommended,
e.g., for lambda functions; if this is not set then the function's
actual name will be used
"""
if not fName is None:
self.FunctionName = fName
elif func.__class__ is FuzzyClassifier:
self.FunctionName = func.FunctionName
else:
self.FunctionName = func.__name__
if func.__class__ is FuzzyClassifier:
self.Function = func.Function
self.safesetParams(**func.myParams)
else:
self.Function = func
self.__name__ = "FuzzyClassifier:%s" % self.FunctionName
def __str__(self):
return "FuzzyClassifier instance with\n\tmembership function " + \
"%s\n\tparameters %s\n\toperator set %s" \
% (self.FunctionName, self.myParams, self.Ops)
def __nonzero__(self):
return True
def __rshift__(self, val):
"""
Return a FuzzyValue classified under a linear rising
membership function whose parameters are decided by the
current FuzzyClassifier's parameters
Implemented for backwards compatibility
"""
keys = self.myParams.keys()
if len(keys) > 2:
print "This may not do what you expect."
a = self.myParams[keys[0]]
b = self.myParams[keys[1]]
if a > b:
aFC = RisingFuzzy(b,a)
else:
aFC = RisingFuzzy(a,b)
return aFC(val)
def __lshift__(self, val):
"""
Return a FuzzyValue classified under a linear falling
membership function whose parameters are decided by the
current FuzzyClassifier's parameters
Implemented for backwards compatibility
"""
keys = self.myParams.keys()
if len(keys) > 2:
print "This may not do what you expect."
a = self.myParams[keys[0]]
b = self.myParams[keys[1]]
if a > b:
aFC = FallingFuzzy(b,a)
else:
aFC = FallingFuzzy(a,b)
return aFC(val)
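# Illustrative usage sketch of the factory functions below:
#   warm = RisingFuzzy(10, 30)       # mu(10) = 0.0, mu(30) = 1.0
#   float(warm(25))                  -> 0.75
#   hot = TriangleFuzzy(25, 35, 45)  # peak at 35
#   float(warm(25) & hot(25))        -> 0.0  # min(0.75, 0.0)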
def Fuzzy(a,b):
"""
Create a new FuzzyClassifier with two parameters and
default membership function
Implemented for backwards compatibility
"""
return FuzzyClassifier(a=a,b=b)
def RisingFuzzy(a,b):
"""
Create a new FuzzyClassifier with a linear rising membership
function and parameters a,b
a: lower bound, mu(a) = 0.0
b: upper bound, mu(b) = 1.0
"""
def __upMF(x0,a,b):
"""
A linear rising membership function
"""
if x0 < a:
return 0.0
elif x0 > b:
return 1.0
else:
return float(x0 - a) / (b - a)
return FuzzyClassifier(__upMF, "Rising", a=a, b=b)
def FallingFuzzy(a,b):
"""
Create a new FuzzyClassifier with a linear falling membership
function and parameters a,b
a: lower bound, mu(a) = 1.0
b: upper bound, mu(b) = 0.0
"""
def __downMF(x0,a,b):
"""
A linear falling membership function
"""
if x0 < a:
return 1.0
elif x0 > b:
return 0.0
else:
return float(b - x0) / (b - a)
return FuzzyClassifier(__downMF, "Falling", a=a, b=b)
def TriangleFuzzy(a,b,c):
"""
Create a new FuzzyClassifier with a linear triangular membership
function and parameters a,b,c
a: lower bound, mu(a) = 0.0
b: midpoint, mu(b) = 1.0
c: upper bound, mu(c) = 0.0
"""
def __triMF(x0,a,b,c):
"""
A linear triangular membership function
"""
if x0 < a:
return 0.0
elif x0 < b:
return float(x0 - a) / (b - a)
elif x0 < c:
return float(c - x0) / (c - b)
else:
return 0.0
return FuzzyClassifier(__triMF, "Triangle", a=a, b=b, c=c)
def TrapezoidFuzzy(a,b,c,d):
"""
Create a new FuzzyClassifier with a linear trapezoidal membership
function and parameters a,b,c,d
a: lower bound, mu(a) = 0.0
b: start of top, mu(b) = 1.0
c: end of top, mu(c) = 1.0
d: upper bound, mu(d) = 0.0
"""
def __trapMF(x0,a,b,c,d):
"""
A linear trapezoidal membership function
"""
if x0 < a:
return 0.0
elif x0 < b:
return float(x0 - a) / (b - a)
elif x0 < c:
return 1.0
elif x0 < d:
return float(d - x0) / (d - c)
else:
return 0.0
return FuzzyClassifier(__trapMF, "Trapezoid", a=a, b=b, c=c, d=d)
def GaussianFuzzy(c,s):
"""
Create a new FuzzyClassifier with a gaussian membership function
and parameters c,s
c: center (mean), mu(c) = 1.0
s: spread (standard deviation)
"""
def __GaussMF(x0,c,s):
"""
A Gaussian membership function
"""
return exp(pow((float(x0) - c) / s, 2.0) / -2.0)
return FuzzyClassifier(__GaussMF, "Gaussian", c=c, s=s)
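# Note on __GaussMF above: it evaluates exp(-((x0 - c) / s)**2 / 2),
# the classic normal-curve shape: mu(c) = 1.0 at the center, falling
# to about 0.61 one spread away (x0 = c +/- s).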
def BellFuzzy(a,b,c):
    """
    Create a new FuzzyClassifier with a generalized bell membership
    function and parameters a,b,c
    a: width, b: slope, c: center, mu(c) = 1.0
    With a large b the transition is very steep, so values are pushed
    toward 0 or 1 (with 0.5 at the crossover points).
    """
    def __BellMF(x,a,b,c):
        return 1.0 / (1.0 + pow(abs((x - c) / a), 2.0*b))
return FuzzyClassifier(__BellMF, "BellCurve", a=a,b=b,c=c)
# NOT YET
def SigmoidFuzzy(a,c):
"""
Create a new FuzzyClassifier with a sigmoid membership function
and parameters a,c
I wouldn't use this yet if I were you.
"""
    def __SigmoidMF(x0, a, c):
        """
        I wouldn't use this yet if I were you
        """
        return 1.0 / (1.0 + exp(-a * (float(x0) - c)))
return FuzzyClassifier(__SigmoidMF, "Sigmoid", a=a, c=c)
# NOT YET TESTED
def LRFuzzy(f,g,c,a,b):
"""
Create a new FuzzyClassifier with a left-right membership
function and parameters f,g,c,a,b
f: left-side function (or FuzzyClassifier)
g: right-side function (or FuzzyClassifier)
c: switching point
"""
    def __LRMF(x0, f, g, c, a, b):
        """
        I wouldn't use this yet if I were you
        """
        if x0 <= c:
            return f((c - x0) / a)
        return g((x0 - c) / b)
return FuzzyClassifier(__LRMF, "Left"+f.__name__+"Right"+g.__name__,
f=f,g=g,c=c,a=a,b=b)
if __name__ == '__main__': # some tests
f = BellFuzzy(10,20,30)
for i in range(100):
print str(i) + ", " + str(float(f(i)))
|
emilydolson/forestcat
|
pyrobot/brain/fuzzy.py
|
Python
|
agpl-3.0
| 11,951
|
[
"Gaussian"
] |
4b836db948d96eff795c069e6d85f20abf6accc4d00529f526dcbefd3a219c31
|
# -*- coding: utf-8 -*-
"""Various text used throughout the website, e.g. status messages, errors, etc.
"""
# Status Messages
#################
# NOTE: in status messages, newlines are not preserved, so triple-quotes strings
# are ok
# Status message shown at settings page on first login
# (upon clicking primary email confirmation link)
WELCOME_MESSAGE = ('Welcome to the OSF! Please update the following settings. If you need assistance '
'in getting started, please visit the <a href="/getting-started/">Getting Started</a> page.')
REGISTRATION_SUCCESS = '''Registration successful. Please check {email} to confirm your email address.'''
# Shown if registration is turned off in website.settings
REGISTRATION_UNAVAILABLE = 'Registration currently unavailable.'
ALREADY_REGISTERED = '''The email <em>{email}</em> has already been registered.'''
# Shown if user tries to login with an email that is not yet confirmed
UNCONFIRMED = ('This login email has been registered but not confirmed. Please check your email (and spam folder).'
' <a href="/resend/">Click here</a> to resend your confirmation email.')
# Shown upon successful email address confirmation
CONFIRMED_EMAIL = 'Email address confirmation successful.'
# Shown if the user's account is disabled
DISABLED = '''
Log-in failed: Deactivated account.
'''
# Shown on incorrect password attempt
LOGIN_FAILED = '''
Log-in failed. Please try again or reset your password.
'''
# Shown at login page if user tries to access a resource that requires auth
MUST_LOGIN = '''
You must log in to access this resource.
'''
# Shown on logout
LOGOUT = '''
You have successfully logged out.
'''
EMAIL_NOT_FOUND = '''
<strong>{email}</strong> was not found in our records.
'''
# Shown after an unregistered user claims an account and is redirected to the
# settings page
CLAIMED_CONTRIBUTOR = ('<strong>Welcome to the OSF!</strong> Edit your display name below and then check your '
'<a href="/dashboard/">dashboard</a> to see projects to which you have been added as a '
'contributor by someone else.')
# Error Pages
# ###########
# Shown at error page if an expired/revoked email confirmation link is clicked
EXPIRED_EMAIL_CONFIRM_TOKEN = 'This confirmation link has expired. Please <a href="/login/">log in</a> to continue.'
INVALID_EMAIL_CONFIRM_TOKEN = 'This confirmation link is invalid. Please <a href="/login/">log in</a> to continue.'
CANNOT_MERGE_ACCOUNTS_SHORT = 'Cannot Merge Accounts'
CANNOT_MERGE_ACCOUNTS_LONG = 'Accounts cannot be merged due to a possible conflict with add-ons. Please deactivate any add-ons authorized on the account to be merged and try again.'
MERGE_COMPLETE = 'Accounts successfully merged.'
MERGE_CONFIRMATION_REQUIRED_SHORT = 'Confirmation Required: Merge Accounts'
MERGE_CONFIRMATION_REQUIRED_LONG = (
'<p>This email is confirmed to another account. '
'Would you like to merge <em>{user_to_merge.username}</em> with the account '
'<em>{user.username}</em>?<p>'
'<a class="btn btn-success" href="?confirm_merge">Confirm merge</a> '
)
# Node Actions
BEFORE_REGISTER_HAS_POINTERS = (
'This {category} contains links to other projects. Links will be copied '
'into your registration, but the projects that they link to will not be '
'registered. If you wish to register the linked projects, you must fork '
'them from the original project before registering.'
)
BEFORE_FORK_HAS_POINTERS = (
'This {category} contains links to other projects. Links will be copied '
'into your fork, but the projects that they link to will not be forked. '
'If you wish to fork the linked projects, they need to be forked from the '
'original project.'
)
REGISTRATION_INFO = '''
<p>Registration creates a frozen version of the project that can never be edited
or deleted. You can register your project by selecting a registration form, entering
information about your project, and then confirming. You will be
able to continue editing the original project, however, and the frozen version with
time stamps will always be linked to the original.</p>
<ul>
<li>A registration takes the same privacy settings as the project, e.g. a public project results in a public registration.</li>
<li>Before initiating a registration, make sure that the project is in the
state that you wish to freeze. Consider turning links into forks.</li>
<li>Start by selecting a registration form from the list below. You can
hit your browser's back button if the selected form is not
appropriate for your use.</li>
</ul>
'''
BEFORE_REGISTRATION_INFO = '''
Registration cannot be undone, and the archived content and files cannot be
deleted after registration. Please be sure the project is complete and
comprehensive for what you wish to register.
'''
# Nodes: forking, templating, linking
LINK_ACTION = 'Link to this Project'
LINK_DESCRIPTION = """
<p>Linking to this project will reference it in another project, without
creating a copy. The link will always point to the most up-to-date version.</p>
"""
TEMPLATE_ACTION = 'Copy Project Structure'
TEMPLATE_DESCRIPTION = """
<p>This option will create a new project, using this project as a template.
The new project will be structured in the same way, but contain no data.</p>
"""
FORK_ACTION = 'Fork this Project'
FORK_DESCRIPTION = """
<p>Fork this project if you plan to build upon it in your own work.
The new project will be an exact duplicate of this project's current state,
with you as the only contributor.</p>
"""
TEMPLATE_DROPDOWN_HELP = """Start typing to search. Selecting project as
template will duplicate its structure in the new project without importing the
content of that project."""
TEMPLATED_FROM_PREFIX = "Templated from "
# MFR Error handling
ERROR_PREFIX = "Unable to render. <a href='?action=download'>Download</a> file to view it."
SUPPORT = "Contact support@osf.io for further assistance."
# Custom Error Messages w/ support
STATA_VERSION_ERROR = 'Version of given Stata file is not 104, 105, 108, 113 (Stata 8/9), 114 (Stata 10/11) or 115 (Stata 12)<p>{0}</p>'.format(SUPPORT)
BLANK_OR_CORRUPT_TABLE_ERROR = 'Is this a valid instance of this file type?<p>{0}</p>'.format(SUPPORT)
#disk saving mode
DISK_SAVING_MODE = 'Forks, registrations, and uploads to OSF Storage are temporarily disabled while we are undergoing a server upgrade. These features will return shortly.'
|
GaryKriebel/osf.io
|
website/language.py
|
Python
|
apache-2.0
| 6,493
|
[
"VisIt"
] |
55541f0fb02941680d6e5721d75430c44b18149084ba6f31385a40969fe23a53
|
"""Each ElkM1 area will be created as a separate alarm_control_panel."""
from elkm1_lib.const import AlarmState, ArmedStatus, ArmLevel, ArmUpState
from elkm1_lib.util import username
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
ATTR_CHANGED_BY,
FORMAT_NUMBER,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import ElkAttachedEntity, create_elk_entities
from .const import (
ATTR_CHANGED_BY_ID,
ATTR_CHANGED_BY_KEYPAD,
ATTR_CHANGED_BY_TIME,
DOMAIN,
ELK_USER_CODE_SERVICE_SCHEMA,
)
DISPLAY_MESSAGE_SERVICE_SCHEMA = {
vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional("beep", default=False): cv.boolean,
vol.Optional("timeout", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=65535)
),
vol.Optional("line1", default=""): cv.string,
vol.Optional("line2", default=""): cv.string,
}
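# Illustrative sketch (hypothetical YAML service call) matching the
# schema above, assuming the service is registered under the
# integration's "elkm1" domain; the entity_id is made up:
#
#   service: elkm1.alarm_display_message
#   target:
#     entity_id: alarm_control_panel.downstairs
#   data:
#     clear: 2
#     beep: true
#     timeout: 30
#     line1: "Garage door"
#     line2: "still open"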
SERVICE_ALARM_DISPLAY_MESSAGE = "alarm_display_message"
SERVICE_ALARM_ARM_VACATION = "alarm_arm_vacation"
SERVICE_ALARM_ARM_HOME_INSTANT = "alarm_arm_home_instant"
SERVICE_ALARM_ARM_NIGHT_INSTANT = "alarm_arm_night_instant"
SERVICE_ALARM_BYPASS = "alarm_bypass"
SERVICE_ALARM_CLEAR_BYPASS = "alarm_clear_bypass"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ElkM1 alarm platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
elk = elk_data["elk"]
entities = []
create_elk_entities(elk_data, elk.areas, "area", ElkArea, entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_ALARM_ARM_VACATION,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_vacation",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_HOME_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_home_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_NIGHT_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_night_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_DISPLAY_MESSAGE,
DISPLAY_MESSAGE_SERVICE_SCHEMA,
"async_display_message",
)
platform.async_register_entity_service(
SERVICE_ALARM_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_bypass",
)
platform.async_register_entity_service(
SERVICE_ALARM_CLEAR_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_clear_bypass",
)
class ElkArea(ElkAttachedEntity, AlarmControlPanelEntity, RestoreEntity):
"""Representation of an Area / Partition within the ElkM1 alarm panel."""
def __init__(self, element, elk, elk_data):
"""Initialize Area as Alarm Control Panel."""
super().__init__(element, elk, elk_data)
self._elk = elk
self._changed_by_keypad = None
self._changed_by_time = None
self._changed_by_id = None
self._changed_by = None
self._state = None
async def async_added_to_hass(self):
"""Register callback for ElkM1 changes."""
await super().async_added_to_hass()
if len(self._elk.areas.elements) == 1:
for keypad in self._elk.keypads:
keypad.add_callback(self._watch_keypad)
self._element.add_callback(self._watch_area)
# We do not get changed_by back from resync.
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_CHANGED_BY_KEYPAD in last_state.attributes:
self._changed_by_keypad = last_state.attributes[ATTR_CHANGED_BY_KEYPAD]
if ATTR_CHANGED_BY_TIME in last_state.attributes:
self._changed_by_time = last_state.attributes[ATTR_CHANGED_BY_TIME]
if ATTR_CHANGED_BY_ID in last_state.attributes:
self._changed_by_id = last_state.attributes[ATTR_CHANGED_BY_ID]
if ATTR_CHANGED_BY in last_state.attributes:
self._changed_by = last_state.attributes[ATTR_CHANGED_BY]
def _watch_keypad(self, keypad, changeset):
if keypad.area != self._element.index:
return
if changeset.get("last_user") is not None:
self._changed_by_keypad = keypad.name
self._changed_by_time = keypad.last_user_time.isoformat()
self._changed_by_id = keypad.last_user + 1
self._changed_by = username(self._elk, keypad.last_user)
self.async_write_ha_state()
def _watch_area(self, area, changeset):
last_log = changeset.get("last_log")
if not last_log:
return
# user_number only set for arm/disarm logs
if not last_log.get("user_number"):
return
self._changed_by_keypad = None
self._changed_by_id = last_log["user_number"]
self._changed_by = username(self._elk, self._changed_by_id - 1)
self._changed_by_time = last_log["timestamp"]
self.async_write_ha_state()
@property
def code_format(self):
"""Return the alarm code format."""
return FORMAT_NUMBER
@property
def state(self):
"""Return the state of the element."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def extra_state_attributes(self):
"""Attributes of the area."""
attrs = self.initial_attrs()
elmt = self._element
attrs["is_exit"] = elmt.is_exit
attrs["timer1"] = elmt.timer1
attrs["timer2"] = elmt.timer2
if elmt.armed_status is not None:
attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
if elmt.arm_up_state is not None:
attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
if elmt.alarm_state is not None:
attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
attrs[ATTR_CHANGED_BY_KEYPAD] = self._changed_by_keypad
attrs[ATTR_CHANGED_BY_TIME] = self._changed_by_time
attrs[ATTR_CHANGED_BY_ID] = self._changed_by_id
return attrs
@property
def changed_by(self):
"""Last change triggered by."""
return self._changed_by
def _element_changed(self, element, changeset):
elk_state_to_hass_state = {
ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
}
if self._element.alarm_state is None:
self._state = None
elif self._area_is_in_alarm_state():
self._state = STATE_ALARM_TRIGGERED
elif self._entry_exit_timer_is_running():
self._state = (
STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
)
else:
self._state = elk_state_to_hass_state[self._element.armed_status]
def _entry_exit_timer_is_running(self):
return self._element.timer1 > 0 or self._element.timer2 > 0
def _area_is_in_alarm_state(self):
return self._element.alarm_state >= AlarmState.FIRE_ALARM.value
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
self._element.disarm(int(code))
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
async def async_alarm_arm_night(self, code=None):
"""Send arm night command."""
self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
async def async_alarm_arm_home_instant(self, code=None):
"""Send arm stay instant command."""
self._element.arm(ArmLevel.ARMED_STAY_INSTANT.value, int(code))
async def async_alarm_arm_night_instant(self, code=None):
"""Send arm night instant command."""
self._element.arm(ArmLevel.ARMED_NIGHT_INSTANT.value, int(code))
async def async_alarm_arm_vacation(self, code=None):
"""Send arm vacation command."""
self._element.arm(ArmLevel.ARMED_VACATION.value, int(code))
async def async_display_message(self, clear, beep, timeout, line1, line2):
"""Display a message on all keypads for the area."""
self._element.display_message(clear, beep, timeout, line1, line2)
async def async_bypass(self, code=None):
"""Bypass all zones in area."""
self._element.bypass(code)
async def async_clear_bypass(self, code=None):
"""Clear bypass for all zones in area."""
self._element.clear_bypass(code)
|
sander76/home-assistant
|
homeassistant/components/elkm1/alarm_control_panel.py
|
Python
|
apache-2.0
| 9,831
|
[
"Elk"
] |
b77f8b7daa4182f2b862869a00a9196d1031688c4575c797f4ef3c550ebda75e
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004 Martin Hawlisch
# Copyright (C) 2005-2006, 2008 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"Export Events to vCalendar."
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import sys
from time import localtime
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".ExportVCal")
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.utils.db import family_name
from gramps.gen.lib import Date, EventType
from gramps.gui.glade import Glade
class CalendarWriter(object):
def __init__(self, database, filename, user, option_box=None):
self.db = database
self.filename = filename
self.user = user
self.option_box = option_box
        if callable(self.user.callback):  # only report progress if a real callback was given
self.update = self.update_real
else:
self.update = self.update_empty
self.plist = {}
self.flist = {}
self.count = 0
self.oldval = 0
self.persons_details_done = []
self.persons_notes_done = []
self.person_ids = {}
if option_box:
self.option_box.parse_options()
self.db = option_box.get_filtered_database(self.db)
def update_empty(self):
pass
def update_real(self):
self.count += 1
newval = int(100 * self.count / self.total)
if newval != self.oldval:
self.user.callback(newval)
self.oldval = newval
def writeln(self, text):
        self.g.write('%s\n' % text.encode('ascii', 'backslashreplace').decode('ascii'))
def export_data(self, filename):
        self.dirname = os.path.dirname(filename)
        try:
            self.g = open(filename, "w")
except IOError as msg:
msg2 = _("Could not create %s") % filename
self.user.notify_error(msg2, str(msg))
return False
except:
self.user.notify_error(_("Could not create %s") % filename)
return False
self.writeln("BEGIN:VCALENDAR")
self.writeln("PRODID:-//GNU//Gramps//EN")
self.writeln("VERSION:1.0")
self.total = (len([x for x in self.db.iter_person_handles()]) +
len([x for x in self.db.iter_family_handles()]))
for key in self.db.iter_person_handles():
self.write_person(key)
self.update()
for key in self.db.iter_family_handles():
self.write_family(key)
self.update()
self.writeln("")
self.writeln("END:VCALENDAR")
self.g.close()
return True
def write_family(self, family_handle):
family = self.db.get_family_from_handle(family_handle)
if family:
for event_ref in family.get_event_ref_list():
event = self.db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE:
m_date = event.get_date_object()
place_handle = event.get_place_handle()
# feature requests 2356, 1657: avoid genitive form
text = _("Marriage of %s") % family_name(family, self.db)
if place_handle:
place = self.db.get_place_from_handle(place_handle)
                    self.write_vevent(text, m_date, place.get_title())
                else:
                    self.write_vevent(text, m_date)
def write_person(self, person_handle):
person = self.db.get_person_from_handle(person_handle)
if person:
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.db.get_event_from_handle(birth_ref.ref)
if birth:
b_date = birth.get_date_object()
place_handle = birth.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(place_handle)
# feature requests 2356, 1657: avoid genitive form
self.write_vevent(_("Birth of %s") %
person.get_primary_name().get_name(),
b_date, place.get_title())
else:
# feature requests 2356, 1657: avoid genitive form
self.write_vevent(_("Birth of %s") %
person.get_primary_name().get_name(),
b_date)
death_ref = person.get_death_ref()
if death_ref:
death = self.db.get_event_from_handle(death_ref.ref)
if death:
d_date = death.get_date_object()
place_handle = death.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(place_handle)
# feature requests 2356, 1657: avoid genitive form
self.write_vevent(_("Death of %s") %
person.get_primary_name().get_name(),
d_date,
place.get_title())
else:
# feature requests 2356, 1657: avoid genitive form
self.write_vevent(_("Death of %s") %
person.get_primary_name().get_name(),
d_date)
def format_single_date(self, subdate, thisyear, cal):
retval = ""
(day, month, year, sl) = subdate
if thisyear:
year = localtime().tm_year
if not cal == Date.CAL_GREGORIAN:
return ""
if year > 0:
if month > 0:
if day > 0:
retval = "%s%02d%02d" % (year, month, day)
return retval
def format_date(self, date, thisyear=0):
retval = ""
if date.get_modifier() == Date.MOD_TEXTONLY:
return ""
elif not date.is_empty():
mod = date.get_modifier()
            cal = date.get_calendar()
if mod == Date.MOD_SPAN or mod == Date.MOD_RANGE:
start = self.format_single_date(date.get_start_date(),
thisyear, cal)
end = self.format_single_date(date.get_stop_date(),
thisyear, cal)
if start and end:
retval = "DTSTART:%sT000001\nDTEND:%sT235959" % (start,
end)
elif mod == Date.MOD_NONE:
start = self.format_single_date(date.get_start_date(),
thisyear, cal)
if start:
retval = "DTSTART:%sT000001\nDTEND:%sT235959" % (start,
start)
return retval
def write_vevent(self, event_text, date, location=""):
date_string = self.format_date(date)
        if date_string != "":
self.writeln("")
self.writeln("BEGIN:VEVENT")
self.writeln("SUMMARY:%s" % event_text)
if location:
self.writeln("LOCATION:%s" % location)
self.writeln(date_string)
self.writeln("END:VEVENT")
date_string = self.format_date(date, 1)
self.writeln("")
self.writeln("BEGIN:VEVENT")
self.writeln("SUMMARY:"+_("Anniversary: %s") % event_text)
if location:
self.writeln("LOCATION:%s" % location)
self.writeln("RRULE:YD1 #0")
self.writeln(date_string)
self.writeln("END:VEVENT")
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def exportData(database, filename, user, option_box=None):
cw = CalendarWriter(database, filename, user, option_box)
return cw.export_data(filename)
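#-------------------------------------------------------------------------
#
# Illustrative sketch (not part of the exporter): the DTSTART/DTEND layout
# that format_date() emits for a plain Gregorian date, built here from
# stand-in values instead of a real gramps Date object.
#
#-------------------------------------------------------------------------
def _example_vevent_dates():
    year, month, day = 2004, 5, 17
    start = "%s%02d%02d" % (year, month, day)  # same layout as format_single_date()
    return "DTSTART:%sT000001\nDTEND:%sT235959" % (start, start)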
|
Forage/Gramps
|
gramps/plugins/export/exportvcalendar.py
|
Python
|
gpl-2.0
| 9,612
|
[
"Brian"
] |
5ccb09a320db529f760291cbac75cfe399c8f35ce213baceb7107b80d4f3ba15
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flares.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Oct 2017
'''
This contains a stellar flare model from Pitkin+ 2014.
http://adsabs.harvard.edu/abs/2014MNRAS.445.2268P
'''
import numpy as np
##################################
## MODEL AND RESIDUAL FUNCTIONS ##
##################################
def flare_model(flareparams, times, mags, errs):
'''This is a flare model function, similar to Kowalski+ 2011.
From the paper by Pitkin+ 2014:
http://adsabs.harvard.edu/abs/2014MNRAS.445.2268P
Parameters
----------
flareparams : list of float
This defines the flare model::
[amplitude,
flare_peak_time,
rise_gaussian_stdev,
decay_time_constant]
where:
`amplitude`: the maximum flare amplitude in mags or flux. If flux, then
amplitude should be positive. If mags, amplitude should be negative.
`flare_peak_time`: time at which the flare maximum happens.
`rise_gaussian_stdev`: the stdev of the gaussian describing the rise of
the flare.
`decay_time_constant`: the time constant of the exponential fall of the
flare.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate
model mags.
Returns
-------
(modelmags, times, mags, errs) : tuple
Returns the model mags evaluated at the input time values. Also returns
the input `times`, `mags`, and `errs`.
'''
(amplitude, flare_peak_time,
rise_gaussian_stdev, decay_time_constant) = flareparams
zerolevel = np.median(mags)
modelmags = np.full_like(times, zerolevel)
# before peak gaussian rise...
modelmags[times < flare_peak_time] = (
mags[times < flare_peak_time] +
amplitude * np.exp(
-((times[times < flare_peak_time] -
flare_peak_time) *
(times[times < flare_peak_time] -
flare_peak_time)) /
(2.0*rise_gaussian_stdev*rise_gaussian_stdev)
)
)
# after peak exponential decay...
modelmags[times > flare_peak_time] = (
mags[times > flare_peak_time] +
amplitude * np.exp(
-((times[times > flare_peak_time] -
flare_peak_time)) /
(decay_time_constant)
)
)
return modelmags, times, mags, errs
def flare_model_residual(flareparams, times, mags, errs):
'''
This returns the residual between model mags and the actual mags.
Parameters
----------
flareparams : list of float
This defines the flare model::
[amplitude,
flare_peak_time,
rise_gaussian_stdev,
decay_time_constant]
where:
`amplitude`: the maximum flare amplitude in mags or flux. If flux, then
amplitude should be positive. If mags, amplitude should be negative.
`flare_peak_time`: time at which the flare maximum happens.
`rise_gaussian_stdev`: the stdev of the gaussian describing the rise of
the flare.
`decay_time_constant`: the time constant of the exponential fall of the
flare.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate
model mags.
Returns
-------
np.array
The residuals between the input `mags` and generated `modelmags`,
weighted by the measurement errors in `errs`.
'''
modelmags, _, _, _ = flare_model(flareparams, times, mags, errs)
return (mags - modelmags)/errs
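#############################
## ILLUSTRATIVE USAGE ONLY ##
#############################
def _example_flare_model_inputs():
    '''A minimal hedged sketch (not in the original module): evaluate the model
    and its residual on a flat synthetic light curve. Parameter values here are
    arbitrary and only chosen to exercise the rise and decay branches.
    '''
    times = np.linspace(0.0, 10.0, 500)
    mags = np.full_like(times, 12.0)  # flat light curve at 12.0 mag
    errs = np.full_like(times, 0.01)
    # amplitude is negative because a flare brightens the star in magnitudes
    flareparams = [-1.0, 5.0, 0.2, 1.5]
    modelmags, _, _, _ = flare_model(flareparams, times, mags, errs)
    residuals = flare_model_residual(flareparams, times, mags, errs)
    return modelmags, residuals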
|
lgbouma/astrobase
|
astrobase/lcmodels/flares.py
|
Python
|
mit
| 3,809
|
[
"Gaussian"
] |
b3b3f900300125528fa09481d8c9164a621db0a16b8be975a0b99320c284e2cb
|
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# Please contact Eucalyptus Systems, Inc., 6750 Navigator Way, Goleta
# CA 93117, USA or visit http://www.eucalyptus.com/licenses/ if you need
# additional information or have any questions.
import sys
import subprocess
import os
from urlparse import urlparse
from boto.sts import STSConnection
class EsiBase(object):
def __init__(self, region='localhost'):
self.vars = {}
self.region = region
self._load_vars()
self.check_environment()
self._set_environment()
def get_sts_connection(self):
token_url = urlparse(self.vars['TOKEN_URL'])
STSConnection.DefaultRegionEndpoint = token_url.hostname
port = token_url.port if token_url.port else 80
return STSConnection(is_secure=False, port=port, path=token_url.path,
aws_access_key_id=self.get_env_var('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=self.get_env_var('AWS_SECRET_ACCESS_KEY'))
def _set_environment(self):
for i in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"):
os.environ[i] = self.vars[i]
def list_system_accounts(self):
accounts = {}
process = subprocess.Popen(['/usr/bin/euare-accountlist', '-U', self.vars['AWS_IAM_URL']],
stdout=subprocess.PIPE)
for line in process.stdout:
split = line.strip().split(None, 1)
if len(split) > 1 and (split[0].startswith('(eucalyptus)')
or split[0].startswith('eucalyptus')):
accounts[split[0]] = split[1]
return accounts
def _load_vars(self):
EsiBase._check_binary(['euca-generate-environment-config'])
process = subprocess.Popen(['euca-generate-environment-config', '--simple', '--region',
self.region], stdout=subprocess.PIPE)
t = process.communicate()
if process.returncode == 0:
for var in t[0].split('\n'):
v = var.split("=")
if len(v) == 2:
self.vars[v[0]] = v[1] if v[1] != '' else None
# assume eucalyptus account since this is a system tool
if self.get_env_var('EC2_USER_ID') is None:
self.vars['EC2_USER_ID'] = self.list_system_accounts()['eucalyptus']
# if EUCA_PROPERTIES_URL is not set let's assume that the command is invoked on CLC
if self.get_env_var('EUCA_PROPERTIES_URL') is None:
self.vars['EUCA_PROPERTIES_URL'] = 'http://127.0.0.1:8773/services/Properties/'
@staticmethod
def _check_binary(binary):
try:
with open(os.devnull, 'w') as nullfile:
subprocess.call(binary, env=os.environ.copy(), stdout=nullfile)
except OSError:
print >> sys.stderr, "Error: cannot execute '{0}' binary.".format(" ".join(binary))
print >> sys.stderr, "Make sure EUCALYPTUS path variable is exported."
sys.exit(1)
def check_environment(self):
if self.vars["EC2_URL"] is None or \
self.vars["AWS_ACCESS_KEY_ID"] is None or \
self.vars["AWS_SECRET_ACCESS_KEY"] is None:
print >> sys.stderr, "Error: Unable to find EC2_URL, AWS_ACCESS_KEY_ID, or AWS_SECRET_ACCESS_KEY"
print >> sys.stderr, "Make sure your environment is properly configured."
sys.exit(1)
def _set_property(self, property, value):
cmd = ['/usr/bin/euctl', '-U', self.vars['EUCA_PROPERTIES_URL'],
"{0}={1}".format(property, value)]
try:
subprocess.check_call(cmd)
except (OSError, subprocess.CalledProcessError):
print >> sys.stderr, "Error: failed to set property {0} to {1}".format(property, value)
print >> sys.stderr, "To set it manually run this command:"
print >> sys.stderr, " ".join(cmd)
sys.exit(1)
def _get_property(self, property):
try:
cmd = ['/usr/bin/euctl', '-U', self.vars['EUCA_PROPERTIES_URL'], property]
out = subprocess.Popen(cmd, env=os.environ.copy(), stdout=subprocess.PIPE).communicate()[0]
res = out.split()
return res[2] if len(res) == 3 else None
except OSError:
print >> sys.stderr, "Error: failed to get property {0}.".format(property)
sys.exit(1)
def get_env_var(self, var):
return self.vars[var] if var in self.vars else None
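# Illustrative sketch (not part of the original tool): the VAR=VALUE parsing
# that _load_vars() applies to `euca-generate-environment-config --simple`
# output. Note that lines whose value itself contains '=' split into more
# than two fields and are silently skipped.
def _example_parse_simple_config(output):
    parsed = {}
    for var in output.split('\n'):
        v = var.split("=")
        if len(v) == 2:
            parsed[v[0]] = v[1] if v[1] != '' else None
    return parsed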
|
gholms/eucalyptus-service-image
|
esitoolsupport/esibase.py
|
Python
|
bsd-2-clause
| 5,149
|
[
"VisIt"
] |
b11319f3ce1a55b57e5d7b9ad1a361ad219712a0023ba894f002c4cc8f3b165d
|
from functools import reduce
from determinant import det
from matrix import mat , transpose , reverse , show , subscripts , parse
from combinatorics import C
from information import comparator
def mohanty ( compare , Mo , m , n ) :
"""
>>> compare = comparator( [ ] )
>>> Mo = mat( 0 , 0 )
>>> mohanty( compare , Mo , 0 , 0 )
>>> abs( det( Mo , 0 ) )
1
>>> compare = comparator( [ [ ] , [ ] , [ ] ] )
>>> Mo = mat( 3 , 3 )
>>> mohanty( compare , Mo , 3 , 0 )
>>> abs( det( Mo , 3 ) )
1
>>> compare = comparator( [ [ -1 ] ] )
>>> Mo = mat( 1 , 1 )
>>> mohanty( compare , Mo , 1 , 1 )
>>> abs( det( Mo , 1 ) )
1
>>> compare = comparator( [ [ 1 ] ] )
>>> Mo = mat( 1 , 1 )
>>> mohanty( compare , Mo , 1 , 1 )
>>> abs( det( Mo , 1 ) )
1
>>> compare = comparator( [ [ 0 ] ] )
>>> Mo = mat( 1 , 1 )
>>> mohanty( compare , Mo , 1 , 1 )
>>> abs( det( Mo , 1 ) )
2
>>> compare = comparator( [ [ 0 , 0 ] , [ 0 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
6
>>> compare = comparator( [ [ 0 , 0 ] , [ 0 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
6
>>> compare = comparator( [ [ 1 , 0 ] , [ 1 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
3
>>> compare = comparator( [ [ -1 , -1 ] , [ -1 , -1 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
1
>>> compare = comparator( [ [ -1 , -1 ] , [ 0 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
3
>>> compare = comparator( [ [ 1 , 1 ] , [ 1 , 1 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
1
>>> compare = comparator( [ [ 0 , -1 ] , [ 1 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
4
>>> compare = comparator( [ [ 0 , -1 ] , [ 1 , 1 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
2
>>> compare = comparator( [ [ -1 , -1 ] , [ 1 , 0 ] ] )
>>> Mo = mat( 2 , 2 )
>>> mohanty( compare , Mo , 2 , 2 )
>>> abs( det( Mo , 2 ) )
2
"""
for i , j in subscripts( 0 , m , 0 , m ) :
b = reduce( min , ( t + 1 for t in range( n ) if compare( i , t ) < 0 ) , n + 1 )
a = reduce( max , ( t + 1 for t in range( n ) if compare( j , t ) > 0 ) , 0 )
Mo[i][j] = C( b - a , j - i + 1 )
def compute ( compare , m , n ) :
Mo = mat( m , m )
mohanty( compare , Mo , m , n )
print( "mohanty matrix" )
print( show( Mo ) , end = "" )
d = det( Mo , m )
print( "after gaussian elimination" )
print( show( Mo ) , end = "" )
return d
def main ( lines ) :
M , m , n = parse( lines )
if n > m :
M = transpose( M , m , n )
reverse( M )
m , n = n , m
print( "partial information" )
print( show( M ) , end = "" )
compare = comparator( M )
print( compute( compare , m , n ) )
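# Illustrative sketch (not part of the original module): the 2x2
# all-incomparable doctest case from above, driven directly.
def example ( ) :
    """
    >>> example( )
    6
    """
    compare = comparator( [ [ 0 , 0 ] , [ 0 , 0 ] ] )
    Mo = mat( 2 , 2 )
    mohanty( compare , Mo , 2 , 2 )
    return abs( det( Mo , 2 ) )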
if __name__ == "__main__" :
import fileinput
main( fileinput.input( ) )
|
aureooms/mupi
|
mohanty.py
|
Python
|
agpl-3.0
| 3,115
|
[
"Gaussian"
] |
7b771751ae51df83806065109961a97eb12a6a49f86704fd4c496df80a5d6e92
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Sparse MoE utils using pjit.
Many thanks to Parker Schuh and Sudip Roy, for helping with the einsum
implementation, and to Jonathan Heek for helping writing the lift transform.
The following abbreviations are sometimes used to name the size of different
axes in the arrays.
G = num_groups. It must be a multiple of num_experts.
S = group_size.
E = num_experts.
C = capacity.
K = num_selected_experts. It must be <= num_experts.
"""
import abc
from typing import Any, Callable, Optional, Tuple
import flax.core.lift
import flax.linen.transforms
import flax.struct
import jax
from jax.experimental import pjit
import jax.numpy as jnp
import vmoe.partitioning
Array = jnp.ndarray
PartitionSpec = pjit.PartitionSpec
with_sharding_constraint = vmoe.partitioning.with_sharding_constraint
class BaseDispatcher(abc.ABC):
"""Base class for different dispatcher implementations.
Dispatchers are in charge of preparing the data to be dispatched to the
different experts, and then combining the outputs of each expert for each
item. There are different ways of doing so with different memory / flops /
runtime implications when running on actual hardware.
In all cases, when dispatching data, they take an array of shape (G, S, ...).
The groups (G) are dispatched independently of each other. The items in each
group (S) will take place in the buffer (of capacity C) of items to be
processed by each expert (E). The output is an array of shape (E, G * C, ...)
with the elements to be processed by each expert.
When combining data, they take an array of shape (E, G * C, ...) and output
an array of shape (G, S, ...). Notice that the trailing dimensions (...) at
combine might not be the same as the ones at dispatch (e.g. if the expert
changes the shape of the data).
"""
@abc.abstractmethod
def dispatch(self, data: Array) -> Array:
"""Dispatches data to experts.
Args:
data: (G, S, ...) array with the data to dispatch to the experts.
Returns:
(E, G * C, ...) array with the data to be processed by each expert.
"""
@abc.abstractmethod
def combine(self, data: Array) -> Array:
"""Combines outputs from multiple experts.
Args:
data: (E, G * C, ...) array with the output data from each expert.
Returns:
(G, S, ...) array with the combined outputs from each expert.
"""
@flax.struct.dataclass
class EinsumDispatcher(BaseDispatcher):
"""Dispatcher using Einsum.
Attributes:
combine_weights: (G, S, E, C) array with the combine weights for each item
(G, S) for each expert (E) and buffer position (C).
dispatch_weights: Optional. (G, S, E, C) array with the dispatch weights of
each item (G, S) for each expert (E) and buffer position (C).
partition_spec: Optional. PartitionSpec used to constrain the sharding of
the data arrays. By default (None), no sharding constraint is specified.
einsum_precision: Optional. Precision used in all the einsums (e.g.
combining the outputs of different experts).
"""
combine_weights: Array
dispatch_weights: Optional[Array] = None
partition_spec: Optional[PartitionSpec] = flax.struct.field(
pytree_node=False, default=None)
einsum_precision: jax.lax.Precision = flax.struct.field(
pytree_node=False, default=jax.lax.Precision.DEFAULT)
def dispatch(self, data: Array) -> Array:
dispatch_weights = (
self.combine_weights > 0
if self.dispatch_weights is None else self.dispatch_weights)
data = jnp.einsum("GSEC,GS...->GEC...", dispatch_weights, data,
precision=self.einsum_precision)
return _dispatch(data, self.partition_spec)
def combine(self, data: Array) -> Array:
"""Combines data from experts according to combine_weights."""
num_groups, _, _, _ = self.combine_weights.shape
data = _receive(data, num_groups, self.partition_spec)
return jnp.einsum("GSEC,GEC...->GS...", self.combine_weights, data,
precision=self.einsum_precision)
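def _einsum_dispatcher_shape_example():
  """Hedged shape sketch; illustrative only, not part of the original module.
  Round-trips a toy batch through an EinsumDispatcher with G=2 groups, S=3
  items per group, E=2 experts and capacity C=2. Assumes execution outside
  pjit, where the sharding constraints are expected to be no-ops.
  """
  combine_weights = jax.random.uniform(jax.random.PRNGKey(0), (2, 3, 2, 2))
  dispatcher = EinsumDispatcher(combine_weights=combine_weights)
  x = jnp.ones((2, 3, 5))     # (G, S, ...) items to route.
  y = dispatcher.dispatch(x)  # (E, G * C, ...) == (2, 4, 5).
  z = dispatcher.combine(y)   # (G, S, ...) == (2, 3, 5).
  return y.shape, z.shape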
@flax.struct.dataclass
class ExpertIndicesDispatcher(BaseDispatcher):
"""Dispatcher using scatter/gather with (expert, buffer) indices.
Attributes:
indices: (G, S, K, 2) integer array with the (expert, buffer) indices of
each item (G, S) and their K-selected experts. The tuple (expert, buffer)
for each item is represented in the last dimension (of size 2).
combine_weights: (G, S, K) array with the combine weights of each item
(G, S) and their K-selected experts.
num_experts: Number of experts.
capacity: Capacity of each expert's buffer per group.
partition_spec: Optional. PartitionSpec used to constrain the sharding of
the data arrays. By default (None), no sharding constraint is specified.
einsum_precision: Optional. Precision used in all the einsums (e.g.
combining the outputs of different experts).
"""
indices: Array # (G, S, K, 2).
combine_weights: Array # (G, S, K).
num_experts: int = flax.struct.field(pytree_node=False)
capacity: int = flax.struct.field(pytree_node=False)
partition_spec: Optional[PartitionSpec] = flax.struct.field(
pytree_node=False, default=None)
einsum_precision: jax.lax.Precision = flax.struct.field(
pytree_node=False, default=jax.lax.Precision.DEFAULT)
def dispatch(self, data: Array) -> Array:
num_groups, _, num_selected_experts, _ = self.indices.shape
_, _, *item_shape = data.shape
data = jnp.repeat(data, num_selected_experts, axis=1)
indices = self.indices.reshape(num_groups, -1, 2)
shape = (self.num_experts, self.capacity, *item_shape)
data = jax.vmap(lambda x, i: _scatter_nd(i, x, shape))(data, indices)
return _dispatch(data, self.partition_spec)
def combine(self, data: Array) -> Array:
num_groups, _, _ = self.combine_weights.shape
data = _receive(data, num_groups, self.partition_spec)
data = jax.vmap(lambda x, i: x[i[:, :, 0], i[:, :, 1]])(data, self.indices)
# Mask invalid gathered data.
mask = jnp.logical_and(self.indices[..., 0] < self.num_experts,
self.indices[..., 1] < self.capacity)
data = data * mask.reshape(mask.shape + (1,) * (data.ndim - 3))
# Weighted sum of the outputs of the K-selected experts for each item.
return jnp.einsum("GSK...,GSK->GS...", data, self.combine_weights,
                      precision=self.einsum_precision)
@flax.struct.dataclass
class Bfloat16Dispatcher(BaseDispatcher):
"""Dispatcher wrapper converting data to bfloat16 to save bandwidth."""
dispatcher: BaseDispatcher
def dispatch(self, data: Array) -> Array:
dtype = data.dtype
data = _cast_to_bfloat16(data)
data = self.dispatcher.dispatch(data)
return data.astype(dtype)
def combine(self, data: Array) -> Array:
dtype = data.dtype
data = _cast_to_bfloat16(data)
data = self.dispatcher.combine(data)
return data.astype(dtype)
def get_top_experts_per_item_dispatcher(gates: Array, name: str,
num_selected_experts: int,
capacity: int, batch_priority: bool,
**dispatcher_kwargs) -> BaseDispatcher:
"""Returns a dispatcher implementing Top-Experts-Per-Item routing.
For each item, the `num_selected_experts` experts with the largest gating
score are selected in a greedy fashion. However, because each expert has a
fixed `capacity`, if more items than `capacity` select a given expert some of
the assignments will be ignored. All top-1 choices have priority over top-2
choices and so on. In addition, the choices that are ignored also depend on
`batch_priority`. If it is False, the "Vanilla" algorithm is used, meaning
that items in earlier positions of the array have priority. If it is True, the
"Batch Priority Routing" algorithm (see https://arxiv.org/abs/2106.05974) is
used, which gives more priority to the items whose largest score is greater.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
name: String with the type of dispatcher to use (supported values are
"einsum" and "indices").
num_selected_experts: Maximum number of experts to select per each item.
capacity: Maximum number of items processed by each expert.
batch_priority: Whether to use batch priority routing or not.
**dispatcher_kwargs: Additional arguments for the dispatcher object.
Returns:
A dispatcher.
"""
fn_map = {
"einsum": _get_top_experts_per_item_einsum_dispatcher,
"indices": _get_top_experts_per_item_expert_indices_dispatcher,
}
if name not in fn_map:
raise ValueError(f"Unknown dispatcher type: {name!r}")
return fn_map[name](gates, num_selected_experts, capacity, batch_priority,
**dispatcher_kwargs)
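def _top_experts_dispatcher_example():
  """Hedged usage sketch; illustrative only, not part of the original module.
  Builds a top-1 "einsum" dispatcher for S=4 items and E=2 experts with
  capacity 2. The resulting weights are per-group, i.e. shaped (S, E, C);
  adding the leading group axis before dispatch/combine (e.g. via vmap) is
  assumed to happen elsewhere.
  """
  gates = jax.nn.softmax(jax.random.normal(jax.random.PRNGKey(0), (4, 2)))
  dispatcher = get_top_experts_per_item_dispatcher(
      gates, name="einsum", num_selected_experts=1, capacity=2,
      batch_priority=False)
  return dispatcher.combine_weights.shape  # (4, 2, 2).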
def sparse_moe_spmd(target: flax.linen.transforms.Target,
split_rngs: bool = False,
has_aux: bool = False,
methods=None):
"""Lift transformation that wraps a target with a Sparse MoE using SPMD.
SPMD stands for "Single Program, Multiple Data", meaning that all experts
actually implement the same function (program), but use different data
(inputs and parameters). Thus, a single target to "expertify" is given.
When an instance of a Linen module wrapped with this transformation is called,
it expects one additional argument at the beginning, a "dispatcher"
(see `BaseDispatcher`). This "dispatcher" is used to prepare the arguments to
be processed by each "expert". The "target" is wrapped with vmap and applied
to different sets of parameters and inputs. Finally, the "dispatcher" combines
the outputs of all experts applied to each given item.
By default, all experts will be initialized using the same parameters. If you
want to initialize each expert differently, use "split_rngs = True".
If the target has any auxiliary outputs (e.g. metrics) that should not be
combined, these can be returned by using "has_aux = True".
Args:
target: A target to wrap with a Sparse MoE (e.g. a flax.linen.Module) with
methods passed via the `methods` argument.
split_rngs: If True, splits the RNGs passed to each expert.
has_aux: If the target returns any auxiliary output that should not be
combined, set this to True.
methods: Methods from the target to wrap with a Sparse MoE. By default,
the "__call__" method will be wrapped.
Returns:
A transformed target.
"""
def wrapper(expert_fn: Callable[..., Any]):
def transformed(scopes, dispatcher, *inputs):
# Prepare inputs to be processed by each expert.
inputs = jax.tree_map(dispatcher.dispatch, inputs)
# Wrap the target with vmap, to pass different parameters and inputs to
# each expert.
outputs = flax.core.lift.vmap(
expert_fn,
in_axes=0,
out_axes=0,
variable_axes={"params": 0},
split_rngs={"params": split_rngs})(scopes, *inputs)
# Combine outputs.
if has_aux:
outputs, aux = outputs
outputs = jax.tree_map(dispatcher.combine, outputs)
return (outputs, aux) if has_aux else outputs
return transformed
return flax.linen.transforms.lift_transform(wrapper, target, methods=methods)
def _cast_to_bfloat16(x: Array) -> Array:
return x.astype(jnp.bfloat16) if jnp.issubdtype(x.dtype, jnp.floating) else x
def _convert_partition_spec(spec):
if spec is not None and not isinstance(spec, PartitionSpec):
spec = (spec,) if isinstance(spec, str) else tuple(spec)
spec = PartitionSpec(*spec)
return spec
def _dispatch(data: Array, partition_spec: Optional[PartitionSpec]) -> Array:
"""Dispatches data to experts using all_to_all."""
partition_spec = _convert_partition_spec(partition_spec)
num_groups, num_experts, capacity, *item_shape = data.shape
data = with_sharding_constraint(data, partition_spec)
data = data.reshape(num_experts, num_groups // num_experts, num_experts,
capacity, *item_shape)
data = jnp.swapaxes(data, 0, 2)
data = data.reshape(-1, *item_shape)
data = with_sharding_constraint(data, partition_spec)
return data.reshape(num_experts, num_groups * capacity, *item_shape)
def _receive(data: Array, num_groups: int,
partition_spec: Optional[PartitionSpec]) -> Array:
"""Receives data from experts using all_to_all."""
partition_spec = _convert_partition_spec(partition_spec)
  num_experts, num_groups_times_capacity, *item_shape = data.shape
  capacity = num_groups_times_capacity // num_groups
data = data.reshape(num_experts * num_groups, capacity, *item_shape)
data = with_sharding_constraint(data, partition_spec)
data = data.reshape(num_experts, num_groups // num_experts, num_experts,
capacity, *item_shape)
data = jnp.swapaxes(data, 0, 2)
data = data.reshape(num_groups, num_experts, capacity, *item_shape)
data = with_sharding_constraint(data, partition_spec)
return data
def _scatter_nd(indices, updates, shape):
"""Jax implementation of tf.scatter_nd.
Notes:
- The updates are cumulative, ie. if multiple indices point to the
same position, the output value at this position is accumulated.
- We rely on the fact that out-of-range indices will be quietly ignored and
don't raise any error. This breaks what JAX index ops specify
(https://jax.readthedocs.io/en/latest/jax.ops.html), but makes the code
easier.
Args:
indices: An int matrix of (i, j, ...) indices with shape [B, ndim].
updates: An array of data points with shape [B, ...].
shape: An int vector with the dimensions of the output array of size [ndim].
Returns:
An array of shape `shape` with updated values at given indices.
"""
# See: https://www.tensorflow.org/api_docs/python/tf/scatter_nd.
zeros = jnp.zeros(shape, updates.dtype)
key = tuple(jnp.moveaxis(indices, -1, 0))
return zeros.at[key].add(updates)
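def _scatter_nd_example():
  """Hedged sketch (illustrative only) of the _scatter_nd semantics above:
  duplicate indices accumulate and out-of-range indices are dropped.
  """
  indices = jnp.array([[0, 0], [0, 0], [5, 0]])  # Two hits on (0, 0); (5, 0) is out of range.
  updates = jnp.array([1.0, 2.0, 7.0])
  return _scatter_nd(indices, updates, (2, 1))   # [[3.], [0.]].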
def _get_top_experts_per_item_common(
gates: Array, num_selected_experts: int,
batch_priority: bool) -> Tuple[Array, Array, Array]:
"""Returns common arrays used by Top-Experts-Per-Item routing.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
num_selected_experts: Maximum number of experts to select per item.
batch_priority: Whether to use batch priority routing or not.
Returns:
- `combine_weights`, with shape (S, K) with the weights used to
combine the outputs of the K-selected experts for each item.
- `expert_index`, with shape (S, K) containing the expert_index for each of
the K-selected experts for each item.
- `buffer_index`, with shape (S, K, E) containing the buffer index for each
item and selected expert.
"""
group_size, num_experts = gates.shape
combine_weights, expert_index = jax.lax.top_k(gates, num_selected_experts)
if batch_priority:
# Sort items according to their maximum routing weight. The permutation will
# be reversed later, so no need to permute combine_weights here.
perm = jnp.argsort(-combine_weights[:, 0])
expert_index = expert_index[perm]
# (K * S,). Make K the leading axis to ensure that top-1 choices have priority
# over top-2 choices and so on. Flatten array for cumsum.
expert_index = jnp.swapaxes(expert_index, 0, 1).ravel()
# (K * S, E). Convert expert indices to a one-hot array.
expert_one_hot = jax.nn.one_hot(expert_index, num_experts, dtype=jnp.int32)
# (K * S, E) -> (K, S, E) -> (S, K, E). Use cumsum to compute the buffer idx
# within each experts' buffer.
buffer_index = jnp.cumsum(expert_one_hot, axis=0) * expert_one_hot - 1
buffer_index = buffer_index.reshape(-1, group_size, num_experts)
buffer_index = jnp.swapaxes(buffer_index, 0, 1)
# (K, S) -> (S, K). Revert expert_index to the original shape.
expert_index = jnp.swapaxes(expert_index.reshape(-1, group_size), 0, 1)
if batch_priority:
# Permute the items to their original order.
inv_perm = jnp.argsort(perm)
expert_index = expert_index[inv_perm]
buffer_index = buffer_index[inv_perm]
return combine_weights, expert_index, buffer_index
def _get_top_experts_per_item_einsum_dispatcher(
gates: Array, num_selected_experts: int, capacity: int,
batch_priority: bool, **dispatcher_kwargs) -> EinsumDispatcher:
"""Returns an EinsumDispatcher performing Top-Experts-Per-Item routing.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
num_selected_experts: Maximum number of experts to select per each item.
capacity: Maximum number of items processed by each expert.
batch_priority: Whether to use batch priority routing or not.
**dispatcher_kwargs: Additional arguments for the EinsumDispatcher.
Returns:
An EinsumDispatcher object.
"""
_, _, buffer_idx = _get_top_experts_per_item_common(
gates, num_selected_experts, batch_priority)
# (S, K, E) -> (S, E). Select the only buffer index for each (item, expert).
buffer_idx = jnp.max(buffer_idx, axis=1)
# (S, E, C). Convert the buffer indices to a one-hot matrix. We rely on the
# fact that indices < 0 or >= capacity will be ignored by the dispatcher.
dispatch_weights = jax.nn.one_hot(buffer_idx, capacity, dtype=jnp.bool_)
einsum_precision = dispatcher_kwargs.get("einsum_precision",
jax.lax.Precision.DEFAULT)
combine_weights = jnp.einsum(
"SE,SEC->SEC", gates, dispatch_weights, precision=einsum_precision)
return EinsumDispatcher(
combine_weights=combine_weights,
dispatch_weights=dispatch_weights,
**dispatcher_kwargs)
def _get_top_experts_per_item_expert_indices_dispatcher(
gates: Array, num_selected_experts: int, capacity: int,
batch_priority: bool, **dispatcher_kwargs) -> ExpertIndicesDispatcher:
"""Returns an ExpertIndicesDispatcher performing Top-Experts-Per-Item routing.
Args:
gates: (S, E) array with the gating values for each (item, expert).
These values will also be used as combine_weights for the selected pairs.
num_selected_experts: Maximum number of experts to select per each item.
capacity: Maximum number of items processed by each expert.
batch_priority: Whether to use batch priority routing or not.
**dispatcher_kwargs: Additional arguments for the ExpertIndicesDispatcher.
Returns:
An ExpertIndicesDispatcher object.
"""
_, num_experts = gates.shape
combine_weights, expert_idx, buffer_idx = _get_top_experts_per_item_common(
gates, num_selected_experts, batch_priority)
# (S, K, E) -> (S, K). Select the only buffer index for each (item, k_choice).
buffer_idx = jnp.max(buffer_idx, axis=2)
return ExpertIndicesDispatcher(
indices=jnp.stack([expert_idx, buffer_idx], axis=-1),
combine_weights=combine_weights,
num_experts=num_experts,
capacity=capacity,
**dispatcher_kwargs)
|
google-research/vmoe
|
vmoe/moe.py
|
Python
|
apache-2.0
| 19,848
|
[
"MOE"
] |
6217eb171b9f0fb9fea4994f4a898e31b43e57c5c1edee588179bf692bc6aa0c
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Instructor Dashboard.
"""
import ddt
from bok_choy.promise import EmptyPromise
from six.moves import range
from common.test.acceptance.fixtures.certificates import CertificateConfigFixture
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.common.utils import enroll_user_track
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.instructor_dashboard import (
InstructorDashboardPage,
StudentAdminPage,
StudentSpecificAdmin
)
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from common.test.acceptance.tests.helpers import (
EventsTestMixin,
UniqueCourseTest,
create_multiple_choice_problem,
disable_animations,
get_modal_alert
)
from openedx.core.lib.tests import attr
class BaseInstructorDashboardTest(EventsTestMixin, UniqueCourseTest):
"""
Mixin class for testing the instructor dashboard.
"""
def log_in_as_instructor(self, global_staff=True, course_access_roles=None):
"""
Login with an instructor account.
Args:
            global_staff (bool): Whether the user should be global staff.
            course_access_roles (str[]): List of course access roles that should be assigned to the user.
        Returns:
            username (str)
            user_id (int)
            email (str)
            password (str)
"""
course_access_roles = course_access_roles or []
auto_auth_page = AutoAuthPage(
self.browser, course_id=self.course_id, staff=global_staff, course_access_roles=course_access_roles
)
auto_auth_page.visit()
user_info = auto_auth_page.user_info
return user_info['username'], user_info['user_id'], user_info['email'], user_info['password']
def visit_instructor_dashboard(self):
"""
Visits the instructor dashboard.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
return instructor_dashboard_page
@attr('a11y')
class LMSInstructorDashboardA11yTest(BaseInstructorDashboardTest):
"""
Instructor dashboard base accessibility test.
"""
def setUp(self):
super(LMSInstructorDashboardA11yTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
def test_instructor_dashboard_a11y(self):
self.instructor_dashboard_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.instructor_dashboard_page.a11y_audit.check_for_accessibility_errors()
@ddt.ddt
class BulkEmailTest(BaseInstructorDashboardTest):
"""
End-to-end tests for bulk emailing from instructor dash.
"""
shard = 23
def setUp(self):
super(BulkEmailTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.send_email_page = instructor_dashboard_page.select_bulk_email()
@ddt.data(["myself"], ["staff"], ["learners"], ["myself", "staff", "learners"])
def test_email_queued_for_sending(self, recipient):
self.send_email_page.send_message(recipient)
self.send_email_page.verify_message_queued_successfully()
@attr('a11y')
def test_bulk_email_a11y(self):
"""
Bulk email accessibility tests
"""
self.send_email_page.a11y_audit.config.set_scope([
'#section-send-email'
])
self.send_email_page.a11y_audit.config.set_rules({
"ignore": [
'button-name', # TODO: TNL-5830
'aria-allowed-role', # TODO: AC-936
'color-contrast', # TODO: AC-938
'listitem' # TODO: AC-937
]
})
self.send_email_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=20)
class AutoEnrollmentWithCSVTest(BaseInstructorDashboardTest):
"""
End-to-end tests for Auto-Registration and enrollment functionality via CSV file.
"""
def setUp(self):
super(AutoEnrollmentWithCSVTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.auto_enroll_section = instructor_dashboard_page.select_membership().select_auto_enroll_section()
# Initialize the page objects
self.register_page = CombinedLoginAndRegisterPage(self.browser, start_page="register")
self.dashboard_page = DashboardPage(self.browser)
def test_browse_and_upload_buttons_are_visible(self):
"""
Scenario: On the Membership tab of the Instructor Dashboard, Auto-Enroll Browse and Upload buttons are visible.
Given that I am on the Membership tab on the Instructor Dashboard
Then I see the 'REGISTER/ENROLL STUDENTS' section on the page with the 'Browse' and 'Upload' buttons
"""
self.assertTrue(self.auto_enroll_section.is_file_attachment_browse_button_visible())
self.assertTrue(self.auto_enroll_section.is_upload_button_visible())
def test_enroll_unregister_student(self):
"""
        Scenario: On the Membership tab of the Instructor Dashboard, the Batch Enrollment section is visible.
        Given that I am on the Membership tab on the Instructor Dashboard
        When I enter a student's email and enroll it
        And I log out of the current session
        And I navigate to the registration page and register the student
        Then I see the course in which the student is enrolled.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.auto_enroll_section.fill_enrollment_batch_text_box(email)
self.assertIn(
'Successfully sent enrollment emails to the following users. '
'They will be enrolled once they register:',
self.auto_enroll_section.get_notification_text()
)
LogoutPage(self.browser).visit()
self.register_page.visit()
self.register_page.register(
email=email,
password="123456",
username=username,
full_name="Test User",
country="US",
favorite_movie="Harry Potter",
)
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertEqual(len(course_names), 1)
self.assertIn(self.course_info["display_name"], course_names)
def test_clicking_file_upload_button_without_file_shows_error(self):
"""
Scenario: Clicking on the upload button without specifying a CSV file results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I click the Upload Button without specifying a CSV file
Then I should be shown an Error Notification
And The Notification message should read 'File is not attached.'
"""
self.auto_enroll_section.click_upload_file_button()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "File is not attached.")
def test_uploading_correct_csv_file_results_in_success(self):
"""
Scenario: Uploading a CSV with correct data results in Success.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with correct data and click the Upload Button
Then I should be shown a Success Notification.
"""
self.auto_enroll_section.upload_correct_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_SUCCESS))
def test_uploading_csv_file_with_bad_data_results_in_errors_and_warnings(self):
"""
Scenario: Uploading a CSV with incorrect data results in error and warnings.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with incorrect data and click the Upload Button
Then I should be shown an Error Notification
And a corresponding Error Message.
And I should be shown a Warning Notification
And a corresponding Warning Message.
"""
self.auto_enroll_section.upload_csv_file_with_errors_warnings()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Data in row #2 must have exactly four columns: email, username, full name, and country")
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_WARNING))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_WARNING), "ename (d@a.com): (An account with email d@a.com exists but the provided username ename is different. Enrolling anyway with d@a.com.)")
def test_uploading_non_csv_file_results_in_error(self):
"""
Scenario: Uploading an image file for auto-enrollment results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I select an image file (a non-csv file) and click the Upload Button
Then I should be shown an Error Notification
        And The Notification message should read 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.'
"""
self.auto_enroll_section.upload_non_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Make sure that the file you upload is in CSV format with no extraneous characters or rows.")
@attr('a11y')
def test_auto_enroll_csv_a11y(self):
"""
Auto-enrollment with CSV accessibility tests
"""
self.auto_enroll_section.a11y_audit.config.set_scope([
'#membership-list-widget-tpl'
])
self.auto_enroll_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=10)
class ProctoredExamsTest(BaseInstructorDashboardTest):
"""
End-to-end tests for Proctoring Sections of the Instructor Dashboard.
"""
USERNAME = "STUDENT_TESTER"
EMAIL = "student101@example.com"
def setUp(self):
super(ProctoredExamsTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.studio_course_outline = StudioCourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
course_fixture = CourseFixture(**self.course_info)
course_fixture.add_advanced_settings({
"enable_proctored_exams": {"value": "true"}
})
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1')
)
)
).install()
self.dashboard_page = DashboardPage(self.browser)
self.problem_page = ProblemPage(self.browser)
# Add a verified mode to the course
ModeCreationPage(
self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
min_price=10, suggested_prices='10,20'
).visit()
# Auto-auth register for the course.
self._auto_auth(self.USERNAME, self.EMAIL, False)
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
AutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _login_as_a_verified_user(self):
"""
        Log in as a verified user.
"""
self._auto_auth(self.USERNAME, self.EMAIL, False)
enroll_user_track(self.browser, self.course_id, 'verified')
def _create_a_proctored_exam_and_attempt(self):
"""
Creates a proctored exam and makes the student attempt it so that
the associated allowance and attempts are visible on the Instructor Dashboard.
"""
# Visit the course outline page in studio
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.studio_course_outline.visit()
# open the exam settings to make it a proctored exam.
self.studio_course_outline.open_subsection_settings_dialog()
# select advanced settings tab
self.studio_course_outline.select_advanced_tab()
self.studio_course_outline.make_exam_proctored()
# login as a verified student and visit the courseware.
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
# Start the proctored exam.
self.courseware_page.start_proctored_exam()
def _create_a_timed_exam_and_attempt(self):
"""
Creates a timed exam and makes the student attempt it so that
the associated allowance and attempts are visible on the Instructor Dashboard.
"""
# Visit the course outline page in studio
LogoutPage(self.browser).visit()
self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
self.studio_course_outline.visit()
# open the exam settings to make it a proctored exam.
self.studio_course_outline.open_subsection_settings_dialog()
# select advanced settings tab
self.studio_course_outline.select_advanced_tab()
self.studio_course_outline.make_exam_timed()
# login as a verified student and visit the courseware.
LogoutPage(self.browser).visit()
self._login_as_a_verified_user()
self.courseware_page.visit()
# Start the timed exam.
self.courseware_page.start_timed_exam()
# Stop the timed exam.
self.courseware_page.stop_timed_exam()
LogoutPage(self.browser).visit()
def test_can_reset_attempts(self):
"""
Make sure that Exam attempts are visible and can be reset.
"""
# Given that an exam has been configured to be a proctored exam.
self._create_a_timed_exam_and_attempt()
# When I log in as an instructor,
__, __, __, __ = self.log_in_as_instructor()
# And visit the Student Proctored Exam Attempts Section of Instructor Dashboard's Special Exams tab
instructor_dashboard_page = self.visit_instructor_dashboard()
exam_attempts_section = instructor_dashboard_page.select_special_exams().select_exam_attempts_section()
# Then I can see the search text field
self.assertTrue(exam_attempts_section.is_search_text_field_visible)
# And I can see one attempt by a student.
self.assertTrue(exam_attempts_section.is_student_attempt_visible)
# And I can remove the attempt by clicking the "x" at the end of the row.
exam_attempts_section.remove_student_attempt()
self.assertFalse(exam_attempts_section.is_student_attempt_visible)
@attr(shard=10)
class DataDownloadsTest(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "Data Downloads" tab.
"""
def setUp(self):
super(DataDownloadsTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.instructor_username, self.instructor_id, __, __ = self.log_in_as_instructor(
course_access_roles=['data_researcher']
)
instructor_dashboard_page = self.visit_instructor_dashboard()
self.data_download_section = instructor_dashboard_page.select_data_download()
def verify_report_requested_event(self, report_type):
"""
Verifies that the correct event is emitted when a report is requested.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.requested', 'report_type': report_type}
)
def verify_report_downloaded_event(self, report_url):
"""
Verifies that the correct event is emitted when a report is downloaded.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.downloaded', 'report_url': report_url}
)
def verify_report_download(self, report_name):
"""
Verifies that a report can be downloaded and an event fired.
"""
download_links = self.data_download_section.report_download_links
self.assertEqual(len(download_links), 1)
download_links[0].click()
expected_url = download_links.attrs('href')[0]
self.assertIn(report_name, expected_url)
self.verify_report_downloaded_event(expected_url)
def test_student_profiles_report_download(self):
"""
Scenario: Verify that an instructor can download a student profiles report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Download profile information as a CSV" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"student_profile_info"
self.data_download_section.generate_student_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"grade_report"
self.data_download_section.generate_grade_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_problem_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a problem grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Problem Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"problem_grade_report"
self.data_download_section.generate_problem_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_ora2_response_report_download(self):
"""
Scenario: Verify that an instructor can download an ORA2 grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Download ORA2 Responses" button
Then a report should be generated
"""
report_name = u"ORA_data"
self.data_download_section.generate_ora2_response_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_download(report_name)
@attr('a11y')
def test_data_download_a11y(self):
"""
Data download page accessibility tests
"""
self.data_download_section.a11y_audit.config.set_scope([
'.data-download-container'
])
self.data_download_section.a11y_audit.check_for_accessibility_errors()
@ddt.ddt
class DataDownloadsWithMultipleRoleTests(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "Data Downloads" tab with multiple user roles.
"""
shard = 23
def setUp(self):
super(DataDownloadsWithMultipleRoleTests, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
@ddt.data(['staff'], ['instructor'])
def test_list_student_profile_information_for_large_course(self, role):
"""
Scenario: List enrolled students' profile information for a large course
Given I am "<Role>" for a very large course
When I visit the "Data Download" tab
Then I do not see a button to 'List enrolled students' profile information'
Examples:
| Role |
| instructor |
| staff |
"""
username, __, email, password = self.log_in_as_instructor(
global_staff=False,
course_access_roles=role
)
instructor_dashboard_page = self.visit_instructor_dashboard()
data_download_section = instructor_dashboard_page.select_data_download()
self.assertTrue(data_download_section.enrolled_student_profile_button_present)
LogoutPage(self.browser).visit()
for __ in range(5):
learner_username = "test_student_{uuid}".format(uuid=self.unique_id[0:8])
learner_email = "{user}@example.com".format(user=learner_username)
# Enroll test users in the course
AutoAuthPage(
self.browser,
username=learner_username,
email=learner_email,
course_id=self.course_id
).visit()
# Login again with staff or instructor
AutoAuthPage(
self.browser,
username=username,
email=email,
password=password,
course_id=self.course_id,
staff=False,
course_access_roles=role
).visit()
instructor_dashboard_page = self.visit_instructor_dashboard()
instructor_dashboard_page.select_data_download()
self.assertFalse(data_download_section.enrolled_student_profile_button_present)
@ddt.data(['staff'], ['instructor'])
def test_view_grading_configuration(self, role):
"""
Scenario: View the grading configuration
Given I am "<Role>" for a course
When I click "Grading Configuration"
Then I see the grading configuration for the course
Examples:
| Role |
| instructor |
| staff |
"""
expected = u"""-----------------------------------------------------------------------------
Course grader:
<class 'xmodule.graders.WeightedSubsectionsGrader'>
Graded sections:
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Homework, category=Homework, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Lab, category=Lab, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Midterm Exam, category=Midterm Exam, weight=0.3
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Final Exam, category=Final Exam, weight=0.4
-----------------------------------------------------------------------------
Listing grading context for course {}
graded sections:
[]
all graded blocks:
length=0""".format(self.course_id)
self.log_in_as_instructor(
global_staff=False,
course_access_roles=role
)
instructor_dashboard_page = self.visit_instructor_dashboard()
data_download_section = instructor_dashboard_page.select_data_download()
data_download_section.generate_grading_configuration_button.click()
self.assertEqual(data_download_section.grading_config_text, expected)
@attr(shard=10)
@ddt.ddt
class CertificatesTest(BaseInstructorDashboardTest):
"""
Tests for Certificates functionality on instructor dashboard.
"""
def setUp(self):
super(CertificatesTest, self).setUp()
self.test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
CourseFixture(**self.course_info).install()
self.cert_fixture = CertificateConfigFixture(self.course_id, self.test_certificate_config)
self.cert_fixture.install()
self.user_name, self.user_id, __, __ = self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
disable_animations(self.certificates_section)
def test_generate_certificates_button_is_disabled(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Generate Certificates button is disabled.
Given that I am on the Certificates tab on the Instructor Dashboard
The instructor-generation and cert_html_view_enabled feature flags have been enabled
But the certificate is not active in settings.
Then I see a 'Generate Certificates' button disabled
"""
self.test_certificate_config['is_active'] = False
self.cert_fixture.update_certificate(1)
self.browser.refresh()
self.assertFalse(self.certificates_section.generate_certificates_button.visible)
self.assertTrue(self.certificates_section.generate_certificates_disabled_button.visible)
def test_generate_certificates_button_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Generate Certificates button is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
And the instructor-generation feature flag has been enabled
Then I see a 'Generate Certificates' button
And when I click on the 'Generate Certificates' button
Then I should see a status message and 'Generate Certificates' button should be disabled.
"""
self.assertTrue(self.certificates_section.generate_certificates_button.visible)
self.certificates_section.generate_certificates_button.click()
alert = get_modal_alert(self.certificates_section.browser)
alert.accept()
self.certificates_section.wait_for_ajax()
EmptyPromise(
lambda: self.certificates_section.certificate_generation_status.visible,
'Certificate generation status shown'
).fulfill()
disabled = self.certificates_section.generate_certificates_button.attrs('disabled')
self.assertEqual(disabled[0], 'true')
def test_pending_tasks_section_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Pending Instructor Tasks section is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
Then I see 'Pending Instructor Tasks' section
"""
self.assertTrue(self.certificates_section.pending_tasks_section.visible)
def test_certificate_exceptions_section_is_visible(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Certificate Exceptions section is visible.
Given that I am on the Certificates tab on the Instructor Dashboard
Then I see 'CERTIFICATE EXCEPTIONS' section
"""
self.assertTrue(self.certificates_section.certificate_exceptions_section.visible)
def test_instructor_can_add_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add new certificate
exception to list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# validate certificate exception synced with server is visible in certificate exceptions list
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
def test_remove_certificate_exception_on_page_reload(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can remove added certificate
exceptions from the list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
Revisit the page to make sure the exceptions are synced, then verify that
removing the user from the exception list removes them from the list.
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# Remove Certificate Exception
self.certificates_section.remove_first_certificate_exception()
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
def test_instructor_can_remove_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can remove added certificate
exceptions from the list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Add Exception' button
Then new certificate exception should be visible in certificate exceptions list
"""
notes = 'Test Notes'
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(notes, self.certificates_section.last_certificate_exception.text)
# Remove Certificate Exception
self.certificates_section.remove_first_certificate_exception()
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
# Verify that added exceptions are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# validate certificate exception synced with server is visible in certificate exceptions list
self.assertNotIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_exception.text)
def test_error_on_duplicate_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if the student being added already exists in the certificate exceptions list
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username that already is in the list and click 'Add Exception' button
Then Error Message should say 'User (username/email={user}) already in exception list.'
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
# Add duplicate student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
self.assertIn(
u'{user} already in exception list.'.format(user=self.user_name),
self.certificates_section.message.text
)
def test_error_on_empty_user_name(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if no username/email is entered while clicking "Add Exception" button
Given that I am on the Certificates tab on the Instructor Dashboard
When I click on 'Add Exception' button
AND student username/email field is empty
Then Error Message should say
'Student username/email field is required and can not be empty. '
'Kindly fill in username/email and then press "Add to Exception List" button.'
"""
# Click 'Add Exception' button without filling username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.click_add_exception_button()
self.assertIn(
'Student username/email field is required and can not be empty. '
'Kindly fill in username/email and then press "Add to Exception List" button.',
self.certificates_section.message.text
)
def test_error_on_non_existing_user(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if the username/email does not exist in the system when clicking "Add Exception" button
Given that I am on the Certificates tab on the Instructor Dashboard
When I click on 'Add Exception' button
AND student username/email does not exist
Then Error Message should say
'{user} does not exist in the LMS. Please check your spelling and retry.'
"""
invalid_user = 'test_user_non_existent'
# Click 'Add Exception' button with invalid username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.fill_user_name_field(invalid_user)
self.certificates_section.click_add_exception_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} does not exist in the LMS. Please check your spelling and retry.".format(user=invalid_user),
self.certificates_section.message.text
)
def test_user_not_enrolled_error(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard,
Error message appears if user is not enrolled in the course while trying to add a new exception.
Given that I am on the Certificates tab on the Instructor Dashboard
When I click on 'Add Exception' button
AND student is not enrolled in the course
Then Error Message should say
"The user (username/email={user}) you have entered is not enrolled in this course.
Make sure the username or email address is correct, then try again."
"""
new_user = 'test_user_{uuid}'.format(uuid=self.unique_id[6:12])
new_email = 'test_user_{uuid}@example.com'.format(uuid=self.unique_id[6:12])
# Create a new user who is not enrolled in the course
AutoAuthPage(self.browser, username=new_user, email=new_email).visit()
# Login as instructor and visit Certificate Section of Instructor Dashboard
self.user_name, self.user_id, __, __ = self.log_in_as_instructor()
self.instructor_dashboard_page.visit()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
# Click 'Add Exception' button with invalid username/email field
self.certificates_section.wait_for_certificate_exceptions_section()
self.certificates_section.fill_user_name_field(new_user)
self.certificates_section.click_add_exception_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} is not enrolled in this course. Please check your spelling and retry.".format(user=new_user),
self.certificates_section.message.text
)
def test_generate_certificate_exception(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, when user clicks
'Generate Exception Certificates' newly added certificate exceptions should be synced on server
Given that I am on the Certificates tab on the Instructor Dashboard
When I click 'Generate Exception Certificates'
Then newly added certificate exceptions should be synced on server
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, '')
# Click 'Generate Exception Certificates' button
self.certificates_section.click_generate_certificate_exceptions_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
self.user_name + ' has been successfully added to the exception list. Click Generate Exception Certificate'
' below to send the certificate.',
self.certificates_section.message.text
)
@ddt.data(
('Test \nNotes', 'Test Notes'),
('<Test>Notes</Test>', '<Test>Notes</Test>'),
)
@ddt.unpack
def test_notes_escaped_in_add_certificate_exception(self, notes, expected_notes):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add new certificate
exception to list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes (which contain characters that need to be escaped)
and click 'Add Exception' button, then the new certificate exception should be visible in the
certificate exceptions list.
"""
# Add a student to Certificate exception list
self.certificates_section.add_certificate_exception(self.user_name, notes)
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(expected_notes, self.certificates_section.last_certificate_exception.text)
# Revisit Page & verify that added exceptions are also synced with backend
self.certificates_section.refresh()
# Wait for the certificate exception section to render
self.certificates_section.wait_for_certificate_exceptions_section()
# Validate certificate exception synced with server is visible in certificate exceptions list
self.assertIn(self.user_name, self.certificates_section.last_certificate_exception.text)
self.assertIn(expected_notes, self.certificates_section.last_certificate_exception.text)
@attr('a11y')
def test_certificates_a11y(self):
"""
Certificates page accessibility tests
"""
self.certificates_section.a11y_audit.config.set_rules({
"ignore": [
'aria-hidden-focus' # TODO: AC-938
]
})
self.certificates_section.a11y_audit.config.set_scope([
'.certificates-wrapper'
])
self.certificates_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=20)
class CertificateInvalidationTest(BaseInstructorDashboardTest):
"""
Tests for Certificates functionality on instructor dashboard.
"""
@classmethod
def setUpClass(cls):
super(CertificateInvalidationTest, cls).setUpClass()
# Create the course fixture once per test run
CourseFixture(
org='test_org',
number='335535897951379478207964576572017930000',
run='test_run',
display_name='Test Course 335535897951379478207964576572017930000',
).install()
def setUp(self):
super(CertificateInvalidationTest, self).setUp()
# set same course number as we have in fixture json
self.course_info['number'] = "335535897951379478207964576572017930000"
# the fixture creates a user with this id, along with a generated certificate for it.
self.student_id = "99"
self.student_name = "testcert"
self.student_email = "cert@example.com"
# Enroll above test user in the course
AutoAuthPage(
self.browser,
username=self.student_name,
email=self.student_email,
course_id=self.course_id,
).visit()
self.test_certificate_config = {
'id': 1,
'name': 'Certificate name',
'description': 'Certificate description',
'course_title': 'Course title override',
'signatories': [],
'version': 1,
'is_active': True
}
self.cert_fixture = CertificateConfigFixture(self.course_id, self.test_certificate_config)
self.cert_fixture.install()
self.user_name, self.user_id, __, __ = self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
disable_animations(self.certificates_section)
def test_instructor_can_invalidate_certificate(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can add a certificate
invalidation to the invalidation list.
Given that I am on the Certificates tab on the Instructor Dashboard
When I fill in student username and notes fields and click 'Invalidate Certificate' button
Then the new certificate invalidation should be visible in the certificate invalidation list
"""
notes = 'Test Notes'
# Add a student to certificate invalidation list
self.certificates_section.add_certificate_invalidation(self.student_name, notes)
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
# Validate success message
self.assertIn(
u"Certificate has been successfully invalidated for {user}.".format(user=self.student_name),
self.certificates_section.certificate_invalidation_message.text
)
# Verify that added invalidations are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate invalidations section to render
self.certificates_section.wait_for_certificate_invalidations_section()
# validate certificate invalidation is visible in certificate invalidation list
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
def test_instructor_can_re_validate_certificate(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor can re-validate certificate.
Given that I am on the certificates tab on the Instructor Dashboard
AND there is a certificate invalidation in certificate invalidation table
When I click "Remove from Invalidation Table" button
Then certificate is re-validated and removed from certificate invalidation table.
"""
notes = 'Test Notes'
# Add a student to certificate invalidation list
self.certificates_section.add_certificate_invalidation(self.student_name, notes)
self.assertIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(notes, self.certificates_section.last_certificate_invalidation.text)
# Verify that added invalidations are also synced with backend
# Revisit Page
self.certificates_section.refresh()
# wait for the certificate invalidations section to render
self.certificates_section.wait_for_certificate_invalidations_section()
# click "Remove from Invalidation Table" button next to certificate invalidation
self.certificates_section.remove_first_certificate_invalidation()
# validate certificate invalidation is removed from the list
self.assertNotIn(self.student_name, self.certificates_section.last_certificate_invalidation.text)
self.assertNotIn(notes, self.certificates_section.last_certificate_invalidation.text)
self.assertIn(
"The certificate for this learner has been re-validated and the system is "
"re-running the grade for this learner.",
self.certificates_section.certificate_invalidation_message.text
)
def test_error_on_empty_user_name_or_email(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see an error message
when clicking "Invalidate Certificate" button without entering a student username or email.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate" button without entering student username/email.
Then I see following error message
"Student username/email field is required and can not be empty."
"Kindly fill in username/email and then press "Invalidate Certificate" button."
"""
# Click "Invalidate Certificate" with empty student username/email field
self.certificates_section.fill_certificate_invalidation_user_name_field("")
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u'Student username/email field is required and can not be empty. '
u'Kindly fill in username/email and then press "Invalidate Certificate" button.',
self.certificates_section.certificate_invalidation_message.text
)
def test_error_on_invalid_user(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see error message if
the student entered for certificate invalidation does not exist.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate"
AND the username entered does not exist in the system
Then I see following error message
"{user} does not exist in the LMS. Please check your spelling and retry."
"""
invalid_user = "invalid_test_user"
# Click "Invalidate Certificate" with invalid student username/email
self.certificates_section.fill_certificate_invalidation_user_name_field(invalid_user)
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} does not exist in the LMS. Please check your spelling and retry.".format(user=invalid_user),
self.certificates_section.certificate_invalidation_message.text
)
def test_user_not_enrolled_error(self):
"""
Scenario: On the Certificates tab of the Instructor Dashboard, Instructor should see error message if
the student entered for certificate invalidation is not enrolled in the course.
Given that I am on the certificates tab on the Instructor Dashboard
When I click "Invalidate Certificate"
AND the username entered is not enrolled in the current course
Then I see following error message
"{user} is not enrolled in this course. Please check your spelling and retry."
"""
new_user = 'test_user_{uuid}'.format(uuid=self.unique_id[6:12])
new_email = 'test_user_{uuid}@example.com'.format(uuid=self.unique_id[6:12])
# Create a new user who is not enrolled in the course
AutoAuthPage(self.browser, username=new_user, email=new_email).visit()
# Login as instructor and visit Certificate Section of Instructor Dashboard
self.user_name, self.user_id, __, __ = self.log_in_as_instructor()
self.instructor_dashboard_page.visit()
self.certificates_section = self.instructor_dashboard_page.select_certificates()
# Click 'Invalidate Certificate' button with not enrolled student
self.certificates_section.wait_for_certificate_invalidations_section()
self.certificates_section.fill_certificate_invalidation_user_name_field(new_user)
self.certificates_section.click_invalidate_certificate_button()
self.certificates_section.wait_for_ajax()
self.assertIn(
u"{user} is not enrolled in this course. Please check your spelling and retry.".format(user=new_user),
self.certificates_section.certificate_invalidation_message.text
)
@attr('a11y')
def test_invalidate_certificates_a11y(self):
"""
Certificate invalidation accessibility tests
"""
self.certificates_section.a11y_audit.config.set_rules({
"ignore": [
'aria-hidden-focus' # TODO: AC-938
]
})
self.certificates_section.a11y_audit.config.set_scope([
'.certificates-wrapper'
])
self.certificates_section.a11y_audit.check_for_accessibility_errors()
@attr(shard=20)
class EcommerceTest(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "E-Commerce" tab.
"""
def setup_course(self, course_number):
"""
Sets up the course
"""
self.course_info['number'] = course_number
course_fixture = CourseFixture(
self.course_info["org"],
self.course_info["number"],
self.course_info["run"],
self.course_info["display_name"]
)
course_fixture.install()
def visit_ecommerce_section(self):
"""
Log in to visit Instructor dashboard and click E-commerce tab
"""
self.log_in_as_instructor(course_access_roles=['finance_admin'])
instructor_dashboard_page = self.visit_instructor_dashboard()
return instructor_dashboard_page.select_ecommerce_tab()
def add_course_mode(self, sku_value=None):
"""
Add an honor mode to the course
"""
ModeCreationPage(browser=self.browser, course_id=self.course_id, mode_slug=u'honor', min_price=10,
sku=sku_value).visit()
def test_enrollment_codes_section_visible_for_non_ecommerce_course(self):
"""
Test Enrollment Codes UI, under E-commerce Tab, should be visible in the Instructor Dashboard with non
e-commerce course
"""
# Setup course
non_ecommerce_course_number = "34039497242734583224814321005482849780"
self.setup_course(non_ecommerce_course_number)
# Add an honor mode to the course
self.add_course_mode()
# Log in and visit E-commerce section under Instructor dashboard
self.assertIn(u'Enrollment Codes', self.visit_ecommerce_section().get_sections_header_values())
def test_coupon_codes_section_visible_for_non_ecommerce_course(self):
"""
Test Coupon Codes UI, under E-commerce Tab, should be visible in the Instructor Dashboard with non
e-commerce course
"""
# Setup course
non_ecommerce_course_number = "34039497242734583224814321005482849781"
self.setup_course(non_ecommerce_course_number)
# Add an honor mode to the course
self.add_course_mode()
# Log in and visit E-commerce section under Instructor dashboard
self.assertIn(u'Coupon Code List', self.visit_ecommerce_section().get_sections_header_values())
def test_enrollment_codes_section_not_visible_for_ecommerce_course(self):
"""
Test Enrollment Codes UI, under E-commerce Tab, should not be visible in the Instructor Dashboard with
e-commerce course
"""
# Setup course
ecommerce_course_number = "34039497242734583224814321005482849782"
self.setup_course(ecommerce_course_number)
# Add an honor mode to the course with sku value
self.add_course_mode('test_sku')
# Log in and visit E-commerce section under Instructor dashboard
self.assertNotIn(u'Enrollment Codes', self.visit_ecommerce_section().get_sections_header_values())
def test_coupon_codes_section_not_visible_for_ecommerce_course(self):
"""
Test Coupon Codes UI, under E-commerce Tab, should not be visible in the Instructor Dashboard with
e-commerce course
"""
# Setup course
ecommerce_course_number = "34039497242734583224814321005482849783"
self.setup_course(ecommerce_course_number)
# Add an honor mode to the course with sku value
self.add_course_mode('test_sku')
# Log in and visit E-commerce section under Instructor dashboard
self.assertNotIn(u'Coupon Code List', self.visit_ecommerce_section().get_sections_header_values())
class StudentAdminTest(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "Student Admin" tab.
"""
SECTION_NAME = 'Test Section 1'
SUBSECTION_NAME = 'Test Subsection 1'
UNIT_NAME = 'Test Unit 1'
PROBLEM_NAME = 'Test Problem 1'
shard = 23
def setUp(self):
super(StudentAdminTest, self).setUp()
self.course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
self.problem = create_multiple_choice_problem(self.PROBLEM_NAME)
self.vertical = XBlockFixtureDesc('vertical', "Lab Unit")
self.course_fix.add_children(
XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
XBlockFixtureDesc('sequential', self.SUBSECTION_NAME).add_children(
self.vertical.add_children(self.problem)
)
),
).install()
self.username, __, __, __ = self.log_in_as_instructor()
self.instructor_dashboard_page = self.visit_instructor_dashboard()
def test_rescore_rescorable(self):
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentSpecificAdmin)
student_admin_section.set_student_email_or_username(self.username)
student_admin_section.set_problem_location(self.problem.locator)
student_admin_section.rescore_button.click()
alert = get_modal_alert(student_admin_section.browser)
alert.dismiss()
self.assertFalse(self.instructor_dashboard_page.is_rescore_unsupported_message_visible())
def test_task_list_visibility(self):
"""
Test that instructor task list is visible on student admin section
to users who have access to instructor tab/dashboard
"""
# first check for global staff users
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentAdminPage)
self.assertTrue(student_admin_section.running_tasks_section.visible)
# logout global-staff user and check for users with staff access to course
LogoutPage(self.browser).visit()
# staff access to the course is required to access the instructor dashboard
self.log_in_as_instructor(False, ['staff'])
self.instructor_dashboard_page = self.visit_instructor_dashboard()
student_admin_section = self.instructor_dashboard_page.select_student_admin(StudentAdminPage)
self.assertTrue(student_admin_section.running_tasks_section.visible)
|
cpennington/edx-platform
|
common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py
|
Python
|
agpl-3.0
| 58,884
|
[
"VisIt"
] |
9f9a99901486600d970049245c27d5071ad5514759288b5ddfe410c1f4035524
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import os
import tempfile
import gtk
import mock
from stoqlib.database.settings import DatabaseSettings
from stoqlib.gui.test.uitestutils import GUITest
from stoq.gui.config import (DatabaseSettingsStep,
FirstTimeConfigWizard)
class MockDatabaseSettings(DatabaseSettings):
def has_database(self):
return False
class TestFirstTimeConfigWizard(GUITest):
def setUp(self):
GUITest.setUp(self)
self.settings = None
def create_wizard(self):
options = mock.Mock()
options.sqldebug = False
options.verbose = False
if self.settings is None:
self.settings = MockDatabaseSettings(address=u'localhost',
port=12345,
dbname=u'dbname',
username=u'username',
password=u'password')
self.config = self.fake.StoqConfig(self.settings)
wizard = FirstTimeConfigWizard(options, self.config)
return wizard
@mock.patch('stoq.gui.config.test_local_database')
@mock.patch('stoq.gui.config.ProcessView.execute_command')
@mock.patch('stoq.gui.config.create_default_profile_settings')
@mock.patch('stoq.gui.config.yesno')
@mock.patch('stoq.gui.config.warning')
@mock.patch('stoq.gui.config.get_hostname')
@mock.patch('stoq.gui.config.check_extensions')
def test_local(self,
check_extensions,
get_hostname,
warning,
yesno,
create_default_profile_settings,
execute_command,
test_local_database):
DatabaseSettingsStep.model_type = self.fake.DatabaseSettings
self.settings = self.fake.DatabaseSettings(self.store)
get_hostname.return_value = u'foo_hostname'
test_local_database.return_value = (u'/var/run/postgres', 5432)
wizard = self.create_wizard()
self.check_wizard(wizard, u'wizard-config-welcome')
self.click(wizard.next_button)
step = wizard.get_current_step()
self.assertTrue(step.radio_local.get_active())
self.check_wizard(wizard, u'wizard-config-database-location')
self.click(wizard.next_button)
# Warning should not have been called yet.
self.assertEquals(warning.call_count, 0, warning.call_args_list)
self.check_wizard(wizard, u'wizard-config-installation-mode')
self.click(wizard.next_button)
self.check_wizard(wizard, u'wizard-config-plugins')
self.click(wizard.next_button)
step = wizard.get_current_step()
step.name.update(u'Name')
step.email.update(u'example@example.com')
step.phone.update(u'1212341234')
wizard.tef_request_done = True
self.check_wizard(wizard, u'wizard-config-tef')
self.click(wizard.next_button)
step = wizard.get_current_step()
step.password_slave.password.update(u'foobar')
step.password_slave.confirm_password.update(u'foobar')
self.check_wizard(wizard, u'wizard-config-admin-password')
self.click(wizard.next_button)
self.check_wizard(wizard, u'wizard-config-installing')
execute_command.assert_called_once_with([
u'stoq', u'dbadmin', u'init',
u'--no-load-config', u'--no-register-station', u'-v',
u'--enable-plugins', u'ecf',
u'--create-dbuser',
u'-d', u'stoq',
u'-p', u'12345',
u'-u', u'username',
u'-w', u'password'])
step = wizard.get_current_step()
self.assertEquals(step.progressbar.get_text(),
u'Creating database...')
step.process_view.emit(u'read-line', u'stoqlib.database.create SCHEMA')
self.assertEquals(step.progressbar.get_text(),
u'Creating base schema...')
step.process_view.emit(u'read-line', u'stoqlib.database.create PATCHES:1')
self.assertEquals(step.progressbar.get_text(),
u'Creating schema, applying patches...')
step.process_view.emit(u'read-line', u'stoqlib.database.create PATCH:0')
self.assertEquals(step.progressbar.get_text(),
u'Creating schema, applying patch 1 ...')
step.process_view.emit(u'read-line', u'stoqlib.database.create INIT START')
self.assertEquals(step.progressbar.get_text(),
u'Creating additional database objects ...')
step.process_view.emit(u'read-line', u'stoqlib.database.create PLUGIN')
self.assertEquals(step.progressbar.get_text(),
u'Activating plugins ...')
yesno.return_value = False
step.process_view.emit(u'finished', 30)
yesno.assert_called_once_with(
u'Something went wrong while trying to create the database. Try again?',
gtk.RESPONSE_NO, u'Change settings', u'Try again')
step.process_view.emit(u'finished', 999)
warning.assert_called_once_with(
u"Something went wrong while trying to create the Stoq database")
step.process_view.emit(u'finished', 0)
create_default_profile_settings.assert_called_once_with()
self.click(wizard.next_button)
self.check_wizard(wizard, u'wizard-config-done')
# FIXME: Find out why this is False when running the tests on a
# clean database and True otherwise.
wizard.has_installed_db = True
self.click(wizard.next_button)
self.assertTrue(self.config.flushed)
@mock.patch('stoq.gui.config.ProcessView.execute_command')
@mock.patch('stoq.gui.config.create_default_profile_settings')
@mock.patch('stoq.gui.config.yesno')
@mock.patch('stoq.gui.config.warning')
@mock.patch('stoq.gui.config.get_hostname')
@mock.patch('stoq.gui.config.get_database_version')
@mock.patch('stoq.gui.config.check_extensions')
def test_remote(self,
check_extensions,
get_database_version,
get_hostname,
warning,
yesno,
create_default_profile_settings,
execute_command):
DatabaseSettingsStep.model_type = self.fake.DatabaseSettings
self.settings = self.fake.DatabaseSettings(self.store)
get_hostname.return_value = u'foo_hostname'
get_database_version.return_value = (9, 1)
wizard = self.create_wizard()
# Welcome
self.click(wizard.next_button)
# DatabaseLocationStep
step = wizard.get_current_step()
step.radio_network.set_active(True)
self.click(wizard.next_button)
# DatabaseSettingsStep, invalid
step = wizard.get_current_step()
step.address.update(u'remotehost')
step.port.update(12345)
step.username.update(u'username')
step.dbname.update(u'dbname')
# DatabaseSettingsStep, valid
self.settings.check = True
self.click(wizard.next_button)
# Installation mode
self.click(wizard.next_button)
# Plugins
self.click(wizard.next_button)
# TEF
step = wizard.get_current_step()
step.name.update(u'Name')
step.email.update(u'example@example.com')
step.phone.update(u'1212341234')
wizard.tef_request_done = True
self.click(wizard.next_button)
# AdminPassword
step = wizard.get_current_step()
step.password_slave.password.update(u'foobar')
step.password_slave.confirm_password.update(u'foobar')
self.check_wizard(wizard, u'wizard-config-admin-password-remote')
with tempfile.NamedTemporaryFile() as f:
os.environ[u'PGPASSFILE'] = f.name
self.click(wizard.next_button)
data = f.read()
self.assertEquals(data,
(u'remotehost:12345:postgres:username:password\n'
u'remotehost:12345:dbname:username:password\n'))
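# For reference, PostgreSQL's pgpass file holds one entry per line in the
# format hostname:port:database:username:password, which is what the
# assertion above checks.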
# Installing
step = wizard.get_current_step()
yesno.return_value = False
step.process_view.emit(u'finished', 0)
yesno.assert_called_once_with(
u"The specified database 'dbname' does not exist.\n"
u"Do you want to create it?", gtk.RESPONSE_YES,
u"Create database", u"Don't create")
create_default_profile_settings.assert_called_once_with()
self.click(wizard.next_button)
self.check_wizard(wizard, u'wizard-config-done')
# FIXME: Find out why this is False when running the tests on a
# clean database and True otherwise.
wizard.has_installed_db = True
self.click(wizard.next_button)
self.assertTrue(self.config.flushed)
@mock.patch('stoq.gui.config.warning')
def test_database_name(self, warning):
wizard = self.create_wizard()
self.click(wizard.next_button)
step = wizard.get_current_step()
step.radio_network.set_active(True)
self.click(wizard.next_button)
step = wizard.get_current_step()
step.address.update(u'remotehost')
step.port.update(12345)
step.username.update(u'username')
step.dbname.update(u'invalid; DROP DATABASE postgresql;')
self.assertFalse(wizard.next_button.props.sensitive)
# DatabaseSettingsStep, valid
step.dbname.update(u'valid')
self.click(wizard.next_button)
warning.assert_called_once_with(
u'Invalid database address',
u"The database address 'remotehost' is invalid. Please fix it and try again")
|
andrebellafronte/stoq
|
stoq/gui/test/test_config_wizard.py
|
Python
|
gpl-2.0
| 10,749
|
[
"VisIt"
] |
5022b1c946272dd9686816411f7f26343280ce9e22438462507e3a0588945af2
|
from matplotlib import rc
fsize = 17
rc('text', usetex=False)
rc('font', size=fsize)#, ftype=42)
line_width = 3
point_size = 30
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import yt
import numpy as np
import glob
from onezone import star
import os
import sys
from galaxy_analysis.analysis import Galaxy
from galaxy_analysis.utilities import utilities as util
import deepdish as dd
#
# Step 1: load "final" and "initial" simulations
# Step 2: Load all massive star particle remnants, final mass, initial mass, etc.
# Step 3: Run each through the star particle class in onezone
# (make sure sn ejecta for m > 25 is zero)
# Step 4: Compute for each SN ejecta, wind ejecta, and total ejecta
# Step 5: Compute:
# % error = ((Birth - Current) - (sn_ej+wind_ej)) / (sn_ej+wind_ej)
#
# Step 6: Plot cumulative distribution of SNII remnants error
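# A minimal, illustrative transcription of the Step 5 metric above (this
# helper is not used by the analysis; note that compute_SNII_error() below
# computes the same quantity with the opposite sign convention):
def _percent_error_sketch(birth_mass, current_mass, sn_ej, wind_ej):
# mass the particle actually lost vs. mass the one-zone model predicts
ejected = sn_ej + wind_ej
return ((birth_mass - current_mass) - ejected) / ejected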
def generate_model_stars(m, z, abund = ['m_tot','m_metal'], M_o = None,
include_popIII = False, PopIII_crit_z=2.2E-6):
"""
Makes a list of star objects from one zone model
"""
if M_o is None:
M_o = m
all_star = [None]*np.size(m)
if not 'm_tot' in abund:
abund.extend(['m_tot'])
if not 'm_metal' in abund:
abund.extend(['m_metal'])
ele = {}
for k in abund:
if k == 'm_tot':
val = 1.0
elif k == 'm_metal':
val = z[0]
else:
val = 0.0
ele[k] = val
sum = 0.0
for i in np.arange(np.size(m)):
if include_popIII:
if z[i] < PopIII_crit_z:
ptype = 'popIII'
else:
ptype = 'star'
else:
ptype = 'star'
all_star[i] = star.Star(M=m[i],Z=z[i], abundances=ele, M_o = M_o[i], star_type = ptype)
if ptype == 'popIII':
all_star[i].set_popIII_properties(True)
else:
all_star[i].set_SNII_properties(True)
# if m[i] > 8.0:
# print(s.sn_ejecta_masses['O'])
all_star[i].set_SNIa_properties(check_mass = True)
sum += all_star[i].sn_ejecta_masses['O']
# print("yyyyy", np.sum( [x.sn_ejecta_masses['O'] for x in all_star]), sum)
return all_star
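# Illustrative usage (hypothetical masses/metallicities; the star.Star
# interface is assumed from the onezone package imported above):
#   stars = generate_model_stars(np.array([1.0, 20.0]), np.array([0.02, 0.02]))
#   total_O = np.sum([s.sn_ejecta_masses['O'] for s in stars])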
def check_all_masses(ds, data, ds0 = None, time_cut = -1.0):
"""
Compare the star-by-star model ejecta (winds + SNe) against the metal
masses found on the grid and in the outflows, printing the percent
error for each tracked element.
"""
pt = data['particle_type']
# cut out DM
select = pt >= 11
pt = pt[select]
bm = data['birth_mass'][select].value
pm = data['particle_mass'][select].convert_to_units('Msun').value
z = data['metallicity_fraction'][select].value
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm,z, abund = elements,
include_popIII = ds.parameters['IndividualStarPopIIIFormation'])
lifetime = data['dynamical_time'][select].convert_to_units('Myr')
birth = data['creation_time'][select].convert_to_units('Myr')
age = ds.current_time.convert_to_units('Myr') - birth
model_wind_ejecta = {} # total_wind_ejecta
for k in all_stars[0].wind_ejecta_masses().keys():
model_wind_ejecta[k] = np.array([x.wind_ejecta_masses()[k] for x in all_stars])
model_sn_ejecta = {}
for k in all_stars[0].sn_ejecta_masses.keys():
model_sn_ejecta[k] = np.array([x.sn_ejecta_masses[k] for x in all_stars])
# correct for AGB stars that haven't died
AGB = (bm < 8.0) * (pt == 11)
select = (bm > 8.0) * (pt == 11)
factor = age / lifetime
factor[factor>1.0] = 1.0
time_select = birth > time_cut
#
# Apply correction and zero out SN abundances for stars that have not gone SNe
#
for k in list(model_wind_ejecta.keys()):
model_wind_ejecta[k][AGB] = 0.0
model_sn_ejecta[k][ (pt == 11) ] = 0.0 # regular stars
model_sn_ejecta[k][ (pt == 14) ] = 0.0 # popIII stars
model_wind_ejecta[k][select] = model_wind_ejecta[k][select]*factor[select]
total_model_ejecta = {}
for k in list(model_wind_ejecta.keys()):
total_model_ejecta[k] = np.sum(model_sn_ejecta[k][time_select]) + np.sum(model_wind_ejecta[k][time_select])
#print("xxxxxx", np.sum(model_sn_ejecta['O']), np.sum(model_sn_ejecta['O'][bm>8.0]), np.sum(model_sn_ejecta['O'][bm<8.0]))
# construct the individual mode dictionary
separate_mode_ejecta = {'AGB' : {}, 'SWind' : {}, 'SNII' : {}, 'SNIa' : {} , 'PopIII' : {}, 'Total' : {}}
for k in list(model_wind_ejecta.keys()):
separate_mode_ejecta['PopIII'][k] = np.sum(model_sn_ejecta[k][(pt==13)*(z<2.2E-6)])
separate_mode_ejecta['SNII'][k] = np.sum(model_sn_ejecta[k][(bm > 8.0)*(z>2.2E-6)])
separate_mode_ejecta['SNIa'][k] = np.sum(model_sn_ejecta[k][(bm < 8.0)*(z>2.2E-6)])
separate_mode_ejecta['SWind'][k] = np.sum(model_wind_ejecta[k][bm > 8.0])
separate_mode_ejecta['AGB'][k] = np.sum(model_wind_ejecta[k][bm < 8.0])
separate_mode_ejecta['Total'][k] = np.sum( [separate_mode_ejecta[x][k] for x in ['AGB','SWind','SNII','SNIa','PopIII'] ])
for k in list(separate_mode_ejecta.keys()):
separate_mode_ejecta[k]['Total Tracked Metals'] = np.sum( [separate_mode_ejecta[k][x] for x in list(separate_mode_ejecta[k].keys()) if (not x in ['m_tot','m_metal','H','He'])] )
if os.path.exists(str(ds) + '_galaxy_data.h5'):
dd_data = dd.io.load(str(ds) + '_galaxy_data.h5')
dd_data['gas_meta_data']['masses']['Type'] = separate_mode_ejecta
dd.io.save( str(ds) + '_galaxy_data.h5',dd_data)
# now do this for the individual abundances on grid:
grid_masses = {}
for k in list(model_wind_ejecta.keys()):
if k == 'm_tot' or k == 'm_metal':
continue
grid_masses[k] = np.sum(data[k + '_Density'] * ds.mass_unit / ds.length_unit**3 *\
data['cell_volume']).convert_to_units('Msun').value
if not (ds0 is None):
grid_masses[k] = grid_masses[k] - np.sum(ds0[k + '_Density'] * ds0.mass_unit / ds0.length_unit**3 *\
ds0['cell_volume']).convert_to_units('Msun').value
# else:
# grid_masses[k] = grid_masses[k] - 1.0E-10 * np.sum(data['cell_mass'].to('Msun')).value
gal = Galaxy(str(ds))
outflow_masses = gal.boundary_mass_flux
#print total_model_ejecta
#print grid_masses
#print outflow_masses
for k in separate_mode_ejecta.keys():
print(k, separate_mode_ejecta[k]['O'], separate_mode_ejecta[k]['N'])
print(list(grid_masses.keys()))
print("Element Total_on_Grid Total_Outflow Sum_Injected Total_model_mass Percent_error")
for k in list(grid_masses.keys()):
okey = k + '_Density'
error =100 * (outflow_masses[okey] + grid_masses[k] - total_model_ejecta[k] ) / total_model_ejecta[k]
print("%2s %8.8E %8.8E %8.8E %8.8E %4.4f"%(k,grid_masses[k], outflow_masses[okey], grid_masses[k] + outflow_masses[okey],
total_model_ejecta[k], error))
return all_stars, model_sn_ejecta, model_wind_ejecta, total_model_ejecta
def check_wind_ejecta(ds, data):
"""
Compare the model stellar wind ejecta against the wind mass actually
injected in the simulation, printing the error distribution.
"""
pt = data['particle_type']
# cut out DM
select = pt >= 11
pt = pt[select]
bm = data['birth_mass'][select].value
pm = data['particle_mass'][select].convert_to_units('Msun').value
z = data['metallicity_fraction'][select].value
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm,z, abund = elements,
include_popIII = ds.parameters['IndividualStarPopIIIFormation'])
lifetime = data['dynamical_time'][select].convert_to_units('Myr')
birth = data['creation_time'][select].convert_to_units('Myr')
age = ds.current_time.convert_to_units('Myr') - birth
# total wind ejecta over entire lifetime
total_wind_ejecta = np.array([x.wind_ejecta_masses()['m_tot'] for x in all_stars])
# correct for AGB stars that haven't died
AGB = (bm < 8.0)
model_wind_ejecta = total_wind_ejecta * 1.0
model_wind_ejecta[ AGB * (pt == 11)] = 0.0
# adjust wind to correct fraction given lifetime
select = (bm > 8.0) * (pt == 11)
factor = age / lifetime
factor[factor>1.0] = 1.0
model_wind_ejecta[select] = model_wind_ejecta[select] * factor[select]
# load actual injection from simulation
actual_wind_ejecta = data['wind_mass_ejected'][select].value
# compute percent error
model_wind_ejecta = model_wind_ejecta[age>1]
actual_wind_ejecta = actual_wind_ejecta[age>1]
error = (model_wind_ejecta - actual_wind_ejecta)
error[model_wind_ejecta>0] = error[model_wind_ejecta>0]/model_wind_ejecta[model_wind_ejecta>0]
error_mass = error[model_wind_ejecta>0]
n_total = 1.0 * np.size(error_mass)
print(np.size( error_mass[ (np.abs(error_mass) < 0.05) ])/n_total)
print(np.size( error_mass[ (np.abs(error_mass) < 0.10) ])/n_total)
print(np.size( error_mass[ (np.abs(error_mass) < 0.15) ])/n_total)
print(np.size( error_mass[ (np.abs(error_mass) < 0.20) ])/n_total)
print(np.size( error_mass[ (np.abs(error_mass) < 0.25) ])/n_total)
#error_mass = error_mass[birth[model_wind_ejecta>0] > 110]
#error_mass = error_mass[error_mass>0]
print(np.min(error_mass), np.max(error_mass), np.average(error_mass), np.median(error_mass))
print(error_mass)
select = (age>1)
bm = bm[select]
pm = pm[select]
age = age[select]
lifetime = lifetime[select]
total_wind_ejecta = total_wind_ejecta[select]
select = (model_wind_ejecta>0)
bm = bm[select]
pm = pm[select]
age = age[select]
lifetime = lifetime[select]
model_wind_ejecta = model_wind_ejecta[select]
actual_wind_ejecta = actual_wind_ejecta[select]
total_wind_ejecta = total_wind_ejecta[select]
#print("BM PM Percent_error Model_wind actual_wind lifetime_wind")
#for i in np.arange(np.size(error_mass)):
# print("%5.5f %3.3f %5.5f %5.5E %5.5E %5.5E"%(bm[i],pm[i],error_mass[i]*100,model_wind_ejecta[i], actual_wind_ejecta[i], total_wind_ejecta[i]))
#print(np.min(error_mass), np.max(error_mass), np.average(error_mass), np.median(error_mass))
# print bm[error > 0.9], pm[error>0.9], pt[error>0.9]
# print age[error>0.9]
# print actual_wind_ejecta[error>0.9]
# print model_wind_ejecta[error>0.9]
#print actual_wind_ejecta[birth > 110]
#print model_wind_ejecta[birth > 110]
return
def compute_SNII_error(ds, data, uselog = True):
"""
Plot the cumulative distribution and histogram of the error between the
mass each SN progenitor actually ejected and the one-zone model value.
"""
pt = data['particle_type']
select = pt >= 11
pt = pt[select]
pm = data['particle_mass'][select].convert_to_units('Msun').value
bm = data['birth_mass'][select].value
z = data['metallicity_fraction'][select].value
# select all particles that could have gone supernova
select = ((pt == 13) * (bm > 8.0) * (bm < 25.0)) +\
((pt == 13) * (z < 2.2E-6) * ((bm > 11.0) * (bm<40.0) + (bm>140.0)*(bm<260.0)))
pm = pm[select]
bm = bm[select]
z = z[select]
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm, z, abund = elements)
total_ejecta = np.zeros(np.size(bm))
error = np.zeros(np.size(bm))
wind_error = np.zeros(np.size(bm))
sn_error = np.zeros(np.size(bm))
ej_frac = np.zeros(np.size(bm))
for i,s in enumerate(all_stars):
s.set_SNII_properties(True)
wind = s.wind_ejecta_masses()
sn = s.sn_ejecta_masses
total_ejecta[i] = wind['m_tot'] + sn['m_tot']
error[i] = ( -1.0*(bm[i]-pm[i]) + total_ejecta[i]) / (total_ejecta[i])
ej_frac[i] = (bm[i]-pm[i]) / total_ejecta[i]
wind_error[i] = ( wind['m_tot'] / total_ejecta[i] )
sn_error[i] = ( sn['m_tot'] / total_ejecta[i] )
snavg , snstd = np.average(sn_error), np.std(sn_error)
windavg, windstd = np.average(wind_error), np.std(wind_error)
# now plot cumulative distribution of positive error (error > 0 = missing mass)
pos_error = error[error>0]
fig, ax = plt.subplots()
if uselog:
xdata = np.log10(pos_error)
bins = np.arange(-4, 1.0, 0.025)
else:
xdata = pos_error
bins = np.linspace(0.0, 1.0, 200)
hist,bins = np.histogram(np.log10(ej_frac), bins = bins)
cent = (bins[1:] + bins[:-1])*0.5
ax.plot(cent, np.cumsum(hist) / (1.0*np.sum(hist)), lw = 3, color = 'black')
ylim = [0.0, 1.05]
ax.set_ylim(ylim)
def _plot_line(x, color, ls, log, label):
if log:
if x <= 0:
return
x = np.log10(x)
ax.plot([x,x],ylim, color = color, ls = ls, label = label, lw = 2)
return
# _plot_line(snavg, 'blue', '-', uselog, 'SN fraction')
# _plot_line(snavg-snstd, 'blue', '-', uselog, None)
# _plot_line(snavg+snstd, 'blue', '-', uselog, None)
# _plot_line(windavg, 'purple', '-', uselog, 'Wind fraction')
# _plot_line(windavg - windstd, 'purple', '--', uselog, None)
# _plot_line(windavg + windstd, 'purple', '--', uselog, None)
ax.set_xlabel('Fraction of Mass Actually Injected')
ax.set_ylabel('Fraction of SN')
fig.set_size_inches(8,8)
plt.tight_layout()
plt.minorticks_on()
fig.savefig('sn_cum_mass_error.png')
plt.close()
#
#
# histogram
#
#
fig, ax = plt.subplots()
if uselog:
xdata = np.log10(pos_error)
bins = np.arange(-2, 0.05, 0.025)
else:
xdata = pos_error
bins = np.linspace(0.0, 1.0, 200)
hist,bins = np.histogram(xdata, bins = bins)
cent = (bins[1:] + bins[:-1])*0.5
ax.plot(cent, hist, lw = 3, color = 'black')
energy_error = ( np.sum(pos_error)) / (np.size(pos_error)*1.0)
ax.plot([np.average(pos_error),np.average(pos_error)], [0,np.max(hist)], color = 'black' ,ls = '--', lw = 3)
ax.annotate("Energy Error = %0.2f percent"%(100*energy_error), xy=(0.5,0.9*np.max(hist)),
xytext=(0.5,0.9*np.max(hist)))
print(energy_error)
ax.set_ylim([0,np.max(hist)])
ax.set_xlabel('Error in Ejected Mass')
ax.set_ylabel('Counts')
fig.set_size_inches(8,8)
plt.tight_layout()
plt.minorticks_on()
fig.savefig('sn_mass_error.png')
return error, fig, ax
if __name__=="__main__":
name_list = np.sort(glob.glob('DD????/DD????'))
if np.size(sys.argv) == 1:
try:
ds = yt.load(name_list[-1])
except Exception:
print("Could not load ", name_list[-1], " trying the next one")
ds = yt.load(name_list[-2])
else:
# name = 'DD%0004i'%( int(sys.argv[1]))
name = str( sys.argv[1] )
ds = yt.load( name + '/' + name)
data = ds.all_data()
# if ('enzo','wind_mass_ejected') in ds.field_list or\
# ('io','wind_mass_ejected') in ds.field_list:
# try:
# check_wind_ejecta(ds,data)
# except:
# print("failing in wind ejecta")
# try:
# error, fig, ax = compute_SNII_error(ds,data, uselog=True)
# except:
# print("failing in SNII check")
# ds0 = yt.load('./../lowres/Dds0035/Dds0035')
# ds0 = ds0.all_data()
check_all_masses(ds,data) #, ds0 = ds0)
|
aemerick/galaxy_analysis
|
misc/missing_mass_analysis.py
|
Python
|
mit
| 15,245
|
[
"Galaxy"
] |
786b75f3bd43cd09b1d95373999ada4e14964cb67029c545d1be514c2d5f4f29
|
# Halldór Jens Vilhjálmsson
from . import room
import random
import time
import turtle
import os
# Brotinn Lykill and Brotinn Lykill(43) Sun Cream -> Eidur
print("You are in room 43!")
time.sleep(1)
print("The first thing you see is a Treasure Goblin from Diablo 3")
room.grunnur.items.append(["Sun Cream"])
print("YOU HAVE OBTAINED SUN CREAM, PLEASE VISIT ROOM 11 LATER!")
time.sleep(1)
def turtledrawing():
wn = turtle.Screen()
wn.bgcolor('black')
chestTurtle = turtle.Turtle()
chestTurtle.begin_fill()
chestTurtle.fillcolor('red')
for i in range(10):
chestTurtle.forward(50)
chestTurtle.right(144)
chestTurtle.end_fill()
wn.mainloop()
def exitFunction():
fyrirUtan = room.grunnur(43)
fyrirUtan.info = "You've broken the system, congratulations!"
y = input("Do you want to exit room 43?").lower()
if y[0] in ['y', 'j']:
i = input("Which direction do you want to head? (n,s,w,e)").lower()
fyrirUtan.go(i[0])
else:
raise ValueError(fyrirUtan.info)
def do():
print(room.grunnur.items)
if ["DUCT TAPE(12)"] in room.grunnur.items:
print("You see a locked chest in the corner of the room.")
i = input("Do you want to open it?")
if i[0] in ['y', 'j']:
if ["Brotinn Lykill"] in room.grunnur.items:
if ["Brotinn Lykill(43)"] in room.grunnur.items:
print("You found a drawing!")
room.grunnur.items.append(["MONOLIZAPAINTING"])
x = input("Do you want to see your inventory?")
if x[0] in ['y', 'j']:
print(room.grunnur.items)
turtledrawing()
else:
print("You need the other keypiece")
exitFunction()
else:
print("You need both keypieces to open this chest!")
exitFunction()
else:
print("Wat")
exitFunction()
else:
respawn()
def goblinkamp():
i = random.randrange(1,10)
if i < 7:
print("You kill the goblin and recieve DUCT TAPE")
room.grunnur.items.append(["DUCT TAPE(12)"])
do()
else:
os.system('cls')
print("You kill the goblin but get no loot")
print("The goblin will respawn soon")
time.sleep(5)
respawn()
def respawn():
print("The goblin has spawned")
svar = input("Do you want to kill it? (y/n) ")
if svar[0] in ['y', 'j']:
goblinkamp()
else:
print("OK")
exitFunction()
respawn()
|
Forritarar-FS/Kastali
|
pythonHus/room43.py
|
Python
|
unlicense
| 2,219
|
[
"VisIt"
] |
d94b3e8da355054900f40d81d10c1d55f0b4df4b521ccd85a220c9c821acd733
|
#! /usr/bin/python
# Copyright Ivan Sovic, 2015. www.sovic.org
#
# Creates a pileup from a given SAM/BAM file, and calls consensus bases (or variants).
import os;
import sys;
import operator;
import subprocess;
VERBOSE_VARIANT_FILE = True;
OUTPUT_N_VARIANTS = False;
def increase_in_dict(dict_counter, value):
try:
dict_counter[value] += 1;
except KeyError:
dict_counter[value] = 1;
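# For reference, an mpileup line has (at least) five tab-separated columns:
#   ref_name  position  ref_base  coverage  bases  [qualities]
# e.g. (illustrative only):
#   chr1    1002    T    4    .,+2AG^].    <qualities>
# where '.'/',' are matches, '+2AG' is a 2-base insertion attached to the
# previous base, and '^].' is a read start (mapq ']') followed by a match.
# process_mpileup_line() below walks the bases column symbol by symbol.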
def process_mpileup_line(line, line_number, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=False):
# Parses a single mpileup line, calls a consensus base (or records a
# variant), and accumulates the running counters passed in by reference.
# Split the line, and perform a sanity check.
split_line = line.strip().split('\t');
if (len(split_line) < 5 or len(split_line) > 6):
sys.stderr.write(line + '\n');
return 0;
ref_name = split_line[0];
position = split_line[1];
ref_base = split_line[2];
coverage = split_line[3];
original_bases = split_line[4];
if (len(split_line) == 6):
qualities = split_line[5];
bases = '';
# Replace the '.' and ',' signs with the actual reference base.
i = 0;
while (i < len(original_bases)):
if (original_bases[i] == '.' or original_bases[i] == ','):
bases += ref_base;
else:
bases += original_bases[i];
i += 1;
base_counts = {};
insertion_count = 0;
current_base_deletion_count = 0;
deletion_count = 0;
insertion_event_counts = {};
deletion_event_counts = {};
end_counts = 0;
i = 0;
while (i < len(bases)):
base = bases[i];
if (base == r'^'):
# This is the starting position of a read. It encodes two
# symbols: '^' marking the read start and a char marking the
# mapping quality of the read.
#increase_in_dict(base_counts, bases[i + 1].upper());
i += 1; # Increase only by 1, because we have i += 1 down there.
elif (base == r'$'):
# This marks the end of a read.
end_counts += 1;
elif (base == r'*'):
# This is a deletion, just count it.
current_base_deletion_count += 1;
elif (base == r'-'):
# This marks the occurrence of deletions. It is a composite object
# consisting of: the special character '-', the number of the deleted bases
# and the actual bases that are deleted (these bases follow the current position).
# In our approach, we ignore this case, because we count deletions one by one
# through the '*' character.
# Get the number of bases that need to be skipped in the string.
j = (i + 1);
while (bases[j] in '0123456789'):
j += 1;
num_bases = int(bases[(i + 1):j]);
skip_bases = (j - i) + num_bases - 1;
deletion_count += 1;
deletion = bases[j : (j + num_bases)].upper();
increase_in_dict(deletion_event_counts, deletion);
# Skip the length of the numeric entry plus the actual number of bases
# that need to be skipped.
i += skip_bases;
elif (base == r'+'):
# This marks the occurrence of an insertion. It is a composite object
# consisting of: the special character '+', the number of the inserted bases
# and the actual bases that are inserted (these bases follow the current position).
# Similar to the deletion marking, but here we actually care about the bases,
# and we need to make an allele aware count.
# Get the number of bases that are inserted;
j = (i + 1);
while (bases[j] in '0123456789'):
j += 1;
num_bases = int(bases[(i + 1):j]);
skip_bases = (j - i) + num_bases - 1;
insertion_count += 1;
insertion = bases[j : (j + num_bases)].upper();
increase_in_dict(insertion_event_counts, insertion);
i += skip_bases;
else:
increase_in_dict(base_counts, bases[i].upper());
i += 1;
# TODO: An additional problematic case, discovered this on 03.11.2014., when analyzing BWA-MEM's mpileup.
# There are pileup bases that do not have any actual bases, but only the '*' symbols. How should this be handled properly?
# Example line from the mpileup file:
# gi|48994873|gb|U00096.2|_Escherichia_coli_str._K-12_substr._MG1655,_complete_genome 1938202 T 20 ******************** 8,2*#-;)$B>2$1&D-
# I chose to handle them as undercovered bases.
non_indel_coverage_current_base = int(coverage) - current_base_deletion_count;
if (verbose == True):
sys.stdout.write('%s\nbase_counts: %s\n' % (line.strip(), str(base_counts)));
 # EDIT: Previously I compared the total coverage of the current base with the coverage threshold.
 # However, the total coverage also accounts for the deletions denoted by the '*' sign, which I think
 # isn't relevant: deletions are counted before they occur, and at that point it is already decided
 # whether there is going to be a deletion event. If we wound up at this base (i.e. this base didn't
 # get skipped because of a deletion consensus), then the deletions on this base are ignored.
#if (int(coverage) < coverage_threshold or int(coverage) == current_base_deletion_count):
# if (non_indel_coverage_current_base < coverage_threshold):
if (int(coverage) < coverage_threshold):
ret_num_undercovered_bases[0] += 1;
# ret_coverage_sum[0] += 0;
ret_coverage_sum[0] += int(coverage); # TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
  # Sort the observed bases by count; the most common base ends up last.
  sorted_base_counts = sorted(base_counts.items(), key=operator.itemgetter(1));
  try:
   most_common_base_count = sorted_base_counts[-1][1];
  except IndexError:
   # No called bases at this position (e.g. only indel markers).
   most_common_base_count = 0;
#variant_line = 'undercovered1\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
variant_line = 'undercovered1\tpos = %s\tref = %s\tcoverage = %d\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s' % (position, ref_name, int(coverage), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts));
ret_variant_list.append(variant_line);
if (OUTPUT_N_VARIANTS == True):
### VCF output ###
qual = 1000;
info = 'DP=%s;TYPE=snp' % (coverage);
ref_field = ref_base;
alt_field = 'N';
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
else:
ret_num_called_bases[0] += 1;
ret_coverage_sum[0] += int(coverage); # TODO: Should I count total coverage of this base, or the non_indel_coverage_current_base?
most_common_base_count = 0;
### Handling base consensus.
sorted_base_counts = sorted(base_counts.items(), key=operator.itemgetter(1));
  try:
   most_common_base_count = sorted_base_counts[-1][1];
  except IndexError:
   # No called bases at this position; keep the initial count of 0.
   pass;
# sys.stderr.write(str(e) + '\n');
# sys.stderr.write('sorted_base_counts:\n');
# sys.stderr.write(str(sorted_base_counts) + '\n');
# sys.stderr.write('base_counts:\n');
# sys.stderr.write(str(base_counts) + '\n');
# sys.stderr.write('original_bases:\n');
# sys.stderr.write(str(original_bases) + '\n');
# sys.stderr.write('line:\n');
# sys.stderr.write(line.strip() + '\n');
# most_common_base_count = 0;
# Allow for the case where there are multiple equally good choices.
# In this case, we prefer the choice which is equal to the reference.
is_good = False;
for base_count in sorted_base_counts:
if (base_count[1] == most_common_base_count):
if (base_count[0] == ref_base):
is_good = True;
break;
if (is_good == False):
if (len(sorted_base_counts) > 0):
ret_snp_count[0] += 1;
# ret_variant_list.append(line_number);
variant_line = 'SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
### VCF output ###
alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
qual = 1000;
info = 'DP=%s;TYPE=snp' % (coverage);
ref_field = ref_base;
alt_field = alt_base;
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
else:
    sys.stderr.write('\nWarning: a SNP was detected, but there were no bases in the sorted_base_counts!\n');
    variant_line = 'SNP\tpos = %s\tref = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
    # Emit the details of the anomalous position along with the warning.
    sys.stderr.write(variant_line + '\n');
else:
ret_num_correct_bases[0] += 1;
if (verbose == True):
sys.stdout.write('Reference base: %s\n' % (ref_base));
sys.stdout.write('Consensus base: %s\n\n' % (base_count[0]));
#if (int(position) == 100000 or int(position) == 1000000 or int(position) == 2000000 or int(position) == 3000000 or int(position) == 4000000):
#print '\nTEST\tpos = %s\tcoverage = %d\tnon_indel_cov_curr = %d\tmost_common_base_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s\n' % (position, int(coverage), non_indel_coverage_current_base, most_common_base_count, ref_base, sorted_base_counts[-1][0], str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
### Indels got called even with coverage below the threshold. Attempting to correct this.
if ((int(coverage) - end_counts) >= coverage_threshold):
### Handling indel consensus.
  ### Use a different coverage threshold here: we are interested even in the reads
  ### which had a '*' at the current position (because we don't know where the deletion ends).
non_indel_coverage_next_base = int(coverage) - end_counts - deletion_count - insertion_count;
if ((non_indel_coverage_next_base + deletion_count + insertion_count) > coverage_threshold):
# Sanity check, just to see if there actually were any insertions (to avoid index out of bounds error).
# If there are insertions, get the most common one.
if (len(insertion_event_counts.keys()) > 0):
# print 'insertion_event_counts = %s' % (str(insertion_event_counts));
sorted_insertion_counts = sorted(insertion_event_counts.items(), key=operator.itemgetter(1));
most_common_insertion_count = sorted_insertion_counts[-1][1];
most_common_insertion_length = len(sorted_insertion_counts[-1][0]);
insertion_unique = True if (sum([int(val[1] == most_common_insertion_count) for val in sorted_insertion_counts]) == 1) else False;
insertion_lengths = {};
for insertion_event in insertion_event_counts:
# print insertion_event;
try:
insertion_lengths[len(insertion_event)] += insertion_event_counts[insertion_event];
except:
insertion_lengths[len(insertion_event)] = insertion_event_counts[insertion_event];
sorted_insertion_lengths = sorted(insertion_lengths.items(), key=operator.itemgetter(1));
most_common_insertions = {};
for insertion_event in insertion_event_counts:
if (len(insertion_event) == sorted_insertion_lengths[-1][0]):
try:
most_common_insertions[insertion_event] += insertion_event_counts[insertion_event];
except:
most_common_insertions[insertion_event] = insertion_event_counts[insertion_event];
sorted_most_common_insertions = sorted(most_common_insertions.items(), key=operator.itemgetter(1));
most_common_insertion = sorted_most_common_insertions[-1][0] if (len(sorted_most_common_insertions) > 0) else None;
# print 'sorted_insertion_counts = %s' % (str(sorted_insertion_counts));
# print 'most_common_insertion_count = %s' % (str(most_common_insertion_count));
# print 'most_common_insertion_length = %s' % (str(most_common_insertion_length));
# print 'insertion_unique = %s' % (str(insertion_unique));
# print 'insertion_lengths = %s' % (str(insertion_lengths));
# print 'sorted_insertion_lengths = %s' % (str(sorted_insertion_lengths));
most_common_insertion_count = sorted_insertion_lengths[-1][1];
most_common_insertion_length = sorted_insertion_lengths[-1][0];
# print 'most_common_insertion_count = %s' % (str(most_common_insertion_count));
# print 'most_common_insertion_length = %s' % (str(most_common_insertion_length));
# print 'sorted_most_common_insertions = %s' % (str(sorted_most_common_insertions));
# print 'most_common_insertion = %s' % (str(most_common_insertion));
# print '';
    # I stopped here while trying to figure out how to sort insertions by event length, and then take the most frequent event length.
    # For that event length I then take the most common insertion count and length, and pick the most frequent event among them.
    # The rationale is that the original sequence could have had many deletions which most of the other reads do not share.
    # If the other reads lack those deletions but carry many SNPs, the insertions might never get corrected, and the final consensus would remain too short.
else:
most_common_insertion_count = 0;
most_common_insertion_length = 0;
insertion_unique = False;
# Sanity check, just to see if there actually were any deletions (to avoid index out of bounds error).
# If there are deletions, get the most common one.
if (len(deletion_event_counts.keys()) > 0):
sorted_deletion_counts = sorted(deletion_event_counts.items(), key=operator.itemgetter(1));
most_common_deletion_count = sorted_deletion_counts[-1][1];
most_common_deletion_length = len(sorted_deletion_counts[-1][0]);
deletion_unique = True if (sum([int(val[1] == most_common_deletion_count) for val in sorted_deletion_counts]) == 1) else False;
else:
most_common_deletion_count = 0;
most_common_deletion_length = 0;
deletion_unique = False;
# print 'deletion_count = ', (deletion_count);
if (most_common_deletion_count > non_indel_coverage_next_base):
# In this case, deletions are a clear winner.
# if (deletion_unique == True):
#ret_deletion_count[0] += most_common_deletion_length;
ret_deletion_count[0] += 1;
#variant_line = 'deletion\t%d\t%s\t%s\t%s\t%s' % (most_common_deletion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
#return most_common_deletion_length;
    variant_line = 'del\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_deletion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_deletion_count, ref_base, ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0])), str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
    ### A deletion in the VCF format is reported at the position of the last non-deleted base: the REF field contains that base followed by the deleted bases, and the ALT field contains only that base.
### VCF output ###
alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
qual = 1000;
info = 'DP=%s;TYPE=del' % (coverage);
ref_field = '%s%s' % (ref_base, sorted_deletion_counts[-1][0]);
alt_field = ref_base;
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
# print '\npos = %s, coverage_threshold = %d, (int(coverage) - end_counts) = %d' % (position, coverage_threshold, (int(coverage) - end_counts));
return most_common_deletion_length;
# if (insertion_count > coverage_threshold):
if (most_common_insertion_count > non_indel_coverage_next_base and most_common_insertion_count > coverage_threshold):
# In this case, insertions are a clear winner.
# if (insertion_unique == True):
#ret_insertion_count[0] += most_common_insertion_length;
ret_insertion_count[0] += 1;
ret_num_called_bases[0] += most_common_insertion_length;
#variant_line = 'insertion\t%d\t%s\t%s\t%s\t%s' % (most_common_insertion_count, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
#ret_variant_list.append(variant_line);
try:
temp_sorted_bc = sorted_base_counts[-1][0];
except:
temp_sorted_bc = 0;
indel_length = most_common_insertion_length;
variant_line = 'ins\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\tref_base = %s\tcons_base = %s\tbase_counts = %s\tinsertion_counts = %s\tdeletion_counts = %s\t%s' % (position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), str(insertion_event_counts), str(deletion_event_counts), line.strip());
ret_variant_list.append(variant_line);
    ### An insertion in the VCF format is reported at the position where it occurs: the REF field contains the reference base, and the ALT field contains that base followed by the inserted bases.
### VCF output ###
# alt_base = ('{}') if (len(sorted_base_counts) == 0) else (str(sorted_base_counts[-1][0]));
    alt_base = ('{}') if (most_common_insertion is None) else (most_common_insertion);
qual = 1000;
info = 'DP=%s;TYPE=ins' % (coverage);
ref_field = ref_base;
alt_field = '%s%s' % (ref_base, sorted_insertion_counts[-1][0]);
vcf_line = '%s\t%s\t.\t%s\t%s\t%d\tPASS\t%s' % (ref_name, position, ref_field, alt_field, qual, info);
ret_vcf_list.append(vcf_line);
##################
# else:
# # In this case, either the base count consensus wins, or the
# # insertion/deletion count is ambiguous.
# try:
# temp_sorted_bc = sorted_base_counts[-1][0];
# except:
# temp_sorted_bc = 0;
# # print insertion_event_counts;
# # for val in insertion_event_counts:
# # print val;
# # sys.stdout.flush();
# # ins_cov = 0;
# # del_cov = 0;
# ins_cov = sum([insertion_event_counts[val] for val in insertion_event_counts.keys()]) if (len(insertion_event_counts.keys()) > 0) else 0;
# del_cov = sum([deletion_event_counts[val] for val in deletion_event_counts.keys()]) if (len(deletion_event_counts.keys()) > 0) else 0;
# if (ins_cov > coverage_threshold):
# variant_line = 'skipped\tpos = %s\tref = %s\tnon_indel_cov_next = %d\tnon_indel_cov_curr = %d\tmost_common_insertion_count = %d\n\t* ref_base = %s\tcons_base = %s\tbase_counts = %s\n\t* ins_cov = %d\tinsertion_counts = %s\n\t* del_cov = %d\tdeletion_counts = %s\t%s' % (position, ref_name, non_indel_coverage_next_base, non_indel_coverage_current_base, most_common_insertion_count, ref_base, temp_sorted_bc, str(sorted_base_counts), ins_cov, str(insertion_event_counts), del_cov, str(deletion_event_counts), line.strip());
# sys.stdout.write('%s\n' % (variant_line));
# sys.stdout.write('\n');
# sys.stdout.flush();
# pass;
return 0;
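# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): how a pileup base
# string decodes under the conventions handled above. The sample string is
# hypothetical.
#
#   ref_base = 'A', bases = '^I..,,+2TT-1c.$*'
#
#   '^I'    -> read start; the mapping-quality char 'I' is skipped
#   '.' ',' -> matches to the reference, counted as 'A'
#   '+2TT'  -> a 2-base insertion event 'TT' following this position
#   '-1c'   -> a 1-base deletion event 'C' following this position
#   '.$'    -> a match on the last base of a read
#   '*'     -> a deleted base at this position itself
#
# which would yield base_counts = {'A': 5}, insertion_event_counts = {'TT': 1},
# deletion_event_counts = {'C': 1}, end_counts = 1 and
# current_base_deletion_count = 1.
#
# In the VCF lines emitted above, a deletion event 'AC' at a position with
# ref_base 'T' is written as REF='TAC', ALT='T', while an insertion of 'AC'
# at the same position is written as REF='T', ALT='TAC'.
# ----------------------------------------------------------------------------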
def process_mpileup(alignments_path, reference_path, mpileup_path, coverage_threshold, output_prefix, thread_id=0, bed_position=''):
fp = None;
try:
fp = open(mpileup_path, 'r');
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % mpileup_path);
return None;
ret_variant_list = [];
ret_vcf_list = [];
ret_snp_count = [0];
ret_insertion_count = [0];
ret_deletion_count = [0];
ret_num_undercovered_bases = [0];
ret_num_called_bases = [0];
ret_num_correct_bases = [0];
ret_coverage_sum = [0];
# lines = fp.readlines();
if (VERBOSE_VARIANT_FILE == True):
fp_variant = None;
fp_vcf = None;
if (output_prefix != ''):
  out_dir = os.path.dirname(output_prefix);
  if (out_dir != '' and (not os.path.exists(out_dir))):
   os.makedirs(out_dir);
if (VERBOSE_VARIANT_FILE == True):
variant_file = ('%s.csv' % output_prefix) if (output_prefix.endswith('.vcf') == True) else ('%s-cov_%d.variant.csv' % (output_prefix, coverage_threshold));
fp_variant = open(variant_file, 'w');
# if (output_prefix.endswith('.vcf') == True):
# vcf_file = output_prefix;
# else:
# vcf_file = ('%s-cov_%d.variant.vcf' % (output_prefix, coverage_threshold));
vcf_file = output_prefix if (output_prefix.endswith('.vcf') == True) else ('%s-cov_%d.variant.vcf' % (output_prefix, coverage_threshold));
fp_vcf = open(vcf_file, 'w');
fp_vcf.write('##fileformat=VCFv4.0\n');
fp_vcf.write('##fileDate=20150409\n');
fp_vcf.write('##source=%s\n' % (' '.join(sys.argv)));
fp_vcf.write('##reference=%s\n' % reference_path);
fp_vcf.write('##INFO=<ID=DP,Number=1,Type=Integer,Description="Raw Depth">\n');
fp_vcf.write('##INFO=<ID=TYPE,Number=A,Type=String,Description="Type of each allele (snp, ins, del, mnp, complex)">\n');
fp_vcf.write('##INFO=<ID=AF,Number=1,Type=Float,Description="Allele Frequency">\n');
fp_vcf.write('##INFO=<ID=SB,Number=1,Type=Integer,Description="Phred-scaled strand bias at this position">\n');
fp_vcf.write('##INFO=<ID=DP4,Number=4,Type=Integer,Description="Counts for ref-forward bases, ref-reverse, alt-forward and alt-reverse bases">\n');
fp_vcf.write('##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Indicates that the variant is an INDEL.">\n');
fp_vcf.write('##INFO=<ID=CONSVAR,Number=0,Type=Flag,Description="Indicates that the variant is a consensus variant (as opposed to a low frequency variant).">\n');
fp_vcf.write('##INFO=<ID=HRUN,Number=1,Type=Integer,Description="Homopolymer length to the right of report indel position">\n');
fp_vcf.write('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n');
fp_vcf.flush();
use_bed = False;
bed_chromosome = "";
bed_pos_start = 0;
# bed_pos_end = len(lines);
bed_pos_end = -1;
if (bed_position != ""):
bed_split = bed_position.split(':');
if (len(bed_split) != 2):
use_bed = False;
else:
bed_chromosome = bed_split[0];
bed_pos_split = bed_split[1].split('-');
if (len(bed_pos_split) != 2):
use_bed = False;
else:
bed_pos_start = int(bed_pos_split[0]);
bed_pos_end = int(bed_pos_split[1]);
use_bed = True;
sys.stderr.write('Using location specified through commandline:\n');
sys.stderr.write('\tChromosome: "%s"\n' % bed_chromosome);
sys.stderr.write('\tStart: %d\n' % bed_pos_start);
sys.stderr.write('\tEnd: %d\n\n' % bed_pos_end);
# i = 0;
i = 0 if (use_bed == False) else max((bed_pos_start - 10), 0);
j = 0;
# while (i < bed_pos_end): # len(lines)):
num_bases_to_skip = 0;
for line in fp:
# line = lines[i];
if (num_bases_to_skip > 0):
num_bases_to_skip -= 1;
continue;
if (use_bed == True):
line_split = line.strip().split('\t');
if (len(line_split) > 2 and line_split[0] == bed_chromosome):
current_pos = int(line_split[1]);
if (current_pos < bed_pos_start or current_pos >= bed_pos_end):
i += 1;
j += 1;
continue;
else:
# print line_split[0];
# print bed_chromosome;
i += 1;
j += 1;
continue;
if (thread_id == 0):
if ((j % 1000) == 0):
sys.stderr.write('\r[%d] snps = %d, insertions = %d, deletions = %d, undercovered = %d, coverage = %.2f' % (i, ret_snp_count[0], ret_insertion_count[0], ret_deletion_count[0], ret_num_undercovered_bases[0], (float(ret_coverage_sum[0])/float((i + 1)))));
sys.stderr.flush();
variant_list_length = len(ret_variant_list);
vcf_list_length = len(ret_vcf_list);
num_bases_to_skip = process_mpileup_line(line, i, ret_variant_list, ret_vcf_list, ret_snp_count, ret_insertion_count, ret_deletion_count, ret_num_undercovered_bases, ret_num_called_bases, ret_num_correct_bases, ret_coverage_sum, coverage_threshold, verbose=use_bed);
if (VERBOSE_VARIANT_FILE == True):
if (len(ret_variant_list) > variant_list_length and fp_variant != None):
fp_variant.write('\n'.join(ret_variant_list[variant_list_length:]) + '\n');
fp_variant.flush();
if (len(ret_vcf_list) > vcf_list_length and fp_vcf != None):
fp_vcf.write('\n'.join(ret_vcf_list[vcf_list_length:]) + '\n');
fp_vcf.flush();
i += num_bases_to_skip;
i += 1;
j += 1;
#if (i > 10000):
#break;
fp.close();
sys.stderr.write('\n')
if (VERBOSE_VARIANT_FILE == True):
if (fp_variant != None):
fp_variant.close();
if (fp_vcf != None):
fp_vcf.close();
summary_lines = '';
summary_lines += 'alignments_file: %s\n' % alignments_path;
summary_lines += 'mpileup_file: %s\n' % mpileup_path;
summary_lines += 'coverage_threshold: %d\n' % coverage_threshold;
summary_lines += 'snp_count: %d\n' % ret_snp_count[0];
summary_lines += 'insertion_count: %d\n' % ret_insertion_count[0];
summary_lines += 'deletion_count: %d\n' % ret_deletion_count[0];
summary_lines += 'num_undercovered_bases: %d\n' % ret_num_undercovered_bases[0];
summary_lines += 'num_called_bases: %d\n' % ret_num_called_bases[0];
summary_lines += 'num_correct_bases: %d\n' % ret_num_correct_bases[0];
summary_lines += 'average_coverage: %.2f\n' % ((float(ret_coverage_sum[0])/float((i + 1))));
sys.stderr.write(summary_lines + '\n');
 sys.stderr.write('\n');
 if (output_prefix != ''):
  sys.stderr.write('Output file: %s\n' % (vcf_file));
# if (output_prefix != ''):
# #summary_file = output_prefix + '.conssum';
# summary_file = ('%s-cov_%d.variant.sum' % (output_prefix, coverage_threshold));
# try:
# fp_sum = open(summary_file, 'w');
# fp_sum.write(summary_lines);
# fp_sum.close();
# return summary_file;
# except IOError:
# sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % (summary_file));
# return None;
return None;
def main(alignments_path, reference_path, coverage_threshold, output_prefix, thread_id=0, bed_position=""):
# Sanity checking the existence of the file, and the correctness of its extension.
# Also, if input file is a SAM file, then convert it to a sorted BAM.
alignments_path_bam = alignments_path;
if (os.path.exists(alignments_path) == False):
sys.stderr.write('ERROR: File "%s" does not exist!\n' % alignments_path);
return;
if (alignments_path.endswith('sam')):
# Determine the path where the new BAM file will be generated.
dir_name = os.path.dirname(alignments_path);
if (dir_name == ''):
dir_name = '.';
alignments_path_bam = dir_name + '/' + os.path.splitext(os.path.basename(alignments_path))[0] + '.bam'
alignments_path_bam_exists = os.path.exists(alignments_path_bam);
# Check if a BAM file with the given name already exists.
if (alignments_path_bam_exists == False or (alignments_path_bam_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(alignments_path_bam))):
# Convert the SAM file to a sorted BAM file.
command = 'samtools view -bS %s | samtools sort - %s' % (alignments_path, os.path.splitext(alignments_path_bam)[0]);
sys.stderr.write(command + '\n')
   subprocess.call(command, shell=True);
# Create the BAM index file.
command = 'samtools index %s %s.bai' % (alignments_path_bam, alignments_path_bam);
   subprocess.call(command, shell=True);
elif (alignments_path.endswith('bam') == False):
sys.stderr.write('ERROR: File extension needs to be either .sam or .bam! Input file path: "%s".\n' % alignments_path);
return;
# Convert the sorted BAM file to a mpileup file if it doesn't exist yet.
mpileup_path = ('%s.mpileup' % alignments_path_bam);
mpileup_exists = os.path.exists(mpileup_path);
if (mpileup_exists == False or (mpileup_exists == True and os.path.getmtime(alignments_path) > os.path.getmtime(mpileup_path))):
command = 'samtools mpileup -B -d 1000000 -Q 0 -A -f %s %s > %s.mpileup' % (reference_path, alignments_path_bam, alignments_path_bam);
  subprocess.call(command, shell=True);
sys.stderr.write('Processing file "%s"...\n' % alignments_path);
sys.stderr.write('Reference file "%s"...\n' % reference_path);
sys.stderr.write('Coverage threshold: %d\n' % coverage_threshold);
summary_file = process_mpileup(alignments_path, reference_path, ('%s.mpileup' % alignments_path_bam), coverage_threshold, output_prefix, thread_id, bed_position);
def CollectSummaries(sam_files, prefix_for_intermediate_results, collective_output_file):
fp_collect = None;
try:
fp_collect = open(collective_output_file, 'w');
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % collective_output_file);
return;
for sam_file in sam_files:
summary_file = prefix_for_intermediate_results + '.sum';
try:
fp_sum = open(summary_file, 'r');
lines = fp_sum.readlines();
fp_sum.close();
except IOError:
sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % summary_file);
continue;
fp_collect.write(''.join(lines) + '\n');
fp_collect.close();
if __name__ == "__main__":
# if (len(sys.argv) < 5):
# sys.stderr.write('Usage:\n');
# sys.stderr.write('\t%s <reference_file_path> coverage_threshold <collective_output_file> <{sb}am_file_1> [<{sb}am_file_2> <{sb}am_file_3> ...]\n' % sys.argv[0]);
# sys.stderr.write('\t(If <collective_output_file> is equal to "-", no files will be written to disk.)\n');
# exit(1);
if (len(sys.argv) < 5):
sys.stderr.write('Usage:\n');
  sys.stderr.write('\t%s <reference_file_path> coverage_threshold <output_prefix> <{sb}am_file> [position]\n' % sys.argv[0]);
  sys.stderr.write('\t(If <output_prefix> is equal to "-", the prefix is derived from the alignment file name.)\n');
  sys.stderr.write('\tThe position parameter is a string specifying "chromosome:start-end".\n\n');
exit(1);
reference_file = sys.argv[1];
coverage_threshold = int(sys.argv[2]);
output_prefix = sys.argv[3];
sam_file = sys.argv[4];
bed_position = '';
if (len(sys.argv) > 5):
bed_position = sys.argv[5];
# sys.stderr.write('bed_position: "%s"\n\n' % bed_position);
processes = [];
if (output_prefix == '-'):
output_prefix = os.path.splitext(sam_file)[0];
main(sam_file, reference_file, coverage_threshold, output_prefix, 0, bed_position);
# if (output_prefix != '-'):
# CollectSummaries([sam_file], output_prefix, output_prefix + '.variant.sum');
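# Illustrative invocation sketch (hypothetical file names):
#   python denovoconsensus3.py reference.fa 20 out/consensus alignments.bam
#   python denovoconsensus3.py reference.fa 20 out/consensus alignments.sam "chr1:10000-20000"
# A SAM input is converted to a sorted, indexed BAM and piled up with samtools
# before variant counting starts; existing BAM/mpileup files are reused when
# they are newer than the input.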
|
isovic/ra-consensus
|
src/denovoconsensus3.py
|
Python
|
mit
| 30,955
|
[
"BWA"
] |
e115033504130485cc3a11385eedfa95558cfc4daf0b47043d54c95741737908
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
from unittest import TestCase
from lucene import \
WhitespaceAnalyzer, Document, Field, IndexWriter, Term, MultiSearcher, \
TermRangeQuery, RAMDirectory, IndexSearcher
class MultiSearcherTest(TestCase):
def setUp(self):
animals = [ "aardvark", "beaver", "coati",
"dog", "elephant", "frog", "gila monster",
"horse", "iguana", "javelina", "kangaroo",
"lemur", "moose", "nematode", "orca",
"python", "quokka", "rat", "scorpion",
"tarantula", "uromastyx", "vicuna",
"walrus", "xiphias", "yak", "zebra" ]
analyzer = WhitespaceAnalyzer()
aTOmDirectory = RAMDirectory()
nTOzDirectory = RAMDirectory()
aTOmWriter = IndexWriter(aTOmDirectory, analyzer, True,
IndexWriter.MaxFieldLength.UNLIMITED)
nTOzWriter = IndexWriter(nTOzDirectory, analyzer, True,
IndexWriter.MaxFieldLength.UNLIMITED)
for animal in animals:
doc = Document()
doc.add(Field("animal", animal,
Field.Store.YES, Field.Index.NOT_ANALYZED))
if animal[0].lower() < "n":
aTOmWriter.addDocument(doc)
else:
nTOzWriter.addDocument(doc)
aTOmWriter.close()
nTOzWriter.close()
self.searchers = [ IndexSearcher(aTOmDirectory),
IndexSearcher(nTOzDirectory) ]
def testMulti(self):
searcher = MultiSearcher(self.searchers)
# range spans documents across both indexes
query = TermRangeQuery("animal", "h", "t", True, True)
scoreDocs = searcher.search(query, 50).scoreDocs
self.assertEqual(12, len(scoreDocs), "tarantula not included")
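# Note (a reading of the test above, not original commentary): the inclusive
# term range "h".."t" matches terms lexicographically between the two bounds,
# so "tarantula" sorts after "t" and is excluded; the twelve hits run from
# "horse" through "scorpion", drawn from both sub-indexes.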
|
romanchyla/pylucene-trunk
|
samples/LuceneInAction/lia/advsearching/MultiSearcherTest.py
|
Python
|
apache-2.0
| 2,564
|
[
"MOOSE",
"ORCA"
] |
c44a148356937ee6f4c8044bc256282bd4c089a9add2e716cfa1e30aec61694a
|
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Anders F Bjorklund <afb@users.sourceforge.net>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.qt import getPixmap, centerWindow
from smart.channel import getChannelInfo
from smart import *
import qt
class TextListViewItem(qt.QListViewItem):
def __init__(self, parent):
qt.QListViewItem.__init__(self, parent)
self._text = {}
self._oldtext = {}
def setText(self, col, text):
qt.QListViewItem.setText(self, col, text)
if col in self._text:
self._oldtext[col] = self._text[col]
self._text[col] = text
def oldtext(self, col):
return self._oldtext.get(col, None)
class QtPriorities(object):
def __init__(self, parent=None):
self._window = qt.QDialog(None)
self._window.setIcon(getPixmap("smart"))
self._window.setCaption(_("Priorities"))
#self._window.setModal(True)
self._window.setMinimumSize(600, 400)
layout = qt.QVBoxLayout(self._window)
layout.setResizeMode(qt.QLayout.FreeResize)
vbox = qt.QVBox(self._window)
vbox.setMargin(10)
vbox.setSpacing(10)
vbox.show()
layout.addWidget(vbox)
self._treeview = qt.QListView(vbox)
self._treeview.setAllColumnsShowFocus(True)
self._treeview.show()
qt.QObject.connect(self._treeview, qt.SIGNAL("itemRenamed(QListViewItem *, int, const QString &)"), self.itemRenamed)
qt.QObject.connect(self._treeview, qt.SIGNAL("selectionChanged()"), self.selectionChanged)
self._treeview.addColumn(_("Package Name"))
self._treeview.addColumn(_("Channel Alias"))
self._treeview.addColumn(_("Priority"))
bbox = qt.QHBox(vbox)
bbox.setSpacing(10)
bbox.layout().addStretch(1)
bbox.show()
button = qt.QPushButton(_("New"), bbox)
button.setEnabled(True)
button.setIconSet(qt.QIconSet(getPixmap("crystal-add")))
button.show()
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self.newPriority)
self._newpriority = button
button = qt.QPushButton(_("Delete"), bbox)
button.setEnabled(False)
button.setIconSet(qt.QIconSet(getPixmap("crystal-delete")))
button.show()
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self.delPriority)
self._delpriority = button
button = qt.QPushButton(_("Close"), bbox)
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self._window, qt.SLOT("accept()"))
button.setDefault(True)
vbox.adjustSize()
def fill(self):
self._treeview.clear()
priorities = sysconf.get("package-priorities", {})
prioritieslst = priorities.items()
prioritieslst.sort()
for name, pkgpriorities in prioritieslst:
aliaslst = pkgpriorities.items()
aliaslst.sort()
for alias, priority in aliaslst:
item = TextListViewItem(self._treeview)
item.setText(0, name)
item.setText(1, alias or "*")
item.setText(2, str(priority))
item.setRenameEnabled(0, True)
item.setRenameEnabled(1, True)
item.setRenameEnabled(2, True)
def show(self):
self.fill()
self._window.show()
centerWindow(self._window)
self._window.raiseW()
self._window.exec_loop()
self._window.hide()
def newPriority(self):
name, alias, priority = PriorityCreator(self._window).show()
if name:
if sysconf.has(("package-priorities", name, alias)):
iface.error(_("Name/alias pair already exists!"))
else:
sysconf.set(("package-priorities", name, alias), int(priority))
self.fill()
def delPriority(self):
item = self._treeview.selectedItem()
if item:
name = str(item.text(0))
alias = str(item.text(1))
if alias == "*":
alias = None
sysconf.remove(("package-priorities", name, alias))
self.fill()
def selectionChanged(self):
item = self._treeview.selectedItem()
self._delpriority.setEnabled(bool(item))
def itemRenamed(self, item, col, newtext):
newtext = str(newtext).strip()
if col == 1:
if newtext == "*":
newtext = ""
oldtext = item.oldtext(col)
if newtext != oldtext:
if col == 0:
alias = str(item.text(0))
if alias == "*":
alias = None
priority = str(item.text(2))
if not newtext:
pass
elif sysconf.has(("package-priorities", newtext, alias)):
iface.error(_("Name/alias pair already exists!"))
else:
sysconf.set(("package-priorities", newtext, alias),
int(priority))
sysconf.remove(("package-priorities", oldtext, alias))
elif col == 1:
name = item.text(0)
priority = item.text(2)
if sysconf.has(("package-priorities", name, newtext)):
iface.error(_("Name/alias pair already exists!"))
else:
sysconf.move(("package-priorities", name, oldtext),
("package-priorities", name, newtext))
item.setText(col, newtext or "*")
elif col == 2:
if newtext:
name = str(item.text(0))
alias = str(item.text(1))
if alias == "*":
alias = None
try:
sysconf.set(("package-priorities", name, alias),
int(newtext))
except ValueError:
item.setText(col, oldtext)
iface.error(_("Invalid priority!"))
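# Sketch of the sysconf layout assumed by the dialog above (hypothetical
# values): priorities live under ("package-priorities", <name>, <alias>),
# where <alias> is None for the catch-all shown as "*" in the list view, e.g.
#
#   sysconf.set(("package-priorities", "kernel", None), 100)
#   sysconf.set(("package-priorities", "kernel", "updates"), -10)
#
# which fill() renders as two rows: ("kernel", "*", "100") and
# ("kernel", "updates", "-10").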
class PriorityCreator(object):
def __init__(self, parent=None):
self._window = qt.QDialog(parent)
self._window.setIcon(getPixmap("smart"))
self._window.setCaption(_("New Package Priority"))
self._window.setModal(True)
#self._window.setMinimumSize(600, 400)
vbox = qt.QVBox(self._window)
vbox.setMargin(10)
vbox.setSpacing(10)
vbox.show()
table = qt.QGrid(2, vbox)
table.setSpacing(10)
table.show()
label = qt.QLabel(_("Package Name:"), table)
self._name = qt.QLineEdit(table)
self._name.show()
label = qt.QLabel(_("Channel Alias:"), table)
self._alias = qt.QLineEdit(table)
self._alias.setText("*")
self._alias.show()
label = qt.QLabel(_("Priority:"), table)
self._priority = qt.QSpinBox(table)
self._priority.setSteps(1, 10)
self._priority.setRange(-100000,+100000)
self._priority.show()
sep = qt.QFrame(vbox)
sep.setFrameShape(qt.QFrame.HLine)
sep.setFrameShadow(qt.QFrame.Sunken)
sep.show()
bbox = qt.QHBox(vbox)
bbox.setSpacing(10)
bbox.layout().addStretch(1)
bbox.show()
button = qt.QPushButton(_("Cancel"), bbox)
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self._window, qt.SLOT("reject()"))
button = qt.QPushButton(_("OK"), bbox)
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self._window, qt.SLOT("accept()"))
button.setDefault(True)
vbox.adjustSize()
self._window.adjustSize()
def show(self):
self._window.show()
self._window.raiseW()
self._window.setActiveWindow()
while True:
self._result = self._window.exec_loop()
if self._result == qt.QDialog.Accepted:
name = str(self._name.text()).strip()
if not name:
iface.error(_("No name provided!"))
continue
alias = str(self._alias.text()).strip()
if alias == "*":
alias = None
priority = str(self._priority.value())
break
name = alias = priority = None
break
self._window.hide()
return name, alias, priority
class QtSinglePriority(object):
def __init__(self, parent=None):
self._window = qt.QDialog(parent)
self._window.setIcon(getPixmap("smart"))
self._window.setCaption(_("Package Priority"))
self._window.setModal(True)
#self._window.setMinimumSize(600, 400)
vbox = qt.QVBox(self._window)
vbox.setMargin(10)
vbox.setSpacing(10)
vbox.show()
self._vbox = vbox
self._table = qt.QGrid(2, vbox)
self._table.setSpacing(10)
self._table.show()
bbox = qt.QHBox(vbox)
bbox.setSpacing(10)
bbox.layout().addStretch(1)
bbox.show()
button = qt.QPushButton(_("Close"), bbox)
qt.QObject.connect(button, qt.SIGNAL("clicked()"), self._window, qt.SLOT("hide()"))
self._vbox.adjustSize()
self._window.adjustSize()
def show(self, pkg):
priority = sysconf.get(("package-priorities", pkg.name), {})
table = self._table
#table.foreach(table.remove)
label = qt.QLabel(_("Package:"), table)
label.show()
label = qt.QLabel("<b>%s</b>" % pkg.name, table)
label.show()
class AliasCheckBox(qt.QCheckBox):
def __init__(self, name, parent):
    qt.QCheckBox.__init__(self, name, parent)
def connect(self, signal, slot, spin, alias):
qt.QObject.connect(self, qt.SIGNAL(signal), slot)
self._spin = spin
self._alias = alias
def toggled(self, check):
spin = self._spin
alias = self._alias
if check:
priority[alias] = int(spin.value())
spin.setEnabled(True)
else:
if alias in priority:
del priority[alias]
spin.setEnabled(False)
class AliasSpinBox(qt.QSpinBox):
def __init__(self, parent):
qt.QSpinBox.__init__(self, parent)
def connect(self, signal, slot, alias):
qt.QObject.connect(self, qt.SIGNAL(signal), slot)
self._alias = alias
def value_changed(self, value):
    alias = self._alias
priority[alias] = value
label = qt.QLabel(_("Default priority:"), table)
label.show()
hbox = qt.QHBox(table)
hbox.setSpacing(10)
hbox.show()
radio = qt.QRadioButton(_("Channel default"), hbox)
radio.setChecked(None not in priority)
radio.show()
radio = qt.QRadioButton(_("Set to"), hbox)
radio.setChecked(None in priority)
radio.show()
spin = qt.QSpinBox(hbox)
spin.setSteps(1, 10)
spin.setRange(-100000,+100000)
spin.setValue(priority.get(None, 0))
spin.show()
label = qt.QLabel(_("Channel priority:"), table)
label.show()
chantable = qt.QGrid(2, table)
chantable.setSpacing(10)
chantable.show()
pos = 0
channels = sysconf.get("channels")
for alias in channels:
channel = channels[alias]
if not getChannelInfo(channel.get("type")).kind == "package":
continue
name = channel.get("name")
if not name:
name = alias
check = AliasCheckBox(name, chantable)
check.setChecked(alias in priority)
check.show()
spin = AliasSpinBox(chantable)
if alias not in priority:
spin.setEnabled(False)
spin.setSteps(1, 10)
spin.setRange(-100000,+100000)
spin.setValue(priority.get(alias, 0))
spin.connect("valueChanged(int)", spin.value_changed, alias)
check.connect("toggled(bool)", check.toggled, spin, alias)
spin.show()
pos += 1
table.adjustSize()
self._vbox.adjustSize()
self._window.adjustSize()
self._window.show()
self._window.raiseW()
self._window.setActiveWindow()
self._window.exec_loop()
self._window.hide()
if not priority:
sysconf.remove(("package-priorities", pkg.name))
else:
sysconf.set(("package-priorities", pkg.name), priority)
# vim:ts=4:sw=4:et
|
blackPantherOS/packagemanagement
|
smartpm/smart/interfaces/qt/priorities.py
|
Python
|
apache-2.0
| 13,734
|
[
"CRYSTAL"
] |
a04bb12141745585f98b9d1ad187e31d8d12f7b5da57f5daeba6422f761a6809
|
import numpy as np
from ase.units import Hartree
from gpaw.aseinterface import GPAW
from gpaw.lcao.overlap import NewTwoCenterIntegrals
from gpaw.utilities import unpack
from gpaw.utilities.tools import tri2full, lowdin
from gpaw.lcao.tools import basis_subset2, get_bfi2
from gpaw.coulomb import get_vxc as get_ks_xc
from gpaw.utilities.blas import r2k, gemm
from gpaw.lcao.projected_wannier import dots, condition_number, eigvals, \
get_bfs, get_lcao_projections_HSP
def get_rot(F_MM, V_oM, L):
eps_M, U_MM = np.linalg.eigh(F_MM)
indices = eps_M.real.argsort()[-L:]
U_Ml = U_MM[:, indices]
U_Ml /= np.sqrt(dots(U_Ml.T.conj(), F_MM, U_Ml).diagonal())
U_ow = V_oM.copy()
U_lw = np.dot(U_Ml.T.conj(), F_MM)
for col1, col2 in zip(U_ow.T, U_lw.T):
norm = np.linalg.norm(np.hstack((col1, col2)))
col1 /= norm
col2 /= norm
return U_ow, U_lw, U_Ml
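# Sketch of get_rot's behaviour (a reading of the code above, not original
# documentation): the L eigenvectors of F_MM with the largest eigenvalues are
# kept as U_Ml and normalized in the F_MM metric; U_ow starts from V_oM and
# U_lw = U_Ml^H F_MM, after which each corresponding column pair of U_ow and
# U_lw is scaled jointly to unit combined norm.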
def get_lcao_xc(calc, P_aqMi, bfs=None, spin=0):
nq = len(calc.wfs.ibzk_qc)
nao = calc.wfs.setups.nao
dtype = calc.wfs.dtype
if bfs is None:
bfs = get_bfs(calc)
if calc.density.nt_sg is None:
calc.density.interpolate_pseudo_density()
nt_sg = calc.density.nt_sg
vxct_sg = calc.density.finegd.zeros(calc.wfs.nspins)
calc.hamiltonian.xc.calculate(calc.density.finegd, nt_sg, vxct_sg)
vxct_G = calc.wfs.gd.zeros()
calc.hamiltonian.restrict(vxct_sg[spin], vxct_G)
Vxc_qMM = np.zeros((nq, nao, nao), dtype)
for q, Vxc_MM in enumerate(Vxc_qMM):
bfs.calculate_potential_matrix(vxct_G, Vxc_MM, q)
tri2full(Vxc_MM, 'L')
# Add atomic PAW corrections
for a, P_qMi in P_aqMi.items():
D_sp = calc.density.D_asp[a][:]
H_sp = np.zeros_like(D_sp)
calc.hamiltonian.xc.calculate_paw_correction(calc.wfs.setups[a],
D_sp, H_sp)
H_ii = unpack(H_sp[spin])
for Vxc_MM, P_Mi in zip(Vxc_qMM, P_qMi):
Vxc_MM += dots(P_Mi, H_ii, P_Mi.T.conj())
return Vxc_qMM * Hartree
def get_xc2(calc, w_wG, P_awi, spin=0):
if calc.density.nt_sg is None:
calc.density.interpolate_pseudo_density()
nt_g = calc.density.nt_sg[spin]
vxct_g = calc.density.finegd.zeros()
calc.hamiltonian.xc.get_energy_and_potential(nt_g, vxct_g)
vxct_G = calc.wfs.gd.empty()
calc.hamiltonian.restrict(vxct_g, vxct_G)
# Integrate pseudo part
Nw = len(w_wG)
xc_ww = np.empty((Nw, Nw))
r2k(.5 * calc.wfs.gd.dv, w_wG, vxct_G * w_wG, .0, xc_ww)
tri2full(xc_ww, 'L')
# Add atomic PAW corrections
for a, P_wi in P_awi.items():
D_sp = calc.density.D_asp[a][:]
H_sp = np.zeros_like(D_sp)
calc.wfs.setups[a].xc_correction.calculate_energy_and_derivatives(
D_sp, H_sp)
H_ii = unpack(H_sp[spin])
xc_ww += dots(P_wi, H_ii, P_wi.T.conj())
return xc_ww * Hartree
class ProjectedWannierFunctionsFBL:
"""PWF in the finite band limit.
::
--N
|w_w> = > |psi_n> U_nw
--n=1
"""
def __init__(self, V_nM, No, ortho=False):
Nw = V_nM.shape[1]
assert No <= Nw
V_oM, V_uM = V_nM[:No], V_nM[No:]
F_MM = np.dot(V_uM.T.conj(), V_uM)
U_ow, U_lw, U_Ml = get_rot(F_MM, V_oM, Nw - No)
self.U_nw = np.vstack((U_ow, dots(V_uM, U_Ml, U_lw)))
# stop here ?? XXX
self.S_ww = self.rotate_matrix(np.ones(1))
if ortho:
lowdin(self.U_nw, self.S_ww)
self.S_ww = np.identity(Nw)
self.norms_n = np.dot(self.U_nw, np.linalg.solve(
self.S_ww, self.U_nw.T.conj())).diagonal()
def rotate_matrix(self, A_nn):
if A_nn.ndim == 1:
return np.dot(self.U_nw.T.conj() * A_nn, self.U_nw)
else:
return dots(self.U_nw.T.conj(), A_nn, self.U_nw)
def rotate_projections(self, P_ani):
P_awi = {}
for a, P_ni in P_ani.items():
P_awi[a] = np.tensordot(self.U_nw, P_ni, axes=[[0], [0]])
return P_awi
def rotate_function(self, psit_nG):
return np.tensordot(self.U_nw, psit_nG, axes=[[0], [0]])
class ProjectedWannierFunctionsIBL:
"""PWF in the infinite band limit.
::
--No --Nw
|w_w> = > |psi_o> U_ow + > |f_M> U_Mw
--o=1 --M=1
"""
def __init__(self, V_nM, S_MM, No, lcaoindices=None):
Nw = V_nM.shape[1]
assert No <= Nw
self.V_oM, V_uM = V_nM[:No], V_nM[No:]
F_MM = S_MM - np.dot(self.V_oM.T.conj(), self.V_oM)
U_ow, U_lw, U_Ml = get_rot(F_MM, self.V_oM, Nw - No)
self.U_Mw = np.dot(U_Ml, U_lw)
self.U_ow = U_ow - np.dot(self.V_oM, self.U_Mw)
if lcaoindices is not None:
for i in lcaoindices:
self.U_ow[:, i] = 0.0
self.U_Mw[:, i] = 0.0
self.U_Mw[i, i] = 1.0
# stop here ?? XXX
self.S_ww = self.rotate_matrix(np.ones(1), S_MM)
P_uw = np.dot(V_uM, self.U_Mw)
self.norms_n = np.hstack((
np.dot(U_ow, np.linalg.solve(self.S_ww, U_ow.T.conj())).diagonal(),
np.dot(P_uw, np.linalg.solve(self.S_ww, P_uw.T.conj())).diagonal()))
def rotate_matrix(self, A_o, A_MM):
assert A_o.ndim == 1
A_ww = dots(self.U_ow.T.conj() * A_o, self.V_oM, self.U_Mw)
A_ww += np.conj(A_ww.T)
A_ww += np.dot(self.U_ow.T.conj() * A_o, self.U_ow)
A_ww += dots(self.U_Mw.T.conj(), A_MM, self.U_Mw)
return A_ww
def rotate_projections(self, P_aoi, P_aMi, indices=None):
if indices is None:
U_ow = self.U_ow
U_Mw = self.U_Mw
else:
U_ow = self.U_ow[:, indices]
U_Mw = self.U_Mw[:, indices]
P_awi = {}
for a, P_oi in P_aoi.items():
P_awi[a] = np.tensordot(U_Mw, P_aMi[a], axes=[[0], [0]])
if len(U_ow) > 0:
P_awi[a] += np.tensordot(U_ow, P_oi, axes=[[0], [0]])
return P_awi
def rotate_function(self, psit_oG, bfs, q=-1, indices=None):
if indices is None:
U_ow = self.U_ow
U_Mw = self.U_Mw
else:
U_ow = self.U_ow[:, indices]
U_Mw = self.U_Mw[:, indices]
w_wG = np.zeros((U_ow.shape[1],) + psit_oG.shape[1:])
if len(U_ow) > 0:
gemm(1., psit_oG, U_ow.T.copy(), 0., w_wG)
bfs.lcao_to_grid(U_Mw.T.copy(), w_wG, q)
return w_wG
class PWFplusLCAO(ProjectedWannierFunctionsIBL):
def __init__(self, V_nM, S_MM, No, pwfmask, lcaoindices=None):
Nw = V_nM.shape[1]
self.V_oM = V_nM[:No]
dtype = V_nM.dtype
# Do PWF optimization for pwfbasis submatrix only!
Npwf = len(pwfmask.nonzero()[0])
pwfmask2 = np.outer(pwfmask, pwfmask)
s_MM = S_MM[pwfmask2].reshape(Npwf, Npwf)
v_oM = self.V_oM[:, pwfmask]
f_MM = s_MM - np.dot(v_oM.T.conj(), v_oM)
nw = len(s_MM)
assert No <= nw
u_ow, u_lw, u_Ml = get_rot(f_MM, v_oM, nw - No)
u_Mw = np.dot(u_Ml, u_lw)
u_ow = u_ow - np.dot(v_oM, u_Mw)
# Determine U for full lcao basis
self.U_ow = np.zeros((No, Nw), dtype)
for U_w, u_w in zip(self.U_ow, u_ow):
np.place(U_w, pwfmask, u_w)
self.U_Mw = np.identity(Nw, dtype)
np.place(self.U_Mw, pwfmask2, u_Mw.flat)
if lcaoindices is not None:
for i in lcaoindices:
self.U_ow[:, i] = 0.0
self.U_Mw[:, i] = 0.0
self.U_Mw[i, i] = 1.0
self.S_ww = self.rotate_matrix(np.ones(1), S_MM)
self.norms_n = None
def set_lcaoatoms(calc, pwf, lcaoatoms):
    # NOTE: get_bfi is not imported in this module; get_bfi2 (with the atoms'
    # chemical symbols and the basis name) is presumably what was intended.
    ind = get_bfi(calc, lcaoatoms)
for i in ind:
pwf.U_ow[:, i] = 0.0
pwf.U_Mw[:, i] = 0.0
        pwf.U_Mw[i, i] = 1.0
class PWF2:
def __init__(self, gpwfilename, fixedenergy=0., spin=0, ibl=True,
basis='sz', zero_fermi=False, pwfbasis=None, lcaoatoms=None,
projection_data=None):
calc = GPAW(gpwfilename, txt=None, basis=basis)
assert calc.wfs.gd.comm.size == 1
assert calc.wfs.kpt_comm.size == 1
assert calc.wfs.band_comm.size == 1
if zero_fermi:
try:
Ef = calc.get_fermi_level()
except NotImplementedError:
Ef = calc.get_homo_lumo().mean()
else:
Ef = 0.0
self.ibzk_kc = calc.get_ibz_k_points()
self.nk = len(self.ibzk_kc)
self.eps_kn = [calc.get_eigenvalues(kpt=q, spin=spin) - Ef
for q in range(self.nk)]
self.M_k = [sum(eps_n <= fixedenergy) for eps_n in self.eps_kn]
print 'Fixed states:', self.M_k
self.calc = calc
self.dtype = self.calc.wfs.dtype
self.spin = spin
self.ibl = ibl
self.pwf_q = []
self.norms_qn = []
self.S_qww = []
self.H_qww = []
if ibl:
if pwfbasis is not None:
pwfmask = basis_subset2(calc.atoms.get_chemical_symbols(),
basis, pwfbasis)
if lcaoatoms is not None:
lcaoindices = get_bfi2(calc.atoms.get_chemical_symbols(),
basis,
lcaoatoms)
else:
lcaoindices = None
self.bfs = get_bfs(calc)
if projection_data is None:
V_qnM, H_qMM, S_qMM, self.P_aqMi = get_lcao_projections_HSP(
calc, bfs=self.bfs, spin=spin, projectionsonly=False)
else:
V_qnM, H_qMM, S_qMM, self.P_aqMi = projection_data
H_qMM -= Ef * S_qMM
for q, M in enumerate(self.M_k):
if pwfbasis is None:
pwf = ProjectedWannierFunctionsIBL(V_qnM[q], S_qMM[q], M,
lcaoindices)
else:
pwf = PWFplusLCAO(V_qnM[q], S_qMM[q], M, pwfmask,
lcaoindices)
self.pwf_q.append(pwf)
self.norms_qn.append(pwf.norms_n)
self.S_qww.append(pwf.S_ww)
self.H_qww.append(pwf.rotate_matrix(self.eps_kn[q][:M],
H_qMM[q]))
else:
if projection_data is None:
V_qnM = get_lcao_projections_HSP(calc, spin=spin)
else:
V_qnM = projection_data
for q, M in enumerate(self.M_k):
pwf = ProjectedWannierFunctionsFBL(V_qnM[q], M, ortho=False)
self.pwf_q.append(pwf)
self.norms_qn.append(pwf.norms_n)
self.S_qww.append(pwf.S_ww)
self.H_qww.append(pwf.rotate_matrix(self.eps_kn[q]))
for S in self.S_qww:
print 'Condition number: %0.1e' % condition_number(S)
def get_hamiltonian(self, q=0, indices=None):
if indices is None:
return self.H_qww[q]
else:
return self.H_qww[q].take(indices, 0).take(indices, 1)
def get_overlap(self, q=0, indices=None):
if indices is None:
return self.S_qww[q]
else:
return self.S_qww[q].take(indices, 0).take(indices, 1)
def get_projections(self, q=0, indices=None):
kpt = self.calc.wfs.kpt_u[self.spin * self.nk + q]
if not hasattr(self, 'P_awi'):
if self.ibl:
M = self.M_k[q]
self.P_awi = self.pwf_q[q].rotate_projections(
dict([(a, P_ni[:M]) for a, P_ni in kpt.P_ani.items()]),
dict([(a, P_qMi[q]) for a, P_qMi in self.P_aqMi.items()]),
indices)
else:
                # The FBL variant takes no index selection.
                self.P_awi = self.pwf_q[q].rotate_projections(kpt.P_ani)
return self.P_awi
def get_orbitals(self, q=0, indices=None):
self.calc.wfs.initialize_wave_functions_from_restart_file()
kpt = self.calc.wfs.kpt_u[self.spin * self.nk + q]
if not hasattr(self, 'w_wG'):
if self.ibl:
self.w_wG = self.pwf_q[q].rotate_function(
kpt.psit_nG[:self.M_k[q]], self.bfs, q, indices)
else:
                # The FBL variant takes no index selection.
                self.w_wG = self.pwf_q[q].rotate_function(kpt.psit_nG)
return self.w_wG
def get_Fcore(self, q=0, indices=None):
if indices is None:
Fcore_ww = np.zeros_like(self.H_qww[q])
else:
Fcore_ww = np.zeros((len(indices), len(indices)))
for a, P_wi in self.get_projections(q, indices).items():
X_ii = unpack(self.calc.wfs.setups[a].X_p)
Fcore_ww -= dots(P_wi, X_ii, P_wi.T.conj())
return Fcore_ww * Hartree
def get_eigs(self, q=0):
        return eigvals(self.H_qww[q], self.S_qww[q])
def get_condition_number(self, q=0):
return condition_number(self.S_qww[q])
def get_xc(self, q=0, indices=None):
#self.calc.density.ghat.set_positions(
# self.calc.atoms.get_scaled_positions() % 1.)
#self.calc.hamiltonian.poisson.initialize()
if self.ibl:
return get_xc2(self.calc, self.get_orbitals(q, indices),
self.get_projections(q, indices), self.spin)
else:
return self.pwf_q[q].rotate_matrix(get_ks_xc(self.calc,
spin=self.spin))
class LCAOwrap:
def __init__(self, calc, spin=0):
assert calc.wfs.gd.comm.size == 1
assert calc.wfs.kpt_comm.size == 1
assert calc.wfs.band_comm.size == 1
from gpaw.lcao.tools import get_lcao_hamiltonian
H_skMM, S_kMM = get_lcao_hamiltonian(calc)
self.calc = calc
self.dtype = calc.wfs.dtype
self.spin = spin
self.H_qww = H_skMM[spin]
self.S_qww = S_kMM
self.P_aqwi = calc.wfs.P_aqMi
self.Nw = self.S_qww.shape[-1]
for S in self.S_qww:
print 'Condition number: %0.1e' % condition_number(S)
def get_hamiltonian(self, q=0, indices=None):
if indices is None:
return self.H_qww[q]
else:
return self.H_qww[q].take(indices, 0).take(indices, 1)
def get_overlap(self, q=0, indices=None):
if indices is None:
return self.S_qww[q]
else:
return self.S_qww[q].take(indices, 0).take(indices, 1)
def get_projections(self, q=0, indices=None):
if indices is None:
return dict([(a, P_qwi[q]) for a, P_qwi in self.P_aqwi.items()])
else:
return dict([(a, P_qwi[q].take(indices, 0))
for a, P_qwi in self.P_aqwi.items()])
def get_orbitals(self, q=-1, indices=None):
assert q == -1
if indices is None:
indices = range(self.Nw)
Ni = len(indices)
C_wM = np.zeros((Ni, self.Nw), self.dtype)
for i, C_M in zip(indices, C_wM):
C_M[i] = 1.0
w_wG = self.calc.wfs.gd.zeros(Ni, dtype=self.dtype)
self.calc.wfs.basis_functions.lcao_to_grid(C_wM, w_wG, q=-1)
return w_wG
def get_Fcore(self, q=0, indices=None):
if indices is None:
Fcore_ww = np.zeros_like(self.H_qww[q])
else:
Fcore_ww = np.zeros((len(indices), len(indices)))
for a, P_wi in self.get_projections(q, indices).items():
if self.calc.wfs.setups[a].type != 'ghost':
X_ii = unpack(self.calc.wfs.setups[a].X_p)
Fcore_ww -= dots(P_wi, X_ii, P_wi.T.conj())
return Fcore_ww * Hartree
def get_xc(self, q=0, indices=None):
if not hasattr(self, 'Vxc_qww'):
self.Vxc_qww = get_lcao_xc(self.calc, self.P_aqwi,
bfs=self.calc.wfs.basis_functions,
spin=self.spin)
if indices is None:
return self.Vxc_qww[q]
else:
return self.Vxc_qww[q].take(indices, 0).take(indices, 1)
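# Usage sketch (hypothetical file name; assumes a converged LCAO-mode .gpw
# restart file as expected by the constructor above):
#
#     pwf = PWF2('scat.gpw', fixedenergy=0.1, ibl=True, basis='sz')
#     H_ww = pwf.get_hamiltonian(q=0)
#     S_ww = pwf.get_overlap(q=0)
#
# H_ww and S_ww are then the Hamiltonian and overlap in the projected
# Wannier-function basis for the first k-point.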
|
robwarm/gpaw-symm
|
gpaw/lcao/pwf2.py
|
Python
|
gpl-3.0
| 16,335
|
[
"ASE",
"GPAW"
] |
a4d1b055dcded331a14330796c9858b59ad79cfb8d87bcc1dac56555bf1e5c32
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product

import numpy as np
from scipy import linalg

from ..utils import check_random_state
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""
Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
    Returns
    -------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
assert n_informative + n_redundant + n_repeated <= n_features
assert 2 ** n_informative >= n_classes * n_clusters_per_class
assert weights is None or (len(weights) == n_classes or
len(weights) == (n_classes - 1))
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in xrange(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in xrange(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples)
    # Build the polytope whose vertices will anchor the clusters
    C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in xrange(n_clusters):
C[k, :] *= generator.rand()
for f in xrange(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in xrange(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
        # Shift the cluster to a vertex of the polytope
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in xrange(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in xrange(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
indices = range(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
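# A minimal usage sketch for make_classification (the keyword values below
# are illustrative and chosen to satisfy the assertions at the top of the
# function):
#
#   X, y = make_classification(n_samples=100, n_features=20,
#                              n_informative=2, n_redundant=2,
#                              n_classes=2, n_clusters_per_class=1,
#                              random_state=0)
#   # X has shape (100, 20); y holds the integer class labels 0 and 1.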
def make_regression(n_samples=100, n_features=100, n_informative=10, bias=0.0,
effective_rank=None, tail_strength=0.5, noise=0.0,
shuffle=True, coef=False, random_state=None):
"""
Generate a random regression problem.
    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See `make_low_rank_matrix` for
    more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
coef : array of shape [n_features], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
    # Generate a ground truth model with only n_informative features being
    # non-zero (the other features are not correlated to y and should be
    # ignored by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros(n_features)
ground_truth[:n_informative] = 100 * generator.rand(n_informative)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
indices = range(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
if coef:
return X, y, ground_truth
else:
return X, y
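# A minimal usage sketch for make_regression; with coef=True the ground
# truth weight vector is returned as well:
#
#   X, y, w = make_regression(n_samples=10, n_features=4, n_informative=2,
#                             coef=True, random_state=0)
#   # X: shape (10, 4); y: shape (10,); w: shape (4,) with (almost surely)
#   # exactly 2 non-zero coefficients.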
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""
Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional (default=3)
The number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
    Returns
    -------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, random_state=0)
>>> X.shape
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, int):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = np.atleast_2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [n_samples / n_centers] * n_centers
for i in xrange(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""
Generate the "Friedman #1" regression problem as described in Friedman [1]
and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
assert n_features >= 5
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
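# A quick sanity sketch: with the default noise=0.0 the output is the
# deterministic Friedman #1 function of the first five columns, so it can
# be recomputed from X directly:
#
#   X, y = make_friedman1(n_samples=20, n_features=10, random_state=0)
#   y2 = (10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
#         + 10 * X[:, 3] + 5 * X[:, 4])
#   # np.allclose(y, y2) -> True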
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""
Generate the "Friedman #2" regression problem as described in Friedman [1]
and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 \
+ (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
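# Usage sketch: the four input columns are drawn on the documented
# intervals, e.g. X[:, 3] always lies in [1, 11):
#
#   X, y = make_friedman2(n_samples=50, random_state=0)
#   # X.shape -> (50, 4); (X[:, 3] >= 1).all() and (X[:, 3] < 11).all()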
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""
Generate the "Friedman #3" regression problem as described in Friedman [1]
and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) \
+ noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
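# Usage sketch: same input distribution as make_friedman2, but the output
# goes through arctan, so with the default noise=0.0 every y lies in
# (-pi/2, pi/2):
#
#   X, y = make_friedman3(n_samples=50, random_state=0)
#   # X.shape -> (50, 4); np.abs(y).max() < np.pi / 2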
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""
Generate a mostly low rank random matrix with bell-shaped singular
values profile.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often observed in practice, for instance:
    - gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = (1 - tail_strength) * \
np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
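# A quick spectral sketch: the singular values of the returned matrix
# follow the bell-plus-tail profile assembled above, and the largest one
# equals low_rank[0] + tail[0] = 1.0:
#
#   X = make_low_rank_matrix(n_samples=50, n_features=30,
#                            effective_rank=5, random_state=0)
#   s = np.linalg.svd(X, compute_uv=False)
#   # np.allclose(s[0], 1.0) -> True; s decays quickly past index 5.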
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.
Parameters
----------
    n_samples : int
        number of samples to generate
    n_components : int
        number of components in the dictionary
    n_features : int
        number of features of the dataset to generate
    n_nonzero_coefs : int
        number of active (non-zero) coefficients in each sample
    random_state : int or RandomState instance, optional (default=None)
        seed used by the pseudo random number generator
    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in xrange(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
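# Usage sketch: the three return values satisfy Y = D X, with exactly
# n_nonzero_coefs active entries in each column of the code X:
#
#   Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
#                                      n_features=10, n_nonzero_coefs=3,
#                                      random_state=0)
#   # Y.shape -> (10, 5); np.allclose(Y, np.dot(D, X)) -> True
#   # (X != 0).sum(axis=0) -> array([3, 3, 3, 3, 3])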
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""
    Generate a random regression problem with sparse uncorrelated design as
    described in Celeux et al. [1]::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
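# Usage sketch: only the first four columns carry signal; the remaining
# n_features - 4 columns are pure noise with respect to y:
#
#   X, y = make_sparse_uncorrelated(n_samples=30, n_features=10,
#                                   random_state=0)
#   # X.shape -> (30, 10); the conditional mean of y is
#   # X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]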
def make_spd_matrix(n_dim, random_state=None):
"""
Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
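# Usage sketch: the construction above yields a symmetric positive-definite
# matrix; the middle factor, 1.0 + np.diag(...), is an all-ones matrix plus
# a positive diagonal and hence positive definite:
#
#   M = make_spd_matrix(4, random_state=0)
#   # np.allclose(M, M.T) -> True
#   # np.linalg.eigvalsh(M).min() > 0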
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""
Generate a swiss roll dataset.
Parameters
----------
    n_samples : int, optional (default=100)
        The number of sample points on the swiss roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
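# Usage sketch: t parametrises the roll; with the default noise=0.0 the
# first and third coordinates are exact functions of it:
#
#   X, t = make_swiss_roll(n_samples=100, random_state=0)
#   # X.shape -> (100, 3); np.allclose(X[:, 0], t * np.cos(t)) -> True
#   # np.allclose(X[:, 2], t * np.sin(t)) -> True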
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""
Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
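# Usage sketch: t is drawn in (-1.5*pi, 1.5*pi); with the default
# noise=0.0 the first coordinate is exactly sin(t):
#
#   X, t = make_s_curve(n_samples=100, random_state=0)
#   # X.shape -> (100, 3); np.allclose(X[:, 0], np.sin(t)) -> True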
|
joshbohde/scikit-learn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 30,408
|
[
"Gaussian"
] |
3a6caa6029d72c65c90d705137c00e1f3e3537efc8fb42f4e0e7ef2b233b4b19
|
# This file is part of Androguard.
#
# Copyright (C) 2012, Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import androguard.decompiler.dad.util as util
class IRForm:
def __init__(self):
self.var_map = {}
self.type = None
def is_call(self):
return False
def is_cond(self):
return False
def is_const(self):
return False
def is_ident(self):
return False
def is_propagable(self):
return True
def get_type(self):
return self.type
def set_type(self, _type):
self.type = _type
def has_side_effect(self):
return False
def get_used_vars(self):
return []
def replace(self, old, new):
raise NotImplementedError('replace not implemented in %r' % self)
def replace_lhs(self, new):
raise NotImplementedError('replace_lhs not implemented in %r' % self)
def replace_var(self, old, new):
raise NotImplementedError('replace_var not implemented in %r' % self)
def remove_defined_var(self):
pass
def get_rhs(self):
return []
def get_lhs(self):
return None
def visit(self, visitor):
pass
class Constant(IRForm):
def __init__(self, value, atype, int_value=None, descriptor=None):
self.v = 'c%s' % value
self.cst = value
if int_value is None:
self.cst2 = value
else:
self.cst2 = int_value
self.type = atype
self.clsdesc = descriptor
def get_used_vars(self):
return []
def is_const(self):
return True
def get_int_value(self):
return self.cst2
def get_type(self):
return self.type
def visit(self, visitor):
if self.type == 'Z':
if self.cst == 0:
return visitor.visit_constant('false')
else:
return visitor.visit_constant('true')
elif self.type == 'Ljava/lang/Class;':
return visitor.visit_base_class(self.cst, data=self.cst)
elif self.type in 'IJB':
return visitor.visit_constant(self.cst2)
else:
return visitor.visit_constant(self.cst)
def __str__(self):
return 'CST_%s' % repr(self.cst)
class BaseClass(IRForm):
def __init__(self, name, descriptor=None):
self.v = 'c%s' % name
self.cls = name
self.clsdesc = descriptor
def is_const(self):
return True
def visit(self, visitor):
return visitor.visit_base_class(self.cls, data=self.cls)
def __str__(self):
return 'BASECLASS_%s' % self.cls
class Variable(IRForm):
def __init__(self, value):
self.v = value
self.declared = False
self.type = None
self.name = value
def get_used_vars(self):
return [self.v]
def is_ident(self):
return True
def value(self):
return self.v
def visit(self, visitor):
return visitor.visit_variable(self)
def visit_decl(self, visitor):
return visitor.visit_decl(self)
def __str__(self):
return 'VAR_%s' % self.name
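# A minimal sketch of the visitor protocol used by every IRForm below:
# each node's visit() dispatches to a visitor method such as
# visit_constant or visit_variable. PrintVisitor here is hypothetical,
# implementing just enough of the interface for these two node types:
#
#   class PrintVisitor(object):
#       def visit_constant(self, cst):
#           return str(cst)
#       def visit_variable(self, var):
#           return 'v%s' % var.name
#
#   Constant(42, 'I').visit(PrintVisitor())   # -> '42'
#   Variable(7).visit(PrintVisitor())         # -> 'v7'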
class Param(Variable):
def __init__(self, value, atype):
super().__init__(value)
self.declared = True
self.type = atype
self.this = False
def is_const(self):
return True
def visit(self, visitor):
return visitor.visit_param(self.v, data=self.type)
def __str__(self):
return 'PARAM_%s' % self.name
class ThisParam(Param):
def __init__(self, value, atype):
super().__init__(value, atype)
self.this = True
self.super = False
def visit(self, visitor):
if self.super:
return visitor.visit_super()
return visitor.visit_this()
def __str__(self):
return 'THIS'
class AssignExpression(IRForm):
def __init__(self, lhs, rhs):
super().__init__()
if lhs:
self.lhs = lhs.v
self.var_map[lhs.v] = lhs
lhs.set_type(rhs.get_type())
else:
self.lhs = None
self.rhs = rhs
def is_propagable(self):
return self.rhs.is_propagable()
def is_call(self):
return self.rhs.is_call()
def has_side_effect(self):
return self.rhs.has_side_effect()
def get_rhs(self):
return self.rhs
def get_lhs(self):
return self.lhs
def get_used_vars(self):
return self.rhs.get_used_vars()
def remove_defined_var(self):
self.lhs = None
def replace(self, old, new):
self.rhs.replace(old, new)
def replace_lhs(self, new):
self.lhs = new.v
self.var_map[new.v] = new
def replace_var(self, old, new):
self.rhs.replace_var(old, new)
def visit(self, visitor):
return visitor.visit_assign(self.var_map.get(self.lhs), self.rhs)
def __str__(self):
return 'ASSIGN({}, {})'.format(self.var_map.get(self.lhs), self.rhs)
class MoveExpression(IRForm):
def __init__(self, lhs, rhs):
super().__init__()
self.lhs = lhs.v
self.rhs = rhs.v
self.var_map.update([(lhs.v, lhs), (rhs.v, rhs)])
lhs.set_type(rhs.get_type())
def has_side_effect(self):
return False
def is_call(self):
return self.var_map[self.rhs].is_call()
def get_used_vars(self):
return self.var_map[self.rhs].get_used_vars()
def get_rhs(self):
return self.var_map[self.rhs]
def get_lhs(self):
return self.lhs
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_move(v_m[self.lhs], v_m[self.rhs])
def replace(self, old, new):
v_m = self.var_map
rhs = v_m[self.rhs]
if not (rhs.is_const() or rhs.is_ident()):
rhs.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.rhs = new.value()
else:
v_m[old] = new
def replace_lhs(self, new):
if self.lhs != self.rhs:
self.var_map.pop(self.lhs)
self.lhs = new.v
self.var_map[new.v] = new
def replace_var(self, old, new):
if self.lhs != old:
self.var_map.pop(old)
self.rhs = new.v
self.var_map[new.v] = new
def __str__(self):
v_m = self.var_map
return '{} = {}'.format(v_m.get(self.lhs), v_m.get(self.rhs))
class MoveResultExpression(MoveExpression):
def __init__(self, lhs, rhs):
super().__init__(lhs, rhs)
def is_propagable(self):
return self.var_map[self.rhs].is_propagable()
def has_side_effect(self):
return self.var_map[self.rhs].has_side_effect()
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_move_result(v_m[self.lhs], v_m[self.rhs])
def __str__(self):
v_m = self.var_map
return '{} = {}'.format(v_m.get(self.lhs), v_m.get(self.rhs))
class ArrayStoreInstruction(IRForm):
def __init__(self, rhs, array, index, _type):
super().__init__()
self.rhs = rhs.v
self.array = array.v
self.index = index.v
self.var_map.update([(rhs.v, rhs), (array.v, array), (index.v, index)])
self.type = _type
def has_side_effect(self):
return True
def get_used_vars(self):
v_m = self.var_map
lused_vars = v_m[self.array].get_used_vars()
lused_vars.extend(v_m[self.index].get_used_vars())
lused_vars.extend(v_m[self.rhs].get_used_vars())
return list(set(lused_vars))
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_astore(v_m[self.array],
v_m[self.index],
v_m[self.rhs],
data=self)
def replace_var(self, old, new):
if self.rhs == old:
self.rhs = new.v
if self.array == old:
self.array = new.v
if self.index == old:
self.index = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.rhs == old:
self.rhs = new.value()
if self.array == old:
self.array = new.value()
                    if self.index == old:
                        self.index = new.value()
else:
v_m[old] = new
else:
for arg in (v_m[self.array], v_m[self.index], v_m[self.rhs]):
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
def __str__(self):
v_m = self.var_map
return '{}[{}] = {}'.format(v_m[self.array], v_m[self.index], v_m[self.rhs])
class StaticInstruction(IRForm):
def __init__(self, rhs, klass, ftype, name):
super().__init__()
self.rhs = rhs.v
self.cls = util.get_type(klass)
self.ftype = ftype
self.name = name
self.var_map[rhs.v] = rhs
self.clsdesc = klass
def has_side_effect(self):
return True
def get_used_vars(self):
return self.var_map[self.rhs].get_used_vars()
def get_lhs(self):
return None
def visit(self, visitor):
return visitor.visit_put_static(
self.cls, self.name, self.var_map[self.rhs])
def replace_var(self, old, new):
self.rhs = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
rhs = v_m[self.rhs]
if not (rhs.is_const() or rhs.is_ident()):
rhs.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.rhs = new.value()
else:
v_m[old] = new
def __str__(self):
return '{}.{} = {}'.format(self.cls, self.name, self.var_map[self.rhs])
class InstanceInstruction(IRForm):
def __init__(self, rhs, lhs, klass, atype, name):
super().__init__()
self.lhs = lhs.v
self.rhs = rhs.v
self.atype = atype
self.cls = util.get_type(klass)
self.name = name
self.var_map.update([(lhs.v, lhs), (rhs.v, rhs)])
self.clsdesc = klass
def has_side_effect(self):
return True
def get_used_vars(self):
v_m = self.var_map
lused_vars = v_m[self.lhs].get_used_vars()
lused_vars.extend(v_m[self.rhs].get_used_vars())
return list(set(lused_vars))
def get_lhs(self):
return None
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_put_instance(
v_m[self.lhs],
self.name,
v_m[self.rhs],
data=self.atype)
def replace_var(self, old, new):
if self.lhs == old:
self.lhs = new.v
if self.rhs == old:
self.rhs = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.lhs == old:
self.lhs = new.value()
if self.rhs == old:
self.rhs = new.value()
else:
v_m[old] = new
else:
for arg in (v_m[self.lhs], v_m[self.rhs]):
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
def __str__(self):
v_m = self.var_map
return '{}.{} = {}'.format(v_m[self.lhs], self.name, v_m[self.rhs])
class NewInstance(IRForm):
def __init__(self, ins_type):
super().__init__()
self.type = ins_type
def get_type(self):
return self.type
def get_used_vars(self):
return []
def visit(self, visitor):
return visitor.visit_new(self.type, data=self)
def replace(self, old, new):
pass
def __str__(self):
return 'NEW(%s)' % self.type
class InvokeInstruction(IRForm):
def __init__(self, clsname, name, base, rtype, ptype, args, triple):
super().__init__()
self.cls = clsname
self.name = name
self.base = base.v
self.rtype = rtype
self.ptype = ptype
self.args = [arg.v for arg in args]
self.var_map[base.v] = base
for arg in args:
self.var_map[arg.v] = arg
self.triple = triple
assert (triple[1] == name)
def get_type(self):
if self.name == '<init>':
return self.var_map[self.base].get_type()
return self.rtype
def is_call(self):
return True
def has_side_effect(self):
return True
def replace_var(self, old, new):
if self.base == old:
self.base = new.v
new_args = []
for arg in self.args:
if arg != old:
new_args.append(arg)
else:
new_args.append(new.v)
self.args = new_args
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_ident() or arg.is_const()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.base == old:
self.base = new.value()
new_args = []
for arg in self.args:
if arg != old:
new_args.append(arg)
else:
new_args.append(new.v)
self.args = new_args
else:
v_m[old] = new
else:
base = v_m[self.base]
if not (base.is_ident() or base.is_const()):
base.replace(old, new)
for arg in self.args:
cnt = v_m[arg]
if not (cnt.is_ident() or cnt.is_const()):
cnt.replace(old, new)
def get_used_vars(self):
v_m = self.var_map
lused_vars = []
for arg in self.args:
lused_vars.extend(v_m[arg].get_used_vars())
lused_vars.extend(v_m[self.base].get_used_vars())
return list(set(lused_vars))
def visit(self, visitor):
v_m = self.var_map
largs = [v_m[arg] for arg in self.args]
return visitor.visit_invoke(self.name, v_m[self.base], self.ptype,
self.rtype, largs, self)
def __str__(self):
v_m = self.var_map
return '{}.{}({})'.format(v_m[self.base], self.name,
', '.join('%s' % v_m[i] for i in self.args))
class InvokeRangeInstruction(InvokeInstruction):
def __init__(self, clsname, name, rtype, ptype, args, triple):
base = args.pop(0)
super().__init__(clsname, name, base, rtype,
ptype, args, triple)
class InvokeDirectInstruction(InvokeInstruction):
def __init__(self, clsname, name, base, rtype, ptype, args, triple):
super().__init__(
clsname, name, base, rtype, ptype, args, triple)
class InvokeStaticInstruction(InvokeInstruction):
def __init__(self, clsname, name, base, rtype, ptype, args, triple):
super().__init__(
clsname, name, base, rtype, ptype, args, triple)
def get_used_vars(self):
v_m = self.var_map
lused_vars = []
for arg in self.args:
lused_vars.extend(v_m[arg].get_used_vars())
return list(set(lused_vars))
class ReturnInstruction(IRForm):
def __init__(self, arg):
super().__init__()
self.arg = arg
if arg is not None:
self.var_map[arg.v] = arg
self.arg = arg.v
def get_used_vars(self):
if self.arg is None:
return []
return self.var_map[self.arg].get_used_vars()
def get_lhs(self):
return None
def visit(self, visitor):
if self.arg is None:
return visitor.visit_return_void()
else:
return visitor.visit_return(self.var_map[self.arg])
def replace_var(self, old, new):
self.arg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
arg = v_m[self.arg]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.arg = new.value()
else:
v_m[old] = new
def __str__(self):
if self.arg is not None:
return 'RETURN(%s)' % self.var_map.get(self.arg)
return 'RETURN'
class NopExpression(IRForm):
    def __init__(self):
        super().__init__()
def get_used_vars(self):
return []
def get_lhs(self):
return None
def visit(self, visitor):
return visitor.visit_nop()
class SwitchExpression(IRForm):
def __init__(self, src, branch):
super().__init__()
self.src = src.v
self.branch = branch
self.var_map[src.v] = src
def get_used_vars(self):
return self.var_map[self.src].get_used_vars()
def visit(self, visitor):
return visitor.visit_switch(self.var_map[self.src])
def replace_var(self, old, new):
self.src = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
src = v_m[self.src]
if not (src.is_const() or src.is_ident()):
src.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.src = new.value()
else:
v_m[old] = new
def __str__(self):
return 'SWITCH(%s)' % (self.var_map[self.src])
class CheckCastExpression(IRForm):
def __init__(self, arg, _type, descriptor=None):
super().__init__()
self.arg = arg.v
self.var_map[arg.v] = arg
self.type = descriptor
self.clsdesc = descriptor
def is_const(self):
return self.var_map[self.arg].is_const()
def get_used_vars(self):
return self.var_map[self.arg].get_used_vars()
def visit(self, visitor):
return visitor.visit_check_cast(self.var_map[self.arg],
util.get_type(self.type))
def replace_var(self, old, new):
self.arg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
arg = v_m[self.arg]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.arg = new.value()
else:
v_m[old] = new
def __str__(self):
return 'CAST({}) {}'.format(self.type, self.var_map[self.arg])
class ArrayExpression(IRForm):
def __init__(self):
super().__init__()
class ArrayLoadExpression(ArrayExpression):
def __init__(self, arg, index, _type):
super().__init__()
self.array = arg.v
self.idx = index.v
self.var_map.update([(arg.v, arg), (index.v, index)])
self.type = _type
def get_used_vars(self):
v_m = self.var_map
lused_vars = v_m[self.array].get_used_vars()
lused_vars.extend(v_m[self.idx].get_used_vars())
return list(set(lused_vars))
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_aload(v_m[self.array], v_m[self.idx])
def get_type(self):
return self.var_map[self.array].get_type().replace('[', '', 1)
def replace_var(self, old, new):
if self.array == old:
self.array = new.v
if self.idx == old:
self.idx = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_ident() or arg.is_const()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.array == old:
self.array = new.value()
if self.idx == old:
self.idx = new.value()
else:
v_m[old] = new
else:
for arg in (self.array, self.idx):
cnt = v_m[arg]
if not (cnt.is_ident() or cnt.is_const()):
cnt.replace(old, new)
def __str__(self):
v_m = self.var_map
return 'ARRAYLOAD({}, {})'.format(v_m[self.array], v_m[self.idx])
class ArrayLengthExpression(ArrayExpression):
def __init__(self, array):
super().__init__()
self.array = array.v
self.var_map[array.v] = array
def get_type(self):
return 'I'
def get_used_vars(self):
return self.var_map[self.array].get_used_vars()
def visit(self, visitor):
return visitor.visit_alength(self.var_map[self.array])
def replace_var(self, old, new):
self.array = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
array = v_m[self.array]
if not (array.is_const() or array.is_ident()):
array.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.array = new.value()
else:
v_m[old] = new
def __str__(self):
return 'ARRAYLEN(%s)' % (self.var_map[self.array])
class NewArrayExpression(ArrayExpression):
def __init__(self, asize, atype):
super().__init__()
self.size = asize.v
self.type = atype
self.var_map[asize.v] = asize
def is_propagable(self):
return False
def get_used_vars(self):
return self.var_map[self.size].get_used_vars()
def visit(self, visitor):
return visitor.visit_new_array(self.type, self.var_map[self.size])
def replace_var(self, old, new):
self.size = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
size = v_m[self.size]
if not (size.is_const() or size.is_ident()):
size.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.size = new.value()
else:
v_m[old] = new
def __str__(self):
return 'NEWARRAY_{}[{}]'.format(self.type, self.var_map[self.size])
class FilledArrayExpression(ArrayExpression):
def __init__(self, asize, atype, args):
super().__init__()
self.size = asize
self.type = atype
self.args = []
for arg in args:
self.var_map[arg.v] = arg
self.args.append(arg.v)
def get_used_vars(self):
lused_vars = []
for arg in self.args:
lused_vars.extend(self.var_map[arg].get_used_vars())
return list(set(lused_vars))
def replace_var(self, old, new):
new_args = []
for arg in self.args:
if arg == old:
new_args.append(new.v)
else:
new_args.append(arg)
self.args = new_args
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_ident() or arg.is_const()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
new_args = []
for arg in self.args:
if arg == old:
new_args.append(new.v)
else:
new_args.append(arg)
self.args = new_args
else:
v_m[old] = new
else:
for arg in self.args:
cnt = v_m[arg]
if not (cnt.is_ident() or cnt.is_const()):
cnt.replace(old, new)
def visit(self, visitor):
v_m = self.var_map
largs = [v_m[arg] for arg in self.args]
return visitor.visit_filled_new_array(self.type, self.size, largs)
class FillArrayExpression(ArrayExpression):
def __init__(self, reg, value):
super().__init__()
self.reg = reg.v
self.var_map[reg.v] = reg
self.value = value
def is_propagable(self):
return False
def get_rhs(self):
return self.reg
def replace_var(self, old, new):
self.reg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
reg = v_m[self.reg]
if not (reg.is_const() or reg.is_ident()):
reg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.reg = new.value()
else:
v_m[old] = new
def get_used_vars(self):
return self.var_map[self.reg].get_used_vars()
def visit(self, visitor):
return visitor.visit_fill_array(self.var_map[self.reg], self.value)
class RefExpression(IRForm):
def __init__(self, ref):
super().__init__()
self.ref = ref.v
self.var_map[ref.v] = ref
def is_propagable(self):
return False
def get_used_vars(self):
return self.var_map[self.ref].get_used_vars()
def replace_var(self, old, new):
self.ref = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
ref = v_m[self.ref]
if not (ref.is_const() or ref.is_ident()):
ref.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
self.ref = new.value()
else:
v_m[old] = new
class MoveExceptionExpression(RefExpression):
def __init__(self, ref, _type):
super().__init__(ref)
self.type = _type
ref.set_type(_type)
def get_lhs(self):
return self.ref
def has_side_effect(self):
return True
def get_used_vars(self):
return []
def replace_lhs(self, new):
self.var_map.pop(self.ref)
self.ref = new.v
self.var_map[new.v] = new
def visit(self, visitor):
return visitor.visit_move_exception(self.var_map[self.ref], data=self)
def __str__(self):
return 'MOVE_EXCEPT %s' % self.var_map[self.ref]
class MonitorEnterExpression(RefExpression):
def __init__(self, ref):
super().__init__(ref)
def visit(self, visitor):
return visitor.visit_monitor_enter(self.var_map[self.ref])
class MonitorExitExpression(RefExpression):
def __init__(self, ref):
super().__init__(ref)
def visit(self, visitor):
return visitor.visit_monitor_exit(self.var_map[self.ref])
class ThrowExpression(RefExpression):
def __init__(self, ref):
super().__init__(ref)
def visit(self, visitor):
return visitor.visit_throw(self.var_map[self.ref])
def __str__(self):
return 'Throw %s' % self.var_map[self.ref]
class BinaryExpression(IRForm):
def __init__(self, op, arg1, arg2, _type):
super().__init__()
self.op = op
self.arg1 = arg1.v
self.arg2 = arg2.v
self.var_map.update([(arg1.v, arg1), (arg2.v, arg2)])
self.type = _type
def has_side_effect(self):
v_m = self.var_map
return (v_m[self.arg1].has_side_effect() or
v_m[self.arg2].has_side_effect())
def get_used_vars(self):
v_m = self.var_map
lused_vars = v_m[self.arg1].get_used_vars()
lused_vars.extend(v_m[self.arg2].get_used_vars())
return list(set(lused_vars))
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_binary_expression(self.op, v_m[self.arg1],
v_m[self.arg2])
def replace_var(self, old, new):
if self.arg1 == old:
self.arg1 = new.v
if self.arg2 == old:
self.arg2 = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.arg1 == old:
self.arg1 = new.value()
if self.arg2 == old:
self.arg2 = new.value()
else:
v_m[old] = new
else:
for arg in (v_m[self.arg1], v_m[self.arg2]):
if not (arg.is_ident() or arg.is_const()):
arg.replace(old, new)
def __str__(self):
v_m = self.var_map
return '({} {} {})'.format(self.op, v_m[self.arg1], v_m[self.arg2])
class BinaryCompExpression(BinaryExpression):
def __init__(self, op, arg1, arg2, _type):
super().__init__(op, arg1, arg2, _type)
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_cond_expression(self.op, v_m[self.arg1],
v_m[self.arg2])
class BinaryExpression2Addr(BinaryExpression):
def __init__(self, op, dest, arg, _type):
super().__init__(op, dest, arg, _type)
class BinaryExpressionLit(BinaryExpression):
def __init__(self, op, arg1, arg2):
super().__init__(op, arg1, arg2, 'I')
class UnaryExpression(IRForm):
def __init__(self, op, arg, _type):
super().__init__()
self.op = op
self.arg = arg.v
self.var_map[arg.v] = arg
self.type = _type
def get_type(self):
return self.var_map[self.arg].get_type()
def get_used_vars(self):
return self.var_map[self.arg].get_used_vars()
def visit(self, visitor):
return visitor.visit_unary_expression(self.op, self.var_map[self.arg])
def replace_var(self, old, new):
self.arg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
arg = v_m[self.arg]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
elif old in v_m:
if new.is_ident():
v_m[new.value()] = new
self.arg = new.value()
else:
v_m[old] = new
def __str__(self):
return '({}, {})'.format(self.op, self.var_map[self.arg])
class CastExpression(UnaryExpression):
def __init__(self, op, atype, arg):
super().__init__(op, arg, atype)
self.clsdesc = atype
def is_const(self):
return self.var_map[self.arg].is_const()
def get_type(self):
return self.type
def get_used_vars(self):
return self.var_map[self.arg].get_used_vars()
def visit(self, visitor):
return visitor.visit_cast(self.op, self.var_map[self.arg])
def __str__(self):
return 'CAST_{}({})'.format(self.op, self.var_map[self.arg])
CONDS = {'==': '!=', '!=': '==', '<': '>=', '<=': '>', '>=': '<', '>': '<=', }
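# For example, negating the guard "a < b" yields "a >= b": CONDS['<'] is
# '>=' and CONDS['>='] is '<'. The mapping is an involution, which is what
# ConditionalExpression.neg() and ConditionalZExpression.neg() rely on.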
class ConditionalExpression(IRForm):
def __init__(self, op, arg1, arg2):
super().__init__()
self.op = op
self.arg1 = arg1.v
self.arg2 = arg2.v
self.var_map.update([(arg1.v, arg1), (arg2.v, arg2)])
def get_lhs(self):
return None
def is_cond(self):
return True
def get_used_vars(self):
v_m = self.var_map
lused_vars = v_m[self.arg1].get_used_vars()
lused_vars.extend(v_m[self.arg2].get_used_vars())
return list(set(lused_vars))
def neg(self):
self.op = CONDS[self.op]
def visit(self, visitor):
v_m = self.var_map
return visitor.visit_cond_expression(self.op, v_m[self.arg1],
v_m[self.arg2])
def replace_var(self, old, new):
if self.arg1 == old:
self.arg1 = new.v
if self.arg2 == old:
self.arg2 = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
if old in v_m:
arg = v_m[old]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
else:
if new.is_ident():
v_m[new.value()] = new
if self.arg1 == old:
self.arg1 = new.value()
if self.arg2 == old:
self.arg2 = new.value()
else:
v_m[old] = new
else:
for arg in (v_m[self.arg1], v_m[self.arg2]):
if not (arg.is_ident() or arg.is_const()):
arg.replace(old, new)
def __str__(self):
v_m = self.var_map
return 'COND({}, {}, {})'.format(self.op, v_m[self.arg1], v_m[self.arg2])
class ConditionalZExpression(IRForm):
def __init__(self, op, arg):
super().__init__()
self.op = op
self.arg = arg.v
self.var_map[arg.v] = arg
def get_lhs(self):
return None
def is_cond(self):
return True
def get_used_vars(self):
return self.var_map[self.arg].get_used_vars()
def neg(self):
self.op = CONDS[self.op]
def visit(self, visitor):
return visitor.visit_condz_expression(self.op, self.var_map[self.arg])
def replace_var(self, old, new):
self.arg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
arg = v_m[self.arg]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
elif old in v_m:
if new.is_ident():
v_m[new.value()] = new
self.arg = new.value()
else:
v_m[old] = new
def __str__(self):
return '(IS{}0, {})'.format(self.op, self.var_map[self.arg])
class InstanceExpression(IRForm):
def __init__(self, arg, klass, ftype, name):
super().__init__()
self.arg = arg.v
self.cls = util.get_type(klass)
self.ftype = ftype
self.name = name
self.var_map[arg.v] = arg
self.clsdesc = klass
def get_type(self):
return self.ftype
def get_used_vars(self):
return self.var_map[self.arg].get_used_vars()
def visit(self, visitor):
return visitor.visit_get_instance(
self.var_map[self.arg],
self.name,
data=self.ftype)
def replace_var(self, old, new):
self.arg = new.v
self.var_map.pop(old)
self.var_map[new.v] = new
def replace(self, old, new):
v_m = self.var_map
arg = v_m[self.arg]
if not (arg.is_const() or arg.is_ident()):
arg.replace(old, new)
elif old in v_m:
if new.is_ident():
v_m[new.value()] = new
self.arg = new.value()
else:
v_m[old] = new
def __str__(self):
return '{}.{}'.format(self.var_map[self.arg], self.name)
class StaticExpression(IRForm):
def __init__(self, cls_name, field_type, field_name):
super().__init__()
self.cls = util.get_type(cls_name)
self.ftype = field_type
self.name = field_name
self.clsdesc = cls_name
def get_type(self):
return self.ftype
def visit(self, visitor):
return visitor.visit_get_static(self.cls, self.name)
def replace(self, old, new):
pass
def __str__(self):
return '{}.{}'.format(self.cls, self.name)
|
reox/androguard
|
androguard/decompiler/dad/instruction.py
|
Python
|
apache-2.0
| 37,768
|
[
"VisIt"
] |
205f0de1f005429d8527e5af68edf7753b4c03b6298249674bc9843257a73d90
|
# encoding: utf-8
"""surfaceslab.py - Window for setting up surfaces
"""
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, cancel_apply_ok, oops
from ase.gui.pybutton import PyButton
from ase.gui.setupwindow import SetupWindow
import ase.lattice.surface as _surf
import ase
import numpy as np
introtext = _("""\
Use this dialog to create surface slabs. Select the element by
writing the chemical symbol or the atomic number in the box. Then
select the desired surface structure. Note that some structures can
be created with an orthogonal or a non-orthogonal unit cell; in these
cases the non-orthogonal unit cell will contain fewer atoms.
If the structure matches the experimental crystal structure, you can
look up the lattice constant, otherwise you have to specify it
yourself.""")
# Name, structure, orthogonal, support-nonorthogonal, function
surfaces = [(_('FCC(100)'), _('fcc'), True, False, _surf.fcc100),
(_('FCC(110)'), _('fcc'), True, False, _surf.fcc110),
(_('FCC(111) non-orthogonal'), _('fcc'), False, True,
_surf.fcc111),
(_('FCC(111) orthogonal'), _('fcc'), True, True, _surf.fcc111),
(_('BCC(100)'), _('bcc'), True, False, _surf.bcc100),
(_('BCC(110) non-orthogonal'), _('bcc'), False, True,
_surf.bcc110),
(_('BCC(110) orthogonal'), _('bcc'), True, True, _surf.bcc110),
(_('BCC(111) non-orthogonal'), _('bcc'), False, True,
_surf.bcc111),
(_('BCC(111) orthogonal'), _('bcc'), True, True, _surf.bcc111),
(_('HCP(0001) non-orthogonal'), _('hcp'), False, True,
_surf.hcp0001),
(_('HCP(0001) orthogonal'), _('hcp'), True, True, _surf.hcp0001),
(_('HCP(10-10) orthogonal'), _('hcp'), True, False,
_surf.hcp10m10),
(_('DIAMOND(100) orthogonal'), _('diamond'), True, False,
_surf.diamond100),
(_('DIAMOND(111) non-orthogonal'), _('diamond'), False, True,
_surf.diamond111),
]
py_template = """
from ase.lattice.surface import %(func)s
atoms = %(func)s(symbol='%(symbol)s', size=%(size)s,
a=%(a).3f, vacuum=%(vacuum).3f%(orthoarg)s)
"""
class SetupSurfaceSlab(SetupWindow):
"""Window for setting up a surface."""
def __init__(self, gui):
SetupWindow.__init__(self)
self.set_title(_("Surface"))
self.atoms = None
vbox = gtk.VBox()
        # Introductory text
self.packtext(vbox, introtext)
# Choose the element
label = gtk.Label(_("Element: "))
element = gtk.Entry(max=3)
self.element = element
self.elementinfo = gtk.Label("")
pack(vbox, [label, element, self.elementinfo])
self.element.connect('activate', self.update)
self.legal_element = False
# Choose the surface structure
label = gtk.Label(_("Structure: "))
self.structchoice = gtk.combo_box_new_text()
self.surfinfo = {}
for s in surfaces:
assert len(s) == 5
self.structchoice.append_text(s[0])
self.surfinfo[s[0]] = s
pack(vbox, [label, self.structchoice])
self.structchoice.connect('changed', self.update)
# Choose the lattice constant
tbl = gtk.Table(2, 3)
label = gtk.Label(_("Lattice constant: "))
tbl.attach(label, 0, 1, 0, 1)
vbox2 = gtk.VBox() # For the non-HCP stuff
self.vbox_hcp = gtk.VBox() # For the HCP stuff.
self.lattice_const = gtk.Adjustment(3.0, 0.0, 1000.0, 0.01)
lattice_box = gtk.SpinButton(self.lattice_const, 10.0, 3)
lattice_box.numeric = True
pack(vbox2, [gtk.Label(_("a:")), lattice_box, gtk.Label(_(u"Å"))])
tbl.attach(vbox2, 1, 2, 0, 1)
lattice_button = gtk.Button(_("Get from database"))
tbl.attach(lattice_button, 2, 3, 0, 1)
# HCP stuff
        self.hcp_ideal = (8.0 / 3) ** 0.5  # ideal hcp c/a ratio: sqrt(8/3)
self.lattice_const_c = gtk.Adjustment(self.lattice_const.value * self.hcp_ideal,
0.0, 1000.0, 0.01)
lattice_box_c = gtk.SpinButton(self.lattice_const_c, 10.0, 3)
lattice_box_c.numeric = True
pack(self.vbox_hcp, [gtk.Label("c:"),
lattice_box_c, gtk.Label(u"Å")])
self.hcp_c_over_a_format = "c/a: %.3f " + _("(%.1f %% of ideal)")
self.hcp_c_over_a_label = gtk.Label(self.hcp_c_over_a_format % \
(self.hcp_ideal, 100.0))
pack(self.vbox_hcp, [self.hcp_c_over_a_label])
tbl.attach(self.vbox_hcp, 1, 2, 1, 2)
tbl.show_all()
pack(vbox, [tbl])
self.lattice_const.connect('value-changed', self.update)
self.lattice_const_c.connect('value-changed', self.update)
lattice_button.connect('clicked', self.get_lattice_const)
pack(vbox, gtk.Label(""))
# System size
self.size = [gtk.Adjustment(1, 1, 100, 1) for i in range(3)]
buttons = [gtk.SpinButton(s, 0, 0) for s in self.size]
self.vacuum = gtk.Adjustment(10.0, 0, 100.0, 0.1)
vacuum_box = gtk.SpinButton(self.vacuum, 0.0, 1)
pack(vbox, [gtk.Label(_("Size: \tx: ")), buttons[0],
gtk.Label(_(" unit cells"))])
pack(vbox, [gtk.Label(_("\t\ty: ")), buttons[1],
gtk.Label(_(" unit cells"))])
pack(vbox, [gtk.Label(_(" \t\tz: ")), buttons[2],
gtk.Label(_(" layers, ")),
vacuum_box, gtk.Label(_(u" Å vacuum"))])
self.nosize = _("\t\tNo size information yet.")
self.sizelabel = gtk.Label(self.nosize)
pack(vbox, [self.sizelabel])
for s in self.size:
s.connect('value-changed', self.update)
self.vacuum.connect('value-changed', self.update)
pack(vbox, gtk.Label(""))
# Buttons
self.pybut = PyButton(_("Creating a surface slab."))
self.pybut.connect('clicked', self.update)
buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
apply=self.apply,
ok=self.ok)
pack(vbox, [self.pybut, buts], end=True, bottom=True)
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
# Hide the HCP stuff to begin with.
self.vbox_hcp.hide_all()
# update_element inherited from SetupWindow
def update(self, *args):
"Called when something has changed."
struct = self.structchoice.get_active_text()
if struct:
structinfo = self.surfinfo[struct]
if structinfo[1] == 'hcp':
self.vbox_hcp.show_all()
ca = self.lattice_const_c.value / self.lattice_const.value
self.hcp_c_over_a_label.set_text(self.hcp_c_over_a_format %
(ca, 100 * ca / self.hcp_ideal))
else:
self.vbox_hcp.hide_all()
# Abort if element or structure is invalid
if not (self.update_element() and struct):
self.sizelabel.set_text(self.nosize)
self.atoms = None
self.pybut.python = None
return False
# Make the atoms
assert self.legal_element
kw = {}
kw2 = {}
        if structinfo[3]:  # Supports the orthogonal keyword?
kw['orthogonal'] = structinfo[2]
kw2['orthoarg'] = ', orthogonal=' + str(kw['orthogonal'])
else:
kw2['orthoarg'] = ''
kw2['func'] = structinfo[4].__name__
kw['symbol'] = self.legal_element
kw['size'] = [int(s.value) for s in self.size]
kw['a'] = self.lattice_const.value
kw['vacuum'] = self.vacuum.value
# Now create the atoms
try:
self.atoms = structinfo[4](**kw)
except ValueError as e:
# The values were illegal - for example some size
# constants must be even for some structures.
self.pybut.python = None
self.atoms = None
self.sizelabel.set_text(str(e).replace(". ", ".\n"))
return False
kw2.update(kw)
self.pybut.python = py_template % kw2
# Find the heights of the unit cell
h = np.zeros(3)
uc = self.atoms.get_cell()
for i in range(3):
norm = np.cross(uc[i-1], uc[i-2])
norm /= np.sqrt(np.dot(norm, norm))
h[i] = np.abs(np.dot(norm, uc[i]))
natoms = len(self.atoms)
txt = ("\t\t%.2f Å x %.2f Å x %.2f Å, %s"
% (h[0], h[1], h[2], _('%i atoms.') % natoms))
self.sizelabel.set_text(txt)
return True
def get_lattice_const(self, *args):
if not self.update_element():
oops(_("Invalid element."))
return
z = ase.data.atomic_numbers[self.legal_element]
ref = ase.data.reference_states[z]
surface = self.structchoice.get_active_text()
if not surface:
oops(_("No structure specified!"))
return
struct = self.surfinfo[surface][1]
if ref is None or ref['symmetry'] != struct:
from ase.data.alternatives import alternative_structures
alt = alternative_structures[z]
if alt and alt['symmetry'] == struct:
ref = alt
else:
                oops(_('%(struct)s lattice constant unknown for %(element)s.')
                     % dict(struct=struct.upper(), element=self.legal_element))
                return
a = ref['a']
self.lattice_const.set_value(a)
if struct == 'hcp':
c = ref['c/a'] * a
self.lattice_const_c.set_value(c)
def apply(self, *args):
self.update()
if self.atoms is not None:
self.gui.new_atoms(self.atoms)
return True
else:
oops(_("No valid atoms."),
_("You have not (yet) specified "
"a consistent set of parameters."))
return False
def ok(self, *args):
if self.apply():
self.destroy()
|
suttond/MODOI
|
ase/gui/surfaceslab.py
|
Python
|
lgpl-3.0
| 10,313
|
[
"ASE",
"CRYSTAL"
] |
09279a5140650ef9d597104f036aa533b7d8f46280856f1e0931834a02b2d0da
|
"""
This is a data file for IsyEvent.py
"""
# author : Peter Shipley <peter.shipley@gmail.com>
# copyright : Copyright (C) 2015 Peter Shipley
# license : BSD
__all__ = [] # EVENT_CTRL, LOG_USERID
## EVENT_CTRL ##
EVENT_CTRL = {
"_0" : "Heartbeat",
"_1" : "Trigger",
"_2" : "Protocol Specific",
"_3" : "Nodes Updated",
"_4" : "System Config Updated",
"_5" : "System Status",
"_6" : "Internet Access",
"_7" : "System Progress",
"_8" : "Security System",
"_9" : "System Alert",
"_10" : "Electricity",
"_11" : "Climate",
"_12" : "AMI/SEP",
"_13" : "Ext Energy Mon",
"_14" : "UPB Linker",
"_15" : "UPB Dev State",
"_16" : "UPB Dev Status",
"_17" : "Gas",
"_18" : "ZigBee",
"_19" : "Elk",
"_20" : "Device Link",
"DON" : "Device On",
"DFON" : "Device Fast On",
"DOF" : "Device Off",
"DFOF" : "Device Fast Off",
"ST" : "Status",
"OL" : "On Level",
"RR" : "Ramp Rate",
"BMAN" : "Start Manual Change",
"SMAN" : "Stop Manual Change",
"CLISP" : "Setpoint",
"CLISPH" : "Heat Setpoint",
"CLISPC" : "Cool Setpoint",
"CLIFS" : "Fan State",
"CLIMD" : "Thermostat Mode",
"CLIHUM" : "Humidity",
"CLIHCS" : "Heat/Cool State",
"BRT" : "Brighten",
"DIM" : "Dim",
"X10" : "Direct X10 Commands",
"BEEP" : "Beep",
}
LOG_USERID = [ "SYSTEM_USER", "SYSTEM_DRIVER_USER", "WEB_USER",
"SCHEDULER_USER", "D2D_USER", " ELK_USER",
"SEP_DEVICE_UMETER_USER", "SEP_DEVICE_UPRICE_USER",
"SEP_DEVICE_UMSG_USER", "SEP_DEVICE_UDR_USER",
"GAS_METER_USER" ]
LOG_TYPES = {
"1": "SYSTEM_STARTUP",
"2": "SYSTEM_SHUTDOWN",
"3": "WARNING",
"4": "INFO",
"5": "LOG",
"6": "UD_SEP_SUBSYS_STARTUP",
"-1": "REQUEST_FAILED_ERROR",
"-2": "DEVICE_COMMUNICATION_ERROR",
"-3": "DEVICE_RETURNED_INVALID_NODE",
"-4": "DEVICE_RETURNED_INVALID_ADDRESS",
"-5": "ERROR_LOGGER_STARTUP",
"-10": "MAIN_HAML_DRIVER_NOT_FOUND",
"-20": "MAIN_LOCAL_DEVICE_BLANK",
"-100": "SYSTEM_NO_NETWORK_CONNECTION",
"-101": "SYSTEM_WEBSERVER_SELECT_FAILED",
"-500": "HAML_DRIVER_LISTENER_NOT_REGISTERED",
"-1000": "HAML_PARSER_UNDEFINED_ELEMENT",
"-1001": "HAML_PARSER_ONDATA",
"-5001": "UPNP_DRIVER_NO_DEVICES_CONFIGURED",
"-5002": "UPNP_DRIVER_SERIAL_READER_FAILED",
"-5003": "UPNP_DRIVER_MAX_DEVICES",
"-5004": "UPNP_SERVICE_TYPE_SEARCH_NS",
"-5005": "UPNP_SUBSCRIPTION_NOT_FOUND_FOR_RENEWAL",
"-5006": "UPNP_SUBSCRIPTION_NOT_FOUND_FOR_CANCELATION",
"-5007": "UPNP_INVALID_SUBSCRIPTION_URL",
"-5008": "UPNP_INVALID_SUBSCRIPTION_CALLBACK",
"-5009": "UPNP_MAX_SUBSCRIBERS",
"-5010": "UPNP_SUBSCRIBER_TCP_CONNECT_FAILURE",
"-5011": "PROCESS_DEVICE_STATE_CHANGE_SID_NOT_FOUND",
"-5012": "UPNP_SUBSCRIBER_NOREPLY_TO_EVENT_1",
"-5013": "UPNP_SUBSCRIBER_NOREPLY_TO_EVENT_2",
"-5014": "UPNP_SUBSCRIBER_NOREPLY_TO_EVENT_3",
"-5015": "UPNP_CONTROL_MALFORMED_SOAP_REQUEST_1",
"-5016": "UPNP_CONTROL_MALFORMED_SOAP_REQUEST_2",
"-6000": "OS_DUPLICATE_TASK_PRIORITY",
"-6001": "OS_OPEN_SERIAL_FAILED",
"-7020": "D2D_PARSER_ERROR",
"-7029": "NOTIFICATIONS_MAIL_TO_ADDRESS_REQUIRED",
"-7030": "NOTIFICATIONS_SEND_MAIL_FAILED",
"-7050": "D2D_EXPECTED_D2D_TAG",
"-7051": "D2D_UNEXPECTED_TAG_IN_SENSE",
"-7052": "D2D_UNEXPECTED_TAG_IN_CONDITION",
"-7501": "DIAG_PARSER_ERROR",
"-7601": "LINK_PARSER_ERROR",
"-10100": "PNP_SECURITY_NOT_VERIFIED",
"-10001": "SSL_DECODING_LENGTHS_FAILED",
"-10002": "SSL_DECODING_PMOD_FAILED",
"-10003": "SSL_DECODING_PEXP_FAILED",
"-10004": "SSL_DECODING_PRI_EXP_FAILED",
"-10005": "SSL_DECODING_PRI_P_FAILED",
"-10006": "SSL_DECODING_PRI_Q_FAILED",
"-10007": "SSL_DECODING_PRI_X1_FAILED",
"-10008": "SSL_DECODING_PRI_X2_FAILED",
"-10009": "SSL_DECODING_COEFF_FAILED",
"-10010": "SSL_DECODING_CERT_FAILED",
"-10011": "SSL_REQUEST_NOT_AUTHENTICATED",
"-10026": "SECURE_SESSION_DOES_NOT_EXIST",
"-10027": "SECURE_SESSIONS_EXHAUSTED",
"-10101": "AUTHENTICATION_UNSUPPORTED_UID_LEN",
"-10102": "AUTHENTICATION_UNSUPPORTED_PWD_LEN",
"-10103": "AUTHENTICATION_USER_ID_DOES_NOT_EXIST",
"-10104": "AUTHENTICATION_USER_ID_PWD_NOT_PRESENT",
"-10105": "AUTHENTICATION_WRONG_PASSWORD",
"-10106": "AUTHENTICATION_FAILED",
"-10107": "HTTP_AUTH_DECODING_FAILED",
"-11000": "SECURITY_INITIALIZATION_FAILED",
"-12000": "TIMED_OUT_WAITING_FOR_CRITICAL_SECION",
"-12001": "ERROR_LEAVING_CRITICAL_SECTION_NOT_OWNED",
"-13000": "CONTENT_LEN_NOT_EQUAL_TO_HEADER_CONTENT_LEN",
"-14001 ": "XML_MALFORMED_TAG",
"-14002": "XML_MALFORMED_END_TAG",
"-14003 ": "XML_NO_START_TAG",
"-14004 ": "XML_NO_TAG_NAME",
"-14005 ": "XML_START_END_NAME_MISMATCH",
"-20000": "MALFORMED_UPNP_HEADERS",
"-50000": "MAIL_SERVER_CONNECT_ERROR",
"-50001": "SMTP_SERVER_FAILURE",
"-50010": "MAIL_SERVER_DNS_ERROR",
"-50011": "MAIL_MAX_FROM_LEN",
"-50012": "MAIL_MAX_SUBJECT_LEN",
"-50013": "MAIL_MAX_TO_LEN",
"-60000": "NTP_CONFIG_SERVER_NO_HOST_PARAM",
"-60001": "NTP_CONFIG_SERVER_ADDRESS_RESOLUTION_FAILED",
"-60002": "NTP_CONFIG_SERVER_NO_INTERVAL_PARAM",
"-60006": "NTP_SERVER_NOT_RESPONDING",
"-60007": "NTP_SERVER_CONNECT_ERROR",
"-70000": "OUT_OF_MEMORY",
"-80000": "IGD_FAILED_PARSING_DESCRIPTION_URL",
"-80001": "IGD_FAILED_RETRIEVING_DESCRIPTION_FILE",
"-80002": "IGD_FAILED_RETRIEVING_URL_BASE",
"-80003": "IGD_FAILED_PARSING_URL_BASE",
"-80004": "IGD_FAILED_RETRIEVING_WAN_CONNECTION_DEVICE",
"-80005": "IGD_FAILED_RETRIEVING_CONTROL_URL",
"-80006": "IGD_FAILED_PARSING_CONTROL_URL",
"-80007": "IGD_FAILED_RETRIEVING_EXTERNAL_IP",
"-80008": "IGD_NO_RESPONSE_FROM_GATEWAY",
"-80009": "IGD_FAILED_STRIPPING_HTTP_HEADERS",
"-80010": "IGD_FAILED_DELETING_PORT_FORWARD_MAP",
"-80011": "IGD_FAILED_ADDING_PORT_FORWARD_MAP",
"-80012": "IGD_FAILED_GETTING_SPECIFIC_ENTRY",
"-90001": "CRC_INVALID_ORDER",
"-90002": "CRC_INVALID_POLYNOM",
"-90003": "CRC_INVALID_CRC_INIT",
"-90004": "CRC_INVALID_CRC_XOR",
"-100000": "LOGGER_DIRECTORY_CREATION_FAILED",
"-100001": "LOGGER_SD_IS_NOT_INSTALLED",
"-100002": "LOGGER_LOG_FILE_OPEN_FAILED",
"-110000": "FILE_TO_STRING_OPEN_FAILED",
"-110001": "FILE_TO_STRING_MEM_ALLOC_FAILED",
"-110002": "SD_DRIVE_FORMAT_FAILED_1",
"-110003": "SD_DRIVE_FORMAT_FAILED_2",
"-110004": "SD_DRIVE_MOUNT_FAILED_1",
"-110005": "SD_DRIVE_MOUNT_FAILED_2",
"-110006": "SEND_FILE_OPEN_FAILED",
"-110007": "SEND_FILE_READ_FAILED",
"-110008": "RECEIVE_FILE_WRITE_FAILED",
"-110009": "RECEIVE_FILE_OPEN_FAILED",
"-110010": "SD_DRIVE_DIRECTORY_CREATION_FAILED",
"-110011": "SD_DRIVE_CONFIG_FILE_OPEN_WRITE_FAILED",
"-110012": "SD_DRIVE_CONFIG_FILE_OPEN_READ_FAILED",
"-110013": "SD_DRIVE_CONFIG_WRITE_FAILED",
"-110014": "SD_DRIVE_CONFIG_READ_FAILED",
"-110015": "STRING_TO_FILE_OPEN_FAILED",
"-110016": "STRING_TO_FILE_WRITE_FAILED",
"-110017": "FILE_TO_STRING_READ_FAILED",
"-110018": "REMOVE_FILE_FAILED",
"-110019": "REMOVE_DIR_FAILED",
"-110020": "FLUSH_FILE_FAILED",
"-110021": "CLOSE_FILE_FAILED",
"-110022": "OPEN_FILE_FAILED",
"-110023": "FLUSH_FILE_SYSTEM_FAILED",
"-110024": "FILESYSTEM_INIT_FAILED",
"-110025": "FILESYSTEM_CRIT_FAILED",
"-120000": "FIRMWARE_UPDATE_OPEN_FILE_FAILED",
"-120001": "FIRMWARE_UPDATE_HEADER_READ_FAILED",
"-120002": "FIRMWARE_UPDATE_CHECKSUM_FAILED",
"-120003": "FIRMWARE_UPDATE_MALLOC_FAILED",
"-120004": "FIRMWARE_UPDATE_DATA_READ_FAILED",
"-130000": "ELK_CONFIG_PARSER_ERROR",
"-140000": "HTTP_CLIENT_DNS_ERROR",
"-140001": "HTTP_CLIENT_BASE64_ENCRYPTION_FAILED",
"-140002": "HTTP_CLIENT_CONNECTION_TIMED_OUT",
"-140003": "HTTP_CLIENT_WRITE_HEADER_FAILED",
"-140004": "HTTP_CLIENT_WRITE_BODY_FAILED",
"-140005": "HTTP_CLIENT_READ_RESPONSE_FAILED",
"-140006": "HTTP_CLIENT_HEADER_NO_STATUS",
"-140007": "HTTP_CLIENT_RESOURCE_MOVED",
"-140008": "HTTP_CLIENT_REQUEST_FAILED",
"-140009": "HTTP_CLIENT_NO_NETWORK",
"-150000": "TCP_CLIENT_WRITE_FAILED",
"-150100": "UDP_CLIENT_DNS_ERROR",
"-160000": "PROTOCOL_READER_READ_ERROR",
"-160001": "PROTOCOL_READER_BUFFER_OVERFLOW",
"-160002": "PROTOCOL_READER_REOPEN_ERROR",
"-170000": "WEB_MODULE_NO_FREE_SPACE",
"-170001": "SYSTEM_ACCESS_LOG",
"-180000": "SEP_NETWORK_SCAN_ERROR",
"-180001": "SEP_NETWORK_KEY_EST_ERROR",
"-180002": "SEP_NETWORK_DISCOVERY_ERROR",
"-180003": "SEP_NETWORK_SYNCH_ERROR",
"-180004": "SEP_MODULE_RESET_ERROR",
"-180005": "SEP_MODULE_INVALID_CALL_ERROR",
"-180006": "SEP_MODULE_UNKNOWN_ERROR",
"-190001": "UDERR_ISY_API_NO_SPACE",
"-190002": "UDERR_ISY_API_INVALID_8_3_FILENAME",
"-190003": "UDERR_ISY_API_INVALID_PGM_FILENAME",
"-190004": "UDERR_ISY_API_INCORRECT_PGM_KEY",
"-190005": "UDERR_ISY_API_INVALID_PGM_URL_SEARCH_STRING",
"-200000": "DEVICE_DRIVER_ERROR_MSG",
"-210001": "CALL_HOME_PORTAL_NO_FD",
}
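# A minimal lookup sketch (illustrative, not part of the original module):
# both tables map ISY wire codes to human-readable names, so a safe accessor
# is a plain dict .get() with a fallback for unknown codes.
#
#     def decode(table, code):
#         return table.get(code, "UNKNOWN (%s)" % code)
#
#     decode(EVENT_CTRL, "DON")    # -> "Device On"
#     decode(LOG_TYPES, "-70000")  # -> "OUT_OF_MEMORY"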
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
|
joegross/ISYlib-python
|
ISY/IsyEventData.py
|
Python
|
bsd-2-clause
| 9,543
|
[
"Elk"
] |
2ef1267f9ec32de3fe87c3a72e070e65133179344e17bf283f724c0a45f134e8
|
# Outdoor Comfort Calculator - Universal Thermal Climate Index (UTCI)
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <Chris@MackeyArchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate the Universal Thermal Climate Index (UTCI) for a set of input climate conditions. Perhaps the most familiar application of the Universal Thermal Climate Index (UTCI) is the temperature given by TV weathermen and women when they say that, "even though the dry bulb temperature outside is a certain value, the temperature actually "feels like" something higher or lower."
UTCI is this temperature of what the weather "feels like" and it takes into account the radiant temperature (sometimes including solar radiation), relative humidity, and wind speed. UTCI uses these variables in a human energy balance model to give a temperature value that is indicative of the heat stress or cold stress felt by a human body in the outdoors.
_
A UTCI between 9 and 26 degrees Celsius indicates no thermal stress or comfortable conditions outdoors.
_
A UTCI between 26 and 28 degrees Celsius indicates slight heat stress (comfortable for short periods of time). Between 28 and 32 degrees, UTCI indicates moderate heat stress (hot but not dangerous). Between 32 and 38 degrees, UTCI indicates strong heat stress (dangerous beyond short periods of time). Above 38, UTCI indicates very strong to extreme heat stress (very dangerous).
_
A UTCI between 0 and 9 degrees Celsius indicates slight cold stress (comfortable for short periods of time). Between 0 and -13 degrees, UTCI indicates moderate cold stress (cold but not dangerous). Between -13 and -27 degrees, UTCI indicates strong cold stress (dangerous beyond short periods of time). Below -27, UTCI indicates very strong to extreme cold stress (very dangerous).
_
_
UTCI is the result of the world's leading comfort specialists' attempt to make an international standard of outdoor temperature sensation that fulfills the following requirements:
1) Thermo-physiological significance in the whole range of heat exchange conditions of existing thermal environments
2) Valid in all climates, seasons, and scales
3) Useful for key applications in human biometeorology.
_
_
The code that makes this component possible is a Python version of the original Fortran code for calculating UTCI. Information on UTCI and the original Fortran code can be found here: http://www.utci.org/.
-
Provided by Ladybug 0.0.60
Args:
_dryBulbTemperature: A number representing the dry bulb temperature of the air in degrees Celsius. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component.
meanRadiantTemperature_: A number representing the mean radiant temperature of the surrounding surfaces in degrees Celsius. If no value is plugged in here, this component will assume that the mean radiant temperature is equal to the air temperature value above. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component.
windSpeed_tenMeters_: A number representing the wind speed of the air in meters per second at 10 meters off the ground (note that all wind readings for EPW data are 10m off the ground). If no value is plugged in here, this component will assume a very low wind speed of 0.05 m/s, characteristic of most indoor conditions. This input can also accept a list of wind speeds representing conditions at different times or the direct output of windSpeed from the Import EPW component.
_relativeHumidity: A number between 0 and 100 representing the relative humidity of the air in percentage. This input can also accept a list of relative humidity values representing conditions at different times or the direct output of relativeHumidity from the Import EPW component.
------------------------------: ...
analysisPeriod_: An optional analysis period from the Analysis Period component. If no Analysis period is given and epw data from the ImportEPW component has been connected, the analysis will be run for the entire year.
Returns:
readMe!: ...
------------------------------: ...
universalThermalClimateIndex: The UTCI of the input conditions in degrees Celsius. Perhaps the most familiar application of the Universal Thermal Climate Index (UTCI) is the temperature given by TV weathermen and women when they say that, even though the dry bulb temperature outside is a certain value, the temperature actually "feels like" something higher or lower. UTCI is this temperature of what the weather "feels like" and it takes into account radiant temperature (usually including solar radiation), relative humidity, wind speed and uses them in a human energy balance model to give a temperature value that is indicative of the heat stress or cold stress felt by the human body.
comfortableOrNot: A stream of 0's and 1's (or "False" and "True" values) indicating whether a person outside is comfortable for each hour of the input conditions. 0 indicates that a person is not comfortable while 1 indicates that a person is comfortable. A person is considered to be comfortable when he/she experiences no thermal stress (9 < UTCI < 26).
thermalStress: A stream of integer values from -1 to +1 that indicate the following:
-1 - Cold Stress - cold conditions (UTCI < 9C).
0 - No Thermal Stress - comfortable conditions (9C < UTCI < 26C).
+1 - Heat Stress - hot conditions (UTCI > 26C).
conditionOfPerson: A stream of integer values from -3 to +3 that indicate the following:
-3 - Strong Cold Stress - potential public health hazard with higher-than-normal mortality rates (UTCI < -13C).
-2 - Moderate Cold Stress - cold but no public health hazard (-13C < UTCI < 0C).
-1 - Slight Cold Stress - cool but comfortable for short periods of time (0C < UTCI < 9C)
0 - No Thermal Stress - comfortable conditions (9C < UTCI < 26C).
+1 - Slight Heat Stress - warm but comfortable for short periods of time (26C < UTCI < 28C).
+2 - Moderate Heat Stress - hot but no public health hazard (28C < UTCI < 32C).
+3 - Strong Heat Stress - potential public health hazard with higher-than-normal mortality rates (UTCI > 32C).
------------------------------: ...
percentOfTimeComfortable: The percent of the input data for which the UTCI indicates no thermal stress (comfortable conditions). Comfortable conditions are when the UTCI is between 9 and 26 degrees Celsius.
percentComfForShortPeriod: The percent of the input data for which the UTCI indicates slight heat/cold stress. This indicates conditions that are comfortable for short periods of time with proper attire. This includes all conditions when the UTCI is between 0 and 9 degrees Celsius or between 26 and 28 degrees Celsius.
percentHeatStress: The percent of the input data for which the UTCI indicates moderate-to-extreme heat stress. This indicates conditions that are not comfortable. This includes all conditions when the UTCI is above 28 degrees Celsius.
percentColdStress: The percent of the input data for which the UTCI indicates moderate-to-extreme cold stress. This indicates conditions that are not comfortable. This includes all conditions when the UTCI is below 0 degrees Celsius.
"""
ghenv.Component.Name = "Ladybug_Outdoor Comfort Calculator"
ghenv.Component.NickName = 'OutdoorComfortCalculator'
ghenv.Component.Message = 'VER 0.0.60\nJUL_06_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "1 | AnalyzeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "3"
except: pass
import Grasshopper.Kernel as gh
import math
import scriptcontext as sc
def checkTheInputs():
#Define a value that will indicate whether someone has hooked up epw data.
epwData = False
epwStr = []
#Define a function to duplicate data
def duplicateData(data, calcLength):
dupData = []
for count in range(calcLength):
dupData.append(data[0])
return dupData
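#For example, duplicateData([21.0], 3) returns [21.0, 21.0, 21.0].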
#Check length of the _dryBulbTemperature list and evaluate the contents.
checkData1 = False
airTemp = []
airMultVal = False
if len(_dryBulbTemperature) != 0:
try:
if 'Temperature' in _dryBulbTemperature[2]:
airTemp = _dryBulbTemperature[7:]
checkData1 = True
epwData = True
epwStr = _dryBulbTemperature[0:7]
except: pass
if checkData1 == False:
for item in _dryBulbTemperature:
try:
airTemp.append(float(item))
checkData1 = True
except: checkData1 = False
if len(airTemp) > 1: airMultVal = True
if checkData1 == False:
warning = '_dryBulbTemperature input does not contain valid temperature values in degrees Celsius.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
print 'Connect a temperature in degrees Celsius for _dryBulbTemperature'
#Check length of the meanRadiantTemperature_ list and evaluate the contents.
checkData2 = False
radTemp = []
radMultVal = False
if len(meanRadiantTemperature_) != 0:
try:
if 'Temperature' in meanRadiantTemperature_[2]:
radTemp = meanRadiantTemperature_[7:]
checkData2 = True
epwData = True
epwStr = meanRadiantTemperature_[0:7]
except: pass
if checkData2 == False:
for item in meanRadiantTemperature_:
try:
radTemp.append(float(item))
checkData2 = True
except: checkData2 = False
if len(radTemp) > 1: radMultVal = True
if checkData2 == False:
warning = 'meanRadiantTemperature_ input does not contain valid temperature values in degrees Celsius.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData2 = True
radTemp = airTemp
if len (radTemp) > 1: radMultVal = True
print 'No value connected for meanRadiantTemperature_. It will be assumed that the radiant temperature is the same as the air temperature.'
#Check length of the windSpeed_tenMeters_ list and evaluate the contents.
checkData3 = False
windSpeed = []
windMultVal = False
nonPositive = True
if len(windSpeed_tenMeters_) != 0:
try:
if windSpeed_tenMeters_[2] == 'Wind Speed':
windSpeed = windSpeed_tenMeters_[7:]
checkData3 = True
epwData = True
epwStr = windSpeed_tenMeters_[0:7]
except: pass
if checkData3 == False:
for item in windSpeed_tenMeters_:
try:
if float(item) >= 0:
windSpeed.append(float(item))
checkData3 = True
else: nonPositive = False
except: checkData3 = False
if nonPositive == False: checkData3 = False
if len(windSpeed) > 1: windMultVal = True
if checkData3 == False:
warning = 'windSpeed_tenMeters_ input does not contain valid wind speed in meters per second. Note that wind speed must be positive.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData3 = True
windSpeed = [0.05]
print 'No value connected for windSpeed_tenMeters_. It will be assumed that the wind speed is a low 0.05 m/s.'
#Check length of the _relativeHumidity list and evaluate the contents.
checkData4 = False
relHumid = []
humidMultVal = False
nonValue = True
if len(_relativeHumidity) != 0:
try:
if _relativeHumidity[2] == 'Relative Humidity':
relHumid = _relativeHumidity[7:]
checkData4 = True
epwData = True
epwStr = _relativeHumidity[0:7]
except: pass
if checkData4 == False:
for item in _relativeHumidity:
try:
if 0 <= float(item) <= 100:
relHumid.append(float(item))
checkData4 = True
else: nonValue = False
except:checkData4 = False
if nonValue == False: checkData4 = False
if len(relHumid) > 1: humidMultVal = True
if checkData4 == False:
warning = '_relativeHumidity input does not contain a valid value.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
print 'Connect a value for _relativeHumidity.'
#Finally, for those lists of length greater than 1, check to make sure that they are all the same length.
checkData5 = False
if checkData1 == True and checkData2 == True and checkData3 == True and checkData4 == True:
if airMultVal == True or radMultVal == True or windMultVal == True or humidMultVal == True:
listLenCheck = []
if airMultVal == True: listLenCheck.append(len(airTemp))
if radMultVal == True: listLenCheck.append(len(radTemp))
if windMultVal == True: listLenCheck.append(len(windSpeed))
if humidMultVal == True: listLenCheck.append(len(relHumid))
if all(x == listLenCheck[0] for x in listLenCheck) == True:
checkData5 = True
calcLength = listLenCheck[0]
if airMultVal == False: airTemp = duplicateData(airTemp, calcLength)
if radMultVal == False: radTemp = duplicateData(radTemp, calcLength)
if windMultVal == False: windSpeed = duplicateData(windSpeed, calcLength)
if humidMultVal == False: relHumid = duplicateData(relHumid, calcLength)
else:
calcLength = None
warning = 'If you have put in lists with multiple values, the lengths of these lists must match across the different parameters, or you should use a single value for a given parameter to be applied to all values in the list.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData5 = True
calcLength = 1
else:
calcLength = 0
#If all of the checkDatas have been good to go, let's give a final go ahead.
if checkData1 == True and checkData2 == True and checkData3 == True and checkData4 == True and checkData5 == True:
checkData = True
else:
checkData = False
#Let's return everything we need.
return checkData, epwData, epwStr, calcLength, airTemp, radTemp, windSpeed, relHumid
def main():
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()
#Check the inputs and organize the incoming data into streams that can be run through the comfort model.
checkData = False
checkData, epwData, epwStr, calcLength, airTemp, radTemp, windSpeed, relHumid = checkTheInputs()
#Check if there is an analysisPeriod_ connected and, if not, run it for the whole year.
if calcLength == 8760 and len(analysisPeriod_)!=0 and epwData == True:
HOYS, months, days = lb_preparation.getHOYsBasedOnPeriod(analysisPeriod_, 1)
runPeriod = analysisPeriod_
calcLength = len(HOYS)
elif len(analysisPeriod_)==0 and epwData == True:
HOYS = range(calcLength)
runPeriod = [epwStr[5], epwStr[6]]
else:
HOYS = range(calcLength)
runPeriod = [(1,1,1), (12,31,24)]
#If things are good, run it through the comfort model.
universalThermalClimateIndex = []
comfortableOrNot = []
thermalStressType = []
coldStressComfortableHeatStress = []
percentOfTimeComfortable = None
percentComfForShortPeriod = None
percentHeatStress = None
percentColdStress = None
# The more specific 'for' header variant must be checked first; otherwise that branch is unreachable.
if checkData == True and epwData == True and 'for' in epwStr[2]:
    universalThermalClimateIndex.extend([epwStr[0], epwStr[1], 'Universal Thermal Climate Index' + ' for ' + epwStr[2].split('for ')[-1], 'C', epwStr[4], runPeriod[0], runPeriod[1]])
    comfortableOrNot.extend([epwStr[0], epwStr[1], 'Comfort or Not' + ' for ' + epwStr[2].split('for ')[-1], 'Boolean Value', epwStr[4], runPeriod[0], runPeriod[1]])
    thermalStressType.extend([epwStr[0], epwStr[1], 'Thermal Stress', '-1 = Cold | 0 = Comfort | 1 = Hot', epwStr[4], runPeriod[0], runPeriod[1]])
    coldStressComfortableHeatStress.extend([epwStr[0], epwStr[1], 'Outdoor Comfort' + ' for ' + epwStr[2].split('for ')[-1], '-3 = Extreme Cold | -2 = Cold | -1 = Cool | 0 = Comfort | 1 = Warm | 2 = Hot | 3 = Extreme Heat', epwStr[4], runPeriod[0], runPeriod[1]])
elif checkData == True and epwData == True:
    universalThermalClimateIndex.extend([epwStr[0], epwStr[1], 'Universal Thermal Climate Index', 'C', epwStr[4], runPeriod[0], runPeriod[1]])
    comfortableOrNot.extend([epwStr[0], epwStr[1], 'Comfort or Not', 'Boolean Value', epwStr[4], runPeriod[0], runPeriod[1]])
    thermalStressType.extend([epwStr[0], epwStr[1], 'Thermal Stress', '-1 = Cold | 0 = Comfort | 1 = Hot', epwStr[4], runPeriod[0], runPeriod[1]])
    coldStressComfortableHeatStress.extend([epwStr[0], epwStr[1], 'Outdoor Comfort', '-3 = Extreme Cold | -2 = Cold | -1 = Cool | 0 = Comfort | 1 = Warm | 2 = Hot | 3 = Extreme Heat', epwStr[4], runPeriod[0], runPeriod[1]])
if checkData == True:
try:
utciList = []
comfOrNot = []
thermalStr = []
coldComfHot = []
for count in HOYS:
# let the user cancel the process
if gh.GH_Document.IsEscapeKeyDown(): assert False
#If the difference between the air and rad temperatures is 70 or more (because of solar radiation), move each closer to the average of the two.
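#(For example, airTemp = 20 and radTemp = 95 gives distToMove = (75 - 69)/2 = 3, so radTemp becomes 92 and airTemp becomes 23, closing the gap to 69.)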
if radTemp[count] - airTemp[count] >= 70.0:
distToMove = ((radTemp[count] - airTemp[count]) - 69.0)/2
radTemp[count] = radTemp[count]-distToMove
airTemp[count] = airTemp[count]+distToMove
print "Index " + str(count) + " had a difference between air temperature and radiant temperature greater than 70. Both temperatures wee moved closer to their average to prevent the comfort model from failing."
utci, comf, condition, stressVal = lb_comfortModels.comfUTCI(airTemp[count], radTemp[count], windSpeed[count], relHumid[count])
if utci != None:
utciList.append(utci)
comfOrNot.append(comf)
thermalStr.append(stressVal)
coldComfHot.append(condition)
else:
utciList.append(50)
comfOrNot.append(0)
thermalStr.append(1)
coldComfHot.append(3)
comfTime = []
for item in comfOrNot:
if item == 1: comfTime.append(1.0)
else: pass
percentOfTimeComfortable = ((sum(comfTime))/calcLength)*100
short = []
hot = []
cold = []
for item in coldComfHot:
if item == -1 or item == 1: short.append(1.0)
elif item == -2 or item == -3: cold.append(1.0)
elif item == 2 or item == 3: hot.append(1.0)
else: pass
percentHeatStress = ((sum(hot))/calcLength)*100
percentColdStress = ((sum(cold))/calcLength)*100
percentComfForShortPeriod = ((sum(short))/calcLength)*100
universalThermalClimateIndex.extend(utciList)
comfortableOrNot.extend(comfOrNot)
thermalStressType.extend(thermalStr)
coldStressComfortableHeatStress.extend(coldComfHot)
except:
universalThermalClimateIndex = []
comfortableOrNot = []
thermalStressType = []
coldStressComfortableHeatStress = []
percentOfTimeComfortable = None
percentComfForShortPeriod = None
percentHeatStress = None
percentColdStress = None
print "The calculation has been terminated by the user!"
e = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(e, "The calculation has been terminated by the user!")
#Return all of the info.
return universalThermalClimateIndex, comfortableOrNot, thermalStressType, coldStressComfortableHeatStress, percentOfTimeComfortable, percentComfForShortPeriod, percentHeatStress, percentColdStress
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return [None, None, None, None, None, None, None, None]
if _runIt == True:
results = main()
if results != -1:
universalThermalClimateIndex, comfortableOrNot, thermalStress, conditionOfPerson, \
percentOfTimeComfortable, percentComfForShortPeriod, percentHeatStress, \
percentColdStress = results
|
samuto/ladybug
|
src/Ladybug_Outdoor Comfort Calculator.py
|
Python
|
gpl-3.0
| 23,821
|
[
"EPW"
] |
82e0914ee4fe0fcd7f738e598d879477733a463d77e952bdd8f22c494bdfb457
|
from __future__ import print_function, division
import os,unittest
from pyscf.nao import tddft_iter
from pyscf.nao.m_comp_spatial_distributions import spatial_distribution
import h5py
import numpy as np
dname = os.path.dirname(os.path.abspath(__file__))
Ha = 27.211386024367243
td = tddft_iter(label='water', iter_broadening=0.15/Ha, xc_code='LDA,PZ',
tol_loc=1e-4, tol_biloc=1e-6, cd=dname, verbosity=0)
class KnowValues(unittest.TestCase):
def test_tddft_iter_spatial(self):
""" Check the spatial density change distribution"""
self.assertTrue(hasattr(td, 'xocc'))
self.assertTrue(hasattr(td, 'xvrt'))
self.assertEqual(td.xocc[0].shape[0], 4)
self.assertEqual(td.xvrt[0].shape[0], 19)
# run TDDFT
omegas = h5py.File(dname+"/tddft_iter_output_water_ref.hdf5", "r")["polarizability/frequency"].value/Ha + 1j*td.eps
td.comp_dens_inter_along_Eext(omegas, Eext=np.array([1.0, 1.0, 1.0]))
np.save("density_change_pyscf.npy", td.dn)
np.save("frequency.npy", omegas.real)
np.save("pol_tensor.npy", td.p_mat)
ref = h5py.File(dname+"/tddft_iter_output_water_ref.hdf5", "r")["polarizability"]
pyscf = np.load("pol_tensor.npy")
pyscf_freq = np.load("frequency.npy")
for ireim, reim in enumerate(["re", "im"]):
for i in range(3):
for j in range(3):
mbpt = ref["dipol_inter_iter_krylov_"+reim].value[j, i, :]
if ireim == 0:
py = -pyscf[i, j, :].real
elif ireim == 1:
py = -pyscf[i, j, :].imag
error = np.sum(abs(mbpt-py))/py.size
assert error < 5e-3
# calculate spatial distribution of density change
dn = np.load("density_change_pyscf.npy")
freq = np.load("frequency.npy")
box = np.array([[-15.0, 15.0],
[-15.0, 15.0],
[-15.0, 15.0]])
dr = np.array([0.5, 0.5, 0.5])
spd = spatial_distribution(dn, freq, box, dr = dr, label="water",
tol_loc=1e-4, tol_biloc=1e-6, cd=dname)
spd.get_spatial_density(8.35/Ha, Eext=np.array([1.0, 1.0, 1.0]))
ref = h5py.File(dname+"/tddft_iter_output_water_ref.hdf5", "r")
dn_mbpt = ref["field_spatial_dir_0.58_0.58_0.58_freq_8.35_inter/dens_re"].value +\
1.0j*ref["field_spatial_dir_0.58_0.58_0.58_freq_8.35_inter/dens_im"].value
Np = spd.dn_spatial.shape[1]//2
Nm = dn_mbpt.shape[1]//2
error = np.sum(abs(spd.dn_spatial[:, Np, :].imag - dn_mbpt[:, Nm, :].imag.T))/dn[:, Np, :].imag.size
assert error < 1e-2
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0072_tddft_iter_dens_spatial.py
|
Python
|
apache-2.0
| 2,632
|
[
"PySCF"
] |
3ddf8cc41c2a6da1630cb9ecc2e6b47faaab88152b9f5ed396ed6c602d0fa1bc
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""Unit tests for various models and operators"""
import os
import sys
from time import time
import numpy as np
import torch
import torchvision
import tvm
import tvm.testing
from packaging import version as package_version
from scipy.stats import t as tdistr
from torch.nn import Module
from torch.nn import functional as F
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.nvcc import have_fp16
import pytest
sys.setrecursionlimit(10000)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
def list_ops(expr):
class OpLister(tvm.relay.ExprVisitor):
def visit_op(self, expr):
if expr not in self.node_set:
    self.node_set.add(expr)
    self.node_list.append(expr)
return super().visit_op(expr)
def list_nodes(self, expr):
self.node_set = set()
self.node_list = []
self.visit(expr)
return self.node_list
return OpLister().list_nodes(expr)
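# A minimal usage sketch (illustrative): list_ops(mod["main"].body) returns
# each distinct relay Op encountered in the expression, in first-visit order.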
def assert_shapes_match(tru, est):
if tru.shape != est.shape:
msg = "Output shapes {} and {} don't match"
raise AssertionError(msg.format(tru.shape, est.shape))
def load_torchvision(model_name):
"""Given a model name, returns a Torchvision model in eval mode as well
as an example input."""
with torch.no_grad():
if model_name.startswith("inception"):
height = width = 299
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
else:
height = width = 224
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_shape = [1, 3, height, width]
input_data = torch.randn(input_shape).float()
for channel in range(3):
input_data[:, channel] -= mean[channel]
input_data[:, channel] /= std[channel]
if model_name.startswith("googlenet"):
model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)
else:
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.float().eval()
return model, [input_data]
def load_pretrainedmodels(model_name):
"""Given a model name, returns a pretrainedmodels.pytorch model in eval
mode as well as an example input."""
import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch
model = getattr(pretrainedmodels, model_name)().float().eval()
input_shape = [1, *model.input_size]
input_data = torch.rand(input_shape).float() * 256
for channel in range(3):
input_data[:, channel] -= model.mean[channel]
input_data[:, channel] /= model.std[channel]
return model, [input_data]
def load_model(model_name):
"""Given a model name, returns a model as well as an example input."""
if hasattr(torchvision.models, model_name):
return load_torchvision(model_name)
try:
import pretrainedmodels
if hasattr(pretrainedmodels, model_name):
return load_pretrainedmodels(model_name)
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install pretrainedmodels.pytorch")
raise RuntimeError("Model not supported")
def confidence_interval(mean, stdev, count, alpha=0.01):
"""Returns the lower and upper bounds of the confidence interval of a random
variable. Confidence is 1 - alpha (default confidence is 99%)."""
stdval = tdistr.ppf(1 - alpha / 2, count - 1)
lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)
return lower, upper
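# A minimal usage sketch (the numbers are illustrative, not measured): for
# 100 latency samples with mean 5.0 and stdev 0.2,
#     lower, upper = confidence_interval(5.0, 0.2, 100)  # ~ (4.947, 5.053)
# since t.ppf(0.995, 99) ~= 2.63 and 2.63 * 0.2 / sqrt(100) ~= 0.053.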
def measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):
"""Compute the latency of the given model"""
latencies = []
count = 0
while True:
if isinstance(model, Module):
input_data = [torch.rand(shape).float() for shape in input_shapes]
if torch.cuda.is_available():
input_data = list(map(lambda x: x.cuda(), input_data))
model = model.cuda()
t_start = time()
with torch.no_grad():
model(*input_data)
t_end = time()
latencies.append(t_end - t_start)
else:
input_data = {}
for i, shape in enumerate(input_shapes):
name = "input" + str(i)
arr = np.random.random(shape).astype("float32")
input_data[name] = tvm.nd.array(arr)
t_start = time()
model.set_input(**input_data)
model.run()
for i, shape in enumerate(output_shapes):
arr = np.zeros(shape).astype("float32")
model.get_output(i, tvm.nd.array(arr))
t_end = time()
count += 1
if count < dryruns:
continue
latencies.append(t_end - t_start)
mean = np.mean(latencies)
stdev = np.std(latencies)
sample_size = len(latencies)
if sample_size > dryruns:
lower, upper = confidence_interval(mean, stdev, sample_size)
est = (upper + lower) / 2
err = (upper - lower) / 2
if err < thresh:
return est
def verify_model(
model_name, input_data=[], custom_convert_map={}, rtol=1e-5, atol=1e-5, expected_ops=[]
):
"""Assert that the output of a compiled model matches with that of its
baseline."""
if isinstance(model_name, str):
baseline_model, baseline_input = load_model(model_name)
elif isinstance(input_data, list):
baseline_model = model_name
baseline_input = input_data
elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:
baseline_model = model_name
baseline_input = [input_data]
else:
assert False, "Unexpected input format"
if torch.cuda.is_available():
if isinstance(baseline_model, torch.nn.Module):
baseline_model = baseline_model.cuda()
baseline_input = [inp.cuda() for inp in baseline_input]
with torch.no_grad():
baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])
if isinstance(baseline_outputs, tuple):
baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
else:
baseline_outputs = (baseline_outputs.cpu().numpy(),)
trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])
if isinstance(baseline_model, torch.nn.Module):
trace = trace.float().eval()
if torch.cuda.is_available():
trace = trace.cuda()
else:
trace = trace.cpu()
input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
for arg in mod["main"].params[: len(input_names)]:
assert arg.name_hint in input_names
compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))
with tvm.transform.PassContext(opt_level=3):
for target in ["llvm", "cuda"]:
if not tvm.runtime.enabled(target):
continue
dev = tvm.device(target, 0)
relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
relay_model = graph_executor.create(relay_graph, relay_lib, dev)
relay_model.set_input(**relay_params)
for name, inp in compiled_input.items():
relay_model.set_input(name, inp)
relay_model.run()
for i, baseline_output in enumerate(baseline_outputs):
compiled_output = relay_model.get_output(i).numpy()
assert_shapes_match(baseline_output, compiled_output)
tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
if expected_ops:
def visit(op):
if isinstance(op, tvm.ir.op.Op):
if op.name in expected_ops:
expected_ops.remove(op.name)
tvm.relay.analysis.post_order_visit(mod["main"].body, visit)
if expected_ops:
msg = "TVM Relay do not contain expected ops {}"
raise AssertionError(msg.format(expected_ops))
del model_name
del baseline_model
torch.cuda.empty_cache()
def verify_span(model_name, input_data=[], custom_convert_map={}):
if isinstance(model_name, str):
baseline_model, baseline_input = load_model(model_name)
elif isinstance(input_data, list):
baseline_model = model_name
baseline_input = input_data
elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:
baseline_model = model_name
baseline_input = [input_data]
else:
assert False, "Unexpected input format"
trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])
if isinstance(baseline_model, torch.nn.Module):
trace = trace.float().eval()
if torch.cuda.is_available():
trace = trace.cuda()
else:
trace = trace.cpu()
input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
# collect fail cases for the convenience of further improvement
fail_cases = []
mod_main_start = False
for line in str(mod).split("\n"):
if "@main" in line:
mod_main_start = True
continue
if mod_main_start == True:
if "}" == line:
break
elif not ("/*" in line and "*/" in line):
fail_cases.append(line)
print(fail_cases)
assert len(fail_cases) == 0
def test_span():
verify_span("resnet18")
# Single operator tests
@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
torch.set_grad_enabled(False)
input_shape = [1, 144, 16, 16]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.PixelShuffle(2).float().eval(), input_data=input_data)
verify_model(torch.nn.PixelShuffle(3).float().eval(), input_data=input_data)
verify_model(torch.nn.PixelShuffle(4).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_add():
torch.set_grad_enabled(False)
input_shape = [10]
class Add1(Module):
def forward(self, *args):
return args[0] + args[0]
class Add2(Module):
def forward(self, *args):
return args[0] + 1
class Add3(Module):
def forward(self, *args):
ones = torch.ones(input_shape, dtype=torch.float)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] + ones
class Add4(Module):
def forward(self, *args):
ones = torch.ones([], dtype=torch.float)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] + ones
input_data = torch.rand(input_shape).float()
verify_model(Add1().float().eval(), input_data=input_data)
verify_model(Add2().float().eval(), input_data=input_data)
verify_model(Add3().float().eval(), input_data=input_data)
verify_model(Add4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_subtract():
torch.set_grad_enabled(False)
input_shape = [10]
class Subtract1(Module):
def forward(self, *args):
return args[0] - args[0]
class Subtract2(Module):
def forward(self, *args):
return args[0] - 1
class Subtract3(Module):
def forward(self, *args):
ones = torch.ones(input_shape)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] - ones
class Subtract4(Module):
def forward(self, *args):
ones = torch.ones([])
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] - ones
input_data = torch.rand(input_shape).float()
verify_model(Subtract1().float().eval(), input_data=input_data)
verify_model(Subtract2().float().eval(), input_data=input_data)
verify_model(Subtract3().float().eval(), input_data=input_data)
verify_model(Subtract4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_multiply():
torch.set_grad_enabled(False)
input_shape = [10]
class Multiply1(Module):
def forward(self, *args):
return args[0] * args[0]
class Multiply2(Module):
def forward(self, *args):
return args[0] * 1.0
class Multiply3(Module):
def forward(self, *args):
ones = torch.ones(input_shape)
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] * ones
class Multiply4(Module):
def forward(self, *args):
ones = torch.ones([])
if torch.cuda.is_available():
ones = ones.cuda()
return args[0] * ones
input_data = torch.rand(input_shape).float()
verify_model(Multiply1().float().eval(), input_data=input_data)
verify_model(Multiply2().float().eval(), input_data=input_data)
verify_model(Multiply3().float().eval(), input_data=input_data)
verify_model(Multiply4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_min_max():
class Max(Module):
def forward(self, inp):
return torch.max(inp)
class Min(Module):
def forward(self, inp):
return torch.min(inp)
class Max2(Module):
def forward(self, inp):
out, _ = torch.max(inp, 1, keepdim=True)
return out
class Min2(Module):
def forward(self, inp):
out, _ = torch.min(inp, 0, keepdim=False)
return out
class Max3(Module):
def forward(self, lhs, rhs):
return torch.max(lhs, rhs)
class Min3(Module):
def forward(self, lhs, rhs):
return torch.min(lhs, rhs)
input_data = [torch.rand((10, 10)), torch.rand((10, 10))]
verify_model(Max(), input_data=input_data[0])
verify_model(Min(), input_data=input_data[0])
verify_model(Max2(), input_data=input_data[0])
verify_model(Min2(), input_data=input_data[0])
verify_model(Max3(), input_data=input_data)
verify_model(Min3(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reciprocal():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Reciprocal1(Module):
def forward(self, *args):
return args[0].reciprocal()
input_data = torch.rand(input_shape).float()
verify_model(Reciprocal1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat():
torch.set_grad_enabled(False)
input_shape = [1, 3]
class Repeat1(Module):
def forward(self, *args):
return args[0].repeat(1, 1)
class Repeat2(Module):
def forward(self, *args):
return args[0].repeat(4, 2)
class Repeat3(Module):
def forward(self, *args):
return args[0].repeat(4, 2, 1)
input_data = torch.rand(input_shape).float()
verify_model(Repeat1().float().eval(), input_data=input_data)
verify_model(Repeat2().float().eval(), input_data=input_data)
verify_model(Repeat3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_repeat_interleave():
torch.set_grad_enabled(False)
input_shape = [2, 2, 3]
class RepeatInterleave1(Module):
def forward(self, *args):
return args[0].repeat_interleave(2)
class RepeatInterleave2(Module):
def forward(self, *args):
return args[0].repeat_interleave(3, dim=0)
class RepeatInterleave3(Module):
def forward(self, *args):
return args[0].repeat_interleave(2, dim=1)
class RepeatInterleave4(Module):
def forward(self, *args):
return args[0].repeat_interleave(4, dim=2)
input_data = torch.rand(input_shape).float()
verify_model(RepeatInterleave1().float().eval(), input_data=input_data)
verify_model(RepeatInterleave2().float().eval(), input_data=input_data)
verify_model(RepeatInterleave3().float().eval(), input_data=input_data)
verify_model(RepeatInterleave4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_unsqueeze():
torch.set_grad_enabled(False)
input_shape = [10, 10]
class Unsqueeze1(Module):
def forward(self, *args):
return args[0].unsqueeze(2)
class Unsqueeze2(Module):
def forward(self, *args):
_ = args[0].unsqueeze_(2)
# Check whether operations after inplace unsqueeze works as expected
y = args[0].squeeze(2)
return torch.add(y, y)
input_data = torch.rand(input_shape).float()
verify_model(Unsqueeze1().float().eval(), input_data=input_data)
verify_model(Unsqueeze2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_squeeze():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Squeeze1(Module):
def forward(self, *args):
return args[0].squeeze()
class Squeeze2(Module):
def forward(self, *args):
return args[0].squeeze(1)
input_data = torch.rand(input_shape).float()
verify_model(Squeeze1().float().eval(), input_data=input_data)
verify_model(Squeeze2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_arange():
torch.set_grad_enabled(False)
class Arange1(Module):
def forward(self, *args):
return torch.arange(5)
class Arange2(Module):
def forward(self, *args):
return torch.arange(2.5)
class Arange3(Module):
def forward(self, *args):
return torch.arange(1, 4)
class Arange4(Module):
def forward(self, *args):
return torch.arange(1, 2.5, 0.5)
class Arange5(Module):
def forward(self, *args):
return torch.arange(1, 2, 1, dtype=torch.int32)
class Arange6(Module):
def forward(self, *args):
return torch.arange(start=1, end=6, step=2)
class Arange7(Module):
def forward(self, *args):
return torch.arange(1, 4, dtype=torch.float32)
class Arange8(Module):
def forward(self, *args):
return torch.arange(1, 2, 1, dtype=torch.int16)
class Arange9(Module):
def forward(self, *args):
end = torch.add(torch.tensor(4), 1)
return torch.arange(end) + torch.ones((5,), dtype=torch.int64)
class Arange10(Module):
def forward(self, *args):
end = torch.add(torch.tensor(4.0), torch.tensor(1.0))
return torch.arange(end) + torch.ones((5,), dtype=torch.float)
class Arange11(Module):
def forward(self, *args):
start = torch.add(torch.tensor(1), 1)
end = torch.add(torch.tensor(4), 1)
step = torch.add(torch.tensor(2), 1)
out = torch.arange(start, end, step)
return out + torch.ones((3,), dtype=torch.int64)
class Arange12(Module):
def forward(self, *args):
start = torch.add(torch.tensor(1), 1)
end = torch.add(torch.tensor(4), 1)
step = torch.add(torch.tensor(2.5), torch.tensor(4.1))
out = torch.arange(start, end, step)
return out + torch.ones((3,), dtype=torch.float)
verify_model(Arange1().float().eval())
verify_model(Arange2().float().eval())
verify_model(Arange3().float().eval())
verify_model(Arange4().float().eval())
verify_model(Arange5().float().eval())
verify_model(Arange6().float().eval())
verify_model(Arange7().float().eval())
verify_model(Arange8().float().eval())
verify_model(Arange9().float().eval())
verify_model(Arange10().float().eval())
verify_model(Arange11().float().eval())
verify_model(Arange12().float().eval())
@tvm.testing.uses_gpu
def test_forward_mesh_grid():
torch.set_grad_enabled(False)
class MeshGrid1(Module):
def forward(self, *args):
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 6])
grid_x, grid_y = torch.meshgrid([x, y])
return grid_x, grid_y
class MeshGrid2(Module):
def forward(self, *args):
x = torch.tensor([1, 2, 3], dtype=torch.float32)
y = torch.add(torch.tensor(5, dtype=torch.float32), 1)
grid_x, grid_y = torch.meshgrid([x, y])
return grid_x, grid_y
verify_model(MeshGrid1().float().eval())
verify_model(MeshGrid2().float().eval())
@tvm.testing.uses_gpu
def test_forward_abs():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
class Abs1(Module):
def forward(self, *args):
return args[0].abs()
input_data = torch.rand(input_shape).float()
verify_model(Abs1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_concatenate():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Concatenate1(Module):
def forward(self, *args):
return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)
class Concatenate2(Module):
def forward(self, *args):
a = (args[0][:, :, 0] + 2) * 7
b = (args[0][:, :, 1] + 3) * 11
c = (args[0][:, :, 2] + 5) * 13
return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)
input_data = torch.rand(input_shape).float()
verify_model(Concatenate1().float().eval(), input_data=input_data)
verify_model(Concatenate2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_relu():
torch.set_grad_enabled(False)
input_shape = [10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.ReLU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_prelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)
# Test when input channel > 1 and num parameters = 1
verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=input_data)
# Test when input dims < 2
verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=torch.randn(2))
@tvm.testing.uses_gpu
def test_forward_leakyrelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)
verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)
verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)
verify_model(
torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data
)
@tvm.testing.uses_gpu
def test_forward_elu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.randn(input_shape).float()
verify_model(torch.nn.ELU().eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=0.3).eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=1.0).eval(), input_data=input_data)
verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_celu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.CELU().eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=0.3).eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=1.0).eval(), input_data=input_data)
verify_model(torch.nn.CELU(alpha=1.3).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gelu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.GELU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_selu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.SELU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_silu():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.SiLU().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_softplus():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Softplus().eval(), input_data=input_data)
verify_model(torch.nn.Softplus(beta=1.5, threshold=20).eval(), input_data=input_data)
verify_model(torch.nn.Softplus(beta=5, threshold=10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_softsign():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Softsign().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_log_sigmoid():
torch.set_grad_enabled(False)
input_shape = [10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_adaptive_avgpool():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=input_data)
verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)
input_data = torch.rand([1, 3, 10]).float()
verify_model(torch.nn.AdaptiveAvgPool1d([1]).eval(), input_data=input_data)
verify_model(torch.nn.AdaptiveAvgPool1d([5]).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_adaptive_maxpool():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AdaptiveMaxPool2d([1, 1]).eval(), input_data=input_data)
verify_model(torch.nn.AdaptiveMaxPool2d([10, 10]).eval(), input_data=input_data)
input_data = torch.rand([1, 3, 10]).float()
verify_model(torch.nn.AdaptiveMaxPool1d([1]).eval(), input_data=input_data)
verify_model(torch.nn.AdaptiveMaxPool1d([5]).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool2d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)
verify_model(torch.nn.MaxPool2d(kernel_size=[2, 2], dilation=[2, 3]).eval(), input_data)
verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)
verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool2D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])
verify_model(MaxPool2D(), input_data=input_data)
class MaxPool2DWithIndices(Module):
def __init__(self):
super(MaxPool2DWithIndices, self).__init__()
self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)
def forward(self, *args):
output, indices = self.pool(args[0])
return output
class MaxPool2DWithIntStrides(Module):
def forward(self, *args):
# Makes kernel_size and strides a Relay expr to test converting back to int
x_shape = args[0].shape
kernel_size = [torch.tensor(x_shape[1]).int(), torch.tensor(x_shape[1]).int()]
strides = [torch.tensor(x_shape[0]).int(), torch.tensor(x_shape[0]).int()]
return torch.nn.functional.max_pool2d(args[0], kernel_size=[4, 4], stride=strides)
verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)
verify_model(MaxPool2DWithIntStrides().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool1d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)
verify_model(torch.nn.MaxPool1d(kernel_size=2, dilation=[1]).eval(), input_data)
verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)
verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool1D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool1d(args[0], kernel_size=10)
verify_model(MaxPool1D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_maxpool3d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)
verify_model(torch.nn.MaxPool3d(kernel_size=[2, 2, 2], dilation=[1, 2, 3]).eval(), input_data)
verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)
verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)
# A functional variant (default strides = None case)
class MaxPool3D(Module):
def forward(self, *args):
return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])
verify_model(MaxPool3D(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_split():
torch.set_grad_enabled(False)
input_shape = [4, 10]
class Split(Module):
def __init__(self, split_size_or_sections, dim):
super(Split, self).__init__()
self.split_size_or_sections = split_size_or_sections
self.dim = dim
def forward(self, *args):
return torch.split(args[0], self.split_size_or_sections, self.dim)
input_data = torch.rand(input_shape).float()
verify_model(Split(2, 0).float().eval(), input_data=input_data)
verify_model(Split(3, 1).float().eval(), input_data=input_data)
verify_model(Split(4, 1).float().eval(), input_data=input_data)
verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool1d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10]
class AvgPool1D2(Module):
def forward(self, *args):
return torch.nn.functional.avg_pool1d(args[0], kernel_size=[10])
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AvgPool1d(kernel_size=[10]).eval(), input_data=input_data)
verify_model(AvgPool1D2().float().eval(), input_data=input_data)
verify_model(
torch.nn.AvgPool1d(kernel_size=[5], stride=2, padding=2).eval(), input_data=input_data
)
@tvm.testing.uses_gpu
def test_forward_avgpool2d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class AvgPool2D2(Module):
def forward(self, *args):
return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
verify_model(AvgPool2D2().float().eval(), input_data=input_data)
verify_model(
torch.nn.AvgPool2d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data
)
@tvm.testing.uses_gpu
def test_forward_avgpool3d():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10, 10]
class AvgPool3D1(Module):
def forward(self, *args):
return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)
verify_model(AvgPool3D1().float().eval(), input_data=input_data)
verify_model(
torch.nn.AvgPool3d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data
)
@tvm.testing.uses_gpu
def test_forward_hardtanh():
torch.set_grad_enabled(False)
input_shape = [10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_conv():
torch.set_grad_enabled(False)
conv1d_input_shape = [1, 3, 10]
conv2d_input_shape = [1, 3, 10, 10]
class Conv2D1(Module):
def __init__(self):
super(Conv2D1, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv2D2(Module):
def __init__(self):
super(Conv2D2, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv2D3(Module):
def __init__(self):
super(Conv2D3, self).__init__()
self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D1(Module):
def __init__(self):
super(Conv1D1, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D2(Module):
def __init__(self):
super(Conv1D2, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
class Conv1D3(Module):
def __init__(self):
super(Conv1D3, self).__init__()
self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)
self.softmax = torch.nn.Softmax()
def forward(self, *args):
return self.softmax(self.conv(args[0]))
conv2d_input_data = torch.rand(conv2d_input_shape).float()
verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)
verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)
    # depthwise conv with channel multiplier 2
verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)
# group conv
verify_model(
torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),
input_data=torch.randn((1, 8, 16, 16)),
)
conv1d_input_data = torch.rand(conv1d_input_shape).float()
verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)
verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)
verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
@pytest.mark.parametrize("in_channels", [3], ids=lambda x: "in_channels=" + str(x))
@pytest.mark.parametrize("out_channels", [5], ids=lambda x: "out_channels=" + str(x))
@pytest.mark.parametrize("kernel_size", [3], ids=lambda x: "kernel_size=" + str(x))
@pytest.mark.parametrize("output_padding", [0, 1, 2], ids=lambda x: "output_padding=" + str(x))
@pytest.mark.parametrize("groups", [1], ids=lambda x: "groups=" + str(x))
@pytest.mark.parametrize("bias", [True, False], ids=lambda x: "bias=" + str(x))
def test_forward_conv_transpose(
in_channels, out_channels, kernel_size, output_padding, bias, groups
):
    # Note: we do not test with groups > 1 because TVM does not support
    # grouped conv transpose operations.
    # Output padding must be smaller than either stride or dilation, so we
    # make the stride 1 + output_padding.
stride = output_padding + 1
# Conv 3D Transpose Tests
conv3d_input_shape = [1, in_channels, 16, 16, 16]
conv3d_input_data = torch.rand(conv3d_input_shape).float()
conv3d_transpose = torch.nn.ConvTranspose3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
groups=groups,
bias=bias,
).eval()
verify_model(conv3d_transpose, conv3d_input_data)
# Conv 2D Transpose Tests
conv2d_input_shape = [1, in_channels, 128, 256]
conv2d_input_data = torch.rand(conv2d_input_shape).float()
conv2d_transpose = torch.nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
groups=groups,
bias=bias,
).eval()
verify_model(conv2d_transpose, conv2d_input_data)
    # Conv 1D Transpose Tests
conv1d_input_shape = [1, in_channels, 10]
conv1d_input_data = torch.rand(conv1d_input_shape).float()
conv1d_transpose = torch.nn.ConvTranspose1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
output_padding=output_padding,
groups=groups,
bias=bias,
).eval()
verify_model(conv1d_transpose, conv1d_input_data)
def test_forward_deform_conv():
torch.set_grad_enabled(False)
def test_run(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
out_height,
out_width,
offset_groups,
kh,
kw,
groups,
):
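        # deform_conv2d expects an offset tensor with 2 * offset_groups * kh * kw
        # channels: two offsets per kernel tap per offset group.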
input_shape = [batch_size, in_channels, in_height, in_width]
offset_shape = [batch_size, 2 * offset_groups * kh * kw, out_height, out_width]
weight_shape = [out_channels, in_channels // groups, kh, kw]
input_data = torch.rand(input_shape)
offset_data = torch.rand(offset_shape)
weight_data = torch.rand(weight_shape)
class DeformConv2D(Module):
def forward(self, *args):
return torchvision.ops.deform_conv2d(args[0], args[1], args[2])
verify_model(
DeformConv2D().float().eval(),
input_data=[input_data, offset_data, weight_data],
rtol=1e-4,
atol=1e-4,
)
batch_size = 4
in_channels, out_channels = 4, 6
in_height, in_width = 10, 10
out_height, out_width = 8, 8
offset_groups = 2
kh, kw = 3, 3
groups = 1
test_run(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
out_height,
out_width,
offset_groups,
kh,
kw,
groups,
)
batch_size = 5
in_channels, out_channels = 4, 6
in_height, in_width = 10, 10
out_height, out_width = 8, 8
offset_groups = 1
kh, kw = 3, 3
groups = 1
test_run(
batch_size,
in_channels,
out_channels,
in_height,
in_width,
out_height,
out_width,
offset_groups,
kh,
kw,
groups,
)
@tvm.testing.uses_gpu
def test_forward_threshold():
torch.set_grad_enabled(False)
input_shape = [1, 3]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_contiguous():
torch.set_grad_enabled(False)
input_shape = [10]
class Contiguous1(Module):
def forward(self, *args):
return args[0].contiguous()
input_data = torch.rand(input_shape).float()
verify_model(Contiguous1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_batchnorm():
def init_weight(m):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.normal_(m.bias)
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:
init_weight(bn.eval())
verify_model(bn.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_instancenorm():
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for ins_norm, inp in [
(torch.nn.InstanceNorm2d(16), inp_2d),
(torch.nn.InstanceNorm3d(16), inp_3d),
]:
verify_model(ins_norm.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_layernorm():
def init_weight(m):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.normal_(m.bias, 0.02)
inp_2d = torch.rand((1, 16, 10, 10))
inp_3d = torch.rand((1, 16, 10, 10, 10))
for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
init_weight(ln.eval())
verify_model(ln.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_groupnorm():
input_shape = [10, 6, 5, 5]
input_data = torch.rand(input_shape).float()
# Separate 6 channels into 3 groups
verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)
    # Put all 6 channels into a single group (equivalent to LayerNorm)
verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)
    # Separate 6 channels into 6 groups (equivalent to InstanceNorm)
verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)
input_shape = [1, 10, 4, 7]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reshape():
torch.set_grad_enabled(False)
input_shape = [2, 1, 10, 1, 10]
new_shape = [2, 1, 10, 10]
class Reshape1(Module):
def forward(self, *args):
return args[0].reshape(new_shape)
class Reshape2(Module):
def forward(self, *args):
return args[0].reshape([-1])
class Reshape3(torch.nn.Module):
def forward(self, x):
x_shape = x.shape
return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))
input_data = torch.rand(input_shape).float()
verify_model(Reshape1(), input_data=input_data)
verify_model(Reshape2(), input_data=input_data)
verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))
@tvm.testing.uses_gpu
def test_flatten():
class Flatten(Module):
def forward(self, x):
return torch.flatten(x)
class BatchFlatten(Module):
def forward(self, x):
return torch.flatten(x, start_dim=1)
inp = torch.rand((5, 2, 2))
verify_model(Flatten(), input_data=inp)
verify_model(BatchFlatten(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_transpose():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Transpose1(Module):
def forward(self, *args):
return args[0].transpose(2, 3)
class Transpose2(Module):
def forward(self, *args):
return args[0].transpose(-2, -1)
class Transpose3(Module):
def forward(self, *args):
return args[0].permute(0, 2, 3, 1)
input_data = torch.rand(input_shape).float()
verify_model(Transpose1().float().eval(), input_data=input_data)
verify_model(Transpose2().float().eval(), input_data=input_data)
verify_model(Transpose3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_size():
torch.set_grad_enabled(False)
input_shape = [1, 3]
class Size1(Module):
def forward(self, *args):
return float(args[0].size(0)) * args[0]
input_data = torch.rand(input_shape).float()
verify_model(Size1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_type_as():
torch.set_grad_enabled(False)
input_shape = [1, 3]
def _create_module(dtype):
class TypeAs(Module):
def forward(self, *args):
expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
return args[0].type_as(expected_type_tensor)
return TypeAs()
input_data = torch.randn(input_shape).float()
verify_model(_create_module(torch.float64), input_data=input_data)
verify_model(_create_module(torch.float32), input_data=input_data)
verify_model(_create_module(torch.int64), input_data=input_data)
verify_model(_create_module(torch.int32), input_data=input_data)
verify_model(_create_module(torch.int16), input_data=input_data)
verify_model(_create_module(torch.int8), input_data=input_data)
if torch.cuda.is_available():
check_fp16 = False
try:
            # Only check half precision on supported hardware.
if have_fp16(tvm.cuda(0).compute_version):
check_fp16 = True
        except Exception:
# If GPU is not enabled in TVM, skip the fp16 test.
pass
        # Temporarily disable the fp16 test
check_fp16 = False
if check_fp16:
verify_model(_create_module(torch.float16), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_view():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class View1(Module):
def forward(self, *args):
return args[0].view((1, 3 * 10 * 10))
class View2(Module):
def forward(self, *args):
return args[0].view(args[0].shape[0], -1)
class View3(Module):
def forward(self, *args):
d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
return args[0].view(args[0].shape[0], d1)
input_data = torch.rand(input_shape).float()
verify_model(View1().float().eval(), input_data=input_data)
verify_model(View2().float().eval(), input_data=input_data)
verify_model(View3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_select():
torch.set_grad_enabled(False)
input_shape = [5, 3, 10, 10]
class Select1(Module):
def forward(self, *args):
return args[0].select(1, 1)
class IndexedSelect(Module):
def __init__(self, inp, dim):
super().__init__()
self.inp = inp
self.dim = dim
if torch.cuda.is_available():
self.inp = self.inp.cuda()
def forward(self, index):
return torch.index_select(self.inp, self.dim, index)
input_data = torch.rand(input_shape).float()
verify_model(Select1().float().eval(), input_data=input_data)
# test negative indexing
verify_model(lambda x: x[-1], input_data=input_data)
x = torch.randn(3, 4)
indices = torch.tensor([0, 2])
verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
verify_model(IndexedSelect(x, 1).eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_clone():
torch.set_grad_enabled(False)
input_shape = [10]
class Clone1(Module):
def forward(self, *args):
return args[0].clone()
input_data = torch.rand(input_shape).float()
verify_model(Clone1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_gather():
torch.set_grad_enabled(False)
class Gather1(Module):
def forward(self, *args):
return torch.gather(args[0], 0, args[1])
class Gather2(Module):
def forward(self, *args):
return torch.gather(args[0], 1, args[1])
class Gather3(Module):
def forward(self, *args):
return torch.gather(args[0], 2, args[1])
input_data = torch.rand((4,)).float()
index = torch.tensor([1])
verify_model(Gather1().float().eval(), input_data=[input_data, index])
input_data = torch.rand((2, 2)).float()
index = torch.tensor([[1, 0], [0, 1]])
verify_model(Gather1().float().eval(), input_data=[input_data, index])
input_data = torch.tensor([[1, 2], [3, 4]])
index = torch.tensor([[0, 0], [1, 0]])
verify_model(Gather2().float().eval(), input_data=[input_data, index])
input_data = torch.rand((2, 2)).float()
index = torch.tensor([[1, 0], [0, 1]])
verify_model(Gather2().float().eval(), input_data=[input_data, index])
input_data = torch.rand((3, 3, 3)).float()
index = torch.tensor(
[
[[1, 0, 0], [1, 0, 1], [0, 1, 1]],
[[1, 1, 1], [1, 2, 1], [1, 0, 1]],
[[1, 2, 1], [1, 2, 1], [1, 2, 1]],
]
)
verify_model(Gather3().float().eval(), input_data=[input_data, index])
@tvm.testing.uses_gpu
def test_forward_logsoftmax():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class LogSoftmax1(Module):
def forward(self, *args):
return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])
input_data = torch.rand(input_shape).float()
verify_model(LogSoftmax1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_norm():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Norm1(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)
class Norm2(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)
class Norm3(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)
class Norm4(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)
class Norm5(Module):
def forward(self, *args):
return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)
class Norm6(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)
class Norm7(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(1), dim=None, keepdim=False)
class Norm8(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)
class Norm9(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)
class Norm10(Module):
def forward(self, *args):
return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(Norm1().float().eval(), input_data=input_data)
verify_model(Norm2().float().eval(), input_data=input_data)
verify_model(Norm3().float().eval(), input_data=input_data)
verify_model(Norm4().float().eval(), input_data=input_data)
verify_model(Norm5().float().eval(), input_data=input_data)
verify_model(Norm6().float().eval(), input_data=input_data)
verify_model(Norm7().float().eval(), input_data=input_data)
verify_model(Norm8().float().eval(), input_data=input_data)
verify_model(Norm9().float().eval(), input_data=input_data)
verify_model(Norm10().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class FroNorm1(Module):
def forward(self, *args):
return torch.norm(args[0])
class FroNorm2(Module):
def forward(self, *args):
return torch.norm(args[0], p="fro", dim=None, keepdim=True)
class FroNorm3(Module):
def forward(self, *args):
return torch.norm(args[0], p="fro", dim=(1), keepdim=True)
class FroNorm4(Module):
def forward(self, *args):
return torch.norm(args[0], dim=None, keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(FroNorm1().float().eval(), input_data=input_data)
verify_model(FroNorm2().float().eval(), input_data=input_data)
verify_model(FroNorm3().float().eval(), input_data=input_data)
verify_model(FroNorm4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_sigmoid():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_dense():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Dense1(Module):
def __init__(self):
super(Dense1, self).__init__()
self.linear = torch.nn.Linear(10, 7, bias=True)
def forward(self, *args):
return self.linear(args[0][0, 0])
class Dense2(Module):
def __init__(self):
super(Dense2, self).__init__()
self.linear = torch.nn.Linear(10, 7, bias=False)
def forward(self, *args):
return self.linear(args[0][0, 0])
input_data = torch.rand(input_shape).float()
verify_model(Dense1().float().eval(), input_data=input_data)
verify_model(Dense2().float().eval(), input_data=input_data)
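    # nn.Linear should lower to dense (plus bias_add); assert no multiply op
    # appears in the converted graph.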
trace = torch.jit.trace(Dense1(), [input_data])
mod, params = relay.frontend.from_pytorch(
trace,
[("input", input_shape)],
)
assert not any([op.name == "multiply" for op in list_ops(mod["main"])])
@tvm.testing.uses_gpu
def test_forward_linear():
torch.set_grad_enabled(False)
class Linear(Module):
def forward(self, input, weight, bias):
return F.linear(input, weight, bias)
class LinearNoBias(Module):
def forward(self, input, weight):
return F.linear(input, weight)
class LinearNested(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y, z):
return F.linear(x, F.linear(y, z))
input2d = torch.rand([2, 2]).float()
input3d = torch.rand([4, 3, 2]).float()
weight1d = torch.rand([2]).float()
weight2d = torch.rand([2, 2]).float()
weight3x2 = torch.rand([3, 2]).float()
bias1d = torch.rand([2]).float()
bias2d = torch.rand([2, 2]).float()
# 2D input, 2D weight, 1D bias
verify_model(Linear(), input_data=[input2d, weight2d, bias1d])
# 2D input, 2D weight, 2D bias
verify_model(Linear(), input_data=[input2d, weight2d, bias2d])
# 2D input, 2D weight, no bias
verify_model(LinearNoBias(), input_data=[input2d, weight2d])
verify_model(LinearNoBias(), input_data=[input2d, weight3x2])
    # 2D input, 1D weight, 1D bias is not supported by torch.nn.functional.linear()
# 2D input, 1D weight, no bias
verify_model(LinearNoBias(), input_data=[input2d, weight1d])
# 3D input, 2D weight, no bias
verify_model(LinearNoBias(), input_data=[input3d, weight3x2])
# 3D input, 2D weight, 1D bias
verify_model(Linear(), input_data=[input3d, weight2d, bias1d])
verify_model(LinearNested(), input_data=[torch.randn(10, 10) for _ in range(3)])
# TODO: Add the following cases when matmul(1D, _) is supported by TVM
# 1D input, 2D weight, 1D bias
# 1D input, 2D weight, no bias
# 1D input, 1D weight, scalar bias
# 1D input, 1D weight, no bias
@tvm.testing.uses_gpu
def test_forward_dropout():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])
verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])
verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)
verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])
@tvm.testing.uses_gpu
def test_forward_slice():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Slice1(Module):
def forward(self, *args):
return args[0][:, :, :, :3]
class Slice2(Module):
def forward(self, *args):
return args[0][0, :, :-3, :]
class Slice3(Module):
def forward(self, *args):
x0 = torch.tensor(2) - torch.tensor(1)
x1 = torch.tensor(3) + torch.tensor(1)
return args[0][:, x0:, 1:x1, :]
class SliceWithStride(torch.nn.Module):
def forward(self, x):
return x[..., 0::2] + x[..., 1::2]
class SliceWithStride2(torch.nn.Module):
def forward(self, x):
return x[0::2, 0::2] + x[1::2, 1::2]
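    # Slicing with a tensor-valued length yields a data-dependent shape, so the
    # model below is checked through the VM executor.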
class DynamicLengthSlice(torch.nn.Module):
def forward(self, values, length):
return values[0:length]
input_data = torch.rand(input_shape).float()
verify_model(Slice1(), input_data=input_data)
verify_model(Slice2(), input_data=input_data)
verify_model(Slice3(), input_data=input_data)
verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))
inp = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
slice_len = torch.tensor(2)
targets = ["llvm", "cuda"]
verify_trace_model(DynamicLengthSlice(), [inp, slice_len], targets)
@tvm.testing.uses_gpu
def test_forward_narrow():
torch.set_grad_enabled(False)
input_shape = [3, 3]
class Narrow1(Module):
def forward(self, *args):
return torch.narrow(args[0], 0, 0, 2)
class Narrow2(Module):
def forward(self, *args):
return torch.narrow(args[0], 1, 1, 2)
class Narrow3(Module):
def forward(self, *args):
begin = torch.tensor(2) - torch.tensor(1)
length = torch.tensor(1) * torch.tensor(2)
return torch.narrow(args[0], 1, begin, length)
input_data = torch.rand(input_shape).float()
verify_model(Narrow1(), input_data=input_data)
verify_model(Narrow2(), input_data=input_data)
verify_model(Narrow3(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_mean():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Mean1(Module):
def forward(self, *args):
return args[0].mean(2)
input_data = torch.rand(input_shape).float()
verify_model(Mean1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_expand():
torch.set_grad_enabled(False)
class Expand1(Module):
def forward(self, *args):
return args[0].expand((3, -1, -1, -1))
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Expand1().float().eval(), input_data=input_data)
class Expand2(Module):
def forward(self, *args):
return args[0].expand((3, 3, 3, 1))
input_shape = [3, 1]
input_data = torch.rand(input_shape).float()
verify_model(Expand2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_pow():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Pow1(Module):
def forward(self, *args):
return args[0] ** 2
input_data = torch.rand(input_shape).float()
verify_model(Pow1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_chunk():
torch.set_grad_enabled(False)
input_shape = [1, 3, 14, 14]
class Chunk1(Module):
def forward(self, *args):
chunks = args[0].chunk(7, 2)
return torch.cat(chunks, 2)
input_data = torch.rand(input_shape).float()
verify_model(Chunk1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_upsample():
class Upsample(Module):
def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
super().__init__()
self.size = size
self.scale = scale
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
return torch.nn.functional.interpolate(
x,
size=self.size,
scale_factor=self.scale,
mode=self.mode,
align_corners=self.align_corners,
)
inp = torch.rand((1, 3, 32, 32))
verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
verify_model(Upsample(scale=2, mode="nearest"), inp)
verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)
verify_model(Upsample(size=(64, 64), mode="bicubic", align_corners=True), inp)
verify_model(Upsample(scale=2, mode="bicubic", align_corners=True), inp)
verify_model(Upsample(size=(50, 50), mode="bicubic", align_corners=True), inp)
@tvm.testing.uses_gpu
def test_to():
"""test for aten::to(...)"""
class ToCPU(Module):
def forward(self, x):
return x.to("cpu")
class ToFloat(Module):
def forward(self, x):
return x.float()
class ToInt(Module):
def forward(self, x):
return x.int()
class ToLong(Module):
def forward(self, x):
return x.long()
class ToDouble(Module):
def forward(self, x):
return x.double()
class ToFloat16(Module):
def forward(self, x):
return x.to(torch.float16)
verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))
verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))
verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))
verify_model(ToInt().eval(), torch.tensor(0.8))
verify_model(ToLong().eval(), torch.tensor(0.8))
verify_model(ToDouble().eval(), torch.tensor(0.8))
verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))
verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))
@tvm.testing.uses_gpu
def test_adaptive_pool3d():
for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
inp = torch.rand(ishape)
verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)
verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)
verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)
verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_functional_pad():
torch.set_grad_enabled(False)
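    # Pad1's forward closes over the local `pad`; reassigning it before each
    # verify_model call traces the model with the new padding.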
pad = (0, 0)
class Pad1(Module):
def forward(self, *args):
return torch.nn.functional.pad(args[0], pad, "constant", 0)
input_data = torch.rand((3, 3, 4, 2))
pad = (1, 1)
verify_model(Pad1().float().eval(), input_data=input_data)
pad = (1, 1, 2, 2)
verify_model(Pad1().float().eval(), input_data=input_data)
pad = (0, 1, 2, 1, 3, 3)
verify_model(Pad1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ZeroPad2d(2).eval(), inp)
verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
inp = torch.rand((1, 2, 4))
verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)
inp = torch.rand((1, 2, 3))
verify_model(torch.nn.ConstantPad2d((3, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
inp = torch.rand((1, 2, 2, 2))
verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)
verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
inp = torch.rand((1, 3, 2, 2, 2))
verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)
verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
inp = torch.rand((1, 2, 4))
verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)
verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)
inp = torch.rand((2, 4, 5))
verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)
verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)
inp = torch.rand((2, 4, 5, 6))
verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
inp = torch.rand((1, 2, 4))
verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)
verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)
inp = torch.rand((2, 4, 5))
verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
inp = torch.rand((1, 1, 3, 3))
verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)
verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)
inp = torch.rand((2, 4, 5, 6))
verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
inp = torch.rand((1, 1, 3, 3, 3))
verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)
verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)
inp = torch.rand((7, 5, 4, 5, 6))
verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)
@tvm.testing.uses_gpu
def test_forward_upsample3d():
inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), inp)
verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), inp)
verify_model(
torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True).eval(), inp
)
def test_forward_nms():
"""dynamic Non-Maximum Suppression"""
torch.set_grad_enabled(False)
    class NonMaxSuppression(Module):
def __init__(self, iou_thres):
super().__init__()
self.iou_threshold = iou_thres
def forward(self, *args):
return torchvision.ops.nms(args[0], args[1], self.iou_threshold)
# Generate random input data
def _gen_rand_inputs(num_boxes):
box_len = 4
boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
boxes[:, 2] += boxes[:, 0]
boxes[:, 3] += boxes[:, 1]
scores = np.linspace(0, 1, num=num_boxes).astype("float32")
np.random.shuffle(scores)
return boxes, torch.from_numpy(scores)
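    # NMS returns a data-dependent number of boxes, so the test runs through
    # the VM executor via verify_trace_model.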
targets = ["llvm", "cuda"]
for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
in_boxes, in_scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSuppression(iou_thres), [in_boxes, in_scores], targets)
def test_forward_roi_align():
"""ROI align"""
torch.set_grad_enabled(False)
class ROIAlign(Module):
def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
super().__init__()
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.output_sizes = output_sizes
def forward(self, *args):
return torchvision.ops.roi_align(
args[0],
args[1],
self.output_sizes,
self.spatial_scale,
self.sampling_ratio,
)
in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
in_batch = torch.zeros((35, 1), dtype=torch.float)
in_boxes = torch.cat([in_batch, in_boxes], dim=1)
verify_model(ROIAlign(7), [in_data, in_boxes])
verify_model(ROIAlign((10, 10), 0.7, 5), [in_data, in_boxes])
verify_model(ROIAlign(15, 0.9, 3), [in_data, in_boxes])
@tvm.testing.uses_gpu
def test_conv3d():
for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
inp = torch.rand(ishape)
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp)
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)
# downsample
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)
@tvm.testing.uses_gpu
def test_conv3d_transpose():
for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
inp = torch.rand(ishape)
verify_model(
torch.nn.ConvTranspose3d(
in_channels=8, out_channels=33, kernel_size=3, stride=2
).eval(),
inp,
        )
verify_model(
torch.nn.ConvTranspose3d(
in_channels=8,
out_channels=20,
kernel_size=(3, 5, 2),
stride=(2, 1, 1),
padding=(0, 4, 2),
).eval(),
inp,
        )
verify_model(
torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp
)
verify_model(
torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),
inp,
)
# Model tests
@tvm.testing.uses_gpu
def test_resnet18():
torch.set_grad_enabled(False)
verify_model("resnet18", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_0():
torch.set_grad_enabled(False)
verify_model("squeezenet1_0", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_1():
torch.set_grad_enabled(False)
verify_model("squeezenet1_1", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_densenet121():
torch.set_grad_enabled(False)
verify_model("densenet121", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_inception_v3():
torch.set_grad_enabled(False)
verify_model("inception_v3", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_googlenet():
torch.set_grad_enabled(False)
verify_model("googlenet", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mnasnet0_5():
torch.set_grad_enabled(False)
verify_model("mnasnet0_5", atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_mobilenet_v2():
torch.set_grad_enabled(False)
verify_model("mobilenet_v2", atol=1e-4, rtol=1e-4)
"""
#TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
torch.set_grad_enabled(False)
verify_model("alexnet")
@tvm.testing.uses_gpu
def test_vgg11():
torch.set_grad_enabled(False)
verify_model("vgg11")
@tvm.testing.uses_gpu
def test_vgg11_bn():
torch.set_grad_enabled(False)
verify_model("vgg11_bn")
"""
@tvm.testing.uses_gpu
def test_custom_conversion_map():
def get_roi_align():
pool_size = 5
n_channels = 2 * (pool_size ** 2)
x = torch.rand(2, n_channels, 10, 10)
rois = torch.tensor(
[
                [0, 0, 0, 9, 9],  # each row is (batch_idx, x1, y1, x2, y2)
[0, 0, 5, 4, 9],
[0, 5, 5, 9, 9],
[1, 0, 0, 9, 9],
],
dtype=torch.float,
)
roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
return roi_align.eval(), [x, rois]
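    # A custom converter receives the op's inputs and input types and must
    # return the equivalent Relay expression.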
def convert_roi_align():
def _impl(inputs, input_types):
spatial_scale = inputs[2]
pooled_size = (inputs[3], inputs[4])
sampling_ratio = inputs[5]
return relay.op.vision.roi_align(
inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
)
return _impl
custom_map = {"torchvision::roi_align": convert_roi_align()}
model, inputs = get_roi_align()
verify_model(model, inputs, custom_map)
@tvm.testing.uses_gpu
def test_segmentation_models():
class SegmentationModelWrapper(Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, inp):
out = self.model(inp)
return out["out"]
fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)
deeplab = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]
verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)
verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_3d_models():
input_shape = (1, 3, 4, 56, 56)
resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()
verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)
def _get_default_vm_targets():
return ["llvm", "cuda"]
def verify_script_model(pt_model, ishapes, targets, idtype=None):
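    """Script pt_model with torch.jit.script and check it against TVM's VM executor."""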
script_module = torch.jit.script(pt_model)
verify_model_vm(script_module, ishapes, idtype=idtype, targets=targets)
def verify_trace_model(pt_model, idata, targets):
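    """Trace pt_model on idata and check the traced module against TVM's VM executor."""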
traced_model = torch.jit.trace(pt_model, idata)
ishapes = [data.shape for data in idata]
verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)
def convert_pt_to_tvm_type(idtype):
"""Accepts a pytorch dtype and returns string TVM dtype."""
# TVM does not support PyTorch complex dtypes
if idtype == torch.float64:
curr_dtype = "float64"
elif idtype == torch.float32:
curr_dtype = "float32"
elif idtype == torch.float16:
curr_dtype = "float16"
elif idtype == torch.bfloat16:
curr_dtype = "bfloat16"
elif idtype == torch.int64:
curr_dtype = "int64"
elif idtype == torch.int32:
curr_dtype = "int32"
elif idtype == torch.int16:
curr_dtype = "int16"
elif idtype == torch.int8:
curr_dtype = "int8"
elif idtype == torch.uint8:
curr_dtype = "uint8"
elif idtype == torch.bool:
curr_dtype = "bool"
else:
raise NotImplementedError("Unsupported dtype: {}".format(idtype))
return curr_dtype
def verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=None):
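    """Compile a scripted/traced module with the Relay VM and compare its output with PyTorch's."""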
    if targets is None:
        targets = ["llvm"]
    if not idtype:
        idtype = torch.float
    input_names = ["i{}".format(idx) for idx in range(len(ishapes))]
tvm_dtype = convert_pt_to_tvm_type(idtype)
input_dtypes = [tvm_dtype] * len(input_names)
input_shapes = list(zip(input_names, list(zip(ishapes, input_dtypes))))
if idata:
input_data = idata
# If no input_data provided, generate random data of specified dtype
else:
if idtype == torch.bool:
            input_data = [
                torch.randint(low=0, high=2, size=shape).bool() for shape in ishapes
            ]
        # Torch dtypes are float, complex, int, or bool. Complex is not supported,
        # so anything that is neither floating point nor bool must be an int.
elif not idtype.is_floating_point:
input_data = [
torch.randint(low=0, high=10, size=shape, dtype=idtype) for shape in ishapes
]
else:
input_data = [torch.randn(shape, dtype=idtype) for shape in ishapes]
# Compile via VM
mod, params = relay.frontend.from_pytorch(input_model, input_shapes)
for tgt in targets:
if not tvm.runtime.enabled(tgt):
continue
print("Running on target", tgt)
dev = tvm.device(tgt, 0)
evaluator = relay.create_executor("vm", mod=mod, device=dev, target=tgt).evaluate()
# Inference
for name, inp in zip(input_names, input_data):
params[name] = inp.numpy()
vm_res = evaluator(**params)
# Baseline result
with torch.no_grad():
pt_result = input_model(*input_data)
# Verify the accuracy
if isinstance(pt_result, tuple):
# handle multiple outputs
for i in range(len(pt_result)):
tvm_res = vm_res[i].numpy()
tvm.testing.assert_allclose(tvm_res, pt_result[i].numpy(), rtol=1e-5, atol=1e-5)
elif not isinstance(pt_result, torch.Tensor):
tvm_res = vm_res.numpy().item()
assert pt_result == tvm_res
else:
tvm.testing.assert_allclose(vm_res.numpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_control_flow():
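    # Data-dependent control flow cannot be captured by tracing, so these
    # models are scripted and executed on the VM via verify_script_model.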
class SimpleIf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, inp):
if inp.sum() > 0.0:
output = self.weight + inp
else:
output = self.weight - inp
return output
class NestedIf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, inp):
if inp.sum() > 0.0:
if inp.mean() > 0.0:
output = self.weight + inp
else:
output = self.weight - inp
else:
if inp.mean() >= 0.0:
output = self.weight * inp
else:
output = self.weight / inp
return output
class ScalarLoop(torch.nn.Module):
def forward(self, inp):
a = 0
for i in range(inp.size(0)):
b = i * i
b = b + 1
a += b
if a != 0:
a += 1
else:
a += 2
return a
class SimpleLoop(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * 2.0
c = a + b
a += c
return a
class LoopWithIf(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * 2.0
b = a + b
if b.sum() > 0.0:
a += b
else:
a -= b
return a
class NestedLoop(torch.nn.Module):
def forward(self, inp):
a = inp
for i in range(inp.size(0)):
b = a * float(i)
for j in range(inp.size(1)):
a += b * float(j)
return a
class SimpleScalarWhileLoop(torch.nn.Module):
def forward(self, inp):
a = 1
i = 0
while i <= inp.size(0):
a += i
i += 2
i = 0
            # also test a constant initial condition
while i < 10:
a += i
i += 3
return a
class SimpleWhileLoop(torch.nn.Module):
def forward(self, inp):
a = inp
i = 0
while i < inp.size(0):
a += a * float(i) * 2.0
i += 1
return a
models = [
SimpleIf(10, 20),
NestedIf(10, 20),
ScalarLoop(),
SimpleLoop(),
LoopWithIf(),
SimpleScalarWhileLoop(),
SimpleWhileLoop(),
NestedLoop(),
]
for pt_model in models:
verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_simple_rnn():
# The mixed tracing and scripting example from
# https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing
class DecisionGate(torch.nn.Module):
def forward(self, x):
if x.sum() > 0:
return x
else:
return -x
class Cell(torch.nn.Module):
def __init__(self, dg):
super(Cell, self).__init__()
self.dg = dg
self.linear = torch.nn.Linear(4, 4)
def forward(self, x, h):
new_h = torch.tanh(self.dg(self.linear(x)) + h)
return new_h, new_h
class RNNLoop(torch.nn.Module):
def __init__(self):
super().__init__()
x = torch.rand(10, 4, dtype=torch.float)
h = torch.rand(10, 4, dtype=torch.float)
self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))
def forward(self, xs):
h = torch.zeros(10, 4, dtype=torch.float)
y = torch.zeros(10, 4, dtype=torch.float)
for i in range(xs.size(0)):
y, h = self.cell(xs[i], h)
return y
verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_forward_reduce_sum():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ReduceSum1(Module):
def forward(self, *args):
return args[0].sum(1)
class ReduceSum2(Module):
def forward(self, *args):
return args[0].sum(dim=1, keepdim=False)
class ReduceSum3(Module):
def forward(self, *args):
return args[0].sum(dim=2, keepdim=True)
class ReduceSum4(Module):
def forward(self, *args):
return args[0].sum(dim=(2, 3), keepdim=True)
class ReduceSum5(Module):
def forward(self, *args):
return args[0].sum(dim=(2, 3), keepdim=False)
input_data = torch.rand(input_shape).float()
verify_model(ReduceSum1().float().eval(), input_data=input_data)
verify_model(ReduceSum2().float().eval(), input_data=input_data)
verify_model(ReduceSum3().float().eval(), input_data=input_data)
verify_model(ReduceSum4().float().eval(), input_data=input_data)
verify_model(ReduceSum5().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reduce_prod():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ReduceProd1(Module):
def forward(self, *args):
return args[0].prod(1)
class ReduceProd2(Module):
def forward(self, *args):
return args[0].prod(dim=1, keepdim=False)
class ReduceProd3(Module):
def forward(self, *args):
return args[0].prod(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ReduceProd1().float().eval(), input_data=input_data)
verify_model(ReduceProd2().float().eval(), input_data=input_data)
verify_model(ReduceProd3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmin():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ArgMin1(Module):
def forward(self, *args):
return args[0].argmin(1)
class ArgMin2(Module):
def forward(self, *args):
return args[0].argmin(dim=1, keepdim=False)
class ArgMin3(Module):
def forward(self, *args):
return args[0].argmin(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ArgMin1().float().eval(), input_data=input_data)
verify_model(ArgMin2().float().eval(), input_data=input_data)
verify_model(ArgMin3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_argmax():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ArgMax1(Module):
def forward(self, *args):
return args[0].argmax(1)
class ArgMax2(Module):
def forward(self, *args):
return args[0].argmax(dim=1, keepdim=False)
class ArgMax3(Module):
def forward(self, *args):
return args[0].argmax(dim=2, keepdim=True)
input_data = torch.rand(input_shape).float()
verify_model(ArgMax1().float().eval(), input_data=input_data)
verify_model(ArgMax2().float().eval(), input_data=input_data)
verify_model(ArgMax3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_std():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Std1(Module):
def forward(self, *args):
return args[0].std(1, unbiased=False)
class Std2(Module):
def forward(self, *args):
return args[0].std(dim=1, keepdim=False, unbiased=False)
class Std3(Module):
def forward(self, *args):
return args[0].std(dim=2, keepdim=True, unbiased=False)
class Std4(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)
class Std5(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)
class Std6(Module):
def forward(self, *args):
return args[0].std(unbiased=False)
class Std7(Module):
def forward(self, *args):
return args[0].std(dim=1, keepdim=False, unbiased=True)
class Std8(Module):
def forward(self, *args):
return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)
class Std9(Module):
def forward(self, *args):
return args[0].std(unbiased=True)
input_data = torch.rand(input_shape).float()
verify_model(Std1().float().eval(), input_data=input_data)
verify_model(Std2().float().eval(), input_data=input_data)
verify_model(Std3().float().eval(), input_data=input_data)
verify_model(Std4().float().eval(), input_data=input_data)
verify_model(Std5().float().eval(), input_data=input_data)
verify_model(Std6().float().eval(), input_data=input_data)
verify_model(Std7().float().eval(), input_data=input_data)
verify_model(Std8().float().eval(), input_data=input_data)
verify_model(Std9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_variance():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Variance1(Module):
def forward(self, *args):
return args[0].var(1, unbiased=False)
class Variance2(Module):
def forward(self, *args):
return args[0].var(dim=1, keepdim=False, unbiased=False)
class Variance3(Module):
def forward(self, *args):
return args[0].var(dim=2, keepdim=True, unbiased=False)
class Variance4(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)
class Variance5(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)
class Variance6(Module):
def forward(self, *args):
return args[0].var(unbiased=False)
class Variance7(Module):
def forward(self, *args):
return args[0].var(dim=1, keepdim=False, unbiased=True)
class Variance8(Module):
def forward(self, *args):
return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)
class Variance9(Module):
def forward(self, *args):
return args[0].var(unbiased=True)
input_data = torch.rand(input_shape).float()
verify_model(Variance1().float().eval(), input_data=input_data)
verify_model(Variance2().float().eval(), input_data=input_data)
verify_model(Variance3().float().eval(), input_data=input_data)
verify_model(Variance4().float().eval(), input_data=input_data)
verify_model(Variance5().float().eval(), input_data=input_data)
verify_model(Variance6().float().eval(), input_data=input_data)
verify_model(Variance7().float().eval(), input_data=input_data)
verify_model(Variance8().float().eval(), input_data=input_data)
verify_model(Variance9().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_rsub():
torch.set_grad_enabled(False)
class Rsub1(Module):
def forward(self, *args):
return torch.rsub(args[0], args[1])
class Rsub2(Module):
def forward(self, *args):
return torch.rsub(args[0], args[1], alpha=0.5)
d1 = torch.rand([1, 3]).float()
d2 = torch.rand([1, 3]).float()
d3 = torch.rand([1, 3]).int()
verify_model(Rsub1().float().eval(), input_data=[d1, d2])
verify_model(Rsub1().float().eval(), input_data=[d1, d3])
verify_model(Rsub2().float().eval(), input_data=[d1, d2])
verify_model(Rsub2().float().eval(), input_data=[d1, d3])
@tvm.testing.uses_gpu
def test_forward_embedding():
torch.set_grad_enabled(False)
input_data = torch.randint(0, 10, [2, 4]).long()
verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)
input_data = torch.randint(0, 4, [2, 3, 4]).long()
verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)
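    # sparse=True only affects gradient computation; the forward result is identical.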
input_data = torch.randint(0, 4, [2, 3, 4]).long()
verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_onehot():
torch.set_grad_enabled(False)
class OneHot1(Module):
def forward(self, *args):
return torch.nn.functional.one_hot(args[0], num_classes=3)
class OneHot2(Module):
def forward(self, *args):
return torch.nn.functional.one_hot(args[0], num_classes=5)
input_data = torch.arange(0, 5) % 3
verify_model(OneHot1().float().eval(), input_data=input_data)
input_data = torch.arange(0, 5) % 4
verify_model(OneHot2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isfinite():
torch.set_grad_enabled(False)
class IsFinite1(Module):
def forward(self, *args):
return torch.isfinite(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsFinite1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isnan():
torch.set_grad_enabled(False)
class IsNan1(Module):
def forward(self, *args):
return torch.isnan(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsNan1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_isinf():
torch.set_grad_enabled(False)
class IsInf1(Module):
def forward(self, *args):
return torch.isinf(args[0])
input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsInf1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class Clamp1(Module):
def forward(self, *args):
return torch.clamp(args[0], min=-0.5, max=0.5)
class Clamp2(Module):
def forward(self, *args):
return torch.clamp(args[0], min=-0.3)
class Clamp3(Module):
def forward(self, *args):
return torch.clamp(args[0], max=1.0)
class Clamp_MinExpr_MaxConstant(Module):
def forward(self, *args):
h, w = args[0].shape[2:]
amin = h / 100.0
return torch.clamp(args[0], min=amin, max=w)
input_data = torch.rand(input_shape).float()
verify_model(Clamp1().float().eval(), input_data=input_data)
verify_model(Clamp2().float().eval(), input_data=input_data)
verify_model(Clamp3().float().eval(), input_data=input_data)
verify_model(Clamp_MinExpr_MaxConstant().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_clamp_():
torch.set_grad_enabled(False)
class ClampInPlace(Module):
def __init__(self, min, max):
super(ClampInPlace, self).__init__()
self.min = min
self.max = max
def forward(self, *args):
return torch.clamp_(args[0], self.min, self.max)
for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
input_data = torch.rand(ishape).float()
verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_ones():
torch.set_grad_enabled(False)
class Ones1(Module):
def forward(self, *args):
return torch.ones(2, 3)
verify_model(Ones1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_ones_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class OnesLike1(Module):
def forward(self, *args):
return torch.ones_like(args[0])
class OnesLike2(Module):
def forward(self, *args):
return torch.ones_like(args[0], dtype=torch.int8)
class OnesLike3(Module):
def forward(self, *args):
return torch.ones_like(args[0], dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(OnesLike1().float().eval(), input_data=input_data)
verify_model(OnesLike2().float().eval(), input_data=input_data)
verify_model(OnesLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_zeros():
torch.set_grad_enabled(False)
class Zeros1(Module):
def forward(self, *args):
return torch.zeros(2, 3)
verify_model(Zeros1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_zeros_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class ZerosLike1(Module):
def forward(self, *args):
return torch.zeros_like(args[0])
class ZerosLike2(Module):
def forward(self, *args):
return torch.zeros_like(args[0], dtype=torch.int32)
class ZerosLike3(Module):
def forward(self, *args):
return torch.zeros_like(args[0], dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(ZerosLike1().float().eval(), input_data=input_data)
verify_model(ZerosLike2().float().eval(), input_data=input_data)
verify_model(ZerosLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_full():
torch.set_grad_enabled(False)
class Full1(Module):
def forward(self, *args):
return torch.full((2, 3), 3.14)
class Full2(Module):
def forward(self, *args):
return torch.full((1, 2, 3), 1.0, dtype=torch.int32)
verify_model(Full1().float().eval(), input_data=[])
verify_model(Full2().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_full_like():
torch.set_grad_enabled(False)
input_shape = [1, 3, 10, 10]
class FullLike1(Module):
def forward(self, *args):
return torch.full_like(args[0], 3.14)
class FullLike2(Module):
def forward(self, *args):
return torch.full_like(args[0], 22.22, dtype=torch.int32)
class FullLike3(Module):
def forward(self, *args):
return torch.full_like(args[0], 1.4, dtype=torch.float)
input_data = torch.rand(input_shape).float()
verify_model(FullLike1().float().eval(), input_data=input_data)
verify_model(FullLike2().float().eval(), input_data=input_data)
verify_model(FullLike3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_linspace():
torch.set_grad_enabled(False)
class Linspace1(Module):
def forward(self, *args):
return torch.linspace(5, 10, steps=100)
class Linspace2(Module):
def forward(self, *args):
return torch.linspace(-10, 10, steps=5)
class Linspace3(Module):
def forward(self, *args):
return torch.linspace(start=-10, end=10, steps=5)
class Linspace4(Module):
def forward(self, *args):
return torch.linspace(start=-10, end=10, steps=1)
class Linspace5(Module):
def forward(self, *args):
return torch.linspace(1, 2, 1, dtype=torch.int32)
class Linspace6(Module):
def forward(self, *args):
return torch.linspace(start=1, end=6, steps=2)
class Linspace7(Module):
def forward(self, *args):
return torch.linspace(1, 4, steps=100, dtype=torch.float32)
class Linspace8(Module):
def forward(self, *args):
return torch.linspace(1, 2, 1, dtype=torch.int16)
verify_model(Linspace1().float().eval())
verify_model(Linspace2().float().eval())
verify_model(Linspace3().float().eval())
verify_model(Linspace4().float().eval())
verify_model(Linspace5().float().eval())
verify_model(Linspace6().float().eval())
verify_model(Linspace7().float().eval())
verify_model(Linspace8().float().eval())
@tvm.testing.uses_gpu
def test_forward_take():
torch.set_grad_enabled(False)
class Take1(Module):
def forward(self, *args):
indices = torch.tensor([[0, 0], [1, 0]])
if torch.cuda.is_available():
indices = indices.cuda()
return torch.take(args[0], indices)
class Take2(Module):
def forward(self, *args):
return torch.take(args[0], args[1])
input_data = torch.tensor([[1, 2], [3, 4]])
verify_model(Take1().float().eval(), input_data=input_data)
indices = torch.tensor([[0, 0], [1, 0]])
verify_model(Take2().float().eval(), input_data=[input_data, indices])
indices = torch.tensor([0, -1])
verify_model(Take2().float().eval(), input_data=[input_data, indices])
@tvm.testing.uses_gpu
def test_forward_topk():
torch.set_grad_enabled(False)
class Topk1(Module):
def forward(self, *args):
return torch.topk(args[0], k=3)
class Topk2(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, dim=-2)
class Topk3(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, dim=3)
class Topk4(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, largest=True)
class Topk5(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, largest=False)
class Topk6(Module):
def forward(self, *args):
return torch.topk(args[0], k=3, sorted=True)
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Topk1().float().eval(), input_data=input_data)
verify_model(Topk2().float().eval(), input_data=input_data)
verify_model(Topk3().float().eval(), input_data=input_data)
verify_model(Topk4().float().eval(), input_data=input_data)
verify_model(Topk5().float().eval(), input_data=input_data)
verify_model(Topk6().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_logical_not():
torch.set_grad_enabled(False)
class LogicalNot1(Module):
def forward(self, *args):
return torch.logical_not(args[0])
input_data = torch.tensor([True, False])
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_not():
torch.set_grad_enabled(False)
class BitwiseNot1(Module):
def forward(self, *args):
return torch.bitwise_not(args[0])
input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([True, False])
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
torch.set_grad_enabled(False)
class BitwiseXor1(Module):
def forward(self, *args):
return torch.bitwise_xor(args[0], args[1])
class BitwiseXor2(Module):
def forward(self, *args):
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
if torch.cuda.is_available():
rhs = rhs.cuda()
return torch.bitwise_xor(args[0], rhs)
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(BitwiseXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_logical_xor():
torch.set_grad_enabled(False)
class LogicalXor1(Module):
def forward(self, *args):
return torch.logical_xor(args[0], args[1])
class LogicalXor2(Module):
def forward(self, *args):
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
if torch.cuda.is_available():
rhs = rhs.cuda()
return torch.logical_xor(args[0], rhs)
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(LogicalXor2().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_unary():
torch.set_grad_enabled(False)
class Sqrt1(Module):
def forward(self, *args):
return torch.sqrt(args[0])
class RSqrt1(Module):
def forward(self, *args):
return torch.rsqrt(args[0])
class Ceil1(Module):
def forward(self, *args):
return torch.ceil(args[0])
class Floor1(Module):
def forward(self, *args):
return torch.floor(args[0])
class Round1(Module):
def forward(self, *args):
return torch.round(args[0])
class Cos1(Module):
def forward(self, *args):
return torch.cos(args[0])
class Sin1(Module):
def forward(self, *args):
return torch.sin(args[0])
class Tan1(Module):
def forward(self, *args):
return torch.tan(args[0])
class Tanh1(Module):
def forward(self, *args):
return torch.tanh(args[0])
class Acos1(Module):
def forward(self, *args):
return torch.acos(args[0])
class Asin1(Module):
def forward(self, *args):
return torch.asin(args[0])
class Atan1(Module):
def forward(self, *args):
return torch.atan(args[0])
class Log1(Module):
def forward(self, *args):
return torch.log(args[0])
class Exp1(Module):
def forward(self, *args):
return torch.exp(args[0])
class Erf1(Module):
def forward(self, *args):
return torch.erf(args[0])
class Trunc1(Module):
def forward(self, *args):
return torch.trunc(args[0])
class Sign1(Module):
def forward(self, *args):
return torch.sign(args[0])
class Neg1(Module):
def forward(self, *args):
return torch.neg(args[0])
class Sinh1(Module):
def forward(self, *args):
return torch.sinh(args[0])
class Cosh1(Module):
def forward(self, *args):
return torch.cosh(args[0])
class Log2_1(Module):
def forward(self, *args):
return torch.log2(args[0])
class Log10_1(Module):
def forward(self, *args):
return torch.log10(args[0])
class Log1p_1(Module):
def forward(self, *args):
return torch.log1p(args[0])
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Sqrt1().float().eval(), input_data=input_data)
verify_model(RSqrt1().float().eval(), input_data=input_data)
verify_model(Ceil1().float().eval(), input_data=input_data)
verify_model(Floor1().float().eval(), input_data=input_data)
verify_model(Round1().float().eval(), input_data=input_data)
verify_model(Cos1().float().eval(), input_data=input_data)
verify_model(Cosh1().float().eval(), input_data=input_data)
verify_model(Sin1().float().eval(), input_data=input_data)
verify_model(Sinh1().float().eval(), input_data=input_data)
verify_model(Tan1().float().eval(), input_data=input_data)
verify_model(Tanh1().float().eval(), input_data=input_data)
verify_model(Acos1().float().eval(), input_data=input_data)
verify_model(Asin1().float().eval(), input_data=input_data)
verify_model(Atan1().float().eval(), input_data=input_data)
verify_model(Log1().float().eval(), input_data=input_data)
verify_model(Log2_1().float().eval(), input_data=input_data)
verify_model(Log10_1().float().eval(), input_data=input_data)
verify_model(Log1p_1().float().eval(), input_data=input_data)
verify_model(Exp1().float().eval(), input_data=input_data)
verify_model(Erf1().float().eval(), input_data=input_data)
verify_model(Trunc1().float().eval(), input_data=input_data)
verify_model(Sign1().float().eval(), input_data=input_data)
verify_model(Neg1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_where():
torch.set_grad_enabled(False)
class Where1(Module):
def forward(self, *args):
y = torch.ones([3, 2])
if torch.cuda.is_available():
y = y.cuda()
return torch.where(args[0] > 0, args[0], y)
class Where2(Module):
def forward(self, *args):
return torch.where(args[0] > 0, args[0], args[1])
class Where3(Module):
def forward(self, *args):
return torch.where(args[0])[0]
x = torch.rand([3, 2]).float()
verify_model(Where1(), input_data=[x])
y = torch.rand([3, 2])
verify_model(Where2(), input_data=[x, y])
# a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)
inp = torch.rand([10])
inp[3:8] = 0
verify_trace_model(Where3(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
torch.set_grad_enabled(False)
class Addcdiv1(Module):
def forward(self, *args):
t1 = torch.ones([3, 1])
t2 = torch.ones([1, 3])
if torch.cuda.is_available():
t1 = t1.cuda()
t2 = t2.cuda()
return torch.addcdiv(args[0], 0.1, t1, t2)
class Addcdiv2(Module):
def forward(self, *args):
return torch.addcdiv(args[0], 0.5, args[1], args[2])
input_data = torch.rand([1, 3]).float()
verify_model(Addcdiv1().float().eval(), input_data=input_data)
t1 = torch.rand([3, 1]).float()
t2 = torch.rand([1, 3]).float()
verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
torch.set_grad_enabled(False)
class Addcmul1(Module):
def forward(self, *args):
t1 = torch.ones([3, 1])
t2 = torch.ones([1, 3])
if torch.cuda.is_available():
t1 = t1.cuda()
t2 = t2.cuda()
return torch.addcmul(args[0], 0.1, t1, t2)
class Addcmul2(Module):
def forward(self, *args):
return torch.addcmul(args[0], 0.5, args[1], args[2])
input_data = torch.rand([1, 3]).float()
verify_model(Addcmul1().float().eval(), input_data=input_data)
t1 = torch.rand([3, 1]).float()
t2 = torch.rand([1, 3]).float()
verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
return
torch.set_grad_enabled(False)
class TrueDivide(Module):
def forward(self, *args):
return torch.true_divide(args[0], args[1])
dividend = torch.rand([5, 3]).float()
# divisor could be either tensor or scalar
divisor_tensor = torch.rand([5, 3]).float() + 0.5
divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
verify_model(
TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
)
verify_model(
TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
)
@tvm.testing.uses_gpu
def test_forward_is_floating_point():
torch.set_grad_enabled(False)
class IsFloatingPoint(Module):
def forward(self, arg):
# `torch.jit.trace` cannot handle a model whose output is a Python
# Bool, so `torch.jit.script` is used instead
return torch.is_floating_point(arg)
targets = _get_default_vm_targets()
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float64)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float32)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float16)
# todo(dvisnty): Run the test for bfloat16 when full bfloat16 support is implemented
# verify_script_model(IsFloatingPoint(), [(1,1)], targets, idtype=torch.bfloat16)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int64)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int32)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int16)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int8)
verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.uint8)
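# A minimal illustration (an added sketch, not part of the original test suite):
# scripting preserves the data-dependent Python bool return value that tracing
# would have frozen into a constant recorded at trace time.
def _trace_vs_script_demo():
    def fn(x):
        return torch.is_floating_point(x)
    scripted = torch.jit.script(fn)
    assert scripted(torch.zeros(1, dtype=torch.float32))
    assert not scripted(torch.zeros(1, dtype=torch.int32))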
@tvm.testing.uses_gpu
def test_forward_traced_function():
def fn(t1, t2):
return t1 + t2
tensor1 = torch.randn(3, 4)
tensor2 = torch.randn(3, 4)
verify_model(fn, input_data=[tensor1, tensor2])
@tvm.testing.uses_gpu
def test_forward_dtypes():
def fn(t1, t2):
return 2.5 * t1 + t2
for dt in [torch.int32, torch.int64, torch.double]:
tensor1 = torch.randn(3, 4).to(dtype=dt)
tensor2 = torch.randn(3, 4).to(dtype=dt)
verify_model(fn, input_data=[tensor1, tensor2])
class ModuleWithIntParameters(Module):
def __init__(self, arr):
super().__init__()
self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)
def forward(self, x):
return x.long() + self.param
shape = (10, 10)
param = torch.ones(shape, dtype=torch.long)
inp = torch.ones(shape, dtype=torch.int)
verify_model(ModuleWithIntParameters(param), input_data=inp)
@tvm.testing.uses_gpu
def test_weight_names():
tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
mod, params = relay.frontend.from_pytorch(tm, [("input", (2, 3))])
assert set(params.keys()) == set(n for n, p in tm.named_parameters())
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
# The test case doesn't make sense as a neural network;
# the issue popped up in the shared input/output embeddings of BERT,
# but this reproduces it more quickly
class Test(Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(5, 3)
def forward(self, x):
x = self.lin(x)
x = x @ self.lin.weight
return x
verify_model(Test(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
torch.set_grad_enabled(False)
class MatMul1(Module):
def forward(self, *args):
return torch.matmul(args[0], args[1])
# matrix x vector
tensor1 = torch.randn(3, 4)
tensor2 = torch.randn(4)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
# matrix x matrix
tensor1 = torch.randn(10, 4)
tensor2 = torch.randn(4, 10)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
# batched matrix x batched matrix
tensor1 = torch.randn(10, 3, 4)
tensor2 = torch.randn(10, 4, 5)
verify_model(
MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.batch_matmul"]
)
# batched matrix x broadcasted matrix
tensor1 = torch.randn(10, 3, 4)
tensor2 = torch.randn(4, 5)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
# broadcasted matrix x batched matrix
tensor1 = torch.randn(10, 4)
tensor2 = torch.randn(3, 4, 5)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
# batched matrix x batched matrix
tensor1 = torch.randn(1, 12, 14, 64)
tensor2 = torch.randn(1, 12, 64, 14)
verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
def test_forward_index():
torch.set_grad_enabled(False)
input_shape = [3, 4, 5, 6]
class Index0(Module):
def forward(self, x):
return x[[0, 1], [0, 2], :2, 4]
input_data = torch.rand(input_shape).float()
verify_model(Index0().eval(), input_data=input_data)
class Index1(Module):
def forward(self, x):
return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]
input_data = torch.rand(input_shape).float()
verify_model(Index1().eval(), input_data=input_data)
def test_logsumexp():
class Logsumexp(Module):
def __init__(self, dim, keepdim=False):
super().__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, x):
return torch.logsumexp(x, self.dim, self.keepdim)
input_shape = (100, 100)
input_data = torch.rand(input_shape)
verify_model(Logsumexp(0), input_data=input_data)
verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
# Also test on double
verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
class Stack(torch.nn.Module):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def forward(self, x):
return torch.stack((x, x), dim=self.axis)
inp = torch.randn(8, 8, 8)
verify_model(Stack(), input_data=inp)
verify_model(Stack(axis=-1), input_data=inp)
verify_model(Stack(axis=3), input_data=inp)
verify_model(Stack(axis=-4), input_data=inp)
def test_stack_dynamic():
class Stack(torch.nn.Module):
def forward(self, x):
tensor_list = []
for i in range(x.size(0)):
# this is a workaround to avoid generating impure aten::append op
tensor_list += [x[i]]
# relay tensor array only supports stacking on the first axis
return torch.stack(tensor_list, dim=0)
verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
class Unbind(torch.nn.Module):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def forward(self, x):
return torch.unbind(x, self.axis)
inp = torch.randn(8, 8, 8)
verify_model(Unbind(0), input_data=inp)
verify_model(Unbind(1), input_data=inp)
verify_model(Unbind(2), input_data=inp)
def test_forward_nonzero():
class Nonzero(Module):
def __init__(self, as_tuple=False):
super().__init__()
self.as_tuple = as_tuple
def forward(self, data):
return torch.nonzero(data, as_tuple=self.as_tuple)
inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype("float32"))
verify_trace_model(Nonzero(), [inp], ["llvm"])
def test_forward_scatter():
# an integer `dim` cannot be a traced input, so bind it via a closure
def test_fn_scatter(dim):
return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)
def test_fn_scatter_add(dim):
return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)
in_data = torch.zeros(3, 5)
in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
in_src = torch.rand(2, 5)
targets = ["llvm", "cuda"]
verify_trace_model(test_fn_scatter(0), [in_data, in_index, in_src], targets)
verify_trace_model(test_fn_scatter_add(0), [in_data, in_index, in_src], targets)
in_data = torch.zeros(2, 4)
in_index = torch.tensor([[2], [3]])
in_src = torch.rand(2, 1)
verify_trace_model(test_fn_scatter(1), [in_data, in_index, in_src], targets)
verify_trace_model(test_fn_scatter_add(1), [in_data, in_index, in_src], targets)
def test_forward_index_put():
# torch.index_put for 2D tensor and default accumulate (False)
def test_fn_index_put2():
return lambda data, xidx, yidx, values: torch.index_put(
data, indices=[xidx, yidx], values=values
)
# torch.index_put for 3D tensor and accumulate=True
def test_fn_index_put3a():
return lambda data, xidx, yidx, zidx, values: torch.index_put(
data, indices=[xidx, yidx, zidx], values=values, accumulate=True
)
shape = (3, 5)
in_data = torch.zeros(shape)
xidx = torch.tensor([0, 1, 2, 2])
yidx = torch.tensor([0, 1, 3, 4])
values = torch.tensor([2.0, 4.0, 7.0, 9.0])
targets = ["llvm", "cuda"]
verify_trace_model(test_fn_index_put2(), [in_data, xidx, yidx, values], targets)
shape = (3, 5, 3)
in_data = torch.zeros(shape)
xidx = torch.tensor([0, 1, 2, 2, 0])
yidx = torch.tensor([0, 1, 3, 4, 0])
zidx = torch.tensor([0, 1, 1, 2, 0])
values = torch.tensor([2.0, 4.0, 7.0, 9.0, 1.0])
verify_trace_model(test_fn_index_put3a(), [in_data, xidx, yidx, zidx, values], targets)
def test_numel():
class Numel(Module):
def forward(self, data):
return torch.tensor(torch.numel(data))
targets = _get_default_vm_targets()
verify_script_model(Numel(), [(1,)], targets)
verify_script_model(Numel(), [(3, 5)], targets)
verify_script_model(Numel(), [(3, 5, 8)], targets)
def test_forward_pretrained_bert_base_uncased():
######################################################################
# This is an example how to run BERT models using TVM
# ---------------------------------------------------
"""
Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert
# To get started, pretrained bert package needs to be installed as prerequisite.
.. code-block:: bash
# install bert package
pip install pytorch_pretrained_bert==0.6.2 --user
"""
try:
from pytorch_pretrained_bert import BertForMaskedLM, BertTokenizer
except ImportError:
print("Torch pretrained bert package must be installed to run this script.")
return
######################################################################
# Load the tokenizer and tokenize the input
# -----------------------------------------
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# Tokenized input
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
assert tokenized_text == [
"[CLS]",
"who",
"was",
"jim",
"henson",
"?",
"[SEP]",
"jim",
"[MASK]",
"was",
"a",
"puppet",
"##eer",
"[SEP]",
]
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated with the 1st and 2nd sentences (see the paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
######################################################################
# Load a pretrained PyTorch model bert-base-uncased
# -------------------------------------------------
# BERT model with a language modeling head
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()
######################################################################
# Predict all tokens with pytorch
# -------------------------------
with torch.no_grad():
torch_preds = model(tokens_tensor, segments_tensors)
######################################################################
# Make TorchScripted model via jit trace
# --------------------------------------
scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_1 = "input_ids"
input_2 = "input.2"
shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Compile the model with relay
# ----------------------------
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
######################################################################
# Execute on TVM
# --------------
dev = tvm.device(target, 0)
relay_model = graph_executor.create(relay_graph, relay_lib, dev)
relay_model.set_input(**relay_params)
relay_model.set_input(input_1, tokens_tensor)
relay_model.set_input(input_2, segments_tensors)
relay_model.run()
compiled_output = relay_model.get_output(0).numpy()
######################################################################
# Validate the outputs
# --------------------
# Compare the torch and tvm outputs
tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)
######################################################################
# Process the output
# ------------------
# Process the model output to token.
# Torch output to token
torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]
# TVM output to token
tvm_pred_idx = compiled_output[0, masked_index].argmax()
tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]
assert torch_pred_idx == tvm_pred_idx
assert torch_pred_token == tvm_pred_token
# Print the outputs
print("Torch top-1 id: {}, token: {}".format(torch_pred_idx, torch_pred_token))
print("TVM top-1 id: {}, token: {}".format(tvm_pred_idx, tvm_pred_token))
def test_convert_torch_script_with_input_types():
def model_fn(x, y):
x = x.to(dtype=torch.int32)
y = x + y
return y
ishape = (4, 5)
input_x = torch.rand(ishape, dtype=torch.float32)
input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
inputs = [input_x, input_y]
script_module = torch.jit.trace(model_fn, inputs)
fname = "tmp.pt"
torch.jit.save(script_module, fname)
loaded = torch.jit.load(fname)
os.remove(fname)
verify_model(loaded.eval(), input_data=inputs)
def expected(x_shape, y_shape):
# use a fixed order of args so the alpha-equivalence check can pass
x = relay.var("x", shape=x_shape, dtype="float32")
y = relay.var("y", shape=y_shape, dtype="int32")
args = [x, y]
x1 = relay.cast(x, "int32")
y1 = relay.add(x1, y)
mod = tvm.IRModule.from_expr(relay.Function(args, y1))
return mod["main"]
input_infos = [("input0", (ishape, "float")), ("input1", (ishape, "int"))]
mod, params = relay.frontend.from_pytorch(loaded, input_infos)
expected_mod = expected(ishape, ishape)
assert tvm.ir.structural_equal(expected_mod, mod["main"], map_free_vars=True)
def test_bincount():
def test_fn(x, weights=None):
return torch.bincount(x, weights=weights)
inp = torch.randint(0, 100, (10000,), dtype=torch.int64)
weights = torch.linspace(0, 100, steps=10000)
targets = ["llvm", "cuda"]
verify_trace_model(test_fn, [inp], targets)
verify_trace_model(test_fn, [inp, weights], targets)
def test_hard_swish():
examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]
for input in examples:
verify_model(torch.nn.Hardswish().eval(), input_data=input)
verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=input)
def test_hard_sigmoid():
examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]
for input in examples:
verify_model(torch.nn.Hardsigmoid().eval(), input_data=input)
verify_model(torch.nn.Hardsigmoid(inplace=True).eval(), input_data=input)
def test_cumsum():
def test_fn(dim, dtype=None):
return lambda x: torch.cumsum(x, dim=dim, dtype=dtype)
inp = torch.randint(0, 100, (10000,), dtype=torch.int32)
verify_model(test_fn(0), [inp])
verify_model(test_fn(0), [inp.to(torch.int64)])
verify_model(test_fn(0, dtype=torch.int64), [inp.to(torch.int64)])
inp = torch.randn((100, 100), dtype=torch.float32)
verify_model(test_fn(dim=0, dtype=torch.float64), [inp])
verify_model(test_fn(dim=1), [inp])
inp = torch.randn((100, 100), dtype=torch.float32) > 0.5
verify_model(test_fn(dim=0, dtype=torch.int32), [inp])
def test_masked_fill():
def test_fn(x, mask):
return torch.masked_fill(x, mask, 0.0)
inp = torch.randn(100, 100)
verify_model(test_fn, [inp, inp > 0.5])
verify_model(test_fn, [inp.to(torch.float64), inp > 0.5])
def test_transformer():
model = torch.nn.Transformer(d_model=256, nhead=8, num_encoder_layers=6, num_decoder_layers=6)
model = model.eval()
src = torch.rand((10, 32, 256))
tgt = torch.rand((20, 32, 256))
verify_model(model.eval(), input_data=[src, tgt])
def test_argsort():
def test_fn(dim, descending):
return lambda x: torch.argsort(x, dim=dim, descending=descending)
inp = torch.randn(100)
verify_model(test_fn(0, True), [inp])
verify_model(test_fn(0, False), [inp])
inp = torch.randn(100, 100)
verify_model(test_fn(0, True), [inp])
verify_model(test_fn(0, False), [inp])
verify_model(test_fn(1, True), [inp])
verify_model(test_fn(1, False), [inp])
def test_sort():
def test_fn(dim, descending):
return lambda x: torch.sort(x, dim=dim, descending=descending)
inp = torch.randn(100)
verify_model(test_fn(0, True), [inp])
verify_model(test_fn(-1, False), [inp])
inp = torch.randn(100, 100)
verify_model(test_fn(0, True), [inp])
verify_model(test_fn(-2, False), [inp])
verify_model(test_fn(1, True), [inp])
verify_model(test_fn(-1, False), [inp])
def test_logical_and():
def test_fn(x, y):
return torch.logical_and(x, y)
a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
verify_model(test_fn, [a, b])
a = torch.tensor([True, False, True])
b = torch.tensor([True, False, False])
verify_model(test_fn, [a, b])
def test_masked_select():
def test_fn(x, mask):
return torch.masked_select(x, mask)
for shape in [(10,), (3, 4), (16, 32, 64)]:
x = torch.randn(*shape)
mask = x.ge(0.5)
verify_trace_model(test_fn, [x, mask], ["llvm", "cuda"])
def test_unique():
def test_fn(is_sorted, return_inverse, return_counts):
return lambda x: torch.unique(x, is_sorted, return_inverse, return_counts)
in_data = torch.randint(0, 20, (10,), dtype=torch.int32)
targets = ["llvm", "cuda"]
verify_trace_model(test_fn(True, True, True), [in_data], targets)
verify_trace_model(test_fn(True, False, True), [in_data], targets)
verify_trace_model(test_fn(True, True, False), [in_data], targets)
verify_trace_model(test_fn(True, False, True), [in_data], targets)
in_data = torch.randint(0, 20, (20,), dtype=torch.int64)
verify_trace_model(test_fn(True, True, True), [in_data], targets)
verify_trace_model(test_fn(True, False, True), [in_data], targets)
verify_trace_model(test_fn(True, True, False), [in_data], targets)
verify_trace_model(test_fn(True, False, True), [in_data], targets)
def test_forward_nll_loss():
torch.set_grad_enabled(False)
N, C = 10, 3
predictions = torch.rand((N, C)).float()
targets = torch.randint(0, 3, (N,))
weights = torch.tensor([1, 2, 3]).float()
verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
# multidimension nll loss (aten::nll_loss2d)
d1, d2 = 2, 3
predictions = torch.rand((N, C, d1, d2)).float()
targets = torch.randint(0, 3, (N, d1, d2))
verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
@tvm.testing.uses_gpu
def test_forward_flip():
torch.set_grad_enabled(False)
class Flip(Module):
def __init__(self, axis=0):
super().__init__()
self.axis = axis
def forward(self, x):
return x.flip([self.axis])
input = torch.randn(2, 3, 4)
verify_model(Flip(axis=0), input_data=input)
verify_model(Flip(axis=1), input_data=input)
verify_model(Flip(axis=2), input_data=input)
verify_model(Flip(axis=-1), input_data=input)
def test_annotate_span():
model = torchvision.models.resnet18().eval()
inp = torch.randn([1, 3, 224, 224])
trace = torch.jit.trace(model, inp).eval()
mod, params = relay.frontend.from_pytorch(
trace, [("input", inp.shape)], use_parser_friendly_name=True
)
relay.transform.AnnotateSpans()(mod)
@tvm.testing.uses_gpu
def test_all_any():
def test_fn(f, dim=None, keepdim=False):
return lambda x: f(x, dim=dim, keepdim=keepdim)
for f in [torch.all, torch.any]:
verify_model(test_fn(f, 0), [torch.rand(1, 2).bool()])
verify_model(test_fn(f, 0), [torch.arange(0, 3).to(torch.uint8)])
verify_model(test_fn(f, 1), [torch.rand(4, 2).bool()])
verify_model(test_fn(f, 0, keepdim=True), [torch.rand(4, 2).bool()])
@tvm.testing.uses_gpu
def test_searchsorted():
def test_fn(out_int32=False, right=False):
return lambda x, y: torch.searchsorted(x, y, out_int32=out_int32, right=right)
sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
values = torch.tensor([[3, 6, 9], [3, 6, 9]])
verify_model(test_fn(), [sorted_sequence, values])
verify_model(test_fn(out_int32=True), [sorted_sequence[0], values[0]])
verify_model(test_fn(right=True), [sorted_sequence, values])
sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
values = torch.tensor([[3, 6, 9], [4, 2, 7]])
verify_model(test_fn(), [sorted_sequence_1d, values])
verify_model(test_fn(), [sorted_sequence_1d, torch.tensor(6)])
@tvm.testing.uses_gpu
def test_bucketize():
def test_fn(out_int32=False, right=False):
return lambda x, y: torch.bucketize(x, y, out_int32=out_int32, right=right)
boundaries = torch.tensor([1, 3, 5, 7, 9])
values = torch.tensor([3, 6, 9])
verify_model(test_fn(), [values, boundaries])
verify_model(test_fn(out_int32=True, right=True), [values, boundaries])
@tvm.testing.uses_gpu
def test_roll():
def test_fn(shifts, dims):
return lambda x: torch.roll(x, shifts, dims)
x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
verify_model(test_fn(1, 0), [x])
verify_model(test_fn(-1, 0), [x])
verify_model(test_fn(shifts=(2, 1), dims=(0, 1)), [x])
@tvm.testing.uses_gpu
def test_einsum():
def test_fn(equation):
return lambda *x: torch.einsum(equation, *x)
x = torch.ones([2, 3])
y = torch.ones([3, 4])
z = torch.ones([4, 5])
verify_model(test_fn("ij,jk"), [x, y])
verify_model(test_fn("ij,jk,km->im"), [x, y, z])
if __name__ == "__main__":
pytest.main([__file__])
| Laurawly/tvm-1 | tests/python/frontend/pytorch/test_forward.py | Python | apache-2.0 | 135,425 | ["VisIt"] | 309bd046f1dc1b42a60c1006b616e47969ada13d9d3ac25023113cbee824dedb |
|
'''
Elements for building Deep Neural Networks with Keras.
---
This file is part of Nifty python package. Copyright (c) by Marcin Wojnarski.
Nifty is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
Nifty is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Nifty. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import absolute_import
import numpy as np
import keras.layers
from keras import backend as K
from keras.layers import Layer, Dense, Conv2D, Activation, Lambda, concatenate, add as layers_add
from keras.layers.normalization import BatchNormalization
from keras.initializers import RandomUniform
from keras.utils.generic_utils import get_custom_objects
from keras.utils.conv_utils import normalize_tuple
from keras.regularizers import l2
# nifty; whenever possible, use relative imports to allow embedding of the library inside higher-level packages;
# only when executed as a standalone file, for unit tests, do an absolute import
if __name__ != "__main__":
from ..util import isstring, istuple, islist
else:
from nifty.util import isstring, istuple, islist
#####################################################################################################################################################
#####
##### METRICS
#####
def mse_weighted(class_weight, normalize = True, scale = 1.0, channels_axis = -1):
"""
Mean-Squared Error (MSE) with class weighting along the channels axis. Unlike the standard Keras MSE, it can handle multi-dimensional tensors.
"""
assert all(w >= 0 for w in class_weight)
weights = np.array(class_weight, dtype = float)[np.newaxis, :]     # float dtype, so the in-place normalization below works for integer weights too
if normalize:
weights /= weights.sum()
weights *= scale
weights = K.constant(weights) # convert to a tensor
def mse_w(y_true, y_pred):
return K.mean(K.square(y_pred - y_true) * weights, axis = channels_axis)
return mse_w
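# A minimal numeric check (an added sketch, not part of the original file): with all-equal
# weights and normalize=True, each weight becomes 1/3 and the loss below evaluates to 1/3.
def _example_mse_weighted():
    y_true = K.constant(np.zeros((1, 2, 2, 3)))
    y_pred = K.constant(np.ones((1, 2, 2, 3)))
    loss = mse_weighted([1.0, 1.0, 1.0])(y_true, y_pred)
    return K.eval(loss)                 # every entry equals 1/3 after weight normalization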
def accuracy_weighted(class_weight, channels_axis = -1):
"""
Like standard classification accuracy, but with class weighting over multi-dimensional arrays
(1D/2D/3D spatial dimensions + channels as dimension no. -1 by default).
The result is calculated as a weighted average of point-wise (over channels) classification accuracy
over all spatial positions. The weights during averaging are assigned from the `class_weight` list/array according to
the ground-truth class that should have been predicted in a given spatial position.
"""
assert all(w >= 0 for w in class_weight)
weights = np.array(class_weight)
weights = K.constant(weights)
def acc_w(y_true, y_pred):
class_true = K.argmax(y_true, axis = channels_axis)
class_pred = K.argmax(y_pred, axis = channels_axis)
point_acc = K.cast(K.equal(class_true, class_pred), K.floatx())    # 1.0 where the prediction is correct, 0.0 elsewhere
point_acc = K.flatten(point_acc)
class_true = K.flatten(class_true)
weight_array = K.gather(weights, class_true)
return K.sum(point_acc * weight_array) / K.sum(weight_array)
return acc_w
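# Hypothetical usage sketch (layer sizes and the optimizer are assumptions): plugging the
# weighted loss and metric above into model.compile() for a channels-last segmentation head.
def _example_weighted_compile():
    from keras.models import Model
    from keras.layers import Input
    inp = Input(shape = (16, 16, 3))
    out = Conv2D(3, (3, 3), padding = 'same', activation = 'softmax')(inp)
    model = Model(inp, out)
    model.compile(optimizer = 'adam',
                  loss = mse_weighted([1.0, 2.0, 1.0]),
                  metrics = [accuracy_weighted([1.0, 2.0, 1.0])])
    return model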
#####################################################################################################################################################
#####
##### LAYERS & ACTIVATIONS
#####
def Relu(x): return Activation('relu')(x)
def LeakyReLU(x): return keras.layers.LeakyReLU()(x)
def Softmax(x): return Activation('softmax')(x)
get_custom_objects().update({'Relu': Relu})
# get_custom_objects().update({'Leaky_relu': LeakyReLU})
def relu_BN(y):
"Relu activation preceeded by BatchNormalization."
y = BatchNormalization()(y)
y = Relu(y)
return y
def leaky_BN(y):
"LeakyReLU activation preceeded by BatchNormalization."
y = BatchNormalization()(y)
y = keras.layers.LeakyReLU()(y)
return y
def conv2D_BN(y, *args, **kwargs):
"""Extra arguments:
- add: (optional) tensor or a list of tensors (typically a shortcut connection) to be added to the output
right after BatchNormalization, but before activation
"""
activation = kwargs.pop('activation', None)
if isstring(activation): activation = Activation(activation)
add = kwargs.pop('add', None)
if add is not None and not islist(add): add = [add]     # explicit None check: a Keras/TF tensor has no well-defined truth value
y = Conv2D(*args, **kwargs)(y)
y = BatchNormalization()(y)
if add: y = layers_add([y] + add)
if activation: y = activation(y)
return y
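# Hypothetical usage sketch (assumes `x` already has 32 channels): a simple residual step
# where the shortcut is added after BatchNormalization but before the final activation.
def _example_conv2D_BN(x):
    shortcut = x
    y = conv2D_BN(x, 32, (3, 3), padding = 'same', activation = 'relu')
    y = conv2D_BN(y, 32, (3, 3), padding = 'same', add = shortcut, activation = 'relu')
    return y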
#####################################################################################################################################################
#####
##### ADVANCED LAYERS
#####
class LocalNormalization(Layer):
"""
Shifts and rescales input activations using the local mean, standard deviation and magnitude of activations
of a given channel around a given pixel. The scaling parameters of the normalization are trainable.
Depending on the target and role of a given channel, the network can use LocalNormalization to either
locally normalize the channel (enhance contrast between neighboring activations), or de-normalize it.
The latter happens, for instance, when the channel's output must exhibit positive local correlation
(i.e., high activations should co-occur on neighboring spatial positions) - in such a case,
LocalNormalization will learn negative weights, so as to reinforce local correlation through negative normalization.
On the other hand, positive weights and positive normalization are learnt when the channel should expose
negative correlation between neighboring locations (e.g., when performing edge detection).
"""
def __init__(self, kernel_size = (7, 7), init_normal = 1.0, **kwargs):
"""
init_normal: initial value of (normal_dev+normal_mag) weight +/- uniform random shift of max. 0.05
"""
super(LocalNormalization, self).__init__(**kwargs)
self.kernel_size = normalize_tuple(kernel_size, 2, 'kernel_size')
self.init_normal = init_normal # initial value of (normal_dev+normal_mag), +/- random shift of max. 0.05
self.seed = None
def build(self, input_shape):
self.channel_axis = -1 # channel_axis = 1 if self.data_format == 'channels_first' else -1
depth = input_shape[self.channel_axis]
if depth is None: raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
# shift = 1.0: activations are translated such that their local mean == 0.0
# shift = 0.0: activations are not translated at all
# shift outside of <0.0,1.0>: excessive translation (towards mean or below 0.0)
self.shift = self.add_weight(name = 'shift', shape = (depth,), initializer = 'uniform', trainable = True)
# normal_dev & normal_mag initialized with ~self.init_normal/2 each (~0.5 by default)
mid = self.init_normal / 2
uniform_05 = RandomUniform(mid - .05, mid + .05, seed = self.seed)
# normal = 1.0: activations are normalized to local std.deviation == 1.0
# normal = 0.0: activations are not normalized at all
self.normal_dev = self.add_weight(name = 'normal_dev', shape = (depth,), initializer = uniform_05, trainable = True) #constraint = Clip(0.0, 1.0)
self.normal_mag = self.add_weight(name = 'normal_mag', shape = (depth,), initializer = uniform_05, trainable = True) #constraint = Clip(0.0, 1.0)
# scale <> 0.0: activations are rescaled by the factor of e^scale after normalization
# scale = 0.0: activations are not rescaled
self.scale = self.add_weight(name = 'scale', shape = (depth,), initializer = 'uniform', trainable = True) #constraint = Clip(-1.0, 1.0)
# self.offset = self.add_weight(name = 'offset', shape = (depth,), initializer = 'uniform', trainable = True)
super(LocalNormalization, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
# print 'LocalNormalization.kernel_size:', self.kernel_size
def mean2d(y):
y = K.pool2d(y, (self.kernel_size[0], 1), pool_mode = 'avg', padding = 'same')
y = K.pool2d(y, (1, self.kernel_size[1]), pool_mode = 'avg', padding = 'same')
return y
# return K.pool2d(y, self.kernel_size, pool_mode = 'avg', padding = 'same')
# (dy, dx) = self.kernel_size
# top = dy/2 + 1 # if even `dy`, averaging window is shifted to the top
# left = dx/2 + 1 # if even `dx`, averaging window is shifted to the left
#
# padding = ((top, dy-top), (left, dx-left))
#
# z = K.spatial_2d_padding(y, padding) # `y` padded with zeros
# s1 = K.cumsum(z, axis = -3) # cumulative sums along Y axis only
# s = K.cumsum(s1, axis = -2) # cumulative sums along (Y,X) axes
#
# t = s[...,dy:,dx:,:] + s[...,:-dy,:-dx,:] - s[...,dy:,:-dx,:] - s[...,:-dy,dx:,:]
#
# # t[0,0] = s[dy,dx] + s[0,0] - ... = cumsum(y)[0,0] + cumsum(y)[dy,dx] - ... = z[0,0] + (z[0,0]+...+z[dy,dx]) - ...
# # = area_sum(z, (1,1)...(dy,dx)) = area_sum(y, (0,0)...(dy-top,dx-left)) = area_sum(y, (0,0)...(dy-(dy/2+1), dx-(dx/2+1))) =
#
# return t / float(dx*dy)
# mean of `x` and x^2 in local area around given pixel
M = mean2d(x)
M2 = mean2d(x**2)
V = mean2d((x-M)**2)
eps = 0.001 #K.epsilon()
scale = K.exp(self.scale) / K.pow(M2 + eps, self.normal_mag/2) / K.pow(V + eps, self.normal_dev/2) #(V + eps) #K.exp(K.log(D + eps) * self.normal[None,None,:])
return (x - self.shift * M) * scale
# D = K.pool2d(x, self.kernel_size, pool_mode = 'avg', padding = 'same', data_format = 'channels_last') #self.data_format
# return x / K.exp(D * self.scale - self.offset)
def get_config(self):
config = {'kernel_size': self.kernel_size}
base_config = super(LocalNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
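# Usage sketch (the input size is an assumption): the layer is shape-preserving and can be
# dropped into a functional model like any other Keras layer.
def _example_local_normalization():
    from keras.models import Model
    from keras.layers import Input
    inp = Input(shape = (32, 32, 3))
    out = LocalNormalization(kernel_size = (5, 5))(inp)
    return Model(inp, out)              # output shape == input shape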
class FeaturesNormalization(Layer):
"""
Normalize input values along channels dimension, independently on every spatial position.
Meta-parameters of normalization are learnt during training.
Normalization that works along channels dimension. Does 2 things:
1) normalizes channel activations so that their sum on every spatial position is (roughly) equal to a predefined (but trainable) value
2) when total activation is small, adds random noise to small activations to stimulate training
"""
def __init__(self, **kwargs):
super(FeaturesNormalization, self).__init__(**kwargs)
self.seed = None
def build(self, input_shape):
self.channel_axis = -1 # channel_axis = 1 if self.data_format == 'channels_first' else -1
self.depth = input_shape[self.channel_axis] or 100
uniform_05 = RandomUniform(0.45, 0.55, seed = self.seed) # norm_dev & norm_mag initialized with ~0.5 each
# norm_dev = 1.0: features are normalized to std.deviation == 1.0
# norm_mag = 1.0: features are normalized to quadratic average (magnitude) == 1.0
# norm_abs = 1.0: features are normalized to mean absolute value == 1.0
self.norm_dev = self.add_weight(name = 'norm_dev', shape = (), initializer = uniform_05, trainable = True)
self.norm_mag = self.add_weight(name = 'norm_mag', shape = (), initializer = uniform_05, trainable = True)
self.norm_abs = self.add_weight(name = 'norm_abs', shape = (), initializer = 'uniform', trainable = True)
super(FeaturesNormalization, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
# statistics computed along features dimension, on every spatial position of the input tensor
A = K.mean(K.abs(x), axis = self.channel_axis) # mean absolute value
M1 = K.mean(x, axis = self.channel_axis) # mean value
M2 = K.mean(x**2, axis = self.channel_axis) # squared quadratic average
V = M2 - M1**2 # variance: V[X] = E[X^2] - E[X]^2
eps = 0.001 #K.epsilon()
norm = K.pow(V + eps, self.norm_dev/2) * K.pow(M2 + eps, self.norm_mag/2) * K.pow(A + eps, self.norm_abs)
return x / norm[...,None]
def compute_output_shape(self, input_shape):
return input_shape
class SmartNoise(Layer):
"""
When total or maximum absolute activation of input is small on a particular spatial position,
SmartNoise selectively adds uniform noise to individual activations to stimulate training.
If a particular activation is negative, noise is added by further decreasing its value
(i.e., the added noise has the same sign as the original value).
"""
def __init__(self, **kwargs):
super(SmartNoise, self).__init__(**kwargs)
self.seed = None
# self.supports_masking = True
def build(self, input_shape):
self.channel_axis = -1 # channel_axis = 1 if self.data_format == 'channels_first' else -1
uniform_0 = RandomUniform(-.05, +.05, seed = self.seed)
uniform_1 = RandomUniform(0.95, 1.05, seed = self.seed)
# uniform_3 = RandomUniform(2.95, 3.05, seed = self.seed)
# uniform_01 = RandomUniform(0.10, 0.15, seed = self.seed)
# uniform_001 = RandomUniform(0.01, 0.02, seed = self.seed)
self.scale = self.add_weight(name = 'scale', shape = (), initializer = uniform_1, trainable = True) # scale of added noise
self.sensitivity = self.add_weight(name = 'sensitivity', shape = (), initializer = uniform_0, trainable = True)
# self.reduction = self.add_weight(name = 'reduction', shape = (), initializer = uniform_3, trainable = True)
super(SmartNoise, self).build(input_shape) # Be sure to call this at the end
def call(self, x, training = None):
eps = 0.01
ax = K.abs(x)
M = K.mean((ax+eps) ** 4, axis = self.channel_axis) ** (1./4) # Minkowski average to focus more on the (few) large values than on the (many) smaller ones
noise = K.random_uniform(shape = K.shape(x), minval = -1.0, maxval = 1.0, seed = self.seed)
# xr = ax * K.exp(self.reduction)
# red = xr / (1 + xr**2)
red = 1 / (1 + ax) # individual noise reduction for each element of input
mag = K.exp(-M / K.exp(self.sensitivity)) * self.scale # global magnitude: if M = 0.0 -> large magnitude (1.0) ... if M >> 0.0 -> low magnitude (~0.0)
noisy = x + noise * red * mag[...,None]
return noisy
# return K.in_train_phase(noisy, x, training = training)
def compute_output_shape(self, input_shape):
return input_shape
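# Usage sketch for the two layers above (sizes are assumptions): per-position channel
# normalization followed by noise stimulation of weakly activated positions.
def _example_norm_and_noise():
    from keras.models import Model
    from keras.layers import Input
    inp = Input(shape = (8, 8, 16))
    out = SmartNoise()(FeaturesNormalization()(inp))
    return Model(inp, out)              # both layers preserve the input shape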
class SCS_Layer(Layer):
"""
Layer of fully-connected Signal-Control-Scale (SCS) neurons.
"""
def __init__(self, units, **kwargs):
assert kwargs.get('activation') is None
kwargs.pop('activation', None)
super(SCS_Layer, self).__init__(**kwargs)
self.units = units
self.seed = None
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
# input_all = np.prod(input_shape)
self.channel_axis = -1 # channel_axis = 1 if self.data_format == 'channels_first' else -1
# uniform_01 = RandomUniform(-.01, +.01, seed = None)
# (input_dim, self.units)
self.signal_w = self.add_weight(name = 'signal_w', shape = (input_dim, self.units), initializer = 'uniform', regularizer = l2(0.001), trainable = True)
self.signal_b = self.add_weight(name = 'signal_b', shape = (self.units,), initializer = 'uniform', regularizer = l2(0.001), trainable = True)
self.control_w = self.add_weight(name = 'control_w', shape = (input_dim, self.units), initializer = 'uniform', regularizer = l2(0.001), trainable = True)
self.control_b = self.add_weight(name = 'control_b', shape = (self.units,), initializer = 'uniform', regularizer = l2(0.001), trainable = True)
# self.scale = self.add_weight(name = 'scale', shape = (self.units,), initializer = RandomUniform(.95, 1.05), trainable = True)
# self.scale_w = self.add_weight(name = 'scale_w', shape = (input_dim, self.units), initializer = uniform_01, regularizer = l2(0.01), trainable = True)
# self.scale_b = self.add_weight(name = 'scale_b', shape = (self.units,), initializer = uniform_01, regularizer = l2(0.01), trainable = True)
super(SCS_Layer, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
def log1x(y):
""" ln(|x|+1) * sgn(x) """
return K.sign(y) * K.log(K.abs(y) + 1) #** self.scale
# x = K.expand_dims(K.flatten(x), 0)
signal = log1x (K.dot(x, self.signal_w) + self.signal_b)
control = K.sigmoid (K.dot(x, self.control_w) + self.control_b)
# scale = K.square (K.dot(x, self.scale_w) + self.scale_b)
# signal = log1x (K.bias_add(K.dot(x, self.signal_w), self.signal_b, data_format = 'channels_last'))
# control = K.sigmoid(K.bias_add(K.dot(x, self.control_w), self.control_b, data_format = 'channels_last'))
# scale = K.exp (K.bias_add(K.dot(x, self.scale_w), self.scale_b, data_format = 'channels_last'))
return signal * control #* self.scale
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
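# Hypothetical usage sketch: SCS neurons as a drop-in replacement for Dense; no extra
# activation is passed, since the layer applies its signal*control nonlinearity itself.
def _example_scs():
    from keras.models import Sequential
    model = Sequential()
    model.add(SCS_Layer(64, input_shape = (128,)))
    model.add(SCS_Layer(10))
    return model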
class Sparse(Dense):
"""
Like a Dense layer, but with a random subset of input features (permanently) dropped by multiplication with 0.0; the mask is drawn once at build time and shared by all neurons.
"""
def __init__(self, units, prob = 0.5, **kwargs):
super(Sparse, self).__init__(units, **kwargs)
self.prob = prob
def build(self, input_shape):
# draw a fixed binary mask once, at build time, so the dropped inputs stay permanent as documented
# (K.random_binomial would create a random op that is resampled on every evaluation)
mask = np.random.binomial(1, self.prob, size = tuple(int(d) for d in input_shape[1:]))
self.select_features = K.constant(mask, dtype = K.floatx())        # binary mask of features to be selected
super(Sparse, self).build(input_shape)
def call(self, x, training = None):
q = x * self.select_features
return super(Sparse, self).call(q)
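# Usage sketch (sizes are assumptions): a Dense layer where roughly half of the input
# features are zeroed out, with the subset chosen once at build time.
def _example_sparse():
    from keras.models import Sequential
    model = Sequential()
    model.add(Sparse(32, prob = 0.5, activation = 'relu', input_shape = (100,)))
    return model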
#####################################################################################################################################################
#####
##### BLOCKS
#####
def grouped_convolution(y, channels, groups, strides = 1, dilation_rate = 1):
"""Grouped convolution with `groups` number of groups, between layers of depth: channels[0] to channels[1].
When `groups`=1 this is just a standard convolution.
If `channels` is a single number, the same depth on input and output is assumed.
"""
if not istuple(channels): channels = (channels, channels)
if not istuple(groups): groups = (0, 0, groups)
(channels_in, channels_out) = channels
groups_H, groups_V, groups_C = groups
groups_total = sum(groups)
# if not groups:
# assert channels_out % groupsize == 0
# groups = channels_out // groupsize
#
# if not groupsize:
# assert channels_out % groups == 0
# groupsize = channels_out // groups
# if not channels:
# channels = groups * groupsize
# else:
# assert channels == groups * groupsize
# when groups==1 this is just a standard convolution
if groups == (0, 0, 1):
return Conv2D(channels_out, (3, 3), strides = strides, dilation_rate = dilation_rate, padding = 'same')(y)
depth_in = y.shape[-1]
assert channels_in == depth_in, "grouped_convolution(): declared no. of input channels (%s) differs from the actual depth of input layer (%s)" % (channels_in, depth_in)
assert channels_in % groups_total == 0, "grouped_convolution(): no. of input channels (%s) must be a multiple of the no. of groups (%s)" % (channels_in, groups_total)
assert channels_out % groups_total == 0, "grouped_convolution(): no. of output channels (%s) must be a multiple of the no. of groups (%s)" % (channels_out, groups_total)
group_in = channels_in // groups_total
group_out = channels_out // groups_total
# in a grouped convolutional layer, input & output channels are divided into groups and convolutions are performed separately
# within each group: between k-th input group and k-th output group; outputs are concatenated afterwards
paths = []
for k in range(groups_total):
shape = (1, 9) if k < groups_H else (9, 1) if k < groups_H + groups_V else (3, 3)
start = k * group_in
# bind `start` as a default argument: Python closures are late-binding, so a plain
# lambda would slice every group at the final value of `start` if re-evaluated later
group = Lambda(lambda x, s = start: x[..., s : s + group_in])(y)
layer = Conv2D(group_out, shape, strides = strides, dilation_rate = dilation_rate, padding = 'same')(group)
paths.append(layer)
return concatenate(paths)
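# A shape sketch (sizes are assumptions): 32 -> 64 channels through 8 square 3x3 groups,
# i.e. eight independent 4->8 convolutions whose outputs get concatenated.
def _example_grouped_convolution():
    from keras.models import Model
    from keras.layers import Input
    inp = Input(shape = (16, 16, 32))
    out = grouped_convolution(inp, (32, 64), groups = (0, 0, 8))
    return Model(inp, out)              # output depth: 64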
def resnext_unit(y, bottleneck, channels_out, paths, strides = 1, dilation_rate = 1, activation = 'relu'):
"""ResNeXt residual unit, optionally extended with spatial (vertical+horizontal) paths.
If `paths` is a triple (A,B,C), A is the no. of 1x9 (horizontal) paths, B: 9x1 (vertical), C: 3x3.
"""
if isstring(activation): activation = Activation(activation)
depth_in = y.shape[-1]
shortcut = y
# the residual block is reshaped as a bottleneck + grouped-convolution + rev-bottleneck, which is equivalent
# to the original formulation as a collection of paths, but makes the network more economical
y = Conv2D(bottleneck, (1, 1), padding = 'same')(y) # (1) bottleneck
y = activation(BatchNormalization()(y))
# create ResNeXT grouped convolutions (the middle element of paths)
y = grouped_convolution(y, bottleneck, paths, strides = strides, dilation_rate = dilation_rate) # (2) grouped convolution
y = activation(BatchNormalization()(y))
y = Conv2D(channels_out, (1, 1), padding = 'same')(y) # (3) rev-bottleneck
# batch normalization is employed after aggregating the transformations and before adding to the shortcut
y = BatchNormalization()(y)
# if input/output have different dimensions: because of a stride (spatial dimension), or because of a different depth,
# an extra 1x1 convolution is added on the shortcut connection to perform the adjustment
if strides not in [None, 1, (1, 1)] or depth_in != channels_out:
shortcut = Conv2D(channels_out, (1, 1), strides = strides, padding = 'same')(shortcut)
shortcut = BatchNormalization()(shortcut)
y = layers_add([shortcut, y])
y = activation(y)
return y
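# Usage sketch (all sizes are assumptions): one stride-2 ResNeXt stage with 4 horizontal,
# 4 vertical and 8 square paths on a 64-channel input, widening the output to 128 channels.
def _example_resnext_unit():
    from keras.models import Model
    from keras.layers import Input
    inp = Input(shape = (32, 32, 64))
    out = resnext_unit(inp, bottleneck = 64, channels_out = 128, paths = (4, 4, 8), strides = 2)
    return Model(inp, out)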
| mwojnars/nifty | deep/keras.py | Python | gpl-3.0 | 24,030 | ["NEURON"] | 22b8649ccdfefa0c39697b388dcfb684260615c1096767d4fb5564339ff828b0 |
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
# The MIT License (MIT)
# This code is part of the Random3Dcity package
# Copyright (c) 2015
# Filip Biljecki
# Delft University of Technology
# fbiljecki@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Generate CityGML files according to the building XML specifications.
"""
from lxml import etree
import argparse
import random
import numpy
import math
import uuid
import copy
#-- Parse command-line arguments
PARSER = argparse.ArgumentParser(description='Generator of CityGML files according to the XML of buildings.')
PARSER.add_argument('-i', '--filename',
help='XML file to read', required=True)
PARSER.add_argument('-o', '--directory',
help='Directory where to write CityGMLs', required=True)
PARSER.add_argument('-r', '--rotation',
help='Enable rotation (default is true; allowed values 0/1, True/False)', required=False)
PARSER.add_argument('-p', '--parts',
help='Enable building parts (default is true; allowed values 0/1, True/False)', required=False)
PARSER.add_argument('-id', '--id',
help='Generate a UUID for each polygon.', required=False)
PARSER.add_argument('-gr', '--geometricref',
help='Generate all geometric references (variants within LODs).', required=False)
PARSER.add_argument('-ov', '--solids',
help='Generate solids and semantic variants (ov = other variants).', required=False)
PARSER.add_argument('-s', '--street',
help='Generate a road network.', required=False)
PARSER.add_argument('-v', '--vegetation',
help='Generate vegetation.', required=False)
PARSER.add_argument('-rp', '--report',
help='Report on the progress. Disable when running with Python 3.', required=False)
def argRead(ar, default=None):
"""Corrects the argument input in case it is not in the format True/False."""
if ar == "0" or ar == "False":
ar = False
elif ar == "1" or ar == "True":
ar = True
elif ar is None:
if default:
ar = default
else:
ar = False
else:
raise ValueError("Argument value not recognised.")
return ar
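# A few illustrative calls (an added sketch, not part of the original script):
def _argRead_examples():
    assert argRead("1") is True
    assert argRead("False") is False
    assert argRead(None, default=True) is True
    assert argRead(None) is False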
ARGS = vars(PARSER.parse_args())
BUILDINGFILE = ARGS['filename']
DIRECTORY = ARGS['directory']
ROTATIONENABLED = argRead(ARGS['rotation'], True)
BUILDINGPARTS = argRead(ARGS['parts'], True)
ASSIGNID = argRead(ARGS['id'], True)
VARIANTS = argRead(ARGS['geometricref'], False)
SOLIDS = argRead(ARGS['solids'], False)
STREETS = argRead(ARGS['street'], False)
VEGETATION = argRead(ARGS['vegetation'], False)
REPORT = argRead(ARGS['report'], True)
if REPORT:
try:
from fish import ProgressFish
except ImportError:
print("--Package Fish (used for reporting) failed to load, hence reporting is disabled--")
#-- Just disable reporting if Fish fails to load
REPORT = False
#-- Name spaces
ns_citygml = "http://www.opengis.net/citygml/2.0"
ns_gml = "http://www.opengis.net/gml"
ns_bldg = "http://www.opengis.net/citygml/building/2.0"
ns_tran = "http://www.opengis.net/citygml/transportation/2.0"
ns_veg = "http://www.opengis.net/citygml/vegetation/2.0"
ns_xsi = "http://www.w3.org/2001/XMLSchema-instance"
ns_xAL = "urn:oasis:names:tc:ciq:xsdschema:xAL:2.0"
ns_xlink = "http://www.w3.org/1999/xlink"
ns_dem = "http://www.opengis.net/citygml/relief/2.0"
ns_fme = "http://www.safe.com/xml/xmltables"
nsmap = {
None: ns_citygml,
'gml': ns_gml,
'bldg': ns_bldg,
'tran': ns_tran,
'veg': ns_veg,
'xsi': ns_xsi,
'xAL': ns_xAL,
'xlink': ns_xlink,
'dem': ns_dem,
'fme': ns_fme
}
#-- Functions
def createCityGML(suffix):
"""Creates a CityGML foundation to be filled later by the remaining part of the script."""
CityModel = etree.Element("CityModel", nsmap=nsmap)
citymodelname = etree.SubElement(CityModel, "{%s}name" % ns_gml)
citymodelname.text = str(suffix)
boundedBy = etree.SubElement(CityModel, "{%s}boundedBy" % ns_gml)
Envelope = etree.SubElement(boundedBy, "{%s}Envelope" % ns_gml, srsDimension="3")
Envelope.attrib["srsName"] = "EPSG:28992"
lowercorner = etree.SubElement(Envelope, "{%s}lowerCorner" % ns_gml)
lowercorner.text = '0 0 0'
uppercorner = etree.SubElement(Envelope, "{%s}upperCorner" % ns_gml)
uppercorner.text = '4000 4000 25'
return CityModel
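#-- Minimal usage sketch (illustrative suffix, not called here):
#--   model = createCityGML('LOD2')
#--   print(etree.tostring(model, pretty_print=True).decode('utf-8'))
#-- yields an empty <CityModel> named 'LOD2' with the fixed
#-- 4000 x 4000 x 25 EPSG:28992 envelope defined above.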
def storeCityGML(suffix):
"Write the CityGML file."
citygml = etree.tostring(CityGMLs[suffix], pretty_print=True)
#-- Write the CityGML file
if str(suffix) == 'Ground Truth':
fname = DIRECTORY + '/' + 'groundTruth.gml'
else:
fname = DIRECTORY + '/' + str(suffix) + '.gml'
citygmlFile = open(fname, "w")
#-- Header of the XML
citygmlFile.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
citygmlFile.write("<!-- Generated by Random3Dcity (http://github.com/tudelft3d/Random3Dcity), a tool developed by Filip Biljecki at TU Delft. Version: 2015-03-11. -->\n")
citygmlFile.write(citygml.decode('utf-8'))
citygmlFile.close()
def verticesBody(o, x, y, z, h=None, top=None, override=None):
"""Calculates the vertices of the building block/body depending on the input."""
#-- If the h value is not supplied, it is set to zero
if not h:
h = 0.0
if top:
if top < 1.5:
z = z + float(top) * h
elif top is None:
if override:
z = override
else:
z = z + h
p = []
p0 = "%s %s %s" % (o[0],o[1],o[2])
p.append(p0)
p1 = "%s %s %s" % (o[0]+x,o[1],o[2])
p.append(p1)
p2 = "%s %s %s" % (o[0]+x,o[1]+y,o[2])
p.append(p2)
p3 = "%s %s %s" % (o[0],o[1]+y,o[2])
p.append(p3)
p4 = "%s %s %s" % (o[0],o[1],o[2]+z)
p.append(p4)
p5 = "%s %s %s" % (o[0]+x,o[1],o[2]+z)
p.append(p5)
p6 = "%s %s %s" % (o[0]+x,o[1]+y,o[2]+z)
p.append(p6)
p7 = "%s %s %s" % (o[0],o[1]+y,o[2]+z)
p.append(p7)
return p
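#-- Illustrative call (traced from the code above): the eight corners of a
#-- 10 x 8 x 6 block at the origin, p0-p3 on the ground and p4-p7 on top:
#--   verticesBody([0, 0, 0], 10, 8, 6)
#--   -> ['0 0 0', '10 0 0', '10 8 0', '0 8 0',
#--       '0 0 6', '10 0 6', '10 8 6', '0 8 6']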
def verticesBodyList(o, x, y, z, h=None, top=None):
"""Calculates the vertices of the building block/body as a list depending on the input. Redundant function."""
#-- If the h value is not supplied, it is set to zero
if not h:
h = 0.0
if top:
z = z + float(top) * h
p = []
p0 = [o[0],o[1],o[2]]
p.append(p0)
p1 = [o[0]+x,o[1],o[2]]
p.append(p1)
p2 = [o[0]+x,o[1]+y,o[2]]
p.append(p2)
p3 = [o[0],o[1]+y,o[2]]
p.append(p3)
p4 = [o[0],o[1],o[2]+z]
p.append(p4)
p5 = [o[0]+x,o[1],o[2]+z]
p.append(p5)
p6 = [o[0]+x,o[1]+y,o[2]+z]
p.append(p6)
p7 = [o[0],o[1]+y,o[2]+z]
p.append(p7)
return p
def verticesRoof(b, h, rtype, width=None):
"""Calculates the vertices of the building roof."""
#-- The basic information
o, x, y, z = b
#-- If no roof
if not h:
h = 0.0
#-- Roof points
r = []
if rtype == 'Gabled':
r0 = "%s %s %s" % (o[0]+.5*x, o[1], o[2]+z+h)
r.append(r0)
r1 = "%s %s %s" % (o[0]+.5*x, o[1]+y, o[2]+z+h)
r.append(r1)
elif rtype == 'Shed':
r0 = "%s %s %s" % (o[0], o[1], o[2]+z+h)
r.append(r0)
r1 = "%s %s %s" % (o[0], o[1]+y, o[2]+z+h)
r.append(r1)
elif rtype == 'Hipped' or rtype == 'Pyramidal':
r0 = "%s %s %s" % (o[0]+.5*x, o[1]+width, o[2]+z+h)
r.append(r0)
r1 = "%s %s %s" % (o[0]+.5*x, o[1]+y-width, o[2]+z+h)
r.append(r1)
return r
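#-- Illustrative call (traced from the code above): the gabled ridge of the
#-- same 10 x 8 x 6 block with a 3 m roof runs at mid-x along the y extent:
#--   verticesRoof(([0, 0, 0], 10, 8, 6), 3, 'Gabled')
#--   -> ['5.0 0 9', '5.0 8 9']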
def verticesOverhangs(b, p, h, rtype, ovh, r, width=None):
"""Calculates the vertices of the roof overhangs"""
#-- The basic information about the building
o, x, y, z = b
#-- Roof points
if r:
r0, r1 = r
#-- Overhang lengths
ovhx, ovhy = ovh
overhangs = []
interior = []
#-- Overhang points
if rtype == 'Gabled':
if ovhx > 0:
fx = (.5*x) / ovhx
ovhz = h / fx
else:
ovhz = 0
overhangs.append("")
overhangs[0] += r0
overhangs[0] += " %s %s %s" % (o[0]+.5*x, o[1]-ovhy, o[2]+z+h)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]+.5*x, o[1]+y+ovhy, o[2]+z+h)
overhangs[0] += " " + r1
overhangs[0] += " " + p[6]
overhangs[0] += " " + p[5]
overhangs[0] += " " + r0
#-- The above polygon has no interior
interior.append(None)
overhangs.append("")
overhangs[1] += r1
overhangs[1] += " %s %s %s" % (o[0]+.5*x, o[1]+y+ovhy, o[2]+z+h)
overhangs[1] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[1] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[1] += " %s %s %s" % (o[0]+.5*x, o[1]-ovhy, o[2]+z+h)
overhangs[1] += " " + r0
overhangs[1] += " " + p[4]
overhangs[1] += " " + p[7]
overhangs[1] += " " + r1
#-- The above polygon has no interior
interior.append(None)
eaves = o[2]+z-ovhz
elif rtype == 'Shed':
if ovhx > 0:
fx = x / ovhx
ovhz = h / fx
else:
ovhz = 0
overhangs.append("")
overhangs[0] += "%s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z+h+ovhz)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z+h+ovhz)
overhangs[0] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z+h+ovhz)
interior.append("")
interior[0] += r0
interior[0] += " " + r1
interior[0] += " " + p[6]
interior[0] += " " + p[5]
interior[0] += " " + r0
eaves = o[2]+z-ovhz
elif rtype == 'Hipped' or rtype == 'Pyramidal':
if ovhx > 0:
fx = (.5*x) / ovhx
ovhz = h / fx
fy = h / ovhz
ovhy = width / fy
else:
ovhy = 0
ovhz = 0
overhangs.append("")
overhangs[0] += "%s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " " + p[5]
overhangs[0] += " " + p[4]
overhangs[0] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[1] += "%s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[1] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[1] += " " + p[6]
overhangs[1] += " " + p[5]
overhangs[1] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[2] += "%s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[2] += " " + p[7]
overhangs[2] += " " + p[6]
overhangs[2] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[2] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[3] += "%s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[3] += " " + p[4]
overhangs[3] += " " + p[7]
overhangs[3] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[3] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
eaves = o[2]+z-ovhz
elif rtype == 'Flat':
overhangs.append("")
overhangs[0] += "%s %s %s" % (o[0]-ovhx,o[1]-ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx,o[1]-ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx,o[1]+y+ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]-ovhx,o[1]+y+ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]-ovhx,o[1]-ovhy,o[2]+z)
interior.append("")
interior[0] += p[4]
interior[0] += " " + p[7]
interior[0] += " " + p[6]
interior[0] += " " + p[5]
interior[0] += " " + p[4]
eaves = o[2]+z
ovhy_recalculated = ovhy
return overhangs, interior, eaves, ovhy_recalculated
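#-- Note on the slope arithmetic above (gabled case): the overhang extends the
#-- roof plane, so its drop ovhz follows from similar triangles. For example,
#-- with x = 10, h = 3 and ovhx = 0.5: fx = (.5 * 10) / 0.5 = 10 and
#-- ovhz = 3 / 10 = 0.3, i.e. the eaves end up 0.3 below the wall top.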
def wallOpeningOrganiser(openings):
"""Divide the openings per wall."""
if openings:
holes = [[], [], [], []]
opns = [[], [], [], []]
for i in range(0,4):
opns[i].append([])
opns[i].append([])
door = openings[0]
if door != '':
doorwall = int(door['wall'])
holes[doorwall].append(door['ring'])
opns[doorwall][0] = door
for o in openings[1]:
try:
windowwall = int(o['wall'])
except KeyError:
windowwall = int(o['side'])
holes[windowwall].append(o['ring'])
opns[windowwall][1].append(o)
else:
holes = None
opns = None
return holes, opns
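#-- Illustrative input/output (made-up opening dictionaries). openings is a
#-- pair of (door, list of windows), each carrying a 'wall' index and a 'ring':
#--   door = {'wall': 0, 'ring': 'd-ring'}
#--   windows = [{'wall': 2, 'ring': 'w-ring'}]
#--   holes, opns = wallOpeningOrganiser((door, windows))
#--   holes -> [['d-ring'], [], ['w-ring'], []]
#--   opns[0][0] -> the door dict; opns[2][1] -> [the window dict]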
def GMLPointList(point):
"""Translates the list of coordinates of one point to a string representation (GML)."""
x = point[0]
y = point[1]
z = point[2]
return "%s %s %s" % (x, y, z)
def multiGMLPointList(points):
"""Translates the list of multiple points to a string representation (GML)."""
l = ""
for t in points:
if len(l) > 0:
l += " "
l += GMLPointList(t)
return l
def GMLstring2points(pointstring):
"""Converts the list of points in string (GML) to a list."""
listPoints = []
#-- List of coordinates
coords = pointstring.split()
#-- Store the coordinate tuple
assert len(coords) % 3 == 0
for i in range(0, len(coords), 3):
listPoints.append([coords[i], coords[i+1], coords[i+2]])
return listPoints
def GMLreverser(pointlist):
"""Reverses the order of the points, i.e. the normal of the ring."""
revlist = pointlist[::-1]
return revlist
def GMLreversedRing(r):
"""Reverses a ring."""
gmllist = GMLstring2points(r)
revgmllist = GMLreverser(gmllist)
revring = multiGMLPointList(revgmllist)
return revring
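#-- Illustrative round trip through the three helpers above:
#--   GMLreversedRing('0 0 0 1 0 0 1 1 0') -> '1 1 0 1 0 0 0 0 0'
#-- i.e. the coordinate triplets stay intact while their order (and hence the
#-- normal of the ring) is flipped.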
def dormerVertices(dormers, p, h, rtype, oList, width):
"""Computes the vertices of a dormer."""
[o, x, y, z] = oList
dList = []
dListGML = []
for drm in dormers:
d = [[], [], [], [], [], []]
dGML = [[], [], [], [], [], []]
if rtype == 'Gabled':
xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1]]
d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif rtype == 'Shed':
xperimiter = (float(drm['origin'][1]) * x * 1.0) / h
xperimiter2 = (float(drm['size'][1]) * x * 1.0) / h + xperimiter
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif rtype == 'Hipped' or rtype == 'Pyramidal':
xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
yperimiter = (float(drm['origin'][1]) * width) / h
yperimiter2 = (float(drm['size'][1]) * width) / h + yperimiter
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1]]
d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif drm['side'] == 0:
d[1] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1]]
d[2] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1]]
d[4] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif drm['side'] == 2:
d[1] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1]]
d[2] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1]]
d[4] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[3] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
elif rtype == 'Flat':
#-- Valid only for roof windows
xperimiter = float(drm['origin'][1])
xperimiter2 = float(drm['size'][1]) + xperimiter
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2]]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2]]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
if d != [[], [], [], [], [], []]:
for i in range(0, 6):
dGML[i] = GMLPointList(d[i])
dList.append(d)
dListGML.append(dGML)
return dList, dListGML
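#-- Note on the xperimiter arithmetic above (gabled case): a dormer whose sill
#-- sits drm['origin'][1] above the wall top is pushed horizontally into the
#-- roof plane by the inverse of the slope h over .5 * x. For example, with
#-- x = 10 and h = 3, an origin height of 0.6 gives
#-- xperimiter = (0.6 * 10 * 0.5) / 3 = 1.0.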
def interiordormerVertices(dormers, p, h, rtype, oList, width, wallThickness, rWth, dormerTickness, topThickness, rWth2=None):
"""Computes the vertices of a dormer."""
[o, x, y, z] = oList
dList = []
dListGML = []
for drm in dormers:
d = [[], [], [], [], [], []]
dGML = [[], [], [], [], [], []]
if rtype == 'Gabled':
xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
intxper = (xperimiter + dormerTickness) - rWth
dper2 = ((drm['origin'][1] + float(drm['size'][1]) - dormerTickness) * x * 0.5) / h + rWth
hper1 = intxper * h / (.5 * x)
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + hper1]
d[2] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + hper1]
d[4] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[1][0]- dper2, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[1][0]- dper2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + hper1]
d[2] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + hper1]
d[4] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[4][0] + dper2, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[4][0] + dper2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif rtype == 'Shed':
xperimiter = (float(drm['origin'][1]) * x) / h
xperimiter2 = (float(drm['size'][1]) * x) / h + xperimiter
intxper = (xperimiter + dormerTickness) - rWth
dper2 = ((drm['origin'][1] + float(drm['size'][1]) - dormerTickness) * x) / h + rWth
hper1 = intxper * h / x
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + hper1]
d[2] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + hper1]
d[4] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[1][0] - dper2, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[1][0] - dper2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif rtype == 'Hipped' or rtype == 'Pyramidal':
xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
yperimiter = (float(drm['origin'][1]) * width) / h
yperimiter2 = (float(drm['size'][1]) * width) / h + yperimiter
intxper = (xperimiter + dormerTickness) - rWth
dper2 = ((drm['origin'][1] + float(drm['size'][1]) - dormerTickness) * x * 0.5) / h + rWth
hper1 = intxper * h / (.5 * x)
intyper = (yperimiter + dormerTickness) - rWth2
dper2_2 = ((drm['origin'][1] + float(drm['size'][1]) - dormerTickness) * width) / h + rWth2
hper2 = (intyper) * h / width
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + hper1]
d[2] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + hper1]
d[4] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[1][0]-xperimiter - dormerTickness, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[1][0] - dper2, p[1][1] + float(drm['origin'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[1][0] - dper2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + hper1]
d[2] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + hper1]
d[4] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[4][0]+xperimiter + dormerTickness, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[4][0]+ dper2, p[7][1] - float(drm['origin'][0]) - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[4][0]+ dper2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]) + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif drm['side'] == 0:
d[1] = [p[4][0]+float(drm['origin'][0]) + dormerTickness, p[4][1] + yperimiter + dormerTickness, p[5][2] + hper2]
d[2] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]) - dormerTickness, p[4][1] + yperimiter + dormerTickness, p[5][2] + hper2]
d[4] = [p[4][0]+float(drm['origin'][0]) + dormerTickness, p[4][1] + yperimiter + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]) - dormerTickness, p[4][1] + yperimiter + dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[4][0]+float(drm['origin'][0]) + dormerTickness, p[4][1] + dper2_2, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]) - dormerTickness, p[4][1] + dper2_2, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
elif drm['side'] == 2:
d[1] = [p[6][0]-float(drm['origin'][0]) - dormerTickness, p[6][1] - yperimiter - dormerTickness, p[5][2] + hper2]
d[2] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]) + dormerTickness, p[6][1] - yperimiter - dormerTickness, p[5][2] + hper2]
d[4] = [p[6][0]-float(drm['origin'][0]) - dormerTickness, p[6][1] - yperimiter - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[5] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]) + dormerTickness, p[6][1] - yperimiter - dormerTickness, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[0] = [p[6][0]-float(drm['origin'][0]) - dormerTickness, p[6][1] - dper2_2, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
d[3] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]) + dormerTickness, p[6][1] - dper2_2, p[5][2] + drm['origin'][1] + float(drm['size'][1]) - dormerTickness]
if d != [[], [], [], [], [], []]:
for i in range(0, 6):
dGML[i] = GMLPointList(d[i])
dList.append(d)
dListGML.append(dGML)
return dList, dListGML
def chimneyVertices(chimneys, p, h, rtype, oList, width):
"""
Computes the vertices of a chimney.
The origin in chimneys is different from the one in dormers, hence a separate function.
"""
[o, x, y, z] = oList
dList = []
dListGML = []
for drm in chimneys:
d = [[], [], [], [], [], [], [], []]
dGML = [[], [], [], [], [], [], [], []]
chHeight = float(drm['size'][2])
if rtype == 'Gabled':
xperimiter = float(drm['origin'][1])
xperimiter2 = float(drm['size'][1]) + xperimiter
zperimiter1 = (xperimiter * h) / (x*.5)
zperimiter2 = (xperimiter2 * h) / (x*.5)
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter1]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter1]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2]
d[7] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[6] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter1]
d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter1]
d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2]
d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2]
d[7] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[6] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
elif rtype == 'Shed':
xperimiter = float(drm['origin'][1])
xperimiter2 = float(drm['size'][1]) + xperimiter
zperimiter1 = (xperimiter * h) / x
zperimiter2 = (xperimiter2 * h) / x
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter1]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter1]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2]
d[7] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[6] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
elif rtype == 'Hipped' or rtype == 'Pyramidal':
xperimiter = float(drm['origin'][1])
xperimiter2 = float(drm['size'][1]) + xperimiter
yperimiter = (float(drm['origin'][1]) * width) / h
yperimiter2 = (float(drm['size'][1]) * width) / h + yperimiter
zperimiter1 = (xperimiter * h) / (x*.5)
zperimiter2 = (xperimiter2 * h) / (x*.5)
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter1]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter1]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2]
d[7] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[6] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
elif drm['side'] == 3:
d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter1]
d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter1]
d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2]
d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2]
d[7] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + zperimiter2 + chHeight]
d[6] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + zperimiter2 + chHeight]
elif rtype == 'Flat':
#-- Not valid for dormers
xperimiter = float(drm['origin'][1])
xperimiter2 = float(drm['size'][1]) + xperimiter
if drm['side'] == 1:
d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2]]
d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + chHeight]
d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + chHeight]
d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2]]
d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
d[7] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + chHeight]
d[6] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + chHeight]
#-- Skip chimneys that matched no roof type (note: d holds eight slots here)
if d != [[], [], [], [], [], [], [], []]:
for i in range(0, 8):
dGML[i] = GMLPointList(d[i])
dList.append(d)
dListGML.append(dGML)
return dList, dListGML
def adjustRoofFeatures(roofType, eaves, old_origin, overhang_x, overhang_y, side):
"""This function adjusts the location of the features of the roof for models of different geometric references."""
old_x = old_origin[0]
old_y = old_origin[1]
if roofType == 'Gabled' or roofType == 'Shed' or roofType == 'Hipped' or roofType == 'Pyramidal':
if side == 1 or side == 3:
adjusted_x = old_x + overhang_y
adjusted_y = old_y + eaves
elif side == 0 or side == 2:
adjusted_x = old_x + overhang_x
adjusted_y = old_y + eaves
elif roofType == 'Flat':
adjusted_x = old_x + overhang_y
adjusted_y = old_y + overhang_x
return [adjusted_x, adjusted_y]
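#-- Illustrative call (traced from the code above): a gabled-roof feature on
#-- side 1 is shifted by the y overhang and by the eaves height:
#--   adjustRoofFeatures('Gabled', 0.5, [2.0, 1.0], 0.2, 0.3, 1) -> [2.3, 1.5]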
def gabledRoof(XMLelement, p, r, override_wall=None, semantics=None, openings=None, roofopenings=None, rfWindows=None, embrasure=None, pList=None):
"""Constructs a building with a gabled roof."""
#-- Roof Surface
roof0 = "%s %s %s %s %s" % (r[0], r[1], p[7], p[4], r[0])
roof1 = "%s %s %s %s %s" % (r[1], r[0], p[5], p[6], r[1])
#-- Wall Surface
face0 = "%s %s %s %s %s %s" % (p[4], p[0], p[1], p[5], r[0], p[4])
if override_wall:
face1 = override_wall['wall']
else:
face1 = "%s %s %s %s %s" % (p[5], p[1], p[2], p[6], p[5])
face2 = "%s %s %s %s %s %s" % (p[6], p[2], p[3], p[7], r[1], p[6])
face3 = "%s %s %s %s %s" % (p[7], p[3], p[0], p[4], p[7])
if openings:
holes, opns = wallOpeningOrganiser(openings)
else:
holes = None
opns = None
if embrasure and openings:
embO = embrasuresGeometry(openings, pList, embrasure)
if semantics:
if roofopenings:
if rfWindows:
multiSurface2(XMLelement, roof0, "RoofSurface", roofopenings[3], 3, rfWindows[3])
multiSurface2(XMLelement, roof1, "RoofSurface", roofopenings[1], 3, rfWindows[1])
else:
multiSurface(XMLelement, roof0, "RoofSurface", roofopenings[3], 3)
multiSurface(XMLelement, roof1, "RoofSurface", roofopenings[1], 3)
else:
multiSurface(XMLelement, roof0, "RoofSurface")
multiSurface(XMLelement, roof1, "RoofSurface")
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face0, "WallSurface", holes[0], 3, embO[0])
else:
multiSurface(XMLelement, face0, "WallSurface", holes[0], 3, opns[0])
else:
multiSurface(XMLelement, face0, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face1, "WallSurface", holes[1], 3, embO[1])
else:
multiSurface(XMLelement, face1, "WallSurface", holes[1], 3, opns[1])
else:
multiSurface(XMLelement, face1, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face2, "WallSurface", holes[2], 3, embO[2])
else:
multiSurface(XMLelement, face2, "WallSurface", holes[2], 3, opns[2])
else:
multiSurface(XMLelement, face2, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face3, "WallSurface", holes[3], 3, embO[3])
else:
multiSurface(XMLelement, face3, "WallSurface", holes[3], 3, opns[3])
else:
multiSurface(XMLelement, face3, "WallSurface", None)
#-- Building part
for fc in override_wall['rest']:
multiSurface(XMLelement, fc, "WallSurface", None)
for fc in override_wall['roof']:
multiSurface(XMLelement, fc, "RoofSurface", None)
for fc in override_wall['outerfloor']:
multiSurface(XMLelement, fc, "OuterFloorSurface", None)
else:
if roofopenings is not None:
addsurface(False, XMLelement, roof0, roofopenings[3])
addsurface(False, XMLelement, roof1, roofopenings[1])
else:
addsurface(False, XMLelement, roof0)
addsurface(False, XMLelement, roof1)
if holes is not None:
if embrasure:
addSurfaceWithEmbrasure(False, XMLelement, face0, holes[0], embO[0])
addSurfaceWithEmbrasure(False, XMLelement, face1, holes[1], embO[1])
addSurfaceWithEmbrasure(False, XMLelement, face2, holes[2], embO[2])
addSurfaceWithEmbrasure(False, XMLelement, face3, holes[3], embO[3])
else:
addsurface(False, XMLelement, face0)
addsurface(False, XMLelement, face1)
addsurface(False, XMLelement, face2)
addsurface(False, XMLelement, face3)
else:
addsurface(False, XMLelement, face0)
addsurface(False, XMLelement, face1)
addsurface(False, XMLelement, face2)
addsurface(False, XMLelement, face3)
for fc in override_wall['rest']:
addsurface(False, XMLelement, fc)
for fc in override_wall['roof']:
addsurface(False, XMLelement, fc)
for fc in override_wall['outerfloor']:
addsurface(False, XMLelement, fc)
def shedRoof(XMLelement, p, r, override_wall=None, semantics=None, openings=None, roofopenings=None, rfWindows=None, embrasure=None, pList=None):
"""Constructs a building with a shed roof."""
#-- Roof Surface
roof1 = "%s %s %s %s %s" % (r[1], r[0], p[5], p[6], r[1])
#-- Wall Surface
face0 = "%s %s %s %s %s" % (r[0], p[0], p[1], p[5], r[0])
if override_wall:
face1 = override_wall['wall']
else:
face1 = "%s %s %s %s %s" % (p[5], p[1], p[2], p[6], p[5])
face2 = "%s %s %s %s %s" % (p[6], p[2], p[3], r[1], p[6])
face3 = "%s %s %s %s %s" % (r[1], p[3], p[0], r[0], r[1])
if openings:
holes, opns = wallOpeningOrganiser(openings)
else:
holes = None
opns = None
if embrasure and openings:
embO = embrasuresGeometry(openings, pList, embrasure)
if semantics:
if roofopenings:
if rfWindows:
multiSurface2(XMLelement, roof1, "RoofSurface", roofopenings[1], 3, rfWindows[1])
else:
multiSurface(XMLelement, roof1, "RoofSurface", roofopenings[1], 3)
else:
multiSurface(XMLelement, roof1, "RoofSurface")
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face0, "WallSurface", holes[0], 3, embO[0])
else:
multiSurface(XMLelement, face0, "WallSurface", holes[0], 3, opns[0])
else:
multiSurface(XMLelement, face0, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face1, "WallSurface", holes[1], 3, embO[1])
else:
multiSurface(XMLelement, face1, "WallSurface", holes[1], 3, opns[1])
else:
multiSurface(XMLelement, face1, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face2, "WallSurface", holes[2], 3, embO[2])
else:
multiSurface(XMLelement, face2, "WallSurface", holes[2], 3, opns[2])
else:
multiSurface(XMLelement, face2, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face3, "WallSurface", holes[3], 3, embO[3])
else:
multiSurface(XMLelement, face3, "WallSurface", holes[3], 3, opns[3])
else:
multiSurface(XMLelement, face3, "WallSurface", None)
#-- Building part
for fc in override_wall['rest']:
multiSurface(XMLelement, fc, "WallSurface", None)
for fc in override_wall['roof']:
multiSurface(XMLelement, fc, "RoofSurface", None)
for fc in override_wall['outerfloor']:
multiSurface(XMLelement, fc, "OuterFloorSurface", None)
else:
if roofopenings is not None:
addsurface(False, XMLelement, roof1, roofopenings[1])
else:
addsurface(False, XMLelement, roof1)
if holes is not None and embrasure:
addSurfaceWithEmbrasure(False, XMLelement, face0, holes[0], embO[0])
addSurfaceWithEmbrasure(False, XMLelement, face1, holes[1], embO[1])
addSurfaceWithEmbrasure(False, XMLelement, face2, holes[2], embO[2])
addSurfaceWithEmbrasure(False, XMLelement, face3, holes[3], embO[3])
else:
addsurface(False, XMLelement, face0)
addsurface(False, XMLelement, face1)
addsurface(False, XMLelement, face2)
addsurface(False, XMLelement, face3)
for fc in override_wall['rest']:
addsurface(False, XMLelement, fc)
for fc in override_wall['roof']:
addsurface(False, XMLelement, fc)
for fc in override_wall['outerfloor']:
addsurface(False, XMLelement, fc)
def hippedRoof(XMLelement, p, r, override_wall=None, semantics=None, openings=None, roofopenings=None, rfWindows=None, embrasure=None, pList=None):
"""Constructs a building with a hipped or pyramidal roof."""
#-- Roof Surface
#-- Pyramidal roof has the same point r0 and r1
if r[0] == r[1]:
roof0 = "%s %s %s %s" % (r[0], p[7], p[4], r[0])
roof1 = "%s %s %s %s" % (r[1], p[5], p[6], r[1])
else:
roof0 = "%s %s %s %s %s" % (r[0], r[1], p[7], p[4], r[0])
roof1 = "%s %s %s %s %s" % (r[1], r[0], p[5], p[6], r[1])
roofX = "%s %s %s %s" % (r[0], p[4], p[5], r[0])
roofY = "%s %s %s %s" % (r[1], p[6], p[7], r[1])
#-- Wall Surface
face0 = "%s %s %s %s %s" % (p[0], p[1], p[5], p[4], p[0])
if override_wall:
face1 = override_wall['wall']
else:
face1 = "%s %s %s %s %s" % (p[5], p[1], p[2], p[6], p[5])
face2 = "%s %s %s %s %s" % (p[2], p[3], p[7], p[6], p[2])
face3 = "%s %s %s %s %s" % (p[3], p[0], p[4], p[7], p[3])
if openings:
holes, opns = wallOpeningOrganiser(openings)
else:
holes = None
opns = None
if embrasure and openings:
embO = embrasuresGeometry(openings, pList, embrasure)
if semantics:
if roofopenings:
if rfWindows:
multiSurface2(XMLelement, roof0, "RoofSurface", roofopenings[3], 3, rfWindows[3])
multiSurface2(XMLelement, roof1, "RoofSurface", roofopenings[1], 3, rfWindows[1])
multiSurface2(XMLelement, roofX, "RoofSurface", roofopenings[0], 3, rfWindows[0])
multiSurface2(XMLelement, roofY, "RoofSurface", roofopenings[2], 3, rfWindows[2])
else:
multiSurface(XMLelement, roof0, "RoofSurface", roofopenings[3], 3)
multiSurface(XMLelement, roof1, "RoofSurface", roofopenings[1], 3)
multiSurface(XMLelement, roofX, "RoofSurface", roofopenings[0], 3)
multiSurface(XMLelement, roofY, "RoofSurface", roofopenings[2], 3)
else:
multiSurface(XMLelement, roof0, "RoofSurface")
multiSurface(XMLelement, roof1, "RoofSurface")
multiSurface(XMLelement, roofX, "RoofSurface")
multiSurface(XMLelement, roofY, "RoofSurface")
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face0, "WallSurface", holes[0], 3, embO[0])
else:
multiSurface(XMLelement, face0, "WallSurface", holes[0], 3, opns[0])
else:
multiSurface(XMLelement, face0, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face1, "WallSurface", holes[1], 3, embO[1])
else:
multiSurface(XMLelement, face1, "WallSurface", holes[1], 3, opns[1])
else:
multiSurface(XMLelement, face1, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face2, "WallSurface", holes[2], 3, embO[2])
else:
multiSurface(XMLelement, face2, "WallSurface", holes[2], 3, opns[2])
else:
multiSurface(XMLelement, face2, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face3, "WallSurface", holes[3], 3, embO[3])
else:
multiSurface(XMLelement, face3, "WallSurface", holes[3], 3, opns[3])
else:
multiSurface(XMLelement, face3, "WallSurface", None)
#-- Building part
for fc in override_wall['rest']:
multiSurface(XMLelement, fc, "WallSurface", None)
for fc in override_wall['roof']:
multiSurface(XMLelement, fc, "RoofSurface", None)
for fc in override_wall['outerfloor']:
multiSurface(XMLelement, fc, "OuterFloorSurface", None)
else:
if roofopenings is not None:
addsurface(False, XMLelement, roof0, roofopenings[3])
addsurface(False, XMLelement, roof1, roofopenings[1])
addsurface(False, XMLelement, roofX, roofopenings[0])
addsurface(False, XMLelement, roofY, roofopenings[2])
else:
addsurface(False, XMLelement, roof0)
addsurface(False, XMLelement, roof1)
addsurface(False, XMLelement, roofX)
addsurface(False, XMLelement, roofY)
if holes is not None and embrasure:
addSurfaceWithEmbrasure(False, XMLelement, face0, holes[0], embO[0])
addSurfaceWithEmbrasure(False, XMLelement, face1, holes[1], embO[1])
addSurfaceWithEmbrasure(False, XMLelement, face2, holes[2], embO[2])
addSurfaceWithEmbrasure(False, XMLelement, face3, holes[3], embO[3])
else:
addsurface(False, XMLelement, face0)
addsurface(False, XMLelement, face1)
addsurface(False, XMLelement, face2)
addsurface(False, XMLelement, face3)
for fc in override_wall['rest']:
addsurface(False, XMLelement, fc)
for fc in override_wall['roof']:
addsurface(False, XMLelement, fc)
for fc in override_wall['outerfloor']:
addsurface(False, XMLelement, fc)
def hippedAttic(CompositeSurface, intcoor, p, r, fel, cel, wallThickness, topThickness, atticbottom=False, roofopenings=None):
"""Constructs the interior of the attic of a hipped roof."""
[Xa, Ya, Xb, Yb] = intcoor
rA = str(r[0][0]) + ' ' + str(float(r[0][1]) + wallThickness) + ' ' + str(cel)
rB = str(r[1][0]) + ' ' + str(float(r[1][1]) - wallThickness) + ' ' + str(cel)
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
S = "%s %s %s %s" % (p0F, p1F, rA, p0F)
E = "%s %s %s %s %s" % (p1F, p2F, rB, rA, p1F)
N = "%s %s %s %s" % (p2F, p3F, rB, p2F)
W = "%s %s %s %s %s" % (p3F, p0F, rA, rB, p3F)
bottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
if roofopenings is not None:
addsurface(False, CompositeSurface, S, roofopenings[0])
addsurface(False, CompositeSurface, E, roofopenings[1])
addsurface(False, CompositeSurface, N, roofopenings[2])
addsurface(False, CompositeSurface, W, roofopenings[3])
else:
addsurface(False, CompositeSurface, S)
addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
if atticbottom is True:
addsurface(False, CompositeSurface, bottom)
def gabledAttic(CompositeSurface, intcoor, p, r, fel, cel, wallThickness, topThickness, atticbottom=False, roofopenings=None):
"""Constructs the interior of the attic of a gabled roof."""
[Xa, Ya, Xb, Yb] = intcoor
rA = str(r[0][0]) + ' ' + str(float(r[0][1]) + wallThickness) + ' ' + str(cel)
rB = str(r[1][0]) + ' ' + str(float(r[1][1]) - wallThickness) + ' ' + str(cel)
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
bottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
S = "%s %s %s %s" % (p0F, p1F, rA, p0F)
E = "%s %s %s %s %s" % (p1F, p2F, rB, rA, p1F)
N = "%s %s %s %s" % (p2F, p3F, rB, p2F)
W = "%s %s %s %s %s" % (p3F, p0F, rA, rB, p3F)
if roofopenings is not None:
addsurface(False, CompositeSurface, S, roofopenings[0])
addsurface(False, CompositeSurface, E, roofopenings[1])
addsurface(False, CompositeSurface, N, roofopenings[2])
addsurface(False, CompositeSurface, W, roofopenings[3])
else:
addsurface(False, CompositeSurface, S)
addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
if atticbottom is True:
addsurface(False, CompositeSurface, bottom)
def pyramidalAttic(CompositeSurface, intcoor, p, r, fel, cel, wallThickness, topThickness, atticbottom=False, roofopenings=None):
"""Constructs the interior of the attic of a pyramidal roof."""
[Xa, Ya, Xb, Yb] = intcoor
rA = str(r[0][0]) + ' ' + str(r[0][1]) + ' ' + str(cel)
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
bottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
S = "%s %s %s %s" % (p0F, p1F, rA, p0F)
E = "%s %s %s %s" % (p1F, p2F, rA, p1F)
N = "%s %s %s %s" % (p2F, p3F, rA, p2F)
W = "%s %s %s %s" % (p3F, p0F, rA, p3F)
if roofopenings is not None:
addsurface(False, CompositeSurface, S, roofopenings[0])
addsurface(False, CompositeSurface, E, roofopenings[1])
addsurface(False, CompositeSurface, N, roofopenings[2])
addsurface(False, CompositeSurface, W, roofopenings[3])
else:
addsurface(False, CompositeSurface, S)
addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
if atticbottom is True:
addsurface(False, CompositeSurface, bottom)
def shedAttic(CompositeSurface, intcoor, p, r, fel, cel, wallThickness, topThickness, atticbottom=False, roofopenings=None):
"""Constructs the interior of the attic of a shed roof."""
[Xa, Ya, Xb, Yb] = intcoor
rA = str(float(r[0][0]) + wallThickness) + ' ' + str(float(r[0][1]) + wallThickness) + ' ' + str(cel)
rB = str(float(r[1][0]) + wallThickness) + ' ' + str(float(r[1][1]) - wallThickness) + ' ' + str(cel)
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
bottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
top = "%s %s %s %s %s" % (rA, p1F, p2F, rB, rA)
S = "%s %s %s %s" % (p0F, p1F, rA, p0F)
N = "%s %s %s %s" % (p2F, p3F, rB, p2F)
W = "%s %s %s %s %s" % (p0F, rA, rB, p3F, p0F)
addsurface(False, CompositeSurface, top)
if roofopenings is not None:
addsurface(False, CompositeSurface, S, roofopenings[0])
#addsurface(False, CompositeSurface, E, roofopenings[1])
addsurface(False, CompositeSurface, N, roofopenings[2])
addsurface(False, CompositeSurface, W, roofopenings[3])
else:
addsurface(False, CompositeSurface, S)
#addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
if atticbottom is True:
addsurface(False, CompositeSurface, bottom)
def flatRoof(XMLelement, p, r, override_wall=None, semantics=None, openings=None, roofopenings=None, rfWindows=None, embrasure=None, pList=None):
"""Constructs a building with a flat roof."""
#-- Top face / Roof Surface
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
#-- Wall Surface
face0 = "%s %s %s %s %s" % (p[0], p[1], p[5], p[4], p[0])
if override_wall:
face1 = override_wall['wall']
else:
face1 = "%s %s %s %s %s" % (p[5], p[1], p[2], p[6], p[5])
face2 = "%s %s %s %s %s" % (p[2], p[3], p[7], p[6], p[2])
face3 = "%s %s %s %s %s" % (p[3], p[0], p[4], p[7], p[3])
if openings:
holes, opns = wallOpeningOrganiser(openings)
else:
holes = None
opns = None
if embrasure and openings:
embO = embrasuresGeometry(openings, pList, embrasure)
if semantics:
#-- Roofs
if roofopenings:
multiSurface2(XMLelement, faceTop, "RoofSurface", roofopenings[1], 3, rfWindows[1])
else:
multiSurface(XMLelement, faceTop, "RoofSurface", None)
#-- Walls
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face0, "WallSurface", holes[0], 3, embO[0])
else:
multiSurface(XMLelement, face0, "WallSurface", holes[0], 3, opns[0])
else:
multiSurface(XMLelement, face0, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face1, "WallSurface", holes[1], 3, embO[1])
else:
multiSurface(XMLelement, face1, "WallSurface", holes[1], 3, opns[1])
else:
multiSurface(XMLelement, face1, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face2, "WallSurface", holes[2], 3, embO[2])
else:
multiSurface(XMLelement, face2, "WallSurface", holes[2], 3, opns[2])
else:
multiSurface(XMLelement, face2, "WallSurface", None)
if openings:
if embrasure:
multiSurfaceWithEmbrasure(XMLelement, face3, "WallSurface", holes[3], 3, embO[3])
else:
multiSurface(XMLelement, face3, "WallSurface", holes[3], 3, opns[3])
else:
multiSurface(XMLelement, face3, "WallSurface", None)
#-- Building part
for fc in override_wall['rest']:
multiSurface(XMLelement, fc, "WallSurface", None)
for fc in override_wall['roof']:
multiSurface(XMLelement, fc, "RoofSurface", None)
for fc in override_wall['outerfloor']:
multiSurface(XMLelement, fc, "OuterFloorSurface", None)
else:
addsurface(False, XMLelement, faceTop)
if holes is not None and embrasure:
addSurfaceWithEmbrasure(False, XMLelement, face0, holes[0], embO[0])
addSurfaceWithEmbrasure(False, XMLelement, face1, holes[1], embO[1])
addSurfaceWithEmbrasure(False, XMLelement, face2, holes[2], embO[2])
addSurfaceWithEmbrasure(False, XMLelement, face3, holes[3], embO[3])
else:
addsurface(False, XMLelement, face0)
addsurface(False, XMLelement, face1)
addsurface(False, XMLelement, face2)
addsurface(False, XMLelement, face3)
for fc in override_wall['rest']:
addsurface(False, XMLelement, fc)
for fc in override_wall['roof']:
addsurface(False, XMLelement, fc)
for fc in override_wall['outerfloor']:
addsurface(False, XMLelement, fc)
def roofOverhangs(XMLelement, overhangs, interiors, semantics=None):
"""Add surfaces for overhangs."""
i = 0
if semantics:
for overhang in overhangs:
multiSurface(XMLelement, overhang, "RoofSurface", interiors, 3)
i += 1
else:
for overhang in overhangs:
plainMultiSurface(XMLelement, overhang)
i += 1
def openingRing(op, p):
"""Makes a linear ring of the feature (opening)."""
X = op['origin'][0]
Y = op['origin'][1]
width = op['size'][0]
height = op['size'][1]
if op['wall'] == 0:
ring = "%s %s %s " % (p[0][0]+X, p[0][1], p[0][2]+Y)
ring+= "%s %s %s " % (p[0][0]+X, p[0][1], p[0][2]+Y+height)
ring+= "%s %s %s " % (p[0][0]+X+width, p[0][1], p[0][2]+Y+height)
ring+= "%s %s %s " % (p[0][0]+X+width, p[0][1], p[0][2]+Y)
ring+= "%s %s %s" % (p[0][0]+X, p[0][1], p[0][2]+Y)
elif op['wall'] == 1:
ring = "%s %s %s " % (p[1][0], p[1][1]+X, p[1][2]+Y)
ring+= "%s %s %s " % (p[1][0], p[1][1]+X, p[1][2]+Y+height)
ring+= "%s %s %s " % (p[1][0], p[1][1]+X+width, p[1][2]+Y+height)
ring+= "%s %s %s " % (p[1][0], p[1][1]+X+width, p[1][2]+Y)
ring+= "%s %s %s" % (p[1][0], p[1][1]+X, p[1][2]+Y)
elif op['wall'] == 2:
ring = "%s %s %s " % (p[2][0]-X, p[2][1], p[2][2]+Y)
ring+= "%s %s %s " % (p[2][0]-X, p[2][1], p[2][2]+Y+height)
ring+= "%s %s %s " % (p[2][0]-X-width, p[2][1], p[2][2]+Y+height)
ring+= "%s %s %s " % (p[2][0]-X-width, p[2][1], p[2][2]+Y)
ring+= "%s %s %s" % (p[2][0]-X, p[2][1], p[2][2]+Y)
elif op['wall'] == 3:
ring = "%s %s %s " % (p[3][0], p[3][1]-X, p[3][2]+Y)
ring+= "%s %s %s " % (p[3][0], p[3][1]-X, p[3][2]+Y+height)
ring+= "%s %s %s " % (p[3][0], p[3][1]-X-width, p[3][2]+Y+height)
ring+= "%s %s %s " % (p[3][0], p[3][1]-X-width, p[3][2]+Y)
ring+= "%s %s %s" % (p[3][0], p[3][1]-X, p[3][2]+Y)
else:
raise ValueError("The door is positioned on an unknown wall.")
return ring
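#-- Illustrative call (made-up opening, p from verticesBodyList above): an
#-- opening on wall 1 becomes a rectangle in the wall plane x = p[1][0]:
#--   p = verticesBodyList([0, 0, 0], 10, 8, 6)
#--   openingRing({'wall': 1, 'origin': [2, 1], 'size': [1, 1.5]}, p)
#--   -> '10 2 1 10 2 2.5 10 3 2.5 10 3 1 10 2 1'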
def embrasuresGeometry(openings, p, embrasure):
"""Makes a linear ring of the feature (opening) with embrasure."""
embO = [[], [], [], []]
j = 0
for t in openings:
if j == 0:
currentType = 'Door'
elif j == 1:
currentType = 'Window'
j += 1
if type(t) is not list:
t = [t]
for op in t:
if op == '':
continue
X = float(op['origin'][0])
Y = float(op['origin'][1])
width = float(op['size'][0])
height = float(op['size'][1])
odict = {}
if op['wall'] == 0:
W0 = "%s %s %s" % (p[0][0]+X, p[0][1], p[0][2]+Y)
W1 = "%s %s %s" % (p[0][0]+X+width, p[0][1], p[0][2]+Y)
W2 = "%s %s %s" % (p[0][0]+X+width, p[0][1], p[0][2]+Y+height)
W3 = "%s %s %s" % (p[0][0]+X, p[0][1], p[0][2]+Y+height)
O0 = "%s %s %s" % (p[0][0]+X, p[0][1]+embrasure, p[0][2]+Y)
O1 = "%s %s %s" % (p[0][0]+X+width, p[0][1]+embrasure, p[0][2]+Y)
O2 = "%s %s %s" % (p[0][0]+X+width, p[0][1]+embrasure, p[0][2]+Y+height)
O3 = "%s %s %s" % (p[0][0]+X, p[0][1]+embrasure, p[0][2]+Y+height)
elif op['wall'] == 1:
W0 = "%s %s %s" % (p[1][0], p[1][1]+X, p[1][2]+Y)
W1 = "%s %s %s" % (p[1][0], p[1][1]+X+width, p[1][2]+Y)
W2 = "%s %s %s" % (p[1][0], p[1][1]+X+width, p[1][2]+Y+height)
W3 = "%s %s %s" % (p[1][0], p[1][1]+X, p[1][2]+Y+height)
O0 = "%s %s %s" % (p[1][0]-embrasure, p[1][1]+X, p[1][2]+Y)
O1 = "%s %s %s" % (p[1][0]-embrasure, p[1][1]+X+width, p[1][2]+Y)
O2 = "%s %s %s" % (p[1][0]-embrasure, p[1][1]+X+width, p[1][2]+Y+height)
O3 = "%s %s %s" % (p[1][0]-embrasure, p[1][1]+X, p[1][2]+Y+height)
elif op['wall'] == 2:
W0 = "%s %s %s" % (p[2][0]-X, p[2][1], p[2][2]+Y)
W1 = "%s %s %s" % (p[2][0]-X-width, p[2][1], p[2][2]+Y)
W2 = "%s %s %s" % (p[2][0]-X-width, p[2][1], p[2][2]+Y+height)
W3 = "%s %s %s" % (p[2][0]-X, p[2][1], p[2][2]+Y+height)
O0 = "%s %s %s" % (p[2][0]-X, p[2][1]-embrasure, p[2][2]+Y)
O1 = "%s %s %s" % (p[2][0]-X-width, p[2][1]-embrasure, p[2][2]+Y)
O2 = "%s %s %s" % (p[2][0]-X-width, p[2][1]-embrasure, p[2][2]+Y+height)
O3 = "%s %s %s" % (p[2][0]-X, p[2][1]-embrasure, p[2][2]+Y+height)
elif op['wall'] == 3:
W0 = "%s %s %s" % (p[3][0], p[3][1]-X, p[3][2]+Y)
W1 = "%s %s %s" % (p[3][0], p[3][1]-X-width, p[3][2]+Y)
W2 = "%s %s %s" % (p[3][0], p[3][1]-X-width, p[3][2]+Y+height)
W3 = "%s %s %s" % (p[3][0], p[3][1]-X, p[3][2]+Y+height)
O0 = "%s %s %s" % (p[3][0]+embrasure, p[3][1]-X, p[3][2]+Y)
O1 = "%s %s %s" % (p[3][0]+embrasure, p[3][1]-X-width, p[3][2]+Y)
O2 = "%s %s %s" % (p[3][0]+embrasure, p[3][1]-X-width, p[3][2]+Y+height)
O3 = "%s %s %s" % (p[3][0]+embrasure, p[3][1]-X, p[3][2]+Y+height)
            else:
                raise ValueError("The opening is positioned on an unknown wall.")
            #-- ringW: the opening itself, recessed into the wall by the embrasure depth;
            #-- ring0..ring3: the four reveal surfaces connecting the wall plane (W) to the recessed opening (O)
ringW = O0 + ' '
ringW+= O1 + ' '
ringW+= O2 + ' '
ringW+= O3 + ' '
ringW+= O0
ring0 = W0 + ' '
ring0+= O0 + ' '
ring0+= O3 + ' '
ring0+= W3 + ' '
ring0+= W0
ring1 = W0 + ' '
ring1+= W1 + ' '
ring1+= O1 + ' '
ring1+= O0 + ' '
ring1+= W0
ring2 = W2 + ' '
ring2+= O2 + ' '
ring2+= O1 + ' '
ring2+= W1 + ' '
ring2+= W2
ring3 = W2 + ' '
ring3+= W3 + ' '
ring3+= O3 + ' '
ring3+= O2 + ' '
ring3+= W2
odict['surfaces'] = [ring0, ring1, ring2, ring3]
odict['openings'] = [ringW]
odict['type'] = currentType
embO[op['wall']].append(odict)
return embO
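#-- Illustrative sketch (hypothetical names): embrasuresGeometry() returns one
#-- list per wall; each entry is a dict with the four reveal rings ('surfaces'),
#-- the recessed opening ring ('openings') and the 'type' (Door or Window).
#
#   embO = embrasuresGeometry((doors, windows), p, 0.1)
#   for op in embO[0]:  # openings on wall 0
#       print(op['type'], len(op['surfaces']))  # e.g. Door 4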
def addsurface(skipsm, CompositeSurface, coords, interior=None):
"""
Adds a surface to the CompositeSurface (and others).
Input: coordinates of the LinearRing of the surface to be added to the CompositeSurface.
Output: Upgraded CompositeSurface.
If skipsm is toggled, it will skip the creation of the <gml:SurfaceMember>
"""
if skipsm is False:
surfaceMember = etree.SubElement(CompositeSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
else:
Polygon = etree.SubElement(CompositeSurface, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
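#-- Usage sketch (assumes the module-level etree, ns_gml and ASSIGNID are in
#-- scope; the coordinates are hypothetical):
#
#   cs = etree.Element("{%s}CompositeSurface" % ns_gml)
#   addsurface(False, cs, "0 0 0 1 0 0 1 1 0 0 0 0")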
def addSurfaceWithEmbrasure(skipsm, CompositeSurface, coords, interior=None, embO=None):
"""
Adds a surface to the CompositeSurface.
Input: coordinates of the LinearRing of the surface to be added to the CompositeSurface.
Output: Upgraded CompositeSurface.
If skipsm is toggled, it will skip the creation of the <gml:SurfaceMember>
"""
if skipsm is False:
surfaceMember = etree.SubElement(CompositeSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
else:
Polygon = etree.SubElement(CompositeSurface, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
    #-- embO may be None when there are no openings on this face
    for opening in embO or []:
for s in opening['surfaces']:
surfaceMember = etree.SubElement(CompositeSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = s
for o in opening['openings']:
DoorsurfaceMember = etree.SubElement(CompositeSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
            DoorposList.text = o
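#-- Usage sketch: same as addsurface(), but the reveal surfaces and recessed
#-- opening rings produced by embrasuresGeometry() are appended as well
#-- (face0, holes and embO as used in the calls earlier in this module):
#
#   addSurfaceWithEmbrasure(False, cs, face0, holes[0], embO[0])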
def interiorDormer(cs, d, side):
"""Interior of a dormer."""
dList, dListGML = d
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[0]
d2 = dListGML[0] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[3] + ' ' + dListGML[0]
d3 = dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[5]
addsurface(False, cs, d1)
addsurface(False, cs, d2)
addsurface(False, cs, d3)
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
addsurface(False, cs, d4)
def buildinginstallation(bldg, kind, d, semantics=0, window=None, side=None, embrasure=None):
    """Generate a building installation: for dormers and chimneys."""
    dList, dListGML = d
    obi = etree.SubElement(bldg, "{%s}outerBuildingInstallation" % ns_bldg)
    bi = etree.SubElement(obi, "{%s}BuildingInstallation" % ns_bldg)
    def binosemantics(XMLelement, coords, window=None):
MultiSurface = etree.SubElement(XMLelement, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if window is not None:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = window
    def bisemantics(XMLelement, coords, semantics, window=None, fillHole=True):
boundedBy = etree.SubElement(XMLelement, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
lod3geometry = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lod3geometry, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if window is not None:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = GMLreversedRing(window)
if fillHole is True:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = window
    def bisemanticsMulti(XMLelement, coords, semantics, window=None):
boundedBy = etree.SubElement(XMLelement, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
lod3geometry = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lod3geometry, "{%s}MultiSurface" % ns_gml)
        #-- One surfaceMember per polygon, so that each member holds a single surface
        for coord in coords:
            surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
            Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coord
if window is not None:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = window
if semantics == 0:
if kind == 'dormer':
lod3geometry = etree.SubElement(bi, "{%s}lod3Geometry" % ns_bldg)
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[0]
d2 = dListGML[0] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[3] + ' ' + dListGML[0]
d3 = dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[5]
binosemantics(lod3geometry, d1)
binosemantics(lod3geometry, d2)
binosemantics(lod3geometry, d3)
if window is None:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
binosemantics(lod3geometry, d4)
if window is not None:
if embrasure is not None and embrasure > 0.0:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
ew[0] = [dList[4][0] - embrasure, dList[4][1] + window, dList[4][2] - window]
ew[1] = [dList[1][0] - embrasure, dList[1][1] + window, dList[1][2] + window]
ew[2] = [dList[2][0] - embrasure, dList[2][1] - window, dList[2][2] + window]
ew[3] = [dList[5][0] - embrasure, dList[5][1] - window, dList[5][2] - window]
elif side == 3:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
ew[0] = [dList[4][0] + embrasure, dList[4][1] - window, dList[4][2] - window]
ew[1] = [dList[1][0] + embrasure, dList[1][1] - window, dList[1][2] + window]
ew[2] = [dList[2][0] + embrasure, dList[2][1] + window, dList[2][2] + window]
ew[3] = [dList[5][0] + embrasure, dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1] + embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1] + embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1] + embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1] + embrasure, dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1] - embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1] - embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1] - embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1] - embrasure, dList[5][2] - window]
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
binosemantics(lod3geometry, d4, dwring)
dw0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(dw[0])
dw1 = GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(dw[1])
dw2 = GMLPointList(dw[3]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3])
dw3 = GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(dw[3])
ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
for bipoly in [dw0, dw1, dw2, dw3]:
binosemantics(lod3geometry, bipoly)
binosemantics(lod3geometry, ew0)
else:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
                    #-- Without an embrasure the window ring lies in the wall plane
                    if side == 1:
                        dw = [[], [], [], []]
                        dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
                        dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
                        dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
                        dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
                    elif side == 3:
                        dw = [[], [], [], []]
                        dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
                        dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
                        dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
                        dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
                    elif side == 0:
                        dw = [[], [], [], []]
                        dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
                        dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
                        dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
                        dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
                    elif side == 2:
                        dw = [[], [], [], []]
                        dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
                        dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
                        dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
                        dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
                    dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
                    binosemantics(lod3geometry, d4, dwring)
elif kind == 'chimney':
lod3geometry = etree.SubElement(bi, "{%s}lod3Geometry" % ns_bldg)
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[7] + ' ' + dListGML[0]
d2 = dListGML[3] + ' ' + dListGML[6] + ' ' + dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3]
d3 = dListGML[0] + ' ' + dListGML[7] + ' ' + dListGML[6] + ' ' + dListGML[3] + ' ' + dListGML[0]
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
binosemantics(lod3geometry, d1)
binosemantics(lod3geometry, d2)
binosemantics(lod3geometry, d3)
binosemantics(lod3geometry, d4)
d5 = dListGML[7] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[6] + ' ' + dListGML[7]
binosemantics(lod3geometry, d5)
#-- Closure surface in the roof
d0 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[0]
bisemantics(lod3geometry, d0, "ClosureSurface")
if semantics == 1:
if kind == 'dormer':
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[0]
d2 = dListGML[0] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[3] + ' ' + dListGML[0]
d3 = dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[5]
bisemantics(bi, d1, "WallSurface")
bisemantics(bi, d2, "RoofSurface")
bisemantics(bi, d3, "WallSurface")
if window is None:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
bisemantics(bi, d4, "WallSurface")
if window is not None:
#-- Face with the window
if embrasure is not None and embrasure > 0.0:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
ew[0] = [dList[4][0] - embrasure, dList[4][1] + window, dList[4][2] - window]
ew[1] = [dList[1][0] - embrasure, dList[1][1] + window, dList[1][2] + window]
ew[2] = [dList[2][0] - embrasure, dList[2][1] - window, dList[2][2] + window]
ew[3] = [dList[5][0] - embrasure, dList[5][1] - window, dList[5][2] - window]
elif side == 3:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
ew[0] = [dList[4][0] + embrasure, dList[4][1] - window, dList[4][2] - window]
ew[1] = [dList[1][0] + embrasure, dList[1][1] - window, dList[1][2] + window]
ew[2] = [dList[2][0] + embrasure, dList[2][1] + window, dList[2][2] + window]
ew[3] = [dList[5][0] + embrasure, dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1] + embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1] + embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1] + embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1] + embrasure, dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1] - embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1] - embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1] - embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1] - embrasure, dList[5][2] - window]
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
bisemantics(bi, d4, "WallSurface", dwring, False)
dw0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(dw[0])
dw1 = GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(dw[1])
dw2 = GMLPointList(dw[3]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3])
dw3 = GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(dw[3])
ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
bisemanticsMulti(bi, [dw0, dw1, dw2, dw3], "WallSurface", ew0)
else:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
elif side == 3:
dw = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
                    ew0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0])
                    bisemantics(bi, d4, "WallSurface", ew0, True)
elif kind == 'chimney':
lod3geometry = etree.SubElement(bi, "{%s}lod3Geometry" % ns_bldg)
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[7] + ' ' + dListGML[0]
d2 = dListGML[3] + ' ' + dListGML[6] + ' ' + dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3]
d3 = dListGML[0] + ' ' + dListGML[7] + ' ' + dListGML[6] + ' ' + dListGML[3] + ' ' + dListGML[0]
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
bisemantics(lod3geometry, d1, "WallSurface")
bisemantics(lod3geometry, d2, "WallSurface")
bisemantics(lod3geometry, d3, "WallSurface")
bisemantics(lod3geometry, d4, "WallSurface")
#-- Closure surface on the top
d5 = dListGML[7] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[6] + ' ' + dListGML[7]
bisemantics(lod3geometry, d5, "ClosureSurface")
#-- Closure surface in the roof
d0 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[0]
bisemantics(lod3geometry, d0, "ClosureSurface")
def buildinginstallationSolid(skipsm, cs, kind, d, semantics=0, window=None, side=None, embrasure=None):
"""Generate the solid of a building installation."""
dList, dListGML = d
if semantics == 0:
if kind == 'dormer':
d1 = dListGML[0] + ' ' + dListGML[1] + ' ' + dListGML[4] + ' ' + dListGML[0]
d2 = dListGML[0] + ' ' + dListGML[4] + ' ' + dListGML[5] + ' ' + dListGML[3] + ' ' + dListGML[0]
d3 = dListGML[5] + ' ' + dListGML[2] + ' ' + dListGML[3] + ' ' + dListGML[5]
addsurface(skipsm, cs, d1)
addsurface(skipsm, cs, d2)
addsurface(skipsm, cs, d3)
            #-- Face with the window
if embrasure is not None and embrasure > 0.0:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
if side == 1:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] + window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] + window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] - window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] - window, dList[5][2] - window]
ew[0] = [dList[4][0] - embrasure, dList[4][1] + window, dList[4][2] - window]
ew[1] = [dList[1][0] - embrasure, dList[1][1] + window, dList[1][2] + window]
ew[2] = [dList[2][0] - embrasure, dList[2][1] - window, dList[2][2] + window]
ew[3] = [dList[5][0] - embrasure, dList[5][1] - window, dList[5][2] - window]
elif side == 3:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0], dList[4][1] - window, dList[4][2] - window]
dw[1] = [dList[1][0], dList[1][1] - window, dList[1][2] + window]
dw[2] = [dList[2][0], dList[2][1] + window, dList[2][2] + window]
dw[3] = [dList[5][0], dList[5][1] + window, dList[5][2] - window]
ew[0] = [dList[4][0] + embrasure, dList[4][1] - window, dList[4][2] - window]
ew[1] = [dList[1][0] + embrasure, dList[1][1] - window, dList[1][2] + window]
ew[2] = [dList[2][0] + embrasure, dList[2][1] + window, dList[2][2] + window]
ew[3] = [dList[5][0] + embrasure, dList[5][1] + window, dList[5][2] - window]
elif side == 0:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1] + embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1] + embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1] + embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1] + embrasure, dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1] - embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1] - embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1] - embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1] - embrasure, dList[5][2] - window]
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
addsurface(skipsm, cs, d4, [dwring])
dw0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(dw[0])
dw1 = GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(dw[1])
dw2 = GMLPointList(dw[3]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3])
dw3 = GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(dw[3])
ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
addsurface(skipsm, cs, dw0)
addsurface(skipsm, cs, dw1)
addsurface(skipsm, cs, dw2)
addsurface(skipsm, cs, dw3)
addsurface(skipsm, cs, ew0)
else:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
addsurface(skipsm, cs, d4)
def plainMultiSurface(surfaceMember, coords, interior=None):
"""Adds a polygon to the SurfaceMember."""
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
def multiSurface(bldg, coords, semantics, interior=None, LOD=None, opening=None):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
Output: CompositeSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
    if LOD == 3:
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
    else:
        #-- LOD 2 is the default
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
if opening:
dooropening = opening[0]
if dooropening != []:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmldoor = etree.SubElement(gmlopening, "{%s}Door" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmldoor, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = GMLreversedRing(dooropening['ring'])
if len(opening[1]) > 0:
for win in opening[1]:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = GMLreversedRing(win['ring'])
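#-- Usage sketch: `opening` is assumed to be a pair (door, windows), where the
#-- door is a dict with a precomputed 'ring' and windows is a list of such dicts:
#
#   multiSurface(bldg, face0, "WallSurface", interior=None, LOD=3,
#                opening=(door, windows))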
def multiSurface2(bldg, coords, semantics, interior=None, LOD=None, window=None):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
Output: MultiSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
    if LOD == 3:
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
    else:
        #-- LOD 2 is the default
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
    if window:
        for win in window:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = win
def multiSurfaceWithEmbrasure(bldg, coords, semantics, interior=None, LOD=None, embO=None):
"""
Write a surface with input coordinates, taking into account the embrasures.
Input: coordinates of the LinearRing.
Output: CompositeSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
    if LOD == 3:
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
    else:
        #-- LOD 2 is the default
        lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
    #-- embO may be None when there are no openings on this face
    for opening in embO or []:
for s in opening['surfaces']:
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = s
for o in opening['openings']:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
if opening['type'] == 'Door':
gmldoor = etree.SubElement(gmlopening, "{%s}Door" % ns_bldg)
elif opening['type'] == 'Window':
gmldoor = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
            else:
                raise ValueError("Only Door and Window openings are supported.")
lod3MultiSurface = etree.SubElement(gmldoor, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
            DoorposList.text = o
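#-- Usage sketch tying the embrasure pipeline together (names as used in the
#-- calls earlier in this module):
#
#   embO = embrasuresGeometry((doors, windows), p, 0.1)
#   multiSurfaceWithEmbrasure(bldg, face0, "WallSurface", holes[0], 3, embO[0])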
def multiSurfaceLOD0(bldg, coords, footedge):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
Output: MultiSurface.
"""
if footedge == "footprint":
lod0MultiSurface = etree.SubElement(bldg, "{%s}lod0FootPrint" % ns_bldg)
elif footedge == "roofedge":
lod0MultiSurface = etree.SubElement(bldg, "{%s}lod0RoofEdge" % ns_bldg)
MultiSurface = etree.SubElement(lod0MultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
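#-- Usage sketch: write both LOD0 representations of a building (the footprint
#-- ring is reversed, as done by the callers below):
#
#   multiSurfaceLOD0(bldg, GMLreversedRing(footprint), "footprint")
#   multiSurfaceLOD0(bldg, roofedge, "roofedge")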
def CityGMLbuildingLOD0(CityModel, ID, attributes, o, x, y, z, h=None, rtype=None, top=None, override=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
Generate a cityObjectMember representing a building in LOD0.
Output: CityGML code of the cityObjectMember.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
    storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
    storeysAboveGroundXML.text = attributes['storeysAboveGround']
if top is not None:
if top == 1.0 and rtype == 'Shed':
p = verticesBody(o, x, y, z, h, None, override)
else:
p = verticesBody(o, x, y, z, h, top)
elif top is None:
p = verticesBody(o, x, y, z, h, None, override)
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
footprints = []
roofedges = []
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '0.1':
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
footprints.append(faceBottom)
#faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
#roofedges.append(faceTop)
elif LOD == '0.2':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
footprints.append(faceBottom)
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
roofedges.append(faceTop)
elif LOD == '0.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
footprints.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
roofedges.append(faceTop)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
roofedges.append(gtop)
elif buildingpart['type'] == 'Garage':
if LOD == '0.1' or LOD == '0.2' or LOD == '0.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
footprints.append(faceBottom)
if LOD == '0.2':
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
roofedges.append(faceTop)
elif LOD == '0.3':
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
roofedges.append(faceTop)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
roofedges.append(gtop)
else:
footprint = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
footprints.append(footprint)
if LOD == '0.2' or LOD == '0.3':
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
roofedges.append(faceTop)
#-- Bottom face
for ft in footprints:
multiSurfaceLOD0(bldg, GMLreversedRing(ft), "footprint")
#-- Top face
for re in roofedges:
multiSurfaceLOD0(bldg, re, "roofedge")
def CityGMLbuildingLOD1(CityModel, ID, attributes, o, x, y, z, h=None, rtype=None, top=None, override=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
Generate a cityObjectMember representing a building in LOD1.
Input: ID, origin, width, depth, height, and optionally: height of the roof, roof type, block model top modelling rule, walls modelling rule.
Output: CityGML code of the cityObjectMember.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
if top is not None:
if top == 1.0 and rtype == 'Shed':
p = verticesBody(o, x, y, z, h, None, override)
else:
p = verticesBody(o, x, y, z, h, top)
elif top is None:
p = verticesBody(o, x, y, z, h, None, override)
lod1MultiSurface = etree.SubElement(bldg, "{%s}lod1MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lod1MultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
faces = []
face0 = "%s %s %s %s %s" % (p[0], p[1], p[5], p[4], p[0])
faces.append(face0)
face2 = "%s %s %s %s %s" % (p[2], p[3], p[7], p[6], p[2])
faces.append(face2)
face3 = "%s %s %s %s %s" % (p[3], p[0], p[4], p[7], p[3])
faces.append(face3)
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '1.1':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
faces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
elif LOD == '1.2':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
faces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
faces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
faces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
faces.append(gface3)
elif LOD == '1.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
faces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
faces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
faces.append(gtop)
elif buildingpart['type'] == 'Garage':
            if LOD in ('1.1', '1.2', '1.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
            if LOD in ('1.1', '1.2'):
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
faces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
faces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
faces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
faces.append(gface3)
elif LOD == '1.3':
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
faces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
faces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
faces.append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
faces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
for face in faces:
plainMultiSurface(surfaceMember, face)
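#-- Editorial sketch (not called anywhere): the eight corner points of a
#-- building part are recomputed verbatim in several functions below. A helper
#-- of the following shape could factor that out; the argument names are
#-- illustrative only.
def _buildingPartCorners(x0, y0, z0, dx, dy, dz):
    """Return GML point lists for the 8 corners of an axis-aligned box.
    Indices 0-3 are the floor corners in plan order, 4-7 the matching top corners."""
    corners = []
    for zz in (z0, z0 + dz):
        corners.append(GMLPointList([x0, y0, zz]))
        corners.append(GMLPointList([x0 + dx, y0, zz]))
        corners.append(GMLPointList([x0 + dx, y0 + dy, zz]))
        corners.append(GMLPointList([x0, y0 + dy, zz]))
    return corners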
def CityGMLbuildingLOD1Semantics(CityModel, ID, attributes, o, x, y, z, h=None, rtype=None, top=None, override=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
    Generate a cityObjectMember representing a building in an experimental semantic form of LOD1 that the CityGML standard does not currently support.
Input: ID, origin, width, depth, height, and optionally: height of the roof, roof type, block model top modelling rule, walls modelling rule.
Output: CityGML code of the cityObjectMember.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
if top is not None:
if top == 1.0 and rtype == 'Shed':
p = verticesBody(o, x, y, z, h, None, override)
else:
p = verticesBody(o, x, y, z, h, top)
    else:
p = verticesBody(o, x, y, z, h, None, override)
Wfaces = []
Rfaces = []
Gfaces = []
face0 = "%s %s %s %s %s" % (p[0], p[1], p[5], p[4], p[0])
Wfaces.append(face0)
face2 = "%s %s %s %s %s" % (p[2], p[3], p[7], p[6], p[2])
Wfaces.append(face2)
face3 = "%s %s %s %s %s" % (p[3], p[0], p[4], p[7], p[3])
Wfaces.append(face3)
#-- Is the building part covered by overhangs?
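    #-- ('covered' means the roof overhang reaches at least as far as the part,
    #-- so the part lies under the overhang and gets no geometry of its own.)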
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
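            #-- bp[0..3]: footprint corners of the building part (plan order);
            #-- bp[4..7]: the same corners raised by the part's height.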
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
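            #-- (bpT carries the part's footprint up to tH, the z of the main
            #-- body's top, so the part can be merged with the building's top face.)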
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '1.1':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
Wfaces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
Gfaces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
Rfaces.append(faceTop)
elif LOD == '1.2':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
Gfaces.append(faceBottom)
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
Rfaces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
Wfaces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
Wfaces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
Wfaces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
Wfaces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
Wfaces.append(gface3)
elif LOD == '1.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
Gfaces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
Rfaces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
Wfaces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
Wfaces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
Wfaces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
Wfaces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
Rfaces.append(gtop)
elif buildingpart['type'] == 'Garage':
            if LOD in ('1.1', '1.2', '1.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
Gfaces.append(faceBottom)
            if LOD in ('1.1', '1.2'):
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
Rfaces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
Wfaces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
Wfaces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
Wfaces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
Wfaces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
Wfaces.append(gface3)
elif LOD == '1.3':
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
Rfaces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
Wfaces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
Wfaces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
Wfaces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
Wfaces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
Rfaces.append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
Wfaces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
Gfaces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
Rfaces.append(faceTop)
for face in Wfaces:
multiSurface(bldg, face, "WallSurface", None)
for face in Gfaces:
multiSurface(bldg, face, "GroundSurface", None)
for face in Rfaces:
multiSurface(bldg, face, "RoofSurface", None)
def CityGMLbuildingLOD1Solid(CityModel, ID, attributes, o, x, y, z, h=None, rtype=None, top=None, override=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
Generate a cityObjectMember representing a building as an LOD1 solid.
Input: ID, origin, width, depth, height, and optionally: height of the roof, roof type, block model top modelling rule, walls modelling rule.
Output: CityGML code of the cityObjectMember.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
if top is not None:
if top == 1.0 and rtype == 'Shed':
p = verticesBody(o, x, y, z, h, None, override)
else:
p = verticesBody(o, x, y, z, h, top)
    else:
p = verticesBody(o, x, y, z, h, None, override)
lod1Solid = etree.SubElement(bldg, "{%s}lod1Solid" % ns_bldg)
Solid = etree.SubElement(lod1Solid, "{%s}Solid" % ns_gml)
if ASSIGNID:
Solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
exterior = etree.SubElement(Solid, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
faces = []
face0 = "%s %s %s %s %s" % (p[0], p[1], p[5], p[4], p[0])
faces.append(face0)
face2 = "%s %s %s %s %s" % (p[2], p[3], p[7], p[6], p[2])
faces.append(face2)
face3 = "%s %s %s %s %s" % (p[3], p[0], p[4], p[7], p[3])
faces.append(face3)
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '1.1':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
faces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
elif LOD == '1.2':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
faces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
faces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
faces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
faces.append(gface3)
elif LOD == '1.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
faces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
faces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
faces.append(gtop)
elif buildingpart['type'] == 'Garage':
            if LOD in ('1.1', '1.2', '1.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
faces.append(faceBottom)
            if LOD in ('1.1', '1.2'):
faceTop = "%s %s %s %s %s %s %s %s %s" % (p[4], p[5], bpT[4], bpT[5], bpT[6], bpT[7], p[6], p[7], p[4])
faces.append(faceTop)
face1_0 = "%s %s %s %s %s" % (p[1], bp[0], bpT[4], p[5], p[1])
faces.append(face1_0)
face1_1 = "%s %s %s %s %s" % (p[2], p[6], bpT[7], bp[3], p[2])
faces.append(face1_1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bpT[5], bpT[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bpT[6], bpT[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bpT[7], bpT[6], bp[2], bp[3])
faces.append(gface3)
elif LOD == '1.3':
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
faces.append(face1)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
faces.append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
faces.append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
faces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
faces.append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
faces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
for face in faces:
addsurface(False, CompositeSurface, face)
def CityGMLbuildingLOD2Solid(CityModel, ID, attributes, o, x, y, z, h, rtype=None, width=None, ovh=None, rep=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
    Create the LOD2 of the building with a basic roof shape, as a solid or as a plain multi-surface (selected by 'rep').
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
p = verticesBody(o, x, y, z)
r = verticesRoof([o, x, y, z], h, rtype, width)
#-- Computations for the LOD2 with explicit overhangs
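    #-- verticesOverhangs() lowers the eaves; for shed roofs the high side is
    #-- raised by the same amount, hence upperEaves = z + h + (z - eaves).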
eaves = z
upperEaves = z
if ovh is not None:
overhangs, interiors, eaves, ovhy_recalculated = verticesOverhangs([o, x, y, z], p, h, rtype, ovh, r, width)
if rtype == 'Shed':
upperEaves = z + h + (z - eaves)
else:
overhangs = None
if rep == 'solid':
lod2rep = etree.SubElement(bldg, "{%s}lod2Solid" % ns_bldg)
repres = etree.SubElement(lod2rep, "{%s}Solid" % ns_gml)
exterior = etree.SubElement(repres, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
elif rep == 'brep':
lod2rep = etree.SubElement(bldg, "{%s}lod2MultiSurface" % ns_bldg)
repres = etree.SubElement(lod2rep, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(repres, "{%s}surfaceMember" % ns_gml)
if ASSIGNID:
repres.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
east_faces = {}
east_faces['rest'] = []
east_faces['roof'] = []
east_faces['outerfloor'] = []
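    #-- east_faces gathers the faces around the building part on the east side:
    #-- 'wall' is the (possibly notched) east wall, 'rest' the part's own walls,
    #-- 'roof'/'outerfloor' its top face, classified by the part type.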
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '2.0':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
            elif LOD in ('2.1', '2.2', '2.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['outerfloor'].append(gtop)
elif buildingpart['type'] == 'Garage':
            if LOD in ('2.0', '2.1', '2.2', '2.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['roof'].append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
    #-- Bottom face (set in the branches above; plain rectangle when there is no building part)
if rep == 'solid':
addsurface(False, CompositeSurface, faceBottom)
elif rep == 'brep':
plainMultiSurface(surfaceMember, faceBottom)
#-- Roof surfaces and wall surfaces depending on the type of the roof.
if rtype == 'Gabled':
if rep == 'solid':
gabledRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
gabledRoof(surfaceMember, p, r, east_faces)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
elif rtype == 'Shed':
if rep == 'solid':
shedRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
shedRoof(surfaceMember, p, r, east_faces)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
elif rtype == 'Hipped' or rtype == 'Pyramidal':
if rep == 'solid':
hippedRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
hippedRoof(surfaceMember, p, r, east_faces)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
    elif rtype == 'Flat' or rtype is None:
if rep == 'solid':
flatRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
flatRoof(surfaceMember, p, r, east_faces)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
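#-- Editorial sketch (not called anywhere): every face string in this file is a
#-- closed linear ring built by repeating the first point, e.g.
#--   _ring(p[1], p[2], p[6], p[5]) == "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
def _ring(*points):
    """Join GML points into a closed linear ring (first point repeated last)."""
    return " ".join(points + (points[0],))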
def CityGMLbuildingLOD2Semantics(CityModel, ID, attributes, o, x, y, z, h, rtype=None, width=None, ovh=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
Create LOD2 of the building with a basic roof shape and standard semantics (brep multisurfaces).
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
p = verticesBody(o, x, y, z)
r = verticesRoof([o, x, y, z], h, rtype, width)
#-- Computations for the LOD2 with explicit overhangs
eaves = z
upperEaves = z
if ovh is not None:
overhangs, interiors, eaves, ovhy_recalculated = verticesOverhangs([o, x, y, z], p, h, rtype, ovh, r, width)
if rtype == 'Shed':
upperEaves = z + h + (z - eaves)
else:
overhangs = None
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
east_faces = {}
east_faces['rest'] = []
east_faces['roof'] = []
east_faces['outerfloor'] = []
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
        if x > aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '2.0':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
            elif LOD in ('2.1', '2.2', '2.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['outerfloor'].append(gtop)
elif buildingpart['type'] == 'Garage':
            if LOD in ('2.0', '2.1', '2.2', '2.3'):
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['roof'].append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
#-- Bottom face (in all cases regardless of the roof type)
multiSurface(bldg, faceBottom, "GroundSurface", None)
#-- Roof surfaces and wall surfaces depending on the type of the roof.
if rtype == 'Gabled':
gabledRoof(bldg, p, r, east_faces, True)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
elif rtype == 'Shed':
shedRoof(bldg, p, r, east_faces, True)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
elif rtype == 'Hipped' or rtype == 'Pyramidal':
hippedRoof(bldg, p, r, east_faces, True)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
    elif rtype == 'Flat' or rtype is None:
flatRoof(bldg, p, r, east_faces, True)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
def CityGMLbuildingLOD3Semantics(CityModel, ID, attributes, o, x, y, z, h, rtype=None, ovh=None, width=None, door=None, wallWindows=None, dormers=None, roofWindows=None, chimney=None, embrasure=None, BiSem=1, aux=None, buildingpart=None, aerial=False):
"""
Create LOD3 of the building with an advanced roof shape and semantics (multisurfaces).
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
p = verticesBody(o, x, y, z)
pList = verticesBodyList(o, x, y, z)
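    #-- The same body vertices as coordinate lists; used below to place the
    #-- openings, dormers and chimneys.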
r = verticesRoof([o, x, y, z], h, rtype, width)
if r == []:
r = None
eaves = z
upperEaves = z
if ovh is not None:
overhangs, interiors, eaves, ovhy_recalculated = verticesOverhangs([o, x, y, z], p, h, rtype, ovh, r, width)
if rtype == 'Shed':
upperEaves = z + h + (z - eaves)
    else:
        overhangs = None
        interiors = None
        ovhy_recalculated = None
#-- Is the building part covered by overhangs?
if buildingpart is not None and aerial is True:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
east_faces = {}
east_faces['rest'] = []
east_faces['roof'] = []
east_faces['outerfloor'] = []
#-- Account for building parts
if buildingpart is not None and not covered:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
if buildingpart['type'] == 'Alcove':
east_faces['outerfloor'].append(gtop)
elif buildingpart['type'] == 'Garage':
east_faces['roof'].append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
ropenings = [[], [], [], []]
ropenings_rw = [[], [], [], []]
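    #-- One list of opening rings per roof side (index 0-3): 'ropenings' punches
    #-- holes into the roof surfaces (inverted orientation), 'ropenings_rw'
    #-- stores the roof-window polygons themselves.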
#-- Dormers
if dormers or roofWindows:
        if dormers:
for drm in dormers:
#-- Get a list of vertices of each dormer
dList, dListGML = dormerVertices([drm], pList, h, rtype, [o, x, y, z], width)
#-- Get the opening for creating a hole in the roof surface
                #-- Inverted ring orientation, to cut a hole in the roof surface
ropenings[int(drm['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][3] + ' ' + dListGML[0][2] + ' ' + dListGML[0][1] + ' ' + dListGML[0][0]))
#-- Construct the dormer
if aerial is True:
buildinginstallation(bldg, "dormer", [dList[0], dListGML[0]], BiSem, None, drm['side'], embrasure)
elif aerial is None or aerial is False:
buildinginstallation(bldg, "dormer", [dList[0], dListGML[0]], BiSem, 0.1, drm['side'], embrasure)
        elif roofWindows:
# ropenings_rw.append("")
# ropenings_rw.append([])
for rfw in roofWindows:
#-- Get a list of vertices of each window. It is the same as for dormer so the same function is used.
dList, dListGML = dormerVertices([rfw], pList, h, rtype, [o, x, y, z], width)
#-- Get the opening for creating a hole in the roof surface
ropenings[int(rfw['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][3] + ' ' + dListGML[0][2] + ' ' + dListGML[0][1] + ' ' + dListGML[0][0]))
ropenings_rw[int(rfw['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][1] + ' ' + dListGML[0][2] + ' ' + dListGML[0][3] + ' ' + dListGML[0][0]))
#-- Deal with chimney(s)
    if chimney:
        for ch in chimney:
            #-- List of vertices
            dList, dListGML = chimneyVertices([ch], pList, h, rtype, [o, x, y, z], width)
            #-- Get the opening for creating a hole in the roof surface
            ropenings[int(ch['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][3] + ' ' + dListGML[0][2] + ' ' + dListGML[0][1] + ' ' + dListGML[0][0]))
            #-- Construct the chimney
            buildinginstallation(bldg, "chimney", [dList[0], dListGML[0]], BiSem, None, ch['side'])
            chimneyHeight = dList[0][7][2]
else:
chimneyHeight = None
#-- Bottom face (in all cases the same regardless of the roof type)
multiSurface(bldg, faceBottom, "GroundSurface", None, 3)
openings = []
#-- Door
if door:
door['ring'] = openingRing(door, pList)
openings.append(door)
else:
openings.append("")
    if wallWindows:
        openings.append([])
        for ww in wallWindows:
            ww['ring'] = openingRing(ww, pList)
            openings[1].append(ww)
    else:
        openings.append("")
#-- Roof surfaces and wall surfaces depending on the type of the roof.
if rtype == 'Gabled':
gabledRoof(bldg, p, r, east_faces, True, openings, ropenings, ropenings_rw, embrasure, pList)
        if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
elif rtype == 'Shed':
shedRoof(bldg, p, r, east_faces, True, openings, ropenings, ropenings_rw, embrasure, pList)
        if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
elif rtype == 'Hipped' or rtype == 'Pyramidal':
hippedRoof(bldg, p, r, east_faces, True, openings, ropenings, ropenings_rw, embrasure, pList)
        if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
    elif rtype == 'Flat' or rtype is None:
flatRoof(bldg, p, r, east_faces, True, openings, ropenings, ropenings_rw, embrasure, pList)
        if overhangs is not None and ovh[0] > 0:
roofOverhangs(bldg, overhangs, interiors, True)
if rtype == 'Shed':
if chimneyHeight is not None:
if chimneyHeight < upperEaves:
chimneyHeight = upperEaves
else:
chimneyHeight = upperEaves
return chimneyHeight, eaves, ovhy_recalculated
def CityGMLbuildingLOD3Solid(CityModel, ID, attributes, o, x, y, z, h, rtype=None, ovh=None, width=None, door=None, wallWindows=None, dormers=None, roofWindows=None, chimney=None, embrasure=None, additional=None, rep=None, aux=None, buildingpart=None, aerial=False):
"""
    Create the LOD3 of the building as a solid or as plain geometry (brep without semantics), selected by 'rep'.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
if rep == 'solid':
lod3rep = etree.SubElement(bldg, "{%s}lod3Solid" % ns_bldg)
repres = etree.SubElement(lod3rep, "{%s}Solid" % ns_gml)
exterior = etree.SubElement(repres, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
elif rep == 'brep':
lod3rep = etree.SubElement(bldg, "{%s}lod3MultiSurface" % ns_bldg)
repres = etree.SubElement(lod3rep, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(repres, "{%s}surfaceMember" % ns_gml)
if ASSIGNID:
repres.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
p = verticesBody(o, x, y, z)
pList = verticesBodyList(o, x, y, z)
r = verticesRoof([o, x, y, z], h, rtype, width)
if r == []:
r = None
    if ovh is not None:
        overhangs, interiors, eaves, ovhy_recalculated = verticesOverhangs([o, x, y, z], p, h, rtype, ovh, r, width)
    else:
        overhangs = None
        interiors = None
ropenings = [[], [], [], []]
ropenings_rw = [[], [], [], []]
#-- Is the building part covered by overhangs?
if buildingpart is not None and aerial is True:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
east_faces = {}
east_faces['rest'] = []
east_faces['roof'] = []
east_faces['outerfloor'] = []
#-- Account for building parts
if buildingpart is not None and not covered:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['roof'].append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
#-- Dormers
    roofWindows = None  #-- roof windows are not generated in this representation
if dormers or roofWindows:
        if dormers:
for drm in dormers:
#-- Get a list of vertices of each dormer
dList, dListGML = dormerVertices([drm], pList, h, rtype, [o, x, y, z], width)
#-- Get the opening for creating a hole in the roof surface
ropenings[int(drm['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][3] + ' ' + dListGML[0][2] + ' ' + dListGML[0][1] + ' ' + dListGML[0][0]))
#-- Construct the dormer
if rep == 'solid':
if aerial is True:
buildinginstallationSolid(False, CompositeSurface, "dormer", [dList[0], dListGML[0]], 0, None, drm['side'], embrasure)
else:
buildinginstallationSolid(False, CompositeSurface, "dormer", [dList[0], dListGML[0]], 0, 0.1, drm['side'], embrasure)
elif rep == 'brep':
if aerial is True:
buildinginstallationSolid(False, surfaceMember, "dormer", [dList[0], dListGML[0]], 0, None, drm['side'], embrasure)
else:
buildinginstallationSolid(False, surfaceMember, "dormer", [dList[0], dListGML[0]], 0, 0.1, drm['side'], embrasure)
#-- Bottom face (in all cases the same regardless of the roof type)
if rep == 'solid':
addsurface(False, CompositeSurface, faceBottom)
elif rep == 'brep':
plainMultiSurface(surfaceMember, faceBottom)
openings = []
#-- Door
if door:
door['ring'] = openingRing(door, pList)
openings.append(door)
else:
openings.append("")
    if wallWindows:
        openings.append([])
        for ww in wallWindows:
            ww['ring'] = openingRing(ww, pList)
            openings[1].append(ww)
    else:
        openings.append("")
#-- Roof surfaces and wall surfaces depending on the type of the roof.
if rtype == 'Gabled':
if rep == 'solid':
gabledRoof(CompositeSurface, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
elif rep == 'brep':
gabledRoof(surfaceMember, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
elif rtype == 'Shed':
if rep == 'solid':
shedRoof(CompositeSurface, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
elif rep == 'brep':
shedRoof(surfaceMember, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
elif rtype == 'Hipped' or rtype == 'Pyramidal':
if rep == 'solid':
hippedRoof(CompositeSurface, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
elif rep == 'brep':
hippedRoof(surfaceMember, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
    elif rtype == 'Flat' or rtype is None:
if rep == 'solid':
flatRoof(CompositeSurface, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
elif rep == 'brep':
flatRoof(surfaceMember, p, r, east_faces, None, openings, ropenings, None, embrasure, pList)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
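#-- Note on the 'rep' dispatch used above: the same ring string is emitted in
#-- two ways depending on the requested representation. A hedged sketch of the
#-- pattern (names as in this script) is:
#   if rep == 'solid':
#       addsurface(False, CompositeSurface, ring)    #-- polygon of a gml:Solid shell
#   elif rep == 'brep':
#       plainMultiSurface(surfaceMember, ring)       #-- polygon of a gml:MultiSurface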
def CityGMLbuildingInteriorLOD0(CityModel, ID, attributes, o, x, y, z, h, floors, floorHeight, rtype=None, width=None, wallThickness=0.2, joist=0.2, aux=None, buildingpart=None):
"""Create the interior footprints. One for each storey."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
#-- Coordinates of the main interior points (offset from the exterior surface)
Xa = o[0] + wallThickness
Xb = o[0] + x - wallThickness
Ya = o[1] + wallThickness
Yb = o[1] + y - wallThickness
if rtype != 'Flat':
floors += 1
#-- Construct a surface for each floor
for floor in range(1, int(floors) + 1):
#-- Floor elevation
fel = (floor - 1) * floorHeight + 0.5*joist
#-- Ceiling elevation
cel = floor * floorHeight - 0.5*joist
#-- XML tree
lod1MultiSurface = etree.SubElement(bldg, "{%s}lod1MultiSurface" % ns_bldg)
ms = etree.SubElement(lod1MultiSurface, "{%s}MultiSurface" % ns_gml)
if ASSIGNID:
ms.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
#-- The eight points of the solid. F=Floor, C=Ceiling
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p0C = str(Xa) + ' ' + str(Ya) + ' ' + str(cel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p1C = str(Xb) + ' ' + str(Ya) + ' ' + str(cel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p2C = str(Xb) + ' ' + str(Yb) + ' ' + str(cel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
p3C = str(Xa) + ' ' + str(Yb) + ' ' + str(cel)
if floor == 1:
if buildingpart is not None:
if buildingpart['type'] == 'Alcove':
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[3] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[4] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[7] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
#E = "%s %s %s %s %s %s %s %s %s" % (p1F, bp[0], bp[4], bp[7], bp[3], p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p0F, p3F, p2F, bp[3], bp[2], bp[1], bp[0], p1F, p0F)
elif buildingpart['type'] == 'Garage':
#E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
#top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x + wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[3] = GMLPointList([o[0] + x + wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[4] = GMLPointList([o[0] + x + wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[7] = GMLPointList([o[0] + x + wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
gbottom = "%s %s %s %s %s" % (bp[0], bp[3], bp[2], bp[1], bp[0])
addsurface(False, ms, gbottom)
else:
# E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
# top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
else:
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
addsurface(False, ms, faceBottom)
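#-- Worked example of the storey elevations computed above (illustrative
#-- values, not taken from any input file): with floorHeight = 3.0 and
#-- joist = 0.2, half the joist thickness is subtracted on each side of the slab:
#   storey 1: fel = 0*3.0 + 0.1 = 0.1, cel = 1*3.0 - 0.1 = 2.9
#   storey 2: fel = 1*3.0 + 0.1 = 3.1, cel = 2*3.0 - 0.1 = 5.9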
def CityGMLbuildingInteriorLOD1(CityModel, ID, attributes, o, x, y, z, h, floors, floorHeight, rtype=None, width=None, wallThickness=0.2, joist=0.2, aux=None, buildingpart=None):
"""Create the interior of an "LOD1+" according to (Boeters et al., 2015)."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
#-- Coordinates of the main interior points (offset from the exterior surface)
Xa = o[0] + wallThickness
Xb = o[0] + x - wallThickness
Ya = o[1] + wallThickness
Yb = o[1] + y - wallThickness
#-- Floor elevation
fel = 0.5*joist
#-- Ceiling elevation
if rtype == 'Flat':
cel = floors * floorHeight - 0.5*joist
else:
cel = floors * floorHeight + 0.5*joist
#-- XML tree
lod2Solid = etree.SubElement(bldg, "{%s}lod2Solid" % ns_bldg)
Solid = etree.SubElement(lod2Solid, "{%s}Solid" % ns_gml)
if ASSIGNID:
Solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
exterior = etree.SubElement(Solid, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
#-- The eight points of the solid. F=Floor, C=Ceiling
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p0C = str(Xa) + ' ' + str(Ya) + ' ' + str(cel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p1C = str(Xb) + ' ' + str(Ya) + ' ' + str(cel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p2C = str(Xb) + ' ' + str(Yb) + ' ' + str(cel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
p3C = str(Xa) + ' ' + str(Yb) + ' ' + str(cel)
S = "%s %s %s %s %s" % (p0F, p1F, p1C, p0C, p0F)
if buildingpart is not None:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[3] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[4] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[7] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
E = "%s %s %s %s %s %s %s %s %s" % (p1F, bp[0], bp[4], bp[7], bp[3], p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p0F, p3F, p2F, bp[3], bp[2], bp[1], bp[0], p1F, p0F)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
addsurface(False, CompositeSurface, gface0)
addsurface(False, CompositeSurface, gface1)
addsurface(False, CompositeSurface, gface3)
addsurface(False, CompositeSurface, gtop)
else:
E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
N = "%s %s %s %s %s" % (p2F, p3F, p3C, p2C, p2F)
W = "%s %s %s %s %s" % (p3F, p0F, p0C, p3C, p3F)
addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, faceBottom)
addsurface(False, CompositeSurface, S)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
if rtype != 'Flat':
p = verticesBody(o, x, y, z)
pList = verticesBodyList(o, x, y, z)
if rtype == 'Shed':
h2 = (h/x) * (x - 2* wallThickness)
topThickness = h - h2 - .5*joist
rWth = (topThickness/h)*(x)
else:
h2 = (h/(.5*x)) * (.5*x - wallThickness)
topThickness = h - h2 - .5*joist
rWth = (topThickness/h)*(.5*x)
#-- Extension for the attic
if rtype != 'Flat':
XTa = o[0] + wallThickness
XTb = o[0] + x - wallThickness
YTa = o[1] + wallThickness
YTb = o[1] + y - wallThickness
r = verticesRoof([o, x, y, z], h, rtype, width)
r[0] = GMLstring2points(r[0])[0]
r[1] = GMLstring2points(r[1])[0]
#-- Floor elevation
fel = floors * floorHeight + 0.5*joist
#-- Ceiling elevation
if rtype == 'Shed':
h2 = (h/x) * (x - wallThickness)
topThickness = h - h2 - .5*joist
else:
h2 = (h/(.5*x)) * (.5*x - wallThickness)
topThickness = h - h2 - .5*joist
cel = float(r[0][2]) - topThickness
if rtype == 'Gabled':
gabledAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness)
elif rtype == 'Hipped':
hippedAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness)
elif rtype == 'Pyramidal':
pyramidalAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness)
elif rtype == 'Shed':
shedAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness)
else:
top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
addsurface(False, CompositeSurface, top)
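#-- Worked example of the attic-wall offset computed above (illustrative
#-- values): for a gabled roof with h = 3.0, x = 6.0, wallThickness = 0.3 and
#-- joist = 0.2, similar triangles give the interior ridge height
#   h2 = (3.0 / (0.5*6.0)) * (0.5*6.0 - 0.3) = 2.7
#   topThickness = 3.0 - 2.7 - 0.5*0.2 = 0.2
#-- i.e. the interior roof plane sits 0.2 below the exterior ridge, keeping
#-- the interior walls parallel to the exterior ones.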
def CityGMLbuildingInteriorLOD2(CityModel, ID, attributes, o, x, y, z, h, floors, floorHeight, rtype=None, width=None, wallThickness=0.2, joist=0.2, aux=None, buildingpart=None, dormers=None):
"""Create the interior of an "LOD2+" according to (Boeters et al., 2015)."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
#-- Coordinates of the main interior points (offset from the exterior surface)
Xa = o[0] + wallThickness
Xb = o[0] + x - wallThickness
Ya = o[1] + wallThickness
Yb = o[1] + y - wallThickness
#-- XML tree
lod2Solid = etree.SubElement(bldg, "{%s}lod2Solid" % ns_bldg)
MultiSolid = etree.SubElement(lod2Solid, "{%s}MultiSolid" % ns_gml)
#-- Construct a solid for each floor
for floor in range(1, int(floors) + 1):
#-- Floor elevation
fel = (floor - 1) * floorHeight + 0.5*joist
#-- Ceiling elevation
cel = floor * floorHeight - 0.5*joist
#-- Add solids of the multisolid
Solid = etree.SubElement(MultiSolid, "{%s}Solid" % ns_gml)
if ASSIGNID:
Solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
exterior = etree.SubElement(Solid, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
#-- The eight points of the solid. F=Floor, C=Ceiling
p0F = str(Xa) + ' ' + str(Ya) + ' ' + str(fel)
p0C = str(Xa) + ' ' + str(Ya) + ' ' + str(cel)
p1F = str(Xb) + ' ' + str(Ya) + ' ' + str(fel)
p1C = str(Xb) + ' ' + str(Ya) + ' ' + str(cel)
p2F = str(Xb) + ' ' + str(Yb) + ' ' + str(fel)
p2C = str(Xb) + ' ' + str(Yb) + ' ' + str(cel)
p3F = str(Xa) + ' ' + str(Yb) + ' ' + str(fel)
p3C = str(Xa) + ' ' + str(Yb) + ' ' + str(cel)
if floor == 1:
if buildingpart is not None:
if buildingpart['type'] == 'Alcove':
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[3] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[4] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[7] = GMLPointList([o[0] + x - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
E = "%s %s %s %s %s %s %s %s %s" % (p1F, bp[0], bp[4], bp[7], bp[3], p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p0F, p3F, p2F, bp[3], bp[2], bp[1], bp[0], p1F, p0F)
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
addsurface(False, CompositeSurface, gface0)
addsurface(False, CompositeSurface, gface1)
addsurface(False, CompositeSurface, gface3)
addsurface(False, CompositeSurface, gtop)
top = "%s %s %s %s %s %s %s %s %s" % (p0C, p1C, bp[4], bp[5], bp[6], bp[7], p2C, p3C, p0C)
elif buildingpart['type'] == 'Garage':
GarageSolid = etree.SubElement(MultiSolid, "{%s}Solid" % ns_gml)
GarageExterior = etree.SubElement(GarageSolid, "{%s}exterior" % ns_gml)
GarageCompositeSurface = etree.SubElement(GarageExterior, "{%s}CompositeSurface" % ns_gml)
E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x + 0.0, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + 0.5*joist])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[3] = GMLPointList([o[0] + x + 0.0, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + 0.5*joist])
bp[4] = GMLPointList([o[0] + x + 0.0, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']) - wallThickness, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
bp[7] = GMLPointList([o[0] + x + 0.0, aux['origin'][1] + buildingpart['o'] + buildingpart['y'] - wallThickness, aux['origin'][2] + buildingpart['z'] - 0.5*joist])
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
gface2 = "%s %s %s %s %s" % (bp[0], bp[4], bp[7], bp[3], bp[0])
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
gbottom = "%s %s %s %s %s" % (bp[0], bp[3], bp[2], bp[1], bp[0])
addsurface(False, GarageCompositeSurface, gface0)
addsurface(False, GarageCompositeSurface, gface1)
addsurface(False, GarageCompositeSurface, gface2)
addsurface(False, GarageCompositeSurface, gface3)
addsurface(False, GarageCompositeSurface, gtop)
addsurface(False, GarageCompositeSurface, gbottom)
else:
E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
else:
faceBottom = "%s %s %s %s %s" % (p0F, p3F, p2F, p1F, p0F)
top = "%s %s %s %s %s" % (p0C, p1C, p2C, p3C, p0C)
E = "%s %s %s %s %s" % (p1F, p2F, p2C, p1C, p1F)
S = "%s %s %s %s %s" % (p0F, p1F, p1C, p0C, p0F)
N = "%s %s %s %s %s" % (p2F, p3F, p3C, p2C, p2F)
W = "%s %s %s %s %s" % (p3F, p0F, p0C, p3C, p3F)
addsurface(False, CompositeSurface, faceBottom)
addsurface(False, CompositeSurface, top)
addsurface(False, CompositeSurface, S)
addsurface(False, CompositeSurface, E)
addsurface(False, CompositeSurface, N)
addsurface(False, CompositeSurface, W)
dormerTickness = .1
if rtype != 'Flat':
if rtype == 'Shed':
h2 = (h/x) * (x - 2* wallThickness)
topThickness = h - h2 - .5*joist
#rWth = (topThickness/h)*(x)
rWth = (topThickness/h)*(x) - wallThickness
rWth2 = None
elif rtype == 'Pyramidal' or rtype == 'Hipped':
h2 = (h/(.5*x)) * (.5*x - wallThickness)
topThickness = h - h2 - .5*joist
rWth = (topThickness/h)*(.5*x)
rWth2 = wallThickness - ((width * .5*joist)/h)
else:
h2 = (h/(.5*x)) * (.5*x - wallThickness)
topThickness = h - h2 - .5*joist
rWth = (topThickness/h)*(.5*x)
rWth2 = None
p = verticesBody(o, x, y, z)
pList = verticesBodyList(o, x, y, z)
ropenings = [[], [], [], []]
if dormers and len(dormers) > 0:
for drm in dormers:
#-- Get a list of vertices of each dormer
dList, dListGML = interiordormerVertices([drm], pList, h, rtype, [o, x, y, z], width, wallThickness, rWth, dormerTickness, topThickness, rWth2)
#-- Get the opening for creating a hole in the roof surface
ropenings[int(drm['side'])].append(str(dListGML[0][0] + ' ' + dListGML[0][3] + ' ' + dListGML[0][2] + ' ' + dListGML[0][1] + ' ' + dListGML[0][0]))
interiorDormer(CompositeSurface, [dList[0], dListGML[0]], drm['side'])
#-- Solid for the attic
if rtype != 'Flat':
XTa = o[0] + wallThickness
XTb = o[0] + x - wallThickness
YTa = o[1] + wallThickness
YTb = o[1] + y - wallThickness
r = verticesRoof([o, x, y, z], h, rtype, width)
r[0] = GMLstring2points(r[0])[0]
r[1] = GMLstring2points(r[1])[0]
#-- Floor elevation
fel = floors * floorHeight + 0.5*joist
        #-- Ceiling elevation of the attic, computed so that the interior roof planes stay parallel to the exterior ones
cel = float(r[0][2]) - topThickness
#-- XML tree
# lod2Solid = etree.SubElement(bldg, "{%s}lod2Solid" % ns_bldg)
Solid = etree.SubElement(MultiSolid, "{%s}Solid" % ns_gml)
if ASSIGNID:
Solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
exterior = etree.SubElement(Solid, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
if rtype == 'Gabled':
gabledAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness, True, ropenings)
elif rtype == 'Hipped':
hippedAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness, True, ropenings)
#addsurface(False, CompositeSurface, aS)
elif rtype == 'Pyramidal':
pyramidalAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness, True, ropenings)
elif rtype == 'Shed':
shedAttic(CompositeSurface, [XTa, YTa, XTb, YTb], p, r, fel, cel, wallThickness, topThickness, True, ropenings)
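#-- Note: ropenings is indexed by the roof side (0-3). Each interior dormer
#-- appends the ring of its hole to ropenings[side], and the attic builders
#-- above receive it to punch matching holes in the interior roof planes.
#-- Hedged sketch of the accumulated structure for two dormers on side 1:
#   ropenings = [[], ['<ring of dormer A>', '<ring of dormer B>'], [], []]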
def b2p(exts):
"""Convert two points of a polygon into its bounding box.
(Rectangular polygon parallel with axes.)
"""
p0x = exts[0][0]
p0y = exts[0][1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = exts[0][2]
p1y = exts[0][3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
e = "%s %s %s %s %s" % (p0, pb, p1, pu, p0)
i = []
if exts[1] is not None:
for h in exts[1]:
p0x = h[0]
p0y = h[1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p1x = h[2]
p1y = h[3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
i.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
return e, i
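#-- Illustrative call (values are made up): a 10 x 5 rectangle with no holes.
#   b2p([[0.0, 0.0, 10.0, 5.0], None])
#-- returns the exterior ring
#   '0.0 0.0 0.0 10.0 0.0 0.0 10.0 5.0 0.0 0.0 5.0 0.0 0.0 0.0 0.0'
#-- and an empty list of interior rings.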
def b2s(exts):
"""Convert two points of a solid into its bounding box.
(Cube-like solid parallel with axes.)
"""
p0x = exts[0][0]
p0y = exts[0][1]
p0 = str(p0x) + ' ' + str(p0y) + ' ' + '0.0'
p0T = str(p0x) + ' ' + str(p0y) + ' ' + str(exts[1])
p1x = exts[0][2]
p1y = exts[0][3]
p1 = str(p1x) + ' ' + str(p1y) + ' ' + '0.0'
p1T = str(p1x) + ' ' + str(p1y) + ' ' + str(exts[1])
pb = str(p1x) + ' ' + str(p0y) + ' ' + '0.0'
pbT = str(p1x) + ' ' + str(p0y) + ' ' + str(exts[1])
pu = str(p0x) + ' ' + str(p1y) + ' ' + '0.0'
puT = str(p0x) + ' ' + str(p1y) + ' ' + str(exts[1])
surfaces = []
surfaces.append("%s %s %s %s %s" % (p0, pu, p1, pb, p0))
surfaces.append("%s %s %s %s %s" % (p0T, pbT, p1T, puT, p0T))
surfaces.append("%s %s %s %s %s" % (p0, pb, pbT, p0T, p0))
surfaces.append("%s %s %s %s %s" % (pb, p1, p1T, pbT, pb))
surfaces.append("%s %s %s %s %s" % (p1, pu, puT, p1T, p1))
surfaces.append("%s %s %s %s %s" % (pu, p0, p0T, puT, pu))
return surfaces
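#-- Illustrative call (values are made up): a 2 x 2 footprint extruded to 3.0.
#   b2s([[0.0, 0.0, 2.0, 2.0], 3.0])
#-- returns six rings: the bottom face, the top face at z = 3.0, and the four
#-- vertical walls, in that order.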
def CityGMLstreets(CityModel, street_data):
"""Generates a road network with the thematic module for Transportation Objects."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
transpobj = etree.SubElement(cityObject, "{%s}Road" % ns_tran)
if ASSIGNID:
transpobj.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
transpms = etree.SubElement(transpobj, "{%s}lod1MultiSurface" % ns_tran)
MultiSurface = etree.SubElement(transpms, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
street_points = b2p(street_data)
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = street_points[0]
for h in street_points[1]:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = h
def CityGMLplantCoverLOD0(CityModel, pc_data):
"""Generates a PlantCover as a 2.5D surface."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
pcobj = etree.SubElement(cityObject, "{%s}PlantCover" % ns_veg)
if ASSIGNID:
pcobj.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
pcms = etree.SubElement(pcobj, "{%s}lod1MultiSurface" % ns_veg)
MultiSurface = etree.SubElement(pcms, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
pc_points = b2p([pc_data[0], None])
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = pc_points[0]
def CityGMLplantCoverLOD1(CityModel, pc_data):
"""Generates a PlantCover as a solid."""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
pcobj = etree.SubElement(cityObject, "{%s}PlantCover" % ns_veg)
if ASSIGNID:
pcobj.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
lod1MultiSolid = etree.SubElement(pcobj, "{%s}lod1MultiSolid" % ns_veg)
multiSolid = etree.SubElement(lod1MultiSolid, "{%s}MultiSolid" % ns_gml)
solidmember = etree.SubElement(multiSolid, "{%s}solidMember" % ns_gml)
Solid = etree.SubElement(solidmember, "{%s}Solid" % ns_gml)
if ASSIGNID:
Solid.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
exterior = etree.SubElement(Solid, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
pc_surfaces = b2s(pc_data)
for pc_s in pc_surfaces:
addsurface(False, CompositeSurface, pc_s)
def rotator(vertex, sine, cos, origin_of_rotation):
"Rotate the vertex around the origin by an angle (2D). Cos and sin are already precomputed to make the calculations more efficient due to many repetitions."
vertex = [float(vertex[0]), float(vertex[1]), float(vertex[2])]
rotated = [None, None, vertex[2]]
rotated[0] = ((vertex[0]-origin_of_rotation[0]) * cos - (vertex[1]-origin_of_rotation[1]) * sine) + origin_of_rotation[0]
rotated[1] = ((vertex[0]-origin_of_rotation[0]) * sine + (vertex[1]-origin_of_rotation[1]) * cos) + origin_of_rotation[1]
return rotated
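#-- Illustrative usage (values are made up): rotating a vertex by 90 degrees
#-- around the origin; the sine and cosine are computed once and reused.
#   import math
#   angle = math.radians(90)
#   s, c = math.sin(angle), math.cos(angle)
#   rotator([1.0, 0.0, 0.0], s, c, [0.0, 0.0])   #-> approx. [0.0, 1.0, 0.0]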
#----------------------------------------------------------------------
#-- Start of the program
print('Parsing file', BUILDINGFILE, '...')
#-- Parse the file containing the building information
BUILDINGFILE = etree.parse(BUILDINGFILE)
root = BUILDINGFILE.getroot()
#-- Buildings will be stored here
buildings = []
#-- Streets will be stored here
streets = []
#-- PlantCover will be stored here
plantcover = []
#-- Find all instances of city objects in the XML and put them in a list
for obj in root.iter('building'):
    buildings.append(obj)
for obj in root.iter('streets'):
    streets.append(obj)
for obj in root.iter('parks'):
    plantcover.append(obj)
print("There are", len(buildings), "buildings(s) in this XML. Processing...")
print("Opening empty CityGML files...")
CityGMLs = {}
#-- Instances
## LOD0
#-- LOD0.0
CityGMLs['LOD0_0'] = createCityGML('LOD0_0')
#-- LOD0.1
if VARIANTS:
CityGMLs['LOD0_1_F0_H0'] = createCityGML('LOD0_1_F0_H0')
CityGMLs['LOD0_1_F0_H1'] = createCityGML('LOD0_1_F0_H1')
CityGMLs['LOD0_1_F0_H2'] = createCityGML('LOD0_1_F0_H2')
CityGMLs['LOD0_1_F0_H3'] = createCityGML('LOD0_1_F0_H3')
if VARIANTS:
CityGMLs['LOD0_1_F0_H4'] = createCityGML('LOD0_1_F0_H4')
CityGMLs['LOD0_1_F0_H5'] = createCityGML('LOD0_1_F0_H5')
CityGMLs['LOD0_1_F0_H6'] = createCityGML('LOD0_1_F0_H6')
CityGMLs['LOD0_1_F0_HAvg'] = createCityGML('LOD0_1_F0_HAvg')
CityGMLs['LOD0_1_F0_HMed'] = createCityGML('LOD0_1_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_1_F1_H0'] = createCityGML('LOD0_1_F1_H0')
CityGMLs['LOD0_1_F1_H1'] = createCityGML('LOD0_1_F1_H1')
CityGMLs['LOD0_1_F1_H2'] = createCityGML('LOD0_1_F1_H2')
CityGMLs['LOD0_1_F1_H3'] = createCityGML('LOD0_1_F1_H3')
CityGMLs['LOD0_1_F1_H4'] = createCityGML('LOD0_1_F1_H4')
CityGMLs['LOD0_1_F1_H5'] = createCityGML('LOD0_1_F1_H5')
CityGMLs['LOD0_1_F1_H6'] = createCityGML('LOD0_1_F1_H6')
CityGMLs['LOD0_1_F1_HAvg'] = createCityGML('LOD0_1_F1_HAvg')
CityGMLs['LOD0_1_F1_HMed'] = createCityGML('LOD0_1_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_1_Fd_H0'] = createCityGML('LOD0_1_Fd_H0')
CityGMLs['LOD0_1_Fd_H1'] = createCityGML('LOD0_1_Fd_H1')
CityGMLs['LOD0_1_Fd_H2'] = createCityGML('LOD0_1_Fd_H2')
CityGMLs['LOD0_1_Fd_H3'] = createCityGML('LOD0_1_Fd_H3')
CityGMLs['LOD0_1_Fd_H4'] = createCityGML('LOD0_1_Fd_H4')
CityGMLs['LOD0_1_Fd_H5'] = createCityGML('LOD0_1_Fd_H5')
CityGMLs['LOD0_1_Fd_H6'] = createCityGML('LOD0_1_Fd_H6')
CityGMLs['LOD0_1_Fd_HAvg'] = createCityGML('LOD0_1_Fd_HAvg')
CityGMLs['LOD0_1_Fd_HMed'] = createCityGML('LOD0_1_Fd_HMed')
#-- LOD0.2
if VARIANTS:
CityGMLs['LOD0_2_F0_H0'] = createCityGML('LOD0_2_F0_H0')
CityGMLs['LOD0_2_F0_H1'] = createCityGML('LOD0_2_F0_H1')
CityGMLs['LOD0_2_F0_H2'] = createCityGML('LOD0_2_F0_H2')
CityGMLs['LOD0_2_F0_H3'] = createCityGML('LOD0_2_F0_H3')
if VARIANTS:
CityGMLs['LOD0_2_F0_H4'] = createCityGML('LOD0_2_F0_H4')
CityGMLs['LOD0_2_F0_H5'] = createCityGML('LOD0_2_F0_H5')
CityGMLs['LOD0_2_F0_H6'] = createCityGML('LOD0_2_F0_H6')
CityGMLs['LOD0_2_F0_HAvg'] = createCityGML('LOD0_2_F0_HAvg')
CityGMLs['LOD0_2_F0_HMed'] = createCityGML('LOD0_2_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_2_F1_H0'] = createCityGML('LOD0_2_F1_H0')
CityGMLs['LOD0_2_F1_H1'] = createCityGML('LOD0_2_F1_H1')
CityGMLs['LOD0_2_F1_H2'] = createCityGML('LOD0_2_F1_H2')
CityGMLs['LOD0_2_F1_H3'] = createCityGML('LOD0_2_F1_H3')
CityGMLs['LOD0_2_F1_H4'] = createCityGML('LOD0_2_F1_H4')
CityGMLs['LOD0_2_F1_H5'] = createCityGML('LOD0_2_F1_H5')
CityGMLs['LOD0_2_F1_H6'] = createCityGML('LOD0_2_F1_H6')
CityGMLs['LOD0_2_F1_HAvg'] = createCityGML('LOD0_2_F1_HAvg')
CityGMLs['LOD0_2_F1_HMed'] = createCityGML('LOD0_2_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_2_Fd_H0'] = createCityGML('LOD0_2_Fd_H0')
CityGMLs['LOD0_2_Fd_H1'] = createCityGML('LOD0_2_Fd_H1')
CityGMLs['LOD0_2_Fd_H2'] = createCityGML('LOD0_2_Fd_H2')
CityGMLs['LOD0_2_Fd_H3'] = createCityGML('LOD0_2_Fd_H3')
CityGMLs['LOD0_2_Fd_H4'] = createCityGML('LOD0_2_Fd_H4')
CityGMLs['LOD0_2_Fd_H5'] = createCityGML('LOD0_2_Fd_H5')
CityGMLs['LOD0_2_Fd_H6'] = createCityGML('LOD0_2_Fd_H6')
CityGMLs['LOD0_2_Fd_HAvg'] = createCityGML('LOD0_2_Fd_HAvg')
CityGMLs['LOD0_2_Fd_HMed'] = createCityGML('LOD0_2_Fd_HMed')
#-- LOD0.3
if VARIANTS:
CityGMLs['LOD0_3_F0_H0'] = createCityGML('LOD0_3_F0_H0')
CityGMLs['LOD0_3_F0_H1'] = createCityGML('LOD0_3_F0_H1')
CityGMLs['LOD0_3_F0_H2'] = createCityGML('LOD0_3_F0_H2')
CityGMLs['LOD0_3_F0_H3'] = createCityGML('LOD0_3_F0_H3')
if VARIANTS:
CityGMLs['LOD0_3_F0_H4'] = createCityGML('LOD0_3_F0_H4')
CityGMLs['LOD0_3_F0_H5'] = createCityGML('LOD0_3_F0_H5')
CityGMLs['LOD0_3_F0_H6'] = createCityGML('LOD0_3_F0_H6')
CityGMLs['LOD0_3_F0_HAvg'] = createCityGML('LOD0_3_F0_HAvg')
CityGMLs['LOD0_3_F0_HMed'] = createCityGML('LOD0_3_F0_HMed')
if VARIANTS:
CityGMLs['LOD0_3_F1_H0'] = createCityGML('LOD0_3_F1_H0')
CityGMLs['LOD0_3_F1_H1'] = createCityGML('LOD0_3_F1_H1')
CityGMLs['LOD0_3_F1_H2'] = createCityGML('LOD0_3_F1_H2')
CityGMLs['LOD0_3_F1_H3'] = createCityGML('LOD0_3_F1_H3')
CityGMLs['LOD0_3_F1_H4'] = createCityGML('LOD0_3_F1_H4')
CityGMLs['LOD0_3_F1_H5'] = createCityGML('LOD0_3_F1_H5')
CityGMLs['LOD0_3_F1_H6'] = createCityGML('LOD0_3_F1_H6')
CityGMLs['LOD0_3_F1_HAvg'] = createCityGML('LOD0_3_F1_HAvg')
CityGMLs['LOD0_3_F1_HMed'] = createCityGML('LOD0_3_F1_HMed')
if VARIANTS:
CityGMLs['LOD0_3_Fd_H0'] = createCityGML('LOD0_3_Fd_H0')
CityGMLs['LOD0_3_Fd_H1'] = createCityGML('LOD0_3_Fd_H1')
CityGMLs['LOD0_3_Fd_H2'] = createCityGML('LOD0_3_Fd_H2')
CityGMLs['LOD0_3_Fd_H3'] = createCityGML('LOD0_3_Fd_H3')
CityGMLs['LOD0_3_Fd_H4'] = createCityGML('LOD0_3_Fd_H4')
CityGMLs['LOD0_3_Fd_H5'] = createCityGML('LOD0_3_Fd_H5')
CityGMLs['LOD0_3_Fd_H6'] = createCityGML('LOD0_3_Fd_H6')
CityGMLs['LOD0_3_Fd_HAvg'] = createCityGML('LOD0_3_Fd_HAvg')
CityGMLs['LOD0_3_Fd_HMed'] = createCityGML('LOD0_3_Fd_HMed')
## LOD1
#-- LOD1.0
CityGMLs['LOD1_0_HMin'] = createCityGML('LOD1_0_HMin')
if SOLIDS:
CityGMLs['LOD1_0_HMin_solid'] = createCityGML('LOD1_0_HMin_solid')
CityGMLs['LOD1_0_HMin_semantics'] = createCityGML('LOD1_0_HMin_semantics')
if VARIANTS:
CityGMLs['LOD1_0_HAvg'] = createCityGML('LOD1_0_HAvg')
if SOLIDS:
CityGMLs['LOD1_0_HAvg_solid'] = createCityGML('LOD1_0_HAvg_solid')
CityGMLs['LOD1_0_HAvg_semantics'] = createCityGML('LOD1_0_HAvg_semantics')
CityGMLs['LOD1_0_HMax'] = createCityGML('LOD1_0_HMax')
if SOLIDS:
CityGMLs['LOD1_0_HMax_solid'] = createCityGML('LOD1_0_HMax_solid')
CityGMLs['LOD1_0_HMax_semantics'] = createCityGML('LOD1_0_HMax_semantics')
CityGMLs['LOD1_0_HMedian'] = createCityGML('LOD1_0_HMedian')
if SOLIDS:
CityGMLs['LOD1_0_HMedian_solid'] = createCityGML('LOD1_0_HMedian_solid')
CityGMLs['LOD1_0_HMedian_semantics'] = createCityGML('LOD1_0_HMedian_semantics')
#-- LOD1.1
if VARIANTS:
CityGMLs['LOD1_1_F0_H0'] = createCityGML('LOD1_1_F0_H0')
CityGMLs['LOD1_1_F0_H1'] = createCityGML('LOD1_1_F0_H1')
CityGMLs['LOD1_1_F0_H2'] = createCityGML('LOD1_1_F0_H2')
CityGMLs['LOD1_1_F0_H3'] = createCityGML('LOD1_1_F0_H3')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4'] = createCityGML('LOD1_1_F0_H4')
CityGMLs['LOD1_1_F0_H5'] = createCityGML('LOD1_1_F0_H5')
CityGMLs['LOD1_1_F0_H6'] = createCityGML('LOD1_1_F0_H6')
CityGMLs['LOD1_1_F0_HAvg'] = createCityGML('LOD1_1_F0_HAvg')
CityGMLs['LOD1_1_F0_HMed'] = createCityGML('LOD1_1_F0_HMed')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0'] = createCityGML('LOD1_1_F1_H0')
CityGMLs['LOD1_1_F1_H1'] = createCityGML('LOD1_1_F1_H1')
CityGMLs['LOD1_1_F1_H2'] = createCityGML('LOD1_1_F1_H2')
CityGMLs['LOD1_1_F1_H3'] = createCityGML('LOD1_1_F1_H3')
CityGMLs['LOD1_1_F1_H4'] = createCityGML('LOD1_1_F1_H4')
CityGMLs['LOD1_1_F1_H5'] = createCityGML('LOD1_1_F1_H5')
CityGMLs['LOD1_1_F1_H6'] = createCityGML('LOD1_1_F1_H6')
CityGMLs['LOD1_1_F1_HAvg'] = createCityGML('LOD1_1_F1_HAvg')
CityGMLs['LOD1_1_F1_HMed'] = createCityGML('LOD1_1_F1_HMed')
if VARIANTS:
CityGMLs['LOD1_1_Fd_H0'] = createCityGML('LOD1_1_Fd_H0')
CityGMLs['LOD1_1_Fd_H1'] = createCityGML('LOD1_1_Fd_H1')
CityGMLs['LOD1_1_Fd_H2'] = createCityGML('LOD1_1_Fd_H2')
CityGMLs['LOD1_1_Fd_H3'] = createCityGML('LOD1_1_Fd_H3')
CityGMLs['LOD1_1_Fd_H4'] = createCityGML('LOD1_1_Fd_H4')
CityGMLs['LOD1_1_Fd_H5'] = createCityGML('LOD1_1_Fd_H5')
CityGMLs['LOD1_1_Fd_H6'] = createCityGML('LOD1_1_Fd_H6')
CityGMLs['LOD1_1_Fd_HAvg'] = createCityGML('LOD1_1_Fd_HAvg')
CityGMLs['LOD1_1_Fd_HMed'] = createCityGML('LOD1_1_Fd_HMed')
if SOLIDS:
if VARIANTS:
CityGMLs['LOD1_1_F0_H0_solid'] = createCityGML('LOD1_1_F0_H0_solid')
CityGMLs['LOD1_1_F0_H1_solid'] = createCityGML('LOD1_1_F0_H1_solid')
CityGMLs['LOD1_1_F0_H2_solid'] = createCityGML('LOD1_1_F0_H2_solid')
CityGMLs['LOD1_1_F0_H3_solid'] = createCityGML('LOD1_1_F0_H3_solid')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4_solid'] = createCityGML('LOD1_1_F0_H4_solid')
CityGMLs['LOD1_1_F0_H5_solid'] = createCityGML('LOD1_1_F0_H5_solid')
CityGMLs['LOD1_1_F0_H6_solid'] = createCityGML('LOD1_1_F0_H6_solid')
CityGMLs['LOD1_1_F0_HAvg_solid'] = createCityGML('LOD1_1_F0_HAvg_solid')
CityGMLs['LOD1_1_F0_HMed_solid'] = createCityGML('LOD1_1_F0_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0_solid'] = createCityGML('LOD1_1_F1_H0_solid')
CityGMLs['LOD1_1_F1_H1_solid'] = createCityGML('LOD1_1_F1_H1_solid')
CityGMLs['LOD1_1_F1_H2_solid'] = createCityGML('LOD1_1_F1_H2_solid')
CityGMLs['LOD1_1_F1_H3_solid'] = createCityGML('LOD1_1_F1_H3_solid')
CityGMLs['LOD1_1_F1_H4_solid'] = createCityGML('LOD1_1_F1_H4_solid')
CityGMLs['LOD1_1_F1_H5_solid'] = createCityGML('LOD1_1_F1_H5_solid')
CityGMLs['LOD1_1_F1_H6_solid'] = createCityGML('LOD1_1_F1_H6_solid')
CityGMLs['LOD1_1_F1_HAvg_solid'] = createCityGML('LOD1_1_F1_HAvg_solid')
CityGMLs['LOD1_1_F1_HMed_solid'] = createCityGML('LOD1_1_F1_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_Fd_H0_solid'] = createCityGML('LOD1_1_Fd_H0_solid')
CityGMLs['LOD1_1_Fd_H1_solid'] = createCityGML('LOD1_1_Fd_H1_solid')
CityGMLs['LOD1_1_Fd_H2_solid'] = createCityGML('LOD1_1_Fd_H2_solid')
CityGMLs['LOD1_1_Fd_H3_solid'] = createCityGML('LOD1_1_Fd_H3_solid')
CityGMLs['LOD1_1_Fd_H4_solid'] = createCityGML('LOD1_1_Fd_H4_solid')
CityGMLs['LOD1_1_Fd_H5_solid'] = createCityGML('LOD1_1_Fd_H5_solid')
CityGMLs['LOD1_1_Fd_H6_solid'] = createCityGML('LOD1_1_Fd_H6_solid')
CityGMLs['LOD1_1_Fd_HAvg_solid'] = createCityGML('LOD1_1_Fd_HAvg_solid')
CityGMLs['LOD1_1_Fd_HMed_solid'] = createCityGML('LOD1_1_Fd_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_1_F0_H0_semantics'] = createCityGML('LOD1_1_F0_H0_semantics')
CityGMLs['LOD1_1_F0_H1_semantics'] = createCityGML('LOD1_1_F0_H1_semantics')
CityGMLs['LOD1_1_F0_H2_semantics'] = createCityGML('LOD1_1_F0_H2_semantics')
CityGMLs['LOD1_1_F0_H3_semantics'] = createCityGML('LOD1_1_F0_H3_semantics')
if VARIANTS:
CityGMLs['LOD1_1_F0_H4_semantics'] = createCityGML('LOD1_1_F0_H4_semantics')
CityGMLs['LOD1_1_F0_H5_semantics'] = createCityGML('LOD1_1_F0_H5_semantics')
CityGMLs['LOD1_1_F0_H6_semantics'] = createCityGML('LOD1_1_F0_H6_semantics')
CityGMLs['LOD1_1_F0_HAvg_semantics'] = createCityGML('LOD1_1_F0_HAvg_semantics')
CityGMLs['LOD1_1_F0_HMed_semantics'] = createCityGML('LOD1_1_F0_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_1_F1_H0_semantics'] = createCityGML('LOD1_1_F1_H0_semantics')
CityGMLs['LOD1_1_F1_H1_semantics'] = createCityGML('LOD1_1_F1_H1_semantics')
CityGMLs['LOD1_1_F1_H2_semantics'] = createCityGML('LOD1_1_F1_H2_semantics')
CityGMLs['LOD1_1_F1_H3_semantics'] = createCityGML('LOD1_1_F1_H3_semantics')
CityGMLs['LOD1_1_F1_H4_semantics'] = createCityGML('LOD1_1_F1_H4_semantics')
CityGMLs['LOD1_1_F1_H5_semantics'] = createCityGML('LOD1_1_F1_H5_semantics')
CityGMLs['LOD1_1_F1_H6_semantics'] = createCityGML('LOD1_1_F1_H6_semantics')
CityGMLs['LOD1_1_F1_HAvg_semantics'] = createCityGML('LOD1_1_F1_HAvg_semantics')
CityGMLs['LOD1_1_F1_HMed_semantics'] = createCityGML('LOD1_1_F1_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_1_Fd_H0_semantics'] = createCityGML('LOD1_1_Fd_H0_semantics')
CityGMLs['LOD1_1_Fd_H1_semantics'] = createCityGML('LOD1_1_Fd_H1_semantics')
CityGMLs['LOD1_1_Fd_H2_semantics'] = createCityGML('LOD1_1_Fd_H2_semantics')
CityGMLs['LOD1_1_Fd_H3_semantics'] = createCityGML('LOD1_1_Fd_H3_semantics')
CityGMLs['LOD1_1_Fd_H4_semantics'] = createCityGML('LOD1_1_Fd_H4_semantics')
CityGMLs['LOD1_1_Fd_H5_semantics'] = createCityGML('LOD1_1_Fd_H5_semantics')
CityGMLs['LOD1_1_Fd_H6_semantics'] = createCityGML('LOD1_1_Fd_H6_semantics')
CityGMLs['LOD1_1_Fd_HAvg_semantics'] = createCityGML('LOD1_1_Fd_HAvg_semantics')
CityGMLs['LOD1_1_Fd_HMed_semantics'] = createCityGML('LOD1_1_Fd_HMed_semantics')
#-- LOD1.2
if VARIANTS:
CityGMLs['LOD1_2_F0_H0'] = createCityGML('LOD1_2_F0_H0')
CityGMLs['LOD1_2_F0_H1'] = createCityGML('LOD1_2_F0_H1')
CityGMLs['LOD1_2_F0_H2'] = createCityGML('LOD1_2_F0_H2')
CityGMLs['LOD1_2_F0_H3'] = createCityGML('LOD1_2_F0_H3')
if VARIANTS:
CityGMLs['LOD1_2_F0_H4'] = createCityGML('LOD1_2_F0_H4')
CityGMLs['LOD1_2_F0_H5'] = createCityGML('LOD1_2_F0_H5')
CityGMLs['LOD1_2_F0_H6'] = createCityGML('LOD1_2_F0_H6')
CityGMLs['LOD1_2_F0_HAvg'] = createCityGML('LOD1_2_F0_HAvg')
CityGMLs['LOD1_2_F0_HMed'] = createCityGML('LOD1_2_F0_HMed')
if VARIANTS:
CityGMLs['LOD1_2_F1_H0'] = createCityGML('LOD1_2_F1_H0')
CityGMLs['LOD1_2_F1_H1'] = createCityGML('LOD1_2_F1_H1')
CityGMLs['LOD1_2_F1_H2'] = createCityGML('LOD1_2_F1_H2')
CityGMLs['LOD1_2_F1_H3'] = createCityGML('LOD1_2_F1_H3')
CityGMLs['LOD1_2_F1_H4'] = createCityGML('LOD1_2_F1_H4')
CityGMLs['LOD1_2_F1_H5'] = createCityGML('LOD1_2_F1_H5')
CityGMLs['LOD1_2_F1_H6'] = createCityGML('LOD1_2_F1_H6')
CityGMLs['LOD1_2_F1_HAvg'] = createCityGML('LOD1_2_F1_HAvg')
CityGMLs['LOD1_2_F1_HMed'] = createCityGML('LOD1_2_F1_HMed')
if VARIANTS:
CityGMLs['LOD1_2_Fd_H0'] = createCityGML('LOD1_2_Fd_H0')
CityGMLs['LOD1_2_Fd_H1'] = createCityGML('LOD1_2_Fd_H1')
CityGMLs['LOD1_2_Fd_H2'] = createCityGML('LOD1_2_Fd_H2')
CityGMLs['LOD1_2_Fd_H3'] = createCityGML('LOD1_2_Fd_H3')
CityGMLs['LOD1_2_Fd_H4'] = createCityGML('LOD1_2_Fd_H4')
CityGMLs['LOD1_2_Fd_H5'] = createCityGML('LOD1_2_Fd_H5')
CityGMLs['LOD1_2_Fd_H6'] = createCityGML('LOD1_2_Fd_H6')
CityGMLs['LOD1_2_Fd_HAvg'] = createCityGML('LOD1_2_Fd_HAvg')
CityGMLs['LOD1_2_Fd_HMed'] = createCityGML('LOD1_2_Fd_HMed')
if SOLIDS:
if VARIANTS:
CityGMLs['LOD1_2_F0_H0_solid'] = createCityGML('LOD1_2_F0_H0_solid')
CityGMLs['LOD1_2_F0_H1_solid'] = createCityGML('LOD1_2_F0_H1_solid')
CityGMLs['LOD1_2_F0_H2_solid'] = createCityGML('LOD1_2_F0_H2_solid')
CityGMLs['LOD1_2_F0_H3_solid'] = createCityGML('LOD1_2_F0_H3_solid')
if VARIANTS:
CityGMLs['LOD1_2_F0_H4_solid'] = createCityGML('LOD1_2_F0_H4_solid')
CityGMLs['LOD1_2_F0_H5_solid'] = createCityGML('LOD1_2_F0_H5_solid')
CityGMLs['LOD1_2_F0_H6_solid'] = createCityGML('LOD1_2_F0_H6_solid')
CityGMLs['LOD1_2_F0_HAvg_solid'] = createCityGML('LOD1_2_F0_HAvg_solid')
CityGMLs['LOD1_2_F0_HMed_solid'] = createCityGML('LOD1_2_F0_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_2_F1_H0_solid'] = createCityGML('LOD1_2_F1_H0_solid')
CityGMLs['LOD1_2_F1_H1_solid'] = createCityGML('LOD1_2_F1_H1_solid')
CityGMLs['LOD1_2_F1_H2_solid'] = createCityGML('LOD1_2_F1_H2_solid')
CityGMLs['LOD1_2_F1_H3_solid'] = createCityGML('LOD1_2_F1_H3_solid')
CityGMLs['LOD1_2_F1_H4_solid'] = createCityGML('LOD1_2_F1_H4_solid')
CityGMLs['LOD1_2_F1_H5_solid'] = createCityGML('LOD1_2_F1_H5_solid')
CityGMLs['LOD1_2_F1_H6_solid'] = createCityGML('LOD1_2_F1_H6_solid')
CityGMLs['LOD1_2_F1_HAvg_solid'] = createCityGML('LOD1_2_F1_HAvg_solid')
CityGMLs['LOD1_2_F1_HMed_solid'] = createCityGML('LOD1_2_F1_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_2_Fd_H0_solid'] = createCityGML('LOD1_2_Fd_H0_solid')
CityGMLs['LOD1_2_Fd_H1_solid'] = createCityGML('LOD1_2_Fd_H1_solid')
CityGMLs['LOD1_2_Fd_H2_solid'] = createCityGML('LOD1_2_Fd_H2_solid')
CityGMLs['LOD1_2_Fd_H3_solid'] = createCityGML('LOD1_2_Fd_H3_solid')
CityGMLs['LOD1_2_Fd_H4_solid'] = createCityGML('LOD1_2_Fd_H4_solid')
CityGMLs['LOD1_2_Fd_H5_solid'] = createCityGML('LOD1_2_Fd_H5_solid')
CityGMLs['LOD1_2_Fd_H6_solid'] = createCityGML('LOD1_2_Fd_H6_solid')
CityGMLs['LOD1_2_Fd_HAvg_solid'] = createCityGML('LOD1_2_Fd_HAvg_solid')
CityGMLs['LOD1_2_Fd_HMed_solid'] = createCityGML('LOD1_2_Fd_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_2_F0_H0_semantics'] = createCityGML('LOD1_2_F0_H0_semantics')
CityGMLs['LOD1_2_F0_H1_semantics'] = createCityGML('LOD1_2_F0_H1_semantics')
CityGMLs['LOD1_2_F0_H2_semantics'] = createCityGML('LOD1_2_F0_H2_semantics')
CityGMLs['LOD1_2_F0_H3_semantics'] = createCityGML('LOD1_2_F0_H3_semantics')
if VARIANTS:
CityGMLs['LOD1_2_F0_H4_semantics'] = createCityGML('LOD1_2_F0_H4_semantics')
CityGMLs['LOD1_2_F0_H5_semantics'] = createCityGML('LOD1_2_F0_H5_semantics')
CityGMLs['LOD1_2_F0_H6_semantics'] = createCityGML('LOD1_2_F0_H6_semantics')
CityGMLs['LOD1_2_F0_HAvg_semantics'] = createCityGML('LOD1_2_F0_HAvg_semantics')
CityGMLs['LOD1_2_F0_HMed_semantics'] = createCityGML('LOD1_2_F0_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_2_F1_H0_semantics'] = createCityGML('LOD1_2_F1_H0_semantics')
CityGMLs['LOD1_2_F1_H1_semantics'] = createCityGML('LOD1_2_F1_H1_semantics')
CityGMLs['LOD1_2_F1_H2_semantics'] = createCityGML('LOD1_2_F1_H2_semantics')
CityGMLs['LOD1_2_F1_H3_semantics'] = createCityGML('LOD1_2_F1_H3_semantics')
CityGMLs['LOD1_2_F1_H4_semantics'] = createCityGML('LOD1_2_F1_H4_semantics')
CityGMLs['LOD1_2_F1_H5_semantics'] = createCityGML('LOD1_2_F1_H5_semantics')
CityGMLs['LOD1_2_F1_H6_semantics'] = createCityGML('LOD1_2_F1_H6_semantics')
CityGMLs['LOD1_2_F1_HAvg_semantics'] = createCityGML('LOD1_2_F1_HAvg_semantics')
CityGMLs['LOD1_2_F1_HMed_semantics'] = createCityGML('LOD1_2_F1_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_2_Fd_H0_semantics'] = createCityGML('LOD1_2_Fd_H0_semantics')
CityGMLs['LOD1_2_Fd_H1_semantics'] = createCityGML('LOD1_2_Fd_H1_semantics')
CityGMLs['LOD1_2_Fd_H2_semantics'] = createCityGML('LOD1_2_Fd_H2_semantics')
CityGMLs['LOD1_2_Fd_H3_semantics'] = createCityGML('LOD1_2_Fd_H3_semantics')
CityGMLs['LOD1_2_Fd_H4_semantics'] = createCityGML('LOD1_2_Fd_H4_semantics')
CityGMLs['LOD1_2_Fd_H5_semantics'] = createCityGML('LOD1_2_Fd_H5_semantics')
CityGMLs['LOD1_2_Fd_H6_semantics'] = createCityGML('LOD1_2_Fd_H6_semantics')
CityGMLs['LOD1_2_Fd_HAvg_semantics'] = createCityGML('LOD1_2_Fd_HAvg_semantics')
CityGMLs['LOD1_2_Fd_HMed_semantics'] = createCityGML('LOD1_2_Fd_HMed_semantics')
#-- LOD1.3
if VARIANTS:
CityGMLs['LOD1_3_F0_H0'] = createCityGML('LOD1_3_F0_H0')
CityGMLs['LOD1_3_F0_H1'] = createCityGML('LOD1_3_F0_H1')
CityGMLs['LOD1_3_F0_H2'] = createCityGML('LOD1_3_F0_H2')
CityGMLs['LOD1_3_F0_H3'] = createCityGML('LOD1_3_F0_H3')
if VARIANTS:
CityGMLs['LOD1_3_F0_H4'] = createCityGML('LOD1_3_F0_H4')
CityGMLs['LOD1_3_F0_H5'] = createCityGML('LOD1_3_F0_H5')
CityGMLs['LOD1_3_F0_H6'] = createCityGML('LOD1_3_F0_H6')
CityGMLs['LOD1_3_F0_HAvg'] = createCityGML('LOD1_3_F0_HAvg')
CityGMLs['LOD1_3_F0_HMed'] = createCityGML('LOD1_3_F0_HMed')
if VARIANTS:
CityGMLs['LOD1_3_F1_H0'] = createCityGML('LOD1_3_F1_H0')
CityGMLs['LOD1_3_F1_H1'] = createCityGML('LOD1_3_F1_H1')
CityGMLs['LOD1_3_F1_H2'] = createCityGML('LOD1_3_F1_H2')
CityGMLs['LOD1_3_F1_H3'] = createCityGML('LOD1_3_F1_H3')
CityGMLs['LOD1_3_F1_H4'] = createCityGML('LOD1_3_F1_H4')
CityGMLs['LOD1_3_F1_H5'] = createCityGML('LOD1_3_F1_H5')
CityGMLs['LOD1_3_F1_H6'] = createCityGML('LOD1_3_F1_H6')
CityGMLs['LOD1_3_F1_HAvg'] = createCityGML('LOD1_3_F1_HAvg')
CityGMLs['LOD1_3_F1_HMed'] = createCityGML('LOD1_3_F1_HMed')
if VARIANTS:
CityGMLs['LOD1_3_Fd_H0'] = createCityGML('LOD1_3_Fd_H0')
CityGMLs['LOD1_3_Fd_H1'] = createCityGML('LOD1_3_Fd_H1')
CityGMLs['LOD1_3_Fd_H2'] = createCityGML('LOD1_3_Fd_H2')
CityGMLs['LOD1_3_Fd_H3'] = createCityGML('LOD1_3_Fd_H3')
CityGMLs['LOD1_3_Fd_H4'] = createCityGML('LOD1_3_Fd_H4')
CityGMLs['LOD1_3_Fd_H5'] = createCityGML('LOD1_3_Fd_H5')
CityGMLs['LOD1_3_Fd_H6'] = createCityGML('LOD1_3_Fd_H6')
CityGMLs['LOD1_3_Fd_HAvg'] = createCityGML('LOD1_3_Fd_HAvg')
CityGMLs['LOD1_3_Fd_HMed'] = createCityGML('LOD1_3_Fd_HMed')
if SOLIDS:
if VARIANTS:
CityGMLs['LOD1_3_F0_H0_solid'] = createCityGML('LOD1_3_F0_H0_solid')
CityGMLs['LOD1_3_F0_H1_solid'] = createCityGML('LOD1_3_F0_H1_solid')
CityGMLs['LOD1_3_F0_H2_solid'] = createCityGML('LOD1_3_F0_H2_solid')
CityGMLs['LOD1_3_F0_H3_solid'] = createCityGML('LOD1_3_F0_H3_solid')
if VARIANTS:
CityGMLs['LOD1_3_F0_H4_solid'] = createCityGML('LOD1_3_F0_H4_solid')
CityGMLs['LOD1_3_F0_H5_solid'] = createCityGML('LOD1_3_F0_H5_solid')
CityGMLs['LOD1_3_F0_H6_solid'] = createCityGML('LOD1_3_F0_H6_solid')
CityGMLs['LOD1_3_F0_HAvg_solid'] = createCityGML('LOD1_3_F0_HAvg_solid')
CityGMLs['LOD1_3_F0_HMed_solid'] = createCityGML('LOD1_3_F0_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_3_F1_H0_solid'] = createCityGML('LOD1_3_F1_H0_solid')
CityGMLs['LOD1_3_F1_H1_solid'] = createCityGML('LOD1_3_F1_H1_solid')
CityGMLs['LOD1_3_F1_H2_solid'] = createCityGML('LOD1_3_F1_H2_solid')
CityGMLs['LOD1_3_F1_H3_solid'] = createCityGML('LOD1_3_F1_H3_solid')
CityGMLs['LOD1_3_F1_H4_solid'] = createCityGML('LOD1_3_F1_H4_solid')
CityGMLs['LOD1_3_F1_H5_solid'] = createCityGML('LOD1_3_F1_H5_solid')
CityGMLs['LOD1_3_F1_H6_solid'] = createCityGML('LOD1_3_F1_H6_solid')
CityGMLs['LOD1_3_F1_HAvg_solid'] = createCityGML('LOD1_3_F1_HAvg_solid')
CityGMLs['LOD1_3_F1_HMed_solid'] = createCityGML('LOD1_3_F1_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_3_Fd_H0_solid'] = createCityGML('LOD1_3_Fd_H0_solid')
CityGMLs['LOD1_3_Fd_H1_solid'] = createCityGML('LOD1_3_Fd_H1_solid')
CityGMLs['LOD1_3_Fd_H2_solid'] = createCityGML('LOD1_3_Fd_H2_solid')
CityGMLs['LOD1_3_Fd_H3_solid'] = createCityGML('LOD1_3_Fd_H3_solid')
CityGMLs['LOD1_3_Fd_H4_solid'] = createCityGML('LOD1_3_Fd_H4_solid')
CityGMLs['LOD1_3_Fd_H5_solid'] = createCityGML('LOD1_3_Fd_H5_solid')
CityGMLs['LOD1_3_Fd_H6_solid'] = createCityGML('LOD1_3_Fd_H6_solid')
CityGMLs['LOD1_3_Fd_HAvg_solid'] = createCityGML('LOD1_3_Fd_HAvg_solid')
CityGMLs['LOD1_3_Fd_HMed_solid'] = createCityGML('LOD1_3_Fd_HMed_solid')
if VARIANTS:
CityGMLs['LOD1_3_F0_H0_semantics'] = createCityGML('LOD1_3_F0_H0_semantics')
CityGMLs['LOD1_3_F0_H1_semantics'] = createCityGML('LOD1_3_F0_H1_semantics')
CityGMLs['LOD1_3_F0_H2_semantics'] = createCityGML('LOD1_3_F0_H2_semantics')
CityGMLs['LOD1_3_F0_H3_semantics'] = createCityGML('LOD1_3_F0_H3_semantics')
if VARIANTS:
CityGMLs['LOD1_3_F0_H4_semantics'] = createCityGML('LOD1_3_F0_H4_semantics')
CityGMLs['LOD1_3_F0_H5_semantics'] = createCityGML('LOD1_3_F0_H5_semantics')
CityGMLs['LOD1_3_F0_H6_semantics'] = createCityGML('LOD1_3_F0_H6_semantics')
CityGMLs['LOD1_3_F0_HAvg_semantics'] = createCityGML('LOD1_3_F0_HAvg_semantics')
CityGMLs['LOD1_3_F0_HMed_semantics'] = createCityGML('LOD1_3_F0_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_3_F1_H0_semantics'] = createCityGML('LOD1_3_F1_H0_semantics')
CityGMLs['LOD1_3_F1_H1_semantics'] = createCityGML('LOD1_3_F1_H1_semantics')
CityGMLs['LOD1_3_F1_H2_semantics'] = createCityGML('LOD1_3_F1_H2_semantics')
CityGMLs['LOD1_3_F1_H3_semantics'] = createCityGML('LOD1_3_F1_H3_semantics')
CityGMLs['LOD1_3_F1_H4_semantics'] = createCityGML('LOD1_3_F1_H4_semantics')
CityGMLs['LOD1_3_F1_H5_semantics'] = createCityGML('LOD1_3_F1_H5_semantics')
CityGMLs['LOD1_3_F1_H6_semantics'] = createCityGML('LOD1_3_F1_H6_semantics')
CityGMLs['LOD1_3_F1_HAvg_semantics'] = createCityGML('LOD1_3_F1_HAvg_semantics')
CityGMLs['LOD1_3_F1_HMed_semantics'] = createCityGML('LOD1_3_F1_HMed_semantics')
if VARIANTS:
CityGMLs['LOD1_3_Fd_H0_semantics'] = createCityGML('LOD1_3_Fd_H0_semantics')
CityGMLs['LOD1_3_Fd_H1_semantics'] = createCityGML('LOD1_3_Fd_H1_semantics')
CityGMLs['LOD1_3_Fd_H2_semantics'] = createCityGML('LOD1_3_Fd_H2_semantics')
CityGMLs['LOD1_3_Fd_H3_semantics'] = createCityGML('LOD1_3_Fd_H3_semantics')
CityGMLs['LOD1_3_Fd_H4_semantics'] = createCityGML('LOD1_3_Fd_H4_semantics')
CityGMLs['LOD1_3_Fd_H5_semantics'] = createCityGML('LOD1_3_Fd_H5_semantics')
CityGMLs['LOD1_3_Fd_H6_semantics'] = createCityGML('LOD1_3_Fd_H6_semantics')
CityGMLs['LOD1_3_Fd_HAvg_semantics'] = createCityGML('LOD1_3_Fd_HAvg_semantics')
CityGMLs['LOD1_3_Fd_HMed_semantics'] = createCityGML('LOD1_3_Fd_HMed_semantics')
## LOD2
#-- LOD2.0
CityGMLs['LOD2_0_F0'] = createCityGML('LOD2_0_F0')
if VARIANTS:
CityGMLs['LOD2_0_Fd'] = createCityGML('LOD2_0_Fd')
CityGMLs['LOD2_0_F1'] = createCityGML('LOD2_0_F1')
#-- Non semantic version
if SOLIDS:
CityGMLs['LOD2_0_F0_S0'] = createCityGML('LOD2_0_F0_S0')
if VARIANTS:
CityGMLs['LOD2_0_Fd_S0'] = createCityGML('LOD2_0_Fd_S0')
CityGMLs['LOD2_0_F1_S0'] = createCityGML('LOD2_0_F1_S0')
#--Solids
if SOLIDS:
CityGMLs['LOD2_0_F0_solid'] = createCityGML('LOD2_0_F0_solid')
if VARIANTS:
CityGMLs['LOD2_0_Fd_solid'] = createCityGML('LOD2_0_Fd_solid')
CityGMLs['LOD2_0_F1_solid'] = createCityGML('LOD2_0_F1_solid')
#-- LOD2.1
CityGMLs['LOD2_1_F0'] = createCityGML('LOD2_1_F0')
if VARIANTS:
CityGMLs['LOD2_1_Fd'] = createCityGML('LOD2_1_Fd')
CityGMLs['LOD2_1_F1'] = createCityGML('LOD2_1_F1')
#-- Non semantic version
if SOLIDS:
CityGMLs['LOD2_1_F0_S0'] = createCityGML('LOD2_1_F0_S0')
if VARIANTS:
CityGMLs['LOD2_1_Fd_S0'] = createCityGML('LOD2_1_Fd_S0')
CityGMLs['LOD2_1_F1_S0'] = createCityGML('LOD2_1_F1_S0')
#--Solids
if SOLIDS:
CityGMLs['LOD2_1_F0_solid'] = createCityGML('LOD2_1_F0_solid')
if VARIANTS:
CityGMLs['LOD2_1_Fd_solid'] = createCityGML('LOD2_1_Fd_solid')
CityGMLs['LOD2_1_F1_solid'] = createCityGML('LOD2_1_F1_solid')
#-- LOD2.2
CityGMLs['LOD2_2_F0'] = createCityGML('LOD2_2_F0')
if VARIANTS:
CityGMLs['LOD2_2_F1'] = createCityGML('LOD2_2_F1')
CityGMLs['LOD2_2_Fd'] = createCityGML('LOD2_2_Fd')
#-- Non semantic version
if SOLIDS:
CityGMLs['LOD2_2_F0_S0'] = createCityGML('LOD2_2_F0_S0')
if VARIANTS:
CityGMLs['LOD2_2_F1_S0'] = createCityGML('LOD2_2_F1_S0')
CityGMLs['LOD2_2_Fd_S0'] = createCityGML('LOD2_2_Fd_S0')
#--Solids
if SOLIDS:
CityGMLs['LOD2_2_F0_solid'] = createCityGML('LOD2_2_F0_solid')
if VARIANTS:
CityGMLs['LOD2_2_F1_solid'] = createCityGML('LOD2_2_F1_solid')
CityGMLs['LOD2_2_Fd_solid'] = createCityGML('LOD2_2_Fd_solid')
#-- LOD2.3
CityGMLs['LOD2_3_F0'] = createCityGML('LOD2_3_F0')
if VARIANTS:
CityGMLs['LOD2_3_Fd'] = createCityGML('LOD2_3_Fd')
#-- Non semantic version
if SOLIDS:
CityGMLs['LOD2_3_F0_S0'] = createCityGML('LOD2_3_F0_S0')
if VARIANTS:
CityGMLs['LOD2_3_Fd_S0'] = createCityGML('LOD2_3_Fd_S0')
#--Solids
if SOLIDS:
CityGMLs['LOD2_3_F0_solid'] = createCityGML('LOD2_3_F0_solid')
if VARIANTS:
CityGMLs['LOD2_3_Fd_solid'] = createCityGML('LOD2_3_Fd_solid')
#-- LOD2.3 with dormers
if VARIANTS:
CityGMLs['LOD2_3_F0_with_dormers'] = createCityGML('LOD2_3_F0_with_dormers')
CityGMLs['LOD2_3_Fd_with_dormers'] = createCityGML('LOD2_3_Fd_with_dormers')
#-- Non semantic version
if SOLIDS:
CityGMLs['LOD2_3_F0_S0_with_dormers'] = createCityGML('LOD2_3_F0_S0_with_dormers')
if VARIANTS:
CityGMLs['LOD2_3_Fd_S0_with_dormers'] = createCityGML('LOD2_3_Fd_S0_with_dormers')
#--Solids
if SOLIDS:
CityGMLs['LOD2_3_F0_solid_with_dormers'] = createCityGML('LOD2_3_F0_solid_with_dormers')
if VARIANTS:
CityGMLs['LOD2_3_Fd_solid_with_dormers'] = createCityGML('LOD2_3_Fd_solid_with_dormers')
#--LOD3 variants
#--Normal LOD3 with flat openings
CityGMLs['LOD3_2'] = createCityGML('LOD3_2')
#--The best LOD3 model available, with embrasures at openings
CityGMLs['LOD3_3'] = createCityGML('LOD3_3')
# CityGMLs['LOD3BI'] = createCityGML('LOD3BI')
#-- Hybrid models
CityGMLs['LOD3_1'] = createCityGML('LOD3_1')
CityGMLs['LOD3_0'] = createCityGML('LOD3_0')
# CityGMLs['LOD3RF1'] = createCityGML('LOD3RF1')
#-- No semantics
if SOLIDS:
CityGMLs['LOD3_2_S0'] = createCityGML('LOD3_2_S0')
CityGMLs['LOD3_3_S0'] = createCityGML('LOD3_3_S0')
CityGMLs['LOD3_1_S0'] = createCityGML('LOD3_1_S0')
CityGMLs['LOD3_0_S0'] = createCityGML('LOD3_0_S0')
#--Solid counterparts
if SOLIDS:
CityGMLs['LOD3_2_solid'] = createCityGML('LOD3_2_solid')
CityGMLs['LOD3_3_solid'] = createCityGML('LOD3_3_solid')
CityGMLs['LOD3_1_solid'] = createCityGML('LOD3_1_solid')
CityGMLs['LOD3_0_solid'] = createCityGML('LOD3_0_solid')
#-- Interior
CityGMLs['interior-LOD0'] = createCityGML('interior-LOD0')
CityGMLs['interior-LOD1'] = createCityGML('interior-LOD1')
CityGMLs['interior-LOD2_2'] = createCityGML('interior-LOD2_2')
CityGMLs['interior-LOD2_3'] = createCityGML('interior-LOD2_3')
#-- Non-building features
if STREETS:
CityGMLs['Road-LOD0'] = createCityGML('Road-LOD0')
if VEGETATION:
CityGMLs['PlantCover-LOD0'] = createCityGML('PlantCover-LOD0')
CityGMLs['PlantCover-LOD1'] = createCityGML('PlantCover-LOD1')
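#-- Note on the naming convention above (as far as it can be read off the keys
#-- themselves): 'LODa_b' is the refined LOD notation (e.g. LOD1_2 for LOD1.2),
#-- 'F0'/'F1'/'Fd' select the footprint variant, 'H0'..'H6'/'HAvg'/'HMed' the
#-- height variant, and the '_solid', '_semantics' and '_S0' suffixes mark the
#-- solid, semantically structured, and non-semantic counterparts. For example:
#   CityGMLs['LOD1_2_F0_HAvg']   #-> presumably LOD1.2, footprint F0, average height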
#-- Iterate the list of buildings in the XML and extract their data
buildingcounter = 0
print("Constructing buildings and other city objects...")
if REPORT:
fish = ProgressFish(total=len(buildings))
for b in buildings:
#-- Report on the progress
if REPORT:
fish.animate(amount=buildingcounter+1)
buildingcounter += 1
#-- Building UUID
ID = b.attrib['ID']
#-- Origin in (x,y,z) as a list of floats
origin = b.findall('origin')[0]
origin_coords = [float(x) for x in origin.text.split(" ")]
#-- Position in the grid
order = b.findall('order')[0]
order = [int(x) for x in order.text.split(" ")]
#-- Rotation angle
angle_of_rotationXML = b.findall('rotation')[0]
angle_of_rotation = float(angle_of_rotationXML.text)
    #-- Dimensions of the building
    xsize = float(b.findall('xSize')[0].text)
    ysize = float(b.findall('ySize')[0].text)
    zsize = float(b.findall('zSize')[0].text)
    #-- Other building geometric properties
    floors = float(b.findall('floors')[0].text)
    floorHeight = float(b.findall('floorHeight')[0].text)
    embrasure = float(b.findall('embrasure')[0].text)
    wallThickness = float(b.findall('wallThickness')[0].text)
    joist = float(b.findall('joist')[0].text)
#-- Store the attributes
attributes = {}
attrs = b.findall('properties')[0]
attributes['yearOfConstruction'] = str(attrs.findall('yearOfConstruction')[0].text)
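#-- The <usage> element of the XML is mapped to the CityGML attribute 'function'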
attributes['function'] = str(attrs.findall('usage')[0].text)
attributes['storeysAboveGround'] = str(int(floors))
#-- Building part
if BUILDINGPARTS:
bpartXML = b.findall('buildingPart')
if len(bpartXML) > 0:
buildingpart = {}
bpartXML = bpartXML[0]
partType = bpartXML.findall('partType')[0].text
partOrigin = float(bpartXML.findall('partOrigin')[0].text)
width = float(bpartXML.findall('width')[0].text)
length = float(bpartXML.findall('length')[0].text)
height = float(bpartXML.findall('height')[0].text)
buildingpart['o'] = partOrigin
buildingpart['type'] = partType
buildingpart['x'] = width
buildingpart['y'] = length
buildingpart['z'] = height
else:
buildingpart = None
else:
buildingpart = None
#-- Roof
roof = b.findall('roof')[0]
roofType = roof.findall('roofType')[0].text
if roofType == 'Flat':
    h = None
    r = None
elif roofType == 'Hipped' or roofType == 'Pyramidal':
    h = float(roof.findall('h')[0].text)
    r = float(roof.findall('r')[0].text)
else:
    h = float(roof.findall('h')[0].text)
    r = None
#-- Overhangs (read identically for every roof type)
ovh = roof.findall('overhangs')[0]
ovhx = float(ovh.findall('xlength')[0].text)
ovhy = float(ovh.findall('ylength')[0].text)
ovh = [ovhx, ovhy]
#-- Chimney
chimney = []
chimneyXML = roof.findall('chimney')
if len(chimneyXML) > 0:
chimneyXML = chimneyXML[0]
chimneyFace = chimneyXML.findall('side')[0]
chimneyOrigin = chimneyXML.findall('origin')[0]
chimneyOriginX = chimneyOrigin.findall('x')[0]
chimneyOriginX = float(chimneyOriginX.text)
chimneyOriginY = chimneyOrigin.findall('y')[0]
chimneyOriginY = float(chimneyOriginY.text)
chimneySize = chimneyXML.findall('size')[0]
chimneyWidth = chimneySize.findall('width')[0]
chimneyWidth = float(chimneyWidth.text)
chimneyHeight = chimneySize.findall('height')[0]
chimneyHeight = float(chimneyHeight.text)
chimneyDict = {}
chimneyDict['side'] = int(chimneyFace.text)
chimneyDict['origin'] = [chimneyOriginX, chimneyOriginY]
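#-- The chimney cross-section is square, hence the width is used for both planar dimensions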
chimneyDict['size'] = [chimneyWidth, chimneyWidth, chimneyHeight]
chimney.append(chimneyDict)
#-- Door
door = b.findall('door')[0]
doorFace = door.findall('wall')[0]
doorOrigin = door.findall('origin')[0]
doorOriginX = doorOrigin.findall('x')[0]
doorOriginX = float(doorOriginX.text)
doorOriginY = doorOrigin.findall('y')[0]
doorOriginY = float(doorOriginY.text)
doorSize = door.findall('size')[0]
doorWidth = doorSize.findall('width')[0]
doorWidth = float(doorWidth.text)
doorHeight = doorSize.findall('height')[0]
doorHeight = float(doorHeight.text)
doorDict = {}
doorDict['wall'] = int(doorFace.text)
doorDict['origin'] = [doorOriginX, doorOriginY]
doorDict['size'] = [doorWidth, doorHeight]
#-- Wall windows
wallWindows = []
allwindowsXML = b.findall('windows')
if len(allwindowsXML) > 0:
allwindowsXML = allwindowsXML[0]
for winXML in allwindowsXML.findall('window'):
wallWindows.append({
    'wall': int(winXML.findall('wall')[0].text),
    'size': [float(winXML.findall('size')[0].findall('width')[0].text),
             float(winXML.findall('size')[0].findall('height')[0].text)],
    'origin': [float(winXML.findall('origin')[0].findall('x')[0].text),
               float(winXML.findall('origin')[0].findall('y')[0].text)]})
embrasure = float(winXML.findall('depth')[0].text)
else:
embrasure = 0.0
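#-- Note: the <embrasure> value read from the building properties is overridden here,
#-- either by the depth of the (last) window or by 0.0 when there are no windows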
#-- Dormers
dormers = []
alldormersXML = roof.findall('dormers')
if len(alldormersXML) > 0:
alldormersXML = alldormersXML[0]
for dormXML in alldormersXML.findall('dormer'):
dormers.append({
    'side': int(dormXML.findall('side')[0].text),
    'size': [float(dormXML.findall('size')[0].findall('width')[0].text),
             float(dormXML.findall('size')[0].findall('height')[0].text)],
    'origin': [float(dormXML.findall('origin')[0].findall('x')[0].text),
               float(dormXML.findall('origin')[0].findall('y')[0].text)]})
roofWindows = []
allrfwinXML = roof.findall('roofWindows')
if len(allrfwinXML) > 0:
allrfwinXML = allrfwinXML[0]
for rfwinXML in allrfwinXML.findall('roofWindow'):
roofWindows.append({
    'side': int(rfwinXML.findall('side')[0].text),
    'size': [float(rfwinXML.findall('size')[0].findall('width')[0].text),
             float(rfwinXML.findall('size')[0].findall('height')[0].text)],
    'origin': [float(rfwinXML.findall('origin')[0].findall('x')[0].text),
               float(rfwinXML.findall('origin')[0].findall('y')[0].text)]})
#-- Additional data
additional = {'overhangs': ovh, 'embrasure': embrasure}
valueDict = {'ovh': ovh, 'doorDict': doorDict, 'wallWindows': wallWindows,
             'dormers': dormers, 'roofWindows': roofWindows,
             'chimney': chimney, 'embrasure': embrasure}
#-- LOD3 first, because constructing it yields parameters the other LODs need: the absolute height of the chimney, the eaves height, and the corrected overhang lengths
CityGMLs['dummyLOD3'] = createCityGML('dummyLOD3')
chimneyHeight, eaves, ovhy_recalculated = CityGMLbuildingLOD3Semantics(CityGMLs['dummyLOD3'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, valueDict['doorDict'], valueDict['wallWindows'], valueDict['dormers'], valueDict['roofWindows'], valueDict['chimney'], valueDict['embrasure'], 1, None, None)
del CityGMLs['dummyLOD3']
#-- Adjust the footprint to the projection of the roof overhangs (modelling rule F1)
adjorigin = [origin_coords[0]-ovhx, origin_coords[1]-ovhy_recalculated, origin_coords[2]]
adjxsize = xsize + 2 * ovhx
adjysize = ysize + 2 * ovhy_recalculated
adjzsize = zsize - (zsize - eaves)
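#-- (this reduces to the eaves height, since zsize - (zsize - eaves) == eaves)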
#-- Adjust the height of the roof
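#-- Extending the footprint to the overhang projection lowers the walls to the eaves, so the
#-- roof regains the difference (zsize - eaves); a shed roof slopes over the whole footprint
#-- rather than half of it, hence (presumably) the doubled term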
if h is not None:
if roofType == 'Shed':
adjh = h + 2 * (zsize - eaves)
else:
adjh = h + (zsize - eaves)
else:
adjh = None
if r is not None:
adjr = r + ovhy_recalculated
else:
adjr = None
#-- Adjust the footprint to an inward offset from the roof overhang projection (modelling rule Fd)
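#-- The inset from the overhang projection is fixed at 0.2 m (the value this generator assumes for the Fd rule)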
offset = 0.2
#-- Defined here because the coordinates of the roof features have to be adjusted accordingly
adjorigin_offset = [origin_coords[0]-ovhx+offset, origin_coords[1]-ovhy_recalculated+offset, origin_coords[2]]
adjxsize_offset = xsize + 2*(ovhx-offset)
adjysize_offset = ysize + 2*(ovhy_recalculated-offset)
#-- Auxiliary data in a dictionary
aux = {'ovhx': ovhx, 'ovhy': ovhy_recalculated, 'origin': origin_coords,
       'xsize': xsize, 'ysize': ysize, 'zsize': zsize, 'offset': offset,
       'adjxsize_offset': adjxsize_offset, 'adjysize_offset': adjysize_offset}
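#-- The correction eo below follows the roof slope: over the horizontal run ovhx the roof
#-- rises by (zsize - eaves), so an inset of offset recovers a proportional share of that
#-- rise; with no overhang (ovhx == 0.0) the slope is taken from the ridge over half the footprint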
if h is not None:
if offset < ovhx:
eo = (zsize - eaves) * (ovhx - offset) / ovhx
adjzsize_offset = zsize - eo
adjh_offset = h + eo
if roofType == 'Shed':
adjh_offset = h + 2*eo
elif offset == ovhx:
adjzsize_offset = zsize
adjh_offset = h
elif offset > ovhx and ovhx != 0.0:
eo = (zsize - eaves) * (offset/ovhx) - zsize + eaves
adjzsize_offset = zsize + eo
adjh_offset = h - eo
if roofType == 'Shed':
adjh_offset = h - 2*eo
elif ovhx == 0.0:
eo = h * (offset/(xsize*.5))
adjzsize_offset = zsize + eo
adjh_offset = h - eo
if roofType == 'Shed':
adjh_offset = h - 2*eo
else:
adjzsize_offset = zsize
adjh_offset = None
#-- Workaround: build a throwaway model on the offset footprint to recover its eaves height and the recalculated y-overhang of pyramidal and hipped buildings
CityGMLs['dummy'] = createCityGML('dummy')
dummy1, eaves_offset, ovhy_recalculated_offset = CityGMLbuildingLOD3Semantics(CityGMLs['dummy'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], r, valueDict['doorDict'], valueDict['wallWindows'], valueDict['dormers'], valueDict['roofWindows'], valueDict['chimney'], valueDict['embrasure'], 1, aux, buildingpart)
del CityGMLs['dummy']
if r is not None:
if r > 0:
adjr_offset = r + ovhy_recalculated - offset
else:
adjr_offset = 0
else:
adjr_offset = None
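#-- Shift every roof feature (chimneys, dormers, roof windows) so that it keeps its place on the F1 (overhang) and Fd (offset) footprints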
chimney_ovh = []
chimney_offset = []
for chi in chimney:
chi_ovh = copy.deepcopy(chi)
chi_offset = copy.deepcopy(chi)
chi_ovh['origin'] = adjustRoofFeatures(roofType, zsize - eaves, chi['origin'], ovhx, ovhy_recalculated, chi['side'])
chi_offset['origin'] = adjustRoofFeatures(roofType, zsize - adjzsize_offset, chi['origin'], ovhx - offset, ovhy_recalculated - offset, chi['side'])
chimney_ovh.append(chi_ovh)
chimney_offset.append(chi_offset)
dormers_ovh = []
dormers_offset = []
for dor in dormers:
dor_ovh = copy.deepcopy(dor)
dor_offset = copy.deepcopy(dor)
dor_ovh['origin'] = adjustRoofFeatures(roofType, zsize - eaves, dor['origin'], ovhx, ovhy_recalculated, dor['side'])
dor_offset['origin'] = adjustRoofFeatures(roofType, zsize - adjzsize_offset, dor['origin'], ovhx - offset, ovhy_recalculated - offset, dor['side'])
dormers_ovh.append(dor_ovh)
dormers_offset.append(dor_offset)
roofWindows_ovh = []
roofWindows_offset = []
for roofwindow in roofWindows:
roofwindow_ovh = copy.deepcopy(roofwindow)
roofwindow_offset = copy.deepcopy(roofwindow)
roofwindow_ovh['origin'] = adjustRoofFeatures(roofType, zsize - eaves, roofwindow['origin'], ovhx, ovhy_recalculated, roofwindow['side'])
roofwindow_offset['origin'] = adjustRoofFeatures(roofType, zsize - adjzsize_offset, roofwindow['origin'], ovhx - offset, ovhy_recalculated - offset, roofwindow['side'])
roofWindows_ovh.append(roofwindow_ovh)
roofWindows_offset.append(roofwindow_offset)
#-- Geometric reference for the height
if adjh is not None:
onethird = adjh * (1.0/3.0) + adjzsize
half = adjh * .5 + adjzsize
twothird = adjh * (2.0/3.0) + adjzsize
else:
onethird = adjzsize
half = adjzsize
twothird = adjzsize
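#-- Height references for the H-variants generated below (scheme inferred from the keys):
#-- H0 = eaves, H2 = one third of the roof, H3 = half, H4 = two thirds,
#-- H5/H6 = top of the chimney (or eaves when absent); H1 passes 0.0 and defers to the builder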
##-- Start generating the CityGML buildings
#-- Tentative aggregation: a building whose grid position is a multiple of 3 in
#-- both axes stands in for a generalised flat block spanning 3 x 3 grid cells
cellsize = 20.0
if order[0] % 3 == 0 and order[1] % 3 == 0:
xo = order[0] * cellsize
yo = order[1] * cellsize
gxsize = cellsize * 3 - 6.0
gysize = cellsize * 3 - 6.0
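#-- (the 6.0 subtracted above presumably keeps a gap between adjacent generalised blocks)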
gen_roofType = 'Flat'
CityGMLbuildingLOD0(CityGMLs["LOD0_0"], ID, attributes, [xo, yo, 0.0], gxsize, gysize, zsize, h, gen_roofType, None, eaves, '0.0')
CityGMLbuildingLOD1(CityGMLs["LOD1_0_HMin"], ID, attributes, [xo, yo, 0.0], gxsize, gysize, zsize, h, gen_roofType, None, eaves, '1.0') #-- This one is with the eaves
if SOLIDS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_0_HMin_solid"], ID, attributes, [xo, yo, 0.0], gxsize, gysize, zsize, h, gen_roofType, None, eaves, '1.0') #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_0_HMin_semantics"], ID, attributes, [xo, yo, 0.0], gxsize, gysize, zsize, h, gen_roofType, None, eaves, '1.0') #-- This one is with the eaves
#####-- LOD0
##-- LOD0.1
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '0.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '0.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '0.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '0.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '0.1', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '0.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '0.1', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '0.1', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '0.1', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '0.1', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '0.1', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '0.1', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_1_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '0.1', aux, buildingpart, True) #-- This one is with the chimney or eaves
##-- LOD0.2
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '0.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '0.2', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '0.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '0.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '0.2', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '0.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '0.2', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '0.2', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '0.2', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '0.2', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '0.2', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '0.2', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_2_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '0.2', aux, buildingpart, True) #-- This one is with the chimney or eaves
##-- LOD0.3
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '0.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '0.3', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '0.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '0.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '0.3', aux, buildingpart)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '0.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '0.3', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '0.3', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '0.3', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '0.3', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '0.3', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '0.3', aux, buildingpart, True)
CityGMLbuildingLOD0(CityGMLs["LOD0_3_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '0.3', aux, buildingpart, True) #-- This one is with the chimney or eaves
#####-- LOD1
##-- LOD1.1
#- Multisurface (brep)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_1_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Solids
if SOLIDS:
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H1_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H2_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H3_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H4_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H5_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F0_H6_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H0_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H1_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H2_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H3_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H4_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H5_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_F1_H6_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H0_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H1_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H2_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H3_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H4_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H5_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_1_Fd_H6_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Semantics
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H0_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H1_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H2_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H3_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H4_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H5_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F0_H6_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H0_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H1_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H2_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H3_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H4_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H5_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_F1_H6_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H0_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.1', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H1_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H2_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H3_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H4_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H5_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.1', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_1_Fd_H6_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.1', aux, buildingpart, True) #-- This one is with the chimney or eaves
##-- LOD1.2
#- Multisurface (brep)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_2_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Solids
if SOLIDS:
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H1_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H2_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H3_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H4_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H5_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F0_H6_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H0_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H1_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H2_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H3_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H4_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H5_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_F1_H6_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H0_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H1_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H2_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H3_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H4_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H5_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_2_Fd_H6_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Semantics
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H0_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H1_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H2_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H3_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H4_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H5_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F0_H6_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H0_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H1_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H2_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H3_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H4_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H5_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_F1_H6_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H0_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.2', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H1_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H2_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H3_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H4_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H5_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.2', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_2_Fd_H6_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.2', aux, buildingpart, True) #-- This one is with the chimney or eaves
##-- LOD1.3
#- Multisurface (brep)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H1"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H2"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H3"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H4"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H5"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F0_H6"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H0"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H1"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H2"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H3"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H4"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H5"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_F1_H6"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H1"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H2"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H3"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H4"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H5"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1(CityGMLs["LOD1_3_Fd_H6"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Solids
if SOLIDS:
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H1_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H2_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H3_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H4_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H5_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F0_H6_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H0_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H1_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H2_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H3_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H4_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H5_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_F1_H6_solid"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H0_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H1_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H2_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H3_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H4_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H5_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Solid(CityGMLs["LOD1_3_Fd_H6_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart, True) #-- This one is with the chimney or eaves
#- Semantics
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H0_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H1_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H2_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H3_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H4_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H5_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F0_H6_semantics"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H0_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H1_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H2_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H3_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, half, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H4_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H5_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_F1_H6_semantics"], ID, attributes, adjorigin, adjxsize, adjysize, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart) #-- This one is with the chimney or eaves
if VARIANTS:
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H0_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, eaves, '1.3', aux, buildingpart, True) #-- This one is with the eaves
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H1_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 0.0, None, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H2_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, onethird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H3_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, half, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H4_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, twothird, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H5_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, 1, chimneyHeight, '1.3', aux, buildingpart, True)
CityGMLbuildingLOD1Semantics(CityGMLs["LOD1_3_Fd_H6_semantics"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, zsize, h, roofType, None, chimneyHeight, '1.3', aux, buildingpart, True) #-- This one is with the chimney or eaves
#--LOD2
#-LOD2.0
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_0_F0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, '2.0', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_0_F1"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, '2.0', aux, buildingpart)
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_0_Fd"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, '2.0', aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_F0_S0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, 'brep', '2.0', aux, buildingpart)
if SOLIDS:
if VARIANTS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_F1_S0"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, 'brep', '2.0', aux, buildingpart)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_Fd_S0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, 'brep', '2.0', aux, buildingpart, True)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_F0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, 'solid', '2.0', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_F1_solid"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, 'solid', '2.0', aux, buildingpart)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_0_Fd_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, 'solid', '2.0', aux, buildingpart, True)
#-LOD2.1
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_1_F0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, '2.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_1_F1"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, '2.1', aux, buildingpart)
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_1_Fd"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, '2.1', aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_F0_S0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, 'brep', '2.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_F1_S0"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, 'brep', '2.1', aux, buildingpart)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_Fd_S0"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, 'brep', '2.1', aux, buildingpart, True)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_F0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, None, 'solid', '2.1', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_F1_solid"], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, adjr, None, 'solid', '2.1', aux, buildingpart)
CityGMLbuildingLOD2Solid(CityGMLs["LOD2_1_Fd_solid"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, None, 'solid', '2.1', aux, buildingpart, True)
#-LOD2.2
#-Realised with the LOD3 functions, which already support dormers
CityGMLbuildingLOD3Semantics(CityGMLs['LOD2_2_F0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, [0.0, 0.0], r, None, None, dormers, None, None, None, 1, aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_F0_solid'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, [0.0, 0.0], r, None, None, dormers, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_F0_S0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, [0.0, 0.0], r, None, None, dormers, None, None, None, additional, 'brep', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD3Semantics(CityGMLs['LOD2_2_F1'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, None, None, None, 1, aux, buildingpart, True)
CityGMLbuildingLOD3Semantics(CityGMLs['LOD2_2_Fd'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, 1, aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_F1_solid'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_F1_S0'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, None, None, None, additional, 'brep', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_Fd_solid'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_2_Fd_S0'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, additional, 'brep', aux, buildingpart)
#-LOD2.3
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_3_F0"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, r, ovh, '2.3', aux, buildingpart)
if VARIANTS:
CityGMLbuildingLOD2Semantics(CityGMLs["LOD2_3_Fd"], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, adjr_offset, [offset, offset], '2.3', aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_F0_S0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, None, None, None, None, additional, 'brep', aux, buildingpart)
if VARIANTS and SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_Fd_S0'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, None, None, None, None, additional, 'brep', aux, buildingpart)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs["LOD2_3_F0_solid"], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, [0.0, 0.0], r, None, None, None, None, None, None, additional, 'solid', aux, buildingpart)
if VARIANTS and SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_Fd_solid'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, None, None, None, None, additional, 'solid', aux, buildingpart)
#-LOD2.3 with dormers
#-Realised with the LOD3 functions, which already support dormers
if VARIANTS:
CityGMLbuildingLOD3Semantics(CityGMLs['LOD2_3_F0_with_dormers'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, 1, aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_F0_solid_with_dormers'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_F0_S0_with_dormers'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, additional, 'brep', aux, buildingpart)
CityGMLbuildingLOD3Semantics(CityGMLs['LOD2_3_Fd_with_dormers'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, 1, aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_Fd_solid_with_dormers'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD2_3_Fd_S0_with_dormers'], ID, attributes, adjorigin_offset, adjxsize_offset, adjysize_offset, adjzsize_offset, adjh_offset, roofType, [offset, offset], adjr_offset, None, None, dormers_offset, None, None, None, additional, 'brep', aux, buildingpart)
#-- LOD3 variants
#-- LOD3.2
CityGMLbuildingLOD3Semantics(CityGMLs['LOD3_2'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, dormers, roofWindows, chimney, None, 1, aux, buildingpart)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_2_solid'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, additional, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_2_S0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, additional, 'brep', aux, buildingpart)
#-- LOD3.3
CityGMLbuildingLOD3Semantics(CityGMLs['LOD3_3'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, dormers, roofWindows, chimney, embrasure, 1, aux, buildingpart)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_3_solid'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, dormers, roofWindows, chimney, embrasure, 1, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_3_S0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, dormers, roofWindows, chimney, embrasure, 1, 'brep', aux, buildingpart)
#-- Hybrid models
CityGMLbuildingLOD3Semantics(CityGMLs['LOD3_1'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, None, None, None, None, 1, aux, buildingpart)
CityGMLbuildingLOD3Semantics(CityGMLs['LOD3_0'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, roofWindows_ovh, chimney_ovh, None, 1, aux, buildingpart, True)
if SOLIDS:
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_1_solid'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, None, None, None, None, 1, 'solid', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_0_solid'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, roofWindows_ovh, chimney_ovh, None, 1, 'solid', aux, buildingpart, True)
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_1_S0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, None, None, None, None, 1, 'brep', aux, buildingpart)
CityGMLbuildingLOD3Solid(CityGMLs['LOD3_0_S0'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers_ovh, roofWindows_ovh, chimney_ovh, None, 1, 'brep', aux, buildingpart, True)
# #-- BI without structured semantics
# CityGMLbuildingLOD3Semantics(CityGMLs['LOD3BI'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, doorDict, wallWindows, dormers, roofWindows, chimney, embrasure, 0)
# CityGMLbuildingLOD3Semantics(CityGMLs['LOD3RF1'], ID, attributes, adjorigin, adjxsize, adjysize, adjzsize, adjh, roofType, [0.0, 0.0], adjr, None, None, dormers, roofWindows, chimney, embrasure, additional)
# #CityGMLbuildingLOD3Solid(CityGMLs['LOD3-solid'], ID, attributes, origin_coords, xsize, ysize, zsize, h, roofType, ovh, r, None, None, dormers, None, None, None, additional)
#-- Interior
CityGMLbuildingInteriorLOD0(CityGMLs['interior-LOD0'], ID, attributes, origin_coords, xsize, ysize, zsize, h, floors, floorHeight, roofType, r, wallThickness, joist, aux, buildingpart)
CityGMLbuildingInteriorLOD1(CityGMLs['interior-LOD1'], ID, attributes, origin_coords, xsize, ysize, zsize, h, floors, floorHeight, roofType, r, wallThickness, joist, aux, buildingpart)
CityGMLbuildingInteriorLOD2(CityGMLs['interior-LOD2_2'], ID, attributes, origin_coords, xsize, ysize, zsize, h, floors, floorHeight, roofType, r, wallThickness, joist, aux, buildingpart)
CityGMLbuildingInteriorLOD2(CityGMLs['interior-LOD2_3'], ID, attributes, origin_coords, xsize, ysize, zsize, h, floors, floorHeight, roofType, r, wallThickness, joist, aux, buildingpart, dormers)
#-- Perform the rotation of coordinates
if ROTATIONENABLED:
radian_rotation = math.radians(angle_of_rotation)
sine_rotation = math.sin(radian_rotation)
cosine_rotation = math.cos(radian_rotation)
for representation in CityGMLs:
for entity in CityGMLs[representation]:
#-- Iterate cityObjectMembers
if entity.tag == "cityObjectMember":
#-- Select the current one
if entity.getchildren()[0].attrib['{%s}id' % ns_gml] == ID:
#-- Get the building XML node
curr_b_inxml = entity.getchildren()[0]
#-- Store all the <gml:posList> in a list
posList_to_rotate = curr_b_inxml.findall(".//{%s}posList" % ns_gml)
for pos in posList_to_rotate:
points_to_rotate = GMLstring2points(pos.text)
new_rotated_points = ''
for point_to_rotate in points_to_rotate:
rotated_point = rotator(point_to_rotate, sine_rotation, cosine_rotation, origin_coords)
new_rotated_points += GMLPointList(rotated_point) + ' '
pos.text = new_rotated_points[:-1]
#-- End of loop of each building
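    #-- For reference, a minimal sketch of the planar rotation that rotator() is
    #-- assumed to perform about origin_coords (hypothetical illustration only;
    #-- the actual implementation of rotator() may differ):
    #--   x, y = point[0] - origin[0], point[1] - origin[1]
    #--   rotated = [origin[0] + x * cos_a - y * sin_a,
    #--              origin[1] + x * sin_a + y * cos_a, point[2]]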
if STREETS:
for s in streets:
street_outline = s.findall('outline')[0]
street_outline_coors = [float(x) for x in street_outline.text.split(" ")]
street_holes_collection = s.findall('holes')[0]
street_holes = street_holes_collection.findall('hole')
street_data = [street_outline_coors, []]
for street_hole in street_holes:
street_hole_coors = [float(x) for x in street_hole.text.split(" ")]
street_data[1].append(street_hole_coors)
CityGMLstreets(CityGMLs['Road-LOD0'], street_data)
if VEGETATION:
for pccollection in plantcover:
pcs = pccollection.findall('park')
for pc in pcs:
park_outline = pc.findall('outline')[0]
park_outline_coors = [float(x) for x in park_outline.text.split(" ")]
park_height = pc.findall('height')[0].text
pc_data = [park_outline_coors, park_height]
CityGMLplantCoverLOD0(CityGMLs['PlantCover-LOD0'], pc_data)
CityGMLplantCoverLOD1(CityGMLs['PlantCover-LOD1'], pc_data)
#-- Write to file(s)
print("\nGenerated", len(CityGMLs), "CityGML file(s) in the memory. Now writing to disk...")
filecounter = 0
if REPORT:
fish = ProgressFish(total=len(CityGMLs))
for element in CityGMLs:
#-- Report on the progress
if REPORT:
fish.animate(amount=filecounter+1)
filecounter += 1
# print(filecounter, "...", end=' ')
storeCityGML(element)
print("\nWritten the CityGML file(s). Cleaning the memory...")
| tudelft3d/Random3Dcity | generateCityGML.py | Python | mit | 351967 | ["xTB"] | d087b6533209576919e00fdcf957517d5a65dd7b1e55952c7727870babc9cd50 |
""" Schemas for mesoscope scans."""
import datajoint as dj
from datajoint.jobs import key_hash
import matplotlib.pyplot as plt
import numpy as np
import scanreader
from . import experiment, notify, shared
from .utils import galvo_corrections, signal, quality, mask_classification, performance
from .exceptions import PipelineException
schema = dj.schema('pipeline_meso', locals(), create_tables=False)
CURRENT_VERSION = 1
@schema
class Version(dj.Manual):
definition = """ # versions for the meso pipeline
-> shared.PipelineVersion
---
description = '' : varchar(256) # any notes on this version
date = CURRENT_TIMESTAMP : timestamp # automatic
"""
@schema
class ScanInfo(dj.Imported):
definition = """ # general data about mesoscope scans
-> experiment.Scan
-> Version # meso version
---
nfields : tinyint # number of fields
nchannels : tinyint # number of channels
nframes : int # number of recorded frames
nframes_requested : int # number of requested frames (from header)
x : float # (um) ScanImage's 0 point in the motor coordinate system
y : float # (um) ScanImage's 0 point in the motor coordinate system
fps : float # (Hz) frames per second
bidirectional : boolean # true = bidirectional scanning
usecs_per_line : float # microseconds per scan line
fill_fraction : float # raster scan temporal fill fraction (see scanimage)
nrois : tinyint # number of ROIs (see scanimage)
"""
@property
def key_source(self):
meso_scans = experiment.Scan() & (experiment.Session() & {'rig': '2P4'})
return meso_scans * (Version() & {'pipe_version': CURRENT_VERSION})
class Field(dj.Part):
definition = """ # field-specific scan information
-> ScanInfo
-> shared.Field
---
px_height : smallint # height in pixels
px_width : smallint # width in pixels
um_height : float # height in microns
um_width : float # width in microns
x : float # (um) center of field in the motor coordinate system
y : float # (um) center of field in the motor coordinate system
z : float # (um) absolute depth with respect to the surface of the cortex
delay_image : longblob # (ms) delay between the start of the scan and pixels in this field
roi : tinyint # ROI to which this field belongs
        valid_depth=false : boolean # whether depth has been manually checked
"""
def make(self, key, scan, field_id):
# Create results tuple
tuple_ = key.copy()
tuple_['field'] = field_id + 1
# Get attributes
x_zero, y_zero, _ = scan.motor_position_at_zero # motor x, y at ScanImage's 0
surf_z = (experiment.Scan() & key).fetch1('depth') # surface depth in fastZ coordinates
tuple_['px_height'] = scan.field_heights[field_id]
tuple_['px_width'] = scan.field_widths[field_id]
tuple_['um_height'] = scan.field_heights_in_microns[field_id]
tuple_['um_width'] = scan.field_widths_in_microns[field_id]
tuple_['x'] = x_zero + scan._degrees_to_microns(scan.fields[field_id].x)
tuple_['y'] = y_zero + scan._degrees_to_microns(scan.fields[field_id].y)
tuple_['z'] = scan.field_depths[field_id] - surf_z # fastZ only
tuple_['delay_image'] = scan.field_offsets[field_id]
tuple_['roi'] = scan.field_rois[field_id][0]
# Insert
self.insert1(tuple_)
@property
def microns_per_pixel(self):
""" Returns an array with microns per pixel in height and width. """
um_height, px_height, um_width, px_width = self.fetch1('um_height', 'px_height',
'um_width', 'px_width')
return np.array([um_height / px_height, um_width / px_width])
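        # Example (sketch): express a 20 um distance in pixels per dimension,
        # e.g. d_px = 20 / (ScanInfo.Field() & key).microns_per_pixel
        # (`key` here is a hypothetical restriction to a single field).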
def make(self, key):
""" Read and store some scan parameters."""
# Read the scan
print('Reading header...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get attributes
tuple_ = key.copy() # in case key is reused somewhere else
tuple_['nfields'] = scan.num_fields
tuple_['nchannels'] = scan.num_channels
tuple_['nframes'] = scan.num_frames
tuple_['nframes_requested'] = scan.num_requested_frames
tuple_['x'] = scan.motor_position_at_zero[0]
tuple_['y'] = scan.motor_position_at_zero[1]
tuple_['fps'] = scan.fps
tuple_['bidirectional'] = scan.is_bidirectional
tuple_['usecs_per_line'] = scan.seconds_per_line * 1e6
tuple_['fill_fraction'] = scan.temporal_fill_fraction
tuple_['nrois'] = scan.num_rois
tuple_['valid_depth'] = True
# Insert in ScanInfo
self.insert1(tuple_)
# Insert field information
for field_id in range(scan.num_fields):
ScanInfo.Field().make(key, scan, field_id)
# Fill in CorrectionChannel if only one channel
if scan.num_channels == 1:
CorrectionChannel().fill(key)
        # Fill SegmentationTask if the scan is marked for autosegmentation
if experiment.AutoProcessing() & key & {'autosegment': True}:
SegmentationTask().fill(key)
@schema
class Quality(dj.Computed):
definition = """ # different quality metrics for a scan (before corrections)
-> ScanInfo
"""
@property
def key_source(self):
return ScanInfo() & {'pipe_version': CURRENT_VERSION}
class MeanIntensity(dj.Part):
definition = """ # mean intensity values across time
-> Quality
-> shared.Field
-> shared.Channel
---
intensities : longblob
"""
class SummaryFrames(dj.Part):
definition = """ # 16-part summary of the scan (mean of 16 blocks)
-> Quality
-> shared.Field
-> shared.Channel
---
summary : longblob # h x w x 16
"""
class Contrast(dj.Part):
definition = """ # difference between 99 and 1 percentile across time
-> Quality
-> shared.Field
-> shared.Channel
---
contrasts : longblob
"""
class QuantalSize(dj.Part):
definition = """ # quantal size in images
-> Quality
-> shared.Field
-> shared.Channel
---
min_intensity : int # min value in movie
max_intensity : int # max value in movie
quantal_size : float # variance slope, corresponds to quantal size
zero_level : int # level corresponding to zero (computed from variance dependence)
quantal_frame : longblob # average frame expressed in quanta
"""
class EpileptiformEvents(dj.Part):
definition = """ # compute frequency of epileptiform events
-> Quality
-> shared.Field
-> shared.Channel
---
frequency : float # (events / sec) frequency of epileptiform events
abn_indices : longblob # indices of epileptiform events (0-based)
peak_indices : longblob # indices of all local maxima peaks (0-based)
prominences : longblob # peak prominence for all peaks
widths : longblob # (secs) width at half prominence for all peaks
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Insert in Quality
self.insert1(key)
for field_id in range(scan.num_fields):
print('Computing quality metrics for field', field_id + 1)
for channel in range(scan.num_channels):
# Map: Compute quality metrics in parallel
results = performance.map_frames(performance.parallel_quality_metrics,
scan, field_id=field_id, channel=channel)
# Reduce
mean_intensities = np.zeros(scan.num_frames)
contrasts = np.zeros(scan.num_frames)
for frames, chunk_mis, chunk_contrasts, _ in results:
mean_intensities[frames] = chunk_mis
contrasts[frames] = chunk_contrasts
sorted_results = sorted(results, key=lambda res: res[0])
mean_groups = np.array_split([r[3] for r in sorted_results], 16) # 16 groups
frames = np.stack([np.mean(g, axis=0) for g in mean_groups if g.any()], axis=-1)
# Compute quantal size
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, :, :, channel, max(middle_frame - 2000, 0): middle_frame + 2000]
mini_scan = mini_scan.astype(np.float32)
results = quality.compute_quantal_size(mini_scan)
min_intensity, max_intensity, _, _, quantal_size, zero_level = results
quantal_frame = (np.mean(mini_scan, axis=-1) - zero_level) / quantal_size
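                # The quantal size is the slope of the photon-transfer curve
                # (pixel variance vs. pixel mean): for a shot-noise-limited
                # sensor with gain g, var ~= g * (mean - zero_level), so g can
                # be read off a linear fit; quantal_frame re-expresses the
                # average frame in quanta.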
# Compute abnormal event frequency
deviations = (mean_intensities - mean_intensities.mean()) / mean_intensities.mean()
peaks, prominences, widths = quality.find_peaks(deviations)
widths = [w / scan.fps for w in widths] # in seconds
abnormal = peaks[[p > 0.2 and w < 0.4 for p, w in zip(prominences, widths)]]
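                # A peak counts as epileptiform when its prominence exceeds 20%
                # of the mean intensity and it is narrower than 0.4 s at half
                # prominence.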
abnormal_freq = len(abnormal) / (scan.num_frames / scan.fps)
# Insert
field_key = {**key, 'field': field_id + 1, 'channel': channel + 1}
self.MeanIntensity().insert1({**field_key, 'intensities': mean_intensities})
self.Contrast().insert1({**field_key, 'contrasts': contrasts})
self.SummaryFrames().insert1({**field_key, 'summary': frames})
self.QuantalSize().insert1({**field_key, 'min_intensity': min_intensity,
'max_intensity': max_intensity,
'quantal_size': quantal_size,
'zero_level': zero_level,
'quantal_frame': quantal_frame})
self.EpileptiformEvents.insert1({**field_key, 'frequency': abnormal_freq,
'abn_indices': abnormal,
'peak_indices': peaks,
'prominences': prominences,
'widths': widths})
self.notify(field_key, frames, mean_intensities, contrasts)
@notify.ignore_exceptions
def notify(self, key, summary_frames, mean_intensities, contrasts):
# Send summary frames
import imageio
video_filename = '/tmp/' + key_hash(key) + '.gif'
        intensity_ceiling = np.percentile(summary_frames, 99.5)  # clip at the 99.5th percentile
        summary_frames = np.clip(summary_frames, None, intensity_ceiling)
summary_frames = signal.float2uint8(summary_frames).transpose([2, 0, 1])
imageio.mimsave(video_filename, summary_frames, duration=0.4)
msg = ('summary frames for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=video_filename, file_title=msg)
# Send intensity and contrasts
fig, axes = plt.subplots(2, 1, figsize=(15, 8), sharex=True)
axes[0].set_title('Mean intensity', size='small')
axes[0].plot(mean_intensities)
axes[0].set_ylabel('Pixel intensities')
axes[1].set_title('Contrast (99 - 1 percentile)', size='small')
axes[1].plot(contrasts)
axes[1].set_xlabel('Frames')
axes[1].set_ylabel('Pixel intensities')
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('quality traces for {animal_id}-{session}-{scan_idx} field {field} '
'channel {channel}').format(**key)
slack_user.notify(file=img_filename, file_title=msg)
@schema
class CorrectionChannel(dj.Manual):
definition = """ # channel to use for raster and motion correction
-> experiment.Scan
-> shared.Field
---
-> shared.Channel
"""
def fill(self, key, channel=1):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
self.insert1({**field_key, 'channel': channel}, ignore_extra_fields=True,
skip_duplicates=True)
@schema
class RasterCorrection(dj.Computed):
definition = """ # raster correction for bidirectional resonant scans
-> ScanInfo # animal_id, session, scan_idx, version
-> CorrectionChannel # animal_id, session, scan_idx, field
---
raster_template : longblob # average frame from the middle of the movie
raster_phase : float # difference between expected and recorded scan angle
"""
@property
def key_source(self):
return ScanInfo * CorrectionChannel & {'pipe_version': CURRENT_VERSION}
def make(self, key):
from scipy.signal import tukey
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
# Select correction channel
channel = (CorrectionChannel() & key).fetch1('channel') - 1
        field_id = key['field'] - 1
# Load some frames from the middle of the scan
middle_frame = int(np.floor(scan.num_frames / 2))
frames = slice(max(middle_frame - 1000, 0), middle_frame + 1000)
mini_scan = scan[field_id, :, :, channel, frames]
# Create results tuple
tuple_ = key.copy()
# Create template (average frame tapered to avoid edge artifacts)
taper = np.sqrt(np.outer(tukey(scan.field_heights[field_id], 0.4),
tukey(scan.field_widths[field_id], 0.4)))
anscombed = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # anscombe transform
template = np.mean(anscombed, axis=-1) * taper
tuple_['raster_template'] = template
# Compute raster correction parameters
if scan.is_bidirectional:
tuple_['raster_phase'] = galvo_corrections.compute_raster_phase(template,
scan.temporal_fill_fraction)
else:
tuple_['raster_phase'] = 0
# Insert
self.insert1(tuple_)
def get_correct_raster(self):
""" Returns a function to perform raster correction on the scan. """
raster_phase = self.fetch1('raster_phase')
fill_fraction = (ScanInfo() & self).fetch1('fill_fraction')
if abs(raster_phase) < 1e-7:
correct_raster = lambda scan: scan.astype(np.float32, copy=False)
else:
correct_raster = lambda scan: galvo_corrections.correct_raster(scan,
raster_phase, fill_fraction)
return correct_raster
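    # Example usage (sketch; `key` is a hypothetical restriction to one field):
    #   correct_raster = (RasterCorrection() & key).get_correct_raster()
    #   corrected_chunk = correct_raster(scan[field_id, :, :, channel, :1000])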
@schema
class MotionCorrection(dj.Computed):
definition = """ # motion correction for galvo scans
-> RasterCorrection
---
motion_template : longblob # image used as alignment template
y_shifts : longblob # (pixels) y motion correction shifts
x_shifts : longblob # (pixels) x motion correction shifts
y_std : float # (pixels) standard deviation of y shifts
x_std : float # (pixels) standard deviation of x shifts
outlier_frames : longblob # mask with true for frames with outlier shifts (already corrected)
align_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return RasterCorrection() & {'pipe_version': CURRENT_VERSION}
def make(self, key):
"""Computes the motion shifts per frame needed to correct the scan."""
from scipy import ndimage
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Get some params
px_height, px_width = (ScanInfo.Field() & key).fetch1('px_height', 'px_width')
channel = (CorrectionChannel() & key).fetch1('channel') - 1
        field_id = key['field'] - 1
# Load some frames from middle of scan to compute template
skip_rows = int(round(px_height * 0.10)) # we discard some rows/cols to avoid edge artifacts
skip_cols = int(round(px_width * 0.10))
middle_frame = int(np.floor(scan.num_frames / 2))
mini_scan = scan[field_id, skip_rows: -skip_rows, skip_cols: -skip_cols, channel,
max(middle_frame - 1000, 0): middle_frame + 1000]
mini_scan = mini_scan.astype(np.float32, copy=False)
# Correct mini scan
correct_raster = (RasterCorrection() & key).get_correct_raster()
mini_scan = correct_raster(mini_scan)
# Create template
mini_scan = 2 * np.sqrt(mini_scan - mini_scan.min() + 3 / 8) # *
template = np.mean(mini_scan, axis=-1)
template = ndimage.gaussian_filter(template, 0.7) # **
        # * Anscombe transform to normalize noise, increase contrast and decrease outliers' leverage
# ** Small amount of gaussian smoothing to get rid of high frequency noise
# Map: compute motion shifts in parallel
f = performance.parallel_motion_shifts # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'template': template}
results = performance.map_frames(f, scan, field_id=field_id,
y=slice(skip_rows, -skip_rows),
x=slice(skip_cols, -skip_cols), channel=channel,
kwargs=kwargs)
# Reduce
y_shifts = np.zeros(scan.num_frames)
x_shifts = np.zeros(scan.num_frames)
for frames, chunk_y_shifts, chunk_x_shifts in results:
y_shifts[frames] = chunk_y_shifts
x_shifts[frames] = chunk_x_shifts
# Detect outliers
max_y_shift, max_x_shift = 20 / (ScanInfo.Field() & key).microns_per_pixel
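        # (20 um expressed in pixels along each axis; larger shifts are treated
        # as outliers by fix_outliers.)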
y_shifts, x_shifts, outliers = galvo_corrections.fix_outliers(y_shifts, x_shifts,
max_y_shift, max_x_shift)
# Center shifts around zero
y_shifts -= np.median(y_shifts)
x_shifts -= np.median(x_shifts)
# Create results tuple
tuple_ = key.copy()
tuple_['motion_template'] = template
tuple_['y_shifts'] = y_shifts
tuple_['x_shifts'] = x_shifts
tuple_['outlier_frames'] = outliers
tuple_['y_std'] = np.std(y_shifts)
tuple_['x_std'] = np.std(x_shifts)
# Insert
self.insert1(tuple_)
# Notify after all fields have been processed
scan_key = {'animal_id': key['animal_id'], 'session': key['session'],
'scan_idx': key['scan_idx'], 'pipe_version': key['pipe_version']}
        if len(CorrectionChannel - MotionCorrection & scan_key) == 0:
self.notify(scan_key, scan.num_frames, scan.num_fields)
@notify.ignore_exceptions
def notify(self, key, num_frames, num_fields):
fps = (ScanInfo() & key).fetch1('fps')
seconds = np.arange(num_frames) / fps
fig, axes = plt.subplots(num_fields, 1, figsize=(15, 4 * num_fields), sharey=True)
axes = [axes] if num_fields == 1 else axes # make list if single axis object
for i in range(num_fields):
y_shifts, x_shifts = (self & key & {'field': i + 1}).fetch1('y_shifts',
'x_shifts')
axes[i].set_title('Shifts for field {}'.format(i + 1))
axes[i].plot(seconds, y_shifts, label='y shifts')
axes[i].plot(seconds, x_shifts, label='x shifts')
axes[i].set_ylabel('Pixels')
axes[i].set_xlabel('Seconds')
axes[i].legend()
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'motion shifts for {animal_id}-{session}-{scan_idx}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def save_video(self, filename='galvo_corrections.mp4', channel=1, start_index=0,
seconds=30, dpi=250):
""" Creates an animation video showing the original vs corrected scan.
:param string filename: Output filename (path + filename)
:param int channel: What channel from the scan to use. Starts at 1
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and total_num_frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[self.fetch1('field') - 1, :, :, channel - 1, start_index: stop_index]
original_scan = scan_.copy()
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = self.get_correct_motion()
corrected_scan = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
axes[0].set_title('Original')
im1 = axes[0].imshow(original_scan[:, :, 0], vmin=original_scan.min(),
vmax=original_scan.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0])
axes[0].axis('off')
axes[1].set_title('Corrected')
im2 = axes[1].imshow(corrected_scan[:, :, 0], vmin=corrected_scan.min(),
vmax=corrected_scan.max()) # just a placeholder
fig.colorbar(im2, ax=axes[1])
axes[1].axis('off')
## Make the animation
def update_img(i):
im1.set_data(original_scan[:, :, i])
im2.set_data(corrected_scan[:, :, i])
video = animation.FuncAnimation(fig, update_img, corrected_scan.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
def get_correct_motion(self):
""" Returns a function to perform motion correction on scans. """
x_shifts, y_shifts = self.fetch1('x_shifts', 'y_shifts')
return lambda scan, indices=slice(None): galvo_corrections.correct_motion(scan,
x_shifts[indices], y_shifts[indices])
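    # Example usage (sketch; `chunk` and `key` are hypothetical):
    #   correct_motion = (MotionCorrection() & key).get_correct_motion()
    #   corrected = correct_motion(chunk, slice(start_index, stop_index))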
@schema
class SummaryImages(dj.Computed):
definition = """ # summary images for each field and channel after corrections
-> MotionCorrection
-> shared.Channel
"""
@property
def key_source(self):
return MotionCorrection() & {'pipe_version': CURRENT_VERSION}
class Average(dj.Part):
definition = """ # mean of each pixel across time
-> master
---
average_image : longblob
"""
class Correlation(dj.Part):
definition = """ # average temporal correlation between each pixel and its eight neighbors
-> master
---
correlation_image : longblob
"""
class L6Norm(dj.Part):
definition = """ # l6-norm of each pixel across time
-> master
---
l6norm_image : longblob
"""
def make(self, key):
# Read the scan
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
for channel in range(scan.num_channels):
# Map: Compute some statistics in different chunks of the scan
f = performance.parallel_summary_images # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts}
            results = performance.map_frames(f, scan, field_id=key['field'] - 1,
channel=channel, kwargs=kwargs)
# Reduce: Compute average images
average_image = np.sum([r[0] for r in results], axis=0) / scan.num_frames
l6norm_image = np.sum([r[1] for r in results], axis=0) ** (1 / 6)
# Reduce: Compute correlation image
sum_x = np.sum([r[2] for r in results], axis=0) # h x w
sum_sqx = np.sum([r[3] for r in results], axis=0) # h x w
sum_xy = np.sum([r[4] for r in results], axis=0) # h x w x 8
denom_factor = np.sqrt(scan.num_frames * sum_sqx - sum_x ** 2)
corrs = np.zeros(sum_xy.shape)
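            # Each of the four rotations below handles one axial neighbor
            # (channel k) and one diagonal neighbor (channel 4 + k), applying
            # corr(x, y) = (n*sum_xy - sum_x*sum_y) /
            #              sqrt((n*sum_x2 - sum_x^2) * (n*sum_y2 - sum_y^2))
            # between each pixel and its shifted copy.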
for k in [0, 1, 2, 3]:
rotated_corrs = np.rot90(corrs, k=k)
rotated_sum_x = np.rot90(sum_x, k=k)
rotated_dfactor = np.rot90(denom_factor, k=k)
rotated_sum_xy = np.rot90(sum_xy, k=k)
# Compute correlation
rotated_corrs[1:, :, k] = (scan.num_frames * rotated_sum_xy[1:, :, k] -
rotated_sum_x[1:] * rotated_sum_x[:-1]) / \
(rotated_dfactor[1:] * rotated_dfactor[:-1])
rotated_corrs[1:, 1:, 4 + k] = ((scan.num_frames * rotated_sum_xy[1:, 1:, 4 + k] -
rotated_sum_x[1:, 1:] * rotated_sum_x[:-1, : -1]) /
(rotated_dfactor[1:, 1:] * rotated_dfactor[:-1, :-1]))
# Return back to original orientation
corrs = np.rot90(rotated_corrs, k=4 - k)
correlation_image = np.sum(corrs, axis=-1)
norm_factor = 5 * np.ones(correlation_image.shape) # edges
norm_factor[[0, -1, 0, -1], [0, -1, -1, 0]] = 3 # corners
norm_factor[1:-1, 1:-1] = 8 # center
correlation_image /= norm_factor
# Insert
field_key = {**key, 'channel': channel + 1}
self.insert1(field_key)
self.Average().insert1({**field_key, 'average_image': average_image})
self.L6Norm().insert1({**field_key, 'l6norm_image': l6norm_image})
self.Correlation().insert1({**field_key, 'correlation_image': correlation_image})
self.notify(key, scan.num_channels)
@notify.ignore_exceptions
def notify(self, key, num_channels):
fig, axes = plt.subplots(num_channels, 2, squeeze=False, figsize=(12, 5 * num_channels))
axes[0, 0].set_title('L6-Norm', size='small')
axes[0, 1].set_title('Correlation', size='small')
for ax in axes.ravel():
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for channel in range(num_channels):
axes[channel, 0].set_ylabel('Channel {}'.format(channel + 1), size='large',
rotation='horizontal', ha='right')
corr = (SummaryImages.Correlation() & key & {'channel': channel + 1}).fetch1('correlation_image')
l6norm = (SummaryImages.L6Norm() & key & {'channel': channel + 1}).fetch1('l6norm_image')
axes[channel, 0].imshow(l6norm)
axes[channel, 1].imshow(corr)
fig.tight_layout()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'summary images for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
@schema
class SegmentationTask(dj.Manual):
definition = """ # defines the target of segmentation and the channel to use
-> experiment.Scan
-> shared.Field
-> shared.Channel
-> shared.SegmentationMethod
---
-> experiment.Compartment
"""
def fill(self, key, channel=1, segmentation_method=6, compartment='soma'):
for field_key in (ScanInfo.Field() & key).fetch(dj.key):
tuple_ = {**field_key, 'channel': channel, 'compartment': compartment,
'segmentation_method': segmentation_method}
self.insert1(tuple_, ignore_extra_fields=True, skip_duplicates=True)
def estimate_num_components(self):
""" Estimates the number of components per field using simple rules of thumb.
For somatic scans, estimate number of neurons based on:
(100x100x100)um^3 = 1e6 um^3 -> 100 neurons; (1x1x1)mm^3 = 1e9 um^3 -> 100K neurons
For axonal/dendritic scans, just ten times our estimate of neurons.
:returns: Number of components
:rtype: int
"""
# Get field dimensions (in micrometers)
scan = (ScanInfo.Field() & self & {'pipe_version': CURRENT_VERSION})
field_height, field_width = scan.fetch1('um_height', 'um_width')
field_thickness = 10 # assumption
field_volume = field_width * field_height * field_thickness
# Estimate number of components
compartment = self.fetch1('compartment')
if compartment == 'soma':
num_components = field_volume * 0.0001
elif compartment == 'axon':
num_components = field_volume * 0.0005 # five times as many neurons
elif compartment == 'bouton':
num_components = field_volume * 0.001 # ten times as many neurons
else:
            raise PipelineException("Compartment type '{}' not recognized".format(compartment))
return int(round(num_components))
@schema
class DoNotSegment(dj.Manual):
definition = """ # field/channels that should not be segmented (used for web interface only)
-> experiment.Scan
-> shared.Field
-> shared.Channel
"""
@schema
class Segmentation(dj.Computed):
definition = """ # Different mask segmentations.
-> MotionCorrection # animal_id, session, scan_idx, version, field
-> SegmentationTask # animal_id, session, scan_idx, field, channel, segmentation_method
---
segmentation_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return MotionCorrection() * SegmentationTask() & {'pipe_version': CURRENT_VERSION}
class Mask(dj.Part):
definition = """ # mask produced by segmentation.
-> Segmentation
mask_id : smallint
---
pixels : longblob # indices into the image in column major (Fortran) order
weights : longblob # weights of the mask at the indices above
"""
def get_mask_as_image(self):
""" Return this mask as an image (2-d numpy array)."""
# Get params
pixels, weights = self.fetch('pixels', 'weights')
image_height, image_width = (ScanInfo.Field() & self).fetch1('px_height',
'px_width')
# Reshape mask
mask = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
return np.squeeze(mask)
class Manual(dj.Part):
definition = """ # masks created manually
-> Segmentation
"""
def make(self, key):
print('Warning: Manual segmentation is not implemented in Python.')
# Copy any masks (and MaskClassification) that were there before
# Delete key from Segmentation (this is needed for trace and ScanSet and Activity computation to restart when things are added)
# Show GUI with the current masks
# User modifies it somehow to produce the new set of masks
# Insert info in Segmentation -> Segmentation.Manual -> Segmentation.Mask -> MaskClassification -> MaskClassification.Type
# http://scikit-image.org/docs/dev/api/skimage.future.html#manual-lasso-segmentation (Python lasso masks)
class CNMF(dj.Part):
definition = """ # source extraction using constrained non-negative matrix factorization
-> Segmentation
---
        params : varchar(1024) # parameters sent to CNMF as JSON array
"""
def make(self, key):
""" Use CNMF to extract masks and traces.
See caiman_interface.extract_masks for explanation of parameters
"""
from .utils import caiman_interface as cmn
import json
import uuid
import os
print('')
print('*' * 85)
print('Processing {}'.format(key))
# Get some parameters
field_id = key['field'] - 1
channel = key['channel'] - 1
image_height, image_width = (ScanInfo.Field() & key).fetch1('px_height', 'px_width')
num_frames = (ScanInfo() & key).fetch1('nframes')
# Read scan
print('Reading scan...')
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Create memory mapped file (as expected by CaImAn)
print('Creating memory mapped file...')
filename = '/tmp/caiman-{}_d1_{}_d2_{}_d3_1_order_C_frames_{}_.mmap'.format(
uuid.uuid4(), image_height, image_width, num_frames)
mmap_shape = (image_height * image_width, num_frames)
mmap_scan = np.memmap(filename, mode='w+', shape=mmap_shape, dtype=np.float32)
# Map: Correct scan and save in memmap scan
f = performance.parallel_save_memmap # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction,
'y_shifts': y_shifts, 'x_shifts': x_shifts, 'mmap_scan': mmap_scan}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel,
kwargs=kwargs)
# Reduce: Use the minimum values to make memory mapped scan nonnegative
mmap_scan -= np.min(results) # bit inefficient but necessary
# Set CNMF parameters
## Set general parameters
kwargs = {}
kwargs['num_background_components'] = 1
kwargs['merge_threshold'] = 0.7
kwargs['fps'] = (ScanInfo() & key).fetch1('fps')
# Set params specific to method and segmentation target
target = (SegmentationTask() & key).fetch1('compartment')
if key['segmentation_method'] == 2: # nmf
if target == 'axon':
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo.Field() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(2 / (ScanInfo.Field() & key).microns_per_pixel)
else: # soma
kwargs['init_on_patches'] = False
kwargs['num_components'] = (SegmentationTask() & key).estimate_num_components()
kwargs['init_method'] = 'greedy_roi'
kwargs['soma_diameter'] = tuple(14 / (ScanInfo.Field() & key).microns_per_pixel)
else: #nmf-new
kwargs['init_on_patches'] = True
kwargs['proportion_patch_overlap'] = 0.2 # 20% overlap
if target == 'axon':
kwargs['num_components_per_patch'] = 15
kwargs['init_method'] = 'sparse_nmf'
kwargs['snmf_alpha'] = 500 # 10^2 to 10^3.5 is a good range
kwargs['patch_size'] = tuple(50 / (ScanInfo.Field() & key).microns_per_pixel) # 50 x 50 microns
elif target == 'bouton':
kwargs['num_components_per_patch'] = 5
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(20 / (ScanInfo.Field() & key).microns_per_pixel) # 20 x 20 microns
kwargs['soma_diameter'] = tuple(2 / (ScanInfo.Field() & key).microns_per_pixel)
else: # soma
kwargs['num_components_per_patch'] = 6
kwargs['init_method'] = 'greedy_roi'
kwargs['patch_size'] = tuple(50 / (ScanInfo.Field() & key).microns_per_pixel)
kwargs['soma_diameter'] = tuple(8 / (ScanInfo.Field() & key).microns_per_pixel)
## Set performance/execution parameters (heuristically), decrease if memory overflows
kwargs['num_processes'] = 8 # Set to None for all cores available
kwargs['num_pixels_per_process'] = 10000
# Extract traces
print('Extracting masks and traces (cnmf)...')
scan_ = mmap_scan.reshape((image_height, image_width, num_frames), order='F')
cnmf_result = cmn.extract_masks(scan_, mmap_scan, **kwargs)
(masks, traces, background_masks, background_traces, raw_traces) = cnmf_result
# Delete memory mapped scan
print('Deleting memory mapped scan...')
os.remove(mmap_scan.filename)
# Insert CNMF results
print('Inserting masks, background components and traces...')
dj.conn()
## Insert in CNMF, Segmentation and Fluorescence
self.insert1({**key, 'params': json.dumps(kwargs)})
Fluorescence().insert1(key, allow_direct_insert=True) # we also insert traces
## Insert background components
Segmentation.CNMFBackground().insert1({**key, 'masks': background_masks,
'activity': background_traces})
## Insert masks and traces (masks in Matlab format)
num_masks = masks.shape[-1]
masks = masks.reshape(-1, num_masks, order='F').T # [num_masks x num_pixels] in F order
raw_traces = raw_traces.astype(np.float32, copy=False)
for mask_id, mask, trace in zip(range(1, num_masks + 1), masks, raw_traces):
mask_pixels = np.where(mask)[0]
mask_weights = mask[mask_pixels]
mask_pixels += 1 # matlab indices start at 1
Segmentation.Mask().insert1({**key, 'mask_id': mask_id, 'pixels': mask_pixels,
'weights': mask_weights})
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace}, allow_direct_insert=True)
Segmentation().notify(key)
def save_video(self, filename='cnmf_results.mp4', start_index=0, seconds=30,
dpi=250, first_n=None):
""" Creates an animation video showing the results of CNMF.
:param string filename: Output filename (path + filename)
:param int start_index: Where in the scan to start the video.
:param int seconds: How long in seconds should the animation run.
:param int dpi: Dots per inch, controls the quality of the video.
:param int first_n: Draw only the first n components.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get fps and calculate total number of frames
fps = (ScanInfo() & self).fetch1('fps')
num_video_frames = int(round(fps * seconds))
stop_index = start_index + num_video_frames
# Load the scan
channel = self.fetch1('channel') - 1
field_id = self.fetch1('field') - 1
scan_filename = (experiment.Scan() & self).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename, dtype=np.float32)
scan_ = scan[field_id, :, :, channel, start_index: stop_index]
# Correct the scan
correct_raster = (RasterCorrection() & self).get_correct_raster()
correct_motion = (MotionCorrection() & self).get_correct_motion()
scan_ = correct_motion(correct_raster(scan_), slice(start_index, stop_index))
# Get scan dimensions
image_height, image_width, _ = scan_.shape
num_pixels = image_height * image_width
# Get masks and traces
masks = (Segmentation() & self).get_all_masks()
traces = (Fluorescence() & self).get_all_traces()
background_masks, background_traces = (Segmentation.CNMFBackground() &
self).fetch1('masks', 'activity')
# Select first n components
if first_n is not None:
masks = masks[:, :, :first_n]
traces = traces[:first_n, :]
# Drop frames that won't be displayed
traces = traces[:, start_index: stop_index]
background_traces = background_traces[:, start_index: stop_index]
# Create movies
extracted = np.dot(masks.reshape(num_pixels, -1), traces)
extracted = extracted.reshape(image_height, image_width, -1)
background = np.dot(background_masks.reshape(num_pixels, -1), background_traces)
background = background.reshape(image_height, image_width, -1)
residual = scan_ - extracted - background
# Create animation
import matplotlib.animation as animation
## Set the figure
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
axes[0, 0].set_title('Original (Y)')
im1 = axes[0, 0].imshow(scan_[:, :, 0], vmin=scan_.min(), vmax=scan_.max()) # just a placeholder
fig.colorbar(im1, ax=axes[0, 0])
axes[0, 1].set_title('Extracted (A*C)')
im2 = axes[0, 1].imshow(extracted[:, :, 0], vmin=extracted.min(), vmax=extracted.max())
fig.colorbar(im2, ax=axes[0, 1])
axes[1, 0].set_title('Background (B*F)')
im3 = axes[1, 0].imshow(background[:, :, 0], vmin=background.min(),
vmax=background.max())
fig.colorbar(im3, ax=axes[1, 0])
axes[1, 1].set_title('Residual (Y - A*C - B*F)')
im4 = axes[1, 1].imshow(residual[:, :, 0], vmin=residual.min(), vmax=residual.max())
fig.colorbar(im4, ax=axes[1, 1])
for ax in axes.ravel():
ax.axis('off')
## Make the animation
def update_img(i):
im1.set_data(scan_[:, :, i])
im2.set_data(extracted[:, :, i])
im3.set_data(background[:, :, i])
im4.set_data(residual[:, :, i])
video = animation.FuncAnimation(fig, update_img, scan_.shape[2],
interval=1000 / fps)
# Save animation
if not filename.endswith('.mp4'):
filename += '.mp4'
print('Saving video at:', filename)
print('If this takes too long, stop it and call again with dpi <', dpi, '(default)')
video.save(filename, dpi=dpi)
return fig
class CNMFBackground(dj.Part):
definition = """ # inferred background components
-> Segmentation.CNMF
---
masks : longblob # array (im_height x im_width x num_background_components)
activity : longblob # array (num_background_components x timesteps)
"""
def make(self, key):
# Create masks
if key['segmentation_method'] == 1: # manual
Segmentation.Manual().make(key)
elif key['segmentation_method'] in [2, 6]: # nmf
self.insert1(key)
Segmentation.CNMF().make(key)
elif key['segmentation_method'] in [3, 4]: # nmf_patches, nmf-boutons
msg = 'This method has been deprecated, use segmentation_method 6'
raise PipelineException(msg)
else:
msg = 'Unrecognized segmentation method {}'.format(key['segmentation_method'])
raise PipelineException(msg)
@notify.ignore_exceptions
def notify(self, key):
fig = (Segmentation() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'segmentation for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
@staticmethod
def reshape_masks(mask_pixels, mask_weights, image_height, image_width):
""" Reshape masks into an image_height x image_width x num_masks array."""
masks = np.zeros([image_height, image_width, len(mask_pixels)], dtype=np.float32)
# Reshape each mask
for i, (mp, mw) in enumerate(zip(mask_pixels, mask_weights)):
mask_as_vector = np.zeros(image_height * image_width)
mask_as_vector[np.squeeze(mp - 1).astype(int)] = np.squeeze(mw)
masks[:, :, i] = mask_as_vector.reshape(image_height, image_width, order='F')
return masks
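    # Example (sketch; `key`, `px_height` and `px_width` are hypothetical):
    #   pixels, weights = (Segmentation.Mask() & key).fetch1('pixels', 'weights')
    #   img = Segmentation.reshape_masks([pixels], [weights], px_height, px_width)[:, :, 0]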
def get_all_masks(self):
"""Returns an image_height x image_width x num_masks matrix with all masks."""
mask_rel = (Segmentation.Mask() & self)
# Get masks
image_height, image_width = (ScanInfo.Field() & self).fetch1('px_height', 'px_width')
mask_pixels, mask_weights = mask_rel.fetch('pixels', 'weights', order_by='mask_id')
# Reshape masks
masks = Segmentation.reshape_masks(mask_pixels, mask_weights, image_height, image_width)
return masks
def plot_masks(self, threshold=0.97, first_n=None):
""" Draw contours of masks over the correlation image (if available).
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:param first_n: Number of masks to plot. None for all.
:returns Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = self.get_all_masks()
if first_n is not None:
masks = masks[:, :, :first_n]
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
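            # cumsum_mask[p] is the fraction of the mask's total squared weight
            # contributed by all pixels at least as bright as p; contouring it
            # at `threshold` outlines the tightest region holding that fraction
            # of the mask's energy.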
## Plot contour at desired threshold (with random color)
random_color = (np.random.rand(), np.random.rand(), np.random.rand())
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[random_color])
return fig
@schema
class Fluorescence(dj.Computed):
definition = """ # fluorescence traces before spike extraction or filtering
-> Segmentation
"""
@property
def key_source(self):
return Segmentation() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
definition = """
-> Fluorescence
-> Segmentation.Mask
---
trace : longblob
"""
def make(self, key):
# Load scan
print('Reading scan...')
field_id = key['field'] - 1
channel = key['channel'] - 1
scan_filename = (experiment.Scan() & key).local_filenames_as_wildcard
scan = scanreader.read_scan(scan_filename)
# Map: Extract traces
print('Creating fluorescence traces...')
f = performance.parallel_fluorescence # function to map
raster_phase = (RasterCorrection() & key).fetch1('raster_phase')
fill_fraction = (ScanInfo() & key).fetch1('fill_fraction')
y_shifts, x_shifts = (MotionCorrection() & key).fetch1('y_shifts', 'x_shifts')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
kwargs = {'raster_phase': raster_phase, 'fill_fraction': fill_fraction, 'y_shifts': y_shifts,
'x_shifts': x_shifts, 'mask_pixels': pixels, 'mask_weights': weights}
results = performance.map_frames(f, scan, field_id=field_id, channel=channel, kwargs=kwargs)
# Reduce: Concatenate
traces = np.zeros((len(mask_ids), scan.num_frames), dtype=np.float32)
for frames, chunk_traces in results:
traces[:, frames] = chunk_traces
# Insert
self.insert1(key)
for mask_id, trace in zip(mask_ids, traces):
Fluorescence.Trace().insert1({**key, 'mask_id': mask_id, 'trace': trace})
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Fluorescence() & key).get_all_traces().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'calcium traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def get_all_traces(self):
""" Returns a num_traces x num_timesteps matrix with all traces."""
traces = (Fluorescence.Trace() & self).fetch('trace', order_by='mask_id')
return np.array([x.squeeze() for x in traces])
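# Hypothetical usage (the scan key below is made up): fetch every trace of one
# field as a num_traces x num_timesteps array and plot the first cell.
# key = {'animal_id': 0, 'session': 1, 'scan_idx': 1, 'field': 1, 'channel': 1}
# traces = (Fluorescence() & key).get_all_traces()
# plt.plot(traces[0]); plt.show()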
@schema
class MaskClassification(dj.Computed):
definition = """ # classification of segmented masks.
-> Segmentation # animal_id, session, scan_idx, pipe_version, field, channel, segmentation_method
-> shared.ClassificationMethod
---
classif_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return (Segmentation() * shared.ClassificationMethod() &
{'pipe_version': CURRENT_VERSION})
class Type(dj.Part):
definition = """
-> MaskClassification
-> Segmentation.Mask
---
-> shared.MaskType
"""
def make(self, key):
# Skip axonal scans
target = (SegmentationTask() & key).fetch1('compartment')
if key['classification_method'] == 2 and target != 'soma':
print('Warning: Skipping {}. Automatic classification works only with somatic '
'scans'.format(key))
return
# Get masks
image_height, image_width = (ScanInfo.Field() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Classify masks
if key['classification_method'] == 1: # manual
if not SummaryImages() & key:
msg = 'Need to populate SummaryImages before manual mask classification'
raise PipelineException(msg)
template = (SummaryImages.Correlation() & key).fetch1('correlation_image')
masks = masks.transpose([2, 0, 1]) # num_masks, image_height, image_width
mask_types = mask_classification.classify_manual(masks, template)
elif key['classification_method'] == 2: # cnn-caiman
from .utils import caiman_interface as cmn
soma_diameter = tuple(14 / (ScanInfo.Field() & key).microns_per_pixel)
probs = cmn.classify_masks(masks, soma_diameter)
mask_types = ['soma' if prob > 0.75 else 'artifact' for prob in probs]
else:
msg = 'Unrecognized classification method {}'.format(key['classification_method'])
raise PipelineException(msg)
print('Generated types:', mask_types)
# Insert results
self.insert1(key)
for mask_id, mask_type in zip(mask_ids, mask_types):
MaskClassification.Type().insert1({**key, 'mask_id': mask_id, 'type': mask_type})
self.notify(key, mask_types)
@notify.ignore_exceptions
def notify(self, key, mask_types):
fig = (MaskClassification() & key).plot_masks()
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = ('mask classification for {animal_id}-{session}-{scan_idx} field {field}: '
'{somas} somas and {arts} artifacts').format(**key,
somas=mask_types.count('soma'), arts=mask_types.count('artifact'))
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg, channel='#pipeline_quality')
def plot_masks(self, threshold=0.99):
""" Draw contours of masks over the correlation image (if available) with different
colors per type
:param threshold: Threshold on the cumulative mass to define mask contours. Lower
for tighter contours.
:returns: Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get masks
masks = (Segmentation() & self).get_all_masks()
mask_types = (MaskClassification.Type() & self).fetch('type')
colormap = {'soma': 'b', 'axon': 'k', 'dendrite': 'c', 'neuropil': 'y',
'artifact': 'r', 'unknown': 'w'}
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
background_image = np.zeros(masks.shape[:-1])
# Plot background
image_height, image_width, num_masks = masks.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
# Draw contours
cumsum_mask = np.empty([image_height, image_width])
for i in range(num_masks):
mask = masks[:, :, i]
color = colormap[mask_types[i]]
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0), mask.shape) # max to min value in mask
cumsum_mask[indices] = np.cumsum(mask[indices]**2) / np.sum(mask**2)
## Plot contour at desired threshold
plt.contour(cumsum_mask, [threshold], linewidths=0.8, colors=[color])
return fig
@schema
class ScanSet(dj.Computed):
definition = """ # set of all units in the same scan
-> Fluorescence # processing done per field
"""
@property
def key_source(self):
return Fluorescence() & {'pipe_version': CURRENT_VERSION}
class Unit(dj.Part):
definition = """ # single unit in the scan
-> ScanInfo
-> shared.SegmentationMethod
unit_id : int # unique per scan & segmentation method
---
-> ScanSet # for it to act as a part table of ScanSet
-> Fluorescence.Trace
"""
class UnitInfo(dj.Part):
definition = """ # unit type, coordinates and delay time
-> ScanSet.Unit
---
um_x : smallint # x-coordinate of centroid in motor coordinate system
um_y : smallint # y-coordinate of centroid in motor coordinate system
um_z : smallint # z-coordinate of mask relative to surface of the cortex
px_x : smallint # x-coordinate of centroid in the frame
px_y : smallint # y-coordinate of centroid in the frame
ms_delay : smallint # (ms) delay from start of frame to recording of this unit
"""
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
def make(self, key):
from pipeline.utils import caiman_interface as cmn
# Get masks
image_height, image_width = (ScanInfo.Field() & key).fetch1('px_height', 'px_width')
mask_ids, pixels, weights = (Segmentation.Mask() & key).fetch('mask_id', 'pixels', 'weights')
masks = Segmentation.reshape_masks(pixels, weights, image_height, image_width)
# Compute units' coordinates
px_center = [image_height / 2, image_width / 2]
um_center = (ScanInfo.Field() & key).fetch1('y', 'x')
um_z = (ScanInfo.Field() & key).fetch1('z')
px_centroids = cmn.get_centroids(masks)
um_centroids = um_center + (px_centroids - px_center) * (ScanInfo.Field() & key).microns_per_pixel
# Compute units' delays
delay_image = (ScanInfo.Field() & key).fetch1('delay_image')
delays = (np.sum(masks * np.expand_dims(delay_image, -1), axis=(0, 1)) /
np.sum(masks, axis=(0, 1)))
delays = np.round(delays * 1e3).astype(np.int16) # in milliseconds
# Get next unit_id for scan
unit_rel = (ScanSet.Unit().proj() & key)
unit_id = np.max(unit_rel.fetch('unit_id')) + 1 if unit_rel else 1
# Insert in ScanSet
self.insert1(key)
# Insert units
unit_ids = range(unit_id, unit_id + len(mask_ids)) # one id per mask
for unit_id, mask_id, (um_y, um_x), (px_y, px_x), delay in zip(unit_ids, mask_ids,
um_centroids, px_centroids, delays):
ScanSet.Unit().insert1({**key, 'unit_id': unit_id, 'mask_id': mask_id})
unit_info = {**key, 'unit_id': unit_id, 'um_x': um_x, 'um_y': um_y,
'um_z': um_z, 'px_x': px_x, 'px_y': px_y, 'ms_delay': delay}
ScanSet.UnitInfo().insert1(unit_info, ignore_extra_fields=True) # ignore field and channel
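# Toy sketch (made-up numbers, not pipeline code) of the delay computation
# above: each mask's delay is the average of delay_image over its footprint,
# weighted by the mask itself.
import numpy as np
_delay_image = np.array([[0., 2.], [4., 6.]])  # per-pixel delays (same units as delay_image)
_demo_masks = np.zeros([2, 2, 1])
_demo_masks[:, :, 0] = [[1., 1.], [0., 0.]]    # one mask covering the top row
_demo_delays = (np.sum(_demo_masks * np.expand_dims(_delay_image, -1), axis=(0, 1)) /
                np.sum(_demo_masks, axis=(0, 1)))
# _demo_delays == array([1.]): the two covered pixels (0 and 2) average to 1.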
def plot_centroids(self, first_n=None):
""" Draw masks centroids over the correlation image. Works on a single field/channel
:param first_n: Number of masks to plot. None for all
:returns: Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
# Get centroids
centroids = self.get_all_centroids(centroid_type='px')
if first_n is not None:
centroids = centroids[:, :first_n] # select first n components
# Get correlation image if defined, black background otherwise.
image_rel = SummaryImages.Correlation() & self
if image_rel:
background_image = image_rel.fetch1('correlation_image')
else:
image_height, image_width = (ScanInfo.Field() & self).fetch1('px_height', 'px_width')
background_image = np.zeros([image_height, image_width])
# Plot centroids
image_height, image_width = background_image.shape
figsize = np.array([image_width, image_height]) / min(image_height, image_width)
fig = plt.figure(figsize=figsize * 7)
plt.imshow(background_image)
plt.plot(centroids[:, 0], centroids[:, 1], 'ow', markersize=3)
return fig
def plot_centroids3d(self):
""" Plots the centroids of all units in the motor coordinate system (in microns)
:returns: Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
from mpl_toolkits.mplot3d import Axes3D
# Get centroids
centroids = self.get_all_centroids()
# Plot
# TODO: Add different colors for different types, correlation image as 2-d planes
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(centroids[:, 0], centroids[:, 1], centroids[:, 2])
ax.invert_zaxis()
ax.set_xlabel('x (um)')
ax.set_ylabel('y (um)')
ax.set_zlabel('z (um)')
return fig
def get_all_centroids(self, centroid_type='um'):
""" Returns the centroids for all units in ScanSet (could be limited to field).
Centroid type is either 'um' or 'px':
'um': Array (num_units x 3) with x, y, z in motor coordinate system (microns).
'px': Array (num_units x 2) with x, y pixel coordinates.
"""
units_rel = ScanSet.UnitInfo() & (ScanSet.Unit() & self)
if centroid_type == 'um':
xs, ys, zs = units_rel.fetch('um_x', 'um_y', 'um_z', order_by='unit_id')
centroids = np.stack([xs, ys, zs], axis=1)
else:
xs, ys = units_rel.fetch('px_x', 'px_y', order_by='unit_id')
centroids = np.stack([xs, ys], axis=1)
return centroids
@schema
class Activity(dj.Computed):
definition = """ # activity inferred from fluorescence traces
-> ScanSet # processing done per field
-> shared.SpikeMethod
---
activity_time=CURRENT_TIMESTAMP : timestamp # automatic
"""
@property
def key_source(self):
return ScanSet() * shared.SpikeMethod() & {'pipe_version': CURRENT_VERSION}
class Trace(dj.Part):
definition = """ # deconvolved calcium acitivity
-> ScanSet.Unit
-> shared.SpikeMethod
---
-> Activity # for it to act as part table of Activity
trace : longblob
"""
class ARCoefficients(dj.Part):
definition = """ # fitted parameters for the autoregressive process (nmf deconvolution)
-> Activity.Trace
---
g : blob # g1, g2, ... coefficients for the AR process
"""
def make(self, key):
print('Creating activity traces for', key)
# Get fluorescence
fps = (ScanInfo() & key).fetch1('fps')
unit_ids, traces = (ScanSet.Unit() * Fluorescence.Trace() & key).fetch('unit_id', 'trace')
full_traces = [signal.fill_nans(np.squeeze(trace).copy()) for trace in traces]
# Insert in Activity
self.insert1(key)
if key['spike_method'] == 2: # oopsi
import pyfnnd # Install from https://github.com/cajal/PyFNND.git
for unit_id, trace in zip(unit_ids, full_traces):
spike_trace = pyfnnd.deconvolve(trace, dt=1 / fps)[0].astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 3: # stm
import c2s # Install from https://github.com/lucastheis/c2s
for unit_id, trace in zip(unit_ids, full_traces):
start = signal.notnan(trace)
end = signal.notnan(trace, len(trace) - 1, increment=-1)
trace_dict = {'calcium': np.atleast_2d(trace[start:end + 1]), 'fps': fps}
data = c2s.predict(c2s.preprocess([trace_dict], fps=fps), verbosity=0)
spike_trace = np.squeeze(data[0].pop('predictions')).astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
elif key['spike_method'] == 5: # nmf
from pipeline.utils import caiman_interface as cmn
import multiprocessing as mp
with mp.Pool(10) as pool:
results = pool.map(cmn.deconvolve, full_traces)
for unit_id, (spike_trace, ar_coeffs) in zip(unit_ids, results):
spike_trace = spike_trace.astype(np.float32, copy=False)
Activity.Trace().insert1({**key, 'unit_id': unit_id, 'trace': spike_trace})
Activity.ARCoefficients().insert1({**key, 'unit_id': unit_id, 'g': ar_coeffs},
ignore_extra_fields=True)
else:
msg = 'Unrecognized spike method {}'.format(key['spike_method'])
raise PipelineException(msg)
self.notify(key)
@notify.ignore_exceptions
def notify(self, key):
fig = plt.figure(figsize=(15, 4))
plt.plot((Activity() & key).get_all_spikes().T)
img_filename = '/tmp/' + key_hash(key) + '.png'
fig.savefig(img_filename, bbox_inches='tight')
plt.close(fig)
msg = 'spike traces for {animal_id}-{session}-{scan_idx} field {field}'.format(**key)
slack_user = notify.SlackUser() & (experiment.Session() & key)
slack_user.notify(file=img_filename, file_title=msg)
def plot_impulse_responses(self, num_timepoints=100):
""" Plots the impulse response functions for all traces.
:param int num_timepoints: The number of points after impulse to use for plotting.
:returns: Figure. You can call show() on it.
:rtype: matplotlib.figure.Figure
"""
ar_rel = Activity.ARCoefficients() & (Activity.Trace() & self)
if ar_rel: # if an AR model was used
# Get some params
fps = (ScanInfo() & self).fetch1('fps')
ar_coeffs = ar_rel.fetch('g')
# Define the figure
fig = plt.figure()
x_axis = np.arange(num_timepoints) / fps # make it seconds
# Over each trace
for g in ar_coeffs:
AR_order = len(g)
# Calculate impulse response function
irf = np.zeros(num_timepoints)
irf[0] = 1 # initial spike
for i in range(1, num_timepoints):
if i <= AR_order: # start of the array needs special care
irf[i] = np.sum(g[:i] * irf[i - 1:: -1])
else:
irf[i] = np.sum(g * irf[i - 1: i - AR_order - 1: -1])
# Plot
plt.plot(x_axis, irf)
plt.xlabel('Seconds')
return fig
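# Standalone sketch (made-up coefficients, not pipeline code) of the impulse
# response recursion above, for a single stable AR(2) model:
import numpy as np
_g = np.array([1.2, -0.35])  # AR coefficients; roots 0.7 and 0.5, so stable
_irf = np.zeros(10)
_irf[0] = 1.0  # unit spike at t = 0
for _t in range(1, 10):
    if _t <= len(_g):  # early samples only see part of the history
        _irf[_t] = np.sum(_g[:_t] * _irf[_t - 1::-1])
    else:
        _irf[_t] = np.sum(_g * _irf[_t - 1:_t - len(_g) - 1:-1])
# _irf == [1.0, 1.2, 1.09, 0.888, ...], decaying like a calcium transient.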
def get_all_spikes(self):
""" Returns a num_traces x num_timesteps matrix with all spikes."""
spikes = (Activity.Trace() & self).fetch('trace', order_by='unit_id')
return np.array([x.squeeze() for x in spikes])
@schema
class ScanDone(dj.Computed):
definition = """ # scans that are fully processed (updated every time a field is added)
-> ScanInfo
-> shared.SegmentationMethod
-> shared.SpikeMethod
"""
@property
def key_source(self):
return Activity() & {'pipe_version': CURRENT_VERSION}
@property
def target(self):
return ScanDone.Partial() # trigger make_tuples for fields in Activity that aren't in ScanDone.Partial
def _job_key(self, key):
# Force reservation key to be per scan so diff fields are not run in parallel
return {k: v for k, v in key.items() if k not in ['field', 'channel']}
class Partial(dj.Part):
definition = """ # fields that have been processed in the current scan
-> ScanDone
-> Activity
"""
def make(self, key):
scan_key = {k: v for k, v in key.items() if k in self.heading}
# Delete current ScanDone entry
with dj.config(safemode=False):
(ScanDone() & scan_key).delete()
# Reinsert in ScanDone
self.insert1(scan_key)
# Insert all processed fields in Partial
ScanDone.Partial().insert((Activity() & scan_key).proj())
from . import stack
@schema
class StackCoordinates(dj.Computed):
definition = """ # centroids of each unit in motor/stack coordinate system
-> ScanSet # animal_id, session, scan_idx, channel, field, segmentation_method, pipe_version
-> stack.Registration.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method
"""
class UnitInfo(dj.Part):
definition = """ # ScanSet.UnitInfo centroids mapped to stack coordinates
-> master # this will add field and channels back
-> ScanSet.Unit
---
stack_x : float
stack_y : float
stack_z : float
"""
def make(self, key):
from scipy import ndimage
# Get registration grid (px -> stack_coordinate)
stack_key = {**key, 'scan_session': key['session']}
field_res = (ScanInfo.Field & key).microns_per_pixel
grid = (stack.Registration & stack_key).get_grid(type='affine',
desired_res=field_res)
self.insert1(key)
field_units = ScanSet.UnitInfo & (ScanSet.Unit & key)
for unit_key, px_x, px_y in zip(*field_units.fetch('KEY', 'px_x', 'px_y')):
px_coords = np.array([[px_y], [px_x]])
unit_x, unit_y, unit_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
StackCoordinates.UnitInfo.insert1({**key, **unit_key, 'stack_x': unit_x,
'stack_y': unit_y, 'stack_z': unit_z})
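# Minimal sketch (toy grid, not pipeline code) of the map_coordinates call
# above: order=1 samples the registration grid bilinearly at a fractional
# pixel position.
import numpy as np
from scipy import ndimage
_demo_grid = np.zeros([4, 4, 3])
_demo_grid[..., 0] = np.arange(4)           # stack_x grows along columns
_demo_grid[..., 1] = np.arange(4)[:, None]  # stack_y grows along rows
_demo_grid[..., 2] = 7.0                    # a flat depth plane
_demo_px = np.array([[1.5], [0.5]])         # (row, col) of one unit centroid
_demo_xyz = [ndimage.map_coordinates(_demo_grid[..., i], _demo_px, order=1)[0] for i in range(3)]
# _demo_xyz == [0.5, 1.5, 7.0]: the interpolated stack coordinates of that pixel.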
anatomy = dj.create_virtual_module('pipeline_anatomy', 'pipeline_anatomy')
@schema
class AreaMembership(dj.Computed):
definition = """ # cell membership in visual areas according to stack registered retinotopy
ret_hash : varchar(32) # single attribute representation of retinotopic map key
-> StackCoordinates
---
"""
class UnitInfo(dj.Part):
definition = """ # confidence in area assignment per unit according to stack coordinates
-> master
-> anatomy.Area
-> StackCoordinates.UnitInfo
---
confidence : float # confidence in area assignment
"""
@property
def key_source(self):
ret_rel = stack.Area.proj(ret_session='scan_session',
ret_scan_idx='scan_idx',
ret_channel='scan_channel')
key_source = ret_rel * StackCoordinates
heading_str = list(self.heading)
return dj.U(*heading_str) & key_source
def make(self, key):
from scipy.interpolate import griddata
mask_rel = stack.Area.Mask.proj('mask',
ret_session='scan_session',
ret_scan_idx='scan_idx',
ret_channel='scan_channel')
mask_keys, masks = (mask_rel & key).fetch('KEY', 'mask')
fetch_str = ['x', 'y', 'um_width', 'um_height', 'px_width', 'px_height']
stack_rel = stack.CorrectedStack.proj(*fetch_str, stack_session='session') & key
cent_x, cent_y, um_w, um_h, px_w, px_h = stack_rel.fetch1(*fetch_str)
stack_edges = np.array((cent_x - um_w / 2, cent_y - um_h / 2))
stack_px_dims = np.array((px_w, px_h))
stack_um_dims = np.array((um_w, um_h))
stack_px_grid = np.meshgrid(*[np.arange(d) + 0.5 for d in stack_px_dims])
ks, sxs, sys = (StackCoordinates.UnitInfo & key).fetch('KEY', 'stack_x', 'stack_y')
pxs, pys = [np.array((coord - edge) * px_per_um) for coord, edge, px_per_um
in zip((sxs, sys), stack_edges, stack_px_dims / stack_um_dims)]
unit_tups = []
for mask_key, mask in zip(mask_keys, masks):
grid_locs = np.array([grid.ravel() for grid in stack_px_grid]).T
grid_vals = mask.ravel()
grid_query = np.vstack((pxs, pys)).T
confs = griddata(grid_locs, grid_vals, grid_query, method='nearest')
mems = confs > 0
unit_tups.append([{**mask_key, **k, 'confidence': conf} for k, conf in zip(np.array(ks)[mems], confs[mems])])
self.insert1(key)
self.UnitInfo.insert(np.concatenate(unit_tups), ignore_extra_fields=True)
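# Illustrative sketch (made-up numbers) of the nearest-neighbour lookup above:
# each unit's stack-pixel position is assigned the value of the closest pixel
# of an area mask.
import numpy as np
from scipy.interpolate import griddata
_locs = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])  # mask pixel centers
_vals = np.array([0., 0., 1., 1.])                          # mask confidence per pixel
_units = np.array([[0.9, 0.2], [0.1, 0.8]])                 # unit positions in stack pixels
griddata(_locs, _vals, _units, method='nearest')            # -> array([1., 0.])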
@schema
class Func2StructMatching(dj.Computed):
definition = """ # match functional masks to structural masks
-> ScanSet # animal_id, session, scan_idx, pipe_version, field, channel
-> stack.FieldSegmentation.proj(session='scan_session') # animal_id, stack_session, stack_idx, volume_id, session, scan_idx, field, stack_channel, scan_channel, registration_method, stacksegm_channel, stacksegm_method
---
key_hash : varchar(32) # single attribute representation of the key (used to avoid going over 16 attributes in the key)
"""
class AllMatches(dj.Part):
definition = """ # store all possible matches (one functional cell could match with more than one structural mask and viceversa)
key_hash : varchar(32) # master key
unit_id : int # functional unit id
sunit_id : int # structural unit id
---
iou : float # intersection-over-union of the 2-d masks
"""
# Used key_hash because key using ScanSet.Unit, FieldSegmentation.StackUnit has
# more than 16 attributes and MySQL complains. I added the foreign key constraints
# manually
class Match(dj.Part):
definition = """ # match of a functional mask to a structural mask (1:1 relation)
-> master
-> ScanSet.Unit
---
-> stack.FieldSegmentation.StackUnit.proj(session='scan_session')
iou : float # Intersection-over-Union of the 2-d masks
distance2d : float # distance between centroid of 2-d masks
distance3d : float # distance between functional centroid and structural centroid
"""
def make(self, key):
from .utils import registration
from scipy import ndimage
# Get caiman masks and resize them
field_dims = (ScanInfo.Field & key).fetch1('um_height', 'um_width')
masks = np.moveaxis((Segmentation & key).get_all_masks(), -1, 0)
masks = np.stack([registration.resize(m, field_dims, desired_res=1) for m in
masks])
scansetunit_keys = (ScanSet.Unit & key).fetch('KEY', order_by='mask_id')
# Binarize masks
binary_masks = np.zeros(masks.shape, dtype=bool)
for i, mask in enumerate(masks):
## Compute cumulative mass (similar to caiman)
indices = np.unravel_index(np.flip(np.argsort(mask, axis=None), axis=0),
mask.shape) # max to min value in mask
cumsum_mask = np.cumsum(mask[indices] ** 2) / np.sum(mask ** 2) # + 1e-9)
binary_masks[i][indices] = cumsum_mask < 0.9
# Get structural segmentation and registration grid
stack_key = {**key, 'scan_session': key['session']}
segmented_field = (stack.FieldSegmentation & stack_key).fetch1('segm_field')
grid = (stack.Registration & stack_key).get_grid(type='affine', desired_res=1)
sunit_ids = (stack.FieldSegmentation.StackUnit & stack_key).fetch('sunit_id',
order_by='sunit_id')
# Create matrix with IOU values (rows for structural units, columns for functional units)
ious = []
for sunit_id in sunit_ids:
binary_sunit = segmented_field == sunit_id
intersection = np.logical_and(binary_masks, binary_sunit).sum(
axis=(1, 2)) # num_masks
union = np.logical_or(binary_masks, binary_sunit).sum(
axis=(1, 2)) # num_masks
ious.append(intersection / union)
iou_matrix = np.stack(ious)
# Save all possible matches / iou_matrix > 0
self.insert1({**key, 'key_hash': key_hash(key)})
for mask_idx, func_idx in zip(*np.nonzero(iou_matrix)):
self.AllMatches.insert1({'key_hash': key_hash(key),
'unit_id': scansetunit_keys[func_idx]['unit_id'],
'sunit_id': sunit_ids[mask_idx],
'iou': iou_matrix[mask_idx, func_idx]})
# Iterate over matches (from best to worst), insert
while iou_matrix.max() > 0:
# Get next best
best_mask, best_func = np.unravel_index(np.argmax(iou_matrix),
iou_matrix.shape)
best_iou = iou_matrix[best_mask, best_func]
# Get stack unit coordinates
coords = (stack.FieldSegmentation.StackUnit & stack_key &
{'sunit_id': sunit_ids[best_mask]}).fetch1('sunit_z', 'sunit_y',
'sunit_x', 'mask_z',
'mask_y', 'mask_x')
sunit_z, sunit_y, sunit_x, mask_z, mask_y, mask_x = coords
# Compute distance to 2-d and 3-d mask
px_y, px_x = ndimage.measurements.center_of_mass(binary_masks[best_func])
px_coords = np.array([[px_y], [px_x]])
func_x, func_y, func_z = [ndimage.map_coordinates(grid[..., i], px_coords,
order=1)[0] for i in
range(3)]
distance2d = np.sqrt((func_z - mask_z) ** 2 + (func_y - mask_y) ** 2 +
(func_x - mask_x) ** 2)
distance3d = np.sqrt((func_z - sunit_z) ** 2 + (func_y - sunit_y) ** 2 +
(func_x - sunit_x) ** 2)
self.Match.insert1({**key, **scansetunit_keys[best_func],
'sunit_id': sunit_ids[best_mask], 'iou': best_iou,
'distance2d': distance2d, 'distance3d': distance3d})
# Deactivate match
iou_matrix[best_mask, :] = 0
iou_matrix[:, best_func] = 0
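# Sketch of the greedy 1:1 matching loop above on a made-up 2x3 IoU matrix
# (rows: structural units, columns: functional units): repeatedly take the
# best remaining pair, then zero out its row and column.
import numpy as np
_iou = np.array([[0.1, 0.7, 0.0],
                 [0.6, 0.2, 0.3]])
_pairs = []
while _iou.max() > 0:
    _r, _c = np.unravel_index(np.argmax(_iou), _iou.shape)
    _pairs.append((_r, _c, _iou[_r, _c]))
    _iou[_r, :] = 0
    _iou[:, _c] = 0
# _pairs == [(0, 1, 0.7), (1, 0, 0.6)]; functional unit 2 stays unmatched.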
|
ecobost/pipeline
|
python/pipeline/meso.py
|
Python
|
lgpl-3.0
| 82,180
|
[
"Gaussian"
] |
03063006f9b91e3b3e0a453aaf345830b275714568318008f6ee73fb8f20a93b
|
import numpy as np
import tensorflow as tf
from traindata import training
from test import parsing
import logging
import re
import os
import csv
import gensim
import json
import argparse
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
logging.info('logger started')
def traindata1(fn, wordvecpath):
X2, Y2, X_lengths = training(fn, wordvecpath)
print(X2.shape)
np.save(numpypath+'X2', X2)
X_lengths = X_lengths.astype(int)
print(X_lengths.shape)
np.save(numpypath+'X_lengths', X_lengths)
print(Y2.shape)
Y2 = Y2.astype(int)
np.save(numpypath+'Y2', Y2)
def train_classifier(numpypath, ckptpath, lr, iters):
logging.info('classifier training started')
loss = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
saver = tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
inH = np.zeros([BATCHSIZE, CELLSIZE * NLAYERS])
X_data = np.load(numpypath + 'X2.npy')
Y_data = np.load(numpypath + 'Y2.npy')
X_lengths = np.load(numpypath + 'X_lengths.npy')
s = X_lengths.size
iters = iters * s + 1
epoch = 0
while epoch < iters:
X_, X_len, Y_b = next_batch(BATCHSIZE, X_data, X_lengths, Y_data)
dic = {X: X_, XL: X_len, Y_: Y_b, Hin: inH, keep_prob: 0.75}
_, outH = sess.run([train_step, H], feed_dict=dic)
inH = outH
epoch += 1
logging.info('epoch no : ' + str(epoch) + ', iterations : ' + str(epoch / s))
np.save(numpypath + "outH_" + str(lr) + "_" + str(int(epoch / s)) + "_", outH)
saver.save(sess, ckptpath + "classifier_" + str(lr) + "_" + str(int(epoch / s)) + "_")
logging.info('checkpoint save of epoch ' + str(epoch))
logging.info('classifier training complete')
# global cursors into the flattened training data, advanced by next_batch()
i = 0
k = 0
def next_batch(bs, X_data, X_lengths, Y_data):
global i, k
if k == X_lengths.size:  # wrap around once every sequence has been consumed
k = 0
i = 0
XLres = X_lengths[k:k + bs]
k += bs
ma = np.amax(XLres)  # longest sequence in this batch; shorter ones get padded up to it
Xres = np.zeros([1, CELLSIZE])
Yres = np.zeros([1, NCLASSES])
for t in XLres:
temp = X_data[i:i + t]
temp2 = Y_data[i:i + t]
i += t
if t < ma and temp.size:
npad = ((0, ma - t), (0, 0))
temp = np.pad(temp, pad_width=npad, mode='constant', constant_values=0)
temp2 = np.pad(temp2, pad_width=npad, mode='constant', constant_values=0)
Xres = np.vstack((Xres, temp))
Yres = np.vstack((Yres, temp2))
Xres = np.delete(Xres, 0, axis=0)  # drop the zero seed rows
Yres = np.delete(Yres, 0, axis=0)
Xres = Xres.reshape([bs, ma, CELLSIZE])
return Xres, XLres, Yres
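# Illustration (comments only): with CELLSIZE = 2, NCLASSES = 2 and two
# sentences of lengths 3 and 1, next_batch(2, ...) pads the short sentence
# with zero rows up to ma = 3 and returns Xres with shape [2, 3, 2]; the
# true lengths in XLres let dynamic_rnn stop reading each sequence early.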
ap = argparse.ArgumentParser()
ap.add_argument("-t","--train", help="input file for training")
ap.add_argument("-p","--parse", help="input file for parsing")
ap.add_argument("-w","--wordvecs", help="input file for wordvectors/wordembeddings")
ap.add_argument("-lr","--learningrate", help="learning rate for training classifier", default=0.001)
ap.add_argument("-its","--iterations", help="number of iterations for training classifier", default=10)
args = ap.parse_args()
traindatafile = args.train
testfile = args.parse
lr = float(args.learningrate)
iters = int(args.iterations)
numpypath = './tmpdata/'
ckptpath = './tmpdata/'
if args.wordvecs: wordvecpath = args.wordvecs
else: wordvecpath = './tmpdata/vecs.bin'
# resolve the word-vector path before training so the default is used there too
if traindatafile: traindata1(traindatafile, wordvecpath)
model = gensim.models.Word2Vec.load(wordvecpath)
vecdims = model.layer1_size
vecdims = vecdims + 11 + 2 + 2  # word-vector size plus the extra per-token feature dimensions
with open('./tmpdata/deprel.json', 'r') as fp:
depreldic = json.load(fp)
ndeprel=len(depreldic)
tf.variable_scope('tfl', reuse=True)
BATCHSIZE = 1
CELLSIZE = vecdims * 5 + 4
NCLASSES = ndeprel + 4
NLAYERS = 3
X = tf.placeholder(tf.float32, [None, None, CELLSIZE])
Y_ = tf.placeholder(tf.int32, [None, NCLASSES])
XL = tf.placeholder(tf.int32, [None])
keep_prob = tf.placeholder(tf.float32)
Hin = tf.placeholder(tf.float32, [None, CELLSIZE * NLAYERS])
# build a distinct GRUCell per layer: reusing one cell object across layers
# makes newer TF 1.x versions share (or refuse to build) the layer weights
mcell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(CELLSIZE) for _ in range(NLAYERS)], state_is_tuple=False)
Hr, H = tf.nn.dynamic_rnn(cell=mcell, inputs=X, sequence_length=XL, initial_state=Hin, dtype=tf.float32,
time_major=False)
Hd = tf.nn.dropout(Hr, keep_prob)
Hf = tf.reshape(Hd, [-1, CELLSIZE])
Ylogits = tf.contrib.layers.linear(Hf, NCLASSES)
Y = tf.nn.softmax(Ylogits)
Yp = tf.argmax(tf.slice(Y[0], [0], [4]), 0)
Yd = tf.argmax(tf.slice(Y[0], [4], [ndeprel]), 0)  # was a hardcoded 52, which assumed ndeprel == 52
if traindatafile : train_classifier(numpypath, ckptpath, lr, iters)
if testfile: parsing(testfile, wordvecpath, numpypath, ckptpath, Yp, Yd, H, X, XL, Hin, keep_prob)
logging.info('end of program')
|
rakat/depparser
|
main.py
|
Python
|
apache-2.0
| 4,816
|
[
"MCell"
] |
c9699d4edd02e12a9ddd075cffebeec9b89daa164838ecfaf52a72440c227c40
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2013 by Florian Mounier, Kozea
# This file is part of pystil, licensed under a 3-clause BSD license.
def labelize(string, lang):
"""return the label for a criteria"""
if lang == 'fr':
return {
'new': 'Nouvelles visites',
'unique': 'Visites',
'all': 'Pages vues',
'spent_time': u'Temps passé sur le site',
'hour': 'Visites par heure'
}[string]
return {
'new': 'New visits',
'unique': 'Visits',
'all': 'Page hits',
'spent_time': 'Time spent on site',
'hour': 'Visits per hour'
}[string]
def titlize(string, lang):
"""return the label for a criteria"""
if lang == 'fr':
return {
'all': 'Statistiques par jour',
'asn': "Top fournisseur d'accès",
'country_code': "Carte du monde des visites",
'host': 'Top sites',
'page': 'Pages les plus vues',
'hash': 'Hashs les plus vus',
'referrer_domain': 'Top référeurs',
'hour': 'Visites par heure',
'subdomain': 'Top sous domaines',
'browser_name': 'Top navigateurs',
'browser_name_version': 'Top version de navigateur',
'size': "Top tailles d'écran",
'platform': 'Top plateforme',
'spent_time': 'Temps passé sur le site',
'country': 'Top pays',
'day': 'Top jours',
'ip': 'Top adresses IP',
}[string]
return {
'all': 'Stats by day',
'asn': 'Top access networks',
'country_code': 'Visit worldmap',
'host': 'Top sites',
'page': 'Most viewed pages',
'hash': 'Most viewed hashes',
'referrer_domain': 'Best referrers',
'hour': 'Visits per hour',
'subdomain': 'Top subdomains',
'browser_name': 'Top browsers',
'browser_name_version': 'Top browser versions',
'size': 'Top screen sizes',
'platform': 'Top platforms',
'spent_time': 'Time spent on site',
'country': 'Top countries',
'day': 'Top days',
'ip': 'Top IP addresses'
}[string]
def criteria(criterion, lang='us'):
"""return the verbose name for a criterion (lang is currently unused)"""
return {
'id': 'Identifier',
'uuid': 'Unique identifier (uuid)',
'browser_name': 'Browser Name',
'hash': 'History tag',
'host': 'Host',
'browser_version': 'Browser Version',
'client_tz_offset': 'Timezone',
'date': 'Date',
'last_visit': 'Last Visit',
'ip': 'Ip',
'language': 'Language',
'page': 'Page visited',
'platform': 'Operating system',
'query': 'Arguments',
'referrer': 'Referrer',
'pretty_referrer': 'Pretty Referrer',
'referrer_domain': 'Referrer domain',
'site': 'Site url',
'size': 'Screen Size',
'time': 'Time spent on site',
'country': 'Country',
'country_code': 'Country Code',
'city': 'City',
'lat': 'Latitude',
'lng': 'Longitude',
'asn': 'Access Service Network',
'browser_name_version': 'Browser (Name and Version)',
'day': 'Day Number',
'hour': 'Hour of day',
'spent_time': 'Time Spent',
'subdomain': 'Subdomain',
'domain': 'Domain'
}.get(criterion, criterion)
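# Example usage (values taken from the mappings above):
# labelize('unique', 'fr') -> 'Visites'
# titlize('host', 'us')    -> 'Top sites'
# criteria('size')         -> 'Screen Size'
# criteria('nonexistent')  -> 'nonexistent'  (falls back to the key itself)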
|
Kozea/pystil
|
pystil/i18n.py
|
Python
|
bsd-3-clause
| 3,404
|
[
"VisIt"
] |
bc9dc6c71e81e8d9ad91fcfa8f4d56bbbd2aee33f0e56e01307a53b93a94df38
|
from math import pi, sqrt
# flux in 1/(m^2*s)
# 1e15 is the surface atom density in atoms/cm^2. I convert to m^2 by a factor of 100^2
Na = 6.02214e23
Z = 1e15*(100**2)
# mass of a single molecule in kg (molar mass 28 g/mol, e.g. N2 or CO)
m = 28/(Na*1000)
# boltzmann constant J/K, kb = R/Na
kb = 1.38066e-23
# Temperature, Kelvin
T = 300
# sticking coefficient
sticking = 1
# Hertz knudsen equation. The pressure is in Pascal
P = Z*sqrt(2*pi*m*kb*T)/sticking
P_torr = P*760/101325
print('Pressure is: {:.4} torr'.format(P_torr))
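# Sanity check of the numbers above (rounded): m ~ 4.65e-26 kg,
# sqrt(2*pi*m*kb*T) ~ 3.48e-23, and Z = 1e19 1/(m^2*s), so P ~ 3.48e-4 Pa,
# i.e. roughly 2.6e-6 torr for a sticking coefficient of 1.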
# T = 707.1 # Assume desorption at 707.1 K
#
# dmm = 10 # 8 mm diameter of crystal
# d = dmm/1000 # convert mm to m
# # Area = pi*((d/10)**2)/4 # Area of crystal in m^2
# Area = pi*(d**2)/4 # Area of crystal in m^2
# # print(Area)
# # Area = d**2
# # print(Area)
# Na = 6.022*10**23 # molecules/mole
# M = 28/1000 # mass of Li in kg/mol
# R = 8.314 # gas const J/(mol*K)
# dNdtfigure4 = 1e15 # Li Desorption rate at Tp (atoms/(cm^2*sec) from Figure 4 Top panel
# dNdt = dNdtfigure4/(100*100) # convert Li desorption rate to (atoms/(m^2*sec)
# P = ((1/Area)*dNdt*(sqrt(2*pi*M*R*T)))/Na # Pressure in Pa
# Pa_to_torr = 0.00750062 # 1 Pa = 0.00750062 torr
# Ptorr = P*Pa_to_torr
# print(Ptorr)
|
mannyfin/IRAS
|
plotTPD_data programs/HertzKnudsen.py
|
Python
|
bsd-3-clause
| 1,193
|
[
"CRYSTAL"
] |
4ec8e4a0a0eb7d1df82af93a243b0c97ee0e4514880070a69edd27e267ecf934
|
##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
"""GL tracing generator."""
import re
import sys
from trace import Tracer
from dispatch import function_pointer_type, function_pointer_value
import specs.stdapi as stdapi
import specs.glapi as glapi
import specs.glparams as glparams
from specs.glxapi import glxapi
class TypeGetter(stdapi.Visitor):
'''Determine which glGet*v function matches the specified type.'''
def __init__(self, prefix = 'glGet', long_suffix = True, ext_suffix = ''):
self.prefix = prefix
self.long_suffix = long_suffix
self.ext_suffix = ext_suffix
def visitConst(self, const):
return self.visit(const.type)
def visitAlias(self, alias):
if alias.expr == 'GLboolean':
if self.long_suffix:
suffix = 'Booleanv'
arg_type = alias.expr
else:
suffix = 'iv'
arg_type = 'GLint'
elif alias.expr == 'GLdouble':
if self.long_suffix:
suffix = 'Doublev'
arg_type = alias.expr
else:
suffix = 'dv'
arg_type = alias.expr
elif alias.expr == 'GLfloat':
if self.long_suffix:
suffix = 'Floatv'
arg_type = alias.expr
else:
suffix = 'fv'
arg_type = alias.expr
elif alias.expr in ('GLint', 'GLuint', 'GLsizei'):
if self.long_suffix:
suffix = 'Integerv'
arg_type = 'GLint'
else:
suffix = 'iv'
arg_type = 'GLint'
else:
print alias.expr
assert False
function_name = self.prefix + suffix + self.ext_suffix
return function_name, arg_type
def visitEnum(self, enum):
return self.visit(glapi.GLint)
def visitBitmask(self, bitmask):
return self.visit(glapi.GLint)
def visitOpaque(self, pointer):
return self.prefix + 'Pointerv' + self.ext_suffix, 'GLvoid *'
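# Illustrative examples (comments only, assuming the spec types behave as the
# visitors above expect):
#   TypeGetter().visit(glapi.GLfloat)
#       -> ('glGetFloatv', 'GLfloat')
#   TypeGetter('glGetTexParameter', long_suffix=False).visit(glapi.GLint)
#       -> ('glGetTexParameteriv', 'GLint')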
class GlTracer(Tracer):
arrays = [
("Vertex", "VERTEX"),
("Normal", "NORMAL"),
("Color", "COLOR"),
("Index", "INDEX"),
("TexCoord", "TEXTURE_COORD"),
("EdgeFlag", "EDGE_FLAG"),
("FogCoord", "FOG_COORD"),
("SecondaryColor", "SECONDARY_COLOR"),
]
arrays.reverse()
# arrays available in ES1
arrays_es1 = ("Vertex", "Normal", "Color", "TexCoord")
def header(self, api):
Tracer.header(self, api)
print '#include <algorithm>'
print
print '#include "gltrace.hpp"'
print
# Which glVertexAttrib* variant to use
print 'enum vertex_attrib {'
print ' VERTEX_ATTRIB,'
print ' VERTEX_ATTRIB_NV,'
print '};'
print
print 'static vertex_attrib _get_vertex_attrib(void) {'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (ctx->user_arrays_nv) {'
print ' GLboolean _vertex_program = GL_FALSE;'
print ' _glGetBooleanv(GL_VERTEX_PROGRAM_ARB, &_vertex_program);'
print ' if (_vertex_program) {'
print ' if (ctx->user_arrays_nv) {'
print ' GLint _vertex_program_binding_nv = _glGetInteger(GL_VERTEX_PROGRAM_BINDING_NV);'
print ' if (_vertex_program_binding_nv) {'
print ' return VERTEX_ATTRIB_NV;'
print ' }'
print ' }'
print ' }'
print ' }'
print ' return VERTEX_ATTRIB;'
print '}'
print
self.defineShadowBufferHelper()
# Whether we need user arrays
print 'static inline bool _need_user_arrays(void)'
print '{'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (!ctx->user_arrays) {'
print ' return false;'
print ' }'
print
print ' glprofile::Profile profile = ctx->profile;'
print ' bool es1 = profile.es() && profile.major == 1;'
print
for camelcase_name, uppercase_name in self.arrays:
# in which profile is the array available?
profile_check = 'profile.desktop()'
if camelcase_name in self.arrays_es1:
profile_check = '(' + profile_check + ' || es1)'
function_name = 'gl%sPointer' % camelcase_name
enable_name = 'GL_%s_ARRAY' % uppercase_name
binding_name = 'GL_%s_ARRAY_BUFFER_BINDING' % uppercase_name
print ' // %s' % function_name
print ' if (%s) {' % profile_check
self.array_prolog(api, uppercase_name)
print ' if (_glIsEnabled(%s) &&' % enable_name
print ' _glGetInteger(%s) == 0) {' % binding_name
self.array_cleanup(api, uppercase_name)
print ' return true;'
print ' }'
self.array_epilog(api, uppercase_name)
print ' }'
print
print ' // ES1 does not support generic vertex attributes'
print ' if (es1)'
print ' return false;'
print
print ' vertex_attrib _vertex_attrib = _get_vertex_attrib();'
print
print ' // glVertexAttribPointer'
print ' if (_vertex_attrib == VERTEX_ATTRIB) {'
print ' GLint _max_vertex_attribs = _glGetInteger(GL_MAX_VERTEX_ATTRIBS);'
print ' for (GLint index = 0; index < _max_vertex_attribs; ++index) {'
print ' if (_glGetVertexAttribi(index, GL_VERTEX_ATTRIB_ARRAY_ENABLED) &&'
print ' _glGetVertexAttribi(index, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING) == 0) {'
print ' return true;'
print ' }'
print ' }'
print ' }'
print
print ' // glVertexAttribPointerNV'
print ' if (_vertex_attrib == VERTEX_ATTRIB_NV) {'
print ' for (GLint index = 0; index < 16; ++index) {'
print ' if (_glIsEnabled(GL_VERTEX_ATTRIB_ARRAY0_NV + index)) {'
print ' return true;'
print ' }'
print ' }'
print ' }'
print
print ' return false;'
print '}'
print
print 'static void _trace_user_arrays(GLuint count);'
print
print '// whether glLockArraysEXT() has ever been called'
print 'static bool _checkLockArraysEXT = false;'
print
# Buffer mappings
print '// whether glMapBufferRange(GL_MAP_WRITE_BIT) has ever been called'
print 'static bool _checkBufferMapRange = false;'
print
print '// whether glBufferParameteriAPPLE(GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE) has ever been called'
print 'static bool _checkBufferFlushingUnmapAPPLE = false;'
print
# Generate a helper function to determine whether a parameter name
# refers to a symbolic value or not
print 'static bool'
print 'is_symbolic_pname(GLenum pname) {'
print ' switch (pname) {'
for function, type, count, name in glparams.parameters:
if type is glapi.GLenum:
print ' case %s:' % name
print ' return true;'
print ' default:'
print ' return false;'
print ' }'
print '}'
print
# Generate a helper function to determine whether a parameter value is
# potentially symbolic or not; i.e., if the value can be represented in
# an enum or not
print 'template<class T>'
print 'static inline bool'
print 'is_symbolic_param(T param) {'
print ' return static_cast<T>(static_cast<GLenum>(param)) == param;'
print '}'
print
# Generate a helper function to know how many elements a parameter has
print 'static size_t'
print '_gl_param_size(GLenum pname) {'
print ' switch (pname) {'
for function, type, count, name in glparams.parameters:
if name == 'GL_PROGRAM_BINARY_FORMATS':
count = 0
if type is not None:
print ' case %s: return %s;' % (name, count)
print ' default:'
print r' os::log("apitrace: warning: %s: unknown GLenum 0x%04X\n", __FUNCTION__, pname);'
print ' return 1;'
print ' }'
print '}'
print
# states such as GL_UNPACK_ROW_LENGTH are not available in GLES
print 'static inline bool'
print 'can_unpack_subimage(void) {'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' return ctx->profile.desktop();'
print '}'
print
# VMWX_map_buffer_debug
print r'extern "C" PUBLIC'
print r'void APIENTRY'
print r'glNotifyMappedBufferRangeVMWX(const void * start, GLsizeiptr length) {'
self.emit_memcpy('start', 'length')
print r'}'
print
getProcAddressFunctionNames = []
def traceApi(self, api):
if self.getProcAddressFunctionNames:
# Generate a function to wrap proc addresses
getProcAddressFunction = api.getFunctionByName(self.getProcAddressFunctionNames[0])
argType = getProcAddressFunction.args[0].type
retType = getProcAddressFunction.type
print 'static %s _wrapProcAddress(%s procName, %s procPtr);' % (retType, argType, retType)
print
Tracer.traceApi(self, api)
print 'static %s _wrapProcAddress(%s procName, %s procPtr) {' % (retType, argType, retType)
# Provide fallback functions to missing debug functions
print ' if (!procPtr) {'
else_ = ''
for function_name in self.debug_functions:
if self.api.getFunctionByName(function_name):
print ' %sif (strcmp("%s", (const char *)procName) == 0) {' % (else_, function_name)
print ' return (%s)&%s;' % (retType, function_name)
print ' }'
else_ = 'else '
print ' %s{' % else_
print ' return NULL;'
print ' }'
print ' }'
for function in api.getAllFunctions():
ptype = function_pointer_type(function)
pvalue = function_pointer_value(function)
print ' if (strcmp("%s", (const char *)procName) == 0) {' % function.name
print ' %s = (%s)procPtr;' % (pvalue, ptype)
print ' return (%s)&%s;' % (retType, function.name,)
print ' }'
print ' os::log("apitrace: warning: unknown function \\"%s\\"\\n", (const char *)procName);'
print ' return procPtr;'
print '}'
print
else:
Tracer.traceApi(self, api)
def defineShadowBufferHelper(self):
print 'void _shadow_glGetBufferSubData(GLenum target, GLintptr offset,'
print ' GLsizeiptr size, GLvoid *data)'
print '{'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (!ctx->needsShadowBuffers() || target != GL_ELEMENT_ARRAY_BUFFER) {'
print ' _glGetBufferSubData(target, offset, size, data);'
print ' return;'
print ' }'
print
print ' GLint buffer_binding = _glGetInteger(GL_ELEMENT_ARRAY_BUFFER_BINDING);'
print ' if (buffer_binding > 0) {'
print ' gltrace::Buffer & buf = ctx->buffers[buffer_binding];'
print ' buf.getSubData(offset, size, data);'
print ' }'
print '}'
def shadowBufferMethod(self, method):
# Emit code to fetch the shadow buffer, and invoke a method
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (ctx->needsShadowBuffers() && target == GL_ELEMENT_ARRAY_BUFFER) {'
print ' GLint buffer_binding = _glGetInteger(GL_ELEMENT_ARRAY_BUFFER_BINDING);'
print ' if (buffer_binding > 0) {'
print ' gltrace::Buffer & buf = ctx->buffers[buffer_binding];'
print ' buf.' + method + ';'
print ' }'
print ' }'
print
def shadowBufferProlog(self, function):
if function.name == 'glBufferData':
self.shadowBufferMethod('bufferData(size, data)')
if function.name == 'glBufferSubData':
self.shadowBufferMethod('bufferSubData(offset, size, data)')
if function.name == 'glDeleteBuffers':
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' if (ctx->needsShadowBuffers()) {'
print ' for (GLsizei i = 0; i < n; i++) {'
print ' ctx->buffers.erase(buffer[i]);'
print ' }'
print ' }'
array_pointer_function_names = set((
"glVertexPointer",
"glNormalPointer",
"glColorPointer",
"glIndexPointer",
"glTexCoordPointer",
"glEdgeFlagPointer",
"glFogCoordPointer",
"glSecondaryColorPointer",
"glInterleavedArrays",
"glVertexPointerEXT",
"glNormalPointerEXT",
"glColorPointerEXT",
"glIndexPointerEXT",
"glTexCoordPointerEXT",
"glEdgeFlagPointerEXT",
"glFogCoordPointerEXT",
"glSecondaryColorPointerEXT",
"glVertexAttribPointer",
"glVertexAttribPointerARB",
"glVertexAttribPointerNV",
"glVertexAttribIPointer",
"glVertexAttribIPointerEXT",
"glVertexAttribLPointer",
"glVertexAttribLPointerEXT",
#"glMatrixIndexPointerARB",
))
# XXX: We currently ignore the gl*Draw*ElementArray* functions
draw_function_regex = re.compile(r'^gl([A-Z][a-z]+)*Draw(Range)?(Arrays|Elements)([A-Z][a-zA-Z]*)?$' )
interleaved_formats = [
'GL_V2F',
'GL_V3F',
'GL_C4UB_V2F',
'GL_C4UB_V3F',
'GL_C3F_V3F',
'GL_N3F_V3F',
'GL_C4F_N3F_V3F',
'GL_T2F_V3F',
'GL_T4F_V4F',
'GL_T2F_C4UB_V3F',
'GL_T2F_C3F_V3F',
'GL_T2F_N3F_V3F',
'GL_T2F_C4F_N3F_V3F',
'GL_T4F_C4F_N3F_V4F',
]
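# Sketch of the flag derivation used by traceFunctionImplBody below: a format
# token such as GL_T2F_C4UB_V3F enables an array when '_' plus the first
# letter of its uppercase name occurs in the token (T -> TexCoord, C -> Color,
# V -> Vertex). For example:
#   'GL_T2F_C4UB_V3F'.find('_T') >= 0  -> True, texcoord array enabled
#   'GL_T2F_C4UB_V3F'.find('_N') >= 0  -> False, normal array disabled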
def traceFunctionImplBody(self, function):
# Defer tracing of user array pointers...
if function.name in self.array_pointer_function_names:
print ' GLint _array_buffer = _glGetInteger(GL_ARRAY_BUFFER_BINDING);'
print ' if (!_array_buffer) {'
print ' static bool warned = false;'
print ' if (!warned) {'
print ' warned = true;'
print ' os::log("apitrace: warning: %s: call will be faked due to pointer to user memory (https://github.com/apitrace/apitrace/blob/master/docs/BUGS.markdown#tracing)\\n", __FUNCTION__);'
print ' }'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' ctx->user_arrays = true;'
if function.name == "glVertexAttribPointerNV":
print ' ctx->user_arrays_nv = true;'
self.invokeFunction(function)
# And also break down glInterleavedArrays into the individual calls
if function.name == 'glInterleavedArrays':
print
# Initialize the enable flags
for camelcase_name, uppercase_name in self.arrays:
flag_name = '_' + uppercase_name.lower()
print ' GLboolean %s = GL_FALSE;' % flag_name
print
# Switch for the interleaved formats
print ' switch (format) {'
for format in self.interleaved_formats:
print ' case %s:' % format
for camelcase_name, uppercase_name in self.arrays:
flag_name = '_' + uppercase_name.lower()
if format.find('_' + uppercase_name[0]) >= 0:
print ' %s = GL_TRUE;' % flag_name
print ' break;'
print ' default:'
print ' return;'
print ' }'
print
# Emit fake glEnableClientState/glDisableClientState flags
for camelcase_name, uppercase_name in self.arrays:
flag_name = '_' + uppercase_name.lower()
enable_name = 'GL_%s_ARRAY' % uppercase_name
# Emit a fake function
print ' {'
print ' static const trace::FunctionSig &_sig = %s ? _glEnableClientState_sig : _glDisableClientState_sig;' % flag_name
print ' unsigned _call = trace::localWriter.beginEnter(&_sig, true);'
print ' trace::localWriter.beginArg(0);'
self.serializeValue(glapi.GLenum, enable_name)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_call);'
print ' trace::localWriter.endLeave();'
print ' }'
# Warn about buggy glGet(GL_*ARRAY_SIZE) not returning GL_BGRA
buggyFunctions = {
'glColorPointer': ('glGetIntegerv', '', 'GL_COLOR_ARRAY_SIZE'),
'glSecondaryColorPointer': ('glGetIntegerv', '', 'GL_SECONDARY_COLOR_ARRAY_SIZE'),
'glVertexAttribPointer': ('glGetVertexAttribiv', 'index, ', 'GL_VERTEX_ATTRIB_ARRAY_SIZE'),
'glVertexAttribPointerARB': ('glGetVertexAttribivARB', 'index, ', 'GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB'),
}
if function.name in buggyFunctions:
getter, extraArg, pname = buggyFunctions[function.name]
print r' static bool _checked = false;'
print r' if (!_checked && size == GL_BGRA) {'
print r' GLint _size = 0;'
print r' _%s(%s%s, &_size);' % (getter, extraArg, pname)
print r' if (_size != GL_BGRA) {'
print r' os::log("apitrace: warning: %s(%s) does not return GL_BGRA; trace will be incorrect (https://github.com/apitrace/apitrace/issues/261)\n");' % (getter, pname)
print r' }'
print r' _checked = true;'
print r' }'
print ' return;'
print ' }'
# ... to the draw calls
if self.draw_function_regex.match(function.name):
print ' if (_need_user_arrays()) {'
if 'Indirect' in function.name:
print r' os::log("apitrace: warning: %s: indirect user arrays not supported\n");' % (function.name,)
else:
arg_names = ', '.join([arg.name for arg in function.args[1:]])
print ' GLuint _count = _%s_count(%s);' % (function.name, arg_names)
# Some apps, in particular Quake3, can tell the driver to lock more
# vertices than those actually required for the draw call.
print ' if (_checkLockArraysEXT) {'
print ' GLuint _locked_count = _glGetInteger(GL_ARRAY_ELEMENT_LOCK_FIRST_EXT)'
print ' + _glGetInteger(GL_ARRAY_ELEMENT_LOCK_COUNT_EXT);'
print ' _count = std::max(_count, _locked_count);'
print ' }'
print ' _trace_user_arrays(_count);'
print ' }'
if function.name == 'glLockArraysEXT':
print ' _checkLockArraysEXT = true;'
# Warn if user arrays are used with glBegin/glArrayElement/glEnd.
if function.name == 'glBegin':
print r' gltrace::Context *ctx = gltrace::getContext();'
print r' ctx->userArraysOnBegin = _need_user_arrays();'
if function.name.startswith('glArrayElement'):
print r' gltrace::Context *ctx = gltrace::getContext();'
print r' if (ctx->userArraysOnBegin) {'
print r' os::log("apitrace: warning: user arrays with glArrayElement not supported (https://github.com/apitrace/apitrace/issues/276)\n");'
print r' ctx->userArraysOnBegin = false;'
print r' }'
# Emit a fake memcpy on buffer uploads
if function.name == 'glBufferParameteriAPPLE':
print ' if (pname == GL_BUFFER_FLUSHING_UNMAP_APPLE && param == GL_FALSE) {'
print ' _checkBufferFlushingUnmapAPPLE = true;'
print ' }'
if function.name in ('glUnmapBuffer', 'glUnmapBufferARB'):
if function.name.endswith('ARB'):
suffix = 'ARB'
else:
suffix = ''
print ' GLint access_flags = 0;'
print ' GLint access = 0;'
print ' bool flush;'
print ' // GLES3 does not have GL_BUFFER_ACCESS;'
print ' if (_checkBufferMapRange) {'
print ' _glGetBufferParameteriv%s(target, GL_BUFFER_ACCESS_FLAGS, &access_flags);' % suffix
print ' flush = (access_flags & GL_MAP_WRITE_BIT) && !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT));'
print ' } else {'
print ' _glGetBufferParameteriv%s(target, GL_BUFFER_ACCESS, &access);' % suffix
print ' flush = access != GL_READ_ONLY;'
print ' }'
print ' if (flush) {'
print ' GLvoid *map = NULL;'
print ' _glGetBufferPointerv%s(target, GL_BUFFER_MAP_POINTER, &map);' % suffix
print ' if (map) {'
print ' GLint length = -1;'
print ' if (_checkBufferMapRange) {'
print ' _glGetBufferParameteriv%s(target, GL_BUFFER_MAP_LENGTH, &length);' % suffix
print ' if (length == -1) {'
print ' // Mesa drivers refuse GL_BUFFER_MAP_LENGTH without GL 3.0 up-to'
print ' // http://cgit.freedesktop.org/mesa/mesa/commit/?id=ffee498fb848b253a7833373fe5430f8c7ca0c5f'
print ' static bool warned = false;'
print ' if (!warned) {'
print ' os::log("apitrace: warning: glGetBufferParameteriv%s(GL_BUFFER_MAP_LENGTH) failed\\n");' % suffix
print ' warned = true;'
print ' }'
print ' }'
print ' } else {'
print ' length = 0;'
print ' _glGetBufferParameteriv%s(target, GL_BUFFER_SIZE, &length);' % suffix
print ' }'
print ' if (_checkBufferFlushingUnmapAPPLE) {'
print ' GLint flushing_unmap = GL_TRUE;'
print ' _glGetBufferParameteriv%s(target, GL_BUFFER_FLUSHING_UNMAP_APPLE, &flushing_unmap);' % suffix
print ' flush = flush && flushing_unmap;'
print ' }'
print ' if (flush && length > 0) {'
self.emit_memcpy('map', 'length')
print ' }'
print ' }'
print ' }'
if function.name == 'glUnmapBufferOES':
print ' GLint access_flags = 0;'
print ' GLint access = 0;'
print ' bool flush;'
print ' // GLES3 does not have GL_BUFFER_ACCESS;'
print ' if (_checkBufferMapRange) {'
print ' _glGetBufferParameteriv(target, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
print ' flush = (access_flags & GL_MAP_WRITE_BIT) && !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT));'
print ' } else {'
print ' _glGetBufferParameteriv(target, GL_BUFFER_ACCESS, &access);'
print ' flush = access != GL_READ_ONLY;'
print ' }'
print ' if (flush) {'
print ' GLvoid *map = NULL;'
print ' _glGetBufferPointervOES(target, GL_BUFFER_MAP_POINTER, &map);'
print ' if (map) {'
print ' GLint length = 0;'
print ' GLint offset = 0;'
print ' if (_checkBufferMapRange) {'
print ' _glGetBufferParameteriv(target, GL_BUFFER_MAP_LENGTH, &length);'
print ' _glGetBufferParameteriv(target, GL_BUFFER_MAP_OFFSET, &offset);'
print ' } else {'
print ' _glGetBufferParameteriv(target, GL_BUFFER_SIZE, &length);'
print ' }'
print ' if (flush && length > 0) {'
self.emit_memcpy('map', 'length')
self.shadowBufferMethod('bufferSubData(offset, length, map)')
print ' }'
print ' }'
print ' }'
if function.name == 'glUnmapNamedBuffer':
print ' GLint access_flags = 0;'
print ' _glGetNamedBufferParameteriv(buffer, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
print ' if ((access_flags & GL_MAP_WRITE_BIT) &&'
print ' !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT))) {'
print ' GLvoid *map = NULL;'
print ' _glGetNamedBufferPointerv(buffer, GL_BUFFER_MAP_POINTER, &map);'
print ' GLint length = 0;'
print ' _glGetNamedBufferParameteriv(buffer, GL_BUFFER_MAP_LENGTH, &length);'
print ' if (map && length > 0) {'
self.emit_memcpy('map', 'length')
print ' }'
print ' }'
if function.name == 'glUnmapNamedBufferEXT':
print ' GLint access_flags = 0;'
print ' _glGetNamedBufferParameterivEXT(buffer, GL_BUFFER_ACCESS_FLAGS, &access_flags);'
print ' if ((access_flags & GL_MAP_WRITE_BIT) &&'
print ' !(access_flags & (GL_MAP_FLUSH_EXPLICIT_BIT | GL_MAP_PERSISTENT_BIT))) {'
print ' GLvoid *map = NULL;'
print ' _glGetNamedBufferPointervEXT(buffer, GL_BUFFER_MAP_POINTER, &map);'
print ' GLint length = 0;'
print ' _glGetNamedBufferParameterivEXT(buffer, GL_BUFFER_MAP_LENGTH, &length);'
print ' if (map && length > 0) {'
self.emit_memcpy('map', 'length')
print ' }'
print ' }'
if function.name == 'glFlushMappedBufferRange':
print ' GLvoid *map = NULL;'
print ' _glGetBufferPointerv(target, GL_BUFFER_MAP_POINTER, &map);'
print ' if (map && length > 0) {'
self.emit_memcpy('(const char *)map + offset', 'length')
print ' }'
if function.name == 'glFlushMappedBufferRangeEXT':
print ' GLvoid *map = NULL;'
print ' _glGetBufferPointervOES(target, GL_BUFFER_MAP_POINTER_OES, &map);'
print ' if (map && length > 0) {'
self.emit_memcpy('(const char *)map + offset', 'length')
print ' }'
if function.name == 'glFlushMappedBufferRangeAPPLE':
print ' GLvoid *map = NULL;'
print ' _glGetBufferPointerv(target, GL_BUFFER_MAP_POINTER, &map);'
print ' if (map && size > 0) {'
self.emit_memcpy('(const char *)map + offset', 'size')
print ' }'
if function.name == 'glFlushMappedNamedBufferRange':
print ' GLvoid *map = NULL;'
print ' _glGetNamedBufferPointerv(buffer, GL_BUFFER_MAP_POINTER, &map);'
print ' if (map && length > 0) {'
self.emit_memcpy('(const char *)map + offset', 'length')
print ' }'
if function.name == 'glFlushMappedNamedBufferRangeEXT':
print ' GLvoid *map = NULL;'
print ' _glGetNamedBufferPointervEXT(buffer, GL_BUFFER_MAP_POINTER, &map);'
print ' if (map && length > 0) {'
self.emit_memcpy('(const char *)map + offset', 'length')
print ' }'
# FIXME: We don't support coherent/pinned memory mappings
if function.name in ('glBufferStorage', 'glNamedBufferStorage', 'glNamedBufferStorageEXT'):
print r' if (!(flags & GL_MAP_PERSISTENT_BIT)) {'
print r' os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/o MAP_PERSISTENT_BIT\n", __FUNCTION__);'
print r' }'
print r' flags &= ~GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX;'
if function.name in ('glMapBufferRange', 'glMapBufferRangeEXT', 'glMapNamedBufferRange', 'glMapNamedBufferRangeEXT'):
print r' if (access & GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX) {'
print r' if (!(access & GL_MAP_PERSISTENT_BIT)) {'
print r' os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/o MAP_PERSISTENT_BIT\n", __FUNCTION__);'
print r' }'
print r' if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {'
print r' os::log("apitrace: warning: %s: MAP_NOTIFY_EXPLICIT_BIT_VMWX set w/ MAP_FLUSH_EXPLICIT_BIT\n", __FUNCTION__);'
print r' }'
print r' access &= ~GL_MAP_NOTIFY_EXPLICIT_BIT_VMWX;'
print r' } else if (access & GL_MAP_COHERENT_BIT) {'
print r' os::log("apitrace: warning: %s: MAP_COHERENT_BIT unsupported (https://github.com/apitrace/apitrace/issues/232)\n", __FUNCTION__);'
print r' } else if ((access & GL_MAP_PERSISTENT_BIT) &&'
print r' !(access & GL_MAP_FLUSH_EXPLICIT_BIT)) {'
print r' os::log("apitrace: warning: %s: MAP_PERSISTENT_BIT w/o FLUSH_EXPLICIT_BIT unsupported (https://github.com/apitrace/apitrace/issues/232)\n", __FUNCTION__);'
print r' }'
if function.name in ('glBufferData', 'glBufferDataARB'):
print r' if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {'
print r' os::log("apitrace: warning: GL_AMD_pinned_memory not fully supported\n");'
print r' }'
# TODO: We don't track GL_INTEL_map_texture mappings
if function.name == 'glMapTexture2DINTEL':
print r' if (access & GL_MAP_WRITE_BIT) {'
print r' os::log("apitrace: warning: GL_INTEL_map_texture not fully supported\n");'
print r' }'
# Don't leave vertex attrib locations to chance. Instead emit fake
# glBindAttribLocation calls to ensure that the same locations will be
# used when retracing. Trying to remap locations after the fact would
# be a herculean task given that vertex attrib locations appear in
# many entry-points, including non-shader related ones.
if function.name == 'glLinkProgram':
Tracer.invokeFunction(self, function)
print ' GLint active_attributes = 0;'
print ' _glGetProgramiv(program, GL_ACTIVE_ATTRIBUTES, &active_attributes);'
print ' for (GLint attrib = 0; attrib < active_attributes; ++attrib) {'
print ' GLint size = 0;'
print ' GLenum type = 0;'
print ' GLchar name[256];'
# TODO: Use ACTIVE_ATTRIBUTE_MAX_LENGTH instead of 256
print ' _glGetActiveAttrib(program, attrib, sizeof name, NULL, &size, &type, name);'
print " if (name[0] != 'g' || name[1] != 'l' || name[2] != '_') {"
print ' GLint location = _glGetAttribLocation(program, name);'
print ' if (location >= 0) {'
bind_function = glapi.glapi.getFunctionByName('glBindAttribLocation')
self.fake_call(bind_function, ['program', 'location', 'name'])
print ' }'
print ' }'
print ' }'
if function.name == 'glLinkProgramARB':
Tracer.invokeFunction(self, function)
print ' GLint active_attributes = 0;'
print ' _glGetObjectParameterivARB(programObj, GL_OBJECT_ACTIVE_ATTRIBUTES_ARB, &active_attributes);'
print ' for (GLint attrib = 0; attrib < active_attributes; ++attrib) {'
print ' GLint size = 0;'
print ' GLenum type = 0;'
print ' GLcharARB name[256];'
# TODO: Use ACTIVE_ATTRIBUTE_MAX_LENGTH instead of 256
print ' _glGetActiveAttribARB(programObj, attrib, sizeof name, NULL, &size, &type, name);'
print " if (name[0] != 'g' || name[1] != 'l' || name[2] != '_') {"
print ' GLint location = _glGetAttribLocationARB(programObj, name);'
print ' if (location >= 0) {'
bind_function = glapi.glapi.getFunctionByName('glBindAttribLocationARB')
self.fake_call(bind_function, ['programObj', 'location', 'name'])
print ' }'
print ' }'
print ' }'
self.shadowBufferProlog(function)
Tracer.traceFunctionImplBody(self, function)
# These entrypoints are only expected to be implemented by tools;
# drivers will probably not implement them.
marker_functions = [
# GL_GREMEDY_string_marker
'glStringMarkerGREMEDY',
# GL_GREMEDY_frame_terminator
'glFrameTerminatorGREMEDY',
]
# These entrypoints may be implemented by drivers, but are also very useful
# for debugging / analysis tools.
debug_functions = [
# GL_KHR_debug
'glDebugMessageControl',
'glDebugMessageInsert',
'glDebugMessageCallback',
'glGetDebugMessageLog',
'glPushDebugGroup',
'glPopDebugGroup',
'glObjectLabel',
'glGetObjectLabel',
'glObjectPtrLabel',
'glGetObjectPtrLabel',
# GL_KHR_debug (for OpenGL ES)
'glDebugMessageControlKHR',
'glDebugMessageInsertKHR',
'glDebugMessageCallbackKHR',
'glGetDebugMessageLogKHR',
'glPushDebugGroupKHR',
'glPopDebugGroupKHR',
'glObjectLabelKHR',
'glGetObjectLabelKHR',
'glObjectPtrLabelKHR',
'glGetObjectPtrLabelKHR',
# GL_ARB_debug_output
'glDebugMessageControlARB',
'glDebugMessageInsertARB',
'glDebugMessageCallbackARB',
'glGetDebugMessageLogARB',
# GL_AMD_debug_output
'glDebugMessageEnableAMD',
'glDebugMessageInsertAMD',
'glDebugMessageCallbackAMD',
'glGetDebugMessageLogAMD',
# GL_EXT_debug_label
'glLabelObjectEXT',
'glGetObjectLabelEXT',
# GL_EXT_debug_marker
'glInsertEventMarkerEXT',
'glPushGroupMarkerEXT',
'glPopGroupMarkerEXT',
]
def invokeFunction(self, function):
if function.name in ('glLinkProgram', 'glLinkProgramARB'):
# These functions have been dispatched already
return
# Force glProgramBinary to fail. Per ARB_get_program_binary this
# should signal the app that it needs to recompile.
if function.name in ('glProgramBinary', 'glProgramBinaryOES'):
print r' binaryFormat = 0xDEADDEAD;'
print r' binary = &binaryFormat;'
print r' length = sizeof binaryFormat;'
Tracer.invokeFunction(self, function)
def doInvokeFunction(self, function):
# Same as invokeFunction(), but called whether tracing is enabled or disabled.
#
# Used to modify the behavior of GL entry-points.
# Override GL extensions
if function.name in ('glGetString', 'glGetIntegerv', 'glGetStringi'):
Tracer.doInvokeFunction(self, function, prefix = 'gltrace::_', suffix = '_override')
return
# We implement GL_GREMEDY_*, etc., and not the driver
if function.name in self.marker_functions:
return
# We may be faking KHR_debug, so ensure the pointer query results are
# always zeroed to prevent dereferencing uninitialized pointers
if function.name == 'glGetPointerv':
print ' if (params &&'
print ' (pname == GL_DEBUG_CALLBACK_FUNCTION ||'
print ' pname == GL_DEBUG_CALLBACK_USER_PARAM)) {'
print ' *params = NULL;'
print ' }'
if function.name in self.getProcAddressFunctionNames:
nameArg = function.args[0].name
print ' if (strcmp("glNotifyMappedBufferRangeVMWX", (const char *)%s) == 0) {' % (nameArg,)
print ' _result = (%s)&glNotifyMappedBufferRangeVMWX;' % (function.type,)
for marker_function in self.marker_functions:
if self.api.getFunctionByName(marker_function):
print ' } else if (strcmp("%s", (const char *)%s) == 0) {' % (marker_function, nameArg)
print ' _result = (%s)&%s;' % (function.type, marker_function)
print ' } else {'
Tracer.doInvokeFunction(self, function)
# Replace function addresses with ours
# XXX: Doing this here instead of wrapRet means that the trace will
# contain the addresses of the wrapper functions, and not the real
# functions, but in practice this should make no difference.
if function.name in self.getProcAddressFunctionNames:
print ' _result = _wrapProcAddress(%s, _result);' % (nameArg,)
print ' }'
return
if function.name in ('glGetProgramBinary', 'glGetProgramBinaryOES'):
print r' bufSize = 0;'
Tracer.doInvokeFunction(self, function)
if function.name == 'glGetProgramiv':
print r' if (params && pname == GL_PROGRAM_BINARY_LENGTH) {'
print r' *params = 0;'
print r' }'
if function.name in ('glGetProgramBinary', 'glGetProgramBinaryOES'):
print r' if (length) {'
print r' *length = 0;'
print r' }'
buffer_targets = [
'ARRAY_BUFFER',
'ELEMENT_ARRAY_BUFFER',
'PIXEL_PACK_BUFFER',
'PIXEL_UNPACK_BUFFER',
'UNIFORM_BUFFER',
'TEXTURE_BUFFER',
'TRANSFORM_FEEDBACK_BUFFER',
'COPY_READ_BUFFER',
'COPY_WRITE_BUFFER',
'DRAW_INDIRECT_BUFFER',
'ATOMIC_COUNTER_BUFFER',
]
def wrapRet(self, function, instance):
Tracer.wrapRet(self, function, instance)
# Keep track of buffer mappings
if function.name in ('glMapBufferRange', 'glMapBufferRangeEXT'):
print ' if (access & GL_MAP_WRITE_BIT) {'
print ' _checkBufferMapRange = true;'
print ' }'
boolean_names = [
'GL_FALSE',
'GL_TRUE',
]
def gl_boolean(self, value):
return self.boolean_names[int(bool(value))]
# Regular expression for the names of the functions that unpack from a
# pixel buffer object. See the ARB_pixel_buffer_object specification.
unpack_function_regex = re.compile(r'^gl(' + r'|'.join([
r'Bitmap',
r'PolygonStipple',
r'PixelMap[a-z]+v',
r'DrawPixels',
r'Color(Sub)?Table',
r'(Convolution|Separable)Filter[12]D',
r'(Compressed)?(Multi)?Tex(ture)?(Sub)?Image[1-4]D',
]) + r')[0-9A-Z]*$')
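# Worked examples of the pattern above (an illustrative aside, not used by
# the tracer, doctest-style): pixel-unpacking entry points match, readback
# entry points do not.
#
#   >>> bool(unpack_function_regex.match('glTexSubImage2D'))
#   True
#   >>> bool(unpack_function_regex.match('glCompressedTexImage3D'))
#   True
#   >>> bool(unpack_function_regex.match('glReadPixels'))
#   False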
def serializeArgValue(self, function, arg):
# Recognize offsets instead of blobs when a PBO is bound
if self.unpack_function_regex.match(function.name) \
and (isinstance(arg.type, stdapi.Blob) \
or (isinstance(arg.type, stdapi.Const) \
and isinstance(arg.type.type, stdapi.Blob))):
print ' {'
print ' gltrace::Context *ctx = gltrace::getContext();'
print ' GLint _unpack_buffer = 0;'
print ' if (ctx->profile.desktop())'
print ' _glGetIntegerv(GL_PIXEL_UNPACK_BUFFER_BINDING, &_unpack_buffer);'
print ' if (_unpack_buffer) {'
print ' trace::localWriter.writePointer((uintptr_t)%s);' % arg.name
print ' } else {'
Tracer.serializeArgValue(self, function, arg)
print ' }'
print ' }'
return
# Several GL state functions take GLenum symbolic names as
# integer/floats; so dump the symbolic name whenever possible
if function.name.startswith('gl') \
and arg.type in (glapi.GLint, glapi.GLfloat, glapi.GLdouble) \
and arg.name == 'param':
assert arg.index > 0
assert function.args[arg.index - 1].name == 'pname'
assert function.args[arg.index - 1].type == glapi.GLenum
print ' if (is_symbolic_pname(pname) && is_symbolic_param(%s)) {' % arg.name
self.serializeValue(glapi.GLenum, arg.name)
print ' } else {'
Tracer.serializeArgValue(self, function, arg)
print ' }'
return
Tracer.serializeArgValue(self, function, arg)
def footer(self, api):
Tracer.footer(self, api)
# A simple state tracker to track the pointer values
# update the state
print 'static void _trace_user_arrays(GLuint count)'
print '{'
print ' gltrace::Context *ctx = gltrace::getContext();'
print
print ' glprofile::Profile profile = ctx->profile;'
print ' bool es1 = profile.es() && profile.major == 1;'
print
# Temporarily unbind the array buffer
print ' GLint _array_buffer = _glGetInteger(GL_ARRAY_BUFFER_BINDING);'
print ' if (_array_buffer) {'
self.fake_glBindBuffer(api, 'GL_ARRAY_BUFFER', '0')
print ' }'
print
for camelcase_name, uppercase_name in self.arrays:
# in which profile is the array available?
profile_check = 'profile.desktop()'
if camelcase_name in self.arrays_es1:
profile_check = '(' + profile_check + ' || es1)'
function_name = 'gl%sPointer' % camelcase_name
enable_name = 'GL_%s_ARRAY' % uppercase_name
binding_name = 'GL_%s_ARRAY_BUFFER_BINDING' % uppercase_name
function = api.getFunctionByName(function_name)
print ' // %s' % function.prototype()
print ' if (%s) {' % profile_check
self.array_trace_prolog(api, uppercase_name)
self.array_prolog(api, uppercase_name)
print ' if (_glIsEnabled(%s)) {' % enable_name
print ' GLint _binding = _glGetInteger(%s);' % binding_name
print ' if (!_binding) {'
# Get the arguments via glGet*
for arg in function.args:
arg_get_enum = 'GL_%s_ARRAY_%s' % (uppercase_name, arg.name.upper())
arg_get_function, arg_type = TypeGetter().visit(arg.type)
print ' %s %s = 0;' % (arg_type, arg.name)
print ' _%s(%s, &%s);' % (arg_get_function, arg_get_enum, arg.name)
arg_names = ', '.join([arg.name for arg in function.args[:-1]])
print ' size_t _size = _%s_size(%s, count);' % (function.name, arg_names)
# Emit a fake function
self.array_trace_intermezzo(api, uppercase_name)
print ' unsigned _call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
if arg.name != 'pointer':
self.serializeValue(arg.type, arg.name)
else:
print ' trace::localWriter.writeBlob((const void *)%s, _size);' % (arg.name)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_call);'
print ' trace::localWriter.endLeave();'
print ' }'
print ' }'
self.array_epilog(api, uppercase_name)
self.array_trace_epilog(api, uppercase_name)
print ' }'
print
# Same thing, but for glVertexAttribPointer*
#
# Some variants of glVertexAttribPointer alias conventional and generic attributes:
# - glVertexAttribPointer: no
# - glVertexAttribPointerARB: implementation dependent
# - glVertexAttribPointerNV: yes
#
# This means that the implementations of these functions do not always
# alias, and they need to be considered independently.
#
print ' // ES1 does not support generic vertex attributes'
print ' if (es1)'
print ' return;'
print
print ' vertex_attrib _vertex_attrib = _get_vertex_attrib();'
print
for suffix in ['', 'NV']:
if suffix:
SUFFIX = '_' + suffix
else:
SUFFIX = suffix
function_name = 'glVertexAttribPointer' + suffix
function = api.getFunctionByName(function_name)
print ' // %s' % function.prototype()
print ' if (_vertex_attrib == VERTEX_ATTRIB%s) {' % SUFFIX
if suffix == 'NV':
print ' GLint _max_vertex_attribs = 16;'
else:
print ' GLint _max_vertex_attribs = _glGetInteger(GL_MAX_VERTEX_ATTRIBS);'
print ' for (GLint index = 0; index < _max_vertex_attribs; ++index) {'
print ' GLint _enabled = 0;'
if suffix == 'NV':
print ' _glGetIntegerv(GL_VERTEX_ATTRIB_ARRAY0_NV + index, &_enabled);'
else:
print ' _glGetVertexAttribiv%s(index, GL_VERTEX_ATTRIB_ARRAY_ENABLED%s, &_enabled);' % (suffix, SUFFIX)
print ' if (_enabled) {'
print ' GLint _binding = 0;'
if suffix != 'NV':
# It doesn't seem possible to use VBOs with NV_vertex_program.
print ' _glGetVertexAttribiv%s(index, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING%s, &_binding);' % (suffix, SUFFIX)
print ' if (!_binding) {'
# Get the arguments via glGet*
for arg in function.args[1:]:
if suffix == 'NV':
arg_get_enum = 'GL_ATTRIB_ARRAY_%s%s' % (arg.name.upper(), SUFFIX)
else:
arg_get_enum = 'GL_VERTEX_ATTRIB_ARRAY_%s%s' % (arg.name.upper(), SUFFIX)
arg_get_function, arg_type = TypeGetter('glGetVertexAttrib', False, suffix).visit(arg.type)
print ' %s %s = 0;' % (arg_type, arg.name)
print ' _%s(index, %s, &%s);' % (arg_get_function, arg_get_enum, arg.name)
arg_names = ', '.join([arg.name for arg in function.args[1:-1]])
print ' size_t _size = _%s_size(%s, count);' % (function.name, arg_names)
# Emit a fake function
print ' unsigned _call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
print ' trace::localWriter.beginArg(%u);' % (arg.index,)
if arg.name != 'pointer':
self.serializeValue(arg.type, arg.name)
else:
print ' trace::localWriter.writeBlob((const void *)%s, _size);' % (arg.name)
print ' trace::localWriter.endArg();'
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_call);'
print ' trace::localWriter.endLeave();'
print ' }'
print ' }'
print ' }'
print ' }'
print
# Restore the original array_buffer
print ' if (_array_buffer) {'
self.fake_glBindBuffer(api, 'GL_ARRAY_BUFFER', '_array_buffer')
print ' }'
print
print '}'
print
#
# Hooks for glTexCoordPointer, which is identical to the other array
# pointers except the fact that it is indexed by glClientActiveTexture.
#
def array_prolog(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' GLint max_units = 0;'
print ' if (ctx->profile.desktop())'
print ' _glGetIntegerv(GL_MAX_TEXTURE_COORDS, &max_units);'
print ' else'
print ' _glGetIntegerv(GL_MAX_TEXTURE_UNITS, &max_units);'
print ' GLint client_active_texture = GL_TEXTURE0;'
print ' if (max_units > 0) {'
print ' _glGetIntegerv(GL_CLIENT_ACTIVE_TEXTURE, &client_active_texture);'
print ' }'
print ' GLint unit = 0;'
print ' do {'
print ' GLint texture = GL_TEXTURE0 + unit;'
print ' if (max_units > 0) {'
print ' _glClientActiveTexture(texture);'
print ' }'
def array_trace_prolog(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' bool client_active_texture_dirty = false;'
def array_epilog(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' } while (++unit < max_units);'
self.array_cleanup(api, uppercase_name)
def array_cleanup(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' if (max_units > 0) {'
print ' _glClientActiveTexture(client_active_texture);'
print ' }'
def array_trace_intermezzo(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' if (texture != client_active_texture || client_active_texture_dirty) {'
print ' client_active_texture_dirty = true;'
self.fake_glClientActiveTexture_call(api, "texture");
print ' }'
def array_trace_epilog(self, api, uppercase_name):
if uppercase_name == 'TEXTURE_COORD':
print ' if (client_active_texture_dirty) {'
self.fake_glClientActiveTexture_call(api, "client_active_texture");
print ' }'
def fake_glBindBuffer(self, api, target, buffer):
function = api.getFunctionByName('glBindBuffer')
self.fake_call(function, [target, buffer])
def fake_glClientActiveTexture_call(self, api, texture):
function = api.getFunctionByName('glClientActiveTexture')
self.fake_call(function, [texture])
def emitFakeTexture2D(self):
function = glapi.glapi.getFunctionByName('glTexImage2D')
instances = function.argNames()
print ' unsigned _fake_call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
for arg in function.args:
assert not arg.output
self.serializeArg(function, arg)
print ' trace::localWriter.endEnter();'
print ' trace::localWriter.beginLeave(_fake_call);'
print ' trace::localWriter.endLeave();'
| tuanthng/apitrace | wrappers/gltrace.py | Python | mit | 54,571 | ["VisIt"] | 8a132b4f6f4bcff87eb122a8f3b3f2581792b79a8d8b7ad5d68fc81d9c7ce4e4 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# derived_models.py: Models that decorate and extend other models.
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
# Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division # Ensures that a/b is always a float.
## ALL ########################################################################
# We use __all__ to restrict what globals are visible to external modules.
__all__ = [
'DerivedModel',
'PoisonedModel',
'BinomialModel',
'DifferentiableBinomialModel',
'GaussianHyperparameterizedModel',
'MultinomialModel',
'MLEModel',
'RandomWalkModel',
'GaussianRandomWalkModel'
]
## IMPORTS ####################################################################
from builtins import range
from functools import reduce
from past.builtins import basestring
import numpy as np
import scipy.special
from scipy.stats import binom, multivariate_normal, norm
from itertools import combinations_with_replacement as tri_comb
from qinfer.utils import binomial_pdf, multinomial_pdf, sample_multinomial
from qinfer.abstract_model import Model, DifferentiableModel
from qinfer._lib import enum # <- TODO: replace with flufl.enum!
from qinfer.utils import binom_est_error
from qinfer.domains import IntegerDomain, MultinomialDomain, RealDomain
## FUNCTIONS ###################################################################
def rmfield( a, *fieldnames_to_remove ):
# Removes named fields from a structured np array
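# Example (illustrative): for a structured array with dtype
# [('x', float), ('n_meas', int)], rmfield(a, 'n_meas') keeps only the
# 'x' field; any number of field names may be dropped at once.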
return a[ [ name for name in a.dtype.names if name not in fieldnames_to_remove ] ]
## CLASSES #####################################################################
class DerivedModel(Model):
"""
Base class for any model that decorates another model.
Provides passthroughs for modelparam_names, n_modelparams, etc.
Many of these passthroughs can and should be overridden by
specific subclasses, but it is rare that something will
override all of them.
"""
_underlying_model = None
def __init__(self, underlying_model):
self._underlying_model = underlying_model
super(DerivedModel, self).__init__()
@property
def underlying_model(self):
return self._underlying_model
@property
def base_model(self):
return self._underlying_model.base_model
@property
def model_chain(self):
return self._underlying_model.model_chain + (self._underlying_model, )
@property
def n_modelparams(self):
# We have as many model parameters as the underlying model.
return self.underlying_model.n_modelparams
@property
def expparams_dtype(self):
return self.underlying_model.expparams_dtype
@property
def modelparam_names(self):
return self.underlying_model.modelparam_names
@property
def Q(self):
return self.underlying_model.Q
def clear_cache(self):
self.underlying_model.clear_cache()
def n_outcomes(self, expparams):
return self.underlying_model.n_outcomes(expparams)
def are_models_valid(self, modelparams):
return self.underlying_model.are_models_valid(modelparams)
def domain(self, expparams):
return self.underlying_model.domain(expparams)
def are_expparam_dtypes_consistent(self, expparams):
return self.underlying_model.are_expparam_dtypes_consistent(expparams)
def update_timestep(self, modelparams, expparams):
return self.underlying_model.update_timestep(modelparams, expparams)
def canonicalize(self, modelparams):
return self.underlying_model.canonicalize(modelparams)
PoisonModes = enum.enum("ALE", "MLE")
class PoisonedModel(DerivedModel):
r"""
Model that simulates sampling error incurred by the MLE or ALE methods of
reconstructing likelihoods from sample data. The true likelihood given by an
underlying model is perturbed by a normally distributed random variable
:math:`\epsilon`, and then truncated to the interval :math:`[0, 1]`.
The variance of :math:`\epsilon` can be specified either as a constant,
to simulate ALE (in which samples are collected until a given threshold is
met), or as proportional to the variance of a possibly-hedged binomial
estimator, to simulate MLE.
:param Model underlying_model: The "true" model to be poisoned.
:param float tol: For ALE, specifies the given error tolerance to simulate.
:param int n_samples: For MLE, specifies the number of samples collected.
:param float hedge: For MLE, specifies the hedging used in estimating the
true likelihood.
"""
def __init__(self, underlying_model,
tol=None, n_samples=None, hedge=None
):
super(PoisonedModel, self).__init__(underlying_model)
if (tol is None) == (n_samples is None):
raise ValueError(
"Exactly one of tol and n_samples must be specified"
)
if tol is not None:
self._mode = PoisonModes.ALE
self._tol = tol
else:
self._mode = PoisonModes.MLE
self._n_samples = n_samples
self._hedge = hedge if hedge is not None else 0.0
## METHODS ##
def likelihood(self, outcomes, modelparams, expparams):
# By calling the superclass implementation, we can consolidate
# call counting there.
# Get the original, undisturbed likelihoods.
super(PoisonedModel, self).likelihood(outcomes, modelparams, expparams)
L = self.underlying_model.likelihood(
outcomes, modelparams, expparams)
# Now get the random variates from a standard normal [N(0, 1)]
# distribution; we'll rescale them soon.
epsilon = np.random.normal(size=L.shape)
# If ALE, rescale by a constant tolerance.
if self._mode == PoisonModes.ALE:
epsilon *= self._tol
# Otherwise, rescale by the estimated error in the binomial estimator.
elif self._mode == PoisonModes.MLE:
epsilon *= binom_est_error(p=L, N=self._n_samples, hedge=self._hedge)
# Now we truncate and return.
np.clip(L + epsilon, 0, 1, out=L)
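# Worked example (illustrative): a true likelihood of 0.98 perturbed by
# epsilon = +0.05 would leave [0, 1], so it is truncated to 1.0; likewise
# 0.02 with epsilon = -0.05 is truncated to 0.0.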
return L
def simulate_experiment(self, modelparams, expparams, repeat=1):
"""
Simulates experimental data according to the original (unpoisoned)
model. Note that this explicitly causes the simulated data and the
likelihood function to disagree. This is, strictly speaking, a violation
of the assumptions made about `~qinfer.abstract_model.Model` subclasses.
This violation is by intention, and allows for testing the robustness
of inference algorithms against errors in that assumption.
"""
super(PoisonedModel, self).simulate_experiment(modelparams, expparams, repeat)
return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)
class BinomialModel(DerivedModel):
"""
Model representing finite numbers of iid samples from another model,
using the binomial distribution to calculate the new likelihood function.
:param qinfer.abstract_model.Model underlying_model: An instance of a two-
outcome model to be decorated by the binomial distribution.
Note that a new experimental parameter field ``n_meas`` is added by this
model. This parameter field represents how many times a measurement should
be made at a given set of experimental parameters. To ensure the correct
operation of this model, it is important that the decorated model does not
also admit a field with the name ``n_meas``.
"""
def __init__(self, underlying_model):
super(BinomialModel, self).__init__(underlying_model)
if not (underlying_model.is_n_outcomes_constant and underlying_model.n_outcomes(None) == 2):
raise ValueError("Decorated model must be a two-outcome model.")
if isinstance(underlying_model.expparams_dtype, str):
# We default to calling the original experiment parameters "x".
self._expparams_scalar = True
self._expparams_dtype = [('x', underlying_model.expparams_dtype), ('n_meas', 'uint')]
else:
self._expparams_scalar = False
self._expparams_dtype = underlying_model.expparams_dtype + [('n_meas', 'uint')]
## PROPERTIES ##
@property
def decorated_model(self):
# Provided for backcompat only.
return self.underlying_model
@property
def expparams_dtype(self):
return self._expparams_dtype
@property
def is_n_outcomes_constant(self):
"""
Returns ``True`` if and only if the number of outcomes for each
experiment is independent of the experiment being performed.
This property is assumed by inference engines to be constant for
the lifetime of a Model instance.
"""
return False
## METHODS ##
def n_outcomes(self, expparams):
"""
Returns an array of dtype ``uint`` describing the number of outcomes
for each experiment specified by ``expparams``.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property.
"""
return expparams['n_meas'] + 1
def domain(self, expparams):
"""
Returns a list of ``Domain``s, one for each input expparam.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property, or, in the case where ``n_outcomes_constant`` is ``True``,
``None`` should be a valid input.
:rtype: list of ``Domain``
"""
return [IntegerDomain(min=0,max=n_o-1) for n_o in self.n_outcomes(expparams)]
def are_expparam_dtypes_consistent(self, expparams):
"""
Returns `True` iff all of the given expparams
correspond to outcome domains with the same dtype.
For efficiency, concrete subclasses should override this method
if the result is always `True`.
:param np.ndarray expparams: Array of expparams
of type `expparams_dtype`
:rtype: `bool`
"""
# The output type is always the same, even though the domain is not.
return True
def likelihood(self, outcomes, modelparams, expparams):
# By calling the superclass implementation, we can consolidate
# call counting there.
super(BinomialModel, self).likelihood(outcomes, modelparams, expparams)
pr1 = self.underlying_model.likelihood(
np.array([1], dtype='uint'),
modelparams,
expparams['x'] if self._expparams_scalar else expparams)
# Now we concatenate over outcomes.
L = np.concatenate([
binomial_pdf(expparams['n_meas'][np.newaxis, :], outcomes[idx], pr1)
for idx in range(outcomes.shape[0])
])
assert not np.any(np.isnan(L))
return L
def simulate_experiment(self, modelparams, expparams, repeat=1):
# FIXME: uncommenting causes a slowdown, but we need to call
# to track sim counts.
#super(BinomialModel, self).simulate_experiment(modelparams, expparams)
# Start by getting the pr(1) for the underlying model.
pr1 = self.underlying_model.likelihood(
np.array([1], dtype='uint'),
modelparams,
expparams['x'] if self._expparams_scalar else expparams)
dist = binom(
expparams['n_meas'].astype('int'), # ← Really, NumPy?
pr1[0, :, :]
)
sample = (
(lambda: dist.rvs()[np.newaxis, :, :])
if pr1.size != 1 else
(lambda: np.array([[[dist.rvs()]]]))
)
os = np.concatenate([
sample()
for idx in range(repeat)
], axis=0)
return os[0,0,0] if os.size == 1 else os
def update_timestep(self, modelparams, expparams):
return self.underlying_model.update_timestep(modelparams,
expparams['x'] if self._expparams_scalar else expparams
)
class DifferentiableBinomialModel(BinomialModel, DifferentiableModel):
"""
Extends :class:`BinomialModel` to take advantage of differentiable
two-outcome models.
"""
def __init__(self, underlying_model):
if not isinstance(underlying_model, DifferentiableModel):
raise TypeError("Decorated model must also be differentiable.")
BinomialModel.__init__(self, underlying_model)
def score(self, outcomes, modelparams, expparams):
raise NotImplementedError("Not yet implemented.")
def fisher_information(self, modelparams, expparams):
# Since the FI simply adds, we can multiply the single-shot
# FI provided by the underlying model by the number of measurements
# that we perform.
two_outcome_fi = self.underlying_model.fisher_information(
modelparams, expparams
)
return two_outcome_fi * expparams['n_meas']
class GaussianHyperparameterizedModel(DerivedModel):
"""
Model representing a two-outcome model viewed through samples
from one of two distinct Gaussian distributions. This model adds four new
model parameters to its underlying model, respectively representing the
mean outcome conditioned on an underlying 0, the mean outcome conditioned
on an underlying 1, and the variance of outcomes conditioned in each case.
:param qinfer.abstract_model.Model underlying_model: An instance of a two-
outcome model to be viewed through Gaussian distributions.
"""
def __init__(self, underlying_model):
super(GaussianHyperparameterizedModel, self).__init__(underlying_model)
if not (underlying_model.is_n_outcomes_constant and underlying_model.n_outcomes(None) == 2):
raise ValueError("Decorated model must be a two-outcome model.")
n_orig_mps = underlying_model.n_modelparams
self._orig_mps_slice = np.s_[:n_orig_mps]
self._mu_slice = np.s_[n_orig_mps:n_orig_mps + 2]
self._sigma2_slice = np.s_[n_orig_mps + 2:n_orig_mps + 4]
## PROPERTIES ##
@property
def decorated_model(self):
# Provided for backcompat only.
return self.underlying_model
@property
def modelparam_names(self):
return self.underlying_model.modelparam_names + [
r'\mu_0', r'\mu_1',
r'\sigma_0^2', r'\sigma_1^2'
]
@property
def n_modelparams(self):
return len(self.modelparam_names)
## METHODS ##
def domain(self, expparams):
return [RealDomain()] * len(expparams) if expparams is not None else RealDomain()
def are_expparam_dtypes_consistent(self, expparams):
return True
def are_models_valid(self, modelparams):
orig_mps = modelparams[:, self._orig_mps_slice]
sigma2 = modelparams[:, self._sigma2_slice]
return np.all([
self.underlying_model.are_models_valid(orig_mps),
np.all(sigma2 > 0, axis=-1)
], axis=0)
def underlying_likelihood(self, binary_outcomes, modelparams, expparams):
"""
Given outcomes hypothesized for the underlying model, returns the likelihood
with which those outcomes occur.
"""
original_mps = modelparams[..., self._orig_mps_slice]
return self.underlying_model.likelihood(binary_outcomes, original_mps, expparams)
def likelihood(self, outcomes, modelparams, expparams):
# By calling the superclass implementation, we can consolidate
# call counting there.
super(GaussianHyperparameterizedModel, self).likelihood(outcomes, modelparams, expparams)
# We want these to broadcast to the shape
# (idx_underlying_outcome, idx_outcome, idx_modelparam, idx_experiment).
# Thus, we need shape
# (idx_underlying_outcome, 1, idx_modelparam, 1).
mu = (modelparams[:, self._mu_slice].T)[:, np.newaxis, :, np.newaxis]
sigma = np.sqrt(
(modelparams[:, self._sigma2_slice].T)[:, np.newaxis, :, np.newaxis]
)
assert np.all(sigma > 0)
# Now we can rescale the outcomes to be random variates z drawn from N(0, 1).
scaled_outcomes = (outcomes[np.newaxis,:,np.newaxis,np.newaxis] - mu) / sigma
# We can then compute the conditional likelihood Pr(z | underlying_outcome, model).
conditional_L = norm(0, 1).pdf(scaled_outcomes)
# To find the marginalized likelihood, we now need the underlying likelihood
# Pr(underlying_outcome | model), so that we can sum over the idx_u_o axis.
# Note that we need to add a new axis to shift the underlying outcomes left
# of the real-valued outcomes z.
underlying_L = self.underlying_likelihood(
np.array([0, 1], dtype='uint'),
modelparams, expparams
)[:, None, :, :]
# Now we marginalize and return.
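# In symbols (illustrative summary): the value returned below is
#   Pr(z | model) = sum over o in {0, 1} of Pr(z | o, model) * Pr(o | model),
# a two-component Gaussian mixture whose weights are the underlying
# two-outcome likelihoods.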
return (underlying_L * conditional_L).sum(axis=0)
def simulate_experiment(self, modelparams, expparams, repeat=1):
super(GaussianHyperparameterizedModel, self).simulate_experiment(modelparams, expparams, repeat)
# Start by generating a bunch of (0, 1) normalized random variates
# that we'll randomly rescale to the right location and shape.
zs = np.random.randn(modelparams.shape[0], expparams.shape[0])
# Next, we sample a bunch of underlying outcomes to figure out
# how to rescale everything.
underlying_outcomes = self.underlying_model.simulate_experiment(
modelparams[:, :-4], expparams, repeat=repeat
)
# We can now rescale zs to obtain the actual outcomes.
mu = (modelparams[:, self._mu_slice].T)[:, None, :, None]
sigma = np.sqrt(
(modelparams[:, self._sigma2_slice].T)[:, None, :, None]
)
outcomes = (
np.where(underlying_outcomes, mu[1], mu[0]) +
np.where(underlying_outcomes, sigma[1], sigma[0]) * zs
)
return outcomes[0,0,0] if outcomes.size == 1 else outcomes
class MultinomialModel(DerivedModel):
"""
Model representing finite numbers of iid samples from another model with
a fixed and finite number of outcomes,
using the multinomial distribution to calculate the new likelihood function.
:param qinfer.abstract_model.FiniteOutcomeModel underlying_model: An instance
of a D-outcome model to be decorated by the multinomial distribution.
This underlying model must have ``is_n_outcomes_constant`` as ``True``.
Note that a new experimental parameter field ``n_meas`` is added by this
model. This parameter field represents how many times a measurement should
be made at a given set of experimental parameters. To ensure the correct
operation of this model, it is important that the decorated model does not
also admit a field with the name ``n_meas``.
"""
## INITIALIZER ##
def __init__(self, underlying_model):
super(MultinomialModel, self).__init__(underlying_model)
if isinstance(underlying_model.expparams_dtype, str):
# We default to calling the original experiment parameters "x".
self._expparams_scalar = True
self._expparams_dtype = [('x', underlying_model.expparams_dtype), ('n_meas', 'uint')]
else:
self._expparams_scalar = False
self._expparams_dtype = underlying_model.expparams_dtype + [('n_meas', 'uint')]
# Demand that the underlying model always has the same number of outcomes
# This assumption could in principle be generalized, but not worth the effort now.
assert(self.underlying_model.is_n_outcomes_constant)
self._underlying_domain = self.underlying_model.domain(None)
self._n_sides = self._underlying_domain.n_members
# Useful for getting the right type, etc.
self._example_domain = MultinomialDomain(n_elements=self.n_sides, n_meas=3)
## PROPERTIES ##
@property
def decorated_model(self):
# Provided for backcompat only.
return self.underlying_model
@property
def expparams_dtype(self):
return self._expparams_dtype
@property
def is_n_outcomes_constant(self):
"""
Returns ``True`` if and only if the number of outcomes for each
experiment is independent of the experiment being performed.
This property is assumed by inference engines to be constant for
the lifetime of a Model instance.
"""
# Different values of n_meas result in different numbers of outcomes
return False
@property
def n_sides(self):
"""
Returns the number of possible outcomes of the underlying model.
"""
return self._n_sides
@property
def underlying_domain(self):
"""
Returns the `Domain` of the underlying model.
"""
return self._underlying_domain
## METHODS ##
def n_outcomes(self, expparams):
"""
Returns an array of dtype ``uint`` describing the number of outcomes
for each experiment specified by ``expparams``.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property.
"""
# Standard combinatorial formula equal to the number of
# possible tuples whose non-negative integer entries sum to n_meas.
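# Example (illustrative): n_meas = 3 draws from a k = 3 outcome model admit
# C(3 + 3 - 1, 3 - 1) = C(5, 2) = 10 distinct count tuples, from (3, 0, 0)
# down to (0, 0, 3).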
n = expparams['n_meas']
k = self.n_sides
return scipy.special.binom(n + k - 1, k - 1).astype('uint')
def domain(self, expparams):
"""
Returns a list of :class:`Domain` objects, one for each input expparam.
:param numpy.ndarray expparams: Array of experimental parameters. This
array must be of dtype agreeing with the ``expparams_dtype``
property.
:rtype: list of ``Domain``
"""
return [
MultinomialDomain(n_elements=self.n_sides, n_meas=ep['n_meas'])
for ep in expparams
]
def are_expparam_dtypes_consistent(self, expparams):
"""
Returns `True` iff all of the given expparams
correspond to outcome domains with the same dtype.
For efficiency, concrete subclasses should override this method
if the result is always `True`.
:param np.ndarray expparams: Array of expparams
of type `expparams_dtype`
:rtype: `bool`
"""
# The output type is always the same, even though the domain is not.
return True
def likelihood(self, outcomes, modelparams, expparams):
# By calling the superclass implementation, we can consolidate
# call counting there.
super(MultinomialModel, self).likelihood(outcomes, modelparams, expparams)
# Save a wee bit of time by only calculating the likelihoods of outcomes 0,...,d-2
prs = self.underlying_model.likelihood(
self.underlying_domain.values[:-1],
modelparams,
expparams['x'] if self._expparams_scalar else expparams)
# shape (sides-1, n_mps, n_eps)
prs = np.tile(prs, (outcomes.shape[0],1,1,1)).transpose((1,0,2,3))
# shape (n_outcomes, sides-1, n_mps, n_eps)
os = self._example_domain.to_regular_array(outcomes)
# shape (n_outcomes, sides)
os = np.tile(os, (modelparams.shape[0],expparams.shape[0],1,1)).transpose((3,2,0,1))
# shape (n_outcomes, sides, n_mps, n_eps)
L = multinomial_pdf(os, prs)
assert not np.any(np.isnan(L))
return L
def simulate_experiment(self, modelparams, expparams, repeat=1):
super(MultinomialModel, self).simulate_experiment(modelparams, expparams, repeat)
n_sides = self.n_sides
n_mps = modelparams.shape[0]
n_eps = expparams.shape[0]
# Save a wee bit of time by only calculating the likelihoods of outcomes 0,...,d-2
prs = np.empty((n_sides,n_mps,n_eps))
prs[:-1,...] = self.underlying_model.likelihood(
self.underlying_domain.values[:-1],
modelparams,
expparams['x'] if self._expparams_scalar else expparams)
# The last outcome's probability is fixed by normalization.
prs[-1,...] = 1 - np.sum(prs[:-1,...], axis=0)
# shape (sides, n_mps, n_eps)
os = np.concatenate([
sample_multinomial(n_meas, prs[:,:,idx_n_meas], size=repeat)[np.newaxis,...]
for idx_n_meas, n_meas in enumerate(expparams['n_meas'].astype('int'))
]).transpose((3,2,0,1))
# convert to fancy data type
os = self._example_domain.from_regular_array(os)
return os[0,0,0] if os.size == 1 else os
class MLEModel(DerivedModel):
r"""
Uses the method of [JDD08]_ to approximate the maximum likelihood
estimator as the mean of a fictional posterior formed by amplifying the
Bayes update by a given power :math:`\gamma`. As :math:`\gamma \to
\infty`, this approximation to the MLE improves, but at the cost of
numerical stability.
:param float likelihood_power: Power to which the likelihood calls
should be raised in order to amplify the Bayes update.
"""
def __init__(self, underlying_model, likelihood_power):
super(MLEModel, self).__init__(underlying_model)
self._pow = likelihood_power
def simulate_experiment(self, modelparams, expparams, repeat=1):
super(MLEModel, self).simulate_experiment(modelparams, expparams, repeat)
return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)
def likelihood(self, outcomes, modelparams, expparams):
L = self.underlying_model.likelihood(outcomes, modelparams, expparams)
return L**self._pow
class RandomWalkModel(DerivedModel):
r"""
Model such that after each time step, a random perturbation is added to
each model parameter vector according to a given distribution.
:param Model underlying_model: Model representing the likelihood with no
random walk added.
:param Distribution step_distribution: Distribution over step vectors.
"""
def __init__(self, underlying_model, step_distribution):
self._step_dist = step_distribution
super(RandomWalkModel, self).__init__(underlying_model)
if self.underlying_model.n_modelparams != self._step_dist.n_rvs:
raise TypeError("Step distribution does not match model dimension.")
## METHODS ##
def likelihood(self, outcomes, modelparams, expparams):
super(RandomWalkModel, self).likelihood(outcomes, modelparams, expparams)
return self.underlying_model.likelihood(outcomes, modelparams, expparams)
def simulate_experiment(self, modelparams, expparams, repeat=1):
super(RandomWalkModel, self).simulate_experiment(modelparams, expparams, repeat)
return self.underlying_model.simulate_experiment(modelparams, expparams, repeat)
def update_timestep(self, modelparams, expparams):
# Note that the timestep update is presumed to be independent of the
# experiment.
steps = self._step_dist.sample(n=modelparams.shape[0] * expparams.shape[0])
# Break apart the first two axes and transpose.
steps = steps.reshape((modelparams.shape[0], expparams.shape[0], self.n_modelparams))
steps = steps.transpose((0, 2, 1))
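# Shape walkthrough (illustrative): sample() returns
# (n_models * n_experiments, n_modelparams); the reshape gives
# (n_models, n_experiments, n_modelparams) and the transpose
# (n_models, n_modelparams, n_experiments), which broadcasts against
# modelparams[:, :, np.newaxis] below.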
return modelparams[:, :, np.newaxis] + steps
class GaussianRandomWalkModel(DerivedModel):
r"""
Model such that after each time step, a random perturbation is
added to each model parameter vector according to a
zero-mean gaussian distribution.
The :math:`n\times n` covariance matrix of this distribution is
either fixed and known, or its entries are treated as unknown,
being appended to the model parameters.
For diagonal covariance matrices, :math:`n` parameters are added to the model
storing the square roots of the diagonal entries of the covariance matrix.
For dense covariance matrices, :math:`n(n+1)/2` parameters are added to
the model, storing the entries of the lower triangular portion of the
Cholesky factorization of the covariance matrix.
:param Model underlying_model: Model representing the likelihood with no
random walk added.
:param random_walk_idxs: A list or ``np.slice`` of
``underlying_model`` model parameter indices to add the random walk to.
Indices larger than ``underlying_model.n_modelparams`` should not
be touched.
:param fixed_covariance: An ``np.ndarray`` specifying the fixed covariance
matrix (or diagonal thereof if ``diagonal`` is ``True``) of the
gaussian distribution. If set to ``None`` (default), this matrix is
presumed unknown and parameters are appended to the model describing
it.
:param boolean diagonal: Whether the gaussian distribution covariance matrix
is diagonal, or densely populated. Default is
``True``.
:param scale_mult: A function which takes an array of expparams and
outputs a real number for each one, representing the scale of the
given experiment. This is useful if different experiments have
different time lengths and therefore incur different dispersion amounts.
If a string is given instead of a function,
the scale multiplier is the ``expparam`` with that name.
:param model_transformation: Either ``None`` or a pair of functions
``(transform, inv_transform)`` specifying a transformation of ``modelparams``
(of the underlying model) before gaussian noise is added,
and the inverse operation after
the gaussian noise has been added.
"""
def __init__(
self, underlying_model, random_walk_idxs='all',
fixed_covariance=None, diagonal=True,
scale_mult=None, model_transformation=None
):
self._diagonal = diagonal
self._rw_idxs = np.s_[:underlying_model.n_modelparams] \
if random_walk_idxs == 'all' else random_walk_idxs
explicit_idxs = np.arange(underlying_model.n_modelparams)[self._rw_idxs]
if explicit_idxs.size == 0:
raise IndexError('At least one model parameter must take a random walk.')
self._rw_names = [
underlying_model.modelparam_names[idx]
for idx in explicit_idxs
]
self._n_rw = len(explicit_idxs)
self._srw_names = []
if fixed_covariance is None:
# In this case we need to learn the covariance parameters too,
# therefore, we need to add modelparams
self._has_fixed_covariance = False
if self._diagonal:
self._srw_names = [r"\sigma_{{{}}}".format(name) for name in self._rw_names]
self._srw_idxs = (underlying_model.n_modelparams + \
np.arange(self._n_rw)).astype(int)
else:
self._srw_idxs = (underlying_model.n_modelparams +
np.arange(self._n_rw * (self._n_rw + 1) // 2)).astype(int)
# the following list of indices tells us how to populate
# a cholesky matrix with a 1D list of values
self._srw_tri_idxs = np.tril_indices(self._n_rw)
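# Illustrative aside: with self._n_rw == 3 a packed vector
# [a, b, c, d, e, f] fills the factor as
#   [[a, 0, 0],
#    [b, c, 0],
#    [d, e, f]]
# via chol[np.tril_indices(3)] = packed, and the implied covariance is
# chol.dot(chol.T).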
for idx1, name1 in enumerate(self._rw_names):
for name2 in self._rw_names[:idx1+1]:
if name1 == name2:
self._srw_names.append(r"\sigma_{{{}}}".format(name1))
else:
self._srw_names.append(r"\sigma_{{{},{}}}".format(name2,name1))
else:
# In this case the covariance matrix is fixed and fully specified
self._has_fixed_covariance = True
if self._diagonal:
if fixed_covariance.ndim != 1:
raise ValueError('Diagonal covariance requested, but fixed_covariance has {} dimensions.'.format(fixed_covariance.ndim))
if fixed_covariance.size != self._n_rw:
raise ValueError('fixed_covariance dimension, {}, inconsistent with number of parameters, {}'.format(fixed_covariance.size, self._n_rw))
self._fixed_scale = np.sqrt(fixed_covariance)
else:
if fixed_covariance.ndim != 2:
raise ValueError('Dense covariance requested, but fixed_covariance has {} dimensions.'.format(fixed_covariance.ndim))
if fixed_covariance.size != self._n_rw **2 or fixed_covariance.shape[-2] != fixed_covariance.shape[-1]:
raise ValueError('fixed_covariance expected to be square with width {}'.format(self._n_rw))
self._fixed_chol = np.linalg.cholesky(fixed_covariance)
self._fixed_distribution = multivariate_normal(
np.zeros(self._n_rw),
np.dot(self._fixed_chol, self._fixed_chol.T)
)
super(GaussianRandomWalkModel, self).__init__(underlying_model)
if np.max(np.arange(self.n_modelparams)[self._rw_idxs]) > np.max(explicit_idxs):
raise IndexError('random_walk_idxs out of bounds; must index (a subset of) underlying_model modelparams.')
if scale_mult is None:
self._scale_mult_fcn = (lambda expparams: 1)
elif isinstance(scale_mult, basestring):
self._scale_mult_fcn = lambda x: x[scale_mult]
else:
self._scale_mult_fcn = scale_mult
self._has_transformation = model_transformation is not None
if self._has_transformation:
self._transform = model_transformation[0]
self._inv_transform = model_transformation[1]
## PROPERTIES ##
@property
def modelparam_names(self):
return self.underlying_model.modelparam_names + self._srw_names
@property
def n_modelparams(self):
return len(self.modelparam_names)
@property
def is_n_outcomes_constant(self):
return False
## METHODS ##
def are_models_valid(self, modelparams):
ud_valid = self.underlying_model.are_models_valid(modelparams[...,:self.underlying_model.n_modelparams])
if self._has_fixed_covariance:
return ud_valid
elif self._diagonal:
pos_std = np.greater_equal(modelparams[...,self._srw_idxs], 0).all(axis=-1)
return np.logical_and(ud_valid, pos_std)
else:
return ud_valid
def likelihood(self, outcomes, modelparams, expparams):
super(GaussianRandomWalkModel, self).likelihood(outcomes, modelparams, expparams)
return self.underlying_model.likelihood(outcomes, modelparams[...,:self.underlying_model.n_modelparams], expparams)
def simulate_experiment(self, modelparams, expparams, repeat=1):
super(GaussianRandomWalkModel, self).simulate_experiment(modelparams, expparams, repeat)
return self.underlying_model.simulate_experiment(modelparams[...,:self.underlying_model.n_modelparams], expparams, repeat)
def est_update_covariance(self, modelparams):
"""
Returns the covariance of the gaussian noise process for one
unit step. In the case where the covariance is being learned,
the expected covariance matrix is returned.
:param modelparams: Shape `(n_models, n_modelparams)` shape array
of model parameters.
"""
if self._diagonal:
cov = (self._fixed_scale ** 2 if self._has_fixed_covariance \
else np.mean(modelparams[:, self._srw_idxs] ** 2, axis=0))
cov = np.diag(cov)
else:
if self._has_fixed_covariance:
cov = np.dot(self._fixed_chol, self._fixed_chol.T)
else:
chol = np.zeros((modelparams.shape[0], self._n_rw, self._n_rw))
chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs]
cov = np.mean(np.einsum('ijk,ilk->ijl', chol, chol), axis=0)
return cov
def update_timestep(self, modelparams, expparams):
n_mps = modelparams.shape[0]
n_eps = expparams.shape[0]
if self._diagonal:
scale = self._fixed_scale if self._has_fixed_covariance else modelparams[:, self._srw_idxs]
# the following works when _fixed_scale has shape (n_rw) or (n_mps,n_rw)
# in the latter, each particle gets dispersed by its own belief of the scale
steps = scale * np.random.normal(size = (n_eps, n_mps, self._n_rw))
steps = steps.transpose((1,2,0))
else:
if self._has_fixed_covariance:
steps = np.dot(
self._fixed_chol,
np.random.normal(size = (self._n_rw, n_mps * n_eps))
).reshape(self._n_rw, n_mps, n_eps).transpose((1,0,2))
else:
chol = np.zeros((n_mps, self._n_rw, self._n_rw))
chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs]
# each particle gets dispersed by its own belief of the cholesky
steps = np.einsum('kij,kjl->kil', chol, np.random.normal(size = (n_mps, self._n_rw, n_eps)))
# multiply by the scales of the current experiments
steps = self._scale_mult_fcn(expparams) * steps
if self._has_transformation:
# repeat model params for every expparam
new_mps = np.repeat(modelparams[np.newaxis,:,:], n_eps, axis=0).reshape((n_eps * n_mps, -1))
# run transformation on underlying slice
new_mps[:, :self.underlying_model.n_modelparams] = self._transform(
new_mps[:, :self.underlying_model.n_modelparams]
)
# add on the random steps to the relevant indices
new_mps[:, self._rw_idxs] += steps.transpose((2,0,1)).reshape((n_eps * n_mps, -1))
# back to regular parameterization
new_mps[:, :self.underlying_model.n_modelparams] = self._inv_transform(
new_mps[:, :self.underlying_model.n_modelparams]
)
new_mps = new_mps.reshape((n_eps, n_mps, -1)).transpose((1,2,0))
else:
new_mps = np.repeat(modelparams[:,:,np.newaxis], n_eps, axis=2)
new_mps[:, self._rw_idxs, :] += steps
return new_mps
## TESTING CODE ###############################################################
if __name__ == "__main__":
import operator as op
from .test_models import SimplePrecessionModel
m = BinomialModel(SimplePrecessionModel())
os = np.array([6, 7, 8, 9, 10])
mps = np.array([[0.1], [0.35], [0.77]])
eps = np.array([(0.5 * np.pi, 10), (0.51 * np.pi, 10)], dtype=m.expparams_dtype)
L = m.likelihood(
os, mps, eps
)
print(L)
assert m.call_count == reduce(op.mul, [os.shape[0], mps.shape[0], eps.shape[0]]), "Call count inaccurate."
assert L.shape == (os.shape[0], mps.shape[0], eps.shape[0]), "Shape mismatch."
| Alan-Robertson/python-qinfer | src/qinfer/derived_models.py | Python | agpl-3.0 | 41,714 | ["Gaussian"] | c4347ffa05c7f7204f51a24df9c7b3c6e9c64f62562b1f3e3af2e74143aea7aa |
#!/usr/bin/env python
#Dan Blankenberg
"""
Updates metadata in the database to match rev 1891.
Remember to backup your database before running.
"""
import sys, os, ConfigParser
import galaxy.app
from galaxy.util.bunch import Bunch
import galaxy.datatypes.tabular
assert sys.version_info[:2] >= ( 2, 4 )
def main():
ini_file = sys.argv.pop(1)
conf_parser = ConfigParser.ConfigParser({'here':os.getcwd()})
conf_parser.read(ini_file)
configuration = {}
for key, value in conf_parser.items("app:main"): configuration[key] = value
app = galaxy.app.UniverseApplication( global_conf = ini_file, **configuration )
#Step through Database, turning metadata bunches into dictionaries.
#print "Changing metadata bunches to dictionaries."
#for row in app.model.Dataset.table.select().execute():
# if isinstance (row.metadata, Bunch):
# print row.id
# app.model.Dataset.table.update(app.model.Dataset.table.c.id == row.id).execute( _metadata = row.metadata.__dict__ )
#Make sure all metadata is jsonified
#print "Rewriting all metadata to database, setting metadata dbkey, to ensure JSONified storage."
#for row in app.model.Dataset.table.select().execute():
# print row.id
# data = app.model.Dataset.get(row.id)
# dbkey = data.old_dbkey
# if not dbkey or data.metadata.dbkey not in ["?", ["?"], None, []]:
# dbkey = data.metadata.dbkey
# if not dbkey: dbkey = "?"
# #change dbkey then flush, then change to real value and flush, ensures that metadata is rewritten to database
# data.dbkey="~"
# data.flush()
# data.dbkey=dbkey
# data.flush()
#Search out tabular datatypes (and subclasses) and initialize metadata
print "Seeking out tabular based files and initializing metadata"
for row in app.model.Dataset.table.select().execute():
data = app.model.Dataset.get(row.id)
if issubclass(type(data.datatype), type(app.datatypes_registry.get_datatype_by_extension('tabular'))):
print row.id, data.extension
#Call meta_data for all tabular files
#special case interval type where we do not want to overwrite chr, start, end, etc assignments
if issubclass(type(data.datatype), type(app.datatypes_registry.get_datatype_by_extension('interval'))):
galaxy.datatypes.tabular.Tabular().set_meta(data)
else:
data.set_meta()
app.model.context.add( data )
app.model.context.flush()
#Search out maf datatypes and make sure that available species is set.
#print "Seeking out maf files and setting available species."
#for row in app.model.Dataset.table.select(app.model.Dataset.table.c.extension == 'maf').execute():
# print row.id
# sys.stdout.flush()
# data = app.model.Dataset.get(row.id)
# if data.missing_meta:
# data.set_meta() #Call maf set metadata method, setting available species
# data.flush()
app.shutdown()
sys.exit(0)
if __name__ == "__main__":
main()
| volpino/Yeps-EURAC | scripts/cleanup_datasets/update_metadata.py | Python | mit | 3,156 | ["Galaxy"] | 48adf0462c9d339f9befbf022184af05a40e5ae44aa54ea889a988ad1f145487 |
""""
This script builds the landcover codes input csv for heat source based on
using the ttools nodes feature class
"""
from __future__ import print_function
import arcpy
import csv
from operator import itemgetter
from os.path import join
from os.path import exists
# ----------------------------------------------------------------------
# Start Fill in Data
# --- Current Conditons Settings
nodes_fc = r"C:\WorkSpace\Quantifying_Conservation_2014\SouthernWillamette\TTools_Sim01_Current.gdb\Nodes_Current"
outputdir = r"C:\WorkSpace\Quantifying_Conservation_2014\SouthernWillamette\Landcover_Codes"
canopy_cover = 0.85
transsample_count = 15
trans_count = 8
lccode_filename = "lccodes_current.csv"
exclude_nodes = True
exclude_field = "EXCLUDE"
include_value = "False" # This value is what indicates the node is kept
lccode_colnames = ["NAME", "CODE", "HEIGHT", "CANOPY_COVER", "OVERHANG"]
# ----------------------------------------------------------------------
def ft2m(ft):
"""converts feet to meters"""
return float(ft) * 0.3048
def setup_LC_data_headers(transsample_count, trans_count, heatsource8=False):
"""Generates a list of the landcover data file
column header names and data types"""
type = ["LC"]
lcdataheaders =["STREAM_KM","LONGITUDE","LATITUDE","TOPO_W","TOPO_S","TOPO_E"]
# a flag indicating the model should use the heat source 8 methods
# (same as 8 directions but no north)
if heatsource8 is True:
dirs = ["T{0}".format(x) for x in range(1, 8)]
else:
dirs = ["T{0}".format(x) for x in range(1, trans_count + 1)]
zones = range(1,int(transsample_count)+1)
# Concatenate the type, dir, and zone and order in the correct way
for t in type:
for d, dir in enumerate(dirs):
for z, zone in enumerate(zones):
if t !="ELE" and d==0 and z==0:
#lcdataheaders.append(t+"_EMERGENT") # add emergent
lcdataheaders.append(t+"_T0_S0") # add emergent
lcdataheaders.append("{0}_{1}_S{2}".format(t, dir, zone))
else:
lcdataheaders.append("{0}_{1}_S{2}".format(t, dir, zone))
return lcdataheaders
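# Example (illustrative): trans_count=2, transsample_count=2 produces
# ["STREAM_KM", "LONGITUDE", "LATITUDE", "TOPO_W", "TOPO_S", "TOPO_E",
#  "LC_T0_S0", "LC_T1_S1", "LC_T1_S2", "LC_T2_S1", "LC_T2_S2"];
# the emergent column "LC_T0_S0" is inserted once before the first zone.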
def make_lc_codes(lcdata_in, canopy_cover, trans_count, transsample_count):
"""
This function reads in landcover codes from the land cover
data file and generates the landcover code file
based on all the unique codes.
"""
codes = set()
for row in lcdata_in:
for col in range(8, (trans_count * transsample_count) + 9):
c = str(int(float(row[col])))
codes.add(str(int(float(row[col]))))
if float(c) > -9997:
if float(c[-3:]) > 656:
nodes.add(str(row[1]))
codes = list(codes)
codes.sort()
lccodes = []
for code in codes:
if float(code) < -9997:
lccodes.append(["Current", float(code), 0, 0, 0])
else:
lccodes.append(["Current", float(code), ft2m(code[-3:]), canopy_cover, 0])
return(lccodes)
def make_sp_codes(lcdata_in, input_geomorph, trans_count, transsample_count):
"""
This function translates geomorphic codes into site potential
vegetation codes as described in the Willamette Basin TMDL.
The function is based on Brian Kasper's VB macro located here:
\\deqhq1\TMDL\Library (weekly backup)\Willamette-Basinwide\Potential Veg\
Subbasin System Potential Veg calc.xls
"""
# read in the geomorhic codes and vegetation probabilities
geodict = read_csv_dict(input_geomorph, key_col=1, val_col=[2, 8], skipheader=True)
# set the random seed so this is repeatable
random.seed(42)
lcdata_out = []
for row in lcdata_in:
for col in range(8, (trans_count * transsample_count) + 9):
rnd = random.randint(1, 100) / 100.0
geocode = str(int(float(row[col])))
param = geodict[geocode]
if rnd <= float(param[0]):
spc = param[3]
elif rnd <= float(param[0]) + float(param[1]):
spc = param[4]
else:
spc = param[5]
row[col] = int(spc)
lcdata_out.append(row)
return lcdata_out
def read_nodes_fc(nodes_fc, readfields, whereclause):
"""Reads an input point file and returns the fields as a
list"""
incursorFields = ["STREAM_ID","NODE_ID"] + readfields
# Determine input point spatial units
proj = arcpy.Describe(nodes_fc).spatialReference
lcdata = []
with arcpy.da.SearchCursor(nodes_fc, incursorFields, whereclause, proj) as Inrows:
for row in Inrows:
lcdata.append(row)
return(lcdata)
def write_csv(outputdir, filename, colnames, outlist):
"""write the output list to csv"""
# insert column header names
outlist.insert(0, colnames)
with open(join(outputdir, filename), "wb") as file_object:
writer = csv.writer(file_object, dialect= "excel")
writer.writerows(outlist)
lcsample_headers = setup_LC_data_headers(transsample_count, trans_count)
# Build a query to retrieve just the nodes that are needed
if exclude_nodes:
    whereclause = """{0} = '{1}'""".format(exclude_field, include_value)
else:
    whereclause = None
lcdata_list = read_nodes_fc(nodes_fc, lcsample_headers, whereclause)
lccodes, nodes = make_lc_codes(lcdata_list, canopy_cover, trans_count, transsample_count)
write_csv(outputdir, lccode_filename, lccode_colnames, lccodes)
write_csv(outputdir, "9998_nodes.csv", ["NODE_ID"], nodes)
print("done")
|
rmichie/PyScripts
|
QC_S_Willamette/Build_LC_Codes.py
|
Python
|
apache-2.0
| 5,652
|
[
"Brian"
] |
8e93591e536f40b445372bbc1ddddf612704d553e89241892d5bd2b9a7c552c2
|
# $HeadURL: $
""" ElementInspectorAgent
  This agent inspects Resources and evaluates the policies that apply.
"""
import datetime
import math
import Queue
from DIRAC import S_ERROR, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
from DIRAC.ResourceStatusSystem.Utilities import Utils
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ), 'ResourceManagementClient')
__RCSID__ = '$Id$'
AGENT_NAME = 'ResourceStatus/ElementInspectorAgent'
class ElementInspectorAgent( AgentModule ):
""" ElementInspectorAgent
The ElementInspector agent is a generic agent used to check the elements
of one of the elementTypes ( e.g. Site, Resource, Node ).
This Agent takes care of the Elements. In order to do so, it gathers
the eligible ones and then evaluates their statuses with the PEP.
"""
# Max number of worker threads by default
__maxNumberOfThreads = 15
  # Default inspection frequencies, in minutes; the lower the value, the more
  # often (and with higher priority) an element is re-checked. The Error state
  # usually means there is a glitch somewhere, so it has the highest priority.
__checkingFreqs = {'Active' : 20,
'Degraded' : 20,
'Probing' : 20,
'Banned' : 15,
'Unknown' : 10,
'Error' : 5}
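  # Illustrative consequence of the defaults above: an element in the 'Error'
  # state becomes eligible for re-inspection 5 minutes after its LastCheckTime,
  # while an 'Active' element waits 20 minutes.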
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
# ElementType, to be defined among Site, Resource or Node
self.elementType = ''
self.elementsToBeChecked = None
self.threadPool = None
self.rsClient = None
self.clients = {}
def initialize( self ):
""" Standard initialize.
"""
maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.__maxNumberOfThreads )
self.threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )
self.elementType = self.am_getOption( 'elementType', self.elementType )
self.rsClient = ResourceStatusClient()
self.clients[ 'ResourceStatusClient' ] = self.rsClient
self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient()
if not self.elementType:
return S_ERROR( 'Missing elementType' )
return S_OK()
def execute( self ):
""" execute
This is the main method of the agent. It gets the elements from the Database
which are eligible to be re-checked, calculates how many threads should be
started and spawns them. Each thread will get an element from the queue until
it is empty. At the end, the method will join the queue such that the agent
will not terminate a cycle until all elements have been processed.
"""
# Gets elements to be checked ( returns a Queue )
elementsToBeChecked = self.getElementsToBeChecked()
if not elementsToBeChecked[ 'OK' ]:
self.log.error( elementsToBeChecked[ 'Message' ] )
return elementsToBeChecked
self.elementsToBeChecked = elementsToBeChecked[ 'Value' ]
queueSize = self.elementsToBeChecked.qsize()
pollingTime = self.am_getPollingTime()
# Assigns number of threads on the fly such that we exhaust the PollingTime
# without having to spawn too many threads. We assume 10 seconds per element
# to be processed ( actually, it takes something like 1 sec per element ):
# numberOfThreads = elements * 10(s/element) / pollingTime
numberOfThreads = int( math.ceil( queueSize * 10. / pollingTime ) )
self.log.info( 'Needed %d threads to process %d elements' % ( numberOfThreads, queueSize ) )
for _x in xrange( numberOfThreads ):
jobUp = self.threadPool.generateJobAndQueueIt( self._execute )
if not jobUp[ 'OK' ]:
self.log.error( jobUp[ 'Message' ] )
self.log.info( 'blocking until all elements have been processed' )
# block until all tasks are done
self.elementsToBeChecked.join()
self.log.info( 'done')
return S_OK()
def getElementsToBeChecked( self ):
""" getElementsToBeChecked
This method gets all the rows in the <self.elementType>Status table, and then
discards entries with TokenOwner != rs_svc. On top of that, there are check
frequencies that are applied: depending on the current status of the element,
they will be checked more or less often.
"""
toBeChecked = Queue.Queue()
# We get all the elements, then we filter.
elements = self.rsClient.selectStatusElement( self.elementType, 'Status' )
if not elements[ 'OK' ]:
return elements
utcnow = datetime.datetime.utcnow().replace( microsecond = 0 )
    # Filter the elements: skip those checked too recently or owned by another token
for element in elements[ 'Value' ]:
      # Maybe overkill, but this way I never again have to worry about the
      # order of the elements returned by MySQL in the tuples
elemDict = dict( zip( elements[ 'Columns' ], element ) )
      # This if-clause skips all the elements that should not be checked yet
timeToNextCheck = self.__checkingFreqs[ elemDict[ 'Status' ] ]
if utcnow <= elemDict[ 'LastCheckTime' ] + datetime.timedelta( minutes = timeToNextCheck ):
continue
# We skip the elements with token different than "rs_svc"
if elemDict[ 'TokenOwner' ] != 'rs_svc':
self.log.verbose( 'Skipping %s ( %s ) with token %s' % ( elemDict[ 'Name' ],
elemDict[ 'StatusType' ],
elemDict[ 'TokenOwner' ]
))
continue
# We are not checking if the item is already on the queue or not. It may
# be there, but in any case, it is not a big problem.
lowerElementDict = { 'element' : self.elementType }
for key, value in elemDict.items():
lowerElementDict[ key[0].lower() + key[1:] ] = value
# We add lowerElementDict to the queue
toBeChecked.put( lowerElementDict )
self.log.verbose( '%s # "%s" # "%s" # %s # %s' % ( elemDict[ 'Name' ],
elemDict[ 'ElementType' ],
elemDict[ 'StatusType' ],
elemDict[ 'Status' ],
elemDict[ 'LastCheckTime' ]) )
return S_OK( toBeChecked )
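  # Illustrative note: the key-lowering loop above turns a DB row such as
  # { 'Name' : 'SomeSite', 'StatusType' : 'all', 'Status' : 'Active' } into
  # { 'element' : <elementType>, 'name' : 'SomeSite', 'statusType' : 'all',
  #   'status' : 'Active' }, which matches the lowercase keys consumed in
  # _execute below.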
# Private methods ............................................................
def _execute( self ):
"""
Method run by the thread pool. It enters a loop until there are no elements
on the queue. On each iteration, it evaluates the policies for such element
and enforces the necessary actions. If there are no more elements in the
queue, the loop is finished.
"""
pep = PEP( clients = self.clients )
while True:
try:
element = self.elementsToBeChecked.get_nowait()
except Queue.Empty:
return S_OK()
self.log.verbose( '%s ( %s / %s ) being processed' % ( element[ 'name' ],
element[ 'status' ],
element[ 'statusType' ] ) )
resEnforce = pep.enforce( element )
if not resEnforce[ 'OK' ]:
self.log.error( 'Failed policy enforcement', resEnforce[ 'Message' ] )
self.elementsToBeChecked.task_done()
continue
resEnforce = resEnforce[ 'Value' ]
oldStatus = resEnforce[ 'decisionParams' ][ 'status' ]
statusType = resEnforce[ 'decisionParams' ][ 'statusType' ]
newStatus = resEnforce[ 'policyCombinedResult' ][ 'Status' ]
reason = resEnforce[ 'policyCombinedResult' ][ 'Reason' ]
if oldStatus != newStatus:
self.log.info( '%s (%s) is now %s ( %s ), before %s' % ( element[ 'name' ],
statusType,
newStatus,
reason,
oldStatus ) )
# Used together with join !
self.elementsToBeChecked.task_done()
#...............................................................................
#EOF
|
hgiemza/DIRAC
|
ResourceStatusSystem/Agent/ElementInspectorAgent.py
|
Python
|
gpl-3.0
| 8,749
|
[
"DIRAC"
] |
412a566b269295fc833281077fb94a35c721a44cf6e7579b2deb1e929dc58176
|
#!/usr/bin/python
# system
from collections import defaultdict
from functools import wraps
import pdb
import pprint
import re
import sys
import time
import traceback
# pypi
from splinter import Browser
from treelib import Tree
# local
import user as userdata
import list_to_tree
pp = pprint.PrettyPrinter(indent=4)
base_url = 'http://www.karatbars.com'
action_path = dict(
login = "index.php?page=login_1",
binary = "members.php?page=binarytree"
)
def url_for_action(action):
return "{0}/{1}".format(base_url,action_path[action])
def try_method(fn):
@wraps(fn)
def wrapper(self):
try:
return fn(self)
except:
print traceback.format_exc()
self.visit_auction()
return wrapper
class Entry(object):
def __init__(self, user, browser):
self.user=user
self.browser=browser
def login(self):
print "Logging in..."
self.browser.visit(url_for_action('login'))
self.browser.fill('username', self.user['username'])
self.browser.fill('password', self.user['password'])
button = self.browser.find_by_id('btn_login')
button.click()
def visit_binary(self):
self.browser.visit(url_for_action('binary'))
tree = Tree()
while True:
users = self.browser.find_by_css('.binary_text')
users = [u.text for u in users]
l = list_to_tree.ListToTree(users)
l.show()
sleep_time = 5
print "\tSleeping for", sleep_time, "seconds"
time.sleep(sleep_time)
def main(bid_url=None):  # bid_url is accepted from the CLI but not yet used
with Browser() as browser:
for user in userdata.users:
e = Entry(user, browser)
e.login()
e.visit_binary()
while True: pass
if __name__ == '__main__':
if len(sys.argv) == 2:
bid_url = sys.argv[1]
else:
bid_url = None
main(bid_url)
|
metaperl/karatbars-utils
|
k0de/upgraded/login.py
|
Python
|
mit
| 1,933
|
[
"VisIt"
] |
b6b5c35ff778b44faab423e9e5807aba0ba7f725c0ecbf0e7bff9b92773ae301
|
from functools import wraps
from hyperspy.component import Component
_CLASS_DOC = \
"""%s component (created with Expression).
.. math::
f(x) = %s
"""
def _fill_function_args(fn):
@wraps(fn)
def fn_wrapped(self, x):
return fn(x, *[p.value for p in self.parameters])
return fn_wrapped
def _fill_function_args_2d(fn):
@wraps(fn)
def fn_wrapped(self, x, y):
return fn(x, y, *[p.value for p in self.parameters])
return fn_wrapped
def _parse_substitutions(string):
import sympy
splits = map(str.strip, string.split(';'))
expr = sympy.sympify(next(splits))
    # We substitute one by one manually, as passing all of them at once does
    # not work as we want (substitutions inside other substitutions do not work)
for sub in splits:
t = tuple(map(str.strip, sub.split('=')))
expr = expr.subs(t[0], sympy.sympify(t[1]))
return expr
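# Illustrative sketch: the sequential substitution above lets later definitions
# feed earlier ones, e.g.
#   _parse_substitutions('a*x + b; b = c**2; c = a + 1')
# first sympifies 'a*x + b', then replaces b -> c**2 and c -> a + 1, yielding
# a*x + (a + 1)**2.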
class Expression(Component):
"""Create a component from a string expression.
"""
def __init__(self, expression, name, position=None, module="numpy",
autodoc=True, add_rotation=False, rotation_center=None,
**kwargs):
"""Create a component from a string expression.
It automatically generates the partial derivatives and the
class docstring.
Parameters
----------
expression: str
Component function in SymPy text expression format with
substitutions separated by `;`. See examples and the SymPy
documentation for details. The only additional constraint is that
the variable(s) must be `x` (for 1D components); or `x` and `y` for
2D components. Also, if `module` is "numexpr" the
functions are limited to those that numexpr support. See its
documentation for details.
name : str
Name of the component.
position: str, optional
The parameter name that defines the position of the component if
            applicable. It enables interactive adjustment of the position of the
component in the model. For 2D components, a tuple must be passed
with the name of the two parameters e.g. `("x0", "y0")`.
module: {"numpy", "numexpr"}, default "numpy"
Module used to evaluate the function. numexpr is often faster but
it supports fewer functions and requires installing numexpr.
add_rotation: bool, default False
This is only relevant for 2D components. If `True` it automatically
adds `rotation_angle` parameter.
rotation_center: {None, tuple}
If None, the rotation center is the center i.e. (0, 0) if `position`
is not defined, otherwise the center is the coordinates specified
by `position`. Alternatively a tuple with the (x, y) coordinates
of the center can be provided.
**kwargs
Keyword arguments can be used to initialise the value of the
parameters.
Methods
-------
recompile: useful to recompile the function and gradient with a
a different module.
Examples
--------
        The following creates a Gaussian component and sets the initial values
of the parameters:
>>> hs.model.components1D.Expression(
... expression="height * exp(-(x - x0) ** 2 * 4 * log(2)/ fwhm ** 2)",
... name="Gaussian",
... height=1,
... fwhm=1,
... x0=0,
... position="x0",)
        Substitutions for long or complicated expressions are separated by
        semicolons:
>>> expr = 'A*B/(A+B) ; A = sin(x)+one; B = cos(y) - two; y = tan(x)'
>>> comp = hs.model.components1D.Expression(
... expression=expr,
... name='my function')
>>> comp.parameters
(<Parameter one of my function component>,
<Parameter two of my function component>)
"""
import sympy
self._add_rotation = add_rotation
self._str_expression = expression
if rotation_center is None:
self.compile_function(module=module, position=position)
else:
self.compile_function(module=module, position=rotation_center)
# Initialise component
Component.__init__(self, self._parameter_strings)
self._whitelist['expression'] = ('init', expression)
self._whitelist['name'] = ('init', name)
self._whitelist['position'] = ('init', position)
self._whitelist['module'] = ('init', module)
if self._is2D:
self._whitelist['add_rotation'] = ('init', self._add_rotation)
self._whitelist['rotation_center'] = ('init', rotation_center)
self.name = name
# Set the position parameter
if position:
if self._is2D:
self._position_x = getattr(self, position[0])
self._position_y = getattr(self, position[1])
else:
self._position = getattr(self, position)
# Set the initial value of the parameters
if kwargs:
for kwarg, value in kwargs.items():
setattr(getattr(self, kwarg), 'value', value)
if autodoc:
self.__doc__ = _CLASS_DOC % (
name, sympy.latex(_parse_substitutions(expression)))
def compile_function(self, module="numpy", position=False):
import sympy
from sympy.utilities.lambdify import lambdify
expr = _parse_substitutions(self._str_expression)
# Extract x
x, = [symbol for symbol in expr.free_symbols if symbol.name == "x"]
# Extract y
y = [symbol for symbol in expr.free_symbols if symbol.name == "y"]
        self._is2D = bool(y)
if self._is2D:
y = y[0]
if self._is2D and self._add_rotation:
position = position or (0, 0)
rotx = sympy.sympify(
"{0} + (x - {0}) * cos(rotation_angle) - (y - {1}) *"
" sin(rotation_angle)"
.format(*position))
roty = sympy.sympify(
"{1} + (x - {0}) * sin(rotation_angle) + (y - {1}) *"
"cos(rotation_angle)"
.format(*position))
expr = expr.subs({"x": rotx, "y": roty}, simultaneous=False)
rvars = sympy.symbols([s.name for s in expr.free_symbols], real=True)
real_expr = expr.subs(
{orig: real_ for (orig, real_) in zip(expr.free_symbols, rvars)})
# just replace with the assumption that all our variables are real
expr = real_expr
eval_expr = expr.evalf()
# Extract parameters
variables = ("x", "y") if self._is2D else ("x", )
parameters = [
symbol for symbol in expr.free_symbols
if symbol.name not in variables]
parameters.sort(key=lambda x: x.name) # to have a reliable order
# Create compiled function
variables = [x, y] if self._is2D else [x]
self._f = lambdify(variables + parameters, eval_expr,
modules=module, dummify=False)
if self._is2D:
f = lambda x, y: self._f(x, y, *[p.value for p in self.parameters])
else:
f = lambda x: self._f(x, *[p.value for p in self.parameters])
setattr(self, "function", f)
parnames = [symbol.name for symbol in parameters]
self._parameter_strings = parnames
ffargs = _fill_function_args_2d if self._is2D else _fill_function_args
for parameter in parameters:
grad_expr = sympy.diff(eval_expr, parameter)
setattr(self,
"_f_grad_%s" % parameter.name,
lambdify(variables + parameters,
grad_expr.evalf(),
modules=module,
dummify=False)
)
setattr(self,
"grad_%s" % parameter.name,
ffargs(
getattr(
self,
"_f_grad_%s" %
parameter.name)).__get__(
self,
Expression)
)
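# Illustrative usage sketch (assumes hyperspy is importable as hs): after
# compilation one gradient method is attached per parameter, e.g. for the
# Gaussian from the class docstring:
# >>> g = hs.model.components1D.Expression(
# ...     expression="height * exp(-(x - x0) ** 2 * 4 * log(2)/ fwhm ** 2)",
# ...     name="Gaussian", height=1, fwhm=1, x0=0)
# >>> g.grad_height(0.)  # analytic d f / d height, evaluated at x = 0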
|
CodeMonkeyJan/hyperspy
|
hyperspy/_components/expression.py
|
Python
|
gpl-3.0
| 8,366
|
[
"Gaussian"
] |
86c09b11e3b5c226bd10757d276040926c18f46445b8540b348726a61450fd04
|
"""Functionality for manipulating multiple grism exposures simultaneously
"""
import os
import time
import glob
from collections import OrderedDict
import multiprocessing as mp
import scipy.ndimage as nd
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import astropy.units as u
# local imports
from . import utils
from . import model
#from . import stack
from .fitting import GroupFitter
#from .utils_c import disperse
from .utils_c import interp
from .utils import GRISM_COLORS, GRISM_MAJOR, GRISM_LIMITS, DEFAULT_LINE_LIST
def _loadFLT(grism_file, sci_extn, direct_file, pad, ref_file,
ref_ext, seg_file, verbose, catalog, ix):
"""Helper function for loading `.model.GrismFLT` objects with `multiprocessing`.
TBD
"""
import time
try:
import cPickle as pickle
except:
# Python 3
import pickle
# slight random delay to avoid synchronization problems
# np.random.seed(ix)
# sleeptime = ix*1
# print '%s sleep %.3f %d' %(grism_file, sleeptime, ix)
# time.sleep(sleeptime)
# print grism_file, direct_file
new_root = '.{0:02d}.GrismFLT.fits'.format(sci_extn)
save_file = grism_file.replace('_flt.fits', new_root)
save_file = save_file.replace('_flc.fits', new_root)
save_file = save_file.replace('_cmb.fits', new_root)
save_file = save_file.replace('_rate.fits', new_root)
save_file = save_file.replace('_elec.fits', new_root)
if (save_file == grism_file) & ('GrismFLT' not in grism_file):
# couldn't build new filename based on the extensions
# so just insert at the end
save_file = grism_file.replace('.fits', new_root)
if (grism_file.find('_') < 0) & ('GrismFLT' not in grism_file):
save_file = 'xxxxxxxxxxxxxxxxxxx'
if os.path.exists(save_file) & ('GrismFLT' in save_file):
print('Load {0}!'.format(save_file))
fp = open(save_file.replace('GrismFLT.fits', 'GrismFLT.pkl'), 'rb')
flt = pickle.load(fp)
fp.close()
status = flt.load_from_fits(save_file)
else:
flt = model.GrismFLT(grism_file=grism_file, sci_extn=sci_extn,
direct_file=direct_file, pad=pad,
ref_file=ref_file, ref_ext=ref_ext,
seg_file=seg_file, shrink_segimage=True,
verbose=verbose)
if flt.direct.wcs.wcs.has_pc():
for obj in [flt.grism, flt.direct]:
obj.get_wcs()
if catalog is not None:
flt.catalog = flt.blot_catalog(catalog,
sextractor=('X_WORLD' in catalog.colnames))
flt.catalog_file = catalog
else:
flt.catalog = None
if flt.grism.instrument in ['NIRCAM']:
flt.apply_POM()
if flt.grism.instrument in ['NIRISS', 'NIRCAM']:
flt.transform_NIRISS()
return flt # , out_cat
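# Illustrative note: the renaming above maps, e.g., a grism exposure named
# 'ibhj01xyq_flt.fits' with sci_extn=1 to 'ibhj01xyq.01.GrismFLT.fits' (plus a
# matching '.pkl'), which is the file the os.path.exists() branch reloads on
# subsequent calls.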
def _fit_at_z(self, zgrid, i, templates, fitter, fit_background, poly_order):
"""
For parallel processing
"""
# self, z=0., templates={}, fitter='nnls',
# fit_background=True, poly_order=0
print(i, zgrid[i])
out = self.fit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
data = {'out': out, 'i': i}
return data
#A, coeffs[i,:], chi2[i], model_2d = out
def _beam_compute_model(beam, id, spectrum_1d, is_cgs, apply_sensitivity, scale, reset):
"""
wrapper function for multiprocessing
"""
beam.beam.compute_model(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs,
scale=scale, reset=reset,
apply_sensitivity=apply_sensitivity)
beam.modelf = beam.beam.modelf
beam.model = beam.beam.modelf.reshape(beam.beam.sh_beam)
return True
# def test_parallel():
#
# zgrid = np.linspace(1.1, 1.3, 10)
# templates = mb.load_templates(fwhm=800)
# fitter = 'nnls'
# fit_background = True
# poly_order = 0
#
# self.FLTs = []
# t0_pool = time.time()
#
# pool = mp.Pool(processes=4)
# results = [pool.apply_async(_fit_at_z, (mb, zgrid, i, templates, fitter, fit_background, poly_order)) for i in range(len(zgrid))]
#
# pool.close()
# pool.join()
#
# chi = zgrid*0.
#
# for res in results:
# data = res.get(timeout=1)
# A, coeffs, chi[data['i']], model_2d = data['out']
# #flt_i.catalog = cat_i
#
# t1_pool = time.time()
def _compute_model(i, flt, fit_info, is_cgs, store, model_kwargs):
"""Helper function for computing model orders.
"""
for id in fit_info:
try:
status = flt.compute_model_orders(id=id,
mag=fit_info[id]['mag'], in_place=True, store=store,
spectrum_1d=fit_info[id]['spec'], is_cgs=is_cgs,
verbose=False, **model_kwargs)
except:
print('Failed: {0} {1}'.format(flt.grism.parent_file, id))
continue
print('{0}: _compute_model Done'.format(flt.grism.parent_file))
return i, flt.model, flt.object_dispersers
class GroupFLT():
def __init__(self, grism_files=[], sci_extn=1, direct_files=[],
pad=200, group_name='group',
ref_file=None, ref_ext=0, seg_file=None,
shrink_segimage=True, verbose=True, cpu_count=0,
catalog='', polyx=[0.3, 2.35],
MW_EBV=0.):
"""Main container for handling multiple grism exposures together
Parameters
----------
grism_files : list
List of grism exposures (typically WFC3/IR "FLT" or ACS/UVIS "FLC"
files). These can be from different grisms and/or orients.
sci_extn : int
Science extension to extract from the files in `grism_files`. For
WFC3/IR this can only be 1, though for the two-chip instruments
            WFC3/UVIS and ACS/WFC this can be 1 or 2.
direct_files : list
List of direct exposures (typically WFC3/IR "FLT" or ACS/UVIS
"FLC" files). This list should either be empty or should
correspond one-to-one with entries in the `grism_files` list,
i.e., from an undithered pair of direct and grism exposures. If
such pairs weren't obtained or if you simply wish to ignore them
and just use the `ref_file` reference image, set to an empty list
(`[]`).
pad : int
Padding in pixels to apply around the edge of the detector to
allow modeling of sources that fall off of the nominal FOV. For
this to work requires using a `ref_file` reference image that
covers this extra area.
group_name : str
Name to apply to products produced by this group.
ref_file : `None` or str
Undistorted reference image filename, e.g., a drizzled mosaic
covering the area around a given grism exposure.
ref_ext : 0
FITS extension of the reference file where to find the image
itself.
seg_file : `None` or str
Segmentation image filename.
shrink_segimage : bool
Do some preprocessing on the segmentation image to speed up the
blotting to the distorted frame of the grism exposures.
verbose : bool
Print verbose information.
cpu_count : int
Use parallelization if > 0. If equal to zero, then use the
maximum number of available cores.
catalog : str
            Catalog filename associated with `seg_file`. These are typically
generated with "SExtractor", but the source of the files
themselves isn't critical.
Attributes
----------
catalog : `~astropy.table.Table`
The table read in with from the above file specified in `catalog`.
FLTs : list
List of `~grizli.model.GrismFLT` objects generated from each of
the files in the `grism_files` list.
grp.N : int
Number of grism files (i.e., `len(FLTs)`.)
"""
N = len(grism_files)
if len(direct_files) != len(grism_files):
direct_files = ['']*N
self.grism_files = grism_files
self.direct_files = direct_files
self.group_name = group_name
# Wavelengths for polynomial fits
self.polyx = polyx
# Read catalog
if catalog:
if isinstance(catalog, str):
self.catalog = utils.GTable.gread(catalog)
else:
self.catalog = catalog
# necessary columns from SExtractor / photutils
pairs = [['NUMBER', 'id'],
['MAG_AUTO', 'mag'],
['MAGERR_AUTO', 'mag_err']]
cols = self.catalog.colnames
for pair in pairs:
if (pair[0] not in cols) & (pair[1] in cols):
self.catalog[pair[0]] = self.catalog[pair[1]]
else:
self.catalog = None
if cpu_count == 0:
cpu_count = mp.cpu_count()
self.FLTs = []
if cpu_count < 0:
# serial
t0_pool = time.time()
for i in range(N):
flt = _loadFLT(self.grism_files[i], sci_extn,
self.direct_files[i], pad, ref_file, ref_ext,
seg_file, verbose, self.catalog, i)
self.FLTs.append(flt)
t1_pool = time.time()
else:
# Read files in parallel
t0_pool = time.time()
pool = mp.Pool(processes=cpu_count)
results = [pool.apply_async(_loadFLT, (self.grism_files[i], sci_extn, self.direct_files[i], pad, ref_file, ref_ext, seg_file, verbose, self.catalog, i)) for i in range(N)]
pool.close()
pool.join()
for res in results:
flt_i = res.get(timeout=1)
#flt_i.catalog = cat_i
# somehow WCS getting flipped from cd to pc in res.get()???
if flt_i.direct.wcs.wcs.has_pc():
for obj in [flt_i.grism, flt_i.direct]:
obj.get_wcs()
self.FLTs.append(flt_i)
t1_pool = time.time()
if verbose:
print('Files loaded - {0:.2f} sec.'.format(t1_pool - t0_pool))
@property
def N(self):
return len(self.FLTs)
@property
def Ngrism(self):
"""
dictionary containing number of exposures by grism
"""
# Parse grisms & PAs
Ngrism = {}
for flt in self.FLTs:
if flt.grism.instrument == 'NIRISS':
grism = flt.grism.pupil
else:
grism = flt.grism.filter
if grism not in Ngrism:
Ngrism[grism] = 0
Ngrism[grism] += 1
return Ngrism
@property
def grisms(self):
"""
Available grisms
"""
grisms = list(self.Ngrism.keys())
return grisms
@property
def PA(self):
"""
Available PAs in each grism
"""
_PA = {}
for g in self.Ngrism:
_PA[g] = {}
for i, flt in enumerate(self.FLTs):
if flt.grism.instrument == 'NIRISS':
grism = flt.grism.pupil
else:
grism = flt.grism.filter
PA_i = flt.get_dispersion_PA(decimals=0)
if PA_i not in _PA[grism]:
_PA[grism][PA_i] = []
_PA[grism][PA_i].append(i)
return _PA
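    # Illustrative note: the PA dictionary above has the shape
    # {'G141': {75.0: [0, 1], 255.0: [2]}}, i.e. FLT list indices grouped first
    # by grism element and then by rounded dispersion position angle.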
def save_full_data(self, warn=True):
"""Save models and data files for fast regeneration.
The filenames of the outputs are generated from the input grism
exposure filenames with the following:
>>> file = 'ib3701ryq_flt.fits'
>>> sci_extn = 1
>>> new_root = '.{0:02d}.GrismFLT.fits'.format(sci_extn)
>>>
>>> save_file = file.replace('_flt.fits', new_root)
>>> save_file = save_file.replace('_flc.fits', new_root)
>>> save_file = save_file.replace('_cmb.fits', new_root)
>>> save_file = save_file.replace('_rate.fits', new_root)
It will also save data to a `~pickle` file:
>>> pkl_file = save_file.replace('.fits', '.pkl')
Parameters
----------
warn : bool
Print a warning and skip if an output file is already found to
exist.
Notes
-----
The save filename format was changed May 9, 2017 to the format like
`ib3701ryq.01.GrismFLT.fits` from `ib3701ryq_GrismFLT.fits` to both
allow easier filename parsing and also to allow for instruments that
have multiple `SCI` extensions in a single calibrated file
(e.g., ACS and WFC3/UVIS).
"""
for _flt in self.FLTs:
file = _flt.grism_file
if _flt.grism.data is None:
if warn:
print('{0}: Looks like data already saved!'.format(file))
continue
new_root = '.{0:02d}.GrismFLT.fits'.format(_flt.grism.sci_extn)
save_file = file.replace('_flt.fits', new_root)
save_file = save_file.replace('_flc.fits', new_root)
save_file = save_file.replace('_cmb.fits', new_root)
save_file = save_file.replace('_rate.fits', new_root)
save_file = save_file.replace('_elec.fits', new_root)
if (save_file == file) & ('GrismFLT' not in file):
# couldn't build new filename based on the extensions
# so just insert at the end
save_file = file.replace('.fits', new_root)
print('Save {0}'.format(save_file))
_flt.save_full_pickle()
# Reload initialized data
_flt.load_from_fits(save_file)
def extend(self, new, verbose=True):
"""Add another `GroupFLT` instance to `self`
This function appends the exposures if a separate `GroupFLT` instance
to the current instance. You might do this, for example, if you
generate separate `GroupFLT` instances for different grisms and
reference images with different filters.
"""
import copy
self.FLTs.extend(new.FLTs)
direct_files = copy.copy(self.direct_files)
direct_files.extend(new.direct_files)
self.direct_files = direct_files
grism_files = copy.copy(self.grism_files)
grism_files.extend(new.grism_files)
self.grism_files = grism_files
# self.direct_files.extend(new.direct_files)
# self.grism_files.extend(new.grism_files)
if verbose:
print('Now we have {0:d} FLTs'.format(self.N))
def compute_single_model(self, id, center_rd=None, mag=-99, size=-1, store=False, spectrum_1d=None, is_cgs=False, get_beams=None, in_place=True, psf_param_dict={}):
"""Compute model spectrum in all exposures
TBD
Parameters
----------
id : type
center_rd : None
mag : type
size : type
store : type
spectrum_1d : type
get_beams : type
in_place : type
Returns
-------
TBD
"""
out_beams = []
for flt in self.FLTs:
if flt.grism.parent_file in psf_param_dict:
psf_params = psf_param_dict[flt.grism.parent_file]
else:
psf_params = None
if center_rd is None:
x = y = None
else:
x, y = flt.direct.wcs.all_world2pix(np.array(center_rd)[None, :], 0).flatten()
status = flt.compute_model_orders(id=id, x=x, y=y, verbose=False,
size=size, compute_size=(size < 0),
mag=mag, in_place=in_place, store=store,
spectrum_1d=spectrum_1d, is_cgs=is_cgs,
get_beams=get_beams, psf_params=psf_params)
out_beams.append(status)
if get_beams:
return out_beams
else:
return True
def compute_full_model(self, fit_info=None, verbose=True, store=False,
mag_limit=25, coeffs=[1.2, -0.5], cpu_count=0,
is_cgs=False, model_kwargs={'compute_size':True}):
"""Compute continuum models of all sources in an FLT
Parameters
----------
fit_info : dict
verbose : bool
store : bool
mag_limit : float
Faint limit of objects to compute
coeffs : list
Polynomial coefficients of the continuum model
cpu_count : int
Number of CPUs to use for parallel processing. If 0, then get
from `multiprocessing.cpu_count`.
is_cgs : bool
Spectral models are in cgs units
model_kwargs : dict
Keywords to pass to the
`~grizli.model.GrismFLT.compute_model_orders` method of the
`~grizli.model.GrismFLT` objects.
Returns
-------
Sets `object_dispersers` and `model` attributes on items in
`self.FLTs`
"""
if cpu_count <= 0:
cpu_count = np.maximum(mp.cpu_count() - 4, 1)
if fit_info is None:
bright = self.catalog['MAG_AUTO'] < mag_limit
ids = self.catalog['NUMBER'][bright]
mags = self.catalog['MAG_AUTO'][bright]
# Polynomial component
#xspec = np.arange(0.3, 5.35, 0.05)-1
xspec = np.arange(self.polyx[0], self.polyx[1], 0.05)-1
yspec = [xspec**o*coeffs[o] for o in range(len(coeffs))]
xspec = (xspec+1)*1.e4
yspec = np.sum(yspec, axis=0)
fit_info = OrderedDict()
for id, mag in zip(ids, mags):
fit_info[id] = {'mag': mag, 'spec': [xspec, yspec]}
is_cgs = False
t0_pool = time.time()
pool = mp.Pool(processes=cpu_count)
jobs = [pool.apply_async(_compute_model,
(i, self.FLTs[i], fit_info,
is_cgs, store, model_kwargs))
for i in range(self.N)]
pool.close()
pool.join()
for res in jobs:
i, model, dispersers = res.get(timeout=1)
self.FLTs[i].object_dispersers = dispersers
self.FLTs[i].model = model
t1_pool = time.time()
if verbose:
print('Models computed - {0:.2f} sec.'.format(t1_pool - t0_pool))
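    # Worked example (illustrative): with the default coeffs=[1.2, -0.5] the
    # continuum template built above is y = 1.2 - 0.5*x with x = lam/1.e4 - 1
    # (lam in Angstroms), so at lam = 1.4e4 the template value is
    # 1.2 - 0.5*0.4 = 1.0.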
def get_beams(self, id, size=10, center_rd=None, beam_id='A',
min_overlap=0.1, min_valid_pix=10, min_mask=0.01,
min_sens=0.08, mask_resid=True, get_slice_header=True):
"""Extract 2D spectra "beams" from the GroupFLT exposures.
Parameters
----------
id : int
Catalog ID of the object to extract.
size : int
Half-size of the 2D spectrum to extract, along cross-dispersion
axis.
center_rd : optional, (float, float)
Extract based on RA/Dec rather than catalog ID.
beam_id : type
Name of the order to extract.
min_overlap : float
Fraction of the spectrum along wavelength axis that has one
or more valid pixels.
min_valid_pix : int
Minimum number of valid pixels (`beam.fit_mask == True`) in 2D
spectrum.
min_mask : float
Minimum factor relative to the maximum pixel value of the flat
f-lambda model where the 2D cutout data are considered good.
Passed through to `~grizli.model.BeamCutout`.
min_sens : float
See `~grizli.model.BeamCutout`.
get_slice_header : bool
Passed to `~grizli.model.BeamCutout`.
Returns
-------
beams : list
List of `~grizli.model.BeamCutout` objects.
"""
beams = self.compute_single_model(id, center_rd=center_rd, size=size, store=False, get_beams=[beam_id])
out_beams = []
for flt, beam in zip(self.FLTs, beams):
try:
out_beam = model.BeamCutout(flt=flt, beam=beam[beam_id],
conf=flt.conf, min_mask=min_mask,
min_sens=min_sens,
mask_resid=mask_resid,
get_slice_header=get_slice_header)
except:
#print('Except: get_beams')
continue
valid = (out_beam.grism['SCI'] != 0)
valid &= out_beam.fit_mask.reshape(out_beam.sh)
hasdata = (valid.sum(axis=0) > 0).sum()
if hasdata*1./out_beam.model.shape[1] < min_overlap:
continue
# Empty direct image?
if out_beam.beam.total_flux == 0:
continue
if out_beam.fit_mask.sum() < min_valid_pix:
continue
out_beams.append(out_beam)
return out_beams
def refine_list(self, ids=[], mags=[], poly_order=3, mag_limits=[16, 24],
max_coeff=5, ds9=None, verbose=True, fcontam=0.5,
wave=np.linspace(0.2, 2.5e4, 100)):
"""Refine contamination model for list of objects. Loops over `refine`.
Parameters
----------
ids : list
List of object IDs
mags : list
            Magnitudes to go along with the IDs. If `ids` and `mags` are not
specified, then get the ID list from `self.catalog['MAG_AUTO']`.
poly_order : int
Order of the polynomial fit to the spectra.
mag_limits : [float, float]
Magnitude limits of objects to fit from `self.catalog['MAG_AUTO']`
when `ids` and `mags` not set.
max_coeff : float
Fit is considered bad when one of the coefficients is greater
than this value. See `refine`.
ds9 : `~grizli.ds9.DS9`, optional
Display the refined models to DS9 as they are computed.
verbose : bool
Print fit coefficients.
fcontam : float
Contamination weighting parameter.
wave : `~numpy.array`
Wavelength array for the polynomial fit.
Returns
-------
Updates `self.model` in place.
"""
if (len(ids) == 0) | (len(ids) != len(mags)):
bright = ((self.catalog['MAG_AUTO'] < mag_limits[1]) &
(self.catalog['MAG_AUTO'] > mag_limits[0]))
ids = self.catalog['NUMBER'][bright]*1
mags = self.catalog['MAG_AUTO'][bright]*1
so = np.argsort(mags)
ids, mags = ids[so], mags[so]
#wave = np.linspace(0.2,5.4e4,100)
poly_templates = utils.polynomial_templates(wave, order=poly_order, line=False)
for id, mag in zip(ids, mags):
self.refine(id, mag=mag, poly_order=poly_order,
max_coeff=max_coeff, size=30, ds9=ds9,
verbose=verbose, fcontam=fcontam,
templates=poly_templates)
def refine(self, id, mag=-99, poly_order=3, size=30, ds9=None, verbose=True, max_coeff=2.5, fcontam=0.5, templates=None):
"""Fit polynomial to extracted spectrum of single object to use for contamination model.
Parameters
----------
id : int
Object ID to extract.
mag : float
Object magnitude. Determines which orders to extract; see
`~grizli.model.GrismFLT.compute_model_orders`.
poly_order : int
Order of the polynomial to fit.
size : int
Size of cutout to extract.
ds9 : `~grizli.ds9.DS9`, optional
Display the refined models to DS9 as they are computed.
verbose : bool
Print information about the fit
max_coeff : float
The script computes the implied flux of the polynomial template
at the pivot wavelength of the direct image filters. If this
flux is greater than `max_coeff` times the *observed* flux in the
            direct image, then the polynomial fit is considered bad.
fcontam : float
Contamination weighting parameter.
templates : dict, optional
Precomputed template dictionary. If `None` then compute
polynomial templates with order `poly_order`.
Returns
-------
Updates `self.model` in place.
"""
beams = self.get_beams(id, size=size, min_overlap=0.1,
get_slice_header=False, min_mask=0.01,
min_sens=0.01, mask_resid=True)
if len(beams) == 0:
return True
mb = MultiBeam(beams, fcontam=fcontam, min_sens=0.01, sys_err=0.03,
min_mask=0.01, mask_resid=True)
if templates is None:
wave = np.linspace(0.9*mb.wavef.min(), 1.1*mb.wavef.max(), 100)
templates = utils.polynomial_templates(wave, order=poly_order,
line=False)
try:
tfit = mb.template_at_z(z=0, templates=templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
except:
ret = False
return False
scale_coeffs = [tfit['cfit']['poly {0}'.format(i)][0] for i in range(1+poly_order)]
xspec, ypoly = tfit['cont1d'].wave, tfit['cont1d'].flux
# Don't extrapolate
mb_waves = mb.wavef[mb.fit_mask]
mb_clip = (xspec > mb_waves.min()) & (xspec < mb_waves.max())
if mb_clip.sum() > 0:
ypoly[xspec < mb_waves.min()] = ypoly[mb_clip][0]
ypoly[xspec > mb_waves.max()] = ypoly[mb_clip][-1]
# Check where templates inconsistent with broad-band fluxes
xb = [beam.direct.ref_photplam if beam.direct['REF'] is not None else beam.direct.photplam for beam in beams]
obs_flux = np.array([beam.beam.total_flux for beam in beams])
mod_flux = np.polyval(scale_coeffs[::-1], np.array(xb)/1.e4-1)
nonz = obs_flux != 0
if (np.abs(mod_flux/obs_flux)[nonz].max() > max_coeff) | ((~np.isfinite(mod_flux/obs_flux)[nonz]).sum() > 0) | (np.min(mod_flux[nonz]) < 0) | ((~np.isfinite(ypoly)).sum() > 0):
if verbose:
cstr = ' '.join(['{0:9.2e}'.format(c) for c in scale_coeffs])
print('{0:>5d} mag={1:6.2f} {2} xx'.format(id, mag, cstr))
return True
# Put the refined model into the full-field model
self.compute_single_model(id, mag=mag, size=-1, store=False, spectrum_1d=[xspec, ypoly], is_cgs=True, get_beams=None, in_place=True)
# Display the result?
if ds9:
flt = self.FLTs[0]
mask = flt.grism['SCI'] != 0
ds9.view((flt.grism['SCI'] - flt.model)*mask,
header=flt.grism.header)
if verbose:
cstr = ' '.join(['{0:9.2e}'.format(c) for c in scale_coeffs])
print('{0:>5d} mag={1:6.2f} {2}'.format(id, mag, cstr))
return True
#m2d = mb.reshape_flat(modelf)
############
def old_refine(self, id, mag=-99, poly_order=1, size=30, ds9=None, verbose=True, max_coeff=2.5):
"""TBD
"""
# Extract and fit beam spectra
beams = self.get_beams(id, size=size, min_overlap=0.5, get_slice_header=False)
if len(beams) == 0:
return True
mb = MultiBeam(beams)
try:
A, out_coeffs, chi2, modelf = mb.fit_at_z(poly_order=poly_order, fit_background=True, fitter='lstsq')
except:
return False
# Poly template
scale_coeffs = out_coeffs[mb.N*mb.fit_bg:mb.N*mb.fit_bg+mb.n_poly]
xspec, yfull = mb.eval_poly_spec(out_coeffs)
# Check where templates inconsistent with broad-band fluxes
xb = [beam.direct.ref_photplam if beam.direct['REF'] is not None else beam.direct.photplam for beam in beams]
fb = [beam.beam.total_flux for beam in beams]
mb = np.polyval(scale_coeffs[::-1], np.array(xb)/1.e4-1)
        if (np.abs(mb/fb).max() > max_coeff) | ((~np.isfinite(mb/fb)).sum() > 0) | (np.min(mb) < 0):
if verbose:
print('{0} mag={1:6.2f} {2} xx'.format(id, mag, scale_coeffs))
return True
# Put the refined model into the full-field model
self.compute_single_model(id, mag=mag, size=-1, store=False, spectrum_1d=[(xspec+1)*1.e4, yfull], is_cgs=True, get_beams=None, in_place=True)
# Display the result?
if ds9:
flt = self.FLTs[0]
mask = flt.grism['SCI'] != 0
ds9.view((flt.grism['SCI'] - flt.model)*mask,
header=flt.grism.header)
if verbose:
print('{0} mag={1:6.2f} {2}'.format(id, mag, scale_coeffs))
return True
#m2d = mb.reshape_flat(modelf)
def make_stack(self, id, size=20, target='grism', skip=True, fcontam=1., scale=1, save=True, kernel='point', pixfrac=1, diff=True):
"""Make drizzled 2D stack for a given object
Parameters
----------
id : int
Object ID number.
target : str
Rootname for output files.
skip : bool
If True and the stack PNG file already exists, don't proceed.
fcontam : float
Contamination weighting parameter.
save : bool
Save the figure and FITS HDU to files with names like
>>> img_file = '{0}_{1:05d}.stack.png'.format(target, id)
>>> fits_file = '{0}_{1:05d}.stack.fits'.format(target, id)
diff : bool
Plot residual in final stack panel.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
FITS HDU of the stacked spectra.
fig : `~matplotlib.figure.Figure`
Stack figure object.
"""
print(target, id)
if os.path.exists('{0}_{1:05d}.stack.png'.format(target, id)) & skip:
return True
beams = self.get_beams(id, size=size, beam_id='A')
if len(beams) == 0:
print('id = {0}: No beam cutouts available.'.format(id))
return None
mb = MultiBeam(beams, fcontam=fcontam, group_name=target)
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam, flambda=False,
size=size, scale=scale,
kernel=kernel, pixfrac=pixfrac,
diff=diff)
if save:
fig.savefig('{0}_{1:05d}.stack.png'.format(target, id))
hdu.writeto('{0}_{1:05d}.stack.fits'.format(target, id),
overwrite=True)
return hdu, fig
def drizzle_grism_models(self, root='grism_model', kernel='square', scale=0.1, pixfrac=1, make_figure=True, fig_xsize=10):
"""
Make model-subtracted drizzled images of each grism / PA
Parameters
----------
root : str
Rootname of the output files.
kernel : str
Drizzle kernel e.g., ('square', 'point').
scale : float
Drizzle `scale` parameter, pixel scale in arcsec.
pixfrac : float
Drizzle "pixfrac".
"""
try:
from .utils import drizzle_array_groups
except:
from grizli.utils import drizzle_array_groups
# Loop through grisms and PAs
for g in self.PA:
for pa in self.PA[g]:
idx = self.PA[g][pa]
N = len(idx)
sci_list = [self.FLTs[i].grism['SCI'] for i in idx]
clean_list = [self.FLTs[i].grism['SCI']-self.FLTs[i].model
for i in idx]
wht_list = [(self.FLTs[i].grism['DQ'] == 0)/self.FLTs[i].grism['ERR']**2 for i in idx]
for i in range(N):
mask = ~np.isfinite(wht_list[i])
wht_list[i][mask] = 0
wcs_list = [self.FLTs[i].grism.wcs for i in idx]
for i, ix in enumerate(idx):
if wcs_list[i]._naxis[0] == 0:
wcs_list[i]._naxis = self.FLTs[ix].grism.sh
# Science array
outfile = '{0}-{1}-{2}_grism_sci.fits'.format(root, g.lower(),
pa)
print(outfile)
out = drizzle_array_groups(sci_list, wht_list, wcs_list,
scale=scale, kernel=kernel,
pixfrac=pixfrac)
outsci, _, _, header, outputwcs = out
header['FILTER'] = g
header['PA'] = pa
pyfits.writeto(outfile, data=outsci, header=header,
overwrite=True, output_verify='fix')
# Model-subtracted
outfile = '{0}-{1}-{2}_grism_clean.fits'.format(root, g.lower(),
pa)
print(outfile)
out = drizzle_array_groups(clean_list, wht_list, wcs_list,
scale=scale, kernel=kernel,
pixfrac=pixfrac)
outsci, _, _, header, outputwcs = out
header['FILTER'] = g
header['PA'] = pa
pyfits.writeto(outfile, data=outsci, header=header,
overwrite=True, output_verify='fix')
# Make figure
if make_figure:
with pyfits.open(outfile.replace('clean', 'sci')) as img:
im = img[0].data*1
im[im == 0] = np.nan
sh = im.shape
yp, xp = np.indices(sh)
mask = np.isfinite(im)
xmi = np.maximum(xp[mask].min()-10, 0)
xma = np.minimum(xp[mask].max()+10, sh[1])
ymi = np.maximum(yp[mask].min()-10, 0)
yma = np.minimum(yp[mask].max()+10, sh[0])
xsl = slice(xmi, xma)
ysl = slice(ymi, yma)
_dy = (ysl.stop - ysl.start)
_dx = (xsl.stop - xsl.start)
sh_aspect = _dy / _dx
vmi, vma = -0.05, 0.2
fig = plt.figure(figsize=[fig_xsize,
fig_xsize/2*sh_aspect])
ax = fig.add_subplot(121)
ax.imshow(im[ysl, xsl], origin='lower', cmap='gray_r',
vmin=vmi, vmax=vma)
# Clean
ax = fig.add_subplot(122)
with pyfits.open(outfile) as img:
im = img[0].data*1
im[im == 0] = np.nan
ax.imshow(im[ysl, xsl], origin='lower', cmap='gray_r',
vmin=vmi, vmax=vma)
for ax in fig.axes:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axis('off')
fig.tight_layout(pad=0.)
fig.text(0.5, 0.98, outfile.split('_grism')[0], color='k',
bbox=dict(facecolor='w', edgecolor='None'),
ha='center', va='top', transform=fig.transFigure)
fig.savefig(outfile.split('_clean')[0]+'.png',
transparent=True)
plt.close(fig)
def drizzle_full_wavelength(self, wave=1.4e4, ref_header=None,
kernel='point', pixfrac=1., verbose=True,
offset=[0, 0], fcontam=0.):
"""Drizzle FLT frames recentered at a specified wavelength
Script computes polynomial coefficients that define the dx and dy
        offsets to a specific dispersed wavelength relative to the reference
position and adds these to the SIP distortion keywords before
drizzling the input exposures to the output frame.
Parameters
----------
wave : float
Reference wavelength to center the output products
ref_header : `~astropy.io.fits.Header`
Reference header for setting the output WCS and image dimensions.
kernel : str, ('square' or 'point')
Drizzle kernel to use
pixfrac : float
Drizzle PIXFRAC (for `kernel` = 'point')
verbose : bool
Print information to terminal
Returns
-------
sci, wht : `~np.ndarray`
Drizzle science and weight arrays with dimensions set in
`ref_header`.
"""
from astropy.modeling import models, fitting
import astropy.wcs as pywcs
# try:
# import drizzle
# if drizzle.__version__ != '1.12.99':
# # Not the fork that works for all input/output arrays
# raise(ImportError)
#
# #print('drizzle!!')
# from drizzle.dodrizzle import dodrizzle
# drizzler = dodrizzle
# dfillval = '0'
# except:
from drizzlepac import adrizzle
adrizzle.log.setLevel('ERROR')
drizzler = adrizzle.do_driz
dfillval = 0
# Quick check now for which grism exposures we should use
if wave < 1.1e4:
use_grism = 'G102'
else:
use_grism = 'G141'
# Get the configuration file
conf = None
for i in range(self.N):
if self.FLTs[i].grism.filter == use_grism:
conf = self.FLTs[i].conf
# Grism not found in list
if conf is None:
return False
# Compute field-dependent dispersion parameters
dydx_0_p = conf.conf['DYDX_A_0']
dydx_1_p = conf.conf['DYDX_A_1']
dldp_0_p = conf.conf['DLDP_A_0']
dldp_1_p = conf.conf['DLDP_A_1']
yp, xp = np.indices((1014, 1014)) # hardcoded for WFC3/IR
sk = 10 # don't need to evaluate at every pixel
dydx_0 = conf.field_dependent(xp[::sk, ::sk], yp[::sk, ::sk], dydx_0_p)
dydx_1 = conf.field_dependent(xp[::sk, ::sk], yp[::sk, ::sk], dydx_1_p)
dldp_0 = conf.field_dependent(xp[::sk, ::sk], yp[::sk, ::sk], dldp_0_p)
dldp_1 = conf.field_dependent(xp[::sk, ::sk], yp[::sk, ::sk], dldp_1_p)
# Inverse pixel offsets from the specified wavelength
dp = (wave - dldp_0)/dldp_1
i_x, i_y = 1, 0 # indexing offsets
dx = dp/np.sqrt(1+dydx_1) + i_x
dy = dydx_0 + dydx_1*dx + i_y
dx += offset[0]
dy += offset[1]
# Compute polynomial coefficients
p_init = models.Polynomial2D(degree=4)
#fit_p = fitting.LevMarLSQFitter()
fit_p = fitting.LinearLSQFitter()
p_dx = fit_p(p_init, xp[::sk, ::sk]-507, yp[::sk, ::sk]-507, -dx)
p_dy = fit_p(p_init, xp[::sk, ::sk]-507, yp[::sk, ::sk]-507, -dy)
# Output WCS
out_wcs = pywcs.WCS(ref_header, relax=True)
out_wcs.pscale = utils.get_wcs_pscale(out_wcs)
# Initialize outputs
shape = (ref_header['NAXIS2'], ref_header['NAXIS1'])
outsci = np.zeros(shape, dtype=np.float32)
outwht = np.zeros(shape, dtype=np.float32)
outctx = np.zeros(shape, dtype=np.int32)
# Loop through exposures
for i in range(self.N):
flt = self.FLTs[i]
if flt.grism.filter != use_grism:
continue
h = flt.grism.header.copy()
# Update SIP coefficients
for j, p in enumerate(p_dx.param_names):
key = 'A_'+p[1:]
if key in h:
h[key] += p_dx.parameters[j]
else:
h[key] = p_dx.parameters[j]
for j, p in enumerate(p_dy.param_names):
key = 'B_'+p[1:]
if key in h:
h[key] += p_dy.parameters[j]
else:
h[key] = p_dy.parameters[j]
line_wcs = pywcs.WCS(h, relax=True)
line_wcs.pscale = utils.get_wcs_pscale(line_wcs)
if not hasattr(line_wcs, 'pixel_shape'):
line_wcs.pixel_shape = line_wcs._naxis1, line_wcs._naxis2
# Science and wht arrays
sci = flt.grism['SCI'] - flt.model
wht = 1/(flt.grism['ERR']**2)
scl = np.exp(-(fcontam*np.abs(flt.model)/flt.grism['ERR']))
wht *= scl
wht[~np.isfinite(wht)] = 0
# Drizzle it
if verbose:
print('Drizzle {0} to wavelength {1:.2f}'.format(flt.grism.parent_file, wave))
drizzler(sci, line_wcs, wht, out_wcs,
outsci, outwht, outctx, 1., 'cps', 1,
wcslin_pscale=line_wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Done!
return outsci, outwht
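    # Worked example (illustrative numbers, not instrument-exact): if the
    # field-dependent dispersion at some pixel gives dldp_0 ~ 8950 A and
    # dldp_1 ~ 45 A/pix, recentering on wave = 1.4e4 A implies
    # dp = (14000 - 8950) / 45 ~ 112 pixels along the trace, which is the
    # offset folded into the SIP coefficients above.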
# def replace_direct_image_cutouts(beams_file='', ref_image='gdn-100mas-f160w_drz_sci.fits', interp='poly5', cutout=200, background_func=utils.mode_statistic):
# """
# Replace "REF" extensions in a `beams.fits` file
#
# Parameters
# ----------
# beams_file : str
# Filename of a "beams.fits" file.
#
# ref_image : str or `~astropy.io.fits.HDUList`
# Filename or preloaded FITS file.
#
# interp : str
# Interpolation function to use for `~drizzlepac.astrodrizzle.ablot.do_blot`.
#
# cutout : int
# Make a slice of the `ref_image` with size [-cutout,+cutout] around
# the center position of the desired object before passing to `blot`.
#
# Returns
# -------
# beams_image : `~astropy.io.fits.HDUList`
# Image object with the "REF" extensions filled with the new blotted
# image cutouts.
#
# """
# from drizzlepac.astrodrizzle import ablot
#
# if isinstance(ref_image, pyfits.HDUList):
# ref_im = ref_image
# ref_image_filename = ref_image.filename()
# else:
# ref_im = pyfits.open(ref_image)
# ref_image_filename = ref_image
#
# ref_wcs = pywcs.WCS(ref_im[0].header, relax=True)
# ref_wcs.pscale = utils.get_wcs_pscale(ref_wcs)
#
# ref_photflam = ref_im[0].header['PHOTFLAM']
# ref_data = ref_im[0].data
# dummy_wht = np.ones_like(ref_im[0].data, dtype=np.float32)
#
# beams_image = pyfits.open(beams_file)
#
# beam_ra = beams_image[0].header['RA']
# beam_dec = beams_image[0].header['DEC']
#
# xy = np.cast[int](np.round(ref_wcs.all_world2pix([beam_ra], [beam_dec], 0))).flatten()
#
# slx = slice(xy[0]-cutout, xy[0]+cutout)
# sly = slice(xy[1]-cutout, xy[1]+cutout)
#
# bkg_data = []
#
# for ie, ext in enumerate(beams_image):
# if 'EXTNAME' not in ext.header:
# continue
# elif ext.header['EXTNAME'] == 'REF':
# #break
#
# ext.header['REF_FILE'] = ref_image_filename
# for k in ['PHOTFLAM', 'PHOTPLAM']:
# ext.header[k] = ref_im[0].header[k]
#
# the_filter = utils.get_hst_filter(ref_im[0].header)
# ext.header['FILTER'] = ext.header['DFILTER'] = the_filter
#
# wcs_file = ext.header['GPARENT'].replace('.fits', '.{0:02}.wcs.fits'.format(ext.header['SCI_EXTN']))
# if os.path.exists(wcs_file):
# wcs_fobj = pyfits.open(wcs_file)
#
# ext_wcs = pywcs.WCS(ext.header, relax=True,
# fobj=wcs_fobj)
# # ext_wcs.pixel_shape = (wcs_fobj[0].header['CRPIX1']*2,
# # wcs_fobj[0].header['CRPIX2']*2)
# # try:
# # ext_wcs.wcs.cd = ext_wcs.wcs.pc
# # delattr(ext_wcs.wcs, 'pc')
# # except:
# # pass
# else:
# ext_wcs = pywcs.WCS(ext.header, relax=True)
#
# ext_wcs.pscale = utils.get_wcs_pscale(ext_wcs)
# blotted = ablot.do_blot(ref_data[sly, slx],
# ref_wcs.slice([sly, slx]),
# ext_wcs, 1, coeffs=True, interp=interp,
# sinscl=1.0, stepsize=10, wcsmap=None)
#
# if background_func is not None:
# seg_data = beams_image[ie+1].data
# msk = seg_data == 0
# #print(msk.shape, blotted.shape, seg_data.shape, ie)
# if msk.sum() > 0:
# if bkg_data is None:
# bkg_data = blotted[msk]
# else:
# bkg_data = np.append(bkg_data, blotted[msk])
#
# if msk.sum() > 0:
# blotted -= background_func(blotted[msk])
#
# ext.data = blotted*ref_photflam
#
# if bkg_data is not None:
# bkg_value = background_func(bkg_data)
# for i in range(self.N):
#
# return beams_image
class MultiBeam(GroupFitter):
def __init__(self, beams, group_name=None, fcontam=0., psf=False, polyx=[0.3, 2.5], MW_EBV=0., min_mask=0.01, min_sens=0.08, sys_err=0.0, mask_resid=True, verbose=True, replace_direct=None, **kwargs):
"""Tools for dealing with multiple `~.model.BeamCutout` instances
Parameters
----------
beams : list
List of `~.model.BeamCutout` objects.
group_name : str, None
Rootname to use for saved products. If None, then default to
'group'.
fcontam : float
Factor to use to downweight contaminated pixels. The pixel
inverse variances are scaled by the following weight factor when
evaluating chi-squared of a 2D fit,
`weight = np.exp(-(fcontam*np.abs(contam)*np.sqrt(ivar)))`
where `contam` is the contaminating flux and `ivar` is the initial
pixel inverse variance.
psf : bool
Fit an ePSF model to the direct image to use as the morphological
reference.
MW_EBV : float
Milky way foreground extinction.
min_mask : float
Minimum factor relative to the maximum pixel value of the flat
f-lambda model where the 2D cutout data are considered good.
Passed through to `~grizli.model.BeamCutout`.
min_sens : float
See `~grizli.model.BeamCutout`.
sys_err : float
Systematic error added in quadrature to the pixel variances:
`var_total = var_initial + (beam.sci*sys_err)**2`
Attributes
----------
TBD : type
"""
if group_name is None:
self.group_name = 'group'
else:
self.group_name = group_name
self.fcontam = fcontam
self.polyx = polyx
self.min_mask = min_mask
self.min_sens = min_sens
self.mask_resid = mask_resid
self.sys_err = sys_err
self.Asave = {}
if isinstance(beams, str):
self.load_master_fits(beams, verbose=verbose)
# Auto-generate group_name from filename, e.g.,
# j100140p0130_00237.beams.fits > j100140p0130
if group_name is None:
self.group_name = beams.split('_')[0]
else:
if isinstance(beams[0], str):
# `beams` is list of strings
if 'beams.fits' in beams[0]:
# Master beam files
self.load_master_fits(beams[0], verbose=verbose)
for i in range(1, len(beams)):
b_i = MultiBeam(beams[i], group_name=group_name, fcontam=fcontam, psf=psf, polyx=polyx, MW_EBV=np.maximum(MW_EBV, 0), sys_err=sys_err, verbose=verbose, min_mask=min_mask, min_sens=min_sens, mask_resid=mask_resid)
self.extend(b_i)
else:
# List of individual beam.fits files
self.load_beam_fits(beams)
else:
self.beams = beams
self.ra, self.dec = self.beams[0].get_sky_coords()
if MW_EBV < 0:
# Try to get MW_EBV from mastquery.utils
try:
import mastquery.utils
MW_EBV = mastquery.utils.get_mw_dust(self.ra, self.dec)
except:
try:
import mastquery.utils
MW_EBV = mastquery.utils.get_irsa_dust(self.ra, self.dec)
except:
MW_EBV = 0.
self.MW_EBV = MW_EBV
self._set_MW_EBV(MW_EBV)
self._parse_beams(psf=psf)
self.apply_trace_shift()
self.Nphot = 0
self.is_spec = 1
if replace_direct is not None:
self.replace_direct_image_cutouts(**replace_direct)
def _set_MW_EBV(self, MW_EBV, R_V=utils.MW_RV):
"""
Initialize Galactic extinction
Parameters
----------
MW_EBV : float
Local E(B-V)
R_V : float
Relation between specific and total extinction,
``a_v = r_v * ebv``.
"""
for b in self.beams:
beam = b.beam
if beam.MW_EBV != MW_EBV:
beam.MW_EBV = MW_EBV
beam.init_galactic_extinction(MW_EBV, R_V=R_V)
beam.process_config()
b.flat_flam = b.compute_model(in_place=False, is_cgs=True)
@property
def N(self):
return len(self.beams)
@property
def Ngrism(self):
"""
dictionary containing number of exposures by grism
"""
# Parse grisms & PAs
Ngrism = {}
for beam in self.beams:
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
if grism not in Ngrism:
Ngrism[grism] = 0
Ngrism[grism] += 1
return Ngrism
@property
def grisms(self):
"""
Available grisms
"""
grisms = list(self.Ngrism.keys())
return grisms
@property
def PA(self):
"""
Available PAs in each grism
"""
_PA = {}
for g in self.Ngrism:
_PA[g] = {}
for i, beam in enumerate(self.beams):
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
PA_i = beam.get_dispersion_PA(decimals=0)
if PA_i in _PA[grism]:
_PA[grism][PA_i].append(i)
else:
_PA[grism][PA_i] = [i]
return _PA
@property
def id(self):
return self.beams[0].id
def _parse_beams(self, psf=False):
"""
Derive properties of the beam list (grism, PA) and initialize
data arrays.
"""
# Use WFC3 ePSF for the fit
self.psf_param_dict = None
if (psf > 0) & (self.beams[0].grism.instrument in ['WFC3', 'ACS']):
self.psf_param_dict = OrderedDict()
for ib, beam in enumerate(self.beams):
if (beam.direct.data['REF'] is not None):
# Use REF extension. scale factors might be wrong
beam.direct.data['SCI'] = beam.direct.data['REF']
new_err = np.ones_like(beam.direct.data['ERR'])
new_err *= utils.nmad(beam.direct.data['SCI'])
beam.direct.data['ERR'] = new_err
beam.direct.filter = beam.direct.ref_filter # 'F160W'
beam.direct.photflam = beam.direct.ref_photflam
beam.init_epsf(yoff=0.0, skip=psf*1, N=4, get_extended=True)
#beam.compute_model = beam.compute_model_psf
#beam.beam.compute_model = beam.beam.compute_model_psf
beam.compute_model(use_psf=True)
m = beam.compute_model(in_place=False)
#beam.modelf = beam.model.flatten()
#beam.model = beam.modelf.reshape(beam.beam.sh_beam)
beam.flat_flam = beam.compute_model(in_place=False,
is_cgs=True)
_p = beam.grism.parent_file
self.psf_param_dict[_p] = beam.beam.psf_params
self._parse_beam_arrays()
def _parse_beam_arrays(self):
"""
"""
self.poly_order = None
self.shapes = [beam.model.shape for beam in self.beams]
self.Nflat = [np.product(shape) for shape in self.shapes]
self.Ntot = np.sum(self.Nflat)
for b in self.beams:
if hasattr(b, 'xp_mask'):
delattr(b, 'xp_mask')
# Big array of normalized wavelengths (wave / 1.e4 - 1)
self.xpf = np.hstack([np.dot(np.ones((b.beam.sh_beam[0], 1)),
b.beam.lam[None, :]).flatten()/1.e4
for b in self.beams]) - 1
# Flat-flambda model spectra
self.flat_flam = np.hstack([b.flat_flam for b in self.beams])
self.fit_mask = np.hstack([b.fit_mask*b.contam_mask
for b in self.beams])
self.DoF = self.fit_mask.sum()
# systematic error
for i, b in enumerate(self.beams):
if hasattr(b, 'has_sys_err'):
continue
sciu = b.scif.reshape(b.sh)
ivar = 1./(1/b.ivar + (self.sys_err*sciu)**2)
ivar[~np.isfinite(ivar)] = 0
b.ivar = ivar*1
b.ivarf = b.ivar.flatten()
self.ivarf = np.hstack([b.ivarf for b in self.beams])
self.fit_mask &= (self.ivarf >= 0)
self.scif = np.hstack([b.scif for b in self.beams])
self.idf = np.hstack([b.scif*0+ib for ib, b in enumerate(self.beams)])
self.idf = np.cast[int](self.idf)
#self.ivarf = 1./(1/self.ivarf + (self.sys_err*self.scif)**2)
self.ivarf[~np.isfinite(self.ivarf)] = 0
self.sivarf = np.sqrt(self.ivarf)
self.wavef = np.hstack([b.wavef for b in self.beams])
self.contamf = np.hstack([b.contam.flatten() for b in self.beams])
weightf = np.exp(-(self.fcontam*np.abs(self.contamf)*self.sivarf))
weightf[~np.isfinite(weightf)] = 0
self.weightf = weightf
self.fit_mask &= self.weightf > 0
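        # Worked example of the weighting above: with fcontam=0.2, a pixel
        # whose contamination is 5x its 1-sigma noise gets weight
        # exp(-0.2 * 5) ~ 0.37, while an uncontaminated pixel keeps
        # weight exp(0) = 1.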
self.slices = self._get_slices(masked=False)
self._update_beam_mask()
self.DoF = int((self.weightf*self.fit_mask).sum())
self.Nmask = np.sum([b.fit_mask.sum() for b in self.beams])
# Initialize background fit array
# self.A_bg = np.zeros((self.N, self.Ntot))
# i0 = 0
# for i in range(self.N):
# self.A_bg[i, i0:i0+self.Nflat[i]] = 1.
# i0 += self.Nflat[i]
self.A_bg = self._init_background(masked=False)
self.Asave = {}
self.A_bgm = self._init_background(masked=True)
self.init_poly_coeffs(poly_order=1)
self.ra, self.dec = self.beams[0].get_sky_coords()
def compute_exptime(self):
"""
Compute number of exposures and total exposure time for each grism
"""
exptime = {}
nexposures = {}
for beam in self.beams:
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
if grism in exptime:
exptime[grism] += beam.grism.exptime
nexposures[grism] += 1
else:
exptime[grism] = beam.grism.exptime
nexposures[grism] = 1
return nexposures, exptime
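    # Usage sketch (hypothetical values):
    #   nexp, expt = mb.compute_exptime()
    #   nexp -> {'G141': 4},  expt -> {'G141': 4423.5}  # seconds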
def extend(self, new, verbose=True):
"""Concatenate `~grizli.multifit.MultiBeam` objects
Parameters
----------
new : `~grizli.multifit.MultiBeam`
Beam object containing new beams to add.
verbose : bool
Print summary of the change.
"""
self.beams.extend(new.beams)
self._parse_beams()
if verbose:
print('Add beams: {0}\n Now: {1}'.format(new.Ngrism, self.Ngrism))
    def write_master_fits(self, verbose=True, get_hdu=False, strip=True, include_model=False, get_trace_table=True):
        """Store all beams in a single FITS HDUList

        Writes `{group_name}_{id:05d}.beams.fits` unless `get_hdu=True`,
        in which case the `~astropy.io.fits.HDUList` is returned
        """
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
rd = self.beams[0].get_sky_coords()
hdu[0].header['ID'] = (self.id, 'Object ID')
hdu[0].header['RA'] = (rd[0], 'Right Ascension')
hdu[0].header['DEC'] = (rd[1], 'Declination')
exptime = {}
for g in self.Ngrism:
exptime[g] = 0.
count = []
for ib, beam in enumerate(self.beams):
hdu_i = beam.write_fits(get_hdu=True, strip=strip,
include_model=include_model,
get_trace_table=get_trace_table)
hdu.extend(hdu_i[1:])
count.append(len(hdu_i)-1)
hdu[0].header['FILE{0:04d}'.format(ib)] = (beam.grism.parent_file, 'Grism parent file')
hdu[0].header['GRIS{0:04d}'.format(ib)] = (beam.grism.filter, 'Grism element')
hdu[0].header['NEXT{0:04d}'.format(ib)] = (count[-1], 'Number of extensions')
            try:
                exptime[beam.grism.filter] += beam.grism.header['EXPTIME']
            except KeyError:
                # NIRISS grisms are keyed by the pupil element
                exptime[beam.grism.pupil] += beam.grism.header['EXPTIME']
hdu[0].header['COUNT'] = (self.N, ' '.join(['{0}'.format(c) for c in count]))
for g in self.Ngrism:
hdu[0].header['T_{0}'.format(g)] = (exptime[g], 'Exposure time in grism {0}'.format(g))
if get_hdu:
return hdu
outfile = '{0}_{1:05d}.beams.fits'.format(self.group_name, self.id)
if verbose:
print(outfile)
hdu.writeto(outfile, overwrite=True)
def load_master_fits(self, beam_file, verbose=True):
"""
Load a "beams.fits" file.
"""
try:
utils.fetch_acs_wcs_files(beam_file)
except:
pass
if verbose:
print('load_master_fits: {0}'.format(beam_file))
hdu = pyfits.open(beam_file, lazy_load_hdus=False)
N = hdu[0].header['COUNT']
        Next = np.asarray(hdu[0].header.comments['COUNT'].split(), dtype=int)
i0 = 1
self.beams = []
for i in range(N):
key = 'NEXT{0:04d}'.format(i)
if key in hdu[0].header:
Next_i = hdu[0].header[key]
else:
Next_i = 6 # Assume doesn't have direct SCI/ERR cutouts
            # Slice the HDUList for this beam; the slice shares memory
            # with the open `hdu`
            hducopy = hdu[i0:i0+Next_i]
beam = model.BeamCutout(fits_file=hducopy, min_mask=self.min_mask,
min_sens=self.min_sens,
mask_resid=self.mask_resid)
self.beams.append(beam)
if verbose:
print('{0} {1} {2}'.format(i+1, beam.grism.parent_file, beam.grism.filter))
i0 += Next_i # 6#Next[i]
hdu.close()
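    # Round-trip sketch (filename hypothetical, following the
    # '{group_name}_{id:05d}.beams.fits' convention):
    #   mb.write_master_fits()                       # -> 'grp_00123.beams.fits'
    #   mb.load_master_fits('grp_00123.beams.fits')  # repopulates mb.beams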
    def write_beam_fits(self, verbose=True):
        """Write each beam to a separate FITS file named for its parent
        grism exposure
        """
outfiles = []
for beam in self.beams:
root = beam.grism.parent_file.split('.fits')[0]
outfile = beam.write_fits(root)
if verbose:
print('Wrote {0}'.format(outfile))
outfiles.append(outfile)
return outfiles
    def load_beam_fits(self, beam_list, conf=None, verbose=True):
        """Load `~grizli.model.BeamCutout` objects from a list of FITS files
        """
self.beams = []
for file in beam_list:
if verbose:
print(file)
beam = model.BeamCutout(fits_file=file, conf=conf,
min_mask=self.min_mask,
min_sens=self.min_sens,
mask_resid=self.mask_resid)
self.beams.append(beam)
def replace_segmentation_image_cutouts(self, ref_image='gdn-100mas-f160w_seg.fits'):
"""
Replace "REF" extensions in a `beams.fits` file
Parameters
----------
ref_image : str, `~astropy.io.fits.HDUList`, `~astropy.io.fits.ImageHDU`
Filename or preloaded FITS file.
Returns
-------
beams_image : `~astropy.io.fits.HDUList`
Image object with the "REF" extensions filled with the new blotted
image cutouts.
"""
if isinstance(ref_image, pyfits.HDUList):
ref_data = ref_image[0].data
ref_header = ref_image[0].header
ref_image_filename = ref_image.filename()
elif (isinstance(ref_image, pyfits.ImageHDU) |
isinstance(ref_image, pyfits.PrimaryHDU)):
ref_data = ref_image.data
ref_header = ref_image.header
ref_image_filename = 'HDU'
else:
with pyfits.open(ref_image) as ref_im:
ref_data = ref_im[0].data*1
ref_header = ref_im[0].header.copy()
ref_image_filename = ref_image
ref_wcs = pywcs.WCS(ref_header, relax=True)
ref_wcs.pscale = utils.get_wcs_pscale(ref_wcs)
ref_data = ref_data.astype(np.float32)
for ib in range(self.N):
wcs_copy = self.beams[ib].direct.wcs
if hasattr(wcs_copy, 'idcscale'):
if wcs_copy.idcscale is None:
delattr(wcs_copy, 'idcscale')
in_data, in_wcs, out_wcs = ref_data, ref_wcs, wcs_copy
blot_seg = utils.blot_nearest_exact(ref_data, ref_wcs, wcs_copy,
verbose=True, stepsize=-1,
scale_by_pixel_area=False)
self.beams[ib].beam.set_segmentation(blot_seg)
def replace_direct_image_cutouts(self, ref_image='gdn-100mas-f160w_drz_sci.fits', ext=0, interp='poly5', cutout=200, background_func=np.median, thumb_labels=None):
"""
Replace "REF" extensions in a `beams.fits` file
Parameters
----------
ref_image : str or `~astropy.io.fits.HDUList`
Filename or preloaded FITS file.
interp : str
Interpolation function to use for `~drizzlepac.astrodrizzle.ablot.do_blot`.
cutout : int
Make a slice of the `ref_image` with size [-cutout,+cutout] around
the center position of the desired object before passing to
`blot`.
background_func : function, None
If not `None`, compute local background with value from
`background_func(ref_image[cutout])`.
Returns
-------
beams_image : `~astropy.io.fits.HDUList`
Image object with the "REF" extensions filled with the new blotted
image cutouts.
"""
from drizzlepac.astrodrizzle import ablot
if isinstance(ref_image, pyfits.HDUList):
ref_data = ref_image[0].data
ref_header = ref_image[0].header
ref_image_filename = ref_image.filename()
elif (isinstance(ref_image, pyfits.ImageHDU) |
isinstance(ref_image, pyfits.PrimaryHDU)):
ref_data = ref_image.data
ref_header = ref_image.header
ref_image_filename = 'HDU'
else:
            with pyfits.open(ref_image) as ref_im:
                ref_data = ref_im[ext].data*1
                ref_header = ref_im[ext].header.copy()
ref_image_filename = ref_image
if ref_data.dtype not in [np.float32, np.dtype('>f4')]:
ref_data = ref_data.astype(np.float32)
ref_wcs = pywcs.WCS(ref_header, relax=True)
ref_wcs.pscale = utils.get_wcs_pscale(ref_wcs)
        if (not hasattr(ref_wcs, '_naxis1')) & hasattr(ref_wcs, '_naxis'):
ref_wcs._naxis1, ref_wcs._naxis2 = ref_wcs._naxis
if 'PHOTPLAM' in ref_header:
ref_photplam = ref_header['PHOTPLAM']
else:
ref_photplam = 1.
if 'PHOTFLAM' in ref_header:
ref_photflam = ref_header['PHOTFLAM']
else:
ref_photflam = 1.
try:
ref_filter = utils.get_hst_filter(ref_header)
except:
ref_filter = 'N/A'
beam_ra, beam_dec = self.ra, self.dec
        xy = np.asarray(np.round(ref_wcs.all_world2pix([beam_ra], [beam_dec], 0)), dtype=int).flatten()
sh = ref_data.shape
slx = slice(np.maximum(xy[0]-cutout, 0),
np.minimum(xy[0]+cutout, sh[1]))
sly = slice(np.maximum(xy[1]-cutout, 0),
np.minimum(xy[1]+cutout, sh[0]))
bkg_data = None
#print('xxx', slx, sly, ref_data[sly, slx].shape, ref_data[sly, slx].max(), ref_photflam)
for ie in range(self.N):
wcs_copy = self.beams[ie].direct.wcs
if hasattr(wcs_copy, 'idcscale'):
if wcs_copy.idcscale is None:
delattr(wcs_copy, 'idcscale')
            if (not hasattr(wcs_copy, '_naxis1')) & hasattr(wcs_copy, '_naxis'):
wcs_copy._naxis1, wcs_copy._naxis2 = wcs_copy._naxis
blotted = ablot.do_blot(ref_data[sly, slx],
ref_wcs.slice([sly, slx]),
wcs_copy, 1, coeffs=True, interp=interp,
sinscl=1.0, stepsize=10, wcsmap=None)
#print('xxx', blotted.max(), ref_data[sly, slx].max())
if background_func is not None:
msk = self.beams[ie].beam.seg == 0
#print(msk.shape, blotted.shape, ie)
if msk.sum() > 0:
if bkg_data is None:
bkg_data = blotted[msk]
else:
bkg_data = np.append(bkg_data, blotted[msk])
if thumb_labels is None:
self.beams[ie].direct.data['REF'] = blotted*ref_photflam
self.beams[ie].direct.ref_photflam = ref_photflam
self.beams[ie].direct.ref_photplam = ref_photplam
self.beams[ie].direct.ref_filter = ref_filter
# self.beams[ie].direct.ref_photflam
self.beams[ie].beam.direct = blotted*ref_photflam
else:
for label in thumb_labels:
self.beams[ie].thumbs[label] = blotted*ref_photflam
if bkg_data is not None:
for ie in range(self.N):
bkg_value = background_func(bkg_data)*ref_photflam
if thumb_labels is None:
self.beams[ie].direct.data['REF'] -= bkg_value
else:
for label in thumb_labels:
self.beams[ie].thumbs[label] -= bkg_value
## Recompute total_flux attribute
for b in self.beams:
b.beam.set_segmentation(b.beam.seg)
    def reshape_flat(self, flat_array):
        """Reshape a flattened array into a list of 2D arrays with the
        shapes of the individual beams
        """
out = []
i0 = 0
for ib in range(self.N):
im2d = flat_array[i0:i0+self.Nflat[ib]].reshape(self.shapes[ib])
out.append(im2d)
i0 += self.Nflat[ib]
return out
    def init_poly_coeffs(self, flat=None, poly_order=1):
        """Initialize the polynomial-continuum rows of the design matrix
        """
# Already done?
if poly_order < 0:
ok_poly = False
poly_order = 0
else:
ok_poly = True
if poly_order == self.poly_order:
return None
self.poly_order = poly_order
if flat is None:
flat = self.flat_flam
# Polynomial continuum arrays
self.A_poly = np.array([self.xpf**order*flat
for order in range(poly_order+1)])
self.A_poly *= ok_poly
self.n_poly = poly_order + 1
self.x_poly = np.array([(self.beams[0].beam.lam/1.e4-1)**order
for order in range(poly_order+1)])
def eval_poly_spec(self, coeffs_full):
"""Evaluate polynomial spectrum
"""
xspec = np.arange(self.polyx[0], self.polyx[1], 0.05)-1
i0 = self.N*self.fit_bg
scale_coeffs = coeffs_full[i0:i0+self.n_poly]
#yspec = [xspec**o*scale_coeffs[o] for o in range(self.poly_order+1)]
yfull = np.polyval(scale_coeffs[::-1], xspec)
return xspec, yfull
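    # Sketch of the evaluation above: with fit_bg=True the first N
    # coefficients are per-beam backgrounds and the next n_poly are the
    # polynomial terms in x = wave/1.e4 - 1, so scale_coeffs = [c0, c1]
    # gives f(x) = c0 + c1*x; np.polyval expects the highest order first,
    # hence the [::-1].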
def compute_model(self, id=None, spectrum_1d=None, is_cgs=False, apply_sensitivity=True, scale=None, reset=True):
"""
Compute the dispersed 2D model for an assumed input spectrum
This is a wrapper around the
`grizli.model.GrismDisperser.compute_model` method, where the
parameters are described.
Nothing returned, but the `model` and `modelf` attributes are
updated on the `~grizli.model.GrismDisperser` subcomponents of the
`beams` list.
"""
for beam in self.beams:
beam.beam.compute_model(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs,
scale=scale, reset=reset,
apply_sensitivity=apply_sensitivity)
beam.modelf = beam.beam.modelf
beam.model = beam.beam.modelf.reshape(beam.beam.sh_beam)
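    # Usage sketch (input spectrum hypothetical): set the 2D models to a
    # shallow power law in cgs f-lambda units:
    #   wave = np.arange(9000., 1.8e4, 10.)
    #   flux = 1.e-19 * (wave / 1.4e4)**-1
    #   mb.compute_model(spectrum_1d=[wave, flux], is_cgs=True)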
def compute_model_psf(self, id=None, spectrum_1d=None, is_cgs=False):
"""
Compute the dispersed 2D model for an assumed input spectrum and for
ePSF morphologies
This is a wrapper around the
`grizli.model.GrismDisperser.compute_model_psf` method, where the
parameters are described.
Nothing returned, but the `model` and `modelf` attributes are
updated on the `~grizli.model.GrismDisperser` subcomponents of the
`beams` list.
"""
for beam in self.beams:
beam.beam.compute_model_psf(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs)
beam.modelf = beam.beam.modelf
beam.model = beam.beam.modelf.reshape(beam.beam.sh_beam)
    def fit_at_z(self, z=0., templates={}, fitter='nnls',
                 fit_background=True, poly_order=0):
        """Fit the 2D spectra with a linear combination of redshifted
        templates, a polynomial continuum, and optional per-beam
        backgrounds at a fixed redshift
        """
try:
import sklearn.linear_model
HAS_SKLEARN = True
except:
HAS_SKLEARN = False
import numpy.linalg
import scipy.optimize
# print 'xxx Init poly'
self.init_poly_coeffs(poly_order=poly_order)
# print 'xxx Init bg'
if fit_background:
self.fit_bg = True
A = np.vstack((self.A_bg, self.A_poly))
else:
self.fit_bg = False
A = self.A_poly*1
NTEMP = len(templates)
A_temp = np.zeros((NTEMP, self.Ntot))
# print 'xxx Load templates'
        for i, key in enumerate(templates.keys()):
            # NTEMP is already len(templates); no counter needed here
            temp = templates[key]  # .zscale(z, 1.)
if hasattr(temp, 'flux_flam'):
# eazy-py Template object
spectrum_1d = [temp.wave*(1+z), temp.flux_flam(z=z)/(1+z)]
else:
spectrum_1d = [temp.wave*(1+z), temp.flux/(1+z)]
if z > 4:
try:
import eazy.igm
igm = eazy.igm.Inoue14()
igmz = igm.full_IGM(z, spectrum_1d[0])
spectrum_1d[1] *= igmz
# print('IGM')
except:
# No IGM
pass
i0 = 0
for ib in range(self.N):
beam = self.beams[ib]
lam_beam = beam.beam.lam_beam
if ((temp.wave.min()*(1+z) > lam_beam.max()) |
(temp.wave.max()*(1+z) < lam_beam.min())):
tmodel = 0.
else:
tmodel = beam.compute_model(spectrum_1d=spectrum_1d,
in_place=False, is_cgs=True) # /beam.beam.total_flux
A_temp[i, i0:i0+self.Nflat[ib]] = tmodel # .flatten()
i0 += self.Nflat[ib]
if NTEMP > 0:
A = np.vstack((A, A_temp))
ok_temp = np.sum(A, axis=1) > 0
out_coeffs = np.zeros(A.shape[0])
# LSTSQ coefficients
# print 'xxx Fitter'
fit_functions = {'lstsq': np.linalg.lstsq, 'nnls': scipy.optimize.nnls}
if fitter in fit_functions:
# 'lstsq':
Ax = A[:, self.fit_mask][ok_temp, :].T
# Weight by ivar
Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])
# print 'xxx lstsq'
#out = numpy.linalg.lstsq(Ax,y)
if fitter == 'lstsq':
y = self.scif[self.fit_mask]
# Weight by ivar
y *= np.sqrt(self.ivarf[self.fit_mask])
try:
out = np.linalg.lstsq(Ax, y, rcond=utils.LSTSQ_RCOND)
except:
print(A.min(), Ax.min(), self.fit_mask.sum(), y.min())
raise ValueError
lstsq_coeff, residuals, rank, s = out
coeffs = lstsq_coeff
if fitter == 'nnls':
                if fit_background:
                    # Offset the data so the per-beam backgrounds can be
                    # slightly negative within the NNLS constraint
                    off = 0.04
                    y = self.scif[self.fit_mask]+off
                    y *= np.sqrt(self.ivarf[self.fit_mask])
                    coeffs, rnorm = scipy.optimize.nnls(Ax, y)
                    coeffs[:self.N] -= off
else:
y = self.scif[self.fit_mask]
y *= np.sqrt(self.ivarf[self.fit_mask])
coeffs, rnorm = scipy.optimize.nnls(Ax, y)
# if fitter == 'bounded':
# if fit_background:
# off = 0.04
# y = self.scif[self.fit_mask]+off
# y *= self.ivarf[self.fit_mask]
#
# coeffs, rnorm = scipy.optimize.nnls(Ax, y+off)
# coeffs[:self.N] -= 0.04
# else:
# y = self.scif[self.fit_mask]
# y *= np.sqrt(self.ivarf[self.fit_mask])
#
# coeffs, rnorm = scipy.optimize.nnls(Ax, y)
#
# out = scipy.optimize.minimize(self.eval_trace_shift, shifts, bounds=bounds, args=args, method='Powell', tol=tol)
elif HAS_SKLEARN:
Ax = A[:, self.fit_mask][ok_temp, :].T
y = self.scif[self.fit_mask]
            # Weight by ivar
Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])
y *= np.sqrt(self.ivarf[self.fit_mask])
            clf = sklearn.linear_model.LinearRegression()
            clf.fit(Ax, y)
            coeffs = clf.coef_
out_coeffs[ok_temp] = coeffs
modelf = np.dot(out_coeffs, A)
chi2 = np.sum((self.weightf*(self.scif - modelf)**2*self.ivarf)[self.fit_mask])
if fit_background:
poly_coeffs = out_coeffs[self.N:self.N+self.n_poly]
else:
poly_coeffs = out_coeffs[:self.n_poly]
self.y_poly = np.dot(poly_coeffs, self.x_poly)
# x_poly = self.x_poly[1,:]+1 = self.beams[0].beam.lam/1.e4
return A, out_coeffs, chi2, modelf
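    # Usage sketch (template set hypothetical):
    #   templates = utils.load_templates(fwhm=1200, line_complexes=True)
    #   A, coeffs, chi2, model2d = mb.fit_at_z(z=1.25, templates=templates,
    #                                          fitter='nnls',
    #                                          fit_background=True,
    #                                          poly_order=1)
    # `model2d` is flattened; reshape it with mb.reshape_flat(model2d).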
def parse_fit_outputs(self, z, templates, coeffs_full, A):
"""Parse output from `fit_at_z`.
Parameters
----------
z : float
Redshift at which to evaluate the fits.
templates : list of `~grizli.utils.SpectrumTemplate` objects
Generated with, e.g., `~grizli.utils.load_templates`.
coeffs_full : `~np.ndarray`
Template fit coefficients
A : `~np.ndarray`
Matrix generated for fits and used for computing model 2D spectra:
>>> model_flat = np.dot(coeffs_full, A)
>>> # mb = MultiBeam(...)
>>> all_models = mb.reshape_flat(model_flat)
>>> m0 = all_models[0] # model for mb.beams[0]
Returns
-------
line_flux : dict
Line fluxes and uncertainties, in cgs units (erg/s/cm2)
covar : `~np.ndarray`
Covariance matrix for the fit coefficients
        cont1d, model1d : `~grizli.utils.SpectrumTemplate`
            Best-fit continuum and full (continuum + line) templates
        line1d : dict
            Best-fit `~grizli.utils.SpectrumTemplate` for each line
model_continuum : `~np.ndarray`
Flat array of the best fit 2D continuum
"""
from collections import OrderedDict
# Covariance matrix for line flux uncertainties
Ax = A[:, self.fit_mask]
ok_temp = (np.sum(Ax, axis=1) > 0) & (coeffs_full != 0)
Ax = Ax[ok_temp, :].T*1 # A[:, self.fit_mask][ok_temp,:].T
Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])
try:
#covar = np.matrix(np.dot(Ax.T, Ax)).I
covar = utils.safe_invert(np.dot(Ax.T, Ax))
covard = np.sqrt(covar.diagonal())
except:
N = ok_temp.sum()
covar = np.zeros((N, N))
covard = np.zeros(N) # -1.
covar_full = utils.fill_masked_covar(covar, ok_temp)
# Random draws from covariance matrix
# draws = np.random.multivariate_normal(coeffs_full[ok_temp], covar, size=500)
line_flux_err = coeffs_full*0.
line_flux_err[ok_temp] = covard
# Continuum fit
mask = np.isfinite(coeffs_full)
for i, key in enumerate(templates.keys()):
if key.startswith('line'):
mask[self.N*self.fit_bg+self.n_poly+i] = False
model_continuum = np.dot(coeffs_full*mask, A)
self.model_continuum = self.reshape_flat(model_continuum)
# model_continuum.reshape(self.beam.sh_beam)
# 1D spectrum
# Polynomial component
xspec, yspec = self.eval_poly_spec(coeffs_full)
model1d = utils.SpectrumTemplate((xspec+1)*1.e4, yspec)
cont1d = model1d*1
i0 = self.fit_bg*self.N + self.n_poly
line_flux = OrderedDict()
fscl = 1. # self.beams[0].beam.total_flux/1.e-17
line1d = OrderedDict()
for i, key in enumerate(templates.keys()):
temp_i = templates[key].zscale(z, coeffs_full[i0+i])
model1d += temp_i
if not key.startswith('line'):
cont1d += temp_i
else:
line1d[key.split()[1]] = temp_i
line_flux[key.split()[1]] = np.array([coeffs_full[i0+i]*fscl,
line_flux_err[i0+i]*fscl])
return line_flux, covar_full, cont1d, line1d, model1d, model_continuum
def fit_stars(self, poly_order=1, fitter='nnls', fit_background=True,
verbose=True, make_figure=True, zoom=None,
delta_chi2_threshold=0.004, zr=0, dz=0, fwhm=0,
prior=None, templates={}, figsize=[8, 5],
fsps_templates=False):
"""TBD
"""
# Polynomial fit
out = self.fit_at_z(z=0., templates={}, fitter='lstsq',
poly_order=3,
fit_background=fit_background)
A, coeffs, chi2_poly, model_2d = out
# Star templates
templates = utils.load_templates(fwhm=fwhm, stars=True)
NTEMP = len(templates)
key = list(templates)[0]
temp_i = {key: templates[key]}
out = self.fit_at_z(z=0., templates=temp_i, fitter=fitter,
poly_order=poly_order,
fit_background=fit_background)
A, coeffs, chi2, model_2d = out
chi2 = np.zeros(NTEMP)
coeffs = np.zeros((NTEMP, coeffs.shape[0]))
chi2min = 1e30
iz = 0
best = key
for i, key in enumerate(list(templates)):
temp_i = {key: templates[key]}
out = self.fit_at_z(z=0., templates=temp_i,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs[i, :], chi2[i], model_2d = out
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
best = key
if verbose:
print(utils.NO_NEWLINE + ' {0} {1:9.1f} ({2})'.format(key, chi2[i], best))
# Best-fit
temp_i = {best: templates[best]}
out = self.fit_at_z(z=0., templates=temp_i,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_full, chi2_best, model_full = out
# Continuum fit
mask = np.isfinite(coeffs_full)
for i, key in enumerate(templates.keys()):
if key.startswith('line'):
mask[self.N*self.fit_bg+self.n_poly+i] = False
model_continuum = np.dot(coeffs_full*mask, A)
self.model_continuum = self.reshape_flat(model_continuum)
# model_continuum.reshape(self.beam.sh_beam)
# 1D spectrum
# xspec = np.arange(0.3, 2.35, 0.05)-1
# scale_coeffs = coeffs_full[self.N*self.fit_bg:
# self.N*self.fit_bg+self.n_poly]
#
# yspec = [xspec**o*scale_coeffs[o] for o in range(self.poly_order+1)]
xspec, yspec = self.eval_poly_spec(coeffs_full)
model1d = utils.SpectrumTemplate((xspec+1)*1.e4, yspec)
cont1d = model1d*1
i0 = self.fit_bg*self.N + self.n_poly
line_flux = OrderedDict()
fscl = 1. # self.beams[0].beam.total_flux/1.e-17
temp_i = templates[best].zscale(0, coeffs_full[i0])
model1d += temp_i
cont1d += temp_i
fit_data = OrderedDict()
fit_data['poly_order'] = poly_order
fit_data['fwhm'] = 0
fit_data['zbest'] = np.argmin(chi2)
fit_data['chibest'] = chi2_best
fit_data['chi_poly'] = chi2_poly
fit_data['zgrid'] = np.arange(NTEMP)
fit_data['prior'] = 1
fit_data['A'] = A
fit_data['coeffs'] = coeffs
fit_data['chi2'] = chi2
fit_data['DoF'] = self.DoF
fit_data['model_full'] = model_full
fit_data['coeffs_full'] = coeffs_full
fit_data['line_flux'] = {}
#fit_data['templates_full'] = templates
fit_data['model_cont'] = model_continuum
fit_data['model1d'] = model1d
fit_data['cont1d'] = cont1d
# return fit_data
fig = None
if make_figure:
fig = self.show_redshift_fit(fit_data)
# fig.savefig('fit.pdf')
return fit_data, fig
def fit_redshift(self, prior=None, poly_order=1, fwhm=1200,
make_figure=True, zr=None, dz=None, verbose=True,
fit_background=True, fitter='nnls',
delta_chi2_threshold=0.004, zoom=True,
line_complexes=True, templates={}, figsize=[8, 5],
fsps_templates=False):
"""TBD
"""
from scipy import polyfit, polyval
if zr is None:
zr = [0.65, 1.6]
if dz is None:
dz = [0.005, 0.0004]
if zr in [0]:
stars = True
zr = [0, 0.01]
fitter = 'nnls'
else:
stars = False
zgrid = utils.log_zgrid(zr, dz=dz[0])
NZ = len(zgrid)
# Polynomial fit
out = self.fit_at_z(z=0., templates={}, fitter='lstsq',
poly_order=3,
fit_background=fit_background)
A, coeffs, chi2_poly, model_2d = out
# Set up for template fit
if templates == {}:
templates = utils.load_templates(fwhm=fwhm, stars=stars, line_complexes=line_complexes, fsps_templates=fsps_templates)
else:
if verbose:
print('User templates! N={0} \n'.format(len(templates)))
NTEMP = len(templates)
out = self.fit_at_z(z=0., templates=templates, fitter=fitter,
poly_order=poly_order,
fit_background=fit_background)
A, coeffs, chi2, model_2d = out
chi2 = np.zeros(NZ)
coeffs = np.zeros((NZ, coeffs.shape[0]))
chi2min = 1e30
iz = 0
for i in range(NZ):
out = self.fit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs[i, :], chi2[i], model_2d = out
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
if verbose:
print(utils.NO_NEWLINE + ' {0:.4f} {1:9.1f} ({2:.4f})'.format(zgrid[i], chi2[i], zgrid[iz]))
print('First iteration: z_best={0:.4f}\n'.format(zgrid[iz]))
# peaks
import peakutils
# chi2nu = (chi2.min()-chi2)/self.DoF
# indexes = peakutils.indexes((chi2nu+delta_chi2_threshold)*(chi2nu > -delta_chi2_threshold), thres=0.3, min_dist=20)
chi2_rev = (chi2_poly - chi2)/self.DoF
if chi2_poly < (chi2.min() + 9):
chi2_rev = (chi2.min() + 16 - chi2)/self.DoF
chi2_rev[chi2_rev < 0] = 0
indexes = peakutils.indexes(chi2_rev, thres=0.4, min_dist=8)
num_peaks = len(indexes)
if False:
plt.plot(zgrid, (chi2-chi2.min()) / self.DoF)
plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes] / self.DoF, color='r')
# delta_chi2 = (chi2.max()-chi2.min())/self.DoF
# if delta_chi2 > delta_chi2_threshold:
if (num_peaks > 0) & (not stars) & zoom:
zgrid_zoom = []
for ix in indexes:
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zi = -c[1]/(2*c[0])
chi_i = polyval(c, zi)
zgrid_zoom.extend(np.arange(zi-2*dz[0],
zi+2*dz[0]+dz[1]/10., dz[1]))
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF,
# threshold=delta_chi2_threshold,
# factor=dz[0]/dz[1])
NZOOM = len(zgrid_zoom)
chi2_zoom = np.zeros(NZOOM)
coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
iz = 0
chi2min = 1.e30
for i in range(NZOOM):
out = self.fit_at_z(z=zgrid_zoom[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_zoom[i, :], chi2_zoom[i], model_2d = out
if chi2_zoom[i] < chi2min:
chi2min = chi2_zoom[i]
iz = i
if verbose:
print(utils.NO_NEWLINE+'- {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid_zoom[i], chi2_zoom[i], zgrid_zoom[iz], i+1, NZOOM))
zgrid = np.append(zgrid, zgrid_zoom)
chi2 = np.append(chi2, chi2_zoom)
coeffs = np.append(coeffs, coeffs_zoom, axis=0)
so = np.argsort(zgrid)
zgrid = zgrid[so]
chi2 = chi2[so]
coeffs = coeffs[so, :]
if prior is not None:
#print('\n\nPrior!\n\n', chi2.min(), prior[1].min())
interp_prior = np.interp(zgrid, prior[0], prior[1])
chi2 += interp_prior
else:
interp_prior = None
print(' Zoom iteration: z_best={0:.4f}\n'.format(zgrid[np.argmin(chi2)]))
# Best redshift
if not stars:
templates = utils.load_templates(line_complexes=False, fwhm=fwhm, fsps_templates=fsps_templates)
zbest = zgrid[np.argmin(chi2)]
ix = np.argmin(chi2)
chibest = chi2.min()
# Fit parabola
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zbest = -c[1]/(2*c[0])
chibest = polyval(c, zbest)
out = self.fit_at_z(z=zbest, templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_full, chi2_best, model_full = out
# Parse results
out2 = self.parse_fit_outputs(zbest, templates, coeffs_full, A)
line_flux, covar, cont1d, line1d, model1d, model_continuum = out2
# Output dictionary with fit parameters
fit_data = OrderedDict()
fit_data['poly_order'] = poly_order
fit_data['fwhm'] = fwhm
fit_data['zbest'] = zbest
fit_data['chibest'] = chibest
fit_data['chi_poly'] = chi2_poly
fit_data['zgrid'] = zgrid
fit_data['prior'] = interp_prior
fit_data['A'] = A
fit_data['coeffs'] = coeffs
fit_data['chi2'] = chi2
fit_data['DoF'] = self.DoF
fit_data['model_full'] = model_full
fit_data['coeffs_full'] = coeffs_full
fit_data['covar'] = covar
fit_data['line_flux'] = line_flux
#fit_data['templates_full'] = templates
fit_data['model_cont'] = model_continuum
fit_data['model1d'] = model1d
fit_data['cont1d'] = cont1d
fit_data['line1d'] = line1d
# return fit_data
fig = None
if make_figure:
fig = self.show_redshift_fit(fit_data, figsize=figsize)
# fig.savefig('fit.pdf')
return fit_data, fig
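    # Usage sketch: coarse grid with dz=0.005 and 0.0004 steps in the
    # zoomed refinement around chi-squared minima (values are the
    # defaults, shown explicitly):
    #   fit_data, fig = mb.fit_redshift(zr=[0.65, 1.6], dz=[0.005, 0.0004],
    #                                   fwhm=1200, poly_order=1)
    #   zbest = fit_data['zbest']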
def run_individual_fits(self, z=0, templates={}):
"""Run template fits on each *exposure* individually to evaluate
variance in line and continuum fits.
Parameters
----------
z : float
Redshift at which to evaluate the fit
templates : list of `~grizli.utils.SpectrumTemplate` objects
Generated with, e.g., `load_templates`.
Returns
-------
line_flux, line_err : dict
Dictionaries with the measured line fluxes and uncertainties for
each exposure fit.
coeffs_list : `~np.ndarray` [Nbeam x Ntemplate]
Raw fit coefficients
chi2_list, DoF_list : `~np.ndarray` [Nbeam]
Chi-squared and effective degrees of freedom for each separate fit
"""
# Fit on the full set of beams
out = self.fit_at_z(z=z, templates=templates,
fitter='nnls', poly_order=self.poly_order,
fit_background=self.fit_bg)
A, coeffs_full, chi2_best, model_full = out
out2 = self.parse_fit_outputs(z, templates, coeffs_full, A)
line, covar, cont1d, line1d, model1d, model_continuum = out2
NB, NTEMP = len(self.beams), len(templates)
# Outputs
coeffs_list = np.zeros((NB, NTEMP))
chi2_list = np.zeros(NB)
DoF_list = np.zeros(NB)
line_flux = OrderedDict()
line_err = OrderedDict()
line_keys = list(line.keys())
for k in line_keys:
line_flux[k] = np.zeros(NB)
line_err[k] = np.zeros(NB)
# Generate separate MultiBeam objects for each individual beam
for i, b in enumerate(self.beams):
b_i = MultiBeam([b], fcontam=self.fcontam,
group_name=self.group_name)
out_i = b_i.fit_at_z(z=z, templates=templates,
fitter='nnls', poly_order=self.poly_order,
fit_background=self.fit_bg)
A_i, coeffs_i, chi2_i, model_full_i = out_i
# Parse fit information from individual fits
out2 = b_i.parse_fit_outputs(z, templates, coeffs_i, A_i)
line_i, covar_i, cont1d_i, line1d_i, model1d_i, model_continuum_i = out2
for k in line_keys:
line_flux[k][i] = line_i[k][0]
line_err[k][i] = line_i[k][1]
coeffs_list[i, :] = coeffs_i[-NTEMP:]
chi2_list[i] = chi2_i
DoF_list[i] = b_i.DoF
return line_flux, line_err, coeffs_list, chi2_list, DoF_list
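    # Usage sketch: per-exposure line fluxes as a consistency check
    # (redshift and template set hypothetical):
    #   lf, le, coeffs, chi2, dof = mb.run_individual_fits(z=zbest,
    #                                                      templates=templates)
    #   ha_sn = lf['Ha'] / le['Ha']  # Ha S/N in each exposure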
    def show_redshift_fit(self, fit_data, plot_flambda=True, figsize=[8, 5]):
        """Make a figure with the chi-squared vs. redshift curve and the
        observed and model 1D spectra
        """
import matplotlib.gridspec
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[0.6, 1])
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(gs[0])
c2min = fit_data['chi2'].min()
scale_pz = True
if scale_pz:
scale_nu = c2min/self.DoF
scl_label = '_s'
else:
scale_nu = 1.
scl_label = ''
#axz.plot(z, (chi2-chi2.min())/scale_nu, color='k')
#ax.plot(fit_data['zgrid'], fit_data['chi2']/self.DoF)
ax.plot(fit_data['zgrid'], (fit_data['chi2']-c2min)/scale_nu)
ax.set_xlabel('z')
        ax.set_ylim(-4, 27)
        # The {2} placeholder appends the '_s' suffix when chi2 is scaled
        ax.set_ylabel(r'$\Delta\chi^2{2}$ ({0:.0f}/$\nu$={1:d})'.format(c2min, self.DoF, scl_label))
ax.set_yticks([1, 4, 9, 16, 25])
# for delta in [1,4,9]:
# ax.plot(fit_data['zgrid'],
# fit_data['zgrid']*0.+(c2min+delta)/self.DoF,
# color='{0:.2f}'.format(1-delta*1./10))
ax.plot(fit_data['zgrid'], (fit_data['chi2']*0+fit_data['chi_poly']-c2min)/scale_nu, color='b', linestyle='--', alpha=0.8)
ax.set_xlim(fit_data['zgrid'].min(), fit_data['zgrid'].max())
ax.grid()
ax.set_title(r'ID = {0:d}, $z_\mathrm{{grism}}$={1:.4f}'.format(self.beams[0].id, fit_data['zbest']))
ax = fig.add_subplot(gs[1])
ymax = 0
ymin = 1e10
continuum_fit = self.reshape_flat(fit_data['model_cont'])
line_fit = self.reshape_flat(fit_data['model_full'])
grisms = self.Ngrism.keys()
wfull = {}
ffull = {}
efull = {}
for grism in grisms:
wfull[grism] = []
ffull[grism] = []
efull[grism] = []
for ib in range(self.N):
beam = self.beams[ib]
clean = beam.grism['SCI'] - beam.contam
if self.fit_bg:
bg_i = fit_data['coeffs_full'][ib]
clean -= bg_i # background
else:
bg_i = 0.
#ivar = 1./(1./beam.ivar + self.fcontam*beam.contam)
#ivar[~np.isfinite(ivar)] = 0
# New weight scheme
ivar = beam.ivar
weight = np.exp(-(self.fcontam*np.abs(beam.contam)*np.sqrt(ivar)))
wave, flux, err = beam.beam.optimal_extract(clean,
ivar=ivar,
weight=weight)
mwave, mflux, merr = beam.beam.optimal_extract(line_fit[ib]-bg_i,
ivar=ivar,
weight=weight)
flat = beam.flat_flam.reshape(beam.beam.sh_beam)
wave, fflux, ferr = beam.beam.optimal_extract(flat, ivar=ivar,
weight=weight)
if plot_flambda:
ok = beam.beam.sensitivity > 0.1*beam.beam.sensitivity.max()
wave = wave[ok]
fscl = 1./1.e-19 # beam.beam.total_flux/1.e-17
flux = (flux*fscl/fflux)[ok]*beam.beam.scale
err = (err*fscl/fflux)[ok]
mflux = (mflux*fscl/fflux)[ok]*beam.beam.scale
ylabel = r'$f_\lambda\,/\,10^{-19}\,\mathrm{cgs}$'
else:
ylabel = 'flux (e-/s)'
scl_region = np.isfinite(mflux)
if scl_region.sum() == 0:
continue
# try:
# okerr = np.isfinite(err) #& (np.abs(flux/err) > 0.2) & (err != 0)
# med_err = np.median(err[okerr])
#
# ymax = np.maximum(ymax,
# (mflux[scl_region][2:-2] + med_err).max())
# ymin = np.minimum(ymin,
# (mflux[scl_region][2:-2] - med_err).min())
# except:
# continue
#okerr = (err != 0) & (np.abs(flux/err) > 0.2)
okerr = np.isfinite(err)
ax.errorbar(wave[okerr]/1.e4, flux[okerr], err[okerr], alpha=0.15+0.2*(self.N <= 2), linestyle='None', marker='.', color='{0:.2f}'.format(ib*0.5/self.N), zorder=1)
ax.plot(wave[okerr]/1.e4, mflux[okerr], color='r', alpha=0.5, zorder=3)
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
# for grism in grisms:
wfull[grism] = np.append(wfull[grism], wave[okerr])
ffull[grism] = np.append(ffull[grism], flux[okerr])
efull[grism] = np.append(efull[grism], err[okerr])
# Scatter direct image flux
if beam.direct.ref_photplam is None:
ax.scatter(beam.direct.photplam/1.e4, beam.beam.total_flux/1.e-19, marker='s', edgecolor='k', color=GRISM_COLORS[grism], alpha=0.2, zorder=100, s=100)
else:
ax.scatter(beam.direct.ref_photplam/1.e4, beam.beam.total_flux/1.e-19, marker='s', edgecolor='k', color=GRISM_COLORS[grism], alpha=0.2, zorder=100, s=100)
for grism in grisms:
if self.Ngrism[grism] > 1:
# binned
okb = (np.isfinite(wfull[grism]) & np.isfinite(ffull[grism]) &
np.isfinite(efull[grism]))
so = np.argsort(wfull[grism][okb])
var = efull[grism]**2
N = int(np.ceil(self.Ngrism[grism]/2)*2)*2
kernel = np.ones(N, dtype=float)/N
wht = 1/var[okb][so]
fbin = nd.convolve(ffull[grism][okb][so]*wht, kernel)[N//2::N]
wbin = nd.convolve(wfull[grism][okb][so]*wht, kernel)[N//2::N]
#vbin = nd.convolve(var[okb][so], kernel**2)[N//2::N]
wht_bin = nd.convolve(wht, kernel)[N//2::N]
vbin = nd.convolve(wht, kernel**2)[N//2::N]/wht_bin**2
fbin /= wht_bin
wbin /= wht_bin
#vbin = 1./wht_bin
ax.errorbar(wbin/1.e4, fbin, np.sqrt(vbin), alpha=0.8,
linestyle='None', marker='.',
color=GRISM_COLORS[grism], zorder=2)
med_err = np.median(np.sqrt(vbin))
ymin = np.minimum(ymin, (fbin-2*med_err).min())
ymax = np.maximum(ymax, (fbin+2*med_err).max())
ymin = np.maximum(0, ymin)
ax.set_ylim(ymin - 0.2*np.abs(ymax), 1.3*ymax)
xmin, xmax = 1.e5, 0
for g in GRISM_LIMITS:
if g in grisms:
xmin = np.minimum(xmin, GRISM_LIMITS[g][0])
xmax = np.maximum(xmax, GRISM_LIMITS[g][1])
# print g, xmin, xmax
ax.set_xlim(xmin, xmax)
        ax.semilogx(subs=[xmax])
# axc.set_xticklabels([])
# axc.set_xlabel(r'$\lambda$')
#axc.set_ylabel(r'$f_\lambda \times 10^{-19}$')
from matplotlib.ticker import MultipleLocator
ax.xaxis.set_major_locator(MultipleLocator(0.1))
labels = np.arange(np.ceil(xmin*10), np.ceil(xmax*10))/10.
ax.set_xticks(labels)
ax.set_xticklabels(labels)
ax.grid()
# Label
ax.text(0.03, 1.03, ('{0}'.format(self.Ngrism)).replace('\'', '').replace('{', '').replace('}', ''), ha='left', va='bottom', transform=ax.transAxes, fontsize=10)
#ax.plot(wave/1.e4, wave/1.e4*0., linestyle='--', color='k')
ax.hlines(0, xmin, xmax, linestyle='--', color='k')
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(ylabel)
gs.tight_layout(fig, pad=0.1)
return fig
def redshift_fit_twod_figure(self, fit, spatial_scale=1, dlam=46., NY=10,
figsize=[8, 3.5], **kwargs):
"""Make figure of 2D spectrum
TBD
"""
# xlimits
xmin, xmax = 1.e5, 0
for g in GRISM_LIMITS:
if g in self.Ngrism:
xmin = np.minimum(xmin, GRISM_LIMITS[g][0])
xmax = np.maximum(xmax, GRISM_LIMITS[g][1])
hdu_sci = drizzle_2d_spectrum(self.beams, ds9=None, NY=NY,
spatial_scale=spatial_scale, dlam=dlam,
kernel='point', pixfrac=0.6,
wlimit=[xmin, xmax],
fcontam=self.fcontam)
# Continuum model
cont = self.reshape_flat(fit['model_cont'])
hdu_con = drizzle_2d_spectrum(self.beams, data=cont, ds9=None, NY=NY,
spatial_scale=spatial_scale, dlam=dlam,
kernel='point', pixfrac=0.6,
wlimit=[xmin, xmax],
fcontam=self.fcontam)
full = self.reshape_flat(fit['model_full'])
hdu_full = drizzle_2d_spectrum(self.beams, data=full, ds9=None, NY=NY,
spatial_scale=spatial_scale, dlam=dlam,
kernel='point', pixfrac=0.6,
wlimit=[xmin, xmax],
fcontam=self.fcontam)
clip = hdu_full['WHT'].data > np.percentile(hdu_full['WHT'].data, 30)
#vmax = np.maximum(1.1*np.percentile(hdu_full['SCI'].data[clip], 98), 0.04)
avg_rms = 1/np.median(np.sqrt(hdu_full['WHT'].data[clip]))
vmax = np.maximum(1.1*np.percentile(hdu_full['SCI'].data[clip], 98), 5*avg_rms)
# print 'VMAX: %f\n\n' %vmax
sh = hdu_full[1].data.shape
extent = [hdu_full[0].header['WMIN'], hdu_full[0].header['WMAX'],
0, sh[0]]
fig = plt.figure(figsize=figsize)
show = [hdu_sci[1].data, hdu_full[1].data,
hdu_sci[1].data-hdu_con[1].data]
desc = [r'$Contam$'+'\n'+r'$Cleaned$', r'$Model$', r'$Line$'+'\n'+r'$Residual$']
i = 0
for data_i, desc_i in zip(show, desc):
            ax = fig.add_subplot(len(show), 1, i+1)
ax.imshow(data_i, origin='lower',
interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
extent=extent, cmap=plt.cm.viridis_r,
aspect='auto')
ax.set_yticklabels([])
ax.set_ylabel(desc_i)
i += 1
for ax in fig.axes[:-1]:
ax.set_xticklabels([])
fig.axes[-1].set_xlabel(r'$\lambda$')
fig.tight_layout(pad=0.2)
# Label
label = 'ID={0:6d}, z={1:.4f}'.format(self.beams[0].id, fit['zbest'])
fig.axes[-1].text(0.97, -0.27, label, ha='right', va='top',
transform=fig.axes[-1].transAxes, fontsize=10)
label2 = ('{0}'.format(self.Ngrism)).replace('\'', '').replace('{', '').replace('}', '')
fig.axes[-1].text(0.03, -0.27, label2, ha='left', va='top',
transform=fig.axes[-1].transAxes, fontsize=10)
hdu_sci.append(hdu_con[1])
hdu_sci[-1].name = 'CONTINUUM'
hdu_sci.append(hdu_full[1])
hdu_sci[-1].name = 'FULL'
return fig, hdu_sci
def drizzle_segmentation(self, wcsobj=None, kernel='square', pixfrac=1, verbose=False):
"""
Drizzle segmentation image from individual `MultiBeam.beams`.
Parameters
----------
wcsobj: `~astropy.wcs.WCS` or `~astropy.io.fits.Header`
Output WCS.
kernel: e.g., 'square', 'point', 'gaussian'
Drizzle kernel, see `~drizzlepac.adrizzle.drizzle`.
pixfrac: float
Drizzle 'pixfrac', see `~drizzlepac.adrizzle.drizzle`.
verbose: bool
Print status messages.
        Returns
        -------
drizzled_segm: `~numpy.ndarray`, type `~numpy.int64`.
Drizzled segmentation image, with image dimensions and
WCS defined in `wcsobj`.
"""
import numpy as np
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from . import utils
except:
from grizli import multifit, utils
all_ids = [np.unique(beam.beam.seg) for beam in self.beams]
all_ids = np.unique(np.hstack(all_ids))[1:]
if isinstance(wcsobj, pyfits.Header):
wcs = pywcs.WCS(wcsobj)
wcs.pscale = utils.get_wcs_pscale(wcs)
else:
wcs = wcsobj
if not hasattr(wcs, 'pscale'):
wcs.pscale = utils.get_wcs_pscale(wcs)
if verbose:
print('Drizzle ID={0:.0f} (primary)'.format(self.id))
drizzled_segm = self.drizzle_segmentation_id(id=self.id, wcsobj=wcsobj, kernel=kernel, pixfrac=pixfrac, verbose=verbose)
for id in all_ids:
if int(id) == self.id:
continue
if verbose:
print('Drizzle ID={0:.0f}'.format(id))
dseg_i = self.drizzle_segmentation_id(id=id, wcsobj=wcsobj, kernel=kernel, pixfrac=pixfrac, verbose=False)
new_seg = drizzled_segm == 0
drizzled_segm[new_seg] = dseg_i[new_seg]
return drizzled_segm
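    # Usage sketch: drizzle the segmentation onto the WCS of an existing
    # drizzled product (filename and extension hypothetical):
    #   line_hdu = pyfits.open('grp_00123.line.fits')
    #   segm = mb.drizzle_segmentation(wcsobj=line_hdu['DSCI'].header)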
def drizzle_segmentation_id(self, id=None, wcsobj=None, kernel='square', pixfrac=1, verbose=True):
"""
Drizzle segmentation image for a single ID
"""
import numpy as np
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from . import utils
except:
from grizli import multifit, utils
# Can be either a header or WCS object
if isinstance(wcsobj, pyfits.Header):
wcs = pywcs.WCS(wcsobj)
wcs.pscale = utils.get_wcs_pscale(wcs)
else:
wcs = wcsobj
if not hasattr(wcs, 'pscale'):
wcs.pscale = utils.get_wcs_pscale(wcs)
if id is None:
id = self.id
sci_list = [(beam.beam.seg == id)*1. for beam in self.beams]
wht_list = [np.isfinite(beam.beam.seg)*1. for beam in self.beams]
wcs_list = [beam.direct.wcs for beam in self.beams]
out = utils.drizzle_array_groups(sci_list, wht_list, wcs_list, outputwcs=wcs, scale=0.1, kernel=kernel, pixfrac=pixfrac, verbose=verbose)
drizzled_segm = (out[0] > 0)*id
return drizzled_segm
def drizzle_fit_lines(self, fit, pline, force_line=['Ha+NII', 'Ha', 'OIII', 'Hb', 'OII'], save_fits=True, mask_lines=True, mask_sn_limit=3, mask_4959=True, verbose=True, include_segmentation=True, get_ir_psfs=True, min_line_sn=4):
"""
TBD
"""
line_wavelengths, line_ratios = utils.get_line_wavelengths()
hdu_full = []
saved_lines = []
if ('cfit' in fit) & mask_4959:
if 'line OIII' in fit['templates']:
t_o3 = utils.load_templates(fwhm=fit['templates']['line OIII'].fwhm, line_complexes=False, stars=False, full_line_list=['OIII-4959'], continuum_list=[], fsps_templates=False)
if 'zbest' in fit:
z_driz = fit['zbest']
else:
z_driz = fit['z']
if 'line_flux' in fit:
line_flux_dict = fit['line_flux']
else:
line_flux_dict = OrderedDict()
for key in fit['cfit']:
if key.startswith('line'):
line_flux_dict[key.replace('line ', '')] = fit['cfit'][key]
# Compute continuum model
if 'cfit' in fit:
if 'bg {0:03d}'.format(self.N-1) in fit['cfit']:
for ib, beam in enumerate(self.beams):
key = 'bg {0:03d}'.format(ib)
self.beams[ib].background = fit['cfit'][key][0]
cont = fit['cont1d']
for beam in self.beams:
beam.compute_model(spectrum_1d=[cont.wave, cont.flux],
is_cgs=True)
if hasattr(self, 'pscale'):
if (self.pscale is not None):
scale = self.compute_scale_array(self.pscale, beam.wavef)
beam.beam.pscale_array = scale.reshape(beam.sh)
else:
beam.beam.pscale_array = 1.
else:
beam.beam.pscale_array = 1.
for line in line_flux_dict:
line_flux, line_err = line_flux_dict[line]
if line_err == 0:
continue
# Skip if min_line_sn = inf
if not np.isfinite(min_line_sn):
continue
if (line_flux/line_err > min_line_sn) | (line in force_line):
if verbose:
print('Drizzle line -> {0:4s} ({1:.2f} {2:.2f})'.format(line, line_flux/1.e-17, line_err/1.e-17))
line_wave_obs = line_wavelengths[line][0]*(1+z_driz)
if mask_lines:
for beam in self.beams:
beam.oivar = beam.ivar*1
lam = beam.beam.lam_beam
if hasattr(beam.beam, 'pscale_array'):
pscale_array = beam.beam.pscale_array
else:
pscale_array = 1.
# another idea, compute a model for the line itself
# and mask relatively "contaminated" pixels from
# other lines
try:
lm = fit['line1d'][line]
sp = [lm.wave, lm.flux]
except:
key = 'line ' + line
lm = fit['templates'][key]
line_flux = fit['cfit'][key][0]
scl = line_flux/(1+z_driz)
sp = [lm.wave*(1+z_driz), lm.flux*scl]
#lm = fit['line1d'][line]
if ((lm.wave.max() < lam.min()) |
(lm.wave.min() > lam.max())):
continue
#sp = [lm.wave, lm.flux]
if line_flux > 0:
m = beam.compute_model(spectrum_1d=sp,
in_place=False, is_cgs=True)
lmodel = m.reshape(beam.beam.sh_beam)*pscale_array
else:
lmodel = np.zeros(beam.beam.sh_beam)
# if lmodel.max() == 0:
# continue
if 'cfit' in fit:
keys = fit['cfit']
else:
keys = fit['line1d']
beam.extra_lines = beam.contam*0.
for lkey in keys:
if not lkey.startswith('line'):
continue
key = lkey.replace('line ', '')
lf, le = line_flux_dict[key]
# Don't mask if the line missing or undetected
if (lf <= 0): # | (lf < mask_sn_limit*le):
continue
if key != line:
try:
lm = fit['line1d'][lkey]
sp = [lm.wave, lm.flux]
except:
lm = fit['templates'][lkey]
scl = fit['cfit'][lkey][0]/(1+z_driz)
sp = [lm.wave*(1+z_driz), lm.flux*scl]
if ((lm.wave.max() < lam.min()) |
(lm.wave.min() > lam.max())):
continue
m = beam.compute_model(spectrum_1d=sp,
in_place=False,
is_cgs=True)
lcontam = m.reshape(beam.beam.sh_beam)
lcontam *= pscale_array
if lcontam.max() == 0:
# print beam.grism.parent_file, lkey
continue
beam.extra_lines += lcontam
# Only mask if line flux > 0
if line_flux > 0:
extra_msk = lcontam > mask_sn_limit*lmodel
extra_msk &= (lcontam > 0)
extra_msk &= (lmodel > 0)
beam.ivar[extra_msk] *= 0
# Subtract 4959
if (line == 'OIII') & ('cfit' in fit) & mask_4959:
lm = t_o3['line OIII-4959']
scl = fit['cfit']['line OIII'][0]/(1+z_driz)
scl *= 1./(2.98+1)
sp = [lm.wave*(1+z_driz), lm.flux*scl]
if ((lm.wave.max() < lam.min()) |
(lm.wave.min() > lam.max())):
continue
m = beam.compute_model(spectrum_1d=sp,
in_place=False,
is_cgs=True)
lcontam = m.reshape(beam.beam.sh_beam)
lcontam *= pscale_array
if lcontam.max() == 0:
continue
#print('Mask 4959!')
beam.extra_lines += lcontam
hdu = drizzle_to_wavelength(self.beams, ra=self.ra,
dec=self.dec, wave=line_wave_obs,
fcontam=self.fcontam,
**pline)
if mask_lines:
for beam in self.beams:
beam.ivar = beam.oivar*1
delattr(beam, 'oivar')
hdu[0].header['REDSHIFT'] = (z_driz, 'Redshift used')
# for e in [3,4,5,6]:
for e in [-4, -3, -2, -1]:
hdu[e].header['EXTVER'] = line
hdu[e].header['REDSHIFT'] = (z_driz, 'Redshift used')
hdu[e].header['RESTWAVE'] = (line_wavelengths[line][0],
'Line rest wavelength')
saved_lines.append(line)
if len(hdu_full) == 0:
hdu_full = hdu
hdu_full[0].header['NUMLINES'] = (1,
"Number of lines in this file")
else:
hdu_full.extend(hdu[-4:])
hdu_full[0].header['NUMLINES'] += 1
# Make sure DSCI extension is filled. Can be empty for
# lines at the edge of the grism throughput
for f_i in range(hdu[0].header['NDFILT']):
filt_i = hdu[0].header['DFILT{0:02d}'.format(f_i+1)]
if hdu['DWHT', filt_i].data.max() != 0:
hdu_full['DSCI', filt_i] = hdu['DSCI', filt_i]
hdu_full['DWHT', filt_i] = hdu['DWHT', filt_i]
li = hdu_full[0].header['NUMLINES']
hdu_full[0].header['LINE{0:03d}'.format(li)] = line
hdu_full[0].header['FLUX{0:03d}'.format(li)] = (line_flux,
'Line flux, 1e-17 erg/s/cm2')
hdu_full[0].header['ERR{0:03d}'.format(li)] = (line_err,
'Line flux err, 1e-17 erg/s/cm2')
if len(hdu_full) > 0:
hdu_full[0].header['HASLINES'] = (' '.join(saved_lines),
'Lines in this file')
else:
hdu = drizzle_to_wavelength(self.beams, ra=self.ra,
dec=self.dec,
wave=np.median(self.beams[0].wave),
fcontam=self.fcontam,
**pline)
hdu_full = hdu[:-4]
hdu_full[0].header['REDSHIFT'] = (z_driz, 'Redshift used')
hdu_full[0].header['NUMLINES'] = 0
hdu_full[0].header['HASLINES'] = ' '
if include_segmentation:
line_wcs = pywcs.WCS(hdu_full[1].header)
segm = self.drizzle_segmentation(wcsobj=line_wcs)
seg_hdu = pyfits.ImageHDU(data=segm.astype(np.int32), name='SEG')
hdu_full.insert(1, seg_hdu)
if get_ir_psfs:
import grizli.galfit.psf
ir_beams = []
gr_filters = {'G102': ['F105W'], 'G141': ['F105W', 'F125W', 'F140W', 'F160W']}
show_filters = []
for gr in ['G102', 'G141']:
if gr in self.PA:
show_filters.extend(gr_filters[gr])
for pa in self.PA[gr]:
for i in self.PA[gr][pa]:
ir_beams.append(self.beams[i])
if len(ir_beams) > 0:
dp = grizli.galfit.psf.DrizzlePSF(driz_hdu=hdu_full['DSCI'],
beams=self.beams)
for filt in np.unique(show_filters):
if verbose:
print('Get linemap PSF: {0}'.format(filt))
psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
dec=dp.driz_wcs.wcs.crval[1],
filter=filt,
pixfrac=dp.driz_header['PIXFRAC'],
kernel=dp.driz_header['DRIZKRNL'],
wcs_slice=dp.driz_wcs, get_extended=True,
verbose=False, get_weight=False)
psf[1].header['EXTNAME'] = 'DPSF'
psf[1].header['EXTVER'] = filt
hdu_full.append(psf[1])
if save_fits:
hdu_full.writeto('{0}_{1:05d}.line.fits'.format(self.group_name, self.id), overwrite=True, output_verify='silentfix')
return hdu_full
def run_full_diagnostics(self, pzfit={}, pspec2={}, pline={},
force_line=['Ha+NII', 'Ha', 'OIII', 'Hb', 'OII'],
GroupFLT=None,
prior=None, zoom=True, verbose=True):
"""TBD
size=20, pixscale=0.1,
pixfrac=0.2, kernel='square'
"""
import copy
# Defaults
pzfit_def, pspec2_def, pline_def = get_redshift_fit_defaults()
if pzfit == {}:
pzfit = pzfit_def
if pspec2 == {}:
pspec2 = pspec2_def
if pline == {}:
pline = pline_def
# Check that keywords allowed
for d, default in zip([pzfit, pspec2, pline],
[pzfit_def, pspec2_def, pline_def]):
            # Iterate over a copy so keys can be popped during the loop
            for key in list(d):
                if key not in default:
                    d.pop(key)
# Auto generate FWHM (in km/s) to use for line fits
if 'fwhm' in pzfit:
fwhm = pzfit['fwhm']
if pzfit['fwhm'] == 0:
if 'G141' in self.Ngrism:
# WFC3/IR
fwhm = 1200
elif 'G800L' in self.Ngrism:
# ACS/WFC
fwhm = 1400
elif 'G280' in self.Ngrism:
# UVIS
fwhm = 1500
elif 'GRISM' in self.Ngrism:
# WFIRST
fwhm = 350
elif 'G150' in self.Ngrism:
# WFIRST
fwhm = 350
else:
fwhm = 700
# Auto generate delta-wavelength of 2D spectrum
if 'dlam' in pspec2:
dlam = pspec2['dlam']
if dlam == 0:
if 'G141' in self.Ngrism:
dlam = 45
elif 'G800L' in self.Ngrism:
dlam = 40
elif 'G280' in self.Ngrism:
dlam = 18
elif 'GRISM' in self.Ngrism:
dlam = 11
elif 'G150' in self.Ngrism:
dlam = 11
else:
dlam = 25 # G102
# Redshift fit
zfit_in = copy.copy(pzfit)
zfit_in['fwhm'] = fwhm
zfit_in['prior'] = prior
zfit_in['zoom'] = zoom
zfit_in['verbose'] = verbose
if zfit_in['zr'] in [0]:
fit, fig = self.fit_stars(**zfit_in)
else:
fit, fig = self.fit_redshift(**zfit_in)
# Make sure model attributes are set to the continuum model
models = self.reshape_flat(fit['model_cont'])
for j in range(self.N):
self.beams[j].model = models[j]*1
# 2D spectrum
spec_in = copy.copy(pspec2)
spec_in['fit'] = fit
spec_in['dlam'] = dlam
# fig2, hdu2 = self.redshift_fit_twod_figure(**spec_in)#, kwargs=spec2) #dlam=dlam, spatial_scale=spatial_scale, NY=NY)
fig2 = hdu2 = None
# Update master model
if GroupFLT is not None:
try:
ix = GroupFLT.catalog['NUMBER'] == self.beams[0].id
mag = GroupFLT.catalog['MAG_AUTO'][ix].data[0]
except:
mag = 22
sp = fit['cont1d']
            GroupFLT.compute_single_model(self.id, mag=mag, size=-1,
                                          store=False,
                                          spectrum_1d=[sp.wave, sp.flux],
                                          is_cgs=True,
                                          get_beams=None, in_place=True)
# 2D lines to drizzle
hdu_full = self.drizzle_fit_lines(fit, pline, force_line=force_line,
save_fits=True)
fit['id'] = self.id
fit['fit_bg'] = self.fit_bg
fit['grism_files'] = [b.grism.parent_file for b in self.beams]
for item in ['A', 'coeffs', 'model_full', 'model_cont']:
if item in fit:
p = fit.pop(item)
#p = fit.pop('coeffs')
np.save('{0}_{1:05d}.zfit.npy'.format(self.group_name, self.id), [fit])
fig.savefig('{0}_{1:05d}.zfit.png'.format(self.group_name, self.id))
#fig2.savefig('{0}_{1:05d}.zfit.2D.png'.format(self.group_name, self.id))
#hdu2.writeto('{0}_{1:05d}.zfit.2D.fits'.format(self.group_name, self.id), overwrite=True, output_verify='silentfix')
label = '# id ra dec zbest '
data = '{0:7d} {1:.6f} {2:.6f} {3:.5f}'.format(self.id, self.ra, self.dec,
fit['zbest'])
for grism in ['G800L', 'G280', 'G102', 'G141', 'GRISM']:
label += ' N{0}'.format(grism)
if grism in self.Ngrism:
data += ' {0:2d}'.format(self.Ngrism[grism])
else:
data += ' {0:2d}'.format(0)
label += ' chi2 DoF '
data += ' {0:14.1f} {1:d} '.format(fit['chibest'], self.DoF)
        for line in ['SII', 'Ha', 'OIII', 'Hb', 'Hg', 'OII']:
            label += ' {0} {0}_err'.format(line)
            if line in fit['line_flux']:
                flux = fit['line_flux'][line][0]
                err = fit['line_flux'][line][1]
                data += ' {0:10.3e} {1:10.3e}'.format(flux, err)
            else:
                # Keep the columns aligned when a line is not in the fit
                data += ' {0:10.3e} {1:10.3e}'.format(0, 0)
fp = open('{0}_{1:05d}.zfit.dat'.format(self.group_name, self.id), 'w')
fp.write(label+'\n')
fp.write(data+'\n')
fp.close()
fp = open('{0}_{1:05d}.zfit.beams.dat'.format(self.group_name, self.id), 'w')
fp.write('# file filter origin_x origin_y size pad bg\n')
for ib, beam in enumerate(self.beams):
data = '{0:40s} {1:s} {2:5d} {3:5d} {4:5d} {5:5d}'.format(beam.grism.parent_file, beam.grism.filter,
beam.direct.origin[0],
beam.direct.origin[1],
beam.direct.sh[0],
beam.direct.pad)
if self.fit_bg:
data += ' {0:8.4f}'.format(fit['coeffs_full'][ib])
else:
data += ' {0:8.4f}'.format(0.0)
fp.write(data + '\n')
fp.close()
# Save figures
plt_status = plt.rcParams['interactive']
# if not plt_status:
# plt.close(fig)
# plt.close(fig2)
return fit, fig, fig2, hdu2, hdu_full
def apply_trace_shift(self, set_to_zero=False):
"""
Set beam.yoffset back to zero
"""
indices = [[i] for i in range(self.N)]
if set_to_zero:
s0 = np.zeros(len(indices))
else:
s0 = [beam.beam.yoffset for beam in self.beams]
args = (self, indices, 0, False, False, True)
self.eval_trace_shift(s0, *args)
# Reset model profile for optimal extractions
for b in self.beams:
# b._parse_from_data()
if hasattr(b, 'has_sys_err'):
delattr(b, 'has_sys_err')
b._parse_from_data(**b._parse_params)
self._parse_beam_arrays()
def fit_trace_shift(self, split_groups=True, max_shift=5, tol=1.e-2,
verbose=True, lm=False, fit_with_psf=False,
reset=False):
"""TBD
"""
from scipy.optimize import leastsq, minimize
if split_groups:
indices = []
for g in self.PA:
for p in self.PA[g]:
indices.append(self.PA[g][p])
else:
indices = [[i] for i in range(self.N)]
s0 = np.zeros(len(indices))
bounds = np.array([[-max_shift, max_shift]]*len(indices))
args = (self, indices, 0, lm, verbose, fit_with_psf)
if reset:
shifts = np.zeros(len(indices))
out = None
elif lm:
out = leastsq(self.eval_trace_shift, s0, args=args, Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None)
shifts = out[0]
else:
out = minimize(self.eval_trace_shift, s0, bounds=bounds, args=args, method='Powell', tol=tol)
if out.x.shape == ():
shifts = [float(out.x)]
else:
shifts = out.x
# Apply to PSF if necessary
args = (self, indices, 0, lm, verbose, True)
self.eval_trace_shift(shifts, *args)
# Reset model profile for optimal extractions
for b in self.beams:
# b._parse_from_data()
b._parse_from_data(**b._parse_params)
# Needed for background modeling
if hasattr(b, 'xp'):
delattr(b, 'xp')
self._parse_beam_arrays()
self.initialize_masked_arrays()
return shifts, out
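    # Usage sketch: refine the trace positions before redshift fitting,
    # one offset per (grism, PA) group:
    #   shifts, out = mb.fit_trace_shift(split_groups=True, max_shift=3,
    #                                    tol=1.e-3)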
@staticmethod
def eval_trace_shift(shifts, self, indices, poly_order, lm, verbose, fit_with_psf):
"""TBD
"""
import scipy.ndimage as nd
for il, l in enumerate(indices):
for i in l:
beam = self.beams[i]
beam.beam.add_ytrace_offset(shifts[il])
if hasattr(self.beams[i].beam, 'psf') & fit_with_psf:
#beam.model = nd.shift(beam.modelf.reshape(beam.sh_beam), (shifts[il], 0))
# This is slow, so run with fit_with_psf=False if possible
beam.init_epsf(yoff=0, # shifts[il],
psf_params=beam.beam.psf_params)
beam.compute_model(use_psf=True)
m = beam.compute_model(in_place=False)
#beam.modelf = beam.model.flatten()
#beam.model = beam.modelf.reshape(beam.beam.sh_beam)
beam.flat_flam = beam.compute_model(in_place=False, is_cgs=True)
else:
# self.beams[i].beam.add_ytrace_offset(shifts[il])
# self.beams[i].compute_model(is_cgs=True)
beam.compute_model(use_psf=False)
self.flat_flam = np.hstack([b.beam.model.flatten() for b in self.beams])
self.poly_order = -1
self.init_poly_coeffs(poly_order=poly_order)
self.fit_bg = False
        A = self.A_poly*1
        ok_temp = np.sum(A, axis=1) != 0
        y = self.scif
        out = np.linalg.lstsq(A.T, y, rcond=utils.LSTSQ_RCOND)
        coeffs, residuals, rank, s = out
        out_coeffs = np.zeros(A.shape[0])
        out_coeffs[ok_temp] = coeffs[ok_temp]
modelf = np.dot(out_coeffs, A)
if lm:
# L-M, return residuals
if verbose:
print('{0} [{1}]'.format(utils.NO_NEWLINE, ' '.join(['{0:5.2f}'.format(s) for s in shifts])))
return ((self.scif-modelf)*self.sivarf)[self.fit_mask]
chi2 = np.sum(((self.scif - modelf)**2*self.ivarf)[self.fit_mask])
if verbose:
print('{0} [{1}] {2:6.2f}'.format(utils.NO_NEWLINE, ' '.join(['{0:5.2f}'.format(s) for s in shifts]), chi2/self.DoF))
return chi2/self.DoF
def drizzle_grisms_and_PAs(self, size=10, fcontam=0, flambda=False, scale=1, pixfrac=0.5, kernel='square', usewcs=False, tfit=None, diff=True, grism_list=['G800L', 'G102', 'G141', 'F090W', 'F115W', 'F150W', 'F200W', 'F356W', 'F410M', 'F444W'], mask_segmentation=True, reset_model=True, make_figure=True, fig_args=dict(mask_segmentation=True, average_only=False, scale_size=1, cmap='viridis_r'), **kwargs):
"""Make figure showing spectra at different orients/grisms
TBD
"""
from matplotlib.ticker import MultipleLocator
#import pysynphot as S
if usewcs:
drizzle_function = drizzle_2d_spectrum_wcs
else:
drizzle_function = drizzle_2d_spectrum
if 'zfit' in kwargs:
tfit = kwargs['zfit']
NX = len(self.PA)
NY = 0
for g in self.PA:
NY = np.maximum(NY, len(self.PA[g]))
NY += 1
# keys = list(self.PA)
# keys.sort()
keys = []
for key in grism_list:
if key in self.PA:
keys.append(key)
if tfit is not None:
if 'coeffs_full' in tfit:
bg = tfit['coeffs_full'][:self.N]
z_cont = tfit['zbest']
else:
# fitting.GroupFitter
z_cont = tfit['z']
bg = []
for k in tfit['cfit']:
if k.startswith('bg '):
bg.append(tfit['cfit'][k][0])
bg = np.array(bg)
else:
# Fit background
try:
out = self.xfit_at_z(z=0, templates={}, fitter='lstsq',
poly_order=3, fit_background=True)
bg = out[-3][:self.N]
except:
bg = [0]*self.N
for ib, beam in enumerate(self.beams):
beam.bg = bg[ib]
prim = pyfits.PrimaryHDU()
h0 = prim.header
h0['ID'] = (self.id, 'Object ID')
h0['RA'] = (self.ra, 'Right ascension')
h0['DEC'] = (self.dec, 'Declination')
h0['ISFLAM'] = (flambda, 'Pixels in f-lam units')
h0['FCONTAM'] = (fcontam, 'Contamination parameter')
h0['NGRISM'] = (len(keys), 'Number of grisms')
all_hdus = []
for ig, g in enumerate(keys):
all_beams = []
hdus = []
pas = list(self.PA[g].keys())
pas.sort()
h0['GRISM{0:03d}'.format(ig+1)] = (g, 'Grism name')
h0['N'+g] = (len(pas), 'Number of PAs for grism '+g)
for ipa, pa in enumerate(pas):
h0[g+'{0:02d}'.format(ipa+1)] = (pa, 'PA')
beams = [self.beams[i] for i in self.PA[g][pa]]
all_beams.extend(beams)
#dlam = np.ceil(np.diff(beams[0].beam.lam)[0])*scale
dlam = GRISM_LIMITS[g][2]*scale
data = [beam.grism['SCI']-beam.contam-beam.bg
for beam in beams]
hdu = drizzle_function(beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=0, ds9=None,
mask_segmentation=mask_segmentation)
hdu[0].header['RA'] = (self.ra, 'Right ascension')
hdu[0].header['DEC'] = (self.dec, 'Declination')
hdu[0].header['GRISM'] = (g, 'Grism')
hdu[0].header['PA'] = (pa, 'Dispersion PA')
hdu[0].header['ISFLAM'] = (flambda, 'Pixels in f-lam units')
hdu[0].header['CONF'] = (beams[0].beam.conf.conf_file,
'Configuration file')
hdu[0].header['DLAM0'] = (np.median(np.diff(beams[0].wave)),
'Native dispersion per pix')
# Contam
data = [beam.contam for beam in beams]
hdu_contam = drizzle_function(beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=0, ds9=None,
mask_segmentation=mask_segmentation)
hdu_contam[1].header['EXTNAME'] = 'CONTAM'
hdu.append(hdu_contam[1])
# Continuum model
if tfit is not None:
m = tfit['cont1d']
for beam in beams:
beam.compute_model(spectrum_1d=[m.wave, m.flux],
is_cgs=True)
else:
if reset_model:
# simple flat spectrum
for beam in beams:
beam.compute_model()
data = []
for beam in beams:
if hasattr(beam.beam, 'pscale_array'):
data.append(beam.beam.model*beam.beam.pscale_array)
else:
data.append(beam.beam.model)
hdu_model = drizzle_function(beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=0, ds9=None,
mask_segmentation=mask_segmentation)
hdu_model[1].header['EXTNAME'] = 'MODEL'
if tfit is not None:
hdu_model[1].header['CONTIN1D'] = (True, 'Model is fit continuum')
hdu_model[1].header['REDSHIFT'] = (z_cont, 'Redshift of the continuum spectrum')
else:
hdu_model[1].header['CONTIN1D'] = (False, 'Model is fit continuum')
hdu.append(hdu_model[1])
# Line kernel
if (not usewcs):
h = hdu[1].header
#gau = S.GaussianSource(1.e-17, h['CRVAL1'], h['CD1_1']*1)
# header keywords scaled to um
toA = 1.e4
#toA = 1.
#gau = S.GaussianSource(1., h['CRVAL1']*toA, h['CD1_1']*toA)
gau = utils.SpectrumTemplate(central_wave=h['CRVAL1']*toA, fwhm=h['CD1_1']*toA)
#print('XXX', h['CRVAL1'], h['CD1_1'], h['CRPIX1'], toA, gau.wave[np.argmax(gau.flux)])
if reset_model:
for beam in beams:
beam.compute_model(spectrum_1d=[gau.wave,
gau.flux],
is_cgs=True)
data = [beam.beam.model for beam in beams]
h_kern = drizzle_function(beams, data=data,
wlimit=GRISM_LIMITS[g],
dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=0, fill_wht=True,
ds9=None,
mask_segmentation=mask_segmentation)
kern = h_kern[1].data[:, h['CRPIX1']-1-size:h['CRPIX1']-1+size]
#print('XXX', kern.max(), h_kern[1].data.max())
hdu_kern = pyfits.ImageHDU(data=kern, header=h_kern[1].header, name='KERNEL')
hdu.append(hdu_kern)
else:
hdu['DSCI'].header['EXTNAME'] = 'KERNEL'
# Pull out zeroth extension
for k in hdu[0].header:
hdu[1].header[k] = hdu[0].header[k]
for e in hdu[1:]:
e.header['EXTVER'] = '{0},{1}'.format(g, pa)
hdus.append(hdu[1:])
# Stack of each grism
data = [beam.grism['SCI']-beam.contam-beam.bg
for beam in all_beams]
hdu = drizzle_function(all_beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=fcontam, ds9=None,
mask_segmentation=mask_segmentation)
hdu[0].header['RA'] = (self.ra, 'Right ascension')
hdu[0].header['DEC'] = (self.dec, 'Declination')
hdu[0].header['GRISM'] = (g, 'Grism')
hdu[0].header['ISFLAM'] = (flambda, 'Pixels in f-lam units')
hdu[0].header['CONF'] = (beams[0].beam.conf.conf_file,
'Configuration file')
hdu[0].header['DLAM0'] = (np.median(np.diff(beams[0].wave)),
'Native dispersion per pix')
# Full continuum model
if tfit is not None:
if diff > 1:
m = tfit['line1d']
else:
m = tfit['cont1d']
for beam in all_beams:
beam.compute_model(spectrum_1d=[m.wave, m.flux],
is_cgs=True)
else:
if reset_model:
for beam in all_beams:
beam.compute_model()
#data = [beam.beam.model for beam in all_beams]
data = []
for beam in all_beams:
if hasattr(beam.beam, 'pscale_array'):
data.append(beam.beam.model*beam.beam.pscale_array)
else:
data.append(beam.beam.model)
hdu_model = drizzle_function(all_beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=fcontam, ds9=None,
mask_segmentation=mask_segmentation)
hdu_model[1].header['EXTNAME'] = 'MODEL'
if tfit is not None:
hdu_model[1].header['CONTIN1D'] = (True, 'Model is fit continuum')
hdu_model[1].header['REDSHIFT'] = (z_cont, 'Redshift of the continuum spectrum')
else:
hdu_model[1].header['CONTIN1D'] = (False, 'Model is fit continuum')
hdu.append(hdu_model[1])
# Full kernel
h = hdu[1].header
#gau = S.GaussianSource(1.e-17, h['CRVAL1'], h['CD1_1']*1)
toA = 1.e4
#gau = S.GaussianSource(1., h['CRVAL1']*toA, h['CD1_1']*toA)
gau = utils.SpectrumTemplate(central_wave=h['CRVAL1']*toA, fwhm=h['CD1_1']*toA)
if reset_model:
for beam in all_beams:
beam.compute_model(spectrum_1d=[gau.wave, gau.flux],
is_cgs=True)
data = [beam.beam.model for beam in all_beams]
h_kern = drizzle_function(all_beams, data=data,
wlimit=GRISM_LIMITS[g], dlam=dlam,
spatial_scale=scale, NY=size,
pixfrac=pixfrac,
kernel=kernel,
convert_to_flambda=flambda,
fcontam=0, fill_wht=True, ds9=None,
mask_segmentation=mask_segmentation)
kern = h_kern[1].data[:, h['CRPIX1']-1-size:h['CRPIX1']-1+size]
hdu_kern = pyfits.ImageHDU(data=kern, header=h_kern[1].header, name='KERNEL')
hdu.append(hdu_kern)
# Pull out zeroth extension
for k in hdu[0].header:
hdu[1].header[k] = hdu[0].header[k]
for e in hdu[1:]:
e.header['EXTVER'] = '{0}'.format(g)
hdus.append(hdu[1:])
all_hdus.extend(hdus)
output_hdu = pyfits.HDUList([prim])
for hdu in all_hdus:
output_hdu.extend(hdu)
if make_figure:
fig = show_drizzle_HDU(output_hdu, diff=diff, **fig_args)
return output_hdu, fig
else:
return output_hdu # all_hdus
def flag_with_drizzled(self, hdul, sigma=4, update=True, interp='nearest', verbose=True):
"""
Update `MultiBeam` masks based on the blotted drizzled combined image
        (work in progress)
Parameters
----------
hdul : `~astropy.io.fits.HDUList`
FITS HDU list output from `drizzle_grisms_and_PAs` or read from a
`stack.fits` file.
sigma : float
Residual threshold to flag.
update : bool
Update the mask.
interp : str
Interpolation method for `~drizzlepac.ablot`.
Returns
-------
        Updates the `fit_mask` attribute of each beam in place if
        `update==True`; nothing is returned.
"""
try:
from drizzle.doblot import doblot
blotter = doblot
        except ImportError:
from drizzlepac import ablot
blotter = ablot.do_blot
# Read the drizzled arrays
Ng = hdul[0].header['NGRISM']
ref_wcs = {}
ref_data = {}
flag_grism = {}
for i in range(Ng):
g = hdul[0].header['GRISM{0:03d}'.format(i+1)]
ref_wcs[g] = pywcs.WCS(hdul['SCI', g].header)
ref_wcs[g].pscale = utils.get_wcs_pscale(ref_wcs[g])
ref_data[g] = hdul['SCI', g].data
flag_grism[g] = hdul[0].header['N{0}'.format(g)] > 1
# Do the masking
for i, beam in enumerate(self.beams):
g = beam.grism.filter
if not flag_grism[g]:
continue
beam_header, flt_wcs = beam.full_2d_wcs()
blotted = blotter(ref_data[g], ref_wcs[g],
flt_wcs, 1,
coeffs=True, interp=interp, sinscl=1.0,
stepsize=10, wcsmap=None)
resid = (beam.grism['SCI'] - beam.contam - blotted)
resid *= np.sqrt(beam.ivar)
blot_mask = (blotted != 0) & (np.abs(resid) < sigma)
if verbose:
print('Beam {0:>3d}: {1:>4d} new masked pixels'.format(i, beam.fit_mask.sum() - (beam.fit_mask & blot_mask.flatten()).sum()))
if update:
beam.fit_mask &= blot_mask.flatten()
if update:
self._parse_beams()
self.initialize_masked_arrays()
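    # A minimal usage sketch (hypothetical names; assumes `mb` is this
    # `MultiBeam` instance and a drizzled stack file exists on disk):
    #
    #     >>> hdul = pyfits.open('stack.fits')
    #     >>> mb.flag_with_drizzled(hdul, sigma=4, update=True)
    #
    # Pixels whose blotted residual exceeds `sigma` (in sigma units) are
    # removed from each beam's `fit_mask`.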
def oned_spectrum(self, tfit=None, get_contam=True, get_background=False, masked_model=None, **kwargs):
"""Compute full 1D spectrum with optional best-fit model
Parameters
----------
        tfit : dict
            Output of `~grizli.fitting.mb.template_at_z`.
        get_contam : bool
            Also extract the contamination model.
        get_background : bool
            Also extract the background model (requires `tfit`).
        masked_model : array-like, optional
            Precomputed masked model array to extract.
        bin : float / int, optional
            Bin factor relative to the size of the native spectral bins of a
            given grism; passed through `**kwargs` to `optimal_extract`.
Returns
-------
sp : dict
Dictionary of the extracted 1D spectra. Keys are the grism
names and the values are `~astropy.table.Table` objects.
"""
import astropy.units as u
# "Flat" spectrum to perform flux calibration
if self.Nphot > 0:
flat_data = self.flat_flam[self.fit_mask[:-self.Nphotbands]]
else:
flat_data = self.flat_flam[self.fit_mask]
sp_flat = self.optimal_extract(flat_data, **kwargs)
# Best-fit line and continuum models, with background fit
if tfit is not None:
bg_model = self.get_flat_background(tfit['coeffs'],
apply_mask=True)
line_model = self.get_flat_model([tfit['line1d'].wave,
tfit['line1d'].flux])
cont_model = self.get_flat_model([tfit['line1d'].wave,
tfit['cont1d'].flux])
sp_line = self.optimal_extract(line_model, **kwargs)
sp_cont = self.optimal_extract(cont_model, **kwargs)
elif masked_model is not None:
bg_model = 0.
sp_model = self.optimal_extract(masked_model, **kwargs)
else:
bg_model = 0.
# Optimal spectral extraction
sp = self.optimal_extract(self.scif_mask[:self.Nspec]-bg_model, **kwargs)
if get_contam:
spc = self.optimal_extract(self.contamf_mask[:self.Nspec],
**kwargs)
if (tfit is not None) & (get_background):
bgm = self.get_flat_background(tfit['coeffs'], apply_mask=True)
sp_bg = self.optimal_extract(bgm[:self.Nspec], **kwargs)
else:
sp_bg = None
# Loop through grisms, change units and add fit columns
# NB: setting units to "count / s" to comply with FITS standard,
# where count / s = electron / s
for k in sp:
sp[k]['flat'] = sp_flat[k]['flux']
flat_unit = (u.count / u.s) / (u.erg / u.s / u.cm**2 / u.AA)
sp[k]['flat'].unit = flat_unit
sp[k]['flux'].unit = u.count / u.s
sp[k]['err'].unit = u.count / u.s
if get_contam:
sp[k]['contam'] = spc[k]['flux']
sp[k]['contam'].unit = u.count / u.s
if tfit is not None:
sp[k]['line'] = sp_line[k]['flux']
sp[k]['line'].unit = u.count / u.s
sp[k]['cont'] = sp_cont[k]['flux']
sp[k]['cont'].unit = u.count / u.s
if masked_model is not None:
sp[k]['model'] = sp_model[k]['flux']
sp[k]['model'].unit = u.count / u.s
if sp_bg is not None:
sp[k]['background'] = sp_bg[k]['flux']
sp[k]['background'].unit = u.count / u.s
sp[k].meta['GRISM'] = (k, 'Grism name')
# Metadata
exptime = count = 0
for pa in self.PA[k]:
for i in self.PA[k][pa]:
exptime += self.beams[i].grism.header['EXPTIME']
count += 1
parent = (self.beams[i].grism.parent_file, 'Parent file')
sp[k].meta['FILE{0:04d}'.format(count)] = parent
sp[k].meta['NEXP'] = (count, 'Number of exposures')
sp[k].meta['EXPTIME'] = (exptime, 'Total exposure time')
sp[k].meta['NPA'] = (len(self.PA[k]), 'Number of PAs')
# PSCALE
if hasattr(self, 'pscale'):
if (self.pscale is not None):
pscale = self.compute_scale_array(self.pscale,
sp[k]['wave'])
sp[k]['pscale'] = pscale
sp[k].meta['PSCALEN'] = (len(self.pscale)-1,
'PSCALE order')
for i, p in enumerate(self.pscale):
sp[k].meta['PSCALE{0}'.format(i)] = (p,
'PSCALE parameter {0}'.format(i))
return sp
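    # A minimal usage sketch (hypothetical names; assumes `mb` is a
    # `MultiBeam` and `tfit` is the output of a template fit):
    #
    #     >>> sp = mb.oned_spectrum(tfit=tfit, get_contam=True)
    #     >>> tab = sp['G141']                  # one Table per grism
    #     >>> flam = tab['flux'] / tab['flat']  # count rate -> f-lambda
    #
    # Dividing the 'flux' column by 'flat' applies the flux calibration
    # derived from the flat f-lambda spectrum computed above.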
def oned_spectrum_to_hdu(self, sp=None, outputfile=None, units=None, **kwargs):
"""Generate 1D spectra fits HDUList
Parameters
----------
sp : optional, dict
Output of `~grizli.multifit.MultiBeam.oned_spectrum`. If None,
then run that function with `**kwargs`.
outputfile : None, str
If a string supplied, then write the `~astropy.io.fits.HDUList` to
a file.
Returns
-------
hdul : `~astropy.io.fits.HDUList`
FITS version of the 1D spectrum tables.
"""
from astropy.io.fits.convenience import table_to_hdu
# Generate the spectrum if necessary
if sp is None:
sp = self.oned_spectrum(**kwargs)
# Metadata in PrimaryHDU
prim = pyfits.PrimaryHDU()
prim.header['ID'] = (self.id, 'Object ID')
prim.header['RA'] = (self.ra, 'Right Ascension')
prim.header['DEC'] = (self.dec, 'Declination')
prim.header['TARGET'] = (self.group_name, 'Target Name')
prim.header['MW_EBV'] = (self.MW_EBV, 'Galactic extinction E(B-V)')
for g in ['G102', 'G141', 'G800L']:
if g in sp:
prim.header['N_{0}'.format(g)] = sp[g].meta['NEXP']
prim.header['T_{0}'.format(g)] = sp[g].meta['EXPTIME']
prim.header['PA_{0}'.format(g)] = sp[g].meta['NPA']
else:
prim.header['N_{0}'.format(g)] = (0, 'Number of exposures')
prim.header['T_{0}'.format(g)] = (0, 'Total exposure time')
prim.header['PA_{0}'.format(g)] = (0, 'Number of PAs')
for i, k in enumerate(sp):
prim.header['GRISM{0:03d}'.format(i+1)] = (k, 'Grism name')
# Generate HDUList
hdul = [prim]
for k in sp:
hdu = table_to_hdu(sp[k])
hdu.header['EXTNAME'] = k
hdul.append(hdu)
# Outputs
hdul = pyfits.HDUList(hdul)
if outputfile is None:
return hdul
else:
hdul.writeto(outputfile, overwrite=True)
return hdul
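    # Usage sketch (hypothetical output filename):
    #
    #     >>> hdul = mb.oned_spectrum_to_hdu(outputfile='object.1D.fits',
    #     ...                                tfit=tfit)
    #
    # When `sp` is not given, extra keywords (here `tfit`) are forwarded to
    # `oned_spectrum`.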
def make_simple_hdulist(self):
"""
        Make a `~astropy.io.fits.HDUList` object with just a simple
        PrimaryHDU.
"""
p = pyfits.PrimaryHDU()
p.header['ID'] = (self.id, 'Object ID')
p.header['RA'] = (self.ra, 'R.A.')
p.header['DEC'] = (self.dec, 'Decl.')
p.header['NINPUT'] = (len(self.beams), 'Number of drizzled beams')
p.header['HASLINES'] = ('', 'Lines in this file')
for i, beam in enumerate(self.beams):
p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
'Parent filename')
p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
'Beam grism element')
p.header['PA{0:04d}'.format(i+1)] = (beam.get_dispersion_PA(),
'PA of dispersion axis')
return pyfits.HDUList(p)
def check_for_bad_PAs(self, poly_order=1, chi2_threshold=1.5, fit_background=True, reinit=True):
"""
"""
wave = np.linspace(2000, 2.5e4, 100)
poly_templates = utils.polynomial_templates(wave, order=poly_order)
fit_log = OrderedDict()
keep_dict = {}
has_bad = False
keep_beams = []
for g in self.PA:
fit_log[g] = OrderedDict()
keep_dict[g] = []
for pa in self.PA[g]:
beams = [self.beams[i] for i in self.PA[g][pa]]
mb_i = MultiBeam(beams, fcontam=self.fcontam,
sys_err=self.sys_err, min_sens=self.min_sens,
min_mask=self.min_mask,
mask_resid=self.mask_resid,
MW_EBV=self.MW_EBV)
try:
chi2, _, _, _ = mb_i.xfit_at_z(z=0,
templates=poly_templates,
fit_background=fit_background)
                except Exception:
                    # Treat failed fits as infinitely bad
                    chi2 = 1e30
if False:
p_i = mb_i.template_at_z(z=0, templates=poly_templates, fit_background=fit_background, fitter='lstsq', fwhm=1400, get_uncertainties=2)
fit_log[g][pa] = {'chi2': chi2, 'DoF': mb_i.DoF,
'chi_nu': chi2/np.maximum(mb_i.DoF, 1)}
min_chinu = 1e30
for pa in self.PA[g]:
min_chinu = np.minimum(min_chinu, fit_log[g][pa]['chi_nu'])
fit_log[g]['min_chinu'] = min_chinu
for pa in self.PA[g]:
fit_log[g][pa]['chinu_ratio'] = fit_log[g][pa]['chi_nu']/min_chinu
if fit_log[g][pa]['chinu_ratio'] < chi2_threshold:
keep_dict[g].append(pa)
keep_beams.extend([self.beams[i] for i in self.PA[g][pa]])
else:
has_bad = True
if reinit:
self.beams = keep_beams
self._parse_beams(psf=self.psf_param_dict is not None)
return fit_log, keep_dict, has_bad
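    # Usage sketch (assumes `mb` is a `MultiBeam`):
    #
    #     >>> fit_log, keep_dict, has_bad = mb.check_for_bad_PAs()
    #     >>> keep_dict  # surviving PAs, keyed by grism
    #
    # A PA is dropped when its reduced chi-squared exceeds `chi2_threshold`
    # times the minimum over PAs of the same grism; with `reinit=True` the
    # beam list is rebuilt from the survivors.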
def get_redshift_fit_defaults():
"""TBD
"""
pzfit_def = dict(zr=[0.5, 1.6], dz=[0.005, 0.0004], fwhm=0,
poly_order=0, fit_background=True,
delta_chi2_threshold=0.004, fitter='nnls',
prior=None, templates={}, figsize=[8, 5],
fsps_templates=False)
pspec2_def = dict(dlam=0, spatial_scale=1, NY=20, figsize=[8, 3.5])
pline_def = dict(size=20, pixscale=0.1, pixfrac=0.2, kernel='square',
wcs=None)
return pzfit_def, pspec2_def, pline_def
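# The defaults are meant to be unpacked and selectively overridden, e.g.:
#
#     >>> pzfit, pspec2, pline = get_redshift_fit_defaults()
#     >>> pzfit['zr'] = [0.1, 3.0]   # hypothetical wider redshift range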
def drizzle_2d_spectrum(beams, data=None, wlimit=[1.05, 1.75], dlam=50,
spatial_scale=1, NY=10, pixfrac=0.6, kernel='square',
convert_to_flambda=True, fcontam=0.2, fill_wht=False,
ds9=None, mask_segmentation=True):
"""Drizzle 2D spectrum from a list of beams
Parameters
----------
beams : list of `~.model.BeamCutout` objects
data : None or list
optionally, drizzle data specified in this list rather than the
contamination-subtracted arrays from each beam.
wlimit : [float, float]
        Limits on the wavelength array to drizzle ([wmin, wmax])
dlam : float
Delta wavelength per pixel
spatial_scale : float
Relative scaling of the spatial axis (1 = native pixels)
NY : int
Size of the cutout in the spatial dimension, in output pixels
pixfrac : float
Drizzle PIXFRAC (for `kernel` = 'point')
kernel : str, ('square' or 'point')
Drizzle kernel to use
convert_to_flambda : bool, float
Convert the 2D spectrum to physical units using the sensitivity curves
and if float provided, scale the flux densities by that value
    fcontam : float
        Factor by which to scale the contamination arrays and add to the
        pixel variances.
    fill_wht : bool
        Fill `wht==0` pixels of the beam weights with the median nonzero
        value.
    ds9 : `~grizli.ds9.DS9`
        Show intermediate steps of the drizzling
Returns
-------
hdu : `~astropy.io.fits.HDUList`
FITS HDUList with the drizzled 2D spectrum and weight arrays
"""
from astropy import log
# try:
# import drizzle
# if drizzle.__version__ != '1.12.99':
# # Not the fork that works for all input/output arrays
# raise(ImportError)
#
# #print('drizzle!!')
# from drizzle.dodrizzle import dodrizzle
# drizzler = dodrizzle
# dfillval = '0'
# except:
from drizzlepac import adrizzle
adrizzle.log.setLevel('ERROR')
drizzler = adrizzle.do_driz
dfillval = 0
log.setLevel('ERROR')
# log.disable_warnings_logging()
NX = int(np.round(np.diff(wlimit)[0]*1.e4/dlam)) // 2
center = np.mean(wlimit[:2])*1.e4
out_header, output_wcs = utils.full_spectrum_wcsheader(center_wave=center,
dlam=dlam, NX=NX,
spatial_scale=spatial_scale, NY=NY)
sh = (out_header['NAXIS2'], out_header['NAXIS1'])
if not hasattr(output_wcs, '_naxis1'):
output_wcs._naxis2, output_wcs._naxis1 = sh
outsci = np.zeros(sh, dtype=np.float32)
outwht = np.zeros(sh, dtype=np.float32)
outctx = np.zeros(sh, dtype=np.int32)
outvar = np.zeros(sh, dtype=np.float32)
outwv = np.zeros(sh, dtype=np.float32)
outcv = np.zeros(sh, dtype=np.int32)
if data is None:
data = []
for i, beam in enumerate(beams):
# Contamination-subtracted
beam_data = beam.grism.data['SCI'] - beam.contam
data.append(beam_data)
for i, beam in enumerate(beams):
# Get specific WCS for each beam
beam_header, beam_wcs = beam.full_2d_wcs()
if not hasattr(beam_wcs, 'pixel_shape'):
beam_wcs.pixel_shape = beam_wcs._naxis1, beam_wcs._naxis2
if not hasattr(beam_wcs, '_naxis1'):
beam_wcs._naxis1, beam_wcs._naxis2 = beam_wcs._naxis
# Downweight contamination
# wht = 1/beam.ivar + (fcontam*beam.contam)**2
# wht = np.cast[np.float32](1/wht)
# wht[~np.isfinite(wht)] = 0.
contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
wht = beam.ivar*contam_weight
wht[~np.isfinite(wht)] = 0.
contam_weight[beam.ivar == 0] = 0
if fill_wht:
wht_mask = wht == 0
med_wht = np.median(wht[~wht_mask])
wht[wht_mask] = med_wht
#print('xx Fill weight: {0}'.format(med_wht))
data_i = data[i]*1.
scl = 1.
if convert_to_flambda:
#data_i *= convert_to_flambda/beam.beam.sensitivity
#wht *= (beam.beam.sensitivity/convert_to_flambda)**2
scl = convert_to_flambda # /1.e-17
scl *= 1./beam.flat_flam.reshape(beam.beam.sh_beam).sum(axis=0)
#scl = convert_to_flambda/beam.beam.sensitivity
data_i *= scl
wht *= (1/scl)**2
#contam_weight *= scl
wht[~np.isfinite(data_i+scl)] = 0
contam_weight[~np.isfinite(data_i+scl)] = 0
data_i[~np.isfinite(data_i+scl)] = 0
# Go drizzle
# Contamination-cleaned
drizzler(data_i, beam_wcs, wht, output_wcs,
outsci, outwht, outctx, 1., 'cps', 1,
wcslin_pscale=1., uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# For variance
drizzler(contam_weight, beam_wcs, wht, output_wcs,
outvar, outwv, outcv, 1., 'cps', 1,
wcslin_pscale=1., uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
if ds9 is not None:
ds9.view(outsci/output_wcs.pscale**2, header=out_header)
# if False:
# # Plot the spectra for testing
# w, f, e = beam.beam.trace_extract(data_i, ivar=wht, r=3)
# clip = (f/e > 0.5)
# clip &= (e < 2*np.median(e[clip]))
# plt.errorbar(w[clip], f[clip], e[clip], marker='.', color='k', alpha=0.5, ecolor='0.8', linestyle='None')
# dw = np.median(np.diff(w))
# Correct for drizzle scaling
area_ratio = 1./output_wcs.pscale**2
# Preserve flux (has to preserve aperture flux along spatial axis but
# average in spectral axis).
#area_ratio *= spatial_scale
# preserve flux density
flux_density_scale = spatial_scale**2
# science
outsci *= area_ratio*flux_density_scale
# variance
outvar *= area_ratio/outwv*flux_density_scale**2
outwht = 1/outvar
outwht[(outvar == 0) | (~np.isfinite(outwht))] = 0
# if True:
# # Plot for testing....
# yp, xp = np.indices(outsci.shape)
# mask = np.abs(yp-NY) <= 3/spatial_scale
# fl = (outsci*mask).sum(axis=0)
# flv = (1/outwht*mask).sum(axis=0)
#
# wi = grizli.stack.StackedSpectrum.get_wavelength_from_header(out_header)
#
# plt.errorbar(wi[:-1], fl[1:], np.sqrt(flv)[1:], alpha=0.8) #*area_ratio)
# return outwht, outsci, outvar, outwv, output_wcs.pscale
p = pyfits.PrimaryHDU()
p.header['ID'] = (beams[0].id, 'Object ID')
p.header['WMIN'] = (wlimit[0], 'Minimum wavelength')
p.header['WMAX'] = (wlimit[1], 'Maximum wavelength')
p.header['DLAM'] = (dlam, 'Delta wavelength')
p.header['SSCALE'] = (spatial_scale, 'Spatial scale factor w.r.t native')
p.header['FCONTAM'] = (fcontam, 'Contamination weight')
p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')
p.header['BEAM'] = (beams[0].beam.beam, 'Grism order')
p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
exptime = 0.
for i, beam in enumerate(beams):
p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
'Parent filename')
p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
'Beam grism element')
p.header['PA{0:04d}'.format(i+1)] = (beam.get_dispersion_PA(),
'PA of dispersion axis')
exptime += beam.grism.exptime
p.header['EXPTIME'] = (exptime, 'Total exposure time [s]')
h = out_header.copy()
grism_sci = pyfits.ImageHDU(data=outsci, header=h, name='SCI')
grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='WHT')
hdul = pyfits.HDUList([p, grism_sci, grism_wht])
return hdul
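# A minimal sketch of standalone use (assumes `beams` holds `BeamCutout`
# objects from a single grism; the numbers are illustrative):
#
#     >>> hdul = drizzle_2d_spectrum(beams, wlimit=[1.05, 1.75], dlam=46,
#     ...                            NY=10, pixfrac=0.5, kernel='point')
#     >>> sci, wht = hdul['SCI'].data, hdul['WHT'].data
#
# 'WHT' holds inverse-variance weights consistent with the drizzle
# scaling applied above.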
def drizzle_to_wavelength(beams, wcs=None, ra=0., dec=0., wave=1.e4, size=5,
pixscale=0.1, pixfrac=0.6, kernel='square',
direct_extension='REF', fcontam=0.2, ds9=None):
"""Drizzle a cutout at a specific wavelength from a list of `~grizli.model.BeamCutout` objects
Parameters
----------
beams : list of `~.model.BeamCutout` objects.
wcs : `~astropy.wcs.WCS` or None
Pre-determined WCS. If not specified, generate one based on ``ra``,
        ``dec``, ``size`` and ``pixscale``.
ra, dec, wave : float
Sky coordinates and central wavelength
size : float
Size of the output thumbnail, in arcsec
pixscale : float
Pixel scale of the output thumbnail, in arcsec
pixfrac : float
Drizzle PIXFRAC (for ``kernel`` = 'point')
kernel : str, ('square' or 'point')
Drizzle kernel to use
direct_extension : str, ('SCI' or 'REF')
        Extension of ``self.direct.data`` to drizzle for the thumbnail
    fcontam : float
Factor by which to scale the contamination arrays and add to the
pixel variances.
ds9 : `~grizli.ds9.DS9`, optional
Display each step of the drizzling to an open DS9 window
Returns
-------
hdu : `~astropy.io.fits.HDUList`
FITS HDUList with the drizzled thumbnail, line and continuum
cutouts.
"""
# try:
# import drizzle
# if drizzle.__version__ != '1.12.99':
# # Not the fork that works for all input/output arrays
# raise(ImportError)
#
# #print('drizzle!!')
# from drizzle.dodrizzle import dodrizzle
# drizzler = dodrizzle
# dfillval = '0'
# except:
from drizzlepac import adrizzle
adrizzle.log.setLevel('ERROR')
drizzler = adrizzle.do_driz
dfillval = 0
# Nothing to do
if len(beams) == 0:
return False
# Get output header and WCS
if wcs is None:
header, output_wcs = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=False)
else:
output_wcs = wcs.copy()
if not hasattr(output_wcs, 'pscale'):
output_wcs.pscale = utils.get_wcs_pscale(output_wcs)
header = utils.to_header(output_wcs, relax=True)
if not hasattr(output_wcs, '_naxis1'):
output_wcs._naxis1, output_wcs._naxis2 = output_wcs._naxis
# Initialize data
sh = (header['NAXIS2'], header['NAXIS1'])
outsci = np.zeros(sh, dtype=np.float32)
outwht = np.zeros(sh, dtype=np.float32)
outctx = np.zeros(sh, dtype=np.int32)
coutsci = np.zeros(sh, dtype=np.float32)
coutwht = np.zeros(sh, dtype=np.float32)
coutctx = np.zeros(sh, dtype=np.int32)
xoutsci = np.zeros(sh, dtype=np.float32)
xoutwht = np.zeros(sh, dtype=np.float32)
xoutctx = np.zeros(sh, dtype=np.int32)
#direct_filters = np.unique([b.direct.filter for b in self.beams])
all_direct_filters = []
for beam in beams:
if direct_extension == 'REF':
if beam.direct['REF'] is None:
filt_i = beam.direct.ref_filter
direct_extension = 'SCI'
else:
filt_i = beam.direct.filter
all_direct_filters.append(filt_i)
direct_filters = np.unique(all_direct_filters)
doutsci, doutwht, doutctx = {}, {}, {}
for f in direct_filters:
doutsci[f] = np.zeros(sh, dtype=np.float32)
doutwht[f] = np.zeros(sh, dtype=np.float32)
doutctx[f] = np.zeros(sh, dtype=np.int32)
# doutsci = np.zeros(sh, dtype=np.float32)
# doutwht = np.zeros(sh, dtype=np.float32)
# doutctx = np.zeros(sh, dtype=np.int32)
# Loop through beams and run drizzle
for i, beam in enumerate(beams):
# Get specific wavelength WCS for each beam
beam_header, beam_wcs = beam.get_wavelength_wcs(wave)
if not hasattr(beam_wcs, 'pixel_shape'):
beam_wcs.pixel_shape = beam_wcs._naxis1, beam_wcs._naxis2
if not hasattr(beam_wcs, '_naxis1'):
beam_wcs._naxis1, beam_wcs._naxis2 = beam_wcs._naxis
# Make sure CRPIX set correctly for the SIP header
for j in [0, 1]:
# if beam_wcs.sip is not None:
# beam_wcs.sip.crpix[j] = beam_wcs.wcs.crpix[j]
if beam.direct.wcs.sip is not None:
beam.direct.wcs.sip.crpix[j] = beam.direct.wcs.wcs.crpix[j]
for wcs_ext in [beam_wcs.sip]:
if wcs_ext is not None:
wcs_ext.crpix[j] = beam_wcs.wcs.crpix[j]
# ACS requires additional wcs attributes
ACS_CRPIX = [4096/2, 2048/2]
dx_crpix = beam_wcs.wcs.crpix[0] - ACS_CRPIX[0]
dy_crpix = beam_wcs.wcs.crpix[1] - ACS_CRPIX[1]
for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
if wcs_ext is not None:
wcs_ext.crval[0] += dx_crpix
wcs_ext.crval[1] += dy_crpix
beam_data = beam.grism.data['SCI'] - beam.contam
if hasattr(beam, 'background'):
beam_data -= beam.background
if hasattr(beam, 'extra_lines'):
beam_data -= beam.extra_lines
beam_continuum = beam.beam.model*1
if hasattr(beam.beam, 'pscale_array'):
beam_continuum *= beam.beam.pscale_array
# Downweight contamination
if fcontam > 0:
# wht = 1/beam.ivar + (fcontam*beam.contam)**2
# wht = np.cast[np.float32](1/wht)
# wht[~np.isfinite(wht)] = 0.
contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
wht = beam.ivar*contam_weight
wht[~np.isfinite(wht)] = 0.
else:
wht = beam.ivar*1
# Convert to f_lambda integrated line fluxes:
# (Inverse of the aXe sensitivity) x (size of pixel in \AA)
sens = np.interp(wave, beam.beam.lam, beam.beam.sensitivity,
left=0, right=0)
dlam = np.interp(wave, beam.beam.lam[1:], np.diff(beam.beam.lam))
# 1e-17 erg/s/cm2 #, scaling closer to e-/s
sens *= 1.e-17
sens *= 1./dlam
if sens == 0:
continue
else:
wht *= sens**2
beam_data /= sens
beam_continuum /= sens
# Go drizzle
# Contamination-cleaned
drizzler(beam_data, beam_wcs, wht, output_wcs,
outsci, outwht, outctx, 1., 'cps', 1,
wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Continuum
drizzler(beam_continuum, beam_wcs, wht, output_wcs,
coutsci, coutwht, coutctx, 1., 'cps', 1,
wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Contamination
drizzler(beam.contam, beam_wcs, wht, output_wcs,
xoutsci, xoutwht, xoutctx, 1., 'cps', 1,
wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Direct thumbnail
filt_i = all_direct_filters[i]
if direct_extension == 'REF':
thumb = beam.direct['REF']
thumb_wht = np.cast[np.float32]((thumb != 0)*1)
else:
thumb = beam.direct[direct_extension] # /beam.direct.photflam
thumb_wht = 1./(beam.direct.data['ERR']/beam.direct.photflam)**2
thumb_wht[~np.isfinite(thumb_wht)] = 0
if not hasattr(beam.direct.wcs, 'pixel_shape'):
beam.direct.wcs.pixel_shape = (beam.direct.wcs._naxis1,
beam.direct.wcs._naxis2)
if not hasattr(beam.direct.wcs, '_naxis1'):
beam.direct.wcs._naxis1, beam.direct.wcs._naxis2 = beam.direct.wcs._naxis
drizzler(thumb, beam.direct.wcs, thumb_wht, output_wcs,
doutsci[filt_i], doutwht[filt_i], doutctx[filt_i],
1., 'cps', 1,
wcslin_pscale=beam.direct.wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Show in ds9
if ds9 is not None:
ds9.view((outsci-coutsci), header=header)
# Scaling of drizzled outputs
outwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4
coutwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4
xoutwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4
for filt_i in all_direct_filters:
doutwht[filt_i] *= (beams[0].direct.wcs.pscale/output_wcs.pscale)**4
# Make output FITS products
p = pyfits.PrimaryHDU()
p.header['ID'] = (beams[0].id, 'Object ID')
p.header['RA'] = (ra, 'Central R.A.')
p.header['DEC'] = (dec, 'Central Decl.')
p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')
p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
for i, beam in enumerate(beams):
p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
'Parent filename')
p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
'Beam grism element')
p.header['PA{0:04d}'.format(i+1)] = (beam.get_dispersion_PA(),
'PA of dispersion axis')
h = header.copy()
h['ID'] = (beam.id, 'Object ID')
h['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
h['DRIZKRNL'] = (kernel, 'Drizzle kernel')
p.header['NDFILT'] = len(direct_filters), 'Number of direct image filters'
for i, filt_i in enumerate(direct_filters):
p.header['DFILT{0:02d}'.format(i+1)] = filt_i
p.header['NFILT{0:02d}'.format(i+1)] = all_direct_filters.count(filt_i), 'Number of beams with this direct filter'
HDUL = [p]
for i, filt_i in enumerate(direct_filters):
h['FILTER'] = (filt_i, 'Direct image filter')
thumb_sci = pyfits.ImageHDU(data=doutsci[filt_i], header=h,
name='DSCI')
thumb_wht = pyfits.ImageHDU(data=doutwht[filt_i], header=h,
name='DWHT')
thumb_sci.header['EXTVER'] = filt_i
thumb_wht.header['EXTVER'] = filt_i
HDUL += [thumb_sci, thumb_wht]
#thumb_seg = pyfits.ImageHDU(data=seg_slice, header=h, name='DSEG')
h['FILTER'] = (beam.grism.filter, 'Grism filter')
h['WAVELEN'] = (wave, 'Central wavelength')
grism_sci = pyfits.ImageHDU(data=outsci-coutsci, header=h, name='LINE')
grism_cont = pyfits.ImageHDU(data=coutsci, header=h, name='CONTINUUM')
grism_contam = pyfits.ImageHDU(data=xoutsci, header=h, name='CONTAM')
grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='LINEWHT')
#HDUL = [p, thumb_sci, thumb_wht, grism_sci, grism_cont, grism_contam, grism_wht]
HDUL += [grism_sci, grism_cont, grism_contam, grism_wht]
return pyfits.HDUList(HDUL)
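# Usage sketch (hypothetical coordinates and wavelength):
#
#     >>> hdul = drizzle_to_wavelength(beams, ra=150.1, dec=2.2,
#     ...                              wave=1.4e4, size=5, pixscale=0.1)
#     >>> emline = hdul['LINE'].data     # continuum-subtracted line map
#     >>> lwht = hdul['LINEWHT'].data    # weight map
#
# 'LINE' is already continuum-subtracted (outsci - coutsci); 'CONTINUUM'
# and 'CONTAM' carry the drizzled continuum and contamination models.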
def show_drizzle_HDU(hdu, diff=True, mask_segmentation=True, average_only=False, scale_size=1, cmap='viridis_r', show_labels=True, **kwargs):
"""Make a figure from the multiple extensions in the drizzled grism file.
Parameters
----------
hdu : `~astropy.io.fits.HDUList`
HDU list output by `drizzle_grisms_and_PAs`.
diff : bool
If True, then plot the stacked spectrum minus the model.
Returns
-------
fig : `~matplotlib.figure.Figure`
The figure.
"""
from collections import OrderedDict
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import MultipleLocator
h0 = hdu[0].header
NX = h0['NGRISM']
NY = 0
grisms = OrderedDict()
for ig in range(NX):
g = h0['GRISM{0:03d}'.format(ig+1)]
NY = np.maximum(NY, h0['N'+g])
grisms[g] = h0['N'+g]
NY += 1
widths = []
for i in range(NX):
widths.extend([0.2, 1])
if average_only:
NY = 1
fig = plt.figure(figsize=(5*NX*scale_size, 1*NY*scale_size+0.33))
gs = GridSpec(NY, NX*2, width_ratios=widths)
else:
fig = plt.figure(figsize=(5*NX*scale_size, 1*NY*scale_size))
gs = GridSpec(NY, NX*2, height_ratios=[1]*NY, width_ratios=widths)
for ig, g in enumerate(grisms):
sci_i = hdu['SCI', g]
wht_i = hdu['WHT', g]
model_i = hdu['MODEL', g]
kern_i = hdu['KERNEL', g]
h_i = sci_i.header
clip = wht_i.data > 0
if clip.sum() == 0:
clip = np.isfinite(wht_i.data)
avg_rms = 1/np.median(np.sqrt(wht_i.data[clip]))
vmax = np.maximum(1.1*np.percentile(sci_i.data[clip], 98),
5*avg_rms)
vmax_kern = 1.1*np.percentile(kern_i.data, 99.5)
# Kernel
ax = fig.add_subplot(gs[NY-1, ig*2+0])
sh = kern_i.data.shape
extent = [0, sh[1], 0, sh[0]]
ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
vmin=-0.1*vmax_kern, vmax=vmax_kern, cmap=cmap,
extent=extent, aspect='auto')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.xaxis.set_tick_params(length=0)
ax.yaxis.set_tick_params(length=0)
# Spectrum
sh = sci_i.data.shape
extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
ax = fig.add_subplot(gs[NY-1, ig*2+1])
if diff:
#print('xx DIFF!')
m = model_i.data
else:
m = 0
ax.imshow(sci_i.data-m, origin='lower',
interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
extent=extent, cmap=cmap,
aspect='auto')
ax.set_yticklabels([])
ax.set_xlabel(r'$\lambda$ ($\mu$m) - '+g)
ax.xaxis.set_major_locator(MultipleLocator(GRISM_MAJOR[g]))
if average_only:
iters = []
else:
iters = range(grisms[g])
for ip in iters:
#print(ip, ig)
pa = h0['{0}{1:02d}'.format(g, ip+1)]
sci_i = hdu['SCI', '{0},{1}'.format(g, pa)]
wht_i = hdu['WHT', '{0},{1}'.format(g, pa)]
kern_i = hdu['KERNEL', '{0},{1}'.format(g, pa)]
h_i = sci_i.header
# Kernel
ax = fig.add_subplot(gs[ip, ig*2+0])
sh = kern_i.data.shape
extent = [0, sh[1], 0, sh[0]]
ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
vmin=-0.1*vmax_kern, vmax=vmax_kern, extent=extent,
cmap=cmap, aspect='auto')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.xaxis.set_tick_params(length=0)
ax.yaxis.set_tick_params(length=0)
# Spectrum
sh = sci_i.data.shape
extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]
ax = fig.add_subplot(gs[ip, ig*2+1])
ax.imshow(sci_i.data, origin='lower',
interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
extent=extent, cmap=cmap,
aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_major_locator(MultipleLocator(GRISM_MAJOR[g]))
if show_labels:
ax.text(0.015, 0.94, '{0:3.0f}'.format(pa), ha='left',
va='top',
transform=ax.transAxes, fontsize=8,
backgroundcolor='w')
if (ig == (NX-1)) & (ip == 0) & show_labels:
ax.text(0.98, 0.94, 'ID = {0}'.format(h0['ID']),
ha='right', va='top', transform=ax.transAxes,
fontsize=8, backgroundcolor='w')
if average_only:
#pass
gs.tight_layout(fig, pad=0.01)
else:
gs.tight_layout(fig, pad=0.1)
return fig
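# Usage sketch, following from the products generated above:
#
#     >>> hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.2)
#     >>> fig2 = show_drizzle_HDU(hdu, diff=False, scale_size=1.5)
#     >>> fig2.savefig('stack.png')   # hypothetical filename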
def drizzle_2d_spectrum_wcs(beams, data=None, wlimit=[1.05, 1.75], dlam=50,
spatial_scale=1, NY=10, pixfrac=0.6, kernel='square',
convert_to_flambda=True, fcontam=0.2, fill_wht=False,
ds9=None, mask_segmentation=True):
"""Drizzle 2D spectrum from a list of beams
Parameters
----------
beams : list of `~.model.BeamCutout` objects
data : None or list
optionally, drizzle data specified in this list rather than the
contamination-subtracted arrays from each beam.
wlimit : [float, float]
        Limits on the wavelength array to drizzle ([wmin, wmax])
dlam : float
Delta wavelength per pixel
spatial_scale : float
Relative scaling of the spatial axis (1 = native pixels)
NY : int
Size of the cutout in the spatial dimension, in output pixels
pixfrac : float
Drizzle PIXFRAC (for `kernel` = 'point')
kernel : str, ('square' or 'point')
Drizzle kernel to use
convert_to_flambda : bool, float
Convert the 2D spectrum to physical units using the sensitivity curves
and if float provided, scale the flux densities by that value
    fcontam : float
        Factor by which to scale the contamination arrays and add to the
        pixel variances.
    ds9 : `~grizli.ds9.DS9`
        Show intermediate steps of the drizzling
Returns
-------
hdu : `~astropy.io.fits.HDUList`
FITS HDUList with the drizzled 2D spectrum and weight arrays
"""
# try:
# import drizzle
# if drizzle.__version__ != '1.12.99':
# # Not the fork that works for all input/output arrays
# raise(ImportError)
#
# #print('drizzle!!')
# from drizzle.dodrizzle import dodrizzle
# drizzler = dodrizzle
# dfillval = '0'
# except:
from drizzlepac import adrizzle
adrizzle.log.setLevel('ERROR')
drizzler = adrizzle.do_driz
dfillval = 0
from stwcs import distortion
from astropy import log
log.setLevel('ERROR')
# log.disable_warnings_logging()
adrizzle.log.setLevel('ERROR')
NX = int(np.round(np.diff(wlimit)[0]*1.e4/dlam)) // 2
center = np.mean(wlimit[:2])*1.e4
out_header, output_wcs = utils.make_spectrum_wcsheader(center_wave=center,
dlam=dlam, NX=NX,
spatial_scale=spatial_scale, NY=NY)
pixscale = 0.128*spatial_scale
# # Get central RA, reference pixel of beam[0]
# #rd = beams[0].get_sky_coords()
# x0 = beams[0].beam.x0.reshape((1,2))
# #x0[0,1] += beam.direct.origin[1]-beam.grism.origin[1]
# rd = beam.grism.wcs.all_pix2world(x0,1)[0]
# theta = 270-beams[0].get_dispersion_PA()
#out_header, output_wcs = utils.make_wcsheader(ra=rd[0], dec=rd[1], size=[50,10], pixscale=pixscale, get_hdu=False, theta=theta)
if True:
theta = -np.arctan2(np.diff(beams[0].beam.ytrace)[0], 1)
undist_wcs = distortion.utils.output_wcs([beams[0].grism.wcs], undistort=True)
undist_wcs = utils.transform_wcs(undist_wcs, rotation=theta, scale=undist_wcs.pscale/pixscale)
output_wcs = undist_wcs.copy()
out_header = utils.to_header(output_wcs)
# Direct image
d_undist_wcs = distortion.utils.output_wcs([beams[0].direct.wcs], undistort=True)
d_undist_wcs = utils.transform_wcs(d_undist_wcs, rotation=0., scale=d_undist_wcs.pscale/pixscale)
d_output_wcs = d_undist_wcs.copy()
# Make square
if hasattr(d_output_wcs, '_naxis1'):
nx1, nx2 = d_output_wcs._naxis1, d_output_wcs._naxis2
else:
nx1, nx2 = d_output_wcs._naxis
d_output_wcs._naxis1, d_output_wcs._naxis2 = nx1, nx2
dx = nx1 - nx2
if hasattr(d_output_wcs, '_naxis1'):
d_output_wcs._naxis1 = d_output_wcs._naxis2
else:
d_output_wcs._naxis[0] = d_output_wcs._naxis[1]
d_output_wcs._naxis1 = d_output_wcs._naxis2 = d_output_wcs._naxis[0]
d_output_wcs.wcs.crpix[0] -= dx/2.
d_out_header = utils.to_header(d_output_wcs)
#delattr(output_wcs, 'orientat')
#beam_header = utils.to_header(beam_wcs)
#output_wcs = beam_wcs
#output_wcs = pywcs.WCS(beam_header, relax=True)
#output_wcs.pscale = utils.get_wcs_pscale(output_wcs)
# shift CRPIX to reference position of beam[0]
sh = (out_header['NAXIS2'], out_header['NAXIS1'])
sh_d = (d_out_header['NAXIS2'], d_out_header['NAXIS1'])
outsci = np.zeros(sh, dtype=np.float32)
outwht = np.zeros(sh, dtype=np.float32)
outctx = np.zeros(sh, dtype=np.int32)
doutsci = np.zeros(sh_d, dtype=np.float32)
doutwht = np.zeros(sh_d, dtype=np.float32)
doutctx = np.zeros(sh_d, dtype=np.int32)
outvar = np.zeros(sh, dtype=np.float32)
outwv = np.zeros(sh, dtype=np.float32)
outcv = np.zeros(sh, dtype=np.int32)
outls = np.zeros(sh, dtype=np.float32)
outlw = np.zeros(sh, dtype=np.float32)
outlc = np.zeros(sh, dtype=np.int32)
if data is None:
data = []
for i, beam in enumerate(beams):
# Contamination-subtracted
beam_data = beam.grism.data['SCI'] - beam.contam
data.append(beam_data)
for i, beam in enumerate(beams):
# Get specific WCS for each beam
beam_header, beam_wcs = beam.get_2d_wcs()
beam_wcs = beam.grism.wcs.deepcopy()
# Shift SIP reference
dx_sip = beam.grism.origin[1] - beam.direct.origin[1]
#beam_wcs.sip.crpix[0] += dx_sip
for wcs_ext in [beam_wcs.sip]:
if wcs_ext is not None:
wcs_ext.crpix[0] += dx_sip
for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
if wcs_ext is not None:
wcs_ext.crval[0] += dx_sip
# Shift y for trace
xy0 = beam.grism.wcs.all_world2pix(output_wcs.wcs.crval.reshape((1, 2)), 0)[0]
dy = np.interp(xy0[0], np.arange(beam.beam.sh_beam[1]), beam.beam.ytrace)
#beam_wcs.sip.crpix[1] += dy
beam_wcs.wcs.crpix[1] += dy
for wcs_ext in [beam_wcs.sip]:
if wcs_ext is not None:
wcs_ext.crpix[1] += dy
for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
if wcs_ext is not None:
wcs_ext.crval[1] += dy
if not hasattr(beam_wcs, 'pixel_shape'):
beam_wcs.pixel_shape = beam_wcs._naxis1, beam_wcs._naxis2
if not hasattr(beam_wcs, '_naxis1'):
beam_wcs._naxis1, beam_wcs._naxis2 = beam_wcs._naxis
d_beam_wcs = beam.direct.wcs
if beam.direct['REF'] is None:
d_wht = 1./beam.direct['ERR']**2
d_wht[~np.isfinite(d_wht)] = 0
d_sci = beam.direct['SCI']*1
else:
d_sci = beam.direct['REF']*1
d_wht = d_sci*0.+1
if mask_segmentation:
d_sci *= (beam.beam.seg == beam.id)
# Downweight contamination
# wht = 1/beam.ivar + (fcontam*beam.contam)**2
# wht = np.cast[np.float32](1/wht)
# wht[~np.isfinite(wht)] = 0.
contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
wht = beam.ivar*contam_weight
wht[~np.isfinite(wht)] = 0.
contam_weight[beam.ivar == 0] = 0
data_i = data[i]*1.
scl = 1.
if convert_to_flambda:
#data_i *= convert_to_flambda/beam.beam.sensitivity
#wht *= (beam.beam.sensitivity/convert_to_flambda)**2
scl = convert_to_flambda # /1.e-17
scl *= 1./beam.flat_flam.reshape(beam.beam.sh_beam).sum(axis=0)
#scl = convert_to_flambda/beam.beam.sensitivity
data_i *= scl
wht *= (1/scl)**2
#contam_weight *= scl
wht[~np.isfinite(data_i+scl)] = 0
contam_weight[~np.isfinite(data_i+scl)] = 0
data_i[~np.isfinite(data_i+scl)] = 0
# Go drizzle
data_wave = np.dot(np.ones(beam.beam.sh_beam[0])[:, None], beam.beam.lam[None, :])
drizzler(data_wave, beam_wcs, wht*0.+1, output_wcs,
outls, outlw, outlc, 1., 'cps', 1,
wcslin_pscale=1., uniqid=1,
pixfrac=1, kernel='square', fillval=dfillval)
# Direct image
drizzler(d_sci, d_beam_wcs, d_wht, d_output_wcs,
doutsci, doutwht, doutctx, 1., 'cps', 1,
wcslin_pscale=d_beam_wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# Contamination-cleaned
drizzler(data_i, beam_wcs, wht, output_wcs,
outsci, outwht, outctx, 1., 'cps', 1,
wcslin_pscale=beam_wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
# For variance
drizzler(contam_weight, beam_wcs, wht, output_wcs,
outvar, outwv, outcv, 1., 'cps', 1,
wcslin_pscale=beam_wcs.pscale, uniqid=1,
pixfrac=pixfrac, kernel=kernel, fillval=dfillval)
if ds9 is not None:
ds9.view(outsci, header=out_header)
# if True:
# w, f, e = beam.beam.optimal_extract(data_i, ivar=beam.ivar)
# plt.scatter(w, f, marker='.', color='k', alpha=0.5)
# Correct for drizzle scaling
#outsci /= output_wcs.pscale**2
outls /= output_wcs.pscale**2
wave = np.median(outls, axis=0)
# # Testing
# fl = (sp[1].data*mask).sum(axis=0)
# variance
outvar /= outwv # *output_wcs.pscale**2
outwht = 1/outvar
outwht[(outvar == 0) | (~np.isfinite(outwht))] = 0
# return outwht, outsci, outvar, outwv, output_wcs.pscale
p = pyfits.PrimaryHDU()
p.header['ID'] = (beams[0].id, 'Object ID')
p.header['WMIN'] = (wave[0], 'Minimum wavelength')
p.header['WMAX'] = (wave[-1], 'Maximum wavelength')
p.header['DLAM'] = ((wave[-1]-wave[0])/wave.size, 'Delta wavelength')
p.header['FCONTAM'] = (fcontam, 'Contamination weight')
p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')
p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
for i, beam in enumerate(beams):
p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
'Parent filename')
p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
'Beam grism element')
h = out_header.copy()
for k in p.header:
h[k] = p.header[k]
direct_sci = pyfits.ImageHDU(data=doutsci, header=d_out_header, name='DSCI')
grism_sci = pyfits.ImageHDU(data=outsci, header=h, name='SCI')
grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='WHT')
hdul = pyfits.HDUList([p, grism_sci, grism_wht, direct_sci])
return hdul
|
gbrammer/grizli
|
grizli/multifit.py
|
Python
|
mit
| 194,256
|
[
"Gaussian"
] |
f826384d325864e17d05f467f3252c7545c44b9a9af96aa801bf6a3d1455debc
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add them to
  EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add them to
  NON_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base names with ambiguous state that should not be checked for
  a shebang, add them to IGNORED_FILENAMES.
Any file not matching the above will be opened and checked for a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/android_testrunner/',
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/jemalloc/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0]
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
      rel_path.startswith(IGNORED_PATHS))  # already lower-cased above
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
def has_shebang_or_is_elf(full_path):
"""Returns if the file starts with #!/ or is an ELF binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
return (data[:3] == '#!/' or data == '#! /', data == '\x7fELF')
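# Example return values for hypothetical files:
#   a script starting with '#!/bin/sh'  -> (True, False)
#   an ELF binary ('\x7fELF' magic)     -> (False, True)
#   a plain text file                   -> (False, False)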
def check_file(root_path, rel_path):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent. Returns None on success.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
Otherwise, the first few bytes of the file are read to verify if it has a
shebang or ELF header and compares this with the executable bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
def result_dict(error):
return {
'error': error,
'full_path': full_path,
'rel_path': rel_path,
}
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). Chromium
# tree happens to have invalid symlinks under
# third_party/openssl/openssl/test/.
return None
if must_be_executable(rel_path):
if not bit:
return result_dict('Must have executable bit set')
return
if must_not_be_executable(rel_path):
if bit:
return result_dict('Must not have executable bit set')
return
# For the others, it depends on the file header.
(shebang, elf) = has_shebang_or_is_elf(full_path)
if bit != (shebang or elf):
if bit:
return result_dict('Has executable bit but not shebang or ELF header')
if shebang:
return result_dict('Has shebang but not executable bit')
return result_dict('Has ELF header but not executable bit')
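# Example failure record for a hypothetical offending file:
#
#   {'error': 'Has shebang but not executable bit',
#    'full_path': '/checkout/tools/foo.py',
#    'rel_path': 'tools/foo.py'}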
def check_files(root, files):
gen = (check_file(root, f) for f in files if not is_ignored(f))
return filter(None, gen)
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
        print('Found %s files' % len(self._files))
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
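# Usage sketch (hypothetical path), mirroring main() below:
#
#   api = get_scm('/path/to/checkout', bare=False)
#   errors = api.check(api.root_dir)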
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
      help='Specifies a list of files to check the permissions of. Only these '
'files will be checked')
parser.add_option('--json', help='Path to JSON output file')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
# --file implies --bare (for PRESUBMIT.py).
options.bare = True
errors = check_files(options.root, options.files)
else:
api = get_scm(options.root, options.bare)
start_dir = args[0] if args else api.root_dir
errors = api.check(start_dir)
if not options.bare:
    print('Processed %s files, %d files were tested for shebang/ELF '
          'header' % (api.count, api.count_read_header))
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
    if options.bare:
      print('\n'.join(e['full_path'] for e in errors))
    else:
      print('\nFAILED\n')
      print('\n'.join('%s: %s' % (e['full_path'], e['error'])
                      for e in errors))
return 1
if not options.bare:
    print('\nSUCCESS\n')
return 0
if '__main__' == __name__:
sys.exit(main())
|
sgraham/nope
|
tools/checkperms/checkperms.py
|
Python
|
bsd-3-clause
| 13,349
|
[
"Galaxy",
"xTB"
] |
b83e5462961639340b23e06f5bbfcad75e54f37491d0c2a32387edac680380c4
|
import os, sys, time
from PlummerGalaxy import PlummerGalaxy
def printargs():
print("arguments: {numpts} {nbody: aarseth|simplecpu|opencl-cpu|opencl-gpu} {optional:render?} {optional:opencl-particles-per-thread}")
if len(sys.argv) <= 2:
printargs()
quit()
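# Example invocations (binaries are assumed to be built by the Makefiles):
#   python run_plummer_benchmark.py 4096 simplecpu
#   python run_plummer_benchmark.py 16384 opencl-gpu True 4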
# ==================== Argument 1: number of points in Plummer sphere
galaxyNumPts = int(sys.argv[1])
if galaxyNumPts <= 0:
printargs()
quit()
particlesPerOpenCLThread = "0"  # kept as a string for command-line concatenation below
if len(sys.argv) > 4:
particlesPerOpenCLThread = str(sys.argv[4])
InitialConditionsFolder = "data/initialconditions/"
OutputResultsFolder = "data/results/"
# ==================== Argument 2: nbody code type
OUTFILENAME = ""
NBODYCOMPILER = ""
NBODYCALCULATOR = ""
if str(sys.argv[2]) == "aarseth":
print("python script was told to use aarseth code (nbody0-lab.c)")
OUTFILENAME = OutputResultsFolder+"out_aarseth_plumbench.data"
NBODYCOMPILER = "(cd NBodySim_Aarseth && make clean && make)"
NBODYCALCULATOR = "./NBodySim_Aarseth/aarseth "+InitialConditionsFolder+"initialconditions.data "+OUTFILENAME
elif str(sys.argv[2]) == "simplecpu":
print("python script was told to use simplecpu code (single-threaded C++)")
OUTFILENAME = OutputResultsFolder+"out_simplecpu_plumbench.data"
NBODYCOMPILER = "(cd NBodySim_SimpleCPU && make clean && make)"
NBODYCALCULATOR = "./NBodySim_SimpleCPU/nbodycpp "+InitialConditionsFolder+"initialconditions.data "+OUTFILENAME
elif str(sys.argv[2]) == "opencl-cpu":
print("python script was told to use opencl code (cpu)")
OUTFILENAME = OutputResultsFolder+"out_opencl_cpu_plumbench.data"
NBODYCOMPILER = "(cd NBodySim_OpenCL_N2 && make clean && make)"
NBODYCALCULATOR = "./NBodySim_OpenCL_N2/nbodyocl cpu "+InitialConditionsFolder+"initialconditions.data "+OUTFILENAME+" "+particlesPerOpenCLThread+" NBodySim_OpenCL_N2/nbody_kernel_verlet.cl"
elif str(sys.argv[2]) == "opencl-gpu":
print("python script was told to use opencl code (gpu)")
OUTFILENAME = OutputResultsFolder+"out_opencl_gpu_plumbench.data"
NBODYCOMPILER = "(cd NBodySim_OpenCL_N2 && make clean && make)"
NBODYCALCULATOR = "./NBodySim_OpenCL_N2/nbodyocl gpu "+InitialConditionsFolder+"initialconditions.data "+OUTFILENAME+" "+particlesPerOpenCLThread+" NBodySim_OpenCL_N2/nbody_kernel_verlet.cl"
else:
print("unknown nbody code type, see arguments")
printargs()
print("will still create initial conditions for later use")
# ==================== Argument 3 - render? (optional, default is False)
doRender3D = False
if len(sys.argv) > 3:
if str(sys.argv[3]) == "True" or str(sys.argv[3]) == "true" or str(sys.argv[3]) == "1":
doRender3D = True
# ==================== compile (run Makefile)
if len(NBODYCOMPILER) > 1:
print("compiling nbody simulator...")
os.system(NBODYCOMPILER)
# ==================== create initial conditions
print("Generating Plummer galaxy initial conditions...")
newGalaxy = PlummerGalaxy()
newGalaxy.npts = galaxyNumPts
newGalaxy.R = 1.0
newGalaxy.timestep = 0.07
newGalaxy.timemax = 50.0
newGalaxy.ZeroVelocities_Bool = False
newGalaxy.GenerateInitialConditions(0,0,0)
newGalaxy.WriteToFile(InitialConditionsFolder+"initialconditions.data")
# ==================== simulate
if len(NBODYCALCULATOR) > 1:
print("Running nbody simulation...")
starttime = time.time() #time.clock()
os.system(NBODYCALCULATOR)
endtime = time.time() #time.clock()
print("TIME TO RUN BENCHMARK: "+str(endtime-starttime)+" seconds")
# ==================== render
if doRender3D:
if False:  # flip to True to render the newest .data file instead of OUTFILENAME
import glob
latestDatafile = max(glob.iglob(OutputResultsFolder+"*.data"), key=os.path.getctime)
else:
latestDatafile = OUTFILENAME
print("will render file: \""+str(latestDatafile)+"\"")
os.system("./Renderer3D/Renderer3D "+str(latestDatafile)+" "+str(galaxyNumPts)+" 0 1 1")
|
jasonbunk/NBodyGalaxySimulation
|
run_plummer_benchmark.py
|
Python
|
gpl-3.0
| 3,774
|
[
"Galaxy"
] |
f4b5bd7fcf3c6b7e516e9cac387dc2fb08f10175f2bef7242d4dac320fcc7fc8
|
#-*- encoding=utf-8 -*-
'''
Created on Jan 18, 2013
@author: brian
'''
import openid
from openid.fetchers import HTTPFetcher, HTTPResponse
from urlparse import parse_qs, urlparse
from django.conf import settings
from django.test import TestCase, LiveServerTestCase
from django.core.cache import cache
from django.test.utils import override_settings
from django.urls import reverse
from django.test.client import RequestFactory
from unittest import skipUnless
from student.tests.factories import UserFactory
from openedx.core.djangoapps.external_auth.views import provider_login
class MyFetcher(HTTPFetcher):
"""A fetcher that uses server-internal calls for performing HTTP
requests.
"""
def __init__(self, client):
"""@param client: A test client object"""
super(MyFetcher, self).__init__()
self.client = client
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by Django
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
# method = 'POST'
# undo the URL encoding of the POST arguments
data = parse_qs(body)
response = self.client.post(url, data)
else:
# method = 'GET'
data = {}
if headers and 'Accept' in headers:
data['CONTENT_TYPE'] = headers['Accept']
response = self.client.get(url, data)
# Translate the test client response to the fetcher's HTTP response abstraction
content = response.content
final_url = url
response_headers = {}
if 'Content-Type' in response:
response_headers['content-type'] = response['Content-Type']
if 'X-XRDS-Location' in response:
response_headers['x-xrds-location'] = response['X-XRDS-Location']
status = response.status_code
return HTTPResponse(
body=content,
final_url=final_url,
headers=response_headers,
status=status,
)
class OpenIdProviderTest(TestCase):
"""
Tests of the OpenId login
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_xrds_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_login_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-login')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
for expected_input in (
'<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />',
'<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />',
'<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />',
'<input type="submit" value="Continue" />',
'<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />',
'<input name="openid.ax.type.lastname" '
'type="hidden" value="http://axschema.org/namePerson/last" />',
'<input name="openid.ax.type.firstname" '
'type="hidden" value="http://axschema.org/namePerson/first" />',
'<input name="openid.ax.required" type="hidden" '
'value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />',
'<input name="openid.ax.type.nickname" '
'type="hidden" value="http://axschema.org/namePerson/friendly" />',
'<input name="openid.ax.type.old_email" '
'type="hidden" value="http://schema.openid.net/contact/email" />',
'<input name="openid.ax.type.old_nickname" '
'type="hidden" value="http://schema.openid.net/namePerson/friendly" />',
'<input name="openid.ax.type.old_fullname" '
'type="hidden" value="http://schema.openid.net/namePerson" />',
'<input name="openid.identity" '
'type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />',
'<input name="openid.claimed_id" '
'type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />',
# should work on the test server as well
'<input name="openid.realm" '
'type="hidden" value="http://testserver/" />',
):
self.assertContains(resp, expected_input, html=True)
# not included here are elements that will vary from run to run:
# <input name="openid.return_to" type="hidden"
# value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" />
# <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" />
def attempt_login(self, expected_code, login_method='POST', **kwargs):
""" Attempt to log in through the open id provider login """
url = reverse('openid-provider-login')
args = {
"openid.mode": "checkid_setup",
"openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H",
"openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}",
"openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.realm": "http://testserver/",
"openid.identity": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
"openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname",
"openid.ax.type.fullname": "http://axschema.org/namePerson",
"openid.ax.type.lastname": "http://axschema.org/namePerson/last",
"openid.ax.type.firstname": "http://axschema.org/namePerson/first",
"openid.ax.type.nickname": "http://axschema.org/namePerson/friendly",
"openid.ax.type.email": "http://axschema.org/contact/email",
"openid.ax.type.old_email": "http://schema.openid.net/contact/email",
"openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly",
"openid.ax.type.old_fullname": "http://schema.openid.net/namePerson",
}
# override the default args with any given arguments
for key in kwargs:
args["openid." + key] = kwargs[key]
if login_method == 'POST':
resp = self.client.post(url, args)
elif login_method == 'GET':
resp = self.client.get(url, args)
else:
self.fail('Invalid login method')
code = expected_code
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_open_id_setup(self):
""" Attempt a standard successful login """
self.attempt_login(200)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_namespace(self):
""" Test for 403 error code when the namespace of the request is invalid"""
self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0")
@override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org'])
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_return_url(self):
""" Test for 403 error code when the url"""
self.attempt_login(403, return_to="http://apps.cs50.edx.or")
def _send_bad_redirection_login(self):
"""
Attempt to log in to the provider with setup parameters
Intentionally fail the login to force a redirect
"""
user = UserFactory()
factory = RequestFactory()
post_params = {'email': user.email, 'password': 'password'}
fake_url = 'fake url'
request = factory.post(reverse('openid-provider-login'), post_params)
openid_setup = {
'request': factory.request(),
'url': fake_url,
'post_params': {}
}
request.session = {
'openid_setup': openid_setup
}
response = provider_login(request)
return response
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection(self):
""" Test to see that we can handle login redirection properly"""
response = self._send_bad_redirection_login()
        self.assertEqual(response.status_code, 302)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# log in attempts before the rate gets limited
for _ in xrange(30):
self._send_bad_redirection_login()
response = self._send_bad_redirection_login()
# verify that we are not returning the default 403
        self.assertEqual(response.status_code, 302)
# clear the ratelimit cache so that we don't fail other logins
cache.clear()
def _attempt_login_and_perform_final_response(self, user, profile_name):
"""
Performs full procedure of a successful OpenID provider login for user,
all required data is taken form ``user`` attribute which is an instance
of ``User`` model. As a convenience this method will also set
``profile.name`` for the user.
"""
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = profile_name
user.profile.save()
        # It is assumed that the user's password is 'test' (default for UserFactory)
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
post_args = {
'email': user.email,
'password': 'test'
}
# call url again, this time with username and password
return self.client.post(url, post_args)
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email(self):
user = UserFactory(email=u"user.ąęł@gmail.com")
resp = self._attempt_login_and_perform_final_response(user, u"Jan ĄĘŁ")
location = resp['Location']
parsed_url = urlparse(location)
parsed_qs = parse_qs(parsed_url.query)
        self.assertEqual(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
        self.assertEqual(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
        self.assertEqual(parsed_qs['openid.ax.value.ext0.1'][0],
                         user.profile.name.encode('utf-8'))  # pylint: disable=no-member
        self.assertEqual(parsed_qs['openid.ax.value.ext1.1'][0],
                         user.email.encode('utf-8'))  # pylint: disable=no-member
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email_invalid_password(self):
user = UserFactory(email=u"user.ąęł@gmail.com")
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = u"Jan ĄĘ"
user.profile.save()
        # It is assumed that the user's password is 'test' (default for UserFactory)
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
# We trigger situation where user password is invalid at last phase
# of openid login
post_args = {
'email': user.email,
'password': 'invalid-password'
}
# call url again, this time with username and password
return self.client.post(url, post_args)
@skipUnless(
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'), 'OpenID not enabled')
def test_provider_login_can_handle_unicode_email_inactive_account(self):
user = UserFactory(email=u"user.ąęł@gmail.com", username=u"ąęół")
url = reverse('openid-provider-login')
# login to the client so that we can persist session information
user.profile.name = u'Jan ĄĘ'
user.profile.save() # pylint: disable=no-member
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
# We trigger situation where user is not active at final phase of
# OpenId login.
user.is_active = False
user.save() # pylint: disable=no-member
post_args = {
'email': user.email,
'password': 'test'
}
# call url again, this time with username and password
self.client.post(url, post_args)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_openid_final_response(self):
user = UserFactory()
# login to the client so that we can persist session information
for name in ['Robot 33', '☃']:
resp = self._attempt_login_and_perform_final_response(user, name)
# all information is embedded in the redirect url
location = resp['Location']
# parse the url
parsed_url = urlparse(location)
parsed_qs = parse_qs(parsed_url.query)
            self.assertEqual(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
            self.assertEqual(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
            self.assertEqual(parsed_qs['openid.ax.value.ext1.1'][0], user.email)
            self.assertEqual(parsed_qs['openid.ax.value.ext0.1'][0], user.profile.name)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_openid_invalid_password(self):
url = reverse('openid-provider-login')
user = UserFactory()
# login to the client so that we can persist session information
for method in ['POST', 'GET']:
self.client.login(username=user.username, password='test')
self.attempt_login(200, method)
openid_setup = self.client.session['openid_setup']
self.assertIn('post_params', openid_setup)
post_args = {
'email': user.email,
'password': 'bad_password',
}
# call url again, this time with username and password
resp = self.client.post(url, post_args)
            self.assertEqual(resp.status_code, 302)
redirect_url = resp['Location']
parsed_url = urlparse(redirect_url)
query_params = parse_qs(parsed_url[4])
self.assertIn('openid.return_to', query_params)
self.assertTrue(
query_params['openid.return_to'][0].startswith('http://testserver/openid/complete/')
)
class OpenIdProviderLiveServerTest(LiveServerTestCase):
"""
In order for this absolute URL to work (i.e. to get xrds, then authentication)
in the test environment, we either need a live server that works with the default
fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
Here we do the former.
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@classmethod
def tearDownClass(cls):
"""
Workaround for a runtime error that occurs
intermittently when the server thread doesn't shut down
within 2 seconds.
Since the server is running in a Django thread and will
be terminated when the test suite terminates,
this shouldn't cause a resource allocation issue.
"""
try:
super(OpenIdProviderLiveServerTest, cls).tearDownClass()
except RuntimeError:
print "Warning: Could not shut down test server."
|
BehavioralInsightsTeam/edx-platform
|
openedx/core/djangoapps/external_auth/tests/test_openid_provider.py
|
Python
|
agpl-3.0
| 21,207
|
[
"Brian"
] |
02807b0ffab0b8be3a217f4df8cf9c0ae63712252adde68cfbd4a24b114b2f02
|
#!/usr/bin/python
# Physics, a 2D Physics Playground for Kids
# Copyright (C) 2008 Alex Levenson and Brian Jordan
# Copyright (C) 2012 Daniel Francis
# Copyright (C) 2012-13 Walter Bender
# Copyright (C) 2013 Sai Vineet
# Copyright (C) 2012-13 Sugar Labs
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Elements is Copyright (C) 2008, The Elements Team, <elements@linuxuser.at>
# Wiki: http://wiki.sugarlabs.org/go/Activities/Physics
# Code: git://git.sugarlabs.org/physics/mainline.git
import os
from gi.repository import Gtk
from gi.repository import Gdk
import pygame
from pygame.locals import MOUSEBUTTONUP
import Box2D as box2d
import myelements as elements
import tools
class PhysicsGame:
def __init__(self, activity):
self.activity = activity
# Get everything set up
self.clock = pygame.time.Clock()
self.in_focus = True
# Create the name --> instance map for components
self.toolList = {}
for c in tools.allTools:
self.toolList[c.name] = c(self)
self.currentTool = self.toolList[tools.allTools[0].name]
# Set up the world (instance of Elements)
self.box2d = box2d
self.opening_queue = None
self.running = True
self.initialise = True
self.full_pos_list = []
self.tracked_bodies = 0
self.trackinfo = {}
self.box2d_fps = 50
def set_game_fps(self, fps):
self.box2d_fps = fps
def switch_off_fake_pygame_cursor_cb(self, panel, event):
self.show_fake_cursor = False
def switch_on_fake_pygame_cursor_cb(self, panel, event):
self.show_fake_cursor = True
def write_file(self, path):
# Saving to journal
self.world.add.remove_mouseJoint()
additional_data = {
'trackinfo': self.trackinfo,
'full_pos_list': self.full_pos_list,
'tracked_bodies': self.tracked_bodies
}
self.world.json_save(path, additional_data, serialize=True)
def read_file(self, path):
# Loading from journal
self.opening_queue = path
def run(self):
if self.initialise:
self.initialise = False
# Fake a Sugar cursor for the pyGame canvas area
self.show_fake_cursor = True
pygame.mouse.set_cursor((8, 8), (0, 0), (0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0))
self.cursor_picture = pygame.image.load('standardcursor.png')
self.cursor_picture.convert_alpha()
self.canvas.connect('enter_notify_event',
self.switch_on_fake_pygame_cursor_cb)
self.canvas.connect('leave_notify_event',
self.switch_off_fake_pygame_cursor_cb)
self.canvas.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK)
self.screen = pygame.display.get_surface()
self.world = elements.Elements(self.screen.get_size())
self.world.renderer.set_surface(self.screen)
self.world.add.ground()
if self.opening_queue:
                path = self.opening_queue.encode('ascii', 'ignore')  # 'convert' is not a valid codec error handler
if os.path.exists(path):
self.world.json_load(path, serialized=True)
if 'full_pos_list' in self.world.additional_vars:
self.full_pos_list = \
self.world.additional_vars['full_pos_list']
if 'trackinfo' in self.world.additional_vars:
self.trackinfo = self.world.additional_vars['trackinfo']
if 'tracked_bodies' in self.world.additional_vars:
self.tracked_bodies = \
self.world.additional_vars['tracked_bodies']
while self.running:
# Pump GTK messages.
while Gtk.events_pending():
Gtk.main_iteration()
if not self.running:
break
# Pump PyGame messages.
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.VIDEORESIZE:
pygame.display.set_mode(event.size, pygame.RESIZABLE)
self.currentTool.handleEvents(event)
if event.type == MOUSEBUTTONUP:
# if event.button == 1:
self.show_fake_cursor = True
if self.in_focus:
# Drive motors
if self.world.run_physics:
bodies_present = len(self.world.world.bodies)
clear_all_active = self.activity.clear_all.get_sensitive()
                    if bodies_present > 2 and not clear_all_active:
                        self.activity.clear_all.set_sensitive(True)
                    elif bodies_present <= 2 and clear_all_active:
                        self.activity.clear_all.set_sensitive(False)
poslist = self.full_pos_list
clear_trace_active = \
self.activity.clear_trace.get_sensitive()
if poslist:
if not poslist[0]:
if clear_trace_active:
self.activity.clear_trace.set_sensitive(False)
else:
if clear_trace_active is False:
self.activity.clear_trace.set_sensitive(True)
for key, info in self.trackinfo.items():
# [host_body, tracker, color, destroyed?]
body = info[1]
if info[3] is False: # Not destroyed the pen
trackdex = info[4]
def to_screen(pos):
px = self.world.meter_to_screen(
pos[0])
py = self.world.meter_to_screen(
pos[1])
py = self.world.renderer.get_surface() \
.get_height() - py
return (px, py)
x = body.position.x
y = body.position.y
tupled_pos = to_screen((x, y))
posx = tupled_pos[0]
posy = tupled_pos[1]
try:
self.full_pos_list[trackdex].append(posx)
self.full_pos_list[trackdex].append(posy)
except IndexError:
self.full_pos_list.append([posx, posy])
'''
for body in self.world.world.GetBodyList():
if isinstance(body.userData, dict):
if 'rollMotor' in body.userData:
rollmotor = body.userData['rollMotor']
diff = rollmotor['targetVelocity'] - \
body.GetAngularVelocity()
body.ApplyTorque(rollmotor['strength'] * \
diff * body.getMassData().I)
'''
# Update & Draw World
self.world.update(fps=self.box2d_fps)
self.screen.fill((240, 240, 240)) # #f0f0f0, light-grey
self.world.draw()
# Draw output from tools
self.currentTool.draw()
            # Show a Sugar-like cursor for UI consistency
if self.show_fake_cursor:
self.screen.blit(self.cursor_picture,
pygame.mouse.get_pos())
# Flip Display
pygame.display.flip()
# Stay < 30 FPS to help keep the rest of the platform responsive
self.clock.tick(30) # Originally 50
return False
def setTool(self, tool):
self.currentTool.cancel()
self.currentTool = self.toolList[tool]
self.currentTool.button_activated()
def get_activity(self):
return self.activity
|
walterbender/physics
|
physics.py
|
Python
|
gpl-3.0
| 9,021
|
[
"Brian"
] |
2a4012d53441a289de3ff5c4e757349d605af0e02bb4d7a1f988c87c43416d62
|
"""ebsd module to manipulate Electron Back Scattered data sets."""
import h5py
import numpy as np
import os
from pymicro.crystal.microstructure import Orientation
from pymicro.crystal.lattice import Symmetry, CrystallinePhase, Lattice
class OimPhase(CrystallinePhase):
"""A class to handle a phase. This is just a child of the class
`CrystallinePhase` where we add 2 additional attributes: `hklFamilies` and
`categories`.
"""
def __init__(self, id):
CrystallinePhase.__init__(self, phase_id=id, name='unknown', lattice=None)
self.hklFamilies = []
self.categories = []
class OimHklFamily:
def __init__(self):
self.hkl = [0, 0, 0]
self.useInIndexing = 0
self.diffractionIntensity = 0.0
self.showBands = 0
class OimScan:
"""OimScan class to handle files from EDAX software OIM."""
def __init__(self, shape, resolution=(1.0, 1.0)):
"""Create an empty EBSD scan."""
self.x_star = 0
self.y_star = 0
self.z_star = 0
self.working_distance = 0
self.grid_type = 'SqrGrid'
self.cols = shape[0]
self.rows = shape[1]
self.xStep = resolution[0]
self.yStep = resolution[1]
self.operator = ''
self.sample_id = ''
self.scan_id = ''
self.phase_list = []
self.init_arrays()
def __repr__(self):
"""Provide a string representation of the class."""
s = 'EBSD scan of size %d x %d' % (self.cols, self.rows)
s += '\nspatial resolution: xStep=%.1f, yStep=%.1f' % (self.xStep, self.yStep)
return s
def init_arrays(self):
"""Memory allocation for all necessary arrays."""
self.euler = np.zeros((self.cols, self.rows, 3))
self.x = np.zeros((self.cols, self.rows))
self.y = np.zeros((self.cols, self.rows))
self.iq = np.zeros((self.cols, self.rows))
self.ci = np.zeros((self.cols, self.rows))
self.phase = np.zeros((self.cols, self.rows), dtype='int')
@staticmethod
def from_file(file_path):
"""Create a new EBSD scan by reading a data file.
At present, only hdf5 format is supported.
:param str file_path: the path to the EBSD scan.
:raise ValueError: if the scan is not in format HDF5.
:return: a new `OimScan` instance.
"""
base_name, ext = os.path.splitext(os.path.basename(file_path))
print(base_name, ext)
if ext in ['.h5', '.hdf5']:
scan = OimScan.read_h5(file_path)
elif ext == '.osc':
scan = OimScan.read_osc(file_path)
elif ext == '.ang':
scan = OimScan.read_ang(file_path)
elif ext == '.ctf':
scan = OimScan.read_ctf(file_path)
else:
raise ValueError('only HDF5, OSC, ANG or CTF formats are '
'supported, please convert your scan')
return scan
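    # Usage sketch (hypothetical file name):
    #   scan = OimScan.from_file('my_scan.h5')
    #   print(scan)  # prints size and spatial resolution via __repr__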
@staticmethod
def read_osc(file_path):
"""Read a scan in binary OSC format.
Code inspired from the MTEX project loadEBSD_osc.m function.
:param str file_path: the path to the osc file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
# the data section is preceded by this pattern
start_hex = ['B9', '0B', 'EF', 'FF', '02', '00', '00', '00']
start_bytes = np.array([int(byte, 16) for byte in start_hex])
with open(file_path, 'r') as f:
print('reading EBSD scan from file %s' % file_path)
header = np.fromfile(f, dtype=np.uint32, count=8)
n = header[6]
print('%d data points in EBSD scan' % n)
f.seek(0)
buffer = np.fromfile(f, dtype=np.uint8, count=2**20)
# search for the start pattern
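            # np.correlate(buffer, start_bytes) reaches dot(start_bytes, start_bytes)
            # where the buffer matches the 8-byte marker; the first index attaining
            # that value is taken as the start of the data section.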
start = np.where(np.correlate(buffer, start_bytes, mode='valid')
== np.dot(start_bytes, start_bytes))[0][0]
print('start sequence located at byte %d' % start)
f.seek(start + 8)
# data count
data_count = np.fromfile(f, dtype=np.uint32, count=1)[0]
if round(((data_count / 4 - 2) / 10) / n) != 1:
f.seek(start + 8)
# the next 8 bytes are float values for xStep and yStep
scan.xStep = np.fromfile(f, dtype=np.float32, count=1)[0]
scan.yStep = np.fromfile(f, dtype=np.float32, count=1)[0]
print('spatial resolution: xStep=%.1f, yStep=%.1f' % (scan.xStep, scan.yStep))
# now read the payload which contains 10 fields for the n measurements
data = np.fromfile(f, count=n*10, dtype=np.float32)
data = np.reshape(data, (n, 10))
scan.cols = int(max(data[:, 3]) / scan.xStep + 1)
scan.rows = int(max(data[:, 4]) / scan.yStep + 1)
print('size of scan is %d x %d' % (scan.cols, scan.rows))
assert n == scan.cols * scan.rows
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 3], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 4], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
return scan
@staticmethod
def read_ang(file_path):
"""Read a scan in ang ascii format.
        :raise ValueError: if the grid type is not square.
:param str file_path: the path to the ang file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with open(file_path, 'r') as f:
# start by parsing the header
line = f.readline().strip()
while line.startswith('#'):
tokens = line.split()
if len(tokens) <= 2:
line = f.readline().strip()
continue
if tokens[1] == 'TEM_PIXperUM':
pass
elif tokens[1] == 'x-star':
scan.x_star = float(tokens[2])
elif tokens[1] == 'y-star':
scan.y_star = float(tokens[2])
elif tokens[1] == 'z-star':
scan.z_star = float(tokens[2])
elif tokens[1] == 'WorkingDistance':
scan.working_distance = float(tokens[2])
elif tokens[1] == 'Phase':
phase = OimPhase(int(tokens[2]))
line = f.readline().strip()
phase.name = line.split()[2]
line = f.readline().strip()
try:
phase.formula = line.split()[2]
except IndexError:
phase.formula = ''
line = f.readline().strip()
line = f.readline().strip()
sym = Symmetry.from_tsl(int(line.split()[2]))
tokens = f.readline().strip().split()
# convert lattice constants to nm
lattice = Lattice.from_parameters(float(tokens[2]) / 10,
float(tokens[3]) / 10,
float(tokens[4]) / 10,
float(tokens[5]),
float(tokens[6]),
float(tokens[7]),
symmetry=sym)
phase.set_lattice(lattice)
scan.phase_list.append(phase)
elif tokens[1] == 'GRID:':
scan.grid_type = tokens[2]
print('grid type is %s' % tokens[2])
if scan.grid_type != 'SqrGrid':
raise ValueError('only square grid is supported, please convert your scan')
elif tokens[1] == 'XSTEP':
scan.xStep = float(tokens[2])
elif tokens[1] == 'YSTEP':
scan.yStep = float(tokens[2])
elif tokens[1].startswith('NCOLS'):
scan.cols = int(tokens[2])
elif tokens[1].startswith('NROWS'):
scan.rows = int(tokens[2])
elif tokens[1] == 'OPERATOR:':
scan.operator = tokens[2]
elif tokens[1] == 'SAMPLEID:':
scan.sample_id = tokens[2] if len(tokens) >= 3 else ''
elif tokens[1] == 'SCANID:':
scan.scan_id = tokens[2] if len(tokens) >= 3 else ''
line = f.readline().strip()
print('finished reading header, scan size is %d x %d' % (scan.cols, scan.rows))
# now read the payload
data = np.zeros((scan.cols * scan.rows, len(line.split())))
data[0] = np.fromstring(line, sep=' ')
i = 1
for line in f:
data[i] = np.fromstring(line, sep=' ')
i += 1
# we have read all the data, now repack everything into the different arrays
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 3], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 4], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
return scan
    @staticmethod
    def read_ctf(file_path):
"""Read a scan in Channel Text File format.
:raise ValueError: if the job mode is not grid.
:param str file_path: the path to the ctf file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with open(file_path, 'r') as f:
# start by parsing the header
line = f.readline().strip()
while not line.startswith('Phases'):
tokens = line.split()
if tokens[0] == 'JobMode':
scan.grid_type = tokens[1]
if scan.grid_type != 'Grid':
raise ValueError('only square grid is supported, please convert your scan')
elif tokens[0] == 'XCells':
scan.cols = int(tokens[1])
elif tokens[0] == 'YCells':
scan.rows = int(tokens[1])
elif tokens[0] == 'XStep':
scan.xStep = float(tokens[1])
elif tokens[0] == 'YStep':
scan.yStep = float(tokens[1])
line = f.readline().strip()
# read the phases
tokens = line.split()
n_phases = int(tokens[1])
for i in range(n_phases):
# read this phase (lengths, angles, name, ?, space group, description)
line = f.readline().strip()
tokens = line.split()
phase = CrystallinePhase(i + 1)
                phase.name = tokens[2]
                phase.description = tokens[5]
sym = Symmetry.from_space_group(int(tokens[4]))
lattice_lengths = tokens[0].split(';')
lattice_angles = tokens[1].split(';')
# convert lattice constants to nm
lattice = Lattice.from_parameters(float(lattice_lengths[0]) / 10,
float(lattice_lengths[1]) / 10,
float(lattice_lengths[2]) / 10,
float(lattice_angles[0]),
float(lattice_angles[1]),
float(lattice_angles[2]),
symmetry=sym)
phase.set_lattice(lattice)
print('adding phase %s' % phase)
scan.phase_list.append(phase)
# read the line before the data
line = f.readline().strip()
# Phase X Y Bands Error Euler1 Euler2 Euler3 MAD BC BS
# now read the payload
data = np.zeros((scan.cols * scan.rows, len(line.split())))
i = 0
for line in f:
data[i] = np.fromstring(line, sep=' ')
i += 1
# we have read all the data, now repack everything into the different arrays
scan.init_arrays()
scan.euler[:, :, 0] = np.reshape(data[:, 5], (scan.rows, scan.cols)).T
scan.euler[:, :, 1] = np.reshape(data[:, 6], (scan.rows, scan.cols)).T
scan.euler[:, :, 2] = np.reshape(data[:, 7], (scan.rows, scan.cols)).T
scan.x = np.reshape(data[:, 1], (scan.rows, scan.cols)).T
scan.y = np.reshape(data[:, 2], (scan.rows, scan.cols)).T
scan.iq = np.reshape(data[:, 9], (scan.rows, scan.cols)).T
scan.ci = np.reshape(data[:, 10], (scan.rows, scan.cols)).T
scan.phase = np.reshape(data[:, 0], (scan.rows, scan.cols)).T
return scan
def read_header(self, header):
# read the header, it contains the following keys: 'Camera Azimuthal Angle', 'Camera Elevation Angle',
# 'Coordinate System', 'Grid Type', 'Notes', 'Operator', 'Pattern Center Calibration', 'Phase', 'Sample ID',
# 'Sample Tilt', 'Scan ID', 'Step X', 'Step Y', 'Working Distance', 'nColumns', 'nRows'
self.x_star = header['Pattern Center Calibration']['x-star'][0]
self.y_star = header['Pattern Center Calibration']['y-star'][0]
self.z_star = header['Pattern Center Calibration']['z-star'][0]
self.working_distance = header['Camera Elevation Angle'][0]
self.grid_type = header['Grid Type'][0].decode('utf-8')
if self.grid_type != 'SqrGrid':
raise ValueError('only square grid is supported, please convert your scan')
self.cols = header['nColumns'][0]
self.rows = header['nRows'][0]
self.xStep = header['Step X'][0]
self.yStep = header['Step Y'][0]
self.operator = header['Operator'][0].decode('utf-8')
self.sample_id = header['Sample ID'][0].decode('utf-8')
self.scan_id = header['Scan ID'][0].decode('utf-8')
# get the different phases
for key in header['Phase'].keys():
phase = header['Phase'][key]
# each phase has the following keys: 'Formula', 'Info', 'Lattice Constant a', 'Lattice Constant alpha',
# 'Lattice Constant b', 'Lattice Constant beta', 'Lattice Constant c', 'Lattice Constant gamma',
# 'Laue Group', 'MaterialName', 'NumberFamilies', 'Point Group', 'Symmetry', 'hkl Families'
phase = OimPhase(int(key))
phase.name = header['Phase'][key]['MaterialName'][0].decode('utf-8')
phase.formula = header['Phase'][key]['Formula'][0].decode('utf-8')
phase.description = header['Phase'][key]['Info'][0].decode('utf-8')
# create a crystal lattice for this phase
sym = Symmetry.from_tsl(header['Phase'][key]['Symmetry'][0])
# convert lattice constants to nm
a = header['Phase'][key]['Lattice Constant a'][0] / 10
b = header['Phase'][key]['Lattice Constant b'][0] / 10
c = header['Phase'][key]['Lattice Constant c'][0] / 10
alpha = header['Phase'][key]['Lattice Constant alpha'][0]
beta = header['Phase'][key]['Lattice Constant beta'][0]
gamma = header['Phase'][key]['Lattice Constant gamma'][0]
lattice = Lattice.from_parameters(a, b, c, alpha, beta, gamma, symmetry=sym)
phase.set_lattice(lattice)
for row in header['Phase'][key]['hkl Families']:
family = OimHklFamily()
family.hkl = [row[0], row[1], row[2]]
family.useInIndexing = row[4]
family.diffractionIntensity = row[3]
family.showBands = row[5]
phase.hklFamilies.append(family)
phase.categories = [0, 0, 0, 0, 0]
self.phase_list.append(phase)
@staticmethod
def read_h5(file_path):
"""Read a scan in H5 format.
        :raise ValueError: if the grid type is not square.
:param str file_path: the path to the h5 file to read.
:return: a new instance of OimScan populated with the data from the file.
"""
scan = OimScan((0, 0))
with h5py.File(file_path, 'r') as f:
# find out the scan key (the third one)
key_list = [key for key in f.keys()]
scan_key = key_list[2]
print('reading EBSD scan %s from file %s' % (scan_key, file_path))
header = f[scan_key]['EBSD']['Header']
scan.read_header(header)
# now initialize the fields
scan.init_arrays()
data = f[scan_key]['EBSD']['Data']
scan.euler[:, :, 0] = np.reshape(
data['Phi1'], (scan.rows, scan.cols)).transpose(1, 0)
scan.euler[:, :, 1] = np.reshape(
data['Phi'], (scan.rows, scan.cols)).transpose(1, 0)
scan.euler[:, :, 2] = np.reshape(
data['Phi2'], (scan.rows, scan.cols)).transpose(1, 0)
scan.x = np.reshape(data['X Position'],
(scan.rows, scan.cols)).transpose(1, 0)
scan.y = np.reshape(data['Y Position'],
(scan.rows, scan.cols)).transpose(1, 0)
scan.iq = np.reshape(data['IQ'], (scan.rows, scan.cols)).transpose(1, 0)
scan.ci = np.reshape(data['CI'], (scan.rows, scan.cols)).transpose(1, 0)
scan.phase = np.reshape(data['Phase'],
(scan.rows, scan.cols)).transpose(1, 0)
return scan
def get_phase(self, phase_id=1):
"""Look for a phase with the given id in the list.
:raise ValueError: if the phase_id cannot be found.
:param int phase_id: the id of the phase.
:return: the phase instance with the corresponding id
"""
try:
phase_index = [phase.phase_id for phase in self.phase_list].index(phase_id)
except ValueError:
            raise ValueError('phase %d not in list' % phase_id)
return self.phase_list[phase_index]
def compute_ipf_maps(self):
"""Compute the IPF maps for the 3 cartesian directions.
.. warning::
This function is not vectorized and will be slow for large EBSD maps.
"""
self.ipf001 = np.empty_like(self.euler)
self.ipf010 = np.empty_like(self.euler)
self.ipf100 = np.empty_like(self.euler)
for i in range(self.rows):
for j in range(self.cols):
o = Orientation.from_euler(np.degrees(self.euler[j, i]))
try:
sym = self.get_phase(int(self.phase[j, i])).get_symmetry()
# compute IPF-Z
self.ipf001[j, i] = o.ipf_color(axis=np.array([0., 0., 1.]),
symmetry=sym)
# compute IPF-Y
self.ipf010[j, i] = o.ipf_color(axis=np.array([0., 1., 0.]),
symmetry=sym)
# compute IPF-X
self.ipf100[j, i] = o.ipf_color(axis=np.array([1., 0., 0.]),
symmetry=sym)
except ValueError:
self.ipf001[j, i] = [0., 0., 0.]
self.ipf010[j, i] = [0., 0., 0.]
self.ipf100[j, i] = [0., 0., 0.]
progress = 100 * (i + 1) / self.rows
print('computing IPF maps: {0:.2f} %'.format(progress), end='\r')
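    # Usage sketch: after scan.compute_ipf_maps(), the (cols, rows, 3) arrays
    # ipf001 / ipf010 / ipf100 hold RGB colors; they can be displayed with e.g.
    # matplotlib's imshow (matplotlib is an assumption, it is not imported here).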
def segment_grains(self, tol=5., min_ci=0.2):
"""Segment the grains based on the euler angle maps.
The segmentation is carried out using a region growing algorithm based
on an orientation similarity criterion.
        The id 0 is reserved for the background, which is assigned to pixels
        with a confidence index lower than `min_ci`. Other pixels are first
        marked as unlabeled using -1, then pixels are evaluated one by one.
A new grain is created and non already assigned neighboring pixels are
evaluated based on the crystal misorientation. If the misorientation is
lower than `tol`, the pixel is assigned to the current grain and its
neighbors added to the list of candidates. When no more candidates are
present, the next pixel is evaluated and a new grain is created.
.. warning::
This function does not account yet for multiple phases. Grains should
be created separately for each crystallographic phase.
:param float tol: misorientation tolerance in degrees.
:param float min_ci: minimum confidence index for a pixel to be a valid
EBSD measurement.
:raise ValueError: if no phase is present in the scan.
:return: a numpy array of the grain labels.
"""
if not len(self.phase_list) > 0:
raise ValueError('at least one phase must be present in this EBSD '
'scan to segment the grains')
# segment the grains
print('grain segmentation for EBSD scan, misorientation tolerance={:.1f}, '
'minimum confidence index={:.1f}'.format(tol, min_ci))
grain_ids = np.zeros_like(self.iq, dtype='int')
grain_ids += -1 # mark all pixels as non assigned
# start by assigning bad pixel to grain 0
grain_ids[self.ci <= min_ci] = 0
n_grains = 0
progress = 0
for j in range(self.rows):
for i in range(self.cols):
if grain_ids[i, j] >= 0:
continue # skip pixel
# create new grain with the pixel as seed
n_grains += 1
# print('segmenting grain %d' % n_grains)
grain_ids[i, j] = n_grains
candidates = [(i, j)]
# apply region growing based on the angle misorientation (strong connectivity)
while len(candidates) > 0:
pixel = candidates.pop()
sym = self.phase_list[self.phase[pixel]].get_symmetry()
# print('* pixel is {}, euler: {}'.format(pixel, np.degrees(euler[pixel])))
# get orientation of this pixel
o = Orientation.from_euler(np.degrees(self.euler[pixel]))
# look around this pixel
east = (pixel[0] - 1, pixel[1])
north = (pixel[0], pixel[1] - 1)
west = (pixel[0] + 1, pixel[1])
south = (pixel[0], pixel[1] + 1)
neighbors = [east, north, west, south]
# look at unlabeled connected pixels
neighbor_list = [n for n in neighbors if
0 <= n[0] < self.cols and
0 <= n[1] < self.rows and
grain_ids[n] == -1]
# print(' * neighbors list is {}'.format([east, north, west, south]))
for neighbor in neighbor_list:
# check misorientation
o_neighbor = Orientation.from_euler(np.degrees(self.euler[neighbor]))
mis, _, _ = o.disorientation(o_neighbor, crystal_structure=sym)
if mis * 180 / np.pi < tol:
# add to this grain
grain_ids[neighbor] = n_grains
# add to the list of candidates
candidates.append(neighbor)
progress = 100 * np.sum(grain_ids >= 0) / (self.cols * self.rows)
print('segmentation progress: {0:.2f} %'.format(progress), end='\r')
print('\n%d grains were segmented' % len(np.unique(grain_ids)))
return grain_ids
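    # Usage sketch (hypothetical file name):
    #   scan = OimScan.from_file('my_scan.h5')
    #   grain_ids = scan.segment_grains(tol=5., min_ci=0.2)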
def change_orientation_reference_frame(self):
"""Change the reference frame for orientation data.
In OIM, the reference frame for orientation data (euler angles) is
termed A1A2A3 and differs from the sample reference frame XYZ. This can
        be set before the acquisition, but the default case is:
        X = -A2, Y = -A1, Z = -A3.
        This method changes the reference frame used for the euler angles.
"""
# transformation matrix from A1A2A3 to XYZ
T = np.array([[0., -1., 0.], # X is -A2
[-1., 0., 0.], # Y is -A1
[0., 0., -1.]]) # Z is -A3
for j in range(self.rows):
for i in range(self.cols):
o_tsl = Orientation.from_euler(np.degrees(self.euler[i, j, :]))
g_xyz = np.dot(o_tsl.orientation_matrix(), T.T) # move to XYZ local frame
o_xyz = Orientation(g_xyz)
self.euler[i, j, :] = np.radians(o_xyz.euler)
progress = 100 * (j * self.cols + i) / (self.cols * self.rows)
print('changing orientation reference frame progress: {0:.2f} %'.format(progress), end='\r')
print('\n')
def to_h5(self, file_name):
"""Write the EBSD scan as a hdf5 file compatible OIM software (in
progress).
:param str file_name: name of the output file.
"""
f = h5py.File('%s.h5' % file_name, 'w')
f.attrs[' Manufacturer'] = np.string_('EDAX')
f.attrs[' Version'] = np.string_('OIM Analysis 7.3.0 x64 [09-01-15]')
# create the group containing the data
data_container = f.create_group('DataContainer')
ebsd = data_container.create_group('EBSD')
ebsd_header = ebsd.create_group('Header')
ebsd_header.create_dataset('Camera Azimuthal Angle', data=np.array([0.0], dtype=np.float32))
ebsd_header.create_dataset('Camera Elevation Angle', data=np.array([self.working_distance], dtype=np.float32))
pattern_center = ebsd_header.create_group('Pattern Center Calibration')
pattern_center.create_dataset('x-star', data=np.array(self.x_star, dtype=np.float32))
pattern_center.create_dataset('y-star', data=np.array(self.y_star, dtype=np.float32))
pattern_center.create_dataset('z-star', data=np.array(self.z_star, dtype=np.float32))
ebsd_data = ebsd.create_group('Data')
ci = ebsd_data.create_dataset('CI', data=self.ci)
iq = ebsd_data.create_dataset('IQ', data=self.iq)
phase = ebsd_data.create_dataset('Phase', data=self.phase)
phi1 = ebsd_data.create_dataset('Phi1', data=self.euler[:, :, 0])
phi = ebsd_data.create_dataset('Phi', data=self.euler[:, :, 1])
phi2 = ebsd_data.create_dataset('Phi2', data=self.euler[:, :, 2])
x = ebsd_data.create_dataset('X Position', data=self.x)
y = ebsd_data.create_dataset('Y Position', data=self.y)
f.close()
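    # Usage sketch: scan.to_h5('my_scan') writes 'my_scan.h5'; the .h5 suffix
    # is appended by the method itself.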
|
heprom/pymicro
|
pymicro/crystal/ebsd.py
|
Python
|
mit
| 28,109
|
[
"CRYSTAL"
] |
17c18cd176aabd14edeedede99ef9edd8ff8f131cf1b1f7b17d851bca8d55b41
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
def highlightBlock(block, vtkwindow):
"""
Input:
block[BlockInfo]: This block will be a child of /BCs
vtkwindow[VTKWindowPlugin]: The vtk window to set the highlights on
"""
if not vtkwindow.isVisible() or not vtkwindow.isEnabled():
return
if not block.path.startswith("/BCs/"):
vtkwindow.onHighlight()
return
boundary_param = None
block_param = None
if block.getParamInfo("boundary"):
boundary_param = block.getParamInfo("boundary").value.split()
if block.getParamInfo("block"):
block_param = block.getParamInfo("block").value.split()
primary = block.getParamInfo("primary")
if boundary_param or block_param:
vtkwindow.onHighlight(boundary=boundary_param, block=block_param)
elif primary:
secondary = block.getParamInfo("secondary")
if secondary:
vtkwindow.onHighlight(boundary=[primary.value, secondary.value])
else:
vtkwindow.onHighlight()
else:
vtkwindow.onHighlight()
|
harterj/moose
|
python/peacock/Input/BCHighlighter.py
|
Python
|
lgpl-2.1
| 1,358
|
[
"MOOSE",
"VTK"
] |
b6a02765e9d42abe725958b7b51c77a6ac8760ea1cf0c8ff54c0aed496e1771b
|
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, tddft, scf
from pyscf.nao import bse_iter
from pyscf.nao import polariz_nonin_ave
from pyscf.data.nist import HARTREE2EV
class KnowValues(unittest.TestCase):
def test_0164_bse_h2o_spin2_uhf_nonin(self):
""" Interacting case """
mol=gto.M(verbose=0,atom='O 0 0 0;H 0 0.489 1.074;H 0 0.489 -1.074',basis='cc-pvdz',spin=2)
gto_mf = scf.UHF(mol)
gto_mf.kernel()
omegas = np.arange(0.0, 2.0, 0.01) + 1j*0.03
p_ave = -polariz_nonin_ave(gto_mf, mol, omegas).imag
data = np.array([omegas.real*HARTREE2EV, p_ave])
np.savetxt('test_0164_bse_h2o_spin2_uhf_nonin_pyscf.txt', data.T, fmt=['%f','%f'])
data_ref = np.loadtxt('test_0164_bse_h2o_spin2_uhf_nonin_pyscf.txt-ref').T
self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
nao_td = bse_iter(mf=gto_mf, gto=mol, verbosity=0)
polariz = -nao_td.polariz_nonin_ave_matelem(omegas).imag
data = np.array([omegas.real*HARTREE2EV, polariz])
np.savetxt('test_0164_bse_h2o_spin2_uhf_nonin_matelem.txt', data.T, fmt=['%f','%f'])
#data_ref = np.loadtxt('test_0164_bse_h2o_spin2_uks_nonin_matelem.txt-ref').T
#self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
polariz = -nao_td.comp_polariz_nonin_ave(omegas).imag
data = np.array([omegas.real*HARTREE2EV, polariz])
np.savetxt('test_0164_bse_h2o_spin2_uhf_nonin_nao.txt', data.T, fmt=['%f','%f'])
#data_ref = np.loadtxt('test_0164_bse_h2o_spin2_uhf_nonin_nao.txt-ref').T
#self.assertTrue(np.allclose(data_ref, data, atol=1e-6, rtol=1e-3))
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0164_bse_h2o_spin2_uhf_nonin.py
|
Python
|
apache-2.0
| 1,689
|
[
"PySCF"
] |
c91d5366aaeb3e3b8d4f48e3eddf54de4b4e4fa04683dd8ef71a7175cb17fa02
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Google Cloud Console -
https://cloud.google.com/console
Setting up Service Account authentication (note that you need the cryptography
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically). The key you download
is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
It needs to be converted to the PEM format. Please note, the PKCS12 format
is deprecated and may be removed in a future release.
- Convert the key using OpenSSL (the default password is 'notasecret').
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement
from typing import Optional
try:
import simplejson as json
except ImportError:
import json # type: ignore
import logging
import base64
import errno
import time
import datetime
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.base import BaseDriver
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
except ImportError:
# The cryptography library is unavailable
SHA256 = None # type: ignore
UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
LOG = logging.getLogger(__name__)
def _utcnow():
"""
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
return datetime.datetime.utcnow()
def _utc_timestamp(datetime_obj):
"""
Return string of datetime_obj in the UTC Timestamp Format
"""
return datetime_obj.strftime(UTC_TIMESTAMP_FORMAT)
def _from_utc_timestamp(timestamp):
"""
Return datetime obj where date and time are pulled from timestamp string.
"""
return datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)
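# Usage sketch: the two helpers are inverses at second resolution, e.g.
#   ts = _utc_timestamp(_utcnow())    # '2013-01-23T06:20:17Z' style string
#   dt = _from_utc_timestamp(ts)      # back to a datetime (sub-second lost)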
def _get_gce_metadata(path='', retry_failed: Optional[bool] = None):
try:
url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
headers = {'Metadata-Flavor': 'Google'}
response = get_response_object(url, headers=headers,
retry_failed=retry_failed)
return response.status, '', response.body
except Exception as e:
return -1, str(e), None
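# Usage sketch (the metadata host is only resolvable from inside a GCE
# instance):
#   status, err, body = _get_gce_metadata('instance/zone')
#   if status == 200:
#       zone = body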
class GoogleAuthError(LibcloudError):
"""Generic Error class for various authentication errors."""
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
class GoogleBaseError(ProviderError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
pass
class JsonParseError(GoogleBaseError):
pass
class ResourceNotFoundError(GoogleBaseError):
def __init__(self, value, http_code, code, driver=None):
self.code = code
if isinstance(value, dict) and 'message' in value and \
value['message'].count('/') == 1 and \
value['message'].count('projects/') == 1:
value['message'] = value['message'] + ". A missing project " \
"error may be an authentication issue. " \
"Please ensure your auth credentials match " \
"your project. "
super(ResourceNotFoundError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
pass
class ResourceExistsError(GoogleBaseError):
pass
class ResourceInUseError(GoogleBaseError):
pass
class GoogleResponse(JsonResponse):
"""
Google Base Response class.
"""
def success(self):
"""
Determine if the request was successful.
For the Google response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C{True}
"""
return True
def _get_error(self, body):
"""
Get the error code and message from a JSON response.
Return just the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: Tuple containing error code and message
:rtype: ``tuple`` of ``str`` or ``int``
"""
if 'errors' in body['error']:
err = body['error']['errors'][0]
else:
err = body['error']
if 'code' in err:
code = err.get('code')
message = err.get('message')
else:
code = None
if 'reason' in err:
code = err.get('reason')
message = body.get('error_description', err)
return (code, message)
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
json_error = False
try:
body = json.loads(self.body)
except Exception:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
valid_http_codes = [
httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.CONFLICT,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status, None)
elif 'error' in body:
(code, message) = self._get_error(body)
if code == 'QUOTA_EXCEEDED':
raise QuotaExceededError(message, self.status, code)
elif code == 'RESOURCE_ALREADY_EXISTS':
raise ResourceExistsError(message, self.status, code)
elif code == 'alreadyExists':
raise ResourceExistsError(message, self.status, code)
elif code is not None and code.startswith('RESOURCE_IN_USE'):
raise ResourceInUseError(message, self.status, code)
else:
raise GoogleBaseError(message, self.status, code)
else:
return body
elif self.status == httplib.NOT_FOUND:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise ResourceNotFoundError(message, self.status, code)
elif self.status == httplib.BAD_REQUEST:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise InvalidRequestError(message, self.status, code)
else:
if (not json_error) and ('error' in body):
(code, message) = self._get_error(body)
else:
message = body
code = None
raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(BaseDriver):
name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
"""
Base class for Google Authentication. Should be subclassed for specific
types of authentication.
"""
driver = GoogleBaseDriver
responseCls = GoogleResponse
name = 'Google Auth'
host = 'accounts.google.com'
auth_path = '/o/oauth2/auth'
def __init__(self, user_id, key=None, scopes=None,
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
login_hint=None, **kwargs):
"""
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:param scopes: A list of urls defining the scope of authentication
to grant.
:type scopes: ``list``
:keyword redirect_uri: The Redirect URI for the authentication
request. See Google OAUTH2 documentation for
more info.
:type redirect_uri: ``str``
:keyword login_hint: Login hint for authentication request. Useful
for Installed Application authentication.
:type login_hint: ``str``
"""
scopes = scopes or []
self.scopes = " ".join(scopes)
self.redirect_uri = redirect_uri
self.login_hint = login_hint
super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)
def add_default_headers(self, headers):
"""
Add defaults for 'Content-Type' and 'Host' headers.
"""
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['Host'] = self.host
return headers
def _token_request(self, request_body):
"""
Return an updated token from a token request body.
:param request_body: A dictionary of values to send in the body of the
token request.
:type request_body: ``dict``
:return: A dictionary with updated token information
:rtype: ``dict``
"""
data = urlencode(request_body)
try:
response = self.request('/o/oauth2/token', method='POST',
data=data)
except AttributeError:
raise GoogleAuthError('Invalid authorization response, please '
'check your credentials and time drift.')
token_info = response.object
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
def refresh_token(self, token_info):
"""
Refresh the current token. By default this simply fetches a brand-new
token via get_new_token(); for the GCE connection this means querying
the internal metadata service again.
:param token_info: Dictionary containing token information.
(Not used, but here for compatibility)
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
# pylint: disable=no-member
return self.get_new_token()
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
"""Authentication connection for "Installed Application" authentication."""
def get_code(self):
"""
Give the user a URL that they can visit to authenticate and obtain a
code. This method will ask for that code that the user can paste in.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Code supplied by the user after authenticating
:rtype: ``str``
"""
auth_params = {'response_type': 'code',
'client_id': self.user_id,
'redirect_uri': self.redirect_uri,
'scope': self.scopes,
'state': 'Libcloud Request'}
if self.login_hint:
auth_params['login_hint'] = self.login_hint
data = urlencode(auth_params)
url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
print('\nPlease Go to the following URL and sign in:')
print(url)
if PY3:
code = input('Enter Code: ')
else:
code = raw_input('Enter Code: ') # NOQA pylint: disable=undefined-variable
return code
def get_new_token(self):
"""
Get a new token. Generally used when no previous token exists or there
is no refresh token
:return: Dictionary containing token information
:rtype: ``dict``
"""
# Ask the user for a code
code = self.get_code()
token_request = {'code': code,
'client_id': self.user_id,
'client_secret': self.key,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'}
return self._token_request(token_request)
def refresh_token(self, token_info):
"""
Use the refresh token supplied in the token info to get a new token.
:param token_info: Dictionary containing current token information
:type token_info: ``dict``
:return: A dictionary containing updated token information.
:rtype: ``dict``
"""
if 'refresh_token' not in token_info:
return self.get_new_token()
refresh_request = {'refresh_token': token_info['refresh_token'],
'client_id': self.user_id,
'client_secret': self.key,
'grant_type': 'refresh_token'}
new_token = self._token_request(refresh_request)
if 'refresh_token' not in new_token:
new_token['refresh_token'] = token_info['refresh_token']
return new_token
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for "Service Account" authentication."""
def __init__(self, user_id, key, *args, **kwargs):
"""
Check to see if cryptography is available, and convert key file path
into a key string if the key is in a file.
:param user_id: Email address to be used for Service Account
authentication.
:type user_id: ``str``
:param key: The RSA Key or path to file containing the key.
:type key: ``str``
"""
if SHA256 is None:
raise GoogleAuthError('cryptography library required for '
'Service Account Authentication.')
# Check to see if 'key' is a file and read the file if it is.
if key.find("PRIVATE KEY---") == -1:
# key is a file
keypath = os.path.expanduser(key)
is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
if not is_file_path:
raise ValueError("Missing (or not readable) key "
"file: '%s'" % key)
with open(keypath, 'r') as f:
contents = f.read()
try:
key = json.loads(contents)
key = key['private_key']
except ValueError:
key = contents
super(GoogleServiceAcctAuthConnection, self).__init__(
user_id, key, *args, **kwargs)
def get_new_token(self):
"""
Get a new token using the email address and RSA Key.
:return: Dictionary containing token information
:rtype: ``dict``
"""
# The header is always the same
header = {'alg': 'RS256', 'typ': 'JWT'}
header_enc = base64.urlsafe_b64encode(b(json.dumps(header)))
# Construct a claim set
claim_set = {'iss': self.user_id,
'scope': self.scopes,
'aud': 'https://accounts.google.com/o/oauth2/token',
'exp': int(time.time()) + 3600,
'iat': int(time.time())}
claim_set_enc = base64.urlsafe_b64encode(b(json.dumps(claim_set)))
# The message contains both the header and claim set
message = b'.'.join((header_enc, claim_set_enc))
# Then the message is signed using the key supplied
key = serialization.load_pem_private_key(
b(self.key),
password=None,
backend=default_backend()
)
signature = key.sign(
data=b(message),
padding=PKCS1v15(),
algorithm=SHA256()
)
signature = base64.urlsafe_b64encode(signature)
# Finally the message and signature are sent to get a token
jwt = b'.'.join((message, signature))
request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': jwt}
return self._token_request(request)
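# The assertion sent above is a standard JWT: three base64url-encoded
# segments joined by '.'. A minimal decoding sketch (illustrative only,
# reusing the local names from get_new_token above):
#
#     header_enc, claim_set_enc, signature = jwt.split(b'.')
#     json.loads(base64.urlsafe_b64decode(header_enc))
#     # -> {'alg': 'RS256', 'typ': 'JWT'}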
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
"""Authentication class for self-authentication when used with a GCE
instance that supports serviceAccounts.
"""
def get_new_token(self):
"""
Get a new token from the internal metadata service.
:return: Dictionary containing token information
:rtype: ``dict``
"""
path = '/instance/service-accounts/default/token'
http_code, http_reason, token_info = _get_gce_metadata(path)
if http_code == httplib.NOT_FOUND:
raise ValueError("Service Accounts are not enabled for this "
"GCE instance.")
if http_code != httplib.OK:
raise ValueError("Internal GCE Authorization failed: "
"'%s'" % str(http_reason))
token_info = json.loads(token_info)
if 'expires_in' in token_info:
expire_time = _utcnow() + datetime.timedelta(
seconds=token_info['expires_in'])
token_info['expire_time'] = _utc_timestamp(expire_time)
return token_info
class GoogleAuthType(object):
"""
SA (Service Account),
IA (Installed Application),
GCE (Auth from a GCE instance with service account enabled)
GCS_S3 (Cloud Storage S3 interoperability authentication)
"""
SA = 'SA'
IA = 'IA'
GCE = 'GCE'
GCS_S3 = 'GCS_S3'
ALL_TYPES = [SA, IA, GCE, GCS_S3]
OAUTH2_TYPES = [SA, IA, GCE]
@classmethod
def guess_type(cls, user_id):
if cls._is_sa(user_id):
return cls.SA
elif cls._is_gcs_s3(user_id):
return cls.GCS_S3
elif cls._is_installed_application(user_id):
# NOTE: This should be before "_is_gce()" call so we avoid
# querying GCE metadata service if that's not necessary
return cls.IA
elif cls._is_gce():
return cls.GCE
else:
# TODO: It's probably safe to throw here, but we return cls.IA
# for backward compatibility reasons
return cls.IA
@classmethod
def is_oauth2(cls, auth_type):
return auth_type in cls.OAUTH2_TYPES
@staticmethod
def _is_installed_application(user_id):
return user_id.endswith('apps.googleusercontent.com')
@staticmethod
def _is_gce():
"""
Checks if we can access the GCE metadata server.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
# When using oAuth credentials we check for metadata server first, so
# if server is unavailable and we retry many times before timing out,
# this will slow down the driver instantiation when retrying failed
# requests is enabled globally.
http_code, http_reason, body = _get_gce_metadata(retry_failed=False)
if http_code == httplib.OK and body:
return True
return False
@staticmethod
def _is_gcs_s3(user_id):
"""
Checks S3 key format: alphanumeric chars starting with GOOG.
"""
return user_id.startswith('GOOG')
@staticmethod
def _is_sa(user_id):
return user_id.endswith('.gserviceaccount.com')
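# Examples of the guessing rules above (illustrative values, not exhaustive):
#
#     >>> GoogleAuthType.guess_type('foo@project.iam.gserviceaccount.com')
#     'SA'
#     >>> GoogleAuthType.guess_type('GOOGTS7C7FUP3AIRVJTE2BCD')
#     'GCS_S3'
#     >>> GoogleAuthType.guess_type('1234.apps.googleusercontent.com')
#     'IA'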
class GoogleOAuth2Credential(object):
default_credential_file = '~/.google_libcloud_auth'
def __init__(self, user_id, key, auth_type=None, credential_file=None,
scopes=None, **kwargs):
self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
if self.auth_type not in GoogleAuthType.ALL_TYPES:
raise GoogleAuthError('Invalid auth type: %s' % self.auth_type)
if not GoogleAuthType.is_oauth2(self.auth_type):
raise GoogleAuthError(('Auth type %s cannot be used with OAuth2' %
self.auth_type))
self.user_id = user_id
self.key = key
default_credential_file = '.'.join([self.default_credential_file,
user_id])
self.credential_file = credential_file or default_credential_file
# Default scopes to read/write for compute, storage, and dns.
self.scopes = scopes or [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
]
self.token = self._get_token_from_file()
if self.auth_type == GoogleAuthType.GCE:
self.oauth2_conn = GoogleGCEServiceAcctAuthConnection(
self.user_id, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.SA:
self.oauth2_conn = GoogleServiceAcctAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
elif self.auth_type == GoogleAuthType.IA:
self.oauth2_conn = GoogleInstalledAppAuthConnection(
self.user_id, self.key, self.scopes, **kwargs)
else:
raise GoogleAuthError('Invalid auth_type: %s' %
str(self.auth_type))
if self.token is None:
self.token = self.oauth2_conn.get_new_token()
self._write_token_to_file()
@property
def access_token(self):
if self.token_expire_utc_datetime < _utcnow():
self._refresh_token()
return self.token['access_token']
@property
def token_expire_utc_datetime(self):
return _from_utc_timestamp(self.token['expire_time'])
def _refresh_token(self):
self.token = self.oauth2_conn.refresh_token(self.token)
self._write_token_to_file()
def _get_token_from_file(self):
"""
Read credential file and return token information.
Mocked in libcloud.test.common.google.GoogleTestCase.
:return: Token information dictionary, or None
:rtype: ``dict`` or ``None``
"""
token = None
filename = os.path.realpath(os.path.expanduser(self.credential_file))
try:
with open(filename, 'r') as f:
data = f.read()
token = json.loads(data)
except (IOError, ValueError) as e:
# Note: File related errors (IOError) and errors related to json
# parsing of the data (ValueError) are not fatal.
LOG.info('Failed to read cached auth token from file "%s": %s',
filename, str(e))
return token
def _write_token_to_file(self):
"""
Write token to credential file.
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
filename = os.path.expanduser(self.credential_file)
filename = os.path.realpath(filename)
try:
data = json.dumps(self.token)
write_flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
with os.fdopen(os.open(filename, write_flags,
int('600', 8)), 'w') as f:
f.write(data)
except Exception as e:
# Note: Failure to write (cache) token in a file is not fatal. It
# simply means degraded performance since we will need to acquire a
# new token each time the script runs.
LOG.info('Failed to write auth token to file "%s": %s',
filename, str(e))
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
"""Base connection class for interacting with Google APIs."""
driver = GoogleBaseDriver
responseCls = GoogleResponse
host = 'www.googleapis.com'
poll_interval = 2.0
timeout = 180
def __init__(self, user_id, key=None, auth_type=None,
credential_file=None, scopes=None, **kwargs):
"""
Determine authentication type, set up appropriate authentication
connection and get initial authentication information.
:param user_id: The email address (for service accounts) or Client ID
(for installed apps) to be used for authentication.
:type user_id: ``str``
:param key: The RSA Key (for service accounts) or file path containing
key or Client Secret (for installed apps) to be used for
authentication.
:type key: ``str``
:keyword auth_type: See GoogleAuthType class for list and description
of accepted values.
If not supplied, auth_type will be guessed based
on value of user_id or if the code is running
on a GCE instance.
:type auth_type: ``str``
:keyword credential_file: Path to file for caching authentication
information.
:type credential_file: ``str``
:keyword scopes: List of OAuth2 scope URLs. The empty default sets
read/write access to Compute, Storage, and DNS.
:type scopes: ``list``
"""
super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
self.oauth2_credential = GoogleOAuth2Credential(
user_id, key, auth_type, credential_file, scopes, **kwargs)
python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
sys.version_info[2])
ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
self.user_agent_append(ver_platform)
def add_default_headers(self, headers):
"""
@inherits: :class:`Connection.add_default_headers`
"""
headers['Content-Type'] = 'application/json'
headers['Host'] = self.host
return headers
def pre_connect_hook(self, params, headers):
"""
Check to make sure that token hasn't expired. If it has, get an
updated token. Also, add the token to the headers.
@inherits: :class:`Connection.pre_connect_hook`
"""
headers['Authorization'] = ('Bearer ' +
self.oauth2_credential.access_token)
return params, headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
def request(self, *args, **kwargs):
"""
@inherits: :class:`Connection.request`
"""
# Adds some retry logic for the occasional
# "Connection Reset by peer" error.
retries = 4
tries = 0
while tries < (retries - 1):
try:
return super(GoogleBaseConnection, self).request(
*args, **kwargs)
except socket.error as e:
if e.errno == errno.ECONNRESET:
tries = tries + 1
else:
raise e
# One more time, then give up.
return super(GoogleBaseConnection, self).request(*args, **kwargs)
def has_completed(self, response):
"""
Determine if operation has completed based on response.
:param response: JSON response
:type response: I{responseCls}
:return: True if complete, False otherwise
:rtype: ``bool``
"""
if response.object['status'] == 'DONE':
return True
else:
return False
def get_poll_request_kwargs(self, response, context, request_kwargs):
"""
@inherits: :class:`PollingConnection.get_poll_request_kwargs`
"""
return {'action': response.object['selfLink']}
def morph_action_hook(self, action):
"""
Update action to correct request path.
In many places, the Google API returns a full URL to a resource.
This will strip the scheme and host off of the path and just return
the request. Otherwise, it will prepend the base request_path to
the action.
:param action: The action to be called in the http request
:type action: ``str``
:return: The modified request based on the action
:rtype: ``str``
"""
if action.startswith('https://'):
u = urlparse.urlsplit(action)
request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
else:
request = self.request_path + action
return request
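# morph_action_hook, illustratively (assuming request_path == '/compute/v1'):
#
#     'https://www.googleapis.com/compute/v1/projects/p/zones'
#         -> '/compute/v1/projects/p/zones'
#     '/projects/p/zones'
#         -> '/compute/v1/projects/p/zones'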
| Kami/libcloud | libcloud/common/google.py | Python | apache-2.0 | 31,736 | ["VisIt"] | 0c6e6754488afe5b6adf38cc1f8579c6faf94f264d87d0054a0332de2709d421 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Parent classes for quantum chemistry program input and output file
formats.
"""
import re
class InputFormat(object):
def __init__(self, mem, mtd, bas, mol, sys, cast):
# total job memory in MB
self.memory = mem
# computational method
self.method = mtd.lower()
# qcdb.Molecule object
self.molecule = mol
# database member index
self.index = sys
# orbital basis set
self.basis = bas.lower()
# do cast up from sto-3g basis?
self.castup = cast
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
*augbasis* is rounded up to the nearest aug-cc-pVXZ
*rootbasis* is rounded down to the nearest cc-pVXZ
*auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
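# Illustrative matches for the Dunning regex above:
#     'cc-pvdz'           -> ['cc-pvdz', 'aug-cc-pvdz', 'cc-pvdz']
#     'heavy-aug-cc-pvtz' -> ['cc-pvtz', 'aug-cc-pvtz', 'aug-cc-pvtz']
# A basis that does not match the Dunning pattern yields [None, None, None].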
class InputFormat2(object):
def __init__(self, mem, mol, mtd, der, opt):
# total job memory in MB
self.memory = mem
# qcdb.Molecule object
self.molecule = mol
# computational method
self.method = mtd.lower()
# computational derivative level
self.dertype = der
# options dictionary
self.options = opt
# orbital basis set
self.basis = opt['GLOBALS']['BASIS']['value'].lower()
# do cast up from sto-3g basis?
self.castup = opt['SCF']['BASIS_GUESS']['value']
def corresponding_aux_basis(self):
"""For Dunning basis sets, returns strings from which auxiliary
basis sets and heavy-aug can be constructed. Note that
valence/core-valence/etc. is conserved and X-zeta/(X+d)zeta is
not, since this is the usual aux basis pattern.
*augbasis* is rounded up to the nearest aug-cc-pVXZ
*rootbasis* is rounded down to the nearest cc-pVXZ
*auxbasis* is rounded up to the nearest cc-pVXZ or aug-cc-pVXZ
"""
Dunmatch = re.compile(r'^(.*cc-)(pv|pcv|pwcv).*?([dtq56]).*z$').match(self.basis)
if Dunmatch:
rootbas = 'cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
augbas = 'aug-cc-' + Dunmatch.group(2) + Dunmatch.group(3) + 'z'
if Dunmatch.group(1) == 'cc-':
auxbas = rootbas
else:
auxbas = augbas
else:
rootbas = None
augbas = None
auxbas = None
return [rootbas, augbas, auxbas]
| dgasmith/psi4 | psi4/driver/qcdb/qcformat.py | Python | lgpl-3.0 | 4,080 | ["Psi4"] | 2c77643aa3804e709c31c9b2bfd854bd534323446b9e3058db6dd7fb3ce915db |
#!/usr/bin/python
from optparse import OptionParser
import numpy as np
import sys
usage = """
A script for creating an error model from biased sequencing data.
Information about the biases should be formatted as a 4x4 table
indicating the probability of an experimentally-derived change
from the rows to the columns (i.e. the cell in row i and column j
should indicate the probability that base i was changed to base j.
A more concrete example is provided on the bwa-pssm web page:
http://bwa-pssm.binf.ku.dk
Example:
1 0 0 0
0 0.8 0 0
0 0 1 0
0 0.2 0 1
The output is a table where the first column indicates the quality
to be replaced, the second the identity of the base and the last
four are numbers corresponding to the columns of the PSSM to be built.
Example:
...
39 A 2.00 -12.77 -12.54 -12.34
39 C -12.54 1.77 -12.54 -0.74
39 G -12.54 -12.77 2.00 -12.34
39 T -12.54 -12.77 -12.54 2.00
40 A 2.00 -13.10 -12.87 -12.67
40 C -12.87 1.77 -12.87 -0.74
40 G -12.87 -13.10 2.00 -12.67
40 T -12.87 -13.10 -12.87 2.00
"""
def phred_prob(q):
'''
Return the error probability corresponding to a quality value of 'q'.
@param q: The PHRED quality value.
@return: The error probability.
'''
return 10 ** (-q / 10.)
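# Illustrative values: phred_prob(10) == 0.1, phred_prob(20) == 0.01,
# phred_prob(30) == 0.001 (the PHRED relation Q = -10 * log10(P_error)).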
def main():
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
in_filename = args[0]
if in_filename == '-':
in_file = sys.stdin
else:
in_file = open(in_filename, 'r')
bias_matrix = np.genfromtxt(in_file)
if bias_matrix.shape != (4,4):
print >>sys.stderr, "Invalid dimensions for the input table:", t.shape
sys.exit(1)
colsums = np.sum(bias_matrix, axis=0)
if not np.allclose(colsums, np.array([1., 1., 1., 1.])):
print >>sys.stderr, """
The probabilities along the columns should sum to \
1. The probabilities in the supplied table have \
the following sums: %s
""" % (colsums)
bases = ['A', 'C', 'G', 'T']
qual_matrix = np.zeros((4,4))
for q in xrange(0, 42):
error_prob = phred_prob(q) / 3.  # there are three other bases an error could turn into
match_prob = 1 - 3. * error_prob
qual_matrix.fill(error_prob)
np.fill_diagonal(qual_matrix, match_prob)
# pseudocounts
qual_matrix += 10 ** -8
bg = 0.25
new_probs = bias_matrix.dot(qual_matrix)
pssm_scores = np.log(new_probs / bg) / np.log(2.)
for i, b in enumerate(bases):
print "%d %s %s" % (q, b, " ".join(map("{:.4f}".format, pssm_scores[:,i])))
in_file.close()
if __name__ == "__main__":
main()
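# Typical invocation, as a sketch (the filenames here are hypothetical):
#
#     python error_model.py bias_table.txt > error_model.txt
#
# where bias_table.txt holds the 4x4 bias matrix whose columns sum to 1,
# formatted as described in the usage string above.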
| pkerpedjiev/bwa-pssm | scripts/error_model.py | Python | gpl-3.0 | 2,785 | ["BWA"] | 9818d7ab11aaedc64546f46a2510d54b87ec072cb385710bf64f000670eee9e9 |
import itertools
import math
from ..ssa import objtypes
from . import visitor
from .stringescape import escapeString
# Explicitly cast parameters to the desired type in order to avoid potential issues with overloaded methods
ALWAYS_CAST_PARAMS = 1
class VariableDeclarator(object):
def __init__(self, typename, identifier): self.typename = typename; self.local = identifier
def print_(self, printer, print_):
return '{} {}'.format(print_(self.typename), print_(self.local))
def tree(self, printer, tree): return [tree(self.typename), tree(self.local)]
#############################################################################################################################################
class JavaStatement(object):
expr = None # provide default for subclasses that don't have an expression
def getScopes(self): return ()
def fixLiterals(self):
if self.expr is not None:
self.expr = self.expr.fixLiterals()
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
self.expr.addParens()
class ExpressionStatement(JavaStatement):
def __init__(self, expr):
self.expr = expr
assert expr is not None
def print_(self, printer, print_): return print_(self.expr) + ';'
def tree(self, printer, tree): return [self.__class__.__name__, tree(self.expr)]
class LocalDeclarationStatement(JavaStatement):
def __init__(self, decl, expr=None):
self.decl = decl
self.expr = expr
def print_(self, printer, print_):
if self.expr is not None:
return '{} = {};'.format(print_(self.decl), print_(self.expr))
return print_(self.decl) + ';'
def tree(self, printer, tree): return [self.__class__.__name__, tree(self.expr), tree(self.decl)]
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
if not isJavaAssignable(env, self.expr.dtype, self.decl.typename.tt):
self.expr = makeCastExpr(self.decl.typename.tt, self.expr, fixEnv=env)
self.expr.addParens()
class ReturnStatement(JavaStatement):
def __init__(self, expr=None, tt=None):
self.expr = expr
self.tt = tt
def print_(self, printer, print_): return 'return {};'.format(print_(self.expr)) if self.expr is not None else 'return;'
def tree(self, printer, tree): return [self.__class__.__name__, tree(self.expr)]
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
if not isJavaAssignable(env, self.expr.dtype, self.tt):
self.expr = makeCastExpr(self.tt, self.expr, fixEnv=env)
self.expr.addParens()
class ThrowStatement(JavaStatement):
def __init__(self, expr):
self.expr = expr
def print_(self, printer, print_): return 'throw {};'.format(print_(self.expr))
def tree(self, printer, tree): return [self.__class__.__name__, tree(self.expr)]
class JumpStatement(JavaStatement):
def __init__(self, target, isFront):
self.label = target.getLabel() if target is not None else None
self.keyword = 'continue' if isFront else 'break'
def print_(self, printer, print_):
label = ' ' + self.label if self.label is not None else ''
return self.keyword + label + ';'
def tree(self, printer, tree): return [self.__class__.__name__, self.keyword, self.label]
# Compound Statements
sbcount = itertools.count()
class LazyLabelBase(JavaStatement):
# Jumps are represented by arbitrary 'keys', currently just the key of the
# original proxy node. Each item has a continueKey and a breakKey representing
# the beginning and the point just past the end respectively. breakKey may be
# None if this item appears at the end of the function and there is nothing after it.
# Statement blocks have a jump key representing where it jumps to if any. This
# may be None if the jump is unreachable (such as if there is a throw or return)
def __init__(self, labelfunc, begink, endk):
self.label, self.func = None, labelfunc
self.continueKey = begink
self.breakKey = endk
self.id = next(sbcount) # For debugging purposes
def getLabel(self):
if self.label is None:
self.label = self.func() # Not a bound function!
return self.label
def getLabelPrefix(self): return '' if self.label is None else self.label + ': '
# def getLabelPrefix(self): return self.getLabel() + ': '
# For debugging
def __str__(self):
if isinstance(self, StatementBlock):
return 'Sb'+str(self.id)
return type(self).__name__[:3]+str(self.id)
__repr__ = __str__
class TryStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, tryb, pairs):
super(TryStatement, self).__init__(labelfunc, begink, endk)
self.tryb, self.pairs = tryb, pairs
def getScopes(self): return (self.tryb,) + zip(*self.pairs)[1]
def print_(self, printer, print_):
tryb = print_(self.tryb)
parts = ['catch({})\n{}'.format(print_(x), print_(y)) for x,y in self.pairs]
return '{}try\n{}\n{}'.format(self.getLabelPrefix(), tryb, '\n'.join(parts))
def tree(self, printer, tree):
parts = [map(tree, t) for t in self.pairs]
return [self.__class__.__name__, self.label, tree(self.tryb), parts]
class IfStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, expr, scopes):
super(IfStatement, self).__init__(labelfunc, begink, endk)
self.expr = expr # don't rename without changing how var replacement works!
self.scopes = scopes
def getScopes(self): return self.scopes
def print_(self, printer, print_):
lbl = self.getLabelPrefix()
parts = [self.expr] + list(self.scopes)
if len(self.scopes) == 1:
parts = [print_(x) for x in parts]
return '{}if ({})\n{}'.format(lbl, *parts)
# Special case handling for 'else if'
sep = '\n'  # the else separator depends on whether we have an 'else if'
fblock = self.scopes[1]
if len(fblock.statements) == 1:
stmt = fblock.statements[-1]
if isinstance(stmt, IfStatement) and stmt.label is None:
sep, parts[-1] = ' ', stmt
parts = [print_(x) for x in parts]
return '{}if ({})\n{}\nelse{sep}{}'.format(lbl, *parts, sep=sep)
def tree(self, printer, tree): return [self.__class__.__name__, self.label, tree(self.expr), map(tree, self.scopes)]
class SwitchStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, expr, pairs):
super(SwitchStatement, self).__init__(labelfunc, begink, endk)
self.expr = expr # don't rename without changing how var replacement works!
self.pairs = pairs
def getScopes(self): return zip(*self.pairs)[1]
def hasDefault(self): return None in zip(*self.pairs)[0]
def print_(self, printer, print_):
expr = print_(self.expr)
def printCase(keys):
if keys is None:
return 'default: '
assert keys
return ''.join(map('case {}: '.format, sorted(keys)))
bodies = [(printCase(keys) + print_(scope)) for keys, scope in self.pairs]
if self.pairs[-1][0] is None and len(self.pairs[-1][1].statements) == 0:
bodies.pop()
contents = '\n'.join(bodies)
indented = [' '+line for line in contents.splitlines()]
lines = ['{'] + indented + ['}']
return '{}switch({}){}'.format(self.getLabelPrefix(), expr, '\n'.join(lines))
def tree(self, printer, tree):
parts = []
for keys, scope in self.pairs:
parts.append([[None] if keys is None else sorted(keys), tree(scope)])
return [self.__class__.__name__, self.label, tree(self.expr), parts]
class WhileStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, parts):
super(WhileStatement, self).__init__(labelfunc, begink, endk)
self.expr = Literal.TRUE
self.parts = parts
assert len(self.parts) == 1
def getScopes(self): return self.parts
def print_(self, printer, print_):
parts = print_(self.expr), print_(self.parts[0])
return '{}while({})\n{}'.format(self.getLabelPrefix(), *parts)
def tree(self, printer, tree): return [self.__class__.__name__, self.label, tree(self.expr), tree(self.parts[0])]
class StatementBlock(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, statements, jumpk, labelable=True):
super(StatementBlock, self).__init__(labelfunc, begink, endk)
self.parent = None # should be assigned later
self.statements = statements
self.jumpKey = jumpk
self.labelable = labelable
def doesFallthrough(self): return self.jumpKey is None or self.jumpKey == self.breakKey
def getScopes(self): return self,
def print_(self, printer, print_):
assert self.labelable or self.label is None
contents = '\n'.join(print_(x) for x in self.statements)
indented = [' '+line for line in contents.splitlines()]
# indented[:0] = [' //{} {}'.format(self,x) for x in (self.continueKey, self.breakKey, self.jumpKey)]
lines = [self.getLabelPrefix() + '{'] + indented + ['}']
return '\n'.join(lines)
@staticmethod
def join(*scopes):
blists = [s.bases for s in scopes if s is not None] # allow None to represent the universe (top element)
if not blists:
return None
common = [x for x in zip(*blists) if len(set(x)) == 1]
return common[-1][0]
def tree(self, printer, tree): return ['BlockStatement', self.label, map(tree, self.statements)]
#############################################################################################################################################
# Careful, order is important here!
_assignable_sprims = objtypes.ByteTT, objtypes.ShortTT, objtypes.CharTT
_assignable_lprims = objtypes.IntTT, objtypes.LongTT, objtypes.FloatTT, objtypes.DoubleTT
# Also used in boolize.py
def isPrimativeAssignable(x, y): # x = fromt, y = to
assert objtypes.dim(x) == objtypes.dim(y) == 0
if x == y or (x in _assignable_sprims and y in _assignable_lprims):
return True
elif (x in _assignable_lprims and y in _assignable_lprims):
return _assignable_lprims.index(x) <= _assignable_lprims.index(y)
else:
return (x, y) == (objtypes.ByteTT, objtypes.ShortTT)
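# Illustrative checks against the orderings above: Byte -> Int is assignable
# (a small prim into the widening chain), Int -> Long is assignable (index 0
# <= index 1 in _assignable_lprims), but Long -> Int is not.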
def isReferenceType(tt):
return tt == objtypes.NullTT or objtypes.dim(tt) or (objtypes.className(tt) is not None)
def isJavaAssignable(env, fromt, to):
if fromt is None or to is None: # this should never happen, except during debugging
return True
if isReferenceType(to):
assert isReferenceType(fromt)
# todo - make it check interfaces too
return objtypes.isSubtype(env, fromt, to)
else: # allowed if numeric conversion is widening
return isPrimativeAssignable(fromt, to)
_int_tts = objtypes.LongTT, objtypes.IntTT, objtypes.ShortTT, objtypes.CharTT, objtypes.ByteTT
def makeCastExpr(newtt, expr, fixEnv=None):
if newtt == expr.dtype:
return expr
# if casting a literal with compatible type, just create a literal of the new type
if isinstance(expr, Literal):
allowed_conversions = [
(objtypes.FloatTT, objtypes.DoubleTT),
(objtypes.IntTT, objtypes.LongTT),
(objtypes.IntTT, objtypes.BoolTT),
(objtypes.BoolTT, objtypes.IntTT),
]
if (expr.dtype, newtt) in allowed_conversions:
return Literal(newtt, expr.val)
if newtt == objtypes.IntTT and expr.dtype == objtypes.BoolTT:
return Ternary(expr, Literal.ONE, Literal.ZERO)
elif newtt == objtypes.BoolTT and expr.dtype == objtypes.IntTT:
return BinaryInfix('!=', [expr, Literal.ZERO], objtypes.BoolTT)
ret = Cast(TypeName(newtt), expr)
if fixEnv is not None:
ret = ret.fix(fixEnv)
return ret
#############################################################################################################################################
# Precedence:
# 0 - pseudoprimary
# 5 - pseudounary
# 10-19 binary infix
# 20 - ternary
# 21 - assignment
# Associativity: L = Left, R = Right, A = Full
class JavaExpression(object):
precedence = 0 # Default precedence
params = [] # for subclasses that don't have params
def complexity(self): return 1 + max(e.complexity() for e in self.params) if self.params else 0
def postFlatIter(self):
return itertools.chain([self], *[expr.postFlatIter() for expr in self.params])
def print_(self, printer, print_):
return self.fmt.format(*[print_(expr) for expr in self.params])
def tree(self, printer, tree): return [self.__class__.__name__, map(tree, self.params)]
def replaceSubExprs(self, rdict):
if self in rdict:
return rdict[self]
self.params = [param.replaceSubExprs(rdict) for param in self.params]
return self
def fixLiterals(self):
self.params = [param.fixLiterals() for param in self.params]
return self
def addCasts(self, env):
for param in self.params:
param.addCasts(env)
self.addCasts_sub(env)
def addCasts_sub(self, env): pass
def addParens(self):
for param in self.params:
param.addParens()
self.params = list(self.params) # Copy before editing, just to be extra safe
self.addParens_sub()
def addParens_sub(self): pass
def isLocalAssign(self): return isinstance(self, Assignment) and isinstance(self.params[0], Local)
def __repr__(self):
return type(self).__name__.rpartition('.')[-1] + ' ' + visitor.DefaultVisitor().visit(self)
__str__ = __repr__
class ArrayAccess(JavaExpression):
def __init__(self, *params):
if params[0].dtype == objtypes.NullTT:
# Unfortunately, Java doesn't really support array access on null constants,
# so we'll just cast it to Object[] as a hack
param = makeCastExpr(objtypes.withDimInc(objtypes.ObjectTT, 1), params[0])
params = param, params[1]
self.params = list(params)
self.fmt = '{}[{}]'
@property
def dtype(self): return objtypes.withDimInc(self.params[0].dtype, -1)
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 0 or isinstance(p0, ArrayCreation):
self.params[0] = Parenthesis(p0)
class ArrayCreation(JavaExpression):
def __init__(self, tt, *sizeargs):
self.dim = objtypes.dim(tt)
self.params = [TypeName(objtypes.withNoDim(tt))] + list(sizeargs)
self.dtype = tt
assert self.dim >= len(sizeargs) > 0
self.fmt = 'new {}' + '[{}]'*len(sizeargs) + '[]'*(self.dim-len(sizeargs))
def tree(self, printer, tree): return [self.__class__.__name__, map(tree, self.params), self.dim]
class Assignment(JavaExpression):
precedence = 21
def __init__(self, *params):
self.params = list(params)
self.fmt = '{} = {}'
@property
def dtype(self): return self.params[0].dtype
def addCasts_sub(self, env):
left, right = self.params
if not isJavaAssignable(env, right.dtype, left.dtype):
expr = makeCastExpr(left.dtype, right, fixEnv=env)
self.params = [left, expr]
def tree(self, printer, tree): return [self.__class__.__name__, map(tree, self.params), '']
_binary_ptable = ['* / %', '+ -', '<< >> >>>',
'< > <= >= instanceof', '== !=',
'&', '^', '|', '&&', '||']
binary_precedences = {}
for _ops, _val in zip(_binary_ptable, range(10,20)):
for _op in _ops.split():
binary_precedences[_op] = _val
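# Resulting table, illustratively: binary_precedences['*'] == 10,
# binary_precedences['+'] == 11, binary_precedences['=='] == 14,
# binary_precedences['||'] == 19; lower numbers bind more tightly.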
class BinaryInfix(JavaExpression):
def __init__(self, opstr, params, dtype=None):
assert len(params) == 2
self.params = params
self.opstr = opstr
self.fmt = '{{}} {} {{}}'.format(opstr)
self._dtype = dtype
self.precedence = binary_precedences[opstr]
@property
def dtype(self): return self.params[0].dtype if self._dtype is None else self._dtype
def addParens_sub(self):
myprec = self.precedence
associative = myprec >= 15 # for now we treat +, *, etc as nonassociative due to floats
for i, p in enumerate(self.params):
if p.precedence > myprec:
self.params[i] = Parenthesis(p)
elif p.precedence == myprec and i > 0 and not associative:
self.params[i] = Parenthesis(p)
def tree(self, printer, tree): return [self.__class__.__name__, map(tree, self.params), self.opstr]
class Cast(JavaExpression):
precedence = 5
def __init__(self, *params):
self.dtype = params[0].tt
self.params = list(params)
self.fmt = '({}){}'
def fix(self, env):
tt, expr = self.dtype, self.params[1]
# "Impossible" casts are a compile error in Java.
# This can be fixed with an intermediate cast to Object
if isReferenceType(tt):
if not isJavaAssignable(env, tt, expr.dtype):
if not isJavaAssignable(env, expr.dtype, tt):
expr = makeCastExpr(objtypes.ObjectTT, expr)
self.params = [self.params[0], expr]
return self
def addCasts_sub(self, env): self.fix(env)
def addParens_sub(self):
p1 = self.params[1]
if p1.precedence > 5 or (isinstance(p1, UnaryPrefix) and p1.opstr[0] in '-+'):
self.params[1] = Parenthesis(p1)
class ClassInstanceCreation(JavaExpression):
def __init__(self, typename, tts, arguments):
self.typename, self.tts, self.params = typename, tts, arguments
self.dtype = typename.tt
def print_(self, printer, print_):
return 'new {}({})'.format(print_(self.typename), ', '.join(print_(x) for x in self.params))
def tree(self, printer, tree):
return [self.__class__.__name__, map(tree, self.params), tree(self.typename)]
def addCasts_sub(self, env):
newparams = []
for tt, expr in zip(self.tts, self.params):
if expr.dtype != tt and (ALWAYS_CAST_PARAMS or not isJavaAssignable(env, expr.dtype, tt)):
expr = makeCastExpr(tt, expr, fixEnv=env)
newparams.append(expr)
self.params = newparams
class FieldAccess(JavaExpression):
def __init__(self, primary, name, dtype, op=None, printLeft=True):
self.dtype = dtype
self.params = [primary]
self.op, self.name = op, name
self.printLeft = printLeft
# self.params, self.name = [primary], escapeString(name)
# self.fmt = ('{}.' if printLeft else '') + self.name
def print_(self, printer, print_):
if self.op is None:
name = self.name
assert name in ('length','class')
else:
cls, name, desc = self.op.target, self.op.name, self.op.desc
name = escapeString(printer.fieldName(cls, name, desc))
pre = print_(self.params[0])+'.' if self.printLeft else ''
return pre+name
def tree(self, printer, tree):
if self.op is None:
trip = None, self.name, None
else:
trip = self.op.target, self.op.name, self.op.desc
return [self.__class__.__name__, map(tree, self.params), trip, self.printLeft]
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 0:
self.params[0] = Parenthesis(p0)
def printFloat(x, isSingle):
assert x >= 0.0 and not math.isinf(x)
suffix = 'f' if isSingle else ''
if isSingle and x > 0.0:
# Try to find a more compact representation for floats, since repr treats everything as doubles
m, e = math.frexp(x)
half_ulp2 = math.ldexp(1.0, max(e - 25, -150)) # don't bother doubling when near the upper range of a given e value
half_ulp1 = (half_ulp2/2) if m == 0.5 and e >= -125 else half_ulp2
lbound, ubound = x-half_ulp1, x+half_ulp2
assert lbound < x < ubound
s = '{:g}'.format(x).replace('+','')
if lbound < float(s) < ubound: # strict ineq to avoid potential double rounding issues
return s + suffix
return repr(x) + suffix
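# Illustrative behavior: printFloat(0.5, True) returns '0.5f' via the short
# '{:g}' form, since 0.5 round-trips within its half-ulp bounds; values whose
# '{:g}' rendering does not round-trip fall back to repr(x) plus the suffix.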
class Literal(JavaExpression):
def __init__(self, vartype, val):
self.dtype = vartype
self.val = val
if self.dtype == objtypes.ClassTT:
self.params = [TypeName(val)]
def getStr(self):
if self.dtype == objtypes.StringTT:
return '"' + escapeString(self.val) + '"'
elif self.dtype == objtypes.IntTT:
return str(self.val)
elif self.dtype == objtypes.LongTT:
return str(self.val) + 'L'
elif self.dtype == objtypes.FloatTT or self.dtype == objtypes.DoubleTT:
return printFloat(self.val, self.dtype == objtypes.FloatTT)
elif self.dtype == objtypes.NullTT:
return 'null'
elif self.dtype == objtypes.BoolTT:
return 'true' if self.val else 'false'
def fixLiterals(self):
# From the point of view of the Java Language, there is no such thing as a negative literal.
# This replaces invalid literal values with unary minus (and division for non-finite floats)
if self.dtype == objtypes.IntTT or self.dtype == objtypes.LongTT:
if self.val < 0:
return UnaryPrefix('-', Literal(self.dtype, -self.val))
elif self.dtype == objtypes.FloatTT or self.dtype == objtypes.DoubleTT:
x = self.val
zero = Literal.DZERO if self.dtype == objtypes.DoubleTT else Literal.FZERO
if math.isnan(x):
return BinaryInfix('/', [zero, zero])
elif math.isinf(x): #+/- inf
numerator = Literal(self.dtype, math.copysign(1.0, x)).fixLiterals()
return BinaryInfix('/', [numerator, zero])
# finite negative numbers
if math.copysign(1.0, x) == -1.0:
return UnaryPrefix('-', Literal(self.dtype, math.copysign(x, 1.0)))
return self
def print_(self, printer, print_):
if self.dtype == objtypes.ClassTT:
# for printing class literals
return '{}.class'.format(print_(self.params[0]))
return self.getStr()
def tree(self, printer, tree):
result = tree(self.params[0]) if self.dtype == objtypes.ClassTT else self.getStr()
return [self.__class__.__name__, result, self.dtype]
def _key(self): return self.dtype, self.val
def __eq__(self, other): return type(self) == type(other) and self._key() == other._key()
def __ne__(self, other): return type(self) != type(other) or self._key() != other._key()
def __hash__(self): return hash(self._key())
Literal.FALSE = Literal(objtypes.BoolTT, 0)
Literal.TRUE = Literal(objtypes.BoolTT, 1)
Literal.N_ONE = Literal(objtypes.IntTT, -1)
Literal.ZERO = Literal(objtypes.IntTT, 0)
Literal.ONE = Literal(objtypes.IntTT, 1)
Literal.LZERO = Literal(objtypes.LongTT, 0)
Literal.FZERO = Literal(objtypes.FloatTT, 0.0)
Literal.DZERO = Literal(objtypes.DoubleTT, 0.0)
Literal.NULL = Literal(objtypes.NullTT, None)
_init_d = {objtypes.BoolTT: Literal.FALSE,
objtypes.IntTT: Literal.ZERO,
objtypes.LongTT: Literal.LZERO,
objtypes.FloatTT: Literal.FZERO,
objtypes.DoubleTT: Literal.DZERO}
def dummyLiteral(tt):
return _init_d.get(tt, Literal.NULL)
class Local(JavaExpression):
def __init__(self, vartype, namefunc):
self.dtype = vartype
self.name = None
self.func = namefunc
def print_(self, printer, print_):
if self.name is None:
self.name = self.func(self)
return self.name
def tree(self, printer, tree): return [self.__class__.__name__, self.print_(None, None)]
class MethodInvocation(JavaExpression):
def __init__(self, left, name, tts, arguments, op, dtype):
if left is None:
self.params = arguments
else:
self.params = [left] + arguments
self.hasLeft = (left is not None)
self.dtype = dtype
self.name = name
self.tts = tts
self.op = op # keep around for future reference and new merging
def print_(self, printer, print_):
cls, name, desc = self.op.target, self.op.name, self.op.desc
if name != self.name:
assert name == '<init>'
name = self.name
else:
name = escapeString(printer.methodName(cls, name, desc))
if self.hasLeft:
left, arguments = self.params[0], self.params[1:]
return '{}.{}({})'.format(print_(left), name, ', '.join(print_(x) for x in arguments))
else:
arguments = self.params
return '{}({})'.format(name, ', '.join(print_(x) for x in arguments))
def tree(self, printer, tree):
trip = self.op.target, self.op.name, self.op.desc
return [self.__class__.__name__, map(tree, self.params), trip, self.name, self.hasLeft]
def addCasts_sub(self, env):
newparams = []
for tt, expr in zip(self.tts, self.params):
if expr.dtype != tt and (ALWAYS_CAST_PARAMS or not isJavaAssignable(env, expr.dtype, tt)):
expr = makeCastExpr(tt, expr, fixEnv=env)
newparams.append(expr)
self.params = newparams
def addParens_sub(self):
if self.hasLeft:
p0 = self.params[0]
if p0.precedence > 0:
self.params[0] = Parenthesis(p0)
class Parenthesis(JavaExpression):
def __init__(self, param):
self.params = [param]
self.fmt = '({})'
@property
def dtype(self): return self.params[0].dtype
class Ternary(JavaExpression):
precedence = 20
def __init__(self, *params):
self.params = list(params)
self.fmt = '{} ? {} : {}'
@property
def dtype(self): return self.params[1].dtype
def addParens_sub(self):
# Add unnecessary parentheses to complex conditions for readability
if self.params[0].precedence >= 20 or self.params[0].complexity() > 0:
self.params[0] = Parenthesis(self.params[0])
if self.params[2].precedence > 20:
self.params[2] = Parenthesis(self.params[2])
class TypeName(JavaExpression):
def __init__(self, tt):
self.dtype = None
self.tt = tt
def print_(self, printer, print_):
name = objtypes.className(self.tt)
if name is not None:
name = printer.className(name)
name = escapeString(name.replace('/','.'))
if name.rpartition('.')[0] == 'java.lang':
name = name.rpartition('.')[2]
else:
name = objtypes.primName(self.tt)
s = name + '[]'*objtypes.dim(self.tt)
return s
def tree(self, printer, tree): return [self.__class__.__name__, self.tt]
def complexity(self): return -1 # exprs which have this as a param won't be bumped up to 1 unnecessarily
class CatchTypeNames(JavaExpression): # Used for caught exceptions, which can have multiple types specified
def __init__(self, env, tts):
assert(tts and not any(objtypes.dim(tt) for tt in tts)) # at least one type, no array types
self.tnames = map(TypeName, tts)
self.dtype = objtypes.commonSupertype(env, tts)
def print_(self, printer, print_):
return ' | '.join(print_(tn) for tn in self.tnames)
def tree(self, printer, tree): return [self.__class__.__name__, map(tree, self.tnames)]
class UnaryPrefix(JavaExpression):
precedence = 5
def __init__(self, opstr, param, dtype=None):
self.params = [param]
self.opstr = opstr
self.fmt = opstr + '{}'
self._dtype = dtype
@property
def dtype(self): return self.params[0].dtype if self._dtype is None else self._dtype
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 5 or (isinstance(p0, UnaryPrefix) and p0.opstr[0] == self.opstr[0]):
self.params[0] = Parenthesis(p0)
def tree(self, printer, tree): return ['Unary', map(tree, self.params), self.opstr, False]
class Dummy(JavaExpression):
def __init__(self, fmt, params, isNew=False, dtype=None):
self.params = params
self.fmt = fmt
self.isNew = isNew
self.dtype = dtype
| Cubitect/ASMModSuit | Krakatau-master/Krakatau/java/ast.py | Python | gpl-3.0 | 28,636 | ["VisIt"] | 45c94706fc8dc37f356434bb5168c7cb13e1aae430b5d20f4a23e101ddae77b6 |
import os
import unittest
from datasources.FileDataSource import FileDataSource
from parsers.NaPekarceParser import NaPekarceParser
class NaPekarceParserTest(unittest.TestCase):
def test_parse(self):
parser = NaPekarceParser(FileDataSource(os.path.dirname(__file__) + '/page_napekarce.html', 'utf8'))
items = parser.get_menu_items()
str_items = [str(item) for item in items]
expected = [
'2016-04-30: Zeleninový krém 1,7 (35)',
'2016-04-30: Živáňská pečeně v alobalu (vepřový bůček,klobása, cibule, brambory) s beraními rohy (139)',
'2016-04-30: Hovězí medajlonky ze svíčkové s fazolkami na šalotce a smetanovo pepřovou omáčkou 1,7 (199)',
'2016-04-30: Květákové placičky s kuřecím masem, šťouchané cibulkové brambory, dip 1,3,7 (109)',
'2016-04-30: Plněný paprikový lusk s rajskou omáčkou, houskové knedlíky 1,3,7 (114)',
'2016-04-30: Segedínský vepřový guláš zjemněný smetanou, houskové knedlíky 1,3,7 (109)',
'2016-04-30: Vepřový steak na roštu s dijónskou omáčkou, americké brambory 1,3,7 (109)',
'2016-04-30: Domácí mrkvový koláč se šlehačkou 1,3,7 (45)',
'2016-04-30: Domácí mrkvový koláč se šlehačkou 1,3,7 + espresso (69)',
'2016-04-30: Sobota+neděle : Dětský jídelní lístek ()',
]
self.assertEqual(expected, str_items)
if __name__ == '__main__':
unittest.main()
| BrandEmbassy/dailymenu | tests/parsers/NaPekarceParserTest.py | Python | mit | 1,531 | ["ESPResSo"] | 328846f9975fa492ead1ab557bf09fd110d60324431c02acf2399b549ba33e8e |
# Psychrometric Chart
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <Chris@MackeyArchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to draw a psychrometric chart in the Rhino scene and evaluate a set of temperatures and humidity ratios in terms of indoor comfort. Connected data can include either outdoor temperature and humidity ratios from imported EPW weather data, indoor temperature and humidity ratios from an energy simulation, or individual numerical inputs of temperature and humidity. The input data will be plotted alongside polygons on the chart representing comfort as well as polygons representing the effects of passive building strategies on comfort.
_
The specific human energy balance model used by the psychrometric chart is the Predicted Mean Vote (PMV) model developed by P.O. Fanger. PMV is a seven-point scale from cold (-3) to hot (+3) that is used in comfort surveys. Each integer value of the scale indicates the following: -3:Cold, -2:Cool, -1:Slightly Cool, 0:Neutral, +1:Slightly Warm, +2:Warm, +3:Hot. The range of comfort is generally accepted as a PMV between -1 and +1 and this is what defines the range of the comfort polygon on the psychrometric chart.
Accordingly, this component will also output the PMV of the occupant for the input conditions as well as an estimated percentage of people dissatisfied (PPD) in the given conditions.
_
The comfort models that make this component possible were translated to Python from a series of validated JavaScript comfort models developed at the Berkeley Center for the Built Environment (CBE).
Specific documentation on the comfort models can be found here: https://code.google.com/p/cbe-comfort-tool/wiki/ComfortModels
_
Special thanks goes to the authors of the online CBE Thermal Comfort Tool who first made the javascript models in order to power the tool:
Hoyt Tyler, Schiavon Stefano, Piccioli Alberto, Moon Dustin, and Steinfeld Kyle, 2013, CBE Thermal Comfort Tool.
Center for the Built Environment, University of California Berkeley, http://cbe.berkeley.edu/comforttool/
_
The information for the polygons representing passive strategies comes from the climate consultant psychrometric chart. Further information on how these polygons are calculated can be found here:
http://apps1.eere.energy.gov/buildings/tools_directory/software.cfm/ID=123/pagename=alpha_list
-
Provided by Ladybug 0.0.61
Args:
_dryBulbTemperature: A number representing the dry bulb temperature of the air in degrees Celsius. This input can also accept a list of temperatures representing conditions at different times or the direct output of dryBulbTemperature from the Import EPW component. Indoor temperatures from Honeybee energy simulations are also possible inputs. Finally, this component can also accept temperatures in Fahrenheit in order to draw a chart with IP units but, in order for this component to sense that the values are Fahrenheit, there must be at least one 'F' or 'f' in the stream of connected data.
_relativeHumidity: A number between 0 and 100 representing the relative humidity of the air in percentage. This input can also accept a list of relative humidity values representing conditions at different times or the direct output of relativeHumidity from of the Import EPW component.
barometricPressure_: A number representing the barometric pressure in Pascals. If no value is connected here, the default pressure will be 101325 Pa, which is air pressure at sea level. It is recommended that you connect the barometric pressure from the Import epw component here as the air pressure at sea level can cause some misleading results for cities at higher elevations.
-------------------------: ...
meanRadTemperature_: A number representing the mean radiant temperature of the surrounding surfaces. This value should be in degrees Celsius unless you have connected values in Fahrenheit to the dryBulbTemperature and you are seeing a chart in IP units. If no value is plugged in here, this component will assume that the mean radiant temperature is equal to 23 C. This input can also accept a list of temperatures and this will produce several comfort polygons (one for each mean radiant temperature).
windSpeed_: A number representing the wind speed of the air in meters per second. If no value is plugged in here, this component will assume a very low wind speed of 0.05 m/s, characteristic of most indoor conditions. This input can also accept a list of wind speeds representing conditions and this will produce several comfort polygons (one for each wind speed).
metabolicRate_: A number representing the metabolic rate of the human subject in met. This input can also accept text inputs for different activities. Acceptable text inputs include Sleeping, Reclining, Sitting, Typing, Standing, Driving, Cooking, House Cleaning, Walking, Walking 2mph, Walking 3mph, Walking 4mph, Running 9mph, Lifting 10lbs, Lifting 100lbs, Shoveling, Dancing, and Basketball. If no value is input here, the component will assume a metabolic rate of 1 met, which is the metabolic rate of a seated human being. This input can also accept lists of metabolic rates and will produce multiple comfort polygons accordingly.
clothingLevel_: A number representing the clothing level of the human subject in clo. If no value is input here, the component will assume a clothing level of 1 clo, which is roughly the insulation provided by a 3-piece suit. A person dressed in shorts and a T-shirt has a clothing level of roughly 0.5 clo and a person in a thick winter jacket can have a clothing level as high as 2 to 4 clo. This input can also accept lists of clothing levels and will produce multiple comfort polygons accordingly.
-------------------------: ...
mergeComfPolygons_: Set to "True" if you have connected multiple values for any of the four comfort variables in the section above and you wish to merge all of the computed comfort polygons into one.
comfortPar_: Optional comfort parameters from the "Ladybug_PMV Comfort Parameters" component. Use this to adjust maximum and minimum acceptable humidity ratios. These comfortPar can also change whether comfort is defined by eighty or ninety percent of people comfortable.
passiveStrategy_: An optional text input of passive strategies to be laid over the psychrometric chart as polygons. It is recommended that you use the "Ladybug_Passive Strategy List" to select which polygons you would like to display. Otherwise, acceptable text inputs include "Evaporative Cooling", "Thermal Mass + Night Vent", "Occupant Use of Fans", "Internal Heat Gain", and "Dessicant Dehumidification".
strategyPar_: Optional passive strategy parameters from the "Ladybug_Passive Strategy Parameters" component. Use this to adjust the maximum comfortable wind speed, the building balance temperature, and the temperature limits for thermal mass and night flushing.
mollierHX_: Set to "True" to visualize the psychrometric chart as a Mollier h-x diagram. This is essentially a psychrometric chart where the axes have been switched, which is popular in Europe.
enthalpyOrWetBulb_: Set to "True" to have the psychrometric chart plot lines of constant enthalpy and set to "False" to have the chart plot lines of constant wet bulb temperature. The default is set to "True" for enthalpy.
analysisPeriod_: An optional analysis period from the Ladybug_Analysis Period component. If no analysis period is given and EPW data from the Import EPW component has been connected, the analysis will be run for the entire year.
annualHourlyData_: An optional list of hourly data from the Import EPW component, which will be used to create hourPointColors that correspond to the hours of the data (e.g. windSpeed). You can connect several different lists of annual hourly data here.
conditionalStatement_: This input allows users to remove data that does not fit specific conditions or criteria from the psychrometric chart. The conditional statement input here should be a valid condition statement in Python, such as "a>25" or "b<80" (without quotation marks).
The current version of this component accepts "and" and "or" operators. To visualize the hourly data, only lowercase English letters should be used as variables, and each letter alphabetically corresponds to each of the lists (in their respective order): "a" always represents dryBulbTemperature, "b" always represents the relativeHumidity, "c" always represents the 1st list plugged into annualHourlyData_, "d" represents the 2nd list, etc.
For example, if you want to plot the data for the time period when temperature is between 18C and 23C, and humidity is less than 80%, the conditional statement should be written as 18<a<23 and b<80 (without quotation marks).
basePoint_: An optional base point that will be used to place the Psychrometric Chart in the Rhino scene. If no base point is provided, the base point will be the Rhino model origin.
scale_: An optional number to change the scale of the psychrometric chart in the Rhino scene. By default, this value is set to 1.
legendPar_: Optional legend parameters from the Ladybug Legend Parameters component.
_runIt: Set to "True" to run the component and generate a psychrometric chart!
Returns:
readMe!: ...
-------------------------: ...
totalComfortPercent: The percent of the input data that are inside all comfort and passive strategy polygons.
totalComfortOrNot: A list of 0's and 1's indicating, for each hour of the input data, if the hour is inside a comfort or strategy polygon (1) or not (0).
strategyNames: A list of names for the comfort polygons and strategies that correspond to the numbers in the following outputs.
strategyPercentOfTime: The percent of the input data that are in each of the comfort or passive strategy polygons. Each number here corresponds to the names in the "strategyNames" output above.
strategyOrNot: A list of 0's and 1's indicating, for each hour of the input temperature and humidity ratio, if the hour is inside a given comfort or passive strategy polygon (1) or not (0). If there are multiple comfort polygons or passive strategies connected to the passiveStrategy_ input, this output will be a grafted list for each polygon. Each list here corresponds to the names in the "strategyNames" output above.
-------------------------: ...
chartCurvesAndTxt: The chart curves and text labels of the psychrometric chart.
psychChartMesh: A colored mesh showing the number of input hours that occur in each part of the psychrometric chart.
legend: A colored legend showing the number of hours that correspond to each color.
legendBasePt: The legend base point, which can be used to move the legend in relation to the chart with the grasshopper "move" component.
comfortPolygons: A brep representing the range of comfort for the input radiant temperature, wind speed, metabolic rate and clothing level. If multiple values have been hooked up for any of these inputs, multiple polygons will be output here.
strategyPolygons: A brep representing the area of the chart made comfortable by the passive strategies. If multiple strategies have been hooked up to the passiveStrategy_ input, multiple polygons will be output here.
-------------------------: ...
chartHourPoints: Points representing each of the hours of input temperature and humidity ratio. By default, this output is hidden; to see it, connect it to a Grasshopper preview component.
hourPointColors: Colors that correspond to the chartHourPoints above and can be hooked up to the "Swatch" input of a Grasshopper Preview component that has the hour points above connected as geometry. By default, points are colored red if they lie inside comfort or strategy polygons and are colored blue if they do not meet such comfort criteria. In the event that you have hooked up annualHourlyData_, this output will be a grafted list of colors. The first list corresponds to the comfort conditions while the second list colors points based on the annualHourlyData.
hourPointLegend: A legend that corresponds to the hour point colors above. In the event that annualHourlyData_ is connected, this output will be a grafted list of legends that each correspond to the grafted lists of colors.
"""
ghenv.Component.Name = "Ladybug_Psychrometric Chart"
ghenv.Component.NickName = 'PsychChart'
ghenv.Component.Message = 'VER 0.0.61\nNOV_20_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "2 | VisualizeWeatherData"
#compatibleLBVersion = VER 0.0.59\nNOV_20_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import Grasshopper.Kernel as gh
import math
import scriptcontext as sc
import Rhino as rc
import rhinoscriptsyntax as rs
import System
from System import Object
from clr import AddReference
AddReference('Grasshopper')
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
def mollierHXTransform(geometry):
molTransRotat = rc.Geometry.Transform.Rotation(rc.Geometry.Vector3d.YAxis, rc.Geometry.Vector3d(-1,0,0), rc.Geometry.Point3d.Origin)
molTransReflect = rc.Geometry.Transform.Mirror(rc.Geometry.Plane.WorldYZ)
geometry.Transform(molTransRotat)
geometry.Transform(molTransReflect)
return geometry
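# A minimal sketch of what mollierHXTransform does, assuming only the Rhino
# geometry API imported above (the point is a hypothetical example, not part of
# the component's data flow): the 90-degree rotation followed by the mirror
# effectively swaps the chart's axes, so a psychrometric point at
# (temperature, humidity ratio) lands at (humidity ratio, temperature),
# as on a European Mollier h-x diagram.
def _demoMollierTransform():
    pt = rc.Geometry.Point3d(20, 10, 0)
    pt = mollierHXTransform(pt)
    return pt # approximately (10, 20, 0)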
def C2F(temper):
newTemper = []
for num in temper: newTemper.append(num*9/5 + 32)
return newTemper
def F2C(temper):
newTemper = []
for num in temper: newTemper.append((num-32) * 5 / 9)
return newTemper
def BTUlb2kJkg(enthalpy):
newEnthalpy = []
for num in enthalpy: newEnthalpy.append(num/0.429922614)
return newEnthalpy
def kJkg2BTUlb(enthalpy):
newEnthalpy = []
for num in enthalpy: newEnthalpy.append(num*0.429922614)
return newEnthalpy
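# A quick sanity check of the unit-conversion helpers above (hypothetical
# values; this function is illustrative only and is never called by the
# component):
def _demoConversions():
    assert C2F([0, 100]) == [32, 212] # freezing and boiling points of water
    assert F2C([32, 212]) == [0, 100]
    # 1 BTU/lb is about 2.326 kJ/kg, so a round trip should return ~1.0:
    return kJkg2BTUlb(BTUlb2kJkg([1.0]))[0]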
def checkTheInputs():
#Define a value that will indicate whether someone has hooked up epw data.
epwData = False
epwStr = []
IPTrigger = False
farenheitVals = []
#Check length of the _dryBulbTemperature list and evaluate the contents.
checkData1 = True
epwDatFound = True
airTemp = []
airMultVal = False
if len(_dryBulbTemperature) != 0:
try:
if "Temperature" in _dryBulbTemperature[2]:
airTemp = _dryBulbTemperature[7:]
checkData1 = True
epwData = True
epwStr = _dryBulbTemperature[0:7]
if epwStr[3] == 'F' or epwStr[3] == '°F':
IPTrigger = True
except:
epwDatFound = False
if epwDatFound == False:
for item in _dryBulbTemperature:
try:
airTemp.append(float(item))
except:
if item == 'F' or item == '°F': IPTrigger = True
else: checkData1 = False
if IPTrigger == True:
farenheitVals = airTemp[:]
newAirTemp = F2C(airTemp)
airTemp = newAirTemp
if len(airTemp) > 1: airMultVal = True
if checkData1 == False:
warning = '_dryBulbTemperature input does not contain valid temperature values.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
print 'Connect a temperature for _dryBulbTemperature'
#Check length of the _relativeHumidity list and evaluate the contents.
checkData2 = False
relHumid = []
humidMultVal = False
nonValue = True
if len(_relativeHumidity) != 0:
try:
if "Humidity" in _relativeHumidity[2]:
relHumid = _relativeHumidity[7:]
checkData2 = True
epwData = True
epwStr = _relativeHumidity[0:7]
except: pass
if checkData2 == False:
for item in _relativeHumidity:
try:
if 0 <= float(item) <= 100:
relHumid.append(float(item))
checkData2 = True
else: nonValue = False
except:checkData2 = False
if nonValue == False: checkData2 = False
if len(relHumid) > 1: humidMultVal = True
if checkData2 == False:
warning = '_relativeHumidity input does not contain valid values.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
print 'Connect a value for _relativeHumidity.'
#Check length of the barometricPressure_ list and evaluate the contents.
checkData3 = False
barPress = []
pressMultVal = False
nonValue = True
if len(barometricPressure_) != 0:
try:
if "Barometric Pressure" in barometricPressure_[2]:
barPress = barometricPressure_[7:]
checkData3 = True
epwData = True
epwStr = barometricPressure_[0:7]
except: pass
if checkData3 == False:
for item in barometricPressure_:
try:
if float(item) > 0: # barometric pressure in Pascals must be positive
barPress.append(float(item))
checkData3 = True
else: nonValue = False
except:checkData3 = False
if nonValue == False: checkData3 = False
if len(barPress) > 1: pressMultVal = True
if checkData3 == False:
warning = 'barometricPressure_ input does not contain valid values.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData3 = True
barPress = [101325]
print 'No value connected for barometricPressure_. It will be assumed that the barometric pressure is that at sea level: 101325 Pa.'
#Check to make sure that the temperature, barometric pressure and humidity ratio lists are the same length.
checkData4 = False
if checkData1 == True and checkData2 == True and checkData3 == True:
if airMultVal == True or humidMultVal == True or pressMultVal == True:
listLenCheck = []
if airMultVal == True: listLenCheck.append(len(airTemp))
if humidMultVal == True: listLenCheck.append(len(relHumid))
if pressMultVal == True: listLenCheck.append(len(barPress))
if all(x == listLenCheck[0] for x in listLenCheck) == True:
checkData4 = True
calcLength = listLenCheck[0]
def duplicateData(data, calcLength):
dupData = []
for count in range(calcLength):
dupData.append(data[0])
return dupData
if airMultVal == False: airTemp = duplicateData(airTemp, calcLength)
if humidMultVal == False: relHumid = duplicateData(relHumid, calcLength)
if pressMultVal == False: barPress = duplicateData(barPress, calcLength)
else:
calcLength = None
warning = 'If you have input lists with multiple values for temperature, humidity, or pressure, the lengths of these lists must match, unless a given parameter has a single value to be applied to all items in the other lists.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData4 = True
calcLength = 1
else:
calcLength = 0
#Make sure that the lengths of the 4 other comfort parameters match and assign default values if nothing is connected.
#Check length of the meanRadTemperature_ list and evaluate the contents.
checkData5 = False
radTemp = []
radMultVal = False
if len(meanRadTemperature_) != 0:
for item in meanRadTemperature_:
try:
radTemp.append(float(item))
checkData5 = True
except: checkData5 = False
if len(radTemp) > 1: radMultVal = True
if checkData5 == False:
warning = 'meanRadTemperature_ input does not contain valid temperature values in degrees Celsius.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
if IPTrigger: radTemp = F2C(radTemp)
else:
checkData5 = True
radTemp = [23]
print 'No value connected for meanRadTemperature_. It will be assumed that the radiant temperature is equal to 23 degrees Celsius.'
#Check length of the windSpeed_ list and evaluate the contents.
checkData6 = False
windSpeed = []
windMultVal = False
nonPositive = True
if len(windSpeed_) != 0:
for item in windSpeed_:
try:
if float(item) >= 0:
windSpeed.append(float(item))
checkData6 = True
else: nonPositive = False
except: checkData6 = False
if nonPositive == False: checkData6 = False
if len(windSpeed) > 1: windMultVal = True
if checkData6 == False:
warning = 'windSpeed_ input does not contain valid wind speed in meters per second. Note that wind speed must be positive.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData6 = True
windSpeed = [0.05]
print 'No value connected for windSpeed_. It will be assumed that the wind speed is a low 0.05 m/s.'
#Check length of the metabolicRate_ list and evaluate the contents.
checkData7 = False
metRate = []
metMultVal = False
nonVal = True
if len(metabolicRate_) != 0:
for item in metabolicRate_:
try:
if 0.5 <= float(item) <= 10:
metRate.append(float(item))
checkData7 = True
else: nonVal = False
except: checkData7 = False
if checkData7 == False:
try:
# Map the acceptable activity names to their metabolic rates (met).
activityMet = {"Sleeping": 0.7, "Reclining": 0.8, "Sitting": 1.0, "Typing": 1.1,
"Standing": 1.2, "Driving": 1.5, "Cooking": 1.8, "House Cleaning": 2.7,
"Walking": 1.7, "Walking 2mph": 2.0, "Walking 3mph": 2.6, "Walking 4mph": 3.8,
"Running 9mph": 9.5, "Lifting 10lbs": 2.1, "Lifting 100lbs": 4.0,
"Shoveling": 4.4, "Dancing": 3.4, "Basketball": 6.3}
if str(metabolicRate_[0]) in activityMet: metRate.append(activityMet[str(metabolicRate_[0])])
except: pass
if len(metRate) > 0: checkData7 = True
if nonVal == False: checkData7 = False
if len(metRate) > 1: metMultVal = True
if checkData7 == False:
warning = 'metabolicRate_ input does not contain valid values. Note that metabolicRate_ must be a value between 0.5 and 10. Anything outside of that range is frankly not human.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData7 = True
metRate = [1]
print 'No value connected for metabolicRate_. It will be assumed that the metabolic rate is that of a seated person at 1 met.'
#Check length of the clothingLevel_ list and evaluate the contents.
checkData8 = False
cloLevel = []
cloMultVal = False
noVal = True
if len(clothingLevel_) != 0:
for item in clothingLevel_:
try:
if 0 <= float(item) <= 5:
cloLevel.append(float(item))
checkData8 = True
else: noVal = False
except: checkData8 = False
if noVal == False: checkData8 = False
if len(cloLevel) > 1: cloMultVal = True
if checkData8 == False:
warning = 'clothingLevel_ input does not contain valid values. Note that clothingLevel_ must be a value between 0 and 5. Anything outside of that range is frankly not human.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData8 = True
cloLevel = [1]
print 'No value connected for clothingLevel_. It will be assumed that the clothing level is that of a person wearing a 3-piece suit at 1 clo.'
#Finally, for those lists of length greater than 1, check to make sure that they are all the same length.
checkData9 = False
if checkData5 == True and checkData6 == True and checkData7 == True and checkData8 == True:
if radMultVal == True or windMultVal == True or metMultVal == True or cloMultVal == True:
listLenCheck = []
if radMultVal == True: listLenCheck.append(len(radTemp))
if windMultVal == True: listLenCheck.append(len(windSpeed))
if metMultVal == True: listLenCheck.append(len(metRate))
if cloMultVal == True: listLenCheck.append(len(cloLevel))
if all(x == listLenCheck[0] for x in listLenCheck) == True:
checkData9 = True
calcLength2 = listLenCheck[0]
def duplicateData(data, calcLength2):
dupData = []
for count in range(calcLength2):
dupData.append(data[0])
return dupData
if radMultVal == False: radTemp = duplicateData(radTemp, calcLength2)
if windMultVal == False: windSpeed = duplicateData(windSpeed, calcLength2)
if metMultVal == False: metRate = duplicateData(metRate, calcLength2)
if cloMultVal == False: cloLevel = duplicateData(cloLevel, calcLength2)
exWork = duplicateData([0], calcLength2)
else:
calcLength = None
warning = 'If you have input lists with multiple values for meanRadTemperature, windSpeed, clothingLevel, or metabolicRate, the lengths of these lists must match, unless a given parameter has a single value to be applied to all items in the other lists.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData9 = True
calcLength2 = 1
exWork = [0]
else:
calcLength2 = 0
exWork = []
# Check the humidity ratio upper and lower bounds and assign defaults if none are connected.
checkData10 = True
if comfortPar_ != []:
try:
PPDComfortThresh = float(comfortPar_[0])
humidRatioUp = float(comfortPar_[1])
humidRatioLow = float(comfortPar_[2])
except:
PPDComfortThresh = 10.0
humidRatioUp = 0.030
humidRatioLow = 0.0
checkData10 = False
warning = 'The comfortPar_ are not valid comfort parameters from the "Ladybug_PMV Comfort Parameters" component.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
PPDComfortThresh = 10.0
humidRatioUp = 0.030
humidRatioLow = 0.0
#Check the annualhourly data and conditional statement
checkData11 = True
annualHourlyData = _dryBulbTemperature + _relativeHumidity + annualHourlyData_
if epwData == True and len(_dryBulbTemperature + _relativeHumidity) > 17533 and conditionalStatement_:
titleStatement, patternList = checkConditionalStatement(annualHourlyData, conditionalStatement_)
if titleStatement == -1 or patternList == -1:
checkData11 = False
else:
titleStatement = None
patternList = []
#Check the passive strategy inputs to be sure that they are correct.
checkData12 = True
if len(passiveStrategy_) > 0:
for item in passiveStrategy_:
if item == "Evaporative Cooling" or item == "Thermal Mass + Night Vent" or item == "Occupant Use of Fans" or item == "Internal Heat Gain" or item == "Humidification Only" or item == "Dehumidification Only" or item == "Dessicant Dehumidification": pass
else: checkData12 = False
if checkData12 == False:
warning = 'Input for passiveStrategy_ is not valid.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#Check to be sure that epw data has been connected and the calculation length is 8760 if the user has connected an analysis period.
checkData13 = True
if analysisPeriod_ != []:
if epwData == True and calcLength == 8760: pass
else:
checkData13 = False
warning = 'Analysis periods can only be used with EPW or EnergyPlus simulation data that is hourly for the full year.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#Average all of the barometric pressures together (this is the pressure that will be used to construct the chart).
if len(barPress) > 1: avgBarPress = (sum(barPress)/len(barPress))
elif len(barPress) ==1: avgBarPress = barPress[0]
else: avgBarPress = None
#If all of the checkDatas have been good to go, let's give a final go ahead.
if all([checkData1, checkData2, checkData3, checkData4, checkData5, checkData6, checkData7, checkData8, checkData9, checkData10, checkData11, checkData12, checkData13]):
checkData = True
else:
checkData = False
#Let's return everything we need.
return checkData, epwData, epwStr, calcLength, airTemp, relHumid, barPress, avgBarPress, radTemp, windSpeed, metRate, cloLevel, exWork, humidRatioUp, humidRatioLow, calcLength2, PPDComfortThresh, titleStatement, patternList, IPTrigger, farenheitVals
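# All of the list checks in checkTheInputs follow one rule: each input may be a
# single value or a list, all multi-value lists must share one length, and any
# single value is duplicated out to that length. A standalone sketch of that
# rule (a hypothetical helper, not used by the component):
def _alignInputLists(lists):
    multiLengths = [len(l) for l in lists if len(l) > 1]
    if multiLengths and not all(x == multiLengths[0] for x in multiLengths):
        return None # mismatched multi-value lists, as warned about above
    target = multiLengths[0] if multiLengths else 1
    return [l * target if len(l) == 1 else l for l in lists]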
def checkConditionalStatement(annualHourlyData, conditionalStatement):
lb_preparation = sc.sticky["ladybug_Preparation"]()
indexList, listInfo = lb_preparation.separateList(annualHourlyData, lb_preparation.strToBeFound)
letters = [chr(i) for i in xrange(ord('a'), ord('z')+1)]
# remove 'and' and 'or' from conditional statements
csCleaned = conditionalStatement.replace('and', '',20000)
csCleaned = csCleaned.replace('or', '',20000)
# find the number of the lists that have assigned conditional statements
listNum = []
for count, let in enumerate(letters):
if csCleaned.find(let)!= -1: listNum.append(count)
# check if all the conditions are actually applicable
for num in listNum:
if num>len(listInfo) - 1:
warning = 'A conditional statement is assigned for list number ' + `num + 1` + ', which does not exist!\n' + \
'Please remove the letter "' + letters[num] + '" from the statements to solve this problem!\n' + \
'The number of lists is ' + `len(listInfo)` + '. Please fix this issue and try again.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return -1, -1
selList = [[]] * len(listInfo)
for i in range(len(listInfo)):
selList[i] = annualHourlyData[indexList[i]+7:indexList[i+1]]
if listInfo[i][4]!='Hourly' or listInfo[i][5]!=(1,1,1) or listInfo[i][6]!=(12,31,24) or len(selList[i])!=8760:
warning = 'At least one of the input data lists is not valid Ladybug hourly data! Please fix this issue and try again!\n List number = '+ `i+1`
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return -1, -1
# replace the right list in the conditional statement
statement = conditionalStatement.split(' ')
finalStatement = 'pattern = '
titleStatement = '... ... ...\n' +\
'Conditional Selection Applied:\n'
for statemntPart in statement:
statementCopy = str.Copy(statemntPart)
if statemntPart!='and' and statemntPart!='or':
for num in listNum:
toBeReplacedWith = 'selList[this][HOY]'.replace('this', `num`)
titleToBeReplacedWith = listInfo[num][2]
statemntPart = statemntPart.replace(letters[num], toBeReplacedWith, 20000)
statementCopy = statementCopy.replace(letters[num], titleToBeReplacedWith, 20000)
if statementCopy.find(letters[num])!=-1: break
titleStatement = titleStatement + ' ' + statementCopy
else:
titleStatement = titleStatement + '\n' + statementCopy
finalStatement = finalStatement + ' ' + statemntPart
print titleStatement
# check for the pattern
patternList = []
try:
for HOY in range(8760):
exec(finalStatement)
patternList.append(pattern)
except Exception,e:
warning = 'There is an error in the conditional statement:\n' + `e`
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
return -1, -1
return titleStatement, patternList
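# A toy illustration of the substitution that checkConditionalStatement performs
# (hypothetical three-hour lists instead of 8760-value EPW streams): the letters
# "a", "b", ... in the user's statement are rewritten into indexed lookups such
# as selList[0][HOY], and the resulting statement is exec'd once per hour.
def _demoConditionalPattern():
    selList = [[18, 25, 30], [90, 70, 50]] # "a" = temperature, "b" = humidity
    pattern = []
    for HOY in range(3):
        exec('pattern.append(selList[0][HOY] > 20 and selList[1][HOY] < 80)')
    return pattern # [False, True, True]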
def drawPsychChart(avgBarPress, lb_comfortModels, legendFont, legendFontSize, legendBold, scaleFactor, epwData, epwStr, IPTrigger, lb_visualization):
#Set a default text height if the user has not provided one.
if legendFontSize == None:
if IPTrigger: legendFontSize = 1
else: legendFontSize = 0.6
#Generate a list of temperatures that will be used to make the relative humidity curves.
if IPTrigger:
tempChartVals = range(-5, 120, 5)
tempNum = F2C(tempChartVals)
else: tempChartVals = tempNum = range(-20, 55, 5)
relHumidNum = range(10, 110, 10)
#Set up a list of lists to hold the humidity ratio values and make a list of the barometric pressure.
humidRatio = []
barPressList = []
for item in tempNum:
barPressList.append(avgBarPress)
#Get humidity ratio values for each of the temperatures at the different relative humidity levels.
for relHumid in relHumidNum:
relHumidList = []
for item in tempNum:
relHumidList.append(relHumid)
HR, EN, vapPress, satPress = lb_comfortModels.calcHumidRatio(tempNum, relHumidList, barPressList)
humidRatio.append(HR)
#Put a scale factor on the humidity ratio to bring it to the same scale as the temperature.
for listCount, list in enumerate(humidRatio):
for count, num in enumerate(list):
humidRatio[listCount][count] = num*scaleFactor
#Use the humidity ratio and the dry bulb temperature to create coordinates for the lines.
humidLinePts = []
for list in humidRatio:
linePts = []
for count, item in enumerate(list):
linePts.append(rc.Geometry.Point3d(tempChartVals[count], item, 0))
humidLinePts.append(linePts)
#Make the chart relative humidity lines.
humidCurves = []
humidCurves.append(rc.Geometry.LineCurve(rc.Geometry.Point3d(tempChartVals[0], 0, 0), rc.Geometry.Point3d(tempChartVals[-1], 0, 0)))
for pointList in humidLinePts:
humidCurves.append(rc.Geometry.Curve.CreateInterpolatedCurve(pointList, 3))
#If the humidity ratio goes above 0.030, chop off the humidity line there.
maxLine = rc.Geometry.LineCurve(rc.Geometry.Point3d(tempChartVals[0], 0.03 * scaleFactor, 0), rc.Geometry.Point3d(tempChartVals[-1], 0.03 * scaleFactor, 0))
maxBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(maxLine, rc.Geometry.Vector3d.ZAxis))
maxhumidCurves = []
for curve in humidCurves:
splitCrv = curve.Split(maxBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0:
maxhumidCurves.append(splitCrv[0])
else:
maxhumidCurves.append(curve)
#Make the isothermal lines.
tempCurves = []
tempLabelBasePts = []
tempText = []
for count, temp in enumerate(tempChartVals):
tempCurves.append(rc.Geometry.LineCurve(rc.Geometry.Point3d(temp, 0, 0), rc.Geometry.Point3d(temp, humidRatio[-1][count], 0)))
tempLabelBasePts.append(rc.Geometry.Point3d(temp-0.75, -1.3*legendFontSize, 0))
tempText.append(str(temp))
if mollierHX_ == True:
for ptCount, point in enumerate(tempLabelBasePts):
mollierHXTransform(point)
tempLabelBasePts[ptCount] = rc.Geometry.Point3d(point.X-(1.5*legendFontSize), point.Y+(0.25/legendFontSize), 0)
#Split the isothermal lines.
maxTempCurves = []
for curve in tempCurves:
splitCrv = curve.Split(maxBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0:
maxTempCurves.append(splitCrv[0])
else:
maxTempCurves.append(curve)
#Make the lines of constant humidity ratio.
satBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(humidCurves[-1], rc.Geometry.Vector3d.ZAxis))
hrLines = []
ratioList = []
ratioText = []
ratioBasePt = []
ratioStart = (0.03*scaleFactor)/6
for index in range(6):
ratioList.append(ratioStart)
ratioStart += (0.03*scaleFactor)/6
for ratio in ratioList:
hrLines.append(rc.Geometry.LineCurve(rc.Geometry.Point3d(tempChartVals[0], ratio, 0), rc.Geometry.Point3d(tempChartVals[-1], ratio, 0)))
ratioText.append(str(ratio/scaleFactor))
ratioBasePt.append(rc.Geometry.Point3d(tempChartVals[-1]+.5, ratio-.375, 0))
maxHrLines = []
for curve in hrLines:
splitCrv = curve.Split(satBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0:
maxHrLines.append(splitCrv[-1])
else:
maxHrLines.append(curve)
if mollierHX_ == True:
for ptCount, point in enumerate(ratioBasePt):
mollierHXTransform(point)
ratioBasePt[ptCount] = rc.Geometry.Point3d(point.X-(legendFontSize), point.Y, 0)
topBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(maxHrLines[-1], rc.Geometry.Vector3d.ZAxis))
#Make lines of constant enthalpy or wet bulb temperature.
if enthalpyOrWetBulb_ == True or enthalpyOrWetBulb_ == None:
if IPTrigger:
enthalpyForLines = range(0,55,5)
celciEnthalpyForLines = BTUlb2kJkg(enthalpyForLines)
else:
celciEnthalpyForLines = enthalpyForLines = range(-10,120,10)
enthalLines = []
enthText = []
for ecount, enthl in enumerate(enthalpyForLines):
if IPTrigger: enthText.append(str(enthl)+" BTU/lb")
else: enthText.append(str(enthl)+" kJ/kg")
startVal = lb_comfortModels.calcTempFromEnthalpy(celciEnthalpyForLines[ecount], 0.0)
if IPTrigger: startVal = C2F([startVal])[0]
startPt = rc.Geometry.Point3d(startVal, 0.0, 0.0)
endVal = lb_comfortModels.calcTempFromEnthalpy(celciEnthalpyForLines[ecount], 0.03)
if IPTrigger: endVal = C2F([endVal])[0]
endPt = rc.Geometry.Point3d(endVal, 0.03*scaleFactor, 0.0)
enthLine = rc.Geometry.LineCurve(startPt, endPt)
enthalLines.append(enthLine)
else:
if IPTrigger:
wetBulbForLines = range(-5,95,5)
celciWetBulbForLines = F2C(wetBulbForLines)
else:
celciWetBulbForLines = wetBulbForLines = range(-20,36,2)
enthalLines = []
enthText = []
for ecount, enthl in enumerate(wetBulbForLines):
if IPTrigger: enthText.append(str(enthl)+" F")
else: enthText.append(str(enthl)+" C")
startRH = 0
startVal, startHR = lb_comfortModels.calcTempFromWetBulb(celciWetBulbForLines[ecount], startRH, avgBarPress)
if IPTrigger: startVal = C2F([startVal])[0]
startPt = rc.Geometry.Point3d(startVal, startHR, 0.0)
endRH = 100
endVal, endHR = lb_comfortModels.calcTempFromWetBulb(celciWetBulbForLines[ecount], endRH, avgBarPress)
if IPTrigger: endVal = C2F([endVal])[0]
endPt = rc.Geometry.Point3d(endVal, endHR*scaleFactor, 0.0)
enthLine = rc.Geometry.LineCurve(startPt, endPt)
enthalLines.append(enthLine)
#Split the enthalpy/wet bulb lines with the boundary of the chart.
for crvCount, curve in enumerate(enthalLines):
splitCrv = curve.Split(satBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0: enthalLines[crvCount] = splitCrv[0]
maxHRBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(maxHrLines[-1], rc.Geometry.Vector3d.ZAxis))
for crvCount, curve in enumerate(enthalLines):
splitCrv = curve.Split(maxHRBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0: enthalLines[crvCount] = splitCrv[0]
maxTBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(maxTempCurves[-1], rc.Geometry.Vector3d.ZAxis))
for crvCount, curve in enumerate(enthalLines):
splitCrv = curve.Split(maxTBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) != 0: enthalLines[crvCount] = splitCrv[-1]
#Make the text for the lines of constant enthalpy.
enthLabelBasePts = []
if enthalpyOrWetBulb_ == True or enthalpyOrWetBulb_ == None: textFactor = 5
else: textFactor = 3
if mollierHX_:
if enthalpyOrWetBulb_ == True or enthalpyOrWetBulb_ == None: vertTFactor = 1.26
else: vertTFactor = 0.75
else: vertTFactor = None
for count, enth in enumerate(enthalLines):
enthLabelBasePts.append(rc.Geometry.Point3d(enth.PointAtEnd.X-(legendFontSize*textFactor), enth.PointAtEnd.Y+(legendFontSize*0.5), 0))
if mollierHX_ == True:
for ptCount, point in enumerate(enthLabelBasePts):
mollierHXTransform(point)
enthLabelBasePts[ptCount] = rc.Geometry.Point3d(point.X, point.Y+(vertTFactor/legendFontSize), 0)
# Bring all of the curves into one list.
chartCurves = []
chartCurves.extend(maxhumidCurves)
chartCurves.extend(maxTempCurves)
chartCurves.extend(maxHrLines)
chartCurves.extend(enthalLines)
# Make the temperature text for the chart.
tempLabels = []
for count, text in enumerate(tempText):
tempLabels.extend(lb_visualization.text2srf([text], [tempLabelBasePts[count]], legendFont, legendFontSize, legendBold)[0])
# Make the humidity ratio text for the chart.
ratioLabels = []
for count, text in enumerate(ratioText):
ratioLabels.extend(lb_visualization.text2srf([text], [ratioBasePt[count]], legendFont, legendFontSize, legendBold)[0])
# Make the relative humidity text for the chart.
relHumidBasePts = []
relHumidTxt = []
relHumidLabels = []
for curve in maxhumidCurves[1:]:
curvePt = curve.PointAtNormalizedLength(0.98)
relHumidBasePts.append(rc.Geometry.Point3d(curvePt.X-1.75, curvePt.Y, 0))
if mollierHX_ == True:
for ptCount, point in enumerate(relHumidBasePts):
mollierHXTransform(point)
relHumidBasePts[ptCount] = rc.Geometry.Point3d(point.X, point.Y+(0.5/legendFontSize), 0)
for humid in relHumidNum:
relHumidTxt.append(str(humid)+"%")
for count, text in enumerate(relHumidTxt[:-1]):
relHumidLabels.extend(lb_visualization.text2srf([text], [relHumidBasePts[count]], legendFont, legendFontSize*.75, legendBold)[0])
#Make the enthalpy labels for the chart.
enthLabels = []
for count, text in enumerate(enthText):
enthLabels.extend(lb_visualization.text2srf([text], [enthLabelBasePts[count]], legendFont, legendFontSize*0.75, legendBold)[0])
#Make axis labels for the chart.
xAxisLabels = []
xAxisTxt = ["Dry Bulb Temperature"]
if mollierHX_ == True: xAxisPt = [rc.Geometry.Point3d(-5*legendFontSize, 15, 0)]
else: xAxisPt = [rc.Geometry.Point3d(tempChartVals[0]-0.5, -4*legendFontSize, 0)]
xAxisLabels.extend(lb_visualization.text2srf(xAxisTxt, xAxisPt, legendFont, legendFontSize*1.25, legendBold)[0])
if mollierHX_ == True:
rotateTransf = rc.Geometry.Transform.Rotation(1.57079633, xAxisPt[0])
for geo in xAxisLabels:
geo.Transform(rotateTransf)
yAxisLabels = []
yAxisTxt = ["Humidity Ratio"]
if mollierHX_ == True:
if IPTrigger: yAxisPt = [rc.Geometry.Point3d(40, 115+(4*legendFontSize), 0)]
else: yAxisPt = [rc.Geometry.Point3d(20, 50+(4*legendFontSize), 0)]
yAxisLabels.extend(lb_visualization.text2srf(yAxisTxt, yAxisPt, legendFont, legendFontSize*1.25, legendBold)[0])
else:
yAxisPt = [rc.Geometry.Point3d(tempChartVals[-1]+(7*legendFontSize), 0.0245*scaleFactor, 0)]
yAxisLabels.extend(lb_visualization.text2srf(yAxisTxt, yAxisPt, legendFont, legendFontSize*1.25, legendBold)[0])
rotateTransf = rc.Geometry.Transform.Rotation(1.57079633, rc.Geometry.Point3d(tempChartVals[-1]+(7*legendFontSize), 0.0245*scaleFactor, 0))
for geo in yAxisLabels:
geo.Transform(rotateTransf)
#Make the chart title.
def getDateStr(start, end):
stMonth, stDay, stHour, endMonth, endDay, endHour = lb_visualization.readRunPeriod((start, end), False)
period = `stDay`+ ' ' + lb_visualization.monthList[stMonth-1] + ' ' + `stHour` + ':00' + \
" - " + `endDay`+ ' ' + lb_visualization.monthList[endMonth-1] + ' ' + `endHour` + ':00'
return period
titleLabels = []
if epwData == True:
if mollierHX_ == True: titleTxt = ["Mollier HX Diagram", epwStr[1]]
else: titleTxt = ["Psychrometric Chart", epwStr[1]]
if analysisPeriod_ == []:
titleTxt.append(getDateStr(epwStr[5], epwStr[6]))
else:
titleTxt.append(getDateStr(analysisPeriod_[0], analysisPeriod_[1]))
else: titleTxt = ["Psychrometric Chart", "Unknown Location", "Unknown Time Period"]
if mollierHX_ == True: titlePt = [rc.Geometry.Point3d(20, -0.011*scaleFactor, 0), rc.Geometry.Point3d(20, (-0.011*scaleFactor)-(legendFontSize*2.5), 0), rc.Geometry.Point3d(20, (-0.011*scaleFactor)-(legendFontSize*5), 0)]
else: titlePt = [rc.Geometry.Point3d(-19, 0.0295*scaleFactor, 0), rc.Geometry.Point3d(-19, (0.0295*scaleFactor)-(legendFontSize*2.5), 0), rc.Geometry.Point3d(-19, (0.0295*scaleFactor)-(legendFontSize*5), 0)]
for count, text in enumerate(titleTxt):
titleLabels.extend(lb_visualization.text2srf([text], [titlePt[count]], legendFont, legendFontSize*1.5, legendBold)[0])
#Bring all text and curves together in one list.
chartCrvAndText = []
for item in chartCurves: chartCrvAndText.append(item)
for item in tempLabels: chartCrvAndText.append(item)
for item in ratioLabels: chartCrvAndText.append(item)
for item in relHumidLabels:
chartCrvAndText.append(item)
for item in xAxisLabels:
chartCrvAndText.append(item)
for item in yAxisLabels:
chartCrvAndText.append(item)
for item in titleLabels:
chartCrvAndText.append(item)
for item in enthLabels:
chartCrvAndText.append(item)
return chartCrvAndText, humidCurves
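# For reference, the humidity ratio that lb_comfortModels.calcHumidRatio returns
# for each (temperature, humidity, pressure) triplet can be approximated with
# standard psychrometric relations. This is a simplified sketch using a
# Magnus-type saturation-pressure fit, not Ladybug's exact implementation:
def _approxHumidRatio(tempC, relHumid, barPress):
    satPress = 610.94 * math.exp((17.625 * tempC) / (tempC + 243.04)) # Pa, over water
    vapPress = satPress * (relHumid / 100.0)
    return 0.621945 * vapPress / (barPress - vapPress) # kg water per kg dry air
# For example, _approxHumidRatio(20.0, 50.0, 101325.0) is roughly 0.0073.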
def colorMesh(airTemp, relHumid, barPress, lb_preparation, lb_comfortModels, lb_visualization, scaleFactor, lowB, highB, customColors, IPTrigger, farenheitVals):
# Make the full chart mesh
#Generate a list of temperatures that will be used to make the mesh.
if IPTrigger:
initVal = -5
tempNumMesh = []
for tempVal in range(73):
tempNumMesh.append(initVal)
initVal += (5.0/3.0) # float division; (5/3) truncates to 1 in Python 2
#tempNumMesh = range(-5, 116, 1)
celNumMesh = F2C(tempNumMesh)
else: celNumMesh = tempNumMesh = range(-20, 51, 1)
relHumidNumMesh = range(0, 105, 5)
#Get humidity ratio values for each of the temperatures at the different relative humidity levels.
humidRatioMesh = []
for relHum in relHumidNumMesh:
relHumidListMesh = []
for item in tempNumMesh:
relHumidListMesh.append(relHum)
pressList = []
for item in tempNumMesh:
pressList.append(avgBarPress)
HR, EN, vapPress, satPress = lb_comfortModels.calcHumidRatio(celNumMesh, relHumidListMesh, pressList)
for count, num in enumerate(HR):
HR[count] = num*scaleFactor
humidRatioMesh.append(HR)
#Make the mesh faces.
chartMesh = rc.Geometry.Mesh()
meshFacePts = []
for listCount, humilist in enumerate(humidRatioMesh[:-1]):
for tempCount, temp in enumerate(tempNumMesh[:-1]):
facePt1 = rc.Geometry.Point3d(temp, humilist[tempCount], 0)
facePt2 = rc.Geometry.Point3d(temp, humidRatioMesh[listCount+1][tempCount], 0)
facePt3 = rc.Geometry.Point3d(tempNumMesh[tempCount+1], humidRatioMesh[listCount+1][tempCount+1], 0)
facePt4 = rc.Geometry.Point3d(tempNumMesh[tempCount+1], humilist[tempCount+1], 0)
meshFacePts.append([facePt1, facePt2, facePt3, facePt4])
for ptlist in meshFacePts:
mesh = rc.Geometry.Mesh()
for point in ptlist:
mesh.Vertices.Add(point)
mesh.Faces.AddFace(0, 1, 2, 3)
chartMesh.Append(mesh)
uncoloredMesh = chartMesh
#Calculate the humidity ratio for each of the hours of the year and use this to make points for the chart.
HR, EN, vapPress, satPress = lb_comfortModels.calcHumidRatio(airTemp, relHumid, barPress)
hourPts = []
for count, ratio in enumerate(HR):
if IPTrigger: hourPts.append(rc.Geometry.Point3d(farenheitVals[count], ratio*scaleFactor, 0))
else: hourPts.append(rc.Geometry.Point3d(airTemp[count], ratio*scaleFactor, 0))
#Make a list to hold values for all of the mesh faces.
meshFrequency = []
for count, value in enumerate(range(0, 100, 5)):
meshFrequency.append([])
if IPTrigger:
for face in range(72): meshFrequency[count].append([])
else:
for face in range(-20, 50, 1): meshFrequency[count].append([])
#Bin the input humidity and temperatures into categories that correspond to the mesh faces.
def getTempIndex(hour):
if airTemp[hour] > -20 and airTemp[hour] < 50:
index = int(round(airTemp[hour] +19.5))
else: index = -1
return index
def getTempIndexIP(hour):
if farenheitVals[hour] > -5 and farenheitVals[hour] < 115:
index = int((farenheitVals[hour] +4.5)*(3.0/5.0)) # float division; (3/5) truncates to 0 in Python 2
else: index = -1
return index
for hour, humid in enumerate(relHumid):
if IPTrigger: tempIndex = getTempIndexIP(hour)
else: tempIndex = getTempIndex(hour)
if tempIndex != -1:
# Map the relative humidity into one of twenty 5% bins; values of 95% and above share the top bin.
humidIndex = min(int(humid / 5), 19)
meshFrequency[humidIndex][tempIndex].append(1)
#Sum all of the lists together to get the frequency.
finalMeshFrequency = []
for humidlist in meshFrequency:
for templist in humidlist:
finalMeshFrequency.append(sum(templist))
#Get a list of colors
colors = lb_visualization.gradientColor(finalMeshFrequency, lowB, highB, customColors)
# color the mesh faces.
uncoloredMesh.VertexColors.CreateMonotoneMesh(System.Drawing.Color.Gray)
for srfNum in range (uncoloredMesh.Faces.Count):
uncoloredMesh.VertexColors[4 * srfNum + 0] = colors[srfNum]
uncoloredMesh.VertexColors[4 * srfNum + 1] = colors[srfNum]
uncoloredMesh.VertexColors[4 * srfNum + 3] = colors[srfNum]
uncoloredMesh.VertexColors[4 * srfNum + 2] = colors[srfNum]
# Remove the mesh faces that do not have any hour associated with them.
cullFaceIndices = []
for count, freq in enumerate(finalMeshFrequency):
if freq == 0:
cullFaceIndices.append(count)
uncoloredMesh.Faces.DeleteFaces(cullFaceIndices)
#Flip the mesh to be sure that it always displays correctly.
uncoloredMesh.Flip(True, True, True)
#Return everything that's useful.
return hourPts, uncoloredMesh, finalMeshFrequency
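# The binning in colorMesh maps every hour onto a mesh face through a
# temperature column index and a humidity row index. A standalone check of the
# Celsius column index (hypothetical values): columns span -20 C to 50 C in
# 1-degree steps, so -19.7 C maps to column 0 and 49.4 C maps to column 69.
def _demoTempColumn(tempC):
    if -20 < tempC < 50:
        return int(round(tempC + 19.5))
    return -1 # off the chart, the same sentinel used by getTempIndex above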
def unionAllCurves(Curves):
res = []
for curveCount in range(0, len(Curves), 2):
try:
sc.doc = rc.RhinoDoc.ActiveDoc #change target document
rs.EnableRedraw(False)
guid1 = sc.doc.Objects.AddCurve(Curves[curveCount])
guid2 = sc.doc.Objects.AddCurve(Curves[curveCount + 1])
all = rs.CurveBooleanUnion([guid1, guid2])
rs.DeleteObjects(guid1)
rs.DeleteObjects(guid2)
if all:
a = [rs.coercegeometry(a) for a in all]
for g in a: g.EnsurePrivateCopy() #must ensure copy if we delete from doc
rs.DeleteObjects(all)
sc.doc = ghdoc #put back document
rs.EnableRedraw()
if a == None:
a = [Curves[curveCount], Curves[curveCount + 1]]
except:
rs.DeleteObjects(guid1)
sc.doc = ghdoc #put back document
rs.EnableRedraw()
a = [Curves[curveCount]]
if a:
res.extend(a)
return res
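# unionAllCurves merges curves pairwise through the active Rhino document, so a
# single pass roughly halves the list. Callers loop it until one region remains,
# as in this hedged sketch of the driver pattern used further below:
def _unionUntilOne(curves):
    merged = curves
    attempts = 0
    while len(merged) > 1 and attempts < len(curves):
        merged = unionAllCurves(merged)
        attempts += 1
    return merged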
def calcComfAndStrategyPolygons(radTemp, windSpeed, metRate, cloLevel, exWork, humidRatioUp, humidRatioLow, passiveStrategy, relHumidLines, calcLengthComf, lb_comfortModels, chartBoundary, scaleFactor, PPDComfortThresh, IPTrigger):
#Take just the top, middle, and bottom lines for making the comfort range in order to speed up the calculation.
relHumidLines = [relHumidLines[0], relHumidLines[5], relHumidLines[10]]
#Define max/min temperatures.
if not IPTrigger:
maxTempe = 50
minTempe = -20
else:
maxTempe = C2F([50])[0]
minTempe = C2F([-20])[0]
#Make a comfort polyline for each of the variables in the comfCalcLength.
#First get the points that represent the lower and upper bound of comfort at each relative humidity line.
comfPolyLinePts = []
for index in range(calcLengthComf):
upTemperPts = []
downTemperPts = []
for count, humidity in enumerate(range(0,150,50)):
upTemper, downTemper = lb_comfortModels.calcComfRange(radTemp[index]+2, radTemp[index]-2, radTemp[index], windSpeed[index], humidity, metRate[index], cloLevel[index], exWork[index], PPDComfortThresh)
if IPTrigger == True: upTemper, downTemper = C2F([upTemper])[0], C2F([downTemper])[0]
if upTemper < maxTempe:
if upTemper > minTempe:
upIntersect = rc.Geometry.Intersect.Intersection.CurvePlane(relHumidLines[count], rc.Geometry.Plane(rc.Geometry.Point3d(upTemper, 0,0), rc.Geometry.Vector3d.XAxis), sc.doc.ModelAbsoluteTolerance)[0].PointA
else: upIntersect = relHumidLines[count].PointAtStart
else: upIntersect = relHumidLines[count].PointAtEnd
upTemperPts.append(upIntersect)
if downTemper < maxTempe:
if downTemper > minTempe:
downIntersect = rc.Geometry.Intersect.Intersection.CurvePlane(relHumidLines[count], rc.Geometry.Plane(rc.Geometry.Point3d(downTemper, 0,0), rc.Geometry.Vector3d.XAxis), sc.doc.ModelAbsoluteTolerance)[0].PointA
else: downIntersect = relHumidLines[count].PointAtStart
else: downIntersect = relHumidLines[count].PointAtEnd
downTemperPts.append(downIntersect)
comfPolyLinePts.append([upTemperPts, downTemperPts])
#Use the collected points to define a boundary curve around the comfort zone.
chartBoundaryBrep = rc.Geometry.Surface.CreateExtrusion(chartBoundary, rc.Geometry.Vector3d.ZAxis)
comfortCurves = []
comfortCrvSegments = []
for futurePoly in comfPolyLinePts:
upperBoundary = rc.Geometry.Curve.CreateInterpolatedCurve(futurePoly[0], 3)
lowerBoundary = rc.Geometry.Curve.CreateInterpolatedCurve(futurePoly[1], 3)
try:
upperBoundary = upperBoundary.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
lowerBoundary = lowerBoundary.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
except: pass
upperBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(upperBoundary, rc.Geometry.Vector3d.ZAxis))
lowerBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(lowerBoundary, rc.Geometry.Vector3d.ZAxis))
splitCurve = chartBoundary.Split(upperBrep, sc.doc.ModelAbsoluteTolerance)[0]
try:
bottomCurve = splitCurve.Split(lowerBrep, sc.doc.ModelAbsoluteTolerance)[0]
topCurve = splitCurve.Split(lowerBrep, sc.doc.ModelAbsoluteTolerance)[2]
joinedCurves = rc.Geometry.Curve.JoinCurves([upperBoundary, topCurve, lowerBoundary, bottomCurve])[0]
comfortCrvSegments.append([upperBoundary, lowerBoundary, topCurve, bottomCurve])
comfortCurves.append(joinedCurves)
except:
warning = 'Comfort polygon has fallen completely off of the psych chart.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
if comfortCurves != []:
#If the user has specified a max or a min humidity ratio, use that to trim the comfort boundary.
if humidRatioUp != 0.03:
splittingLineUp = rc.Geometry.LineCurve(rc.Geometry.Point3d(-30, humidRatioUp*scaleFactor, 0), rc.Geometry.Point3d(120, humidRatioUp*scaleFactor, 0))
splittingBrepUp = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(splittingLineUp, rc.Geometry.Vector3d.ZAxis))
for count, curve in enumerate(comfortCurves):
try:
splitCurves = curve.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)
if len(splitCurves) > 1:
joinedComfBound = rc.Geometry.Curve.JoinCurves([splitCurves[0], rc.Geometry.LineCurve(splitCurves[0].PointAtStart, splitCurves[0].PointAtEnd)])[0]
comfortCurves[count] = joinedComfBound
else: pass
except: pass
if humidRatioLow != 0:
splittingLineLow = rc.Geometry.LineCurve(rc.Geometry.Point3d(-30, humidRatioLow*scaleFactor, 0), rc.Geometry.Point3d(120, humidRatioLow*scaleFactor, 0))
splittingBrepLow = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(splittingLineLow, rc.Geometry.Vector3d.ZAxis))
for count, curve in enumerate(comfortCurves):
try:
splitCurves = curve.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)
if len(splitCurves) > 1:
joinedComfBound = rc.Geometry.Curve.JoinCurves([splitCurves[1], rc.Geometry.LineCurve(splitCurves[1].PointAtStart, splitCurves[1].PointAtEnd)])[0]
comfortCurves[count] = joinedComfBound
else: pass
except: pass
#If the user has multiple comfort polygons and has selected to merge them, then merge them.
mergedCurvesFinal = comfortCurves
if len(comfortCurves) > 1 and mergeComfPolygons_ == True:
listLength = len(comfortCurves)
count = 0
while len(mergedCurvesFinal) > 1 and count < int(listLength/2) + 1:
mergedCurvesFinal = unionAllCurves(mergedCurvesFinal)
count += 1
if mergedCurvesFinal == None:
mergedCurvesFinal = comfortCurves
print "Attempt to merge comfort curves failed. Component will return multiple comfort boundaries."
#Add the comfort polygons to the strategy list.
strategyListTest = []
if len(mergedCurvesFinal) == 1:
strategyListTest.append("Comfort")
else:
for count, curve in enumerate(mergedCurvesFinal):
strategyListTest.append("Comfort " + str(count))
#Organize data to be used to construct the strategy curves
windSpeed.sort()
windSpeed[0] = windSpeed[-1]
cloLevel.sort()
cloLevel[0] = cloLevel[0]
upBoundXList = []
upBoundCrv = []
lowBoundXList = []
lowBoundCrv = []
for crvList in comfortCrvSegments:
upBoundXList.append(crvList[0].PointAtStart.X)
upBoundCrv.append(crvList[0])
lowBoundXList.append(crvList[1].PointAtEnd.X)
lowBoundCrv.append(crvList[1])
upBoundXList, upBoundCrv = zip(*sorted(zip(upBoundXList, upBoundCrv)))
comfortCrvSegments[0][0] = upBoundCrv[-1]
lowBoundXList, lowBoundCrv = zip(*sorted(zip(lowBoundXList, lowBoundCrv)))
comfortCrvSegments[0][1] = lowBoundCrv[0]
#Define a function to offset curves and return things that will stand out on the psychrometric chart.
def outlineCurve(curve):
try:
offsetCrv = curve.Offset(rc.Geometry.Plane.WorldXY, 0.15, sc.doc.ModelAbsoluteTolerance, rc.Geometry.CurveOffsetCornerStyle.Sharp)[0]
finalBrep = (rc.Geometry.Brep.CreatePlanarBreps([curve, offsetCrv])[0])
if finalBrep.Edges.Count < 3:
finalBrep = curve
except:
finalBrep = curve
warning = "Creating an outline of one of the comfort or strategy curves failed. Component will return a solid brep."
print warning
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return finalBrep
#Define a function that will extract the points from a polycurve line
def getCurvePoints(curve):
exploCurve = rc.Geometry.PolyCurve.DuplicateSegments(curve)
individPts = []
for line in exploCurve:
individPts.append(line.PointAtStart)
return individPts
#Turn the comfort curve into a brep that will show up well on the chart.
finalComfortBreps = []
for curve in mergedCurvesFinal:
finalComfortBreps.append(outlineCurve(curve))
#Evaluate each of the connected strategies and draw polygons for them on the chart.
passiveStrategyCurves = []
passiveStrategyBreps = []
if len(passiveStrategy) != 0:
#If the user has connected strategy parameters, read them out.
if strategyPar_ != []:
if len(strategyPar_) == 4:
tempAboveComf = strategyPar_[0]
tempBelowComf = strategyPar_[1]
maxWindSpeed = strategyPar_[2]
bldgBalPt = strategyPar_[3]
else:
warning = 'The strategyPar_ list does not contain valid data. StrategyPar_ must come from the "Ladybug_Passive Strategy Parameters" component.'
print warning
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
tempAboveComf = 16.7
tempBelowComf = 2.8
maxWindSpeed = 1.5
bldgBalPt = 12.8
else:
tempAboveComf = 16.7
tempBelowComf = 2.8
maxWindSpeed = 1.5
bldgBalPt = 12.8
if IPTrigger:
tempAboveComf = C2F([tempAboveComf])[0]-32
tempBelowComf = C2F([tempBelowComf])[0]-32
bldgBalPt = C2F([bldgBalPt])[0]
for comfCount, comfortCurve in enumerate([mergedCurvesFinal[0]]):
#If the user has hooked up evaporative cooling, add an evaporative cooling curve to the chart.
if "Evaporative Cooling" in passiveStrategy:
comfPolygonPts = getCurvePoints(comfortCurve)
ptXYSum = []
for point in comfPolygonPts:
ptXYSum.append(point.X + point.Y)
ptXYSum, comfPolygonPts = zip(*sorted(zip(ptXYSum, comfPolygonPts)))
startPt = comfPolygonPts[-1]
#Calculate the enthalpy at the start point.
if IPTrigger:
startTemp = F2C([startPt.X])[0]
endTemp = F2C([115.5])[0]
else:
startTemp = startPt.X
endTemp = 50.5
enthalpy = (startTemp * (1.01 + 0.00189*((startPt.Y/scaleFactor)*1000))) + 2.5*((startPt.Y/scaleFactor)*1000)
#If the temperature at the edge of the chart is 50C, use that to find another point of the line.
newHR = (((enthalpy - endTemp) / 2.5945)/1000)* scaleFactor
if IPTrigger: maxTempera = 115
else: maxTempera = 50
endPt = rc.Geometry.Point3d(maxTempera, newHR, 0)
evapCoolLine = rc.Geometry.LineCurve(startPt, endPt)
#If there is no minimum humidity ratio, use the comfort upper curve. Otherwise, use the comfort bottom curve.
if humidRatioLow == 0 and humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
boundaryLine = comfortCrvSegments[comfCount][0]
joinedEvapBound = rc.Geometry.Curve.JoinCurves([evapCoolLine, boundaryLine])[0]
elif humidRatioLow == 0:
boundaryLine = comfortCrvSegments[comfCount][0]
boundaryLine = boundaryLine.Split(rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(evapCoolLine, rc.Geometry.Vector3d.ZAxis)), sc.doc.ModelAbsoluteTolerance)[0]
joinedEvapBound = rc.Geometry.Curve.JoinCurves([evapCoolLine, boundaryLine])[0]
elif humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
boundaryLine = comfortCrvSegments[comfCount][1]
boundaryLine = boundaryLine.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[0]
transVector = rc.Geometry.Vector3d.Subtract(rc.Geometry.Vector3d(boundaryLine.PointAtEnd.X, boundaryLine.PointAtEnd.Y,boundaryLine.PointAtEnd.Z), rc.Geometry.Vector3d(evapCoolLine.PointAtStart.X, evapCoolLine.PointAtStart.Y,evapCoolLine.PointAtStart.Z))
evapLine2 = evapCoolLine.DuplicateCurve()
evapLine2.Translate(transVector)
evapLine2 = evapLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
comfLine2 = comfortCrvSegments[comfCount][0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
comfLine1 = rc.Geometry.LineCurve(comfLine2.PointAtStart, boundaryLine.PointAtEnd)
joinedEvapBound = rc.Geometry.Curve.JoinCurves([evapCoolLine, evapLine2, comfLine1, comfLine2])[0]
else:
boundaryLine = comfortCrvSegments[comfCount][1]
boundaryLine = boundaryLine.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[0]
transVector = rc.Geometry.Vector3d.Subtract(rc.Geometry.Vector3d(boundaryLine.PointAtEnd.X, boundaryLine.PointAtEnd.Y,boundaryLine.PointAtEnd.Z), rc.Geometry.Vector3d(evapCoolLine.PointAtStart.X, evapCoolLine.PointAtStart.Y,evapCoolLine.PointAtStart.Z))
evapLine2 = evapCoolLine.DuplicateCurve()
evapLine2.Translate(transVector)
evapLine2 = evapLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
comfLine2 = comfortCrvSegments[comfCount][0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
comfLine2 = comfLine2.Split(rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(evapCoolLine, rc.Geometry.Vector3d.ZAxis)), sc.doc.ModelAbsoluteTolerance)[0]
comfLine1 = rc.Geometry.LineCurve(comfLine2.PointAtStart, boundaryLine.PointAtEnd)
joinedEvapBound = rc.Geometry.Curve.JoinCurves([evapCoolLine, evapLine2, comfLine1, comfLine2])[0]
joinedEvapBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(joinedEvapBound, rc.Geometry.Vector3d.ZAxis))
chartBoundSegments = chartBoundary.Split(joinedEvapBrep, sc.doc.ModelAbsoluteTolerance)
if len(chartBoundSegments) == 3:
segment = chartBoundSegments[2]
else: segment = chartBoundSegments[1]
joinedEvapCoolBound = rc.Geometry.Curve.JoinCurves([joinedEvapBound, segment])[0]
passiveStrategyCurves.append(joinedEvapCoolBound)
passiveStrategyBreps.append(outlineCurve(joinedEvapCoolBound))
strategyListTest.append("Evaporative Cooling")
#If the user has hooked up thermal mass and night flushing, add a thermal mass curve to the chart.
if "Thermal Mass + Night Vent" in passiveStrategy:
#If there is no minimum humidity ratio, use the comfort upper curve. Otherwise, use the comfort bottom curve.
ChartBoundCheck = 0
if humidRatioLow == 0.0 and humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
strategyLine = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(comfortCrvSegments[comfCount][0].PointAtEnd.X+tempAboveComf, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0))
boundaryLine = comfortCrvSegments[comfCount][0]
transformMass = rc.Geometry.Transform.Translation(tempAboveComf, 0, 0)
boundaryLine2 = boundaryLine.DuplicateCurve()
boundaryLine2.Transform(transformMass)
splitCrv = boundaryLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) == 2:
boundaryLine2 = splitCrv[1]
ChartBoundCheck = 2
else: ChartBoundCheck = 1
joinedMassBound = rc.Geometry.Curve.JoinCurves([strategyLine, boundaryLine, boundaryLine2])[0]
elif humidRatioLow == 0.0:
cornerPt = rc.Geometry.Intersect.Intersection.CurveCurve(splittingLineUp, comfortCrvSegments[comfCount][0], sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA
strategyLine = rc.Geometry.LineCurve(cornerPt, rc.Geometry.Point3d(cornerPt.X+tempAboveComf, humidRatioUp*scaleFactor, 0))
boundaryLine = comfortCrvSegments[comfCount][0].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0]
transformMass = rc.Geometry.Transform.Translation(tempAboveComf, 0, 0)
boundaryLine2 = boundaryLine.DuplicateCurve()
boundaryLine2.Transform(transformMass)
splitCrv = boundaryLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) == 2:
boundaryLine2 = splitCrv[1]
ChartBoundCheck = 2
else: ChartBoundCheck = 1
joinedMassBound = rc.Geometry.Curve.JoinCurves([strategyLine, boundaryLine, boundaryLine2])[0]
elif humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
strategyLine1 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(comfortCrvSegments[comfCount][0].PointAtEnd.X+tempAboveComf, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0))
cornerPt = rc.Geometry.Intersect.Intersection.CurveCurve(splittingLineLow, comfortCrvSegments[comfCount][0], sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA
strategyLine2 = rc.Geometry.LineCurve(cornerPt, rc.Geometry.Point3d(cornerPt.X+tempAboveComf, humidRatioLow*scaleFactor, 0))
splitCrv1 = strategyLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv1) == 2:
strategyLine2 = splitCrv1[0]
boundaryLine = comfortCrvSegments[comfCount][0]
boundaryLine = boundaryLine.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[-1]
transformMass = rc.Geometry.Transform.Translation(tempAboveComf, 0, 0)
boundaryLine2 = boundaryLine.DuplicateCurve()
boundaryLine2.Transform(transformMass)
splitCrv = boundaryLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) == 2:
boundaryLine2 = splitCrv[1]
ChartBoundCheck = 0
else: ChartBoundCheck = 3
joinedMassBound = rc.Geometry.Curve.JoinCurves([strategyLine1, boundaryLine, strategyLine2, boundaryLine2])[0]
else:
cornerPt1 = rc.Geometry.Intersect.Intersection.CurveCurve(splittingLineUp, comfortCrvSegments[comfCount][0], sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA
cornerPt2 = rc.Geometry.Intersect.Intersection.CurveCurve(splittingLineLow, comfortCrvSegments[comfCount][0], sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA
strategyLine1 = rc.Geometry.LineCurve(cornerPt1, rc.Geometry.Point3d(cornerPt1.X+tempAboveComf, humidRatioUp*scaleFactor, 0))
strategyLine2 = rc.Geometry.LineCurve(cornerPt2, rc.Geometry.Point3d(cornerPt2.X+tempAboveComf, humidRatioLow*scaleFactor, 0))
splitCrv1 = strategyLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv1) == 2:
strategyLine2 = splitCrv1[0]
boundaryLine = comfortCrvSegments[comfCount][0]
boundaryLine = boundaryLine.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[-1]
boundaryLine = boundaryLine.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0]
transformMass = rc.Geometry.Transform.Translation(tempAboveComf, 0, 0)
boundaryLine2 = boundaryLine.DuplicateCurve()
boundaryLine2.Transform(transformMass)
splitCrv = boundaryLine2.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)
if len(splitCrv) == 2:
boundaryLine2 = splitCrv[1]
ChartBoundCheck = 0
else: ChartBoundCheck = 3
joinedMassBound = rc.Geometry.Curve.JoinCurves([strategyLine1, boundaryLine, strategyLine2, boundaryLine2])[0]
joinedMassBrep = rc.Geometry.Brep.CreateFromSurface(rc.Geometry.Surface.CreateExtrusion(joinedMassBound, rc.Geometry.Vector3d.ZAxis))
chartBoundSegments = chartBoundary.Split(joinedMassBrep, sc.doc.ModelAbsoluteTolerance)
if ChartBoundCheck == 1:
segment = chartBoundSegments[0]
elif ChartBoundCheck == 2:
try: segment = chartBoundSegments[2]
except: segment = chartBoundSegments[1]
elif ChartBoundCheck == 0: segment = chartBoundSegments[1]
if len(chartBoundSegments) != 0: joinedMassCoolBound = rc.Geometry.Curve.JoinCurves([joinedMassBound, segment])[0]
else: joinedMassCoolBound = joinedMassBound
passiveStrategyCurves.append(joinedMassCoolBound)
passiveStrategyBreps.append(outlineCurve(joinedMassCoolBound))
strategyListTest.append("Thermal Mass + Night Vent")
#If the user has hooked up occupant use of fans, add a fan-use curve to the chart.
if "Occupant Use of Fans" in passiveStrategy and windSpeed[comfCount] < maxWindSpeed:
try:
#Calculate the upper boundary of Natural ventilation.
upTemperPts = []
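#Sample the fan-assisted comfort range at 0, 50 and 100% relative humidity; count is assumed to index the matching relHumidLines.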
for count, humidity in enumerate(range(0,150,50)):
upTemper, downTemper = lb_comfortModels.calcComfRange(radTemp[comfCount]+2, radTemp[comfCount]-2, radTemp[comfCount], maxWindSpeed, humidity, metRate[comfCount], cloLevel[comfCount], exWork[comfCount], PPDComfortThresh)
if IPTrigger: upTemperSpatial, downTemperSpatial = C2F([upTemper])[0], C2F([downTemper])[0]
else: upTemperSpatial, downTemperSpatial = upTemper, downTemper
if upTemperSpatial < maxTempe:
if downTemperSpatial > minTempe:
upIntersect = rc.Geometry.Intersect.Intersection.CurvePlane(relHumidLines[count], rc.Geometry.Plane(rc.Geometry.Point3d(upTemperSpatial, 0,0), rc.Geometry.Vector3d.XAxis), sc.doc.ModelAbsoluteTolerance)[0].PointA
else: upIntersect = relHumidLines[count].PointAtStart
else: upIntersect = relHumidLines[count].PointAtEnd
upTemperPts.append(upIntersect)
natVentBoundary = rc.Geometry.Curve.CreateInterpolatedCurve(upTemperPts, 3)
try: natVentBoundary = upperBoundary.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
except: pass
if humidRatioLow == 0 and humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
strategyLine1 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Intersect.Intersection.CurveCurve(natVentBoundary, rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(maxTempe, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0)), sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA)
strategyLine2 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtStart, natVentBoundary.PointAtStart)
boundaryLine = comfortCrvSegments[comfCount][0]
natVentLine = natVentBoundary.Split(rc.Geometry.Surface.CreateExtrusion(rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(maxTempe, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0)), rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0]
elif humidRatioLow == 0:
strategyLine1 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].PointAtEnd, natVentBoundary.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].PointAtEnd)
strategyLine2 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtStart, natVentBoundary.PointAtStart)
boundaryLine = comfortCrvSegments[comfCount][0].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0]
natVentLine = natVentBoundary.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0]
elif humidRatioUp*scaleFactor >= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
strategyLine1 = rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Intersect.Intersection.CurveCurve(natVentBoundary, rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(maxTempe, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0)), sc.doc.ModelAbsoluteTolerance, sc.doc.ModelAbsoluteTolerance)[0].PointA)
natVentLine = natVentBoundary.Split(rc.Geometry.Surface.CreateExtrusion(rc.Geometry.LineCurve(comfortCrvSegments[comfCount][0].PointAtEnd, rc.Geometry.Point3d(maxTempe, comfortCrvSegments[comfCount][0].PointAtEnd.Y, 0)), rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
boundaryLine = comfortCrvSegments[comfCount][0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
strategyLine2 = rc.Geometry.LineCurve(boundaryLine.PointAtStart, natVentLine.PointAtStart)
else:
natVentLine = natVentBoundary.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
boundaryLine = comfortCrvSegments[comfCount][0].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
strategyLine1 = rc.Geometry.LineCurve(boundaryLine.PointAtStart, natVentLine.PointAtStart)
strategyLine2 = rc.Geometry.LineCurve(boundaryLine.PointAtEnd, natVentLine.PointAtEnd)
joinedNatVentBound = rc.Geometry.Curve.JoinCurves([strategyLine1, boundaryLine, strategyLine2, natVentLine])[0]
passiveStrategyCurves.append(joinedNatVentBound)
passiveStrategyBreps.append(outlineCurve(joinedNatVentBound))
strategyListTest.append("Occupant Use of Fans")
except:
print "Use of fans is not helful to ouccupants when the desired air is so hot."
#If the user has hooked up internal gain, add an internal gain curve to the chart.
if "Internal Heat Gain" in passiveStrategy:
heatBoundary = rc.Geometry.LineCurve(rc.Geometry.Point3d(bldgBalPt, 0, 0), rc.Geometry.Point3d(bldgBalPt, scaleFactor*0.03, 0))
heatBoundary = heatBoundary.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
if humidRatioLow == 0:
boundaryLine = comfortCrvSegments[comfCount][1]
strategyLine1 = chartBoundary.Split(rc.Geometry.Surface.CreateExtrusion(boundaryLine, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0].Split(rc.Geometry.Surface.CreateExtrusion(heatBoundary, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0]
strategyLine2 = chartBoundary.Split(rc.Geometry.Surface.CreateExtrusion(boundaryLine, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0].Split(rc.Geometry.Surface.CreateExtrusion(heatBoundary, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[2]
joinedHeatBound = rc.Geometry.Curve.JoinCurves([strategyLine1, boundaryLine, strategyLine2, heatBoundary])[0]
else:
boundaryLine = comfortCrvSegments[comfCount][1].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
heatBoundaryNew = heatBoundary.Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[1]
strategyLine1 = chartBoundary.Split(rc.Geometry.Surface.CreateExtrusion(comfortCrvSegments[comfCount][1], rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0].Split(rc.Geometry.Surface.CreateExtrusion(heatBoundary, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[2]
strategyLine2 = rc.Geometry.LineCurve(boundaryLine.PointAtStart, heatBoundaryNew.PointAtStart)
joinedHeatBound = rc.Geometry.Curve.JoinCurves([strategyLine1, boundaryLine, strategyLine2, heatBoundaryNew])[0]
passiveStrategyCurves.append(joinedHeatBound)
passiveStrategyBreps.append(outlineCurve(joinedHeatBound))
strategyListTest.append("Internal Heat Gain")
#If the user has hooked up humidification only, add a humidification only curve to the chart.
if "Humidification Only" in passiveStrategy and humidRatioLow != 0:
boundary1 = comfortCrvSegments[comfCount][1].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[0]
boundary2 = comfortCrvSegments[comfCount][0].Split(splittingBrepLow, sc.doc.ModelAbsoluteTolerance)[0]
boundary3 = rc.Geometry.LineCurve(boundary1.PointAtStart, boundary2.PointAtStart)
boundary4 = rc.Geometry.LineCurve(boundary1.PointAtEnd, boundary2.PointAtEnd)
joinedHumidBound = rc.Geometry.Curve.JoinCurves([boundary1, boundary2, boundary3, boundary4])[0]
passiveStrategyCurves.append(joinedHumidBound)
passiveStrategyBreps.append(outlineCurve(joinedHumidBound))
strategyListTest.append("Humidification Only")
#If the user has hooked up dessicant dehumidification, add a dessicant dehumidification curve to the chart.
if "Dessicant Dehumidification" in passiveStrategy and humidRatioUp*scaleFactor <= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
comfPolygonPts = getCurvePoints(comfortCurve)
ptXYSum = []
for point in comfPolygonPts:
ptXYSum.append(point.X + point.Y)
ptXYSum, comfPolygonPts = zip(*sorted(zip(ptXYSum, comfPolygonPts)))
startPt = comfPolygonPts[-1]
#Calculate the enthalpy at the start point.
if IPTrigger:
startTemp = F2C([startPt.X])[0]
endTemp = F2C([-5])[0]
else:
startTemp = startPt.X
endTemp = -20.2
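#Moist-air enthalpy (kJ/kg dry air) with the humidity ratio w in g/kg: h = T*(1.01 + 0.00189*w) + 2.5*w.
#The chart's Y value stores w*scaleFactor/1000, hence the (startPt.Y/scaleFactor)*1000 conversions below.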
enthalpy = (startTemp * (1.01 + 0.00189*((startPt.Y/scaleFactor)*1000))) + 2.5*((startPt.Y/scaleFactor)*1000)
#Project a constant-enthalpy line to the cold edge of the chart (endTemp) to find the other end point of the line.
newHR = (((enthalpy - endTemp) / 2.4622)/1000)* scaleFactor
endPt = rc.Geometry.Point3d(minTempe, newHR, 0)
dessicantLine = rc.Geometry.LineCurve(startPt, endPt)
boundary1 = dessicantLine.Split(chartBoundaryBrep, sc.doc.ModelAbsoluteTolerance)[0]
try:
boundary2 = comfortCrvSegments[comfCount][1].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[1]
boundary3 = rc.Geometry.LineCurve(boundary1.PointAtStart, boundary2.PointAtStart)
boundary4 = rc.Geometry.LineCurve(boundary1.PointAtEnd, boundary2.PointAtEnd)
joinedHumidBound = rc.Geometry.Curve.JoinCurves([boundary1, boundary2, boundary3, boundary4])[0]
except:
boundary2 = chartBoundary.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].Split(rc.Geometry.Surface.CreateExtrusion(boundary1, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0]
boundary3 = rc.Geometry.LineCurve(boundary1.PointAtStart, boundary2.PointAtStart)
joinedHumidBound = rc.Geometry.Curve.JoinCurves([boundary1, boundary2, boundary3])[0]
passiveStrategyCurves.append(joinedHumidBound)
passiveStrategyBreps.append(outlineCurve(joinedHumidBound))
strategyListTest.append("Dessicant Dehumidification")
elif "Dessicant Dehumidification" in passiveStrategy:
passiveStrategyCurves.append(None)
strategyListTest.append("Dessicant Dehumidification")
warning = 'Dessicant Dehumidification is only relevant when there is an upper bound of humidity ratio on the comfort polygon. Use the "PMV Comfort Parameters" component to set this.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#If the user has hooked up dehumidification only, add a dehumidification-only curve to the chart.
if "Dehumidification Only" in passiveStrategy and humidRatioUp*scaleFactor <= comfortCrvSegments[comfCount][0].PointAtEnd.Y:
boundary1 = comfortCrvSegments[comfCount][0].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[1]
try:
boundary2 = comfortCrvSegments[comfCount][1].Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[1]
boundary3 = rc.Geometry.LineCurve(boundary1.PointAtStart, boundary2.PointAtStart)
boundary4 = rc.Geometry.LineCurve(boundary1.PointAtEnd, boundary2.PointAtEnd)
joinedHumidBound = rc.Geometry.Curve.JoinCurves([boundary1, boundary2, boundary3, boundary4])[0]
except:
boundary2 = chartBoundary.Split(splittingBrepUp, sc.doc.ModelAbsoluteTolerance)[0].Split(rc.Geometry.Surface.CreateExtrusion(boundary1, rc.Geometry.Vector3d.ZAxis), sc.doc.ModelAbsoluteTolerance)[0]
boundary3 = rc.Geometry.LineCurve(boundary1.PointAtStart, boundary2.PointAtStart)
joinedHumidBound = rc.Geometry.Curve.JoinCurves([boundary1, boundary2, boundary3])[0]
passiveStrategyCurves.append(joinedHumidBound)
passiveStrategyBreps.append(outlineCurve(joinedHumidBound))
strategyListTest.append("Dehumidification Only")
else:
tempBelowComf = 2.8
maxComfortPolyTemp = comfortCrvSegments[0][0].PointAt(0.5).X
#Try to boolean all of the strategy and comfort curves together so that we can get a sense of comfort over the whole graph.
allCurves = []
for crv in mergedCurvesFinal:
allCurves.append(crv)
for crv in passiveStrategyCurves:
allCurves.append(crv)
if len(allCurves) > 1:
listLength = len(allCurves)
count = 0
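#Union the curves pairwise; each pass can at best halve the list, so int(listLength/2)+1 passes bound the loop.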
while len(allCurves) > 1 and count < int(listLength/2) + 1:
allCurves = unionAllCurves(allCurves)
count += 1
#Move the strategy outlines up just a bit so that they can be seen over the mesh.
transformMatrix = rc.Geometry.Transform.Translation(0,0,sc.doc.ModelAbsoluteTolerance*5)
for brep in finalComfortBreps:
brep.Transform(transformMatrix)
for brep in passiveStrategyBreps:
brep.Transform(transformMatrix)
return mergedCurvesFinal, finalComfortBreps, passiveStrategyCurves, passiveStrategyBreps, strategyListTest, allCurves, tempBelowComf, maxComfortPolyTemp
else:
return [], [], [], [], [], [], 2.8, 0
def statisticallyAnalyzePolygons(hourPts, comfortPolyline, strategyPolylines, unionedCurves, epwData, epwStr, strategyTextNames, tempBelowComf, airTemp, maxComfortPolyTemp, patternList):
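#For every comfort/strategy polygon, compute the percent of hour points inside it and an hourly in/out boolean stream.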
#Define lists to be filled up with the data.
strategyPercent = []
strategyOrNot = []
#For each of the comfort polygons, determine how many of the hour points are inside of them and make a comfort or not list.
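#Curve.Contains returns a PointContainment enum; comparing its str() to "Inside" avoids referencing the enum type directly.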
for countComf, comfortPolygon in enumerate(comfortPolyline):
comfBool = []
for hourPt in hourPts:
if str(comfortPolygon.Contains(hourPt, rc.Geometry.Plane.WorldXY, sc.doc.ModelAbsoluteTolerance)) == "Inside": comfBool.append(1)
else: comfBool.append(0)
if len(comfBool) != 0:
comfPercent = (sum(comfBool)/len(comfBool))*100
else:
comfPercent = 100
strategyPercent.append(comfPercent)
if epwData == True:
if analysisPeriod_:
comfBool.insert(0,analysisPeriod_[1])
comfBool.insert(0,analysisPeriod_[0])
else:
comfBool.insert(0, epwStr[6])
comfBool.insert(0, epwStr[5])
comfBool.insert(0, epwStr[4])
comfBool.insert(0, "Boolean Value")
comfBool.insert(0, "Comfortable Hours in " + strategyTextNames[countComf] + " Polygon")
comfBool.insert(0, epwStr[1])
comfBool.insert(0, epwStr[0])
strategyOrNot.append(comfBool)
#For each of the strategy polygons, determine how many of the hour points are inside of them and make a comfort or not list.
for countStrat, comfortPolygon in enumerate(strategyPolylines):
comfBool = []
try:
if strategyTextNames[countComf + countStrat + 1] != "Thermal Mass + Night Vent" or epwData == False or patternList != []:
for hourPt in hourPts:
if str(comfortPolygon.Contains(hourPt, rc.Geometry.Plane.WorldXY, sc.doc.ModelAbsoluteTolerance)) == "Inside": comfBool.append(1)
else: comfBool.append(0)
else:
for hourCt, hourPt in enumerate(hourPts):
if str(comfortPolygon.Contains(hourPt, rc.Geometry.Plane.WorldXY, sc.doc.ModelAbsoluteTolerance)) == "Inside" and airTemp[hourCt-12] < maxComfortPolyTemp-tempBelowComf: comfBool.append(1)
else: comfBool.append(0)
comfPercent = (sum(comfBool)/len(comfBool))*100
strategyPercent.append(comfPercent)
if epwData == True:
if analysisPeriod_:
comfBool.insert(0,analysisPeriod_[1])
comfBool.insert(0,analysisPeriod_[0])
else:
comfBool.insert(0, epwStr[6])
comfBool.insert(0, epwStr[5])
comfBool.insert(0, epwStr[4])
comfBool.insert(0, "Boolean Value")
comfBool.insert(0, "Comfortable Hours in " + strategyTextNames[countComf + countStrat + 1] + " Polygon")
comfBool.insert(0, epwStr[1])
comfBool.insert(0, epwStr[0])
strategyOrNot.append(comfBool)
except:
strategyPercent.append(0)
for count in range(len(hourPts)):
comfBool.append(0)
if epwData == True:
if analysisPeriod_:
comfBool.insert(0,analysisPeriod_[1])
comfBool.insert(0,analysisPeriod_[0])
else:
comfBool.insert(0, epwStr[6])
comfBool.insert(0, epwStr[5])
comfBool.insert(0, epwStr[4])
comfBool.insert(0, "Boolean Value")
comfBool.insert(0, "Comfortable Hours in Dessicant Dehumidification Polygon")
comfBool.insert(0, epwStr[1])
comfBool.insert(0, epwStr[0])
strategyOrNot.append(comfBool)
#For the total comfort, determine how many of the hour points are inside of them and make a comfort or not list.
temporaryPercent = []
temporaryComfOrNot = []
for polygon in unionedCurves:
comfBool = []
for hourPt in hourPts:
if str(polygon.Contains(hourPt, rc.Geometry.Plane.WorldXY, sc.doc.ModelAbsoluteTolerance)) == "Inside": comfBool.append(1)
else: comfBool.append(0)
if len(comfBool) != 0:
comfPercent = (sum(comfBool)/len(comfBool))*100
else:
comfPercent = 100
temporaryPercent.append(comfPercent)
temporaryComfOrNot.append(comfBool)
#Build the final percent and comfort or not lists.
finalTotalPercent = sum(temporaryPercent)
finalComfOrNot = []
for listCount, list in enumerate(temporaryComfOrNot):
for count, item in enumerate(list):
if listCount == 0:
finalComfOrNot.append(item)
else:
finalComfOrNot[count] = finalComfOrNot[count] + item
if epwData == True:
if analysisPeriod_:
finalComfOrNot.insert(0,analysisPeriod_[1])
finalComfOrNot.insert(0,analysisPeriod_[0])
else:
finalComfOrNot.insert(0, epwStr[6])
finalComfOrNot.insert(0, epwStr[5])
finalComfOrNot.insert(0, epwStr[4])
finalComfOrNot.insert(0, "Boolean Value")
finalComfOrNot.insert(0, "Comfortable Hours in All Polygons")
finalComfOrNot.insert(0, epwStr[1])
finalComfOrNot.insert(0, epwStr[0])
return finalTotalPercent, finalComfOrNot, strategyPercent, strategyOrNot
def getPointColors(totalComfOrNot, annualHourlyDataSplit, annualDataStr, numSeg, customColors, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan, lb_visualization):
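#Color the hour points by comfort (and by any connected hourly data) and build a matching legend for each coloring.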
#Define the lists.
pointColors = []
colorLegends = []
#Get the colors for comfort.
if str(totalComfOrNot[0]) == "key:location/dataType/units/frequency/startsAt/endsAt":
totalComfOrNot = totalComfOrNot[7:]
pointColors.append(lb_visualization.gradientColor(totalComfOrNot, 0, 1, customColors))
#Get the colors for annualHourly Data.
for list in annualHourlyDataSplit:
if len(list) != 0:
pointColors.append(lb_visualization.gradientColor(list, "min", "max", customColors))
#Generate a legend for comfort.
legend = []
legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(totalComfOrNot, 0, 1, 2, "Comfort", lb_visualization.BoundingBoxPar, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan)
legendColors = lb_visualization.gradientColor(legendText[:-1], 0, 1, customColors)
legendSrfs = lb_visualization.colorMesh(legendColors, legendSrfs)
legend.append(legendSrfs)
for list in legendTextCrv:
for item in list:
legend.append(item)
colorLegends.append(legend)
#Generate legends for annualHourly Data.
for listCount, list in enumerate(annualHourlyDataSplit):
if len(list) != 0:
legend = []
legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(list, "min", "max", numSeg, annualDataStr[listCount][3], lb_visualization.BoundingBoxPar, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan)
legendColors = lb_visualization.gradientColor(legendText[:-1], "min", "max", customColors)
legendSrfs = lb_visualization.colorMesh(legendColors, legendSrfs)
legend.append(legendSrfs)
for list in legendTextCrv:
for item in list:
legend.append(item)
colorLegends.append(legend)
return pointColors, colorLegends
def main(epwData, epwStr, calcLength, airTemp, relHumid, barPress, avgBarPress, radTemp, windSpeed, metRate, cloLevel, exWork, humidRatioUp, humidRatioLow, calcLengthComf, PPDComfortThresh, titleStatement, patternList, IPTrigger, farenheitVals):
#Import the classes.
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()
lb_visualization = sc.sticky["ladybug_ResultVisualization"]()
# Read the legend parameters.
lowB, highB, numSeg, customColors, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan = lb_preparation.readLegendParameters(legendPar_, False)
# Generate the chart curves.
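# For IP units the humidity-ratio scale is stretched by 9/5, since a Fahrenheit degree spans 1.8x a Celsius degree
# (this assumes true division is in effect for 9/5, e.g. a from __future__ import division at the top of this component).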
if IPTrigger == True: scaleFactor = 1500*(9/5)
else: scaleFactor = 1500
chartCurves, humidityLines = drawPsychChart(avgBarPress, lb_comfortModels, legendFont, legendFontSize, legendBold, scaleFactor, epwData, epwStr, IPTrigger, lb_visualization)
#If there is annual hourly data, split it up.
if annualHourlyData_ != []:
def chunks(l, n):
finalList = []
for i in range(0, len(l), n):
finalList.append(l[i:i+n])
return finalList
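#Each connected hourly stream is 8767 items long: a 7-line Ladybug header plus 8760 hourly values.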
annualHourlyDataSplit = chunks(annualHourlyData_, 8767)
else: annualHourlyDataSplit = [[]]
annualDataStr = []
if annualHourlyDataSplit != [[]]:
for list in annualHourlyDataSplit:
annualDataStr.append(list[:7])
# If an analysis period is selected, use that to select out the data.
if analysisPeriod_ != [] and epwData == True and calcLength == 8760:
airTemp = lb_preparation.selectHourlyData(_dryBulbTemperature, analysisPeriod_)[7:]
relHumid = lb_preparation.selectHourlyData(_relativeHumidity, analysisPeriod_)[7:]
if len(barPress) == 8760:
barPress = lb_preparation.selectHourlyData(barPress, analysisPeriod_)[7:]
else:
barPress2 = []
for num in range(len(airTemp)):
barPress2.append(barPress[0])
barPress = barPress2
if len(patternList) == 8760:
HOYS, months, days = getHOYsBasedOnPeriod(analysisPeriod_, 1)
newPatternList = []
for hour in HOYS:
newPatternList.append(patternList[hour-1])
patternList = newPatternList
if annualHourlyDataSplit != [[]]:
annualHourlyDataSplitNew = []
for list in annualHourlyDataSplit:
annualHourlyDataSplitNew.append(lb_preparation.selectHourlyData(list, analysisPeriod_)[7:])
annualHourlyDataSplit = annualHourlyDataSplitNew
else:
annualHourlyDataSplitNew = []
for list in annualHourlyDataSplit:
annualHourlyDataSplitNew.append(lb_preparation.selectHourlyData(list, analysisPeriod_)[7:])
annualHourlyDataSplit = annualHourlyDataSplitNew
#If a conditional statement is applied, use it to select out data.
if patternList != []:
newAirTemp = []
newRelHumid = []
newBarPress = []
newAnnualHourlyDataSplit = []
for list in annualHourlyDataSplit:
newAnnualHourlyDataSplit.append([])
for count, bool in enumerate(patternList):
if bool == True:
newAirTemp.append(airTemp[count])
newRelHumid.append(relHumid[count])
newBarPress.append(barPress[count])
if annualHourlyDataSplit != [[]]:
for listCount in range(len(annualHourlyDataSplit)):
newAnnualHourlyDataSplit[listCount].append(annualHourlyDataSplit[listCount][count])
airTemp = newAirTemp
relHumid = newRelHumid
barPress = newBarPress
annualHourlyDataSplit = newAnnualHourlyDataSplit
#As long as the calculation length is more than 1, make a colored mesh and get chart points for the input data.
legend = []
if calcLength > 1:
hourPts, coloredMesh, meshFaceValues = colorMesh(airTemp, relHumid, barPress, lb_preparation, lb_comfortModels, lb_visualization, scaleFactor, lowB, highB, customColors, IPTrigger, farenheitVals)
legendTitle = "Hours"
if mollierHX_ == True:
lb_visualization.calculateBB(chartCurves[:3], True)
else:
if IPTrigger:
if enthalpyOrWetBulb_ == True or enthalpyOrWetBulb_ == None: lb_visualization.calculateBB(chartCurves[:3]+chartCurves[100:110], True)
else: lb_visualization.calculateBB(chartCurves[:3]+chartCurves[110:120], True)
else:
if enthalpyOrWetBulb_ == True or enthalpyOrWetBulb_ == None: lb_visualization.calculateBB(chartCurves[75:83], True)
else: lb_visualization.calculateBB(chartCurves[80:100], True)
legendSrfs, legendText, legendTextCrv, textPt, textSize = lb_visualization.createLegend(meshFaceValues, lowB, highB, numSeg, legendTitle, lb_visualization.BoundingBoxPar, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan)
legendColors = lb_visualization.gradientColor(legendText[:-1], lowB, highB, customColors)
legendSrfs = lb_visualization.colorMesh(legendColors, legendSrfs)
legend.append(legendSrfs)
for list in legendTextCrv:
for item in list:
legend.append(item)
if legendBasePoint == None:
legendBasePoint = lb_visualization.BoundingBoxPar[0]
if mollierHX_ == True:
moveTrans = rc.Geometry.Transform.Translation(0,-20,0)
for geo in legend: geo.Transform(moveTrans)
legendBasePoint.Transform(moveTrans)
else:
if IPTrigger: hourPts = [rc.Geometry.Point3d(farenheitVals[0], lb_comfortModels.calcHumidRatio(airTemp, relHumid, barPress)[0][0]*scaleFactor, 0)]
else: hourPts = [rc.Geometry.Point3d(airTemp[0], lb_comfortModels.calcHumidRatio(airTemp, relHumid, barPress)[0][0]*scaleFactor, 0)]
coloredMesh = None
meshFaceValues = []
legendBasePoint = None
# Get a polycurve that represents the boundary of the chart.
if not IPTrigger: chartBoundary = rc.Geometry.Curve.JoinCurves([chartCurves[0], chartCurves[25], chartCurves[31], chartCurves[10], chartCurves[11]])[0]
else: chartBoundary = rc.Geometry.Curve.JoinCurves([chartCurves[0], chartCurves[35], chartCurves[41], chartCurves[10], chartCurves[11]])[0]
# Calculate the comfort and strategy polygons.
try:
comfortPolyline, comfortPolygon, strategyPolylines, strategyPolygons, strategyTextNames, unionedCurves, tempBelowComf, maxComfortPolyTemp = calcComfAndStrategyPolygons(radTemp, windSpeed, metRate, cloLevel, exWork, humidRatioUp, humidRatioLow, passiveStrategy_, humidityLines, calcLengthComf, lb_comfortModels, chartBoundary, scaleFactor, PPDComfortThresh, IPTrigger)
#Calculate how many hours are in each comfort or strategy and comfort polygons.
totalComfPercent, totalComfOrNot, strategyPercent, strategyOrNot = statisticallyAnalyzePolygons(hourPts, comfortPolyline, strategyPolylines, unionedCurves, epwData, epwStr, strategyTextNames, tempBelowComf, airTemp, maxComfortPolyTemp, patternList)
except:
comfortPolyline, comfortPolygon, strategyPolylines, strategyPolygons, strategyTextNames, unionedCurves, tempBelowComf, maxComfortPolyTemp = None, None, [], [], [], [], None, None
totalComfPercent, totalComfOrNot, strategyPercent, strategyOrNot = None, [], None, []
warning = 'Comfort polygon has fallen completely off of the psych chart.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
#Generate colors for the points.
if len(totalComfOrNot) > 1:
pointColors, pointLegends = getPointColors(totalComfOrNot, annualHourlyDataSplit, annualDataStr, numSeg, customColors, legendBasePoint, legendScale, legendFont, legendFontSize, legendBold, decimalPlaces, removeLessThan, lb_visualization)
else:
pointColors = []
pointLegends = []
#If the molier transform is selected, apply it to the chart curves.
if mollierHX_ == True:
for item in chartCurves:
if str(item.ObjectType) == 'Curve': mollierHXTransform(item)
if coloredMesh != None: mollierHXTransform(coloredMesh)
for geo in comfortPolygon: mollierHXTransform(geo)
for geo in strategyPolygons: mollierHXTransform(geo)
for geo in hourPts: mollierHXTransform(geo)
#If the user has selected to scale or move the geometry, scale it all and/or move it all.
if basePoint_ != None:
transformMtx = rc.Geometry.Transform.Translation(basePoint_.X, basePoint_.Y, basePoint_.Z)
for geo in chartCurves: geo.Transform(transformMtx)
if coloredMesh != None: coloredMesh.Transform(transformMtx)
for geo in legend: geo.Transform(transformMtx)
if legendBasePoint != None: legendBasePoint.Transform(transformMtx)
for geo in comfortPolygon: geo.Transform(transformMtx)
for geo in strategyPolygons: geo.Transform(transformMtx)
for geo in hourPts: geo.Transform(transformMtx)
for list in pointLegends:
for geo in list:
geo.Transform(transformMtx)
basePoint = basePoint_
else: basePoint = rc.Geometry.Point3d(0,0,0)
if scale_ != None:
transformMtx = rc.Geometry.Transform.Scale(basePoint, scale_)
for geo in chartCurves: geo.Transform(transformMtx)
if coloredMesh != None: coloredMesh.Transform(transformMtx)
for geo in legend: geo.Transform(transformMtx)
if legendBasePoint != None: legendBasePoint.Transform(transformMtx)
for geo in comfortPolygon: geo.Transform(transformMtx)
for geo in strategyPolygons: geo.Transform(transformMtx)
for geo in hourPts: geo.Transform(transformMtx)
return totalComfPercent, totalComfOrNot, strategyTextNames, strategyPercent, strategyOrNot, chartCurves, coloredMesh, legend, legendBasePoint, comfortPolygon, strategyPolygons, hourPts, pointColors, pointLegends
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
#Check the inputs.
checkData = False
if _runIt == True:
checkData, epwData, epwStr, calcLength, airTemp, relHumid, barPress, \
avgBarPress, radTemp, windSpeed, metRate, cloLevel, exWork, humidRatioUp, \
humidRatioLow, calcLengthComf, PPDComfortThresh, titleStatement, \
patternList, IPTrigger, farenheitVals = checkTheInputs()
#If the inputs are good, run the function.
if checkData == True:
results = main(epwData, epwStr, calcLength, airTemp, relHumid, barPress, \
avgBarPress, radTemp, windSpeed, metRate, cloLevel, exWork, \
humidRatioUp, humidRatioLow, calcLengthComf, \
PPDComfortThresh, titleStatement, patternList, IPTrigger, farenheitVals)
if results != -1:
totalComfortPercent, totalComfortOrNot, strategyNames, strategyPercentOfTime, \
initStrategyOrNot, chartCurvesAndTxt, psychChartMesh, legend, legendBasePt, \
comfortPolygons, strategyPolygons, chartHourPoints, pointColors, \
pointLegends = results
#Unpack the data tree of the strategyOrNot.
strategyOrNot = DataTree[Object]()
for listCount, list in enumerate(initStrategyOrNot):
for item in list:
strategyOrNot.Add(item, GH_Path(listCount))
#Unpack the data tree of point colors and their legends.
hourPointColors = DataTree[Object]()
for listCount, list in enumerate(pointColors):
for item in list:
hourPointColors.Add(item, GH_Path(listCount))
hourPointLegend = DataTree[Object]()
for listCount, list in enumerate(pointLegends):
for item in list:
hourPointLegend.Add(item, GH_Path(listCount))
#Hide the points input.
ghenv.Component.Params.Output[11].Hidden = True
ghenv.Component.Params.Output[15].Hidden = True
ghenv.Component.Params.Output[17].Hidden = True
|
boris-p/ladybug
|
src/Ladybug_Psychrometric Chart.py
|
Python
|
gpl-3.0
| 117,875
|
[
"EPW"
] |
4fda24157eff26a162fc32759f9edbded719311871f0804f95a4778b9404d717
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to handle policy violation errors.
To get ad groups, run get_ad_groups.py.
Tags: AdGroupAdService.mutate
"""
__author__ = 'Joseph DiLallo'
import re
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
ad_group_ad_service = client.GetService('AdGroupAdService', 'v201309')
# Create text ad.
text_ad_operation = {
'operator': 'ADD',
'operand': {
'adGroupId': ad_group_id,
'ad': {
# The 'xsi_type' field allows you to specify the xsi:type of the
# object being created. It's only necessary when you must provide
# an explicit type that the client library can't infer.
'xsi_type': 'TextAd',
'headline': 'Mars Cruise!!!',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'url': 'http://www.example.com',
'displayUrl': 'www.example.com',
}
}
}
operations = [text_ad_operation]
# Validate the ad.
try:
# Enable "validate only" to check for errors.
client.validate_only = True
ad_group_ad_service.mutate(operations)
print 'Validation successful, no errors returned.'
except suds.WebFault, e:
for error in e.fault.detail.ApiExceptionFault.errors:
if error['ApiError.Type'] == 'PolicyViolationError':
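# fieldPath looks like 'operations[i].operand...'; pull out the operation index i.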
operation_index = re.findall(r'operations\[(.*)\]\.',
error['fieldPath'])
if operation_index:
operation = operations[int(operation_index[0])]
print ('Ad with headline \'%s\' violated %s policy \'%s\'.' %
(operation['operand']['ad']['headline'],
'exemptable' if error['isExemptable'] else 'non-exemptable',
error['externalPolicyName']))
if error['isExemptable'].lower() == 'true':
# Add exemption request to the operation.
print ('Adding exemption request for policy name \'%s\' on text '
'\'%s\'.' %
(error['key']['policyName'], error['key']['violatingText']))
if 'exemptionRequests' not in operation:
operation['exemptionRequests'] = []
operation['exemptionRequests'].append({
'key': error['key']
})
else:
# Remove non-exemptable operation
print 'Removing the operation from the request.'
operations.remove(operation)
else:
# Non-policy error returned, re-throw exception.
raise e
# Add these ads. Disable "validate only" so the ads will get created.
client.validate_only = False
if operations:
response = ad_group_ad_service.mutate(operations)
if response and response['value']:
ads = response['value']
print 'Added %s ad(s) to ad group %s.' % (len(ads), ad_group_id)
for ad in ads:
print (' Ad id is %s, type is %s and status is \'%s\'.' %
(ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
print 'No ads were added.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
jdilallo/jdilallo-test
|
examples/adwords/v201309/error_handling/handle_policy_violation_error.py
|
Python
|
apache-2.0
| 3,956
|
[
"VisIt"
] |
186893dfca858a2523273dc2746062c3b004b28fe427a340fdbe13de49690238
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import dmrgscf
from pyscf import mrpt
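# Distribute the DMRG solver over 4 MPI ranks.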
dmrgscf.settings.MPIPREFIX = 'mpirun -n 4'
b = 1.4
mol = gto.M(verbose = 0,
atom = [
['N', (0, 0, -b/2)],
['N', (0, 0, b/2)], ],
basis = '631g')
m = scf.RHF(mol)
m.conv_tol = 1e-12
m.scf()
mc = dmrgscf.dmrgci.DMRGSCF(m, 4, 4)
mc.kernel()
class KnowValues(unittest.TestCase):
# def test_nevpt2_with_4pdm(self):
# e = mrpt.NEVPT(mc).kernel()
# self.assertAlmostEqual(e, -0.14058373193902649, 6)
def test_nevpt2_without_4pdm(self):
e = mrpt.NEVPT(mc).compress_approx(maxM=5000).kernel()
self.assertAlmostEqual(e, -0.14058324516302972, 6)
if __name__ == "__main__":
print("Full Tests for N2")
unittest.main()
|
gkc1000/pyscf
|
pyscf/dmrgscf/test/test_dmrgnevpt2.py
|
Python
|
apache-2.0
| 1,447
|
[
"PySCF"
] |
3eb26bd659bf1564307c1dea68af0fcd998626f4307344c37847587cfa354a41
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from html.parser import HTMLParser
from html.entities import name2codepoint
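# Echo every token the parser encounters by overriding the HTMLParser callbacks.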
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print('<%s>' % tag)
def handle_endtag(self, tag):
print('</%s>' % tag)
def handle_startendtag(self, tag, attrs):
print('<%s/>' % tag)
def handle_data(self, data):
print(data)
def handle_comment(self, data):
print('<!--', data, '-->')
def handle_entityref(self, name):
print('&%s;' % name)
def handle_charref(self, name):
print('&#%s;' % name)
parser = MyHTMLParser()
parser.feed('''
<!doctype html>
<!--[if lt IE 7]> <html class="no-js ie6 lt-ie7 lt-ie8 lt-ie9"> <![endif]-->
<!--[if IE 7]> <html class="no-js ie7 lt-ie8 lt-ie9"> <![endif]-->
<!--[if IE 8]> <html class="no-js ie8 lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--><html class="no-js" lang="en" dir="ltr"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<link rel="prefetch" href="//ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js">
<meta name="application-name" content="Python.org">
<meta name="msapplication-tooltip" content="The official home of the Python Programming Language">
<meta name="apple-mobile-web-app-title" content="Python.org">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="HandheldFriendly" content="True">
<meta name="format-detection" content="telephone=no">
<meta http-equiv="cleartype" content="on">
<meta http-equiv="imagetoolbar" content="false">
<script src="/static/js/libs/modernizr.js"></script>
<link href="/static/stylesheets/style.css" rel="stylesheet" type="text/css" title="default" />
<link href="/static/stylesheets/mq.css" rel="stylesheet" type="text/css" media="not print, braille, embossed, speech, tty" />
<!--[if (lte IE 8)&(!IEMobile)]>
<link href="/static/stylesheets/no-mq.css" rel="stylesheet" type="text/css" media="screen" />
<![endif]-->
<link rel="icon" type="image/x-icon" href="/static/favicon.ico">
<link rel="apple-touch-icon-precomposed" sizes="144x144" href="/static/apple-touch-icon-144x144-precomposed.png">
<link rel="apple-touch-icon-precomposed" sizes="114x114" href="/static/apple-touch-icon-114x114-precomposed.png">
<link rel="apple-touch-icon-precomposed" sizes="72x72" href="/static/apple-touch-icon-72x72-precomposed.png">
<link rel="apple-touch-icon-precomposed" href="/static/apple-touch-icon-precomposed.png">
<link rel="apple-touch-icon" href="/static/apple-touch-icon-precomposed.png">
<meta name="msapplication-TileImage" content="/static/metro-icon-144x144-precomposed.png"><!-- white shape -->
<meta name="msapplication-TileColor" content="#3673a5"><!-- python blue -->
<meta name="msapplication-navbutton-color" content="#3673a5">
<title>Our Events | Python.org</title>
<meta name="description" content="The official home of the Python Programming Language">
<meta name="keywords" content="Python programming language object oriented web free open source software license documentation download community">
<meta property="og:type" content="website">
<meta property="og:site_name" content="Python.org">
<meta property="og:title" content="Our Events">
<meta property="og:description" content="The official home of the Python Programming Language">
<meta property="og:image" content="https://www.python.org/static/opengraph-icon-200x200.png">
<meta property="og:image:secure_url" content="https://www.python.org/static/opengraph-icon-200x200.png">
<meta property="og:url" content="https://www.python.org/events/python-events/">
<link rel="author" href="/static/humans.txt">
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "WebSite",
"url": "https://www.python.org/",
"potentialAction": {
"@type": "SearchAction",
"target": "https://www.python.org/search/?q={search_term_string}",
"query-input": "required name=search_term_string"
}
}
</script>
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-39055973-1']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body class="python events default-page">
<div id="touchnav-wrapper">
<div id="nojs" class="do-not-print">
<p><strong>Notice:</strong> While Javascript is not essential for this website, your interaction with the content will be limited. Please turn Javascript on for the full experience. </p>
</div>
<!--[if lt IE 8]>
<div id="oldie-warning" class="do-not-print">
<p><strong>Notice:</strong> Your browser is <em>ancient</em> and <a href="http://www.ie6countdown.com/">Microsoft agrees</a>. <a href="http://browsehappy.com/">Upgrade to a different browser</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to experience a better web.</p>
</div>
<![endif]-->
<!-- Sister Site Links -->
<div id="top" class="top-bar do-not-print">
<nav class="meta-navigation container" role="navigation">
<div class="skip-link screen-reader-text">
<a href="#content" title="Skip to content">Skip to content</a>
</div>
<a id="close-python-network" class="jump-link" href="#python-network" aria-hidden="true">
<span aria-hidden="true" class="icon-arrow-down"><span>▼</span></span> Close
</a>
<ul class="menu" role="tree">
<li class="python-meta ">
<a href="/" title="The Python Programming Language" >Python</a>
</li>
<li class="psf-meta ">
<a href="/psf-landing/" title="The Python Software Foundation" >PSF</a>
</li>
<li class="docs-meta ">
<a href="https://docs.python.org" title="Python Documentation" >Docs</a>
</li>
<li class="pypi-meta ">
<a href="https://pypi.python.org/" title="Python Package Index" >PyPI</a>
</li>
<li class="jobs-meta ">
<a href="/jobs/" title="Python Job Board" >Jobs</a>
</li>
<li class="shop-meta ">
<a href="/community/" title="Python Community" >Community</a>
</li>
</ul>
<a id="python-network" class="jump-link" href="#top" aria-hidden="true">
<span aria-hidden="true" class="icon-arrow-up"><span>▲</span></span> The Python Network
</a>
</nav>
</div>
<!-- Header elements -->
<header class="main-header" role="banner">
<div class="container">
<h1 class="site-headline">
<a href="/"><img class="python-logo" src="/static/img/python-logo.png" alt="python™"></a>
</h1>
<div class="options-bar do-not-print">
<a id="site-map-link" class="jump-to-menu" href="#site-map"><span class="menu-icon">≡</span> Menu</a><form class="search-the-site" action="/search/" method="get">
<fieldset title="Search Python.org">
<span aria-hidden="true" class="icon-search"></span>
<label class="screen-reader-text" for="id-search-field">Search This Site</label>
<input id="id-search-field" name="q" type="search" role="textbox" class="search-field" placeholder="Search" value="" tabindex="1">
<button type="submit" name="submit" id="submit" class="search-button" title="Submit this Search" tabindex="3">
GO
</button>
<!--[if IE]><input type="text" style="display: none;" disabled="disabled" size="1" tabindex="4"><![endif]-->
</fieldset>
</form><span class="breaker"></span><div class="adjust-font-size" aria-hidden="true">
<ul class="navigation menu" aria-label="Adjust Text Size on Page">
<li class="tier-1 last" aria-haspopup="true">
<a href="#" class="action-trigger"><strong><small>A</small> A</strong></a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a class="text-shrink" title="Make Text Smaller" href="javascript:;">Smaller</a></li>
<li class="tier-2 element-2" role="treeitem"><a class="text-grow" title="Make Text Larger" href="javascript:;">Larger</a></li>
<li class="tier-2 element-3" role="treeitem"><a class="text-reset" title="Reset any font size changes I have made" href="javascript:;">Reset</a></li>
</ul>
</li>
</ul>
</div><div class="winkwink-nudgenudge">
<ul class="navigation menu" aria-label="Social Media Navigation">
<li class="tier-1 last" aria-haspopup="true">
<a href="#" class="action-trigger">Socialize</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="http://plus.google.com/+Python"><span aria-hidden="true" class="icon-google-plus"></span>Google+</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="http://www.facebook.com/pythonlang?fref=ts"><span aria-hidden="true" class="icon-facebook"></span>Facebook</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="http://twitter.com/ThePSF"><span aria-hidden="true" class="icon-twitter"></span>Twitter</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/community/irc/"><span aria-hidden="true" class="icon-freenode"></span>Chat on IRC</a></li>
</ul>
</li>
</ul>
</div><div class="account-signin">
<ul class="navigation menu" aria-label="Social Media Navigation">
<li class="tier-1 last" aria-haspopup="true">
<a href="/accounts/login/" title="Sign Up or Sign In to Python.org">Sign In</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/accounts/signup/">Sign Up / Register</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/accounts/login/">Sign In</a></li>
</ul>
</li>
</ul>
</div>
</div><!-- end options-bar -->
<nav id="mainnav" class="python-navigation main-navigation do-not-print" role="navigation">
<ul class="navigation menu" role="menubar" aria-label="Main Navigation">
<li id="about" class="tier-1 element-1 " aria-haspopup="true">
<a href="/about/" title="" class="">About</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/about/apps/" title="">Applications</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/about/quotes/" title="">Quotes</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/about/gettingstarted/" title="">Getting Started</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/about/help/" title="">Help</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="http://brochure.getpython.info/" title="">Python Brochure</a></li>
</ul>
</li>
<li id="downloads" class="tier-1 element-2 " aria-haspopup="true">
<a href="/downloads/" title="" class="">Downloads</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/downloads/" title="">All releases</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/downloads/source/" title="">Source code</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/downloads/windows/" title="">Windows</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/downloads/mac-osx/" title="">Mac OS X</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/download/other/" title="">Other Platforms</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="https://docs.python.org/3/license.html" title="">License</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/download/alternatives" title="">Alternative Implementations</a></li>
</ul>
</li>
<li id="documentation" class="tier-1 element-3 " aria-haspopup="true">
<a href="/doc/" title="" class="">Documentation</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/doc/" title="">Docs</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/doc/av" title="">Audio/Visual Talks</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="https://wiki.python.org/moin/BeginnersGuide" title="">Beginner's Guide</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="https://docs.python.org/devguide/" title="">Developer's Guide</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="https://docs.python.org/faq/" title="">FAQ</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="http://wiki.python.org/moin/Languages" title="">Non-English Docs</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="http://python.org/dev/peps/" title="">PEP Index</a></li>
<li class="tier-2 element-8" role="treeitem"><a href="https://wiki.python.org/moin/PythonBooks" title="">Python Books</a></li>
</ul>
</li>
<li id="community" class="tier-1 element-4 " aria-haspopup="true">
<a href="/community/" title="" class="">Community</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/community/diversity/" title="">Diversity</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/community/irc/" title="">IRC</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/community/lists/" title="">Mailing Lists</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/community/workshops/" title="">Python Conferences</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/community/sigs/" title="">Special Interest Groups</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="https://wiki.python.org/moin/" title="">Python Wiki</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/community/logos/" title="">Python Logo</a></li>
<li class="tier-2 element-8" role="treeitem"><a href="/community/merchandise/" title="">Merchandise</a></li>
<li class="tier-2 element-9" role="treeitem"><a href="/community/awards" title="">Community Awards</a></li>
</ul>
</li>
<li id="success-stories" class="tier-1 element-5 " aria-haspopup="true">
<a href="/about/success/" title="success-stories" class="">Success Stories</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/about/success/#arts" title="">Arts</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/about/success/#business" title="">Business</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/about/success/#education" title="">Education</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/about/success/#engineering" title="">Engineering</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/about/success/#government" title="">Government</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="/about/success/#scientific" title="">Scientific</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/about/success/#software-development" title="">Software Development</a></li>
</ul>
</li>
<li id="news" class="tier-1 element-6 " aria-haspopup="true">
<a href="/blogs/" title="News from around the Python world" class="">News</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/blogs/" title="Python Insider Blog Posts">Python News</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="http://planetpython.org/" title="Planet Python">Community News</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="http://pyfound.blogspot.com/" title="PSF Blog">PSF News</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="http://pycon.blogspot.com/" title="PyCon Blog">PyCon News</a></li>
</ul>
</li>
<li id="events" class="tier-1 element-7 " aria-haspopup="true">
<a href="/events/" title="" class="">Events</a>
<ul class="subnav menu" role="menu" aria-hidden="true">
<li class="tier-2 element-1" role="treeitem"><a href="/events/python-events" title="">Python Events</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/events/python-user-group/" title="">User Group Events</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/events/python-events/past/" title="">Python Events Archive</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/events/python-user-group/past/" title="">User Group Events Archive</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="https://wiki.python.org/moin/PythonEventsCalendar#Submitting_an_Event" title="">Submit an Event</a></li>
</ul>
</li>
</ul>
</nav>
<div class="header-banner "> <!-- for optional "do-not-print" class -->
</div>
</div><!-- end .container -->
</header>
<div id="content" class="content-wrapper">
<!-- Main Content Column -->
<div class="container">
<section class="main-content with-right-sidebar" role="main">
<header class="article-header">
<h3>from the Python Events Calendar</h3>
</header>
<div class="most-recent-events">
<div class="shrubbery">
<h2 class="widget-title"><span aria-hidden="true" class="icon-calendar"></span>Upcoming Events</h2>
<p class="give-me-more"><a href="?page=2" title="More Events">More</a></p>
<ul class="list-recent-events menu">
<li>
<h3 class="event-title"><a href="/events/python-events/401/">PyOhio 2016</a></h3>
<p>
<time datetime="2016-07-29T00:00:00+00:00">29 July – 01 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location">The Ohio Union at The Ohio State University. 1739 N. High Street, Columbus, OH 43210, USA</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/357/">PyCon Australia 2016</a></h3>
<p>
<time datetime="2016-08-12T00:00:00+00:00">12 Aug. – 17 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location">Melbourne Convention and Exhibition Centre, 1 Convention Centre Pl, South Wharf VIC 3006, Australia</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/427/">PyCon APAC 2016</a></h3>
<p>
<time datetime="2016-08-13T00:00:00+00:00">13 Aug. – 16 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location">Trade Center COEX Samseong 1-dong Gangnam-gu, Seoul, South Korea</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/440/">PyBay 2016</a></h3>
<p>
<time datetime="2016-08-19T00:00:00+00:00">19 Aug. – 22 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location">UCSF Mission Bay Conference Center, 1675 Owens St., San Francisco, CA 94143, USA</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/438/">PyBay 2016</a></h3>
<p>
<time datetime="2016-08-19T01:26:32+00:00">19 Aug. – 21 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location"><a href="http://www.acc-missionbayconferencecenter.com/">UCSF Mission Bay Conference Center</a>, 1675 Owens Street, Suite 251, San Francisco, CA 94143-3008</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/409/">EuroScipy 2016</a></h3>
<p>
<time datetime="2016-08-23T00:00:00+00:00">23 Aug. – 28 Aug. <span class="say-no-more"> 2016</span></time>
<span class="event-location">Faculty of Medicine of the University of Erlangen, Germany</span>
</p>
</li>
</ul>
</div>
<h3 class="widget-title just-missed">You just missed...</h3>
<ul class="list-recent-events menu">
<li>
<h3 class="event-title"><a href="/events/python-events/426/">PyGotham 2016</a></h3>
<p>
<time datetime="2016-07-16T00:00:00+00:00">16 July – 18 July <span class="say-no-more"> 2016</span></time>
<span class="event-location">New York, NY, USA</span>
</p>
</li>
<li>
<h3 class="event-title"><a href="/events/python-events/395/">PyCon Singapore 2016</a></h3>
<p>
<time datetime="2016-06-23T00:00:00+00:00">23 June – 26 June <span class="say-no-more"> 2016</span></time>
<span class="event-location">National University of Singapore, School of Computing, Computing 1, 13 Computing Drive, Singapore 117417, Republic of Singapore</span>
</p>
</li>
</ul>
</div>
</section>
<aside class="right-sidebar" role="secondary">
<div class="sidebar-widget subscribe-widget">
<h2 class="widget-title">Python Event Subscriptions</h2>
<p>Subscribe to Python Event Calendars:</p>
<ul class="menu">
<li><a href="https://www.google.com/calendar/ical/j7gov1cmnqr9tvg14k621j7t5c@group.calendar.google.com/public/basic.ics"><span aria-hidden="true" class="icon-ical"></span>Events in iCal format</a></li>
</ul>
<h2 class="widget-title">Python Events Calendars</h2>
<br/>
<p>For Python events near you, please have a look at the <a href="http://lmorillas.github.io/python_events/"><b>Python events map</b></a>.</p>
<p>The Python events calendars are maintained by the <a href="https://wiki.python.org/moin/PythonEventsCalendar#Python_Calendar_Team">events calendar team</a>.</p>
<p>Please see the <a href="https://wiki.python.org/moin/PythonEventsCalendar">events calendar project page</a> for details on how to <a href="https://wiki.python.org/moin/PythonEventsCalendar#Submitting_an_Event">submit events</a>, <a href="https://wiki.python.org/moin/PythonEventsCalendar#Available_Calendars">subscribe to the calendars</a>, get <a href="https://twitter.com/PythonEvents">Twitter feeds</a> or embed them.</p>
<p>Thank you.</p>
</div>
</aside>
</div><!-- end .container -->
</div><!-- end #content .content-wrapper -->
<!-- Footer and social media list -->
<footer id="site-map" class="main-footer" role="contentinfo">
<div class="main-footer-links">
<div class="container">
<a id="back-to-top-1" class="jump-link" href="#python-network"><span aria-hidden="true" class="icon-arrow-up"><span>▲</span></span> Back to Top</a>
<ul class="sitemap navigation menu do-not-print" role="tree" id="container">
<li class="tier-1 element-1">
<a href="/about/" >About</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/about/apps/" title="">Applications</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/about/quotes/" title="">Quotes</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/about/gettingstarted/" title="">Getting Started</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/about/help/" title="">Help</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="http://brochure.getpython.info/" title="">Python Brochure</a></li>
</ul>
</li>
<li class="tier-1 element-2">
<a href="/downloads/" >Downloads</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/downloads/" title="">All releases</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/downloads/source/" title="">Source code</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/downloads/windows/" title="">Windows</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/downloads/mac-osx/" title="">Mac OS X</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/download/other/" title="">Other Platforms</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="https://docs.python.org/3/license.html" title="">License</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/download/alternatives" title="">Alternative Implementations</a></li>
</ul>
</li>
<li class="tier-1 element-3">
<a href="/doc/" >Documentation</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/doc/" title="">Docs</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/doc/av" title="">Audio/Visual Talks</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="https://wiki.python.org/moin/BeginnersGuide" title="">Beginner's Guide</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="https://docs.python.org/devguide/" title="">Developer's Guide</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="https://docs.python.org/faq/" title="">FAQ</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="http://wiki.python.org/moin/Languages" title="">Non-English Docs</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="http://python.org/dev/peps/" title="">PEP Index</a></li>
<li class="tier-2 element-8" role="treeitem"><a href="https://wiki.python.org/moin/PythonBooks" title="">Python Books</a></li>
</ul>
</li>
<li class="tier-1 element-4">
<a href="/community/" >Community</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/community/diversity/" title="">Diversity</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/community/irc/" title="">IRC</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/community/lists/" title="">Mailing Lists</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/community/workshops/" title="">Python Conferences</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/community/sigs/" title="">Special Interest Groups</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="https://wiki.python.org/moin/" title="">Python Wiki</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/community/logos/" title="">Python Logo</a></li>
<li class="tier-2 element-8" role="treeitem"><a href="/community/merchandise/" title="">Merchandise</a></li>
<li class="tier-2 element-9" role="treeitem"><a href="/community/awards" title="">Community Awards</a></li>
</ul>
</li>
<li class="tier-1 element-5">
<a href="/about/success/" title="success-stories">Success Stories</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/about/success/#arts" title="">Arts</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/about/success/#business" title="">Business</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/about/success/#education" title="">Education</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/about/success/#engineering" title="">Engineering</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="/about/success/#government" title="">Government</a></li>
<li class="tier-2 element-6" role="treeitem"><a href="/about/success/#scientific" title="">Scientific</a></li>
<li class="tier-2 element-7" role="treeitem"><a href="/about/success/#software-development" title="">Software Development</a></li>
</ul>
</li>
<li class="tier-1 element-6">
<a href="/blogs/" title="News from around the Python world">News</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/blogs/" title="Python Insider Blog Posts">Python News</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="http://planetpython.org/" title="Planet Python">Community News</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="http://pyfound.blogspot.com/" title="PSF Blog">PSF News</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="http://pycon.blogspot.com/" title="PyCon Blog">PyCon News</a></li>
</ul>
</li>
<li class="tier-1 element-7">
<a href="/events/" >Events</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="/events/python-events" title="">Python Events</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="/events/python-user-group/" title="">User Group Events</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="/events/python-events/past/" title="">Python Events Archive</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="/events/python-user-group/past/" title="">User Group Events Archive</a></li>
<li class="tier-2 element-5" role="treeitem"><a href="https://wiki.python.org/moin/PythonEventsCalendar#Submitting_an_Event" title="">Submit an Event</a></li>
</ul>
</li>
<li class="tier-1 element-8">
<a href="/dev/" >Contributing</a>
<ul class="subnav menu">
<li class="tier-2 element-1" role="treeitem"><a href="http://docs.python.org/devguide/" title="">Developer's Guide</a></li>
<li class="tier-2 element-2" role="treeitem"><a href="http://bugs.python.org/" title="">Issue Tracker</a></li>
<li class="tier-2 element-3" role="treeitem"><a href="https://mail.python.org/mailman/listinfo/python-dev" title="">python-dev list</a></li>
<li class="tier-2 element-4" role="treeitem"><a href="http://pythonmentors.com/" title="">Core Mentorship</a></li>
</ul>
</li>
</ul>
<a id="back-to-top-2" class="jump-link" href="#python-network"><span aria-hidden="true" class="icon-arrow-up"><span>▲</span></span> Back to Top</a>
</div><!-- end .container -->
</div> <!-- end .main-footer-links -->
<div class="site-base">
<div class="container">
<ul class="footer-links navigation menu do-not-print" role="tree">
<li class="tier-1 element-1"><a href="/about/help/">Help & <span class="say-no-more">General</span> Contact</a></li>
<li class="tier-1 element-2"><a href="/community/diversity/">Diversity <span class="say-no-more">Initiatives</span></a></li>
<li class="tier-1 element-3"><a href="https://github.com/python/pythondotorg/issues">Submit Website Bug</a></li>
<li class="tier-1 element-4">
<a href="https://status.python.org/">Status <span class="python-status-indicator-default" id="python-status-indicator"></span></a>
</li>
</ul>
<div class="copyright">
<p><small>
<span class="pre">Copyright ©2001-2016.</span>
<span class="pre"><a href="/psf-landing/">Python Software Foundation</a></span>
<span class="pre"><a href="/about/legal/">Legal Statements</a></span>
<span class="pre"><a href="/privacy/">Privacy Policy</a></span>
</small></p>
</div>
</div><!-- end .container -->
</div><!-- end .site-base -->
</footer>
</div><!-- end #touchnav-wrapper -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="/static/js/libs/jquery-1.8.2.min.js"><\/script>')</script>
<script src="/static/js/libs/masonry.pkgd.min.js"></script>
<script type="text/javascript" src="/static/js/main-min.js" charset="utf-8"></script>
<!--[if lte IE 7]>
<script type="text/javascript" src="/static/js/plugins/IE8-min.js" charset="utf-8"></script>
<![endif]-->
<!--[if lte IE 8]>
<script type="text/javascript" src="/static/js/plugins/getComputedStyle-min.js" charset="utf-8"></script>
<![endif]-->
</body>
</html>
''')
|
shawnleo001/Pythonoob
|
Module/module_7html-test.py
|
Python
|
gpl-3.0
| 35,404
|
[
"COLUMBUS"
] |
fc7695ac3e54ae83e5c6dade005afd6662569a0a68cb1593824590bed39e32e4
|
import md5
import dateutil.parser
import dateutil.relativedelta
import json
import time
from flask import jsonify, request, abort, make_response, g
from flaskapimongo import app, mongo
from pymongo.errors import OperationFailure
@app.before_request
def before_request():
g.request_start_time = time.time()
g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)
@app.route('/')
def index():
activities_collection = mongo.db.activities
count = activities_collection.count()
    return 'Flask-api-mongo is running! Total entries: {0}. Rendered in {1}'.format(count, g.request_time())
@app.route('/get', methods = ['GET'])
def get_activity():
"""Get uid and date args and return json object with visit count for requested uid and date."""
required_args = ['uid', 'date']
if not all(x in request.args for x in required_args):
abort(400)
result_answer = {}
try:
req_uid = int(request.args['uid'])
req_date = dateutil.parser.parse(request.args['date'])
#removing time from date if user sent full date format
start_date = req_date.replace(hour=0, minute=0, second=0, microsecond=0)
end_date = start_date + dateutil.relativedelta.relativedelta(days=+1)
activities_collection = mongo.db.activities
count = activities_collection.count(filter={'uid': req_uid, 'date': {"$gte": start_date, "$lt": end_date}})
#old pymongo style
#count = activities_collection.find({'uid': req_uid, 'date': {"$gte": start_date, "$lt": end_date}}, fields={'uid': 1, '_id': 0}).count()
except (KeyError, ValueError, OperationFailure) as e:
result_answer['status'] = "FAIL"
result_answer['error'] = str(e)
return jsonify(result_answer)
result_answer['status'] = "OK"
result_answer['uid'] = req_uid
result_answer['count'] = count
result_answer['execution_time'] = g.request_time()
return jsonify(result_answer)
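# Illustrative request shape (an assumption, not from the original code):
#   GET /get?uid=42&date=2016-08-19
# returns the number of stored activities whose 'uid' is 42 and whose 'date'
# falls in the half-open day window [2016-08-19 00:00, 2016-08-20 00:00),
# which is exactly the $gte/$lt filter built above.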
@app.route('/post', methods = ['POST'])
def post_activity():
"""Writes json object to storage if its md5sum is correct.
Accepts single json object or array."""
if not request.json:
abort(400)
if not isinstance(request.json, list):
activities = [request.json]
else:
activities = request.json
result_answer = {}
activities_collection = mongo.db.activities
for pos, activity in enumerate(activities):
try:
            received_md5 = activity.pop('md5checksum')
            calculated_md5 = md5.new(json.dumps(activity)).hexdigest()
            if received_md5 == calculated_md5:
activity['uid'] = int(activity['uid'])
activity['date'] = dateutil.parser.parse(activity['date'])
activities_collection.insert_one(activity)
result_answer[pos] = "OK"
else:
result_answer[pos] = "FAIL"
except (KeyError, ValueError, OperationFailure) as e:
result_answer[pos] = "FAIL"
result_answer['execution_time'] = g.request_time()
return jsonify(result_answer), 201
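# Hedged usage sketch -- this helper is NOT part of the original module. It
# mirrors the verification in post_activity(): the checksum is the md5 of the
# JSON-serialized activity *without* the 'md5checksum' key. Caveat: json.dumps
# is key-order sensitive, so a payload only verifies if client and server
# happen to serialize identically; sort_keys=True on both sides would be more
# robust, but the handler above does not use it.
def make_signed_activity(uid, date_string):
    """Build an activity dict carrying the checksum that /post expects."""
    activity = {'uid': uid, 'date': date_string}
    activity['md5checksum'] = md5.new(json.dumps(activity)).hexdigest()
    return activity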
|
antonsoroko/flaskapimongo
|
flaskapimongo/views.py
|
Python
|
gpl-3.0
| 3,109
|
[
"VisIt"
] |
01680f88ca4ccfae462298f01abff887011d8e0fd881ea429cca2880248e92ee
|
from __future__ import print_function, division
import time
import warnings
from mdtraj.utils.delay_import import import_
from mdtraj.utils.validation import ensure_type, cast_indices, check_random_state
from mdtraj.utils.unit import in_units_of
from mdtraj.utils.rotation import rotation_matrix_from_quaternion, uniform_quaternion
from mdtraj.utils.unitcell import (lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles)
from mdtraj.utils.contextmanagers import timing, enter_temp_directory
from mdtraj.utils.zipped import open_maybe_zipped
__all__ = ["ensure_type", "import_", "in_units_of",
"lengths_and_angles_to_box_vectors",
"box_vectors_to_lengths_and_angles",
"ilen", "timing", "cast_indices", "check_random_state",
"rotation_matrix_from_quaternion", "uniform_quaternion",
"enter_temp_directory", "timing", "deprecated"]
# Make sure that DeprecationWarning get printed
warnings.simplefilter("always", DeprecationWarning)
def ilen(iterable):
"""Length of an iterator. Note, this consumes the iterator
Parameters
----------
iterable : iterable
An iterable, such as a generator, list, etc.
Returns
-------
length : int
The number of elements in the iterable
"""
return sum(1 for _ in iterable)
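# Illustrative only (not in the original module): because ilen() consumes its
# argument, a second call on the same generator finds nothing left.
#   gen = (i for i in range(5))
#   ilen(gen)  # -> 5
#   ilen(gen)  # -> 0, the generator is already exhausted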
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Copied from scikit-learn: sklearn/utils/__init__.py
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
|
casawa/mdtraj
|
mdtraj/utils/__init__.py
|
Python
|
lgpl-2.1
| 3,770
|
[
"MDTraj"
] |
a3f06f026679fdeb3cef62adcde45dc9f604ae30e1821ffb1cad63e696688683
|
"""
Draw a background for HE0435 (should be generalized later)
Compute the precise astometry for Euler pixel size from Courbin2011
"""
import os
import sys
import numpy as np
import astropy.io.fits as pyfits
import scipy
# Astrometry : from Courbin2011
# in arcseconds
A = (0.0, 0.0)
B = (-1.4743, +0.5518)
C = (-2.4664, -0.6022)
D = (-0.9378, -1.6160)
G = (-1.1706, -0.5665)
pixsize = 0.2148 # Measured on an image, Malte, for ECAM
arcsectopix = 1.0/pixsize
# Astrometry: given initial coordinates for A in pixels, compute the B, C, D and G coordinates in pixels from the astrometry above and the pixel size of the images.
if 0:
Ainit_x = 27.48 # in pixels
Ainit_y = 37.84 # in pixels
A_pix = ((-A[0]*arcsectopix)+Ainit_x, (A[1]*arcsectopix)+Ainit_y)
B_pix = ((-B[0]*arcsectopix)+Ainit_x, (B[1]*arcsectopix)+Ainit_y)
C_pix = ((-C[0]*arcsectopix)+Ainit_x, (C[1]*arcsectopix)+Ainit_y)
D_pix = ((-D[0]*arcsectopix)+Ainit_x, (D[1]*arcsectopix)+Ainit_y)
G_pix = ((-G[0]*arcsectopix)+Ainit_x, (G[1]*arcsectopix)+Ainit_y)
print A_pix[0],'\t',A_pix[1]
print B_pix[0],'\t',B_pix[1]
print C_pix[0],'\t',C_pix[1]
print D_pix[0],'\t',D_pix[1]
print G_pix[0],'\t',G_pix[1]
sys.exit()
# to see if a transformation respects your astrometry
print '='*50
print "diff_AB = ", np.sqrt((B_pix[0]-A_pix[0])**2 + (B_pix[1]-A_pix[1])**2)
print "diff_AC = ", np.sqrt((C_pix[0]-A_pix[0])**2 + (D_pix[1]-A_pix[1])**2)
print "diff_AD = ", np.sqrt((C_pix[0]-A_pix[0])**2 + (D_pix[1]-A_pix[1])**2)
A_pix_new = (27.664938 , 37.176521)
B_pix_new = (34.401466 , 39.382728)
C_pix_new = (38.872914 , 34.300117)
D_pix_new = (31.986974 , 29.892582 )
print "diff_AB_new = ", np.sqrt((B_pix_new[0]-A_pix_new[0])**2 + (B_pix_new[1]-A_pix_new[1])**2)
print "diff_AC_new = ", np.sqrt((C_pix_new[0]-A_pix_new[0])**2 + (D_pix_new[1]-A_pix_new[1])**2)
print "diff_AD_new = ", np.sqrt((C_pix_new[0]-A_pix_new[0])**2 + (D_pix_new[1]-A_pix_new[1])**2)
# draw a background
if 1:
# canvas is a black 128x128 pixels fits file
os.system('cp canvas.fits back.fits') # background to pass to MCS
os.system('cp canvas.fits profile.fits') # non-convolved profile, 128*128
os.system('cp canvas.fits gaussian.fits') # gaussian, 128*128
profile = pyfits.open('profile.fits', mode='update')
gaussian = pyfits.open('gaussian.fits', mode='update')
back = pyfits.open('back.fits', mode='update')
data = back[0].data
pdata = profile[0].data
gdata = gaussian[0].data
# Sersic profile parameters
I0 = 5.00142905e+03 # arbitrary
reff_arcsec = 1.5
#sersic_index = 4.0 # de Vaucouleurs
reff_pix = reff_arcsec * arcsectopix * 2.0 # *2 as we use finer pixels
reff_pix = 3.00398690e-02 # from optimiser...
xc = 32.9297206704 * 2.0 # *2 as we use finer pixels
yc = 35.2026629423 * 2.0 # idem
# OPTIMUM PARAMETERS FROM MINIMIZE :[ 5.00142905e+03 3.00398690e-02 3.97002517e+00]
# Sersic profile
def getprofilevalue(x, y, I0, reff_pix, sersic_index=4.0):
r = np.sqrt((x-xc)**2 + (y-yc)**2)
return I0*np.exp(-(r/reff_pix)**(1.0/sersic_index))
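    # getprofilevalue() above is the Sersic profile
    #   I(r) = I0 * exp(-(r / reff_pix)**(1.0 / sersic_index)),
    # with sersic_index = 4 being the de Vaucouleurs profile mentioned above.
    # Worked example: I0 = 100, reff_pix = 10, n = 4 gives I(10) = 100/e ~ 36.8.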
# Read data value
def getfitsvalue(data, x, y):
return data[x][y]
# 2D gaussian profile
fwhm = 2.0 # pixels
sigma = fwhm / 2.355
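    # 2.355 ~ 2*sqrt(2*ln 2), the standard conversion between a Gaussian's
    # full width at half maximum and its standard deviation.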
def get2dgaussianvalue(x, y, xc, yc):
return 1.0 / (2 * np.pi * sigma ** 2) * np.exp(-((x - xc) ** 2 + (y - yc) ** 2) / (2 * sigma ** 2))
for lind, line in enumerate(gdata):
for cind, elt in enumerate(line):
gdata[lind][cind] = get2dgaussianvalue(cind+1, lind+1, 65, 65)
pdata[lind][cind] = getprofilevalue(cind+1, lind+1, I0, reff_pix, sersic_index=3.97002517e+00)
import scipy.ndimage
if 1:
out = scipy.ndimage.filters.convolve(gdata, pdata)
for lind, line in enumerate(data):
for cind, elt in enumerate(line):
data[lind][cind] = out[lind][cind]
back.close()
profile.close()
gaussian.close()
#sys.exit()
from scipy.optimize import minimize
if 0:
xs = np.arange(0, 128, 1)
ys = np.arange(0, 128, 1)
fitsdata = pyfits.open('optimised.fits')[0].data
fitsval = getfitsvalue(fitsdata, xs, ys)
def tominim((vals)):
I0 = vals[0]
reff_pix = vals[1]
sersic_index = vals[2]
for lind, line in enumerate(gdata):
for cind, elt in enumerate(line):
pdata[lind][cind] = getprofilevalue(cind+1, lind+1, I0, reff_pix, sersic_index)
convol = scipy.ndimage.filters.convolve(gdata, pdata)
res = 0.0
for l1, l2 in zip(convol, fitsval):
for c1, c2 in zip(l1, l2):
res += np.sqrt((c1 - c2) ** 2)
return res
def tominimtest((vals)):
x = vals[0]
y = vals[1]
return (x-1) **2 + (y-2) **2
#print tominim([1000, 1.0])
#print tominim(1000, 0.2)
#sys.exit()
    opt_result = minimize(tominim, ([8000, 0.01, 4.5]), bounds=((2000, 10000), (0.01, 0.3), (3, 5)))
    #opt_result = minimize(tominimtest, ([6,6]), bounds=((2,3), (2,12)))
    print opt_result.x
    print opt_result.success
# OPTIMUM PARAMETERS FROM MINIMIZE :[ 9.67585375e+02 9.83258231e-02]
# OPTIMUM PARAMETERS FROM MINIMIZE :[ 5.00351033e+03 2.84755113e-02]
# OPTIMUM PARAMETERS FROM MINIMIZE :[ 5.00142905e+03 3.00398690e-02 3.97002517e+00] # I used this one !
# OPTIMUM PARAMETERS FROM MINIMIZE :[ 8.00000000e+03 1.15868793e-02 4.32036352e+00]
sys.exit()
# Let's do something stupid and simple (like me!): brute force optimisation:
if 0:
def leastsq(data1, data2):
res = 0.0
for l1, l2 in zip(data1, data2):
for c1, c2 in zip(l1, l2):
res += np.sqrt((c1 - c2) ** 2)
return res
I0s = np.arange(4000, 8000, 1000)
reffs = np.arange(0.01, 0.2, 0.02)
sersics = np.arange(3.5, 4.5, 0.2)
xs = np.arange(0, 128, 1)
ys = np.arange(0, 128, 1)
fitsdata = pyfits.open('optimised.fits')[0].data
fitsval = getfitsvalue(fitsdata, xs, ys)
resmin = 1e10
for indI0, I0 in enumerate(I0s):
for indreff, reff in enumerate(reffs):
for sersic in sersics:
for lind, line in enumerate(pdata):
for cind, elt in enumerate(line):
pdata[lind][cind] = getprofilevalue(cind+1, lind+1, I0, reff, sersic)
convol = scipy.ndimage.filters.convolve(gdata, pdata)
res = leastsq(convol, fitsval)
print I0, reff, sersic, res
if res < resmin:
resmin = res
I0min = I0
reffmin = reff
print 'Min results: I0 = ', I0min, ' | reff = ', reffmin
sys.exit()
if 0:
toplot = pyfits.open('back.fits')[0].data
# Display stuff, slow...
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization!
import numpy as np
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(0,128,1)
Y = np.arange(0,128,1)
Z = getfitsvalue(toplot, X, Y)
X, Y = np.meshgrid(X, Y)
#Z = getprofilevalue(128-X, Y)
#Z = getconvolution(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
|
COSMOGRAIL/COSMOULINE
|
pipe/extrascripts/background/drawbackground.py
|
Python
|
gpl-3.0
| 7,062
|
[
"Gaussian"
] |
6b07e5d44f013925c2e5e037c945c9f442b3da7b1a61075c8ef8ad2146ae9926
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import glob
import shutil
import vtk
import time
from PyQt5 import QtWidgets
import chigger
from peacock.ExodusViewer.plugins.VTKWindowPlugin import main
from peacock.utils import Testing
class TestVTKWindowPlugin(Testing.PeacockImageTestCase):
"""
Testing for VTKWindowPlugin
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
#: str: The filename to load.
_filename = Testing.get_chigger_input('mug_blocks_out.e')
#: str: Temporary filename for testing delayed load (see testFilename)
_temp_file = 'TestVTKWindowPlugin.e'
@classmethod
def setUpClass(cls):
super(TestVTKWindowPlugin, cls).setUpClass()
if os.path.exists(cls._temp_file):
os.remove(cls._temp_file)
def setUp(self):
"""
Loads an Exodus file in the VTKWindowWidget object using a structure similar to the ExodusViewer widget.
"""
self.sleepIfSlow()
self._widget, self._window = main(size=[600,600])
self._window.onSetFilename(self._filename)
self._window.onSetVariable('diffused')
self._window.onWindowRequiresUpdate()
def testInitialize(self):
"""
        Test that the result opens and is initialized.
"""
self.assertTrue(self._window._initialized)
self.assertImage('testInitialize.png', allowed=0.98)
def testCamera(self):
"""
Test that the camera can be modified.
"""
self._window.onCameraChanged((-0.7786, 0.2277, 0.5847), (9.2960, -0.4218, 12.6685), (0.0000, 0.0000, 0.1250))
self._window.onWindowRequiresUpdate()
self.assertImage('testCamera.png')
def testReader(self):
"""
Test that reader settings may be changed.
"""
self._window.onReaderOptionsChanged(dict(timestep=1))
self._window.onWindowRequiresUpdate()
tdata = self._window._reader.getTimeData()
self.assertEqual(1, tdata.timestep)
self.assertEqual(0.1, tdata.time)
self.assertImage('testReader.png')
def testResult(self):
"""
Test that result settings may be changed.
"""
self._window.onResultOptionsChanged(dict(cmap='viridis'))
self._window.onWindowRequiresUpdate()
self.assertEqual('viridis', self._window._result.getOption('cmap'))
self.assertImage('testResult.png')
def testFilename(self):
"""
        Tests that non-existent files, new files, and removed files do not break the window.
"""
# The source and destination filenames
filename = Testing.get_chigger_input('step10_micro_out.e')
newfile = self._temp_file
# Remove any existing files
for fname in glob.glob(newfile + '*'):
os.remove(fname)
# Supply a non-existent file
self._window.onSetFilename(newfile)
self._window.onWindowRequiresUpdate()
self.assertImage('testFilenameEmpty.png')
# Create the files and simulate the initialization timer timeout call
shutil.copy(filename, newfile)
for i in range(2, 6):
ext = '-s00' + str(i)
shutil.copy(filename + ext, newfile + ext)
time.sleep(1.5) # sleep so modified times differ
self._window._timers["initialize"].timeout.emit()
self._window.onResultOptionsChanged({'variable':'phi'})
self._window.onWindowRequiresUpdate()
self.assertImage('testFilenameCreated.png', allowed=0.98)
# Add new files and simulate the update timer timeout call
for i in range(6, 10):
ext = '-s00' + str(i)
shutil.copy(filename + ext, newfile + ext)
self._window.onWindowRequiresUpdate()
self.assertImage('testFilenameUpdated.png', allowed=0.98)
# Remove the files and simulate a call to the update timer
for fname in glob.glob(newfile + '*'):
os.remove(fname)
time.sleep(1.5)
self._window.onWindowRequiresUpdate()
self.assertImage('testFilenameEmpty.png') # the window should be empty again
    def testInteractorStyle(self):
"""
        Tests that the interaction style matches the mesh dimension.
"""
self.assertIsNone(self._window._window.getOption('style'))
self.assertIsInstance(self._window._window.getVTKInteractor().GetInteractorStyle(),
chigger.base.KeyPressInteractorStyle)
self._window.onSetFilename(Testing.get_chigger_input('displace.e'))
self._window.onWindowRequiresUpdate()
self.assertIsNone(self._window._window.getOption('style'))
self.assertIsInstance(self._window._window.getVTKInteractor().GetInteractorStyle(),
vtk.vtkInteractorStyleImage)
def testNoFile(self):
"""
Test that window shows up with peacock image.
"""
self._window.onSetFilename(None)
self._window.onWindowRequiresUpdate()
self.assertImage('testPeacockMessage.png')
def testLoadingMessage(self):
"""
Test that the load message can be toggled.
"""
self._window.onResultOptionsChanged(dict(cmap='viridis'))
self._window.onWindowRequiresUpdate()
self.assertEqual('viridis', self._window._result.getOption('cmap'))
self.assertImage('testResult.png')
self._window.onSetFilename(None)
self._window.onWindowRequiresUpdate()
self._window._setLoadingMessage("Testing...")
self.assertImage('testLoadingMessage.png')
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
|
nuclear-wizard/moose
|
python/peacock/tests/exodus_tab/test_VTKWindowPlugin.py
|
Python
|
lgpl-2.1
| 6,061
|
[
"MOOSE",
"VTK"
] |
a9a979201a73cc51a4157fb0a6af9dff7e4429b532c4a1930f720531ac2d603b
|
__author__ = 'tweninge'+'@'+'nd.edu'
__author__ = 'saguinag'+'@'+'nd.edu'
__author__ = 'rodrigopala91'+'@'+'gmail.com'
__version__ = "0.1.0"
##
## hrgm = hyperedge replacement grammars model
##
# TODO: Sort out the returns
# VersionLog:
# 0.0.1 Initial state; modified tw_karate_chop to accommodate this version
#
import argparse,traceback,optparse
import pandas as pd
import os, sys, time
import networkx as nx
from collections import deque, defaultdict, Counter
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from tw_karate_chop import save_plot_figure_2disk,quickbb,get_production_rule,add_to_prod_rules,visit,find_match,control_rod
import pprint
graphs_list=['Board Ex','Karate Club','NewWatStr SW','Erdos-Renyi','Kronecker','edgelist']
global num_nodes, debug, prod_rules
prod_rules={}
debug = False
class ListSupportedGraphs(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
##
print ">> GRAPH_NAME: list of supported graphs"
for g in graphs_list:
print " '%s'" % g
def write_to_json(data, model_name, graphs=False):
import json, time
timestr = time.strftime("%d%b%y_%H%M%S")
model_nm = "/tmp/"+model_name.replace (" ", "_")
if save_unique_names_bool:
out_file = model_nm+'_'+timestr
else:
out_file = model_nm
# TODO: are the next 2 lines needed?
#if os.path.isfile(out_file):
# out_file = "/tmp/"+model_name+'_'+timestr+'_1.json'
try:
if graphs: ## when writing graphs to a file
for g in data:
#print type (g)
df = pd.DataFrame(g.edges_iter())
df.columns=['% src_node','trg_node']
out_filename = out_file+"_edgeList_grph.tsv"
df.to_csv(out_filename, mode='a',sep='\t', index=False, encoding='utf-8', header=True)
print ' Graphs saved to edge-list file:',out_filename
else:
out_filename = out_file+".json"
with open(out_filename, 'w') as fp:
json.dump(data, fp)
print '\t',out_filename,'file saved.'
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
def load_graph(graph_name):
if graph_name=='':
return
if graph_name == 'Board Ex':
## Board example - Toy Graph
G = nx.Graph()
G.add_edge(1,2)
G.add_edge(1,5)
G.add_edge(2,3)
G.add_edge(2,4)
G.add_edge(3,4)
G.add_edge(3,5)
G.add_edge(4,6)
G.add_edge(5,6)
# G.add_edge(5,7)
# G.add_edge(6,7)
#print '--- Board Example ---'
elif graph_name == 'Karate Club':
G = nx.karate_club_graph()
elif graph_name == "Erdos-Renyi":
        n = 100  # number of nodes
        p = 0.5  # edge probability
        G = nx.gnp_random_graph(n, p)  # binomial graph, also sometimes called the Erdos-Renyi graph
elif graph_name == "NewWatStr SW":
k=2
p=0.5
G = nx.newman_watts_strogatz_graph(n, k, p)
elif graph_name == "Kronecker":
## TODO: implement Kronecker
G = nx.graph()
#
elif graph_name == "edgelist":
        # G = nx.read_edgelist('../demo_graphs/out.contact', comments="%", delimiter="\t")
        G = nx.read_edgelist('../demo_graphs/netscience.txt')
#LCCG = sorted(nx.connected_components(G), key = len, reverse=True)
cc = sorted(list(nx.connected_component_subgraphs(G)), key = len, reverse=True)
G = cc[0]
    else:
        G = nx.Graph()
return (graph_name,G)
def load_graphs(args):
print '-'*100
print '... loading graph:',args['graph_name']
#hrgs = HyperEdgeRGs(args['graph_name'])
#print hrgs.graph_name
avail = False
for g in graphs_list:
if g == args['graph_name'][0]:
G = load_graph(g)
## Check graph
# print G[0]
#print "%s -> G(n=%d,m=%d)" % (G.number_of_nodes(), G.number_of_edges()
avail = True
return G
if not avail:
        print '!!Warning: graph is not available'
os._exit(1)
def graph_checks(G):
## Target number of nodes
global num_nodes
num_nodes = G.number_of_nodes()
    if not nx.is_connected(G):
        print "Graph must be connected"
        os._exit(1)
    if G.number_of_selfloops() > 0:
        print "Graph must not contain self-loops"
        os._exit(1)
def get_parser():
parser = argparse.ArgumentParser(description='hrgm: Hyperedge Replacement Grammars Model')
parser.add_argument('graph_name', metavar='GRAPH_NAME', nargs=1, help='the graph name to process')
    parser.add_argument('--list', nargs=0, help='list the supported graph names', action=ListSupportedGraphs)
parser.add_argument('-s','--save', help='Save to disk with unique names', action='store_true', default=False)
parser.add_argument('--version', action='version', version=__version__)
return parser
def main():
#global options, args
#g = command_line_runner()
parser = get_parser()
args = vars(parser.parse_args())
global save_unique_names_bool
save_unique_names_bool = args['save']
if not args['graph_name']:
parser.print_help()
os._exit(1)
print args
(gn,G) = load_graphs(args)
graph_checks(G)
T = quickbb(G) # tree decomposition
root = list(T)[0]
print
print "---------------------"
print "- Intersection Tree -"
print "---------------------"
#get S
rhs = get_production_rule(G, root, set())
s = list()
for c in T[root]:
s.append( list(set(c).intersection(root)) )
add_to_prod_rules(prod_rules, set(), rhs, s)
visit(root, 0, set(),prod_rules, T, G)
    # exit()  # debug exit; if left active, the production rules below are never printed or used
print
print "--------------------"
print "- Production Rules -"
print "--------------------"
for k in prod_rules.iterkeys():
print k
for d in prod_rules[k]:
print '\t -> ', d, prod_rules[k][d]
write_to_json(prod_rules, gn) ## write production rules to disk
n_distribution = {}
eff_diag_run = pd.DataFrame()
grphs = []
print
print "--------------------"
print "- Runs -"
print "--------------------"
compute_eff_diameter = False
heterm_s = [] # Stats
nbr_of_runs = 100
for run in range(0,nbr_of_runs):
H = list()
N = list()
heterm_cnt = Counter()
N.append(["S"]) #starting node
ttt=0
eff_dia_gph = []
graphlet_node_cnt = []
#pick non terminal
num = 0
while len(N) > 0:
lhs_match = find_match(N, prod_rules)
e = []
match = []
for tup in lhs_match:
match.append(tup[0])
e.append(tup[1])
lhs_str = "(" + ",".join(str(x) for x in sorted(e)) + ")"
#DO SOMETHING USEFUL WITH THIS MATCH
new_idx = {}
n_rhs =str(control_rod(H, num_nodes, prod_rules[lhs_str].items())).lstrip("(").rstrip(")")
#n_rhs =str(weighted_choice(prod_rules[lhs_str].items())).lstrip("(").rstrip(")")
#n_rhs = str(random.choice(prod_rules[lhs_str].keys())).lstrip("(").rstrip(")")
# if n_rhs[-1] == "N":
if debug: print lhs_str, "->", n_rhs
for x in n_rhs.split(")("):
heterm_cnt[x] += 1
new_he = []
he = x.split(":")[0]
term_symb = x.split(":")[1]
for y in he.split(","):
if y.isdigit(): # y is internal node
if y not in new_idx:
new_idx[y] = num
num+=1
new_he.append(new_idx[y])
else: #y is external node
for tup in lhs_match: #which external node?
if tup[1] == y:
new_he.append(tup[0])
break
#prod = "(" + ",".join(str(x) for x in new_he) + ")"
if term_symb == "N":
N.append(sorted(new_he))
elif term_symb == "T":
H.append(new_he)
## ends for
##print 'N:',len(N), 'H:',len(H)
match = sorted(match)
N.remove(match)
if compute_eff_diameter:
##
## EFFECTIVE DIAMETER as graph grows
##
newG = nx.Graph()
for e in H:
if (len(e) == 1):
newG.add_node(e[0])
else:
newG.add_edge(e[0], e[1])
#eff_dia_gph.append(chartvis.bfs_eff_diam(newG, 50, .90))
#eff_dia_gph.append((newG.number_of_nodes(), chartvis.bfs_eff_diam(newG, 50, .90)))
eff_dia_gph.append([newG.number_of_nodes(),
chartvis.bfs_eff_diam(newG, 50, .90)])
if compute_eff_diameter:
ed = map(list, zip(eff_dia_gph))
eff_diag_run = eff_diag_run.append(pd.DataFrame(eff_dia_gph))
######
newG = nx.Graph()
for e in H:
if(len(e) == 1):
newG.add_node(e[0])
else:
newG.add_edge(e[0], e[1])
#print len(newG.edges())
n = newG.number_of_nodes()
if n in n_distribution:
n_distribution[newG.number_of_nodes()] += 1
else:
n_distribution[newG.number_of_nodes()] = 1
#if n == num_nodes:
grphs.append(newG)
heterm_s.append(heterm_cnt) ## keep each run's count of HETT
# print run number
if debug: print '\trun:',run,'in',nbr_of_runs
#------------------------ for loop ends
if compute_eff_diameter:
print eff_diag_run.shape
print eff_diag_run.head()
ef = eff_diag_run.groupby([0])
# print "V = ", newG.number_of_nodes()
# print "E = ", newG.number_of_edges()
# giant_nodes = max(nx.connected_component_subgraphs(newG), key=len)
# giant = nx.subgraph(newG, giant_nodes)
# print "V in giant component = ", giant.number_of_nodes()
# print "E in giant compenent = ", giant.number_of_edges()
# print "Diameter = ", nx.diameter(nx.subgraph(newG, giant))
## Print the distribution
x =[]; y =[]
for k in sorted(n_distribution.keys()):
print k,"\t",n_distribution[k]
x.append(k)
y.append(n_distribution[k])
## Save Graphs to Disk
write_to_json(grphs,gn,graphs=True)
## Chartvis
print '-'*80
print args
if __name__ == '__main__':
# g = command_line_runner()
# ## View/Plot the graph to a file
# fig = plt.figure(figsize=(1.6*6,1*6))
# ax0 = fig.add_subplot(111)
# nx.draw_networkx(g[1],ax=ax0)
# plt_filename="/tmp/outfig"
# try:
# save_plot_figure_2disk(plotname=plt_filename)
# print 'Saved plot to: '+plt_filename
# except Exception, e:
# print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
# print str(e)
# traceback.print_exc()
# os._exit(1)
# sys.exit(0)
main()
sys.exit(0)
|
abitofalchemy/hrg_nets
|
hrgm.py
|
Python
|
gpl-3.0
| 10,646
|
[
"VisIt"
] |
a73e623526c95dd2f1783bd41b3acc7fbcaee410120de3cf5daea64d31f1f6e3
|
# Orca
#
# Copyright 2013 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI to present Orca commands."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013 Igalia, S.L."
__license__ = "LGPL"
from gi.repository import GObject, Gdk, Gtk
from . import guilabels
from . import orca_state
class OrcaCommandListGUI:
def __init__(self, title, columnHeaders, rows, canPerformCommands):
self._tree = None
self._okButton = None
self._gui = self._createCommandListDialog(columnHeaders, rows)
self._gui.set_title(title)
self._gui.set_modal(True)
if not canPerformCommands:
self._okButton.destroy()
self.showGUI()
def _createCommandListDialog(self, columnHeaders, rows):
dialog = Gtk.Dialog()
dialog.set_default_size(600, 400)
grid = Gtk.Grid()
contentArea = dialog.get_content_area()
contentArea.add(grid)
scrolledWindow = Gtk.ScrolledWindow()
grid.add(scrolledWindow)
self._tree = Gtk.TreeView()
self._tree.set_hexpand(True)
self._tree.set_vexpand(True)
scrolledWindow.add(self._tree)
cols = len(columnHeaders) * [GObject.TYPE_STRING]
for i, header in enumerate(columnHeaders):
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(header, cell, text=i)
self._tree.append_column(column)
if header:
column.set_sort_column_id(i)
model = Gtk.ListStore(*cols)
for row in rows:
rowIter = model.append(None)
for i, cell in enumerate(row):
model.set_value(rowIter, i, str(cell))
column = self._tree.get_column(0)
column.set_visible(False)
btn = dialog.add_button(guilabels.BTN_CANCEL, Gtk.ResponseType.CANCEL)
btn.connect('clicked', self._onCancelClicked)
self._okButton = dialog.add_button(guilabels.BTN_OK, Gtk.ResponseType.OK)
self._okButton.grab_default()
self._okButton.connect('clicked', self._onOKClicked)
self._tree.set_model(model)
self._tree.connect('key-release-event', self._onKeyRelease)
return dialog
def showGUI(self):
self._gui.show_all()
ts = orca_state.lastInputEvent.timestamp
if ts == 0:
ts = Gtk.get_current_event_time()
self._gui.present_with_time(ts)
def _onKeyRelease(self, widget, event):
keycode = event.hardware_keycode
keymap = Gdk.Keymap.get_default()
entries_for_keycode = keymap.get_entries_for_keycode(keycode)
entries = entries_for_keycode[-1]
eventString = Gdk.keyval_name(entries[0])
if eventString == 'Return':
self._gui.activate_default()
def _onCancelClicked(self, widget):
self._gui.destroy()
def _onOKClicked(self, widget):
handler = self._getSelectedHandler()
self._gui.destroy()
def _getSelectedHandler(self):
if not self._tree:
return None
selection = self._tree.get_selection()
if not selection:
return None
model, paths = selection.get_selected_rows()
if not paths:
return None
return model.get_value(model.get_iter(paths[0]), 0)
def showUI(title='', columnHeaders=[], rows=[()], canPerformCommands=True):
gui = OrcaCommandListGUI(title, columnHeaders, rows, canPerformCommands)
gui.showGUI()
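# Hedged usage sketch; the values below are illustrative, not part of Orca.
# Column 0 is hidden by _createCommandListDialog() and read back by
# _getSelectedHandler(), so callers put a handler/identifier there. Running
# this requires a GTK main loop and a populated orca_state.lastInputEvent.
def _demo_show_ui():
    showUI(title='Orca Commands',
           columnHeaders=['', 'Command', 'Key Binding'],
           rows=[('cmd-sayAll', 'Say All', 'KP_Plus'),
                 ('cmd-whereAmI', 'Where Am I', 'KP_Enter')],
           canPerformCommands=True)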
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/orca_gui_commandlist.py
|
Python
|
gpl-3.0
| 4,290
|
[
"ORCA"
] |
1798322dd848caaaa24cb7cecc41e27c91d8d35f77c9e79844229cb050f7e6c2
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
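# Hedged usage sketch (not part of scikit-learn): _k_init is private and is
# normally reached through _init_centroids(); a direct call only needs the
# data, its precomputed squared row norms, and a RandomState.
def _demo_k_init():
    rng = check_random_state(0)
    X_demo = rng.rand(50, 2)
    seeds = _k_init(X_demo, n_clusters=3,
                    x_squared_norms=row_norms(X_demo, squared=True),
                    random_state=rng)
    return seeds  # shape (3, 2): one k-means++ seed per cluster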
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False):
"""K-means clustering algorithm.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
    # subtract the mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
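# Hedged usage sketch (not part of scikit-learn): with return_n_iter left at
# its default of False, k_means() returns (centers, labels, inertia).
def _demo_k_means():
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5.0])
    centers, labels, inertia = k_means(X_demo, n_clusters=2, random_state=0)
    return centers, labels, inertia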
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
if squared_norm(centers_old - centers) <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
    labels : int array of shape (n_samples,)
        The resulting assignment.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
    # set the default value of labels to -1 to be able to detect any anomaly
    # easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
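# Illustrative sketch, not part of the library: the precomputed
# x_squared_norms speed up the distance computations via the expansion
#   ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2
# so only the dot products need to be recomputed when centers change:
def _demo_squared_distances(X, centers, x_squared_norms):
    import numpy as np
    c_squared_norms = (centers ** 2).sum(axis=1)
    return (x_squared_norms[:, None]
            - 2.0 * np.dot(X, centers.T)
            + c_squared_norms[None, :])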
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
    init : {'k-means++', 'random', ndarray, or callable}, optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it
        at hand already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
if len(centers) != k:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, k))
return centers
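# Illustrative sketch, not part of the library: the idea behind the
# 'k-means++' branch above (implemented by _k_init) is D^2 sampling -- each
# new seed is drawn with probability proportional to its squared distance
# from the seeds already chosen. A simplified dense version that skips the
# multiple candidate trials of the real implementation:
def _demo_kmeanspp_seeding(X, k, random_state):
    import numpy as np
    n_samples = X.shape[0]
    centers = np.empty((k, X.shape[1]), dtype=X.dtype)
    centers[0] = X[random_state.randint(n_samples)]
    closest_d2 = ((X - centers[0]) ** 2).sum(axis=1)
    for c in range(1, k):
        probs = closest_d2 / closest_d2.sum()
        centers[c] = X[random_state.choice(n_samples, p=probs)]
        d2 = ((X - centers[c]) ** 2).sum(axis=1)
        closest_d2 = np.minimum(closest_d2, d2)
    return centers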
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it may fall into local minima.
    That's why it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
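    Examples
    --------
    A minimal usage sketch (illustrative; the exact centers found depend
    on the random initialization):
    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ...               [4., 2.], [4., 4.], [4., 0.]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
    >>> kmeans.cluster_centers_.shape
    (2, 2)
    >>> kmeans.predict([[0., 0.], [4., 4.]]).shape
    (2,)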
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
if hasattr(init, '__array__'):
n_clusters = init.shape[0]
init = np.asarray(init, dtype=np.float64)
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr')
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
if X.dtype.kind != 'f':
warnings.warn("Got data type %s, converted to float "
"to avoid overflows" % X.dtype,
RuntimeWarning, stacklevel=2)
            X = X.astype(np.float64)
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
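# Illustrative note, not part of the library: for a fitted estimator km,
# km.score(X) is the negated k-means objective on X, i.e. minus the sum of
# squared distances to the nearest centers. A hypothetical dense check:
def _demo_score_is_negative_inertia(km, X):
    import numpy as np
    d2 = ((X[:, None, :] - km.cluster_centers_[None, :, :]) ** 2).sum(axis=2)
    return np.allclose(km.score(X), -d2.min(axis=1).sum())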
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float64, shape (n_samples,), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
    old_center_buffer : array, shape (n_features,)
        Copy of the old centers, used for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
    squared_diff : float
        Sum of squared differences between the previous and the updated
        cluster centers (0.0 when compute_squared_diff is False).
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
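# Illustrative sketch, not part of the library: the per-center update above
# is the standard streaming-mean identity. If a center currently averages
# count_old points and the minibatch contributes batch_points, then
#   new_center = (center * count_old + batch_points.sum(axis=0))
#                / (count_old + len(batch_points))
# which is exactly the in-place rescale / add / divide sequence used above:
def _demo_streaming_mean_update(center, count_old, batch_points):
    import numpy as np
    total = center * count_old + np.sum(batch_points, axis=0)
    count_new = count_old + batch_points.shape[0]
    return total / count_new, count_new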
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
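# Illustrative sketch, not part of the library: the convergence monitor
# above smooths the noisy per-batch statistics with an exponentially
# weighted average, with alpha = min(1, 2 * batch_size / (n_samples + 1)):
def _demo_ewa(values, alpha):
    ewa = None
    for v in values:
        ewa = v if ewa is None else ewa * (1.0 - alpha) + v * alpha
    return ewa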
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
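    Examples
    --------
    A minimal usage sketch (illustrative only):
    >>> from sklearn.cluster import MiniBatchKMeans
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [1., 4.], [1., 0.],
    ...               [4., 2.], [4., 4.], [4., 0.]])
    >>> mbk = MiniBatchKMeans(n_clusters=2, batch_size=3,
    ...                       random_state=0).fit(X)
    >>> mbk.cluster_centers_.shape
    (2, 2)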
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high-dim data:
            # hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call of partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=np.float64)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
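# Illustrative sketch, not part of the library: partial_fit above supports
# out-of-core clustering by feeding the estimator one chunk at a time, e.g.
# when the full dataset does not fit in memory (hypothetical helper):
def _demo_partial_fit_stream(chunks, n_clusters=8):
    from sklearn.cluster import MiniBatchKMeans
    mbk = MiniBatchKMeans(n_clusters=n_clusters, random_state=0)
    for chunk in chunks:
        # each chunk: array of shape (chunk_size, n_features)
        mbk.partial_fit(chunk)
    return mbk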
|
ashhher3/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 54,782
|
[
"Gaussian"
] |
511be01489595e399524218ca92dfbbc1283f096e48ec153ba279951fc7ad6c5
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from six import binary_type, text_type
import unittest
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pandas as pd
from IPython.core.display import Image, SVG
from nose.tools import assert_is_instance, assert_true
from skbio import OrdinationResults
from skbio._base import SkbioObject
class TestSkbioObject(unittest.TestCase):
def test_no_instantiation(self):
class Foo(SkbioObject):
pass
with self.assertRaises(TypeError):
Foo()
class TestOrdinationResults(unittest.TestCase):
def setUp(self):
# Define in-memory CA results to serialize and deserialize.
eigvals = pd.Series([0.0961330159181, 0.0409418140138], ['CA1', 'CA2'])
features = np.array([[0.408869425742, 0.0695518116298],
[-0.1153860437, -0.299767683538],
[-0.309967102571, 0.187391917117]])
samples = np.array([[-0.848956053187, 0.882764759014],
[-0.220458650578, -1.34482000302],
[1.66697179591, 0.470324389808]])
features_ids = ['Species1', 'Species2', 'Species3']
sample_ids = ['Site1', 'Site2', 'Site3']
samples_df = pd.DataFrame(samples, index=sample_ids,
columns=['CA1', 'CA2'])
features_df = pd.DataFrame(features, index=features_ids,
columns=['CA1', 'CA2'])
self.ordination_results = OrdinationResults(
            'CA', 'Correspondence Analysis', eigvals=eigvals,
samples=samples_df, features=features_df)
# DataFrame for testing plot method. Has a categorical column with a
# mix of numbers and strings. Has a numeric column with a mix of ints,
# floats, and strings that can be converted to floats. Has a numeric
# column with missing data (np.nan).
self.df = pd.DataFrame([['foo', '42', 10],
[22, 0, 8],
[22, -4.2, np.nan],
['foo', '42.19', 11]],
index=['A', 'B', 'C', 'D'],
columns=['categorical', 'numeric', 'nancolumn'])
# Minimal ordination results for easier testing of plotting method.
# Paired with df above.
eigvals = np.array([0.50, 0.25, 0.25])
samples = np.array([[0.1, 0.2, 0.3],
[0.2, 0.3, 0.4],
[0.3, 0.4, 0.5],
[0.4, 0.5, 0.6]])
samples_df = pd.DataFrame(samples, ['A', 'B', 'C', 'D'],
['PC1', 'PC2', 'PC3'])
self.min_ord_results = OrdinationResults(
'PCoA', 'Principal Coordinate Analysis', eigvals, samples_df)
def test_str(self):
exp = ("Ordination results:\n"
"\tMethod: Correspondance Analysis (CA)\n"
"\tEigvals: 2\n"
"\tProportion explained: N/A\n"
"\tFeatures: 3x2\n"
"\tSamples: 3x2\n"
"\tBiplot Scores: N/A\n"
"\tSample constraints: N/A\n"
"\tFeature IDs: 'Species1', 'Species2', 'Species3'\n"
"\tSample IDs: 'Site1', 'Site2', 'Site3'")
obs = str(self.ordination_results)
self.assertEqual(obs, exp)
# all optional attributes missing
exp = ("Ordination results:\n"
"\tMethod: Principal Coordinate Analysis (PCoA)\n"
"\tEigvals: 1\n"
"\tProportion explained: N/A\n"
"\tFeatures: N/A\n"
"\tSamples: 2x1\n"
"\tBiplot Scores: N/A\n"
"\tSample constraints: N/A\n"
"\tFeature IDs: N/A\n"
"\tSample IDs: 0, 1")
samples_df = pd.DataFrame(np.array([[1], [2]]))
obs = str(OrdinationResults('PCoA', 'Principal Coordinate Analysis',
pd.Series(np.array([4.2])), samples_df))
self.assertEqual(obs.split('\n'), exp.split('\n'))
def check_basic_figure_sanity(self, fig, exp_num_subplots, exp_title,
exp_legend_exists, exp_xlabel, exp_ylabel,
exp_zlabel):
# check type
assert_is_instance(fig, mpl.figure.Figure)
# check number of subplots
axes = fig.get_axes()
npt.assert_equal(len(axes), exp_num_subplots)
# check title
ax = axes[0]
npt.assert_equal(ax.get_title(), exp_title)
# shouldn't have tick labels
for tick_label in (ax.get_xticklabels() + ax.get_yticklabels() +
ax.get_zticklabels()):
npt.assert_equal(tick_label.get_text(), '')
# check if legend is present
legend = ax.get_legend()
if exp_legend_exists:
assert_true(legend is not None)
else:
assert_true(legend is None)
# check axis labels
npt.assert_equal(ax.get_xlabel(), exp_xlabel)
npt.assert_equal(ax.get_ylabel(), exp_ylabel)
npt.assert_equal(ax.get_zlabel(), exp_zlabel)
def test_plot_no_metadata(self):
fig = self.min_ord_results.plot()
self.check_basic_figure_sanity(fig, 1, '', False, '0', '1', '2')
def test_plot_with_numeric_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'numeric', axes=(1, 0, 2),
axis_labels=['PC 2', 'PC 1', 'PC 3'], title='a title', cmap='Reds')
self.check_basic_figure_sanity(
fig, 2, 'a title', False, 'PC 2', 'PC 1', 'PC 3')
def test_plot_with_categorical_metadata_and_plot_options(self):
fig = self.min_ord_results.plot(
self.df, 'categorical', axes=[2, 0, 1], title='a title',
cmap='Accent')
self.check_basic_figure_sanity(fig, 1, 'a title', True, '2', '0', '1')
def test_plot_with_invalid_axis_labels(self):
with six.assertRaisesRegex(self, ValueError, 'axis_labels.*4'):
self.min_ord_results.plot(axes=[2, 0, 1],
axis_labels=('a', 'b', 'c', 'd'))
def test_validate_plot_axes_valid_input(self):
# shouldn't raise an error on valid input. nothing is returned, so
# nothing to check here
samples = self.min_ord_results.samples.values.T
self.min_ord_results._validate_plot_axes(samples, (1, 2, 0))
def test_validate_plot_axes_invalid_input(self):
# not enough dimensions
        with six.assertRaisesRegex(self, ValueError, r'2 dimension\(s\)'):
self.min_ord_results._validate_plot_axes(
np.asarray([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]), (0, 1, 2))
coord_matrix = self.min_ord_results.samples.values.T
# wrong number of axes
with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 0'):
self.min_ord_results._validate_plot_axes(coord_matrix, [])
with six.assertRaisesRegex(self, ValueError, 'exactly three.*found 4'):
self.min_ord_results._validate_plot_axes(coord_matrix,
(0, 1, 2, 3))
# duplicate axes
with six.assertRaisesRegex(self, ValueError, 'must be unique'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 1, 0))
# out of range axes
        with six.assertRaisesRegex(self, ValueError, r'axes\[1\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, -1, 2))
        with six.assertRaisesRegex(self, ValueError, r'axes\[2\].*3'):
self.min_ord_results._validate_plot_axes(coord_matrix, (0, 2, 3))
def test_get_plot_point_colors_invalid_input(self):
# column provided without df
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(None, 'numeric',
['B', 'C'], 'jet')
# df provided without column
with npt.assert_raises(ValueError):
self.min_ord_results._get_plot_point_colors(self.df, None,
['B', 'C'], 'jet')
# column not in df
with six.assertRaisesRegex(self, ValueError, 'missingcol'):
self.min_ord_results._get_plot_point_colors(self.df, 'missingcol',
['B', 'C'], 'jet')
# id not in df
with six.assertRaisesRegex(self, ValueError, 'numeric'):
self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'missingid', 'A'], 'jet')
# missing data in df
with six.assertRaisesRegex(self, ValueError, 'nancolumn'):
self.min_ord_results._get_plot_point_colors(self.df, 'nancolumn',
['B', 'C', 'A'], 'jet')
def test_get_plot_point_colors_no_df_or_column(self):
obs = self.min_ord_results._get_plot_point_colors(None, None,
['B', 'C'], 'jet')
npt.assert_equal(obs, (None, None))
def test_get_plot_point_colors_numeric_column(self):
# subset of the ids in df
exp = [0.0, -4.2, 42.0]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp)
assert_true(obs[1] is None)
# all ids in df
exp = [0.0, 42.0, 42.19, -4.2]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'numeric', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp)
assert_true(obs[1] is None)
def test_get_plot_point_colors_categorical_column(self):
# subset of the ids in df
exp_colors = [[0., 0., 0.5, 1.], [0., 0., 0.5, 1.], [0.5, 0., 0., 1.]]
exp_color_dict = {
'foo': [0.5, 0., 0., 1.],
22: [0., 0., 0.5, 1.]
}
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'C', 'A'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
npt.assert_equal(obs[1], exp_color_dict)
# all ids in df
exp_colors = [[0., 0., 0.5, 1.], [0.5, 0., 0., 1.], [0.5, 0., 0., 1.],
[0., 0., 0.5, 1.]]
obs = self.min_ord_results._get_plot_point_colors(
self.df, 'categorical', ['B', 'A', 'D', 'C'], 'jet')
npt.assert_almost_equal(obs[0], exp_colors)
# should get same color dict as before
npt.assert_equal(obs[1], exp_color_dict)
def test_plot_categorical_legend(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# we shouldn't have a legend yet
assert_true(ax.get_legend() is None)
self.min_ord_results._plot_categorical_legend(
ax, {'foo': 'red', 'bar': 'green'})
# make sure we have a legend now
legend = ax.get_legend()
assert_true(legend is not None)
# do some light sanity checking to make sure our input labels and
# colors are present. we're not using nose.tools.assert_items_equal
# because it isn't available in Python 3.
labels = [t.get_text() for t in legend.get_texts()]
npt.assert_equal(sorted(labels), ['bar', 'foo'])
colors = [l.get_color() for l in legend.get_lines()]
npt.assert_equal(sorted(colors), ['green', 'red'])
def test_repr_png(self):
obs = self.min_ord_results._repr_png_()
assert_is_instance(obs, binary_type)
assert_true(len(obs) > 0)
def test_repr_svg(self):
obs = self.min_ord_results._repr_svg_()
# print_figure(format='svg') can return text or bytes depending on the
# version of IPython
assert_true(isinstance(obs, text_type) or isinstance(obs, binary_type))
assert_true(len(obs) > 0)
def test_png(self):
assert_is_instance(self.min_ord_results.png, Image)
def test_svg(self):
assert_is_instance(self.min_ord_results.svg, SVG)
if __name__ == '__main__':
unittest.main()
|
SamStudio8/scikit-bio
|
skbio/tests/test_base.py
|
Python
|
bsd-3-clause
| 12,772
|
[
"scikit-bio"
] |
e04aab3a68504779c716202468779bdeface54db8b5bff4e49da10f928ba6fbd
|