text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""Vimdoc plugin management."""
from collections import OrderedDict
import json
import os
import warnings
import vimdoc
from vimdoc import error
from vimdoc import parser
from vimdoc import regex
from vimdoc.block import Block
# Plugin subdirectories that should be crawled by vimdoc.
# Used by Modules() below to prune os.walk: only these top-level dirs (plus
# 'after', and these same names under 'after/') are visited.
DOC_SUBDIRS = [
    'plugin',
    'instant',
    'autoload',
    'syntax',
    'indent',
    'ftdetect',
    'ftplugin',
    'spell',
    'colors',
]
class Module(object):
  """Manages a set of source files that all output to the same help file."""

  def __init__(self, name, plugin):
    # Help file name for this module.
    self.name = name
    # The VimPlugin that owns this module (shared across standalone modules).
    self.plugin = plugin
    # Section id -> section block, kept in insertion order until Close()
    # rewrites it into final presentation order.
    self.sections = OrderedDict()
    # Section id -> backmatter block yielded after that section's contents.
    self.backmatters = {}
    # Block type identifier -> list of blocks of that type.
    self.collections = {}
    # Explicit section ordering (from an @order directive), or None.
    self.order = None

  def Merge(self, block, namespace=None):
    """Merges a block with the module.

    Args:
      block: A parsed documentation block from one of the module's files.
      namespace: Autoload namespace to record on the block, or None.
    Raises:
      error.AmbiguousBlock: If the block's type was marked but never resolved.
      error.RedundantControl: If @order is declared more than once.
      error.DuplicateSection: If two explicit blocks define the same section.
      error.DuplicateBackmatter: If two explicit blocks define backmatter for
          the same section.
    """
    typ = block.locals.get('type')
    # This block doesn't want to be spoken to.
    if not typ:
      return
    # If the type still hasn't been set, it never will be.
    if typ is True:
      raise error.AmbiguousBlock
    block.Local(namespace=namespace)
    # Consume module-level metadata.
    if 'order' in block.globals:
      if self.order is not None:
        raise error.RedundantControl('order')
      self.order = block.globals['order']
    # The plugin tracks plugin-wide metadata and typed collections.
    self.plugin.Merge(block)
    # Sections and Backmatter are specially treated.
    block_id = block.locals.get('id')
    if typ == vimdoc.SECTION:
      # Overwrite existing section if it's a default.
      if block_id not in self.sections or self.sections[block_id].IsDefault():
        self.sections[block_id] = block
      elif not block.IsDefault():
        # Tried to overwrite explicit section with explicit section.
        raise error.DuplicateSection(block_id)
    elif typ == vimdoc.BACKMATTER:
      # Overwrite existing section backmatter if it's a default.
      if (block_id not in self.backmatters
          or self.backmatters[block_id].IsDefault()):
        self.backmatters[block_id] = block
      elif not block.IsDefault():
        # Tried to overwrite explicit backmatter with explicit backmatter.
        raise error.DuplicateBackmatter(block_id)
    else:
      # All other block types are grouped into typed collections.
      collection_type = self.plugin.GetCollectionType(block)
      if collection_type is not None:
        self.collections.setdefault(collection_type, []).append(block)

  def LookupTag(self, typ, name):
    # Delegates to the plugin, which sees blocks from all modules.
    return self.plugin.LookupTag(typ, name)

  def GetCollection(self, typ):
    """Gets a collection by type, sorting and filtering as necessary.

    Applies appropriate sorting for the type. Most types are left in the same
    order they were defined in the code since that's usually the logical order.
    Functions are sorted by namespace but functions within a namespace are left
    in definition order. Dicts are sorted by name.

    Some blocks might be "default blocks", meaning they should be omitted if
    any other block defines the same tag name. These will be omitted as
    appropriate.

    Args:
      typ: a vimdoc block type identifier (e.g., vimdoc.FUNCTION)
    Returns:
      The sorted, filtered list of blocks of that type.
    """
    collection = self.collections.get(typ, ())
    if typ == vimdoc.FUNCTION:
      # Sort by namespace, but preserve order within the same namespace. This
      # lets us avoid variability in the order files are traversed without
      # losing all useful order information.
      collection = sorted(collection,
                          key=lambda x: x.locals.get('namespace', ''))
    elif typ == vimdoc.DICTIONARY:
      collection = sorted(collection)
    # Drop default blocks whose tag name is claimed by an explicit block.
    non_default_names = set(x.TagName() for x in collection
                            if not x.IsDefault())
    return [x for x in collection
            if not x.IsDefault() or x.TagName() not in non_default_names]

  def Close(self):
    """Closes the module.

    All default sections that have not been overridden will be created.
    """
    # Add default sections for each non-empty built-in collection.
    if self.GetCollection(vimdoc.FUNCTION) and 'functions' not in self.sections:
      functions = Block(vimdoc.SECTION)
      functions.Local(id='functions', name='Functions')
      self.Merge(functions)
    if (self.GetCollection(vimdoc.EXCEPTION)
        and 'exceptions' not in self.sections):
      exceptions = Block(vimdoc.SECTION)
      exceptions.Local(id='exceptions', name='Exceptions')
      self.Merge(exceptions)
    if self.GetCollection(vimdoc.COMMAND) and 'commands' not in self.sections:
      commands = Block(vimdoc.SECTION)
      commands.Local(id='commands', name='Commands')
      self.Merge(commands)
    if self.GetCollection(vimdoc.DICTIONARY) and 'dicts' not in self.sections:
      dicts = Block(vimdoc.SECTION)
      dicts.Local(id='dicts', name='Dictionaries')
      self.Merge(dicts)
    if self.GetCollection(vimdoc.FLAG):
      # If any maktaba flags were documented, add a default configuration
      # section to explain how to use them.
      config = Block(vimdoc.SECTION, is_default=True)
      config.Local(id='config', name='Configuration')
      config.AddLine(
          'This plugin uses maktaba flags for configuration. Install Glaive'
          ' (https://github.com/google/glaive) and use the @command(Glaive)'
          ' command to configure them.')
      self.Merge(config)
    if ((self.GetCollection(vimdoc.FLAG) or
         self.GetCollection(vimdoc.SETTING)) and
        'config' not in self.sections):
      config = Block(vimdoc.SECTION)
      config.Local(id='config', name='Configuration')
      self.Merge(config)
    # Every backmatter must belong to a known section.
    for backmatter in self.backmatters:
      if backmatter not in self.sections:
        raise error.NoSuchSection(backmatter)
    # Use explicit order as partial ordering and merge with default section
    # ordering. All custom sections must be ordered explicitly.
    self.order = self._GetSectionOrder(self.order, self.sections)
    known = set(self.sections)
    neglected = sorted(known.difference(self.order))
    if neglected:
      raise error.NeglectedSections(neglected, self.order)
    # Sections are now in order.
    for key in self.order:
      if key in self.sections:
        # Move to end.
        self.sections[key] = self.sections.pop(key)

  def Chunks(self):
    """Yields all blocks of the module in final documentation order."""
    for ident, section in self.sections.items():
      yield section
      if ident == 'functions':
        # Plain functions only: dict methods and exceptions appear in their
        # own sections below.
        for block in self.GetCollection(vimdoc.FUNCTION):
          if 'dict' not in block.locals and 'exception' not in block.locals:
            yield block
      if ident == 'commands':
        for block in self.GetCollection(vimdoc.COMMAND):
          yield block
      if ident == 'dicts':
        for block in self.GetCollection(vimdoc.DICTIONARY):
          yield block
          # Yield each dict's functions directly after the dict itself.
          for func in self.GetCollection(vimdoc.FUNCTION):
            if func.locals.get('dict') == block.locals['dict']:
              yield func
      if ident == 'exceptions':
        for block in self.GetCollection(vimdoc.EXCEPTION):
          yield block
      if ident == 'config':
        for block in self.GetCollection(vimdoc.FLAG):
          yield block
        for block in self.GetCollection(vimdoc.SETTING):
          yield block
      if ident in self.backmatters:
        yield self.backmatters[ident]

  @staticmethod
  def _GetSectionOrder(explicit_order, sections):
    """Gets final section order from explicit_order and actual sections present.

    Built-in sections with no explicit order come before custom sections, with
    two exceptions:
      * The "about" section comes last by default.
      * If a built-in section is explicitly ordered, it "resets" the ordering
        so that subsequent built-in sections come directly after it.
    This yields the order you would intuitively expect in cases like ordering
    "intro" after other sections.

    Args:
      explicit_order: List of section ids from @order, or None.
      sections: Mapping of section id to section block.
    Returns:
      List of section ids in final presentation order.
    """
    order = explicit_order or []
    default_order = [
        'intro',
        'config',
        'commands',
        'autocmds',
        'settings',
        'dicts',
        'functions',
        'exceptions',
        'mappings']
    # Add any undeclared sections before custom sections, except 'about' which
    # comes at the end by default.
    section_insertion_idx = 0
    order = order[:]
    for builtin in default_order:
      if builtin in order:
        # Section already present. Skip and continue later sections after it.
        section_insertion_idx = order.index(builtin) + 1
        continue
      else:
        # If section present, insert into order at logical index.
        if builtin in sections:
          order.insert(section_insertion_idx, builtin)
          section_insertion_idx += 1
    if 'about' in sections and 'about' not in order:
      order.append('about')
    return order
class VimPlugin(object):
  """State for entire plugin (potentially multiple modules)."""

  def __init__(self, name):
    self.name = name
    # Block type identifier -> list of blocks of that type, plugin-wide.
    self.collections = {}
    # Metadata, populated from addon-info.json and @stylization/@library.
    self.tagline = None
    self.author = None
    self.stylization = None
    self.library = None

  def ConsumeMetadata(self, block):
    """Consumes plugin-level metadata controls from a section/backmatter block.

    Args:
      block: A block of type vimdoc.SECTION or vimdoc.BACKMATTER.
    Raises:
      error.InvalidBlock: If a deprecated directive (@author, @tagline) is used.
      error.RedundantControl: If @stylization or @library is declared twice.
    """
    assert block.locals.get('type') in [vimdoc.SECTION, vimdoc.BACKMATTER]
    # Error out for deprecated controls.
    if 'author' in block.globals:
      raise error.InvalidBlock(
          'Invalid directive @author.'
          ' Specify author field in addon-info.json instead.')
    if 'tagline' in block.globals:
      raise error.InvalidBlock(
          'Invalid directive @tagline.'
          ' Specify description field in addon-info.json instead.')
    for control in ['stylization', 'library']:
      if control in block.globals:
        if getattr(self, control) is not None:
          raise error.RedundantControl(control)
        setattr(self, control, block.globals[control])

  def LookupTag(self, typ, name):
    """Returns the tag name for the given type and name.

    Args:
      typ: A vimdoc block type identifier (e.g., vimdoc.COMMAND).
      name: The block name to look up.
    Raises:
      KeyError: If multiple blocks of the type share the name.
    """
    # Support both @command(Name) and @command(:Name).
    if typ == vimdoc.COMMAND:
      fullname = name.lstrip(':')
    elif typ == vimdoc.SETTING:
      # Settings without an explicit scope prefix default to global scope.
      # (Conditional expression instead of the fragile `x and a or b` idiom.)
      scope_match = regex.setting_scope.match(name)
      fullname = name if scope_match else 'g:' + name
    else:
      fullname = name
    block = None
    if typ in self.collections:
      collection = self.collections[typ]
      candidates = [x for x in collection if x.FullName() == fullname]
      if len(candidates) > 1:
        raise KeyError('Found multiple %ss named %s' % (typ, name))
      if candidates:
        block = candidates[0]
    if block is None:
      # Create a dummy block to get default tag.
      block = Block(typ)
      block.Local(name=fullname)
    return block.TagName()

  def GetCollectionType(self, block):
    """Returns the collection type for block, or None if it is excluded."""
    typ = block.locals.get('type')
    # The inclusion of function docs depends upon the module type.
    if typ == vimdoc.FUNCTION:
      # Exclude deprecated functions.
      if block.locals.get('deprecated'):
        return None
      # If this is a library module, exclude private functions.
      if self.library and block.locals.get('private'):
        return None
      # If this is a non-library, exclude non-explicitly-public functions
      # (functions are private unless explicitly marked otherwise).
      if not self.library and block.locals.get('private', True):
        return None
      if 'exception' in block.locals:
        return vimdoc.EXCEPTION
    return typ

  def Merge(self, block):
    """Merges a block into plugin metadata or the appropriate collection."""
    typ = block.locals.get('type')
    if typ in [vimdoc.SECTION, vimdoc.BACKMATTER]:
      self.ConsumeMetadata(block)
    else:
      collection_type = self.GetCollectionType(block)
      if collection_type is not None:
        self.collections.setdefault(collection_type, []).append(block)
def Modules(directory):
  """Creates modules from a plugin directory.

  Note that there can be many, if a plugin has standalone parts that merit
  their own helpfiles.

  Args:
    directory: The plugin directory.
  Yields:
    Module objects as necessary.
  """
  directory = directory.rstrip(os.path.sep)
  addon_info = None
  # Check for module metadata in addon-info.json (if it exists).
  addon_info_path = os.path.join(directory, 'addon-info.json')
  if os.path.isfile(addon_info_path):
    try:
      with open(addon_info_path, 'r') as addon_info_file:
        addon_info = json.loads(addon_info_file.read())
    except (IOError, ValueError) as e:
      warnings.warn(
          'Failed to read file {}. Error was: {}'.format(addon_info_path, e),
          error.InvalidAddonInfo)
  # Use plugin name from addon-info.json if available. Fall back to dir name.
  # From here on addon_info is always a dict ({} when missing or unreadable),
  # so no further None checks are needed.
  addon_info = addon_info or {}
  plugin_name = addon_info.get(
      'name', os.path.basename(os.path.abspath(directory)))
  plugin = VimPlugin(plugin_name)
  # Set plugin metadata from addon-info.json.
  if 'author' in addon_info:
    plugin.author = addon_info['author']
  if 'description' in addon_info:
    plugin.tagline = addon_info['description']
  # Crawl plugin dir and collect parsed blocks for each file path.
  paths_and_blocks = []
  standalone_paths = []
  autoloaddir = os.path.join(directory, 'autoload')
  for (root, dirs, files) in os.walk(directory):
    # Visit files in a stable order, since the ordering of e.g. the Maktaba
    # flags below depends upon the order that we visit the files.
    dirs.sort()
    files.sort()
    # Prune non-standard top-level dirs like 'test'.
    if root == directory:
      dirs[:] = [x for x in dirs if x in DOC_SUBDIRS + ['after']]
    if root == os.path.join(directory, 'after'):
      dirs[:] = [x for x in dirs if x in DOC_SUBDIRS]
    for f in files:
      filename = os.path.join(root, f)
      if os.path.splitext(filename)[1] == '.vim':
        relative_path = os.path.relpath(filename, directory)
        with open(filename) as filehandle:
          lines = list(filehandle)
        blocks = list(parser.ParseBlocks(lines, filename))
        # Define implicit maktaba flags for files that call
        # maktaba#plugin#Enter. These flags have to be special-cased here
        # because there aren't necessarily associated doc comment blocks and
        # the name is computed from the file name.
        if (not relative_path.startswith('autoload' + os.path.sep)
            and relative_path != os.path.join('instant', 'flags.vim')):
          if ContainsMaktabaPluginEnterCall(lines):
            flagpath = relative_path
            if flagpath.startswith('after' + os.path.sep):
              flagpath = os.path.relpath(flagpath, 'after')
            flagblock = Block(vimdoc.FLAG, is_default=True)
            # e.g. 'plugin/commands.vim' -> 'plugin[commands]'.
            name_parts = os.path.splitext(flagpath)[0].split(os.path.sep)
            flagname = name_parts.pop(0)
            flagname += ''.join('[' + p + ']' for p in name_parts)
            flagblock.Local(name=flagname)
            flagblock.AddLine(
                'Configures whether {} should be loaded.'.format(
                    relative_path))
            default = 0 if flagname == 'plugin[mappings]' else 1
            # Use unbulleted list to make sure it's on its own line. Use
            # backtick to avoid helpfile syntax highlighting.
            flagblock.AddLine(' - Default: {} `'.format(default))
            blocks.append(flagblock)
        paths_and_blocks.append((relative_path, blocks))
        # A leading @standalone block in an autoload file starts a standalone
        # module rooted at that file.
        if filename.startswith(autoloaddir):
          if blocks and blocks[0].globals.get('standalone'):
            standalone_paths.append(relative_path)
  docdir = os.path.join(directory, 'doc')
  if not os.path.isdir(docdir):
    os.mkdir(docdir)
  modules = []
  # Build the main module from all non-standalone paths.
  main_module = Module(plugin_name, plugin)
  for (path, blocks) in paths_and_blocks:
    # Skip standalone paths.
    if GetMatchingStandalonePath(path, standalone_paths) is not None:
      continue
    namespace = None
    if path.startswith('autoload' + os.path.sep):
      namespace = GetAutoloadNamespace(os.path.relpath(path, 'autoload'))
    for block in blocks:
      main_module.Merge(block, namespace=namespace)
  modules.append(main_module)
  # Process standalone modules.
  standalone_modules = {}
  for (path, blocks) in paths_and_blocks:
    standalone_path = GetMatchingStandalonePath(path, standalone_paths)
    # Skip all but standalone paths.
    if standalone_path is None:
      continue
    assert path.startswith('autoload' + os.path.sep)
    namespace = GetAutoloadNamespace(os.path.relpath(path, 'autoload'))
    standalone_module = standalone_modules.get(standalone_path)
    # Initialize module if this is the first file processed from it.
    if standalone_module is None:
      standalone_module = Module(namespace.rstrip('#'), plugin)
      standalone_modules[standalone_path] = standalone_module
      modules.append(standalone_module)
    for block in blocks:
      standalone_module.Merge(block, namespace=namespace)
  for module in modules:
    module.Close()
    yield module
def GetAutoloadNamespace(filepath):
  """Returns the vim autoload namespace prefix for an autoload-relative path.

  For example, 'foo/bar.vim' maps to 'foo#bar#'.

  Args:
    filepath: Path of a .vim file, relative to the autoload directory.
  Returns:
    The autoload function-name prefix, ending with '#'.
  """
  # Use os.path.sep rather than a literal '/': callers pass paths produced by
  # os.path.relpath, which uses the platform separator (backslash on Windows).
  return (os.path.splitext(filepath)[0]).replace(os.path.sep, '#') + '#'
def GetMatchingStandalonePath(path, standalones):
  """Returns the standalone root that covers path, or None if none does."""
  for standalone in standalones:
    # Exact filename match.
    if path == standalone:
      return standalone
    # Directory match: 'x/y.vim' also claims everything under 'x/'.
    dir_prefix = os.path.splitext(standalone)[0] + os.path.sep
    if path.startswith(dir_prefix):
      return standalone
  return None
def ContainsMaktabaPluginEnterCall(lines):
  """Returns whether lines of vimscript contain a maktaba#plugin#Enter call.

  Args:
    lines: A sequence of vimscript strings to search.
  """
  # Continuation lines are joined first so a call split across lines is found.
  return any(
      'maktaba#plugin#Enter(' in joined and not parser.IsComment(joined)
      for _, joined in parser.EnumerateStripNewlinesAndJoinContinuations(lines))
|
malcolmr/vimdoc
|
vimdoc/module.py
|
Python
|
apache-2.0
| 17,433
|
[
"VisIt"
] |
b00b3182194a9a12d342934eb3b6b7947a630fed4106bc61757e501650479b39
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui import APP, Object, Signal
# Select the widget implementation for the active backend at import time.
# Wildcard-imports the chosen backend's widgets into this module's namespace.
if APP.is_gtk():
    from agui.backends.gtk.widgets import *
elif APP.is_pyside():
    from agui.backends.pyside.widgets import *
else:
    # No backend has been chosen yet, so no widgets can be provided.
    raise RuntimeError('Backend gui not yet chosen')
|
bhdouglass/agui
|
agui/widgets.py
|
Python
|
gpl-3.0
| 990
|
[
"Brian"
] |
1c51243d2fbadf2931a3417395250371b8b6cd5117b18d1e85f1a6ba4c1e74ae
|
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import copy
import itertools
import os
import sys
import re
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
import astroid
from astroid import decorators
from astroid import modutils
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers import utils
# Dunder-style names: exactly two leading and two trailing underscores.
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = '__future__'
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*|^ignored_|^unused_')
# True when running under Python 3.
PY3K = sys.version_info >= (3, 0)
def _is_from_future_import(stmt, name):
    """Check if the name is a future import from another module."""
    try:
        imported = stmt.do_import_module(stmt.modname)
    except astroid.AstroidBuildingException:
        # Module could not be built; nothing to check.
        return
    for candidate in imported.locals.get(name, []):
        if (isinstance(candidate, astroid.ImportFrom)
                and candidate.modname == FUTURE):
            return True
def in_for_else_branch(parent, stmt):
    """Returns True if stmt is inside the else branch for a parent For stmt."""
    if not isinstance(parent, astroid.For):
        return False
    return any(else_stmt == stmt or else_stmt.parent_of(stmt)
               for else_stmt in parent.orelse)
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
    """get overridden method if any"""
    try:
        ancestor = next(klass.local_attr_ancestors(name))
    except (StopIteration, KeyError):
        # No ancestor declares <name>.
        return None
    try:
        candidate = ancestor[name]
    except KeyError:
        # We have found an ancestor defining <name> but it's not in the local
        # dictionary. This may happen with astroid built from living objects.
        return None
    return candidate if isinstance(candidate, astroid.FunctionDef) else None
def _get_unpacking_extra_info(node, infered):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ''
infered_module = infered.root().name
if node.root().name == infered_module:
if node.lineno == infered.lineno:
more = ' %s' % infered.as_string()
elif infered.lineno:
more = ' defined at line %s' % infered.lineno
elif infered.lineno:
more = ' defined at line %s of %s' % (infered.lineno, infered_module)
return more
def _detect_global_scope(node, frame, defframe):
    """Detect that the given frames share a global scope.

    Two frames share a global scope when neither of them (nor any of their
    parent scopes, up to the root scope) is hidden under a function scope.
    In this case, depending on something defined later on will not work,
    because it is still undefined.

    Example:
        class A:
            # B has the same global scope as `C`, leading to a NameError.
            class B(C): ...
        class C: ...
    """
    def_scope = scope = None
    if frame and frame.parent:
        scope = frame.parent.scope()
    if defframe and defframe.parent:
        def_scope = defframe.parent.scope()
    if isinstance(frame, astroid.FunctionDef):
        # If the parent of the current node is a
        # function, then it can be under its scope
        # (defined in, which doesn't concern us) or
        # the `->` part of annotations. The same goes
        # for annotations of function arguments, they'll have
        # their parent the Arguments node.
        if not isinstance(node.parent,
                          (astroid.FunctionDef, astroid.Arguments)):
            return False
    elif any(not isinstance(f, (astroid.ClassDef, astroid.Module))
             for f in (frame, defframe)):
        # Not interested in other frames, since they are already
        # not in a global scope.
        return False

    break_scopes = []
    for s in (scope, def_scope):
        # Look for parent scopes. If there is anything different
        # than a module or a class scope, then the frames don't
        # share a global scope.
        parent_scope = s
        while parent_scope:
            if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
                break_scopes.append(parent_scope)
                break
            if parent_scope.parent:
                parent_scope = parent_scope.parent.scope()
            else:
                break
    if break_scopes and len(set(break_scopes)) != 1:
        # Store different scopes than expected.
        # If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
        # and we could apply our lineno analysis over them.
        # For instance, this works when they are inside a function, the node
        # that uses a definition and the definition itself.
        return False
    # At this point, we are certain that frame and defframe share a scope
    # and the definition of the first depends on the second.
    return frame.lineno < defframe.lineno
def _fix_dot_imports(not_consumed):
    """ Try to fix imports with multiple dots, by returning a dictionary
    with the import names expanded. The function unflattens root imports,
    like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
    and 'xml.sax' respectively.

    NOTE(review): despite the wording above, this returns a *sorted list* of
    (name, stmt) pairs, not a dictionary.
    """
    # TODO: this should be improved in issue astroid #46
    names = {}
    for name, stmts in six.iteritems(not_consumed):
        # Skip names that are augmented-assigned somewhere: those are real
        # uses, not pure imports.
        if any(isinstance(stmt, astroid.AssignName)
               and isinstance(stmt.assign_type(), astroid.AugAssign)
               for stmt in stmts):
            continue
        for stmt in stmts:
            if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
                continue
            for imports in stmt.names:
                second_name = None
                if imports[0] == "*":
                    # In case of wildcard imports,
                    # pick the name from inside the imported module.
                    second_name = name
                else:
                    if imports[0].find(".") > -1 or name in imports:
                        # Most likely something like 'xml.etree',
                        # which will appear in the .locals as 'xml'.
                        # Only pick the name if it wasn't consumed.
                        second_name = imports[0]
                if second_name and second_name not in names:
                    names[second_name] = stmt
    # Stable, source-order result (sorted by the statement's line number).
    return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
    """
    Detect imports in the frame, with the required
    *name*. Such imports can be considered assignments.
    Returns True if an import for the given name was found.
    """
    for import_node in frame.nodes_of_class((astroid.Import, astroid.ImportFrom)):
        for imported, alias in import_node.names:
            # An alias shadows the plain import name, so when an alias exists
            # only the alias counts; otherwise only the import name counts.
            effective = alias or imported
            if effective and effective == name:
                return True
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
    """
    Checks if name_node has corresponding assign statement in same scope
    """
    return any(
        assign.name == name_node.name
        for assign in name_node.scope().nodes_of_class(astroid.AssignName))
MSGS = {
'E0601': ('Using variable %r before assignment',
'used-before-assignment',
'Used when a local variable is accessed before it\'s \
assignment.'),
'E0602': ('Undefined variable %r',
'undefined-variable',
'Used when an undefined variable is accessed.'),
'E0603': ('Undefined variable name %r in __all__',
'undefined-all-variable',
'Used when an undefined variable name is referenced in __all__.'),
'E0604': ('Invalid object %r in __all__, must contain only strings',
'invalid-all-object',
'Used when an invalid (non-string) object occurs in __all__.'),
'E0611': ('No name %r in module %r',
'no-name-in-module',
'Used when a name cannot be found in a module.'),
'W0601': ('Global variable %r undefined at the module level',
'global-variable-undefined',
'Used when a variable is defined through the "global" statement \
but the variable is not defined in the module scope.'),
'W0602': ('Using global for %r but no assignment is done',
'global-variable-not-assigned',
'Used when a variable is defined through the "global" statement \
but no assignment to this variable is done.'),
'W0603': ('Using the global statement', # W0121
'global-statement',
'Used when you use the "global" statement to update a global \
variable. Pylint just try to discourage this \
usage. That doesn\'t mean you cannot use it !'),
'W0604': ('Using the global statement at the module level', # W0103
'global-at-module-level',
'Used when you use the "global" statement at the module level \
since it has no effect'),
'W0611': ('Unused %s',
'unused-import',
'Used when an imported module or variable is not used.'),
'W0612': ('Unused variable %r',
'unused-variable',
'Used when a variable is defined but not used.'),
'W0613': ('Unused argument %r',
'unused-argument',
'Used when a function or method argument is not used.'),
'W0614': ('Unused import %s from wildcard import',
'unused-wildcard-import',
'Used when an imported module or variable is not used from a \
`\'from X import *\'` style import.'),
'W0621': ('Redefining name %r from outer scope (line %s)',
'redefined-outer-name',
'Used when a variable\'s name hides a name defined in the outer \
scope.'),
'W0622': ('Redefining built-in %r',
'redefined-builtin',
'Used when a variable or function override a built-in.'),
'W0623': ('Redefining name %r from %s in exception handler',
'redefine-in-handler',
'Used when an exception handler assigns the exception \
to an existing name'),
'W0631': ('Using possibly undefined loop variable %r',
'undefined-loop-variable',
'Used when an loop variable (i.e. defined by a for loop or \
a list comprehension or a generator expression) is used outside \
the loop.'),
'E0632': ('Possible unbalanced tuple unpacking with '
'sequence%s: '
'left side has %d label(s), right side has %d value(s)',
'unbalanced-tuple-unpacking',
'Used when there is an unbalanced tuple unpacking in assignment',
{'old_names': [('W0632', 'unbalanced-tuple-unpacking')]}),
'E0633': ('Attempting to unpack a non-sequence%s',
'unpacking-non-sequence',
'Used when something which is not '
'a sequence is used in an unpack assignment',
{'old_names': [('W0633', 'unpacking-non-sequence')]}),
'W0640': ('Cell variable %s defined in loop',
'cell-var-from-loop',
'A variable used in a closure is defined in a loop. '
'This will result in all closures using the same value for '
'the closed-over variable.'),
}
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
"""
__implements__ = IAstroidChecker
name = 'variables'
msgs = MSGS
priority = -1
options = (("init-import",
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Tells whether we should check for unused import in '
'__init__ files.'}),
("dummy-variables-rgx",
{'default': '_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_',
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'A regular expression matching the name of dummy '
'variables (i.e. expectedly not used).'}),
("additional-builtins",
{'default': (), 'type' : 'csv',
'metavar' : '<comma separated list>',
'help' : 'List of additional names supposed to be defined in '
'builtins. Remember that you should avoid to define new builtins '
'when possible.'
}),
("callbacks",
{'default' : ('cb_', '_cb'), 'type' : 'csv',
'metavar' : '<callbacks>',
'help' : 'List of strings which can identify a callback '
'function by name. A callback name must start or '
'end with one of those strings.'}
),
("redefining-builtins-modules",
{'default': ('six.moves', 'future.builtins'), 'type': 'csv',
'metavar': '<comma separated list>',
'help': 'List of qualified module names which can have objects '
'that can redefine builtins.'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Default to name with leading underscore'}
),
('allow-global-unused-variables',
{'default': True,
'type': 'yn', 'metavar': '<y_or_n>',
'help': 'Tells whether unused global variables should be treated as a violation.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._to_consume = None # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
self._checking_mod_attr = None
self._loop_variables = []
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, 'analyse-fallback-blocks', default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, 'ignored-modules', default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, 'allow-global-unused-variables', default=True)
@utils.check_messages('redefined-outer-name')
def visit_for(self, node):
assigned_to = [var.name for var in node.target.nodes_of_class(astroid.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if (variable in outer_variables
and not in_for_else_branch(outer_for, node)):
self.add_message(
'redefined-outer-name',
args=(variable, outer_for.fromlineno),
node=node
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages('redefined-outer-name')
def leave_for(self, _):
self._loop_variables.pop()
def visit_module(self, node):
"""visit module : update consumption analysis variable
checks globals doesn't overrides builtins
"""
self._to_consume = [(copy.copy(node.locals), {}, 'module')]
for name, stmts in six.iteritems(node.locals):
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]):
continue
self.add_message('redefined-builtin', args=name, node=stmts[0])
@utils.check_messages('unused-import', 'unused-wildcard-import',
'redefined-builtin', 'undefined-all-variable',
'invalid-all-object', 'unused-variable')
def leave_module(self, node):
"""leave module: check globals
"""
assert len(self._to_consume) == 1
not_consumed = self._to_consume.pop()[0]
# attempt to check for __all__ if defined
if '__all__' in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def _check_all(self, node, not_consumed):
    """Validate a module's __all__ and mark its entries as consumed.

    Emits 'invalid-all-object' for non-string entries and
    'undefined-all-variable' for names not defined in the module.  Names
    listed in __all__ are removed from *not_consumed* so they are not
    reported as unused later.
    """
    assigned = next(node.igetattr('__all__'))
    if assigned is astroid.YES:
        # __all__ could not be inferred; nothing to check.
        return

    for elt in getattr(assigned, 'elts', ()):
        try:
            elt_name = next(elt.infer())
        except astroid.InferenceError:
            continue
        if elt_name is astroid.YES:
            continue
        if not elt_name.parent:
            continue

        if (not isinstance(elt_name, astroid.Const)
                or not isinstance(elt_name.value, six.string_types)):
            self.add_message('invalid-all-object',
                             args=elt.as_string(), node=elt)
            continue

        elt_name = elt_name.value
        # If elt is in not_consumed, remove it from not_consumed
        if elt_name in not_consumed:
            del not_consumed[elt_name]
            continue

        if elt_name not in node.locals:
            if not node.package:
                self.add_message('undefined-all-variable',
                                 args=(elt_name, ),
                                 node=elt)
            else:
                # For a package __init__, the name may refer to a submodule
                # that exists on disk without being imported here.
                basename = os.path.splitext(node.file)[0]
                if os.path.basename(basename) == '__init__':
                    name = node.name + "." + elt_name
                    try:
                        modutils.file_from_modpath(name.split("."))
                    except ImportError:
                        self.add_message('undefined-all-variable',
                                         args=(elt_name, ),
                                         node=elt)
                    except SyntaxError:
                        # don't yield a syntax-error warning here,
                        # because it will be yielded later
                        # when the file itself is checked
                        pass
def _check_globals(self, not_consumed):
    """Emit 'unused-variable' for every unconsumed module-level name.

    Skipped entirely when 'allow-global-unused-variables' is enabled.
    """
    if self._allow_global_unused_variables:
        return
    for name, nodes in six.iteritems(not_consumed):
        for node in nodes:
            self.add_message('unused-variable', args=(name,), node=node)
def _check_imports(self, not_consumed):
    """Emit unused-import / unused-wildcard-import for leftover imports.

    *not_consumed* maps names to their defining statements; only import
    statements among them are considered here.
    """
    local_names = _fix_dot_imports(not_consumed)
    checked = set()
    for name, stmt in local_names:
        for imports in stmt.names:
            real_name = imported_name = imports[0]
            if imported_name == "*":
                # For wildcard imports, the unconsumed local name is the
                # actual symbol that was imported.
                real_name = name
            as_name = imports[1]
            if real_name in checked:
                continue
            if name not in (real_name, as_name):
                continue
            checked.add(real_name)

            if (isinstance(stmt, astroid.Import) or
                    (isinstance(stmt, astroid.ImportFrom) and
                     not stmt.modname)):
                if (isinstance(stmt, astroid.ImportFrom) and
                        SPECIAL_OBJ.search(imported_name)):
                    # Filter special objects (__doc__, __all__) etc.,
                    # because they can be imported for exporting.
                    continue
                if as_name == "_":
                    # Conventionally-ignored alias.
                    continue
                if as_name is None:
                    msg = "import %s" % imported_name
                else:
                    msg = "%s imported as %s" % (imported_name, as_name)
                self.add_message('unused-import', args=msg, node=stmt)
            elif (isinstance(stmt, astroid.ImportFrom)
                  and stmt.modname != FUTURE):
                if SPECIAL_OBJ.search(imported_name):
                    # Filter special objects (__doc__, __all__) etc.,
                    # because they can be imported for exporting.
                    continue
                if _is_from_future_import(stmt, name):
                    # Check if the name is in fact loaded from a
                    # __future__ import in another module.
                    continue
                if imported_name == '*':
                    self.add_message('unused-wildcard-import',
                                     args=name, node=stmt)
                else:
                    if as_name is None:
                        msg = "%s imported from %s" % (imported_name, stmt.modname)
                    else:
                        fields = (imported_name, stmt.modname, as_name)
                        msg = "%s imported from %s as %s" % fields
                    self.add_message('unused-import', args=msg, node=stmt)
    # NOTE(review): tearing down the consumption stack here couples this
    # helper to leave_module (its only caller) -- confirm before reuse.
    del self._to_consume
# Scope push/pop handlers: each visit_* pushes a (locals, consumed,
# scope-type) triple onto self._to_consume; the matching leave_* pops it.
def visit_classdef(self, node):
    """visit class: update consumption analysis variable
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'class'))

def leave_classdef(self, _):
    """leave class: update consumption analysis variable
    """
    # do not check for not used locals here (no sense)
    self._to_consume.pop()

def visit_lambda(self, node):
    """visit lambda: update consumption analysis variable
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'lambda'))

def leave_lambda(self, _):
    """leave lambda: update consumption analysis variable
    """
    # do not check for not used locals here
    self._to_consume.pop()

def visit_generatorexp(self, node):
    """visit genexpr: update consumption analysis variable
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'comprehension'))

def leave_generatorexp(self, _):
    """leave genexpr: update consumption analysis variable
    """
    # do not check for not used locals here
    self._to_consume.pop()

def visit_dictcomp(self, node):
    """visit dictcomp: update consumption analysis variable
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'comprehension'))

def leave_dictcomp(self, _):
    """leave dictcomp: update consumption analysis variable
    """
    # do not check for not used locals here
    self._to_consume.pop()

def visit_setcomp(self, node):
    """visit setcomp: update consumption analysis variable
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'comprehension'))

def leave_setcomp(self, _):
    """leave setcomp: update consumption analysis variable
    """
    # do not check for not used locals here
    self._to_consume.pop()
def visit_functiondef(self, node):
    """visit function: update consumption analysis variable and check locals
    """
    self._to_consume.append((copy.copy(node.locals), {}, 'function'))
    if not (self.linter.is_message_enabled('redefined-outer-name') or
            self.linter.is_message_enabled('redefined-builtin')):
        return
    globs = node.root().globals
    for name, stmt in node.items():
        if utils.is_inside_except(stmt):
            continue
        if name in globs and not isinstance(stmt, astroid.Global):
            definition = globs[name][0]
            if (isinstance(definition, astroid.ImportFrom)
                    and definition.modname == FUTURE):
                # It is a __future__ directive, not a symbol.
                continue
            line = definition.fromlineno
            dummy_rgx = self.config.dummy_variables_rgx
            # Names matching the dummy pattern may shadow freely.
            if not dummy_rgx.match(name):
                self.add_message('redefined-outer-name',
                                 args=(name, line), node=stmt)
        elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(stmt):
            # do not print Redefining builtin for additional builtins
            self.add_message('redefined-builtin', args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
    """Return a truthy value when *name* matches the configured ignore
    pattern: 'ignored-argument-names' for function arguments,
    'dummy-variables-rgx' for everything else.
    """
    authorized_rgx = self.config.dummy_variables_rgx
    if (isinstance(stmt, astroid.AssignName)
            and isinstance(stmt.parent, astroid.Arguments)):
        regex = self.config.ignored_argument_names
    else:
        regex = authorized_rgx
    return regex and regex.match(name)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
    """Report *name* (local to function *node*, defined by *stmt*) as an
    unused argument or unused variable, unless one of the many exemptions
    below applies.
    """
    # Ignore some special names specified by user configuration.
    if self._is_name_ignored(stmt, name):
        return

    # Ignore names that were added dynamically to the Function scope
    if (isinstance(node, astroid.FunctionDef)
            and name == '__class__'
            and len(node.locals['__class__']) == 1
            and isinstance(node.locals['__class__'][0], astroid.ClassDef)):
        return

    # Ignore names imported by the global statement.
    # FIXME: should only ignore them if it's assigned latter
    if isinstance(stmt, astroid.Global):
        return
    if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
        # Detect imports, assigned to global statements.
        if global_names and _import_name_is_global(stmt, global_names):
            return

    # All argument names, positional and keyword-only.
    argnames = list(itertools.chain(
        node.argnames(),
        [arg.name for arg in node.args.kwonlyargs]
    ))
    is_method = node.is_method()
    klass = node.parent.frame()
    if is_method and isinstance(klass, astroid.ClassDef):
        confidence = INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
    else:
        confidence = HIGH

    # Care about functions with unknown argument (builtins)
    if name in argnames:
        if is_method:
            # Don't warn for the first argument of a (non static) method
            if node.type != 'staticmethod' and name == argnames[0]:
                return
            # Don't warn for argument of an overridden method
            overridden = overridden_method(klass, node.name)
            if overridden is not None and name in overridden.argnames():
                return
            if node.name in utils.PYMETHODS and node.name not in ('__init__', '__new__'):
                return
        # Don't check callback arguments
        if any(node.name.startswith(cb) or node.name.endswith(cb)
               for cb in self.config.callbacks):
            return
        # Don't check arguments of singledispatch.register function.
        if utils.is_registered_in_singledispatch_function(node):
            return
        self.add_message('unused-argument', args=name, node=stmt,
                         confidence=confidence)
    else:
        if stmt.parent and isinstance(stmt.parent, astroid.Assign):
            if name in nonlocal_names:
                return

        if isinstance(stmt, astroid.Import):
            # Need the complete name, which we don't have in .locals.
            qname, asname = stmt.names[0]
            name = asname or qname

        self.add_message('unused-variable', args=name, node=stmt)
def leave_functiondef(self, node):
    """leave function: check function's locals are consumed"""
    not_consumed = self._to_consume.pop()[0]
    if not (self.linter.is_message_enabled('unused-variable') or
            self.linter.is_message_enabled('unused-argument')):
        return

    # Don't check arguments of function which are only raising an exception.
    if utils.is_error(node):
        return

    # Don't check arguments of abstract methods or within an interface.
    is_method = node.is_method()
    if is_method and node.is_abstract():
        return

    global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
    nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
    for name, stmts in six.iteritems(not_consumed):
        self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)

# Async functions get the same consumption analysis as regular ones.
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages('global-variable-undefined', 'global-variable-not-assigned',
                      'global-statement', 'global-at-module-level',
                      'redefined-builtin')
def visit_global(self, node):
    """check names imported exists in the global scope"""
    frame = node.frame()
    if isinstance(frame, astroid.Module):
        # `global` is meaningless at module level.
        self.add_message('global-at-module-level', node=node)
        return

    module = frame.root()
    default_message = True
    for name in node.names:
        try:
            assign_nodes = module.getattr(name)
        except astroid.NotFoundError:
            # unassigned global, skip
            assign_nodes = []

        if not assign_nodes:
            self.add_message('global-variable-not-assigned',
                             args=name, node=node)
            default_message = False
            continue

        for anode in assign_nodes:
            if (isinstance(anode, astroid.AssignName)
                    and anode.name in module.special_attributes):
                self.add_message('redefined-builtin', args=name, node=node)
                break
            if anode.frame() is module:
                # module level assignment
                break
        else:
            # global undefined at the module scope
            self.add_message('global-variable-undefined', args=name, node=node)
            default_message = False

    # If no more specific message fired, the bare `global` itself is flagged.
    if default_message:
        self.add_message('global-statement', node=node)
def _check_late_binding_closure(self, node, assignment_node):
    """Emit 'cell-var-from-loop' when *node* (used inside a closure)
    refers to *assignment_node*, a variable bound by an enclosing loop —
    the classic late-binding closure pitfall.
    """
    def _is_direct_lambda_call():
        # A lambda that is called immediately binds the value now, so the
        # late-binding problem does not apply.
        return (isinstance(node_scope.parent, astroid.Call)
                and node_scope.parent.func is node_scope)

    node_scope = node.scope()
    if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
        return
    if isinstance(node.parent, astroid.Arguments):
        return

    if isinstance(assignment_node, astroid.Comprehension):
        if assignment_node.parent.parent_of(node.scope()):
            self.add_message('cell-var-from-loop', node=node, args=node.name)
    else:
        assign_scope = assignment_node.scope()
        maybe_for = assignment_node
        # Walk up until a For node or the assignment's own scope is hit.
        while not isinstance(maybe_for, astroid.For):
            if maybe_for is assign_scope:
                break
            maybe_for = maybe_for.parent
        else:
            if (maybe_for.parent_of(node_scope)
                    and not _is_direct_lambda_call()
                    and not isinstance(node_scope.statement(), astroid.Return)):
                self.add_message('cell-var-from-loop', node=node, args=node.name)
def _loopvar_name(self, node, name):
    """Emit 'undefined-loop-variable' when *name*, used at *node*, is only
    ever defined as a loop/comprehension variable outside this statement.
    """
    # filter variables according to node's scope
    # XXX used to filter parents but don't remember why, and removing this
    # fixes a W0631 false positive reported by Paul Hachmann on 2008/12 on
    # python-projects (added to func_use_for_or_listcomp_var test)
    #astmts = [stmt for stmt in node.lookup(name)[1]
    #          if hasattr(stmt, 'ass_type')] and
    #          not stmt.statement().parent_of(node)]
    if not self.linter.is_message_enabled('undefined-loop-variable'):
        return
    astmts = [stmt for stmt in node.lookup(name)[1]
              if hasattr(stmt, 'ass_type')]
    # filter variables according their respective scope test is_statement
    # and parent to avoid #74747. This is not a total fix, which would
    # introduce a mechanism similar to special attribute lookup in
    # modules. Also, in order to get correct inference in this case, the
    # scope lookup rules would need to be changed to return the initial
    # assignment (which does not exist in code per se) as well as any later
    # modifications.
    if not astmts or (astmts[0].is_statement or astmts[0].parent) \
            and astmts[0].statement().parent_of(node):
        _astmts = []
    else:
        _astmts = astmts[:1]
    for i, stmt in enumerate(astmts[1:]):
        # Drop assignments shadowed by an enclosing one (unless in the
        # enclosing loop's `else` branch).
        if (astmts[i].statement().parent_of(stmt)
                and not in_for_else_branch(astmts[i].statement(), stmt)):
            continue
        _astmts.append(stmt)
    astmts = _astmts
    if len(astmts) == 1:
        assign = astmts[0].assign_type()
        if (isinstance(assign, (astroid.For, astroid.Comprehension,
                                astroid.GeneratorExp))
                and assign.statement() is not node.statement()):
            self.add_message('undefined-loop-variable', args=name, node=node)
def _should_ignore_redefined_builtin(self, stmt):
    """True when *stmt* imports from a module whitelisted by the
    'redefining-builtins-modules' option.
    """
    if not isinstance(stmt, astroid.ImportFrom):
        return False
    return stmt.modname in self.config.redefining_builtins_modules
@utils.check_messages('redefine-in-handler')
def visit_excepthandler(self, node):
    """Warn when `except ... as name` clobbers an existing name."""
    for name in utils.get_all_elements(node.name):
        clobbering, args = utils.clobber_in_except(name)
        if clobbering:
            self.add_message('redefine-in-handler', args=args, node=name)
def visit_assignname(self, node):
    # An augmented assignment (x += 1) reads the name before writing it,
    # so treat the target as a use as well.
    if isinstance(node.assign_type(), astroid.AugAssign):
        self.visit_name(node)

def visit_delname(self, node):
    # `del x` requires x to be defined, so treat it as a name use.
    self.visit_name(node)
@staticmethod
def _defined_in_function_definition(node, frame):
    """True if *node* appears on *frame*'s own definition line, i.e. in an
    argument default value or (on Python 3) in an annotation.
    """
    in_annotation_or_default = False
    if (isinstance(frame, astroid.FunctionDef) and
            node.statement() is frame):
        in_annotation_or_default = (
            (
                PY3K and (node in frame.args.annotations
                          or node in frame.args.kwonlyargs_annotations
                          or node is frame.args.varargannotation
                          or node is frame.args.kwargannotation)
            )
            or
            frame.args.parent_of(node)
        )
    return in_annotation_or_default
@staticmethod
def _next_to_consume(node, name, to_consume):
    """Return the definition of *name* in *to_consume*, or None when the
    only definition is the very assignment using it (e.g. `x = x`).
    """
    # mark the name as consumed if it's defined in this scope
    found_node = to_consume.get(name)
    if (found_node
            and isinstance(node.parent, astroid.Assign)
            and node.parent == found_node[0].parent):
        lhs = found_node[0].parent.targets[0]
        if lhs.name == name:  # this name is defined in this very statement
            found_node = None
    return found_node
@staticmethod
def _is_variable_violation(node, name, defnode, stmt, defstmt,
                           frame, defframe, base_scope_type,
                           recursive_klass):
    """Decide whether using *name* at *node* may be a use-before-assignment
    or undefined-variable situation.

    Returns a (maybee0601, annotation_return, use_outer_definition) triple:
    maybee0601 -- True when the use may precede the definition;
    annotation_return -- True for the return-annotation special case;
    use_outer_definition -- True when an outer-scope definition applies and
    the lookup should continue in the enclosing scope.
    """
    maybee0601 = True
    annotation_return = False
    use_outer_definition = False
    if frame is not defframe:
        maybee0601 = _detect_global_scope(node, frame, defframe)
    elif defframe.parent is None:
        # we are at the module level, check the name is not
        # defined in builtins
        if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
            maybee0601 = False
    else:
        # we are in a local scope, check the name is not
        # defined in global or builtin scope
        # skip this lookup if name is assigned later in function scope
        forbid_lookup = isinstance(frame, astroid.FunctionDef) and _assigned_locally(node)
        if not forbid_lookup and defframe.root().lookup(name)[1]:
            maybee0601 = False
            use_outer_definition = (
                stmt == defstmt
                and not isinstance(defnode, astroid.node_classes.Comprehension)
            )
        else:
            # check if we have a nonlocal
            if name in defframe.locals:
                maybee0601 = not any(isinstance(child, astroid.Nonlocal)
                                     and name in child.names
                                     for child in defframe.get_children())

    if (base_scope_type == 'lambda' and
            isinstance(frame, astroid.ClassDef)
            and name in frame.locals):
        # This rule verifies that if the definition node of the
        # checked name is an Arguments node and if the name
        # is used a default value in the arguments defaults
        # and the actual definition of the variable label
        # is happening before the Arguments definition.
        #
        # bar = None
        # foo = lambda bar=bar: bar
        #
        # In this case, maybee0601 should be False, otherwise
        # it should be True.
        maybee0601 = not (isinstance(defnode, astroid.Arguments) and
                          node in defnode.defaults and
                          frame.locals[name][0].fromlineno < defstmt.fromlineno)
    elif (isinstance(defframe, astroid.ClassDef) and
          isinstance(frame, astroid.FunctionDef)):
        # Special rule for function return annotations,
        # which uses the same name as the class where
        # the function lives.
        if (PY3K and node is frame.returns and
                defframe.parent_of(frame.returns)):
            maybee0601 = annotation_return = True

        # NOTE(review): the membership test uses `defframe.name` while the
        # lookup uses `name` -- confirm this asymmetry is intentional.
        if (maybee0601 and defframe.name in defframe.locals and
                defframe.locals[name][0].lineno < frame.lineno):
            # Detect class assignments with the same
            # name as the class. In this case, no warning
            # should be raised.
            maybee0601 = False

        if isinstance(node.parent, astroid.Arguments):
            maybee0601 = stmt.fromlineno <= defstmt.fromlineno
    elif recursive_klass:
        maybee0601 = True
    else:
        maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
        if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
            if (isinstance(defframe, astroid.FunctionDef)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is not defstmt):
                # Single statement function, with the statement on the
                # same line as the function definition
                maybee0601 = False

    return maybee0601, annotation_return, use_outer_definition
def _ignore_class_scope(self, node, name, frame):
    """Return True when the class-scope lookup of *name* should be skipped
    (the name is not usable from this position in the class body).
    """
    # Detect if we are in a local class scope, as an assignment.
    # For example, the following is fair game.
    #
    # class A:
    #    b = 1
    #    c = lambda b=b: b * b
    #
    # class B:
    #    tp = 1
    #    def func(self, arg: tp):
    #        ...
    # class C:
    #    tp = 2
    #    def func(self, arg=tp):
    #        ...
    in_annotation_or_default = self._defined_in_function_definition(
        node, frame)
    if in_annotation_or_default:
        frame_locals = frame.parent.scope().locals
    else:
        frame_locals = frame.locals
    return not ((isinstance(frame, astroid.ClassDef) or
                 in_annotation_or_default) and
                name in frame_locals)
@utils.check_messages(*(MSGS.keys()))
def visit_name(self, node):
    """check that a name is defined if the current scope and doesn't
    redefine a built-in
    """
    stmt = node.statement()
    if stmt.fromlineno is None:
        # name node from a astroid built from live code, skip
        assert not stmt.root().file.endswith('.py')
        return

    name = node.name
    frame = stmt.scope()
    # if the name node is used as a function default argument's value or as
    # a decorator, then start from the parent frame of the function instead
    # of the function frame - and thus open an inner class scope
    if (utils.is_func_default(node) or utils.is_func_decorator(node)
            or utils.is_ancestor_name(frame, node)):
        start_index = len(self._to_consume) - 2
    else:
        start_index = len(self._to_consume) - 1
    # iterates through parent scopes, from the inner to the outer
    base_scope_type = self._to_consume[start_index][-1]
    # pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
    for i in range(start_index, -1, -1):
        to_consume, consumed, scope_type = self._to_consume[i]
        # if the current scope is a class scope but it's not the inner
        # scope, ignore it. This prevents to access this scope instead of
        # the globals one in function members when there are some common
        # names. The only exception is when the starting scope is a
        # comprehension and its direct outer scope is a class
        if scope_type == 'class' and i != start_index and not (
                base_scope_type == 'comprehension' and i == start_index-1):
            if self._ignore_class_scope(node, name, frame):
                continue

        # the name has already been consumed, only check it's not a loop
        # variable used outside the loop
        if name in consumed:
            defnode = utils.assign_parent(consumed[name][0])
            self._check_late_binding_closure(node, defnode)
            self._loopvar_name(node, name)
            break

        found_node = self._next_to_consume(node, name, to_consume)
        if found_node is None:
            continue

        # checks for use before assignment
        defnode = utils.assign_parent(to_consume[name][0])
        if defnode is not None:
            self._check_late_binding_closure(node, defnode)
            defstmt = defnode.statement()
            defframe = defstmt.frame()
            # The class reuses itself in the class scope.
            recursive_klass = (frame is defframe and
                               defframe.parent_of(node) and
                               isinstance(defframe, astroid.ClassDef) and
                               node.name == defframe.name)

            maybee0601, annotation_return, use_outer_definition = self._is_variable_violation(
                node, name, defnode, stmt, defstmt,
                frame, defframe,
                base_scope_type, recursive_klass)

            if use_outer_definition:
                # Keep looking in the enclosing scope.
                continue

            if (maybee0601
                    and not utils.is_defined_before(node)
                    and not astroid.are_exclusive(stmt, defstmt, ('NameError',))):

                # Used and defined in the same place, e.g `x += 1` and `del x`
                defined_by_stmt = (
                    defstmt is stmt
                    and isinstance(node, (astroid.DelName, astroid.AssignName))
                )

                if (recursive_klass
                        or defined_by_stmt
                        or annotation_return
                        or isinstance(defstmt, astroid.Delete)):
                    if not utils.node_ignores_exception(node, NameError):
                        self.add_message('undefined-variable', args=name,
                                         node=node)
                elif base_scope_type != 'lambda':
                    # E0601 may *not* occurs in lambda scope.
                    self.add_message('used-before-assignment', args=name, node=node)
                elif base_scope_type == 'lambda':
                    # E0601 can occur in class-level scope in lambdas, as in
                    # the following example:
                    #   class A:
                    #      x = lambda attr: f + attr
                    #      f = 42
                    if isinstance(frame, astroid.ClassDef) and name in frame.locals:
                        if isinstance(node.parent, astroid.Arguments):
                            if stmt.fromlineno <= defstmt.fromlineno:
                                # Doing the following is fine:
                                #   class A:
                                #      x = 42
                                #      y = lambda attr=x: attr
                                self.add_message('used-before-assignment',
                                                 args=name, node=node)
                        else:
                            self.add_message('undefined-variable',
                                             args=name, node=node)
                    elif scope_type == 'lambda':
                        self.add_message('undefined-variable',
                                         node=node, args=name)

        consumed[name] = found_node
        del to_consume[name]
        # check it's not a loop variable used outside the loop
        self._loopvar_name(node, name)
        break
    else:
        # we have not found the name, if it isn't a builtin, that's an
        # undefined name !
        if not (name in astroid.Module.scope_attrs or utils.is_builtin(name)
                or name in self.config.additional_builtins):
            if not utils.node_ignores_exception(node, NameError):
                self.add_message('undefined-variable', args=name, node=node)
@utils.check_messages('no-name-in-module')
def visit_import(self, node):
    """check modules attribute accesses"""
    if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
        # No need to verify this, since ImportError is already
        # handled by the client code.
        return

    for name, _ in node.names:
        # `import a.b.c` -- verify that b and c exist inside a.
        parts = name.split('.')
        try:
            module = next(node.infer_name_module(parts[0]))
        except astroid.ResolveError:
            continue
        self._check_module_attrs(node, module, parts[1:])
@utils.check_messages('no-name-in-module')
def visit_importfrom(self, node):
    """check modules attribute accesses"""
    if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
        # No need to verify this, since ImportError is already
        # handled by the client code.
        return

    name_parts = node.modname.split('.')
    try:
        module = node.do_import_module(name_parts[0])
    except astroid.AstroidBuildingException:
        return
    module = self._check_module_attrs(node, module, name_parts[1:])
    if not module:
        return
    for name, _ in node.names:
        if name == '*':
            # Wildcard imports cannot be attribute-checked.
            continue
        self._check_module_attrs(node, module, name.split('.'))
@utils.check_messages('unbalanced-tuple-unpacking', 'unpacking-non-sequence')
def visit_assign(self, node):
    """Check unbalanced tuple unpacking for assignments
    and unpacking non-sequences.
    """
    # Only tuple/list targets perform unpacking.
    if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
        return

    targets = node.targets[0].itered()
    try:
        infered = utils.safe_infer(node.value)
        if infered is not None:
            self._check_unpacking(infered, node, targets)
    except astroid.InferenceError:
        return
def _check_unpacking(self, infered, node, targets):
    """ Check for unbalanced tuple unpacking
    and unpacking non sequences.
    """
    if utils.is_inside_abstract_class(node):
        return
    if utils.is_comprehension(node):
        return
    if infered is astroid.YES:
        return
    if (isinstance(infered.parent, astroid.Arguments) and
            isinstance(node.value, astroid.Name) and
            node.value.name == infered.parent.vararg):
        # Variable-length argument, we can't determine the length.
        return
    if isinstance(infered, (astroid.Tuple, astroid.List)):
        # attempt to check unpacking is properly balanced
        values = infered.itered()
        if len(targets) != len(values):
            # Check if we have starred nodes.
            if any(isinstance(target, astroid.Starred)
                   for target in targets):
                return
            self.add_message('unbalanced-tuple-unpacking', node=node,
                             args=(_get_unpacking_extra_info(node, infered),
                                   len(targets),
                                   len(values)))
    # attempt to check unpacking may be possible (ie RHS is iterable)
    else:
        if not utils.is_iterable(infered):
            self.add_message('unpacking-non-sequence', node=node,
                             args=(_get_unpacking_extra_info(node, infered),))
def _check_module_attrs(self, node, module, module_names):
    """check that module_names (list of string) are accessible through the
    given module
    if the latest access name corresponds to a module, return it
    """
    assert isinstance(module, astroid.Module), module
    while module_names:
        name = module_names.pop(0)
        if name == '__dict__':
            # Anything may be reached through __dict__; stop checking.
            module = None
            break
        try:
            module = next(module.getattr(name)[0].infer())
            if module is astroid.YES:
                return None
        except astroid.NotFoundError:
            if module.name in self._ignored_modules:
                return None
            self.add_message('no-name-in-module',
                             args=(name, module.name), node=node)
            return None
        except astroid.InferenceError:
            return None
    if module_names:
        # FIXME: other message if name is not the latest part of
        # module_names ?
        modname = module.name if module else '__dict__'
        self.add_message('no-name-in-module', node=node,
                         args=('.'.join(module_names), modname))
        return None
    if isinstance(module, astroid.Module):
        return module
    return None
class VariablesChecker3k(VariablesChecker):
    '''Modified variables checker for 3k'''
    # listcomp have now also their scope

    def visit_listcomp(self, node):
        """visit listcomp: update consumption analysis variable
        """
        self._to_consume.append((copy.copy(node.locals), {}, 'comprehension'))

    def leave_listcomp(self, _):
        """leave listcomp: update consumption analysis variable
        """
        # do not check for not used locals here
        self._to_consume.pop()

    def leave_functiondef(self, node):
        # Run metaclass consumption analysis before the base checks.
        self._check_metaclasses(node)
        super(VariablesChecker3k, self).leave_functiondef(node)

    def leave_module(self, node):
        # Run metaclass consumption analysis before the base checks.
        self._check_metaclasses(node)
        super(VariablesChecker3k, self).leave_module(node)

    def _check_metaclasses(self, node):
        """ Update consumption analysis for metaclasses. """
        consumed = []  # [(scope_locals, consumed_key)]

        for child_node in node.get_children():
            if isinstance(child_node, astroid.ClassDef):
                consumed.extend(self._check_classdef_metaclasses(child_node, node))

        # Pop the consumed items, in order to avoid having
        # unused-import and unused-variable false positives
        for scope_locals, name in consumed:
            scope_locals.pop(name, None)

    def _check_classdef_metaclasses(self, klass, parent_node):
        """Mark the metaclass name of *klass* as consumed; warn when it
        cannot be resolved at all."""
        if not klass._metaclass:
            # Skip if this class doesn't use explicitly a metaclass, but inherits it from ancestors
            return []

        consumed = []  # [(scope_locals, consumed_key)]
        metaclass = klass.metaclass()

        name = None
        if isinstance(klass._metaclass, astroid.Name):
            name = klass._metaclass.name
        elif metaclass:
            name = metaclass.root().name

        found = None
        if name:
            # check enclosing scopes starting from most local
            for scope_locals, _, _ in self._to_consume[::-1]:
                found = scope_locals.get(name)
                if found:
                    consumed.append((scope_locals, name))
                    break

        if found is None and not metaclass:
            name = None
            if isinstance(klass._metaclass, astroid.Name):
                name = klass._metaclass.name
            elif isinstance(klass._metaclass, astroid.Attribute):
                name = klass._metaclass.as_string()

            if name is not None:
                if not (name in astroid.Module.scope_attrs or
                        utils.is_builtin(name) or
                        name in self.config.additional_builtins or
                        name in parent_node.locals):
                    self.add_message('undefined-variable',
                                     node=klass,
                                     args=(name,))

        return consumed
# On Python 3, replace the checker with the 3k variant (adds listcomp
# scopes and metaclass consumption analysis).
if sys.version_info >= (3, 0):
    VariablesChecker = VariablesChecker3k


def register(linter):
    """required method to auto register this checker"""
    linter.register_checker(VariablesChecker(linter))
|
rogalski/pylint
|
pylint/checkers/variables.py
|
Python
|
gpl-2.0
| 57,018
|
[
"VisIt"
] |
02b9ec30bff833c21256d5cf4ac8f56502e88650f919a6366c6f9c4d3a4002d1
|
import os
import re
import sys
import uuid
import requests
from ast import literal_eval
from subprocess import call
conf = {} # populated by given command line arguments via start()
def request(context, flow):
    """mitmproxy inline-script hook: intercept Snapchat blob requests.

    When the proxied request targets the Snapchat blob endpoint, the blob
    is re-downloaded directly, written to ./blob and decrypted into the
    configured snaps directory via the bundled Ruby script.
    (Python 2 / legacy mitmproxy inline-script API.)
    """
    request = flow.request
    if(request.host == "feelinsonice-hrd.appspot.com" and request.path == '/bq/blob'):
        # NOTE(review): request.content is the request body; appending it as
        # a query string presumably matches Snapchat's API -- confirm.
        blobURI = "http://"+request.host+request.path+"?"+request.content
        log("Downloading intercepted blob ("+blobURI+")")
        r = requests.get(blobURI)
        log("Downloaded blob")
        blobFile = './blob'
        b = open(blobFile, 'w+')
        # NOTE(review): binary payload written through a text-mode handle and
        # `print >>` (which appends a newline) -- 'wb' + b.write() would be
        # safer; confirm the Ruby decryptor tolerates this.
        print >> b, r.content
        log("Decrypting file...")
        if(not os.path.exists(os.path.expanduser(conf['snapsDir']))):
            os.makedirs(os.path.expanduser(conf['snapsDir']))
        # Unique output name per intercepted snap.
        outputFile = os.path.expanduser(conf['snapsDir']) + 'snap_%s' % str(uuid.uuid4())
        rubyFile = conf['pyDir']+'/decrypt_snap.rb'
        call(['ruby', rubyFile, blobFile, outputFile])
        log("File decrypted! Your picture is now available.")
        log("---- Waiting for a Snapchat... ----")
def start(context, argv):
    """mitmproxy entry point: decode the CLI config dict and announce startup.

    argv[1] is a Python-literal dict (port, snapsDir, verbose, ...) parsed
    with ast.literal_eval; argv[0] is this script's path.
    """
    global conf
    conf = literal_eval(argv[1]) # decode all script arguments
    conf['pyDir'] = os.path.dirname(argv[0])
    # Normalize the snaps directory to always end with a slash.
    snapsDir = conf['snapsDir']
    if(not snapsDir[-1:] == '/'):
        snapsDir = snapsDir+'/'
    conf['snapsDir'] = snapsDir
    print 'Snapception is now running on Port %s. Configure your device to point to this port via a proxy. Intercepted snaps will appear in \"%s\"' % (conf['port'], conf['snapsDir'][:-1])
    print 'If you have not already done so, you will need to install a Certificate Authority. The easiest way to do this is to visit mitm.it on your device.'
    log("---- Waiting for a Snapchat... ----")
def log(str):
    """Print *str* when verbose mode is off.

    NOTE(review): the condition looks inverted -- messages only print when
    conf['verbose'] is falsy; confirm the intended meaning of 'verbose'.
    Also shadows the builtin `str` (kept as-is for byte-identical behavior).
    """
    if(not conf['verbose']):
        print str
|
thebradbain/snapception
|
snapception/mitmdump_input.py
|
Python
|
mit
| 1,693
|
[
"VisIt"
] |
d8a5d61a619dcb8f16956d83cca13ba5e9f1aa03b450421c74b4b3df0f5c2179
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
""" Generation tools for NNGT """
import logging
import numpy as np
import scipy.sparse as ssp
from scipy.spatial.distance import cdist
from numpy.random import randint
import nngt
from nngt.lib import InvalidArgument, nonstring_container
from nngt.lib.logger import _log_message
logger = logging.getLogger(__name__)
__all__ = [
"_check_num_edges",
"_compute_connections",
"_filter",
"_no_self_loops",
"_set_degree_type",
"_set_options",
"_unique_rows",
"dist_rule",
"max_proba_dist_rule"
]
def _set_options(graph, population, shape, positions):
    '''
    Promote `graph` to a network and/or a spatial graph.

    A non-None `population` attaches it as a network; a non-None `shape`
    or `positions` attaches the spatial information.
    '''
    has_population = population is not None
    has_geometry = not (shape is None and positions is None)
    if has_population:
        nngt.Graph.make_network(graph, population)
    if has_geometry:
        nngt.Graph.make_spatial(graph, shape, positions)
def _compute_connections(num_source, num_target, density, edges, avg_deg,
directed, reciprocity=-1):
assert (density, edges, avg_deg) != (None, None, None), \
"At leat one of the following entries must be specified: 'density', " \
"'edges', 'avg_deg'."
pre_recip_edges = 0
if avg_deg is not None:
pre_recip_edges = int(avg_deg * num_source)
elif edges is not None:
pre_recip_edges = int(edges)
else:
pre_recip_edges = int(density * num_source * num_target)
dens = pre_recip_edges / float(num_source * num_target)
edges = pre_recip_edges
if edges:
if reciprocity > max(0,(2.-1./dens)):
frac_recip = ((reciprocity - 1.
+ np.sqrt(1. + dens*(reciprocity - 2.))) /
(2. - reciprocity))
if frac_recip < 1.:
pre_recip_edges = int(edges/(1+frac_recip))
else:
raise InvalidArgument(
"Such reciprocity cannot be obtained, request ignored.")
elif reciprocity > 0.:
raise InvalidArgument(
"Reciprocity cannot be lower than 2-1/density.")
return edges, pre_recip_edges
def _check_num_edges(source_ids, target_ids, num_edges, directed, multigraph,
return_sets=False):
num_source, num_target = len(source_ids), len(target_ids)
source_set, target_set = None, None
has_only_one_population = (num_source == num_target)
if has_only_one_population:
source_set = set(source_ids)
target_set = set(target_ids)
has_only_one_population = (source_set == target_set)
if not has_only_one_population and not multigraph:
b_d = (num_edges > num_source*num_target)
b_nd = (num_edges > int(0.5*num_source*num_target))
if (not directed and b_nd) or (directed and b_d):
raise InvalidArgument("Required number of edges is too high")
elif has_only_one_population and not multigraph:
b_d = (num_edges > num_source*(num_target-1))
b_nd = (num_edges > int(0.5*(num_source-1)*num_target))
if (not directed and b_nd) or (directed and b_d):
raise InvalidArgument("Required number of edges is too high")
if return_sets:
return has_only_one_population, source_set, target_set
return has_only_one_population
def _set_degree_type(degree_type):
deg_map = {
"in-degree": "in", "out-degree": "out", "total-degree": "total",
"in": "in", "out": "out", "total": "total"
}
try:
degree_type = deg_map[degree_type]
except KeyError:
raise ValueError("`degree_type` must be either 'in', 'out', 'total', "
"or the full version 'in-degree', 'out-degree', "
"'total-degree'.")
return degree_type
# ------------------------- #
# Edge checks and filtering #
# ------------------------- #
def _unique_rows(arr, return_index=False):
'''
Keep only unique edges
'''
b = np.ascontiguousarray(arr).view(
np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
b, idx = np.unique(b, return_index=True)
unique = b.view(arr.dtype).reshape(-1, arr.shape[1]).astype(int)
if return_index:
return unique, idx
return unique
def _no_self_loops(array, return_test=False):
'''
Remove self-loops
'''
test = array[:, 0] != array[:, 1]
if return_test:
return array[test, :].astype(int), test
return array[test, :].astype(int)
def _filter(ia_edges, ia_edges_tmp, num_ecurrent, edges_hash, b_one_pop,
            multigraph, directed=True, recip_hash=None, distance=None,
            dist_tmp=None):
    '''
    Filter the edges: remove self loops and multiple connections if the graph
    is not a multigraph.

    Valid edges from `ia_edges_tmp` are written into `ia_edges` starting at
    index `num_ecurrent`; the (possibly updated) array and the new edge count
    are returned.

    Parameters
    ----------
    ia_edges : array
        Pre-allocated output array of edges, filled in place.
    ia_edges_tmp : array
        Candidate edges to add.
    num_ecurrent : int
        Current number of edges already stored in `ia_edges`.
    edges_hash : set
        Set of edge tuples already accepted (mutated in place).
    b_one_pop : bool
        Whether sources and targets are the same population (self-loops
        possible, so they must be removed).
    multigraph : bool
        If True, duplicate edges are allowed and no hash filtering is done.
    directed : bool, optional (default: True)
        If False, the reciprocal of each accepted edge is also recorded in
        `recip_hash` so it is not added twice.
    recip_hash : set, optional
        Set of reciprocal edge tuples (required when ``directed=False``).
    distance : list, optional
        Accumulator for accepted edge distances (mutated in place).
    dist_tmp : sequence, optional
        Distances matching `ia_edges_tmp`, filtered alongside the edges.

    Returns
    -------
    (ia_edges, num_ecurrent) : the output array and updated edge count.
    '''
    if b_one_pop:
        # same population on both sides: drop self-loops, and drop the
        # matching distances so both sequences stay aligned
        ia_edges_tmp, test = _no_self_loops(ia_edges_tmp, return_test=True)
        if dist_tmp is not None:
            dist_tmp = dist_tmp[test]
    if not multigraph:
        # resynchronize the counter with the number of accepted edges so far
        num_ecurrent = len(edges_hash)
        if distance is not None:
            # same filtering as below, but distances are kept in step with
            # the accepted edges
            for e, d in zip(ia_edges_tmp, dist_tmp):
                tpl_e = tuple(e)
                if tpl_e not in edges_hash:
                    # for undirected graphs, also reject an edge whose
                    # reciprocal was already accepted
                    if directed or tpl_e not in recip_hash:
                        ia_edges[num_ecurrent] = e
                        distance.append(d)
                        edges_hash.add(tpl_e)
                        if not directed:
                            recip_hash.add(tpl_e[::-1])
                        num_ecurrent += 1
        else:
            for e in ia_edges_tmp:
                tpl_e = tuple(e)
                if tpl_e not in edges_hash:
                    if directed or tpl_e not in recip_hash:
                        ia_edges[num_ecurrent] = e
                        edges_hash.add(tpl_e)
                        if not directed:
                            recip_hash.add(tpl_e[::-1])
                        num_ecurrent += 1
    else:
        # multigraph: accept everything that survived the self-loop filter
        num_added = len(ia_edges_tmp)
        ia_edges[num_ecurrent:num_ecurrent + num_added, :] = ia_edges_tmp
        num_ecurrent += num_added
        if distance is not None:
            distance.extend(dist_tmp)
    return ia_edges, num_ecurrent
def _cleanup_edges(g, edges, attributes, duplicates, loops, existing, ignore):
    '''
    Cleanup a list of edges.

    Depending on the flags, removes (or raises on) self-loops, duplicate
    edges, and edges already present in the graph `g`.

    Parameters
    ----------
    g : graph object
        Used for directedness (`is_directed`) and existing edges
        (`edges_array`).
    edges : list of edges
        Candidate edges (pairs of node indices).
    attributes : dict
        Per-edge attribute values; each value is either a sequence aligned
        with `edges` or a scalar applied to all edges.
    duplicates : bool
        Whether duplicate edges must be checked.
    loops : bool
        Whether self-loops must be checked.
    existing : bool
        Whether edges already present in `g` must be checked.
    ignore : bool
        If True, invalid edges are logged and dropped; otherwise
        InvalidArgument is raised.

    Returns
    -------
    (new_edges, new_attr) : filtered edges array and matching attributes.
    '''
    loops_only = loops and not (duplicates or existing)

    new_edges = None
    new_attr = {}

    directed = g.is_directed()

    if loops_only:
        edges = np.asarray(edges)
        new_edges, test = _no_self_loops(edges, return_test=True)
        if len(new_edges) != len(edges):
            if ignore:
                _log_message(logger, "WARNING",
                             "Self-loops ignored: {}.".format(edges[~test]))
            else:
                raise InvalidArgument(
                    "Self-loops are present: {}.".format(edges[~test]))
        # BUG FIX: the unpacking order was inverted (`for v, k in ...`),
        # which used the attribute values as keys; dict.items() yields
        # (key, value) pairs.
        # NOTE(review): this branch assumes each attribute value is a
        # per-edge sequence (a scalar would fail the boolean indexing) —
        # confirm against callers.
        new_attr = {k: np.asarray(v)[test] for k, v in attributes.items()}
    else:
        # check (also) either duplicates or existing
        new_attr = {key: [] for key in attributes}
        edge_set = set()
        new_edges = []
        if existing:
            edge_set = {tuple(e) for e in g.edges_array}
        for i, e in enumerate(edges):
            tpl_e = tuple(e)
            # for undirected graphs, the reciprocal counts as the same edge
            if tpl_e in edge_set or (not directed and tpl_e[::-1] in edge_set):
                if ignore:
                    _log_message(logger, "WARNING",
                                 "Existing edge {} ignored.".format(tpl_e))
                else:
                    raise InvalidArgument(
                        "Edge {} already exists.".format(tpl_e))
            elif loops and e[0] == e[1]:
                if ignore:
                    _log_message(logger, "WARNING",
                                 "Self-loop on {} ignored.".format(e[0]))
                else:
                    raise InvalidArgument("Self-loop on {}.".format(e[0]))
            else:
                edge_set.add(tpl_e)
                new_edges.append(tpl_e)
                if not directed:
                    edge_set.add(tpl_e[::-1])
                # keep attributes aligned with the accepted edges;
                # scalar values are repeated for every edge
                for k, vv in attributes.items():
                    if nonstring_container(vv):
                        new_attr[k].append(vv[i])
                    else:
                        new_attr[k].append(vv)
        new_edges = np.asarray(new_edges)

    return new_edges, new_attr
# ------------- #
# Distance rule #
# ------------- #
def dist_rule(rule, scale, pos_src, pos_targets, dist=None):
    '''
    DR test from one source to several targets.

    Parameters
    ----------
    rule : str
        Either 'exp', 'gaussian', or 'lin'.
    scale : float
        Characteristic scale.
    pos_src : array of shape (2, N)
        Positions of the sources.
    pos_targets : array of shape (2, N)
        Positions of the targets.
    dist : list, optional (default: None)
        List that will be filled with the distances of the edges.

    Returns
    -------
    Array of size N giving the probability of the edges according to the rule.
    '''
    displacement = pos_targets - pos_src
    zero = np.array([(0., 0.)])

    # todo correct this
    distances = np.squeeze(cdist(displacement.T, zero), axis=1)

    if dist is not None:
        dist.extend(distances)

    if rule == 'exp':
        return np.exp(np.divide(distances, -scale))

    if rule == 'gaussian':
        return np.exp(-0.5*np.square(np.divide(distances, scale)))

    if rule == 'lin':
        return np.divide(scale - distances, scale).clip(min=0.)

    raise InvalidArgument('Unknown rule "' + rule + '".')
def max_proba_dist_rule(rule, scale, max_proba, pos_src, pos_targets,
                        dist=None):
    '''
    DR test from one source to several targets.

    Parameters
    ----------
    rule : str
        Either 'exp', 'gaussian', or 'lin'.
    scale : float
        Characteristic scale.
    max_proba : float
        Maximum probability, reached at zero distance.
    pos_src : 2-tuple
        Position of the source.
    pos_targets : array of shape (2, N)
        Positions of the targets.
    dist : list, optional (default: None)
        List that will be filled with the distances of the edges.

    Returns
    -------
    Array of size N giving the probability of the edges according to the rule.
    '''
    x, y = pos_src
    # vector from the source to each target (a duplicate, unused copy of
    # this computation was removed)
    vect = pos_targets - np.repeat([[x], [y]], pos_targets.shape[1], axis=1)
    origin = np.array([(0., 0.)])
    # todo correct this
    dist_tmp = np.squeeze(cdist(vect.T, origin), axis=1)
    if dist is not None:
        dist.extend(dist_tmp)
    if rule == 'exp':
        return max_proba*np.exp(np.divide(dist_tmp, -scale))
    elif rule == 'gaussian':
        return max_proba*np.exp(-0.5*np.square(np.divide(dist_tmp, scale)))
    elif rule == 'lin':
        return max_proba*np.divide(scale - dist_tmp, scale).clip(min=0.)
    else:
        raise InvalidArgument('Unknown rule "' + rule + '".')
def _set_dist_new_edges(new_attr, graph, edge_list):
    ''' Add the distances to the edge attributes '''
    # nothing to do for non-spatial graphs or if distances were provided
    if not graph.is_spatial() or "distance" in new_attr:
        return

    if len(edge_list) == 1:
        # single edge: only fetch the two endpoint positions
        pos = graph.get_positions(list(edge_list[0]))
        new_attr["distance"] = cdist([pos[0]], [pos[1]])[0][0]
    else:
        # several edges: compute the full pairwise distance matrix once
        pos = graph.get_positions()
        dmat = cdist(pos, pos)
        new_attr["distance"] = [dmat[e[0], e[1]] for e in edge_list]
def _set_default_edge_attributes(g, attributes, num_edges):
''' Set default edge attributes values '''
for k in g.edge_attributes:
skip = k in ("weight", "distance", "delay")
if k not in attributes:
dtype = g.get_attribute_type(k)
if dtype == "string":
attributes[k] = ["" for _ in range(num_edges)]
elif dtype == "double" and not skip:
attributes[k] = [np.NaN for _ in range(num_edges)]
elif dtype == "int":
attributes[k] = [0 for _ in range(num_edges)]
elif not skip:
attributes[k] = [None for _ in range(num_edges)]
|
Silmathoron/NNGT
|
nngt/lib/connect_tools.py
|
Python
|
gpl-3.0
| 12,235
|
[
"Gaussian"
] |
06a1eed7a64d988b2722e65ddb8055f9f6027493046616a6ae34e0d2cec4d9f1
|
'''
Created on Nov 7, 2010
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import collections
import shutil
import os
# local imports
from chimerascan import pysam
from chimerascan.lib.chimera import Chimera, DiscordantRead, \
DiscordantTags, DISCORDANT_TAG_NAME, \
OrientationTags, ORIENTATION_TAG_NAME
from chimerascan.lib.base import LibraryTypes
from chimerascan.pipeline.find_discordant_reads import get_gene_orientation
def parse_group_by_attr(myiter, attr):
    """Group consecutive items of `myiter` by the value of attribute `attr`.

    Yields (value, items) pairs, one per run of consecutive items sharing
    the same attribute value.
    """
    group = []
    key = None
    for item in myiter:
        item_key = getattr(item, attr)
        if key != item_key:
            # attribute changed: flush the previous run (if any)
            if group:
                yield key, group
            group = []
            key = item_key
        group.append(item)
    # flush the trailing run
    if group:
        yield key, group
def parse_sync_by_breakpoint(chimera_file, bam_file):
    """Iterate chimeras and BAM reads in sync, grouped by breakpoint name.

    Yields (chimeras, reads) pairs where `reads` is the list of alignments
    whose reference name matches the chimeras' breakpoint name (or an empty
    list when no reads align to that breakpoint).

    NOTE: Python 2 code (`iterator.next()`); the chimera file and the BAM
    file are assumed to be sorted by breakpoint name so a single merge-style
    pass suffices — confirm against the pipeline's sorting step.
    """
    # group reads by reference name (matches breakpoint name)
    bamfh = pysam.Samfile(bam_file, "rb")
    tid_rname_map = list(bamfh.references)
    # initialize iterator through reads
    read_iter = parse_group_by_attr(bamfh, "rname")
    read_iter_valid = True
    try:
        rname, reads = read_iter.next()
        read_breakpoint_name = tid_rname_map[rname]
    except StopIteration:
        bamfh.close()
        read_iter_valid = False
        reads = []
        # sentinel that sorts after any real breakpoint name, so all
        # remaining chimeras are yielded with empty read lists
        read_breakpoint_name = "ZZZZZZZZZZZZZZ"
    # group chimeras by breakpoint name
    for chimera_breakpoint_name, chimeras in \
        parse_group_by_attr(Chimera.parse(open(chimera_file)),
                            "breakpoint_name"):
        # advance the read iterator until it catches up with the chimeras
        while (read_iter_valid) and (chimera_breakpoint_name > read_breakpoint_name):
            try:
                rname, reads = read_iter.next()
                read_breakpoint_name = tid_rname_map[rname]
            except StopIteration:
                read_iter_valid = False
                reads = []
        if chimera_breakpoint_name < read_breakpoint_name:
            # no reads aligned to this breakpoint
            yield chimeras, []
        else:
            yield chimeras, reads
    bamfh.close()
def get_mismatch_positions(md):
    """Parse a SAM 'MD' tag string and return the match-run lengths that
    precede each mismatched base.

    For example "10A5C0" yields [10, 5]: 10 matches before the first
    mismatch and 5 matches between the first and second mismatch.

    NOTE(review): these are per-run offsets, not cumulative positions along
    the read; the caller (check_breakpoint_alignment) compares them against
    an absolute anchor interval, which is only exact for the first
    mismatch — confirm intended behavior before changing. Deletion markers
    ('^') in the MD string are not handled here.
    """
    x = 0
    pos = []
    # `range` instead of Python-2-only `xrange` (identical results,
    # keeps the function importable under Python 3)
    for y in range(len(md)):
        if md[y].isalpha():
            # digits between x and y encode the length of the match run
            offset = int(md[x:y])
            pos.append(offset)
            x = y + 1
    return pos
def check_breakpoint_alignment(c, r,
                               anchor_min,
                               anchor_length,
                               anchor_mismatches):
    """
    returns True if read 'r' meets criteria for a valid
    breakpoint spanning read, False otherwise

    c - Chimera object
    r - pysam AlignedRead object
    anchor_min - minimum overlap (bp) required on each side of the
        breakpoint
    anchor_length - overlap threshold below which mismatches are counted
    anchor_mismatches - maximum mismatches tolerated within a short anchor
    """
    # get position of breakpoint along seq
    breakpoint_pos = len(c.breakpoint_seq_5p)
    # check if read spans breakpoint
    if not (r.pos < breakpoint_pos < r.aend):
        return False
    # calculate amount in bp that read overlaps breakpoint
    # and ensure overlap is sufficient
    # (the homology lengths raise the requirement so that the overlap is
    # informative beyond the homologous sequence around the junction)
    left_anchor_bp = breakpoint_pos - r.pos
    if left_anchor_bp < max(c.homology_left, anchor_min):
        return False
    right_anchor_bp = r.aend - breakpoint_pos
    if right_anchor_bp < max(c.homology_right, anchor_min):
        return False
    # ensure that alignments with anchor overlap less than 'anchor_length'
    # do not have more than 'anchor_mismatches' mismatches in the
    # first 'anchor_length' bases
    if min(left_anchor_bp, right_anchor_bp) < anchor_length:
        # find interval of smallest anchor
        if left_anchor_bp < anchor_length:
            anchor_interval = (0, left_anchor_bp)
        else:
            aligned_length = r.aend - r.pos
            anchor_interval = (aligned_length - right_anchor_bp, aligned_length)
        # get positions where mismatches occur
        # NOTE(review): get_mismatch_positions returns per-run offsets, not
        # cumulative read positions — exact only for the first mismatch;
        # confirm before relying on multi-mismatch filtering.
        mmpos = get_mismatch_positions(r.opt('MD'))
        # see if any mismatches lie in anchor interval
        anchor_mm = [pos for pos in mmpos
                     if anchor_interval[0] <= pos < anchor_interval[1]]
        if len(anchor_mm) > anchor_mismatches:
            # too many mismatches within anchor position
            return False
    return True
def filter_spanning_reads(chimeras, reads,
                          anchor_min,
                          anchor_length,
                          anchor_mismatches,
                          library_type):
    """Yield (chimera, DiscordantRead) pairs for every mapped read that is a
    valid breakpoint-spanning alignment for one of the given chimeras.

    Each read is annotated with alignment/orientation tags before being
    wrapped into a DiscordantRead marked as spanning; a single read may be
    yielded once per chimera it validates against.
    """
    for i,r in enumerate(reads):
        if r.is_unmapped:
            continue
        # make a discordant read object
        # TODO: need to annotate reads elsewhere since they have already been sorted here
        r.tags = r.tags + [("HI", 0),
                           ("IH", 1),
                           ("NH", 1),
                           (DISCORDANT_TAG_NAME, DiscordantTags.DISCORDANT_GENE),
                           (ORIENTATION_TAG_NAME, get_gene_orientation(r, library_type))]
        dr = DiscordantRead.from_read(r)
        dr.is_spanning = True
        # check read alignment against chimeras
        for c in chimeras:
            if check_breakpoint_alignment(c, r,
                                          anchor_min,
                                          anchor_length,
                                          anchor_mismatches):
                # valid spanning read
                yield c,dr
def merge_spanning_alignments(breakpoint_chimera_file,
                              encomp_bam_file,
                              singlemap_bam_file,
                              output_chimera_file,
                              anchor_min,
                              anchor_length,
                              anchor_mismatches,
                              library_type,
                              tmp_dir):
    """Merge breakpoint-spanning alignments into the chimera records.

    Two passes are made: first, reads that are both encompassing and
    spanning are matched back to their discordant pairs and flagged; then
    single-mapped spanning reads are appended to each chimera's spanning
    read list. The updated chimeras are written to `output_chimera_file`.

    NOTE: Python 2 code (`print >>f`).
    """
    #
    # Process reads that are both encompassing and spanning
    #
    logging.debug("Processing encompassing/spanning reads")
    tmp_encomp_chimera_file = os.path.join(tmp_dir, "tmp_encomp_chimeras.bedpe")
    f = open(tmp_encomp_chimera_file, "w")
    filtered_hits = 0
    for chimeras, reads in parse_sync_by_breakpoint(breakpoint_chimera_file, encomp_bam_file):
        # build dictionary of chimera name -> qname -> discordant pairs
        chimera_qname_dict = collections.defaultdict(lambda: {})
        for c in chimeras:
            for dpair in c.encomp_frags:
                chimera_qname_dict[c.name][dpair[0].qname] = dpair
        # find valid spanning reads
        for c, dr in filter_spanning_reads(chimeras, reads,
                                           anchor_min, anchor_length,
                                           anchor_mismatches, library_type):
            # ensure encompassing read is present
            if dr.qname not in chimera_qname_dict[c.name]:
                continue
            # get discordant pair
            dpair = chimera_qname_dict[c.name][dr.qname]
            # mark correct read (read1/read2) as a spanning read
            if dr.readnum == dpair[0].readnum:
                dpair[0].is_spanning = True
            elif dr.readnum == dpair[1].readnum:
                dpair[1].is_spanning = True
            else:
                # the spanning read must match one mate of the pair
                assert False
            filtered_hits += 1
        # write chimeras back to file
        for c in chimeras:
            fields = c.to_list()
            print >>f, '\t'.join(map(str, fields))
    f.close()
    logging.debug("\tFound %d hits" % (filtered_hits))
    #
    # Process reads that are single-mapped and spanning
    #
    logging.debug("Processing single-mapping/spanning reads")
    tmp_singlemap_chimera_file = os.path.join(tmp_dir, "tmp_singlemap_chimeras.bedpe")
    f = open(tmp_singlemap_chimera_file, "w")
    filtered_hits = 0
    # second pass re-reads the chimeras written by the first pass
    for chimeras, reads in parse_sync_by_breakpoint(tmp_encomp_chimera_file, singlemap_bam_file):
        # find valid spanning reads
        for c, dr in filter_spanning_reads(chimeras, reads,
                                           anchor_min, anchor_length,
                                           anchor_mismatches, library_type):
            # ensure mate maps to 5' or 3' gene
            # TODO: implement this using sorted/indexed BAM file?
            # add read as a spanning read
            c.spanning_reads.append(dr)
            filtered_hits += 1
        # write chimeras back to file
        for c in chimeras:
            fields = c.to_list()
            print >>f, '\t'.join(map(str, fields))
    f.close()
    logging.debug("\tFound %d hits" % (filtered_hits))
    # output_chimera_file
    shutil.copyfile(tmp_singlemap_chimera_file, output_chimera_file)
    # remove temporary files
    if os.path.exists(tmp_encomp_chimera_file):
        os.remove(tmp_encomp_chimera_file)
    if os.path.exists(tmp_singlemap_chimera_file):
        os.remove(tmp_singlemap_chimera_file)
def main():
    """Command-line entry point: parse options and run the merge step."""
    from optparse import OptionParser
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <chimeras.breakpoint_sorted.txt> "
                          "<encomp.bam> <onemap.bam> <chimeras.out.txt>")
    parser.add_option("--anchor-min", type="int", dest="anchor_min", default=4)
    parser.add_option("--anchor-length", type="int", dest="anchor_length", default=8)
    parser.add_option("--anchor-mismatches", type="int", dest="anchor_mismatches", default=0)
    parser.add_option('--library', dest="library_type",
                      default=LibraryTypes.FR_UNSTRANDED)
    options, args = parser.parse_args()
    # fail fast with a usage message instead of an IndexError
    if len(args) != 4:
        parser.error("expected 4 positional arguments, got %d" % len(args))
    breakpoint_chimera_file = args[0]
    encomp_bam_file = args[1]
    singlemap_bam_file = args[2]
    # BUG FIX: was args[4], which always raised IndexError with the four
    # documented positional arguments; the output file is the fourth one.
    output_chimera_file = args[3]
    merge_spanning_alignments(breakpoint_chimera_file,
                              encomp_bam_file,
                              singlemap_bam_file,
                              output_chimera_file,
                              options.anchor_min,
                              options.anchor_length,
                              options.anchor_mismatches,
                              options.library_type,
                              # BUG FIX: merge_spanning_alignments requires a
                              # tmp_dir argument that was missing here; use
                              # the output file's directory for temp files.
                              os.path.dirname(output_chimera_file) or ".")
|
genome-vendor/chimerascan
|
chimerascan/pipeline/merge_spanning_alignments.py
|
Python
|
gpl-3.0
| 10,791
|
[
"pysam"
] |
86b6f39d1c608059d4e47345bc1c571e2f761004e9e0fb285455d75c0aa04437
|
# Placeholder because parallel moved
# Remove this in version 1.0
import warnings
with warnings.catch_warnings():
    # 'always' guarantees the deprecation message is emitted on every
    # import, not only the first time in the process.
    warnings.simplefilter('always', DeprecationWarning)
    warnings.warn(("parallel has moved to MDAnalysis.lib.parallel "
                   "and will be removed from here in release 1.0"),
                  DeprecationWarning)
# re-export everything from the new location for backward compatibility
from ..lib.parallel import *
|
alejob/mdanalysis
|
package/MDAnalysis/core/parallel.py
|
Python
|
gpl-2.0
| 375
|
[
"MDAnalysis"
] |
d04be92cf9bbc0fa8791d7d040541f5c8ecd3b0f2480fba1c825b6f0f875d595
|
#!/usr/bin/python
###############################################################################
#
# Prepares histograms for individual rings of channel atoms based on a user-
# -defined column of the channel atom datafiles.
#
# Example: For 14-column data with this type (described elsewhere):
#
# 1 7.0 0.413 0.373 0.294 0.300 0.282 0.425 0.358 0.246 0.422 0.305 0.392 0.350
# 2 7.0 0.412 0.337 0.280 0.388 0.292 0.419 0.384 0.233 0.469 0.287 0.389 0.301
#
# The following command will load that datafile into memory, strip
# the first 2000 lines and produce a series of histogram datafiles
# or return data that could be plotted accordingly using matplotlib.
#
# python ChannelAtom_Histograms.py -f nav.n7.thr -r 4 -remove 2000
#
# By Chris Ing, 2013 for Python 2.7
#
###############################################################################
from argparse import ArgumentParser
from collections import defaultdict
from numpy import histogram, convolve, ones, mean
from ChannelAnalysis.PoreAnalysis.Preprocessor import *
# a great helper function to iterate over chunks of a list;
# `range` instead of Python-2-only `xrange` (identical results, keeps
# the helper importable under Python 3); the last chunk may be shorter.
def chunker(seq, size):
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
# a helper method for extracting a timeseries window: returns a uniform
# averaging kernel of the given size (entries sum to 1).
def window(size):
    kernel = ones(size)
    return kernel / float(size)
# This returns the sort_column as a time series, useful
# for making scatterplot time series of channel atom positions.
def compute_atom_timeseries(data_lines, sort_col, traj_col,
                            col_skip=2, num_cols=3, window_size=100,
                            mean_shift=False):
    """Extract per-atom time series of the `sort_col` value.

    Returns a pair of dicts keyed by trajectory id, then atom number:
    (atom positions, matching time values). When `window_size` is given,
    each position series is smoothed with a running-average convolution.

    Uses dict.items() instead of Python-2-only iteritems(), and
    `is not None` instead of `!= None` (same behavior, Python 3
    compatible).
    """
    # These are dictionaries of dict where the key is the traj_number
    # and the subdict is ion_number and the value is a LIST of ion positions,
    # or associated time values in the case of the associated time_per_traj
    atom_pos_per_traj = defaultdict(dict)
    time_per_traj = defaultdict(dict)

    # First determine the mean displacement for the entire dataset.
    if mean_shift:
        traj_mean = 0.0
        for line in data_lines:
            col_blocks = list(chunker(line[col_skip:], num_cols))
            traj_mean += mean([block[sort_col] for block in col_blocks])
        traj_mean /= len(data_lines)

    for line in data_lines:
        traj_id = line[traj_col]
        # no need to materialize the generator before enumerating it
        for atom_num, atom in enumerate(chunker(line[col_skip:],
                                                num_cols)):
            sort_val = atom[sort_col]
            if mean_shift:
                sort_val -= traj_mean
            if atom_num not in atom_pos_per_traj[traj_id]:
                atom_pos_per_traj[traj_id][atom_num] = [sort_val]
                time_per_traj[traj_id][atom_num] = [line[0]]
            else:
                atom_pos_per_traj[traj_id][atom_num].append(sort_val)
                time_per_traj[traj_id][atom_num].append(line[0])

    if window_size is not None:
        # smooth each series with a running average of width window_size
        for t_id, atoms in atom_pos_per_traj.items():
            for a_id, atom_ts in atoms.items():
                atom_pos_per_traj[t_id][a_id] = list(convolve(atom_ts,
                                                    window(window_size),
                                                    'same'))

    return (dict(atom_pos_per_traj), dict(time_per_traj))
# Not a complicated function for getting histogrammed data for the sort_col
# in a particular group of data_lines. This does not distinguish between
# any of the residues in the ring, i.e. if one is protonated this will
# be lumped in all together.
def compute_allatom_histogram(data_lines, sort_col,
                              num_cols=3,
                              histmin=-1.50, histmax=1.5,
                              histbins=300, col_skip=2,
                              normed=True, prefix=None):
    """Histogram the `sort_col` values of all channel atoms together.

    Returns ({"ALL": counts}, {"ALL": bin_edges}); when `prefix` is given,
    the histogram is also written to '<prefix>_allatom'.
    """
    # Power datatypes son. The structure is: traj_id -> ion_num -> z_vals
    atom_sortvals = []
    # These are dictionaries of lists where the key is a coord_col
    # and the list is a axial probability or associated z value.
    coord_hist_per_atom = defaultdict(list)
    z_per_atom = defaultdict(list)

    for line in data_lines:
        for atom in chunker(line[col_skip:], num_cols):
            sort_val = atom[sort_col]
            atom_sortvals.append(sort_val)

    # BUG FIX: numpy removed the `normed` keyword in 1.24; `density` is
    # equivalent for the uniform bins used here, and the `normed`
    # parameter of this function keeps its meaning.
    histo, edges = histogram(atom_sortvals, range=[histmin, histmax],
                             bins=histbins, density=normed)

    if prefix is not None:
        # NOTE: edges has one more entry than histo; zip pairs each count
        # with its left bin edge and drops the final edge.
        with open(prefix+"_allatom", "w") as out:
            for xval, yval in zip(edges, histo):
                out.write(str(xval)+" "+str(yval)+"\n")

    coord_hist_per_atom["ALL"].extend(list(histo))
    z_per_atom["ALL"].extend(list(edges))

    return (dict(coord_hist_per_atom), dict(z_per_atom))
if __name__ == '__main__':
    # Command-line driver: parse the datafile options, preprocess the
    # channel-atom lines, and print the all-atom histogram.
    # NOTE: Python 2 code (`print` statement on the last line).
    parser = ArgumentParser(
    description='This script parses input columnular ASCII data\
    of channel atoms and makes it nice and pretty for subsequent analysis.')
    parser.add_argument(
    '-f', dest='filenames', type=str, nargs="+", required=True,
    help='a filename of atom data from MDAnalysis trajectory data')
    parser.add_argument(
    '-c', dest='num_cols', type=int, default=3,
    help='the number of columns per channel atom in the input')
    parser.add_argument(
    '-cs', dest='col_skip', type=int, default=2,
    help='the number of columns per line in input that are not atom data')
    parser.add_argument(
    '-s', dest='sort_col', type=int, default=2,
    help='a zero inclusive column number to pull from each res, typically z')
    parser.add_argument(
    '-remove', dest='remove_frames', type=int, default=0,
    help='this is a number of frames to remove from the start of the data')
    args = parser.parse_args()
    # strip equilibration frames before histogramming
    data_f_processed = process_channelatoms(filenames=args.filenames,
                                            remove_frames=args.remove_frames)
    allatom_histo = compute_allatom_histogram(data_f_processed,
                                              args.sort_col,
                                              col_skip=args.col_skip,
                                              num_cols=args.num_cols)
    print allatom_histo
|
cing/ChannelAnalysis
|
ChannelAnalysis/PoreAnalysis/Histograms.py
|
Python
|
mit
| 6,160
|
[
"MDAnalysis"
] |
885b7b4538229c40b4f46113f1902386d2311518d9c63a8b70d464896adcec58
|
from datetime import date, timedelta
from ..core import WesternCalendar
from ..core import SUN, MON, TUE, THU, SAT
from ..registry_tools import iso_register
@iso_register('US')
class UnitedStates(WesternCalendar):
    """United States of America federal holiday calendar.

    State calendars are expected to subclass this and toggle the
    ``include_*`` flags and labels defined below.
    """
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (7, 4, 'Independence Day'),
    )
    # Veterans day label
    include_veterans_day = True
    veterans_day_label = 'Veterans Day'
    # MLK
    martin_luther_king_label = 'Birthday of Martin Luther King, Jr.'
    include_thanksgiving_friday = False
    thanksgiving_friday_label = "Thanksgiving Friday"
    # Some states don't include Washington's Birthday, or move it to December.
    include_federal_presidents_day = True
    presidents_day_label = "Washington's Birthday"
    include_lincoln_birthday = False
    # Columbus day is included by default
    include_columbus_day = True
    columbus_day_label = "Columbus Day"
    # Confederation day
    include_confederation_day = False
    # Jefferson Davis Birthday.
    include_jefferson_davis_birthday = False
    # Include Cesar Chavez day(s)
    include_cesar_chavez_day = False
    # Patriot's day
    include_patriots_day = False
    # Boxing day label is not "boxing day" in the US
    boxing_day_label = "Day After Christmas"
    # Election Day
    # To include every year?
    include_election_day_every_year = False
    # To include on even years?
    include_election_day_even = False
    # NOTE: if it's included on every year, it'll also be included
    # on even years. Setting these two flags to ON will give priority to the
    # yearly flag.
    election_day_label = "Election Day"
    # Inauguration Day
    include_inauguration_day = False
    # National Memorial Day
    national_memorial_day_label = "Memorial Day"
    # Some regional variants
    include_fat_tuesday = False
    fat_tuesday_label = "Mardi Gras"
    # Shift day mechanism
    # These days won't be shifted to next MON or previous FRI
    shift_exceptions = (
        # Example:
        # (11, 11),  # Veterans day won't be shifted
    )

    def shift(self, holidays, year):
        """
        Shift all holidays of the year, according to the shifting rules.

        Returns the input holidays plus the "(Observed)" entries: SAT
        holidays observed the FRI before, SUN holidays observed the MON
        after, with special handling for New Year, Christmas Day and
        Christmas Eve.
        """
        new_holidays = []
        holiday_lookup = [x[0] for x in holidays]
        exceptions = []
        if self.shift_exceptions:
            # exceptions from adjacent years are included because an
            # observed day can spill over the year boundary
            exceptions = [
                *[date(year - 1, m, d) for m, d in self.shift_exceptions],
                *[date(year, m, d) for m, d in self.shift_exceptions],
                *[date(year + 1, m, d) for m, d in self.shift_exceptions]
            ]
        # For each holiday available:
        # * if it falls on SUN, add the observed on MON
        # * if it falls on SAT, add the observed on FRI
        for day, label in holidays:
            # ... except if it's been explicitely excepted.
            if day in exceptions:
                continue
            if day.weekday() == SAT:
                new_holidays.append(
                    (day - timedelta(days=1), f"{label} (Observed)"))
            elif day.weekday() == SUN:
                new_holidays.append(
                    (day + timedelta(days=1), f"{label} (Observed)"))
        # If year+1 January the 1st is on SAT, add the FRI before to observed
        next_year_jan_1st = date(year + 1, 1, 1)
        if next_year_jan_1st.weekday() == SAT and \
                next_year_jan_1st not in exceptions:
            new_holidays.append(
                (date(year, 12, 31,), "New Years Day (Observed)"))
        # Special rules for XMas and XMas Eve
        christmas = date(year, 12, 25)
        christmas_eve = date(year, 12, 24)
        # Is XMas eve in your calendar?
        if christmas_eve in holiday_lookup:
            # You are observing the THU before, as an extra XMas Eve
            if christmas.weekday() == SAT:
                # Remove the "fake" XMAS Day shift, the one done before.
                # (Christmas on SAT was shifted onto the 24th above, which
                # collides with Christmas Eve itself.)
                new_holidays.remove(
                    (christmas_eve, "Christmas Day (Observed)")
                )
                new_holidays.append(
                    (date(year, 12, 23), "Christmas Eve (Observed)"))
            # You are observing the 26th (TUE)
            elif christmas.weekday() == MON:
                # Remove the "fake" XMAS Eve shift, done before
                # (Christmas Eve on SUN was shifted onto the 25th above.)
                new_holidays.remove(
                    (christmas, "Christmas Eve (Observed)")
                )
                new_holidays.append(
                    (date(year, 12, 26), "Christmas Day (Observed)"))
        return holidays + new_holidays

    @staticmethod
    def is_presidential_year(year):
        """Return True for presidential election years (every 4th year)."""
        return (year % 4) == 0

    def get_election_date(self, year):
        """
        Return the Election Day *Date*

        Definition: on an election year, "the Tuesday next after the first
        Monday in the month of November".
        """
        first_monday_november = self.get_nth_weekday_in_month(year, 11, MON)
        return self.get_nth_weekday_in_month(
            year, 11, TUE, start=first_monday_november
        )

    def get_election_day(self, year):
        """
        Return the Election Day
        """
        return self.get_election_date(year), self.election_day_label

    def get_thanksgiving_friday(self, year):
        """
        Thanksgiving friday is on the day following Thanksgiving Day
        """
        thanksgiving = UnitedStates.get_nth_weekday_in_month(year, 11, THU, 4)
        thanksgiving_friday = thanksgiving + timedelta(days=1)
        return thanksgiving_friday, self.thanksgiving_friday_label

    def get_confederate_day(self, year):
        """
        Confederate memorial day is on the 4th MON of April.
        """
        day = self.get_nth_weekday_in_month(year, 4, MON, 4)
        return day, "Confederate Memorial Day"

    def get_jefferson_davis_birthday(self, year):
        """
        The first MON of June is Jefferson Davis Birthday
        """
        return (
            self.get_nth_weekday_in_month(year, 6, MON, 1),
            "Jefferson Davis Birthday"
        )

    def get_martin_luther_king_date(self, year):
        """
        Martin Luther King is on 3rd MON of January, starting of 1985.
        """
        if year < 1985:
            raise ValueError(
                "Martin Luther King Day became a holiday in 1985"
            )
        return UnitedStates.get_nth_weekday_in_month(year, 1, MON, 3)

    def get_martin_luther_king_day(self, year):
        """
        Return holiday record for Martin Luther King Jr. Day.
        """
        day = self.get_martin_luther_king_date(year)
        return day, self.martin_luther_king_label

    def get_presidents_day(self, year):
        """
        Presidents Day is on the 3rd MON of February

        May be called Washington's or Lincoln's birthday
        """
        day = UnitedStates.get_nth_weekday_in_month(year, 2, MON, 3)
        return day, self.presidents_day_label

    def get_cesar_chavez_days(self, year):
        """
        Cesar Chavez day is on 31st of March

        Will return a list of days, because in some states (California),
        it can float to MON if it happens on SUN.
        """
        days = [(date(year, 3, 31), "Cesar Chavez Day")]
        return days

    def get_patriots_day(self, year):
        """3rd Monday of April"""
        return self.get_nth_weekday_in_month(year, 4, MON, 3), "Patriots Day"

    def get_columbus_day(self, year):
        """
        Columbus day is on the 2nd MON of October.

        Only half of the states recognize it.
        """
        day = UnitedStates.get_nth_weekday_in_month(year, 10, MON, 2)
        return day, self.columbus_day_label

    def get_lincoln_birthday(self, year):
        """
        February the 2nd is Lincoln's birthday in the following States:

        * Connecticut,
        * Illinois,
        * Missouri,
        * New York
        """
        return date(year, 2, 12), "Lincoln's Birthday"

    def get_inauguration_date(self, year):
        """
        If the year is an Inauguration Year, will return the Inauguration Day
        date.

        If this day falls on SUN, it's replaced by the next MON.
        If the year is not a Inauguration Year, it raises a ValueError.
        """
        if ((year - 1) % 4) != 0:
            raise ValueError(
                f"The year {year} is not an Inauguration Year")
        inauguration_day = date(year, 1, 20)
        if inauguration_day.weekday() == SUN:
            inauguration_day = date(year, 1, 21)
        return inauguration_day

    def get_national_memorial_day(self, year):
        """
        Return National Memorial Day (last MON of May).
        """
        return (
            UnitedStates.get_last_weekday_in_month(year, 5, MON),
            self.national_memorial_day_label
        )

    def get_variable_days(self, year):
        """Return the year's variable holidays, honoring the include_* flags."""
        # usual variable days
        days = super().get_variable_days(year)
        # Martin Luther King's Day started only in 1985
        if year >= 1985:
            days.append(self.get_martin_luther_king_day(year))
        days.extend([
            self.get_national_memorial_day(year),
            (UnitedStates.get_nth_weekday_in_month(year, 9, MON),
             "Labor Day"),
            (UnitedStates.get_nth_weekday_in_month(year, 11, THU, 4),
             "Thanksgiving Day"),
        ])
        if self.include_federal_presidents_day:
            days.append(self.get_presidents_day(year))
        if self.include_lincoln_birthday:
            days.append(self.get_lincoln_birthday(year))
        if self.include_cesar_chavez_day:
            days.extend(self.get_cesar_chavez_days(year))
        if self.include_patriots_day:
            days.append(self.get_patriots_day(year))
        if self.include_columbus_day:
            days.append(self.get_columbus_day(year))
        if self.include_confederation_day:
            days.append(self.get_confederate_day(year))
        if self.include_jefferson_davis_birthday:
            days.append(self.get_jefferson_davis_birthday(year))
        if self.include_inauguration_day:
            # Is it a "Inauguration year"?
            if UnitedStates.is_presidential_year(year - 1):
                days.append(
                    (self.get_inauguration_date(year), "Inauguration Day")
                )
        if self.include_election_day_every_year:
            days.append(self.get_election_day(year))
        elif self.include_election_day_even:
            if (year % 2) == 0:
                days.append(self.get_election_day(year))
        if self.include_thanksgiving_friday:
            days.append(
                self.get_thanksgiving_friday(year)
            )
        return days

    def get_veterans_day(self, year):
        """
        Return Veterans Day (November 11th).

        Placed here because some States are renaming it.
        """
        return date(year, 11, 11), self.veterans_day_label

    def get_fixed_holidays(self, year):
        """Return the fixed holidays, adding Veterans Day when included."""
        days = super().get_fixed_holidays(year)
        if self.include_veterans_day:
            days.append(self.get_veterans_day(year))
        return days

    def get_calendar_holidays(self, year):
        """
        Will return holidays and their shifted days
        """
        days = super().get_calendar_holidays(year)
        days = self.shift(days, year)
        return days
|
novafloss/workalendar
|
workalendar/usa/core.py
|
Python
|
mit
| 11,414
|
[
"COLUMBUS"
] |
d45bf8f0bbbae02c9bbc91f2fc7a776619dd20d6069949b134a7632d920a3171
|
#!/usr/bin/env python
"""
Install.py tool to download, compile, and setup the pace library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, subprocess
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import fullpath, geturl, checkmd5sum
# settings
thisdir = fullpath('.')
version = 'v.2021.4.9'
# known checksums for different PACE versions. used to validate the download.
checksums = { \
'v.2021.2.3.upd2' : '8fd1162724d349b930e474927197f20d',
'v.2021.4.9' : '4db54962fbd6adcf8c18d46e1798ceb5',
}
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
# help message
HELP = """
Syntax from src dir: make lib-pace args="-b"
or: make lib-pace args="-b -v version"
Syntax from lib dir: python Install.py -b
or: python Install.py -b -v version
Examples:
make lib-pace args="-b" # install default version of PACE lib
make lib-pace args="-b -v version" # install specified version of PACE lib
"""
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
help="download and build base PACE library")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
help="set version of PACE library to download and build (default: %s)" % version)
parser.add_argument("-vv", "--verbose", action="store_true",
help="be more verbose about is happening while this script runs")
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build:
parser.print_help()
sys.exit(HELP)
buildflag = args.build
verboseflag = args.verbose
version = args.version
archive_extension = "tar.gz"
url = "https://github.com/ICAMS/lammps-user-pace/archive/refs/tags/%s.%s" % (version, archive_extension)
unarchived_folder_name = "lammps-user-pace-%s"%(version)
# download PACE tarball, unpack, build PACE
if buildflag:
# download entire tarball
print("Downloading pace tarball ...")
archive_filename = "%s.%s" % (version, archive_extension)
download_filename = "%s/%s" % (thisdir, archive_filename)
print("Downloading from ",url," to ",download_filename, end=" ")
geturl(url, download_filename)
print(" done")
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
if not checkmd5sum(checksums[version], archive_filename):
sys.exit("Checksum for pace library does not match")
print("Unpacking pace tarball ...")
src_folder = thisdir+"/src"
cmd = 'cd "%s"; rm -rf "%s"; tar -xvf %s; mv %s %s' % (thisdir, src_folder, archive_filename, unarchived_folder_name, src_folder)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
# build
print("Building libpace ...")
cmd = 'make lib -j2'
txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
if verboseflag:
print(txt.decode("UTF-8"))
# remove source files
print("Removing pace build files and archive ...")
cmd = 'rm %s; make clean-build' % (download_filename)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
|
jeremiahyan/lammps
|
lib/pace/Install.py
|
Python
|
gpl-2.0
| 3,322
|
[
"LAMMPS"
] |
0a2e0515659750756fefecfd14f26e6e4af13cfc4023d5563e6faaaa23ea173f
|
# -*- coding: UTF-8 -*-
# Copyright 2017-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.api import rt, _
from lino.utils.mldbc import babel_named as named
def objects():
    """Yield the demo groups and demo users for the groups plugin."""
    Group = rt.models.groups.Group
    User = rt.models.users.User
    UserTypes = rt.models.users.UserTypes

    # Three demo groups, created first so they exist before the users.
    for group_name in (
            _("Hitchhiker's Guide to the Galaxy"),
            _("Star Trek"),
            _("Harry Potter")):
        yield named(Group, group_name)

    # Three plain demo users.
    for username, first_name in (
            ("andy", "Andy"),
            ("bert", "Bert"),
            ("chloe", "Chloe")):
        yield User(username=username, user_type=UserTypes.user,
                   first_name=first_name)
|
lino-framework/xl
|
lino_xl/lib/groups/fixtures/demo.py
|
Python
|
bsd-2-clause
| 722
|
[
"Galaxy"
] |
8ff0d0cd48ee50a6e4633e535d1979083d37f959684eb680e2ec60cbfb1521d8
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import datetime
import os
import urllib
from common import jinja_filters
from common import safe_dom
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import HUMAN_READABLE_TIME_FORMAT
from controllers.utils import ReflectiveRequestHandler
import jinja2
import jinja2.exceptions
from models import config
from models import courses
from models import custom_modules
from models import jobs
from models import roles
from models import transforms
from models import utils
from models import vfs
from models.models import Student
from course_settings import CourseSettingsHandler
from course_settings import CourseSettingsRESTHandler
import filer
from filer import AssetItemRESTHandler
from filer import AssetUriRESTHandler
from filer import FileManagerAndEditor
from filer import FilesItemRESTHandler
import messages
from peer_review import AssignmentManager
import unit_lesson_editor
from unit_lesson_editor import AssessmentRESTHandler
from unit_lesson_editor import ImportCourseRESTHandler
from unit_lesson_editor import LessonRESTHandler
from unit_lesson_editor import LinkRESTHandler
from unit_lesson_editor import UnitLessonEditor
from unit_lesson_editor import UnitLessonTitleRESTHandler
from unit_lesson_editor import UnitRESTHandler
from google.appengine.api import users
class DashboardHandler(
    CourseSettingsHandler, FileManagerAndEditor, UnitLessonEditor,
    AssignmentManager, ApplicationHandler, ReflectiveRequestHandler):
    """Handles all pages and actions required for managing a course."""

    default_action = 'outline'
    get_actions = [
        default_action, 'assets', 'settings', 'analytics',
        'edit_basic_settings', 'edit_settings', 'edit_unit_lesson',
        'edit_unit', 'edit_link', 'edit_lesson', 'edit_assessment',
        'add_asset', 'delete_asset', 'import_course', 'edit_assignment']

    # Requests to these handlers automatically go through an XSRF token check
    # that is implemented in ReflectiveRequestHandler.
    post_actions = [
        'compute_student_stats', 'create_or_edit_settings', 'add_unit',
        'add_link', 'add_assessment', 'add_lesson',
        'edit_basic_course_settings', 'add_reviewer', 'delete_reviewer']

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        # BUG FIX: (AssetItemRESTHandler.URI, AssetItemRESTHandler) was
        # previously listed twice; the duplicate route has been removed.
        return [
            (AssessmentRESTHandler.URI, AssessmentRESTHandler),
            (AssetItemRESTHandler.URI, AssetItemRESTHandler),
            (CourseSettingsRESTHandler.URI, CourseSettingsRESTHandler),
            (FilesItemRESTHandler.URI, FilesItemRESTHandler),
            (AssetUriRESTHandler.URI, AssetUriRESTHandler),
            (ImportCourseRESTHandler.URI, ImportCourseRESTHandler),
            (LessonRESTHandler.URI, LessonRESTHandler),
            (LinkRESTHandler.URI, LinkRESTHandler),
            (UnitLessonTitleRESTHandler.URI, UnitLessonTitleRESTHandler),
            (UnitRESTHandler.URI, UnitRESTHandler),
        ]

    def can_view(self):
        """Checks if current user has viewing rights."""
        return roles.Roles.is_course_admin(self.app_context)

    def can_edit(self):
        """Checks if current user has editing rights."""
        return roles.Roles.is_course_admin(self.app_context)

    def get(self):
        """Enforces rights to all GET operations."""
        if not self.can_view():
            self.redirect(self.app_context.get_slug())
            return
        # Force reload of properties. It is expensive, but admin deserves it!
        config.Registry.get_overrides(force_update=True)
        return super(DashboardHandler, self).get()

    def post(self):
        """Enforces rights to all POST operations."""
        if not self.can_edit():
            self.redirect(self.app_context.get_slug())
            return
        return super(DashboardHandler, self).post()

    def get_template(self, template_name, dirs):
        """Sets up an environment and Gets jinja template."""
        jinja_environment = jinja2.Environment(
            autoescape=True, finalize=jinja_filters.finalize,
            loader=jinja2.FileSystemLoader(dirs + [os.path.dirname(__file__)]))
        jinja_environment.filters['js_string'] = jinja_filters.js_string
        return jinja_environment.get_template(template_name)

    def _get_alerts(self):
        """Returns a newline-joined string of course status warnings."""
        alerts = []
        if not courses.is_editable_fs(self.app_context):
            alerts.append('Read-only course.')
        if not self.app_context.now_available:
            alerts.append('The course is not publicly available.')
        return '\n'.join(alerts)

    def _get_top_nav(self):
        """Renders the top navigation bar shared by all dashboard pages."""
        current_action = self.request.get('action')
        nav_mappings = [
            ('', 'Outline'),
            ('assets', 'Assets'),
            ('settings', 'Settings'),
            ('analytics', 'Analytics'),
            ('edit_assignment', 'Peer Review')]
        nav = safe_dom.NodeList()
        for action, title in nav_mappings:
            # Highlight the tab matching the current request's action.
            class_name = 'selected' if action == current_action else ''
            action_href = 'dashboard?action=%s' % action
            nav.append(safe_dom.Element(
                'a', href=action_href, className=class_name).add_text(
                    title))
        if roles.Roles.is_super_admin():
            nav.append(safe_dom.Element(
                'a', href='/admin').add_text('Admin'))
        nav.append(safe_dom.Element(
            'a', href='https://code.google.com/p/course-builder/wiki/Dashboard',
            target='_blank').add_text('Help'))
        return nav

    def render_page(self, template_values):
        """Renders a page using provided template values."""
        template_values['top_nav'] = self._get_top_nav()
        template_values['gcb_course_base'] = self.get_base_href(self)
        template_values['user_nav'] = safe_dom.NodeList().append(
            safe_dom.Text('%s | ' % users.get_current_user().email())
        ).append(
            safe_dom.Element(
                'a', href=users.create_logout_url(self.request.uri)
            ).add_text('Logout'))
        template_values[
            'page_footer'] = 'Created on: %s' % datetime.datetime.now()
        if not template_values.get('sections'):
            template_values['sections'] = []
        self.response.write(
            self.get_template('view.html', []).render(template_values))

    def format_title(self, text):
        """Formats standard title: 'Course Builder > <course> > Dashboard > <text>'."""
        title = self.app_context.get_environ()['course']['title']
        return safe_dom.NodeList().append(
            safe_dom.Text('Course Builder ')
        ).append(
            safe_dom.Entity('&gt;')
        ).append(
            safe_dom.Text(' %s ' % title)
        ).append(
            safe_dom.Entity('&gt;')
        ).append(
            safe_dom.Text(' Dashboard ')
        ).append(
            safe_dom.Entity('&gt;')
        ).append(
            safe_dom.Text(' %s' % text)
        )

    def _get_edit_link(self, url):
        """Returns a ' Edit' link node pointing at *url*."""
        return safe_dom.NodeList().append(
            safe_dom.Text(' ')
        ).append(
            safe_dom.Element('a', href=url).add_text('Edit')
        )

    def _get_availability(self, resource):
        """Returns a '(Draft)' marker for resources not publicly available."""
        if not hasattr(resource, 'now_available'):
            return safe_dom.Text('')
        if resource.now_available:
            return safe_dom.Text('')
        else:
            return safe_dom.NodeList().append(
                safe_dom.Text(' ')
            ).append(
                safe_dom.Element(
                    'span', className='draft-label'
                ).add_text('(%s)' % unit_lesson_editor.DRAFT_TEXT)
            )

    def render_course_outline_to_html(self):
        """Renders course outline to HTML."""
        course = courses.Course(self)
        if not course.get_units():
            return []
        is_editable = filer.is_editable_fs(self.app_context)
        lines = safe_dom.Element('ul', style='list-style: none;')
        for unit in course.get_units():
            # 'A' = assessment, 'O' = link (other), 'U' = unit with lessons.
            if unit.type == 'A':
                li = safe_dom.Element('li').add_child(
                    safe_dom.Element(
                        'a', href='assessment?name=%s' % unit.unit_id,
                        className='strong'
                    ).add_text(unit.title)
                ).add_child(self._get_availability(unit))
                if is_editable:
                    url = self.canonicalize_url(
                        '/dashboard?%s') % urllib.urlencode({
                            'action': 'edit_assessment',
                            'key': unit.unit_id})
                    li.add_child(self._get_edit_link(url))
                lines.add_child(li)
                continue

            if unit.type == 'O':
                li = safe_dom.Element('li').add_child(
                    safe_dom.Element(
                        'a', href=unit.href, className='strong'
                    ).add_text(unit.title)
                ).add_child(self._get_availability(unit))
                if is_editable:
                    url = self.canonicalize_url(
                        '/dashboard?%s') % urllib.urlencode({
                            'action': 'edit_link',
                            'key': unit.unit_id})
                    li.add_child(self._get_edit_link(url))
                lines.add_child(li)
                continue

            if unit.type == 'U':
                li = safe_dom.Element('li').add_child(
                    safe_dom.Element(
                        'a', href='unit?unit=%s' % unit.unit_id,
                        className='strong').add_text(
                            'Unit %s - %s' % (unit.index, unit.title))
                ).add_child(self._get_availability(unit))
                if is_editable:
                    url = self.canonicalize_url(
                        '/dashboard?%s') % urllib.urlencode({
                            'action': 'edit_unit',
                            'key': unit.unit_id})
                    li.add_child(self._get_edit_link(url))
                # Nested ordered list of the unit's lessons.
                ol = safe_dom.Element('ol')
                for lesson in course.get_lessons(unit.unit_id):
                    li2 = safe_dom.Element('li').add_child(
                        safe_dom.Element(
                            'a',
                            href='unit?unit=%s&lesson=%s' % (
                                unit.unit_id, lesson.lesson_id),
                        ).add_text(lesson.title)
                    ).add_child(self._get_availability(lesson))
                    if is_editable:
                        url = self.get_action_url(
                            'edit_lesson', key=lesson.lesson_id)
                        li2.add_child(self._get_edit_link(url))
                    ol.add_child(li2)
                li.add_child(ol)
                lines.add_child(li)
                continue

            raise Exception('Unknown unit type: %s.' % unit.type)

        return lines

    def get_outline(self):
        """Renders course outline view."""
        pages_info = [
            safe_dom.Element(
                'a', href=self.canonicalize_url('/announcements')
            ).add_text('Announcements'),
            safe_dom.Element(
                'a', href=self.canonicalize_url('/course')
            ).add_text('Course')]

        outline_actions = []
        if filer.is_editable_fs(self.app_context):
            outline_actions.append({
                'id': 'edit_unit_lesson',
                'caption': 'Organize',
                'href': self.get_action_url('edit_unit_lesson')})
            outline_actions.append({
                'id': 'add_lesson',
                'caption': 'Add Lesson',
                'action': self.get_action_url('add_lesson'),
                'xsrf_token': self.create_xsrf_token('add_lesson')})
            outline_actions.append({
                'id': 'add_unit',
                'caption': 'Add Unit',
                'action': self.get_action_url('add_unit'),
                'xsrf_token': self.create_xsrf_token('add_unit')})
            outline_actions.append({
                'id': 'add_link',
                'caption': 'Add Link',
                'action': self.get_action_url('add_link'),
                'xsrf_token': self.create_xsrf_token('add_link')})
            outline_actions.append({
                'id': 'add_assessment',
                'caption': 'Add Assessment',
                'action': self.get_action_url('add_assessment'),
                'xsrf_token': self.create_xsrf_token('add_assessment')})
            # Import is only offered while the course is still empty.
            if not courses.Course(self).get_units():
                outline_actions.append({
                    'id': 'import_course',
                    'caption': 'Import',
                    'href': self.get_action_url('import_course')
                    })

        data_info = self.list_files('/data/')

        sections = [
            {
                'title': 'Pages',
                'description': messages.PAGES_DESCRIPTION,
                'children': pages_info},
            {
                'title': 'Course Outline',
                'description': messages.COURSE_OUTLINE_DESCRIPTION,
                'actions': outline_actions,
                'pre': self.render_course_outline_to_html()},
            {
                'title': 'Data Files',
                'description': messages.DATA_FILES_DESCRIPTION,
                'children': data_info}]

        template_values = {}
        template_values['page_title'] = self.format_title('Outline')
        template_values['alerts'] = self._get_alerts()
        template_values['sections'] = sections
        self.render_page(template_values)

    def get_action_url(self, action, key=None, extra_args=None):
        """Builds a canonical /dashboard URL for *action* (plus key/extras)."""
        args = {'action': action}
        if key:
            args['key'] = key
        if extra_args:
            args.update(extra_args)
        url = '/dashboard?%s' % urllib.urlencode(args)
        return self.canonicalize_url(url)

    def get_settings(self):
        """Renders course settings view."""
        yaml_actions = []
        basic_setting_actions = []

        # Basic course info.
        course_info = [
            'Course Title: %s' % self.app_context.get_environ()['course'][
                'title'],
            'Context Path: %s' % self.app_context.get_slug(),
            'Datastore Namespace: %s' % self.app_context.get_namespace_name()]

        # Course file system.
        fs = self.app_context.fs.impl
        course_info.append(('File System: %s' % fs.__class__.__name__))
        if fs.__class__ == vfs.LocalReadOnlyFileSystem:
            course_info.append(('Home Folder: %s' % sites.abspath(
                self.app_context.get_home_folder(), '/')))

        # Enable editing if supported.
        if filer.is_editable_fs(self.app_context):
            yaml_actions.append({
                'id': 'edit_course_yaml',
                'caption': 'Advanced Edit',
                'action': self.get_action_url('create_or_edit_settings'),
                'xsrf_token': self.create_xsrf_token(
                    'create_or_edit_settings')})
            yaml_actions.append({
                'id': 'edit_basic_course_settings',
                'caption': 'Edit',
                'action': self.get_action_url('edit_basic_course_settings'),
                'xsrf_token': self.create_xsrf_token(
                    'edit_basic_course_settings')})

        # Yaml file content.
        yaml_info = []
        yaml_stream = self.app_context.fs.open(
            self.app_context.get_config_filename())
        if yaml_stream:
            yaml_lines = yaml_stream.read().decode('utf-8')
            for line in yaml_lines.split('\n'):
                yaml_info.append(line)
        else:
            yaml_info.append('< empty file >')

        # Prepare template values.
        template_values = {}
        template_values['page_title'] = self.format_title('Settings')
        template_values['page_description'] = messages.SETTINGS_DESCRIPTION
        template_values['sections'] = [
            {
                'title': 'About the Course',
                'description': messages.ABOUT_THE_COURSE_DESCRIPTION,
                'actions': basic_setting_actions,
                'children': course_info},
            {
                'title': 'Contents of course.yaml file',
                'description': messages.CONTENTS_OF_THE_COURSE_DESCRIPTION,
                'actions': yaml_actions,
                'children': yaml_info}]

        self.render_page(template_values)

    def list_files(self, subfolder):
        """Makes a list of files in a subfolder, relative to the course home."""
        home = sites.abspath(self.app_context.get_home_folder(), '/')
        files = self.app_context.fs.list(
            sites.abspath(self.app_context.get_home_folder(), subfolder))
        result = []
        for abs_filename in sorted(files):
            filename = os.path.relpath(abs_filename, home)
            result.append(vfs.AbstractFileSystem.normpath(filename))
        return result

    def list_and_format_file_list(
        self, title, subfolder,
        links=False, upload=False, prefix=None, caption_if_empty='< none >',
        edit_url_template=None, sub_title=None):
        """Walks files in folders and renders their names in a section."""
        items = safe_dom.NodeList()
        count = 0
        for filename in self.list_files(subfolder):
            if prefix and not filename.startswith(prefix):
                continue
            li = safe_dom.Element('li')
            if links:
                li.add_child(safe_dom.Element(
                    'a', href=urllib.quote(filename)).add_text(filename))
                if edit_url_template:
                    edit_url = edit_url_template % urllib.quote(filename)
                    li.add_child(
                        safe_dom.Entity('&nbsp;')
                    ).add_child(
                        safe_dom.Element('a', href=edit_url).add_text('[Edit]'))
            else:
                li.add_text(filename)
            count += 1
            items.append(li)

        output = safe_dom.NodeList()
        if filer.is_editable_fs(self.app_context) and upload:
            output.append(
                safe_dom.Element(
                    'a', className='gcb-button gcb-pull-right',
                    href='dashboard?%s' % urllib.urlencode(
                        {'action': 'add_asset', 'base': subfolder})
                ).add_text('Upload')
            ).append(
                safe_dom.Element('div', style='clear: both; padding-top: 2px;'))
        if title:
            h3 = safe_dom.Element('h3')
            if count:
                h3.add_text('%s (%s)' % (title, count))
            else:
                h3.add_text(title)
            output.append(h3)
        if sub_title:
            output.append(safe_dom.Element('blockquote').add_text(sub_title))
        if items:
            output.append(safe_dom.Element('ol').add_children(items))
        else:
            if caption_if_empty:
                output.append(
                    safe_dom.Element('blockquote').add_text(caption_if_empty))
        return output

    def get_assets(self):
        """Renders course assets view."""

        def inherits_from(folder):
            return '< inherited from %s >' % folder

        items = safe_dom.NodeList().append(
            self.list_and_format_file_list(
                'Assessments', '/assets/js/', links=True,
                prefix='assets/js/assessment-')
        ).append(
            self.list_and_format_file_list(
                'Activities', '/assets/js/', links=True,
                prefix='assets/js/activity-')
        ).append(
            self.list_and_format_file_list(
                'Images & Documents', '/assets/img/', links=True, upload=True,
                edit_url_template='dashboard?action=delete_asset&uri=%s',
                sub_title='< inherited from /assets/img/ >',
                caption_if_empty=None)
        ).append(
            self.list_and_format_file_list(
                'Cascading Style Sheets', '/assets/css/', links=True,
                caption_if_empty=inherits_from('/assets/css/'))
        ).append(
            self.list_and_format_file_list(
                'JavaScript Libraries', '/assets/lib/', links=True,
                caption_if_empty=inherits_from('/assets/lib/'))
        ).append(
            self.list_and_format_file_list(
                'View Templates', '/views/',
                caption_if_empty=inherits_from('/views/'))
        )

        template_values = {}
        template_values['page_title'] = self.format_title('Assets')
        template_values['page_description'] = messages.ASSETS_DESCRIPTION
        template_values['main_content'] = items
        self.render_page(template_values)

    def get_markup_for_basic_analytics(self, job):
        """Renders markup for basic enrollment and assessment analytics."""
        subtemplate_values = {}
        errors = []
        stats_calculated = False
        update_message = safe_dom.Text('')

        if not job:
            update_message = safe_dom.Text(
                'Enrollment/assessment statistics have not been calculated '
                'yet.')
        else:
            if job.status_code == jobs.STATUS_CODE_COMPLETED:
                stats = transforms.loads(job.output)
                stats_calculated = True

                subtemplate_values['enrolled'] = stats['enrollment']['enrolled']
                subtemplate_values['unenrolled'] = (
                    stats['enrollment']['unenrolled'])

                scores = []
                total_records = 0
                for key, value in stats['scores'].items():
                    total_records += value[0]
                    # value is (completed_count, score_sum); guard against
                    # division by zero when nobody completed the assessment.
                    avg = round(value[1] / value[0], 1) if value[0] else 0
                    scores.append({'key': key, 'completed': value[0],
                                   'avg': avg})
                subtemplate_values['scores'] = scores
                subtemplate_values['total_records'] = total_records

                update_message = safe_dom.Text("""
                    Enrollment and assessment statistics were last updated at
                    %s in about %s second(s).""" % (
                        job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT),
                        job.execution_time_sec))
            elif job.status_code == jobs.STATUS_CODE_FAILED:
                update_message = safe_dom.NodeList().append(
                    safe_dom.Text("""
                        There was an error updating enrollment/assessment
                        statistics. Here is the message:""")
                ).append(
                    safe_dom.Element('br')
                ).append(
                    safe_dom.Element('blockquote').add_child(
                        safe_dom.Element('pre').add_text('\n%s' % job.output)))
            else:
                update_message = safe_dom.Text(
                    'Enrollment and assessment statistics update started at %s'
                    ' and is running now. Please come back shortly.' %
                    job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT))

        subtemplate_values['stats_calculated'] = stats_calculated
        subtemplate_values['errors'] = errors
        subtemplate_values['update_message'] = update_message

        return jinja2.utils.Markup(self.get_template(
            'basic_analytics.html', [os.path.dirname(__file__)]
        ).render(subtemplate_values, autoescape=True))

    def get_analytics(self):
        """Renders course analytics view."""
        template_values = {}
        template_values['page_title'] = self.format_title('Analytics')

        at_least_one_job_exists = False
        at_least_one_job_finished = False

        basic_analytics_job = ComputeStudentStats(self.app_context).load()
        stats_html = self.get_markup_for_basic_analytics(basic_analytics_job)
        if basic_analytics_job:
            at_least_one_job_exists = True
            if basic_analytics_job.status_code == jobs.STATUS_CODE_COMPLETED:
                at_least_one_job_finished = True

        # Append the markup from every registered custom analytics handler.
        for callback in DashboardRegistry.analytics_handlers:
            handler = callback()
            handler.app_context = self.app_context
            handler.request = self.request
            handler.response = self.response
            job = handler.stats_computer(self.app_context).load()
            stats_html += handler.get_markup(job)
            if job:
                at_least_one_job_exists = True
                if job.status_code == jobs.STATUS_CODE_COMPLETED:
                    at_least_one_job_finished = True

        template_values['main_content'] = jinja2.utils.Markup(self.get_template(
            'analytics.html', [os.path.dirname(__file__)]
        ).render({
            'show_recalculate_button': (
                at_least_one_job_finished or not at_least_one_job_exists),
            'stats_html': stats_html,
            'xsrf_token': self.create_xsrf_token('compute_student_stats'),
        }, autoescape=True))

        self.render_page(template_values)

    def post_compute_student_stats(self):
        """Submits a new student statistics calculation task."""
        job = ComputeStudentStats(self.app_context)
        job.submit()
        for callback in DashboardRegistry.analytics_handlers:
            job = callback().stats_computer(self.app_context)
            job.submit()
        self.redirect('/dashboard?action=analytics')
class ScoresAggregator(object):
    """Aggregates scores statistics."""

    def __init__(self):
        # All data is stored as tuples keyed by the assessment type name.
        # Each tuple holds: (student_count, sum(score)).
        self.name_to_tuple = {}

    def visit(self, student):
        """Folds one student's scores into the running aggregates."""
        if not student.scores:
            return
        for name, score in transforms.loads(student.scores).items():
            count, score_sum = self.name_to_tuple.get(name, (0, 0))
            self.name_to_tuple[name] = (count + 1, score_sum + float(score))
class EnrollmentAggregator(object):
    """Aggregates enrollment statistics."""

    def __init__(self):
        self.enrolled = 0
        self.unenrolled = 0

    def visit(self, student):
        """Counts *student* into the enrolled or unenrolled bucket."""
        bucket = 'enrolled' if student.is_enrolled else 'unenrolled'
        setattr(self, bucket, getattr(self, bucket) + 1)
class ComputeStudentStats(jobs.DurableJob):
    """A job that computes student statistics."""

    def run(self):
        """Computes student statistics."""
        enrollment = EnrollmentAggregator()
        scores = ScoresAggregator()

        def map_fn(student):
            # Feed each student through both aggregators in a single pass.
            enrollment.visit(student)
            scores.visit(student)

        mapper = utils.QueryMapper(
            Student.all(), batch_size=500, report_every=1000)
        mapper.run(map_fn)

        return {
            'enrollment': {
                'enrolled': enrollment.enrolled,
                'unenrolled': enrollment.unenrolled},
            'scores': scores.name_to_tuple}
class DashboardRegistry(object):
    """Holds registered handlers that produce HTML code for the dashboard."""

    analytics_handlers = []

    @classmethod
    def add_custom_analytics_section(cls, handler):
        """Adds handlers that provide additional data for the Analytics page."""
        if handler in cls.analytics_handlers:
            return
        # 'enrollment' and 'scores' are reserved by the built-in analytics.
        reserved_names = set(h.name for h in cls.analytics_handlers)
        reserved_names.update(('enrollment', 'scores'))
        if handler.name in reserved_names:
            raise Exception('Stats handler name %s is being duplicated.'
                            % handler.name)
        cls.analytics_handlers.append(handler)
custom_module = None


def register_module():
    """Registers this module in the registry."""
    global custom_module
    custom_module = custom_modules.Module(
        'Course Dashboard',
        'A set of pages for managing Course Builder course.',
        [], [('/dashboard', DashboardHandler)])
    return custom_module
|
CSavvy/coursebuilder
|
modules/dashboard/dashboard.py
|
Python
|
apache-2.0
| 29,214
|
[
"VisIt"
] |
ca9d83dc208d46de843478f722139f3daecae9a76c795af6d06150613019e6c8
|
import numpy as np
import random
import nest
import nest.voltage_trace as plot
import nest.raster_plot
import matplotlib.pyplot as plt
import pylab as pl
nest.ResetKernel()
nest.SetKernelStatus(
    {'overwrite_files': True})  # set to True to permit overwriting

# Synaptic / input parameters.
delay = 1.  # the delay in ms
w_ex = 45.
g = 3.83
# NOTE(review): w_in is computed but never used below — all Poisson inputs
# (including pg_in) are connected with w_ex. Confirm whether the inhibitory
# connections were meant to use w_in.
w_in = -w_ex * g
K = 10000
f_ex = 0.8
K_ex = f_ex * K
K_in = (1.0 - f_ex) * K
nu_ex = 10.0  # 2.
nu_in = 10.0  # 2.

pg_ex = nest.Create("poisson_generator")
nest.SetStatus(pg_ex, {"rate": K_ex * nu_ex})
pg_in = nest.Create("poisson_generator")
# BUG FIX: the rate was previously set on pg_ex a second time, leaving the
# inhibitory generator pg_in at its default rate of 0 spikes/s.
nest.SetStatus(pg_in, {"rate": K_in * nu_in})

sd = nest.Create("spike_detector")
nest.SetStatus(sd, {
    "label": "spikes",
    "withtime": True,
    "withgid": True,
    "to_file": True,
})

# Two Hodgkin-Huxley neurons connected by a serotonin-modulated synapse,
# plus one serotonergic neuron driving the volume transmitter.
neuron1 = nest.Create("hh_psc_alpha")
neuron2 = nest.Create("hh_psc_alpha")
sero_neuron = nest.Create("hh_psc_alpha")
nest.SetStatus(neuron1,
               {"tau_syn_ex": 0.3, "tau_syn_in": 0.3, "tau_minus": 20.0})
nest.SetStatus(neuron2,
               {"tau_syn_ex": 0.3, "tau_syn_in": 0.3, "tau_minus": 20.0})

vt = nest.Create("volume_transmitter")

# External Poisson drive to all three neurons.
nest.Connect(pg_ex, neuron1, syn_spec={'weight': w_ex, 'delay': delay})
nest.Connect(pg_ex, neuron2, syn_spec={'weight': w_ex, 'delay': delay})
nest.Connect(pg_ex, sero_neuron, syn_spec={'weight': w_ex, 'delay': delay})
nest.Connect(pg_in, neuron1, syn_spec={'weight': w_ex, 'delay': delay})
nest.Connect(pg_in, neuron2, syn_spec={'weight': w_ex, 'delay': delay})
nest.Connect(pg_in, sero_neuron, syn_spec={'weight': w_ex, 'delay': delay})

nest.Connect(neuron1, sd)
nest.Connect(neuron2, sd)
nest.Connect(sero_neuron, sd)

nest.CopyModel("stdp_serotonin_synapse", "sero",
               {"vt": vt[0], "weight": 35., "n": .1, "delay": delay})
nest.CopyModel("static_synapse", "static", {"delay": delay})
nest.Connect(sero_neuron, vt, model="static")
nest.Connect(neuron1, neuron2, model="sero")

# Init and connect voltmeter to see membrane potential of serotonin neuron
voltmeter = nest.Create('voltmeter', 1, {'withgid': True})
nest.Connect(voltmeter, sero_neuron)

dt = 10
T = 150
time = []
sero_dyn = []
results_folder = ""

if nest.GetStatus(neuron2)[0]['local']:
    sum_time = 0
    for t in range(0, T + dt, dt):
        # NOTE(review): Simulate(t) advances the kernel by t ms on each
        # iteration (0, 10, 20, ...), so intervals grow; confirm this is
        # the intended sampling scheme.
        nest.Simulate(t)
        sum_time += t
        # Record the serotonin concentration 'n' on the modulated synapse.
        conns = nest.GetConnections(neuron1, synapse_model="sero")
        n = nest.GetStatus(conns)[0]['n']
        time.append(sum_time)
        sero_dyn.append(n)

    nest.raster_plot.from_device(sd, hist=True, hist_binwidth=100.)
    pl.savefig(results_folder + "spikes_" + str(t) + ".png")
    pl.close()

    plot.from_device(voltmeter, timeunit="s")
    pl.savefig(results_folder + "voltage_" + str(t) + ".png")
    pl.close()

    plt.plot(time, sero_dyn)
    plt.xlabel('Time (ms)')
    plt.title('Serotonin concentration dynamics')
    plt.savefig(results_folder + "sero_dynamic_" + str(t) + ".png")
|
research-team/NEUCOGAR
|
NEST/cube/noradrenaline/tests/test_sero.py
|
Python
|
gpl-2.0
| 2,782
|
[
"NEURON"
] |
bbe6bff4ba8ea29178d699e382e825a022e7253ee88f5a4558e7b0f9819c9a95
|
"""
Topological fingerprints for macromolecular structures.
"""
import numpy as np
import logging
import itertools
from deepchem.utils.hash_utils import hash_ecfp
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.rdkit_utils import load_complex
from deepchem.utils.hash_utils import vectorize
from deepchem.utils.voxel_utils import voxelize
from deepchem.utils.voxel_utils import convert_atom_to_voxel
from deepchem.utils.rdkit_utils import compute_all_ecfp
from deepchem.utils.rdkit_utils import compute_contact_centroid
from deepchem.utils.rdkit_utils import MoleculeLoadException
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import subtract_centroid
from typing import Tuple, Dict, List
logger = logging.getLogger(__name__)
def featurize_contacts_ecfp(
        frag1: Tuple,
        frag2: Tuple,
        pairwise_distances: np.ndarray = None,
        cutoff: float = 4.5,
        ecfp_degree: int = 2) -> Tuple[Dict[int, str], Dict[int, str]]:
    """Computes ECFP dicts for pairwise interaction between two molecular fragments.

    Parameters
    ----------
    frag1: Tuple
        A tuple of (coords, mol) returned by `load_molecule`.
    frag2: Tuple
        A tuple of (coords, mol) returned by `load_molecule`.
    pairwise_distances: np.ndarray
        Array of pairwise fragment-fragment distances (Angstroms). Computed
        from the fragment coordinates when omitted.
    cutoff: float
        Cutoff distance for contact consideration
    ecfp_degree: int
        ECFP radius

    Returns
    -------
    Tuple of dictionaries of ECFP contact fragments
    """
    if pairwise_distances is None:
        pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
    # Rows of the distance matrix index frag1 atoms, columns index frag2
    # atoms; nonzero entries of the mask mark pairs inside the cutoff.
    frag1_rows, frag2_cols = np.nonzero(pairwise_distances < cutoff)
    frag1_contact_atoms = set(int(idx) for idx in frag1_rows.tolist())
    frag2_contact_atoms = set(int(idx) for idx in frag2_cols.tolist())
    return (
        compute_all_ecfp(
            frag1[1], indices=frag1_contact_atoms, degree=ecfp_degree),
        compute_all_ecfp(
            frag2[1], indices=frag2_contact_atoms, degree=ecfp_degree))
class ContactCircularFingerprint(ComplexFeaturizer):
    """Compute (Morgan) fingerprints near contact points of macromolecular complexes.

    Given a macromolecular complex made up of multiple constituent
    molecules, first find the contact points where atoms from different
    molecules come close to one another. For atoms within these "contact
    regions," compute radial "ECFP" fragments — sub-molecules centered at
    atoms in the contact region.

    For a macromolecular complex, returns a vector of shape `(2*size,)`
    """

    def __init__(self, cutoff: float = 4.5, radius: int = 2, size: int = 8):
        """
        Parameters
        ----------
        cutoff: float (default 4.5)
            Distance cutoff in angstroms for molecules in complex.
        radius: int, optional (default 2)
            Fingerprint radius.
        size: int, optional (default 8)
            Length of generated bit vector.
        """
        self.cutoff = cutoff
        self.radius = radius
        self.size = size

    def _featurize(self, datapoint, **kwargs):
        """
        Compute featurization for a molecular complex

        Parameters
        ----------
        datapoint: Tuple[str, str]
            Filenames for molecule and protein.
        """
        if 'complex' in kwargs:
            datapoint = kwargs.get("complex")
            raise DeprecationWarning(
                'Complex is being phased out as a parameter, please pass "datapoint" instead.'
            )
        try:
            fragments = load_complex(datapoint, add_hydrogens=False)
        except MoleculeLoadException:
            logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
            return None

        # Accumulate one fingerprint vector per fragment of each contacting
        # pair, then flatten the whole collection into a single vector.
        feature_vectors = []
        for frag_a, frag_b in itertools.combinations(fragments, 2):
            distances = compute_pairwise_distances(frag_a[0], frag_b[0])
            ecfp_dicts = featurize_contacts_ecfp(
                frag_a, frag_b, distances,
                cutoff=self.cutoff, ecfp_degree=self.radius)
            feature_vectors.extend(
                vectorize(hash_ecfp, feature_dict=ecfp_dict, size=self.size)
                for ecfp_dict in ecfp_dicts)
        return np.concatenate(feature_vectors)
class ContactCircularVoxelizer(ComplexFeaturizer):
  """Computes ECFP fingerprints on a voxel grid.

  Given a macromolecular complex made up of multiple
  constituent molecules, first compute the contact points where
  atoms from different molecules come close to one another. For
  atoms within "contact regions," compute radial "ECFP"
  fragments which are sub-molecules centered at atoms in the
  contact region. Localize these ECFP fingerprints at the voxel
  in which they originated.

  Featurizes a macromolecular complex into a tensor of shape
  `(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)` where
  `voxels_per_edge = int(box_width/voxel_width)`. If `flatten==True`,
  then returns a flattened version of this tensor of length
  `size*voxels_per_edge**3`
  """

  def __init__(self,
               cutoff: float = 4.5,
               radius: int = 2,
               size: int = 8,
               box_width: float = 16.0,
               voxel_width: float = 1.0,
               flatten: bool = False):
    """
    Parameters
    ----------
    cutoff: float (default 4.5)
      Distance cutoff in angstroms for molecules in complex.
    radius : int, optional (default 2)
      Fingerprint radius.
    size : int, optional (default 8)
      Length of generated bit vector.
    box_width: float, optional (default 16.0)
      Size of a box in which voxel features are calculated. Box
      is centered on a ligand centroid.
    voxel_width: float, optional (default 1.0)
      Size of a 3D voxel in a grid.
    flatten: bool, optional (default False)
      If True, then returns a flat feature vector rather than voxel grid. This
      feature vector is constructed by flattening the usual voxel grid.
    """
    self.cutoff = cutoff
    self.radius = radius
    self.size = size
    self.box_width = box_width
    self.voxel_width = voxel_width
    # Number of voxels along each edge of the (cubic) grid.
    self.voxels_per_edge = int(self.box_width / self.voxel_width)
    self.flatten = flatten

  def _featurize(self, complex):
    """
    Compute featurization for a molecular complex

    Parameters
    ----------
    complex: Tuple[str, str]
      Filenames for molecule and protein.
    """
    try:
      fragments = load_complex(complex, add_hydrogens=False)
    except MoleculeLoadException:
      logger.warning("This molecule cannot be loaded by Rdkit. Returning None")
      return None
    pairwise_features: List[np.ndarray] = []
    # We compute pairwise contact fingerprints.
    # The voxel box is centered on the centroid of inter-fragment contacts.
    centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)
    for (frag1, frag2) in itertools.combinations(fragments, 2):
      distances = compute_pairwise_distances(frag1[0], frag2[0])
      # Shift both fragments into the box's (centroid-origin) frame.
      frag1_xyz = subtract_centroid(frag1[0], centroid)
      frag2_xyz = subtract_centroid(frag2[0], centroid)
      xyzs = [frag1_xyz, frag2_xyz]
      # Voxelize each fragment's contact-ECFP dict separately, then sum the
      # two voxel grids into a single grid for this fragment pair.
      pairwise_features.append(
          sum([
              voxelize(
                  convert_atom_to_voxel,
                  xyz,
                  self.box_width,
                  self.voxel_width,
                  hash_function=hash_ecfp,
                  feature_dict=ecfp_dict,
                  nb_channel=self.size) for xyz, ecfp_dict in zip(
                      xyzs,
                      featurize_contacts_ecfp(
                          frag1,
                          frag2,
                          distances,
                          cutoff=self.cutoff,
                          ecfp_degree=self.radius))
          ]))
    if self.flatten:
      return np.concatenate(
          [features.flatten() for features in pairwise_features])
    else:
      # Features are of shape (voxels_per_edge, voxels_per_edge,
      # voxels_per_edge, num_feat) so we should concatenate on the last
      # axis.
      return np.concatenate(pairwise_features, axis=-1)
def compute_all_sybyl(mol, indices=None):
  """Computes Sybyl atom types for atoms in molecule.

  Placeholder: no implementation exists yet, so every call raises
  `NotImplementedError`.
  """
  raise NotImplementedError("This function is not implemented yet")
def featurize_binding_pocket_sybyl(protein_xyz,
                                   protein,
                                   ligand_xyz,
                                   ligand,
                                   pairwise_distances=None,
                                   cutoff=7.0):
  """Computes Sybyl dicts for ligand and binding pocket of the protein.

  Parameters
  ----------
  protein_xyz: np.ndarray
    Of shape (N_protein_atoms, 3)
  protein: Rdkit Molecule
    Contains more metadata.
  ligand_xyz: np.ndarray
    Of shape (N_ligand_atoms, 3)
  ligand: Rdkit Molecule
    Contains more metadata
  pairwise_distances: np.ndarray
    Array of pairwise protein-ligand distances (Angstroms)
  cutoff: float
    Cutoff distance for contact consideration.
  """
  if pairwise_distances is None:
    pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
  # Row indices of protein atoms with at least one ligand atom within cutoff.
  contact_rows = np.nonzero(pairwise_distances < cutoff)[0]
  pocket_atoms = set(int(idx) for idx in contact_rows.tolist())
  protein_sybyl_dict = compute_all_sybyl(protein, indices=pocket_atoms)
  # The ligand is typed in full; no index restriction.
  ligand_sybyl_dict = compute_all_sybyl(ligand)
  return (protein_sybyl_dict, ligand_sybyl_dict)
|
peastman/deepchem
|
deepchem/feat/complex_featurizers/contact_fingerprints.py
|
Python
|
mit
| 9,618
|
[
"RDKit"
] |
5d05b79c999f02d198adbfece570889b00cd47065960b7c1519d21e27db2293c
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for clustering and thresholding timeseries to
produces event triggers
"""
from __future__ import absolute_import
import lal, numpy, copy, os.path
from pycbc import WEAVE_FLAGS
from pycbc.types import Array
from pycbc.types import convert_to_process_params_dict
from pycbc.scheme import schemed
from pycbc.detector import Detector
from . import coinc
@schemed("pycbc.events.threshold_")
def threshold(series, value):
    """Return list of values and indices values over threshold in series.

    Note: the body here is a stub; the `schemed` decorator replaces it
    with a scheme-specific implementation from `pycbc.events.threshold_*`.
    """
    return None, None
@schemed("pycbc.events.threshold_")
def threshold_only(series, value):
    """Return list of values and indices whose values in series are
    larger (in absolute value) than value

    Note: the body here is a stub; the `schemed` decorator replaces it
    with a scheme-specific implementation from `pycbc.events.threshold_*`.
    """
    return None, None
#FIXME: This should be under schemed, but I don't understand that yet!
def threshold_real_numpy(series, value):
    """Return (indices, values) of samples in `series` that exceed `value`.

    Pure-numpy implementation operating directly on `series.data`; unlike
    the schemed variants, the comparison is a plain `>` (no absolute value).
    """
    data = series.data
    above = numpy.flatnonzero(data > value)
    return above, data[above]
@schemed("pycbc.events.threshold_")
def threshold_and_cluster(series, threshold, window):
    """Return list of values and indices values over threshold in series.

    Note: the body here is a stub; the `schemed` decorator replaces it
    with a scheme-specific implementation from `pycbc.events.threshold_*`.
    """
    return
@schemed("pycbc.events.threshold_")
def _threshold_cluster_factory(series):
    # Stub: the `schemed` decorator substitutes the scheme-specific
    # factory, which returns the class implementing threshold+cluster
    # for the active processing scheme.
    return
class ThresholdCluster(object):
    """Create a threshold and cluster engine

    Parameters
    -----------
    series : complex64
        Input pycbc.types.Array (or subclass); it will be searched for
        points above threshold that are then clustered
    """
    def __new__(cls, *args, **kwargs):
        # Delegate construction to the scheme-specific class chosen by the
        # factory; instances are never of type ThresholdCluster itself.
        real_cls = _threshold_cluster_factory(*args, **kwargs)
        return real_cls(*args, **kwargs) # pylint:disable=not-callable
# The class below should serve as the parent for all schemed classes.
# The intention is that this class serves simply as the location for
# all documentation of the class and its methods, though that is not
# yet implemented. Perhaps something along the lines of:
#
# http://stackoverflow.com/questions/2025562/inherit-docstrings-in-python-class-inheritance
#
# will work? Is there a better way?
class _BaseThresholdCluster(object):
    """Documentation-only parent for the schemed threshold/cluster classes;
    concrete implementations override `threshold_and_cluster`.
    """
    def threshold_and_cluster(self, threshold, window):
        """
        Threshold and cluster the memory specified at instantiation with the
        threshold specified at creation and the window size specified at creation.

        Parameters
        -----------
        threshold : float32
            The minimum absolute value of the series given at object initialization
            to return when thresholding and clustering.
        window : uint32
            The size (in number of samples) of the window over which to cluster

        Returns:
        --------
        event_vals : complex64
            Numpy array, complex values of the clustered events
        event_locs : uint32
            Numpy array, indices into series of location of events
        """
        pass
def findchirp_cluster_over_window(times, values, window_length):
    """ Reduce the events by clustering over a window using
    the FindChirp clustering algorithm

    Parameters
    -----------
    times: Array
        The sample indices (times) of the SNR values, assumed time-ordered
    values: Array
        The (possibly complex) SNR values, one per entry of `times`
    window_length: int
        The size of the window in integer samples. Must be positive.

    Returns
    -------
    indices: Array
        Indices into `times`/`values` of the surviving (clustered) events
    """
    # Pure-Python/numpy replacement for the original weave.inline C kernel:
    # `weave` is Python-2 only and no longer available. The loop below is a
    # line-for-line translation of that C code.
    assert window_length > 0, 'Clustering window length is not positive'

    times = numpy.asarray(times).astype(int)
    if len(times) == 0:
        return numpy.array([], dtype=int)
    absvalues = numpy.abs(values)

    # Walk the time-ordered triggers, keeping within each cluster only the
    # trigger with the largest |value|; a new cluster starts whenever the
    # gap to the current cluster's kept trigger exceeds window_length.
    indices = [0]
    for i in range(1, len(times)):
        if times[i] - times[indices[-1]] > window_length:
            indices.append(i)
        elif absvalues[i] > absvalues[indices[-1]]:
            indices[-1] = i
    return numpy.array(indices, dtype=int)
def cluster_reduce(idx, snr, window_size):
    """ Reduce the events by clustering over a window

    Parameters
    -----------
    idx: Array
        The list of indices of the SNR values
    snr: Array
        The list of SNR values
    window_size: int
        The size of the window in integer samples.

    Returns
    -------
    idx: Array
        The reduced list of indices of the SNR values
    snr: Array
        The reduced list of SNR values
    """
    ind = findchirp_cluster_over_window(idx, snr, window_size)
    return idx.take(ind), snr.take(ind)
def newsnr(snr, reduced_x2, q=6., n=2.):
    """Calculate the re-weighted SNR statistic ('newSNR') from given SNR and
    reduced chi-squared values. See http://arxiv.org/abs/1208.3491 for
    definition. Previous implementation in glue/ligolw/lsctables.py
    """
    nsnr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
    rchisq = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
    # newSNR only down-weights triggers whose reduced chisq exceeds unity;
    # below that threshold it is identical to the SNR.
    noisy = rchisq > 1.
    nsnr[noisy] *= (0.5 * (1. + rchisq[noisy] ** (q / n))) ** (-1. / q)
    # Scalar in, scalar out; array in, array out.
    if hasattr(snr, '__len__'):
        return nsnr
    return nsnr[0]
def newsnr_sgveto(snr, bchisq, sgchisq):
    """ Combined SNR derived from NewSNR and Sine-Gaussian Chisq"""
    combined = numpy.array(newsnr(snr, bchisq), ndmin=1)
    sgchisq = numpy.array(sgchisq, ndmin=1)
    flagged = numpy.array(sgchisq > 4, ndmin=1)
    # Down-weight triggers whose sine-Gaussian chisq exceeds 4.
    if len(flagged) > 0:
        combined[flagged] = combined[flagged] / (sgchisq[flagged] / 4.0) ** 0.5
    # Scalar in, scalar out; array in, array out.
    if hasattr(snr, '__len__'):
        return combined
    return combined[0]
def effsnr(snr, reduced_x2, fac=250.):
    """Calculate the effective SNR statistic. See (S5y1 paper) for definition.
    Previous implementation in glue/ligolw/lsctables.py
    """
    snr_arr = numpy.array(snr, ndmin=1, dtype=numpy.float64)
    rchisq_arr = numpy.array(reduced_x2, ndmin=1, dtype=numpy.float64)
    esnr = snr_arr / (1 + snr_arr ** 2 / fac) ** 0.25 / rchisq_arr ** 0.25
    # Scalar in, scalar out; array in, array out.
    if hasattr(snr, '__len__'):
        return esnr
    return esnr[0]
class EventManager(object):
    """Accumulate matched-filter triggers template by template and write the
    final set of events, plus search metadata, to an HDF5 file.

    Events live in a numpy structured array whose fields are the supplied
    columns plus `template_id`, which indexes into `self.template_params`.
    """

    def __init__(self, opt, column, column_types, **kwds):
        """
        Parameters
        ----------
        opt :
            Parsed command-line options; attributes such as `sample_rate`,
            `gps_start_time` and the chisq/psd options are read by the
            methods below.
        column : list of str
            Names of the per-event columns.
        column_types : list
            Dtype for each column, in the same order as `column`.
        """
        self.opt = opt
        self.global_params = kwds

        # Renamed loop variables: the original reused `column` and shadowed
        # the parameter.
        self.event_dtype = [('template_id', int)]
        for col_name, col_type in zip(column, column_types):
            self.event_dtype.append((col_name, col_type))

        self.events = numpy.array([], dtype=self.event_dtype)
        self.template_params = []
        self.template_index = -1
        self.template_events = numpy.array([], dtype=self.event_dtype)
        self.write_performance = False

    @classmethod
    def from_multi_ifo_interface(cls, opt, ifo, column, column_types, **kwds):
        """
        To use this for a single ifo from the multi ifo interface requires
        some small fixing of the opt structure. This does that. As we edit the
        opt structure the process_params table will not be correct.
        """
        opt = copy.deepcopy(opt)
        opt_dict = vars(opt)
        # Collapse every per-ifo dict option down to this ifo's value.
        for arg, value in opt_dict.items():
            if isinstance(value, dict):
                setattr(opt, arg, getattr(opt, arg)[ifo])
        return cls(opt, column, column_types, **kwds)

    def chisq_threshold(self, value, num_bins, delta=0):
        """Remove events whose SNR-weighted chisq statistic exceeds `value`.

        NOTE(review): `num_bins` is unused; the dof comes from the per-event
        `chisq_dof` column -- confirm this is intended.
        """
        remove = []
        for i, event in enumerate(self.events):
            xi = event['chisq'] / (event['chisq_dof'] / 2 + 1 + delta
                                   * event['snr'].conj() * event['snr'])
            if xi > value:
                remove.append(i)
        self.events = numpy.delete(self.events, remove)

    def newsnr_threshold(self, threshold):
        """ Remove events with newsnr smaller than given threshold
        """
        if not self.opt.chisq_bins:
            raise RuntimeError('Chi-square test must be enabled in order to use newsnr threshold')
        remove = [i for i, e in enumerate(self.events)
                  if newsnr(abs(e['snr']), e['chisq'] / e['chisq_dof']) < threshold]
        self.events = numpy.delete(self.events, remove)

    def keep_near_injection(self, window, injections):
        """Keep only events within `window` seconds of an injection time."""
        from pycbc.events.veto import indices_within_times
        if len(self.events) == 0:
            return

        inj_time = numpy.array(injections.end_times())
        gpstime = self.events['time_index'].astype(numpy.float64)
        gpstime = gpstime / self.opt.sample_rate + self.opt.gps_start_time
        i = indices_within_times(gpstime, inj_time - window, inj_time + window)
        self.events = self.events[i]

    def keep_loudest_in_interval(self, window, num_keep):
        """Keep the `num_keep` loudest events (by newsnr) in each
        `window`-sample interval."""
        if len(self.events) == 0:
            return

        e = self.events
        stat = newsnr(abs(e['snr']), e['chisq'] / e['chisq_dof'])
        time = e['time_index']

        # Bin events by interval number, then keep the top num_keep per bin.
        wtime = (time / window).astype(numpy.int32)
        bins = numpy.unique(wtime)

        keep = []
        for b in bins:
            bloc = numpy.where((wtime == b))[0]
            bloudest = stat[bloc].argsort()[-num_keep:]
            keep.append(bloc[bloudest])
        keep = numpy.concatenate(keep)
        self.events = e[keep]

    def maximize_over_bank(self, tcolumn, column, window):
        """Keep, per time window, only the loudest event (largest |column|)
        across the whole template bank.

        Events are grouped by integer GPS second and by a nanosecond
        sub-window; within each group only the loudest survives.
        """
        if len(self.events) == 0:
            return

        self.events = numpy.sort(self.events, order=tcolumn)
        cvec = self.events[column]
        tvec = self.events[tcolumn]

        indices = []
        # This algorithm is confusing, but it is what lalapps_inspiral does
        # REMOVE ME!!!!!!!!!!!
        gps = tvec.astype(numpy.float64) / self.opt.sample_rate + self.opt.gps_start_time
        gps_sec = numpy.floor(gps)
        gps_nsec = (gps - gps_sec) * 1e9

        wnsec = int(window * 1e9 / self.opt.sample_rate)
        # Floor division: this code predates Python 3, where plain `/` would
        # yield floats here and change the window grouping.
        win = gps_nsec.astype(int) // wnsec

        indices.append(0)
        # `xrange` replaced by `range` for Python 3 compatibility.
        for i in range(len(tvec)):
            if gps_sec[i] == gps_sec[indices[-1]] and win[i] == win[indices[-1]]:
                if abs(cvec[i]) > abs(cvec[indices[-1]]):
                    indices[-1] = i
            else:
                indices.append(i)

        self.events = numpy.take(self.events, indices)

    def add_template_events(self, columns, vectors):
        """ Add a vector indexed """
        # initialize with zeros - since vectors can be None, look for the
        # first one that isn't
        new_events = None
        for v in vectors:
            if v is not None:
                new_events = numpy.zeros(len(v), dtype=self.event_dtype)
                break
        # they shouldn't all be None
        assert new_events is not None
        new_events['template_id'] = self.template_index
        for c, v in zip(columns, vectors):
            if v is not None:
                if isinstance(v, Array):
                    new_events[c] = v.numpy()
                else:
                    new_events[c] = v
        self.template_events = numpy.append(self.template_events, new_events)

    def cluster_template_events(self, tcolumn, column, window_size):
        """ Cluster the internal events over the named column
        """
        cvec = self.template_events[column]
        tvec = self.template_events[tcolumn]
        indices = findchirp_cluster_over_window(tvec, cvec, window_size)
        self.template_events = numpy.take(self.template_events, indices)

    def new_template(self, **kwds):
        """Start collecting events for a new template described by `kwds`."""
        self.template_params.append(kwds)
        self.template_index += 1

    def add_template_params(self, **kwds):
        """Attach extra metadata to the current template."""
        self.template_params[-1].update(kwds)

    def finalize_template_events(self):
        """Move the current template's events into the global event list."""
        self.events = numpy.append(self.events, self.template_events)
        self.template_events = numpy.array([], dtype=self.event_dtype)

    def make_output_dir(self, outname):
        """Create the directory component of `outname` if it is missing."""
        path = os.path.dirname(outname)
        if path != '':
            if not os.path.exists(path) and path is not None:
                os.makedirs(path)

    def save_performance(self, ncores, nfilters, ntemplates, run_time,
                         setup_time):
        """
        Calls variables from pycbc_inspiral to be used in a timing calculation
        """
        self.run_time = run_time
        self.setup_time = setup_time
        self.ncores = ncores
        self.nfilters = nfilters
        self.ntemplates = ntemplates
        self.write_performance = True

    def write_events(self, outname):
        """ Write the found events to a sngl inspiral table
        """
        self.make_output_dir(outname)

        if '.hdf' in outname:
            self.write_to_hdf(outname)
        else:
            raise ValueError('Cannot write to this format')

    def write_to_hdf(self, outname):
        """Write events and search metadata to HDF5, grouped under the
        two-character detector prefix of the channel name."""
        class fw(object):
            # Helper: `fw[name] = data` creates a compressed dataset
            # `<prefix>/<name>` in the output file.
            def __init__(self, name, prefix):
                import h5py
                self.f = h5py.File(name, 'w')
                self.prefix = prefix

            def __setitem__(self, name, data):
                col = self.prefix + '/' + name
                self.f.create_dataset(col, data=data,
                                      compression='gzip',
                                      compression_opts=9,
                                      shuffle=True)

        self.events.sort(order='template_id')
        th = numpy.array([p['tmplt'].template_hash for p in self.template_params])
        tid = self.events['template_id']
        f = fw(outname, self.opt.channel_name[0:2])
        if len(self.events):
            f['snr'] = abs(self.events['snr'])
            try:
                # Precessing
                f['u_vals'] = self.events['u_vals']
                f['coa_phase'] = self.events['coa_phase']
                f['hplus_cross_corr'] = self.events['hplus_cross_corr']
            except Exception:
                # Not precessing
                f['coa_phase'] = numpy.angle(self.events['snr'])
            f['chisq'] = self.events['chisq']
            f['bank_chisq'] = self.events['bank_chisq']
            f['bank_chisq_dof'] = self.events['bank_chisq_dof']
            f['cont_chisq'] = self.events['cont_chisq']
            f['end_time'] = self.events['time_index'] / float(self.opt.sample_rate) + self.opt.gps_start_time
            try:
                # Precessing
                template_sigmasq_plus = numpy.array([t['sigmasq_plus'] for t in self.template_params], dtype=numpy.float32)
                f['sigmasq_plus'] = template_sigmasq_plus[tid]
                template_sigmasq_cross = numpy.array([t['sigmasq_cross'] for t in self.template_params], dtype=numpy.float32)
                f['sigmasq_cross'] = template_sigmasq_cross[tid]
                # FIXME: I want to put something here, but I haven't yet
                # figured out what it should be. I think we would also
                # need information from the plus and cross correlation
                # (both real and imaginary(?)) to get this.
                f['sigmasq'] = template_sigmasq_plus[tid]
            except Exception:
                # Not precessing
                template_sigmasq = numpy.array([t['sigmasq'] for t in self.template_params], dtype=numpy.float32)
                f['sigmasq'] = template_sigmasq[tid]

            template_durations = [p['tmplt'].template_duration for p in self.template_params]
            f['template_duration'] = numpy.array(template_durations, dtype=numpy.float32)[tid]

            # FIXME: Can we get this value from the autochisq instance?
            cont_dof = self.opt.autochi_number_points
            if self.opt.autochi_onesided is None:
                cont_dof = cont_dof * 2
            if self.opt.autochi_two_phase:
                cont_dof = cont_dof * 2
            if self.opt.autochi_max_valued_dof:
                cont_dof = self.opt.autochi_max_valued_dof
            f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(self.events))

            if 'chisq_dof' in self.events.dtype.names:
                # NOTE(review): `/` here predates Python 3 and the column
                # dtype is set by the caller -- confirm true vs floor
                # division is intended.
                f['chisq_dof'] = self.events['chisq_dof'] / 2 + 1
            else:
                f['chisq_dof'] = numpy.zeros(len(self.events))

            f['template_hash'] = th[tid]

            if 'sg_chisq' in self.events.dtype.names:
                f['sg_chisq'] = self.events['sg_chisq']

            if self.opt.psdvar_short_segment is not None:
                f['psd_var_val'] = self.events['psd_var_val']

        if self.opt.trig_start_time:
            f['search/start_time'] = numpy.array([self.opt.trig_start_time])
            search_start_time = float(self.opt.trig_start_time)
        else:
            f['search/start_time'] = numpy.array([self.opt.gps_start_time + self.opt.segment_start_pad])
            search_start_time = float(self.opt.gps_start_time + self.opt.segment_start_pad)
        if self.opt.trig_end_time:
            f['search/end_time'] = numpy.array([self.opt.trig_end_time])
            search_end_time = float(self.opt.trig_end_time)
        else:
            f['search/end_time'] = numpy.array([self.opt.gps_end_time - self.opt.segment_end_pad])
            search_end_time = float(self.opt.gps_end_time - self.opt.segment_end_pad)

        if self.write_performance:
            self.analysis_time = search_end_time - search_start_time
            time_ratio = numpy.array([float(self.analysis_time) / float(self.run_time)])
            temps_per_core = float(self.ntemplates) / float(self.ncores)
            filters_per_core = float(self.nfilters) / float(self.ncores)
            f['search/templates_per_core'] = \
                numpy.array([float(temps_per_core) * float(time_ratio)])
            f['search/filter_rate_per_core'] = \
                numpy.array([filters_per_core / float(self.run_time)])
            f['search/setup_time_fraction'] = \
                numpy.array([float(self.setup_time) / float(self.run_time)])
            f['search/run_time'] = numpy.array([float(self.run_time)])

        if 'q_trans' in self.global_params:
            qtrans = self.global_params['q_trans']
            for key in qtrans:
                if key == 'qtiles':
                    for seg in qtrans[key]:
                        for q in qtrans[key][seg]:
                            f['qtransform/%s/%s/%s' % (key, seg, q)] = qtrans[key][seg][q]
                elif key == 'qplanes':
                    for seg in qtrans[key]:
                        f['qtransform/%s/%s' % (key, seg)] = qtrans[key][seg]

        if 'gating_info' in self.global_params:
            gating_info = self.global_params['gating_info']
            for gate_type in ['file', 'auto']:
                if gate_type in gating_info:
                    f['gating/' + gate_type + '/time'] = \
                        numpy.array([float(g[0]) for g in gating_info[gate_type]])
                    f['gating/' + gate_type + '/width'] = \
                        numpy.array([g[1] for g in gating_info[gate_type]])
                    f['gating/' + gate_type + '/pad'] = \
                        numpy.array([g[2] for g in gating_info[gate_type]])
class EventManagerMultiDet(EventManager):
    """Multi-detector variant of EventManager: keeps per-ifo template event
    buffers, performs simple two-ifo time coincidence, and writes one HDF5
    group per ifo."""
    def __init__(self, opt, ifos, column, column_types, psd=None, **kwargs):
        self.opt = opt
        self.ifos = ifos
        self.global_params = kwargs
        if psd is not None:
            self.global_params['psd'] = psd[ifos[0]]

        # The events array does not like holding the ifo as string,
        # so create a mapping dict and hold as an int
        self.ifo_dict = {}
        self.ifo_reverse = {}
        for i, ifo in enumerate(ifos):
            self.ifo_dict[ifo] = i
            self.ifo_reverse[i] = ifo

        self.event_dtype = [ ('template_id', int), ('event_id', int) ]
        for column, coltype in zip (column, column_types):
            self.event_dtype.append( (column, coltype) )

        self.events = numpy.array([], dtype=self.event_dtype)
        self.event_id_map = {}
        # Monotonic counter used to assign unique event_ids across ifos.
        self.event_index = 0
        self.template_params = []
        self.template_index = -1
        self.template_event_dict = {}
        self.coinc_list = []
        self.write_performance = False
        for ifo in ifos:
            self.template_event_dict[ifo] = numpy.array([],
                                                        dtype=self.event_dtype)

    def add_template_events_to_ifo(self, ifo, columns, vectors):
        """ Add a vector indexed """
        # Just call through to the standard function
        self.template_events = self.template_event_dict[ifo]
        self.add_template_events(columns, vectors)
        self.template_event_dict[ifo] = self.template_events
        self.template_events = None

    def cluster_template_events_single_ifo(self, tcolumn, column, window_size,
                                           ifo):
        """ Cluster the internal events over the named column
        """
        # Just call through to the standard function
        self.template_events = self.template_event_dict[ifo]
        self.cluster_template_events(tcolumn, column, window_size)
        self.template_event_dict[ifo] = self.template_events
        self.template_events = None

    def finalize_template_events(self, perform_coincidence=True,
                                 coinc_window=0.0):
        """Assign event ids, optionally do 2-ifo time coincidence, and move
        per-ifo template events into the global event list."""
        # Set ids
        for ifo in self.ifos:
            num_events = len(self.template_event_dict[ifo])
            new_event_ids = numpy.arange(self.event_index,
                                         self.event_index+num_events)
            self.template_event_dict[ifo]['event_id'] = new_event_ids
            self.event_index = self.event_index+num_events

        if perform_coincidence:
            if not len(self.ifos) == 2:
                err_msg = "Coincidence currently only supported for 2 ifos."
                raise ValueError(err_msg)
            ifo1 = self.ifos[0]
            ifo2 = self.ifos[1]
            end_times1 = self.template_event_dict[ifo1]['time_index'] /\
                float(self.opt.sample_rate[ifo1]) + self.opt.gps_start_time[ifo1]
            end_times2 = self.template_event_dict[ifo2]['time_index'] /\
                float(self.opt.sample_rate[ifo2]) + self.opt.gps_start_time[ifo2]
            light_travel_time = Detector(ifo1).light_travel_time_to_detector(\
                Detector(ifo2))
            coinc_window = coinc_window + light_travel_time
            # FIXME: Remove!!!
            # NOTE(review): this hard-coded 2 s window overrides the value
            # computed just above -- flagged for removal by the original
            # author.
            coinc_window = 2.0
            if len(end_times1) and len(end_times2):
                idx_list1, idx_list2, _ = \
                    coinc.time_coincidence(end_times1, end_times2,
                                           coinc_window)
                if len(idx_list1):
                    for idx1, idx2 in zip(idx_list1, idx_list2):
                        event1 = self.template_event_dict[ifo1][idx1]
                        event2 = self.template_event_dict[ifo2][idx2]
                        self.coinc_list.append((event1, event2))
        for ifo in self.ifos:
            self.events = numpy.append(self.events,
                                       self.template_event_dict[ifo])
            self.template_event_dict[ifo] = numpy.array([],
                                                        dtype=self.event_dtype)

    def write_events(self, outname):
        """ Write the found events to a sngl inspiral table
        """
        self.make_output_dir(outname)

        if '.hdf' in outname:
            self.write_to_hdf(outname)
        else:
            raise ValueError('Cannot write to this format')

    def write_to_hdf(self, outname):
        """Write each ifo's events to its own HDF5 group, plus per-ifo
        search metadata."""
        class fw(object):
            # Helper: `fw[name] = data` writes dataset `<prefix>/<name>`.
            # NOTE(review): `prefix` is not set in __init__; the caller
            # assigns `f.prefix = ifo` below before any item is written.
            def __init__(self, name):
                import h5py
                self.f = h5py.File(name, 'w')

            def __setitem__(self, name, data):
                col = self.prefix + '/' + name
                self.f.create_dataset(col, data=data,
                                      compression='gzip',
                                      compression_opts=9,
                                      shuffle=True)

        self.events.sort(order='template_id')
        th = numpy.array([p['tmplt'].template_hash for p in \
                          self.template_params])
        tid = self.events['template_id']
        f = fw(outname)
        for ifo in self.ifos:
            f.prefix = ifo
            ifo_events = numpy.array([e for e in self.events if \
                    e['ifo'] == self.ifo_dict[ifo]], dtype=self.event_dtype)
            if len(ifo_events):
                # NOTE(review): opt dicts below are keyed by this lowered/
                # truncated string (except for 'H1') -- confirm keys match.
                ifo_str = ifo.lower()[0] if ifo != 'H1' else ifo.lower()
                f['snr_%s' % ifo_str] = abs(ifo_events['snr'])
                try:
                    # Precessing
                    f['u_vals'] = ifo_events['u_vals']
                    f['coa_phase'] = ifo_events['coa_phase']
                    f['hplus_cross_corr'] = ifo_events['hplus_cross_corr']
                except Exception:
                    f['coa_phase'] = numpy.angle(ifo_events['snr'])
                f['chisq'] = ifo_events['chisq']
                f['bank_chisq'] = ifo_events['bank_chisq']
                f['bank_chisq_dof'] = ifo_events['bank_chisq_dof']
                f['cont_chisq'] = ifo_events['cont_chisq']
                f['end_time'] = ifo_events['time_index'] / \
                        float(self.opt.sample_rate[ifo_str]) + \
                        self.opt.gps_start_time[ifo_str]
                try:
                    # Precessing
                    template_sigmasq_plus = numpy.array([t['sigmasq_plus'] for t \
                            in self.template_params], dtype=numpy.float32)
                    f['sigmasq_plus'] = template_sigmasq_plus[tid]
                    template_sigmasq_cross = numpy.array([t['sigmasq_cross'] for t \
                            in self.template_params], dtype=numpy.float32)
                    f['sigmasq_cross'] = template_sigmasq_cross[tid]
                    # FIXME: I want to put something here, but I haven't yet
                    # figured out what it should be. I think we would also
                    # need information from the plus and cross correlation
                    # (both real and imaginary(?)) to get this.
                    f['sigmasq'] = template_sigmasq_plus[tid]
                except Exception:
                    # Not precessing
                    template_sigmasq = numpy.array([t['sigmasq'][ifo] for t in \
                            self.template_params], dtype=numpy.float32)
                    f['sigmasq'] = template_sigmasq[tid]

                template_durations = [p['tmplt'].template_duration for p in \
                        self.template_params]
                f['template_duration'] = numpy.array(template_durations, \
                        dtype=numpy.float32)[tid]

                # FIXME: Can we get this value from the autochisq instance?
                cont_dof = self.opt.autochi_number_points
                if self.opt.autochi_onesided is None:
                    cont_dof = cont_dof * 2
                #if self.opt.autochi_two_phase:
                #    cont_dof = cont_dof * 2
                #if self.opt.autochi_max_valued_dof:
                #    cont_dof = self.opt.autochi_max_valued_dof
                f['cont_chisq_dof'] = numpy.repeat(cont_dof, len(ifo_events))

                if 'chisq_dof' in ifo_events.dtype.names:
                    f['chisq_dof'] = ifo_events['chisq_dof'] / 2 + 1
                else:
                    f['chisq_dof'] = numpy.zeros(len(ifo_events))

                f['template_hash'] = th[tid]

                if self.opt.psdvar_short_segment is not None:
                    f['psd_var_val'] = ifo_events['psd_var_val']

            if self.opt.trig_start_time:
                f['search/start_time'] = numpy.array([\
                        self.opt.trig_start_time[ifo]], dtype=numpy.int32)
                search_start_time = float(self.opt.trig_start_time[ifo])
            else:
                f['search/start_time'] = numpy.array([\
                        self.opt.gps_start_time[ifo] + \
                        self.opt.segment_start_pad[ifo]], dtype=numpy.int32)
                search_start_time = float(self.opt.gps_start_time[ifo] + \
                                          self.opt.segment_start_pad[ifo])
            if self.opt.trig_end_time:
                f['search/end_time'] = numpy.array([\
                        self.opt.trig_end_time[ifo]], dtype=numpy.int32)
                search_end_time = float(self.opt.trig_end_time[ifo])
            else:
                f['search/end_time'] = numpy.array([self.opt.gps_end_time[ifo] \
                        - self.opt.segment_end_pad[ifo]], dtype=numpy.int32)
                search_end_time = float(self.opt.gps_end_time[ifo] - \
                                        self.opt.segment_end_pad[ifo])

            if self.write_performance:
                self.analysis_time = search_end_time - search_start_time
                time_ratio = numpy.array([float(self.analysis_time) / float(self.run_time)])
                temps_per_core = float(self.ntemplates) / float(self.ncores)
                filters_per_core = float(self.nfilters) / float(self.ncores)
                f['search/templates_per_core'] = \
                    numpy.array([float(temps_per_core) * float(time_ratio)])
                f['search/filter_rate_per_core'] = \
                    numpy.array([filters_per_core / float(self.run_time)])
                f['search/setup_time_fraction'] = \
                    numpy.array([float(self.setup_time) / float(self.run_time)])

            if 'gating_info' in self.global_params:
                gating_info = self.global_params['gating_info']
                for gate_type in ['file', 'auto']:
                    if gate_type in gating_info:
                        f['gating/' + gate_type + '/time'] = \
                                numpy.array([float(g[0]) for g in \
                                             gating_info[gate_type]])
                        f['gating/' + gate_type + '/width'] = \
                                numpy.array([g[1] for g in gating_info[gate_type]])
                        f['gating/' + gate_type + '/pad'] = \
                                numpy.array([g[2] for g in gating_info[gate_type]])
# Public API of this module.
__all__ = ['threshold_and_cluster', 'newsnr', 'effsnr', 'newsnr_sgveto',
           'findchirp_cluster_over_window',
           'threshold', 'cluster_reduce', 'ThresholdCluster',
           'threshold_real_numpy', 'threshold_only',
           'EventManager', 'EventManagerMultiDet']
|
soumide1102/pycbc
|
pycbc/events/events.py
|
Python
|
gpl-3.0
| 32,584
|
[
"Gaussian"
] |
8875549d97abd445bc95bd0c83d2bb26f3458fd527070c8a1894e00b7f0c8bbf
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration for the visit app: creates the Visit model and
    enforces one visit per client per date."""

    dependencies = [
        # Requires the Client model from the client app's initial migration.
        ('client', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Visit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('good', models.BooleanField(default=True)),
                ('date', models.DateField()),
                ('note', models.CharField(max_length=250, blank=True)),
                ('client', models.ForeignKey(to='client.Client')),
            ],
            options={
                # Visits are listed chronologically by default.
                'ordering': ['date'],
            },
            bases=(models.Model,),
        ),
        # A client can have at most one visit recorded per date.
        migrations.AlterUniqueTogether(
            name='visit',
            unique_together=set([('client', 'date')]),
        ),
    ]
|
dwagon/Scheduler
|
scheduler/visit/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 945
|
[
"VisIt"
] |
5eb51a27eaf72ec8252cd494ba3f40fef42d7f2adafa3adec9e9fcc2fbf14c8d
|
from owmeta_core.bundle import Bundle
from owmeta_core.context import Context

from owmeta.neuron import Neuron
from owmeta.worm import Worm

# Example script: open the OpenWorm data bundle and print one line per
# neuron-to-neuron connection (pre, post, count, type, class).
with Bundle('openworm/owmeta-data') as bnd:
    ctx = bnd(Context)(ident="http://openworm.org/data").stored
    # Extract the network object from the worm object.
    net = ctx(Worm).query().neuron_network()

    # Build a query expression over synapses, declaring which properties
    # should be loaded for each connection.
    syn = net.synapse.expr
    pre = syn.pre_cell
    post = syn.post_cell
    (pre | post).rdf_type(multiple=True)
    (pre | post).name()
    pre()
    post()
    syn.syntype()
    syn.synclass()
    syn.number()

    # Materialize the query results.
    connlist = syn.to_objects()

    conns = []
    for conn in connlist:
        # Keep only connections where both endpoints are typed as Neuron.
        if (Neuron.rdf_type in conn.pre_cell.rdf_type and
            Neuron.rdf_type in conn.post_cell.rdf_type):
            num = conn.number
            syntype = conn.syntype or ''
            synclass = conn.synclass or ''
            pre_name = conn.pre_cell.name
            post_name = conn.post_cell.name
            print(' '.join((pre_name, post_name, str(num), syntype, synclass)))
|
openworm/PyOpenWorm
|
examples/list_conns.py
|
Python
|
mit
| 1,002
|
[
"NEURON"
] |
11682f3e177b0843f8ad6e61fc8988a19751f72776623af4593752dc882dcc93
|
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from functools import partial
import csv
import logging
import smtplib
import StringIO

import analytics
import pytz
from boto.exception import BotoServerError  # this is a super-class of SESError and catches connection errors
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.core.mail.message import EmailMessage
from django.core.urlresolvers import reverse
from django.db import models
from django.db import transaction
from django.db.models import Sum
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel

from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from courseware.courses import get_course_by_id
from edxmako.shortcuts import render_to_string
from eventtracking import tracker
from microsite_configuration import microsite
from student.models import CourseEnrollment, UNENROLL_DONE
from util.query import use_read_replica_if_available
from verify_student.models import SoftwareSecurePhotoVerification
from xmodule.modulestore.django import modulestore
from xmodule_django.models import CourseKeyField
from .exceptions import (
    InvalidCartItem, PurchasedCallbackException, ItemAlreadyInCartException,
    AlreadyEnrolledInCourseException, CourseDoesNotExistException,
    MultipleCouponsNotAllowedException, RegCodeAlreadyExistException,
    ItemDoesNotExistAgainstRegCodeException
)
log = logging.getLogger("shoppingcart")
# Lifecycle states an Order can be in.  Each 2-tuple is (stored value,
# display value) in the shape Django's `choices` option expects.
ORDER_STATUSES = (
    # The user is selecting what he/she wants to purchase.
    ('cart', 'cart'),
    # The user has been sent to the external payment processor.
    # At this point, the order should NOT be modified.
    # If the user returns to the payment flow, he/she will start a new order.
    ('paying', 'paying'),
    # The user has successfully purchased the items in the order.
    ('purchased', 'purchased'),
    # The user's order has been refunded.
    ('refunded', 'refunded'),
)
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=C0103
class OrderTypes(object):
    """
    Enumerates the kinds of purchase an Order can represent: an individual
    ('personal') checkout or a bulk/business one.
    """
    PERSONAL = 'personal'
    BUSINESS = 'business'
    # Choices tuple in the (stored value, display value) shape Django expects.
    ORDER_TYPES = (
        (PERSONAL, PERSONAL),
        (BUSINESS, BUSINESS),
    )
class Order(models.Model):
    """
    This is the model for an order. Before purchase, an Order and its related OrderItems are used
    as the shopping cart.
    FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
    """
    user = models.ForeignKey(User, db_index=True)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
    purchase_time = models.DateTimeField(null=True, blank=True)
    refunded_time = models.DateTimeField(null=True, blank=True)
    # Now we store data needed to generate a reasonable receipt
    # These fields only make sense after the purchase
    bill_to_first = models.CharField(max_length=64, blank=True)
    bill_to_last = models.CharField(max_length=64, blank=True)
    bill_to_street1 = models.CharField(max_length=128, blank=True)
    bill_to_street2 = models.CharField(max_length=128, blank=True)
    bill_to_city = models.CharField(max_length=64, blank=True)
    bill_to_state = models.CharField(max_length=8, blank=True)
    bill_to_postalcode = models.CharField(max_length=16, blank=True)
    bill_to_country = models.CharField(max_length=64, blank=True)
    bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
    bill_to_cardtype = models.CharField(max_length=32, blank=True)
    # a JSON dump of the CC processor response, for completeness
    processor_reply_dump = models.TextField(blank=True)
    # bulk purchase registration code workflow billing details
    company_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_name = models.CharField(max_length=255, null=True, blank=True)
    company_contact_email = models.CharField(max_length=255, null=True, blank=True)
    recipient_name = models.CharField(max_length=255, null=True, blank=True)
    recipient_email = models.CharField(max_length=255, null=True, blank=True)
    customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
    order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
    @classmethod
    def get_cart_for_user(cls, user):
        """
        Always use this to preserve the property that at most 1 order per user has status = 'cart'
        """
        # find the newest element in the db
        try:
            cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
        except ObjectDoesNotExist:
            # if nothing exists in the database, create a new cart
            cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
        return cart_order
    @classmethod
    def user_cart_has_items(cls, user, item_types=None):
        """
        Returns true if the user (anonymous user ok) has
        a cart with items in it. (Which means it should be displayed.
        If a item_type is passed in, then we check to see if the cart has at least one of
        those types of OrderItems
        """
        if not user.is_authenticated():
            return False
        cart = cls.get_cart_for_user(user)
        if not item_types:
            # check to see if the cart has at least some item in it
            return cart.has_items()
        else:
            # if the caller is explicitly asking to check for particular types
            for item_type in item_types:
                if cart.has_items(item_type):
                    return True
            return False
    @property
    def total_cost(self):
        """
        Return the total cost of the cart. If the order has been purchased, returns total of
        all purchased and not refunded items.
        """
        # Items are denormalized with the same status as the order, so filtering
        # on self.status selects exactly this order's live line items.
        return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101
    def has_items(self, item_type=None):
        """
        Does the cart have any items in it?
        If an item_type is passed in then we check to see if there are any items of that class type
        """
        if not item_type:
            return self.orderitem_set.exists() # pylint: disable=E1101
        else:
            items = self.orderitem_set.all().select_subclasses() # pylint: disable=E1101
            for item in items:
                if isinstance(item, item_type):
                    return True
            return False
    def reset_cart_items_prices(self):
        """
        Reset the items price state in the user cart
        """
        # list_price is only set when a discount was applied; restoring it to
        # unit_cost undoes any coupon/registration-code discount.
        for item in self.orderitem_set.all(): # pylint: disable=E1101
            if item.list_price:
                item.unit_cost = item.list_price
                item.list_price = None
                item.save()
    def clear(self):
        """
        Clear out all the items in the cart
        """
        self.orderitem_set.all().delete() # pylint: disable=E1101
    @transaction.commit_on_success
    def start_purchase(self):
        """
        Start the purchase process. This will set the order status to "paying",
        at which point it should no longer be modified.
        Future calls to `Order.get_cart_for_user()` will filter out orders with
        status "paying", effectively creating a new (empty) cart.
        """
        if self.status == 'cart':
            self.status = 'paying'
            self.save()
            # Propagate the status change to every line item (denormalized).
            for item in OrderItem.objects.filter(order=self).select_subclasses():
                item.start_purchase()
    def update_order_type(self):
        """
        updating order type. This method wil inspect the quantity associated with the OrderItem.
        In the application, it is implied that when qty > 1, then the user is to purchase
        'RegistrationCodes' which are randomly generated strings that users can distribute to
        others in order for them to enroll in paywalled courses.
        The UI/UX may change in the future to make the switching between PaidCourseRegistration
        and CourseRegCodeItems a more explicit UI gesture from the purchaser
        """
        cart_items = self.orderitem_set.all() # pylint: disable=E1101
        is_order_type_business = False
        # Any item with qty > 1 flips the entire order to a business order.
        for cart_item in cart_items:
            if cart_item.qty > 1:
                is_order_type_business = True
        items_to_delete = []
        if is_order_type_business:
            # Convert PaidCourseRegistration items to CourseRegCodeItem items.
            for cart_item in cart_items:
                if hasattr(cart_item, 'paidcourseregistration'):
                    course_reg_code_item = CourseRegCodeItem.add_to_order(self, cart_item.paidcourseregistration.course_id, cart_item.qty)
                    # update the discounted prices if coupon redemption applied
                    course_reg_code_item.list_price = cart_item.list_price
                    course_reg_code_item.unit_cost = cart_item.unit_cost
                    course_reg_code_item.save()
                    items_to_delete.append(cart_item)
        else:
            # Convert CourseRegCodeItem items back to PaidCourseRegistration items.
            for cart_item in cart_items:
                if hasattr(cart_item, 'courseregcodeitem'):
                    paid_course_registration = PaidCourseRegistration.add_to_order(self, cart_item.courseregcodeitem.course_id)
                    # update the discounted prices if coupon redemption applied
                    paid_course_registration.list_price = cart_item.list_price
                    paid_course_registration.unit_cost = cart_item.unit_cost
                    paid_course_registration.save()
                    items_to_delete.append(cart_item)
        for item in items_to_delete:
            item.delete()
        self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
        self.save()
    def generate_registration_codes_csv(self, orderitems, site_name):
        """
        this function generates the csv file
        Returns a (csv_file, course_info) pair: an in-memory StringIO holding
        one row per registration code, and a list of (course name, date range)
        tuples for every course in the order.
        """
        course_info = []
        csv_file = StringIO.StringIO()
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
        for item in orderitems:
            course_id = item.course_id
            course = get_course_by_id(getattr(item, 'course_id'), depth=0)
            registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
            course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
            for registration_code in registration_codes:
                redemption_url = reverse('register_code_redemption', args=[registration_code.code])
                url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
                csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
        return csv_file, course_info
    def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, site_name, courses_info):
        """
        send confirmation e-mail
        One message is sent per recipient: the purchasing user, plus (for
        business orders) the optional company contact and invoice recipient.
        """
        recipient_list = [(self.user.username, getattr(self.user, 'email'), 'user')] # pylint: disable=E1101
        if self.company_contact_email:
            recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
        joined_course_names = ""
        if self.recipient_email:
            recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
            courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
            joined_course_names = " " + ", ".join(courses_names_with_dates)
        if not is_order_type_business:
            subject = _("Order Payment Confirmation")
        else:
            subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
                course_name_list=joined_course_names
            )
        dashboard_url = '{base_url}{dashboard}'.format(
            base_url=site_name,
            dashboard=reverse('dashboard')
        )
        try:
            from_address = microsite.get_value(
                'email_from_address',
                settings.PAYMENT_SUPPORT_EMAIL
            )
            # send a unique email for each recipient, don't put all email addresses in a single email
            for recipient in recipient_list:
                message = render_to_string(
                    'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
                    {
                        'order': self,
                        'recipient_name': recipient[0],
                        'recipient_type': recipient[2],
                        'site_name': site_name,
                        'order_items': orderitems,
                        'course_names': ", ".join([course_info[0] for course_info in courses_info]),
                        'dashboard_url': dashboard_url,
                        'order_placed_by': '{username} ({email})'.format(username=self.user.username, email=getattr(self.user, 'email')), # pylint: disable=E1101
                        'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
                        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
                        'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL),
                        'payment_email_signature': microsite.get_value('payment_email_signature'),
                    }
                )
                email = EmailMessage(
                    subject=subject,
                    body=message,
                    from_email=from_address,
                    to=[recipient[1]]
                )
                # only the business order is HTML formatted
                # the single seat is simple text
                if is_order_type_business:
                    email.content_subtype = "html"
                    if csv_file:
                        email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
                email.send()
        except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
            log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=E1101
    def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
                 country='', ccnum='', cardtype='', processor_reply_dump=''):
        """
        Call to mark this order as purchased. Iterates through its OrderItems and calls
        their purchased_callback
        `first` - first name of person billed (e.g. John)
        `last` - last name of person billed (e.g. Smith)
        `street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
        `street2` - second line of a street address of the billing address (e.g. Suite 101)
        `city` - city of the billing address (e.g. Cambridge)
        `state` - code of the state, province, or territory of the billing address (e.g. MA)
        `postalcode` - postal code of the billing address (e.g. 02142)
        `country` - country code of the billing address (e.g. US)
        `ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
        `cardtype` - 3-digit code representing the card type used (e.g. 001)
        `processor_reply_dump` - all the parameters returned by the processor
        """
        # Guard against double-purchase (e.g. a replayed processor callback).
        if self.status == 'purchased':
            log.error(
                u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=E1101
            )
            return
        self.status = 'purchased'
        self.purchase_time = datetime.now(pytz.utc)
        self.bill_to_first = first
        self.bill_to_last = last
        self.bill_to_city = city
        self.bill_to_state = state
        self.bill_to_country = country
        self.bill_to_postalcode = postalcode
        # Street address and card details are only persisted when the
        # deployment has opted in to storing billing info.
        if settings.FEATURES['STORE_BILLING_INFO']:
            self.bill_to_street1 = street1
            self.bill_to_street2 = street2
            self.bill_to_ccnum = ccnum
            self.bill_to_cardtype = cardtype
            self.processor_reply_dump = processor_reply_dump
        # save these changes on the order, then we can tell when we are in an
        # inconsistent state
        self.save()
        # this should return all of the objects with the correct types of the
        # subclasses
        orderitems = OrderItem.objects.filter(order=self).select_subclasses()
        site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
        if self.order_type == OrderTypes.BUSINESS:
            self.update_order_type()
        for item in orderitems:
            item.purchase_item()
        csv_file = None
        courses_info = []
        if self.order_type == OrderTypes.BUSINESS:
            #
            # Generate the CSV file that contains all of the RegistrationCodes that have already been
            # generated when the purchase has transacted
            #
            csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
        self.send_confirmation_emails(orderitems, self.order_type == OrderTypes.BUSINESS, csv_file, site_name, courses_info)
        self._emit_purchase_event(orderitems)
    def _emit_purchase_event(self, orderitems):
        """
        Emit an analytics purchase event for this Order. Will iterate over all associated
        OrderItems and add them as products in the event as well.
        """
        event_name = 'Completed Order' # Required event name by Segment
        try:
            if settings.FEATURES.get('SEGMENT_IO_LMS') and settings.SEGMENT_IO_LMS_KEY:
                tracking_context = tracker.get_tracker().resolve_context()
                analytics.track(self.user.id, event_name, { # pylint: disable=E1101
                    'orderId': self.id, # pylint: disable=E1101
                    'total': str(self.total_cost),
                    'currency': self.currency,
                    'products': [item.analytics_data() for item in orderitems]
                }, context={
                    'Google Analytics': {
                        'clientId': tracking_context.get('client_id')
                    }
                })
        except Exception: # pylint: disable=W0703
            # Capturing all exceptions thrown while tracking analytics events. We do not want
            # an operation to fail because of an analytics event, so we will capture these
            # errors in the logs.
            log.exception(
                u'Unable to emit {event} event for user {user} and order {order}'.format(
                    event=event_name, user=self.user.id, order=self.id) # pylint: disable=E1101
            )
    def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
                            recipient_email='', customer_reference_number=''):
        """
        This function is called after the user selects a purchase type of "Business" and
        is asked to enter the optional billing details. The billing details are updated
        for that order.
        company_name - Name of purchasing organization
        company_contact_name - Name of the key contact at the company the sale was made to
        company_contact_email - Email of the key contact at the company the sale was made to
        recipient_name - Name of the company should the invoice be sent to
        recipient_email - Email of the company should the invoice be sent to
        customer_reference_number - purchase order number of the organization associated with this Order
        """
        self.company_name = company_name
        self.company_contact_name = company_contact_name
        self.company_contact_email = company_contact_email
        self.recipient_name = recipient_name
        self.recipient_email = recipient_email
        self.customer_reference_number = customer_reference_number
        self.save()
    def generate_receipt_instructions(self):
        """
        Call to generate specific instructions for each item in the order. This gets displayed on the receipt
        page, typically. Instructions are something like "visit your dashboard to see your new courses".
        This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
        to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
        html instructions
        """
        instruction_set = set([]) # heh. not ia32 or alpha or sparc
        instruction_dict = {}
        order_items = OrderItem.objects.filter(order=self).select_subclasses()
        for item in order_items:
            item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
            instruction_dict[item_pk_with_subclass] = set_of_html
            instruction_set.update(set_of_html)
        return instruction_dict, instruction_set
class OrderItem(TimeStampedModel):
    """
    This is the basic interface for order items.
    Order items are line items that fill up the shopping carts and orders.
    Each implementation of OrderItem should provide its own purchased_callback as
    a method.
    """
    # InheritanceManager lets queries return the concrete subclass instances
    # (e.g. PaidCourseRegistration) via select_subclasses().
    objects = InheritanceManager()
    order = models.ForeignKey(Order, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
    user = models.ForeignKey(User, db_index=True)
    # this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
    status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
    qty = models.IntegerField(default=1)
    unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # Pre-discount price; only set while a coupon/reg-code discount is applied.
    list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
    line_desc = models.CharField(default="Misc. Item", max_length=1024)
    currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
    fulfilled_time = models.DateTimeField(null=True, db_index=True)
    refund_requested_time = models.DateTimeField(null=True, db_index=True)
    service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
    # general purpose field, not user-visible. Used for reporting
    report_comments = models.TextField(default="")
    @property
    def line_cost(self):
        """ Return the total cost of this OrderItem """
        return self.qty * self.unit_cost
    @classmethod
    def add_to_order(cls, order, *args, **kwargs):
        """
        A suggested convenience function for subclasses.
        NOTE: This does not add anything to the cart. That is left up to the
        subclasses to implement for themselves
        """
        # this is a validation step to verify that the currency of the item we
        # are adding is the same as the currency of the order we are adding it
        # to
        currency = kwargs.get('currency', 'usd')
        if order.currency != currency and order.orderitem_set.exists():
            raise InvalidCartItem(_("Trying to add a different currency into the cart"))
    @transaction.commit_on_success
    def purchase_item(self):
        """
        This is basically a wrapper around purchased_callback that handles
        modifying the OrderItem itself
        """
        self.purchased_callback()
        self.status = 'purchased'
        self.fulfilled_time = datetime.now(pytz.utc)
        self.save()
    def start_purchase(self):
        """
        Start the purchase process. This will set the order item status to "paying",
        at which point it should no longer be modified.
        """
        self.status = 'paying'
        self.save()
    def purchased_callback(self):
        """
        This is called on each inventory item in the shopping cart when the
        purchase goes through.
        Subclasses must override this; the base implementation always raises.
        """
        raise NotImplementedError
    def generate_receipt_instructions(self):
        """
        This is called on each item in a purchased order to generate receipt instructions.
        This should return a list of `ReceiptInstruction`s in HTML string
        Default implementation is to return an empty set
        """
        return self.pk_with_subclass, set([])
    @property
    def pk_with_subclass(self):
        """
        Returns a named tuple that annotates the pk of this instance with its class, to fully represent
        a pk of a subclass (inclusive) of OrderItem
        """
        return OrderItemSubclassPK(type(self), self.pk)
    @property
    def single_item_receipt_template(self):
        """
        The template that should be used when there's only one item in the order
        """
        return 'shoppingcart/receipt.html'
    @property
    def single_item_receipt_context(self):
        """
        Extra variables needed to render the template specified in
        `single_item_receipt_template`
        """
        return {}
    @property
    def additional_instruction_text(self):
        """
        Individual instructions for this order item.
        Currently, only used for e-mails.
        """
        return ''
    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.
        The default implementation returns defaults for most attributes. When no name or
        category is specified by the implementation, the string 'N/A' is placed for the
        name and category. This should be handled appropriately by all implementations.
        Returns
        A dictionary containing analytics data for this OrderItem.
        """
        return {
            'id': self.id, # pylint: disable=E1101
            'sku': type(self).__name__,
            'name': 'N/A',
            'price': str(self.unit_cost),
            'quantity': self.qty,
            'category': 'N/A',
        }
class Invoice(models.Model):
    """
    This table capture all the information needed to support "invoicing"
    which is when a user wants to purchase Registration Codes,
    but will not do so via a Credit Card transaction.
    """
    company_name = models.CharField(max_length=255, db_index=True)
    company_contact_name = models.CharField(max_length=255)
    company_contact_email = models.CharField(max_length=255)
    recipient_name = models.CharField(max_length=255)
    recipient_email = models.CharField(max_length=255)
    # Billing address; everything beyond the first line is optional.
    address_line_1 = models.CharField(max_length=255)
    address_line_2 = models.CharField(max_length=255, null=True)
    address_line_3 = models.CharField(max_length=255, null=True)
    city = models.CharField(max_length=255, null=True)
    state = models.CharField(max_length=255, null=True)
    zip = models.CharField(max_length=15, null=True)
    country = models.CharField(max_length=64, null=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    total_amount = models.FloatField()
    # Free-form references for reconciliation with the customer's PO system.
    internal_reference = models.CharField(max_length=255, null=True)
    customer_reference_number = models.CharField(max_length=63, null=True)
    # Soft-validity flag; an invoice can be invalidated without deleting it.
    is_valid = models.BooleanField(default=True)
class CourseRegistrationCode(models.Model):
    """
    This table contains registration codes
    With registration code, a user can register for a course for free
    """
    code = models.CharField(max_length=32, db_index=True, unique=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created_by = models.ForeignKey(User, related_name='created_by_user')
    # BUGFIX: the default must be a *callable*.  The previous
    # `default=datetime.now(pytz.utc)` was evaluated once at import time, so
    # every row created afterwards was stamped with the server start time.
    # `partial(datetime.now, pytz.utc)` is invoked per-save instead.
    created_at = models.DateTimeField(default=partial(datetime.now, pytz.utc))
    order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
    invoice = models.ForeignKey(Invoice, null=True)
    @classmethod
    @transaction.commit_on_success
    def free_user_enrollment(cls, cart):
        """
        Here we enroll the user free for all courses available in shopping cart
        Every item in `cart` is marked purchased and the cart itself is closed
        out with a purchase timestamp.
        """
        cart_items = cart.orderitem_set.all().select_subclasses()
        if cart_items:
            for item in cart_items:
                CourseEnrollment.enroll(cart.user, item.course_id)
                log.info("Enrolled '{0}' in free course '{1}'"
                         .format(cart.user.email, item.course_id)) # pylint: disable=E1101
                item.status = 'purchased'
                item.save()
            cart.status = 'purchased'
            cart.purchase_time = datetime.now(pytz.utc)
            cart.save()
class RegistrationCodeRedemption(models.Model):
    """
    This model contains the registration-code redemption info
    """
    order = models.ForeignKey(Order, db_index=True, null=True)
    registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
    redeemed_by = models.ForeignKey(User, db_index=True)
    # BUGFIX: the default must be a *callable*.  The previous
    # `default=datetime.now(pytz.utc)` was evaluated once at import time, so
    # every redemption was stamped with the server start time.
    redeemed_at = models.DateTimeField(default=partial(datetime.now, pytz.utc), null=True)
    @classmethod
    def delete_registration_redemption(cls, user, cart):
        """
        This method delete registration redemption
        Removes any redemption rows made by `user` against the order `cart`.
        """
        reg_code_redemption = cls.objects.filter(redeemed_by=user, order=cart)
        # .exists() avoids evaluating the whole queryset just to test emptiness
        if reg_code_redemption.exists():
            reg_code_redemption.delete()
            log.info('Registration code redemption entry removed for user {0} for order {1}'.format(user, cart.id))
    @classmethod
    def add_reg_code_redemption(cls, course_reg_code, order):
        """
        add course registration code info into RegistrationCodeRedemption model
        Marks the matching cart item free (its price is moved to list_price) and
        records who redeemed the code.  Raises RegCodeAlreadyExistException if
        the code was already used, or ItemDoesNotExistAgainstRegCodeException
        if no item in the order matches the code's course.
        """
        cart_items = order.orderitem_set.all().select_subclasses()
        for item in cart_items:
            if getattr(item, 'course_id'):
                if item.course_id == course_reg_code.course_id:
                    # If another account tries to use a existing registration code before the student checks out, an
                    # error message will appear.The reg code is un-reusable.
                    code_redemption = cls.objects.filter(registration_code=course_reg_code)
                    if code_redemption.exists():
                        log.exception("Registration code '{0}' already used".format(course_reg_code.code))
                        raise RegCodeAlreadyExistException
                    code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, order=order, redeemed_by=order.user)
                    code_redemption.save()
                    # Preserve the original price so the discount can be undone.
                    item.list_price = item.unit_cost
                    item.unit_cost = 0
                    item.save()
                    log.info("Code '{0}' is used by user {1} against order id '{2}' "
                             .format(course_reg_code.code, order.user.username, order.id))
                    return course_reg_code
        log.warning("Course item does not exist against registration code '{0}'".format(course_reg_code.code))
        raise ItemDoesNotExistAgainstRegCodeException
    @classmethod
    def create_invoice_generated_registration_redemption(cls, course_reg_code, user):
        """
        This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
        and thus the order_id is missing.
        """
        code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
        code_redemption.save()
class SoftDeleteCouponManager(models.Manager):
    """
    Manager for Coupon rows that are soft-deleted via the `is_active` flag.
    """
    def get_query_set(self):
        """
        Return every coupon, whether soft-deleted or not.
        """
        return super(SoftDeleteCouponManager, self).get_query_set()
    def get_active_coupons_query_set(self):
        """
        Return only the coupons that have not been soft-deleted (is_active=True).
        """
        return self.get_query_set().filter(is_active=True)
class Coupon(models.Model):
    """
    This table contains coupon codes
    A user can get a discount offer on course if provide coupon code
    """
    code = models.CharField(max_length=32, db_index=True)
    description = models.CharField(max_length=255, null=True, blank=True)
    course_id = CourseKeyField(max_length=255)
    percentage_discount = models.IntegerField(default=0)
    created_by = models.ForeignKey(User)
    # BUGFIX: the default must be a *callable*.  The previous
    # `default=datetime.now(pytz.utc)` was evaluated once at import time, so
    # every coupon created afterwards was stamped with the server start time.
    created_at = models.DateTimeField(default=partial(datetime.now, pytz.utc))
    # Soft-delete flag, managed through SoftDeleteCouponManager.
    is_active = models.BooleanField(default=True)
    def __unicode__(self):
        return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
    objects = SoftDeleteCouponManager()
class CouponRedemption(models.Model):
    """
    This table contain coupon redemption info
    """
    order = models.ForeignKey(Order, db_index=True)
    user = models.ForeignKey(User, db_index=True)
    coupon = models.ForeignKey(Coupon, db_index=True)
    @classmethod
    def delete_coupon_redemption(cls, user, cart):
        """
        This method delete coupon redemption
        Removes any coupon redemption rows made by `user` against the order `cart`.
        """
        coupon_redemption = cls.objects.filter(user=user, order=cart)
        if coupon_redemption:
            coupon_redemption.delete()
            log.info('Coupon redemption entry removed for user {0} for order {1}'.format(user, cart.id))
    @classmethod
    def get_discount_price(cls, percentage_discount, value):
        """
        return discounted price against coupon
        """
        # NOTE(review): percentage_discount / 100.00 passes through float before
        # Decimal; the 2-place format masks most rounding error, but
        # Decimal(percentage_discount) / 100 would be exact -- consider fixing.
        discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
        return value - discount
    @classmethod
    def add_coupon_redemption(cls, coupon, order, cart_items):
        """
        add coupon info into coupon_redemption model
        Returns True if the coupon matched a cart item and the discount was
        applied, False otherwise.  Raises MultipleCouponsNotAllowedException
        when a conflicting redemption already exists for this order.
        """
        is_redemption_applied = False
        coupon_redemptions = cls.objects.filter(order=order, user=order.user)
        for coupon_redemption in coupon_redemptions:
            # NOTE(review): this raises when a coupon with a *different* code, or
            # this exact coupon (same id), was already redeemed -- i.e. only a
            # distinct coupon row sharing the same code slips through.  Confirm
            # that asymmetry is intentional.
            if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
                log.exception("Coupon redemption already exist for user '{0}' against order id '{1}'"
                              .format(order.user.username, order.id))
                raise MultipleCouponsNotAllowedException
        for item in cart_items:
            if getattr(item, 'course_id'):
                if item.course_id == coupon.course_id:
                    coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
                    coupon_redemption.save()
                    discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
                    # Preserve the original price so the discount can be undone.
                    item.list_price = item.unit_cost
                    item.unit_cost = discount_price
                    item.save()
                    log.info("Discount generated for user {0} against order id '{1}' "
                             .format(order.user.username, order.id))
                    is_redemption_applied = True
                    return is_redemption_applied
        return is_redemption_applied
class PaidCourseRegistration(OrderItem):
    """
    This is an inventory item for paying for a course registration
    """
    # Course that the user is purchasing registration for.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment mode (e.g. 'honor', 'verified') granted by the purchase.
    mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            # select_subclasses narrows the rows to this OrderItem subclass.
            for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key):
        """
        This will return the total amount of money that a purchased course generated

        Sums qty * unit_cost over all 'purchased' registrations for the course;
        returns 0 when there are none.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status='purchased').aggregate(total=Sum('unit_cost', field='qty * unit_cost'))  # pylint: disable=E1101
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost

    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item

        Raises:
            CourseDoesNotExistException: course_id does not match an existing course.
            ItemAlreadyInCartException: the order already contains this course.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks
        course = modulestore().get_course(course_id)  # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
        # Reuse an existing row for this (order, user, course) if one exists.
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
        item.status = order.status
        item.mode = course_mode.slug
        item.qty = 1
        item.unit_cost = cost
        item.line_desc = _(u'Registration for Course: {course_name}').format(
            course_name=course.display_name_with_default)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item

    def purchased_callback(self):
        """
        When purchased, this should enroll the user in the course. We are assuming that
        course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
        in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
        would in fact be quite silly since there's a clear back door.

        Raises:
            PurchasedCallbackException: the purchased course no longer exists.
        """
        if not modulestore().has_course(self.course_id):
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
        CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))  # pylint: disable=E1101

    def generate_receipt_instructions(self):
        """
        Generates instructions when the user has purchased a PaidCourseRegistration.
        Basically tells the user to visit the dashboard to see their new classes

        Returns:
            (pk_with_subclass, set of instruction strings) for receipt rendering.
        """
        notification = (_('Please visit your <a href="{dashboard_link}">dashboard</a> to see your new course.')
                        .format(dashboard_link=reverse('dashboard')))
        return self.pk_with_subclass, set([notification])

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
        except PaidCourseRegistrationAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the Order Item is associated with a course, additional fields will be populated with
        course information. If there is a mode associated, the mode data is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(PaidCourseRegistration, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)  # pylint: disable=E1101
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItem(OrderItem):
    """
    This is an inventory item for paying for
    generating course registration codes
    """
    # Course the registration codes will be generated for.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment mode granted when a generated code is redeemed.
    mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)

    @classmethod
    def contained_in_order(cls, order, course_id):
        """
        Is the course defined by course_id contained in the order?
        """
        return course_id in [
            item.course_id
            # select_subclasses narrows the rows to this OrderItem subclass.
            for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
            if isinstance(item, cls)
        ]

    @classmethod
    def get_total_amount_of_purchased_item(cls, course_key):
        """
        This will return the total amount of money that a purchased course generated

        Sums qty * unit_cost over all 'purchased' code batches for the course;
        returns 0 when there are none.
        """
        total_cost = 0
        result = cls.objects.filter(course_id=course_key, status='purchased').aggregate(total=Sum('unit_cost', field='qty * unit_cost'))  # pylint: disable=E1101
        if result['total'] is not None:
            total_cost = result['total']
        return total_cost

    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):  # pylint: disable=W0221
        """
        A standardized way to create these objects, with sensible defaults filled in.
        Will update the cost if called on an order that already carries the course.
        Returns the order item

        Raises:
            CourseDoesNotExistException: course_id does not match an existing course.
            ItemAlreadyInCartException: the order already contains this course.
            AlreadyEnrolledInCourseException: the user is already enrolled.
        """
        # First a bunch of sanity checks
        course = modulestore().get_course(course_id)  # actually fetch the course to make sure it exists, use this to
        # throw errors if it doesn't
        if not course:
            log.error("User {} tried to add non-existent course {} to cart id {}"
                      .format(order.user.email, course_id, order.id))
            raise CourseDoesNotExistException
        if cls.contained_in_order(order, course_id):
            # BUG FIX: the message previously said "PaidCourseRegistration"
            # (copy/paste from the sibling class), which misled debugging.
            log.warning("User {} tried to add CourseRegCodeItem for course {}, already in cart id {}"
                        .format(order.user.email, course_id, order.id))
            raise ItemAlreadyInCartException
        if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
            log.warning("User {} trying to add course {} to cart id {}, already registered"
                        .format(order.user.email, course_id, order.id))
            raise AlreadyEnrolledInCourseException
        ### Validations done, now proceed
        ### handle default arguments for mode_slug, cost, currency
        course_mode = CourseMode.mode_for_course(course_id, mode_slug)
        if not course_mode:
            # user could have specified a mode that's not set, in that case return the DEFAULT_MODE
            course_mode = CourseMode.DEFAULT_MODE
        if not cost:
            cost = course_mode.min_price
        if not currency:
            currency = course_mode.currency
        super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
        # Reuse an existing row for this (order, user, course) if one exists.
        item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)  # pylint: disable=W0612
        item.status = order.status
        item.mode = course_mode.slug
        item.unit_cost = cost
        item.qty = qty
        item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
            course_name=course.display_name_with_default)
        item.currency = currency
        order.currency = currency
        item.report_comments = item.csv_report_comments
        order.save()
        item.save()
        log.info("User {} added course registration {} to cart: order {}"
                 .format(order.user.email, course_id, order.id))
        return item

    def purchased_callback(self):
        """
        The purchase is completed, this OrderItem type will generate Registration Codes that will
        be redeemed by users

        Raises:
            PurchasedCallbackException: the purchased course no longer exists.
        """
        if not modulestore().has_course(self.course_id):
            raise PurchasedCallbackException(
                "The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
        total_registration_codes = int(self.qty)
        # we need to import here because of a circular dependency
        # we should ultimately refactor code to have save_registration_code in this models.py
        # file, but there's also a shared dependency on a random string generator which
        # is in another PR (for another feature)
        from instructor.views.api import save_registration_code
        # One registration code per purchased quantity.
        for i in range(total_registration_codes):  # pylint: disable=W0612
            save_registration_code(self.user, self.course_id, invoice=None, order=self.order)
        log.info("Enrolled {0} in paid course {1}, paid ${2}"
                 .format(self.user.email, self.course_id, self.line_cost))  # pylint: disable=E1101

    @property
    def csv_report_comments(self):
        """
        Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
        Otherwise returns the annotation
        """
        try:
            return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
        except CourseRegCodeItemAnnotation.DoesNotExist:
            return u""

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the OrderItem is associated with a course, additional fields will be populated with
        course information. If a mode is available, it will be included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CourseRegCodeItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)  # pylint: disable=E1101
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class CourseRegCodeItemAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    # One annotation row per course.
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    # Free-form report annotation (e.g. payment account) for the course.
    annotation = models.TextField(null=True)

    def __unicode__(self):
        """Human-readable 'course : annotation' representation (Python 2)."""
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
    """
    A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
    generates report for the paid courses, each report item must contain the payment account associated with a course.
    And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
    so this is to retrofit it.
    """
    # One annotation row per course.
    course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
    # Free-form report annotation (e.g. payment account) for the course.
    annotation = models.TextField(null=True)

    def __unicode__(self):
        """Human-readable 'course : annotation' representation (Python 2)."""
        # pylint: disable=no-member
        return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
    """
    This is an inventory item for purchasing certificates
    """
    # Course the certificate is for.
    course_id = CourseKeyField(max_length=128, db_index=True)
    # Enrollment this certificate purchase is attached to.
    course_enrollment = models.ForeignKey(CourseEnrollment)
    # Certificate mode purchased (e.g. 'verified', 'professional').
    mode = models.SlugField()

    @receiver(UNENROLL_DONE)
    def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs):  # pylint: disable=E0213,W0613
        """
        When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
        occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
        refund.

        Returns the refunded certificate on a successful refund; else, it returns nothing.
        """
        # Only refund verified cert unenrollments that are within bounds of the expiration date
        if (not course_enrollment.refundable()) or skip_refund:
            return
        target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
        try:
            # Take the first matching purchased item, if any.
            target_cert = target_certs[0]
        except IndexError:
            log.error("Matching CertificateItem not found while trying to refund. User %s, Course %s", course_enrollment.user, course_enrollment.course_id)
            return
        # Mark both the item and its order refunded, with a timestamp for auditing.
        target_cert.status = 'refunded'
        target_cert.refund_requested_time = datetime.now(pytz.utc)
        target_cert.save()
        target_cert.order.status = 'refunded'
        target_cert.order.save()
        order_number = target_cert.order_id
        # send billing an email so they can handle refunding
        subject = _("[Refund] User-Requested Refund")
        message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
                                                                                                       user_email=course_enrollment.user.email,
                                                                                                       order_number=order_number)
        to_email = [settings.PAYMENT_SUPPORT_EMAIL]
        from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
        try:
            send_mail(subject, message, from_email, to_email, fail_silently=False)
        except Exception as exception:  # pylint: disable=broad-except
            # A failed notification e-mail must not undo the refund itself:
            # log the failure and still return the refunded certificate.
            err_str = ('Failed sending email to billing to request a refund for verified certificate'
                       ' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
            log.error(err_str.format(
                user=course_enrollment.user,
                course=course_enrollment.course_id,
                ce_id=course_enrollment.id,
                order=order_number,
                exception=exception,
            ))
        return target_cert

    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
        """
        Add a CertificateItem to an order

        Returns the CertificateItem object after saving

        `order` - an order that this item should be added to, generally the cart order
        `course_id` - the course that we would like to purchase as a CertificateItem
        `cost` - the amount the user will be paying for this CertificateItem
        `mode` - the course mode that this certificate is going to be issued for

        This item also creates a new enrollment if none exists for this user and this course.

        Raises:
            InvalidCartItem: `mode` is not configured for the course.

        Example Usage:
            cart = Order.get_cart_for_user(user)
            CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
        """
        super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
        course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
        # do some validation on the enrollment mode
        valid_modes = CourseMode.modes_for_course_dict(course_id)
        if mode in valid_modes:
            mode_info = valid_modes[mode]
        else:
            raise InvalidCartItem(_("Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id))
        item, _created = cls.objects.get_or_create(
            order=order,
            user=order.user,
            course_id=course_id,
            course_enrollment=course_enrollment,
            mode=mode,
        )
        item.status = order.status
        item.qty = 1
        item.unit_cost = cost
        course_name = modulestore().get_course(course_id).display_name
        # Translators: In this particular case, mode_name refers to a
        # particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
        # by which a user could enroll in the given course.
        item.line_desc = _("{mode_name} for course {course}").format(
            mode_name=mode_info.name,
            course=course_name
        )
        item.currency = currency
        order.currency = currency
        order.save()
        item.save()
        return item

    def purchased_callback(self):
        """
        When purchase goes through, activate and update the course enrollment for the correct mode
        """
        try:
            # Submit any pending photo verification; best-effort only.
            verification_attempt = SoftwareSecurePhotoVerification.active_for_user(self.course_enrollment.user)
            verification_attempt.submit()
        except Exception:
            log.exception(
                "Could not submit verification attempt for enrollment {}".format(self.course_enrollment)
            )
        self.course_enrollment.change_mode(self.mode)
        self.course_enrollment.activate()

    @property
    def single_item_receipt_template(self):
        # Verified/professional purchases get the dedicated certificate receipt.
        if self.mode in ('verified', 'professional'):
            return 'shoppingcart/verified_cert_receipt.html'
        else:
            return super(CertificateItem, self).single_item_receipt_template

    @property
    def single_item_receipt_context(self):
        # Template context for the single-item receipt page.
        course = modulestore().get_course(self.course_id)
        return {
            "course_id": self.course_id,
            "course_name": course.display_name_with_default,
            "course_org": course.display_org_with_default,
            "course_num": course.display_number_with_default,
            "course_start_date_text": course.start_datetime_text(),
            # NOTE(review): this expression is True when course.start is in the
            # FUTURE, which looks inverted relative to the key name -- confirm
            # how the receipt template consumes it.
            "course_has_started": course.start > datetime.today().replace(tzinfo=pytz.utc),
            "course_root_url": reverse(
                'course_root',
                kwargs={'course_id': self.course_id.to_deprecated_string()}  # pylint: disable=no-member
            ),
            "dashboard_url": reverse('dashboard'),
        }

    @property
    def additional_instruction_text(self):
        # Refund-policy blurb appended to the receipt/confirmation.
        return _("Note - you have up to 2 weeks into the course to unenroll from the Verified Certificate option "
                 "and receive a full refund. To receive your refund, contact {billing_email}. "
                 "Please include your order number in your e-mail. "
                 "Please do NOT include your credit card information.").format(
                     billing_email=settings.PAYMENT_SUPPORT_EMAIL)

    @classmethod
    def verified_certificates_count(cls, course_id, status):
        """Return the number of verified CertificateItems in course_id with the given status."""
        return use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())

    # TODO combine these three methods into one
    @classmethod
    def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
        """
        Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.

        Sample usages:
        - status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
        - status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
        etc
        """
        query = use_read_replica_if_available(
            CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
        # aggregate() yields None over an empty queryset; normalize to zero.
        if query is None:
            return Decimal(0.00)
        else:
            return query

    @classmethod
    def verified_certificates_contributing_more_than_minimum(cls, course_id):
        """Count purchased verified certificates whose cost exceeds the course's minimum verified price."""
        return use_read_replica_if_available(
            CertificateItem.objects.filter(
                course_id=course_id,
                mode='verified',
                status='purchased',
                unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the CertificateItem is associated with a course, additional fields will be populated with
        course information. If there is a mode associated with the certificate, it is included in the SKU.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(CertificateItem, self).analytics_data()
        sku = data['sku']
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)  # pylint: disable=E1101
        if self.mode:
            data['sku'] = sku + u'.' + unicode(self.mode)
        return data
class DonationConfiguration(ConfigurationModel):
    """Configure whether donations are enabled on the site."""
    # No fields of its own: the ConfigurationModel base presumably supplies
    # the enabled flag and change history -- confirm against ConfigurationModel.
    pass
class Donation(OrderItem):
    """A donation made by a user.

    Donations can be made for a specific course or to the organization as a whole.
    Users can choose the donation amount.
    """

    # Types of donations
    DONATION_TYPES = (
        ("general", "A general donation"),
        ("course", "A donation to a particular course")
    )

    # The type of donation
    donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)

    # If a donation is made for a specific course, then store the course ID here.
    # If the donation is made to the organization as a whole,
    # set this field to CourseKeyField.Empty
    course_id = CourseKeyField(max_length=255, db_index=True)

    @classmethod
    @transaction.commit_on_success
    def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
        """Add a donation to an order.

        Args:
            order (Order): The order to add this donation to.
            donation_amount (Decimal): The amount the user is donating.

        Keyword Args:
            course_id (CourseKey): If provided, associate this donation with a particular course.
            currency (str): The currency used for the the donation.

        Raises:
            InvalidCartItem: The provided course ID is not valid.

        Returns:
            Donation
        """
        # This will validate the currency but won't actually add the item to the order.
        super(Donation, cls).add_to_order(order, currency=currency)
        # Create a line item description, including the name of the course
        # if this is a per-course donation.
        # This will raise an exception if the course can't be found.
        description = cls._line_item_description(course_id=course_id)
        params = {
            "order": order,
            "user": order.user,
            "status": order.status,
            "qty": 1,
            "unit_cost": donation_amount,
            "currency": currency,
            "line_desc": description
        }
        # Tag the donation as per-course or general depending on the input.
        if course_id is not None:
            params["course_id"] = course_id
            params["donation_type"] = "course"
        else:
            params["donation_type"] = "general"
        return cls.objects.create(**params)

    def purchased_callback(self):
        """Donations do not need to be fulfilled, so this method does nothing."""
        pass

    def generate_receipt_instructions(self):
        """Provide information about tax-deductible donations in the receipt.

        Returns:
            tuple of (Donation, unicode)
        """
        return self.pk_with_subclass, set([self._tax_deduction_msg()])

    @property
    def additional_instruction_text(self):
        """Provide information about tax-deductible donations in the confirmation email.

        Returns:
            unicode
        """
        return self._tax_deduction_msg()

    def _tax_deduction_msg(self):
        """Return the translated version of the tax deduction message.

        Returns:
            unicode
        """
        return _(
            u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
            u"This receipt was prepared to support charitable contributions for tax purposes. "
            u"We confirm that neither goods nor services were provided in exchange for this gift."
        ).format(platform_name=settings.PLATFORM_NAME)

    # NOTE(review): this classmethod's first argument is named `self` but
    # receives the class; consider renaming it to `cls`.
    @classmethod
    def _line_item_description(self, course_id=None):
        """Create a line-item description for the donation.

        Includes the course display name if provided.

        Keyword Arguments:
            course_id (CourseKey)

        Raises:
            CourseDoesNotExistException: The course ID is not valid.

        Returns:
            unicode
        """
        # If a course ID is provided, include the display name of the course
        # in the line item description.
        if course_id is not None:
            course = modulestore().get_course(course_id)
            if course is None:
                err = _(
                    u"Could not find a course with the ID '{course_id}'"
                ).format(course_id=course_id)
                raise CourseDoesNotExistException(err)
            return _(u"Donation for {course}").format(course=course.display_name)
        # The donation is for the organization as a whole, not a specific course
        else:
            return _(u"Donation for {platform_name}").format(platform_name=settings.PLATFORM_NAME)

    @property
    def single_item_receipt_context(self):
        # Flag lets the receipt template render donation-specific copy.
        return {
            'receipt_has_donation_item': True,
        }

    def analytics_data(self):
        """Simple function used to construct analytics data for the OrderItem.

        If the donation is associated with a course, additional fields will be populated with
        course information. When no name or category is specified by the implementation, the
        platform name is used as a default value for required event fields, to declare that
        the Order is specific to the platform, rather than a specific product name or category.

        Returns
            A dictionary containing analytics data for this OrderItem.
        """
        data = super(Donation, self).analytics_data()
        if self.course_id != CourseKeyField.Empty:
            data['name'] = unicode(self.course_id)
            data['category'] = unicode(self.course_id.org)  # pylint: disable=E1101
        else:
            data['name'] = settings.PLATFORM_NAME
            data['category'] = settings.PLATFORM_NAME
        return data
|
wwj718/ANALYSE
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 65,389
|
[
"VisIt"
] |
fa13c3a680eb025ad5f2cc297a936badc81f7bdb48cd3e4904b838be8950c1c4
|
import yaml
import re
from collections import Counter
class Calculation:
    """
    class describing a single VASP calculation
    """

    def __init__( self, title, energy, stoichiometry ):
        """
        Initialise a Calculation object

        Args:
            title (Str): The title string for this calculation.
            energy (Float): Final energy in eV.
            stoichiometry (Dict{Str:Int}): A dict describing the calculation stoichiometry,
                e.g. { 'Ti': 1, 'O': 2 }

        Returns:
            None
        """
        self.title = title
        self.energy = energy
        # Stored as a Counter so stoichiometries support dict-style updates.
        self.stoichiometry = Counter( stoichiometry )

    def __mul__( self, scaling ):
        """
        "Multiply" this Calculation by a scaling factor.

        Returns a new Calculation with the same title, but scaled energy and stoichiometry.

        Args:
            scaling (float): The scaling factor.

        Returns:
            (vasppy.Calculation): The scaled Calculation.
        """
        new_calculation = Calculation( title=self.title, energy=self.energy*scaling, stoichiometry=self.scale_stoichiometry( scaling ) )
        return new_calculation

    def __truediv__( self, scaling ):
        """
        Implements division by a scaling factor.

        Returns a new Calculation with the same title, but scaled energy and stoichiometry.

        Args:
            scaling (float): The scaling factor.

        Returns:
            (vasppy.Calculation): The scaled Calculation.
        """
        return self * ( 1 / scaling )

    def scale_stoichiometry( self, scaling ):
        """
        Scale the Calculation stoichiometry

        Returns the stoichiometry, scaled by the argument scaling.

        Args:
            scaling (float): The scaling factor.

        Returns:
            (Counter(Str:Int)): The scaled stoichiometry as a :obj:`Counter` of ``label: stoichiometry`` pairs
        """
        # FIX: return a Counter (a dict subclass) rather than a plain dict, so
        # the return type matches this docstring and the type stored on
        # Calculation objects. Backward compatible for all dict-style callers.
        return Counter( { k: v * scaling for k, v in self.stoichiometry.items() } )
def delta_E(reactants, products, check_balance=True):
    """
    Calculate the change in energy for reactants --> products.

    Args:
        reactants (list(:obj:`vasppy.Calculation`)): A `list` of :obj:`vasppy.Calculation` objects. The initial state.
        products (list(:obj:`vasppy.Calculation`)): A `list` of :obj:`vasppy.Calculation` objects. The final state.
        check_balance (:obj:`bool`, optional): Check that the reaction stoichiometry is balanced. Default is ``True``.

    Raises:
        ValueError: if ``check_balance`` is set and the stoichiometry does not balance.

    Returns:
        (float) The change in energy.
    """
    if check_balance:
        # A balanced reaction leaves no net change in stoichiometry.
        if delta_stoichiometry(reactants, products) != {}:
            raise ValueError("reaction is not balanced: {}".format(delta_stoichiometry(reactants, products)))
    total_product_energy = sum(calc.energy for calc in products)
    total_reactant_energy = sum(calc.energy for calc in reactants)
    return total_product_energy - total_reactant_energy
def delta_stoichiometry(reactants, products):
    """
    Calculate the change in stoichiometry for reactants --> products.

    Args:
        reactants (list(:obj:`vasppy.Calculation`): A `list` of :obj:`vasppy.Calculation objects.` The initial state.
        products (list(:obj:`vasppy.Calculation`): A `list` of :obj:`vasppy.Calculation objects.` The final state.

    Returns:
        (Counter): The change in stoichiometry.
    """
    running_total = Counter()
    # Reactants are consumed (negative contribution), products are produced.
    for calc in reactants:
        running_total.update((calc * -1.0).stoichiometry)
    for calc in products:
        running_total.update(calc.stoichiometry)
    # Drop species whose net change is zero.
    return {species: count for species, count in running_total.items() if count != 0}
def energy_string_to_float( string ):
    """
    Convert a string of a calculation energy, e.g. '-1.2345 eV', to a float.

    Integer-valued strings such as '-1 eV' are also accepted.

    Args:
        string (str): The string to convert.

    Raises:
        AttributeError: if the string does not begin with a number.

    Return
        (float)
    """
    # FIX: use a raw string (the old "\d" escapes raise DeprecationWarning and
    # will eventually be a SyntaxError) and make the decimal part optional so
    # integer energies also parse. Backward compatible for all old inputs.
    energy_re = re.compile( r"(-?\d+(?:\.\d+)?)" )
    return float( energy_re.match( string ).group(1) )
def import_calculations_from_file(filename, skip_incomplete_records=False):
    """
    Construct a list of :obj:`Calculation` objects by reading a YAML file.

    Each YAML document should include ``title``, ``stoichiometry``, and ``energy`` fields, e.g.::

        title: my calculation
        stoichiometry:
            - A: 1
            - B: 2
        energy: -0.1234 eV

    Separate calculations should be distinct YAML documents, separated by `---`

    Args:
        filename (str): Name of the YAML file to read.
        skip_incomplete_records (bool): Do not parse YAML documents missing one or more of
            the required keys. Default is ``False``.

    Raises:
        ValueError: if a document has no ``stoichiometry`` field and
            ``skip_incomplete_records`` is ``False``.

    Returns:
        (dict(vasppy.Calculation)): A dictionary of :obj:`Calculation` objects. For each :obj:`Calculation` object, the ``title`` field from the YAML input is used as the dictionary key.
    """
    calcs = {}
    with open( filename, 'r' ) as stream:
        docs = yaml.load_all( stream, Loader=yaml.SafeLoader )
        for d in docs:
            if skip_incomplete_records:
                if ('title' not in d) or ('stoichiometry' not in d) or ('energy' not in d):
                    continue
            if 'stoichiometry' in d:
                # The schema stores stoichiometry as a list of single-entry
                # mappings; merge them into one Counter.
                stoichiometry = Counter()
                for s in d['stoichiometry']:
                    stoichiometry.update( s )
            else:
                # FIX: the original message was missing the f-prefix, so the
                # literal placeholder text was shown instead of the title.
                raise ValueError(f'stoichiometry not found for "{d["title"]}"')
            calcs[ d['title'] ] = Calculation( title=d['title'],
                                               stoichiometry=stoichiometry,
                                               energy=energy_string_to_float( d['energy'] ) )
    return calcs
|
bjmorgan/vasppy
|
vasppy/calculation.py
|
Python
|
mit
| 5,642
|
[
"VASP"
] |
727edb36d16d4fb8de07b02c7b24abecceae4aa4594ff2119be80daf36a128a5
|
import contact_angle as cnt
import mdtraj as md
import numpy as np
from contact_angle.utils.general import get_fn
def test_flipped():
    """Same trajectory, but rotated around x-axis - should give same contact angle

    Loads the original and the rotated droplet trajectories, fits a contact
    angle to each, and asserts the two angles agree to within 5 degrees.
    """
    traj = md.load(get_fn('chol-tail-original.dcd'),
                   top=get_fn('chol-wetting.hoomdxml'))
    # Droplet sits above the surface; fit over positive z.
    # NOTE(review): the *6 scaling and the 14400: atom slice presumably select
    # droplet atoms in real units -- confirm against the fixture files.
    ca = cnt.calc_contact_angle(traj.xyz[:, 14400:]*6, guess_R=4.0, guess_z0=1.8,
                                guess_rho_n=1.0, left_tol=0.1, z_range=(-0.1, 9), surface_normal='z',
                                n_bins=100, fit_range=(2, 4.0), droplet_location='above')
    traj = md.load(get_fn('chol-tail-rotated.dcd'),
                   top=get_fn('chol-wetting.hoomdxml'))
    # Rotated system: droplet is below the surface, so z ranges are mirrored.
    ca2 = cnt.calc_contact_angle(traj.xyz[:, 14400:]*6, guess_R=4.0, guess_z0=1.8,
                                 guess_rho_n=1.0, left_tol=0.1, z_range=(-7.0, 0.3), surface_normal='z',
                                 n_bins=100, fit_range=(-6.0, -1.0), droplet_location='below')
    assert(np.absolute(ca['theta'] - ca2['theta']) < 5.0)
|
tcmoore3/contact_angle
|
contact_angle/tests/test_above-below.py
|
Python
|
mit
| 968
|
[
"MDTraj"
] |
a4242b1b91e36ca10d80847ce61b6a52e169d986f397a8cb159806d9dcd6849a
|
import copy
import logging
import os.path
import re
from spitfire.compiler.ast import *
from spitfire.compiler.analyzer import *
from spitfire.compiler.visitor import print_tree
from spitfire.compiler.walker import flatten_tree
import __builtin__
builtin_names = vars(__builtin__)
class _BaseAnalyzer(object):
  def __init__(self, ast_root, options, compiler):
    """Hold the AST root plus the options/compiler state shared by all passes."""
    self.ast_root = ast_root
    self.options = options
    self.compiler = compiler
    # Node types no analyze method handled; reported when options.debug is set.
    self.unoptimized_node_types = set()
  def optimize_ast(self):
    """Run the optimizer over the whole tree and return the (mutated) root."""
    self.visit_ast(self.ast_root)
    if self.options.debug:
      print "unoptimized_node_types", self.unoptimized_node_types
    return self.ast_root
  # build an AST node list from a single parse node
  # need the parent in case we are going to delete a node
  def visit_ast(self, node, parent=None):
    """Dispatch `node` to analyze<NodeType>; fall back to default_optimize_node."""
    node.parent = parent
    method_name = 'analyze%s' % node.__class__.__name__
    method = getattr(self, method_name, self.default_optimize_node)
    # Per-method tracing is enabled via the compiler's debug flags.
    if method_name in self.compiler.debug_flags:
      print method_name, node
    return method(node)
  def skip_analyze_node(self, node):
    """No-op analysis for node types that never need optimization."""
    return
  # Leaf node types share the no-op handler.
  analyzeLiteralNode = skip_analyze_node
  analyzeIdentifierNode = skip_analyze_node
  analyzeTargetNode = skip_analyze_node
  def default_optimize_node(self, node):
    """Fallback for node types without a dedicated analyze method."""
    # print "default_optimize_node", type(node)
    # Record the type so unhandled nodes can be reported in debug mode.
    self.unoptimized_node_types.add(type(node))
    return
  def get_parent_loop(self, node):
    """Return the nearest enclosing ForNode, or None."""
    return self._get_parent_node_by_type(node, ForNode)
  def get_parent_function(self, node):
    """Return the nearest enclosing FunctionNode, or None."""
    return self._get_parent_node_by_type(node, FunctionNode)
  def get_parent_block(self, node):
    """Return the nearest enclosing block node (function/for/if/else), or None."""
    return self._get_parent_node_by_type(node,
                                         (FunctionNode, ForNode, IfNode, ElseNode))
def _get_parent_node_by_type(self, node, node_type):
node = node.parent
while node is not None:
if isinstance(node, node_type):
return node
node = node.parent
return None
  # this function has some rules that are a bit unclean - you aren't actually
  # looking for the 'parent' scope, but one you might insert nodes into.
  # for instance, you skip over a ForNode so that optimizations are inserted
  # in a loop-invariant fashion.
  def get_parent_scope(self, node):
    """Return the scope that nodes generated for `node` should be inserted into.

    Walks up the parent chain. If/For scopes only apply when `node` sits in
    their body rather than in their test/expression clause.

    Raises:
      SemanticAnalyzerError: if no enclosing FunctionNode is found.
    """
    node_stack = [node]
    node = node.parent
    while node is not None:
      if type(node) == FunctionNode:
        return node.scope
      elif type(node) == IfNode:
        # elements of the test clause need to reference the next scope
        # "up" - usually the function, but could be another conditional block
        # fixme: if we ever implement "elif" this will have to get fixed up
        if node_stack[-1] != node.test_expression:
          return node.scope
      elif type(node) == ElseNode:
        return node.scope
      elif type(node) == ForNode:
        # The loop's expression_list is evaluated outside the loop-body scope.
        if node_stack[-1] != node.expression_list:
          return node.scope
      node_stack.append(node)
      node = node.parent
    raise SemanticAnalyzerError("expected a parent function")
  def get_insert_block_and_point(self, node):
    """Find the enclosing block and the child of that block to insert before.

    Returns:
      (block, insert_marker): `block` is the nearest ancestor that is a
      Function/For/If/Else node whose child_nodes contain the path down to
      `node`; `insert_marker` is that direct child.

    Raises:
      SemanticAnalyzerError: if no enclosing block is found.
    """
    # original_node is kept for reference only; it is not used below.
    original_node = node
    insert_marker = node
    node = node.parent
    while node is not None:
      if isinstance(node, (FunctionNode, ForNode, IfNode, ElseNode)):
        if insert_marker in node.child_nodes:
          return node, insert_marker
      insert_marker = node
      node = node.parent
    raise SemanticAnalyzerError("expected a parent block")
def replace_in_parent_block(self, node, new_node):
insert_block, insert_marker = self.get_insert_block_and_point(node)
insert_block.replace(insert_marker, new_node)
  def reanalyzeConditionalNode(self, conditional_node):
    """Second pass over a conditional branch: hoist alias assignments that
    do not depend on anything local to the branch up into the parent block,
    when the parent already uses the same alias."""
    if (not self.options.hoist_conditional_aliases and
        not self.options.cache_filtered_placeholders):
      return
    parent_node = conditional_node
    parent_block, insertion_point = self.get_insert_block_and_point(
        conditional_node)
    if self.options.hoist_conditional_aliases:
      #print "reanalyzeConditionalNode", conditional_node
      #print "  parent_block", parent_block
      #print "  parent_scope", parent_block.scope
      # NOTE: need to iterate over items, in case we modify something
      for alias_node, alias in conditional_node.scope.aliased_expression_map.items():
        #print "  check alias:", alias
        #print "    alias_node:", alias_node
        assign_alias_node = AssignNode(alias, alias_node)
        # only hoist when the parent block also aliases this expression -
        # otherwise it is only needed inside the branch
        if alias_node in parent_block.scope.aliased_expression_map:
          if self.is_condition_invariant(alias_node, conditional_node):
            #print "  hoist:", assign_alias_node
            self.hoist(
                conditional_node, parent_block, insertion_point, alias_node,
                assign_alias_node)
  def reanalyzeLoopNode(self, loop_node):
    """Second pass over a loop: move loop-invariant alias assignments above
    the loop so they execute once instead of every iteration."""
    if not self.options.hoist_loop_invariant_aliases:
      return
    parent_block, insertion_point = self.get_insert_block_and_point(loop_node)
    # NOTE: need to iterate over items, in case we modify something
    for alias_node, alias in loop_node.scope.aliased_expression_map.items():
      assign_alias = AssignNode(alias, alias_node)
      if alias_node in parent_block.scope.aliased_expression_map:
        # parent also uses this alias: full hoist bookkeeping via hoist()
        if self.is_loop_invariant(alias_node, loop_node):
          self.hoist(loop_node, parent_block, insertion_point, alias_node,
                     assign_alias)
      else:
        # if this alias is not already used in the parent scope, that's
        # ok, hoist it if it's loop invariant
        if self.is_loop_invariant(alias_node, loop_node):
          loop_node.remove(assign_alias)
          parent_block.insert_before(loop_node, assign_alias)
          parent_block.scope.hoisted_aliases.append(alias_node)
def is_condition_invariant(self, node, conditional_node):
node_dependency_set = self.get_node_dependencies(node)
condition_invariant = not node_dependency_set.intersection(
conditional_node.scope.local_identifiers)
#print "is_condition_invariant:", condition_invariant
#print " locals:", conditional_node.scope.local_identifiers
#print " deps:", node_dependency_set
return condition_invariant
def is_loop_invariant(self, node, loop_node):
node_dependency_set = self.get_node_dependencies(node)
# print "is loop invariant node:", node
# for x in node_dependency_set:
# print " dep:", x
return not loop_node.loop_variant_set.intersection(node_dependency_set)
  def get_node_dependencies(self, node):
    """Return the set of nodes that `node` transitively depends on.

    Flattens the subtree, then for each identifier climbs the enclosing
    blocks looking for the assignment that defines it, pulling in the
    dependencies of the assigned expression.  Conditionals encountered on
    the way contribute the flattened nodes of their test expressions.
    """
    node_dependency_set = set(flatten_tree(node))
    parent_block = self.get_parent_block(node)
    for n in list(node_dependency_set):
      # when this is an identifier, you need to check all of the potential
      # the dependencies for that symbol, which means doing some crawling
      if isinstance(n, IdentifierNode):
        identifier = n
        parent_block_to_check = parent_block
        while parent_block_to_check:
          for block_node in parent_block_to_check.child_nodes:
            if isinstance(block_node, AssignNode):
              if block_node.left == identifier:
                node_dependency_set.update(
                  self.get_node_dependencies(block_node.right))
                # defining assignment found - stop climbing parent blocks
                parent_block_to_check = None
                break
            elif isinstance(block_node, IfNode):
              # if you encounter a conditional in your chain, you depend on any
              # dependencies of the condition itself
              # FIXME: calling get_node_dependencies(block_node.test_expression)
              # causes an infinite loop, but that is probably the correct way
              # forward to address the dependency chain
              node_dependency_set.update(
                flatten_tree(block_node.test_expression))
          else:
            # for/else: loop ended without break, so no assignment in this
            # block defined the identifier - climb to the next parent block
            parent_block_to_check = self.get_parent_block(
              parent_block_to_check)
    #elif isinstance(n, (GetUDNNode, FilterNode)):
    #  node_dependency_set.update(
    #    self.get_node_dependencies(node.expression))
    #print "get_node_dependencies", node
    #print "  deps:", node_dependency_set
    return node_dependency_set
class OptimizationAnalyzer(_BaseAnalyzer):
  """Main optimization pass over the parse tree.

  Rewrites the AST in place: resolves placeholders to locals, template
  methods, globals or builtins where possible; caches resolved placeholder
  and UDN lookups behind generated alias variables; aliases invariant
  attribute accesses; and (optionally) hoists loop-invariant aliases.
  Behavior is driven by flags on self.options.
  """
  def analyzeParameterNode(self, parameter):
    """Visit a parameter's default-value expression."""
    self.visit_ast(parameter.default, parameter)
    return
  def analyzeTemplateNode(self, template):
    """Seed module-level imports/identifiers, then optimize the whole tree."""
    # at this point, if we have a function registry, add in the nodes before we
    # begin optimizing
    for alias, (fq_name, method) in self.compiler.function_name_registry.iteritems():
      fq_name_parts = fq_name.split('.')
      self.ast_root.from_nodes.append(FromNode(
        [IdentifierNode(x) for x in fq_name_parts[:-1]],
        IdentifierNode(fq_name_parts[-1]),
        IdentifierNode(alias)))
    # imported names become globals the optimizer may resolve against
    for n in template.from_nodes:
      if n.alias:
        template.global_identifiers.add(n.alias)
      else:
        template.global_identifiers.add(n.identifier)
    # scan extends for dependencies
    # this allows faster calling of template functions - we could also
    # tune BufferWrite calls for these nodes
    if self.options.use_dependency_analysis:
      for n in template.extends_nodes:
        for ext in template_extensions:
          path = os.path.join(
            *[ident_node.name
              for ident_node in n.source_module_name_list]) + ext
          template_function_names = get_template_functions(path)
          template.template_methods.update(template_function_names)
    self.visit_ast(template.main_function, template)
    for n in template.child_nodes:
      self.visit_ast(n, template)
  def analyzeFunctionNode(self, function):
    """Register parameters as scope locals, then optimize the body."""
    function.scope.local_identifiers.extend([IdentifierNode(n.name)
                                             for n in function.parameter_list])
    for n in function.child_nodes:
      self.visit_ast(n, function)
  def analyzeForNode(self, for_node):
    """Optimize a loop; its targets become the loop-variant set."""
    self.visit_ast(for_node.target_list, for_node)
    for_node.loop_variant_set = set(for_node.target_list.flat_list)
    self.visit_ast(for_node.expression_list, for_node)
    for n in for_node.child_nodes:
      self.visit_ast(n, for_node)
  def analyzeAssignNode(self, node):
    """Record the assignment target as a local of the enclosing scope."""
    _identifier = IdentifierNode(node.left.name)
    scope = self.get_parent_scope(node)
    scope.local_identifiers.append(_identifier)
    # note: this hack is here so you can partially analyze alias nodes
    # without double-processing
    if node.right:
      self.visit_ast(node.right, node)
  def analyzeExpressionListNode(self, expression_list_node):
    for n in expression_list_node:
      self.visit_ast(n, expression_list_node)
  def analyzeTargetListNode(self, target_list_node):
    """Flatten (possibly nested) assignment targets into flat_list."""
    flat_list = []
    for n in target_list_node:
      self.visit_ast(n, target_list_node)
      if type(n) == TargetListNode:
        flat_list.extend(n.flat_list)
      else:
        flat_list.append(n)
    target_list_node.flat_list = flat_list
  # def analyzeParameterListNode(self, parameter_list_node):
  #   flat_list = []
  #   for n in parameter_list_node:
  #     flat_list.append(n)
  #   target_list_node.flat_list = flat_list
  def analyzeArgListNode(self, arg_list_node):
    for n in arg_list_node:
      self.visit_ast(n, arg_list_node)
  def analyzeTupleLiteralNode(self, tuple_literal_node):
    for n in tuple_literal_node.child_nodes:
      self.visit_ast(n, tuple_literal_node)
  def analyzeDictLiteralNode(self, dict_literal_node):
    for key_node, value_node in dict_literal_node.child_nodes:
      self.visit_ast(key_node, dict_literal_node)
      self.visit_ast(value_node, dict_literal_node)
  def analyzeCallFunctionNode(self, function_call):
    self.visit_ast(function_call.expression, function_call)
    self.visit_ast(function_call.arg_list, function_call)
  def analyzeBufferWrite(self, buffer_write):
    """Optimize a buffer write; unwrap %-formatting of template-method
    calls, which already produce text."""
    self.visit_ast(buffer_write.expression, buffer_write)
    # template functions output text - don't format them as strings
    if (isinstance(buffer_write.expression, BinOpNode) and
        buffer_write.expression.operator == '%' and
        isinstance(buffer_write.expression.right, CallFunctionNode) and
        isinstance(buffer_write.expression.right.expression,
                   TemplateMethodIdentifierNode)):
      buffer_write.replace(
        buffer_write.expression, buffer_write.expression.right)
  def analyzeEchoNode(self, node):
    for n in (node.test_expression, node.true_expression, node.false_expression):
      if n:
        self.visit_ast(n, node)
  def analyzeFilterNode(self, filter_node):
    """Optimize a filtered expression; template-method calls bypass the
    filter entirely, and filtered placeholders may be cached in an alias."""
    self.visit_ast(filter_node.expression, filter_node)
    if (isinstance(filter_node.expression, CallFunctionNode) and
        isinstance(filter_node.expression.expression, TemplateMethodIdentifierNode)):
      filter_node.parent.replace(filter_node, filter_node.expression)
      return
    if self.options.cache_filtered_placeholders:
      # NOTE: you *must* analyze the node before putting it in a dict
      # otherwise the definition of hash and equivalence will change and the
      # node will not be found due to the sketchy custom hash function
      scope = self.get_parent_scope(filter_node)
      alias = scope.aliased_expression_map.get(filter_node)
      if not alias:
        alias_name = '_fph%08X' % unsigned_hash(filter_node.expression)
        if alias_name in scope.alias_name_set:
          print "duplicate alias_name", alias_name
          print "scope", scope
          print "scope.alias_name_set", scope.alias_name_set
          print "scope.aliased_expression_map", scope.aliased_expression_map
          return
        alias = IdentifierNode(alias_name)
        scope.alias_name_set.add(alias_name)
        scope.aliased_expression_map[filter_node] = alias
        assign_alias = AssignNode(alias, filter_node)
        insert_block, insert_marker = self.get_insert_block_and_point(
            filter_node)
        insert_block.insert_before(insert_marker, assign_alias)
      filter_node.parent.replace(filter_node, alias)
  def analyzePlaceholderNode(self, placeholder):
    """Resolve a placeholder to the cheapest available reference: a local,
    a template method, a global, an existing cache variable, a builtin, or
    (optionally) a freshly cached '_rph_' alias."""
    if self.options.directly_access_defined_variables:
      # when the analyzer finds a PlaceholderNode and generates a function
      # call out of it, i annotate an IdentifierNode with the original
      # placeholder name
      local_var = IdentifierNode(placeholder.name)
      cached_placeholder = IdentifierNode('_rph_%s' % local_var.name)
      local_identifiers = self.get_local_identifiers(placeholder)
      #print "local_identifiers", local_identifiers
      if local_var in local_identifiers:
        placeholder.parent.replace(placeholder, local_var)
      elif placeholder.name in self.ast_root.template_methods:
        placeholder.parent.replace(
          placeholder, TemplateMethodIdentifierNode(
            placeholder.name))
      elif local_var in self.ast_root.global_identifiers:
        placeholder.parent.replace(placeholder, local_var)
      elif cached_placeholder in local_identifiers:
        placeholder.parent.replace(placeholder, cached_placeholder)
      elif local_var.name in builtin_names:
        placeholder.parent.replace(placeholder,
                                   IdentifierNode(local_var.name))
      elif self.options.cache_resolved_placeholders:
        scope = self.get_parent_scope(placeholder)
        scope.alias_name_set.add(cached_placeholder.name)
        scope.aliased_expression_map[placeholder] = cached_placeholder
        insert_block, insert_marker = self.get_insert_block_and_point(
            placeholder)
        # note: this is sketchy enough that it requires some explanation
        # basically, you need to visit the node for the parent function to
        # get the memo that this value is aliased. unfortunately, the naive
        # case of just calling visit_ast blows up since it tries to double
        # analyze a certain set of nodes. you only really need to analyze
        # that the assignment took place, then you can safely alias the
        # actual function call. definitely sketchy, but it does seem to work
        assign_rph = AssignNode(cached_placeholder, None)
        cached_placeholder.parent = assign_rph
        #print "optimize scope:", insert_block
        #print "optimize marker:", insert_marker
        insert_block.insert_before(
            insert_marker, assign_rph)
        self.visit_ast(assign_rph, insert_block)
        assign_rph.right = placeholder
        placeholder.parent.replace(placeholder, cached_placeholder)
  def analyzePlaceholderSubstitutionNode(self, placeholder_substitution):
    self.visit_ast(placeholder_substitution.expression,
                   placeholder_substitution)
  # def alias_expression_in_function(self, function, expression):
  #   alias = function.aliased_expression_map.get(expression)
  #   if not alias:
  #     alias_name = '_%s' % (expression.name)
  #     if alias_name in function.alias_name_set:
  #       print "duplicate alias_name", alias_name
  #       return
  #     alias = IdentifierNode(alias_name)
  #     function.aliased_expression_map[expression] = alias
  #     assign_alias = AssignNode(alias, expression)
  #     parent_loop = self.get_parent_loop(node)
  #     # fixme: check to see if this expression is loop-invariant
  #     # must add a test case for this
  #     child_node_set = set(node.getChildNodes())
  #     #print "child_node_set", child_node_set
  #     #print "parent_loop", parent_loop, "parent", node.parent
  #     if (parent_loop is not None and
  #         not parent_loop.loop_variant_set.intersection(child_node_set)):
  #       #print "pull up loop invariant", assign_alias
  #       parent_loop.parent.insert_before(parent_loop, assign_alias)
  #     else:
  #       insert_block, insert_marker = self.get_insert_block_and_point(node)
  #       insert_block.insert_before(insert_marker, assign_alias)
  #     node.parent.replace(node, alias)
  def analyzeGetAttrNode(self, node):
    """Alias a simple attribute access (identifier.attr) behind a generated
    variable, hoisting it above the loop when it is loop-invariant."""
    if not self.options.alias_invariants:
      return
    # fixme: only handle the trivial case for now
    # simplifies the protocol for making up alias names
    if type(node.expression) != IdentifierNode:
      return
    scope = self.get_parent_scope(node)
    alias = scope.aliased_expression_map.get(node)
    if not alias:
      if node.expression.name[0] != '_':
        alias_format = '_%s_%s'
      else:
        alias_format = '%s_%s'
      alias_name = alias_format % (node.expression.name, node.name)
      if alias_name in scope.alias_name_set:
        print "duplicate alias_name", alias_name
        print "scope", scope
        print "scope.alias_name_set", scope.alias_name_set
        print "scope.aliased_expression_map", scope.aliased_expression_map
        return
      alias = IdentifierNode(alias_name)
      scope.alias_name_set.add(alias_name)
      scope.aliased_expression_map[node] = alias
      assign_alias = AssignNode(alias, node)
      parent_loop = self.get_parent_loop(node)
      # fixme: check to see if this expression is loop-invariant
      # must add a test case for this
      child_node_set = set(node.getChildNodes())
      #print "child_node_set", child_node_set
      #print "parent_loop", parent_loop, "parent", node.parent
      if (self.options.inline_hoist_loop_invariant_aliases and
          parent_loop is not None and
          not parent_loop.loop_variant_set.intersection(child_node_set)):
        # print "pull up loop invariant", assign_alias
        parent_loop.parent.insert_before(parent_loop, assign_alias)
      else:
        insert_block, insert_marker = self.get_insert_block_and_point(node)
        insert_block.insert_before(insert_marker, assign_alias)
      node.parent.replace(node, alias)
  def analyzeIfNode(self, if_node):
    """Optimize both branches, then promote locals/aliases defined in *both*
    branches to the parent scope (they are guaranteed to exist afterwards)."""
    self.visit_ast(if_node.test_expression, if_node)
    for n in if_node.child_nodes:
      self.visit_ast(n, if_node)
    for n in if_node.else_.child_nodes:
      self.visit_ast(n, if_node.else_)
    parent_scope = self.get_parent_scope(if_node)
    # once both branches are optimized, walk the scopes for any variables that
    # are defined in both places. those will be promoted to function scope
    # since it is safe to assume that those will defined
    # fixme: this feels like a bit of hack - but not sure how to do this
    # correctly without reverting to slower performance for almost all calls to
    # resolve_placeholder.
    #
    # it seems like certain optimizations need
    # to be hoisted up to the parent scope. this is particularly the case when
    # you are aliasing common functions that are likely to occur in the parent
    # scope after the conditional block. you *need* to hoist those, or you will
    # have errors when the branch fails. essentially you have to detect and
    # hoist 'branch invariant' optimizations.
    if if_node.else_.child_nodes:
      if_scope_vars = set(if_node.scope.local_identifiers)
      common_local_identifiers = list(if_scope_vars.intersection(
          if_node.else_.scope.local_identifiers))
      common_alias_name_set = if_node.scope.alias_name_set.intersection(
          if_node.else_.scope.alias_name_set)
      common_keys = (
          set(if_node.scope.aliased_expression_map.iterkeys()) &
          set(if_node.else_.scope.aliased_expression_map.iterkeys()))
      common_aliased_expression_map = {}
      for key in common_keys:
        common_aliased_expression_map[key] = if_node.scope.aliased_expression_map[key]
      parent_scope.local_identifiers.extend(common_local_identifiers)
      parent_scope.alias_name_set.update(common_alias_name_set)
      parent_scope.aliased_expression_map.update(common_aliased_expression_map)
    else:
      # we can try to hoist up invariants if they don't depend on the
      # condition. this is somewhat hard to know, so the best way to do so
      # without multiple passes of the optimizer is to hoist only things that
      # were already defined in the parent scope - like _buffer, or things on
      # self.
      pass
  def analyzeBinOpNode(self, n):
    """Optimize both operands; caching is disabled around 'and' so a cached
    rhs cannot be hoisted above the short-circuit check."""
    # if you are trying to use short-circuit behavior, these two optimizations
    # can sabotage correct execution since the rhs may be hoisted above the
    # IfNode and cause it to get executed prior to passing the lhs check.
    if n.operator == 'and':
      cache_placeholders = self.options.cache_resolved_placeholders
      cache_udn_expressions = self.options.cache_resolved_udn_expressions
      self.options.cache_resolved_placeholders = False
      self.options.cache_resolved_udn_expressions = False
    self.visit_ast(n.left, n)
    self.visit_ast(n.right, n)
    if n.operator == 'and':
      # restore the caching flags saved above
      self.options.cache_resolved_placeholders = cache_placeholders
      self.options.cache_resolved_udn_expressions = cache_udn_expressions
  analyzeBinOpExpressionNode = analyzeBinOpNode
  def analyzeUnaryOpNode(self, op_node):
    self.visit_ast(op_node.expression, op_node)
  def get_local_identifiers(self, node):
    """Collect the identifiers visible to `node` by walking its enclosing
    scopes up to (and including) the nearest function."""
    local_identifiers = []
    # search the parent scopes
    # fixme: should this be recursive?
    node = node.parent
    while node is not None:
      if isinstance(node, ForNode):
        local_identifiers.extend(node.loop_variant_set)
        local_identifiers.extend(node.scope.local_identifiers)
      elif isinstance(node, IfNode):
        local_identifiers.extend(node.scope.local_identifiers)
      elif isinstance(node, ElseNode):
        # in this case, we don't want to go to the parent node, which is the
        # IfNode - we want to go to the parent 'scope'
        local_identifiers.extend(node.scope.local_identifiers)
        node = node.parent.parent
        continue
      elif isinstance(node, FunctionNode):
        local_identifiers.extend(node.scope.local_identifiers)
        break
      node = node.parent
    return frozenset(local_identifiers)
  def analyzeGetUDNNode(self, node):
    """Optionally cache a resolved UDN (unified dotted notation) lookup in
    a '_rudn_' alias variable, unless the target is reassigned later in the
    same block (which would make the cached value stale)."""
    if not self.options.prefer_whole_udn_expressions:
      self.visit_ast(node.expression, node)
    if self.options.cache_resolved_udn_expressions:
      cached_udn = IdentifierNode('_rudn_%s' % unsigned_hash(node))
      local_identifiers = self.get_local_identifiers(node)
      if cached_udn in local_identifiers:
        node.parent.replace(node, cached_udn)
      else:
        insert_block, insert_marker = self.get_insert_block_and_point(
            node)
        # if there is a reassignment in the parent block, don't cache this
        # incase it needs to be re-resolved.
        # #set $text = $text.replace('\r\n', '\n')
        # #set $text = $text.replace('\t', ' ')
        # in this example, if you cache the udn expression text.replace,
        # you have a problem - you won't ever use the new string create by
        # the first call to replace
        for child_node in insert_block.child_nodes:
          if (isinstance(child_node, AssignNode) and
              child_node.left == node.expression):
            return
        scope = self.get_parent_scope(node)
        scope.alias_name_set.add(cached_udn.name)
        scope.aliased_expression_map[node] = cached_udn
        # note: this is sketchy enough that it requires some explanation
        # basically, you need to visit the node for the parent function to
        # get the memo that this value is aliased. unfortunately, the naive
        # case of just calling visit_ast blows up since it tries to double
        # analyze a certain set of nodes. you only really need to analyze
        # that the assignment took place, then you can safely alias the
        # actual function call. definitely sketchy, but it does seem to work
        assign_rph = AssignNode(cached_udn, None)
        cached_udn.parent = assign_rph
        insert_block.insert_before(
            insert_marker, assign_rph)
        self.visit_ast(assign_rph, insert_block)
        assign_rph.right = node
        node.parent.replace(node, cached_udn)
    elif self.options.prefer_whole_udn_expressions:
      self.visit_ast(node.expression, node)
  def analyzeSliceNode(self, pnode):
    self.visit_ast(pnode.expression, pnode)
    self.visit_ast(pnode.slice_expression, pnode)
# a second pass over the optimized tree to hoist invariant aliases to their
# parent blocks
class FinalPassAnalyzer(_BaseAnalyzer):
  """Depth-first second pass: after OptimizationAnalyzer has created alias
  assignments, hoist the invariant ones out of loops and conditionals."""
  def analyzeTemplateNode(self, template):
    self.visit_ast(template.main_function, template)
    for n in template.child_nodes:
      self.visit_ast(n, template)
  def analyzeFunctionNode(self, function):
    for n in function.child_nodes:
      self.visit_ast(n, function)
  def analyzeForNode(self, for_node):
    """Visit the loop body first, then hoist its invariant aliases."""
    for n in for_node.child_nodes:
      self.visit_ast(n, for_node)
    self.reanalyzeLoopNode(for_node)
  def analyzeIfNode(self, if_node):
    """Visit both branches first, then hoist condition-invariant aliases."""
    # depth-first
    for n in if_node.child_nodes:
      self.visit_ast(n, if_node)
    for n in if_node.else_.child_nodes:
      self.visit_ast(n, if_node.else_)
    self.reanalyzeConditionalNode(if_node)
    self.reanalyzeConditionalNode(if_node.else_)
  def hoist(self, parent_node, parent_block, insertion_point, alias_node,
            assign_alias_node):
    """Move an alias assignment from `parent_node` (a loop/branch) up into
    `parent_block`, updating both scopes' bookkeeping so the alias is not
    hoisted or depended upon twice."""
    # prune the implementation in the nested block
    # print "prune", alias_node
    # print "parent_block aliases", parent_block.scope.aliased_expression_map
    parent_node.remove(assign_alias_node)
    # if we've already hoisted an assignment, don't do it again
    if alias_node not in parent_block.scope.hoisted_aliases:
      # prune the original implementation in the current block and
      # reinsert the alias before it's first potential usage if it
      # is needed earlier in the execution path.
      # when a variable aliased in both the if and
      # else blocks is promoted to the parent scope
      # the implementation isn't actually hoisted (should it be?)
      # inline with the IfNode optimization so we need to check if the
      # node is already here
      if assign_alias_node in parent_block.child_nodes:
        current_pos = parent_block.child_nodes.index(assign_alias_node)
        # an else node's parent is the IfNode, which is the relevant
        # node when searching for the insertion point
        needed_pos = parent_block.child_nodes.index(insertion_point)
        if needed_pos < current_pos:
          parent_block.child_nodes.remove(assign_alias_node)
          if isinstance(parent_node, ElseNode):
            parent_block.insert_before(parent_node.parent, assign_alias_node)
          else:
            parent_block.insert_before(parent_node, assign_alias_node)
          # print "insert_before", alias_node
      else:
        # still need to insert the alias
        parent_block.insert_before(insertion_point, assign_alias_node)
      parent_block.scope.hoisted_aliases.append(alias_node)
    # NOTE: once we hoist an expression, we need to make sure that we no
    # longer use this for dependencies in the current scope
    del parent_node.scope.aliased_expression_map[alias_node]
    parent_node.scope.alias_name_set.remove(assign_alias_node.left.name)
    # FIXME: this is probably an indication of a bug or unnecessary
    # difference between the caching of placeholders and filter expressions
    if not isinstance(alias_node, FilterNode):
      parent_node.scope.local_identifiers.remove(assign_alias_node.left)
# Matches an (optionally prefixed) "#def name" / "#block name" directive;
# group 2 captures the template function name.
template_function_re = re.compile(r'^[^#]*#(def|block)\s+(\w+)')
# Matches an "#extends dotted.module.path" directive; group 1 is the path.
extends_re = re.compile(r'^#extends\s+([\.\w]+)')
template_extensions = ('.spt', '.tmpl')
# scan an spt file for template functions it will output
def get_template_functions(path):
  """Return the set of template function names defined by the file at `path`.

  Scans for #def/#block directives and recursively follows #extends
  directives (trying each known template extension) to merge in inherited
  template function names.  A missing file is logged at debug level and
  contributes an empty set.
  """
  template_function_names = set()
  if not os.path.exists(path):
    logging.debug('no such template for dependecy check: %s', path)
  else:
    # use a context manager so the file handle is closed deterministically
    # (the previous open() without close() leaked a descriptor per scan)
    with open(path) as f:
      for line in f:
        match = template_function_re.match(line)
        if match:
          template_function_names.add(match.group(2))
          continue
        match = extends_re.match(line)
        if match:
          extend_name = match.group(1)
          extend_path = extend_name.replace('.', '/')
          for ext in template_extensions:
            template_path = extend_path + ext
            template_function_names.update(
                get_template_functions(template_path))
  return template_function_names
|
eklitzke/spitfire
|
spitfire/compiler/optimizer.py
|
Python
|
bsd-3-clause
| 30,190
|
[
"VisIt"
] |
38d9aef7c58e8500e9b91def8906ac34b67140dc5e32f5a9a15c643bded0d7bf
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
import setuptools.command.build_py
from coalib import assert_supported_version
assert_supported_version()
from coalib.misc.BuildManPage import BuildManPage
from coalib.output.dbus.BuildDbusService import BuildDbusService
from coalib.misc.Constants import Constants
class BuildPyCommand(setuptools.command.build_py.build_py):
    """build_py variant that generates the man page and the D-Bus service
    description before running the standard build step."""

    def run(self):
        for subcommand in ('build_manpage', 'build_dbus'):
            self.run_command(subcommand)
        setuptools.command.build_py.build_py.run(self)
if __name__ == "__main__":
    maintainers = "Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger"
    maintainer_mails = ('lasse.schuirmann@gmail.com, '
                        'fabian@neuschmidt.de, '
                        'makman@alice.de')
    # Ship the generated man page and D-Bus service file alongside the package.
    data_files = [('.', ['coala.1']), ('.', [Constants.BUS_NAME + '.service'])]
    setup(name='coala',
          version=Constants.VERSION,
          description='Code Analysis Application (coala)',
          author=maintainers+", Abdeali Kothari, Udayan Tandon",
          author_email=maintainer_mails +
                       ", abdealikothari@gmail.com, udayan12167@iiitd.ac.in",
          maintainer=maintainers,
          maintainer_email=maintainer_mails,
          url='http://coala.rtfd.org/',
          platforms='any',
          packages=find_packages(exclude=["build.*", "*.tests.*", "*.tests"]),
          install_requires=["PyPrint",
                            "setuptools",
                            "munkres3",
                            "coverage",
                            "pylint",
                            "language-check",
                            "autopep8",
                            "eradicate",
                            "autoflake",
                            "restructuredtext_lint",
                            "proselint",
                            "cpplint"],
          package_data={'coalib': ['default_coafile', "VERSION"]},
          license="AGPL v3",
          data_files=data_files,
          # fixed: added the missing space between "our" and "development" -
          # the adjacent literals previously concatenated to "ourdevelopment"
          long_description="coala is a simple COde AnaLysis Application. Its "
                           "goal is to make static code analysis easy while "
                           "remaining completely modular and therefore "
                           "extendable and language independent. Code analysis"
                           " happens in python scripts while coala manages "
                           "these, tries to provide helpful libraries and "
                           "provides a user interface. Please visit "
                           "http://coala.rtfd.org/ for more information or our "
                           "development repository on "
                           "https://github.com/coala-analyzer/coala/.",
          entry_points={
              "console_scripts": [
                  "coala = coalib.coala:main",
                  "coala-ci = coalib.coala_ci:main",
                  "coala-dbus = coalib.coala_dbus:main",
                  "coala-json = coalib.coala_json:main",
                  "coala-format = coalib.coala_format:main"]},
          # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
          classifiers=[
              'Development Status :: 3 - Alpha',
              'Environment :: Console',
              'Environment :: MacOS X',
              'Environment :: Win32 (MS Windows)',
              'Environment :: X11 Applications :: Gnome',
              'Intended Audience :: Science/Research',
              'Intended Audience :: Developers',
              'License :: OSI Approved :: GNU Affero General Public License '
              'v3 or later (AGPLv3+)',
              'Operating System :: OS Independent',
              'Programming Language :: Python :: Implementation :: CPython',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'Programming Language :: Python :: 3.5',
              'Programming Language :: Python :: 3 :: Only',
              'Topic :: Scientific/Engineering :: Information Analysis',
              'Topic :: Software Development :: Quality Assurance',
              'Topic :: Text Processing :: Linguistic'],
          cmdclass={'build_manpage': BuildManPage,
                    'build_dbus': BuildDbusService,
                    'build_py': BuildPyCommand})
|
Tanmay28/coala
|
setup.py
|
Python
|
agpl-3.0
| 4,386
|
[
"VisIt"
] |
686ff75b24065d6dd584c77198afbd695645adf639d4bc4745a5e66e9356b916
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import traceback
import h2o
from h2o.base import Keyed
from h2o.exceptions import H2OValueError
from h2o.job import H2OJob
from h2o.model.extensions import has_extension
from h2o.plot import decorate_plot_result, get_matplotlib_pyplot, RAISE_ON_FIGURE_ACCESS
from h2o.utils.compatibility import * # NOQA
from h2o.utils.compatibility import viewitems
from h2o.utils.metaclass import backwards_compatibility, deprecated_fn, h2o_meta, deprecated_params
from h2o.utils.shared_utils import can_use_pandas, can_use_numpy
from h2o.utils.typechecks import assert_is_type, assert_satisfies, Enum, is_type
@backwards_compatibility(
instance_attrs=dict(
giniCoef=lambda self, *args, **kwargs: self.gini(*args, **kwargs)
)
)
class ModelBase(h2o_meta(Keyed)):
"""Base class for all models."""
_options_ = {} # dict of options declared in implementation
    def __init__(self):
        """Construct a new model instance."""
        self._id = None
        self._model_json = None
        self._metrics_class = None
        self._metrics_class_valid = None
        self._is_xvalidated = False
        self._xval_keys = None
        self._parms = {}   # internal, for object recycle
        self.parms = {}    # external
        self._estimator_type = None
        self._future = False  # used by __repr__/show to query job state
        self._job = None  # used when _future is True
        self._have_pojo = False
        self._have_mojo = False
        # training bookkeeping, surfaced via the start_time/end_time/run_time
        # properties (milliseconds)
        self._start_time = None
        self._end_time = None
        self._run_time = None
    @property
    def key(self):
        """Model key (same value as ``model_id``)."""
        return self._id
    @property
    def model_id(self):
        """Model identifier."""
        return self._id
    @model_id.setter
    def model_id(self, newid):
        # Rename on the backend too (via a Rapids expression) so the client
        # and the H2O cluster stay in sync.
        oldid = self._id
        self._id = newid
        h2o.rapids("(rename '%s' '%s')" % (oldid, newid))
    @property
    def params(self):
        """
        Get the parameters and the actual/default values only.
        :returns: A dictionary of parameters used to build this model.
        """
        params = {}
        for p in self.parms:
            # For each parameter expose its default, actual and input values.
            params[p] = {"default": self.parms[p]["default_value"],
                         "actual": self.parms[p]["actual_value"],
                         "input": self.parms[p]["input_value"]}
        return params
    @property
    def default_params(self):
        """Dictionary of the default parameters of the model."""
        params = {}
        for p in self.parms:
            params[p] = self.parms[p]["default_value"]
        return params
    @property
    def actual_params(self):
        """Dictionary of actual parameters of the model."""
        # These parameters carry a nested object as their actual value;
        # extract just the named sub-field (e.g. the frame's "name").
        params_to_select = {"model_id": "name",
                            "response_column": "column_name",
                            "training_frame": "name",
                            "validation_frame": "name"}
        params = {}
        for p in self.parms:
            if p in params_to_select.keys():
                params[p] = (self.parms[p].get("actual_value") or {}).get(params_to_select[p], None)
            else:
                params[p] = self.parms[p]["actual_value"]
        return params
    @property
    def full_parameters(self):
        """Dictionary of the full specification of all parameters."""
        return self.parms
    @property
    def type(self):
        """The type of model built: ``"classifier"`` or ``"regressor"`` or ``"unsupervised"``"""
        return self._estimator_type
    @property
    def have_pojo(self):
        """True, if export to POJO is possible"""
        return self._have_pojo
    @property
    def have_mojo(self):
        """True, if export to MOJO is possible"""
        return self._have_mojo
    @property
    def start_time(self):
        """Timestamp (milliseconds since 1970) when the model training was started."""
        return self._start_time
    @property
    def end_time(self):
        """Timestamp (milliseconds since 1970) when the model training was ended."""
        return self._end_time
    @property
    def run_time(self):
        """Model training time in milliseconds"""
        return self._run_time
    def __repr__(self):
        """Print the model via show() and return an empty string.

        Printing is skipped when invoked from IPython's ``<method>?``
        introspection, detected by inspecting the call stack.
        """
        # PUBDEV-2278: using <method>? from IPython caused everything to dump
        stk = traceback.extract_stack()
        if not ("IPython" in stk[-2][0] and "info" == stk[-2][2]):
            self.show()
        return ""
    def predict_leaf_node_assignment(self, test_data, type="Path"):
        """
        Predict on a dataset and return the leaf node assignment (only for tree-based models).
        :param H2OFrame test_data: Data on which to make predictions.
        :param Enum type: How to identify the leaf node. Nodes can be either identified by a path from to the root node
            of the tree to the node or by H2O's internal node id. One of: ``"Path"``, ``"Node_ID"`` (default: ``"Path"``).
        :returns: A new H2OFrame of predictions.
        """
        # NOTE: `type` deliberately shadows the builtin - it is part of the
        # public API signature and cannot be renamed.
        if not isinstance(test_data, h2o.H2OFrame):
            raise ValueError("test_data must be an instance of H2OFrame")
        assert_is_type(type, None, Enum("Path", "Node_ID"))
        j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                    data={"leaf_node_assignment": True, "leaf_node_assignment_type": type})
        return h2o.get_frame(j["predictions_frame"]["name"])
def staged_predict_proba(self, test_data):
"""
Predict class probabilities at each stage of an H2O Model (only GBM models).
The output structure is analogous to the output of function predict_leaf_node_assignment. For each tree t and
class c there will be a column Tt.Cc (eg. T3.C1 for tree 3 and class 1). The value will be the corresponding
predicted probability of this class by combining the raw contributions of trees T1.Cc,..,TtCc. Binomial models
build the trees just for the first class and values in columns Tx.C1 thus correspond to the the probability p0.
:param H2OFrame test_data: Data on which to make predictions.
:returns: A new H2OFrame of staged predictions.
"""
if not isinstance(test_data, h2o.H2OFrame): raise ValueError("test_data must be an instance of H2OFrame")
j = h2o.api("POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
data={"predict_staged_proba": True})
return h2o.get_frame(j["predictions_frame"]["name"])
def predict_contributions(self, test_data, output_format="Original", top_n=None, bottom_n=None, compare_abs=False):
    """
    Predict feature contributions - SHAP values on an H2O Model (only GBM, XGBoost, DRF models and equivalent
    imported MOJOs).

    Returned H2OFrame has shape (#rows, #features + 1) - there is a feature contribution column for each input
    feature, the last column is the model bias (same value for each row). The sum of the feature contributions
    and the bias term is equal to the raw prediction of the model. Raw prediction of tree-based model is the sum
    of the predictions of the individual trees before the inverse link function is applied to get the actual
    prediction. For Gaussian distribution the sum of the contributions is equal to the model prediction.

    Note: Multinomial classification models are currently not supported.

    :param H2OFrame test_data: Data on which to calculate contributions.
    :param Enum output_format: Specify how to output feature contributions in XGBoost - XGBoost by default outputs
        contributions for 1-hot encoded features, specifying a Compact output format will produce a per-feature
        contribution. One of: ``"Original"``, ``"Compact"`` (default: ``"Original"``).
    :param top_n: Return only #top_n highest contributions + bias.
        If top_n<0 then sort all SHAP values in descending order.
    :param bottom_n: Return only #bottom_n lowest contributions + bias.
        If top_n and bottom_n are defined together then return array of #top_n + #bottom_n + bias.
        If bottom_n<0 then sort all SHAP values in ascending order.
        If top_n<0 && bottom_n<0 then sort all SHAP values in descending order.
    :param compare_abs: True to compare absolute values of contributions.
    :returns: A new H2OFrame made of feature contributions.

    :examples:

    >>> fr = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv")
    >>> m = H2OGradientBoostingEstimator(ntrees=10, seed=1234)
    >>> m.train(x=list(range(2, fr.ncol)), y=1, training_frame=fr)
    >>> m.predict_contributions(fr)                              # all SHAP values
    >>> m.predict_contributions(fr, top_n=2)                     # two highest
    >>> m.predict_contributions(fr, bottom_n=2)                  # two lowest
    >>> m.predict_contributions(fr, top_n=2, compare_abs=True)   # two largest by magnitude
    >>> m.predict_contributions(fr, top_n=2, bottom_n=2)         # two highest and two lowest
    """
    assert_is_type(output_format, None, Enum("Original", "Compact"))
    if not isinstance(test_data, h2o.H2OFrame):
        raise ValueError("test_data must be an instance of H2OFrame")
    payload = {
        "predict_contributions": True,
        "predict_contributions_output_format": output_format,
        "top_n": top_n,
        "bottom_n": bottom_n,
        "compare_abs": compare_abs,
    }
    job = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                         data=payload), "contributions")
    job.poll()
    return h2o.get_frame(job.dest_key)
def feature_frequencies(self, test_data):
    """
    Retrieve the number of occurrences of each feature for given observations
    on their respective paths in a tree ensemble model.

    Available for GBM, Random Forest and Isolation Forest models.

    :param H2OFrame test_data: Data on which to calculate feature frequencies.
    :returns: A new H2OFrame made of feature contributions.
    """
    if not isinstance(test_data, h2o.H2OFrame):
        raise ValueError("test_data must be an instance of H2OFrame")
    endpoint = "POST /3/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id)
    response = h2o.api(endpoint, data={"feature_frequencies": True})
    return h2o.get_frame(response["predictions_frame"]["name"])
def predict(self, test_data, custom_metric=None, custom_metric_func=None):
    """
    Predict on a dataset.

    :param H2OFrame test_data: Data on which to make predictions.
    :param custom_metric: custom evaluation function defined as class reference, the class get uploaded
        into the cluster.
    :param custom_metric_func: custom evaluation function reference, e.g, result of upload_custom_metric.
    :returns: A new H2OFrame of predictions.
    """
    # Upload evaluation function into DKV
    if custom_metric:
        assert_satisfies(custom_metric_func, custom_metric_func is None,
                         "The argument 'eval_func_ref' cannot be specified when eval_func is specified, ")
        # Bug fix: the uploaded reference was previously assigned to an unused local
        # (`eval_func_ref`) and silently dropped, so the custom metric was never used.
        # It must become the function reference passed to the prediction endpoint.
        custom_metric_func = h2o.upload_custom_metric(custom_metric)
    if not isinstance(test_data, h2o.H2OFrame):
        raise ValueError("test_data must be an instance of H2OFrame")
    j = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                       data={'custom_metric_func': custom_metric_func}),
               self._model_json["algo"] + " prediction")
    j.poll()
    return h2o.get_frame(j.dest_key)
def is_cross_validated(self):
    """Return True if the model was cross-validated.

    :returns: the ``_is_xvalidated`` flag stored on the model.
    """
    return self._is_xvalidated
def xval_keys(self):
    """Return model keys for the cross-validated model.

    :returns: the ``_xval_keys`` list stored on the model.
    """
    return self._xval_keys
def get_xval_models(self, key=None):
    """
    Return a Model object.

    :param key: If None, return all cross-validated models; otherwise return the model that key points to.
    :returns: A model or list of models.
    """
    if key is None:
        return [h2o.get_model(k) for k in self._xval_keys]
    return h2o.get_model(key)
@property
def xvals(self):
    """
    Return a list of the cross-validated models.

    Convenience property equivalent to ``get_xval_models()`` with no key.

    :returns: A list of models.
    """
    return self.get_xval_models()
def detach(self):
    """Clear this object's model id (``_id``).

    NOTE(review): presumably this detaches the Python wrapper from the backing
    cluster model — confirm intended semantics with callers.
    """
    self._id = None
def deepfeatures(self, test_data, layer):
    """
    Return hidden layer details.

    :param test_data: Data to create a feature space on.
    :param layer: 0-based index of the hidden layer, or the layer's name.
    :returns: An H2OFrame with the hidden-layer feature space.
    """
    if test_data is None:
        raise ValueError("Must specify test data")
    # Numeric-looking layers are addressed by index; anything else by name.
    if str(layer).isdigit():
        payload = {"deep_features_hidden_layer": layer}
    else:
        payload = {"deep_features_hidden_layer_name": layer}
    job = H2OJob(h2o.api("POST /4/Predictions/models/%s/frames/%s" % (self._id, test_data.frame_id),
                         data=payload), "deepfeatures")
    job.poll()
    return h2o.get_frame(job.dest_key)
def weights(self, matrix_id=0):
    """
    Return the frame for the respective weight matrix.

    :param matrix_id: an integer, ranging from 0 to number of layers, that specifies the weight matrix to return.
    :returns: an H2OFrame which represents the weight matrix identified by matrix_id
    :raises ValueError: if ``matrix_id`` is outside the valid range.
    """
    num_weight_matrices = len(self._model_json["output"]["weights"])
    # range membership is O(1) (and handles non-integers the same way) — no need to
    # materialize list(range(...)) for every call.
    if matrix_id not in range(num_weight_matrices):
        raise ValueError(
            "Weight matrix does not exist. Model has {0} weight matrices (0-based indexing), but matrix {1} "
            "was requested.".format(num_weight_matrices, matrix_id))
    return h2o.get_frame(self._model_json["output"]["weights"][matrix_id]["URL"].split("/")[3])
def biases(self, vector_id=0):
    """
    Return the frame for the respective bias vector.

    :param vector_id: an integer, ranging from 0 to number of layers, that specifies the bias vector to return.
    :returns: an H2OFrame which represents the bias vector identified by vector_id
    :raises ValueError: if ``vector_id`` is outside the valid range.
    """
    num_bias_vectors = len(self._model_json["output"]["biases"])
    # range membership is O(1) — no need to materialize list(range(...)) per call.
    if vector_id not in range(num_bias_vectors):
        raise ValueError(
            "Bias vector does not exist. Model has {0} bias vectors (0-based indexing), but vector {1} "
            "was requested.".format(num_bias_vectors, vector_id))
    return h2o.get_frame(self._model_json["output"]["biases"][vector_id]["URL"].split("/")[3])
def normmul(self):
    """Normalization/Standardization multipliers for numeric predictors."""
    output = self._model_json["output"]
    return output["normmul"]
def normsub(self):
    """Normalization/Standardization offsets for numeric predictors."""
    output = self._model_json["output"]
    return output["normsub"]
def respmul(self):
    """Normalization/Standardization multipliers for numeric response."""
    output = self._model_json["output"]
    return output["normrespmul"]
def respsub(self):
    """Normalization/Standardization offsets for numeric response."""
    output = self._model_json["output"]
    return output["normrespsub"]
def catoffsets(self):
    """Categorical offsets for one-hot encoding."""
    output = self._model_json["output"]
    return output["catoffsets"]
def training_model_metrics(self):
    """
    Return training model metrics for any model.

    :returns: the raw ``_metric_json`` payload of the model's training metrics object.
    """
    return self._model_json["output"]["training_metrics"]._metric_json
def model_performance(self, test_data=None, train=False, valid=False, xval=False, auc_type=None,
                      auuc_type=None, auuc_nbins=-1):
    """
    Generate model metrics for this model on test_data.

    :param H2OFrame test_data: Data set for which model metrics shall be computed against. All three of train,
        valid and xval arguments are ignored if test_data is not None.
    :param bool train: Report the training metrics for the model.
    :param bool valid: Report the validation metrics for the model.
    :param bool xval: Report the cross-validation metrics for the model. If train and valid are True, then it
        defaults to True.
    :param String auc_type: Change default AUC type for multinomial classification AUC/AUCPR calculation
        when test_data is not None. One of: ``"auto"``, ``"none"``, ``"macro_ovr"``, ``"weighted_ovr"``,
        ``"macro_ovo"``, ``"weighted_ovo"`` (default: ``"none"``). If type is "auto" or "none" AUC and AUCPR
        is not calculated.
    :param String auuc_type: Change default AUUC type for uplift binomial classification AUUC calculation
        when test_data is not None. One of: ``"AUTO"``, ``"qini"``, ``"lift"``, ``"gain"`` (default: ``"AUTO"``).
        If type is "auto" qini AUUC is calculated.
    :param int auuc_nbins: Number of bins for calculation AUUC. Defaults to -1, which means 1000.
    :returns: An object of class H2OModelMetrics.
    """
    if test_data is None:
        # Without test_data the stored metrics are returned; warn about ignored args.
        if auc_type is not None and auc_type != "none":
            print("WARNING: The `auc_type` parameter is set but it is not used because the `test_data` parameter is None.")
        if auuc_type is not None:
            print("WARNING: The `auuc_type` parameter is set but it is not used because the `test_data` parameter is None.")
        if train:
            return self._model_json["output"]["training_metrics"]
        if valid:
            return self._model_json["output"]["validation_metrics"]
        if xval:
            return self._model_json["output"]["cross_validation_metrics"]
        return self._model_json["output"]["training_metrics"]
    else:  # cases dealing with test_data not None
        if not isinstance(test_data, h2o.H2OFrame):
            # Bug fix: concatenating `type(test_data)` (a type object) to a str raised
            # TypeError instead of the intended ValueError — wrap it in str().
            raise ValueError("`test_data` must be of type H2OFrame. Got: " + str(type(test_data)))
        if (self._model_json["response_column_name"] is not None) and not(self._model_json["response_column_name"] in test_data.names):
            print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the response column in your dataset.")
            return
        if auc_type is not None:
            assert_is_type(auc_type, Enum("auto", "none", "macro_ovr", "weighted_ovr", "macro_ovo", "weighted_ovo"))
            res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                          data={"auc_type": auc_type})
        elif auuc_type is not None:
            assert_is_type(auuc_type, Enum("AUTO", "qini", "gain", "lift"))
            if (self._model_json["treatment_column_name"] is not None) and not(self._model_json["treatment_column_name"] in test_data.names):
                print("WARNING: Model metrics cannot be calculated and metric_json is empty due to the absence of the treatment column in your dataset.")
                return
            res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id),
                          data={"auuc_type": auuc_type, "auuc_nbins": auuc_nbins})
        else:
            res = h2o.api("POST /3/ModelMetrics/models/%s/frames/%s" % (self.model_id, test_data.frame_id))
        # FIXME need to do the client-side filtering...  (PUBDEV-874)
        raw_metrics = None
        for mm in res["model_metrics"]:
            if mm["frame"] is not None and mm["frame"]["name"] == test_data.frame_id:
                raw_metrics = mm
                break
        return self._metrics_class_valid(raw_metrics, algo=self._model_json["algo"])
def scoring_history(self):
    """
    Retrieve Model Score History.

    :returns: The score history as an H2OTwoDimTable or a Pandas DataFrame, or
        None (after printing a message) when the model has no scoring history.
    """
    output = self._model_json["output"]
    history = output.get("scoring_history")
    if history is not None:
        return history.as_data_frame()
    print("No score history for this model")
def ntrees_actual(self):
    """
    Returns actual number of trees in a tree model. If early stopping enabled, GBM can reset the ntrees value.
    In this case, the actual ntrees value is less than the original ntrees value a user set before
    building the model.

    Type: ``float``

    :returns: the actual number of trees, or None (after printing a message) for
        models without the 'Trees' extension.
    """
    # For now, redirect to h2o.model.extensions.trees for models that support the feature, and print legacy message for others..
    # Later, the method will be exposed only for models supporting the feature.
    if has_extension(self, 'Trees'):
        return self._ntrees_actual()
    print("No actual number of trees for this model")
def feature_interaction(self, max_interaction_depth=100, max_tree_depth=100, max_deepening=-1, path=None):
    """
    Feature interactions and importance, leaf statistics and split value histograms in a tabular form.
    Available for XGBoost and GBM.

    Metrics:

    - Gain - Total gain of each feature or feature interaction.
    - FScore - Amount of possible splits taken on a feature or feature interaction.
    - wFScore - Amount of possible splits taken on a feature or feature interaction weighed by
      the probability of the splits to take place.
    - Average wFScore - wFScore divided by FScore.
    - Average Gain - Gain divided by FScore.
    - Expected Gain - Total gain of each feature or feature interaction weighed by the probability to gather the gain.
    - Average Tree Index
    - Average Tree Depth

    :param max_interaction_depth: Upper bound for extracted feature interactions depth. Defaults to 100.
    :param max_tree_depth: Upper bound for tree depth. Defaults to 100.
    :param max_deepening: Upper bound for interaction start deepening (zero deepening => interactions
        starting at root only). Defaults to -1.
    :param path: (Optional) Path where to save the output in .xlsx format (e.g. ``/mypath/file.xlsx``).
        Please note that Pandas and XlsxWriter need to be installed for using this option. Defaults to None.

    :examples:

    >>> boston_xgb = H2OXGBoostEstimator(seed=1234)
    >>> boston_xgb.train(y="medv", x=predictors, training_frame=train)
    >>> feature_interactions = boston_xgb.feature_interaction()
    """
    # Redirect to h2o.model.extensions.feature_interaction for models supporting the
    # feature; print the legacy message for others. Later, the method will be exposed
    # only for models supporting the feature.
    if not has_extension(self, 'FeatureInteraction'):
        print("No calculation available for this model")
        return
    return self._feature_interaction(max_interaction_depth=max_interaction_depth,
                                     max_tree_depth=max_tree_depth,
                                     max_deepening=max_deepening,
                                     path=path)
def h(self, frame, variables):
    """
    Calculates Friedman and Popescu's H statistics, in order to test for the presence of an interaction between
    specified variables in h2o gbm and xgb models.

    H varies from 0 to 1. It will have a value of 0 if the model exhibits no interaction between specified
    variables and a correspondingly larger value for a stronger interaction effect between them. NaN is returned
    if a computation is spoiled by weak main effects and rounding errors.

    See Jerome H. Friedman and Bogdan E. Popescu, 2008, "Predictive learning via rule ensembles",
    *Ann. Appl. Stat.* **2**:916-954, http://projecteuclid.org/download/pdfview_1/euclid.aoas/1223908046, s. 8.1.

    :param frame: the frame that current model has been fitted to.
    :param variables: variables of the interest.
    :return: H statistic of the variables.

    :examples:

    >>> gbm_h2o.train(x=list(range(1, prostate_train.ncol)), y="CAPSULE", training_frame=prostate_train)
    >>> h = gbm_h2o.h(prostate_train, ['DPROS','DCAPS'])
    """
    if not has_extension(self, 'HStatistic'):
        print("No calculation available for this model")
        return
    return self._h(frame=frame, variables=variables)
def update_tree_weights(self, frame, weights_column):
    """
    Re-calculates tree-node weights based on provided dataset. Modifying node weights will affect how
    contribution predictions (Shapley values) are calculated. This can be used to explain the model
    on a curated sub-population of the training dataset.

    :param frame: frame that will be used to re-populate trees with new observations and to collect per-node weights.
    :param weights_column: name of the weight column (can be different from training weights).
    """
    if not has_extension(self, 'SupervisedTrees'):
        print("Only supervised tree-based models support tree-reweighting")
        return
    return self._update_tree_weights(frame, weights_column)
def cross_validation_metrics_summary(self):
    """
    Retrieve Cross-Validation Metrics Summary.

    :returns: The cross-validation metrics summary as an H2OTwoDimTable, or
        None (after printing a message) if none is present.
    """
    output = self._model_json["output"]
    summary_table = output.get("cross_validation_metrics_summary")
    if summary_table is not None:
        return summary_table
    print("No cross-validation metrics summary for this model")
def summary(self):
    """Return the model summary table, or print a message when none is available."""
    output = self._model_json["output"]
    table = output.get("model_summary")
    if table is not None:
        return table
    print("No model summary for this model")
def show_summary(self):
    """Print the model summary table if one is available."""
    table = self.summary()
    if table is not None:
        print(table)
def show(self):
    """Print innards of model, without regards to type."""
    if self._future:
        self._job.poll_once()
        return
    if self._model_json is None:
        print("No model trained yet")
        return
    if self.model_id is None:
        print("This H2OEstimator has been removed.")
        return
    output = self._model_json["output"]
    print("Model Details")
    print("=============")
    print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
    print("Model Key: ", self._id)
    print()
    self.show_summary()
    # Training, validation, cross-validation metrics and their summary, in that order.
    for metrics_key in ("training_metrics", "validation_metrics",
                        "cross_validation_metrics", "cross_validation_metrics_summary"):
        metrics = output[metrics_key]
        if metrics is not None:
            metrics.show()
    # These two tables are optional and only shown when present and non-empty.
    for table_key in ("scoring_history", "variable_importances"):
        if table_key in output and output[table_key]:
            output[table_key].show()
def varimp(self, use_pandas=False):
    """
    Pretty print the variable importances, or return them in a list.

    :param bool use_pandas: If True, then the variable importances will be returned as a pandas data frame.
    :returns: A list or Pandas DataFrame, or None (after printing a warning) when the
        model carries no variable importances.
    """
    output = self._model_json["output"]
    if "variable_importances" in list(output.keys()) and output["variable_importances"]:
        table = output["variable_importances"]
        if use_pandas and can_use_pandas():
            import pandas
            return pandas.DataFrame(table.cell_values, columns=table.col_header)
        return table.cell_values
    print("Warning: This model doesn't have variable importances")
def residual_deviance(self, train=False, valid=False, xval=False):
    """
    Retrieve the residual deviance if this model has the attribute, or None otherwise.

    :param bool train: Get the residual deviance for the training set. If both train and valid are False, then
        train is selected by default.
    :param bool valid: Get the residual deviance for the validation set. If both train and valid are True, then
        train is selected by default.
    :param bool xval: Not supported; passing a truthy value raises.
    :returns: Return the residual deviance, or None if it is not present.
    :raises H2OValueError: if ``xval`` is truthy.
    """
    # Consistency fix: default was `xval=None` while every sibling accessor
    # (residual_degrees_of_freedom, null_deviance, ...) uses `xval=False`.
    # Behavior is identical (both are falsy).
    if xval:
        raise H2OValueError("Cross-validation metrics are not available.")
    if valid and not train:
        return self._model_json["output"]["validation_metrics"].residual_deviance()
    else:
        return self._model_json["output"]["training_metrics"].residual_deviance()
def residual_degrees_of_freedom(self, train=False, valid=False, xval=False):
    """
    Retrieve the residual degrees of freedom if this model has the attribute, or None otherwise.

    :param bool train: Get the residual dof for the training set. If both train and valid are False, then train
        is selected by default.
    :param bool valid: Get the residual dof for the validation set. If both train and valid are True, then train
        is selected by default.
    :returns: Return the residual dof, or None if it is not present.
    :raises H2OValueError: if ``xval`` is truthy (cross-validation metrics are not available).
    """
    if xval:
        raise H2OValueError("Cross-validation metrics are not available.")
    metrics_key = "validation_metrics" if (valid and not train) else "training_metrics"
    return self._model_json["output"][metrics_key].residual_degrees_of_freedom()
def null_deviance(self, train=False, valid=False, xval=False):
    """
    Retrieve the null deviance if this model has the attribute, or None otherwise.

    :param bool train: Get the null deviance for the training set. If both train and valid are False, then train
        is selected by default.
    :param bool valid: Get the null deviance for the validation set. If both train and valid are True, then train
        is selected by default.
    :returns: Return the null deviance, or None if it is not present.
    :raises H2OValueError: if ``xval`` is truthy (cross-validation metrics are not available).
    """
    if xval:
        raise H2OValueError("Cross-validation metrics are not available.")
    metrics_key = "validation_metrics" if (valid and not train) else "training_metrics"
    return self._model_json["output"][metrics_key].null_deviance()
def null_degrees_of_freedom(self, train=False, valid=False, xval=False):
    """
    Retrieve the null degrees of freedom if this model has the attribute, or None otherwise.

    :param bool train: Get the null dof for the training set. If both train and valid are False, then train is
        selected by default.
    :param bool valid: Get the null dof for the validation set. If both train and valid are True, then train is
        selected by default.
    :returns: Return the null dof, or None if it is not present.
    :raises H2OValueError: if ``xval`` is truthy (cross-validation metrics are not available).
    """
    if xval:
        raise H2OValueError("Cross-validation metrics are not available.")
    metrics_key = "validation_metrics" if (valid and not train) else "training_metrics"
    return self._model_json["output"][metrics_key].null_degrees_of_freedom()
def pprint_coef(self):
    """Pretty print the coefficients table (includes normalized coefficients)."""
    print(self._model_json["output"]["coefficients_table"])  # the table is None (and prints as such) if the model has no coefficients
def coef(self):
    """
    Return the coefficients which can be applied to the non-standardized data.

    Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
    """
    category = self._model_json["output"]["model_category"]
    # Multinomial-style models keep one coefficient set per class.
    if category in ("Multinomial", "Ordinal"):
        return self._fillMultinomialDict(False)
    tbl = self._model_json["output"]["coefficients_table"]
    if tbl is None:
        return None
    return dict(zip(tbl["names"], tbl["coefficients"]))
def coef_norm(self):
    """
    Return coefficients fitted on the standardized data (requires standardize = True, which is on by default).

    These coefficients can be used to evaluate variable importance.
    """
    # NOTE(review): unlike coef(), only "Multinomial" is special-cased here — an
    # "Ordinal" model falls through to the table lookup. Confirm whether that
    # asymmetry is intended.
    if self._model_json["output"]["model_category"] == "Multinomial":
        return self._fillMultinomialDict(True)
    tbl = self._model_json["output"]["coefficients_table"]
    if tbl is None:
        return None
    return dict(zip(tbl["names"], tbl["standardized_coefficients"]))
def _fillMultinomialDict(self, standardize=False):
if self.algo == 'gam':
tbl = self._model_json["output"]["coefficients_table"]
else:
tbl = self._model_json["output"]["coefficients_table_multinomials_with_class_names"]
if tbl is None:
return None
coeff_dict = {} # contains coefficient names
coeffNames = tbl["names"]
all_col_header = tbl.col_header
startIndex = 1
endIndex = int((len(all_col_header)-1)/2+1)
if standardize:
startIndex = int((len(all_col_header)-1)/2+1) # start index for standardized coefficients
endIndex = len(all_col_header)
for nameIndex in list(range(startIndex, endIndex)):
coeffList = tbl[all_col_header[nameIndex]]
t1Dict = {name: coef for name, coef in zip(coeffNames, coeffList)}
coeff_dict[all_col_header[nameIndex]]=t1Dict
return coeff_dict
def r2(self, train=False, valid=False, xval=False):
    """
    Return the R squared for this regression model.

    Will return R^2 for GLM Models and will return NaN otherwise.
    The R^2 value is defined to be 1 - MSE/var, where var is computed as sigma*sigma.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the R^2 value for the training data.
    :param bool valid: If valid is True, then return the R^2 value for the validation data.
    :param bool xval: If xval is True, then return the R^2 value for the cross validation data.
    :returns: The R squared for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.r2()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def mse(self, train=False, valid=False, xval=False):
    """
    Get the Mean Square Error.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the MSE value for the training data.
    :param bool valid: If valid is True, then return the MSE value for the validation data.
    :param bool xval: If xval is True, then return the MSE value for the cross validation data.
    :returns: The MSE for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.mse()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def rmse(self, train=False, valid=False, xval=False):
    """
    Get the Root Mean Square Error.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the RMSE value for the training data.
    :param bool valid: If valid is True, then return the RMSE value for the validation data.
    :param bool xval: If xval is True, then return the RMSE value for the cross validation data.
    :returns: The RMSE for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.rmse()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def mae(self, train=False, valid=False, xval=False):
    """
    Get the Mean Absolute Error.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the MAE value for the training data.
    :param bool valid: If valid is True, then return the MAE value for the validation data.
    :param bool xval: If xval is True, then return the MAE value for the cross validation data.
    :returns: The MAE for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.mae()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def rmsle(self, train=False, valid=False, xval=False):
    """
    Get the Root Mean Squared Logarithmic Error.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the RMSLE value for the training data.
    :param bool valid: If valid is True, then return the RMSLE value for the validation data.
    :param bool xval: If xval is True, then return the RMSLE value for the cross validation data.
    :returns: The RMSLE for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.rmsle()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def logloss(self, train=False, valid=False, xval=False):
    """
    Get the Log Loss.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the log loss value for the training data.
    :param bool valid: If valid is True, then return the log loss value for the validation data.
    :param bool xval: If xval is True, then return the log loss value for the cross validation data.
    :returns: The log loss for this model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.logloss()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def mean_residual_deviance(self, train=False, valid=False, xval=False):
    """
    Get the Mean Residual Deviances.

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the Mean Residual Deviance value for the training data.
    :param bool valid: If valid is True, then return the Mean Residual Deviance value for the validation data.
    :param bool xval: If xval is True, then return the Mean Residual Deviance value for the cross validation data.
    :returns: The Mean Residual Deviance for this regression model.
    """
    metrics = ModelBase._get_metrics(self, train, valid, xval)
    values = {key: (None if mm is None else mm.mean_residual_deviance()) for key, mm in viewitems(metrics)}
    return list(values.values())[0] if len(values) == 1 else values
def auc(self, train=False, valid=False, xval=False):
    """
    Get the AUC (Area Under Curve).

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param bool train: If train is True, then return the AUC value for the training data.
    :param bool valid: If valid is True, then return the AUC value for the validation data.
    :param bool xval: If xval is True, then return the AUC value for the cross validation data.
    :returns: The AUC.
    :raises H2OValueError: if a requested metric does not come from a Binomial or Multinomial classifier.
    """
    tm = ModelBase._get_metrics(self, train, valid, xval)
    m = {}
    for k, v in viewitems(tm):
        # Idiom fix: `v is not None` replaces `not(v == None)` — identity is the correct
        # (and __eq__-proof) way to test for None.
        if v is not None and not (is_type(v, h2o.model.metrics_base.H2OBinomialModelMetrics)
                                  or is_type(v, h2o.model.metrics_base.H2OMultinomialModelMetrics)):
            raise H2OValueError("auc() is only available for Binomial and Multinomial classifiers. For Multinomial classifiers is available average AUC value, default is Weighted One-to-Rest AUC.")
        m[k] = None if v is None else v.auc()
    return list(m.values())[0] if len(m) == 1 else m
def auuc(self, train=False, valid=False):
    """
    Get the AUUC (Area Under Uplift Curve).

    If all are False (default), then return the training metric value.
    If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
    "valid".

    :param bool train: If train is True, then return the AUUC value for the training data.
    :param bool valid: If valid is True, then return the AUUC value for the validation data.
    :returns: The AUUC.
    :raises H2OValueError: if a requested metric does not come from an uplift binomial classifier.
    """
    metrics = ModelBase._get_metrics(self, train, valid, False)
    result = {}
    for key, mm in viewitems(metrics):
        if mm is not None and not is_type(mm, h2o.model.metrics_base.H2OBinomialUpliftModelMetrics):
            raise H2OValueError("auuc() is only available for Uplift Binomial classifiers.")
        result[key] = None if mm is None else mm.auuc()
    return list(result.values())[0] if len(result) == 1 else result
    def auuc_table(self, train=False, valid=False):
        """
        Get the AUUC table (Area Under Uplift Curve values of all types).

        If all are False (default), then return the training metric value.
        If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
        "valid".

        :param bool train: If train is True, then return the AUUC table value for the training data.
        :param bool valid: If valid is True, then return the AUUC table value for the validation data.

        :returns: The AUUC table.
        """
        tm = ModelBase._get_metrics(self, train, valid, False)
        m = {}
        for k, v in viewitems(tm):
            # The AUUC table exists only on uplift binomial metric objects.
            if not(v is None) and not(is_type(v, h2o.model.metrics_base.H2OBinomialUpliftModelMetrics)):
                raise H2OValueError("auuc_table() is only available for Uplift Binomial classifiers.")
            m[k] = None if v is None else v.auuc_table()
        # Single requested metric: unwrap; otherwise return the dict keyed by "train"/"valid".
        return list(m.values())[0] if len(m) == 1 else m
def qini(self, train=False, valid=False):
"""
Get the Qini value (Area Under Uplift Curve - Area Under Random Curve for Qini uplift).
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid".
:param bool train: If train is True, then return the Qini value for the training data.
:param bool valid: If valid is True, then return the Qini value for the validation data.
:returns: The Qini value.
"""
tm = ModelBase._get_metrics(self, train, valid, False)
m = {}
for k, v in viewitems(tm):
if not(v is None) and not(is_type(v, h2o.model.metrics_base.H2OBinomialUpliftModelMetrics)):
raise H2OValueError("auuc() is only available for Uplift Binomial classifiers.")
m[k] = None if v is None else v.qini()
return list(m.values())[0] if len(m) == 1 else m
def aic(self, train=False, valid=False, xval=False):
"""
Get the AIC (Akaike Information Criterium).
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the AIC value for the training data.
:param bool valid: If valid is True, then return the AIC value for the validation data.
:param bool xval: If xval is True, then return the AIC value for the validation data.
:returns: The AIC.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.aic()
return list(m.values())[0] if len(m) == 1 else m
def gini(self, train=False, valid=False, xval=False):
"""
Get the Gini coefficient.
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval"
:param bool train: If train is True, then return the Gini Coefficient value for the training data.
:param bool valid: If valid is True, then return the Gini Coefficient value for the validation data.
:param bool xval: If xval is True, then return the Gini Coefficient value for the cross validation data.
:returns: The Gini Coefficient for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm): m[k] = None if v is None else v.gini()
return list(m.values())[0] if len(m) == 1 else m
def aucpr(self, train=False, valid=False, xval=False):
"""
Get the aucPR (Area Under PRECISION RECALL Curve).
If all are False (default), then return the training metric value.
If more than one options is set to True, then return a dictionary of metrics where the keys are "train",
"valid", and "xval".
:param bool train: If train is True, then return the aucpr value for the training data.
:param bool valid: If valid is True, then return the aucpr value for the validation data.
:param bool xval: If xval is True, then return the aucpr value for the validation data.
:returns: The aucpr.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k, v in viewitems(tm):
if v is not None and not is_type(v, h2o.model.metrics_base.H2OBinomialModelMetrics) and not is_type(v, h2o.model.metrics_base.H2OMultinomialModelMetrics):
raise H2OValueError("aucpr() is only available for Binomial and Multinomial classifiers. For Multinomial classifiers is available average PR AUC value, default is Weighted One-to-Rest PR AUC.")
m[k] = None if v is None else v.aucpr()
return list(m.values())[0] if len(m) == 1 else m
    @deprecated_fn(replaced_by=aucpr)
    def pr_auc(self, train=False, valid=False, xval=False):
        # Deprecated alias: the decorator redirects all calls to aucpr().
        pass
    def download_model(self, path="", filename=None):
        """
        Download an H2O Model object to disk.

        :param path: a path to the directory where the model should be saved.
        :param filename: a filename for the saved model.

        :returns: the path of the downloaded model.
        """
        assert_is_type(path, str)
        # Thin wrapper: the module-level helper resolves the default filename and does the I/O.
        return h2o.download_model(self, path, filename=filename)
    def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
        """
        Download the POJO for this model to the directory specified by path.

        If path is an empty string, then dump the output to screen.

        :param path: An absolute path to the directory where POJO should be saved.
        :param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
        :param genmodel_name: Custom name of genmodel jar.

        :returns: name of the POJO file written.
        """
        assert_is_type(path, str)
        assert_is_type(get_genmodel_jar, bool)
        # Normalize a trailing slash before delegating to the module-level helper.
        path = path.rstrip("/")
        return h2o.download_pojo(self, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
def download_mojo(self, path=".", get_genmodel_jar=False, genmodel_name=""):
"""
Download the model in MOJO format.
:param path: the path where MOJO file should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
:param genmodel_name: Custom name of genmodel jar
:returns: name of the MOJO file written.
"""
assert_is_type(path, str)
assert_is_type(get_genmodel_jar, bool)
if not self.have_mojo:
raise H2OValueError("Export to MOJO not supported")
if get_genmodel_jar:
if genmodel_name == "":
h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, "h2o-genmodel.jar"))
else:
h2o.api("GET /3/h2o-genmodel.jar", save_to=os.path.join(path, genmodel_name))
return h2o.api("GET /3/Models/%s/mojo" % self.model_id, save_to=path)
def save_mojo(self, path="", force=False, filename=None):
"""
Save an H2O Model as MOJO (Model Object, Optimized) to disk.
:param path: a path to save the model at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:param filename: a filename for the saved model (file type is always .zip)
:returns str: the path of the saved model
"""
assert_is_type(path, str)
assert_is_type(force, bool)
if not self.have_mojo:
raise H2OValueError("Export to MOJO not supported")
if filename is None:
filename = self.model_id + ".zip"
else:
assert_is_type(filename, str)
path = os.path.join(os.getcwd() if path == "" else path, filename)
return h2o.api("GET /99/Models.mojo/%s" % self.model_id, data={"dir": path, "force": force})["dir"]
def save_model_details(self, path="", force=False, filename=None):
"""
Save Model Details of an H2O Model in JSON Format to disk.
:param path: a path to save the model details at (hdfs, s3, local)
:param force: if True overwrite destination directory in case it exists, or throw exception if set to False.
:param filename: a filename for the saved model (file type is always .json)
:returns str: the path of the saved model details
"""
assert_is_type(path, str)
assert_is_type(force, bool)
if filename is None:
filename = self.model_id + ".json"
else:
assert_is_type(filename, str)
path = os.path.join(os.getcwd() if path == "" else path, filename)
return h2o.api("GET /99/Models/%s/json" % self.model_id, data={"dir": path, "force": force})["dir"]
@staticmethod
def _get_metrics(o, train, valid, xval):
# noinspection PyProtectedMember
output = o._model_json["output"]
metrics = {}
if train:
metrics["train"] = output["training_metrics"]
if valid:
metrics["valid"] = output["validation_metrics"]
if xval:
metrics["xval"] = output["cross_validation_metrics"]
if len(metrics) == 0:
metrics["train"] = output["training_metrics"]
return metrics
@deprecated_params({'save_to_file': 'save_plot_path'})
def partial_plot(self, data, cols=None, destination_key=None, nbins=20, weight_column=None,
plot=True, plot_stddev=True, figsize=(7, 10), server=False, include_na=False, user_splits=None,
col_pairs_2dpdp=None, save_plot_path=None, row_index=None, targets=None):
"""
Create partial dependence plot which gives a graphical depiction of the marginal effect of a variable on the
response. The effect of a variable is measured in change in the mean response.
:param H2OFrame data: An H2OFrame object used for scoring and constructing the plot.
:param cols: Feature(s) for which partial dependence will be calculated.
:param destination_key: An key reference to the created partial dependence tables in H2O.
:param nbins: Number of bins used. For categorical columns make sure the number of bins exceed the level count. If you enable add_missing_NA, the returned length will be nbin+1.
:param weight_column: A string denoting which column of data should be used as the weight column.
:param plot: A boolean specifying whether to plot partial dependence table.
:param plot_stddev: A boolean specifying whether to add std err to partial dependence plot.
:param figsize: Dimension/size of the returning plots, adjust to fit your output cells.
:param server: Specify whether to activate matplotlib "server" mode. In this case, the plots are saved to a file instead of being rendered.
:param include_na: A boolean specifying whether missing value should be included in the Feature values.
:param user_splits: a dictionary containing column names as key and user defined split values as value in a list.
:param col_pairs_2dpdp: list containing pairs of column names for 2D pdp
:param save_plot_path: Fully qualified name to an image file the resulting plot should be saved to, e.g. '/home/user/pdpplot.png'. The 'png' postfix might be omitted. If the file already exists, it will be overridden. Plot is only saved if plot = True.
:param row_index: Row for which partial dependence will be calculated instead of the whole input frame.
:param targets: Target classes for multiclass model.
:returns: Plot and list of calculated mean response tables for each feature requested + the resulting plot (can be accessed using result.figure()).
"""
if not isinstance(data, h2o.H2OFrame): raise ValueError("Data must be an instance of H2OFrame.")
num_1dpdp = 0
num_2dpdp = 0
if cols is not None:
assert_is_type(cols, [str])
num_1dpdp = len(cols)
if col_pairs_2dpdp is not None:
assert_is_type(col_pairs_2dpdp, [[str, str]])
num_2dpdp = len(col_pairs_2dpdp)
if cols is None and col_pairs_2dpdp is None:
raise ValueError("Must specify either cols or col_pairs_2dpd to generate partial dependency plots.")
if col_pairs_2dpdp and targets and len(targets) > 1:
raise ValueError("Multinomial 2D Partial Dependency is available only for one target.")
assert_is_type(destination_key, None, str)
assert_is_type(nbins, int)
assert_is_type(plot, bool)
assert_is_type(figsize, (int, int))
# Check cols specified exist in frame data
if cols is not None:
for xi in cols:
if xi not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % xi)
if col_pairs_2dpdp is not None:
for oneP in col_pairs_2dpdp:
if oneP[0] not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % oneP[0])
if oneP[1] not in data.names:
raise H2OValueError("Column %s does not exist in the training frame." % oneP[1])
if oneP[0] is oneP[1]:
raise H2OValueError("2D pdp must be with different columns.")
if isinstance(weight_column, int) and not (weight_column == -1):
raise H2OValueError("Weight column should be a column name in your data frame.")
elif isinstance(weight_column, str): # index is a name
if weight_column not in data.names:
raise H2OValueError("Column %s does not exist in the data frame" % weight_column)
weight_column = data.names.index(weight_column)
if row_index is not None:
if not isinstance(row_index, int):
raise H2OValueError("Row index should be of type int.")
else:
row_index = -1
if targets is not None:
assert_is_type(targets, list)
for i in targets:
assert_is_type(i, str)
num_1dpdp = num_1dpdp
num_2dpdp = num_2dpdp
kwargs = {}
kwargs["cols"] = cols
kwargs["model_id"] = self.model_id
kwargs["frame_id"] = data.frame_id
kwargs["nbins"] = nbins
kwargs["destination_key"] = destination_key
kwargs["weight_column_index"] = weight_column
kwargs["add_missing_na"] = include_na
kwargs["row_index"] = row_index
kwargs["col_pairs_2dpdp"] = col_pairs_2dpdp
if targets:
kwargs["targets"] = targets
self.__generate_user_splits(user_splits, data, kwargs)
json = H2OJob(h2o.api("POST /3/PartialDependence/", data=kwargs), job_type="PartialDependencePlot").poll()
json = h2o.api("GET /3/PartialDependence/%s" % json.dest_key)
# Extract partial dependence data from json response
pps = json["partial_dependence_data"]
# Plot partial dependence plots using matplotlib
return self.__generate_partial_plots(num_1dpdp, num_2dpdp, plot, server, pps, figsize,
col_pairs_2dpdp, data, nbins,
kwargs["user_cols"], kwargs["num_user_splits"],
plot_stddev, cols, save_plot_path, row_index, targets, include_na)
    def __generate_user_splits(self, user_splits, data, kwargs):
        # Extract user-defined split points from the user_splits dict into three parallel arrays
        # (column names, flattened split values, per-column split counts) and store them in kwargs.
        # Enum split values are translated to their integer level indices.
        if user_splits is not None and len(user_splits) > 0:
            if not(isinstance(user_splits, dict)):
                raise H2OValueError("user_splits must be a Python dict.")
            else:
                user_cols = []        # column names, in dict iteration order
                user_values = []      # split values for all columns, flattened
                user_num_splits = []  # number of splits contributed by each column
                data_ncol = data.ncol
                column_names = data.names
                for colKey,val in user_splits.items():
                    # Keys may be column names or integer column indices.
                    if is_type(colKey, str) and colKey in column_names:
                        user_cols.append(colKey)
                    elif isinstance(colKey, int) and colKey < data_ncol:
                        user_cols.append(column_names[colKey])
                    else:
                        raise H2OValueError("Column names/indices used in user_splits are not valid. They "
                                            "should be chosen from the columns of your data set.")
                    if data[colKey].isfactor()[0] or data[colKey].isnumeric()[0]:  # replace enum string with actual value
                        nVal = len(val)
                        if data[colKey].isfactor()[0]:
                            # Categorical column: map each enum label to its level index.
                            domains = data[colKey].levels()[0]
                            numVal = [0]*nVal
                            for ind in range(nVal):
                                if val[ind] in domains:
                                    numVal[ind] = domains.index(val[ind])
                                else:
                                    raise H2OValueError("Illegal enum value {0} encountered. To include missing"
                                                        " values in your feature values, set include_na to "
                                                        "True.".format(val[ind]))
                            user_values.extend(numVal)
                        else:
                            # Numeric column: split values are used as-is.
                            user_values.extend(val)
                        user_num_splits.append(nVal)
                    else:
                        raise H2OValueError("Partial dependency plots are generated for numerical and categorical "
                                            "columns only.")
                kwargs["user_cols"] = user_cols
                kwargs["user_splits"] = user_values
                kwargs["num_user_splits"] = user_num_splits
        else:
            # No user splits supplied: signal the backend to use its defaults.
            kwargs["user_cols"] = None
            kwargs["user_splits"] = None
            kwargs["num_user_splits"] = None
    def __generate_partial_plots(self, num_1dpdp, num_2dpdp, plot, server, pps, figsize, col_pairs_2dpdp, data, nbins,
                                 user_cols, user_num_splits, plot_stddev, cols, save_to_file, row_index, targets, include_na):
        # Plot partial dependence plots using matplotlib.
        # Returns the pdp tables decorated with the figure (when one was drawn).
        to_fig = num_1dpdp + num_2dpdp
        if plot and to_fig > 0:  # plot 1d pdp for now
            plt = get_matplotlib_pyplot(server)
            cm = _get_matplotlib_cm("Partial dependency plots")
            if not plt:
                # matplotlib unavailable: return the data; accessing .figure() will raise.
                return decorate_plot_result(res=pps, figure=RAISE_ON_FIGURE_ACCESS)
            import matplotlib.gridspec as gridspec
            fig = plt.figure(figsize=figsize)
            gxs = gridspec.GridSpec(to_fig, 1)  # one grid row per requested plot
            if num_2dpdp > 0:  # 2d pdp requested
                # axes_3d is only bound here; it is read below only when i >= num_1dpdp,
                # which can happen only if num_2dpdp > 0.
                axes_3d = _get_mplot3d_pyplot("2D partial plots")
            fig_plotted = False  # becomes True once at least one subplot was drawn
            data_index = 0       # cursor into pps for the multinomial case (len(targets) tables per column)
            target = None
            if targets and len(targets) == 1:
                target = targets[0]
            for i in range(to_fig):
                if i >= num_1dpdp:  # plot 2D pdp
                    if axes_3d is None or cm is None or plt is None:  # quit if cannot find toolbox
                        break
                    fig_plotted = self.__plot_2d_pdp(fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pps[i], nbins,
                                                     user_cols, user_num_splits, plot_stddev, cm, i, row_index)
                else:  # plot 1D pdp
                    col = cols[i]
                    if targets is None or target:
                        fig_plotted = self.__plot_1d_pdp(col, i, data, pps[i], fig, gxs, plot_stddev, row_index, target, include_na)
                    else:
                        # Multinomial with several targets: one subplot per column, one curve per target.
                        fig_plotted = self.__plot_1d_pdp_multinomial(col, i, data, pps, data_index, fig, gxs, cm,
                                                                     plot_stddev, row_index, targets, include_na)
                        data_index = data_index + len(targets)
            if fig_plotted:
                fig.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
            else:
                print("No partial plot is generated and/or saved. You may be missing toolboxes like "
                      "mpl_toolkits.mplot3d or matplotlib.")
            if (save_to_file is not None) and fig_plotted:  # only save when a figure is actually plotted
                plt.savefig(save_to_file)
            return decorate_plot_result(res=pps, figure=fig)
        else:
            # Plotting disabled (or nothing to plot): return the raw pdp tables only.
            return decorate_plot_result(res=pps)
def __plot_2d_pdp(self, fig, col_pairs_2dpdp, gxs, num_1dpdp, data, pp, nbins, user_cols, user_num_splits,
plot_stddev, cm, i, row_index):
ax = fig.add_subplot(gxs[i], projection='3d')
col_pairs = col_pairs_2dpdp[i-num_1dpdp]
x = self.__grab_values(pp, 0, data, col_pairs[0], ax) # change to numpy 2d_array
y = self.__grab_values(pp, 1, data, col_pairs[1], ax)
X,Y,Z = self.__pred_for_3d(x, y, pp[2], col_pairs, nbins, user_cols, user_num_splits)
zupper = [a + b for a, b in zip(pp[2], pp[3])] # pp[1] is mean, pp[2] is std
zlower = [a - b for a, b in zip(pp[2], pp[3])]
_,_,Zupper = self.__pred_for_3d(x, y, zupper, col_pairs, nbins, user_cols, user_num_splits)
_,_,Zlower = self.__pred_for_3d(x, y, zlower, col_pairs, nbins, user_cols, user_num_splits)
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=1, antialiased=False, alpha=0.5, edgecolor='k')
if plot_stddev:
zupper = [a + b for a, b in zip(pp[2], pp[3])] # pp[1] is mean, pp[2] is std
zlower = [a - b for a, b in zip(pp[2], pp[3])]
_,_,Zupper = self.__pred_for_3d(x,y,zupper, col_pairs, nbins, user_cols, user_num_splits)
_,_,Zlower = self.__pred_for_3d(x,y,zlower, col_pairs, nbins, user_cols, user_num_splits)
ax.plot_surface(X, Y, Zupper, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='y')
ax.plot_surface(X, Y, Zlower, cmap=cm.coolwarm,linewidth=0.2, antialiased=False, alpha=0.3, edgecolor='g')
ax.set_xlabel(col_pairs[0])
ax.set_xlim(min(x), max(x))
ax.set_ylabel(col_pairs[1])
ax.set_ylim(min(y), max(y))
ax.set_zlabel('Partial dependence')
title = '2D partial dependence plot for '+col_pairs[0] + ' and '+col_pairs[1]
if row_index >= 0:
title += ' and row index {}'.format(row_index)
ax.set_title(title)
return True
    def __plot_1d_pdp(self, col, i, data, pp, fig, gxs, plot_stddev, row_index, target=None, include_na=False):
        # Render one 1-D partial-dependence subplot for column `col` into grid cell `i`.
        cat = data[col].isfactor()[0]  # categorical columns get tick labels / point markers downstream
        axs = fig.add_subplot(gxs[i])
        self.__set_axs_1d(axs, plot_stddev, cat, pp, col, row_index, target, include_na)
        return True  # signals the caller that a figure was actually drawn
    def __plot_1d_pdp_multinomial(self, col, i, data, pps, data_start_index, fig, gxs, cm, plot_stddev, row_index,
                                  targets, include_na):
        # Render one 1-D pdp subplot for column `col` with one curve per target class.
        # `data_start_index` points at the first of len(targets) consecutive tables in `pps`.
        cat = data[col].isfactor()[0]
        axs = fig.add_subplot(gxs[i])
        self.__set_axs_1d_multinomial(axs, cm, plot_stddev, cat, pps, data_start_index, col, row_index, targets, include_na)
        return True  # signals the caller that a figure was actually drawn
# change x, y, z to be 2-D numpy arrays in order to plot it.
# note that, x stays at one value for the duration of y value changes.
def __pred_for_3d(self, x, y, z, colPairs, nbins, user_cols, user_num_splits):
# deal with y axis first
if not can_use_numpy():
raise ImportError("numpy is required for 3D partial plots.")
import numpy as np
ycol = colPairs[1]
nBins = nbins
if user_cols is not None and ycol in user_cols:
ind = user_cols.index(ycol)
nBins = user_num_splits[ind]
nrow = int(len(x)/nBins)
X = np.transpose(np.array(x).reshape(nrow, nBins))
Y = np.transpose(np.array(y).reshape(nrow, nBins))
Z = np.transpose(np.array(z).reshape(nrow, nBins))
return X,Y,Z
    def __grab_values(self, pp, index, data, col, axs):
        # Extract the axis values for one pdp column. For categorical columns, set the
        # tick labels on the given axis and return integer codes instead of strings.
        cat = data[col].isfactor()[0]
        if cat:
            labels = pp[index]
            uniqueL = list(set(labels))
            x = range(len(uniqueL))
            xlab = [None]*len(uniqueL)
            for ind in range(len(uniqueL)):
                xlab[ind] = labels[labels.index(uniqueL[ind])]
            # replace string enum labels with integer values
            # NOTE(review): labels.index(...) maps every duplicate label to the index of its
            # first occurrence -- presumably intentional so repeated levels share one code; confirm.
            xext = [None]*len(labels)
            for ind in range(len(labels)):
                xext[ind] = labels.index(labels[ind])
            if index == 0:  # x-axis
                axs.set_xticks(x)
                axs.set_xticklabels(xlab)
            else:  # y-axis
                axs.set_yticks(x)
                axs.set_yticklabels(xlab)
            axs.margins(0.2)
            return xext
        else:
            # Numeric column: values can be used directly.
            return pp[index]
    def __set_axs_1d(self, axs, plot_stddev, cat, pp, col, row_index, target, include_na):
        # Populate one 1-D partial-dependence subplot: a single curve with an optional std band.
        if not can_use_numpy():
            raise ImportError("numpy is required for partial plots.")
        import numpy as np
        pp_start_index = 0  # column offset inside the pdp table: [value, mean_response, stddev, ...]
        x = pp[pp_start_index]
        y = pp[pp_start_index+1]
        if len(x) == 1:
            fmt = 'o'  # a single point cannot be drawn as a line
        else:
            fmt = '-'
        if isinstance(x[0], str):
            axs.set_xlim(0, len(x)-1)
        else:
            axs.set_xlim(min(x), max(x))
        if cat:
            labels = x  # 1d pdp, this is 0
            x = range(len(labels))
            fmt = "o"
            axs.set_xticks(x)
            axs.set_xticklabels(labels, rotation=45)
            axs.set_xlim(min(x) - 0.2, max(x) + 0.2)  # pad so the edge markers stay visible
        if plot_stddev:
            std = pp[pp_start_index+2]
            upper = np.array([a + b for a, b in zip(y, std)])  # mean + std
            lower = np.array([a - b for a, b in zip(y, std)])  # mean - std
            if cat:
                axs.errorbar(x, y, yerr=std, fmt=fmt, alpha=0.5, capsize=5, label=target)
            else:
                numline, = axs.plot(x, y, fmt, label=target)
                axs.fill_between(x, lower, upper, where=lower < upper, alpha=0.1, interpolate=False)
            axs.set_ylim(min(lower) - 0.2 * abs(min(lower)), max(upper) + 0.2 * abs(max(upper)))
        else:
            numline, = axs.plot(x, y, fmt, label=target)
            axs.set_ylim(min(y) - 0.2 * abs(min(y)), max(y) + 0.2 * abs(max(y)))
        if (not cat) and include_na:
            # Draw the NA response as a dashed horizontal line at the value computed for the NaN bin.
            # NOTE(review): numline._color is a private matplotlib attribute; the public accessor is
            # numline.get_color() -- confirm against the supported matplotlib versions.
            axs.plot(x, [y[np.argwhere(np.isnan(x))[0][0]]] * len(x), '--', color=numline._color,label="NAN")
        axs.legend()
        title = "Partial Dependence Plot for {}".format(col)
        if target:
            title += " and class {}".format(target)
        if row_index >= 0:
            title += " and row index {}".format(row_index)
        axs.set_title(title)
        axs.set_xlabel(pp.col_header[pp_start_index])
        axs.set_ylabel(pp.col_header[pp_start_index+1])
        axs.xaxis.grid()
        axs.yaxis.grid()
    def __set_axs_1d_multinomial(self, axs, cm, plot_stddev, cat, pps, data_start_index, col, row_index, targets, include_na):
        # Populate one subplot with a 1-D pdp curve per target class (multinomial models).
        if not can_use_numpy():
            raise ImportError("numpy is required for multinomial partial plots.")
        import numpy as np
        pp_start_index = 0  # column offset inside each pdp table: [value, mean_response, stddev, ...]
        pp = pps[data_start_index]
        x = pp[pp_start_index]
        y = pp[pp_start_index + 1]
        # get initial maximum and minimum values to set xaxis and yaxis
        min_y = min(y)
        max_y = max(y)
        if plot_stddev:
            min_lower = min_y
            max_upper = max_y
        fmt = None
        if cat:  # adjust x axis to categorical values
            labels = pp[pp_start_index]
            x = range(len(labels))
            axs.set_xticks(x)
            axs.set_xticklabels(labels, rotation=45)
            fmt = "o"
            axs.set_xlim(min(x) - 0.2, max(x) + 0.2)  # pad so the edge markers stay visible
        else:
            axs.set_xlim(min(x), max(x))
        axs.set_xlabel(pp.col_header[pp_start_index])  # set x axis label
        axs.set_ylabel(pp.col_header[pp_start_index+1])  # set y axis label
        cmap = cm.get_cmap("rainbow", len(targets))  # one distinct color per target class
        for i in range(len(targets)):
            # The tables for this column's targets are consecutive, starting at data_start_index.
            pp = pps[data_start_index + i]
            y = pp[pp_start_index + 1]
            min_y = min(min_y, min(y))
            max_y = max(max_y, max(y))
            if plot_stddev:  # set std
                std = pp[pp_start_index + 2]
                upper = np.array([a + b for a, b in zip(y, std)])  # mean + std
                lower = np.array([a - b for a, b in zip(y, std)])  # mean - std
                min_lower = min(min_lower, min(lower))
                max_upper = max(max_upper, max(upper))
                if cat:
                    axs.errorbar(x, y, yerr=std, fmt=fmt, c=cmap(i), alpha=0.5, capsize=5, label=targets[i])
                else:
                    numline, = axs.plot(x, y, c=cmap(i), label=targets[i])
                    axs.fill_between(x, lower, upper, where=lower < upper, facecolor=cmap(i), alpha=0.1, interpolate=False)
            else:
                numline, = axs.plot(x, y, c=cmap(i), marker=fmt, label=targets[i])
            if (not cat) and include_na:
                # NOTE(review): numline._color is a private matplotlib attribute; the public accessor is
                # numline.get_color() -- confirm against the supported matplotlib versions.
                axs.plot(x, [y[np.argwhere(np.isnan(x))[0][0]]] * len(x), '--', color=numline._color,label=targets[i] + " NAN")
        if plot_stddev:
            axs.set_ylim(min_lower - 0.2 * abs(min_lower), max_upper + 0.2 * abs(max_upper))
        else:
            axs.set_ylim(min_y - 0.2 * abs(min_y), max_y + 0.2 * abs(max_y))
        axs.legend()
        title = "Partial Dependence Plot for {} and classes \n {}".format(col, ', '.join(targets))
        if row_index >= 0:
            title += " and row index {}".format(row_index)
        axs.set_title(title)
        axs.xaxis.grid()
        axs.yaxis.grid()
    def varimp_plot(self, num_of_features=None, server=False, save_plot_path=None):
        """
        Plot the variable importance for a trained model.

        :param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
        :param server: if true set server settings to matplotlib and do not show the graph.
        :param save_plot_path: a path to save the plot via using matplotlib function savefig.

        :returns: object that contains the resulting figure (can be accessed using result.figure()).
        """
        # For now, redirect to h2o.model.extensions.varimp for models that support the feature, and raise legacy error for others.
        # Later, the method will be exposed only for models supporting the feature.
        if has_extension(self, 'VariableImportance'):
            return self._varimp_plot(num_of_features=num_of_features, server=server, save_plot_path=save_plot_path)
        raise H2OValueError("Variable importance plot is not available for this type of model (%s)." % self.algo)
    def std_coef_plot(self, num_of_features=None, server=False, save_plot_path=None):
        """
        Plot a model's standardized coefficient magnitudes.

        :param num_of_features: the number of features shown in the plot.
        :param server: if true set server settings to matplotlib and show the graph.
        :param save_plot_path: a path to save the plot via using matplotlib function savefig.

        :returns: object that contains the resulting figure (can be accessed using result.figure()).
        """
        # For now, redirect to h2o.model.extensions.std_coef for models that support the feature, and raise legacy error for others.
        # Later, the method will be exposed only for models supporting the feature.
        if has_extension(self, 'StandardCoef'):
            return self._std_coef_plot(num_of_features=num_of_features, server=server, save_plot_path=save_plot_path)
        raise H2OValueError("Standardized coefficient plot is not available for this type of model (%s)." % self.algo)
@staticmethod
def _check_targets(y_actual, y_predicted):
"""Check that y_actual and y_predicted have the same length.
:param H2OFrame y_actual:
:param H2OFrame y_predicted:
:returns: None
"""
if len(y_actual) != len(y_predicted):
raise ValueError("Row mismatch: [{},{}]".format(len(y_actual), len(y_predicted)))
def cross_validation_models(self):
"""
Obtain a list of cross-validation models.
:returns: list of H2OModel objects.
"""
cvmodels = self._model_json["output"]["cross_validation_models"]
if cvmodels is None:
return None
m = []
for p in cvmodels: m.append(h2o.get_model(p["name"]))
return m
def cross_validation_predictions(self):
"""
Obtain the (out-of-sample) holdout predictions of all cross-validation models on their holdout data.
Note that the predictions are expanded to the full number of rows of the training data, with 0 fill-in.
:returns: list of H2OFrame objects.
"""
preds = self._model_json["output"]["cross_validation_predictions"]
if preds is None:
return None
m = []
for p in preds: m.append(h2o.get_frame(p["name"]))
return m
    def cross_validation_holdout_predictions(self):
        """
        Obtain the (out-of-sample) holdout predictions of all cross-validation models on the training data.

        This is equivalent to summing up all H2OFrames returned by cross_validation_predictions.

        :returns: H2OFrame
        """
        preds = self._model_json["output"]["cross_validation_holdout_predictions_frame_id"]
        if preds is None:
            # The holdout-predictions frame was not kept for this model.
            return None
        return h2o.get_frame(preds["name"])
    def cross_validation_fold_assignment(self):
        """
        Obtain the cross-validation fold assignment for all rows in the training data.

        :returns: H2OFrame
        """
        fid = self._model_json["output"]["cross_validation_fold_assignment_frame_id"]
        if fid is None:
            # The fold-assignment frame was not kept for this model.
            return None
        return h2o.get_frame(fid["name"])
def rotation(self):
"""
Obtain the rotations (eigenvectors) for a PCA model
:return: H2OFrame
"""
if self._model_json["algo"] != "pca":
raise H2OValueError("This function is available for PCA models only")
return self._model_json["output"]["eigenvectors"]
    def score_history(self):
        """DEPRECATED. Use :meth:`scoring_history` instead."""
        # Backward-compatibility alias; simply delegates.
        return self.scoring_history()
    def permutation_importance(self, frame, metric="AUTO", n_samples=10000, n_repeats=1, features=None, seed=-1, use_pandas=False):
        """
        Get Permutation Variable Importance.

        When n_repeats == 1, the result is similar to the one from varimp() method, i.e., it contains
        the following columns "Relative Importance", "Scaled Importance", and "Percentage".

        When n_repeats > 1, the individual columns correspond to the permutation variable
        importance values from individual runs which corresponds to the "Relative Importance" and also
        to the distance between the original prediction error and prediction error using a frame with
        a given feature permuted.

        :param frame: training frame.
        :param metric: metric to be used. One of "AUTO", "AUC", "MAE", "MSE", "RMSE", "logloss", "mean_per_class_error",
            "PR_AUC". Defaults to "AUTO".
        :param n_samples: number of samples to be evaluated. Use -1 to use the whole dataset. Defaults to 10 000.
        :param n_repeats: number of repeated evaluations. Defaults to 1.
        :param features: features to include in the permutation importance. Use None to include all.
        :param seed: seed for the random generator. Use -1 to pick a random seed. Defaults to -1.
        :param use_pandas: set true to return pandas data frame.

        :return: H2OTwoDimTable or Pandas data frame
        """
        # Imported locally (not at module level) -- keeps these heavier modules off the import path
        # until the feature is actually used.
        from h2o.two_dim_table import H2OTwoDimTable
        from h2o.frame import H2OFrame
        from h2o.expr import ExprNode
        from h2o.exceptions import H2OValueError
        from h2o.utils.shared_utils import can_use_pandas
        if type(frame) is not H2OFrame:
            raise H2OValueError("Frame is not H2OFrame")
        if self.actual_params["response_column"] not in frame.columns:
            raise H2OValueError("Frame must contain the response column!")
        if features is not None and len(features) == 0:
            features = None  # an empty feature list means "use all features"
        if n_samples < -1 or n_samples in (0, 1):
            raise H2OValueError("Argument n_samples has to be either -1 to use the whole frame or greater than 2!")
        if n_samples > frame.nrows:
            n_samples = -1  # more samples requested than rows available: use the whole frame
        if n_repeats < 1:
            raise H2OValueError("Argument n_repeats must be greater than 0!")
        assert_is_type(features, None, [str])
        if features is not None:
            not_in_frame = [f for f in features if f not in frame.columns]
            if len(not_in_frame) > 0:
                raise H2OValueError("Features " + ", ".join(not_in_frame) + " are not present in the provided frame!")
        existing_metrics = [k.lower() for k in self._model_json['output']['training_metrics']._metric_json.keys()]
        if metric.lower() not in ["auto"] + existing_metrics:
            raise H2OValueError("Metric " + metric + " doesn't exist for this model.")
        # Delegate the actual computation to the backend via a Rapids expression.
        m_frame = H2OFrame._expr(ExprNode(
            "PermutationVarImp",
            self,
            frame,
            metric,
            n_samples,
            n_repeats,
            features,
            seed))
        if use_pandas and can_use_pandas():
            import pandas
            # NOTE: `pd` here holds the H2O result converted to a list-like frame,
            # not the pandas module (which is imported as `pandas` above).
            pd = h2o.as_list(m_frame)
            return pandas.DataFrame(pd, columns=pd.columns).set_index("Variable")
        else:
            def _replace_empty_str(row):
                # The backend encodes missing values as empty strings; map them to NaN.
                return [
                    float("nan") if "" == elem else elem
                    for elem in row
                ]
            varimp = H2OTwoDimTable(
                table_header="Variable Importances",
                col_header=m_frame.columns,
                col_types=["string"] + ["double"] * (len(m_frame.columns) - 1),
                raw_cell_values=list(map(_replace_empty_str, zip(*m_frame.as_data_frame(use_pandas=False, header=False))))  # transpose
            )
            return varimp
    def permutation_importance_plot(self, frame, metric="AUTO", n_samples=10000, n_repeats=1, features=None, seed=-1,
                                    num_of_features=10, server=False, save_plot_path=None):
        """
        Plot Permutation Variable Importance. This method plots either a bar plot or if n_repeats > 1 a box plot and
        returns the variable importance table.

        :param frame: training frame
        :param metric: metric to be used. One of "AUTO", "AUC", "MAE", "MSE", "RMSE", "logloss", "mean_per_class_error",
            "PR_AUC". Defaults to "AUTO".
        :param n_samples: number of samples to be evaluated. Use -1 to use the whole dataset. Defaults to 10 000.
        :param n_repeats: number of repeated evaluations. Defaults to 1.
        :param features: features to include in the permutation importance. Use None to include all.
        :param seed: seed for the random generator. Use -1 to pick a random seed. Defaults to -1.
        :param num_of_features: number of features to plot. Defaults to 10.
        :param server: if true set server settings to matplotlib and do not show the plot
        :param save_plot_path: a path to save the plot via using matplotlib function savefig
        :return: object that contains H2OTwoDimTable with variable importance and the resulting figure (can be accessed using result.figure())
        """
        plt = get_matplotlib_pyplot(server)
        if not plt:
            # matplotlib is unavailable; return a result object whose figure() raises on access.
            return decorate_plot_result(figure=RAISE_ON_FIGURE_ACCESS)
        # Compute the importance table first (as an H2OTwoDimTable, not pandas).
        importance = self.permutation_importance(frame, metric, n_samples, n_repeats, features, seed, use_pandas=False)
        fig, ax = plt.subplots(1, 1, figsize=(14, 10))
        if n_repeats > 1:
            # With repeated evaluations each table row is [feature, score_1, ..., score_n_repeats]:
            # sort features by mean score descending, keep the top num_of_features, then reverse
            # so the most important feature is drawn at the top of the box plot.
            vi = sorted([{"feature": row[0], "mean": sum(row[1:])/(len(row)-1), "values": row[1:]}
                         for row in importance.cell_values],
                        key=lambda x: -x["mean"])[:num_of_features][::-1]
            ax.boxplot([x["values"] for x in vi], vert=False, labels=[x["feature"] for x in vi])
        else:
            importance_val = importance["Scaled Importance"]
            # specify bar centers on the y axis, but flip the order so largest bar appears at top
            pos = range(len(importance_val))[::-1]
            num_of_features = min(len(importance_val), num_of_features)
            plt.barh(pos[0:num_of_features], importance_val[0:num_of_features], align="center",
                     height=0.8, color="#1F77B4", edgecolor="none")
            plt.yticks(pos[0:num_of_features], importance["Variable"][0:num_of_features])  # col 0 is str: importance
            plt.ylim([min(pos[0:num_of_features]) - 1, max(pos[0:num_of_features]) + 1])
        # Hide the right and top spines, color others grey
        ax.spines["right"].set_visible(False)
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_color("#7B7B7B")
        ax.spines["left"].set_color("#7B7B7B")
        # Only show ticks on the left and bottom spines
        ax.yaxis.set_ticks_position("left")
        ax.xaxis.set_ticks_position("bottom")
        plt.title("Permutation Variable Importance: " + self.algo +
                  (" (" + metric.lower() + ")" if metric.lower() != "auto" else ""), fontsize=20)
        if not server:
            plt.show()
        if save_plot_path is not None:
            fig.savefig(fname=save_plot_path)
        return decorate_plot_result(res=importance, figure=fig)
def _get_mplot3d_pyplot(function_name):
try:
# noinspection PyUnresolvedReferences
from mpl_toolkits.mplot3d import Axes3D
return Axes3D
except ImportError:
print("`mpl_toolkits.mplot3d` library is required for function {0}!".format(function_name))
return None
def _get_matplotlib_cm(function_name):
try:
from matplotlib import cm
return cm
except ImportError:
print('matplotlib library is required for 3D plots for function {0}'.format(function_name))
return None
|
h2oai/h2o-3
|
h2o-py/h2o/model/model_base.py
|
Python
|
apache-2.0
| 88,728
|
[
"Gaussian"
] |
45ce53d1e50c092b5d141bb96dd135b5ffaa64bb571cc2b88cf58fe76a45c38c
|
import os
import sys
import glob
import re
import livereload
import multiprocessing
import logging
import shutil
import MooseDocs
import build
log = logging.getLogger(__name__)
def serve_options(parser, subparser):
    """
    Register the 'serve' sub-command and its command-line options.

    Returns the newly created sub-parser.
    """
    serve_cmd = subparser.add_parser('serve', help='Serve the documentation using a local server.')
    serve_cmd.add_argument('--host', default='127.0.0.1', type=str,
                           help="The local host location for live web server (default: %(default)s).")
    serve_cmd.add_argument('--port', default='8000', type=str,
                           help="The local host port for live web server (default: %(default)s).")
    serve_cmd.add_argument('--num-threads', '-j', type=int, default=multiprocessing.cpu_count(),
                           help="Specify the number of threads to build pages with.")
    return serve_cmd
def serve(config_file='moosedocs.yml', host='127.0.0.1', port='8000', num_threads=multiprocessing.cpu_count()):
    """
    Build the documentation and serve it with a live-reloading local web server.
    """
    # Location of the served site (~/.local/share/moose/site).
    site_dir = os.path.abspath(os.path.join(os.getenv('HOME'), '.local', 'share', 'moose', 'site'))

    # Start from a clean build directory.
    if os.path.exists(site_dir):
        log.info('Cleaning build directory: {}'.format(site_dir))
        shutil.rmtree(site_dir)
    if not os.path.exists(site_dir):
        os.makedirs(site_dir)

    log.info("Building documentation...")

    def rebuild_site():
        """Rebuild the complete website; used for the initial build and on watched-file changes."""
        return build.build_site(config_file=config_file, site_dir=site_dir, num_threads=num_threads)

    config, parser, builder = rebuild_site()

    server = livereload.Server()

    # Rebuild individual pages when their markdown sources change.
    for page in builder:
        server.watch(page.source(), page.build)

    # Copy static assets when the support directories change.
    cwd = os.getcwd()
    for asset_dir in ('media', 'css', 'js', 'fonts'):
        server.watch(os.path.join(cwd, asset_dir), builder.copyFiles)

    # Changes to these require a complete rebuild.
    moose_extension = MooseDocs.get_moose_markdown_extension(parser)
    if moose_extension:
        server.watch(os.path.join(cwd, moose_extension.getConfig('executable')), rebuild_site)
    server.watch(config_file, rebuild_site)
    server.watch(config['navigation'], builder.build)
    server.watch('templates', builder.build)

    # Start the server
    server.serve(root=config['site_dir'], host=host, port=port, restart_delay=0)
|
backmari/moose
|
python/MooseDocs/commands/serve.py
|
Python
|
lgpl-2.1
| 2,695
|
[
"MOOSE"
] |
a74de31b76acadfcde53afadca43de8eca5efddee5cea0e7a3d65f77c1142b53
|
###############################################################################
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public #
# License as published by the Free Software Foundation; either #
# version 3.0 of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library. #
# #
###############################################################################
# Package metadata for this test module.
__author__ = "Tim Lamberton"
__copyright__ = "Copyright 2015"
__credits__ = ["Tim Lamberton"]
__license__ = "LGPLv3"
__maintainer__ = "Tim Lamberton"
__email__ = "tim.lamberton@gmail.com"
###############################################################################
# system imports
from nose.tools import assert_equals, assert_true
import sys
import os
import subprocess
import pysam
###############################################################################
###############################################################################
###############################################################################
###############################################################################
class TestBamFilter:
    """End-to-end tests for the `bamm filter` command.

    Each test runs the bamm executable over known input BAM files and
    compares the filtered output, read by read, against pre-generated
    expected BAM files stored under filter_test_data/.
    """

    @classmethod
    def setup_class(self):
        """Setup class variables before any tests."""
        # Path of the bamm executable under test.
        self.bamm = os.path.join("~", "git", "BamM", "bin", "bamm")
        # Directory containing the input BAMs and per-BAM expected outputs.
        self.dataDir = os.path.join(os.path.split(__file__)[0], "filter_test_data")
        self.bamNames = ["1", "2"]
        # the following files already exist
        self.bamFiles = dict(zip(self.bamNames,
                                 [os.path.join(self.dataDir, "%s.bam" % name) for name in self.bamNames]))
        self.testDataDirs = dict(zip(self.bamNames,
                                     [os.path.join(self.dataDir, name) for name in self.bamNames]))
        # generated files
        self.outputBamFnames = dict(zip(self.bamNames,
                                        ["%s_filtered.bam" % name for name in self.bamNames]))
        # Debugging aid: flip this guard to True to point at non-existent
        # files, which should make every test fail.
        if False:
            self.bamFiles = dict(zip(self.bamNames,
                                     [os.path.join(self.dataDir, "f.bam") for _ in self.bamNames]))
            self.outputBamFnames = dict(zip(self.bamNames,
                                            ["f_filtered.bam" for _ in self.bamNames]))
        # test parameters: test name -> arguments passed to `bamm filter`
        self.params = {
            "none": ['--use_secondary',
                     '--use_supplementary',
                     '--percentage_aln', "0",
                     '--percentage_id', "0"],
            "aln_only_90": ['--use_secondary',
                            '--use_supplementary',
                            '--percentage_aln', "0.9",
                            '--percentage_id', "0"],
            "aln_only_101": ['--use_secondary',
                             '--use_supplementary',
                             '--percentage_aln', "1.01",
                             '--percentage_id', "0"],
            "id_only_90": ['--use_secondary',
                           '--use_supplementary',
                           '--percentage_aln', "0",
                           '--percentage_id', "0.9"],
            "id_only_101": ['--use_secondary',
                            '--use_supplementary',
                            '--percentage_aln', "0",
                            '--percentage_id', "1.01"],
            "no_secondary_only": ['--use_supplementary',
                                  '--percentage_aln', "0",
                                  '--percentage_id', "0"],
            "no_supp_only": ['--use_secondary',
                             '--percentage_aln', "0",
                             '--percentage_id', "0"],
            "all_conds": ['--percentage_aln', "0.9",
                          '--percentage_id', "0.9"]
        }

    @classmethod
    def teardown_class(self):
        """Remove the generated filtered BAMs after all tests have run."""
        for name in self.bamNames:
            self.rmTestFile(name)

    @classmethod
    def rmTestFile(self, name):
        """Delete the generated filtered BAM for `name`, warning on stderr if missing."""
        path = os.path.join(self.dataDir, self.outputBamFnames[name])
        if os.path.exists(path):
            os.remove(path)
        else:
            sys.stderr.write("No file: %s\n" % path)

    def generate_bam(self, name, args):
        """Run `bamm filter` on the named input BAM with the given argument list."""
        cmd = "%s filter -b %s -o %s %s" % (self.bamm, self.bamFiles[name], self.dataDir, " ".join(args))
        subprocess.call(cmd, shell=True)

    def assert_equal_query_sequences(self, out, expected):
        """Assert the reads in `out` match the reads in `expected`, pairwise and in order."""
        try:
            aln_expected = pysam.AlignmentFile(expected, "rb")
        except:
            # BUGFIX: a stray bare `raise` here previously re-raised the original
            # error, making this assertion message unreachable dead code (and
            # inconsistent with the matching except block just below).
            raise AssertionError('File of expected reads "%s" exists and is readable.' % expected)
        try:
            aln_out = pysam.AlignmentFile(out, "rb")
        except:
            raise AssertionError('File of filtered reads "%s" exists and is readable.' % out)
        # Walk both files in lockstep until both are exhausted.
        while True:
            try:
                expected_read = aln_expected.next()
            except StopIteration:
                expected_read = None
            try:
                out_read = aln_out.next()
            except StopIteration:
                out_read = None
            if expected_read is None and out_read is None:
                break
            assert_true(expected_read is not None and out_read is not None, 'Filtered file "%s" contains expected number of reads.' % out)
            assert_true(expected_read.compare(out_read) == 0, 'Filtered file "%s" queries match expected queries.' % out)

    def testFilter(self):
        """Run every parameter set over every input BAM and verify the filtered output."""
        for bamName in self.bamNames:
            for (testName, args) in self.params.iteritems():
                self.generate_bam(bamName, args)
                out = os.path.join(self.dataDir, self.outputBamFnames[bamName])
                test = os.path.join(self.testDataDirs[bamName], "%s_%s.bam" % (bamName, testName))
                self.assert_equal_query_sequences(out, test)
                #self.rmTestFile(bamName)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
|
wwood/BamM
|
bamm/tests/test_bamFilter.py
|
Python
|
lgpl-3.0
| 7,042
|
[
"pysam"
] |
e289c70a022e98bb4c6ac2b76d0b386005655954d6a48b1330e50fb9d6f530d6
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
"""
import collections
import datetime
import logging
import os
import re
import shlex
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import io_stats_parser
try:
import pexpect
except:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')

# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'

# Java properties file
LOCAL_PROPERTIES_PATH = '/data/local.prop'

# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'

# Parses lines of the form "<key>: <value> kB" (memory usage report lines).
MEMORY_INFO_RE = re.compile('^(?P<key>\w+):\s+(?P<usage_kb>\d+) kB$')

# Parses "<user> <name> <pid> <usage_bytes>" memory listing lines
# (NVIDIA-specific format, per the constant's name).
NVIDIA_MEMORY_INFO_RE = re.compile('^\s*(?P<user>\S+)\s*(?P<name>\S+)\s*'
                                   '(?P<pid>\d+)\s*(?P<usage_bytes>\d+)$')

# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82

# On-device location where the md5sum helper binary is pushed, and the
# LD_LIBRARY_PATH prefix needed to run it there.
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
MD5SUM_LD_LIBRARY_PATH = 'LD_LIBRARY_PATH=%s' % MD5SUM_DEVICE_FOLDER
def GetEmulators():
  """Returns a list of emulators. Does not filter by status (e.g. offline).

  Every attached device whose serial starts with 'emulator' is returned,
  regardless of its state in the `adb devices` listing.
  """
  adb_output = cmd_helper.GetCmdOutput([constants.ADB_PATH, 'devices'])
  return re.findall('^emulator-[0-9]+', adb_output, re.MULTILINE)
def GetAVDs():
  """Returns the names of all Android Virtual Devices known to the 'android' tool."""
  list_output = cmd_helper.GetCmdOutput(['android', 'list', 'avd'])
  return re.findall('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', list_output, re.MULTILINE)
def GetAttachedDevices():
  """Returns a list of attached, online android devices.

  If a preferred device has been set with ANDROID_SERIAL, it will be first in
  the returned list. Devices listed as 'offline' are excluded.
  """
  adb_output = cmd_helper.GetCmdOutput([constants.ADB_PATH, 'devices'])
  devices = re.findall('^([a-zA-Z0-9_:.-]+)\tdevice$', adb_output, re.MULTILINE)
  # Promote the preferred device (ANDROID_SERIAL) to the front when attached.
  preferred = os.environ.get('ANDROID_SERIAL')
  if preferred in devices:
    devices.remove(preferred)
    devices.insert(0, preferred)
  return devices
def IsDeviceAttached(device):
  """Returns True if |device| appears in the list of attached, online devices."""
  attached = GetAttachedDevices()
  return device in attached
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
def _ComputeFileListHash(md5sum_output):
"""Returns a list of MD5 strings from the provided md5sum output."""
return [line.split(' ')[0] for line in md5sum_output]
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
if not command_output:
return False
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
  """Parses the leading "MM-DD HH:MM:SS.mmm" logcat timestamp of |log_line|.

  Logcat lines omit the year, so |year| is supplied by the caller.

  Returns:
    A datetime, or None if the line does not start with a valid timestamp.
  """
  timestamp_text = '%s-%s' % (year, log_line[:18])
  try:
    return datetime.datetime.strptime(timestamp_text, '%Y-%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
"""
  def __init__(self, device=None):
    # Make sure the directory holding the adb binary is on PATH.
    adb_dir = os.path.dirname(constants.ADB_PATH)
    if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
      # Required by third_party/android_testrunner to call directly 'adb'.
      os.environ['PATH'] += os.pathsep + adb_dir
    self._adb = adb_interface.AdbInterface()
    if device:
      self._adb.SetTargetSerial(device)
    # Serial of the targeted device; None means "all attached devices".
    self._device = device
    self._logcat = None
    self.logcat_process = None
    self._logcat_tmpoutfile = None
    # Paths pushed to the device, tracked so they can be cleaned up later.
    self._pushed_files = []
    # Cache the device's UTC offset string (e.g. '+0100'); note this issues a
    # shell command to the device at construction time.
    self._device_utc_offset = self.RunShellCommand('date +%z')[0]
    # Lazily-resolved values, cached on first use.
    self._md5sum_build_dir = ''
    self._external_storage = ''
    self._util_wrapper = ''
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
  def Adb(self):
    """Returns our AdbInterface to avoid us wrapping all its methods."""
    # Direct accessor; callers use the interface for commands not wrapped here.
    return self._adb
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
out = self._adb.SendCommand('get-state')
return out.strip() == 'device'
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
assert self._external_storage, 'Unable to find $EXTERNAL_STORAGE'
return self._external_storage
def WaitForDevicePm(self):
"""Blocks until the device's package manager is available.
To workaround http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm()
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
timeout = 300
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm()
self.WaitForSdCardReady(timeout)
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
  def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
                     reboots_on_failure=2):
    """Installs specified package and reboots device on timeouts.

    Args:
      apk_path: Path to .apk file to install.
      keep_data: Reinstalls instead of uninstalling first, preserving the
        application data.
      package_name: Package name (only needed if keep_data=False).
      reboots_on_failure: number of time to reboot if package manager is frozen.

    Returns:
      A status string returned by adb install
    """
    reboots_left = reboots_on_failure
    while True:
      try:
        if not keep_data:
          assert package_name
          self.Uninstall(package_name)
        install_status = self.Install(apk_path, reinstall=keep_data)
        if 'Success' in install_status:
          return install_status
      except errors.WaitForResponseTimedOutError:
        # Mark the build step as warned for the buildbot, then fall through to
        # the reboot-and-retry logic below.
        print '@@@STEP_WARNINGS@@@'
        logging.info('Timeout on installing %s on device %s', apk_path,
                     self._device)
      if reboots_left <= 0:
        raise Exception('Install failure')
      # Force a hard reboot on last attempt
      self.Reboot(full_reboot=(reboots_left == 1))
      reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
  def RestartAdbServer(self):
    """Restart the adb server."""
    # Kill then start; used to recover a lost adb/emulator connection.
    self.KillAdbServer()
    self.StartAdbServer()
def KillAdbServer(self):
"""Kill adb server."""
adb_cmd = [constants.ADB_PATH, 'kill-server']
return cmd_helper.RunCmd(adb_cmd)
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = [constants.ADB_PATH, 'start-server']
return cmd_helper.RunCmd(adb_cmd)
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
# Now the device is there, but system not boot completed.
# Query the sys.boot_completed flag with a basic command
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self._adb.SendShellCommand('getprop sys.boot_completed',
retry_count=1)
output = output.strip()
if output == '1':
boot_completed = True
else:
# If 'error: xxx' returned when querying the flag, it means
# adb server lost the connection to the emulator, so restart the adb
# server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send. Must not include
the single quotes as we use them to escape the whole command.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
self._LogShell(command)
if "'" in command: logging.warning(command + " contains ' quotes")
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
self._LogShell('\n'.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
def KillAll(self, process):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
self.RunShellCommand('kill -9 ' + ' '.join(pids))
return len(pids)
def KillAllBlocking(self, process, timeout_sec):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return 0
return processes_killed
def _GetActivityCommand(self, package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
return cmd
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop)
self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop)
self.StartMonitoringLogcat()
self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return GetLogTimestamp(start_line, self.GetDeviceYear())
  def GoHome(self):
    """Tell the device to return to the home screen. Blocks until completion."""
    # -W makes 'am start' wait until the launch completes.
    self.RunShellCommand('am start -W '
        '-a android.intent.action.MAIN -c android.intent.category.HOME')
  def CloseApplication(self, package):
    """Attempt to close down the application, using increasing violence.

    Args:
      package: Name of the process to kill off, e.g.
        com.google.android.apps.chrome
    """
    self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
  """Closes and clears all state for the given |package|."""
  # 'pm clear' may never return when invoked for a package that is not
  # installed, so probe with 'pm path' first: its output is non-empty
  # if and only if the package exists.
  if self.RunShellCommand('pm path ' + package):
    self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
  """Injects a key event with the given numeric |keycode| on the device.

  Args:
    keycode: Numeric keycode to send (see "enum" at top of file).
  """
  keyevent_cmd = 'input keyevent %d' % keycode
  self.RunShellCommand(keyevent_cmd)
def CheckMd5Sum(self, local_path, device_path, ignore_paths=False):
  """Compares the md5sum of a local path against a device path.

  Args:
    local_path: Path (file or directory) on the host.
    device_path: Path on the device.
    ignore_paths: If False, both the md5sum and the relative paths/names of
        files must match. If True, only the md5sum must match.

  Returns:
    True if the md5sums match.
  """
  assert os.path.exists(local_path), 'Local path not found %s' % local_path

  if not self._md5sum_build_dir:
    # Lazily locate and push the md5sum helper binary the first time a
    # comparison is requested: try the BUILD_TYPE output directory first,
    # then fall back to Release.
    default_build_type = os.environ.get('BUILD_TYPE', 'Debug')
    build_dir = '%s/%s/' % (
        cmd_helper.OutDirectory().get(), default_build_type)
    md5sum_dist_path = '%s/md5sum_dist' % build_dir
    if not os.path.exists(md5sum_dist_path):
      build_dir = '%s/Release/' % cmd_helper.OutDirectory().get()
      md5sum_dist_path = '%s/md5sum_dist' % build_dir
      assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
    command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
    assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
    self._md5sum_build_dir = build_dir
    # NOTE(review): this records |device_path| (the path being compared),
    # not the pushed md5sum binary, for later cleanup -- confirm intended.
    self._pushed_files.append(device_path)

  hashes_on_device = _ComputeFileListHash(
      self.RunShellCommand(MD5SUM_LD_LIBRARY_PATH + ' ' + self._util_wrapper +
                           ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path))
  # (A redundant duplicate of the local_path existence assert was removed
  # here; the precondition is already checked at the top of the method.)
  md5sum_output = cmd_helper.GetCmdOutput(
      ['%s/md5sum_bin_host' % self._md5sum_build_dir, local_path])
  hashes_on_host = _ComputeFileListHash(md5sum_output.splitlines())
  if ignore_paths:
    # Keep only the hash column, dropping the path component.
    hashes_on_device = [h.split()[0] for h in hashes_on_device]
    hashes_on_host = [h.split()[0] for h in hashes_on_host]
  return hashes_on_device == hashes_on_host
def PushIfNeeded(self, local_path, device_path):
  """Pushes |local_path| to |device_path|.

  Works for files and directories. This method skips copying any paths in
  |test_data_paths| that already exist on the device with the same hash.
  All pushed files can be removed by calling RemovePushedFiles().
  """
  # Skip the (slow) push entirely when the contents already match.
  if self.CheckMd5Sum(local_path, device_path):
    return

  # They don't match, so remove everything first and then create it.
  if os.path.isdir(local_path):
    self.RunShellCommand('rm -r %s' % device_path, timeout_time=2 * 60)
    self.RunShellCommand('mkdir -p %s' % device_path)

  # NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
  # 60 seconds which isn't sufficient for a lot of users of this method.
  push_command = 'push %s %s' % (local_path, device_path)
  self._LogShell(push_command)
  output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
  assert _HasAdbPushSucceeded(output)
def GetFileContents(self, filename, log_result=False):
  """Returns the lines of the device file |filename| (empty if unreadable)."""
  cat_cmd = 'cat "%s" 2>/dev/null' % filename
  return self.RunShellCommand(cat_cmd, log_result=log_result)
def SetFileContents(self, filename, contents):
  """Writes |contents| to the file specified by |filename|."""
  # Stage the contents in a host-side temp file and push it, since there
  # is no direct "write bytes to device file" primitive.
  with tempfile.NamedTemporaryFile() as host_file:
    host_file.write(contents)
    host_file.flush()
    self._adb.Push(host_file.name, filename)
# printf-style templates (one %d slot) used to generate unique names for
# temporary files on the device's external storage.
_TEMP_FILE_BASE_FMT = 'temp_file_%d'
_TEMP_SCRIPT_FILE_BASE_FMT = 'temp_script_file_%d.sh'

def _GetDeviceTempFileName(self, base_name):
  # Returns an unused external-storage path by substituting increasing
  # integers into |base_name| until a non-existent name is found.
  i = 0
  while self.FileExistsOnDevice(
      self.GetExternalStorage() + '/' + base_name % i):
    i += 1
  return self.GetExternalStorage() + '/' + base_name % i
def CanAccessProtectedFileContents(self):
  """Returns True if Get/SetProtectedFileContents would work via "su".

  Devices running user builds don't have adb root, but may provide "su"
  which can be used for accessing protected files.
  """
  output = self.RunShellCommand('su -c cat /dev/null')
  # A working 'su' produces no output lines (or one blank line).
  if not output:
    return True
  return output[0].strip() == ''
def GetProtectedFileContents(self, filename, log_result=False):
  """Gets contents from the protected file specified by |filename|.

  This is less efficient than GetFileContents, but will work for protected
  files and device files.

  Args:
    filename: Path of the (possibly protected) file on the device.
    log_result: If True, log the command output (was previously accepted
        but silently ignored; it is now forwarded to RunShellCommand).

  Returns:
    The file contents as a list of output lines.
  """
  # Run cat as root so protected files are readable.
  return self.RunShellCommand('su -c cat "%s" 2> /dev/null' % filename,
                              log_result=log_result)
def SetProtectedFileContents(self, filename, contents):
  """Writes |contents| to the protected file specified by |filename|.

  This is less efficient than SetFileContents, but will work for protected
  files and device files.
  """
  temp_file = self._GetDeviceTempFileName(AndroidCommands._TEMP_FILE_BASE_FMT)
  temp_script = self._GetDeviceTempFileName(
      AndroidCommands._TEMP_SCRIPT_FILE_BASE_FMT)

  # Put the contents in a temporary file
  self.SetFileContents(temp_file, contents)
  # Create a script to copy the file contents to its final destination
  # NOTE(review): the redirection is wrapped in a script, presumably
  # because passing '>' directly through 'su -c' is unreliable -- confirm.
  self.SetFileContents(temp_script, 'cat %s > %s' % (temp_file, filename))
  # Run the script as root
  self.RunShellCommand('su -c sh %s' % temp_script)
  # And remove the temporary files
  self.RunShellCommand('rm ' + temp_file)
  self.RunShellCommand('rm ' + temp_script)
def RemovePushedFiles(self):
  """Removes all files pushed with PushIfNeeded() from the device."""
  for pushed_path in self._pushed_files:
    self.RunShellCommand('rm -r %s' % pushed_path, timeout_time=2 * 60)
def ListPathContents(self, path):
  """Lists files in all subdirectories of |path|.

  Args:
    path: The path to list.

  Returns:
    A dict of {"name": (size, lastmod), ...}.
  """
  # Example output:
  # /foo/bar:
  # -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
  #
  # Raw strings avoid invalid '\s' escape sequences, which are a
  # SyntaxWarning (and eventually an error) in newer Python versions.
  re_file = re.compile(r'^-(?P<perms>[^\s]+)\s+'
                       r'(?P<user>[^\s]+)\s+'
                       r'(?P<group>[^\s]+)\s+'
                       r'(?P<size>[^\s]+)\s+'
                       r'(?P<date>[^\s]+)\s+'
                       r'(?P<time>[^\s]+)\s+'
                       r'(?P<filename>[^\s]+)$')
  return _GetFilesFromRecursiveLsOutput(
      path, self.RunShellCommand('ls -lR %s' % path), re_file,
      self._device_utc_offset)
def SetJavaAssertsEnabled(self, enable):
  """Sets or removes the device java assertions property.

  Args:
    enable: If True the property will be set.

  Returns:
    True if the file was modified (reboot is required for it to take effect).
  """
  # First ensure the desired property is persisted.
  temp_props_file = tempfile.NamedTemporaryFile()
  properties = ''
  if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
    properties = file(temp_props_file.name).read()
  re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
                         r'\s*=\s*all\s*$', re.MULTILINE)
  if enable != bool(re.search(re_search, properties)):
    # Persisted state differs from what is requested: strip any existing
    # assignment of the property, then append the new one when enabling.
    re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
                            r'\s*=\s*\w+\s*$', re.MULTILINE)
    properties = re.sub(re_replace, '', properties)
    if enable:
      properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
    file(temp_props_file.name, 'w').write(properties)
    self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)

  # Next, check the current runtime value is what we need, and
  # if not, set it and report that a reboot is required.
  was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
  if was_set == enable:
    return False

  self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
                                            enable and 'all' or ''))
  return True
def GetBuildId(self):
  """Returns the build ID of the system (e.g. JRM79C)."""
  output = self.RunShellCommand('getprop ro.build.id')
  build_id = output[0]
  assert build_id
  return build_id
def GetBuildType(self):
  """Returns the build type of the system (e.g. eng)."""
  output = self.RunShellCommand('getprop ro.build.type')
  build_type = output[0]
  assert build_type
  return build_type
def GetProductModel(self):
  """Returns the name of the product model (e.g. "Galaxy Nexus")."""
  output = self.RunShellCommand('getprop ro.product.model')
  model = output[0]
  assert model
  return model
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
  """Starts monitoring the output of logcat, for use with WaitForLogMatch.

  Args:
    clear: If True the existing logcat output will be cleared, to avoiding
        matching historical output lurking in the log.
    logfile: If set, a file-like object the raw logcat stream is also
        written to (after EOL normalization).
    filters: A list of logcat filters to be used.
  """
  if clear:
    self.RunShellCommand('logcat -c')
  args = []
  if self._adb._target_arg:
    # Carry the adb device-selection flags over to the logcat invocation.
    args += shlex.split(self._adb._target_arg)
  args += ['logcat', '-v', 'threadtime']
  if filters:
    args.extend(filters)
  else:
    args.append('*:v')

  if logfile:
    logfile = NewLineNormalizer(logfile)

  # Spawn logcat and syncronize with it: emit a marker and wait for it to
  # flow through, proving the pexpect child is actually streaming lines.
  # Up to 4 attempts before giving up.
  for _ in range(4):
    self._logcat = pexpect.spawn(constants.ADB_PATH, args, timeout=10,
                                 logfile=logfile)
    self.RunShellCommand('log startup_sync')
    if self._logcat.expect(['startup_sync', pexpect.EOF,
                            pexpect.TIMEOUT]) == 0:
      break
    self._logcat.close(force=True)
  else:
    logging.critical('Error reading from logcat: ' + str(self._logcat.match))
    sys.exit(1)
def GetMonitoredLogCat(self):
  """Returns the pexpect-spawned "adb logcat" child, starting one if needed."""
  if not self._logcat:
    self.StartMonitoringLogcat(clear=False)
  return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
  """Blocks until a matching line is logged or a timeout occurs.

  Args:
    success_re: A compiled re to search each line for.
    error_re: A compiled re which, if found, terminates the search for
        |success_re|. If None is given, no error condition will be detected.
    clear: If True the existing logcat output will be cleared, defaults to
        false.
    timeout: Timeout in seconds to wait for a log match.

  Raises:
    pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
    or |error_re|.

  Returns:
    The re match object if |success_re| is matched first or None if |error_re|
    is matched first.
  """
  logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
  t0 = time.time()
  # The outer loop exists to restart monitoring after a logcat EOF.
  while True:
    if not self._logcat:
      self.StartMonitoringLogcat(clear)
    try:
      while True:
        # Note this will block for upto the timeout _per log line_, so we need
        # to calculate the overall timeout remaining since t0.
        time_remaining = t0 + timeout - time.time()
        if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
        self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
        line = self._logcat.match.group(1)
        if error_re:
          error_match = error_re.search(line)
          if error_match:
            return None
        success_match = success_re.search(line)
        if success_match:
          return success_match
        logging.info('<<< Skipped Logcat Line:' + str(line))
    except pexpect.TIMEOUT:
      # Re-raise with a more actionable message.
      raise pexpect.TIMEOUT(
          'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
          'to debug)' %
          (timeout, success_re.pattern))
    except pexpect.EOF:
      # It seems that sometimes logcat can end unexpectedly. This seems
      # to happen during Chrome startup after a reboot followed by a cache
      # clean. I don't understand why this happens, but this code deals with
      # getting EOF in logcat.
      logging.critical('Found EOF in adb logcat. Restarting...')
      # Rerun spawn with original arguments. Note that self._logcat.args[0] is
      # the path of adb, so we don't want it in the arguments.
      self._logcat = pexpect.spawn(constants.ADB_PATH,
                                   self._logcat.args[1:],
                                   timeout=self._logcat.timeout,
                                   logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
  """Starts recording logcat output to eventually be saved as a string.

  This call should come before some series of tests are run, with either
  StopRecordingLogcat or SearchLogcatRecord following the tests.

  Args:
    clear: True if existing log output should be cleared.
    filters: A list of logcat filters to be used. Defaults to ['*:v'].
  """
  # A mutable default argument (filters=['*:v']) would be shared across
  # calls; use the None-sentinel idiom instead. Behavior is unchanged.
  if filters is None:
    filters = ['*:v']
  if clear:
    self._adb.SendCommand('logcat -c')
  logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
                                                       ' '.join(filters))
  # bufsize=0 keeps the temp file unbuffered so output is not lost if the
  # recording subprocess is killed.
  self._logcat_tmpoutfile = tempfile.TemporaryFile(bufsize=0)
  self.logcat_process = subprocess.Popen(logcat_command, shell=True,
                                         stdout=self._logcat_tmpoutfile)
def StopRecordingLogcat(self):
  """Stops an existing logcat recording subprocess and returns output.

  Returns:
    The logcat output as a string or an empty string if logcat was not
    being recorded at the time.
  """
  if not self.logcat_process:
    return ''
  # Cannot evaluate directly as 0 is a possible value.
  # Better to read the self.logcat_process.stdout before killing it,
  # Otherwise the communicate may return incomplete output due to pipe break.
  if self.logcat_process.poll() is None:
    self.logcat_process.kill()
  self.logcat_process.wait()
  self.logcat_process = None
  # The recording was streamed to a temp file; rewind and read it back.
  self._logcat_tmpoutfile.seek(0)
  output = self._logcat_tmpoutfile.read()
  self._logcat_tmpoutfile.close()
  return output
def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
                       log_level=None, component=None):
  """Searches the specified logcat output and returns results.

  This method searches through the logcat output specified by record for a
  certain message, narrowing results by matching them against any other
  specified criteria. It returns all matching lines as described below.

  Args:
    record: A string generated by Start/StopRecordingLogcat to search.
    message: An output string to search for.
    thread_id: The thread id that is the origin of the message.
    proc_id: The process that is the origin of the message.
    log_level: The log level of the message.
    component: The name of the component that would create the message.

  Returns:
    A list of dictionaries representing matching entries, each containing
    keys thread_id, proc_id, log_level, component, and message.
  """
  if thread_id:
    thread_id = str(thread_id)
  if proc_id:
    proc_id = str(proc_id)
  results = []
  # Raw string avoids invalid '\d'/'\s' escape sequences, which are a
  # SyntaxWarning (and eventually an error) in newer Python versions.
  reg = re.compile(r'(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
                   re.MULTILINE)
  log_list = reg.findall(record)

  for (tid, pid, log_lev, comp, msg) in log_list:
    # None/falsy criteria match anything; |message| is a substring match.
    if ((not thread_id or thread_id == tid) and
        (not proc_id or proc_id == pid) and
        (not log_level or log_level == log_lev) and
        (not component or component == comp) and msg.find(message) > -1):
      match = dict({'thread_id': tid, 'proc_id': pid,
                    'log_level': log_lev, 'component': comp,
                    'message': msg})
      results.append(match)

  return results
def ExtractPid(self, process_name):
  """Extracts Process Ids for a given process name from Android Shell.

  Args:
    process_name: name of the process on the device.

  Returns:
    List of all the process ids (as strings) that match the given name.
    If the name of a process exactly matches the given name, the pid of
    that process will be inserted to the front of the pid list.
  """
  matching_pids = []
  for ps_line in self.RunShellCommand('ps', log_result=False):
    fields = ps_line.split()
    if not fields:
      continue
    name_field = fields[-1]  # The process name is the last column.
    if process_name not in name_field:
      continue
    try:
      pid_field = fields[1]  # The PID is the second column.
    except IndexError:
      continue
    if process_name == name_field:
      # Exact matches go to the front of the list.
      matching_pids.insert(0, pid_field)
    else:
      matching_pids.append(pid_field)
  return matching_pids
def GetIoStats(self):
  """Gets cumulative disk IO stats since boot (for all processes).

  Returns:
    Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
    was an error.
  """
  for stats_line in self.GetFileContents('/proc/diskstats', log_result=False):
    parsed = io_stats_parser.ParseIoStatsLine(stats_line)
    # Only the 'mmcblk0' device (internal storage) is reported.
    if parsed.device != 'mmcblk0':
      continue
    return {
        'num_reads': parsed.num_reads_issued,
        'num_writes': parsed.num_writes_completed,
        'read_ms': parsed.ms_spent_reading,
        'write_ms': parsed.ms_spent_writing,
    }
  logging.warning('Could not find disk IO stats.')
  return None
def GetMemoryUsageForPid(self, pid):
  """Returns the memory usage for given pid.

  Args:
    pid: The pid number of the specific process running on device.

  Returns:
    A tuple containing:
    [0]: Dict of {metric:usage_kb}, for the process which has specified pid.
    The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
    Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
    KernelPageSize, MMUPageSize, Nvidia (tablet only).
    [1]: Detailed /proc/[PID]/smaps information.
  """
  usage_dict = collections.defaultdict(int)
  smaps = collections.defaultdict(dict)
  # Tracks the mapping (pathname) that subsequent metric lines belong to.
  current_smap = ''
  for line in self.GetProtectedFileContents('/proc/%s/smaps' % pid,
                                            log_result=False):
    items = line.split()
    # See man 5 proc for more details. The format is:
    # address perms offset dev inode pathname
    if len(items) > 5:
      current_smap = ' '.join(items[5:])
    elif len(items) > 3:
      current_smap = ' '.join(items[3:])
    match = re.match(MEMORY_INFO_RE, line)
    if match:
      key = match.group('key')
      usage_kb = int(match.group('usage_kb'))
      # Accumulate both the process-wide total and the per-mapping total.
      usage_dict[key] += usage_kb
      if key not in smaps[current_smap]:
        smaps[current_smap][key] = 0
      smaps[current_smap][key] += usage_kb
  if not usage_dict or not any(usage_dict.values()):
    # Presumably the process died between ps and calling this method.
    logging.warning('Could not find memory usage for pid ' + str(pid))

  # Nvidia-specific memory is reported separately via /d/nvmap (per the
  # docstring, tablet only); absent that file this loop reads nothing.
  for line in self.GetProtectedFileContents('/d/nvmap/generic-0/clients',
                                            log_result=False):
    match = re.match(NVIDIA_MEMORY_INFO_RE, line)
    if match and match.group('pid') == pid:
      usage_bytes = int(match.group('usage_bytes'))
      usage_dict['Nvidia'] = int(round(usage_bytes / 1000.0))  # kB
      break

  return (usage_dict, smaps)
def GetMemoryUsageForPackage(self, package):
  """Returns the memory usage for all processes whose name contains |package|.

  Args:
    package: A string holding process name to lookup pid list for.

  Returns:
    A tuple containing:
    [0]: Dict of {metric:usage_kb}, summed over all pids associated with
    |name|.
    The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
    Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
    KernelPageSize, MMUPageSize, Nvidia (tablet only).
    [1]: a list with detailed /proc/[PID]/smaps information.
  """
  total_usage = collections.defaultdict(int)
  smaps = collections.defaultdict(dict)
  for pid in self.ExtractPid(package):
    pid_usage, pid_smaps = self.GetMemoryUsageForPid(pid)
    smaps[pid] = pid_smaps
    # Sum each metric across all matching processes.
    for (metric, usage_kb) in pid_usage.items():
      total_usage[metric] += usage_kb
  return total_usage, smaps
def ProcessesUsingDevicePort(self, device_port):
  """Lists processes using the specified device port on loopback interface.

  Args:
    device_port: Port on device we want to check.

  Returns:
    A list of (pid, process_name) tuples using the specified port.
  """
  tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
  # /proc/net/tcp lists local addresses as little-endian hex IP:PORT;
  # 0100007F is 127.0.0.1 (loopback).
  tcp_address = '0100007F:%04X' % device_port
  pids = []
  for single_connect in tcp_results:
    connect_results = single_connect.split()
    # Column 1 is the TCP port, and Column 9 is the inode of the socket
    if connect_results[1] == tcp_address:
      socket_inode = connect_results[9]
      socket_name = 'socket:[%s]' % socket_inode
      # Map the socket inode back to owning processes via lsof.
      lsof_results = self.RunShellCommand('lsof', log_result=False)
      for single_process in lsof_results:
        process_results = single_process.split()
        # Ignore the line if it has less than nine columns in it, which may
        # be the case when a process stops while lsof is executing.
        if len(process_results) <= 8:
          continue
        # Column 0 is the executable name
        # Column 1 is the pid
        # Column 8 is the Inode in use
        if process_results[8] == socket_name:
          pids.append((int(process_results[1]), process_results[0]))
      break
  logging.info('PidsUsingDevicePort: %s', pids)
  return pids
def FileExistsOnDevice(self, file_name):
  """Checks whether the given file exists on the device.

  Args:
    file_name: Full path of file to check.

  Returns:
    True if the file exists, False otherwise.
  """
  assert '"' not in file_name, 'file_name cannot contain double quotes'
  try:
    status = self._adb.SendShellCommand(
        '\'test -e "%s"; echo $?\'' % (file_name))
    if 'test: not found' not in status:
      return int(status) == 0
    # Some device shells lack 'test'; fall back to probing with 'ls'.
    status = self._adb.SendShellCommand(
        '\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
    return int(status) == 0
  except ValueError:
    # int() failed: the shell returned something other than an exit code,
    # which typically means the device connection is broken.
    if IsDeviceAttached(self._device):
      raise errors.DeviceUnresponsiveError('Device may be offline.')

    return False
def TakeScreenshot(self, host_file):
  """Saves a screenshot image to |host_file| on the host.

  Args:
    host_file: Absolute path to the image file to store on the host.
  """
  dest_dir = os.path.dirname(host_file)
  if not os.path.exists(dest_dir):
    os.makedirs(dest_dir)
  # Capture to external storage on the device, then pull the file over.
  device_file = '%s/screenshot.png' % self.GetExternalStorage()
  self.RunShellCommand('/system/bin/screencap -p %s' % device_file)
  assert self._adb.Pull(device_file, host_file)
  assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
  """Sets a command prefix used when running locally-built binaries on the
  device (e.g. md5sum_bin).
  """
  self._util_wrapper = util_wrapper
def RunInstrumentationTest(self, test, test_package, instr_args, timeout):
  """Runs a single instrumentation test.

  Args:
    test: Test class/method.
    test_package: Package name of test apk.
    instr_args: Extra key/value to pass to am instrument.
    timeout: Timeout time in seconds.

  Returns:
    An instance of am_instrument_parser.TestResult object.
  """
  instrumentation_path = ('%s/android.test.InstrumentationTestRunner' %
                          test_package)
  args_with_filter = dict(instr_args)
  # Restrict the run to the single requested test class/method.
  args_with_filter['class'] = test
  logging.info(args_with_filter)
  (raw_results, _) = self._adb.StartInstrumentation(
      instrumentation_path=instrumentation_path,
      instrumentation_args=args_with_filter,
      timeout_time=timeout)
  # Exactly one test was requested, so exactly one result is expected.
  assert len(raw_results) == 1
  return raw_results[0]
def RunUIAutomatorTest(self, test, test_package, timeout):
  """Runs a single uiautomator test.

  Args:
    test: Test class/method.
    test_package: Name of the test jar.
    timeout: Timeout time in seconds.

  Returns:
    An instance of am_instrument_parser.TestResult object.
  """
  cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
  self._LogShell(cmd)
  output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
  # uiautomator doesn't fully conform to the instrumentation test runner
  # convention and doesn't terminate with INSTRUMENTATION_CODE.
  # Just assume the first result is valid.
  (test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
  if not test_results:
    raise errors.InstrumentationError(
        'no test results... device setup correctly?')
  return test_results[0]
class NewLineNormalizer(object):
  r"""A file-like object to normalize EOLs to '\n'.

  Pexpect runs adb within a pseudo-tty device (see
  http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
  as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
  lines, the log ends up having '\r\r\n' at the end of each line. This
  filter replaces the above with a single '\n' in the data stream.
  """

  def __init__(self, output):
    # The underlying file-like object all writes are forwarded to.
    self._output = output

  def write(self, data):
    # Collapse the pseudo-tty's doubled CR before forwarding.
    normalized = data.replace('\r\r\n', '\n')
    self._output.write(normalized)

  def flush(self):
    self._output.flush()
|
wangscript/libjingle-1
|
trunk/build/android/pylib/android_commands.py
|
Python
|
bsd-3-clause
| 49,727
|
[
"Galaxy"
] |
cca6c071991443e4e12712fa1ec0b9239e132000aecbcebf4f2c266fb1b17734
|
#
# Copyright (C) 2010 Gianluca Sforna
#
# All Rights Reserved
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
import math
class CanvasBase:
  """Base class for specialized canvas backends"""

  def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):
    """Draw a single line on the canvas

    A line is drawn from `p1` to `p2` using `color`. When `color2` is
    given, the second half of the segment is drawn with it instead.
    """
    raise NotImplementedError('This should be implemented')

  def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):
    """Draw some text

    Renders `text` at `pos` with the requested `font` and `color`.
    """
    raise NotImplementedError('This should be implemented')

  def addCanvasPolygon(self, ps, color=(0, 0, 0), **kwargs):
    """Draw a polygon

    Renders the polygon whose vertices are listed in `ps`, using `color`.
    """
    raise NotImplementedError('This should be implemented')

  def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0), color2=None, **kwargs):
    """Draw a dashed wedge

    The wedge is identified by the three points `p1`, `p2`, and `p3`.
    It will be drawn using the given `color`; if `color2` is specified
    it will be used for the second half of the wedge

    TODO: fix comment, I'm not sure what `dash` does
    """
    raise NotImplementedError('This should be implemented')

  def flush(self):
    """Complete any remaining draw operation

    This is supposed to be the last operation on the canvas before
    saving it
    """
    raise NotImplementedError('This should be implemented')

  def _getLinePoints(self, p1, p2, dash):
    """Return the vertex list of a dashed line from `p1` to `p2`.

    Successive entries of `dash` give alternating segment lengths along
    the line; the result contains the start point followed by the end
    point of every segment (the final segment is clipped at `p2`).
    """
    x1, y1 = p1
    x2, y2 = p2
    dx = x2 - x1
    dy = y2 - y1
    lineLen = math.sqrt(dx * dx + dy * dy)
    theta = math.atan2(dy, dx)
    # Unit direction vector along the line.
    dirX = math.cos(theta)
    dirY = math.sin(theta)

    pts = [(x1, y1)]
    covered = 0
    segment = 0
    current = (x1, y1)
    while covered < lineLen:
      step = dash[segment % len(dash)]
      if covered + step > lineLen:
        # Clip the last segment so it ends exactly at p2.
        step = lineLen - covered
      current = (current[0] + step * dirX, current[1] + step * dirY)
      pts.append(current)
      covered += step
      segment += 1
    return pts
|
rvianello/rdkit
|
rdkit/Chem/Draw/canvasbase.py
|
Python
|
bsd-3-clause
| 2,571
|
[
"RDKit"
] |
c54e12427d753d44e40dc649ee0351fe728bdca90f6eaf9f2ea3a6bfd37d9b8a
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
# `normed` is being deprecated in favor of `density` in histograms, so pick
# the histogram keyword that matches the installed matplotlib version.
if parse_version(matplotlib.__version__) >= parse_version("2.1"):
    density_param = {"density": True}
else:
    density_param = {"normed": True}

# %%
# Theoretical bounds
# ==================
# The distortion introduced by a random projection `p` is asserted by
# the fact that `p` is defining an eps-embedding with good probability
# as defined by:
#
# .. math::
#    (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
#
# Where u and v are any rows taken from a dataset of shape (n_samples,
# n_features) and p is a projection by a random Gaussian N(0, 1) matrix
# of shape (n_components, n_features) (or a sparse Achlioptas matrix).
#
# The minimum number of components to guarantee the eps-embedding is
# given by:
#
# .. math::
#    n\_components \geq 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
#
#
# The first plot shows that with an increasing number of samples ``n_samples``,
# the minimal number of dimensions ``n_components`` increases logarithmically
# in order to guarantee an ``eps``-embedding.

# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))

# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)

# One curve per distortion level eps, on log-log axes.
plt.figure()
for eps, color in zip(eps_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
    plt.loglog(n_samples_range, min_n_components, color=color)

plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# %%
# The second plot shows that an increase of the admissible
# distortion ``eps`` allows one to drastically reduce the minimal number of
# dimensions ``n_components`` for a given number of samples ``n_samples``.

# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)

# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))

# One curve per sample count, with a log-scaled y axis.
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
    min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
    plt.semilogy(eps_range, min_n_components, color=color)

plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# %%
# Empirical validation
# ====================
#
# We validate the above bounds on the 20 newsgroups text document
# (TF-IDF word frequencies) dataset or on the digits dataset:
#
# - for the 20 newsgroups dataset some 500 documents with 100k
#   features in total are projected using a sparse random matrix to smaller
#   euclidean spaces with various values for the target number of dimensions
#   ``n_components``.
#
# - for the digits dataset, some 8x8 gray level pixels data for 500
#   handwritten digits pictures are randomly projected to spaces for various
#   larger number of dimensions ``n_components``.
#
# The default dataset is the 20 newsgroups dataset. To run the example on the
# digits dataset, pass the ``--use-digits-dataset`` command line argument to
# this script.
if "--use-digits-dataset" in sys.argv:
    data = load_digits().data[:500]
else:
    data = fetch_20newsgroups_vectorized().data[:500]

# %%
# For each value of ``n_components``, we plot:
#
# - 2D distribution of sample pairs with pairwise distances in original
#   and projected spaces as x and y axis respectively.
#
# - 1D histogram of the ratio of those distances (projected / original).
n_samples, n_features = data.shape
print(
    "Embedding %d samples with dim %d using various random projections"
    % (n_samples, n_features)
)
n_components_range = np.array([300, 1000, 10000])
# Squared pairwise distances in the original space, flattened.
dists = euclidean_distances(data, squared=True).ravel()

# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]

for n_components in n_components_range:
    t0 = time()
    rp = SparseRandomProjection(n_components=n_components)
    projected_data = rp.fit_transform(data)
    print(
        "Projected %d samples from %d to %d in %0.3fs"
        % (n_samples, n_features, n_components, time() - t0)
    )
    # Report the memory footprint of the (sparse) projection matrix.
    if hasattr(rp, "components_"):
        n_bytes = rp.components_.data.nbytes
        n_bytes += rp.components_.indices.nbytes
        print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))

    # Distances in the projected space, restricted to the same sample pairs.
    projected_dists = euclidean_distances(projected_data, squared=True).ravel()[nonzero]

    plt.figure()
    min_dist = min(projected_dists.min(), dists.min())
    max_dist = max(projected_dists.max(), dists.max())
    plt.hexbin(
        dists,
        projected_dists,
        gridsize=100,
        cmap=plt.cm.PuBu,
        extent=[min_dist, max_dist, min_dist, max_dist],
    )
    plt.xlabel("Pairwise squared distances in original space")
    plt.ylabel("Pairwise squared distances in projected space")
    plt.title("Pairwise distances distribution for n_components=%d" % n_components)
    cb = plt.colorbar()
    cb.set_label("Sample pairs counts")

    rates = projected_dists / dists
    print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates)))

    plt.figure()
    plt.hist(rates, bins=50, range=(0.0, 2.0), edgecolor="k", **density_param)
    plt.xlabel("Squared distances rate: projected / original")
    plt.ylabel("Distribution of samples pairs")
    plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components)

    # TODO: compute the expected value of eps and add them to the previous plot
    # as vertical lines / region

plt.show()

# %%
# We can see that for low values of ``n_components`` the distribution is wide
# with many distorted pairs and a skewed distribution (due to the hard
# limit of zero ratio on the left as distances are always positives)
# while for larger values of n_components the distortion is controlled
# and the distances are well preserved by the random projection.

# %%
# Remarks
# =======
#
# According to the JL lemma, projecting 500 samples without too much distortion
# will require at least several thousands dimensions, irrespective of the
# number of features of the original dataset.
#
# Hence using random projections on the digits dataset which only has 64
# features in the input space does not make sense: it does not allow
# for dimensionality reduction in this case.
#
# On the twenty newsgroups on the other hand the dimensionality can be
# decreased from 56436 down to 10000 while reasonably preserving
# pairwise distances.
|
sergeyf/scikit-learn
|
examples/miscellaneous/plot_johnson_lindenstrauss_bound.py
|
Python
|
bsd-3-clause
| 7,773
|
[
"Gaussian"
] |
8623cfcc4bda5c9400c4375d0f287b7ebb1048d7214e3835c1558ec8578bb258
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
Pyste version %s
Usage:
pyste [options] interface-files
where options are:
--module=<name> The name of the module that will be generated;
defaults to the first interface filename, without
the extension.
-I <path> Add an include path
-D <symbol> Define symbol
--multiple Create various cpps, instead of only one
(useful during development)
--out=<name> Specify output filename (default: <module>.cpp)
in --multiple mode, this will be a directory
--no-using Do not declare "using namespace boost";
use explicit declarations instead
--pyste-ns=<name> Set the namespace where new types will be declared;
default is the empty namespace
--debug Writes the xml for each file parsed in the current
directory
--cache-dir=<dir> Directory for cache files (speeds up future runs)
--only-create-cache Recreates all caches (doesn't generate code).
--generate-main Generates the _main.cpp file (in multiple mode)
--file-list A file with one pyste file per line. Use as a
substitute for passing the files in the command
line.
--gccxml-path=<path> Path to gccxml executable (default: gccxml)
--no-default-include Do not use INCLUDE environment variable for include
files to pass along gccxml.
-h, --help Print this help and exit
-v, --version Print version information
"""
import sys
import os
import getopt
import exporters
import SingleCodeUnit
import MultipleCodeUnit
import infos
import exporterutils
import settings
import gc
import sys
from policies import *
from CppParser import CppParser, CppParserError
import time
import declarations
__version__ = '0.9.30'
def RecursiveIncludes(include):
    """Return a list containing the include dir and all its subdirectories.

    Directories named CVS are left out of the result (their contents are
    still traversed, matching the original os.path.walk behaviour).  The
    original implementation also appended *include* itself a second time,
    because os.path.walk invoked the callback for the top directory too;
    this version lists the root exactly once.  os.walk also replaces
    os.path.walk, which was removed in Python 3.
    """
    dirs = [include]
    for dirpath, dirnames, _ in os.walk(include):
        for name in dirnames:
            # ignore CVS administrative dirs
            if name != 'CVS':
                dirs.append(os.path.join(dirpath, name))
    return dirs
def GetDefaultIncludes():
    """Return the include directories from the INCLUDE environment variable.

    Returns an empty list when the variable is not set.
    """
    include = os.environ.get('INCLUDE')
    if include is None:
        return []
    return include.split(os.pathsep)
def ProcessIncludes(includes):
    """Normalize include paths in place: on win32, use forward slashes."""
    if sys.platform != 'win32':
        return
    for i, path in enumerate(includes):
        includes[i] = path.replace('\\', '/')
def ReadFileList(filename):
    """Return the non-blank, stripped lines of *filename* as a list.

    Used for --file-list: one pyste interface file per line.
    """
    # 'open' instead of the 'file' builtin: identical in Python 2 and the
    # only spelling that exists in Python 3.
    f = open(filename)
    files = []
    try:
        for line in f:
            line = line.strip()
            if line:
                files.append(line)
    finally:
        f.close()
    return files
def ParseArguments():
    """Parse sys.argv and return the pyste configuration.

    Returns a tuple: (includes, defines, module, out, files, multiple,
    cache_dir, create_cache, generate_main, gccxml_path).  Exits the
    process (via Usage) on bad or missing arguments.
    """
    def Usage():
        # Print the module docstring (the usage text above) and abort.
        print __doc__ % __version__
        sys.exit(1)
    try:
        options, files = getopt.getopt(
            sys.argv[1:],
            'R:I:D:vh',
            ['module=', 'multiple', 'out=', 'no-using', 'pyste-ns=', 'debug', 'cache-dir=',
             'only-create-cache', 'version', 'generate-main', 'file-list=', 'help',
             'gccxml-path=', 'no-default-include'])
    except getopt.GetoptError, e:
        print
        print 'ERROR:', e
        Usage()
    # Defaults for every option.
    default_includes = GetDefaultIncludes()
    includes = []
    defines = []
    module = None
    out = None
    multiple = False
    cache_dir = None
    create_cache = False
    generate_main = False
    gccxml_path = 'gccxml'
    for opt, value in options:
        if opt == '-I':
            includes.append(value)
        elif opt == '-D':
            defines.append(value)
        elif opt == '-R':
            # -R adds a directory and all of its subdirectories.
            includes.extend(RecursiveIncludes(value))
        elif opt == '--module':
            module = value
        elif opt == '--out':
            out = value
        elif opt == '--no-using':
            # Generate fully qualified boost::python:: names instead of
            # relying on a "using namespace" declaration.
            settings.namespaces.python = 'boost::python::'
            settings.USING_BOOST_NS = False
        elif opt == '--pyste-ns':
            settings.namespaces.pyste = value + '::'
        elif opt == '--debug':
            settings.DEBUG = True
        elif opt == '--multiple':
            multiple = True
        elif opt == '--cache-dir':
            cache_dir = value
        elif opt == '--only-create-cache':
            create_cache = True
        elif opt == '--file-list':
            # Interface files may also come from a list file, one per line.
            files += ReadFileList(value)
        elif opt in ['-h', '--help']:
            Usage()
        elif opt in ['-v', '--version']:
            print 'Pyste version %s' % __version__
            sys.exit(2)
        elif opt == '--generate-main':
            generate_main = True
        elif opt == '--gccxml-path':
            gccxml_path = value
        elif opt == '--no-default-include':
            default_includes = []
        else:
            print 'Unknown option:', opt
            Usage()
    # Default includes (from the INCLUDE env var) go first on the path.
    includes[0:0] = default_includes
    if not files:
        Usage()
    if not module:
        # Module name defaults to the first interface file name, no extension.
        module = os.path.splitext(os.path.basename(files[0]))[0]
    if not out:
        out = module
        if not multiple:
            out += '.cpp'
    for file in files:
        # Make each interface file's directory importable by Python, so
        # interface scripts can import their neighbours.
        d = os.path.dirname(os.path.abspath(file))
        if d not in sys.path:
            sys.path.append(d)
    if create_cache and not cache_dir:
        print 'Error: Use --cache-dir to indicate where to create the cache files!'
        Usage()
        sys.exit(3)
    if generate_main and not multiple:
        print 'Error: --generate-main only valid in multiple mode.'
        Usage()
        sys.exit(3)
    ProcessIncludes(includes)
    return includes, defines, module, out, files, multiple, cache_dir, create_cache, \
           generate_main, gccxml_path
def PCHInclude(*headers):
    """Queue each given header for the precompiled-header code section."""
    lines = ['#include <%s>' % h for h in headers]
    infos.CodeInfo('\n'.join(lines), 'pchinclude')
def CreateContext():
    """Create the globals dict in which an interface file is executed.

    Exposes the pyste declarative API (Function, Class, rename, policies,
    ...) to the interface script.
    """
    context = {
        'Import': Import,
        # infos
        'Function': infos.FunctionInfo,
        'Class': infos.ClassInfo,
        'Include': lambda header: infos.CodeInfo('#include <%s>\n' % header, 'include'),
        'PCHInclude': PCHInclude,
        'Template': infos.ClassTemplateInfo,
        'Enum': infos.EnumInfo,
        'AllFromHeader': infos.HeaderInfo,
        'Var': infos.VarInfo,
        # functions
        'rename': infos.rename,
        'set_policy': infos.set_policy,
        'exclude': infos.exclude,
        'set_wrapper': infos.set_wrapper,
        'use_shared_ptr': infos.use_shared_ptr,
        'use_auto_ptr': infos.use_auto_ptr,
        'holder': infos.holder,
        'add_method': infos.add_method,
        'final': infos.final,
        'export_values': infos.export_values,
        # policies
        'return_internal_reference': return_internal_reference,
        'with_custodian_and_ward': with_custodian_and_ward,
        'return_value_policy': return_value_policy,
        'reference_existing_object': reference_existing_object,
        'copy_const_reference': copy_const_reference,
        'copy_non_const_reference': copy_non_const_reference,
        'return_opaque_pointer': return_opaque_pointer,
        'manage_new_object': manage_new_object,
        'return_by_value': return_by_value,
        'return_self': return_self,
        # utils
        'Wrapper': exporterutils.FunctionWrapper,
        'declaration_code': lambda code: infos.CodeInfo(code, 'declaration-outside'),
        'module_code': lambda code: infos.CodeInfo(code, 'module'),
        'class_code': infos.class_code,
    }
    return context
def Begin():
    """Drive a full pyste run: parse args, execute interfaces, generate output.

    Returns the exit status of the selected generation step.
    """
    (includes, defines, module, out, interfaces, multiple,
     cache_dir, create_cache, generate_main, gccxml_path) = ParseArguments()
    # Execute every interface script first; they register the exporters.
    for interface in interfaces:
        ExecuteInterface(interface)
    parser = CppParser(includes, defines, cache_dir, declarations.version,
                      gccxml_path)
    try:
        if create_cache:
            return CreateCaches(parser)
        if generate_main:
            return GenerateMain(module, out, OrderInterfaces(interfaces))
        return GenerateCode(parser, module, out, interfaces, multiple)
    finally:
        parser.Close()
def CreateCaches(parser):
    """Parse every (interface, header) pair once and write its cache file.

    Returns 0 so the result can be used directly as an exit status.
    """
    # There is one cache file per interface so we organize the headers
    # by interfaces. For each interface collect the tails from the
    # exporters sharing the same header.
    tails = JoinTails(exporters.exporters)
    # now for each interface file take each header, and using the tail
    # get the declarations and cache them.
    for interface, header in tails:
        tail = tails[(interface, header)]
        declarations = parser.ParseWithGCCXML(header, tail)
        cachefile = parser.CreateCache(header, interface, tail, declarations)
        print 'Cached', cachefile
    return 0
_imported_count = {} # interface => count
def ExecuteInterface(interface):
    """Execute *interface* as a pyste script in a fresh context.

    Relative paths are resolved against the directory of the interface
    currently being executed (supports nested Import calls).  Raises
    IOError when the file cannot be found.  Also bumps the import count
    used later by OrderInterfaces.
    """
    old_interface = exporters.current_interface
    if not os.path.exists(interface):
        # Try resolving relative to the interface that is importing us.
        if old_interface and os.path.exists(old_interface):
            d = os.path.dirname(old_interface)
            interface = os.path.join(d, interface)
    if not os.path.exists(interface):
        raise IOError, "Cannot find interface file %s."%interface
    _imported_count[interface] = _imported_count.get(interface, 0) + 1
    exporters.current_interface = interface
    context = CreateContext()
    context['INTERFACE_FILE'] = os.path.abspath(interface)
    execfile(interface, context)
    exporters.current_interface = old_interface
def Import(interface):
    """Execute another interface file with the 'importing' flag raised.

    try/finally guarantees the flag is cleared even when the imported
    interface raises; the original left exporters.importing stuck at True
    in that case.
    """
    exporters.importing = True
    try:
        ExecuteInterface(interface)
    finally:
        exporters.importing = False
def JoinTails(exports):
    '''Returns a dict of {(interface, header): tail}, where tail is the
    joining of all tails of all exports for the header.
    '''
    tails = {}
    for export in exports:
        key = (export.interface_file, export.Header())
        tail = export.Tail() or ''
        if key in tails:
            tails[key] = tails[key] + '\n' + tail
        else:
            tails[key] = tail
    return tails
def OrderInterfaces(interfaces):
    """Return the interfaces ordered by import count, most-imported first."""
    ranked = sorted(((_imported_count[name], name) for name in interfaces),
                    reverse=True)
    return [name for _, name in ranked]
def GenerateMain(module, out, interfaces):
    """Write the _main.cpp for *module* covering the given interfaces."""
    unit = MultipleCodeUnit.MultipleCodeUnit(module, out)
    unit.GenerateMain(interfaces)
    return 0
def GenerateCode(parser, module, out, interfaces, multiple):
    """Parse every export's header and emit the wrapper code unit(s).

    Exports are grouped by interface file and processed in the order given
    by OrderInterfaces.  Returns 0 as the exit status.
    """
    # prepare to generate the wrapper code
    if multiple:
        codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
    else:
        codeunit = SingleCodeUnit.SingleCodeUnit(module, out)
    # stop referencing the exporters here
    exports = exporters.exporters
    exporters.exporters = None
    exported_names = dict([(x.Name(), None) for x in exports])
    # order the exports
    order = {}
    for export in exports:
        if export.interface_file in order:
            order[export.interface_file].append(export)
        else:
            order[export.interface_file] = [export]
    exports = []
    interfaces_order = OrderInterfaces(interfaces)
    for interface in interfaces_order:
        exports.extend(order[interface])
    del order
    del interfaces_order
    # now generate the code in the correct order
    #print exported_names
    tails = JoinTails(exports)
    for i in xrange(len(exports)):
        export = exports[i]
        interface = export.interface_file
        header = export.Header()
        if header:
            tail = tails[(interface, header)]
            declarations, parsed_header = parser.Parse(header, interface, tail)
        else:
            # No header: export is defined entirely by the interface file.
            declarations = []
            parsed_header = None
        ExpandTypedefs(declarations, exported_names)
        export.SetDeclarations(declarations)
        export.SetParsedHeader(parsed_header)
        if multiple:
            codeunit.SetCurrent(export.interface_file, export.Name())
        export.GenerateCode(codeunit, exported_names)
        # force collect of cyclic references
        exports[i] = None
        del declarations
        del export
        gc.collect()
    # finally save the code unit
    codeunit.Save()
    if not multiple:
        print 'Module %s generated' % module
    return 0
def ExpandTypedefs(decls, exported_names):
    '''Check if the names in exported_names are a typedef, and add the real class
    name in the dict.
    '''
    # NOTE(review): 'name' is never used inside the loop, so every Typedef in
    # decls gets registered regardless of the exported name being inspected --
    # looks like a latent bug (a 'decl.name == name' comparison seems to have
    # been intended); confirm before changing behaviour.
    for name in exported_names.keys():
        for decl in decls:
            if isinstance(decl, declarations.Typedef):
                exported_names[decl.type.FullName()] = None
def UsePsyco():
    """Enable the psyco JIT if it is installed; silently do nothing otherwise.

    Only ImportError is swallowed now -- the original bare 'except: pass'
    also hid KeyboardInterrupt and genuine errors from psyco.profile().
    """
    try:
        import psyco
    except ImportError:
        return
    psyco.profile()
def main():
    """Entry point: time the whole run and exit with Begin()'s status."""
    # time.clock (Python 2 era; removed in 3.8) -- CPU time on Unix,
    # wall-clock time on Windows.
    start = time.clock()
    UsePsyco()
    status = Begin()
    print '%0.2f seconds' % (time.clock()-start)
    sys.exit(status)
if __name__ == '__main__':
    main()
|
alexa-infra/negine
|
thirdparty/boost-python/libs/python/pyste/src/Pyste/pyste.py
|
Python
|
mit
| 14,446
|
[
"VisIt"
] |
cd9d27216ab605573ad0eeaad6d34d3f3592de027fed43917d03874842d4663d
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from cinder import test
from cinder.api.openstack import xmlutil
class SelectorTest(test.TestCase):
    """Tests for xmlutil.Selector and xmlutil.ConstantSelector lookups."""
    # Nested fixture walked by every selector test below.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {
                'foo': 1,
                'bar': 2,
                'baz': 3,
            },
        },
    }
    def test_empty_selector(self):
        # An empty chain returns the object itself.
        sel = xmlutil.Selector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])
    def test_datum_selector(self):
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')
    def test_list_selector(self):
        # Integer chain links index into lists.
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)
    def test_items_selector(self):
        # get_items as the final link yields (key, value) pairs.
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
    def test_missing_key_selector(self):
        # Missing keys yield None unless do_raise=True is passed.
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertEqual(sel(self.obj_for_test), None)
        self.assertRaises(KeyError, sel, self.obj_for_test, True)
    def test_constant_selector(self):
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
    """Tests for xmlutil.TemplateElement: attribute handling, child
    management, text selectors, and rendering against objects."""
    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)
    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)
    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])
    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)
    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)
    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)
    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')
    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)
    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.subselector, None)
    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')
    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)
    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertEqual('child' in elem, True)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)
    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertEqual(children[idx].tag in elem, True)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [
            xmlutil.TemplateElement('child4'),
            xmlutil.TemplateElement('child1'),
        ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')
    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertEqual(children[idx].tag in elem, True)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)
    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')
        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertEqual('child2' in elem, False)
        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')
    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertEqual(elem.text, None)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertEqual(elem.text, None)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertEqual(elem.text, None)
    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)
    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [
            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
        ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')
    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])
    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)
    def test_dyntag(self):
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
    """Tests for Template / MasterTemplate / SlaveTemplate: wrapping,
    slave attachment, version applicability, and full serialization."""
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(tmpl.unwrap(), elem)
        self.assertEqual(tmpl.wrap(), tmpl)
    def test__siblings(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(len(siblings), 1)
        self.assertEqual(siblings[0], elem)
    def test__nsmap(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary
        nsmap = tmpl._nsmap()
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(len(nsmap), 1)
        self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid slave
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'values': [1, 2, 3, 4],
                'attrs': {
                    'a': 1,
                    'b': 2,
                    'c': 3,
                    'd': 4,
                },
                'image': {
                    'name': 'image_foobar',
                    'id': 42,
                },
            },
        }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual(result.tag, 'test')
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        idx += 1
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a version-1 master template for the tests below."""
    def construct(self):
        root = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(root, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a version-1 slave template for the tests below."""
    def construct(self):
        root = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(root, 1)
class TemplateBuilderTest(test.TestCase):
    """Tests for TemplateBuilder caching: masters are copied per call,
    slaves are shared."""
    def test_master_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertEqual(MasterTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
    def test_slave_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertEqual(SlaveTemplateBuilder._tmpl, None)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
    """Tests for standalone helper functions in xmlutil."""
    def test_make_flat_dict(self):
        # Serializing a flat dict through make_flat_dict yields one child
        # element per key, wrapped in the named root tag.
        template = xmlutil.MasterTemplate(xmlutil.make_flat_dict('wrapper'), 1)
        serialized = template.serialize({'wrapper': {'a': 'foo', 'b': 'bar'}})
        self.assertEqual(serialized,
                         "<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<wrapper><a>foo</a><b>bar</b></wrapper>')
|
tylertian/Openstack
|
openstack F/cinder/cinder/tests/api/openstack/test_xmlutil.py
|
Python
|
apache-2.0
| 25,721
|
[
"VisIt"
] |
4556bc78536abf25f5bf05b2199eb37318d5246352720ea4eb66bf9febfeda8b
|
"""
Plots the 2D vorticity field from a IBAMR simulation at saved time-steps using
the visualization software VisIt.
"""
from snake.ibamr.simulation import IBAMRSimulation
simulation = IBAMRSimulation()
body_name = 'flyingSnake2dAoA35ds004filledInside' # file name (no extension)
simulation.plot_field_contours_visit('vorticity', (-5.0, 5.0),
body=body_name,
solution_folder='numericalSolution',
view=(-2.0, -5.0, 15.0, 5.0),
width=800)
|
barbagroup/cuIBM
|
external/snake-0.3/examples/ibamr/plotVorticity.py
|
Python
|
mit
| 590
|
[
"VisIt"
] |
b2606fba2bccb4432752b1c967e2d73fd7cab5a93dd93c7041ea7d5e467b7789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`gp.py` - Gaussian Processes
------------------------------------
Routines for optimizing the GP hyperparameters for a given light curve.
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from .mathutils import Chunks
from scipy.linalg import cho_factor, cho_solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.signal import savgol_filter
import numpy as np
np.random.seed(48151623)
from distutils.version import LooseVersion, StrictVersion
import george
from george.kernels import Matern32Kernel, ExpSine2Kernel
# Determine whether the installed george is older than 0.3.0. george may
# report a non-PEP-386 version string, in which case StrictVersion raises
# and we fall back to LooseVersion for *both* sides of the comparison.
try:
    george_version = StrictVersion(george.__version__)
except ValueError:
    george_version = LooseVersion(george.__version__)
    comp_version = LooseVersion("0.3.0")
else:
    comp_version = StrictVersion("0.3.0")
if george_version < comp_version:
    # Pre-0.3.0 george models white noise as an additive kernel.
    from george.kernels import WhiteKernel
    OLDGEORGE = True
else:
    OLDGEORGE = False
import logging
log = logging.getLogger(__name__)
def GP(kernel, kernel_params, white=False):
    '''
    Returns a :py:obj:`george.GP` instance for the requested kernel.

    :param str kernel: One of `Basic` (Matern-3/2) or `QuasiPeriodic` \
    (exponential sine-squared)
    :param array_like kernel_params: The kernel hyperparameters; the first \
    entry is always the white noise amplitude
    :param bool white: Include the white noise term in the GP itself? \
    Default :py:obj:`False`

    :raises ValueError: If `kernel` is not one of the supported names.
    '''
    # Build the red noise kernel first; white noise handling below is
    # identical for both kernel choices.
    if kernel == 'Basic':
        w, a, t = kernel_params
        red_kernel = a ** 2 * Matern32Kernel(t ** 2)
    elif kernel == 'QuasiPeriodic':
        w, a, g, p = kernel_params
        red_kernel = a ** 2 * ExpSine2Kernel(g, p)
    else:
        raise ValueError('Invalid value for `kernel`.')
    if not white:
        return george.GP(red_kernel)
    if OLDGEORGE:
        # Old george versions model white noise as an additive kernel.
        return george.GP(WhiteKernel(w ** 2) + red_kernel)
    # Newer george versions take white noise as a separate log-variance term.
    return george.GP(red_kernel,
                     white_noise=np.log(w ** 2),
                     fit_white_noise=True)
def GetCovariance(kernel, kernel_params, time, errors):
    '''
    Returns the covariance matrix for a given light curve
    segment.

    :param array_like kernel_params: A list of kernel parameters \
    (white noise amplitude, red noise amplitude, and red noise timescale)
    :param array_like time: The time array (*N*)
    :param array_like errors: The data error array (*N*)

    :returns: The covariance matrix :py:obj:`K` (*N*,*N*)
    '''
    # NOTE: The GP white noise term is deliberately excluded here; the
    # measurement errors supply the diagonal instead.
    gp = GP(kernel, kernel_params, white=False)
    return gp.get_matrix(time) + np.diag(errors ** 2)
def GetKernelParams(time, flux, errors, kernel='Basic', mask=None,
                    giter=3, gmaxf=200, guess=None):
    '''
    Optimizes the GP by training it on the current de-trended light curve.
    Returns the white noise amplitude, red noise amplitude,
    and red noise timescale.

    :param array_like time: The time array
    :param array_like flux: The flux array
    :param array_like errors: The flux errors array
    :param str kernel: `Basic` or `QuasiPeriodic`. Default `Basic`
    :param array_like mask: The indices to be masked when training the GP. \
    Default :py:obj:`None` (no masking; replaces the old mutable `[]` default)
    :param int giter: The number of iterations. Default 3
    :param int gmaxf: The maximum number of function evaluations. Default 200
    :param tuple guess: The guess to initialize the minimization with. \
    Default :py:obj:`None`

    :raises ValueError: If `kernel` is not one of the supported names.
    '''
    log.info("Optimizing the GP...")
    # `mask` is only ever read, so `None` behaves identically to `[]`
    # while avoiding a mutable default argument.
    if mask is None:
        mask = []
    # Apply the user-supplied mask
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)
    errors = np.delete(errors, mask)
    # Remove 5-sigma outliers (w.r.t. a Savitzky-Golay smoothed flux) to be safe
    f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0]
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)
    errors = np.delete(errors, mask)
    # Initial guesses and bounds
    white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)])
    amp = np.nanstd(flux)
    tau = 30.0
    if kernel == 'Basic':
        if guess is None:
            guess = [white, amp, tau]
        bounds = [[0.1 * white, 10. * white],
                  [1., 10000. * amp],
                  [0.5, 100.]]
    elif kernel == 'QuasiPeriodic':
        if guess is None:
            # BUGFIX: the default guess must have one entry per kernel
            # parameter (white noise, amplitude, gamma, period). It used to
            # also include `tau`, yielding 5 entries against the 4 bounds
            # below, which makes `fmin_l_bfgs_b` fail on an x0/bounds
            # length mismatch.
            guess = [white, amp, 1., 20.]
        bounds = [[0.1 * white, 10. * white],
                  [1., 10000. * amp],
                  [1e-5, 1e2],
                  [0.02, 100.]]
    else:
        raise ValueError('Invalid value for `kernel`.')
    # Loop over `giter` random restarts, keeping the best solution
    llbest = -np.inf
    xbest = np.array(guess)
    for i in range(giter):
        # Randomize an initial guess: perturb each parameter by ~50% until
        # it lands inside its bounds; after 100 tries fall back to a
        # uniform draw within the bounds.
        iguess = [np.inf for g in guess]
        for j, b in enumerate(bounds):
            tries = 0
            while (iguess[j] < b[0]) or (iguess[j] > b[1]):
                iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j]
                tries += 1
                if tries > 100:
                    iguess[j] = b[0] + np.random.random() * (b[1] - b[0])
                    break
        # Optimize. `NegLnLike` returns both the objective and its
        # gradient, hence `approx_grad=False`.
        x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False,
                          bounds=bounds, args=(time, flux, errors, kernel),
                          maxfun=gmaxf)
        log.info('Iteration #%d/%d:' % (i + 1, giter))
        log.info(' ' + x[2]['task'].decode('utf-8'))
        log.info(' ' + 'Function calls: %d' % x[2]['funcalls'])
        log.info(' ' + 'Log-likelihood: %.3e' % -x[1])
        if kernel == 'Basic':
            log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
                     (x[0][0], x[0][0] / np.nanmedian(errors)))
            log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
                     (x[0][1], x[0][1] / np.nanstd(flux)))
            log.info(' ' + 'Red timescale : %.2f days' % x[0][2])
        elif kernel == 'QuasiPeriodic':
            log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
                     (x[0][0], x[0][0] / np.nanmedian(errors)))
            log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
                     (x[0][1], x[0][1] / np.nanstd(flux)))
            log.info(' ' + 'Gamma : %.3e' % x[0][2])
            log.info(' ' + 'Period : %.2f days' % x[0][3])
        # Keep the restart with the highest log-likelihood
        if -x[1] > llbest:
            llbest = -x[1]
            xbest = np.array(x[0])
    return xbest
def NegLnLike(x, time, flux, errors, kernel):
    '''
    Returns the negative log-likelihood function and its gradient.

    :param array_like x: The kernel hyperparameters (white noise first)
    :param array_like time: The time array
    :param array_like flux: The flux array
    :param array_like errors: The flux errors array
    :param str kernel: `Basic` or `QuasiPeriodic`, passed through to `GP`

    :returns: A tuple `(nll, ngr)` of the negative log-likelihood and its \
    gradient with respect to `x`, suitable for `fmin_l_bfgs_b`
    '''
    gp = GP(kernel, x, white=True)
    gp.compute(time, errors)
    if OLDGEORGE:
        nll = -gp.lnlikelihood(flux)
        # NOTE: There was a bug on this next line! Used to be
        #
        # ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars
        #
        # But I think we want
        #
        # dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx
        #          = gp.grad_lnlikelihood() * 1/x^2 * 2x
        #          = 2 * gp.grad_lnlikelihood() / x
        #          = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2)
        #          = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars)
        #
        # (with a negative sign out front for the negative gradient).
        # So we probably weren't optimizing the GP correctly! This affects
        # all campaigns through C13. It's not a *huge* deal, since the sign
        # of the gradient was correct and the model isn't that sensitive to
        # the value of the hyperparameters, but it may have contributed to
        # the poor performance on super variable stars. In most cases it means
        # the solver takes longer to converge and isn't as good at finding
        # the minimum.
        ngr = -2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars)
    else:
        # Newer george exposes log-parameters, hence the np.exp here.
        nll = -gp.log_likelihood(flux)
        ngr = -2 * gp.grad_log_likelihood(flux) / \
            np.sqrt(np.exp(gp.get_parameter_vector()))
    return nll, ngr
|
rodluger/everest
|
everest/gp.py
|
Python
|
mit
| 8,337
|
[
"Gaussian"
] |
ecbd88156b69af29d098ddfb75045ea6cc04931dedad4ad01472f6e4c37cec9b
|
import numpy as np
import pele.utils.readAmberParam as readAmb
import ambgmin_ as GMIN
import pele.potentials.gminpotential as gminpot
import pele.basinhopping as bh
from pele.storage import savenlowest
from pele.NEB import NEB
from pele.takestep import displace
class molSystem:
    """GMIN-backed molecular system for the pele GUI.

    Provides a basin-hopping optimizer, a NEB constructor, and OpenGL
    drawing callbacks (atoms as spheres, bonds as cylinders).
    """

    def __init__(self):
        # Keep the 10 lowest-energy minima found during basin hopping.
        self.storage = savenlowest.SaveN(10)
        GMIN.initialize()
        # self.bondList = bondList

    def createBasinHopping(self):
        """Return a BasinHopping optimizer driven by the GMIN potential."""
        GMIN.initialize()
        pot = gminpot.GMINPotental(GMIN)
        coords = pot.getCoords()
        step = displace.RandomDisplacement()
        opt = bh.BasinHopping(coords, pot, takeStep=step, temperature=0.4,
                              storage=self.storage)
        return opt

    def drawCylinder(self, X1, X2):
        """Draw a cylinder from point X1 to point X2 using OpenGL."""
        from OpenGL import GL, GLU
        z = np.array([0., 0., 1.])  # default cylinder orientation
        p = X2 - X1  # desired cylinder orientation
        r = np.linalg.norm(p)
        t = np.cross(z, p)  # axis about which to rotate
        a = np.arccos(np.dot(z, p) / r)  # rotation angle
        a *= (180. / np.pi)  # change units to degrees
        GL.glPushMatrix()
        GL.glTranslate(X1[0], X1[1], X1[2])
        GL.glRotate(a, t[0], t[1], t[2])
        g = GLU.gluNewQuadric()
        GLU.gluCylinder(g, .1, 0.1, r, 30, 30)  # I can't seem to draw a cylinder
        GL.glPopMatrix()

    def draw(self, coordsl, index):
        """Render the flattened coordinate array `coordsl` (centered on its
        center of mass): one sphere per atom plus a cylinder per bond read
        from the Amber parameter file."""
        from OpenGL import GL, GLUT
        # BUGFIX: use floor division. `coordsl.size / 3` is a float under
        # Python 3 and numpy's reshape rejects non-integer dimensions;
        # `//` is identical under Python 2.
        coords = coordsl.reshape(coordsl.size // 3, 3)
        # coords = coords.reshape(GMIN.getNAtoms, 3)
        com = np.mean(coords, axis=0)
        for xx in coords:
            x = xx - com
            GL.glPushMatrix()
            GL.glTranslate(x[0], x[1], x[2])
            GLUT.glutSolidSphere(0.3, 30, 30)
            GL.glPopMatrix()
        # get bond list from amber params
        mol = readAmb.readAmberParam()
        mol.populateBondConn()
        # draw bonds (bondConn indices are 1-based, hence the -1)
        for atomPairs in mol.bondConn:
            xyz1 = coords[atomPairs[0] - 1] - com
            xyz2 = coords[atomPairs[1] - 1] - com
            self.drawCylinder(xyz1, xyz2)

    def createNEB(self, coords1, coords2):
        """Return a nudged-elastic-band interpolation between two geometries."""
        pot = gminpot.GMINPotental(GMIN)
        return NEB.NEB(coords1, coords2, pot, k=100., nimages=20)
if __name__ == "__main__":
import pele.gui.run as gr
gr.run_gui(molSystem)
|
kjs73/pele
|
playground/amber/metenk/run_gui.py
|
Python
|
gpl-3.0
| 2,407
|
[
"Amber"
] |
d86f15f16b8c8c45201bf66e94b0215ab851342a0690d88fcf977232edb0c868
|
"""
BLAST+6 format (:mod:`skbio.io.format.blast6`)
==============================================
.. currentmodule:: skbio.io.format.blast6
The BLAST+6 format (``blast+6``) stores the results of a BLAST [1]_ database
search. The results are stored in a simple tabular format with no column
headers. Values are separated by the tab character.
An example BLAST+6-formatted file comparing two protein sequences, taken
from [2]_ (tab characters represented by ``<tab>``)::
moaC<tab>gi|15800534|ref|NP_286546.1|<tab>100.00<tab>161<tab>0<tab>0<tab>1\
<tab>161<tab>1<tab>161<tab>3e-114<tab>330
moaC<tab>gi|170768970|ref|ZP_02903423.1|<tab>99.38<tab>161<tab>1<tab>0\
<tab>1<tab>161<tab>1<tab>161<tab>9e-114<tab>329
Format Support
--------------
**Has Sniffer: No**
**State: Experimental as of 0.4.1.**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |No |:mod:`pandas.DataFrame` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
BLAST+6 format is a tabular text-based format produced by both BLAST+ output
format 6 (``-outfmt 6``) and legacy BLAST output format 8 (``-m 8``). It is
tab-separated and has no column headers. With BLAST+, users can specify the
columns that are present in their BLAST output file by specifying column names
(e.g., ``-outfmt "6 qseqid sseqid bitscore qstart sstart"``), if the default
columns output by BLAST are not desired.
BLAST Column Types
^^^^^^^^^^^^^^^^^^
The following column types are output by BLAST and supported by scikit-bio.
This information is taken from [3]_.
+-----------+------------------------------------+-----+
|Name |Description |Type |
+===========+====================================+=====+
|qseqid |Query Seq-id |str |
+-----------+------------------------------------+-----+
|qgi |Query GI |int |
+-----------+------------------------------------+-----+
|qacc       |Query accession                     |str  |
+-----------+------------------------------------+-----+
|qaccver    |Query accession.version             |str  |
+-----------+------------------------------------+-----+
|qlen |Query sequence length |int |
+-----------+------------------------------------+-----+
|sseqid |Subject Seq-id |str |
+-----------+------------------------------------+-----+
|sallseqid |All subject Seq-id(s), separated by |str |
| |a ';' | |
+-----------+------------------------------------+-----+
|sgi |Subject GI |int |
+-----------+------------------------------------+-----+
|sallgi |All subject GIs |int |
+-----------+------------------------------------+-----+
|sacc       |Subject accession                   |str  |
+-----------+------------------------------------+-----+
|saccver    |Subject accession.version           |str  |
+-----------+------------------------------------+-----+
|sallacc    |All subject accessions              |str  |
+-----------+------------------------------------+-----+
|slen |Subject sequence length |int |
+-----------+------------------------------------+-----+
|qstart |Start of alignment in query |int |
+-----------+------------------------------------+-----+
|qend |End of alignment in query |int |
+-----------+------------------------------------+-----+
|sstart |Start of alignment in subject |int |
+-----------+------------------------------------+-----+
|send |End of alignment in subject |int |
+-----------+------------------------------------+-----+
|qseq |Aligned part of query sequence |str |
+-----------+------------------------------------+-----+
|sseq |Aligned part of subject sequence |str |
+-----------+------------------------------------+-----+
|evalue |Expect value |float|
+-----------+------------------------------------+-----+
|bitscore |Bit score |float|
+-----------+------------------------------------+-----+
|score |Raw score |int |
+-----------+------------------------------------+-----+
|length |Alignment length |int |
+-----------+------------------------------------+-----+
|pident |Percent of identical matches |float|
+-----------+------------------------------------+-----+
|nident |Number of identical matches |int |
+-----------+------------------------------------+-----+
|mismatch |Number of mismatches |int |
+-----------+------------------------------------+-----+
|positive |Number of positive-scoring matches |int |
+-----------+------------------------------------+-----+
|gapopen |Number of gap openings |int |
+-----------+------------------------------------+-----+
|gaps |Total number of gaps |int |
+-----------+------------------------------------+-----+
|ppos |Percentage of positive-scoring matc\|float|
| |hes | |
+-----------+------------------------------------+-----+
|frames |Query and subject frames separated |str |
| |by a '/' | |
+-----------+------------------------------------+-----+
|qframe |Query frame |int |
+-----------+------------------------------------+-----+
|sframe |Subject frame |int |
+-----------+------------------------------------+-----+
|btop |Blast traceback operations (BTOP) |int |
+-----------+------------------------------------+-----+
|staxids |Unique Subject Taxonomy ID(s), sepa\|str |
| |rated by a ';' (in numerical order) | |
+-----------+------------------------------------+-----+
|sscinames |Unique Subject Scientific Name(s), |str |
| |separated by a ';' | |
+-----------+------------------------------------+-----+
|scomnames |Unique Subject Common Name(s), sepa\|str |
| |rated by a ';' | |
+-----------+------------------------------------+-----+
|sblastnames|unique Subject Blast Name(s), separ\|str |
| |ated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|sskingdoms |unique Subject Super Kingdom(s), se\|str |
| |parated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|stitle |Subject Title |str |
+-----------+------------------------------------+-----+
|sstrand |Subject Strand |str |
+-----------+------------------------------------+-----+
|salltitles |All Subject Title(s), separated by |str |
| |a '<>' | |
+-----------+------------------------------------+-----+
|qcovs |Query Coverage Per Subject |int |
+-----------+------------------------------------+-----+
|qcovhsp |Query Coverage Per HSP |int |
+-----------+------------------------------------+-----+
.. note:: When a BLAST+6-formatted file contains ``N/A`` values, scikit-bio
will convert these values into ``np.nan``, matching pandas' convention for
representing missing data.
.. note:: scikit-bio stores columns of type ``int`` as type ``float`` in the
returned ``pd.DataFrame``. This is necessary in order to allow ``N/A``
values in integer columns (this is currently a limitation of pandas).
Format Parameters
-----------------
The following format parameters are available in ``blast+6`` format:
- ``default_columns``: ``False`` by default. If ``True``, will use the default
columns output by BLAST, which are qseqid, sseqid, pident, length, mismatch,
gapopen, qstart, qend, sstart, send, evalue, and bitscore.
.. warning:: When reading legacy BLAST files, you must pass
``default_columns=True`` because legacy BLAST does not allow users to
specify which columns are present in the output file.
- ``columns``: ``None`` by default. If provided, must be a list of column names
in the order they will appear in the file.
.. note:: Either ``default_columns`` or ``columns`` must be provided, as
``blast+6`` does not contain column headers.
Examples
--------
Suppose we have a ``blast+6`` file with default columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\tgi|15800534|ref|NP_286546.1|\\t100.00\\t161\\t0\\t0\\t1\\t161\
\\t1\\t161\\t3e-114\\t330',
... 'moaC\\tgi|170768970|ref|ZP_02903423.1|\\t99.38\\t161\\t1\\t0\\t1\\t\
161\\t1\\t161\\t9e-114\\t329'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify that default columns should
be used:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... default_columns=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid sseqid pident length mismatch gapopen \\
0 moaC gi|15800534|ref|NP_286546.1| 100.00 161.0 0.0 0.0
1 moaC gi|170768970|ref|ZP_02903423.1| 99.38 161.0 1.0 0.0
<BLANKLINE>
qstart qend sstart send evalue bitscore
0 1.0 161.0 1.0 161.0 3.000000e-114 330.0
1 1.0 161.0 1.0 161.0 9.000000e-114 329.0
Suppose we have a ``blast+6`` file with user-supplied (non-default) columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\t100.00\\t0\\t161\\t0\\t161\\t330\\t1',
... 'moaC\\t99.38\\t1\\t161\\t0\\t161\\t329\\t1'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify which columns are present
in the file:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... columns=['qseqid', 'pident', 'mismatch', 'length',
... 'gapopen', 'qend', 'bitscore', 'sstart'])
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid pident mismatch length gapopen qend bitscore sstart
0 moaC 100.00 0.0 161.0 0.0 161.0 330.0 1.0
1 moaC 99.38 1.0 161.0 0.0 161.0 329.0 1.0
References
----------
.. [1] Altschul, S.F., Gish, W., Miller, W., Myers, E.W. & Lipman, D.J. (1990)
"Basic local alignment search tool." J. Mol. Biol. 215:403-410.
.. [2] http://blastedbio.blogspot.com/2014/11/column-headers-in-blast-tabular-\
and-csv.html
.. [3] http://www.ncbi.nlm.nih.gov/books/NBK279675/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
from skbio.io import create_format
from skbio.io.format._blast import _parse_blast_data, _possible_columns
# Register the `blast+6` format with scikit-bio's I/O registry.
blast6 = create_format('blast+6')

# Columns produced by BLAST+ `-outfmt 6` (and legacy BLAST `-m 8`) when the
# user does not request a custom column list.
_default_columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
                    'gapopen', 'qstart', 'qend', 'sstart', 'send',
                    'evalue', 'bitscore']
@blast6.reader(pd.DataFrame, monkey_patch=False)
def _blast6_to_data_frame(fh, columns=None, default_columns=False):
    """Read a BLAST+6 file into a ``pd.DataFrame``.

    Exactly one of `columns` / `default_columns` must be supplied; the
    resulting column names are validated against the known BLAST columns.
    """
    if default_columns and columns is not None:
        raise ValueError("`columns` and `default_columns` cannot both be"
                         " provided.")
    if not default_columns and columns is None:
        raise ValueError("Either `columns` or `default_columns` must be"
                         " provided.")
    if default_columns:
        columns = _default_columns
    else:
        # Reject the first column name we don't recognize.
        unknown = next((name for name in columns
                        if name not in _possible_columns), None)
        if unknown is not None:
            raise ValueError("Unrecognized column (%r)."
                             " Supported columns:\n%r" %
                             (unknown, set(_possible_columns.keys())))
    return _parse_blast_data(fh, columns, ValueError,
                             "Specified number of columns (%r) does not equal"
                             " number of columns in file (%r).")
|
anderspitman/scikit-bio
|
skbio/io/format/blast6.py
|
Python
|
bsd-3-clause
| 12,758
|
[
"BLAST",
"scikit-bio"
] |
a84835f79a6acccfdef9db87cfda17412074644b07a10885f19890241c58ada1
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import datetime
import mock
import unittest
from stoqlib.gui.editors.accounteditor import AccountEditor
from stoqlib.gui.editors.accounttransactioneditor import AccountTransactionEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestAccountTransactionEditor(GUITest):
    """GUI snapshot tests for AccountTransactionEditor."""

    def test_create(self):
        """Opening the editor with no transaction matches the
        'editor-transaction-create' snapshot."""
        account = self.create_account()
        editor = AccountTransactionEditor(self.store, None, account)
        # Pin the date so the snapshot is deterministic.
        editor.date.update(datetime.date.today())
        self.check_editor(editor, 'editor-transaction-create')

    def test_show(self):
        """Opening the editor on an existing transaction matches the
        'editor-transaction-show' snapshot."""
        account = self.create_account()
        transaction = self.create_account_transaction(account)
        editor = AccountTransactionEditor(self.store, transaction, account)
        editor.date.update(datetime.date.today())
        self.check_editor(editor, 'editor-transaction-show')

    def test_confirm(self):
        """The editor only validates once description, code and value are
        filled in; confirming matches the snapshot."""
        account = self.create_account()
        editor = AccountTransactionEditor(self.store, None, account)
        self.assertFalse(editor.validate_confirm())
        editor.description.update('description')
        editor.code.update(15)
        editor.value.update(150)
        self.assertTrue(editor.validate_confirm())
        editor.main_dialog.confirm()
        self.check_editor(editor, 'editor-transaction-confirm',
                          [editor.retval, account])

    @mock.patch('stoqlib.gui.editors.accounttransactioneditor.api.new_store')
    @mock.patch('stoqlib.gui.editors.accounttransactioneditor.run_dialog')
    def test_add_account(self, run_dialog, new_store):
        """Clicking 'add account' opens an AccountEditor parented on the
        currently selected account."""
        new_account = self.create_account()
        new_store.return_value = self.store
        run_dialog.return_value = new_account
        editor = AccountTransactionEditor(self.store, None, new_account)
        parent_account = editor._get_account()
        # Patch commit/close so clicking doesn't touch the real database.
        with mock.patch.object(self.store, 'commit'):
            with mock.patch.object(self.store, 'close'):
                self.click(editor.add_account)
        run_dialog.assert_called_once_with(AccountEditor, editor,
                                           self.store,
                                           parent_account=parent_account)
if __name__ == '__main__':
    # Allow running this test module directly against a prepared test store.
    from stoqlib.api import api
    c = api.prepare_test()
    unittest.main()
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_account_transaction_editor.py
|
Python
|
gpl-2.0
| 3,202
|
[
"VisIt"
] |
24b0e3d16fb5ceec46ed1f6c6943924c9db443ac0098ffedbbdbe59041af6742
|
#! /bin/python
# $Id
# -----------------------------------------------------------------------------
# CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-16 Bradley M. Bell
#
# CppAD is distributed under multiple licenses. This distribution is under
# the terms of the
# Eclipse Public License Version 1.0.
#
# A copy of this license is included in the COPYING file of this distribution.
# Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
# -----------------------------------------------------------------------------
from __future__ import print_function
# -----------------------------------------------------------------------------
# List of svn commands to execute in the svn directory before making the
# changes indicated by the git directory; some example commands are
# included (commented out) below.
svn_commands = [
    # 'svn mkdir cppad/utility',
    # 'svn move cppad/*.hpp cppad/utility',
    # 'svn move cppad/utility/cppad.hpp cppad/cppad.hpp',
    # 'svn move cppad/utility/base_require.hpp cppad/base_require.hpp',
    # 'svn move omh/library.omh omh/utility.omh'
]
# -----------------------------------------------------------------------------
# imports
import sys
import os
import re
import subprocess
import pdb
# -----------------------------------------------------------------------------
# command line arguments
usage = '\tbin/push_git2svn.py svn_branch_path\n'
narg = len(sys.argv)
# The script must be invoked from the repository root as bin/push_git2svn.py
# because all paths below are relative to it.
if sys.argv[0] != 'bin/push_git2svn.py' :
    msg = 'bin/push_git2svn.py must be executed from its parent directory'
    sys.exit(usage + msg)
if narg != 2 :
    msg = 'expected 1 but found ' + str(narg-1) + ' command line arguments'
    sys.exit(usage + msg)
svn_branch_path = sys.argv[1]
if svn_branch_path == 'master' :
    msg = 'trunk is the svn_branch_path for the master branch'
    sys.exit(usage + msg)
# -----------------------------------------------------------------------------
# some settings: the two remote repositories and the local scratch directory
svn_repository = 'https://projects.coin-or.org/svn/CppAD'
git_repository = 'https://github.com/coin-or/CppAD'
work_directory = 'build/work'
# -----------------------------------------------------------------------------
# some simple functions
def pause(question, choice_list) :
    """Prompt on stdout and read stdin lines until the (newline-stripped)
    response is one of `choice_list`; return that response."""
    reply = ''
    while reply not in choice_list :
        print(question, end='')
        reply = sys.stdin.readline()
        # strip a single trailing newline, if present
        reply = reply[:-1] if reply.endswith('\n') else reply
    return reply
#
def system(cmd) :
    """Run `cmd` through the shell and return its combined stdout/stderr;
    on failure, abort the whole script with a diagnostic that includes the
    failing command."""
    try :
        return subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True
        )
    except subprocess.CalledProcessError as info :
        msg = info.output
        msg += '\nbin/push_git2svn.py exiting because command above failed'
        sys.exit(cmd + '\n\n' + msg)
def print_system(cmd) :
    """Like `system`, but echo the command to stdout before running it;
    the failure diagnostic omits the command (it was already printed)."""
    print(cmd)
    try :
        return subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            shell=True
        )
    except subprocess.CalledProcessError as info :
        msg = info.output
        msg += '\nbin/push_git2svn.py exiting because command above failed'
        sys.exit(msg)
# Lines containing a `$Id` keyword are volatile (svn expands them), so their
# content is blanked out before comparing file contents between svn and git.
id_pattern = re.compile(r'^.*\$Id.*$', re.MULTILINE)
def ignore_data(data) :
    """Return `data` with the content of every line containing `$Id` removed
    (the line itself remains, empty)."""
    return id_pattern.sub('', data)
# -----------------------------------------------------------------------------
# determine git_branch_name (and the branch path used on both sides)
if svn_branch_path == 'trunk' :
    git_branch_name = 'master'
    git_branch_path = svn_branch_path
elif svn_branch_path.startswith('branches/') :
    git_branch_name = svn_branch_path[len('branches/'):]
    git_branch_path = svn_branch_path
else :
    git_branch_name = svn_branch_path
    git_branch_path = 'branches/' + svn_branch_path
# -----------------------------------------------------------------------------
# hash code for the git branch
cmd = 'git show-ref origin/' + git_branch_name
git_hash_code = system(cmd)
pattern = ' refs/remotes/origin/' + git_branch_name
git_hash_code = git_hash_code.replace(pattern, '')
# -----------------------------------------------------------------------------
# make sure work directory exists
if not os.path.isdir(work_directory) :
    os.makedirs(work_directory)
# -----------------------------------------------------------------------------
# checkout svn version of directory (reusing an existing checkout on request)
svn_directory = work_directory + '/svn'
if os.path.isdir(svn_directory) :
    question = 'Use existing svn directory:\n\t'
    question += svn_directory + '\n'
    question += 'or remove it and check out a new copy ? [use/new] '
    choice_list = [ 'use' , 'new' ]
    choice = pause(question, choice_list)
    if choice == 'new' :
        cmd = 'rm -r ' + svn_directory
        print_system(cmd)
else :
    choice = 'new'
if choice == 'use' :
    # Bring the existing checkout to a pristine, up-to-date state and
    # delete any unversioned ('?') files left behind.
    cmd = 'svn revert --recursive ' + svn_directory
    print_system(cmd)
    cmd = 'svn update ' + svn_directory
    print_system(cmd)
    cmd = 'svn status ' + svn_directory
    svn_status = system(cmd)
    svn_status = svn_status.split('\n')
    for entry in svn_status :
        if entry.startswith('? ') :
            # NOTE(review): assumes svn status uses an 8-column prefix
            # before the file name -- confirm for the svn version in use.
            file_name = entry[8:]
            cmd = 'rm ' + file_name
            system(cmd)
else :
    cmd = 'svn checkout '
    cmd += svn_repository + '/' + svn_branch_path + ' ' + svn_directory
    print_system(cmd)
# ----------------------------------------------------------------------------
# run the user-specified svn commands inside the svn checkout
tmp = os.getcwd()
os.chdir( svn_directory )
for cmd in svn_commands :
    assert cmd.startswith('svn')
    print_system(cmd)
os.chdir( tmp )
# ----------------------------------------------------------------------------
# git hash code corresponding to version in svn directory (recorded in the
# log message of the last svn commit made by this script, if any)
cmd = 'svn info ' + svn_directory
svn_info = system(cmd)
rev_pattern = re.compile('Last Changed Rev: *([0-9]+)')
match = re.search(rev_pattern, svn_info)
svn_revision = match.group(1)
cmd = 'svn log -r ' + svn_revision + ' ' + svn_directory
svn_log = system(cmd)
hash_pattern = re.compile('\nend hash code: *([0-9a-f]+)')
match = re.search(hash_pattern, svn_log)
if match :
    svn_hash_code = match.group(1)
else :
    svn_hash_code = None
# -----------------------------------------------------------------------------
# export the git version of the directory (fresh export every time)
git_directory = work_directory + '/git'
if os.path.isdir(git_directory) :
    cmd = 'rm -r ' + git_directory
    print_system(cmd)
cmd = 'svn export '
cmd += git_repository + '/' + git_branch_path + ' ' + git_directory
print_system(cmd)
# -----------------------------------------------------------------------------
# list of files for the svn and git directories (paths relative to each root;
# .svn administrative directories are excluded on the svn side)
svn_pattern = re.compile(svn_directory + '/')
svn_file_list = []
svn_dir_list = []
for directory, dir_list, file_list in os.walk(svn_directory) :
    ok = ( directory.find('/.svn/') == -1 )
    ok = ok and ( not directory.endswith('/.svn') )
    if ok :
        if directory != svn_directory :
            local_name = re.sub(svn_pattern, '', directory)
            svn_dir_list.append(local_name)
        for name in file_list :
            local_name = directory + '/' + name
            local_name = re.sub(svn_pattern, '', local_name)
            svn_file_list.append( local_name )
#
git_pattern = re.compile(git_directory + '/')
git_file_list = []
git_dir_list = []
for directory, dir_list, file_list in os.walk(git_directory) :
    # an svn export must not contain .svn directories
    index = directory.find('/.svn/')
    assert index == -1
    if directory != git_directory :
        local_name = re.sub(git_pattern, '', directory)
        git_dir_list.append(local_name)
    for name in file_list :
        local_name = directory + '/' + name
        local_name = re.sub(git_pattern, '', local_name)
        git_file_list.append( local_name )
# -----------------------------------------------------------------------------
# list of files that have been created and deleted
created_file_list=[]
for name in git_file_list :
    if not name in svn_file_list :
        created_file_list.append(name)
#
deleted_file_list=[]
for name in svn_file_list :
    if not name in git_file_list :
        deleted_file_list.append(name)
# -----------------------------------------------------------------------------
# list of directories that have been created and deleted
created_dir_list=[]
for name in git_dir_list :
    if not name in svn_dir_list :
        created_dir_list.append(name)
#
deleted_dir_list=[]
for name in svn_dir_list :
    if not name in git_dir_list :
        deleted_dir_list.append(name)
# -----------------------------------------------------------------------------
# automated svn commands
#
# create the new directories first so file copies below have a destination
for git_dir in created_dir_list :
    cmd = 'svn mkdir ' + svn_directory + '/' + git_dir
    print_system(cmd)
#
# For each created file: if a deleted file has identical content (modulo
# $Id lines), record it as an svn copy to preserve history; otherwise add it.
for git_file in created_file_list :
    git_f = open(git_directory + '/' + git_file, 'rb')
    git_data = git_f.read()
    git_f.close()
    git_data = ignore_data(git_data)
    #
    found = False
    for svn_file in deleted_file_list :
        svn_f = open(svn_directory + '/' + svn_file, 'rb')
        svn_data = svn_f.read()
        svn_f.close()
        svn_data = ignore_data(svn_data)
        #
        if svn_data == git_data :
            # at most one deleted file may match this created file
            assert not found
            cmd = 'svn copy ' + svn_directory + '/' + svn_file + ' \\\n\t'
            cmd += svn_directory + '/' + git_file
            print_system(cmd)
            cmd = 'cp ' + git_directory + '/' + git_file + ' \\\n\t'
            cmd += svn_directory + '/' + git_file
            system(cmd)
            found = True
    if not found :
        cmd = 'cp ' + git_directory + '/' + git_file + ' \\\n\t'
        cmd += svn_directory + '/' + git_file
        system(cmd)
        cmd = 'svn add ' + svn_directory + '/' + git_file
        print_system(cmd)
#
# schedule deletions (files already consumed by an svn copy may be gone)
for svn_file in deleted_file_list :
    svn_file_path = svn_directory + '/' + svn_file
    if os.path.isfile(svn_file_path) :
        cmd = 'svn delete --force ' + svn_file_path
        print_system(cmd)
#
# copy over files whose content changed (skip files just added above)
for git_file in git_file_list :
    do_cp = True
    do_cp = do_cp and git_file not in created_file_list
    if git_file in svn_file_list :
        git_f = open(git_directory + '/' + git_file, 'rb')
        git_data = git_f.read()
        git_f.close()
        git_data = ignore_data(git_data)
        #
        svn_f = open(svn_directory + '/' + git_file, 'rb')
        svn_data = svn_f.read()
        svn_f.close()
        svn_data = ignore_data(svn_data)
        #
        do_cp = do_cp and git_data != svn_data
    if do_cp :
        cmd = 'cp ' + git_directory + '/' + git_file + ' \\\n\t'
        cmd += svn_directory + '/' + git_file
        system(cmd)
#
for svn_dir in deleted_dir_list :
    cmd = 'svn rm ' + svn_directory + '/' + svn_dir
    print_system(cmd)
# -----------------------------------------------------------------------------
# write the suggested commit message, including the git commit range so the
# next run of this script can find its starting point
data = 'merge to branch: ' + svn_branch_path + '\n'
data += 'from repository: ' + git_repository + '\n'
if svn_hash_code != None :
    data += 'start hash code: ' + svn_hash_code + '\n'
else :
    data += 'start hash code: missing\n'
data += 'end hash code: ' + git_hash_code + '\n\n'
if svn_hash_code != None :
    # append the git log entries newer than the previously pushed commit
    sed_cmd = "sed -e '/" + svn_hash_code + "/,$d'"
    cmd = 'git log origin/' + git_branch_name + ' | ' + sed_cmd
    output = system(cmd)
    data += output
# NOTE(review): this is Python 2 style -- under Python 3, `data` (str)
# cannot be written to a file opened in 'wb' mode.
log_f = open( svn_directory + '/push_git2svn.log' , 'wb')
log_f.write(data)
log_f.close()
#
# tell the user how to review and commit the staged changes
msg = '\nChange into svn directory with the command\n\t'
msg += 'cd ' + svn_directory + '\n'
msg += 'If these changes are OK, execute the command:\n\t'
msg += 'svn commit --file push_git2svn.log\n'
if svn_hash_code != None :
    msg += 'You should inspect and possibly edit push_git2svn.log'
else :
    msg += 'The start hash code could not be automatically determined.\n'
    msg += 'You should edit push_git2svn.log to describe the changes.'
print(msg)
|
wegamekinglc/CppAD
|
bin/push_git2svn.py
|
Python
|
epl-1.0
| 11,193
|
[
"VisIt"
] |
262a450f28725eb3b8d246bd2899cf44ba58ccbd6536743ef79c4b95b444fbcb
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the tensorflow HookeanSpring module
pydoc:dap.tf.hooke
"""
import numpy as np
import tensorflow as tf
from ase.build import molecule
from dap.tf.hooke import HookeanSpring
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TestHookeanSpring(tf.test.TestCase):
  """Tests for the model HookeanSpring Calculator."""

  def test_default(self):
    """Test energy and forces with default settings."""
    atoms = molecule('H2')
    atoms.set_calculator(HookeanSpring())
    # E = 0.5 * k * (x0 - d)**2 with defaults k=1.0, x0=1.0.
    e0 = 0.5 * 1.0 * (1 - atoms.get_distance(0, 1))**2
    e1 = atoms.get_potential_energy()
    self.assertTrue(e0 == e1)
    # |F| = |-k * (x0 - d)|, equal and opposite on the two atoms.
    fmag = np.abs(-1.0 * (1 - atoms.get_distance(0, 1)))
    f = atoms.get_forces()
    self.assertTrue(fmag == np.linalg.norm(f[0]))
    self.assertTrue(fmag == np.linalg.norm(f[1]))

  def test_custom(self):
    """Test energy and forces with custom settings."""
    atoms = molecule('H2')
    k, x0 = 1.5, 0.9
    atoms.set_calculator(HookeanSpring(k=k, x0=x0))
    e0 = 0.5 * k * (x0 - atoms.get_distance(0, 1))**2
    e1 = atoms.get_potential_energy()
    self.assertTrue(e0 == e1)
    fmag = np.abs(-k * (x0 - atoms.get_distance(0, 1)))
    f = atoms.get_forces()
    self.assertTrue(fmag == np.linalg.norm(f[0]))
    self.assertTrue(fmag == np.linalg.norm(f[1]))

  def test_wrong_atom_count(self):
    """A non-diatomic system must raise.

    FIX: this test was also named ``test_custom``, which shadowed the
    previous test so only one of the two ever ran; renamed so both are
    collected by the test runner.
    """
    atoms = molecule('H2O')
    atoms.set_calculator(HookeanSpring())
    with self.assertRaisesRegexp(Exception,
                                 'You can only use a two atom systems'):
      e = atoms.get_potential_energy()
|
google/differentiable-atomistic-potentials
|
dap/tests/test_tf_hook.py
|
Python
|
apache-2.0
| 2,166
|
[
"ASE"
] |
1e361c3c71b5fbafffba1c02965c1f5b185d5e31cbb5b70dbd17db2bc884cc69
|
'''\
a piddle wrapper for wxPython DeviceContexts
piddleWxDc.py
By Paul and Kevin Jacobs
History -
1.0 Many fixes and the rest of required piddle functionality added.
0.5 Much work done by Jeffrey Kunce on image support and code factoring.
PiddleWxDc adds Piddle-compatible methods to any wxPython DeviceContext.
It can be used any where a wxDC is used (onPaint, onDraw, etc).
Code factoring and pil image support by Jeffrey Kunce
see also piddleWxDcDemo.py
'''
from wxPython.wx import *
from rdkit.sping import pid as sping_pid
class WxCanvasError(RuntimeError):
  """Raised when a PIDDLE color cannot be turned into a usable wx pen/brush."""
class PiddleWxDc(sping_pid.Canvas):
  """A PIDDLE/sping Canvas backed by a wxPython DeviceContext.

  Adds Piddle-compatible drawing methods to any wxDC; usable anywhere a
  wxDC is available (onPaint, onDraw, etc.).
  """

  def __init__(self, aWxDc, size=(300, 300), name="piddleWX"):
    sping_pid.Canvas.__init__(self, size, name)
    self.dc = aWxDc
    self.dc.BeginDrawing()

  def __del__(self):
    self.dc.EndDrawing()

  def _getWXcolor(self, color, default=None):
    '''Converts PIDDLE colors to wx colors'''
    if color is not None:
      if color == sping_pid.transparent:
        return None
      elif color.red >= 0 and color.green >= 0 and color.blue >= 0:
        # PIDDLE channels are 0..1 floats; wx wants 0..255.
        return wxColour(color.red * 255, color.green * 255, color.blue * 255)
    if default is not None:
      return self._getWXcolor(default)
    else:
      return None  # End of the line

  def _getWXbrush(self, color, default_color=None):
    '''Converts PIDDLE colors to a wx brush'''
    if color == sping_pid.transparent:
      return wxTRANSPARENT_BRUSH
    wxcolor = self._getWXcolor(color)
    if wxcolor is None:
      if default_color is not None:
        return self._getWXbrush(default_color)
      else:
        raise WxCanvasError("Cannot create brush.")
    return wxBrush(wxcolor)

  def _getWXpen(self, width, color, default_color=None):
    '''Converts PIDDLE colors to a wx pen'''
    if width is None or width < 0:
      width = self.defaultLineWidth
    if color == sping_pid.transparent:
      return wxTRANSPARENT_PEN
    wxcolor = self._getWXcolor(color)
    if wxcolor is None:
      if default_color is not None:
        return self._getWXpen(width, default_color)
      else:
        raise WxCanvasError("Cannot create pen.")
    return wxPen(wxcolor, width)

  def _getWXfont(self, font):
    '''Returns a wxFont roughly equivalent to the requested PIDDLE font'''
    if font is None:
      font = self.defaultFont
    # PIDDLE fonts are matched to wxFont families. While it is possible to
    # match them to individual fonts, this is difficult to do in a platform
    # independent way
    if font.face is None or font.face == 'times':
      family = wxDEFAULT
    elif font.face == 'courier' or font.face == 'monospaced':
      family = wxMODERN
    elif font.face == 'helvetica' or font.face == 'sansserif':
      family = wxSWISS
    elif font.face == 'serif' or font.face == 'symbol':
      family = wxDEFAULT
    else:
      family = wxDEFAULT
    weight = wxNORMAL
    style = wxNORMAL
    underline = 0
    if font.bold == 1:
      weight = wxBOLD
    if font.underline == 1:
      underline = 1
    if font.italic == 1:
      style = wxITALIC
    return wxFont(font.size, family, style, weight, underline)

  def _setWXfont(self, font=None):
    '''set/return the current font for the dc
    jjk 10/28/99'''
    wx_font = self._getWXfont(font)
    self.dc.SetFont(wx_font)
    return (wx_font)

  def isInteractive(self):
    # This canvas never accepts user input.
    return (0)

  def canUpdate(self):
    return 1

  def clear(self):
    self.dc.Clear()

  #------------ string/font info ------------
  def stringWidth(self, s, font=None):
    '''Return the logical width of the string if it were drawn \
    in the current font (defaults to self.font).'''
    wx_font = self._setWXfont(font)
    return self.dc.GetTextExtent(s)[0]

  def fontHeight(self, font=None):
    '''Find the total height (ascent + descent) of the given font.'''
    return self.fontAscent(font) + self.fontDescent(font)

  def fontAscent(self, font=None):
    '''Find the ascent (height above base) of the given font.'''
    wx_font = self._setWXfont(font)
    return self.dc.GetCharHeight() - self.fontDescent(font)

  def fontDescent(self, font=None):
    '''Find the descent (extent below base) of the given font.'''
    wx_font = self._setWXfont(font)
    extents = self.dc.GetFullTextExtent(' ', wx_font)
    return extents[2]

  #------------- drawing methods --------------
  # Note default parameters "=None" means use the defaults set in the
  # Canvas method: defaultLineColor, etc.
  def drawLine(self, x1, y1, x2, y2, color=None, width=None):
    '''Draw a straight line between x1,y1 and x2,y2.'''
    if width is None or width < 0:
      width = self.defaultLineWidth
    self.dc.SetPen(self._getWXpen(width, color, self.defaultLineColor))
    self.dc.DrawLine(x1, y1, x2, y2)

  def drawString(self, s, x, y, font=None, color=None, angle=None):
    '''Draw a string starting at location x,y.
    NOTE: the baseline goes on y; drawing covers (y-ascent,y+descent)
    Text rotation (angle%360 != 0) is not supported.'''
    self._setWXfont(font)
    if color == sping_pid.transparent:
      return
    # No defaultFontColor?
    wx_color = self._getWXcolor(color, self.defaultLineColor)
    if wx_color is None:
      wx_color = wxBLACK
    self.dc.SetTextForeground(wx_color)
    if '\n' in s or '\r' in s:
      #normalize line ends
      s = s.replace('\r\n', '\n')
      s = s.replace('\n\r', '\n')
      lines = s.split('\n')
    else:
      lines = [s]
    if angle is not None:
      self._drawRotatedString(lines, x, y, font, wx_color, angle)
    else:
      line_height = self.fontHeight(font)
      for l in range(0, len(lines)):
        self.dc.DrawText(lines[l], x, y - self.fontAscent(font) + l * line_height)

  def _drawRotatedString(self, lines, x, y, font=None, color=None, angle=0):
    import math
    # [kbj] Hack since the default system font may not be able to rotate.
    if font is None:
      font = sping_pid.Font(face='helvetica')
    self._setWXfont(font)
    ascent = self.fontAscent(font)
    height = self.fontHeight(font)
    # Offset successive lines perpendicular to the text direction.
    rad = angle * math.pi / 180.
    s = math.sin(rad)
    c = math.cos(rad)
    dx = s * height
    dy = c * height
    lx = x - dx
    ly = y - c * ascent
    for i in range(0, len(lines)):
      self.dc.DrawRotatedText(lines[i], lx + i * dx, ly + i * dy, angle)

  # drawPolygon: For fillable shapes, edgeColor defaults to
  # self.defaultLineColor, edgeWidth defaults to self.defaultLineWidth, and
  # fillColor defaults to self.defaultFillColor. Specify "don't fill" by
  # passing fillColor=transparent.
  def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
    """drawPolygon(pointlist) -- draws a polygon
    pointlist: a list of (x,y) tuples defining vertices
    closed: if 1, adds an extra segment connecting the last point
    to the first
    """
    # Because wxPython automatically closes polygons, the polygon fill and the border
    # are drawn separately, so open polygons will display correctly
    self.dc.SetPen(wxTRANSPARENT_PEN)
    self.dc.SetBrush(self._getWXbrush(fillColor, self.defaultFillColor))
    # Workaround : PIDDLE will pass mixed lists of lists and 2-tuples
    # instead of just 2-tuples. Therefore, pointlist must be re-created as
    # only 2-tuples
    # FIX: map() returns an iterator on Python 3, which has no .append();
    # build a real list of tuples instead.
    pointlist = [tuple(i) for i in pointlist]
    if closed == 1:
      pointlist.append(pointlist[0])
    self.dc.DrawPolygon(pointlist)
    # Create a list of lines (4-tuples) to pass to drawLines
    linelist = []
    if len(pointlist) > 1:
      for i in range(1, len(pointlist)):
        linelist.append(
          (pointlist[i - 1][0], pointlist[i - 1][1], pointlist[i][0], pointlist[i][1]))
    else:
      # Degenerate single-point polygon: draw a zero-length line.
      linelist.append((pointlist[0][0], pointlist[0][1], pointlist[0][0], pointlist[0][1]))
    self.drawLines(linelist, edgeColor, edgeWidth)

  # no colors apply to drawImage; the image is drawn as-is
  def drawImage(self, image, x1, y1, x2=None, y2=None):
    """Draw a PIL Image into the specified rectangle. If x2 and y2 are
    omitted, they are calculated from the image size.
    jjk 11/03/99"""
    try:
      from PIL import Image
    except ImportError:
      print('PIL not installed as package')
      try:
        import Image
      except ImportError:
        raise RuntimeError("PIL not available!")
    if (x2 and y2 and x2 > x1 and y2 > y1):
      imgPil = image.resize((x2 - x1, y2 - y1))
    else:
      imgPil = image
    if (imgPil.mode != 'RGB'):
      imgPil = imgPil.convert('RGB')
    imgData = imgPil.tobytes()
    imgWx = wxEmptyImage(imgPil.size[0], imgPil.size[1])
    imgWx.SetData(imgData)
    self.dc.DrawBitmap(imgWx.ConvertToBitmap(), x1, y1)
|
bp-kelley/rdkit
|
rdkit/sping/WX/pidWxDc.py
|
Python
|
bsd-3-clause
| 8,748
|
[
"RDKit"
] |
36256bf2bb1889dd911e46d195a184afa2413bb83c90332fe3d7d862387c7ada
|
from js_ast import *
from js_process_ast import *
from js_cc import js_parse
from js_global import Glob, glob
from js_typed_classes import resolve_type
import os, sys, os.path, struct, imp, shutil, time, random, math
"""
okay. basic data model of typed objects:
each object compiles to an array of typed arrays, like so:
[float array, int array, byte array, etc]
an object reference is just an array, like so:
[reference to above typed arrays, starting offset (in bytes)]
"""
# Slot indices into an object's list of typed arrays
# ([float array, int array, byte array, ...] -- see module docstring).
FLOAT_ARR = 0
INT_ARR = 1
BYTE_ARR = 2
DOUBLE_ARR = 3
LONGLONG_ARR = 4
def gen_class_code(node, typespace):
  """Generate free functions for a typed class's methods/getters/setters.

  Each method ``m`` of class ``C`` becomes a plain function named
  ``C__m`` (``C_get__m`` / ``C_set__m`` for accessors) with an explicit
  ``self`` first parameter, and every ``this`` in the body rewritten to
  ``self``.  Returns a StatementList holding the generated functions.
  """
  ret = StatementList()

  def replace_this(n):
    # Rewrite ``this`` identifiers to ``self``, without descending into
    # nested functions/classes, and only rewriting the left side of
    # ``.`` member chains.
    if type(n) in [IdentNode, VarDeclNode] and n.val == "this":
      n.val = "self"
    for c in n.children:
      if type(c) in [FunctionNode, ClassNode, TypedClassNode]: continue
      if type(c) == BinOpNode and c.op == ".":
        replace_this(c[0])
        continue
      replace_this(c)

  methods = list(node.methods.values()) + list(node.getters.values()) + list(node.setters.values())
  for m in methods:
    if type(m) == MethodNode:
      name = node.name + "__" + m.name
    elif type(m) == MethodGetter:
      name = node.name + "_get__" + m.name
    elif type(m) == MethodSetter:
      name = node.name + "_set__" + m.name
    func = FunctionNode(name)
    func.add(m[0].copy())
    func.add(m[1].copy())
    # Prepend a ``self`` parameter typed as a reference to this class.
    func[0].prepend(VarDeclNode(ExprNode([]), name="self"))
    func[0][0].type = TypedClassRef(node)
    func[0][0].add(func[0][0].type)
    for c in func:
      replace_this(c)
    ret.add(func)
  return ret
class TransformVisit (NodeVisit):
  """AST visitor that lowers typed-class member accesses into raw
  typed-array reads/writes and getter/setter function calls."""

  def __init__(self, typespace):
    NodeVisit.__init__(self)
    self.typespace = typespace
    self.required_nodes = []

  def FunctionNode(self, node, scope, t, tlevel):
    # Extend the scope with the function's parameters before descending
    # into the body.
    scope = dict(scope)
    for c in node[0]:
      n = c
      while type(n) not in [VarDeclNode, IdentNode]:
        n = n[0]
      scope[n.val] = c
    for c in node[1:]:
      t(c, scope, tlevel)

  def var_transform(self, cls, var, member):
    """Rewrite ``var.member`` for typed class ``cls``: plain fields become
    ``var[arr][off]`` typed-array lookups; accessor members become calls
    to the generated getter/setter functions."""
    #print(cls, var, member)
    if type(member) != str: member = member.val
    p = cls.childmap[member]
    if type(p) == VarDeclNode:
      arr = -1
      t = p.type
      while type(t) == StaticArrNode:
        t = t[0]
      if type(t) == BuiltinTypeNode:
        if t.type == "int": arr = INT_ARR
        elif t.type == "float": arr = FLOAT_ARR
        elif t.type == "byte": arr = BYTE_ARR
      if arr == -1:
        self.typespace.error("internal error", var)
      off = p.start
      ret = js_parse(var.gen_js(0) + "[%d][%d];" % (arr, off), start_node=ArrayRefNode)
      ret.type = p
      return ret
    elif type(p) in [MethodGetter, MethodSetter]:
      #see if we have a getter
      if member in cls.getters:
        p = cls.getters[member]
        cls = p.parent
        name = cls.name + "_get__" + p.name
        ret = js_parse(name+"($n)", [var], start_node=FuncCallNode)
        ret.src = p
        return ret
      else:
        cls = p.parent
        name = cls.name + "_set__" + p.name
        ret = js_parse(name+"($n)", [var], start_node=FuncCallNode)
        ret.src = p
        return ret
    return IdentNode("[implement me]")

  def ArrayRefNode(self, node, scope, t, tlevel):
    #label any child array refs
    if type(node.parent) != ArrayRefNode:
      lvl = 0
      n = node
      while type(n) == ArrayRefNode:
        n.lvl = lvl
        n = n[0]
        lvl += 1
      n = node
      for i in range(lvl):
        #n.lvl = lvl - i - 1
        n = n[0]
    t(node[0], scope, tlevel)
    t(node[1], scope, tlevel)
    if node[0].type != None:
      print(type(node[0].type.parent))
    if node[0].type != None and type(node[0].type.parent) == TypedClassNode:
      #now. theoretically, we should have a nice little ArrayRefNode
      #as a child
      if type(node[0]) != ArrayRefNode:
        # FIX: was a bare ``typespace.error(...)`` (NameError at runtime);
        # use the instance's typespace like the rest of the visitor.
        self.typespace.error("Internal parse error 2", node)
      t = node[0].type.type
      if type(t) != StaticArrNode:
        # FIX: same bare-name bug as above; also dropped a stray ';'.
        self.typespace.error("Invalid array lookup on non-array value", node)
      print(node.lvl, "<-------------------------")
      ref = node[0]
      b = 1
      for i in range(node.lvl):
        t = t[0]
      for i in range(node.lvl):
        t = t.parent
        b *= t.size
      print("yay", t.size)
      a = ref[1]
      c = node[1]
      # Fold constant indices immediately; otherwise emit ``a + b*c``.
      if type(a) == NumLitNode and type(c) == NumLitNode:
        a.val = int(a.val) + int(c.val)*b
      else:
        ref.replace(a, BinOpNode(a, BinOpNode(NumLitNode(b), c, "*"), "+"))
      node.parent.replace(node, ref)

  def AssignNode(self, node, scope, t, tlevel):
    t(node[0], scope, tlevel)
    t(node[1], scope, tlevel)
    #detect setters, which are inserted by var_transform *only* if
    #a getter doesn't exist, otherwise a getter is inserted and
    #we check if there's a setter here.
    if type(node[0]) == FuncCallNode and hasattr(node[0], "src"):
      #var_transform will insert a getter, if it exists, before
      #a setter.
      if "_get__" in node[0][0].gen_js(0):
        cls = node[0].src.parent
        name = node[0].src.name
        if name in cls.setters:
          n2 = IdentNode(cls.name + "_set__" + name)
          node[0].replace(node[0][0], n2)
        else:
          self.typespace.error("Cannot assign to read-only property "+name, node)
      if not "_set__" in node[0].gen_js(0):
        self.typespace.error("Cannot assign values to function calls", node)
      # Turn ``setter(obj) = value`` into ``setter(obj, value)``.
      n2 = node[1]
      node.remove(n2)
      node[0][1].add(n2)
      node.parent.replace(node, node[0])

  def BinOpNode(self, node, scope, t, tlevel):
    if node.op == "=":
      self.AssignNode(node, scope, t, tlevel)
      return
    if node.op == ".":
      t1 = resolve_type(self.typespace, node[0], scope)
      t2 = resolve_type(self.typespace, node[1], scope)
      base = node[0]
      while type(base) not in [VarDeclNode, IdentNode]:
        base = base[0]
      if type(t1) == TypedClassNode and type(node[1]) == IdentNode:
        scope = dict(scope)
        scope[node[1].val] = t1.childmap[node[1].val]
        node.parent.replace(node, self.var_transform(t1, base, node[1].val))
        return
    t(node[0], scope, tlevel)
    t(node[1], scope, tlevel)
def type_size(node, typespace):
  """Return ``(size, basesize)`` in bytes for the type of ``node``.

  ``size`` is the total storage; ``basesize`` is the alignment unit.
  Static arrays multiply the element size; builtins map to 1/4/8 bytes;
  typed classes are laid out on demand.
  """
  size = 0
  basesize = 0
  t = resolve_type(typespace, node, {})
  print(t, "s")
  if type(t) == StaticArrNode:
    # Multiply out every nested array dimension.
    n = t
    size = 1
    while type(n) == StaticArrNode:
      size *= n.size
      n = n[0]
    size2, basesize = type_size(n, typespace)
    if basesize > 4: basesize = 8
    size *= size2
  elif type(t) == BuiltinTypeNode:
    if t.type in ["float", "int"]:
      size = 4
    elif t.type in ["double", "long long"]:
      size = 8
    elif t.type in ["char", "byte"]:
      size = 1
    else:
      typespace.error("Invalid builtin type " + t.type, node)
    basesize = size
  elif type(t) == TypedClassNode:
    # FIX: identity comparison (was ``== None``).
    if t.size is None: layout_class(t, typespace)
    size = t.size
    #typed classes are four-byte-aligned *if*
    #their size is four bytes or less, otherwise
    #they are eight byte aligned.
    basesize = 4 if size <= 4 else 8
  return size, basesize
def layout_class(node, typespace):
  """Assign byte offsets (``.start``) and sizes (``.size``) to every field
  of a typed class.

  Layout follows C-like struct rules: ints and floats are 32 bits, and
  each field is aligned to its base size.  The first 4 bytes are reserved
  for an integer reference to the class vtable (in the future this slot
  could be omitted when the compiler proves it unnecessary).
  """
  # node.props is a dict and gives no ordering guarantee, so collect the
  # fields in declaration order from the class body itself.
  members = [c for c in node if type(c) == VarDeclNode]
  offset = 4  # reserved vtable-reference slot
  for member in members:
    size, basesize = type_size(member, typespace)
    if size == 0:
      typespace.error("failed to calculate struct layout", node)
    # Round the offset up to this field's alignment boundary.
    while (offset % basesize) != 0:
      offset += 1
    member.start = offset
    member.size = size
    offset += size
def do_transform(node, typespace):
  """Lay out every typed class, then rewrite the AST with TransformVisit."""
  for cls in typespace.types.values():
    if type(cls) != TypedClassNode:
      continue
    layout_class(cls, typespace)
  TransformVisit(typespace).traverse(node)
class TypedArrayClassGen:
  """Code-generation driver for typed classes backed by typed arrays."""

  def __init__(self):
    # Flips to True once boilerplate() has seen at least one typed class.
    self.has_typed_classes = False

  def boilerplate(self, node, typespace):
    """Emit the free-function boilerplate for one typed class."""
    self.has_typed_classes = True
    return gen_class_code(node, typespace)

  def transform(self, node, typespace):
    """Run the AST transform, but only if any typed classes were seen."""
    print("yay, transform")
    if not self.has_typed_classes:
      return
    do_transform(node, typespace)
|
joeedh/webblender
|
tools/extjs_cc/js_typed_class_generator.py
|
Python
|
apache-2.0
| 8,860
|
[
"VisIt"
] |
6c12b9a0d9f00f9d36d359f1f159c58d5d4a7806d88a7ec7aaae627c6a1d70b1
|
"""
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.views.decorators.http import require_http_methods
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from django.utils.translation import ugettext as _
from social_auth.backends import get_backend
from social_auth.models import UserSocialAuth
from sudo.decorators import sudo_required
from sentry.models import (
UserEmail, LostPasswordHash, Project, UserOption, Authenticator
)
from sentry.signals import email_verified
from sentry.web.decorators import login_required, signed_auth_required
from sentry.web.forms.accounts import (
AccountSettingsForm, AppearanceSettingsForm,
RecoverPasswordForm, ChangePasswordRecoverForm,
EmailForm
)
from sentry.web.helpers import render_to_response
from sentry.utils import auth
def send_password_recovery_mail(user):
    """Ensure ``user`` has a valid LostPasswordHash and email it to them."""
    password_hash, _created = LostPasswordHash.objects.get_or_create(user=user)
    if not password_hash.is_valid():
        # Stale hash: refresh its timestamp and regenerate the secret.
        password_hash.date_added = timezone.now()
        password_hash.set_hash()
        password_hash.save()
    password_hash.send_recover_mail()
    return password_hash
@login_required
def login_redirect(request):
    """Bounce an authenticated user to their post-login destination."""
    return HttpResponseRedirect(auth.get_login_redirect(request))
def expired(request, user):
    """Render the 'recovery link expired' page and mail a fresh link."""
    password_hash = send_password_recovery_mail(user)
    context = {'email': password_hash.user.email}
    return render_to_response('sentry/account/recover/expired.html', context, request)
def recover(request):
    """Password-recovery entry form; emails a reset link on valid submit."""
    form = RecoverPasswordForm(request.POST or None)
    if not form.is_valid():
        # GET, or invalid submission: (re)display the form.
        return render_to_response('sentry/account/recover/index.html',
                                  {'form': form}, request)
    password_hash = send_password_recovery_mail(form.cleaned_data['user'])
    return render_to_response('sentry/account/recover/sent.html',
                              {'email': password_hash.user.email}, request)
def recover_confirm(request, user_id, hash):
    """Second step of password recovery: validate the emailed hash, then
    show/process the change-password form.

    Unknown or expired hashes render a failure page instead.
    """
    try:
        password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
        if not password_hash.is_valid():
            # Expired link: delete it and treat it like a missing one.
            password_hash.delete()
            raise LostPasswordHash.DoesNotExist
        user = password_hash.user
    except LostPasswordHash.DoesNotExist:
        context = {}
        tpl = 'sentry/account/recover/failure.html'
    else:
        tpl = 'sentry/account/recover/confirm.html'
        if request.method == 'POST':
            form = ChangePasswordRecoverForm(request.POST)
            if form.is_valid():
                user.set_password(form.cleaned_data['password'])
                user.refresh_session_nonce(request)
                user.save()
                # Ugly way of doing this, but Django requires the backend be set
                user = authenticate(
                    username=user.username,
                    password=form.cleaned_data['password'],
                )
                login_user(request, user)
                # The hash is single-use: consume it on success.
                password_hash.delete()
                return login_redirect(request)
        else:
            form = ChangePasswordRecoverForm()
        context = {
            'form': form,
        }
    return render_to_response(tpl, context, request)
@login_required
@require_http_methods(["POST"])
def start_confirm_email(request):
    """Send email-verification message(s) for the current user.

    Three modes: confirm one specific address ('primary-email' in POST),
    confirm all unverified addresses, or report that everything is
    already verified.
    """
    from sentry.app import ratelimiter

    # Throttle per-user to avoid mail-bombing.
    if ratelimiter.is_limited(
        'auth:confirm-email:{}'.format(request.user.id),
        limit=10, window=60,  # 10 per minute should be enough for anyone
    ):
        return HttpResponse(
            'You have made too many email confirmation requests. Please try again later.',
            content_type='text/plain',
            status=429,
        )
    if 'primary-email' in request.POST:
        email = request.POST.get('email')
        try:
            email_to_send = UserEmail.objects.get(user=request.user, email=email)
        except UserEmail.DoesNotExist:
            msg = _('There was an error confirming your email.')
            level = messages.ERROR
        else:
            request.user.send_confirm_email_singular(email_to_send)
            msg = _('A verification email has been sent to %s.') % (email)
            level = messages.SUCCESS
        messages.add_message(request, level, msg)
        # Single-address mode returns to the main settings page.
        return HttpResponseRedirect(reverse('sentry-account-settings'))
    elif request.user.has_unverified_emails():
        request.user.send_confirm_emails()
        unverified_emails = [e.email for e in request.user.get_unverified_emails()]
        msg = _('A verification email has been sent to %s.') % (', ').join(unverified_emails)
    else:
        msg = _('Your email (%s) has already been verified.') % request.user.email
    messages.add_message(request, messages.SUCCESS, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
def confirm_email(request, user_id, hash):
    """Landing view for emailed verification links: mark the address
    verified when the validation hash matches and is still valid."""
    msg = _('Thanks for confirming your email')
    level = messages.SUCCESS
    try:
        email = UserEmail.objects.get(user=user_id, validation_hash=hash)
        if not email.hash_is_valid():
            # An expired hash behaves like an unknown one.
            raise UserEmail.DoesNotExist
    except UserEmail.DoesNotExist:
        if request.user.is_anonymous() or request.user.has_unverified_emails():
            msg = _('There was an error confirming your email. Please try again or '
                    'visit your Account Settings to resend the verification email.')
            level = messages.ERROR
    else:
        email.is_verified = True
        email.validation_hash = ''
        email.save()
        email_verified.send(email=email.email, sender=email)
    messages.add_message(request, level, msg)
    return HttpResponseRedirect(reverse('sentry-account-settings-emails'))
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def account_settings(request):
    """Display and save the main account settings form (name, username,
    email); changing the email triggers a new verification message."""
    user = request.user
    form = AccountSettingsForm(
        user, request, request.POST or None,
        initial={
            'email': UserEmail.get_primary_email(user).email,
            'username': user.username,
            'name': user.name,
        },
    )
    if form.is_valid():
        old_email = user.email
        form.save()
        # remove previously valid email address
        # TODO(dcramer): we should maintain validation here when we support
        # multiple email addresses
        if request.user.email != old_email:
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic block so an IntegrityError doesn't poison
                # the outer transaction.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
                user.send_confirm_email_singular(user_email)
                msg = _('A confirmation email has been sent to %s.') % user_email.email
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    msg)
        messages.add_message(
            request, messages.SUCCESS, _('Your settings were saved.'))
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'settings',
        'has_2fa': Authenticator.objects.user_has_2fa(request.user),
        'AUTH_PROVIDERS': auth.get_auth_providers(),
        'email': UserEmail.get_primary_email(user),
    })
    return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def twofactor_settings(request):
    """Security page listing every 2FA interface for the current user."""
    interfaces = Authenticator.objects.all_interfaces_for_user(
        request.user, return_missing=True)
    if request.method == 'POST' and 'back' in request.POST:
        return HttpResponseRedirect(reverse('sentry-account-settings'))
    # A user "has 2FA" only if a non-backup interface is enrolled.
    enrolled = any(
        iface.is_enrolled and not iface.is_backup_interface
        for iface in interfaces
    )
    context = csrf(request)
    context.update({
        'page': 'security',
        'has_2fa': enrolled,
        'interfaces': interfaces,
    })
    return render_to_response('sentry/account/twofactor.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def avatar_settings(request):
    """Render the avatar settings page (no form processing here)."""
    context = dict(
        csrf(request),
        page='avatar',
        AUTH_PROVIDERS=auth.get_auth_providers(),
    )
    return render_to_response('sentry/account/avatar.html', context, request)
@csrf_protect
@never_cache
@login_required
@transaction.atomic
def appearance_settings(request):
    """Display and save per-user appearance preferences (language,
    stacktrace order, timezone, 24-hour clock).

    FIX: removed the redundant local ``from django.conf import settings``;
    the module already imports it at the top.
    """
    options = UserOption.objects.get_all_values(user=request.user, project=None)
    form = AppearanceSettingsForm(request.user, request.POST or None, initial={
        'language': options.get('language') or request.LANGUAGE_CODE,
        # stacktrace_order is stored as a string; -1 means "default".
        'stacktrace_order': int(options.get('stacktrace_order', -1) or -1),
        'timezone': options.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
        'clock_24_hours': options.get('clock_24_hours') or False,
    })
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'form': form,
        'page': 'appearance',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@signed_auth_required
@transaction.atomic
def email_unsubscribe_project(request, project_id):
    """Let a user reached via a signed email link opt out of a project's
    mail alerts."""
    # For now we only support getting here from the signed link.
    if not request.user_from_signed_request:
        raise Http404()
    try:
        project = Project.objects.get(pk=project_id)
    except Project.DoesNotExist:
        raise Http404()
    if request.method == 'POST':
        if 'cancel' not in request.POST:
            # Setting mail:alert to 0 disables alert emails for the project.
            UserOption.objects.set_value(
                request.user, project, 'mail:alert', 0)
        return HttpResponseRedirect(auth.get_login_url())
    context = csrf(request)
    context['project'] = project
    return render_to_response('sentry/account/email_unsubscribe_project.html',
                              context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
    """Show every social-auth identity linked to the current user."""
    identities = list(UserSocialAuth.objects.filter(user=request.user))
    context = csrf(request)
    context.update({
        'identity_list': identities,
        'page': 'identities',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/identities.html', context, request)
@csrf_protect
@never_cache
@login_required
def disconnect_identity(request, identity_id):
    """Disconnect one social-auth identity from the current user (POST only)."""
    if request.method != 'POST':
        raise NotImplementedError
    try:
        # NOTE: this local ``auth`` shadows the module-level
        # ``sentry.utils.auth`` import for the rest of the function.
        auth = UserSocialAuth.objects.get(id=identity_id)
    except UserSocialAuth.DoesNotExist:
        return HttpResponseRedirect(reverse('sentry-account-settings-identities'))
    backend = get_backend(auth.provider, request, '/')
    if backend is None:
        raise Exception('Backend was not found for request: {}'.format(auth.provider))
    # stop this from bubbling up errors to social-auth's middleware
    # XXX(dcramer): IM SO MAD ABOUT THIS
    try:
        backend.disconnect(request.user, identity_id)
    except Exception as exc:
        import sys
        exc_tb = sys.exc_info()[2]
        six.reraise(Exception, exc, exc_tb)
        del exc_tb
    # XXX(dcramer): we experienced an issue where the identity still existed,
    # and given that this is a cheap query, lets error hard in that case
    assert not UserSocialAuth.objects.filter(
        user=request.user,
        id=identity_id,
    ).exists()
    backend_name = backend.AUTH_BACKEND.name
    messages.add_message(
        request, messages.SUCCESS,
        'Your {} identity has been disconnected.'.format(
            settings.AUTH_PROVIDER_LABELS.get(backend_name, backend_name),
        )
    )
    return HttpResponseRedirect(reverse('sentry-account-settings-identities'))
@csrf_protect
@never_cache
@login_required
def show_emails(request):
    """Email management page: change the primary address, add an alternate
    address, or remove an address; new addresses get a verification mail."""
    user = request.user
    primary_email = UserEmail.get_primary_email(user)
    alt_emails = user.emails.all().exclude(email=primary_email.email)
    email_form = EmailForm(user, request.POST or None,
                           initial={
                               'primary_email': primary_email.email,
                           },
                           )
    # NOTE(review): the remove action bypasses form validation entirely;
    # request.POST is empty on GET, so this only fires on POST.
    if 'remove' in request.POST:
        email = request.POST.get('email')
        del_email = UserEmail.objects.filter(user=user, email=email)
        del_email.delete()
        return HttpResponseRedirect(request.path)
    if email_form.is_valid():
        old_email = user.email
        email_form.save()
        if user.email != old_email:
            # Repoint any UserOptions that stored the old address, then
            # replace the old UserEmail row with one for the new address.
            useroptions = UserOption.objects.filter(user=user, value=old_email)
            for option in useroptions:
                option.value = user.email
                option.save()
            UserEmail.objects.filter(user=user, email=old_email).delete()
            try:
                # Nested atomic block so an IntegrityError doesn't poison
                # the surrounding transaction.
                with transaction.atomic():
                    user_email = UserEmail.objects.create(
                        user=user,
                        email=user.email,
                    )
            except IntegrityError:
                pass
            else:
                user_email.set_hash()
                user_email.save()
                user.send_confirm_email_singular(user_email)
                msg = _('A confirmation email has been sent to %s.') % user_email.email
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    msg)
        alternative_email = email_form.cleaned_data['alt_email']
        # check if this alternative email already exists for user
        if alternative_email and not UserEmail.objects.filter(user=user, email=alternative_email):
            # create alternative email for user
            try:
                with transaction.atomic():
                    new_email = UserEmail.objects.create(
                        user=user,
                        email=alternative_email
                    )
            except IntegrityError:
                pass
            else:
                new_email.set_hash()
                new_email.save()
                # send confirmation emails to any non verified emails
                user.send_confirm_email_singular(new_email)
                msg = _('A confirmation email has been sent to %s.') % new_email.email
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    msg)
        messages.add_message(
            request, messages.SUCCESS, _('Your settings were saved.'))
        return HttpResponseRedirect(request.path)
    context = csrf(request)
    context.update({
        'email_form': email_form,
        'primary_email': primary_email,
        'alt_emails': alt_emails,
        'page': 'emails',
        'AUTH_PROVIDERS': auth.get_auth_providers(),
    })
    return render_to_response('sentry/account/emails.html', context, request)
|
zenefits/sentry
|
src/sentry/web/frontend/accounts.py
|
Python
|
bsd-3-clause
| 15,937
|
[
"VisIt"
] |
5f9a30962a4115c83a2cc4f53db64b0f1981a12c7b93b0226b0fd46203e3a871
|
from ase.all import *
from ase.calculators.morse import MorsePotential

# Six fixed H atoms in a 2x3 grid plus one free atom above them; relax
# the structure with a Morse potential.
atoms = Atoms('H7',
              positions=[(0, 0, 0),
                         (1, 0, 0),
                         (0, 1, 0),
                         (1, 1, 0),
                         (0, 2, 0),
                         (1, 2, 0),
                         (0.5, 0.5, 1)],
              constraint=[FixAtoms(range(6))],
              calculator=MorsePotential())
traj = Trajectory('H.traj', 'w', atoms)  # record each optimizer step
dyn = QuasiNewton(atoms, maxstep=0.2)
dyn.attach(traj.write)
dyn.run(fmax=0.01, steps=100)
print(atoms)
# Deleting atoms must keep the FixAtoms constraint indices consistent:
del atoms[-1]  # the free (unconstrained) atom
print(atoms)
del atoms[5]   # a constrained atom
print(atoms)
assert len(atoms.constraints[0].index) == 5
|
suttond/MODOI
|
ase/test/example.py
|
Python
|
lgpl-3.0
| 682
|
[
"ASE"
] |
47d6d411630a6961341564790e9795a9490940342298114ee4c5cb89a19ff986
|
# -*- coding: utf-8 -*-
import os
import codecs
import tempfile
import unittest
from StringIO import StringIO
from yalign.datatypes import Sentence
from yalign.input_conversion import tokenize, text_to_document, \
html_to_document, parallel_corpus_to_documents, tmx_file_to_documents, \
srt_to_document
base_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(base_path, "data")
def reader(N):
    """Return a file-like object with the integers 0..N-1, one per line.

    FIX: uses ``range`` instead of the Python-2-only ``xrange`` — identical
    behavior on Python 2 and forward-compatible with Python 3.
    """
    return StringIO('\n'.join(str(i) for i in range(N)))
class BaseTestTokenization(object):
    """Shared harness: subclasses set ``language``, ``text`` and
    ``expected``; the single test checks every expected token appears in
    the tokenization of ``text``.
    """
    language = "en"
    text = ""
    # ``"" or []`` evaluates to [] -- the default is an empty list.
    expected = "" or []

    def test_expected_words_are_in_tokenization(self):
        words = tokenize(self.text, self.language)
        self.assertIsInstance(words, Sentence)
        if isinstance(self.expected, basestring):
            self.expected = self.expected.split()  # Yes, this is irony.
        for expected_word in self.expected:
            self.assertIn(expected_word, words)
# English tokenization cases: each class supplies one sentence and the
# tokens that must appear in its tokenization (missing sentence-final
# periods, URLs, contractions, dates, hyphenation, emoticons).
class TestTokenizationEn1(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"The dog is hungry.The cat is evil."
    expected = u"dog hungry evil ."


class TestTokenizationEn2(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"It's 3:39 am, what do you want?"
    expected = u"It's 3:39 want ?"


class TestTokenizationEn3(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"Try with ssh://tom@hawk:2020 and tell me"
    expected = u"ssh://tom@hawk:2020"


class TestTokenizationEn4(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"Visit http://google.com"
    expected = u"http://google.com"


class TestTokenizationEn5(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"I'm ready for you all. Aren't you ready?"
    expected = u"all . Aren't"


class TestTokenizationEn6(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"Back to 10-23-1984 but not to 23/10/1984"
    expected = u"10-23-1984 23 10 1984"


class TestTokenizationEn7(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"User-friendliness is a must, use get_text."
    expected = u"User-friendliness must get_text ."


class TestTokenizationEn8(BaseTestTokenization, unittest.TestCase):
    language = "en"
    text = u"John's bar is cool, right :) XD? :panda"
    expected = u"John 's cool , :) XD ?"
# Spanish tokenization cases: missing space after punctuation, quoted
# words, and e-mail addresses kept as single tokens.
class TestTokenizationEs1(BaseTestTokenization, unittest.TestCase):
    language = "es"
    text = u"Ahí hay un vaso, me lo podrías alcanzar?porfavor"
    expected = u"Ahí vaso , podrías alcanzar ? porfavor"


class TestTokenizationEs2(BaseTestTokenization, unittest.TestCase):
    language = "es"
    text = u"Me pueden 'contactar' en juancito@pepito.com"
    expected = u"' contactar juancito@pepito.com"
class TestTokenizationEs3(BaseTestTokenization, unittest.TestCase):
    """Spanish tokenization: URLs and parenthesised words."""
    language = "es"
    text = u"Visita www.com.com y gana premios (seguro)"
    expected = u"www.com.com ( seguro )"


class TestTokenizationPt1(BaseTestTokenization, unittest.TestCase):
    # Renamed from a duplicate "TestTokenizationEs3": the repeated class
    # name shadowed the earlier definitions, so unittest never ran them.
    language = "pt"
    text = u"A expressão tornou-se bastante comum no internetês."
    expected = u"expressão tornou-se internetês"


class TestTokenizationPt2(BaseTestTokenization, unittest.TestCase):
    # Renamed from a duplicate "TestTokenizationEs3" (same shadowing bug).
    language = "pt"
    text = u"uma cantora e compositora norte-americana de R&B."
    expected = u"norte-americana R&B"
class BaseTestTextToDocument(object):
    """Mixin: text_to_document must split `text` into more than one
    Sentence, each made of unicode words.

    Subclasses provide `language` and a multi-sentence `text`.
    """
    language = "en"
    text = ""

    def test_contains_more_than_one_sentence(self):
        document = text_to_document(self.text, self.language)
        self.assertGreater(len(document), 1)
        for sentence in document:
            self.assertIsInstance(sentence, Sentence)
            for word in sentence:
                self.assertIsInstance(word, unicode)
# Multi-sentence fixture texts (one per language) exercised through
# BaseTestTextToDocument.
class TestTextToDocumentEn(BaseTestTextToDocument, unittest.TestCase):
    language = "en"
    text = (u"The Bastard Operator From Hell (BOFH), a fictional character "
            u"created by Simon Travaglia, is a rogue system administrator who "
            u"takes out his anger on users (often referred to as lusers), "
            u"colleagues, bosses, and anyone else who pesters him with their "
            u"pitiful user created \"problems\".\n"
            u"The BOFH stories were originally posted in 1992 to Usenet by "
            u"Travaglia, with some being reprinted in Datamation. They were "
            u"published weekly from 1995 to 1999 in Network Week and since 2000"
            u" they have been published most weeks in The Register. They were "
            u"also published in PC Plus magazine for a short time, and several"
            u" books of the stories have also been released.")


class TestTextToDocumentEs(BaseTestTextToDocument, unittest.TestCase):
    language = "es"
    text = (u"El bombo posee un gran espectro dinámico y poder sonoro, y puede"
            u"golpearse con una gran variedad de mazas y baquetas para lograr "
            u"diversos matices o efectos. Además, el ataque —o modo de "
            u"iniciarse el sonido— y la resonancia —o vibración del "
            u"instrumento— influyen en su timbre. Las técnicas de ejecución "
            u"incluyen diferentes tipos de golpe como el legato o stacatto, "
            u"al igual que efectos como redobles, apagado, golpeos al unísono "
            u"o notas de gracia. Desde sus orígenes es además habitual su "
            u"empleo junto a los platillos.")


class TestTextToDocumentPt(BaseTestTextToDocument, unittest.TestCase):
    language = "pt"
    text = (u"O casamento tinha a oposição dos governos do Reino Unido e dos "
            u"territórios autônomos da Commonwealth. Objeções religiosas, "
            u"jurídicas, políticas e morais foram levantadas. Como monarca "
            u"britânico, Eduardo era o chefe nominal da Igreja da Inglaterra, "
            u"que não permitia que pessoas divorciadas se casassem novamente "
            u"se seus ex-cônjuges ainda estivessem vivos; por isso, "
            u"acreditava-se que Eduardo não poderia casar-se com Wallis Simpson"
            u" e permanecer no trono. Simpson era considerada política e "
            u"socialmente inadequada como consorte devido aos seus dois "
            u"casamentos fracassados. O Establishment entendia que ela era "
            u"movida pelo amor ao dinheiro ou à posição e não por amor ao rei."
            u" Apesar da oposição, Eduardo declarou que amava Wallis e que "
            u"pretendia casar-se com ela, com ou sem a aprovação "
            u"governamental.")
class TestHtmlToDocument(unittest.TestCase):
    """Tests for html_to_document: HTML text -> tokenized Sentences."""

    def test_generates_something(self):
        # A real HTML fixture must yield several sentences of unicode words.
        text = open(os.path.join(data_path, "index.html")).read()
        document = html_to_document(text, "en")
        self.assertGreater(len(document), 1)
        for sentence in document:
            self.assertIsInstance(sentence, Sentence)
            for word in sentence:
                self.assertIsInstance(word, unicode)

    def test_extract(self):
        # Paragraph text is extracted, sentence-split and tokenized.
        html = "<html><head></head><body><p>Hello Peter</p></body></html>"
        d = [list(xs) for xs in html_to_document(html, "en")]
        self.assertEquals([u'Hello Peter'.split()], d)
        html = ("<html><head></head><body><p>Hello Peter. "
                "Go for gold.</p></body></html>")
        d = [list(xs) for xs in html_to_document(html, "en")]
        self.assertEquals([u'Hello Peter .'.split(), u'Go for gold .'.split()],
                          d)

    def test_newlines(self):
        # Newlines inside the markup must not create spurious tokens.
        html = ("<html><head></head>\n\n<body><p>\nHello Peter."
                "\n\n\n Go for gold.\n</p>\n</body></html>")
        d = [list(xs) for xs in html_to_document(html, "en")]
        self.assertEquals([u'Hello Peter .'.split(), u'Go for gold .'.split()],
                          d)

    def test_remove_whitespacing(self):
        # Runs of tabs/newlines collapse into single separators.
        html = ("<html><head></head><body><p>Wow\n\tWhat now?\t\t"
                "</p></body></html>")
        d = [list(xs) for xs in html_to_document(html, "en")]
        self.assertEquals([u'Wow What now ?'.split()], d)

    def test_sentence_splitting(self):
        # !, !! and ? all end sentences.
        html = ("<html><head></head><body><p>Wow!! "
                "I did not know! Are you sure?</p></body></html>")
        d = [list(xs) for xs in html_to_document(html, "en")]
        self.assertEquals([u'Wow !!'.split(),
                           u'I did not know !'.split(),
                           u'Are you sure ?'.split()], d)
class TestParallelCorpusDocument(unittest.TestCase):
    """Tests for parallel_corpus_to_documents on an en-es corpus file."""

    def setUp(self):
        document_path = os.path.join(data_path, "parallel-en-es.txt")
        A, B = parallel_corpus_to_documents(document_path)
        self.document_a = A
        self.document_b = B

    def test_same_length(self):
        # Both sides of the parallel corpus must align sentence-for-sentence.
        self.assertEqual(len(self.document_a), len(self.document_b))
        self.assertEqual(len(self.document_a), 250)

    def test_do_not_accept_non_tokenized_documents(self):
        # Input with untokenized punctuation (e.g. "John's?") must raise.
        _, tmpfile = tempfile.mkstemp()
        inputfile = codecs.open(tmpfile, "w", encoding="utf-8")
        inputfile.write("some non tokenized sentences.\n")
        inputfile.write("some non tokenized sentences.\n")
        inputfile.write("so, this is John's?\n")
        inputfile.write("so, this is John's?\n")
        inputfile.close()
        with self.assertRaises(ValueError):
            A, B = parallel_corpus_to_documents(tmpfile)
class TestTMXDocument(unittest.TestCase):
    """Tests for tmx_file_to_documents on a small en-es TMX file."""

    def setUp(self):
        document_path = os.path.join(data_path, "corpus-en-es.tmx")
        self.document_a, self.document_b = tmx_file_to_documents(document_path,
                                                                "en", "es")

    def test_correct_length(self):
        self.assertEqual(len(self.document_a), 20)
        self.assertEqual(len(self.document_b), 20)

    def test_correct_type(self):
        for a, b in zip(self.document_a, self.document_b):
            self.assertTrue(isinstance(a, Sentence))
            self.assertTrue(isinstance(b, Sentence))

    def test_swap_languages(self):
        # Swapping the language arguments must swap the returned documents.
        document_path = os.path.join(data_path, "corpus-en-es.tmx")
        swap_a, swap_b = tmx_file_to_documents(document_path, "es", "en")
        for x, y in zip(swap_a, self.document_b):
            self.assertEqual(x, y)
        for x, y in zip(swap_b, self.document_a):
            self.assertEqual(x, y)
class TestSrtDocument(unittest.TestCase):
    """Tests for srt_to_document (subtitle files -> Sentences).

    Renamed from a duplicate "TestTMXDocument": the copy-pasted class name
    shadowed the TMX tests defined earlier in this module, so only one of
    the two classes was ever collected by unittest.
    """

    def test_empty_string(self):
        # No subtitles -> empty document.
        d = list(srt_to_document(""))
        self.assertEqual(d, [])

    def test_ok_from_file(self):
        filepath = os.path.join(data_path, "en.srt")
        filedata = open(filepath).read()
        d = list(srt_to_document(filedata))
        self.assertEqual(len(d), 4)
        for sentence in d:
            self.assertIsInstance(sentence, Sentence)
            for word in sentence:
                self.assertIsInstance(word, unicode)
                # Formatting markup like <i> must be stripped from tokens.
                self.assertNotIn("<i>", word)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
pombredanne/yalign
|
tests/test_input_conversion.py
|
Python
|
bsd-3-clause
| 11,043
|
[
"VisIt"
] |
d8ed48e4b7715caf75d833a0786d2743efa8170590e1d13b1ac76dbe25d63868
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#######################################################################
#
# MetrixWeatherSetup for Enigma2
# Coded by Sinthex IT-Solutions (c) 2014
# www.open-store.net
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Screens.Screen import Screen
from Components.Renderer import OMMetrixWeatherWidget
from Components.Label import Label
from Components.config import ConfigSelection, getConfigListEntry, config, configfile, ConfigSubsection, ConfigNumber, ConfigSelectionNumber, ConfigYesNo, ConfigText
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
class OMMetrixWeatherSetup(Screen,ConfigListScreen):
    """Enigma2 setup screen for the MetrixWeather widget.

    Green saves the config values and forces a refresh by resetting the
    lastUpdated timestamp; red cancels all pending edits.
    """
    # Embedded skin XML: layout of the setup dialog.
    skin = """
<screen name="MetrixWeatherSetup" position="160,150" size="450,200" title="Weather Setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="10,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="300,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="10,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="300,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="config" position="10,44" size="430,146" />
</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Save"))
        ConfigListScreen.__init__(self, [])
        self.initConfigList()
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"], {
            "green": self.save,
            "red": self.exit,
            "cancel": self.close},-1)

    def save(self):
        # Reset the timestamp so the widget fetches fresh weather data
        # immediately after the new settings are saved.
        config.plugins.MetrixWeather.lastUpdated.value = "2000-01-01 01:01:01"
        config.plugins.MetrixWeather.save()
        configfile.save()
        self.close()

    def initConfigList(self):
        # Entries without a config object act as spacers / info lines.
        self.list = []
        self.list.append(getConfigListEntry(_("Show Weather Widget"), config.plugins.MetrixWeather.enabled))
        self.list.append(getConfigListEntry(_(" ")))
        self.list.append(getConfigListEntry(_("Weather ID"), config.plugins.MetrixWeather.woeid))
        self.list.append(getConfigListEntry(_("Get your Weather ID on weather.open-store.net")))
        self.list.append(getConfigListEntry(_(" ")))
        self.list.append(getConfigListEntry(_("Unit"), config.plugins.MetrixWeather.tempUnit))
        self["config"].setList(self.list)

    def exit(self):
        # Revert any unsaved edits; spacer entries have no config to cancel.
        for x in self["config"].list:
            if len(x) > 1:
                x[1].cancel()
            else:
                pass
        self.close()
|
idrogeno/IdroMips
|
lib/python/Screens/OMMetrixWeatherSetup.py
|
Python
|
gpl-2.0
| 3,249
|
[
"VisIt"
] |
220d35c8f07536cac657ad7c2cef0bd90c0d1969ddbd8991b8127f7d3cb7588f
|
#
# @file TestParameterRule.py
# @brief ParameterRule unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestParameterRule.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestParameterRule(unittest.TestCase):
    # NOTE: this file is auto-generated from the C test suite (see header);
    # the unusual idioms below come from the mechanical translation.

    global PR
    PR = None

    def setUp(self):
        # An SBML L1 "parameter rule" is modelled as an AssignmentRule
        # carrying the SBML_PARAMETER_RULE L1 type code.
        self.PR = libsbml.AssignmentRule(1,2)
        self.PR.setL1TypeCode(libsbml.SBML_PARAMETER_RULE)
        if (self.PR == None):
            pass
        pass

    def tearDown(self):
        # Translated C-style "free": drop the only reference.
        _dummyList = [ self.PR ]; _dummyList[:] = []; del _dummyList
        pass

    def test_ParameterRule_create(self):
        # A freshly created rule has empty formula/units/variable and
        # scalar type, with no notes or annotation attached.
        self.assert_( self.PR.getTypeCode() == libsbml.SBML_ASSIGNMENT_RULE )
        self.assert_( self.PR.getL1TypeCode() == libsbml.SBML_PARAMETER_RULE )
        self.assert_( self.PR.getNotes() == None )
        self.assert_( self.PR.getAnnotation() == None )
        self.assert_( self.PR.getFormula() == "" )
        self.assert_( self.PR.getUnits() == "" )
        self.assert_( self.PR.getVariable() == "" )
        self.assert_( self.PR.getType() == libsbml.RULE_TYPE_SCALAR )
        self.assertEqual( False, self.PR.isSetVariable() )
        self.assertEqual( False, self.PR.isSetUnits() )
        pass

    def test_ParameterRule_free_NULL(self):
        # Freeing NULL must be a no-op (C-test translation).
        _dummyList = [ None ]; _dummyList[:] = []; del _dummyList
        pass

    def test_ParameterRule_setName(self):
        # setVariable stores the name; setting "" unsets it again.
        name = "cell";
        self.PR.setVariable(name)
        self.assert_(( name == self.PR.getVariable() ))
        self.assertEqual( True, self.PR.isSetVariable() )
        if (self.PR.getVariable() == name):
            pass
        c = self.PR.getVariable()
        self.PR.setVariable(c)
        self.assert_(( name == self.PR.getVariable() ))
        self.PR.setVariable("")
        self.assertEqual( False, self.PR.isSetVariable() )
        if (self.PR.getVariable() != None):
            pass
        pass

    def test_ParameterRule_setUnits(self):
        # setUnits stores the unit id; setting "" unsets it again.
        units = "cell";
        self.PR.setUnits(units)
        self.assert_(( units == self.PR.getUnits() ))
        self.assertEqual( True, self.PR.isSetUnits() )
        if (self.PR.getUnits() == units):
            pass
        self.PR.setUnits(self.PR.getUnits())
        self.assert_(( units == self.PR.getUnits() ))
        self.PR.setUnits("")
        self.assertEqual( False, self.PR.isSetUnits() )
        if (self.PR.getUnits() != None):
            pass
        pass
def suite():
    """Build the TestSuite for this module (translated-C test convention)."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestParameterRule))
    return suite
# Exit status mirrors the C test harness: 0 on success, 1 on failure.
if __name__ == "__main__":
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestParameterRule.py
|
Python
|
gpl-3.0
| 3,753
|
[
"VisIt"
] |
02732933a978b8d4c63b22c47d15676d435d1e12a302873e228519e97c484799
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format [('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com')]
ADMINS = []
# List of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = []
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = [
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('ast', gettext_noop('Asturian')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('io', gettext_noop('Ido')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('mr', gettext_noop('Marathi')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
]
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ["he", "ar", "fa", "ur"]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = []
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
EMAIL_SSL_CERTFILE = None
EMAIL_SSL_KEYFILE = None
EMAIL_TIMEOUT = None
# List of strings representing installed apps.
INSTALLED_APPS = []
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = []
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
]
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
# 'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
]
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
TEMPLATES = []
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = [
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# ]
DISALLOWED_USER_AGENTS = []
ABSOLUTE_URL_OVERRIDES = {}
# List of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ['/home/html', '/var/www']
ALLOWED_INCLUDE_ROOTS = []
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = [
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon.ico$),
# re.compile(r'^/robots.txt$),
# re.compile(r'^/phpmyadmin/),
# re.compile(r'\.(cgi|php|pl)$'),
# ]
IGNORABLE_404_URLS = []
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = [
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
]
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Whether to use the X-Forwarded-Host header in preference to the Host header.
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal server
# (runserver) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
]
############
# SESSIONS #
############
# Cache to store session data if using the cache session backend.
SESSION_CACHE_ALIAS = 'default'
# Cookie name. This can be whatever you want.
SESSION_COOKIE_NAME = 'sessionid'
# Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2
# A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_DOMAIN = None
# Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_SECURE = False
# The path of the session cookie.
SESSION_COOKIE_PATH = '/'
# Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_COOKIE_HTTPONLY = True
# Whether to save the session data on every request.
SESSION_SAVE_EVERY_REQUEST = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# The module to store session data
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# Directory to store session files if using the file session module. If None,
# the backend will use a sensible default.
SESSION_FILE_PATH = None
# class to serialize session data
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# Settings used by the cache middleware: key prefix, page timeout in seconds,
# and which of the CACHES aliases to store pages in.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
##################
# AUTHENTICATION #
##################
# The model to use to represent a User.
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
# Default URLs used by the auth views when no explicit "next" is given.
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
    'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
]
# Validators applied to new passwords; empty means no validation.
AUTH_PASSWORD_VALIDATORS = []
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN'
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Apps that don't need to be serialized at test database creation time
# (only apps with migrations are to start with)
TEST_NON_SERIALIZED_APPS = []
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = []
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = []
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running server.
SILENCED_SYSTEM_CHECKS = []
#######################
# SECURITY MIDDLEWARE #
#######################
# Defaults consumed by the security middleware (HSTS, sniffing/XSS headers,
# and HTTPS redirection); all disabled unless explicitly turned on.
SECURE_BROWSER_XSS_FILTER = False
SECURE_CONTENT_TYPE_NOSNIFF = False
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_HSTS_SECONDS = 0
SECURE_REDIRECT_EXEMPT = []
SECURE_SSL_HOST = None
SECURE_SSL_REDIRECT = False
jhoos/django
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 22,662
|
[
"VisIt"
] |
c33ff9a26eb8751cf2d618417141cd1bee600397db36438a4ef7dd36afeffdde
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subtype assigner for lib2to3 trees.
This module assigns extra type information to the lib2to3 trees. This
information is more specific than whether something is an operator or an
identifier. For instance, it can specify if a node in the tree is part of a
subscript.
AssignSubtypes(): the main function exported by this module.
Annotations:
subtype: The subtype of a pytree token. See 'format_token' module for a list
of subtypes.
"""
from lib2to3 import pytree
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
def AssignSubtypes(tree):
  """Annotate every token in *tree* with a subtype, modifying it in place.

  Arguments:
    tree: the top-level pytree node to annotate with subtypes.
  """
  _SubtypeAssigner().Visit(tree)
# Map tokens in argument lists to their respective subtype.
# Used by _SubtypeAssigner._ProcessArgLists as a fallback lookup; tokens not
# listed here get Subtype.NONE.
_ARGLIST_TOKEN_TO_SUBTYPE = {
    '=': format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
    ':': format_token.Subtype.TYPED_NAME,
    '*': format_token.Subtype.VARARGS_STAR,
    '**': format_token.Subtype.KWARGS_STAR_STAR,
}
class _SubtypeAssigner(pytree_visitor.PyTreeVisitor):
  """_SubtypeAssigner - see file-level docstring for detailed description.

  The subtype is added as an annotation to the pytree token.
  """
  def Visit_dictsetmaker(self, node):  # pylint: disable=invalid-name
    # dictsetmaker ::= (test ':' test (comp_for |
    #                   (',' test ':' test)* [','])) |
    #                  (test (comp_for | (',' test)* [',']))
    for child in node.children:
      self.Visit(child)
    # First pass: detect whether this is a comprehension and/or a dict
    # (a ':' or '**' child marks a dict rather than a set).
    comp_for = False
    dict_maker = False
    for child in node.children:
      if pytree_utils.NodeName(child) == 'comp_for':
        comp_for = True
        _AppendFirstLeafTokenSubtype(child,
                                     format_token.Subtype.DICT_SET_GENERATOR)
      elif pytree_utils.NodeName(child) in ('COLON', 'DOUBLESTAR'):
        dict_maker = True
    # Second pass (dict literals only): tag keys and values. `last_was_colon`
    # tracks that the previous child was ':' (so this child is a value);
    # `unpacking` tracks that we are inside a '**' unpacked entry.
    if not comp_for and dict_maker:
      last_was_colon = False
      unpacking = False
      for child in node.children:
        if pytree_utils.NodeName(child) == 'DOUBLESTAR':
          _AppendFirstLeafTokenSubtype(child,
                                       format_token.Subtype.KWARGS_STAR_STAR)
        if last_was_colon:
          if style.Get('INDENT_DICTIONARY_VALUE'):
            _InsertPseudoParentheses(child)
          else:
            _AppendFirstLeafTokenSubtype(child,
                                         format_token.Subtype.DICTIONARY_VALUE)
        elif (isinstance(child, pytree.Node) or
              (not child.value.startswith('#') and child.value not in '{:,')):
          # Mark the first leaf of a key entry as a DICTIONARY_KEY. We
          # normally want to split before them if the dictionary cannot exist
          # on a single line.
          if not unpacking or pytree_utils.FirstLeafNode(child).value == '**':
            _AppendFirstLeafTokenSubtype(child,
                                         format_token.Subtype.DICTIONARY_KEY)
          _AppendSubtypeRec(child, format_token.Subtype.DICTIONARY_KEY_PART)
        last_was_colon = pytree_utils.NodeName(child) == 'COLON'
        if pytree_utils.NodeName(child) == 'DOUBLESTAR':
          unpacking = True
        elif last_was_colon:
          unpacking = False
  def Visit_expr_stmt(self, node):  # pylint: disable=invalid-name
    # expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist)
    #               | ('=' (yield_expr|testlist_star_expr))*)
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '=':
        _AppendTokenSubtype(child, format_token.Subtype.ASSIGN_OPERATOR)
  def Visit_or_test(self, node):  # pylint: disable=invalid-name
    # or_test ::= and_test ('or' and_test)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == 'or':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_and_test(self, node):  # pylint: disable=invalid-name
    # and_test ::= not_test ('and' not_test)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == 'and':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_not_test(self, node):  # pylint: disable=invalid-name
    # not_test ::= 'not' not_test | comparison
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == 'not':
        _AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
  def Visit_comparison(self, node):  # pylint: disable=invalid-name
    # comparison ::= expr (comp_op expr)*
    # comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not'
    for child in node.children:
      self.Visit(child)
      if (isinstance(child, pytree.Leaf) and
          child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}):
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
      elif pytree_utils.NodeName(child) == 'comp_op':
        # Multi-token operators ('not in', 'is not') arrive as a comp_op node;
        # mark each of their leaves.
        for grandchild in child.children:
          _AppendTokenSubtype(grandchild, format_token.Subtype.BINARY_OPERATOR)
  def Visit_star_expr(self, node):  # pylint: disable=invalid-name
    # star_expr ::= '*' expr
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '*':
        _AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
        _AppendTokenSubtype(child, format_token.Subtype.VARARGS_STAR)
  def Visit_expr(self, node):  # pylint: disable=invalid-name
    # expr ::= xor_expr ('|' xor_expr)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '|':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_xor_expr(self, node):  # pylint: disable=invalid-name
    # xor_expr ::= and_expr ('^' and_expr)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '^':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_and_expr(self, node):  # pylint: disable=invalid-name
    # and_expr ::= shift_expr ('&' shift_expr)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '&':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_shift_expr(self, node):  # pylint: disable=invalid-name
    # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}:
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_arith_expr(self, node):  # pylint: disable=invalid-name
    # arith_expr ::= term (('+'|'-') term)*
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value in '+-':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_term(self, node):  # pylint: disable=invalid-name
    # term ::= factor (('*'|'/'|'%'|'//') factor)*
    for child in node.children:
      self.Visit(child)
      if (isinstance(child, pytree.Leaf) and
          child.value in {'*', '/', '%', '//'}):
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_factor(self, node):  # pylint: disable=invalid-name
    # factor ::= ('+'|'-'|'~') factor | power
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value in '+-~':
        _AppendTokenSubtype(child, format_token.Subtype.UNARY_OPERATOR)
  def Visit_power(self, node):  # pylint: disable=invalid-name
    # power ::= atom trailer* ['**' factor]
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '**':
        _AppendTokenSubtype(child, format_token.Subtype.BINARY_OPERATOR)
  def Visit_trailer(self, node):  # pylint: disable=invalid-name
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value in '[]':
        _AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_BRACKET)
  def Visit_subscript(self, node):  # pylint: disable=invalid-name
    # subscript ::= test | [test] ':' [test] [sliceop]
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == ':':
        _AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
  def Visit_sliceop(self, node):  # pylint: disable=invalid-name
    # sliceop ::= ':' [test]
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == ':':
        _AppendTokenSubtype(child, format_token.Subtype.SUBSCRIPT_COLON)
  def Visit_argument(self, node):  # pylint: disable=invalid-name
    # argument ::=
    #     test [comp_for] | test '=' test
    self._ProcessArgLists(node)
  def Visit_arglist(self, node):  # pylint: disable=invalid-name
    # arglist ::=
    #     (argument ',')* (argument [',']
    #     | '*' test (',' argument)* [',' '**' test]
    #     | '**' test)
    self._ProcessArgLists(node)
    _SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
                       format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
  def Visit_tname(self, node):  # pylint: disable=invalid-name
    self._ProcessArgLists(node)
    _SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
                       format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
  def Visit_decorator(self, node):  # pylint: disable=invalid-name
    # decorator ::=
    #     '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
    for child in node.children:
      if isinstance(child, pytree.Leaf) and child.value == '@':
        _AppendTokenSubtype(child, subtype=format_token.Subtype.DECORATOR)
      self.Visit(child)
  def Visit_funcdef(self, node):  # pylint: disable=invalid-name
    # funcdef ::=
    #     'def' NAME parameters ['->' test] ':' suite
    # Only the first NAME after 'def' is the function name; stop there.
    for child in node.children:
      if pytree_utils.NodeName(child) == 'NAME' and child.value != 'def':
        _AppendTokenSubtype(child, format_token.Subtype.FUNC_DEF)
        break
    for child in node.children:
      self.Visit(child)
  def Visit_typedargslist(self, node):  # pylint: disable=invalid-name
    # typedargslist ::=
    #     ((tfpdef ['=' test] ',')*
    #          ('*' [tname] (',' tname ['=' test])* [',' '**' tname]
    #           | '**' tname)
    #     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
    self._ProcessArgLists(node)
    _SetArgListSubtype(node, format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN,
                       format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST)
    # Track whether we're inside a type-annotated parameter so the following
    # '=' (a typed default) gets TYPED_NAME instead of the generic subtype.
    tname = False
    for child in node.children:
      if pytree_utils.NodeName(child) == 'tname':
        tname = True
        _SetArgListSubtype(child, format_token.Subtype.TYPED_NAME,
                           format_token.Subtype.TYPED_NAME_ARG_LIST)
      if not isinstance(child, pytree.Leaf):
        continue
      if child.value == ',':
        tname = False
      elif child.value == '=' and tname:
        _AppendTokenSubtype(child, subtype=format_token.Subtype.TYPED_NAME)
        tname = False
  def Visit_varargslist(self, node):  # pylint: disable=invalid-name
    # varargslist ::=
    #     ((vfpdef ['=' test] ',')*
    #          ('*' [vname] (',' vname ['=' test])* [',' '**' vname]
    #           | '**' vname)
    #     | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
    self._ProcessArgLists(node)
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf) and child.value == '=':
        _AppendTokenSubtype(child, format_token.Subtype.VARARGS_LIST)
  def Visit_comp_for(self, node):  # pylint: disable=invalid-name
    # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
    _AppendSubtypeRec(node, format_token.Subtype.COMP_FOR)
    # Mark the previous node as COMP_EXPR unless this is a nested comprehension
    # as these will have the outer comprehension as their previous node.
    attr = pytree_utils.GetNodeAnnotation(node.parent,
                                          pytree_utils.Annotation.SUBTYPE)
    if not attr or format_token.Subtype.COMP_FOR not in attr:
      _AppendSubtypeRec(node.parent.children[0], format_token.Subtype.COMP_EXPR)
    self.DefaultNodeVisit(node)
  def Visit_comp_if(self, node):  # pylint: disable=invalid-name
    # comp_if ::= 'if' old_test [comp_iter]
    _AppendSubtypeRec(node, format_token.Subtype.COMP_IF)
    self.DefaultNodeVisit(node)
  def _ProcessArgLists(self, node):
    """Common method for processing argument lists."""
    # Visit children, then tag leaf tokens via the module-level lookup table;
    # unknown tokens get Subtype.NONE.
    for child in node.children:
      self.Visit(child)
      if isinstance(child, pytree.Leaf):
        _AppendTokenSubtype(
            child,
            subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value,
                                                  format_token.Subtype.NONE))
def _SetArgListSubtype(node, node_subtype, list_subtype):
  """Set named assign subtype on elements in a arg list."""

  def _ContainsSubtype(tree):
    """Return True if some leaf (skipping nested arglists) has node_subtype."""
    if isinstance(tree, pytree.Leaf):
      annotations = pytree_utils.GetNodeAnnotation(
          tree, pytree_utils.Annotation.SUBTYPE, set())
      return node_subtype in annotations
    found = False
    for subtree in tree.children:
      if pytree_utils.NodeName(subtree) != 'arglist':
        found |= _ContainsSubtype(subtree)
    return found

  if _ContainsSubtype(node):
    for child in node.children:
      if pytree_utils.NodeName(child) != 'COMMA':
        _AppendFirstLeafTokenSubtype(child, list_subtype)
def _AppendTokenSubtype(node, subtype):
  """Append the given subtype to the token's SUBTYPE annotation set."""
  # NOTE(review): despite the previous docstring, this does not check whether
  # the subtype was already set — AppendNodeAnnotation is called
  # unconditionally.
  pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE,
                                    subtype)
def _AppendFirstLeafTokenSubtype(node, subtype):
  """Append *subtype* to the first leaf token underneath *node*."""
  # Walk down the leftmost spine of the tree until a leaf is reached.
  current = node
  while not isinstance(current, pytree.Leaf):
    current = current.children[0]
  _AppendTokenSubtype(current, subtype)
def _AppendSubtypeRec(node, subtype, force=True):
  """Append *subtype* to every leaf token in the subtree rooted at *node*."""
  # Iterative depth-first, left-to-right traversal (same visit order as the
  # recursive formulation). The `force` parameter is kept for interface
  # compatibility; it does not alter the traversal.
  stack = [node]
  while stack:
    current = stack.pop()
    if isinstance(current, pytree.Leaf):
      _AppendTokenSubtype(current, subtype)
    else:
      stack.extend(reversed(current.children))
def _InsertPseudoParentheses(node):
  """Insert pseudo parentheses so that dicts can be formatted correctly."""
  # Detach a trailing comment (if any) so the closing paren can be placed
  # before it; it's re-appended after the parens are inserted.
  comment_node = None
  if isinstance(node, pytree.Node):
    if node.children[-1].type == token.COMMENT:
      comment_node = node.children[-1].clone()
      node.children[-1].remove()
  first = pytree_utils.FirstLeafNode(node)
  last = pytree_utils.LastLeafNode(node)
  if first == last and first.type == token.COMMENT:
    # A comment was inserted before the value, which is a pytree.Leaf.
    # Encompass the dictionary's value into an ATOM node.
    last = first.next_sibling
    new_node = pytree.Node(syms.atom, [first.clone(), last.clone()])
    node.replace(new_node)
    node = new_node
    last.remove()
    first = pytree_utils.FirstLeafNode(node)
    last = pytree_utils.LastLeafNode(node)
  # Fabricate '(' just before the first leaf and ')' just after the last leaf,
  # with synthetic (lineno, column) contexts.
  lparen = pytree.Leaf(
      token.LPAR, u'(', context=('', (first.get_lineno(), first.column - 1)))
  last_lineno = last.get_lineno()
  if last.type == token.STRING and '\n' in last.value:
    # Multiline string: the closing paren belongs on the string's last line.
    last_lineno += last.value.count('\n')
  if last.type == token.STRING and '\n' in last.value:
    last_column = len(last.value.split('\n')[-1]) + 1
  else:
    last_column = last.column + len(last.value) + 1
  rparen = pytree.Leaf(
      token.RPAR, u')', context=('', (last_lineno, last_column)))
  # Mark both parens as pseudo so later passes know they weren't in the input.
  lparen.is_pseudo = True
  rparen.is_pseudo = True
  if isinstance(node, pytree.Node):
    node.insert_child(0, lparen)
    node.append_child(rparen)
    if comment_node:
      node.append_child(comment_node)
    _AppendFirstLeafTokenSubtype(node, format_token.Subtype.DICTIONARY_VALUE)
  else:
    # Leaf value: wrap it in a new atom node '(' value ')'.
    clone = node.clone()
    new_node = pytree.Node(syms.atom, [lparen, clone, rparen])
    node.replace(new_node)
    _AppendFirstLeafTokenSubtype(clone, format_token.Subtype.DICTIONARY_VALUE)
|
sbc100/yapf
|
yapf/yapflib/subtype_assigner.py
|
Python
|
apache-2.0
| 17,304
|
[
"VisIt"
] |
e36bc1634d9a5fbaf9b22608410c984ecab4f6c9b374051e10d011f61bf6be96
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
from zeroinstall import support
def get_fetch_info(policy, impl):
	"""Get the text for a Fetch column."""
	# No implementation selected: nothing to display.
	if impl is None:
		return ""
	if policy.get_cached(impl):
		# Already available; say where it came from.
		if impl.local_path:
			return _('(local)')
		if impl.id.startswith('package:'):
			return _('(package)')
		return _('(cached)')
	# Not cached: show the download size of the best source, if any.
	source = policy.fetcher.get_best_source(impl)
	if source is None:
		return _('(unavailable)')
	return support.pretty_size(source.size)
|
timdiels/zeroinstall
|
zeroinstall/0launch-gui/utils.py
|
Python
|
lgpl-2.1
| 577
|
[
"VisIt"
] |
012e12199ebce6f0d18f4c960bbe2bb633fa3e2536cd1b38c4a89c9978df851c
|
import unittest
import numpy as np
import pysal
from pysal.spreg.twosls_sp_regimes import GM_Lag_Regimes
from pysal.spreg import utils
from pysal.spreg.twosls_sp import GM_Lag
from pysal.common import RTOL
class TestGMLag_Regimes(unittest.TestCase):
def setUp(self):
self.w = pysal.queen_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
y = np.array(self.db.by_col("CRIME"))
self.y = np.reshape(y, (49,1))
self.r_var = 'NSA'
self.regimes = self.db.by_col(self.r_var)
def test___init__(self):
#Matches SpaceStat
X = []
X.append(self.db.by_col("INC"))
X.append(self.db.by_col("HOVAL"))
self.X = np.array(X).T
reg = GM_Lag_Regimes(self.y, self.X, self.regimes, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
betas = np.array([[ 45.14892906],
[ -1.42593383],
[ -0.11501037],
[ 40.99023016],
[ -0.81498302],
[ -0.28391409],
[ 0.4736163 ]])
np.testing.assert_allclose(reg.betas, betas,RTOL)
e_5 = np.array([[ -1.47960519],
[ -7.93748769],
[ -5.88561835],
[-13.37941105],
[ 5.2524303 ]])
np.testing.assert_allclose(reg.e_pred[0:5], e_5,RTOL)
h_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003 , 0. , 0. , 18.594 , 35.4585005]])
np.testing.assert_allclose(reg.h[0]*np.eye(10), h_0)
self.assertEqual(reg.k, 7)
self.assertEqual(reg.kstar, 1)
np.testing.assert_allclose(reg.mean_y, 35.128823897959187,RTOL)
self.assertEqual(reg.n, 49)
np.testing.assert_allclose(reg.pr2, 0.6572182131915739,RTOL)
np.testing.assert_allclose(reg.pr2_e, 0.5779687278635434,RTOL)
pfora1a2 = np.array([ -2.15017629, -0.30169328, -0.07603704, -22.06541809,
0.45738058, 0.02805828, 0.39073923])
np.testing.assert_allclose(reg.pfora1a2[0], pfora1a2,RTOL)
predy_5 = np.array([[ 13.93216104],
[ 23.46424269],
[ 34.43510955],
[ 44.32473878],
[ 44.39117516]])
np.testing.assert_allclose(reg.predy[0:5], predy_5,RTOL)
predy_e_5 = np.array([[ 17.20558519],
[ 26.73924169],
[ 36.51239935],
[ 45.76717105],
[ 45.4790797 ]])
np.testing.assert_allclose(reg.predy_e[0:5], predy_e_5,RTOL)
q_5 = np.array([[ 0. , 0. , 18.594 , 35.4585005]])
np.testing.assert_allclose(reg.q[0]*np.eye(4), q_5, RTOL)
self.assertEqual(reg.robust, 'unadjusted')
np.testing.assert_allclose(reg.sig2n_k, 109.76462904625834,RTOL)
np.testing.assert_allclose(reg.sig2n, 94.08396775393571,RTOL)
np.testing.assert_allclose(reg.sig2, 109.76462904625834,RTOL)
np.testing.assert_allclose(reg.std_y, 16.732092091229699,RTOL)
u_5 = np.array([[ 1.79381896],
[ -4.66248869],
[ -3.80832855],
[-11.93697878],
[ 6.34033484]])
np.testing.assert_allclose(reg.u[0:5], u_5,RTOL)
np.testing.assert_allclose(reg.utu, 4610.11441994285,RTOL)
varb = np.array([ 1.23841820e+00, -3.65620114e-02, -1.21919663e-03,
1.00057547e+00, -2.07403182e-02, -1.27232693e-03,
-1.77184084e-02])
np.testing.assert_allclose(reg.varb[0], varb,RTOL)
vm = np.array([ 1.35934514e+02, -4.01321561e+00, -1.33824666e-01,
1.09827796e+02, -2.27655334e+00, -1.39656494e-01,
-1.94485452e+00])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
x_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003]])
np.testing.assert_allclose(reg.x[0]*np.eye(6), x_0,RTOL)
y_5 = np.array([[ 15.72598 ],
[ 18.801754],
[ 30.626781],
[ 32.38776 ],
[ 50.73151 ]])
np.testing.assert_allclose(reg.y[0:5], y_5,RTOL)
yend_5 = np.array([[ 24.7142675 ],
[ 26.24684033],
[ 29.411751 ],
[ 34.64647575],
[ 40.4653275 ]])
np.testing.assert_allclose(reg.yend[0:5]*np.array([[1]]), yend_5,RTOL)
z_0 = np.array([[ 0. , 0. , 0. , 1. , 19.531 ,
80.467003 , 24.7142675]])
np.testing.assert_allclose(reg.z[0]*np.eye(7), z_0,RTOL)
zthhthi = np.array([ 1.00000000e+00, -2.35922393e-16, 5.55111512e-17,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
-4.44089210e-16, 2.22044605e-16, 0.00000000e+00,
0.00000000e+00])
# np.testing.assert_allclose(reg.zthhthi[0], zthhthi, RTOL)
np.testing.assert_array_almost_equal(reg.zthhthi[0], zthhthi)
chow_regi = np.array([[ 0.19692667, 0.65721307],
[ 0.5666492 , 0.45159351],
[ 0.45282066, 0.5009985 ]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 0.82409867601863462,RTOL)
def test_init_discbd(self):
#Matches SpaceStat.
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, lag_q=False, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ 42.7266306 ],
[ -0.15552345],
[ 37.70545276],
[ -0.5341577 ],
[ -0.68305796],
[ -0.37106077],
[ 0.55809516]])
np.testing.assert_allclose(tbetas, reg.betas,RTOL)
vm = np.array([ 270.62979422, 3.62539081, 327.89638627, 6.24949355,
-5.25333106, -6.01743515, -4.19290074])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
e_3 = np.array([[-0.33142796],
[-9.51719607],
[-7.86272153]])
np.testing.assert_allclose(reg.e_pred[0:3], e_3,RTOL)
u_3 = np.array([[ 4.51839601],
[-5.67363147],
[-5.1927562 ]])
np.testing.assert_allclose(reg.u[0:3], u_3,RTOL)
predy_3 = np.array([[ 11.20758399],
[ 24.47538547],
[ 35.8195372 ]])
np.testing.assert_allclose(reg.predy[0:3], predy_3,RTOL)
predy_e_3 = np.array([[ 16.05740796],
[ 28.31895007],
[ 38.48950253]])
np.testing.assert_allclose(reg.predy_e[0:3], predy_e_3,RTOL)
chow_regi = np.array([[ 0.13130991, 0.71707772],
[ 0.04740966, 0.82763357],
[ 0.15474413, 0.6940423 ]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 0.31248100032096549,RTOL)
def test_lag_q(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, sig2n_k=True, regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ 37.87698329],
[ -0.89426982],
[ 31.4714777 ],
[ -0.71640525],
[ -0.28494432],
[ -0.2294271 ],
[ 0.62996544]])
np.testing.assert_allclose(tbetas, reg.betas,RTOL)
vm = np.array([ 128.25714554, -0.38975354, 95.7271044 , -1.8429218 ,
-1.75331978, -0.18240338, -1.67767464])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
chow_regi = np.array([[ 0.43494049, 0.50957463],
[ 0.02089281, 0.88507135],
[ 0.01180501, 0.91347943]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 0.54288190938307757,RTOL)
def test_all_regi(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, regime_lag_sep=False, regime_err_sep=True)
tbetas = np.array([[ 37.87698329, -0.89426982, 31.4714777 , -0.71640525,
-0.28494432, -0.2294271 , 0.62996544]])
np.testing.assert_allclose(tbetas, reg.betas.T,RTOL)
vm = np.array([ 70.38291551, -0.64868787, 49.25453215, -0.62851534,
-0.75413453, -0.12674433, -0.97179236])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
e_3 = np.array([[-2.66997799],
[-7.69786264],
[-4.39412782]])
np.testing.assert_allclose(reg.e_pred[0:3], e_3,RTOL)
u_3 = np.array([[ 1.13879007],
[-3.76873198],
[-1.89671717]])
np.testing.assert_allclose(reg.u[0:3], u_3,RTOL)
predy_3 = np.array([[ 14.58718993],
[ 22.57048598],
[ 32.52349817]])
np.testing.assert_allclose(reg.predy[0:3], predy_3,RTOL)
predy_e_3 = np.array([[ 18.39595799],
[ 26.49961664],
[ 35.02090882]])
np.testing.assert_allclose(reg.predy_e[0:3], predy_e_3,RTOL)
chow_regi = np.array([[ 0.60091096, 0.43823066],
[ 0.03006744, 0.8623373 ],
[ 0.01943727, 0.88912016]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 0.88634854058300516,RTOL)
def test_all_regi_sig2(self):
#Artficial:
n = 256
x1 = np.random.uniform(-10,10,(n,1))
x2 = np.random.uniform(1,5,(n,1))
q = x2 + np.random.normal(0,1,(n,1))
x = np.hstack((x1,x2))
y = np.dot(np.hstack((np.ones((n,1)),x)),np.array([[1],[0.5],[2]])) + np.random.normal(0,1,(n,1))
latt = int(np.sqrt(n))
w = pysal.lat2W(latt,latt)
w.transform='r'
regi = [0]*(n//2) + [1]*(n//2)
model = GM_Lag_Regimes(y, x1, regi, q=q, yend=x2, w=w, regime_lag_sep=True, regime_err_sep=True)
w1 = pysal.lat2W(latt//2,latt)
w1.transform='r'
model1 = GM_Lag(y[0:(n//2)].reshape((n//2),1), x1[0:(n//2)],yend=x2[0:(n//2)], q=q[0:(n//2)], w=w1)
model2 = GM_Lag(y[(n//2):n].reshape((n//2),1), x1[(n//2):n],yend=x2[(n//2):n], q=q[(n//2):n], w=w1)
tbetas = np.vstack((model1.betas, model2.betas))
np.testing.assert_allclose(model.betas,tbetas)
vm = np.hstack((model1.vm.diagonal(),model2.vm.diagonal()))
np.testing.assert_allclose(model.vm.diagonal(), vm,RTOL)
#Columbus:
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w,regime_lag_sep=True, regime_err_sep = True)
tbetas = np.array([[ 42.35827477],
[ -0.09472413],
[ -0.68794223],
[ 0.54482537],
[ 32.24228762],
[ -0.12304063],
[ -0.46840307],
[ 0.67108156]])
np.testing.assert_allclose(tbetas, reg.betas)
vm = np.array([ 200.92894859, 4.56244927, -4.85603079, -2.9755413 ,
0. , 0. , 0. , 0. ])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
e_3 = np.array([[ -1.32209547],
[-13.15611199],
[-11.62357696]])
np.testing.assert_allclose(reg.e_pred[0:3], e_3,RTOL)
u_3 = np.array([[ 6.99250069],
[-7.5665856 ],
[-7.04753328]])
np.testing.assert_allclose(reg.u[0:3], u_3,RTOL)
predy_3 = np.array([[ 8.73347931],
[ 26.3683396 ],
[ 37.67431428]])
np.testing.assert_allclose(reg.predy[0:3], predy_3,RTOL)
predy_e_3 = np.array([[ 17.04807547],
[ 31.95786599],
[ 42.25035796]])
np.testing.assert_allclose(reg.predy_e[0:3], predy_e_3,RTOL)
chow_regi = np.array([[ 1.51825373e-01, 6.96797034e-01],
[ 3.20105698e-04, 9.85725412e-01],
[ 8.58836996e-02, 7.69476896e-01],
[ 1.01357290e-01, 7.50206873e-01]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 0.38417230022512161,RTOL)
def test_fixed_const(self):
X = np.array(self.db.by_col("INC"))
X = np.reshape(X, (49,1))
yd = np.array(self.db.by_col("HOVAL"))
yd = np.reshape(yd, (49,1))
q = np.array(self.db.by_col("DISCBD"))
q = np.reshape(q, (49,1))
reg = GM_Lag_Regimes(self.y, X, self.regimes, yend=yd, q=q, w=self.w, constant_regi='one', regime_lag_sep=False, regime_err_sep=False)
tbetas = np.array([[ -0.37658823],
[ -0.9666079 ],
[ 35.5445944 ],
[ -0.45793559],
[ -0.24216904],
[ 0.62500602]])
np.testing.assert_allclose(tbetas, reg.betas,RTOL)
vm = np.array([ 1.4183697 , -0.05975784, -0.27161863, -0.62517245, 0.02266177,
0.00312976])
np.testing.assert_allclose(reg.vm[0], vm,RTOL)
e_3 = np.array([[ 0.17317815],
[-5.53766328],
[-3.82889307]])
np.testing.assert_allclose(reg.e_pred[0:3], e_3,RTOL)
u_3 = np.array([[ 3.10025518],
[-1.83150689],
[-1.49598494]])
np.testing.assert_allclose(reg.u[0:3], u_3,RTOL)
predy_3 = np.array([[ 12.62572482],
[ 20.63326089],
[ 32.12276594]])
np.testing.assert_allclose(reg.predy[0:3], predy_3,RTOL)
predy_e_3 = np.array([[ 15.55280185],
[ 24.33941728],
[ 34.45567407]])
np.testing.assert_allclose(reg.predy_e[0:3], predy_e_3,RTOL)
chow_regi = np.array([[ 1.85767047e-01, 6.66463269e-01],
[ 1.19445012e+01, 5.48089036e-04]])
np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
np.testing.assert_allclose(reg.chow.joint[0], 12.017256217621382,RTOL)
def test_names(self):
        """Check variable-name handling in GM_Lag_Regimes.

        User-supplied names must come back prefixed per regime ('0_', '1_'),
        spatially lagged instruments labelled with 'W_', and the global lag
        term as '_Global_W_<y>'.  Numeric results are validated as well.
        """
        y_var = 'CRIME'
        x_var = ['INC']
        x = np.array([self.db.by_col(name) for name in x_var]).T
        yd_var = ['HOVAL']
        yd = np.array([self.db.by_col(name) for name in yd_var]).T
        q_var = ['DISCBD']
        q = np.array([self.db.by_col(name) for name in q_var]).T
        r_var = 'NSA'
        reg = GM_Lag_Regimes(self.y, x, self.regimes, yend=yd, q=q, w=self.w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='columbus', name_w='columbus.gal', regime_lag_sep=False, regime_err_sep=False)
        # Previously validated reference values (relative tolerance RTOL).
        betas = np.array([[ 37.87698329],
                          [ -0.89426982],
                          [ 31.4714777 ],
                          [ -0.71640525],
                          [ -0.28494432],
                          [ -0.2294271 ],
                          [ 0.62996544]])
        np.testing.assert_allclose(reg.betas, betas,RTOL)
        vm = np.array([ 109.93469618, -0.33407447, 82.05180377, -1.57964725,
                        -1.50284553, -0.15634575, -1.43800683])
        np.testing.assert_allclose(reg.vm[0], vm,RTOL)
        chow_regi = np.array([[ 0.50743058, 0.47625326],
                              [ 0.02437494, 0.87593468],
                              [ 0.01377251, 0.9065777 ]])
        np.testing.assert_allclose(reg.chow.regi, chow_regi,RTOL)
        np.testing.assert_allclose(reg.chow.joint[0], 0.63336222761359162,RTOL)
        # Regime-prefixed names; lagged instruments get the 'W_' prefix.
        self.assertListEqual(reg.name_x, ['0_CONSTANT', '0_INC', '1_CONSTANT', '1_INC'])
        self.assertListEqual(reg.name_yend, ['0_HOVAL', '1_HOVAL', '_Global_W_CRIME'])
        self.assertListEqual(reg.name_q, ['0_DISCBD', '0_W_INC', '0_W_DISCBD', '1_DISCBD', '1_W_INC', '1_W_DISCBD'])
        self.assertEqual(reg.name_y, y_var)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
TaylorOshan/pysal
|
pysal/spreg/tests/test_twosls_sp_regimes.py
|
Python
|
bsd-3-clause
| 15,918
|
[
"COLUMBUS"
] |
cd8434f467f663af182d1cc5e9780c349be27bc1a33f6c443aafa498eb146286
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import collections
from kiwi.ui.forms import ChoiceField, DateField, TextField, MultiLineField
from stoqlib.api import api
from stoqlib.domain.person import Client, CreditCheckHistory
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.fields import PersonField
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class CreditCheckHistoryEditor(BaseEditor):
    """Editor dialog for a client's credit check history entries."""

    model_type = CreditCheckHistory
    model_name = _("Client Credit Check History")
    size = (400, -1)

    @cached_property()
    def fields(self):
        """Kiwi form field definitions for this editor."""
        return collections.OrderedDict(
            client_id=PersonField(_('Client'), proxy=True, person_type=Client,
                                  mandatory=True),
            identifier=TextField(_('Identifier'), proxy=True, mandatory=True),
            # i18n fix: 'Status' was the only field label not marked for
            # translation with _().
            status=ChoiceField(_('Status'), values=self.get_status_options(),
                               mandatory=True),
            check_date=DateField(_('Date'), proxy=True),
            user=ChoiceField(_('User')),
            notes=MultiLineField(_('Notes'), proxy=True),
        )

    def __init__(self, store, model, client, visual_mode=None):
        self._client = client
        BaseEditor.__init__(self, store, model, visual_mode)
        if visual_mode or client:
            # The client is fixed when viewing, or when supplied by the
            # caller, so hide the add/edit buttons next to the field.
            self.client_id_add_button.hide()
            self.client_id_edit_button.hide()
        if self.model.client:
            self.set_description(_('client credit check history for %s') %
                                 self.model.client.person.name)
            self.client_id.set_sensitive(False)
        else:
            self.set_description(_('client credit check history'))

    def create_model(self, store):
        """Create a fresh history entry, owned by the current user."""
        return CreditCheckHistory(check_date=localtoday().date(),
                                  identifier=u'',
                                  status=CreditCheckHistory.STATUS_NOT_INCLUDED,
                                  client=self._client,
                                  notes=u'',
                                  user=api.get_current_user(self.store),
                                  store=store)

    def setup_proxies(self):
        self._fill_user_field()

    def _fill_user_field(self):
        # The recording user is fixed: show it, but never allow edits.
        self.user.prefill([(self.model.user.person.name,
                            self.model.user)])
        self.user.set_sensitive(False)

    @classmethod
    def get_status_options(cls):
        """Return (label, value) pairs for the status combo box."""
        return [(value, key) for key, value in CreditCheckHistory.statuses.items()]
|
tiagocardosos/stoq
|
stoqlib/gui/editors/creditcheckhistoryeditor.py
|
Python
|
gpl-2.0
| 3,510
|
[
"VisIt"
] |
490f5d2bc7e1064549982ce3693289c61c9df1e10696ae9cd2983142d2319d05
|
""" The Matcher service provides an interface for matching jobs to pilots
It uses a Matcher and a Limiter object that encapsulates the matching logic.
It connects to JobDB, TaskQueueDB, JobLoggingDB, and PilotAgentsDB.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.Client.Matcher import Matcher
from DIRAC.WorkloadManagementSystem.Client.Limiter import Limiter
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
class MatcherHandler(RequestHandler):
  """DISET service handler for the Matcher service.

  Serves jobs to requesting pilots and exposes task-queue queries.  All DB
  helpers are class-level singletons shared by every request.
  """

  @classmethod
  def initializeHandler(cls, serviceInfoDict):
    """Create shared DB helpers, register monitoring activities and
    schedule periodic task-queue maintenance.  Runs once at service start.
    """
    cls.jobDB = JobDB()
    cls.jobLoggingDB = JobLoggingDB()
    cls.taskQueueDB = TaskQueueDB()
    cls.pilotAgentsDB = PilotAgentsDB()
    cls.limiter = Limiter(jobDB=cls.jobDB)
    # Recompute shares immediately, then every 2 minutes via the scheduler.
    cls.taskQueueDB.recalculateTQSharesForAll()
    gMonitor.registerActivity('matchTime', "Job matching time",
                              'Matching', "secs", gMonitor.OP_MEAN, 300)
    gMonitor.registerActivity('matchesDone', "Job Match Request",
                              'Matching', "matches", gMonitor.OP_RATE, 300)
    gMonitor.registerActivity('matchesOK', "Matched jobs",
                              'Matching', "matches", gMonitor.OP_RATE, 300)
    gMonitor.registerActivity('numTQs', "Number of Task Queues",
                              'Matching', "tqsk queues", gMonitor.OP_MEAN, 300)
    gThreadScheduler.addPeriodicTask(120, cls.taskQueueDB.recalculateTQSharesForAll)
    gThreadScheduler.addPeriodicTask(60, cls.sendNumTaskQueues)
    cls.sendNumTaskQueues()
    return S_OK()

  @classmethod
  def sendNumTaskQueues(cls):
    """Publish the current number of task queues to the monitoring system."""
    result = cls.taskQueueDB.getNumTaskQueues()
    if result['OK']:
      gMonitor.addMark('numTQs', result['Value'])
    else:
      gLogger.error("Cannot get the number of task queues", result['Message'])

##############################################################################
  types_requestJob = [six.string_types + (dict,)]

  def export_requestJob(self, resourceDescription):
    """ Serve a job to the request of an agent which is the highest priority
        one matching the agent's site capacity
    """
    resourceDescription['Setup'] = self.serviceInfoDict['clientSetup']
    credDict = self.getRemoteCredentials()
    pilotRef = resourceDescription.get('PilotReference', 'Unknown')
    try:
      # Matching is delegated to the Matcher client helper.
      opsHelper = Operations(group=credDict['group'])
      matcher = Matcher(pilotAgentsDB=self.pilotAgentsDB,
                        jobDB=self.jobDB,
                        tqDB=self.taskQueueDB,
                        jlDB=self.jobLoggingDB,
                        opsHelper=opsHelper,
                        pilotRef=pilotRef)
      result = matcher.selectJob(resourceDescription, credDict)
    except RuntimeError as rte:
      self.log.error("[%s] Error requesting job: " % pilotRef, rte)
      return S_ERROR("Error requesting job")
    # result can be empty, meaning that no job matched
    if result:
      gMonitor.addMark("matchesDone")
      gMonitor.addMark("matchesOK")
      return S_OK(result)
    # FIXME: This is correctly interpreted by the JobAgent, but DErrno should be used instead
    return S_ERROR("No match found")

##############################################################################
  types_getActiveTaskQueues = []

  @classmethod
  @ignoreEncodeWarning
  def export_getActiveTaskQueues(cls):
    """ Return all task queues
    """
    return cls.taskQueueDB.retrieveTaskQueues()

##############################################################################
  types_getMatchingTaskQueues = [dict]
  # int keys are cast into str

  @classmethod
  @ignoreEncodeWarning
  def export_getMatchingTaskQueues(cls, resourceDict):
    """ Return all task queues that match the resourceDict
    """
    # Apply site-specific rate limits when a single site is given.
    if 'Site' in resourceDict and isinstance(resourceDict['Site'], six.string_types):
      negativeCond = cls.limiter.getNegativeCondForSite(resourceDict['Site'])
    else:
      negativeCond = cls.limiter.getNegativeCond()
    matcher = Matcher(pilotAgentsDB=cls.pilotAgentsDB,
                      jobDB=cls.jobDB,
                      tqDB=cls.taskQueueDB,
                      jlDB=cls.jobLoggingDB)
    # Normalize the resource description before querying the DB.
    resourceDescriptionDict = matcher._processResourceDescription(resourceDict)
    return cls.taskQueueDB.getMatchingTaskQueues(resourceDescriptionDict,
                                                 negativeCond=negativeCond)
|
yujikato/DIRAC
|
src/DIRAC/WorkloadManagementSystem/Service/MatcherHandler.py
|
Python
|
gpl-3.0
| 5,080
|
[
"DIRAC"
] |
b0f1dfc3b68b5d179bd4b0148e832c9f20d76aaa04032f92d3ab4b0cd98e527a
|
#!/usr/bin/python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test command-line interface.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
import textwrap
import subprocess
import openmoltools as omt
from yank import utils
#=============================================================================================
# UNIT TESTS
#=============================================================================================
def run_cli(arguments, expected_output=None):
    """Invoke the ``yank`` command line tool and check its outcome.

    Parameters
    ----------
    arguments : str
        Command line arguments for ``yank`` (split on whitespace).
    expected_output : str, optional
        If given, the captured stdout must match this string exactly.

    Raises
    ------
    Exception
        If the command exits with a non-zero return code, or if stdout
        differs from expected_output.
    """
    command = 'yank ' + arguments
    # Bug fix: without stdout=/stderr=PIPE, communicate() returns
    # (None, None), so the error branch below could never trigger (and
    # would have crashed concatenating None into the message if it had).
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    stdout_data = stdout_data.decode() if stdout_data is not None else ''
    stderr_data = stderr_data.decode() if stderr_data is not None else ''
    # The message text shows the intent was to detect an error *return
    # value*, so check the process exit code rather than stderr content.
    if proc.returncode != 0:
        message = "An error return value (%s) was obtained:\n" % str(proc.returncode)
        message += "\n"
        message += stdout_data
        message += "\n"
        message += stderr_data
        message += "\n"
        raise Exception(message)
    if expected_output:
        if stdout_data != expected_output:
            message = "Output differs from expected output.\n"
            message += "\n"
            message += "Expected output:\n"
            message += expected_output
            message += "\n"
            message += "Actual output:\n"
            message += stdout_data
            message += "\n"
            raise Exception(message)
def test_help():
    """The CLI must at least print usage information without failing."""
    run_cli('--help')
def test_selftest():
    """Run ``yank selftest``, tolerating only a missing OpenCL library."""
    try:
        run_cli('selftest')
    except ImportError as e:
        # Trap the libOpenCL error.  Use str(e) rather than e.message:
        # the .message attribute was removed in Python 3 and would raise
        # AttributeError here.
        if "libOpenCL.so" in str(e):
            print("Failed to load OpenCL. If this is an expected reult, carry on, if not, please debug!")
        else:
            # Bare raise preserves the original traceback.
            raise
def notest_prepare_binding():
    """Disabled test (the 'notest_' prefix keeps the runner from collecting
    it): exercises 'yank prepare binding amber' on the benzene-toluene
    implicit-solvent example."""
    dirname = utils.get_data_filename("../examples/benzene-toluene-implicit/setup/") # Could only figure out how to install things like yank.egg/examples/, rather than yank.egg/yank/examples/
    with omt.utils.temporary_directory() as store_dir:
        run_cli('prepare binding amber --setupdir=%(dirname)s --ligand="resname TOL" --store %(store_dir)s --gbsa OBC1' % vars())
def test_script_yaml():
    """Check that yank script --yaml command works."""
    # Input files shipped with the test data package.
    setup_dir = utils.get_data_filename(os.path.join('tests', 'data', 'p-xylene-implicit'))
    pxylene_path = os.path.join(setup_dir, 'p-xylene.mol2')
    lysozyme_path = os.path.join(setup_dir, '181L-pdbfixer.pdb')
    with omt.utils.temporary_directory() as tmp_dir:
        # Minimal one-iteration absolute-binding experiment in vacuum.
        # The literal is dedented before being written, so the leading
        # indentation below is cosmetic only.
        yaml_content = """
        ---
        options:
            number_of_iterations: 1
            output_dir: '.'
        molecules:
            T4lysozyme:
                filepath: {}
            p-xylene:
                filepath: {}
                antechamber:
                    charge_method: bcc
        solvents:
            vacuum:
                nonbonded_method: NoCutoff
        protocols:
            absolute-binding:
                complex:
                    alchemical_path:
                        lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                        lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
                solvent:
                    alchemical_path:
                        lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
                        lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
        systems:
            system:
                receptor: T4lysozyme
                ligand: p-xylene
                solvent: vacuum
                leap:
                    parameters: [leaprc.gaff, oldff/leaprc.ff14SB]
        experiments:
            system: system
            protocol: absolute-binding
            restraint:
                type: FlatBottom
        """.format(lysozyme_path, pxylene_path)
        yaml_file_path = os.path.join(tmp_dir, 'yank.yaml')
        with open(yaml_file_path, 'w') as f:
            f.write(textwrap.dedent(yaml_content))
        # The actual check: the CLI must run the script without raising.
        run_cli('script --yaml={}'.format(yaml_file_path))
|
andrrizzi/yank
|
Yank/tests/test_cli.py
|
Python
|
mit
| 4,237
|
[
"Amber"
] |
fee44b4898db2e0fbbdc08a358b86ce17b83914c241095224835548b43c4a080
|
import os
import sys
import math
import vtk
import colorsys
import time
import functools
import traceback
import PythonQt
from PythonQt import QtCore, QtGui
import ddapp.applogic as app
from ddapp import objectmodel as om
from ddapp import perception
from ddapp import lcmUtils
from ddapp import roboturdf
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp.transformUtils import getTransformFromAxes
from ddapp.timercallback import TimerCallback
from ddapp import mapsregistrar
from ddapp.affordanceitems import *
from ddapp.visualization import *
from ddapp.filterUtils import *
from ddapp.fieldcontainer import FieldContainer
from ddapp.segmentationroutines import *
from ddapp import cameraview
import numpy as np
import vtkNumpy
from debugVis import DebugData
from shallowCopy import shallowCopy
import ioUtils
from ddapp.uuidutil import newUUID
import drc as lcmdrc
import bot_core as lcmbotcore
import vs as lcmvs
from ddapp import lcmUtils
DRILL_TRIANGLE_BOTTOM_LEFT = 'bottom left'
DRILL_TRIANGLE_BOTTOM_RIGHT = 'bottom right'
DRILL_TRIANGLE_TOP_LEFT = 'top left'
DRILL_TRIANGLE_TOP_RIGHT = 'top right'
# using drc plane segmentation instead of PCL
planeSegmentationFilter = vtk.vtkPlaneSegmentation
#planeSegmentationFilter = vtk.vtkPCLSACSegmentationPlane
_defaultSegmentationView = None
def getSegmentationView():
    '''Return the segmentation render view: the module default if one was
    set, otherwise the view manager's 'Segmentation View'.'''
    return _defaultSegmentationView or app.getViewManager().findView('Segmentation View')
def getDRCView():
    '''Return the main DRC render view.'''
    return app.getDRCView()
def switchToView(viewName):
    '''Make the named view the active one in the view manager.'''
    app.getViewManager().switchToView(viewName)
def getCurrentView():
    '''Return the currently active render view.'''
    return app.getCurrentRenderView()
def cropToLineSegment(polyData, point1, point2):
    '''Keep only the points whose projection onto the point1->point2 axis
    falls within the segment (adds a 'dist_along_line' array).'''
    segment = np.array(point2) - np.array(point1)
    segmentLength = np.linalg.norm(segment)
    direction = segment / segmentLength
    labeled = labelPointDistanceAlongAxis(polyData, direction, origin=point1, resultArrayName='dist_along_line')
    return thresholdPoints(labeled, 'dist_along_line', [0.0, segmentLength])
'''
icp programmable filter
import vtkFiltersGeneralPython as filtersGeneral
points = inputs[0]
block = inputs[1]
print points.GetNumberOfPoints()
print block.GetNumberOfPoints()
if points.GetNumberOfPoints() < block.GetNumberOfPoints():
block, points = points, block
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(points.VTKObject)
icp.SetTarget(block.VTKObject)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = filtersGeneral.vtkTransformPolyDataFilter()
t.SetInput(points.VTKObject)
t.SetTransform(icp)
t.Update()
output.ShallowCopy(t.GetOutput())
'''
def lockAffordanceToHand(aff, hand='l_hand'):
    '''Reposition the affordance so it keeps a fixed offset from the given
    hand link.  The hand-to-affordance offset is computed on the first call
    and cached on the affordance as handToAffT.'''
    linkFrame = getLinkFrame(hand)
    affT = aff.actor.GetUserTransform()
    if not hasattr(aff, 'handToAffT') or not aff.handToAffT:
        # First call: remember the current hand -> affordance transform.
        aff.handToAffT = computeAToB(linkFrame, affT)
    # new affordance pose = cached offset composed with the hand pose
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Concatenate(aff.handToAffT)
    t.Concatenate(linkFrame)
    aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
handAffUpdater = None
def lockToHandOn():
    '''Start a 30 fps timer that keeps the default affordance locked to the
    left hand via lockAffordanceToHand.  No-op if there is no default
    affordance.'''
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    global handAffUpdater
    if handAffUpdater is None:
        # Lazily create the module-level timer on first use.
        handAffUpdater = TimerCallback()
        handAffUpdater.targetFps = 30
    handAffUpdater.callback = functools.partial(lockAffordanceToHand, aff)
    handAffUpdater.start()
def lockToHandOff():
    '''Stop the hand-lock updater started by lockToHandOn and clear the
    cached hand-to-affordance offset on the default affordance.'''
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    # Bug fix: guard against being called before lockToHandOn ever ran,
    # in which case the module-level handAffUpdater is still None and
    # calling .stop() on it would raise AttributeError.
    if handAffUpdater is not None:
        handAffUpdater.stop()
    aff.handToAffT = None
class DisparityPointCloudItem(vis.PolyDataItem):
    '''Object-model item that periodically displays the stereo/disparity
    point cloud of a camera, throttled by its 'Target FPS' property.'''

    def __init__(self, name, imagesChannel, cameraName, imageManager):
        vis.PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view=None)
        self.addProperty('Channel', imagesChannel)
        self.addProperty('Camera name', cameraName)
        # 'Decimation' stores an enum index; the decimation factor itself is
        # the enum label ('1', '2', ... '16').
        self.addProperty('Decimation', 0, attributes=om.PropertyAttributes(enumNames=['1', '2', '4', '8', '16']))
        self.addProperty('Remove Size', 1000, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=100000.0, singleStep=1000))
        self.addProperty('Target FPS', 1.0, attributes=om.PropertyAttributes(decimals=1, minimum=0.1, maximum=30.0, singleStep=0.1))
        self.timer = TimerCallback()
        self.timer.callback = self.update
        self.lastUtime = 0
        self.imageManager = imageManager
        self.cameraName = cameraName
        self.setProperty('Visible', False)

    def _onPropertyChanged(self, propertySet, propertyName):
        vis.PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
        if propertyName == 'Visible':
            # Only poll for new images while the item is visible.
            if self.getProperty(propertyName):
                self.timer.start()
            else:
                self.timer.stop()
        elif propertyName in ('Decimation', 'Remove outliers'):
            # Reset the timestamp to force a refresh on the next update.
            self.lastUtime = 0

    def onRemoveFromObjectModel(self):
        vis.PolyDataItem.onRemoveFromObjectModel(self)
        self.timer.stop()

    def update(self):
        '''Timer callback: fetch a new cloud when a newer image is available
        and enough time has elapsed (Target FPS throttle).'''
        utime = self.imageManager.queue.getCurrentImageTime(self.cameraName)
        if utime == self.lastUtime:
            return
        if (utime < self.lastUtime ):
            # Timestamp went backwards (e.g. log replay restarted): fall
            # through and refresh anyway.
            temp=0 # dummy
        elif (utime - self.lastUtime < 1E6/self.getProperty('Target FPS')):
            # utime is in microseconds; too soon since the last refresh.
            return
        decimation = int(self.properties.getPropertyEnumValue('Decimation'))
        removeSize = int(self.properties.getProperty('Remove Size'))
        polyData = getDisparityPointCloud(decimation, imagesChannel=self.getProperty('Channel'), cameraName=self.getProperty('Camera name'),
                                          removeOutliers=False, removeSize=removeSize)
        self.setPolyData(polyData)
        if not self.lastUtime:
            # First cloud received: default to RGB coloring.
            self.setProperty('Color By', 'rgb_colors')
        self.lastUtime = utime
def getRandomColor():
    '''
    Return a random color as a list of RGB values between 0.0 and 1.0.
    '''
    # Random hue with fixed saturation/value keeps colors bright.
    hue = np.random.rand()
    saturation, value = 1.0, 0.9
    return colorsys.hsv_to_rgb(hue, saturation, value)
def extractLargestCluster(polyData, minClusterSize=100):
    '''Run euclidean clustering and return only the points belonging to the
    largest cluster (cluster label 1).'''
    clustered = applyEuclideanClustering(polyData, minClusterSize=minClusterSize)
    return thresholdPoints(clustered, 'cluster_labels', [1, 1])
def segmentGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    ''' A More complex ground removal algorithm. Works when plane isn't
    precisely flat. First clusters on z to find approx ground height, then fits a plane there.

    Returns (origin, normal, groundPoints, scenePoints).
    '''
    searchRegionThickness = 0.5
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    # The 5th percentile of z is a robust estimate of the ground height.
    groundHeight = np.percentile(zvalues, 5)
    vtkNumpy.addNumpyToVtk(polyData, zvalues.copy(), 'z')
    searchRegion = thresholdPoints(polyData, 'z', [groundHeight - searchRegionThickness/2.0, groundHeight + searchRegionThickness/2.0])
    updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
    # Fit a roughly-horizontal plane within the search slab.
    _, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
    # Signed distance of every point to the fitted plane.
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
    return origin, normal, groundPoints, scenePoints
def segmentGroundPlane():
    '''Interactive variant of segmentGround: operates on the 'pointcloud
    snapshot' object and shows the resulting ground/scene point sets.'''
    inputObj = om.findObjectByName('pointcloud snapshot')
    inputObj.setProperty('Visible', False)
    polyData = shallowCopy(inputObj.polyData)
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    groundHeight = np.percentile(zvalues, 5)
    # NOTE(review): thresholds on a 'z' point array that is not added in this
    # function; presumably the snapshot already carries it (see
    # addCoordArraysToPolyData) — confirm against the snapshot producer.
    searchRegion = thresholdPoints(polyData, 'z', [groundHeight - 0.3, groundHeight + 0.3])
    updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
    _, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
    # Label every point with its signed distance to the fitted plane.
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [0.05, 10])
    updatePolyData(groundPoints, 'ground points', alpha=0.3)
    updatePolyData(scenePoints, 'scene points', alpha=0.3)
    #scenePoints = applyEuclideanClustering(scenePoints, clusterTolerance=0.10, minClusterSize=100, maxClusterSize=1e6)
    #updatePolyData(scenePoints, 'scene points', colorByName='cluster_labels')
def applyLocalPlaneFit(polyData, searchPoint, searchRadius, searchRadiusEnd=None, removeGroundFirst=True):
    '''Fit a plane to the neighborhood of searchPoint.

    The cloud is optionally voxel-grid downsampled and ground-filtered,
    then a plane is fit within searchRadius of searchPoint (and refined
    within searchRadiusEnd when given).  The plane inliers are clustered
    and the cluster closest to searchPoint is returned.

    Returns (fitPoints, normal).
    '''
    useVoxelGrid = True
    voxelGridSize = 0.03
    distanceToPlaneThreshold = 0.02
    if useVoxelGrid:
        polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
    if removeGroundFirst:
        _, polyData = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.04)
    # Debug visualization of the search neighborhood.
    cropped = cropToSphere(polyData, searchPoint, searchRadius)
    updatePolyData(cropped, 'crop to sphere', visible=False, colorByName='distance_to_point')
    # Initial fit, then an optional refinement constrained to the first
    # normal over a different search radius.
    polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=searchPoint, searchRadius=searchRadius)
    if searchRadiusEnd is not None:
        polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, perpendicularAxis=normal, angleEpsilon=math.radians(30), searchOrigin=searchPoint, searchRadius=searchRadiusEnd)
    fitPoints = thresholdPoints(polyData, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
    updatePolyData(fitPoints, 'fitPoints', visible=False)
    # Keep only the inlier cluster nearest to the picked point.
    fitPoints = labelDistanceToPoint(fitPoints, searchPoint)
    clusters = extractClusters(fitPoints, clusterTolerance=0.05, minClusterSize=3)
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    fitPoints = clusters[0]
    # NOTE: an unreachable normal-estimation branch that followed this
    # return in the original has been removed (dead code).
    return fitPoints, normal
def orientToMajorPlane(polyData, pickedPoint):
    '''
    Find the largest plane and transform the cloud to align that plane.
    Use the given point as the origin.

    Returns (transformed polyData, planeFrame).
    '''
    distanceToPlaneThreshold=0.02
    searchRadius = 0.5
    planePoints, origin, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=pickedPoint, searchRadius=searchRadius, returnOrigin=True)
    vis.updatePolyData(planePoints, 'local plane fit', color=[0,1,0], parent=getDebugFolder(), visible=False)
    # Frame whose z axis is the plane normal, anchored at the picked point.
    planeFrame = transformUtils.getTransformFromOriginAndNormal(pickedPoint, normal)
    vis.updateFrame(planeFrame, 'plane frame', scale=0.15, parent=getDebugFolder(), visible=False)
    polyData = transformPolyData(polyData, planeFrame.GetLinearInverse() )
    # if the mean point is below the horizontal plane, flip the cloud
    zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
    midCloudHeight = np.mean(zvalues)
    if (midCloudHeight < 0):
        flipTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,180,0])
        polyData = transformPolyData(polyData, flipTransform )
    return polyData, planeFrame
def getMajorPlanes(polyData, useVoxelGrid=True):
    '''Iteratively segment planes from the cloud (RANSAC), keeping the
    largest inlier cluster of each plane.  Stops when a plane's largest
    cluster falls below minClusterSize or after 25 planes.

    Returns a list of polyData objects, one per extracted plane cluster.
    '''
    voxelGridSize = 0.01
    distanceToPlaneThreshold = 0.02
    if useVoxelGrid:
        polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
    polyDataList = []
    minClusterSize = 100
    while len(polyDataList) < 25:
        f = planeSegmentationFilter()
        f.SetInput(polyData)
        f.SetDistanceThreshold(distanceToPlaneThreshold)
        f.Update()
        polyData = shallowCopy(f.GetOutput())
        # Split RANSAC inliers (label 1) from outliers (label 0).
        outliers = thresholdPoints(polyData, 'ransac_labels', [0, 0])
        inliers = thresholdPoints(polyData, 'ransac_labels', [1, 1])
        largestCluster = extractLargestCluster(inliers)
        #i = len(polyDataList)
        #showPolyData(inliers, 'inliers %d' % i, color=getRandomColor(), parent='major planes')
        #showPolyData(outliers, 'outliers %d' % i, color=getRandomColor(), parent='major planes')
        #showPolyData(largestCluster, 'cluster %d' % i, color=getRandomColor(), parent='major planes')
        if largestCluster.GetNumberOfPoints() > minClusterSize:
            polyDataList.append(largestCluster)
            # Continue segmenting in what remains after removing this plane.
            polyData = outliers
        else:
            break
    return polyDataList
def showMajorPlanes(polyData=None):
    '''Extract the major planes from polyData (or from the 'pointcloud
    snapshot' object when none is given) and show each in a random color
    under the 'major planes' folder.'''
    if not polyData:
        inputObj = om.findObjectByName('pointcloud snapshot')
        inputObj.setProperty('Visible', False)
        polyData = inputObj.polyData
    # Recreate the output folder from scratch.
    om.removeFromObjectModel(om.findObjectByName('major planes'))
    folderObj = om.findObjectByName('segmentation')
    folderObj = om.getOrCreateContainer('major planes', folderObj)
    # Only consider points between 1 m and 4 m from the sensor.
    origin = SegmentationContext.getGlobalInstance().getViewFrame().GetPosition()
    polyData = labelDistanceToPoint(polyData, origin)
    polyData = thresholdPoints(polyData, 'distance_to_point', [1, 4])
    polyDataList = getMajorPlanes(polyData)
    for i, polyData in enumerate(polyDataList):
        obj = showPolyData(polyData, 'plane %d' % i, color=getRandomColor(), visible=True, parent='major planes')
        obj.setProperty('Point Size', 3)
def cropToBox(polyData, transform, dimensions):
    '''
    Crop polyData to an oriented box.  transform gives the box center and
    orientation; dimensions is a length-3 sequence of box edge lengths.
    '''
    center = np.array(transform.GetPosition())
    boxAxes = transformUtils.getAxesFromTransform(transform)
    for boxAxis, edgeLength in zip(boxAxes, dimensions):
        halfExtent = np.array(boxAxis) * (edgeLength/2.0)
        polyData = cropToLineSegment(polyData, center - halfExtent, center + halfExtent)
    return polyData
def cropToBounds(polyData, transform, bounds):
    '''
    Crop polyData along the axes of transform.  bounds is a 2x3 array-like;
    bounds[i] holds the (min, max) signed distances along axis i, measured
    from the transform origin.
    '''
    base = np.array(transform.GetPosition())
    frameAxes = transformUtils.getAxesFromTransform(transform)
    for frameAxis, (lo, hi) in zip(frameAxes, bounds):
        unitAxis = np.array(frameAxis) / np.linalg.norm(frameAxis)
        polyData = cropToLineSegment(polyData, base + unitAxis*lo, base + unitAxis*hi)
    return polyData
def cropToSphere(polyData, origin, radius):
    '''Return the subset of points within radius of origin.  Adds a
    'distance_to_point' array as a side effect of the labeling step.'''
    labeled = labelDistanceToPoint(polyData, origin)
    return thresholdPoints(labeled, 'distance_to_point', [0, radius])
def applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=None, perpendicularAxis=None, angleEpsilon=0.2, returnOrigin=False, searchOrigin=None, searchRadius=None):
    '''RANSAC plane fit with optional search-region and orientation
    constraints.

    When searchOrigin/searchRadius are given, only points inside that
    sphere are used for the fit; the signed 'dist_to_plane' array is still
    added to (a shallow copy of) the full input cloud.  perpendicularAxis
    constrains the plane normal to within angleEpsilon radians of that
    axis.  The normal is flipped, if needed, to agree with expectedNormal
    (default [-1,0,0]).

    Returns (polyData, normal), or (polyData, origin, normal) when
    returnOrigin is True.
    '''
    expectedNormal = expectedNormal if expectedNormal is not None else [-1,0,0]
    fitInput = polyData
    if searchOrigin is not None:
        assert searchRadius
        fitInput = cropToSphere(fitInput, searchOrigin, searchRadius)
    # perform plane segmentation
    f = planeSegmentationFilter()
    f.SetInput(fitInput)
    f.SetDistanceThreshold(distanceThreshold)
    if perpendicularAxis is not None:
        f.SetPerpendicularConstraintEnabled(True)
        f.SetPerpendicularAxis(perpendicularAxis)
        f.SetAngleEpsilon(angleEpsilon)
    f.Update()
    origin = f.GetPlaneOrigin()
    normal = np.array(f.GetPlaneNormal())
    # flip the normal if needed
    if np.dot(normal, expectedNormal) < 0:
        normal = -normal
    # for each point, compute signed distance to plane
    polyData = shallowCopy(polyData)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    if returnOrigin:
        return polyData, origin, normal
    else:
        return polyData, normal
def flipNormalsWithViewDirection(polyData, viewDirection):
    '''Flip, in place, every normal that points away from the viewer so all
    normals face the view direction.'''
    # Bug fix: this module imports vtkNumpy (and uses it everywhere else);
    # 'vnp' is not defined here and would raise NameError unless a star
    # import happened to provide it.
    normals = vtkNumpy.getNumpyFromVtk(polyData, 'normals')
    normals[np.dot(normals, viewDirection) > 0] *= -1
def normalEstimation(dataObj, searchCloud=None, searchRadius=0.05, useVoxelGrid=False, voxelGridLeafSize=0.05):
    '''Estimate per-point normals with PCL.

    The optional second input (searchCloud, or a voxel-grid downsample of
    the data when useVoxelGrid is True) is used as the neighbor-search
    surface.  Returns a shallow copy with its 'normals' array installed as
    the active point normals.
    '''
    f = vtk.vtkPCLNormalEstimation()
    f.SetSearchRadius(searchRadius)
    f.SetInput(dataObj)
    if searchCloud:
        f.SetInput(1, searchCloud)
    elif useVoxelGrid:
        f.SetInput(1, applyVoxelGrid(dataObj, voxelGridLeafSize))
    f.Update()
    dataObj = shallowCopy(f.GetOutput())
    # Promote the computed array to the active normals of the dataset.
    dataObj.GetPointData().SetNormals(dataObj.GetPointData().GetArray('normals'))
    return dataObj
def addCoordArraysToPolyData(polyData):
    '''Return a shallow copy of polyData with scalar arrays added for the
    world x/y/z coordinates and for the distance along each axis of the
    current view frame (distance_along_view_x/y/z).'''
    polyData = shallowCopy(polyData)
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
    vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
    vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')
    # Express the view frame origin and axes in world coordinates.
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    viewOrigin = viewFrame.TransformPoint([0.0, 0.0, 0.0])
    viewX = viewFrame.TransformVector([1.0, 0.0, 0.0])
    viewY = viewFrame.TransformVector([0.0, 1.0, 0.0])
    viewZ = viewFrame.TransformVector([0.0, 0.0, 1.0])
    polyData = labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_view_x')
    polyData = labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_view_y')
    polyData = labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_view_z')
    return polyData
def getDebugRevolutionData():
    '''Load a canned scan from disk for offline debugging (hard-coded path
    on the developer's desktop) and add the coordinate arrays to it.'''
    #dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../drc-data'))
    #filename = os.path.join(dataDir, 'valve_wall.vtp')
    #filename = os.path.join(dataDir, 'bungie_valve.vtp')
    #filename = os.path.join(dataDir, 'cinder-blocks.vtp')
    #filename = os.path.join(dataDir, 'cylinder_table.vtp')
    #filename = os.path.join(dataDir, 'firehose.vtp')
    #filename = os.path.join(dataDir, 'debris.vtp')
    #filename = os.path.join(dataDir, 'rev1.vtp')
    #filename = os.path.join(dataDir, 'drill-in-hand.vtp')
    filename = os.path.expanduser('~/Desktop/scans/debris-scan.vtp')
    return addCoordArraysToPolyData(ioUtils.readPolyData(filename))
def getCurrentScanBundle():
    '''Return the SCANS_HALF_SWEEP point cloud with coordinate arrays
    added, or None when the object is missing or empty.'''
    obj = om.findObjectByName('SCANS_HALF_SWEEP')
    if not obj:
        return None
    revPolyData = obj.polyData
    if not revPolyData or not revPolyData.GetNumberOfPoints():
        return None
    # NOTE(review): useVoxelGrid is the module-level flag defined further
    # down in this file, not a parameter.
    if useVoxelGrid:
        revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
    return addCoordArraysToPolyData(revPolyData)
def getCurrentRevolutionData():
    '''Return the current multisense revolution point cloud with coordinate
    arrays added, falling back to the scan bundle when no revolution data
    is available.'''
    revPolyData = perception._multisenseItem.model.revPolyData
    if not revPolyData or not revPolyData.GetNumberOfPoints():
        return getCurrentScanBundle()
    # NOTE(review): useVoxelGrid is the module-level flag defined further
    # down in this file.
    if useVoxelGrid:
        revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
    return addCoordArraysToPolyData(revPolyData)
def getDisparityPointCloud(decimation=4, removeOutliers=True, removeSize=0, imagesChannel='CAMERA', cameraName='CAMERA_LEFT'):
    '''Fetch the stereo disparity point cloud for the given camera, with
    optional outlier removal.  Returns None when no cloud is available.'''
    p = cameraview.getStereoPointCloud(decimation, imagesChannel=imagesChannel, cameraName=cameraName, removeSize=removeSize)
    if not p:
        return None
    if removeOutliers:
        # attempt to scale outlier filtering, best tuned for decimation of 2 or 4
        scaling = (10*16)/(decimation*decimation)
        p = labelOutliers(p, searchRadius=0.06, neighborsInSearchRadius=scaling)
        p = thresholdPoints(p, 'is_outlier', [0.0, 0.0])
    return p
def getCurrentMapServerData():
    '''Return the Map Server point cloud with coordinate arrays added, or
    None when the Map Server object is missing, hidden, or empty.'''
    mapServer = om.findObjectByName('Map Server')
    if not (mapServer and mapServer.getProperty('Visible')):
        return None
    polyData = mapServer.source.polyData
    if not polyData or not polyData.GetNumberOfPoints():
        return None
    return addCoordArraysToPolyData(polyData)
useVoxelGrid = False
def segmentGroundPlanes():
    """Debug utility: fit a ground plane for every 'pointcloud snapshot' object.

    Processes snapshots in name order, prints the tilt of each fitted ground
    normal from vertical and the angle between successive head axes, shows the
    ground points for each snapshot, and draws all fitted normals as lines
    from the origin.
    """
    objs = []
    for obj in om.getObjects():
        name = obj.getProperty('Name')
        if name.startswith('pointcloud snapshot'):
            objs.append(obj)

    # Deterministic (name-sorted) processing order.
    objs = sorted(objs, key=lambda x: x.getProperty('Name'))

    d = DebugData()
    prevHeadAxis = None
    for obj in objs:
        name = obj.getProperty('Name')
        print '----- %s---------' % name
        print 'head axis:', obj.headAxis
        origin, normal, groundPoints, _ = segmentGround(obj.polyData)
        print 'ground normal:', normal
        showPolyData(groundPoints, name + ' ground points', visible=False)

        # Angle between the fitted ground normal and world up, folded to <=90 deg.
        a = np.array([0,0,1])
        b = np.array(normal)
        diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
        if diff > 90:
            print 180 - diff
        else:
            print diff

        # Angle between this snapshot's head axis and the previous one.
        if prevHeadAxis is not None:
            a = prevHeadAxis
            b = np.array(obj.headAxis)
            diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
            if diff > 90:
                print 180 - diff
            else:
                print diff
        prevHeadAxis = np.array(obj.headAxis)

        d.addLine([0,0,0], normal)

    updatePolyData(d.getPolyData(), 'normals')
def extractCircle(polyData, distanceThreshold=0.04, radiusLimit=None):
    """RANSAC-fit a circle to the cloud.

    Returns (inlierPolyData, circleFitFilter); the filter exposes the fitted
    circle parameters. radiusLimit, when given, constrains the fit radius.
    """
    fitter = vtk.vtkPCLSACSegmentationCircle()
    fitter.SetDistanceThreshold(distanceThreshold)
    fitter.SetInput(polyData)

    if radiusLimit is not None:
        fitter.SetRadiusLimit(radiusLimit)
        fitter.SetRadiusConstraintEnabled(True)

    fitter.Update()
    inliers = thresholdPoints(fitter.GetOutput(), 'ransac_labels', [1.0, 1.0])
    return inliers, fitter
def removeMajorPlane(polyData, distanceThreshold=0.02):
    """RANSAC-fit the dominant plane and drop its inliers.

    Returns (pointsOffPlane, planeFilter).
    """
    # perform plane segmentation
    planeFilter = planeSegmentationFilter()
    planeFilter.SetInput(polyData)
    planeFilter.SetDistanceThreshold(distanceThreshold)
    planeFilter.Update()

    remaining = thresholdPoints(planeFilter.GetOutput(), 'ransac_labels', [0.0, 0.0])
    return remaining, planeFilter
def removeGroundSimple(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    """Split a cloud into (ground points, scene points) by z height alone.

    Uses the globally tracked ground height with plain z-distance filtering;
    suitable for noisy data such as kinect/stereo cameras.
    """
    groundZ = SegmentationContext.getGlobalInstance().getGroundHeight()
    planeOrigin = [0, 0, groundZ]
    planeNormal = [0, 0, 1]

    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    heights = np.dot(pts - planeOrigin, planeNormal)
    vtkNumpy.addNumpyToVtk(polyData, heights, 'dist_to_plane')

    halfThickness = groundThickness / 2.0
    groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-halfThickness, halfThickness])
    scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
    return groundPoints, scenePoints
def removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
    """Plane-fit the ground via segmentGround and return (ground, scene) points."""
    _, _, groundPoints, scenePoints = segmentGround(polyData, groundThickness, sceneHeightFromGround)
    return groundPoints, scenePoints
def generateFeetForValve():
    """Create left/right foot stance frames for the 'valve affordance' object.

    Builds a ground-projected, z-up frame from the valve's axis and places
    foot frames at a fixed stance offset relative to it.

    Raises:
        AssertionError: if the valve affordance is not in the object model.
    """
    aff = om.findObjectByName('valve affordance')
    assert aff

    params = aff.params
    origin = np.array(params['origin'])
    origin[2] = 0.0  # project the valve origin onto the ground plane

    # x points out of the valve face; re-orthogonalize into a z-up frame.
    xaxis = -params['axis']
    zaxis = np.array([0,0,1])
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)

    stanceWidth = 0.2
    stanceRotation = 25.0
    stanceOffset = [-1.0, -0.5, 0.0]

    valveFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    valveFrame.PostMultiply()
    valveFrame.Translate(origin)

    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(valveFrame, stanceWidth, stanceRotation, stanceOffset)

    # BUG FIX: the original referenced an undefined name 'boardFrame' here
    # (copy-paste from generateFeetForDebris), raising NameError at runtime.
    showFrame(valveFrame, 'board ground frame', parent=aff, scale=0.15, visible=False)
    showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
    showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)

    #d = DebugData()
    #d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
    #updatePolyData(d.getPolyData(), 'stance debug')
    #publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForDebris():
    """Place lfoot/rfoot stance frames beside the 'board A' affordance, if present."""
    aff = om.findObjectByName('board A')
    if not aff:
        return

    params = aff.params

    # Start at the board origin, shift to the near end of the board, and
    # project onto the ground.
    groundOrigin = np.array(params['origin']) + params['zaxis']*params['zwidth']/2.0 - params['xaxis']*params['xwidth']/2.0
    groundOrigin[2] = 0.0

    yaxis = params['zaxis']
    zaxis = np.array([0,0,1])
    xaxis = np.cross(yaxis, zaxis)

    boardFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    boardFrame.PostMultiply()
    boardFrame.Translate(groundOrigin)

    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(
        boardFrame, stanceWidth=0.35, stanceRotation=0.0, stanceOffset=[-0.48, -0.08, 0])

    showFrame(boardFrame, 'board ground frame', parent=aff, scale=0.15, visible=False)
    footObjs = [showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15),
                showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)]
    for footObj in footObjs:
        footObj.addToView(app.getDRCView())
def generateFeetForWye():
    """Place lfoot/rfoot stance frames in front of the 'wye points' affordance, if present."""
    aff = om.findObjectByName('wye points')
    if not aff:
        return

    params = aff.params
    groundOrigin = np.array(params['origin'])
    groundOrigin[2] = 0.0  # project onto the ground

    # Frame whose x-axis faces the wye.
    yaxis = params['xaxis']
    xaxis = -params['zaxis']
    zaxis = np.cross(xaxis, yaxis)

    affGroundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    affGroundFrame.PostMultiply()
    affGroundFrame.Translate(groundOrigin)

    stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(
        affGroundFrame, stanceWidth=0.20, stanceRotation=0.0, stanceOffset=[-0.48, -0.08, 0])

    showFrame(affGroundFrame, 'affordance ground frame', parent=aff, scale=0.15, visible=False)
    for frame, frameName in [(lfootFrame, 'lfoot frame'), (rfootFrame, 'rfoot frame')]:
        frameObj = showFrame(frame, frameName, parent=aff, scale=0.15)
        frameObj.addToView(app.getDRCView())
def getFootFramesFromReferenceFrame(referenceFrame, stanceWidth, stanceRotation, stanceOffset):
    """Compute stance, left-foot and right-foot frames relative to a reference frame.

    The stance frame is the reference frame rotated by stanceRotation degrees
    about z and shifted by stanceOffset; the feet straddle it at +/-
    stanceWidth/2 in y, raised by the fixed foot height.
    """
    footHeight = 0.0745342  # fixed sole-to-frame offset

    ref = vtk.vtkTransform()
    ref.SetMatrix(referenceFrame.GetMatrix())

    stanceFrame = vtk.vtkTransform()
    stanceFrame.PostMultiply()
    stanceFrame.RotateZ(stanceRotation)
    stanceFrame.Translate(stanceOffset)
    stanceFrame.Concatenate(ref)

    def makeFootFrame(lateralOffset):
        # Each foot is laterally offset from the stance frame center.
        frame = vtk.vtkTransform()
        frame.PostMultiply()
        frame.Translate(0, lateralOffset, footHeight)
        frame.Concatenate(stanceFrame)
        return frame

    lfootFrame = makeFootFrame(stanceWidth / 2.0)
    rfootFrame = makeFootFrame(-stanceWidth / 2.0)
    return stanceFrame, lfootFrame, rfootFrame
def poseFromFrame(frame):
    """Convert a vtkTransform into an lcmdrc position_3d_t pose message."""
    # GetBotQuaternion fills the 4-element list in place with (w, x, y, z).
    wxyz = [0, 1, 2, 3]
    perception.drc.vtkMultisenseSource.GetBotQuaternion(frame, wxyz)

    quat = lcmdrc.quaternion_t()
    quat.w, quat.x, quat.y, quat.z = wxyz

    trans = lcmdrc.vector_3d_t()
    trans.x, trans.y, trans.z = frame.GetPosition()

    pose = lcmdrc.position_3d_t()
    pose.translation = trans
    pose.rotation = quat
    return pose
def cropToPlane(polyData, origin, normal, threshold):
    """Threshold points by signed distance to the plane (origin, normal).

    Returns (cropped, labeled): 'labeled' is a shallow copy of the input with
    a 'dist_to_plane' array added; 'cropped' keeps only points whose signed
    distance lies within [threshold[0], threshold[1]].
    """
    labeled = shallowCopy(polyData)
    unitNormal = normal / np.linalg.norm(normal)

    pts = vtkNumpy.getNumpyFromVtk(labeled, 'Points')
    signedDist = np.dot(pts - origin, unitNormal)
    vtkNumpy.addNumpyToVtk(labeled, signedDist, 'dist_to_plane')

    cropped = thresholdPoints(labeled, 'dist_to_plane', threshold)
    return cropped, labeled
def createLine(blockDimensions, p1, p2):
    """Segment a board from a two-point line annotation drawn in the view.

    p1/p2 are 2D display coordinates. The points are cast into the world as
    view rays, the cloud is sliced by the plane spanned by the two rays,
    trimmed to the wedge between them, a plane is fit to what remains, and
    the result is handed to segmentBlockByTopPlane as a 'board'.
    """
    sliceWidth = np.array(blockDimensions).max()/2.0 + 0.02
    sliceThreshold = [-sliceWidth, sliceWidth]

    # require p1 to be point on left
    if p1[0] > p2[0]:
        p1, p2 = p2, p1

    _, worldPt1 = getRayFromDisplayPoint(app.getCurrentRenderView(), p1)
    _, worldPt2 = getRayFromDisplayPoint(app.getCurrentRenderView(), p2)

    cameraPt = np.array(app.getCurrentRenderView().camera().GetPosition())

    leftRay = worldPt1 - cameraPt
    rightRay = worldPt2 - cameraPt
    middleRay = (leftRay + rightRay) / 2.0

    # Visualize the annotation rays for debugging.
    d = DebugData()
    d.addLine(cameraPt, worldPt1)
    d.addLine(cameraPt, worldPt2)
    d.addLine(worldPt1, worldPt2)
    d.addLine(cameraPt, cameraPt + middleRay)
    updatePolyData(d.getPolyData(), 'line annotation', parent=getDebugFolder(), visible=False)

    inputObj = om.findObjectByName('pointcloud snapshot')
    if inputObj:
        polyData = shallowCopy(inputObj.polyData)
    else:
        polyData = getCurrentRevolutionData()

    # Slice with the plane spanned by the two view rays, then trim to the
    # wedge between the rays using the side-plane normals.
    origin = cameraPt
    normal = np.cross(rightRay, leftRay)
    leftNormal = np.cross(normal, leftRay)
    rightNormal = np.cross(rightRay, normal)

    normal /= np.linalg.norm(normal)
    leftNormal /= np.linalg.norm(leftNormal)
    rightNormal /= np.linalg.norm(rightNormal)
    middleRay /= np.linalg.norm(middleRay)

    cropped, polyData = cropToPlane(polyData, origin, normal, sliceThreshold)

    updatePolyData(polyData, 'slice dist', parent=getDebugFolder(), colorByName='dist_to_plane', colorByRange=[-0.5, 0.5], visible=False)
    updatePolyData(cropped, 'slice', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)

    cropped, _ = cropToPlane(cropped, origin, leftNormal, [-1e6, 0])
    cropped, _ = cropToPlane(cropped, origin, rightNormal, [-1e6, 0])

    updatePolyData(cropped, 'slice segment', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)

    # Fit the board plane roughly perpendicular to the middle view ray.
    planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=middleRay, angleEpsilon=math.radians(60))
    planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
    updatePolyData(planePoints, 'board segmentation', parent=getDebugFolder(), color=getRandomColor(), visible=False)

    '''
    names = ['board A', 'board B', 'board C', 'board D', 'board E', 'board F', 'board G', 'board H', 'board I']
    for name in names:
        if not om.findObjectByName(name):
            break
    else:
        name = 'board'
    '''
    name = 'board'

    segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=-middleRay, expectedXAxis=middleRay, edgeSign=-1, name=name)
def updateBlockAffordances(polyData=None):
    """Delete stale 'refit' box affordances, then refit every remaining box."""
    # First pass: remove previous refit results so they are not refit again.
    for obj in om.getObjects():
        if isinstance(obj, BoxAffordanceItem) and 'refit' in obj.getProperty('Name'):
            om.removeFromObjectModel(obj)

    # Second pass: refit the surviving box affordances against the cloud.
    for obj in om.getObjects():
        if isinstance(obj, BoxAffordanceItem):
            updateBlockFit(obj, polyData)
def updateBlockFit(affordanceObj, polyData=None):
    """Refit a single box affordance against the point cloud.

    Crops the cloud to a slab around the affordance's current plane and edge
    axis, refits the top plane, creates a new '<name> refit' affordance, then
    slides it along its z-axis so one end stays anchored while restoring the
    original block length.
    """
    affordanceObj.updateParamsFromActorTransform()

    name = affordanceObj.getProperty('Name') + ' refit'

    origin = affordanceObj.params['origin']
    normal = affordanceObj.params['yaxis']
    edgePerpAxis = affordanceObj.params['xaxis']
    blockDimensions = [affordanceObj.params['xwidth'], affordanceObj.params['ywidth']]

    if polyData is None:
        inputObj = om.findObjectByName('pointcloud snapshot')
        polyData = shallowCopy(inputObj.polyData)

    # Crop to a slab around the block's plane and edge axis.
    cropThreshold = 0.1
    cropped = polyData
    cropped, _ = cropToPlane(cropped, origin, normal, [-cropThreshold, cropThreshold])
    cropped, _ = cropToPlane(cropped, origin, edgePerpAxis, [-cropThreshold, cropThreshold])

    updatePolyData(cropped, 'refit search region', parent=getDebugFolder(), visible=False)

    cropped = extractLargestCluster(cropped)

    planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=normal, angleEpsilon=math.radians(10))
    planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
    updatePolyData(planePoints, 'refit board segmentation', parent=getDebugFolder(), visible=False)

    refitObj = segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=normal, expectedXAxis=edgePerpAxis, edgeSign=-1, name=name)

    # Keep one end anchored: compute the refit end point, then shift the
    # origin so the block regains the original length.
    refitOrigin = np.array(refitObj.params['origin'])
    refitLength = refitObj.params['zwidth']
    refitZAxis = refitObj.params['zaxis']
    refitEndPoint1 = refitOrigin + refitZAxis*refitLength/2.0

    originalLength = affordanceObj.params['zwidth']
    correctedOrigin = refitEndPoint1 - refitZAxis*originalLength/2.0
    originDelta = correctedOrigin - refitOrigin

    refitObj.params['zwidth'] = originalLength
    refitObj.polyData.DeepCopy(affordanceObj.polyData)
    refitObj.actor.GetUserTransform().Translate(originDelta)
    refitObj.updateParamsFromActorTransform()
def startInteractiveLineDraw(blockDimensions):
    """Begin a two-point line annotation that segments a block of the given dims."""
    lineDrawPicker = LineDraw(app.getCurrentRenderView())
    addViewPicker(lineDrawPicker)
    lineDrawPicker.enabled = True
    lineDrawPicker.start()
    # createLine is invoked with the annotation points when drawing finishes.
    lineDrawPicker.annotationFunc = functools.partial(createLine, blockDimensions)
def startLeverValveSegmentation():
    """Begin a two-point picker annotation for lever-valve segmentation."""
    pointPicker = PointPicker(numberOfPoints=2)
    addViewPicker(pointPicker)
    pointPicker.enabled = True
    pointPicker.drawLines = False
    pointPicker.start()
    pointPicker.annotationFunc = functools.partial(segmentLeverValve)
def refitValveAffordance(aff, point1, origin, normal):
    """Re-orient a valve affordance so its z-axis matches the refit wall normal.

    The passed 'point1' and 'origin' are ignored; the affordance keeps its own
    stored origin. (The original also read xaxis/yaxis/zaxis from aff.params
    and immediately overwrote them — dead assignments removed.)
    """
    origin = aff.params['origin']

    # Right-handed frame: z along the wall normal, x as close to world up
    # as possible.
    zaxis = normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
    aff.updateParamsFromActorTransform()
def segmentValve(expectedValveRadius, point1, point2):
    """Segment a circular valve from two picked points.

    point1 is a point on the wall behind the valve; point2 is a point on the
    valve itself. Fits the wall plane at point1, a parallel plane at point2,
    clusters the valve points, and creates a disc-shaped FrameAffordanceItem
    named 'valve affordance' plus its frame object.
    """
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())

    # Fit the wall plane near point1, roughly facing the camera.
    polyData, _, wallNormal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)

    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)

    # Fit a plane parallel to the wall through point2, then isolate the
    # valve cluster around it.
    polyData, _, _ = applyPlaneFit(polyData, expectedNormal=wallNormal, searchOrigin=point2, searchRadius=expectedValveRadius, angleEpsilon=0.2, returnOrigin=True)
    valveCluster = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    valveCluster = cropToSphere(valveCluster, point2, expectedValveRadius*2)
    valveCluster = extractLargestCluster(valveCluster, minClusterSize=1)
    updatePolyData(valveCluster, 'valve cluster', parent=getDebugFolder(), visible=False)

    origin = np.average(vtkNumpy.getNumpyFromVtk(valveCluster, 'Points') , axis=0)

    # Frame: z along the wall normal, x as close to world up as possible.
    zaxis = wallNormal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    zwidth = 0.03
    radius = expectedValveRadius

    # Cylinder visualization for the valve disc.
    d = DebugData()
    d.addLine(np.array([0,0,-zwidth/2.0]), np.array([0,0,zwidth/2.0]), radius=radius)

    name = 'valve affordance'
    obj = showPolyData(d.getPolyData(), name, cls=FrameAffordanceItem, parent='affordances', color=[0,1,0])
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    # Re-fit the valve orientation whenever the wall is refit.
    refitWallCallbacks.append(functools.partial(refitValveAffordance, obj))

    params = dict(axis=zaxis, radius=radius, length=zwidth, origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
                  xwidth=radius, ywidth=radius, zwidth=zwidth,
                  otdf_type='steering_cyl', friendly_name='valve')

    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()

    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, scale=radius, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentValveByBoundingBox(polyData, searchPoint):
    """Segment a valve wheel near searchPoint without wall annotations.

    Finds the valve plane via a vertical tube search around searchPoint,
    clusters the valve points on that plane, estimates the wheel radius from
    the cluster's vertical extent, and creates a 'valve'
    CapsuleRingAffordanceItem. Returns the created affordance object.
    """
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()

    polyData = cropToSphere(polyData, searchPoint, radius=0.6)
    polyData = applyVoxelGrid(polyData, leafSize=0.015)

    # extract tube search region
    polyData = labelDistanceToLine(polyData, searchPoint, np.array(searchPoint) + np.array([0,0,1]))
    searchRegion = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.2])
    updatePolyData(searchRegion, 'valve tube search region', parent=getDebugFolder(), color=[1,0,0], visible=False)

    # guess valve plane
    _, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.01, perpendicularAxis=viewDirection, angleEpsilon=math.radians(30), expectedNormal=-viewDirection, returnOrigin=True)

    # extract plane search region
    polyData = labelPointDistanceAlongAxis(polyData, normal, origin)
    searchRegion = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
    updatePolyData(searchRegion, 'valve plane search region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)

    valvePoints = extractLargestCluster(searchRegion, minClusterSize=1)
    updatePolyData(valvePoints, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)

    valvePoints, _ = applyPlaneFit(valvePoints, expectedNormal=normal, perpendicularAxis=normal, distanceThreshold=0.01)
    valveFit = thresholdPoints(valvePoints, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)

    # Estimate the ring radius from the vertical extent, less the rim tube.
    points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
    zvalues = points[:,2].copy()
    minZ = np.min(zvalues)
    maxZ = np.max(zvalues)

    tubeRadius = 0.017
    radius = float((maxZ - minZ) / 2.0) - tubeRadius

    fields = makePolyDataFields(valveFit)
    origin = np.array(fields.frame.GetPosition())
    #origin = computeCentroid(valveFit)

    # Frame: x along the fitted plane normal, z vertical.
    zaxis = [0,0,1]
    xaxis = normal
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    pose = transformUtils.poseFromTransform(t)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=radius, Segments=20)
    desc['Tube Radius'] = tubeRadius

    import affordancepanel
    obj = affordancepanel.panel.affordanceFromDescription(desc)
    obj.params = dict(radius=radius)

    return obj
def segmentDoorPlane(polyData, doorPoint, stanceFrame):
    """Fit the door plane near a picked point and return a door frame.

    Returns a vtkTransform at the door points' centroid (x/y) and the stance
    frame's ground height (z), with z vertical and x along the door normal
    (flipped so the normal opposes the view direction).
    """
    doorPoint = np.array(doorPoint)
    doorBand = 1.5

    # Keep only points within a vertical band around the picked point.
    polyData = cropToLineSegment(polyData, doorPoint + [0.0,0.0,doorBand/2], doorPoint - [0.0,0.0,doorBand/2])

    fitPoints, normal = applyLocalPlaneFit(polyData, doorPoint, searchRadius=0.2, searchRadiusEnd=1.0, removeGroundFirst=False)
    updatePolyData(fitPoints, 'door points', visible=False, color=[0,1,0])

    # Orient the fitted normal against the view direction.
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    if np.dot(normal, viewDirection) > 0:
        normal = -normal

    origin = computeCentroid(fitPoints)
    groundHeight = stanceFrame.GetPosition()[2]
    origin = [origin[0], origin[1], groundHeight]

    # z-up frame with x opposing the door normal.
    xaxis = -normal
    zaxis = [0,0,1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    return t
def segmentValveByRim(polyData, rimPoint1, rimPoint2):
    """Segment a valve wheel from two picked points on its rim.

    The rim points define the valve plane band; the cloud is sliced along the
    rim axis, clustered, and a bounding-box fit gives the ring radius and
    orientation. Creates and returns a 'valve' CapsuleRingAffordanceItem.
    """
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()

    yaxis = np.array(rimPoint2) - np.array(rimPoint1)
    zaxis = [0,0,1]
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    # flip xaxis to be with view direction
    if np.dot(xaxis, viewDirection) < 0:
        xaxis = -xaxis

    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    origin = (np.array(rimPoint2) + np.array(rimPoint1)) / 2.0

    # Slice the cloud to a thin slab through the rim midpoint.
    polyData = labelPointDistanceAlongAxis(polyData, xaxis, origin)
    polyData = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
    updatePolyData(polyData, 'valve plane region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)

    polyData = cropToSphere(polyData, origin, radius=0.4)
    polyData = applyVoxelGrid(polyData, leafSize=0.015)
    updatePolyData(polyData, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)

    valveFit = extractLargestCluster(polyData, minClusterSize=1)
    updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)

    # First radius estimate from the cluster's vertical extent, less the rim
    # tube radius (superseded by the bounding-box estimate below).
    points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
    zvalues = points[:,2].copy()
    minZ = np.min(zvalues)
    maxZ = np.max(zvalues)

    tubeRadius = 0.017
    radius = float((maxZ - minZ) / 2.0) - tubeRadius

    fields = makePolyDataFields(valveFit)
    origin = np.array(fields.frame.GetPosition())
    vis.updatePolyData(transformPolyData(fields.box, fields.frame), 'valve cluster bounding box', visible=False)

    #origin = computeCentroid(valveFit)

    '''
    zaxis = [0,0,1]
    xaxis = normal
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    '''

    # Refine: radius from the box's largest dimension; x-axis snapped to the
    # box axis most aligned with the initial rim-derived x.
    radius = np.max(fields.dims)/2.0 - tubeRadius

    proj = [np.abs(np.dot(xaxis, axis)) for axis in fields.axes]
    xaxisNew = fields.axes[np.argmax(proj)]
    if np.dot(xaxisNew, xaxis) < 0:
        xaxisNew = -xaxisNew

    xaxis = xaxisNew

    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    pose = transformUtils.poseFromTransform(t)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
    desc['Tube Radius'] = tubeRadius

    import affordancepanel
    obj = affordancepanel.panel.affordanceFromDescription(desc)
    obj.params = dict(radius=radius)

    return obj
def segmentValveByWallPlane(expectedValveRadius, point1, point2):
    """Segment a wheel valve hanging in front of a wall.

    point1/point2 are points on the wall on either side of the valve. Fits
    the wall plane, crops a search box in front of it, circle-fits the valve
    cluster, and creates a torus-shaped 'valve' FrameAffordanceItem (with a
    spoke frame; in-cloud spoke-angle fitting is currently disabled).
    """
    centerPoint = (point1 + point2) / 2.0

    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    _ , polyData = removeGround(polyData)

    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)

    # Perpendicular annotation line used to bound the search region.
    perpLine = np.cross(point2 - point1, normal)
    #perpLine /= np.linalg.norm(perpLine)
    #perpLine * np.linalg.norm(point2 - point1)/2.0
    point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0

    d = DebugData()
    d.addLine(point1, point2)
    d.addLine(point3, point4)
    updatePolyData(d.getPolyData(), 'crop lines', parent=getDebugFolder(), visible=False)

    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'valve wall', parent=getDebugFolder(), visible=False)

    # Search the band 5-50cm in front of the wall, bounded by the two lines.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.05, 0.5])
    searchRegion = cropToLineSegment(searchRegion, point1, point2)
    searchRegion = cropToLineSegment(searchRegion, point3, point4)
    updatePolyData(searchRegion, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)

    searchRegionSpokes = shallowCopy(searchRegion)

    searchRegion, origin, _ = applyPlaneFit(searchRegion, expectedNormal=normal, perpendicularAxis=normal, returnOrigin=True)
    searchRegion = thresholdPoints(searchRegion, 'dist_to_plane', [-0.015, 0.015])
    updatePolyData(searchRegion, 'valve search region 2', parent=getDebugFolder(), color=[0,1,0], visible=False)

    largestCluster = extractLargestCluster(searchRegion, minClusterSize=1)
    updatePolyData(largestCluster, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)

    radiusLimit = [expectedValveRadius - 0.01, expectedValveRadius + 0.01] if expectedValveRadius else None
    #radiusLimit = None

    polyData, circleFit = extractCircle(largestCluster, distanceThreshold=0.01, radiusLimit=radiusLimit)
    updatePolyData(polyData, 'circle fit', parent=getDebugFolder(), visible=False)

    #polyData, circleFit = extractCircle(polyData, distanceThreshold=0.01)
    #showPolyData(polyData, 'circle fit', colorByName='z')

    radius = circleFit.GetCircleRadius()
    origin = np.array(circleFit.GetCircleOrigin())
    circleNormal = np.array(circleFit.GetCircleNormal())
    circleNormal = circleNormal/np.linalg.norm(circleNormal)

    if np.dot(circleNormal, normal) < 0:
        circleNormal *= -1

    # force use of the plane normal
    circleNormal = normal
    radius = expectedValveRadius

    d = DebugData()
    d.addLine(origin - normal*radius, origin + normal*radius)
    d.addCircle(origin, circleNormal, radius)
    updatePolyData(d.getPolyData(), 'valve axes', parent=getDebugFolder(), visible=False)

    # Valve frame: z into the wall, x as close to world up as possible.
    zaxis = -circleNormal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    # Spoke angle fitting:
    if (1==0): # disabled jan 2015
        # extract the relative positon of the points to the valve axis:
        searchRegionSpokes = labelDistanceToLine(searchRegionSpokes, origin, [origin + circleNormal])
        searchRegionSpokes = thresholdPoints(searchRegionSpokes, 'distance_to_line', [0.05, radius-0.04])
        updatePolyData(searchRegionSpokes, 'valve spoke search', parent=getDebugFolder(), visible=False)
        searchRegionSpokesLocal = transformPolyData(searchRegionSpokes, t.GetLinearInverse() )
        points = vtkNumpy.getNumpyFromVtk(searchRegionSpokesLocal , 'Points')

        spoke_angle = findValveSpokeAngle(points)
    else:
        spoke_angle = 0

    spokeAngleTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,spoke_angle])
    spokeTransform = transformUtils.copyFrame(t)
    spokeAngleTransform.Concatenate(spokeTransform)

    spokeObj = showFrame(spokeAngleTransform, 'spoke frame', parent=getDebugFolder(), visible=False, scale=radius)
    spokeObj.addToView(app.getDRCView())
    t = spokeAngleTransform

    zwidth = 0.0175

    # Torus plus a radial bar visualization of the valve wheel.
    d = DebugData()
    #d.addLine(np.array([0,0,-zwidth/2.0]), np.array([0,0,zwidth/2.0]), radius=radius)
    d.addTorus(radius, 0.127)
    d.addLine(np.array([0,0,0]), np.array([radius-zwidth,0,0]), radius=zwidth) # main bar

    name = 'valve'
    obj = showPolyData(d.getPolyData(), name, cls=FrameAffordanceItem, parent='affordances', color=[0,1,0])
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    refitWallCallbacks.append(functools.partial(refitValveAffordance, obj))

    params = dict(axis=zaxis, radius=radius, length=zwidth, origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
                  xwidth=radius, ywidth=radius, zwidth=zwidth,
                  otdf_type='steering_cyl', friendly_name='valve')

    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()

    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False, scale=radius)
    frameObj.addToView(app.getDRCView())
def showHistogram(polyData, arrayName, numberOfBins=100):
    """Plot a histogram of the named vtk array; return the peak bin's center."""
    import matplotlib.pyplot as plt

    values = vnp.getNumpyFromVtk(polyData, arrayName)
    counts, edges = np.histogram(values, bins=numberOfBins)

    binWidth = edges[1] - edges[0]
    centers = (edges[:-1] + edges[1:]) / 2
    plt.bar(centers, counts, align='center', width=0.7 * binWidth)
    plt.show()

    # Center of the fullest bin.
    return edges[np.argmax(counts)] + binWidth/2.0
def applyKmeansLabel(polyData, arrayName, numberOfClusters, whiten=False):
import scipy.cluster
ar = vnp.getNumpyFromVtk(polyData, arrayName).copy()
if whiten:
scipy.cluster.vq.whiten(ar)
codes, disturbances = scipy.cluster.vq.kmeans(ar, numberOfClusters)
if arrayName == 'normals' and numberOfClusters == 2:
v1 = codes[0]
v2 = codes[1]
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
angle = np.arccos(np.dot(v1, v2))
print 'angle between normals:', np.degrees(angle)
code, distance = scipy.cluster.vq.vq(ar, codes)
polyData = shallowCopy(polyData)
vnp.addNumpyToVtk(polyData, code, '%s_kmeans_label' % arrayName)
return polyData
def findValveSpokeAngle(points):
    '''
    Determine the location of the valve spoke angle
    By binning the spoke returns. returns angle in degrees
    '''
    #np.savetxt("/home/mfallon/Desktop/spoke_points.csv", points, delimiter=",")

    # Fold each point's polar angle into the 120-degree symmetric range of a
    # three-spoke wheel, histogram in 10-degree bins, and report the center
    # of the fullest bin.
    polarAngles = np.degrees(np.arctan2(points[:, 1], points[:, 0]))
    polarAngles[polarAngles < 0] += 360
    foldedAngles = np.mod(polarAngles, 120)

    binEdges = range(0, 130, 10)  # 0,10,...,120
    counts, binEdges = np.histogram(foldedAngles, binEdges)
    fullestBin = np.argmax(counts)
    return binEdges[fullestBin] + 5  # +5 deg: center of the 10-degree bin
def findWallCenter(polyData, removeGroundMethod=removeGround):
    '''
    Find a frame at the center of the valve wall.
    X&Y: midpoint of the wall points' extent (not the average)
    Z: 4 feet (1.2192m) above the detected ground height
    Orientation: z-axis along the wall normal, x-axis as close to world up as possible
    '''
    # Drop the ground, then fit the dominant plane facing the viewer.
    _ , polyData = removeGroundMethod(polyData)

    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)

    # Keep near-plane points, downsample, and take the largest cluster as
    # the wall itself.
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    wallPoints = applyVoxelGrid(wallPoints, leafSize=0.03)
    wallPoints = extractLargestCluster(wallPoints, minClusterSize=100)
    updatePolyData(wallPoints, 'auto valve wall', parent=getDebugFolder(), visible=False)

    xvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,0]
    yvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,1]

    # median or mid of max or min?
    #xcenter = np.median(xvalues)
    #ycenter = np.median(yvalues)
    xcenter = (np.max(xvalues)+np.min(xvalues))/2
    ycenter = (np.max(yvalues)+np.min(yvalues))/2

    # not used, not very reliable
    #zvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,2]
    #zcenter = np.median(zvalues)

    zcenter = SegmentationContext.getGlobalInstance().getGroundHeight() + 1.2192 # valves are 4ft from ground

    point1 =np.array([ xcenter, ycenter, zcenter ]) # center of the valve wall

    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)

    normalObj = showFrame(t, 'valve wall frame', parent=getDebugFolder(), visible=False) # z direction out of wall
    normalObj.addToView(app.getDRCView())

    return t
def segmentValveWallAuto(expectedValveRadius=.195, mode='both', removeGroundMethod=removeGround):
    '''
    Automatically segment a valve hanging in front of the wall at the center
    '''
    # Locate the wall frame, then express candidate annotation points
    # relative to it.
    snapshot = om.findObjectByName('pointcloud snapshot')
    wallFrame = findWallCenter(snapshot.polyData, removeGroundMethod)

    def wallRelativePoint(offset):
        # Transform a wall-frame offset into a world-frame point.
        frame = transformUtils.frameFromPositionAndRPY(offset, [0, 0, 0])
        frame.Concatenate(wallFrame)
        return np.array(frame.GetPosition())

    point1 = wallRelativePoint([0, 0.6, 0])   # left of wall
    point2 = wallRelativePoint([0, -0.6, 0])  # right of wall
    point3 = wallRelativePoint([0, 1.0, 0])   # lever can over hang

    d = DebugData()
    d.addSphere(point2, radius=0.01)
    d.addSphere(point1, radius=0.03)
    d.addSphere(point3, radius=0.01)
    updatePolyData(d.getPolyData(), 'auto wall points', parent=getDebugFolder(), visible=False)

    if mode == 'valve':
        segmentValveByWallPlane(expectedValveRadius, point1, point2)
    elif mode == 'lever':
        segmentLeverByWallPlane(point1, point3)
    elif mode == 'both':
        segmentValveByWallPlane(expectedValveRadius, point1, point2)
        segmentLeverByWallPlane(point1, point3)
    else:
        raise Exception('unexpected segmentation mode: ' + mode)
def segmentLeverByWallPlane(point1, point2):
    '''
    Determine the pose (position and rotation) of a lever mounted on a wall.

    Input is the same as for the valve fit: two points picked on the wall,
    one on either side of the lever. Creates a 'valve lever' FrameAffordanceItem
    and its frame as a side effect.
    '''
    # 1. determine the wall plane and normal
    centerPoint = (point1 + point2) / 2.0

    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    # wall normal is expected to face back toward the sensor
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)

    # 2. Crop the cloud down to the lever only using the wall plane
    # perpLine spans the crop window perpendicular to the picked segment,
    # lying in the wall plane
    perpLine = np.cross(point2 - point1, -normal)
    #perpLine /= np.linalg.norm(perpLine)
    #perpLine * np.linalg.norm(point2 - point1)/2.0
    point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0

    d = DebugData()
    d.addLine(point1, point2)
    d.addLine(point3, point4)
    updatePolyData(d.getPolyData(), 'lever crop lines', parent=getDebugFolder(), visible=False)

    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'lever valve wall', parent=getDebugFolder(), visible=False)

    # keep only points standing 12-20cm off the wall, inside the crop window
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.12, 0.2]) # very tight threshold
    searchRegion = cropToLineSegment(searchRegion, point1, point2)
    searchRegion = cropToLineSegment(searchRegion, point3, point4)
    updatePolyData(searchRegion, 'lever search region', parent=getDebugFolder(), color=[1,0,0], visible=False)

    # 3. fit line to remaining points - all assumed to be the lever
    linePoint, lineDirection, _ = applyLineFit(searchRegion, distanceThreshold=0.02)
    #if np.dot(lineDirection, forwardDirection) < 0:
    #    lineDirection = -lineDirection

    d = DebugData()
    d.addSphere(linePoint, radius=0.02)
    updatePolyData(d.getPolyData(), 'lever point', parent=getDebugFolder(), visible=False)

    # extreme points of the cloud along the fitted line are the lever end points
    pts = vtkNumpy.getNumpyFromVtk(searchRegion, 'Points')
    dists = np.dot(pts-linePoint, lineDirection)
    lever_center = linePoint + lineDirection*np.min(dists)
    lever_tip = linePoint + lineDirection*np.max(dists)

    # 4. determine which lever point is closest to the lower left of the wall. That's the lever_center point
    # build a wall-aligned frame at point1 (z into wall, x up-projected)
    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)

    # a distant point down and left from wall
    wall_point_lower_left = [ -20 , -20.0 , 0]
    wall_point_lower_left_Transform = transformUtils.frameFromPositionAndRPY(wall_point_lower_left, [0,0,0])
    wall_point_lower_left_Transform.Concatenate(t)
    wall_point_lower_left = wall_point_lower_left_Transform.GetPosition()
    # compare in-plane distances of both end points to that distant corner
    d1 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_center, origin, normal) )**2) )
    d2 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_tip, origin, normal) )**2) )
    if (d2 < d1): # flip the points to match variable names
        p_temp = lever_center
        lever_center = lever_tip
        lever_tip = p_temp
        lineDirection = -lineDirection

    # 5. compute the rotation angle of the lever and, using that, its frame
    zaxis = -normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(lever_center) # nominal frame at lever center

    # rotate the nominal frame about the wall normal by the lever's angle
    rotationAngle = -computeSignedAngleBetweenVectors(lineDirection, [0, 0, 1], -normal)
    t_lever = transformUtils.frameFromPositionAndRPY( [0,0,0], [0,0, math.degrees( rotationAngle ) ] )
    t_lever.PostMultiply()
    t_lever.Concatenate(t)

    d = DebugData()
    # d.addSphere( point1 , radius=0.1)
    d.addSphere( wall_point_lower_left , radius=0.1)
    d.addSphere(lever_center, radius=0.04)
    d.addSphere(lever_tip, radius=0.01)
    d.addLine(lever_center, lever_tip)
    updatePolyData(d.getPolyData(), 'lever end points', color=[0,1,0], parent=getDebugFolder(), visible=False)

    radius = 0.01
    length = np.sqrt( np.sum((lever_tip - lever_center )**2) )

    d = DebugData()
    d.addLine([0,0,0], [length, 0, 0], radius=radius)
    d.addSphere ( [0, 0, 0], 0.02)
    geometry = d.getPolyData()

    obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances' , color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t_lever)
    obj.addToView(app.getDRCView())
    frameObj = showFrame(t_lever, 'lever frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())

    otdfType = 'lever_valve'
    params = dict(origin=np.array(t_lever.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def applyICP(source, target):
    '''
    Rigid-body ICP registration of source onto target.
    Returns a vtkTransform holding the resulting alignment matrix.
    '''
    registration = vtk.vtkIterativeClosestPointTransform()
    registration.SetSource(source)
    registration.SetTarget(target)
    registration.GetLandmarkTransform().SetModeToRigidBody()
    registration.Update()
    result = vtk.vtkTransform()
    result.SetMatrix(registration.GetMatrix())
    return result
def applyDiskGlyphs(polyData):
    '''
    Render a point cloud as flat disks oriented along estimated per-point
    normals (a surfel-style display). Returns the glyphed polyData.
    '''
    leafSize = 0.03
    normalSearchRadius = 0.05
    outerDiskRadius = 0.015
    circumferentialResolution = 12

    scanCloud = polyData
    # downsample, drop isolated outliers, then estimate normals against the
    # full-resolution scan
    cloud = applyVoxelGrid(scanCloud, leafSize=leafSize)
    cloud = labelOutliers(cloud, searchRadius=normalSearchRadius, neighborsInSearchRadius=3)
    cloud = thresholdPoints(cloud, 'is_outlier', [0, 0])
    cloud = normalEstimation(cloud, searchRadius=normalSearchRadius, searchCloud=scanCloud)

    # build the disk template, rotated so it faces along +x for glyph orientation
    diskSource = vtk.vtkDiskSource()
    diskSource.SetOuterRadius(outerDiskRadius)
    diskSource.SetInnerRadius(0.0)
    diskSource.SetRadialResolution(0)
    diskSource.SetCircumferentialResolution(circumferentialResolution)
    diskSource.Update()

    rotation = vtk.vtkTransform()
    rotation.RotateY(90)
    diskTemplate = transformPolyData(diskSource.GetOutput(), rotation)

    glypher = vtk.vtkGlyph3D()
    glypher.ScalingOff()
    glypher.OrientOn()
    glypher.SetSource(diskTemplate)
    glypher.SetInput(cloud)
    glypher.SetVectorModeToUseNormal()
    glypher.Update()

    return shallowCopy(glypher.GetOutput())
def applyArrowGlyphs(polyData, computeNormals=True, voxelGridLeafSize=0.03, normalEstimationSearchRadius=0.05, arrowSize=0.02):
    '''
    Render a point cloud as small arrows pointing along per-point normals.
    If computeNormals is True the normals are estimated here (flipped toward
    the view direction); otherwise the input must already carry normals.
    Returns the glyphed polyData.
    '''
    downsampled = applyVoxelGrid(polyData, leafSize=0.02)

    if computeNormals:
        searchCloud = applyVoxelGrid(downsampled, leafSize=voxelGridLeafSize)
        downsampled = normalEstimation(downsampled, searchRadius=normalEstimationSearchRadius, searchCloud=searchCloud)
        downsampled = removeNonFinitePoints(downsampled, 'normals')
        flipNormalsWithViewDirection(downsampled, SegmentationContext.getGlobalInstance().getViewDirection())

    assert downsampled.GetPointData().GetNormals()

    arrowSource = vtk.vtkArrowSource()
    arrowSource.Update()

    glypher = vtk.vtkGlyph3D()
    glypher.SetScaleFactor(arrowSize)
    glypher.SetSource(arrowSource.GetOutput())
    glypher.SetInput(downsampled)
    glypher.SetVectorModeToUseNormal()
    glypher.Update()

    return shallowCopy(glypher.GetOutput())
def segmentLeverValve(point1, point2):
    '''
    Create a lever-valve affordance from two picked points.
    point1: a point on the wall near the valve (seeds the plane fit).
    point2: the base/hinge point of the lever (frame origin).
    The lever geometry itself is hard-coded (radius/length), not estimated.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)

    # hard-coded lever dimensions (meters)
    radius = 0.01
    length = 0.33

    normal = -normal # set z to face into wall
    zaxis = normal
    xaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point2)

    # end points of the lever (computed but not used further below)
    leverP1 = point2
    leverP2 = point2 + xaxis * length

    d = DebugData()
    d.addLine([0,0,0], [length, 0, 0], radius=radius)
    d.addSphere ( [0, 0, 0], 0.02)
    geometry = d.getPolyData()

    obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances', color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    frameObj = showFrame(t, 'lever frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())

    otdfType = 'lever_valve'
    params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def segmentWye(point1, point2):
    '''
    Place the wye-pipe mesh affordance against a fitted wall plane.
    point1: plane-fit seed on the wall.
    point2: pick point on the wye; the mesh reference point is aligned to it.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)

    wyeMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/wye.obj'))

    # reference points in the mesh's local frame
    wyeMeshPoint = np.array([0.0, 0.0, 0.005])
    wyeMeshLeftHandle = np.array([0.032292, 0.02949, 0.068485])  # currently unused

    # frame: x into the wall, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    # shift so that wyeMeshPoint lands on the picked point
    t.PreMultiply()
    t.Translate(-wyeMeshPoint)
    t.PostMultiply()
    t.Translate(point2)

    d = DebugData()
    d.addSphere(point2, radius=0.005)
    updatePolyData(d.getPolyData(), 'wye pick point', parent=getDebugFolder(), visible=False)

    wyeObj = showPolyData(wyeMesh, 'wye', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    wyeObj.actor.SetUserTransform(t)
    wyeObj.addToView(app.getDRCView())
    frameObj = showFrame(t, 'wye frame', parent=wyeObj, visible=False)
    frameObj.addToView(app.getDRCView())

    params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='wye', otdf_type='wye')
    wyeObj.setAffordanceParams(params)
    wyeObj.updateParamsFromActorTransform()
def segmentDoorHandle(otdfType, point1, point2):
    '''
    Create a door-handle affordance as a fixed-size box on a fitted door plane.
    otdfType: otdf model name stored in the affordance params.
    point1: plane-fit seed on the door.
    point2: pick point on the handle (frame origin).
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
    wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)

    # offset of the grasp point in the handle frame; currently unused
    # (see the commented-out translate below)
    handlePoint = np.array([0.005, 0.065, 0.011])

    # frame: x into the door, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    # hard-coded handle box dimensions (meters)
    xwidth = 0.01
    ywidth = 0.13
    zwidth = 0.022
    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    #t.PreMultiply()
    #t.Translate(-handlePoint)
    t.PostMultiply()
    t.Translate(point2)

    name = 'door handle'
    obj = showPolyData(cube, name, cls=FrameAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()

    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentTruss(point1, point2):
    '''
    Create a truss ('robot_knees') affordance from two picked points along a
    truss edge. A fixed stance offset is baked into the geometry's frame.
    '''
    edge = point2 - point1
    edgeLength = np.linalg.norm(edge)

    # fixed stance position relative to the truss origin (meters)
    stanceOffset = [-0.42, 0.0, 0.0]
    stanceYaw = 0.0  # currently unused (RotateZ below is commented out)

    d = DebugData()
    p1 = [0.0, 0.0, 0.0]
    p2 = -np.array([0.0, -1.0, 0.0]) * edgeLength
    d.addSphere(p1, radius=0.02)
    d.addSphere(p2, radius=0.02)
    d.addLine(p1, p2)

    stanceTransform = vtk.vtkTransform()
    stanceTransform.PostMultiply()
    stanceTransform.Translate(stanceOffset)
    #stanceTransform.RotateZ(stanceYaw)

    # bake the inverse stance offset into the geometry so the affordance
    # frame sits at the stance location
    geometry = transformPolyData(d.getPolyData(), stanceTransform.GetLinearInverse())

    # frame: y along the picked edge, z up
    yaxis = edge/edgeLength
    zaxis = [0.0, 0.0, 1.0]
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    xwidth = 0.1
    ywidth = edgeLength
    zwidth = 0.1

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Concatenate(stanceTransform)
    t.PostMultiply()
    t.Translate(point1)

    name = 'truss'
    otdfType = 'robot_knees'
    obj = showPolyData(geometry, name, cls=FrameAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=t.GetPosition(), xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()

    frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentHoseNozzle(point1):
    '''
    Place a firehose-nozzle affordance at a picked point.
    Only the position comes from the pick; the nozzle geometry and the frame
    axes are hard-coded (z pointing down).
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    # cropped cloud is only published for debugging; it is not used in the fit
    searchRegion = cropToSphere(polyData, point1, 0.10)
    updatePolyData(searchRegion, 'nozzle search region', parent=getDebugFolder(), visible=False)

    xaxis = [1,0,0]
    yaxis = [0,-1,0]
    zaxis = [0,0,-1]
    origin = point1

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(point1)

    # nozzle dimensions (meters)
    nozzleRadius = 0.0266
    nozzleLength = 0.042
    nozzleTipRadius = 0.031
    nozzleTipLength = 0.024

    d = DebugData()
    # barrel segment plus a wider tip segment, both along local z
    d.addLine(np.array([0,0,-nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0]), radius=nozzleRadius)
    d.addLine(np.array([0,0,nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0 + nozzleTipLength]), radius=nozzleTipRadius)

    obj = showPolyData(d.getPolyData(), 'hose nozzle', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    frameObj = showFrame(t, 'nozzle frame', parent=obj, visible=False)
    frameObj.addToView(app.getDRCView())

    params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='firehose', otdf_type='firehose')
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def segmentDrillWall(point1, point2, point3):
    '''
    Create a triangular drill-target affordance from three picked wall points.
    Fits a plane through the picks, projects them onto it, and stores the
    triangle vertices (expressed in the wall frame) in the affordance params.
    Registers a refit callback so the target follows later wall refits.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    points = [point1, point2, point3]

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    expectedNormal = np.cross(point2 - point1, point3 - point1)
    expectedNormal /= np.linalg.norm(expectedNormal)
    # orient the triangle normal toward the camera
    if np.dot(expectedNormal, viewPlaneNormal) < 0:
        expectedNormal *= -1.0

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=(point1 + point2 + point3)/3.0, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)

    points = [projectPointToPlane(point, origin, normal) for point in points]

    # wall frame: x into the wall, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(points[0])

    d = DebugData()
    pointsInWallFrame = []
    for p in points:
        pp = np.zeros(3)
        t.GetLinearInverse().TransformPoint(p, pp)
        pointsInWallFrame.append(pp)
        d.addSphere(pp, radius=0.02)

    # closed triangle outline
    for a, b in zip(pointsInWallFrame, pointsInWallFrame[1:] + [pointsInWallFrame[0]]):
        d.addLine(a, b, radius=0.015)

    aff = showPolyData(d.getPolyData(), 'drill target', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill target frame', parent=aff, visible=False)
    refitWallCallbacks.append(functools.partial(refitDrillWall, aff))

    params = dict(origin=points[0], xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  p1y=pointsInWallFrame[0][1], p1z=pointsInWallFrame[0][2],
                  p2y=pointsInWallFrame[1][1], p2z=pointsInWallFrame[1][2],
                  p3y=pointsInWallFrame[2][1], p3z=pointsInWallFrame[2][2],
                  friendly_name='drill_wall', otdf_type='drill_wall')

    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
# Callbacks invoked by refitWall(); each is called as func(point1, origin, normal)
# so affordances fitted relative to a wall can re-anchor after a refit.
refitWallCallbacks = []
def refitWall(point1):
    '''
    Re-fit the wall plane at the picked point and notify every registered
    refit callback with the new plane origin and normal.
    '''
    snapshot = om.findObjectByName('pointcloud snapshot')
    cloud = snapshot.polyData

    cameraNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    cloud, planeOrigin, planeNormal = applyPlaneFit(cloud, expectedNormal=cameraNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)

    planePoints = thresholdPoints(cloud, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(planePoints, 'wall points', parent=getDebugFolder(), visible=False)

    for callback in refitWallCallbacks:
        callback(point1, planeOrigin, planeNormal)
def refitDrillWall(aff, point1, origin, normal):
    '''
    refitWall callback: re-anchor the drill-wall affordance onto a newly
    fitted wall plane while keeping the affordance's original height.
    '''
    t = aff.actor.GetUserTransform()

    targetOrigin = np.array(t.GetPosition())

    projectedOrigin = projectPointToPlane(targetOrigin, origin, normal)
    projectedOrigin[2] = targetOrigin[2]  # preserve the original z height

    # rebuild the wall frame: x into the wall, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(projectedOrigin)
    # write the new pose back into the affordance's existing transform
    aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
# NOTE(review): marked for deprecation by the original author.
def getGroundHeightFromFeet():
    '''
    Estimate the ground z height from the right foot link frame.
    The magic offset 0.0745342 is presumably the foot link origin's height
    above the sole - TODO confirm against the robot model.
    '''
    rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
    return np.array(rfoot.GetPosition())[2] - 0.0745342
# NOTE(review): marked for deprecation by the original author.
def getTranslationRelativeToFoot(t):
    '''
    NOTE(review): this function computes the right foot frame but never uses
    it or the argument `t`, and implicitly returns None - it looks unfinished
    or dead. Verify callers before removing or completing it.
    '''
    rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
def segmentDrillWallConstrained(rightAngleLocation, point1, point2):
    '''
    Create the standard-size drill wall from two picked points.
    point1 seeds the plane fit; point2 (projected onto the wall) becomes the
    triangle origin. The triangle itself is built by createDrillWall().
    rightAngleLocation is forwarded to createDrillWall (DRILL_TRIANGLE_*).
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    # expected wall normal: horizontal, perpendicular to the picked segment
    expectedNormal = np.cross(point2 - point1, [0.0, 0.0, 1.0])
    expectedNormal /= np.linalg.norm(expectedNormal)
    # orient the normal toward the camera
    if np.dot(expectedNormal, viewPlaneNormal) < 0:
        expectedNormal *= -1.0

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)

    triangleOrigin = projectPointToPlane(point2, origin, normal)

    # wall frame: x into the wall, z up
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(triangleOrigin)

    createDrillWall(rightAngleLocation, t)
def createDrillWall(rightAngleLocation, trianglePose):
    '''
    Create the right-triangle drill-wall affordance for the drilling task.

    rightAngleLocation: one of the DRILL_TRIANGLE_* constants selecting which
        corner of the triangle holds the right angle.
    trianglePose: vtkTransform giving the pose of the triangle's first vertex
        (x into the wall, per the callers' frame convention).

    Raises Exception for an unrecognized rightAngleLocation.
    Side effects: replaces any existing 'wall' object in the object model,
    registers a wall-refit callback, and adds the affordance to the DRC view.
    '''
    # recover the origin and axes from the pose:
    triangleOrigin = trianglePose.GetPosition()
    xaxis, yaxis, zaxis = transformUtils.getAxesFromTransform( trianglePose )

    # Triangle edge lengths, roughly 24in x 12in:
    # 0.6096 = 24 * .0254 ; 0.3048 = 12 * .0254
    edgeRight = np.array([0.0, -1.0, 0.0]) * (0.6)
    edgeUp = np.array([0.0, 0.0, 1.0]) * (0.3)

    # First vertex is the frame origin; the other two depend on where the
    # right angle sits.
    pointsInWallFrame = np.zeros((3,3))
    if rightAngleLocation == DRILL_TRIANGLE_BOTTOM_LEFT:
        pointsInWallFrame[1] = edgeUp
        pointsInWallFrame[2] = edgeRight
    elif rightAngleLocation == DRILL_TRIANGLE_BOTTOM_RIGHT:
        pointsInWallFrame[1] = edgeUp
        pointsInWallFrame[2] = -edgeRight
    elif rightAngleLocation == DRILL_TRIANGLE_TOP_LEFT:
        pointsInWallFrame[1] = edgeRight
        pointsInWallFrame[2] = -edgeUp
    elif rightAngleLocation == DRILL_TRIANGLE_TOP_RIGHT:
        pointsInWallFrame[1] = edgeRight
        pointsInWallFrame[2] = edgeRight - edgeUp
    else:
        # Fixed: the original raised Exception('...: ', + rightAngleLocation),
        # which built a confusing two-argument exception and crashed outright
        # for non-numeric values.
        raise Exception('unexpected value for right angle location: %r' % (rightAngleLocation,))

    center = pointsInWallFrame.sum(axis=0)/3.0
    shrinkFactor = 1  # no shrink currently (was 0.90)
    shrinkPoints = (pointsInWallFrame - center) * shrinkFactor + center

    d = DebugData()
    for p in pointsInWallFrame:
        d.addSphere(p, radius=0.015)

    # outline of the full triangle and of the (currently identical) shrunk one
    for a, b in zip(pointsInWallFrame, np.vstack((pointsInWallFrame[1:], pointsInWallFrame[0]))):
        d.addLine(a, b, radius=0.005)
    for a, b in zip(shrinkPoints, np.vstack((shrinkPoints[1:], shrinkPoints[0]))):
        d.addLine(a, b, radius=0.005)

    # replace any previous wall affordance
    folder = om.getOrCreateContainer('affordances')
    wall = om.findObjectByName('wall')
    om.removeFromObjectModel(wall)

    aff = showPolyData(d.getPolyData(), 'wall', cls=FrameAffordanceItem, color=[0,1,0], visible=True, parent=folder)
    aff.actor.SetUserTransform(trianglePose)
    aff.addToView(app.getDRCView())

    refitWallCallbacks.append(functools.partial(refitDrillWall, aff))

    frameObj = showFrame(trianglePose, 'wall frame', parent=aff, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())

    params = dict(origin=triangleOrigin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  p1y=shrinkPoints[0][1], p1z=shrinkPoints[0][2],
                  p2y=shrinkPoints[1][1], p2z=shrinkPoints[1][2],
                  p3y=shrinkPoints[2][1], p3z=shrinkPoints[2][2],
                  friendly_name='drill_wall', otdf_type='drill_wall')

    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()

    # NOTE(review): disabled debug code computing a foot-to-affordance
    # transform; previously a dead triple-quoted string. Kept for reference.
    # rfoot = getLinkFrame(drcargs.getDirectorConfig()['rightFootLink'])
    # tt = getTransformFromAxes(xaxis, yaxis, zaxis)
    # tt.PostMultiply()
    # tt.Translate(rfoot.GetPosition())
    # showFrame(tt, 'rfoot with wall orientation')
    # aff.footToAffTransform = computeAToB(tt, trianglePose)
    # footToAff = list(aff.footToAffTransform.GetPosition())
    # tt.TransformVector(footToAff, footToAff)
    # d = DebugData()
    # d.addSphere(tt.GetPosition(), radius=0.02)
    # d.addLine(tt.GetPosition(), np.array(tt.GetPosition()) + np.array(footToAff))
    # showPolyData(d.getPolyData(), 'rfoot debug')
def getDrillAffordanceParams(origin, xaxis, yaxis, zaxis, drillType="dewalt_button"):
    '''
    Build the affordance parameter dict for a drill model.
    drillType "dewalt_button" selects the button-style drill; any other value
    falls back to the barrel-style drill. Button/bit entries are fixed offsets
    (meters / degrees) in the drill's own frame.
    '''
    # entries shared by both drill models
    params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
                  xwidth=0.1, ywidth=0.1, zwidth=0.1,
                  button_x=0.007,
                  button_y=-0.035,
                  button_z=-0.06,
                  button_yaw=0.0,
                  bit_y=0.0,
                  bit_yaw=0)

    if drillType == "dewalt_button":
        params.update(button_roll=-90.0,
                      button_pitch=-90.0,
                      bit_x=-0.01,
                      bit_z=0.15,
                      bit_roll=0,
                      bit_pitch=-90,
                      friendly_name='dewalt_button', otdf_type='dewalt_button')
    else:
        params.update(button_roll=0.0,
                      button_pitch=0.0,
                      bit_x=0.18,
                      bit_z=0.13,
                      bit_roll=0,
                      bit_pitch=0,
                      friendly_name='dewalt_barrel', otdf_type='dewalt_barrel')

    return params
def getDrillMesh(applyBitOffset=False):
    '''
    Load the dewalt button-drill mesh and annotate it with a green sphere at
    the button and a short green line near the bit tip.
    applyBitOffset: if True, shift the mesh by 1cm along x before annotating.
    Returns a new polyData combining mesh and annotations.
    '''
    # button position in the drill's local frame (meters)
    button = np.array([0.007, -0.035, -0.06])
    drillMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt_button.obj'))
    if applyBitOffset:
        t = vtk.vtkTransform()
        t.Translate(0.01, 0.0, 0.0)
        drillMesh = transformPolyData(drillMesh, t)

    d = DebugData()
    d.addPolyData(drillMesh)
    d.addSphere(button, radius=0.005, color=[0,1,0])
    d.addLine([0.0,0.0,0.155], [0.0, 0.0, 0.14], radius=0.001, color=[0,1,0])
    return shallowCopy(d.getPolyData())
def getDrillBarrelMesh():
    '''Load the dewalt barrel-drill mesh (with computed normals).'''
    meshPath = os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt.ply')
    return ioUtils.readPolyData(meshPath, computeNormals=True)
def segmentDrill(point1, point2, point3):
    '''
    Fit a drill affordance from three picked points:
    point1 on the supporting surface (plane-fit seed),
    point2 at the top of the drill (anchors the frame),
    point3 such that (point3 - point2) fixes the drill's y axis / yaw.
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # points 3-40cm above the surface, near the drill pick
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
    searchRegion = cropToSphere(searchRegion, point2, 0.30)
    drillPoints = extractLargestCluster(searchRegion)  # NOTE(review): unused below

    # known offset from the drill frame origin to its top point
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    zaxis = normal
    yaxis = point3 - point2
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    # shift so that the drill's top point lands on point2
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(point2)

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())

    params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def makePolyDataFields(pd):
    '''
    Compute an oriented-bounding-box description of a point cloud.
    Returns a FieldContainer with the points, box wireframe, and Delaunay
    mesh expressed in the box frame, plus the frame, dims and axes, or None
    if the Delaunay mesh comes back empty.
    '''
    delaunayMesh = computeDelaunay3D(pd)
    if not delaunayMesh.GetNumberOfPoints():
        return None

    _, boxEdges, boxWireframe = getOrientedBoundingBox(delaunayMesh)
    dims = np.array([np.linalg.norm(edge) for edge in boxEdges])
    boxAxes = [edge / np.linalg.norm(edge) for edge in boxEdges]
    boxCenter = computeCentroid(boxWireframe)

    frame = getTransformFromAxes(boxAxes[0], boxAxes[1], boxAxes[2])
    frame.PostMultiply()
    frame.Translate(boxCenter)

    # express all geometry in the box-local frame
    frameInverse = frame.GetLinearInverse()
    localPoints = transformPolyData(pd, frameInverse)
    localWireframe = transformPolyData(boxWireframe, frameInverse)
    localMesh = transformPolyData(delaunayMesh, frameInverse)

    return FieldContainer(points=localPoints, box=localWireframe, mesh=localMesh, frame=frame, dims=dims, axes=boxAxes)
def makeMovable(obj, initialTransform=None):
    '''
    Adds a child frame to the given PolyDataItem. If initialTransform is not
    given, then an origin frame is computed for the polydata using the
    center and orientation of the oriented bounding of the polydata. The polydata
    is transformed using the inverse of initialTransform and then a child frame
    is assigned to the object to reposition it.
    '''
    pd = obj.polyData
    t = initialTransform

    if t is None:
        # derive a frame from the oriented bounding box of the polydata
        origin, edges, wireframe = getOrientedBoundingBox(pd)
        edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
        axes = [edge / np.linalg.norm(edge) for edge in edges]
        boxCenter = computeCentroid(wireframe)
        t = getTransformFromAxes(axes[0], axes[1], axes[2])
        t.PostMultiply()
        t.Translate(boxCenter)

    # re-express the polydata in the frame's local coordinates
    pd = transformPolyData(pd, t.GetLinearInverse())
    obj.setPolyData(pd)

    # reuse an existing child frame if present, otherwise create one
    frame = obj.getChildFrame()
    if frame:
        frame.copyFrame(t)
    else:
        frame = vis.showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
        obj.actor.SetUserTransform(t)
def segmentTable(polyData, searchPoint):
    '''
    Segment a horizontal table surface (perpendicular to +Z) in the given polyData
    using the given search point.

    Returns polyData, tablePoints, origin, normal.
    polyData is the input polyData with a new 'dist_to_plane' attribute.
    When several coplanar clusters are found, the one nearest searchPoint is
    returned as tablePoints.
    '''
    expectedNormal = np.array([0.0, 0.0, 1.0])
    tableNormalEpsilon = 0.4

    polyData = applyVoxelGrid(polyData, leafSize=0.01)

    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=searchPoint, searchRadius=0.3, angleEpsilon=tableNormalEpsilon, returnOrigin=True)
    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])

    # keep the in-plane cluster closest to the user's search point
    tablePoints = labelDistanceToPoint(tablePoints, searchPoint)
    tablePointsClusters = extractClusters(tablePoints, minClusterSize=10, clusterTolerance=0.1)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    return polyData, tablePoints, origin, normal
def filterClusterObjects(clusters):
    '''
    Keep only clusters that are roughly vertical and sufficiently long:
    the primary axis must be within 60 degrees of +Z (|dot| >= 0.5) and the
    primary dimension at least 0.1m.
    '''
    return [cluster for cluster in clusters
            if np.abs(np.dot(cluster.axes[0], [0, 0, 1])) >= 0.5
            and cluster.dims[0] >= 0.1]
def segmentTableThenFindDrills(polyData,pickedPoint):
    ''' Given a point cloud of a table with drills on it.
    Find all clusters and fit drills
    Assumes that all clusters are of drills
    Nothing else is ever on a table ;)
    '''
    # 1 segment a table and return clusters and the plane normal
    clusters, tablePoints, plane_origin, plane_normal = segmentTableSceneClusters(polyData, pickedPoint, True)

    # 2 Detect drills within the clusters:
    # robot forward direction = view frame's +x expressed in world
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    forwardDirection = np.array([1.0, 0.0, 0.0])
    viewFrame.TransformVector(forwardDirection, forwardDirection)
    robotForward =forwardDirection

    fitResults=[]

    for clusterObj in clusters:
        # vis.showPolyData(clusterObj, 'cluster debug')
        drillFrame = fitDrillBarrel (clusterObj, robotForward, plane_origin, plane_normal)
        if drillFrame is not None:
            fitResults.append((clusterObj, drillFrame))

    if not fitResults:
        return

    # create one affordance per successful fit
    for i, fitResult in enumerate(fitResults):
        cluster, drillFrame = fitResult
        drillOrigin = np.array(drillFrame.GetPosition())
        drillMesh = getDrillBarrelMesh()
        #drill = om.findObjectByName('drill')

        name= 'drill %d' % i
        name2= 'drill %d frame' % i
        drill = showPolyData(drillMesh, name, cls=FrameAffordanceItem, color=[0, 1, 0], visible=True)
        drillFrame = updateFrame(drillFrame, name2, parent=drill, scale=0.2, visible=False)
        drill.actor.SetUserTransform(drillFrame.transform)

        drill.setSolidColor([0, 1, 0])
        #cluster.setProperty('Visible', True)
def segmentTableScene(polyData, searchPoint, filterClustering = True):
    '''
    Segment a table scene into a table FieldContainer plus object clusters.
    NOTE(review): appears unused/deprecated per the original author's note.
    '''
    rawClusters, tablePoints, _, _ = segmentTableSceneClusters(polyData, searchPoint)

    # wrap each raw cluster in oriented-bounding-box fields, dropping failures
    clusterFields = []
    for rawCluster in rawClusters:
        fields = makePolyDataFields(rawCluster)
        if fields is not None:
            clusterFields.append(fields)

    if filterClustering:
        clusterFields = filterClusterObjects(clusterFields)

    return FieldContainer(table=makePolyDataFields(tablePoints), clusters=clusterFields)
def segmentTableSceneClusters(polyData, searchPoint, clusterInXY=False):
    ''' Given a point cloud of a table with some objects on it
    and a point on that table
    determine the plane of the table and
    extract clusters above the table.
    Returns (objectClusters, tablePoints, plane_origin, plane_normal).
    '''
    polyData, tablePoints, plane_origin, plane_normal = segmentTable(polyData, searchPoint)

    tableCentroid = computeCentroid(tablePoints)

    # points 2-50cm above the table plane
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.5])
    # TODO: replace with 'all points above the table':
    searchRegion = cropToSphere(searchRegion, tableCentroid, 0.5) # was 1.0

    tableCentroidFrame = transformUtils.frameFromPositionAndRPY(tableCentroid, [0,0,0])
    showFrame(tableCentroidFrame, 'tableCentroid', visible=False, parent=getDebugFolder(), scale=0.15)
    showPolyData(searchRegion, 'searchRegion', color=[1,0,0], visible=False, parent=getDebugFolder())

    # clusterInXY is passed positionally to extractClusters; presumably it
    # selects clustering in the XY plane - verify extractClusters' signature
    objectClusters = extractClusters(searchRegion, clusterInXY, clusterTolerance=0.02, minClusterSize=10)

    #print 'got %d clusters' % len(objectClusters)
    for i,c in enumerate(objectClusters):
        name= "cluster %d" % i
        showPolyData(c, name, color=getRandomColor(), visible=False, parent=getDebugFolder())

    return objectClusters, tablePoints, plane_origin, plane_normal
def segmentTableEdge(polyData, searchPoint, edgePoint):
    '''
    segment a table using two points:
    searchPoint is a point on the table top
    edgePoint is a point on the edge facing the robot.
    Returns a FieldContainer with the table points, bounding-box wireframe
    and mesh expressed in a frame whose x axis points from the robot-facing
    edge into the table.
    '''
    polyData, tablePoints, origin, normal = segmentTable(polyData, searchPoint)
    tableMesh = computeDelaunay3D(tablePoints)
    origin, edges, wireframe = getOrientedBoundingBox(tableMesh)
    # move origin from box corner to box center
    origin = origin + 0.5*np.sum(edges, axis=0)
    edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
    axes = [edge / np.linalg.norm(edge) for edge in edges]

    def findAxis(referenceVector):
        # pick the OBB axis most aligned with referenceVector, sign-corrected
        refAxis = referenceVector / np.linalg.norm(referenceVector)
        axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
        axisIndex = axisProjections.argmax()
        axis = axes[axisIndex]
        if np.dot(axis, refAxis) < 0:
            axis = -axis
        return axis, axisIndex

    # x: edge->table direction, z: up, y: completes the right-handed frame
    tableXAxis, tableXAxisIndex = findAxis(searchPoint - edgePoint)
    tableZAxis, tableZAxisIndex = findAxis([0,0,1])
    tableYAxis, tableYAxisIndex = findAxis(np.cross(tableZAxis, tableXAxis))
    # the three picks must be three distinct OBB axes
    assert len(set([tableXAxisIndex, tableYAxisIndex, tableZAxisIndex])) == 3

    axes = tableXAxis, tableYAxis, tableZAxis
    edgeLengths = edgeLengths[tableXAxisIndex], edgeLengths[tableYAxisIndex], edgeLengths[tableZAxisIndex]

    # reference points along the robot-facing top edge
    edgeCenter = origin - 0.5 * axes[0]*edgeLengths[0] + 0.5*axes[2]*edgeLengths[2]
    edgeLeft = edgeCenter + 0.5 * axes[1]*edgeLengths[1]
    edgeRight = edgeCenter - 0.5 * axes[1]*edgeLengths[1]

    t = getTransformFromAxes(axes[0], axes[1], axes[2])
    t.PostMultiply()
    t.Translate(edgeRight)

    # shift the frame to the table center (half-extents in the edge frame)
    table_center = [edgeLengths[0]/2, edgeLengths[1]/2, -edgeLengths[2]/2]
    t.PreMultiply()
    t3 = transformUtils.frameFromPositionAndRPY(table_center,[0,0,0])
    t.Concatenate(t3)

    # express all geometry in the table-local frame
    tablePoints = transformPolyData(tablePoints, t.GetLinearInverse())
    wireframe = transformPolyData(wireframe, t.GetLinearInverse())
    tableMesh = transformPolyData(tableMesh, t.GetLinearInverse())

    return FieldContainer(points=tablePoints, box=wireframe, mesh=tableMesh, frame=t, dims=edgeLengths, axes=axes)
def segmentDrillAuto(point1, polyData=None):
    '''
    Fit a drill affordance from a single pick on the supporting table.
    The drill yaw is estimated from the per-slice centroids of the drill
    cluster (computed along the table normal).
    polyData defaults to the 'pointcloud snapshot' object's data.
    '''
    if polyData is None:
        inputObj = om.findObjectByName('pointcloud snapshot')
        polyData = inputObj.polyData

    expectedNormal = np.array([0.0, 0.0, 1.0])
    polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=point1, searchRadius=0.4, angleEpsilon=0.2, returnOrigin=True)

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # keep the coplanar cluster closest to the pick
    tablePoints = labelDistanceToPoint(tablePoints, point1)
    tablePointsClusters = extractClusters(tablePoints)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    # points 3-40cm above the table near the pick should be the drill
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
    searchRegion = cropToSphere(searchRegion, point1, 0.30)
    drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)

    # determine drill orientation (rotation about z axis)
    centroids = computeCentroids(drillPoints, axis=normal)

    centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
    d = DebugData()  # NOTE(review): unused below
    updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)

    # known offset from the drill frame origin to its top point
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    zaxis = normal
    # yaw axis from bottom-slice centroid to top-slice centroid
    yaxis = centroids[0] - centroids[-1]
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    # note this hack to orient the drill correctly:
    t = getTransformFromAxes(yaxis, -xaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(centroids[-1])

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False, scale=0.2).addToView(app.getDRCView())

    params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def segmentDrillButton(point1):
    """Show a small sphere marker at the sensed drill button position point1."""
    markerDebug = DebugData()
    markerDebug.addSphere([0,0,0], radius=0.005)
    buttonObj = updatePolyData(markerDebug.getPolyData(), 'sensed drill button', color=[0,0.5,0.5], visible=True)

    # there is no orientation, but this allows the XYZ point to be queried
    buttonFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
    buttonObj.actor.SetUserTransform(buttonFrame)
    buttonObj.addToView(app.getDRCView())

    frameObj = updateFrame(buttonObj.actor.GetUserTransform(), 'sensed drill button frame', parent=buttonObj, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())
def segmentPointerTip(point1):
    """Show a small sphere marker at the sensed pointer tip position point1."""
    markerDebug = DebugData()
    markerDebug.addSphere([0,0,0], radius=0.005)
    tipObj = updatePolyData(markerDebug.getPolyData(), 'sensed pointer tip', color=[0.5,0.5,0.0], visible=True)

    # there is no orientation, but this allows the XYZ point to be queried
    tipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
    tipObj.actor.SetUserTransform(tipFrame)
    tipObj.addToView(app.getDRCView())

    frameObj = updateFrame(tipObj.actor.GetUserTransform(), 'sensed pointer tip frame', parent=tipObj, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())
def fitGroundObject(polyData=None, expectedDimensionsMin=[0.2, 0.02], expectedDimensionsMax=[1.3, 0.1]):
    """Fit a small box-like object resting near the ground.

    Removes the ground, clusters the points just above it, keeps clusters
    whose two largest oriented-bounding-box edges fall inside the expected
    dimension ranges, and shows the surviving cluster closest to the view
    origin.  Returns the shown cluster object, or None if nothing matched.

    NOTE(review): the list defaults are read-only here, but tuple defaults
    would be safer Python style.
    """
    removeGroundFunc = removeGroundSimple
    polyData = polyData or getCurrentRevolutionData()
    groundPoints, scenePoints = removeGroundFunc(polyData, groundThickness=0.02, sceneHeightFromGround=0.035)
    searchRegion = thresholdPoints(scenePoints, 'dist_to_plane', [0.05, 0.2])
    clusters = extractClusters(searchRegion, clusterTolerance=0.07, minClusterSize=4)

    candidates = []
    for clusterId, cluster in enumerate(clusters):
        # Size filter on the two largest oriented-bounding-box edge lengths.
        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]

        found = (expectedDimensionsMin[0] <= edgeLengths[0] < expectedDimensionsMax[0]
                 and expectedDimensionsMin[1] <= edgeLengths[1] < expectedDimensionsMax[1])
        if not found:
            updatePolyData(cluster, 'candidate cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
            continue

        updatePolyData(cluster, 'cluster %d' % clusterId, color=[0,1,0], parent=getDebugFolder(), visible=False)
        candidates.append(cluster)

    if not candidates:
        return None

    # Prefer the candidate whose centroid is closest to the view origin.
    viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
    viewOrigin = np.array(viewFrame.GetPosition())

    dists = [np.linalg.norm(viewOrigin - computeCentroid(cluster)) for cluster in candidates]
    candidates = [candidates[i] for i in np.argsort(dists)]

    cluster = candidates[0]
    obj = makePolyDataFields(cluster)

    return vis.showClusterObjects([obj], parent='segmentation')[0]
def findHorizontalSurfaces(polyData, removeGroundFirst=False, normalEstimationSearchRadius=0.05,
                           clusterTolerance=0.025, distanceToPlaneThreshold=0.0025, normalsDotUpRange=[0.95, 1.0], showClusters=False):
    '''
    Find the horizontal surfaces, tuned to work with walking terrain

    Estimates per-point normals, keeps points whose normal is nearly
    vertical, clusters them, refines each cluster with a plane fit, and
    returns the clusters whose plane inliers exceed the minimum cluster
    size.  When showClusters is True the fitted plane objects are shown.
    '''
    searchZ = [0.0, 2.0]        # height band above the ground that is searched
    voxelGridLeafSize = 0.01
    minClusterSize = 150
    verboseFlag = False

    if (removeGroundFirst):
        groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05)
        scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', searchZ)
        updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=verboseFlag)
    else:
        scenePoints = polyData

    if not scenePoints.GetNumberOfPoints():
        return

    # Estimate normals; the second input is a voxel-grid-downsampled copy.
    f = vtk.vtkPCLNormalEstimation()
    f.SetSearchRadius(normalEstimationSearchRadius)
    f.SetInput(scenePoints)
    f.SetInput(1, applyVoxelGrid(scenePoints, voxelGridLeafSize))

    # Duration 0.2 sec for V1 log:
    f.Update()
    scenePoints = shallowCopy(f.GetOutput())

    # Keep points whose normals are close to vertical (either direction).
    normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
    normalsDotUp = np.abs(np.dot(normals, [0,0,1]))

    vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')

    surfaces = thresholdPoints(scenePoints, 'normals_dot_up', normalsDotUpRange)

    updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
    updatePolyData(surfaces, 'surfaces points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)

    clusters = extractClusters(surfaces, clusterTolerance=clusterTolerance, minClusterSize=minClusterSize)

    planeClusters = []
    clustersLarge = []

    om.removeFromObjectModel(om.findObjectByName('surface clusters'))
    folder = om.getOrCreateContainer('surface clusters', parentObj=getDebugFolder())

    for i, cluster in enumerate(clusters):
        updatePolyData(cluster, 'surface cluster %d' % i, parent=folder, color=getRandomColor(), visible=verboseFlag)
        # Refine each cluster with a plane fit; keep only well-supported planes.
        planePoints, _ = applyPlaneFit(cluster, distanceToPlaneThreshold)
        planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])

        if planePoints.GetNumberOfPoints() > minClusterSize:
            clustersLarge.append(cluster)
            obj = makePolyDataFields(planePoints)
            if obj is not None:
                planeClusters.append(obj)

    folder = om.getOrCreateContainer('surface objects', parentObj=getDebugFolder())
    if showClusters:
        vis.showClusterObjects(planeClusters, parent=folder)

    return clustersLarge
def fitVerticalPosts(polyData):
    """Find vertical post-like clusters in polyData and create cylinder affordances.

    The ground is removed, the remaining scene is clustered, and each
    cluster is line-fit; clusters that are near-vertical, long enough, and
    thin in their top half are accepted as posts.
    """
    groundPoints, scenePoints = removeGround(polyData)
    scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.1, 4.0])

    if not scenePoints.GetNumberOfPoints():
        return

    scenePoints = applyVoxelGrid(scenePoints, leafSize=0.03)
    clusters = extractClusters(scenePoints, clusterTolerance=0.15, minClusterSize=10)

    def isPostCluster(cluster, lineDirection):
        # A post must be near-vertical (<= 15 deg from up), at least
        # minPostLength long, and thinner than maxRadius in its top half.
        up = [0,0,1]
        minPostLength = 1.0
        maxRadius = 0.3
        angle = math.degrees(math.acos(np.dot(up, lineDirection) / (np.linalg.norm(up) * np.linalg.norm(lineDirection))))

        if angle > 15:
            return False

        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges]
        if edgeLengths[0] < minPostLength:
            return False

        # extract top half
        zvalues = vtkNumpy.getNumpyFromVtk(cluster, 'Points')[:,2].copy()
        vtkNumpy.addNumpyToVtk(cluster, zvalues, 'z')

        minZ = np.min(zvalues)
        maxZ = np.max(zvalues)

        cluster = thresholdPoints(cluster, 'z', [(minZ + maxZ)/2.0, maxZ])
        origin, edges, _ = getOrientedBoundingBox(cluster)
        edgeLengths = [np.linalg.norm(edge) for edge in edges]
        if edgeLengths[1] > maxRadius or edgeLengths[2] > maxRadius:
            return False

        return True

    def makeCylinderAffordance(linePoints, lineDirection, lineOrigin, postId):
        # Project the cluster points onto the fitted line to get the
        # cylinder extent, then create the affordance from a description.
        pts = vtkNumpy.getNumpyFromVtk(linePoints, 'Points')
        dists = np.dot(pts - lineOrigin, lineDirection)
        p1 = lineOrigin + lineDirection*np.min(dists)
        p2 = lineOrigin + lineDirection*np.max(dists)

        origin = (p1 + p2)/2.0
        lineLength = np.linalg.norm(p2 - p1)
        t = transformUtils.getTransformFromOriginAndNormal(origin, lineDirection)
        pose = transformUtils.poseFromTransform(t)

        desc = dict(classname='CylinderAffordanceItem', Name='post %d' % postId,
                    uuid=newUUID(), pose=pose, Radius=0.05, Length=float(lineLength), Color=[0.0, 1.0, 0.0])
        desc['Collision Enabled'] = True

        # NOTE(review): function-local import -- presumably avoids a
        # circular import at module load time; confirm.
        import affordancepanel
        return affordancepanel.panel.affordanceFromDescription(desc)

    rejectFolder = om.getOrCreateContainer('nonpost clusters', parentObj=getDebugFolder())
    keepFolder = om.getOrCreateContainer('post clusters', parentObj=getDebugFolder())

    for i, cluster in enumerate(clusters):
        linePoint, lineDirection, linePoints = applyLineFit(cluster, distanceThreshold=0.1)
        if isPostCluster(cluster, lineDirection):
            vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=keepFolder)
            makeCylinderAffordance(linePoints, lineDirection, linePoint, i)
        else:
            vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=rejectFolder)
def findAndFitDrillBarrel(polyData=None):
''' Find the horizontal surfaces
on the horizontal surfaces, find all the drills
'''
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = polyData or inputObj.polyData
groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.50)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.5, 1.7])
if not scenePoints.GetNumberOfPoints():
return
normalEstimationSearchRadius = 0.10
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(scenePoints)
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotUp = np.abs(np.dot(normals, [0,0,1]))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')
surfaces = thresholdPoints(scenePoints, 'normals_dot_up', [0.95, 1.0])
updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=False)
updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=False)
updatePolyData(surfaces, 'surfaces', parent=getDebugFolder(), visible=False)
clusters = extractClusters(surfaces, clusterTolerance=0.15, minClusterSize=50)
fitResults = []
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
forwardDirection = np.array([1.0, 0.0, 0.0])
viewFrame.TransformVector(forwardDirection, forwardDirection)
robotOrigin = viewFrame.GetPosition()
robotForward =forwardDirection
#print 'robot origin:', robotOrigin
#print 'robot forward:', robotForward
centroid =[]
for clusterId, cluster in enumerate(clusters):
clusterObj = updatePolyData(cluster, 'surface cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]
skipCluster = False
for edgeLength in edgeLengths:
#print 'cluster %d edge length: %f' % (clusterId, edgeLength)
if edgeLength < 0.35 or edgeLength > 0.75:
skipCluster = True
if skipCluster:
continue
clusterObj.setSolidColor([0, 0, 1])
centroid = np.average(vtkNumpy.getNumpyFromVtk(cluster, 'Points'), axis=0)
try:
drillFrame = segmentDrillBarrelFrame(centroid, polyData=scenePoints, forwardDirection=robotForward)
if drillFrame is not None:
fitResults.append((clusterObj, drillFrame))
except:
print traceback.format_exc()
print 'fit drill failed for cluster:', clusterId
if not fitResults:
return
sortFittedDrills(fitResults, robotOrigin, robotForward)
return centroid
def sortFittedDrills(fitResults, robotOrigin, robotForward):
    """Show the drill fit most directly ahead of the robot; others become candidates.

    fitResults is a list of (clusterObj, drillFrame) pairs.  Fits are ranked
    by the absolute bearing angle of the drill position from the robot's
    forward direction; the best fit becomes the 'drill' affordance, the rest
    are shown as hidden 'drill candidate' debug objects.
    """
    angleToFitResults = []

    for fitResult in fitResults:
        cluster, drillFrame = fitResult
        drillOrigin = np.array(drillFrame.GetPosition())
        angleToDrill = np.abs(computeSignedAngleBetweenVectors(robotForward, drillOrigin - robotOrigin, [0,0,1]))
        angleToFitResults.append((angleToDrill, cluster, drillFrame))

    angleToFitResults.sort(key=lambda x: x[0])

    drillMesh = getDrillBarrelMesh()

    for i, fitResult in enumerate(angleToFitResults):

        angleToDrill, cluster, drillFrame = fitResult

        if i == 0:
            # NOTE(review): this lookup result is immediately overwritten by
            # updatePolyData on the next line; it appears to be dead code.
            drill = om.findObjectByName('drill')
            drill = updatePolyData(drillMesh, 'drill', color=[0, 1, 0], cls=FrameAffordanceItem, visible=True)
            drillFrame = updateFrame(drillFrame, 'drill frame', parent=drill, visible=False)
            drill.actor.SetUserTransform(drillFrame.transform)

            drill.setAffordanceParams(dict(otdf_type='dewalt_button', friendly_name='dewalt_button'))
            drill.updateParamsFromActorTransform()

            drill.setSolidColor([0, 1, 0])
            #cluster.setProperty('Visible', True)

        else:
            drill = showPolyData(drillMesh, 'drill candidate', color=[1,0,0], visible=False, parent=getDebugFolder())
            drill.actor.SetUserTransform(drillFrame)
            om.addToObjectModel(drill, parentObj=getDebugFolder())
def computeSignedAngleBetweenVectors(v1, v2, perpendicularVector):
    '''
    Computes the signed angle between two vectors in 3d, given a perpendicular vector
    to determine sign. Result returned is radians.
    '''
    # Copy as float arrays so the in-place normalization below cannot fail
    # (or silently truncate) when callers pass integer lists/arrays.
    v1 = np.array(v1, dtype=float)
    v2 = np.array(v2, dtype=float)
    perpendicularVector = np.array(perpendicularVector, dtype=float)
    v1 /= np.linalg.norm(v1)
    v2 /= np.linalg.norm(v2)
    perpendicularVector /= np.linalg.norm(perpendicularVector)
    # atan2(sin, cos): the sine term is signed by the perpendicular direction.
    return math.atan2(np.dot(perpendicularVector, np.cross(v1, v2)), np.dot(v1, v2))
def segmentDrillBarrelFrame(point1, polyData, forwardDirection):
    """Fit a drill-barrel frame near the picked point point1.

    Plane-fits the table around point1, keeps the table cluster nearest the
    pick, isolates the band of points just above the table, and fits the
    drill barrel model to them.  Returns the fitted transform, or None when
    any stage yields no points.
    """
    tableClusterSearchRadius = 0.4
    drillClusterSearchRadius = 0.5 #0.3

    expectedNormal = np.array([0.0, 0.0, 1.0])

    if not polyData.GetNumberOfPoints():
        return

    polyData, plane_origin, plane_normal = applyPlaneFit(polyData, expectedNormal=expectedNormal,
        perpendicularAxis=expectedNormal, searchOrigin=point1,
        searchRadius=tableClusterSearchRadius, angleEpsilon=0.2, returnOrigin=True)

    if not polyData.GetNumberOfPoints():
        return

    tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
    updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)

    # Keep the table cluster whose nearest point is closest to the pick.
    tablePoints = labelDistanceToPoint(tablePoints, point1)
    tablePointsClusters = extractClusters(tablePoints)
    tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())

    if not tablePointsClusters:
        return

    tablePoints = tablePointsClusters[0]
    updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)

    # Search the 2-30cm band above the table plane for the drill barrel.
    searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.3])
    if not searchRegion.GetNumberOfPoints():
        return

    searchRegion = cropToSphere(searchRegion, point1, drillClusterSearchRadius)
    #drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)

    t = fitDrillBarrel (searchRegion, forwardDirection, plane_origin, plane_normal)
    return t
def segmentDrillBarrel(point1):
    """Segment the drill barrel near the picked point and show the drill affordance.

    Returns (aff, drillFrame).  Raises AssertionError when the barrel fit
    fails.
    """
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    forwardDirection = -np.array(getCurrentView().camera().GetViewPlaneNormal())

    # Bug fix: this previously called segmentDrillBarrel (itself) with three
    # arguments, which raises a TypeError; the intended callee is the
    # barrel-fitting helper segmentDrillBarrelFrame.
    t = segmentDrillBarrelFrame(point1, polyData, forwardDirection)

    assert t is not None

    drillMesh = getDrillBarrelMesh()

    aff = showPolyData(drillMesh, 'drill', visible=True)
    aff.addToView(app.getDRCView())

    aff.actor.SetUserTransform(t)
    drillFrame = showFrame(t, 'drill frame', parent=aff, visible=False)
    drillFrame.addToView(app.getDRCView())
    return aff, drillFrame
def segmentDrillAlignedWithTable(point, polyData = None):
    '''
    Yet Another Drill Fitting Algorithm [tm]
    This one fits the button drill assuming its on the table
    and aligned with the table frame (because the button drill orientation is difficult to find)
    Table must have long side facing robot
    '''
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = polyData or inputObj.polyData

    # segment the table and recover the precise up direction normal:
    polyDataOut, tablePoints, origin, normal = segmentTable(polyData,point)
    #print origin # this origin is bunk
    #tableCentroid = computeCentroid(tablePoints)

    # get the bounding box edges
    OBBorigin, edges, _ = getOrientedBoundingBox(tablePoints)
    edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
    axes = [edge / np.linalg.norm(edge) for edge in edges]

    # check which direction the robot is facing and flip x-axis of table if necessary
    viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
    if np.dot(axes[1], viewDirection) < 0:
        # flip so the table x-direction faces the robot
        axes[1] = -axes[1]

    # define the x-axis to be along the 2nd largest edge
    xaxis = axes[1]
    xaxis = np.array(xaxis)
    zaxis = np.array( normal )
    # build a right-handed orthonormal frame from the table normal and x-axis
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    tableOrientation = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)

    #tableTransform = transformUtils.frameFromPositionAndRPY( tableCentroid , tableOrientation.GetOrientation() )
    #updateFrame(tableTransform, 'table frame [z up, x away face]', parent="segmentation", visible=True).addToView(app.getDRCView())

    # cluster the objects on the table; the drill is assumed to be cluster 0
    data = segmentTableScene(polyData, point )
    #vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # crude use of the table frame to determine the frame of the drill on the table
    #t2 = transformUtils.frameFromPositionAndRPY([0,0,0], [180, 0 , 90] )
    #drillOrientationTransform = transformUtils.copyFrame( om.findObjectByName('object 1 frame').transform )
    #drillOrientationTransform.PreMultiply()
    #drillOrientationTransform.Concatenate(t2)
    #vis.updateFrame(t, 'drillOrientationTransform',visible=True)

    #table_xaxis, table_yaxis, table_zaxis = transformUtils.getAxesFromTransform( data.table.frame )
    #drillOrientation = transformUtils.orientationFromAxes( table_yaxis, table_xaxis, -1*np.array( table_zaxis) )
    drillTransform = transformUtils.frameFromPositionAndRPY( data.clusters[0].frame.GetPosition() , tableOrientation.GetOrientation() )

    drillMesh = getDrillMesh()

    drill = om.findObjectByName('drill')
    om.removeFromObjectModel(drill)

    aff = showPolyData(drillMesh, 'drill', color=[0.0, 1.0, 0.0], cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(drillTransform)
    aff.addToView(app.getDRCView())

    frameObj = updateFrame(drillTransform, 'drill frame', parent=aff, scale=0.2, visible=False)
    frameObj.addToView(app.getDRCView())

    params = getDrillAffordanceParams(np.array(drillTransform.GetPosition()), [1,0,0], [0,1,0], [0,0,1], drillType="dewalt_button")
    aff.setAffordanceParams(params)
def segmentDrillInHand(p1, p2):
    """Fit the drill model to points near the user-drawn segment p1->p2.

    p2 is treated as the top of the drill and the drill z-axis is aligned
    with the segment direction; slice centroids along that axis recover the
    in-plane orientation.
    """
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData

    distanceToLineThreshold = 0.05

    polyData = labelDistanceToLine(polyData, p1, p2)
    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])

    lineSegment = p2 - p1
    lineLength = np.linalg.norm(lineSegment)

    cropped, polyData = cropToPlane(polyData, p1, lineSegment/lineLength, [-0.03, lineLength + 0.03])
    updatePolyData(cropped, 'drill cluster', parent=getDebugFolder(), visible=False)

    drillPoints = cropped
    normal = lineSegment/lineLength

    # Slice the cluster along the drill axis; the centroid of each slice
    # traces the drill profile, giving the orientation about the axis.
    centroids = computeCentroids(drillPoints, axis=normal)

    centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
    d = DebugData()
    updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)

    # Offset from drill model origin to its top point.
    drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])

    zaxis = normal
    yaxis = centroids[0] - centroids[-1]
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PreMultiply()
    t.Translate(-drillToTopPoint)
    t.PostMultiply()
    t.Translate(p2)

    drillMesh = getDrillMesh()

    aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
    aff.actor.SetUserTransform(t)
    showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())

    params = getDrillAffordanceParams(np.array(t.GetPosition()), xaxis, yaxis, zaxis)
    aff.setAffordanceParams(params)
    aff.updateParamsFromActorTransform()
    aff.addToView(app.getDRCView())
def addDrillAffordance():
    """Create and show a drill affordance at the identity pose; return it."""
    mesh = getDrillMesh()
    affordance = showPolyData(mesh, 'drill', cls=FrameAffordanceItem, visible=True)

    transform = vtk.vtkTransform()
    transform.PostMultiply()
    affordance.actor.SetUserTransform(transform)

    frame = showFrame(transform, 'drill frame', parent=affordance, visible=False)
    frame.addToView(app.getDRCView())

    affordanceParams = getDrillAffordanceParams(np.array(transform.GetPosition()), [1,0,0], [0,1,0], [0,0,1])
    affordance.setAffordanceParams(affordanceParams)
    affordance.updateParamsFromActorTransform()
    affordance.addToView(app.getDRCView())
    return affordance
def getLinkFrame(linkName):
    """Return the link-to-world transform of linkName from the robot state model."""
    model = om.findObjectByName('robot state model') or getVisibleRobotModel()
    assert model
    linkToWorld = vtk.vtkTransform()
    model.model.getLinkToWorld(linkName, linkToWorld)
    return linkToWorld
def getDrillInHandOffset(zRotation=0.0, zTranslation=0.0, xTranslation=0.0, yTranslation=0.0,flip=False):
    """Build the hand-to-drill offset transform.

    Rotations are applied first (optional 180-degree flip about Y, then
    zRotation about Z, then -90 about Y), followed by the translation.
    """
    offset = vtk.vtkTransform()
    offset.PostMultiply()

    if flip:
        offset.RotateY(180)
    offset.RotateZ(zRotation)
    offset.RotateY(-90)

    offset.Translate(zTranslation, xTranslation, yTranslation)
    return offset
def moveDrillToHand(drillOffset, hand='right'):
    """Snap the drill affordance onto the given hand using drillOffset.

    Creates the drill affordance if it does not exist yet.
    """
    drill = om.findObjectByName('drill')
    if not drill:
        drill = addDrillAffordance()

    assert hand in ('right', 'left')
    handFaceLink = getLinkFrame('%s_hand_face' % hand[0])

    transform = drill.actor.GetUserTransform()
    transform.PostMultiply()
    transform.Identity()
    transform.Concatenate(drillOffset)
    transform.Concatenate(handFaceLink)
    drill._renderAllViews()
class PointPicker(TimerCallback):
    """Interactive picker that collects numberOfPoints clicks on the
    'pointcloud snapshot' object, drawing sphere/line annotations while the
    user hovers, and finally calls annotationFunc with the picked points.
    """

    def __init__(self, numberOfPoints=3):
        TimerCallback.__init__(self)
        self.targetFps = 30
        self.enabled = False
        self.numberOfPoints = numberOfPoints
        self.annotationObj = None
        self.drawLines = True
        self.clear()

    def clear(self):
        # Reset picked points, hover state and the completion callback.
        self.points = [None for i in xrange(self.numberOfPoints)]
        self.hoverPos = None
        self.annotationFunc = None
        self.lastMovePos = [0, 0]

    def onMouseMove(self, displayPoint, modifiers=None):
        self.lastMovePos = displayPoint

    def onMousePress(self, displayPoint, modifiers=None):
        # Store the current hover position in the first empty slot; finish
        # once every slot is filled.
        for i in xrange(self.numberOfPoints):
            if self.points[i] is None:
                self.points[i] = self.hoverPos
                break

        if self.points[-1] is not None:
            self.finish()

    def finish(self):
        self.enabled = False
        om.removeFromObjectModel(self.annotationObj)

        # NOTE(review): if finish() is reached while some points are still
        # None (possible via tick() when the pointcloud disappears),
        # p.copy() would raise AttributeError -- confirm intended.
        points = [p.copy() for p in self.points]

        if self.annotationFunc is not None:
            self.annotationFunc(*points)

        removeViewPicker(self)

    def handleRelease(self, displayPoint):
        pass

    def draw(self):
        d = DebugData()

        # Unpicked slots are previewed at the current hover position.
        points = [p if p is not None else self.hoverPos for p in self.points]

        # draw points
        for p in points:
            if p is not None:
                d.addSphere(p, radius=0.01)

        if self.drawLines:
            # draw lines
            for a, b in zip(points, points[1:]):
                if b is not None:
                    d.addLine(a, b)

            # connect end points
            if points[-1] is not None:
                d.addLine(points[0], points[-1])

        self.annotationObj = updatePolyData(d.getPolyData(), 'annotation', parent=getDebugFolder())
        self.annotationObj.setProperty('Color', QtGui.QColor(0, 255, 0))
        self.annotationObj.actor.SetPickable(False)

    def tick(self):
        if not self.enabled:
            return

        # Abort picking when the target pointcloud disappears.
        if not om.findObjectByName('pointcloud snapshot'):
            self.annotationFunc = None
            self.finish()
            return

        self.hoverPos = pickPoint(self.lastMovePos, getSegmentationView(), obj='pointcloud snapshot')
        self.draw()
class LineDraw(TimerCallback):
    """Interactive 2D line-drawing tool: the first click anchors p1, the
    second click sets p2 and invokes annotationFunc(p1, p2).  A leader actor
    renders the rubber-band line while drawing.
    """

    def __init__(self, view):
        TimerCallback.__init__(self)
        self.targetFps = 30
        self.enabled = False
        self.view = view
        self.renderer = view.renderer()
        self.line = vtk.vtkLeaderActor2D()
        self.line.SetArrowPlacementToNone()
        self.line.GetPositionCoordinate().SetCoordinateSystemToViewport()
        self.line.GetPosition2Coordinate().SetCoordinateSystemToViewport()
        self.line.GetProperty().SetLineWidth(4)
        self.line.SetPosition(0,0)
        self.line.SetPosition2(0,0)
        self.clear()

    def clear(self):
        # Reset endpoints and remove the line actor from the renderer.
        self.p1 = None
        self.p2 = None
        self.annotationFunc = None
        self.lastMovePos = [0, 0]
        self.renderer.RemoveActor2D(self.line)

    def onMouseMove(self, displayPoint, modifiers=None):
        self.lastMovePos = displayPoint

    def onMousePress(self, displayPoint, modifiers=None):
        # First press anchors p1 and shows the line; second press sets p2.
        if self.p1 is None:
            self.p1 = list(self.lastMovePos)
            if self.p1 is not None:
                self.renderer.AddActor2D(self.line)
        else:
            self.p2 = self.lastMovePos
            self.finish()

    def finish(self):
        self.enabled = False
        self.renderer.RemoveActor2D(self.line)
        if self.annotationFunc is not None:
            self.annotationFunc(self.p1, self.p2)

    def handleRelease(self, displayPoint):
        pass

    def tick(self):
        if not self.enabled:
            return
        if self.p1:
            # Rubber-band the line from p1 to the current mouse position.
            self.line.SetPosition(self.p1)
            self.line.SetPosition2(self.lastMovePos)
            self.view.render()
# Module-level registry of active view pickers.
viewPickers = []

def addViewPicker(picker):
    """Register picker in the module-level viewPickers list."""
    # 'global' is unnecessary: the list is mutated in place, never rebound.
    viewPickers.append(picker)

def removeViewPicker(picker):
    """Unregister picker; raises ValueError if it was never registered."""
    viewPickers.remove(picker)
def distanceToLine(x0, x1, x2):
    """Distance from point x0 to the infinite line through x1 and x2.

    Uses the cross-product formula |(x0-x1) x (x0-x2)| / |x2-x1|.
    Inputs are numpy arrays.
    """
    crossTerm = np.cross(x0 - x1, x0 - x2)
    return np.sqrt(np.sum(crossTerm**2)) / np.linalg.norm(x2 - x1)
def labelDistanceToLine(polyData, linePoint1, linePoint2, resultArrayName='distance_to_line'):
    """Return a shallow copy of polyData with a per-point distance-to-line scalar array."""
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    a = np.array(linePoint1)
    b = np.array(linePoint2)

    # |(p-a) x (p-b)| / |b-a| for every point p, vectorized.
    crossNorms = np.sqrt(np.sum(np.cross(pts - a, pts - b)**2, axis=1))
    dists = crossNorms / np.linalg.norm(b - a)

    labeled = shallowCopy(polyData)
    vtkNumpy.addNumpyToVtk(labeled, dists, resultArrayName)
    return labeled
def labelDistanceToPoint(polyData, point, resultArrayName='distance_to_point'):
    """Return a shallow copy of polyData with a per-point Euclidean distance-to-point array."""
    assert polyData.GetNumberOfPoints()
    offsets = vtkNumpy.getNumpyFromVtk(polyData, 'Points') - point
    dists = np.sqrt(np.sum(offsets**2, axis=1))
    labeled = shallowCopy(polyData)
    vtkNumpy.addNumpyToVtk(labeled, dists, resultArrayName)
    return labeled
def getPlaneEquationFromPolyData(polyData, expectedNormal):
    """Fit a plane and return (origin, normal, [nx, ny, nz, d]) with d = origin . normal."""
    _, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, returnOrigin=True)
    planeEquation = np.hstack((normal, [np.dot(origin, normal)]))
    return origin, normal, planeEquation
def computeEdge(polyData, edgeAxis, perpAxis, binWidth=0.03):
    """Bin points along edgeAxis; in each non-empty bin pick the point
    farthest along perpAxis.  Returns the edge points as a numpy array."""
    polyData = labelPointDistanceAlongAxis(polyData, edgeAxis, resultArrayName='dist_along_edge')
    polyData = labelPointDistanceAlongAxis(polyData, perpAxis, resultArrayName='dist_perp_to_edge')

    polyData, bins = binByScalar(polyData, 'dist_along_edge', binWidth)
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
    distToEdge = vtkNumpy.getNumpyFromVtk(polyData, 'dist_perp_to_edge')

    edgePoints = []
    for binIndex in xrange(len(bins) - 1):
        mask = binLabels == binIndex
        binDists = distToEdge[mask]
        if len(binDists):
            edgePoints.append(pts[mask][binDists.argmax()])

    return np.array(edgePoints)
def computeCentroids(polyData, axis, binWidth=0.025):
    """Bin points along axis and return the centroid of each non-empty bin."""
    polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
    polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)

    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')

    centroids = []
    for binIndex in xrange(len(bins) - 1):
        binPoints = pts[binLabels == binIndex]
        if len(binPoints):
            centroids.append(np.average(binPoints, axis=0))

    return np.array(centroids)
def computePointCountsAlongAxis(polyData, axis, binWidth=0.025):
    """Histogram of point counts in uniform bins along the given axis."""
    polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
    polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)

    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')

    counts = []
    for binIndex in xrange(len(bins) - 1):
        counts.append(len(pts[binLabels == binIndex]))

    return np.array(counts)
def binByScalar(lidarData, scalarArrayName, binWidth, binLabelsArrayName='bin_labels'):
    '''
    Bin the points of lidarData by the scalar array scalarArrayName into
    uniform bins of width binWidth.  A zero-based bin label array named
    binLabelsArrayName is added to a shallow copy of the data.
    Returns (labeledData, binEdges) where binEdges holds the bin boundaries.
    '''
    scalars = vtkNumpy.getNumpyFromVtk(lidarData, scalarArrayName)
    binEdges = np.arange(scalars.min(), scalars.max() + binWidth, binWidth)
    labels = np.digitize(scalars, binEdges) - 1
    assert(len(labels) == len(scalars))

    labeledData = shallowCopy(lidarData)
    vtkNumpy.addNumpyToVtk(labeledData, labels, binLabelsArrayName)
    return labeledData, binEdges
def showObbs(polyData):
    """Annotate per-cluster oriented bounding boxes (via the 'cluster_labels'
    array) and show the result as 'bboxes'."""
    labelsArrayName = 'cluster_labels'
    assert polyData.GetPointData().GetArray(labelsArrayName)

    annotator = vtk.vtkAnnotateOBBs()
    annotator.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
    annotator.SetInput(polyData)
    annotator.Update()
    showPolyData(annotator.GetOutput(), 'bboxes')
def getOrientedBoundingBox(polyData):
    '''
    returns origin, edges, and outline wireframe

    All points get one shared label so vtkAnnotateOBBs produces a single
    box.  origin is the box corner; edges are the three box edge vectors
    (their norms are the box dimensions).
    '''
    nPoints = polyData.GetNumberOfPoints()
    assert nPoints
    polyData = shallowCopy(polyData)

    labelsArrayName = 'bbox_labels'
    labels = np.ones(nPoints)
    vtkNumpy.addNumpyToVtk(polyData, labels, labelsArrayName)

    f = vtk.vtkAnnotateOBBs()
    f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
    f.SetInput(polyData)
    f.Update()

    assert f.GetNumberOfBoundingBoxes() == 1

    origin = np.zeros(3)
    edges = [np.zeros(3) for i in xrange(3)]

    f.GetBoundingBoxOrigin(0, origin)
    for i in xrange(3):
        f.GetBoundingBoxEdge(0, i, edges[i])

    return origin, edges, shallowCopy(f.GetOutput())
def segmentBlockByAnnotation(blockDimensions, p1, p2, p3):
    """Create a block affordance from three user-annotated points.

    p1->p2 defines the block z-edge (its length gives zwidth); p3 indicates
    the face direction.  blockDimensions supplies (xwidth, ywidth).
    """
    segmentationObj = om.findObjectByName('pointcloud snapshot')
    segmentationObj.mapper.ScalarVisibilityOff()
    segmentationObj.setProperty('Point Size', 2)
    segmentationObj.setProperty('Alpha', 0.8)

    # constraint z to lie in plane
    #p1[2] = p2[2] = p3[2] = max(p1[2], p2[2], p3[2])

    zedge = p2 - p1
    zaxis = zedge / np.linalg.norm(zedge)

    #xwidth = distanceToLine(p3, p1, p2)

    # expected dimensions
    xwidth, ywidth = blockDimensions

    zwidth = np.linalg.norm(zedge)

    yaxis = np.cross(p2 - p1, p3 - p1)
    yaxis = yaxis / np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)

    # reorient axes: y toward the camera, x toward the annotated face point
    viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
    if np.dot(yaxis, viewPlaneNormal) < 0:
        yaxis *= -1

    if np.dot(xaxis, p3 - p1) < 0:
        xaxis *= -1

    # make right handed
    zaxis = np.cross(xaxis, yaxis)

    origin = ((p1 + p2) / 2.0) + xaxis*xwidth/2.0 + yaxis*ywidth/2.0

    # debug visualization of the computed block axes
    d = DebugData()
    d.addSphere(origin, radius=0.01)
    d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
    obj = updatePolyData(d.getPolyData(), 'block axes')
    obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    obj.setProperty('Visible', False)

    om.findObjectByName('annotation').setProperty('Visible', False)

    # build the cube geometry and place it with the computed frame
    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())

    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)

    obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())

    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
####
# debris task ground frame
def getBoardCorners(params):
    """Return the 8 corner points of the board described by params.

    params must contain 'origin', the unit axes 'xaxis'/'yaxis'/'zaxis', and
    the full widths 'xwidth'/'ywidth'/'zwidth'.
    """
    axisNames = ['xaxis', 'yaxis', 'zaxis']
    widthNames = ['xwidth', 'ywidth', 'zwidth']

    center = np.array(params['origin'])
    halfEdges = [np.array(params[a]) * (np.array(params[w]) / 2.0)
                 for a, w in zip(axisNames, widthNames)]
    ex, ey, ez = halfEdges

    # Same corner ordering as before: top face (+z) then bottom face (-z),
    # each traversed (+,+), (-,+), (-,-), (+,-) in x/y.
    signs = [(1, 1, 1), (-1, 1, 1), (-1, -1, 1), (1, -1, 1),
             (1, 1, -1), (-1, 1, -1), (-1, -1, -1), (1, -1, -1)]
    return [center + sx*ex + sy*ey + sz*ez for sx, sy, sz in signs]
def getPointDistances(target, points):
    """Euclidean distance from target to each point, as a numpy array."""
    dists = []
    for p in points:
        dists.append(np.linalg.norm(target - p))
    return np.array(dists)
def computeClosestCorner(aff, referenceFrame):
    """Return the board corner of aff nearest to referenceFrame's position."""
    corners = getBoardCorners(aff.params)
    refPos = np.array(referenceFrame.GetPosition())
    dists = getPointDistances(refPos, corners)
    return corners[dists.argmin()]
def computeGroundFrame(aff, referenceFrame):
    """Compute a z-up ground frame at the board corner closest to referenceFrame.

    The x-axis is the board axis most aligned with the reference frame's -y
    direction (flipped to point the same way), re-orthogonalized against the
    world z-axis; the origin is the closest corner dropped to z=0.
    """
    refAxis = [0.0, -1.0, 0.0]
    referenceFrame.TransformVector(refAxis, refAxis)

    refAxis = np.array(refAxis)

    axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]

    # Pick the board axis with the largest projection onto the reference
    # direction, and make it point the same way.
    axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
    boardAxis = axes[axisProjections.argmax()]
    if np.dot(boardAxis, refAxis) < 0:
        boardAxis = -boardAxis

    xaxis = boardAxis
    zaxis = np.array([0.0, 0.0, 1.0])
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)

    closestCorner = computeClosestCorner(aff, referenceFrame)
    groundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    groundFrame.PostMultiply()
    groundFrame.Translate(closestCorner[0], closestCorner[1], 0.0)
    return groundFrame
def computeCornerFrame(aff, referenceFrame):
    """Build a frame at the board corner of *aff* nearest to referenceFrame.

    The frame's x axis is the board axis most aligned with the reference
    frame's -y direction (flipped toward it), y is the board z axis, and z
    completes the basis.
    """
    refAxis = [0.0, -1.0, 0.0]
    # vtkTransform.TransformVector writes its result into the second
    # argument in place.
    referenceFrame.TransformVector(refAxis, refAxis)
    refAxis = np.array(refAxis)
    axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
    axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
    boardAxis = axes[axisProjections.argmax()]
    if np.dot(boardAxis, refAxis) < 0:
        boardAxis = -boardAxis
    # NOTE(review): the previous version also computed a "longAxis" via
    # np.argmax over the literal strings ['xwidth', 'ywidth', 'zwidth']
    # (a lexicographic argmax, not a comparison of the actual widths) and
    # never used the result; that dead, buggy computation was removed.
    xaxis = boardAxis
    yaxis = axes[2]
    zaxis = np.cross(xaxis, yaxis)
    closestCorner = computeClosestCorner(aff, referenceFrame)
    cornerFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
    cornerFrame.PostMultiply()
    cornerFrame.Translate(closestCorner)
    return cornerFrame
def publishTriad(transform, collectionId=1234):
    """Publish *transform* as a single AXIS3D object over LCM.

    Sends an obj_collection_t named 'stance_triads' on the OBJ_COLLECTION
    channel so external viewers can render the triad.
    """
    o = lcmvs.obj_t()
    xyz = transform.GetPosition()
    rpy = transformUtils.rollPitchYawFromTransform(transform)
    o.roll, o.pitch, o.yaw = rpy
    o.x, o.y, o.z = xyz
    o.id = 1
    m = lcmvs.obj_collection_t()
    m.id = collectionId
    m.name = 'stance_triads'
    m.type = lcmvs.obj_collection_t.AXIS3D
    m.nobjs = 1
    m.reset = False
    m.objs = [o]
    lcmUtils.publish('OBJ_COLLECTION', m)
def createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances'):
    """Create, pose, display, and register a box affordance.

    The box is sized (xwidth, ywidth, zwidth), posed by the given axes and
    origin, added to the object model under *parent* together with a hidden
    frame object, added to the DRC view, and registered with the affordance
    manager.  Returns the new affordance object.
    """
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    obj = BoxAffordanceItem(name, view=app.getCurrentRenderView())
    obj.setProperty('Dimensions', [float(v) for v in [xwidth, ywidth, zwidth]])
    obj.actor.SetUserTransform(t)
    om.addToObjectModel(obj, parentObj=om.getOrCreateContainer(parent))
    frameObj = vis.showFrame(t, name + ' frame', scale=0.2, visible=False, parent=obj)
    obj.addToView(app.getDRCView())
    frameObj.addToView(app.getDRCView())
    affordanceManager.registerAffordance(obj)
    return obj
def segmentBlockByTopPlane(polyData, blockDimensions, expectedNormal, expectedXAxis, edgeSign=1, name='block affordance'):
    """Fit a block affordance to *polyData* from its top plane.

    Fits a plane (normal near expectedNormal) and a line to derive block
    axes, then refits the line to the detected edge points for a better z
    axis.  blockDimensions = (xwidth, ywidth) gives the cross section; the
    length (zwidth) is measured from the data.  edgeSign selects which side
    of the block the detected edge lies on.  Creates the affordance, a
    grasp seed, and (when possible) a debris stance frame.  Returns the
    affordance object.
    """
    polyData, planeOrigin, normal = applyPlaneFit(polyData, distanceThreshold=0.05, expectedNormal=expectedNormal, returnOrigin=True)
    # First pass: rough axes from a line fit over all plane points.
    _, lineDirection, _ = applyLineFit(polyData)
    zaxis = lineDirection
    yaxis = normal
    xaxis = np.cross(yaxis, zaxis)
    if np.dot(xaxis, expectedXAxis) < 0:
        xaxis *= -1
    # make right handed
    zaxis = np.cross(xaxis, yaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis /= np.linalg.norm(zaxis)
    # Second pass: refit the line to just the edge points for a better z axis.
    expectedXAxis = np.array(xaxis)
    edgePoints = computeEdge(polyData, zaxis, xaxis*edgeSign)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
    d = DebugData()
    obj = updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    zaxis = lineDirection
    xaxis = np.cross(yaxis, zaxis)
    if np.dot(xaxis, expectedXAxis) < 0:
        xaxis *= -1
    # make right handed
    zaxis = np.cross(xaxis, yaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis /= np.linalg.norm(zaxis)
    polyData = labelPointDistanceAlongAxis(polyData, xaxis, resultArrayName='dist_along_line')
    # Extreme points along z give the block length; project the endpoints
    # onto the fitted plane.
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dists = np.dot(pts-linePoint, zaxis)
    p1 = linePoint + zaxis*np.min(dists)
    p2 = linePoint + zaxis*np.max(dists)
    p1 = projectPointToPlane(p1, planeOrigin, normal)
    p2 = projectPointToPlane(p2, planeOrigin, normal)
    xwidth, ywidth = blockDimensions
    zwidth = np.linalg.norm(p2 - p1)
    # Block center: offset from endpoint p1 by half of each dimension,
    # stepping inward from the detected edge (edgeSign side).
    origin = p1 - edgeSign*xaxis*xwidth/2.0 - yaxis*ywidth/2.0 + zaxis*zwidth/2.0
    d = DebugData()
    #d.addSphere(linePoint, radius=0.02)
    #d.addLine(linePoint, linePoint + yaxis*ywidth)
    #d.addLine(linePoint, linePoint + xaxis*xwidth)
    #d.addLine(linePoint, linePoint + zaxis*zwidth)
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    d.addSphere(origin, radius=0.01)
    #d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    #d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    #d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
    d.addLine(origin, origin + xaxis*xwidth/2.0)
    d.addLine(origin, origin + yaxis*ywidth/2.0)
    d.addLine(origin, origin + zaxis*zwidth/2.0)
    #obj = updatePolyData(d.getPolyData(), 'block axes')
    #obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    #obj.setProperty('Visible', False)
    obj = createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name)
    # Burlywood-ish color for debris boards.
    obj.setProperty('Color', [222/255.0, 184/255.0, 135/255.0])
    computeDebrisGraspSeed(obj)
    t = computeDebrisStanceFrame(obj)
    if t:
        showFrame(t, 'debris stance frame', parent=obj)
        obj.publishCallback = functools.partial(publishDebrisStanceFrame, obj)
    return obj
def computeDebrisGraspSeed(aff):
    """Show a grasp-seed corner frame on *aff* relative to the debris wall.

    No-op when the 'debris reference frame' object does not exist yet.
    """
    refObj = om.findObjectByName('debris reference frame')
    if not refObj:
        return
    cornerFrame = computeCornerFrame(aff, refObj.transform)
    showFrame(cornerFrame, 'board corner frame', parent=aff, visible=False)
def computeDebrisStanceFrame(aff):
    """Compute a foot stance frame for reaching debris board *aff*.

    Requires the 'debris reference frame' and 'debris plane edge' objects
    to exist in the object model; returns None (implicitly) otherwise.
    """
    debrisReferenceFrame = om.findObjectByName('debris reference frame')
    debrisWallEdge = om.findObjectByName('debris plane edge')
    if debrisReferenceFrame and debrisWallEdge:
        debrisReferenceFrame = debrisReferenceFrame.transform
        affGroundFrame = computeGroundFrame(aff, debrisReferenceFrame)
        updateFrame(affGroundFrame, 'board ground frame', parent=getDebugFolder(), visible=False)
        # NOTE(review): affWallEdge is computed but never used below.
        affWallEdge = computeGroundFrame(aff, debrisReferenceFrame)
        framePos = np.array(affGroundFrame.GetPosition())
        # Project the ground-frame position onto the wall edge line.
        p1, p2 = debrisWallEdge.points
        edgeAxis = p2 - p1
        edgeAxis /= np.linalg.norm(edgeAxis)
        projectedPos = p1 + edgeAxis * np.dot(framePos - p1, edgeAxis)
        affWallFrame = vtk.vtkTransform()
        affWallFrame.PostMultiply()
        useWallFrameForRotation = True
        if useWallFrameForRotation:
            # Take the wall's orientation, translated to the projected position.
            affWallFrame.SetMatrix(debrisReferenceFrame.GetMatrix())
            affWallFrame.Translate(projectedPos - np.array(debrisReferenceFrame.GetPosition()))
            stanceWidth = 0.20
            stanceOffsetX = -0.35
            stanceOffsetY = 0.45
            stanceRotation = 0.0
        else:
            # Alternative: orient by the board's own ground frame.
            affWallFrame.SetMatrix(affGroundFrame.GetMatrix())
            affWallFrame.Translate(projectedPos - framePos)
            stanceWidth = 0.20
            stanceOffsetX = -0.35
            stanceOffsetY = -0.45
            stanceRotation = math.pi/2.0
        stanceFrame, _, _ = getFootFramesFromReferenceFrame(affWallFrame, stanceWidth, math.degrees(stanceRotation), [stanceOffsetX, stanceOffsetY, 0.0])
        return stanceFrame
def publishDebrisStanceFrame(aff):
    """Compute the debris stance frame for *aff* and broadcast it over LCM."""
    publishTriad(computeDebrisStanceFrame(aff))
def segmentBlockByPlanes(blockDimensions):
    """Fit a block affordance from two user-selected intersecting planes.

    Uses the first two children of the 'selected planes' object: plane 1's
    normal becomes the y axis and plane 2's normal the x axis.  The block
    length is measured along the planes' intersection direction.
    blockDimensions = (xwidth, ywidth) gives the cross section.
    """
    planes = om.findObjectByName('selected planes').children()[:2]
    viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
    origin1, normal1, plane1 = getPlaneEquationFromPolyData(planes[0].polyData, expectedNormal=viewPlaneNormal)
    origin2, normal2, plane2 = getPlaneEquationFromPolyData(planes[1].polyData, expectedNormal=viewPlaneNormal)
    xaxis = normal2
    yaxis = normal1
    zaxis = np.cross(xaxis, yaxis)
    # Re-derive x so the basis is orthogonal.
    xaxis = np.cross(yaxis, zaxis)
    pts1 = vtkNumpy.getNumpyFromVtk(planes[0].polyData, 'Points')
    pts2 = vtkNumpy.getNumpyFromVtk(planes[1].polyData, 'Points')
    # Project plane 2's centroid onto plane 1 to get a point near the
    # planes' intersection line; ProjectPoint writes into linePoint.
    linePoint = np.zeros(3)
    centroid2 = np.sum(pts2, axis=0)/len(pts2)
    vtk.vtkPlane.ProjectPoint(centroid2, origin1, normal1, linePoint)
    # Extreme plane-1 points along z give the block length.
    dists = np.dot(pts1-linePoint, zaxis)
    p1 = linePoint + zaxis*np.min(dists)
    p2 = linePoint + zaxis*np.max(dists)
    xwidth, ywidth = blockDimensions
    zwidth = np.linalg.norm(p2 - p1)
    # Block center: offset from endpoint p1 by half of each dimension.
    origin = p1 + xaxis*xwidth/2.0 + yaxis*ywidth/2.0 + zaxis*zwidth/2.0
    d = DebugData()
    d.addSphere(linePoint, radius=0.02)
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    d.addSphere(origin, radius=0.01)
    d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
    d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
    d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
    obj = updatePolyData(d.getPolyData(), 'block axes')
    obj.setProperty('Color', QtGui.QColor(255, 255, 0))
    obj.setProperty('Visible', False)
    cube = vtk.vtkCubeSource()
    cube.SetXLength(xwidth)
    cube.SetYLength(ywidth)
    cube.SetZLength(zwidth)
    cube.Update()
    cube = shallowCopy(cube.GetOutput())
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(origin)
    obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
    obj.actor.SetUserTransform(t)
    obj.addToView(app.getDRCView())
    params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
    obj.setAffordanceParams(params)
    obj.updateParamsFromActorTransform()
def estimatePointerTip(robotModel, polyData):
    '''
    Given a robot model, uses forward kinematics to determine a pointer tip
    search region, then does a ransac line fit in the search region to find
    points on the pointer, and selects the maximum point along the line fit
    as the pointer tip. Returns the pointer tip xyz on success and returns
    None on failure.
    '''
    palmFrame = robotModel.getLinkFrame('r_hand_force_torque')
    # Search segment endpoints in palm-local coordinates.
    p1 = [0.0, 0.14, -0.06]
    p2 = [0.0, 0.24, -0.06]
    # vtkTransform.TransformPoint writes its result into the second
    # argument in place.
    palmFrame.TransformPoint(p1, p1)
    palmFrame.TransformPoint(p2, p2)
    p1 = np.array(p1)
    p2 = np.array(p2)
    d = DebugData()
    d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    vis.updatePolyData(d.getPolyData(), 'pointer line', color=[1,0,0], parent=getDebugFolder(), visible=False)
    polyData = cropToLineSegment(polyData, p1, p2)
    if not polyData.GetNumberOfPoints():
        #print 'pointer search region is empty'
        return None
    vis.updatePolyData(polyData, 'cropped to pointer line', parent=getDebugFolder(), visible=False)
    # Keep points within 7cm of the search segment.
    polyData = labelDistanceToLine(polyData, p1, p2)
    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.07])
    if polyData.GetNumberOfPoints() < 2:
        #print 'pointer search region is empty'
        return None
    updatePolyData(polyData, 'distance to pointer line', colorByName='distance_to_line', parent=getDebugFolder(), visible=False)
    ransacDistanceThreshold = 0.0075
    lineOrigin, lineDirection, polyData = applyLineFit(polyData, distanceThreshold=ransacDistanceThreshold)
    updatePolyData(polyData, 'line fit ransac', colorByName='ransac_labels', parent=getDebugFolder(), visible=False)
    lineDirection = np.array(lineDirection)
    lineDirection /= np.linalg.norm(lineDirection)
    # Orient the fitted direction from p1 toward p2 (outward from the palm).
    if np.dot(lineDirection, (p2 - p1)) < 0:
        lineDirection *= -1
    # Keep only the ransac inliers.
    polyData = thresholdPoints(polyData, 'ransac_labels', [1.0, 1.0])
    if polyData.GetNumberOfPoints() < 2:
        #print 'pointer ransac line fit failed to find inliers'
        return None
    obj = updatePolyData(polyData, 'line fit points', colorByName='dist_along_line', parent=getDebugFolder(), visible=True)
    obj.setProperty('Point Size', 5)
    pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    dists = np.dot(pts-lineOrigin, lineDirection)
    p1 = lineOrigin + lineDirection*np.min(dists)
    p2 = lineOrigin + lineDirection*np.max(dists)
    d = DebugData()
    #d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    vis.updatePolyData(d.getPolyData(), 'fit pointer line', color=[0,1,0], parent=getDebugFolder(), visible=True)
    # The far end of the inlier segment is the pointer tip.
    return p2
def _startAnnotationPicker(numberOfPoints, annotationFunc, drawLines=None):
    """Create, register, and start a PointPicker for an annotation callback.

    Shared setup for every start*Segmentation entry point below.  drawLines
    is only assigned when explicitly given, so callers that never set it
    keep the PointPicker default.  No-argument functools.partial wrappers
    from the original per-function copies were dropped: a bare callable
    behaves identically when invoked.
    """
    picker = PointPicker(numberOfPoints=numberOfPoints)
    addViewPicker(picker)
    picker.enabled = True
    if drawLines is not None:
        picker.drawLines = drawLines
    picker.start()
    picker.annotationFunc = annotationFunc

def startBoundedPlaneSegmentation():
    _startAnnotationPicker(2, segmentBoundedPlaneByAnnotation)

def startValveSegmentationByWallPlane(expectedValveRadius):
    _startAnnotationPicker(2, functools.partial(segmentValveByWallPlane, expectedValveRadius))

def startValveSegmentationManual(expectedValveRadius):
    _startAnnotationPicker(2, functools.partial(segmentValve, expectedValveRadius), drawLines=False)

def startRefitWall():
    _startAnnotationPicker(1, refitWall)

def startWyeSegmentation():
    _startAnnotationPicker(2, segmentWye, drawLines=False)

def startDoorHandleSegmentation(otdfType):
    _startAnnotationPicker(2, functools.partial(segmentDoorHandle, otdfType), drawLines=False)

def startTrussSegmentation():
    _startAnnotationPicker(2, segmentTruss, drawLines=True)

def startHoseNozzleSegmentation():
    _startAnnotationPicker(1, segmentHoseNozzle, drawLines=False)

def storePoint(p):
    # Stash the picked point for later retrieval by getPickPoint().
    global _pickPoint
    _pickPoint = p

def getPickPoint():
    """Return the most recently stored pick point."""
    global _pickPoint
    return _pickPoint

def startPickPoint():
    _startAnnotationPicker(1, storePoint, drawLines=False)

def startSelectToolTip():
    _startAnnotationPicker(1, selectToolTip, drawLines=False)

def startDrillSegmentation():
    _startAnnotationPicker(3, segmentDrill, drawLines=False)

def startDrillAutoSegmentation():
    _startAnnotationPicker(1, segmentDrillAuto, drawLines=False)

def startDrillButtonSegmentation():
    _startAnnotationPicker(1, segmentDrillButton, drawLines=False)

def startPointerTipSegmentation():
    _startAnnotationPicker(1, segmentPointerTip, drawLines=False)

def startDrillAutoSegmentationAlignedWithTable():
    _startAnnotationPicker(1, segmentDrillAlignedWithTable, drawLines=False)

def startDrillBarrelSegmentation():
    _startAnnotationPicker(1, segmentDrillBarrel, drawLines=False)

def startDrillWallSegmentation():
    _startAnnotationPicker(3, segmentDrillWall, drawLines=True)

def startDrillWallSegmentationConstrained(rightAngleLocation):
    _startAnnotationPicker(2, functools.partial(segmentDrillWallConstrained, rightAngleLocation), drawLines=False)

def startDrillInHandSegmentation():
    _startAnnotationPicker(2, segmentDrillInHand, drawLines=True)

def startSegmentDebrisWall():
    _startAnnotationPicker(1, segmentDebrisWall)

def startSegmentDebrisWallManual():
    _startAnnotationPicker(2, segmentDebrisWallManual)
def selectToolTip(point1):
    # Annotation callback for startSelectToolTip: just prints the picked point.
    print point1
def segmentDebrisWallManual(point1, point2):
    """Define the debris wall from two user-annotated points on its edge.

    Creates the 'debris plane edge' object and the derived 'debris plane
    frame' and 'debris reference frame' used by the debris stance
    computations.
    """
    p1, p2 = point1, point2
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', visible=True)
    edgeObj.points = [p1, p2]
    xaxis = p2 - p1
    xaxis /= np.linalg.norm(xaxis)
    zaxis = np.array([0.0, 0.0, 1.0])
    yaxis = np.cross(zaxis, xaxis)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(p1)
    updateFrame(t, 'debris plane frame', parent=edgeObj, visible=False)
    refFrame = vtk.vtkTransform()
    refFrame.PostMultiply()
    refFrame.SetMatrix(t.GetMatrix())
    # Reference frame offset: back along the edge, out from the wall,
    # and 20m up.
    refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
    updateFrame(refFrame, 'debris reference frame', parent=edgeObj, visible=False)
def segmentDebrisWall(point1):
    """Fit the debris wall plane from a single annotated point on it.

    Segments the wall plane near point1 from the 'pointcloud snapshot',
    finds the wall's lower edge, and creates the 'debris plane edge',
    'debris plane frame', and 'debris reference frame' objects.
    """
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = shallowCopy(inputObj.polyData)
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
                                             searchOrigin=point1, searchRadius=0.25, angleEpsilon=0.7, returnOrigin=True)
    planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.02, 0.02])
    updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
    # Downsample and drop sparse outliers before clustering.
    planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
    planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=10)
    updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
    planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
    # Keep the cluster closest to the annotated point.
    planePoints = labelDistanceToPoint(planePoints, point1)
    clusters = extractClusters(planePoints, clusterTolerance=0.10)
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    planePoints = clusters[0]
    planeObj = updatePolyData(planePoints, 'debris plane points', parent=getDebugFolder(), visible=False)
    # Look for the wall's bottom edge (perpendicular axis points down).
    perpAxis = [0,0,-1]
    perpAxis /= np.linalg.norm(perpAxis)
    edgeAxis = np.cross(normal, perpAxis)
    edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
    updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    #binCounts = computePointCountsAlongAxis(planePoints, lineDirection)
    xaxis = lineDirection
    yaxis = normal
    zaxis = np.cross(xaxis, yaxis)
    # Keep z pointing up (and flip x with it to stay right handed).
    if np.dot(zaxis, [0, 0, 1]) < 0:
        zaxis *= -1
        xaxis *= -1
    # Extreme points along the edge direction, projected onto the plane.
    pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
    dists = np.dot(pts-linePoint, xaxis)
    p1 = linePoint + xaxis*np.min(dists)
    p2 = linePoint + xaxis*np.max(dists)
    p1 = projectPointToPlane(p1, origin, normal)
    p2 = projectPointToPlane(p2, origin, normal)
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', parent=planeObj, visible=True)
    edgeObj.points = [p1, p2]
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(p1)
    updateFrame(t, 'debris plane frame', parent=planeObj, visible=False)
    refFrame = vtk.vtkTransform()
    refFrame.PostMultiply()
    refFrame.SetMatrix(t.GetMatrix())
    # Reference frame offset: back along the edge, out from the wall,
    # and 20m up.
    refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
    updateFrame(refFrame, 'debris reference frame', parent=planeObj, visible=False)
def segmentBoundedPlaneByAnnotation(point1, point2):
    """Fit a bounded plane near point1 and frame its edge toward point2.

    Fits a plane from the 'pointcloud snapshot' around point1, keeps the
    cluster nearest point1, finds the plane's edge in the point1->point2
    direction, and publishes a 'plane edge frame' at the edge midpoint.
    """
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = shallowCopy(inputObj.polyData)
    viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
    polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.015, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
                                             searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.7, returnOrigin=True)
    planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.015, 0.015])
    updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
    # Downsample and drop sparse outliers before clustering.
    planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
    planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=12)
    updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
    planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
    # Keep the cluster closest to the annotated point.
    planePoints = labelDistanceToPoint(planePoints, point1)
    clusters = extractClusters(planePoints, clusterTolerance=0.10)
    clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
    planePoints = clusters[0]
    updatePolyData(planePoints, 'plane points', parent=getDebugFolder(), visible=False)
    # Edge search direction comes from the two annotated points.
    perpAxis = point2 - point1
    perpAxis /= np.linalg.norm(perpAxis)
    edgeAxis = np.cross(normal, perpAxis)
    edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
    edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
    updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
    linePoint, lineDirection, _ = applyLineFit(edgePoints)
    zaxis = normal
    yaxis = lineDirection
    xaxis = np.cross(yaxis, zaxis)
    if np.dot(xaxis, perpAxis) < 0:
        xaxis *= -1
    # make right handed
    yaxis = np.cross(zaxis, xaxis)
    # Extreme points along the edge direction, projected onto the plane.
    pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
    dists = np.dot(pts-linePoint, yaxis)
    p1 = linePoint + yaxis*np.min(dists)
    p2 = linePoint + yaxis*np.max(dists)
    p1 = projectPointToPlane(p1, origin, normal)
    p2 = projectPointToPlane(p2, origin, normal)
    d = DebugData()
    d.addSphere(p1, radius=0.01)
    d.addSphere(p2, radius=0.01)
    d.addLine(p1, p2)
    updatePolyData(d.getPolyData(), 'plane edge', parent=getDebugFolder(), visible=False)
    t = getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate((p1 + p2)/ 2.0)
    updateFrame(t, 'plane edge frame', parent=getDebugFolder(), visible=False)
# Cached camera pose used to restore the view after an ortho* call.
savedCameraParams = None

def perspective():
    """Restore the perspective camera saved by saveCameraParams().

    Also restores affordance opacity and point-cloud pickability that the
    ortho* views changed.  No-op if no camera state has been saved.
    """
    global savedCameraParams
    if savedCameraParams is None:
        return
    aff = getDefaultAffordanceObject()
    if aff:
        aff.setProperty('Alpha', 1.0)
    obj = om.findObjectByName('pointcloud snapshot')
    if obj is not None:
        obj.actor.SetPickable(1)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOff()
    c.SetPosition(savedCameraParams['Position'])
    c.SetFocalPoint(savedCameraParams['FocalPoint'])
    c.SetViewUp(savedCameraParams['ViewUp'])
    view.setCameraManipulationStyle()
    view.render()
def saveCameraParams(overwrite=False):
    """Snapshot the segmentation camera pose into the module-level cache.

    Does nothing when a snapshot already exists, unless overwrite is True.
    """
    global savedCameraParams
    if savedCameraParams is not None and not overwrite:
        return
    camera = getSegmentationView().camera()
    savedCameraParams = dict(Position=camera.GetPosition(),
                             FocalPoint=camera.GetFocalPoint(),
                             ViewUp=camera.GetViewUp())
def getDefaultAffordanceObject():
    """Return the active AffordanceItem, else the first one found, else None."""
    active = om.getActiveObject()
    if isinstance(active, AffordanceItem):
        return active
    return next((obj for obj in om.getObjects() if isinstance(obj, AffordanceItem)), None)
def getVisibleRobotModel():
    """Return the first visible RobotModelItem in the object model, or None."""
    return next((obj for obj in om.getObjects()
                 if isinstance(obj, roboturdf.RobotModelItem) and obj.getProperty('Visible')),
                None)
def _applyOrthoView(aff, viewDirection, viewUp, viewDistance, scale):
    """Shared camera setup for the orthoX/orthoY/orthoZ views.

    Makes the affordance translucent, disables point-cloud picking, and aims
    an orthographic camera at the affordance origin, backed off along
    -viewDirection by viewDistance with the given parallel scale.
    """
    aff.setProperty('Alpha', 0.3)
    om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
    view = getSegmentationView()
    c = view.camera()
    c.ParallelProjectionOn()
    origin = aff.params['origin']
    c.SetFocalPoint(origin)
    c.SetPosition(origin - viewDirection*viewDistance)
    c.SetViewUp(viewUp)
    c.SetParallelScale(scale)
    view.setActorManipulationStyle()
    view.render()

def orthoX():
    """Orthographic view looking down the affordance x axis."""
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    params = aff.params
    _applyOrthoView(aff, params['xaxis'], -params['yaxis'],
                    params['xwidth']*3, params['zwidth'])

def orthoY():
    """Orthographic view looking down the affordance y axis."""
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    params = aff.params
    _applyOrthoView(aff, params['yaxis'], -params['xaxis'],
                    params['ywidth']*4, params['zwidth'])

def orthoZ():
    """Orthographic view looking down the affordance z axis."""
    aff = getDefaultAffordanceObject()
    if not aff:
        return
    saveCameraParams()
    aff.updateParamsFromActorTransform()
    params = aff.params
    _applyOrthoView(aff, params['zaxis'], -params['yaxis'],
                    params['zwidth'], params['ywidth']*6)
def zoomToDisplayPoint(displayPoint, boundsRadius=0.5, view=None):
    """Center and zoom the camera on the 3D point under a 2D display point.

    Picks into the 'pointcloud snapshot'; does nothing when the pick misses.
    """
    pickedPoint = pickPoint(displayPoint, getSegmentationView(), obj='pointcloud snapshot')
    if pickedPoint is None:
        return
    view = view or app.getCurrentRenderView()
    # NOTE(review): this ray is computed but never used below.
    worldPt1, worldPt2 = getRayFromDisplayPoint(getSegmentationView(), displayPoint)
    # Axis-aligned box of half-size boundsRadius around the picked point,
    # reordered to VTK's (xmin, xmax, ymin, ymax, zmin, zmax) convention.
    diagonal = np.array([boundsRadius, boundsRadius, boundsRadius])
    bounds = np.hstack([pickedPoint - diagonal, pickedPoint + diagonal])
    bounds = [bounds[0], bounds[3], bounds[1], bounds[4], bounds[2], bounds[5]]
    view.renderer().ResetCamera(bounds)
    view.camera().SetFocalPoint(pickedPoint)
    view.render()
def extractPointsAlongClickRay(position, ray, polyData=None, distanceToLineThreshold=0.025, nearestToCamera=False):
    """Intersect a pick ray with point-cloud data and return the hit point.

    Keeps points within distanceToLineThreshold of the ray and at least
    0.20m along it, then returns the point nearest the ray (default) or
    nearest the camera.  Returns None when nothing qualifies.
    """
    #segmentationObj = om.findObjectByName('pointcloud snapshot')
    if polyData is None:
        polyData = getCurrentRevolutionData()
    if not polyData or not polyData.GetNumberOfPoints():
        return None
    polyData = labelDistanceToLine(polyData, position, position + ray)
    # extract points near line
    polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
    if not polyData.GetNumberOfPoints():
        return None
    # Discard points closer than 20cm along the ray (e.g. near the sensor).
    polyData = labelPointDistanceAlongAxis(polyData, ray, origin=position, resultArrayName='distance_along_line')
    polyData = thresholdPoints(polyData, 'distance_along_line', [0.20, 1e6])
    if not polyData.GetNumberOfPoints():
        return None
    updatePolyData(polyData, 'ray points', colorByName='distance_to_line', visible=False, parent=getDebugFolder())
    # Select which metric to minimize over the candidate points.
    if nearestToCamera:
        dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_line')
    else:
        dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_to_line')
    points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
    intersectionPoint = points[dists.argmin()]
    d = DebugData()
    d.addSphere( intersectionPoint, radius=0.005)
    d.addLine(position, intersectionPoint)
    obj = updatePolyData(d.getPolyData(), 'intersecting ray', visible=False, color=[0,1,0], parent=getDebugFolder())
    obj.actor.GetProperty().SetLineWidth(2)
    d2 = DebugData()
    end_of_ray = position + 2*ray
    d2.addLine(position, end_of_ray)
    obj2 = updatePolyData(d2.getPolyData(), 'camera ray', visible=False, color=[1,0,0], parent=getDebugFolder())
    obj2.actor.GetProperty().SetLineWidth(2)
    return intersectionPoint
def segmentDrillWallFromTag(position, ray):
    '''
    Fix the drill wall relative to a ray intersected with the wall
    Desc: given a position and a ray (typically derived from a camera pixel)
    Use that point to determine a position for the Drill Wall
    This function uses a hard coded offset between the position on the wall
    to produce the drill cutting origin

    Returns True on success, False when no LIDAR data is available yet.
    '''
    #inputObj = om.findObjectByName('pointcloud snapshot')
    #polyData = shallowCopy(inputObj.polyData)
    polyData = getCurrentRevolutionData()
    if (polyData is None): # no data yet
        print "no LIDAR data yet"
        return False
    point1 = extractPointsAlongClickRay(position, ray, polyData )
    # view direction is out:
    viewDirection = -1 * SegmentationContext.getGlobalInstance().getViewDirection()
    polyDataOut, origin, normal = applyPlaneFit(polyData, expectedNormal=viewDirection, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
    # project the lidar point onto the plane (older, variance is >1cm with robot 2m away)
    #intersection_point = projectPointToPlane(point1, origin, normal)
    # intersect the ray with the plane (variance was about 4mm with robot 2m away)
    intersection_point = intersectLineWithPlane(position, ray, origin, normal)
    # Define a frame: x out of the wall, z derived from world up.
    xaxis = -normal
    zaxis = [0, 0, 1]
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)
    t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    t.PostMultiply()
    t.Translate(intersection_point)
    # Hard-coded offset from the tag point on the wall to the drill
    # cutting origin.
    t2 = transformUtils.copyFrame(t)
    t2.PreMultiply()
    t3 = transformUtils.frameFromPositionAndRPY( [0,0.6,-0.25] , [0,0,0] )
    t2.Concatenate(t3)
    rightAngleLocation = 'bottom left'
    createDrillWall(rightAngleLocation, t2)
    wall= om.findObjectByName('wall')
    vis.updateFrame( t ,'wall fit tag', parent=wall, visible=False, scale=0.2)
    d = DebugData()
    d.addSphere( intersection_point, radius=0.002)
    obj = updatePolyData(d.getPolyData(), 'intersection', parent=wall, visible=False, color=[0,1,0]) #
    obj.actor.GetProperty().SetLineWidth(1)
    return True
def segmentDrillWallFromWallCenter():
    '''
    Get the drill wall target as an offset from the center of
    the full wall
    '''
    # find the valve wall and its center
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData = inputObj.polyData
    # hardcoded position to target frame from center of wall
    # coincides with the distance from the april tag to this position
    wallFrame = transformUtils.copyFrame( findWallCenter(polyData) )
    wallFrame.PreMultiply()
    t3 = transformUtils.frameFromPositionAndRPY( [-0.07,-0.3276,0] , [180,-90,0] )
    wallFrame.Concatenate(t3)
    rightAngleLocation = 'bottom left'
    createDrillWall(rightAngleLocation, wallFrame)
    wall= om.findObjectByName('wall')
    vis.updateFrame( wallFrame ,'wall fit lidar', parent=wall, visible=False, scale=0.2)
|
RussTedrake/director
|
src/python/ddapp/segmentation.py
|
Python
|
bsd-3-clause
| 161,203
|
[
"VTK"
] |
be8ef3fe4dd9522b27c98c58a624fc4937f4c0afc3a256a86cd473d5638c58da
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Mar 19, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
import os
import unittest2 as unittest
from pymatgen.entries.compatibility import MaterialsProjectCompatibility, \
MITCompatibility, AqueousCorrection, MITAqueousCompatibility, MaterialsProjectAqueousCompatibility
from pymatgen.entries.computed_entries import ComputedEntry, \
ComputedStructureEntry
from pymatgen import Composition, Lattice, Structure, Element
class MaterialsProjectCompatibilityTest(unittest.TestCase):
def setUp(self):
self.entry1 = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.entry_sulfide = ComputedEntry(
'FeS', -1, 0.0,
parameters={'is_hubbard': False,
'run_type': 'GGA',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE S 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.entry2 = ComputedEntry(
'Fe3O4', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.entry3 = ComputedEntry(
'FeO', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 4.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.compat = MaterialsProjectCompatibility(check_potcar_hash=False)
self.ggacompat = MaterialsProjectCompatibility("GGA", check_potcar_hash=False)
    def test_process_entry(self):
        """GGA+U entries pass the mixed scheme but not the GGA-only scheme,
        and vice versa for plain GGA runs of GGA+U compounds."""
        #Correct parameters
        self.assertIsNotNone(self.compat.process_entry(self.entry1))
        self.assertIsNone(self.ggacompat.process_entry(self.entry1))
        #Correct parameters
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': False, "hubbards": {}, 'run_type': 'GGA',
                        'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
                                         'hash': '994537de5c4122b7f1b77fb604476db4'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        # GGA run of a GGA+U compound: rejected by the mixed scheme,
        # accepted by the GGA-only scheme.
        self.assertIsNone(self.compat.process_entry(entry))
        self.assertIsNotNone(self.ggacompat.process_entry(entry))
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
                                         'hash': '994537de5c4122b7f1b77fb604476db4'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        self.assertIsNotNone(self.compat.process_entry(entry))
def test_correction_values(self):
#test_corrections
self.assertAlmostEqual(self.compat.process_entry(self.entry1).correction,
- 2.733 * 2 - 0.70229 * 3)
entry = ComputedEntry(
'FeF3', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'F': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE F 08Apr2002',
'hash': '180141c33d032bfbfff30b3bea9d23dd'}]})
self.assertIsNotNone(self.compat.process_entry(entry))
#Check actual correction
self.assertAlmostEqual(self.compat.process_entry(entry).correction, -2.733)
self.assertAlmostEqual(self.compat.process_entry(
self.entry_sulfide).correction, -0.66346)
def test_U_values(self):
#Wrong U value
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True,
'hubbards': {'Fe': 5.2, 'O': 0}, 'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.assertIsNone(self.compat.process_entry(entry))
#GGA run of U
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': False, 'hubbards': None,
'run_type': 'GGA',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.assertIsNone(self.compat.process_entry(entry))
#GGA+U run of non-U
entry = ComputedEntry(
'Al2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Al': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel': 'PAW_PBE Al 06Sep2000',
'hash': '805c888bbd2793e462311f6a20d873d9'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.assertIsNone(self.compat.process_entry(entry))
#Materials project should not have a U for sulfides
entry = ComputedEntry(
'FeS2', -2, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'S': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{"titel": 'PAW_PBE S 08Apr2002',
'hash': "f7f8e4a74a6cbb8d63e41f4373b54df2"}]})
self.assertIsNone(self.compat.process_entry(entry))
def test_wrong_psp(self):
#Wrong psp
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
'hash': '9530da8244e4dac17580869b4adab115'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
self.assertIsNone(self.compat.process_entry(entry))
def test_element_processing(self):
entry = ComputedEntry(
'O', -1, 0.0,
parameters={'is_hubbard': False, 'hubbards': {},
'potcar_spec': [{'titel': 'PAW_PBE O 08Apr2002',
'hash': '7a25bc5b9a5393f46600a4939d357982'}],
'run_type': 'GGA'})
entry = self.compat.process_entry(entry)
# self.assertEqual(entry.entry_id, -8)
self.assertAlmostEqual(entry.energy, -1)
self.assertAlmostEqual(self.ggacompat.process_entry(entry).energy,
-1)
def test_get_explanation_dict(self):
compat = MaterialsProjectCompatibility(check_potcar_hash=False)
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel': 'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': "7a25bc5b9a5393f46600a4939d357982"}]})
d = compat.get_explanation_dict(entry)
self.assertEqual('MPRelaxSet Potcar Correction', d["corrections"][0][
"name"])
def test_get_corrections_dict(self):
compat = MaterialsProjectCompatibility(check_potcar_hash=False)
ggacompat = MaterialsProjectCompatibility("GGA", check_potcar_hash=False)
#Correct parameters
entry = ComputedEntry(
'Fe2O3', -1, 0.0,
parameters={'is_hubbard': True, 'hubbards': {'Fe': 5.3, 'O': 0},
'run_type': 'GGA+U',
'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
'hash': '994537de5c4122b7f1b77fb604476db4'},
{'titel': 'PAW_PBE O 08Apr2002',
'hash': "7a25bc5b9a5393f46600a4939d357982"}]})
c = compat.get_corrections_dict(entry)
self.assertAlmostEqual(c["MP Anion Correction"], -2.10687)
self.assertAlmostEqual(c["MP Advanced Correction"], -5.466)
entry.parameters["is_hubbard"] = False
del entry.parameters["hubbards"]
c = ggacompat.get_corrections_dict(entry)
self.assertNotIn("MP Advanced Correction", c)
def test_process_entries(self):
entries = self.compat.process_entries([self.entry1,
self.entry2,
self.entry3])
self.assertEqual(len(entries), 2)
class MITCompatibilityTest(unittest.TestCase):
    """Tests for MITCompatibility, including potcar-hash verification behavior."""

    def setUp(self):
        """Build MIT-style GGA+U fixtures (Fe oxide/fluoride/sulfide) with hash checks on."""
        self.compat = MITCompatibility(check_potcar_hash=True)
        self.ggacompat = MITCompatibility("GGA", check_potcar_hash=True)
        self.entry_O = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        self.entry_F = ComputedEntry(
            'FeF3', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'F': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE F 08Apr2002',
                                         'hash': '180141c33d032bfbfff30b3bea9d23dd'}]})
        self.entry_S = ComputedEntry(
            'FeS2', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 1.9, 'S': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE S 08Apr2002',
                                         'hash': 'd368db6899d8839859bbee4811a42a88'}]})

    def test_process_entry(self):
        """Valid MIT GGA+U entries are accepted."""
        #Correct parameters
        self.assertIsNotNone(self.compat.process_entry(self.entry_O))
        self.assertIsNotNone(self.compat.process_entry(self.entry_F))

    def test_correction_value(self):
        """Pin exact MIT correction magnitudes for oxide, fluoride and sulfide."""
        #Check actual correction
        self.assertAlmostEqual(self.compat.process_entry(self.entry_O).correction,
                               - 1.723 * 2 -0.66975*3)
        self.assertAlmostEqual(self.compat.process_entry(self.entry_F).correction, -1.723)
        self.assertAlmostEqual(self.compat.process_entry(self.entry_S).correction, -1.113)

    def test_U_value(self):
        """MIT applies U to Fe sulfides but not to Ni sulfides."""
        # MIT should have a U value for Fe containing sulfides
        self.assertIsNotNone(self.compat.process_entry(self.entry_S))
        # MIT should not have a U value for Ni containing sulfides
        entry = ComputedEntry(
            'NiS2', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Ni': 1.9, 'S': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Ni 06Sep2000',
                                         'hash': '653f5772e68b2c7fd87ffd1086c0d710'},
                                        {'titel': 'PAW_PBE S 08Apr2002',
                                         'hash': 'd368db6899d8839859bbee4811a42a88'}]})
        self.assertIsNone(self.compat.process_entry(entry))
        entry = ComputedEntry(
            'NiS2', -2, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': None,
                        'run_type': 'GGA',
                        'potcar_spec': [{'titel':'PAW_PBE Ni 06Sep2000',
                                         'hash': '653f5772e68b2c7fd87ffd1086c0d710'},
                                        {'titel': 'PAW_PBE S 08Apr2002',
                                         'hash': 'd368db6899d8839859bbee4811a42a88'}]})
        self.assertIsNotNone(self.ggacompat.process_entry(entry))

    def test_wrong_U_value(self):
        """Wrong U values and GGA runs of U systems are rejected by the GGA+U scheme."""
        #Wrong U value
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 5.2, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        self.assertIsNone(self.compat.process_entry(entry))
        #GGA run
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': False,
                        'hubbards': None,
                        'run_type': 'GGA',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        self.assertIsNone(self.compat.process_entry(entry))
        self.assertIsNotNone(self.ggacompat.process_entry(entry))

    def test_wrong_psp(self):
        """An entry using the MP Fe_pv potcar (not MIT's Fe) is rejected."""
        #Wrong psp
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe_pv 06Sep2000',
                                         'hash': '994537de5c4122b7f1b77fb604476db4'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        self.assertIsNone(self.compat.process_entry(entry))

    def test_element_processing(self):
        """Elemental entries receive no correction."""
        #Testing processing of elements.
        entry = ComputedEntry(
            'O', -1, 0.0,
            parameters={'is_hubbard': False, 'hubbards': {},
                        'potcar_spec': [{'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}],
                        'run_type': 'GGA'})
        entry = self.compat.process_entry(entry)
        self.assertAlmostEqual(entry.energy, -1)

    def test_same_potcar_symbol(self):
        """A wrong hash under the right symbol is rejected only when hash checks are on."""
        # Same symbol different hash thus a different potcar
        #Correct Hash Correct Symbol
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': '9530da8244e4dac17580869b4adab115'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        #Incorrect Hash Correct Symbol
        entry2 = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel':'PAW_PBE Fe 06Sep2000',
                                         'hash': 'DifferentHash'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        compat = MITCompatibility()
        self.assertEqual(len(compat.process_entries([entry, entry2])), 2)
        self.assertEqual(len(self.compat.process_entries([entry, entry2])), 1)

    def test_revert_to_symbols(self):
        """Falls back to potcar_symbols when potcar_spec is absent; raises if hashes required."""
        #Test that you can revert to potcar_symbols if potcar_spec is not present
        compat = MITCompatibility()
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe 06Sep2000', 'PAW_PBE O 08Apr2002']})
        self.assertIsNotNone(compat.process_entry(entry))
        #raise if check_potcar_hash is set
        self.assertRaises(ValueError, self.compat.process_entry, entry)

    def test_potcar_doenst_match_structure(self):
        """An entry whose potcars do not match its composition is rejected."""
        compat = MITCompatibility()
        entry = ComputedEntry(
            'Li2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_symbols': ['PAW_PBE Fe_pv 06Sep2000',
                                           'PAW_PBE O 08Apr2002']})
        self.assertIsNone(compat.process_entry(entry))

    def test_potcar_spec_is_none(self):
        """A potcar_spec of [None, None] cannot be hash-checked and is rejected."""
        compat = MITCompatibility(check_potcar_hash=True)
        entry = ComputedEntry(
            'Li2O3', -1, 0.0,
            parameters={'is_hubbard': True,
                        'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [None, None]})
        self.assertIsNone(compat.process_entry(entry))

    def test_get_explanation_dict(self):
        """The explanation dict lists the MIT potcar correction first by name."""
        compat = MITCompatibility(check_potcar_hash=False)
        entry = ComputedEntry(
            'Fe2O3', -1, 0.0,
            parameters={'is_hubbard': True, 'hubbards': {'Fe': 4.0, 'O': 0},
                        'run_type': 'GGA+U',
                        'potcar_spec': [{'titel': 'PAW_PBE Fe 06Sep2000',
                                         'hash': '994537de5c4122b7f1b77fb604476db4'},
                                        {'titel': 'PAW_PBE O 08Apr2002',
                                         'hash': "7a25bc5b9a5393f46600a4939d357982"}]})
        d = compat.get_explanation_dict(entry)
        self.assertEqual('MITRelaxSet Potcar Correction', d["corrections"][0][
            "name"])
class OxideTypeCorrectionTest(unittest.TestCase):
    """Tests that oxide-type (superoxide/peroxide/ozonide/oxide) corrections are applied."""

    def setUp(self):
        self.compat = MITCompatibility(check_potcar_hash=True)

    def test_no_struct_compat(self):
        """Without a structure, the oxide_type tag in `data` drives the correction."""
        lio2_entry_nostruct = ComputedEntry(Composition("Li2O4"), -3,
                                            data={"oxide_type": "superoxide"},
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        lio2_entry_corrected = self.compat.process_entry(lio2_entry_nostruct)
        self.assertAlmostEqual(lio2_entry_corrected.energy, -3 - 0.13893*4, 4)

    def test_process_entry_superoxide(self):
        """Superoxide correction (-0.13893/O) is detected from the LiO2 structure."""
        el_li = Element("Li")
        el_o = Element("O")
        latt = Lattice([[3.985034, 0.0, 0.0],
                        [0.0, 4.881506, 0.0],
                        [0.0, 0.0, 2.959824]])
        elts = [el_li, el_li, el_o, el_o, el_o, el_o]
        coords = list()
        coords.append([0.500000, 0.500000, 0.500000])
        coords.append([0.0, 0.0, 0.0])
        coords.append([0.632568, 0.085090, 0.500000])
        coords.append([0.367432, 0.914910, 0.500000])
        coords.append([0.132568, 0.414910, 0.000000])
        coords.append([0.867432, 0.585090, 0.000000])
        struct = Structure(latt, elts, coords)
        lio2_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        lio2_entry_corrected = self.compat.process_entry(lio2_entry)
        self.assertAlmostEqual(lio2_entry_corrected.energy, -3 -0.13893*4, 4)

    def test_process_entry_peroxide(self):
        """Peroxide correction (-0.44317/O) is detected from the Li2O2 structure."""
        latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [[0.666656, 0.666705, 0.750001],
                  [0.333342, 0.333378, 0.250001],
                  [0.000001, 0.000041, 0.500001],
                  [0.000001, 0.000021, 0.000001],
                  [0.333347, 0.333332, 0.649191],
                  [0.333322, 0.333353, 0.850803],
                  [0.666666, 0.666686, 0.350813],
                  [0.666665, 0.666684, 0.149189]]
        struct = Structure(latt, elts, coords)
        li2o2_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
        self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4)

    def test_process_entry_ozonide(self):
        """Ozonides receive no oxide correction; energy is unchanged."""
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_o, el_o, el_o]
        latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
                                       133.847504, 102.228244, 95.477342)
        coords = [[0.513004, 0.513004, 1.000000],
                  [0.017616, 0.017616, 0.000000],
                  [0.649993, 0.874790, 0.775203],
                  [0.099587, 0.874790, 0.224797]]
        struct = Structure(latt, elts, coords)
        lio3_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        lio3_entry_corrected = self.compat.process_entry(lio3_entry)
        self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0)

    def test_process_entry_oxide(self):
        """Normal oxide correction (-0.66975/O) is applied to Li2O."""
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_o]
        latt = Lattice.from_parameters(3.278, 3.278, 3.278,
                                       60, 60, 60)
        coords = [[0.25, 0.25, 0.25],
                  [0.75, 0.75, 0.75],
                  [0.0, 0.0, 0.0]]
        struct = Structure(latt, elts, coords)
        li2o_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        li2o_entry_corrected = self.compat.process_entry(li2o_entry)
        self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)
class OxideTypeCorrectionNoPeroxideCorrTest(unittest.TestCase):
    """Tests MITCompatibility with correct_peroxide=False: every O is treated as normal oxide."""

    def setUp(self):
        self.compat = MITCompatibility(correct_peroxide=False)

    def test_oxide_energy_corr(self):
        """Normal oxides are corrected as usual."""
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_o]
        latt = Lattice.from_parameters(3.278, 3.278, 3.278,
                                       60, 60, 60)
        coords = [[0.25, 0.25, 0.25],
                  [0.75, 0.75, 0.75],
                  [0.0, 0.0, 0.0]]
        struct = Structure(latt, elts, coords)
        li2o_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        li2o_entry_corrected = self.compat.process_entry(li2o_entry)
        self.assertAlmostEqual(li2o_entry_corrected.energy, -3.0 -0.66975, 4)

    def test_peroxide_energy_corr(self):
        """Peroxides get the plain oxide correction, NOT the peroxide one."""
        latt = Lattice.from_parameters(3.159597, 3.159572, 7.685205, 89.999884, 89.999674, 60.000510)
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_li, el_li, el_li, el_o, el_o, el_o, el_o]
        coords = [[0.666656, 0.666705, 0.750001],
                  [0.333342, 0.333378, 0.250001],
                  [0.000001, 0.000041, 0.500001],
                  [0.000001, 0.000021, 0.000001],
                  [0.333347, 0.333332, 0.649191],
                  [0.333322, 0.333353, 0.850803],
                  [0.666666, 0.666686, 0.350813],
                  [0.666665, 0.666684, 0.149189]]
        struct = Structure(latt, elts, coords)
        li2o2_entry = ComputedStructureEntry(struct, -3,
                                             parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        li2o2_entry_corrected = self.compat.process_entry(li2o2_entry)
        # First confirm the peroxide-specific correction was NOT applied.
        self.assertRaises(AssertionError, self.assertAlmostEqual,
                          *(li2o2_entry_corrected.energy, -3 - 0.44317 * 4, 4))
        self.assertAlmostEqual(li2o2_entry_corrected.energy, -3 - 0.66975 * 4, 4)

    def test_ozonide(self):
        """Ozonides also receive the plain oxide correction in this mode."""
        el_li = Element("Li")
        el_o = Element("O")
        elts = [el_li, el_o, el_o, el_o]
        latt = Lattice.from_parameters(3.999911, 3.999911, 3.999911,
                                       133.847504, 102.228244, 95.477342)
        coords = [[0.513004, 0.513004, 1.000000],
                  [0.017616, 0.017616, 0.000000],
                  [0.649993, 0.874790, 0.775203],
                  [0.099587, 0.874790, 0.224797]]
        struct = Structure(latt, elts, coords)
        lio3_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'}]})
        lio3_entry_corrected = self.compat.process_entry(lio3_entry)
        self.assertAlmostEqual(lio3_entry_corrected.energy, -3.0 - 3 * 0.66975)
class AqueousCorrectionTest(unittest.TestCase):
    """Tests AqueousCorrection loaded from the MITCompatibility.yaml config."""

    def setUp(self):
        # Config lives one directory above this test module.
        module_dir = os.path.dirname(os.path.abspath(__file__))
        fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
        self.corr = AqueousCorrection(fp)

    def test_compound_energy(self):
        """H2O formation energy is fixed (~-2.46 eV) and corrected H2O energy is clamped."""
        O2_entry = self.corr.correct_entry(ComputedEntry(Composition("O2"),
                                                         -4.9355 * 2))
        H2_entry = self.corr.correct_entry(ComputedEntry(Composition("H2"), 3))
        H2O_entry = self.corr.correct_entry(ComputedEntry(Composition("H2O"), 3))
        H2O_formation_energy = H2O_entry.energy - (H2_entry.energy +
                                                   O2_entry.energy / 2.0)
        self.assertAlmostEqual(H2O_formation_energy, -2.46, 2)
        entry = ComputedEntry(Composition("H2O"), -16)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -14.916, 4)
        # Same corrected energy regardless of the starting raw energy.
        entry = ComputedEntry(Composition("H2O"), -24)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -14.916, 4)
        # Compounds not covered by the aqueous scheme pass through with MIT corrections only.
        entry = ComputedEntry(Composition("Cl"), -24)
        entry = self.corr.correct_entry(entry)
        self.assertAlmostEqual(entry.energy, -24.344373, 4)
class TestMITAqueousCompatibility(unittest.TestCase):
    """Tests that MITAqueousCompatibility == MITCompatibility + AqueousCorrection."""

    def setUp(self):
        self.compat = MITCompatibility(check_potcar_hash=True)
        self.aqcompat = MITAqueousCompatibility(check_potcar_hash=True)
        module_dir = os.path.dirname(os.path.abspath(__file__))
        fp = os.path.join(module_dir, os.path.pardir, "MITCompatibility.yaml")
        self.aqcorr = AqueousCorrection(fp)

    def test_aqueous_compat(self):
        """Chained MIT + aqueous corrections match the combined scheme for LiOH."""
        el_li = Element("Li")
        el_o = Element("O")
        el_h = Element("H")
        latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
        elts = [el_h, el_h, el_li, el_li, el_o, el_o]
        coords = [[0.000000, 0.500000, 0.413969],
                  [0.500000, 0.000000, 0.586031],
                  [0.000000, 0.000000, 0.000000],
                  [0.500000, 0.500000, 0.000000],
                  [0.000000, 0.500000, 0.192672],
                  [0.500000, 0.000000, 0.807328]]
        struct = Structure(latt, elts, coords)
        lioh_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_spec': [{'titel':'PAW_PBE Li 17Jan2003',
                                                           'hash': '65e83282d1707ec078c1012afbd05be8'},
                                                          {'titel': 'PAW_PBE O 08Apr2002',
                                                           'hash': '7a25bc5b9a5393f46600a4939d357982'},
                                                          {"titel": 'PAW_PBE H 15Jun2001',
                                                           'hash': "bb43c666e3d36577264afe07669e9582"}]})
        lioh_entry_compat = self.compat.process_entry(lioh_entry)
        lioh_entry_compat_aqcorr = self.aqcorr.correct_entry(lioh_entry_compat)
        lioh_entry_aqcompat = self.aqcompat.process_entry(lioh_entry)
        self.assertAlmostEqual(lioh_entry_compat_aqcorr.energy, lioh_entry_aqcompat.energy, 4)

    def test_potcar_doenst_match_structure(self):
        """A LiOH structure declared with Fe potcars is rejected."""
        compat = MITCompatibility()
        el_li = Element("Li")
        el_o = Element("O")
        el_h = Element("H")
        latt = Lattice.from_parameters(3.565276, 3.565276, 4.384277, 90.000000, 90.000000, 90.000000)
        elts = [el_h, el_h, el_li, el_li, el_o, el_o]
        coords = [[0.000000, 0.500000, 0.413969],
                  [0.500000, 0.000000, 0.586031],
                  [0.000000, 0.000000, 0.000000],
                  [0.500000, 0.500000, 0.000000],
                  [0.000000, 0.500000, 0.192672],
                  [0.500000, 0.000000, 0.807328]]
        struct = Structure(latt, elts, coords)
        lioh_entry = ComputedStructureEntry(struct, -3,
                                            parameters={'is_hubbard': False,
                                          'hubbards': None,
                                          'run_type': 'GGA',
                                          'potcar_symbols':
            ['PAW_PBE Fe 17Jan2003', 'PAW_PBE O 08Apr2002', 'PAW_PBE H 15Jun2001']})
        self.assertIsNone(compat.process_entry(lioh_entry))
# Run the full test suite when executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']  # uncomment to run a single named test
    unittest.main()
|
aykol/pymatgen
|
pymatgen/entries/tests/test_compatibility.py
|
Python
|
mit
| 36,806
|
[
"pymatgen"
] |
3f27e71dfcda71ed5937907e7509dd37efae0b6b6dc55cfe6b57557c8465a4ad
|
# rgToolFactory.py
# see https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# August 2014
# merged John Chilton's citation addition and ideas from Marius van den Beek to enable arbitrary
# data types for input and output - thanks!
#
# march 2014
# had to remove dependencies because cross toolshed dependencies are not possible - can't pre-specify a toolshed url for graphicsmagick and ghostscript
# grrrrr - night before a demo
# added dependencies to a tool_dependencies.xml if html page generated so generated tool is properly portable
#
# added ghostscript and graphicsmagick as dependencies
# fixed a weird problem where gs was trying to use the new_files_path from universe (database/tmp) as ./database/tmp
# errors ensued
#
# august 2013
# found a problem with GS if $TMP or $TEMP missing - now inject /tmp and warn
#
# july 2013
# added ability to combine images and individual log files into html output
# just make sure there's a log file foo.log and it will be output
# together with all images named like "foo_*.pdf
# otherwise old format for html
#
# January 2013
# problem pointed out by Carlos Borroto
# added escaping for <>$ - thought I did that ages ago...
#
# August 11 2012
# changed to use shell=False and cl as a sequence
# This is a Galaxy tool factory for simple scripts in python, R or whatever ails ye.
# It also serves as the wrapper for the new tool.
#
# you paste and run your script
# Only works for simple scripts that read one input from the history.
# Optionally can write one new history dataset,
# and optionally collect any number of outputs into links on an autogenerated HTML page.
# DO NOT install on a public or important site - please.
# installed generated tools are fine if the script is safe.
# They just run normally and their user cannot do anything unusually insecure
# but please, practice safe toolshed.
# Read the fucking code before you install any tool
# especially this one
# After you get the script working on some test data, you can
# optionally generate a toolshed compatible gzip file
# containing your script safely wrapped as an ordinary Galaxy script in your local toolshed for
# safe and largely automated installation in a production Galaxy.
# If you opt for an HTML output, you get all the script outputs arranged
# as a single Html history item - all output files are linked, thumbnails for all the pdfs.
# Ugly but really inexpensive.
#
# Patches appreciated please.
#
#
# long route to June 2012 product
# Behold the awesome power of Galaxy and the toolshed with the tool factory to bind them
# derived from an integrated script model
# called rgBaseScriptWrapper.py
# Note to the unwary:
# This tool allows arbitrary scripting on your Galaxy as the Galaxy user
# There is nothing stopping a malicious user doing whatever they choose
# Extremely dangerous!!
# Totally insecure. So, trusted users only
#
# preferred model is a developer using their throw away workstation instance - ie a private site.
# no real risk. The universe_wsgi.ini admin_users string is checked - only admin users are permitted to run this tool.
#
import sys
import shutil
import subprocess
import os
import time
import tempfile
import optparse
import tarfile
import re
import shutil
import math
progname = os.path.split(sys.argv[0])[1]
myversion = 'V001.1 March 2014'
verbose = False
debug = False
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'
reload(sys)
sys.setdefaultencoding('utf8')
# if we do html we need these dependencies specified in a tool_dependencies.xml file and referred to in the generated
# tool xml
toolhtmldepskel = """<?xml version="1.0"?>
<tool_dependency>
<package name="ghostscript" version="9.10">
<repository name="package_ghostscript_9_10" owner="devteam" prior_installation_required="True" />
</package>
<package name="graphicsmagick" version="1.3.18">
<repository name="package_graphicsmagick_1_3" owner="iuc" prior_installation_required="True" />
</package>
<readme>
%s
</readme>
</tool_dependency>
"""
protorequirements = """<requirements>
<requirement type="package" version="9.10">ghostscript</requirement>
<requirement type="package" version="1.3.18">graphicsmagick</requirement>
</requirements>"""
def timenow():
    """Return the current local time formatted as dd/mm/YYYY HH:MM:SS."""
    local = time.localtime(time.time())
    return time.strftime('%d/%m/%Y %H:%M:%S', local)
html_escape_table = {
"&": "&",
">": ">",
"<": "<",
"$": "\$"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
def cmd_exists(cmd):
    """Return True if *cmd* resolves to a runnable command in the shell.

    Uses the shell builtin ``type`` so builtins and PATH executables both count.
    """
    retcode = subprocess.call("type " + cmd, shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return retcode == 0
def parse_citations(citations_text):
    """Split **ENTRY**-delimited citation text into (kind, body) tuples.

    Entries beginning with "doi" yield ("doi", body); everything else is
    assumed to begin with "bibtex" and yields ("bibtex", body), with the
    leading tag stripped in both cases. Blank entries are ignored.
    """
    parsed = []
    for chunk in citations_text.split("**ENTRY**"):
        if not chunk.strip():
            continue
        if chunk.startswith("doi"):
            kind, body = "doi", chunk[len("doi"):]
        else:
            kind, body = "bibtex", chunk[len("bibtex"):]
        parsed.append((kind, body.strip()))
    return parsed
class ScriptRunner:
    """Wrapper for an arbitrary user-supplied script: builds the command line,
    writes the script to disk, and (elsewhere) generates the Galaxy tool XML.
    """

    def __init__(self,opts=None,treatbashSpecial=True):
        """
        cleanup inputs, setup some outputs

        opts: parsed optparse options (script_path, interpreter, tool_name,
              output_dir, input/output paths and formats).
        treatbashSpecial: bash/sh scripts are run from a file rather than stdin.
        NOTE(review): chdir's into opts.output_dir and writes files as side effects.
        """
        # Probe for optional image/thumbnail tooling on PATH.
        self.useGM = cmd_exists('gm')
        self.useIM = cmd_exists('convert')
        self.useGS = cmd_exists('gs')
        self.temp_warned = False # we want only one warning if $TMP not set
        self.treatbashSpecial = treatbashSpecial
        if opts.output_dir: # simplify for the tool tarball
            os.chdir(opts.output_dir)
        self.thumbformat = 'png'
        self.opts = opts
        # Sanitize the tool name to a valid identifier for ids/filenames.
        self.toolname = re.sub('[^a-zA-Z0-9_]+', '', opts.tool_name) # a sanitizer now does this but..
        self.toolid = self.toolname
        self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
        self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
        self.xmlfile = '%s.xml' % self.toolname
        # Read the user's script and normalize line endings.
        s = open(self.opts.script_path,'r').readlines()
        s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
        self.script = '\n'.join(s)
        # Persist the script to a temp file used as the Popen source.
        fhandle,self.sfile = tempfile.mkstemp(prefix=self.toolname,suffix=".%s" % (opts.interpreter))
        tscript = open(self.sfile,'w') # use self.sfile as script source for Popen
        tscript.write(self.script)
        tscript.close()
        # Escaped copies of the script for embedding in generated XML/help.
        self.indentedScript = '\n'.join([' %s' % html_escape(x) for x in s]) # for restructured text in help
        self.escapedScript = '\n'.join([html_escape(x) for x in s])
        self.elog = os.path.join(self.opts.output_dir,"%s_error.log" % self.toolname)
        if opts.output_dir: # may not want these complexities
            self.tlog = os.path.join(self.opts.output_dir,"%s_runner.log" % self.toolname)
            art = '%s.%s' % (self.toolname,opts.interpreter)
            artpath = os.path.join(self.opts.output_dir,art) # need full path
            # Also keep a copy of the script named after the tool, for the tarball.
            artifact = open(artpath,'w') # use self.sfile as script source for Popen
            artifact.write(self.script)
            artifact.close()
        # Assemble the command line: interpreter, script (file or stdin), input, output.
        self.cl = []
        self.html = []
        a = self.cl.append
        a(opts.interpreter)
        if self.treatbashSpecial and opts.interpreter in ['bash','sh']:
            a(self.sfile)
        else:
            a('-') # stdin
        a(opts.input_tab)
        a(opts.output_tab)
        self.outputFormat = self.opts.output_format
        self.inputFormats = self.opts.input_formats
        # Canonical filenames for the generated functional test fixtures.
        self.test1Input = '%s_test1_input.xls' % self.toolname
        self.test1Output = '%s_test1_output.xls' % self.toolname
        self.test1HTML = '%s_test1_output.html' % self.toolname
def makeXML(self):
    """
    Create a Galaxy xml tool wrapper for the new script as a string and write
    it to self.xmlfile. All fragments are %-interpolated from xdict below.
    fixme - use templating or something less fugly than this example of what we produce

    <tool id="reverse" name="reverse" version="0.01">
    <description>a tabular file</description>
    <command interpreter="python">
    reverse.py --script_path "$runMe" --interpreter "python"
    --tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
    </command>
    <inputs>
    <param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/><param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="reverse"/>
    </inputs>
    <outputs>
    <data format="tabular" name="tab_file" label="${job_name}"/>
    </outputs>
    <help>
    **What it Does**
    Reverse the columns in a tabular file
    </help>
    <configfiles>
    <configfile name="runMe">
    # reverse order of columns in a tabular file
    import sys
    inp = sys.argv[1]
    outp = sys.argv[2]
    i = open(inp,'r')
    o = open(outp,'w')
    for row in i:
        rs = row.rstrip().split('\t')
        rs.reverse()
        o.write('\t'.join(rs))
        o.write('\n')
    i.close()
    o.close()
    </configfile>
    </configfiles>
    </tool>
    """
    # Master wrapper skeleton; every %(...)s placeholder is filled from xdict.
    newXML="""<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(requirements)s
<command interpreter="python">
%(command)s
</command>
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
<citations>
%(citations)s
<citation type="doi">10.1093/bioinformatics/bts573</citation>
</citations>
</tool>""" # needs a dict with toolname, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto
    # <command> line of the generated tool: it re-runs this very wrapper.
    newCommand="""
%(toolname)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s """
    # may NOT be an input or htmlout - appended later
    # Three alternative <tests> sections, chosen by which outputs exist.
    tooltestsTabOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s"/>
</test>
</tests>
"""
    tooltestsHTMLOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test>
</tests>
"""
    tooltestsBoth = """<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test>
</tests>
"""
    xdict = {}
    xdict['outputFormat'] = self.outputFormat
    xdict['inputFormats'] = self.inputFormats
    xdict['requirements'] = ''
    if self.opts.make_HTML:
        # protorequirements is a module-level template defined elsewhere in this file
        if self.opts.include_dependencies == "yes":
            xdict['requirements'] = protorequirements
    xdict['tool_version'] = self.opts.tool_version
    xdict['test1Input'] = self.test1Input
    xdict['test1HTML'] = self.test1HTML
    xdict['test1Output'] = self.test1Output
    # pick the <tests> fragment matching the declared outputs
    if self.opts.make_HTML and self.opts.output_tab <> 'None':
        xdict['tooltests'] = tooltestsBoth % xdict
    elif self.opts.make_HTML:
        xdict['tooltests'] = tooltestsHTMLOnly % xdict
    else:
        xdict['tooltests'] = tooltestsTabOnly % xdict
    xdict['script'] = self.escapedScript
    # configfile is least painful way to embed script to avoid external dependencies
    # but requires escaping of <, > and $ to avoid Mako parsing
    if self.opts.help_text:
        helptext = open(self.opts.help_text,'r').readlines()
        helptext = [html_escape(x) for x in helptext] # must html escape here too - thanks to Marius van den Beek
        xdict['help'] = ''.join([x for x in helptext])
    else:
        xdict['help'] = 'Please ask the tool author (%s) for help as none was supplied at tool generation\n' % (self.opts.user_email)
    if self.opts.citations:
        # parse_citations is a module-level helper defined elsewhere in this file
        citationstext = open(self.opts.citations,'r').read()
        citation_tuples = parse_citations(citationstext)
        citations_xml = ""
        for citation_type, citation_content in citation_tuples:
            citation_xml = """<citation type="%s">%s</citation>""" % (citation_type, html_escape(citation_content))
            citations_xml += citation_xml
        xdict['citations'] = citations_xml
    else:
        xdict['citations'] = ""
    # append the embedded script and attribution boilerplate to the help text
    coda = ['**Script**','Pressing execute will run the following code over your input file and generate some outputs in your history::']
    coda.append('\n')
    coda.append(self.indentedScript)
    coda.append('\n**Attribution**\nThis Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.\n' % (self.opts.user_email,timenow()))
    coda.append('See %s for details of that project' % (toolFactoryURL))
    coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
    coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573\n')
    xdict['help'] = '%s\n%s' % (xdict['help'],'\n'.join(coda))
    if self.opts.tool_desc:
        xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
    else:
        xdict['tooldesc'] = ''
    xdict['command_outputs'] = ''
    xdict['outputs'] = ''
    # the literal string 'None' (not Python None) marks "no input/output"
    if self.opts.input_tab <> 'None':
        xdict['command_inputs'] = '--input_tab "$input1" ' # the space may matter a lot if we append something
        xdict['inputs'] = '<param name="input1" type="data" format="%s" label="Select a suitable input file from your history"/> \n' % self.inputFormats
    else:
        xdict['command_inputs'] = '' # assume no input - eg a random data generator
        xdict['inputs'] = ''
    xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
    xdict['toolname'] = self.toolname
    xdict['toolid'] = self.toolid
    xdict['interpreter'] = self.opts.interpreter
    xdict['scriptname'] = self.sfile
    if self.opts.make_HTML:
        xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"'
        xdict['outputs'] += ' <data format="html" name="html_file" label="${job_name}.html"/>\n'
    else:
        xdict['command_outputs'] += ' --output_dir "./"'
    if self.opts.output_tab <> 'None':
        xdict['command_outputs'] += ' --output_tab "$tab_file"'
        xdict['outputs'] += ' <data format="%s" name="tab_file" label="${job_name}"/>\n' % self.outputFormat
    xdict['command'] = newCommand % xdict
    xmls = newXML % xdict
    # write the completed wrapper XML next to the other generated artifacts
    xf = open(self.xmlfile,'w')
    xf.write(xmls)
    xf.write('\n')
    xf.close()
    # ready for the tarball
def makeTooltar(self):
    """
    Build a tool-shed-ready gzipped tarball and copy it to opts.new_tool.

    a tool is a gz tarball with eg
    /toolname/tool.xml /toolname/tool.py /toolname/test-data/test1_in.foo ...
    """
    # run first: a tool that cannot execute should never be packaged
    retval = self.run()
    if retval:
        print >> sys.stderr,'## Run failed. Cannot build yet. Please fix and retry'
        sys.exit(1)
    tdir = self.toolname
    os.mkdir(tdir)
    self.makeXML()
    if self.opts.make_HTML:
        if self.opts.help_text:
            hlp = open(self.opts.help_text,'r').read()
        else:
            hlp = 'Please ask the tool author for help as none was supplied at tool generation\n'
        if self.opts.include_dependencies:
            # toolhtmldepskel is a module-level template defined elsewhere in this file
            tooldepcontent = toolhtmldepskel % hlp
            depf = open(os.path.join(tdir,'tool_dependencies.xml'),'w')
            depf.write(tooldepcontent)
            depf.write('\n')
            depf.close()
    # bundle the current run's input/outputs as the tool's functional test data
    if self.opts.input_tab <> 'None': # no reproducible test otherwise? TODO: maybe..
        testdir = os.path.join(tdir,'test-data')
        os.mkdir(testdir) # make tests directory
        shutil.copyfile(self.opts.input_tab,os.path.join(testdir,self.test1Input))
        if self.opts.output_tab <> 'None':
            shutil.copyfile(self.opts.output_tab,os.path.join(testdir,self.test1Output))
        if self.opts.make_HTML:
            shutil.copyfile(self.opts.output_html,os.path.join(testdir,self.test1HTML))
        if self.opts.output_dir:
            shutil.copyfile(self.tlog,os.path.join(testdir,'test1_out.log'))
    outpif = '%s.py' % self.toolname # new name
    outpiname = os.path.join(tdir,outpif) # path for the tool tarball
    pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
    notes = ['# %s - a self annotated version of %s generated by running %s\n' % (outpiname,pyin,pyin),]
    notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
    notes.append('# User %s at %s\n' % (self.opts.user_email,timenow()))
    pi = open(self.pyfile,'r').readlines() # our code becomes new tool wrapper (!) - first Galaxy worm
    notes += pi
    outpi = open(outpiname,'w')
    outpi.write(''.join(notes))
    outpi.write('\n')
    outpi.close()
    # include the user script and the generated XML unless already present
    stname = os.path.join(tdir,self.sfile)
    if not os.path.exists(stname):
        shutil.copyfile(self.sfile, stname)
    xtname = os.path.join(tdir,self.xmlfile)
    if not os.path.exists(xtname):
        shutil.copyfile(self.xmlfile,xtname)
    tarpath = "%s.gz" % self.toolname
    tar = tarfile.open(tarpath, "w:gz")
    tar.add(tdir,arcname=self.toolname)
    tar.close()
    shutil.copyfile(tarpath,self.opts.new_tool)
    shutil.rmtree(tdir)
    ## TODO: replace with optional direct upload to local toolshed?
    return retval
def compressPDF(self,inpdf=None,thumbformat='png'):
    """Compress inpdf in place with ghostscript and produce a thumbnail image.

    Returns 0 on success of both steps, non-zero otherwise.

    need absolute path to pdf
    note that GS gets confoozled if no $TMP or $TEMP
    so we set it
    """
    assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myName)
    hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
    sto = open(hlog,'a')
    # ghostscript needs a usable temp dir; synthesise one if $TMP/$TEMP are unset
    our_env = os.environ.copy()
    our_tmp = our_env.get('TMP',None)
    if not our_tmp:
        our_tmp = our_env.get('TEMP',None)
    if not (our_tmp and os.path.exists(our_tmp)):
        newtmp = os.path.join(self.opts.output_dir,'tmp')
        try:
            os.mkdir(newtmp)
        except:
            sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
        our_env['TEMP'] = newtmp
        # warn only once per run (self.temp_warned is instance state)
        if not self.temp_warned:
            sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
            self.temp_warned = True
    # step 1: ghostscript re-write at "printer" quality into a sibling file
    outpdf = '%s_compressed' % inpdf
    cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
    x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
    retval1 = x.wait()
    sto.close()
    if retval1 == 0:
        # replace the original with the compressed copy and drop the quiet log
        os.unlink(inpdf)
        shutil.move(outpdf,inpdf)
        os.unlink(hlog)
    # step 2: render a thumbnail with graphicsmagick or imagemagick
    hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
    sto = open(hlog,'w')
    outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
    if self.useGM:
        cl2 = ['gm', 'convert', inpdf, outpng]
    else: # assume imagemagick
        # '[0]' selects only the first page of the PDF
        cl2 = ['convert', inpdf + '[0]', outpng]
    x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
    retval2 = x.wait()
    sto.close()
    if retval2 == 0:
        os.unlink(hlog)
    # non-zero if either compression or thumbnailing failed
    retval = retval1 or retval2
    return retval
def getfSize(self,fpath,outpath):
    """Return a human readable size for the file outpath/fpath.

    Yields '' when the file does not exist, '0 B' for an empty file,
    otherwise 'N B', 'X.X KB' or 'X.X MB'.
    """
    target = os.path.join(outpath, fpath)
    if not os.path.isfile(target):
        return ''
    nbytes = float(os.path.getsize(target))
    if nbytes > 2 ** 20:
        return '%1.1f MB' % (nbytes / 2 ** 20)
    if nbytes > 2 ** 10:
        return '%1.1f KB' % (nbytes / 2 ** 10)
    if nbytes > 0:
        return '%d B' % (int(nbytes))
    return '0 B'
def makeHtml(self):
    """ Create an HTML file content to list all the artifacts found in the output_dir

    PDF/PNG artifacts are compressed, thumbnailed and shown in per-section
    image grids (sections come from log file name prefixes); every file is
    also listed in a download table at the end.
    """
    galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
    galhtmlattr = """<hr/><div class="infomessage">This tool (%s) was generated by the <a href="https://bitbucket.org/fubar/galaxytoolfactory/overview">Galaxy Tool Factory</a></div><br/>"""
    galhtmlpostfix = """</div></body></html>\n"""
    flist = os.listdir(self.opts.output_dir)
    flist = [x for x in flist if x <> 'Rplots.pdf']
    flist.sort()
    html = []
    html.append(galhtmlprefix % progname)
    html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname,timenow()))
    fhtml = []
    if len(flist) > 0:
        logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
        logfiles.sort()
        logfiles = [x for x in logfiles if os.path.abspath(x) <> os.path.abspath(self.tlog)]
        logfiles.append(os.path.abspath(self.tlog)) # make it the last one
        pdflist = []
        npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf' or os.path.splitext(x)[-1].lower() == '.png'])
        for rownum,fname in enumerate(flist):
            dname,e = os.path.splitext(fname)
            sfsize = self.getfSize(fname,self.opts.output_dir)
            if e.lower() == '.pdf' or e.lower() == '.png': # compress and make a thumbnail
                thumb = '%s.%s' % (dname,self.thumbformat)
                pdff = os.path.join(self.opts.output_dir,fname)
                retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
                if retval == 0:
                    pdflist.append((fname,thumb))
                else:
                    # thumbnailing failed - link the original as its own thumb
                    pdflist.append((fname,fname))
            # alternate row shading in the downloads table
            if (rownum+1) % 2 == 0:
                fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
            else:
                fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
        for logfname in logfiles: # expect at least tlog - if more
            if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
                sectionname = 'All tool run'
                if (len(logfiles) > 1):
                    sectionname = 'Other'
                # main runner log gets whatever images are left over
                ourpdfs = pdflist
            else:
                realname = os.path.basename(logfname)
                sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
                ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
                pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] <> sectionname] # remove
            nacross = 1
            npdf = len(ourpdfs)
            if npdf > 0:
                # lay thumbnails out in a roughly square grid, max width 400px
                nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
                if int(nacross)**2 != npdf:
                    nacross += 1
                nacross = int(nacross)
                width = min(400,int(1200/nacross))
                html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
                html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
                ntogo = nacross # counter for table row padding with empty cells
                html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
                for i,paths in enumerate(ourpdfs):
                    fname,thumb = paths
                    s= """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
alt="Image called %s"/></a></td>\n""" % (fname,thumb,fname,width,fname)
                    if ((i+1) % nacross == 0):
                        # row is full - close it, and open a new one if more images remain
                        s += '</tr>\n'
                        ntogo = 0
                        if i < (npdf - 1): # more to come
                            s += '<tr>'
                            ntogo = nacross
                    else:
                        ntogo -= 1
                    html.append(s)
                if html[-1].strip().endswith('</tr>'):
                    html.append('</table></div>\n')
                else:
                    if ntogo > 0: # pad
                        html.append('<td> </td>'*ntogo)
                    html.append('</tr></table></div>\n')
            #logt = open(logfname,'r').readlines()
            #logtext = [x for x in logt if x.strip() > '']
            #html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
            #if len(logtext) > 1:
            #    html.append('\n<pre>\n')
            #    html += logtext
            #    html.append('\n</pre>\n')
            #else:
            #    html.append('%s is empty<br/>' % logfname)
        if len(fhtml) > 0:
            fhtml.insert(0,'<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
            fhtml.append('</table></div><br/>')
            html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
            html += fhtml # add all non-pdf files to the end of the display
    else:
        html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
    html.append(galhtmlpostfix)
    htmlf = file(self.opts.output_html,'w')
    htmlf.write('\n'.join(html))
    htmlf.write('\n')
    htmlf.close()
    self.html = html
def run(self):
    """Run the user script via self.cl, feeding the script text on stdin
    (bash/sh instead get the script file path - see runBash).

    scripts must be small enough not to fill the pipe!
    """
    if self.treatbashSpecial and self.opts.interpreter in ['bash','sh']:
        retval = self.runBash()
    else:
        if self.opts.output_dir:
            ste = open(self.elog,'w')
            sto = open(self.tlog,'w')
            sto.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
            sto.flush()
            p = subprocess.Popen(self.cl,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
            #p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=ste,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
        else:
            p = subprocess.Popen(self.cl,shell=False,stdin=subprocess.PIPE)
        # push the whole script down stdin, then let communicate() drain
        # stdout/stderr and wait for exit
        p.stdin.write(self.script)
        #p.stdin.close()
        stdout_data, stderr_data = p.communicate()
        # NOTE(review): communicate() already closes stdin; this extra close
        # is redundant (harmless on a closed file object in Python 2)
        p.stdin.close()
        #retval = p.wait()
        retval = p.returncode
        if self.opts.output_dir:
            sto.close()
            ste.close()
        err = stderr_data
        #err = open(self.elog,'r').readlines()
        print >> sys.stdout, stdout_data
        if retval <> 0 and err: # problem
            print >> sys.stderr,err.encode('raw_unicode_escape').decode('ascii')
        if self.opts.make_HTML:
            self.makeHtml()
    return retval
def runBash(self):
    """Run the saved script file through bash/sh.

    bash cannot take the script on stdin ('-'), so self.cl already names
    self.sfile on the command line instead.

    cannot use - for bash so use self.sfile
    """
    logging = bool(self.opts.output_dir)
    if logging:
        runner_log = open(self.tlog, 'w')
        runner_log.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
        runner_log.flush()
        child = subprocess.Popen(self.cl, shell=False, stdout=runner_log,
                                 stderr=runner_log, cwd=self.opts.output_dir)
    else:
        child = subprocess.Popen(self.cl, shell=False)
    exit_code = child.wait()
    if logging:
        runner_log.close()
    if self.opts.make_HTML:
        self.makeHtml()
    return exit_code
def main():
    """
    This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
    <command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
    </command>
    """
    parser = optparse.OptionParser()
    # (flag, default) pairs; string "None" (not Python None) marks optional
    # data inputs/outputs for the in/out tab options
    option_defaults = (
        ('--script_path', None),
        ('--tool_name', None),
        ('--interpreter', None),
        ('--output_dir', './'),
        ('--output_html', None),
        ('--input_tab', "None"),
        ('--input_formats', "tabular,text"),
        ('--output_tab', "None"),
        ('--output_format', "tabular"),
        ('--user_email', 'Unknown'),
        ('--bad_user', None),
        ('--make_Tool', None),
        ('--make_HTML', None),
        ('--help_text', None),
        ('--citations', None),
        ('--tool_desc', None),
        ('--new_tool', None),
        ('--tool_version', None),
        ('--include_dependencies', None),
    )
    for flag, default in option_defaults:
        parser.add_option(flag, default=default)
    opts, _args = parser.parse_args()
    assert not opts.bad_user, 'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user, opts.bad_user)
    assert opts.tool_name, '## Tool Factory expects a tool name - eg --tool_name=DESeq'
    assert opts.interpreter, '## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
    assert os.path.isfile(opts.script_path), '## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
    if opts.output_dir:
        try:
            os.makedirs(opts.output_dir)
        except:
            pass  # directory may already exist
    runner = ScriptRunner(opts)
    if opts.make_Tool:
        retcode = runner.makeTooltar()
    else:
        retcode = runner.run()
    os.unlink(runner.sfile)
    if retcode:
        sys.exit(retcode)  # indicate failure to job runner
# Script entry point when invoked by the generated Galaxy tool XML.
if __name__ == "__main__":
    main()
|
myoshimura080822/galaxy-mytools_ToolFactory
|
rgToolFactory.py
|
Python
|
mit
| 32,318
|
[
"Galaxy"
] |
2f90a5586d0b1cf11a953871f8cfba8306d20a135892b2222f840099b9359e53
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen import Structure, Lattice, Specie, DummySpecie
import numpy as np
"""
Classes for reading/writing mcsqs files following the rndstr.in format.
"""
__author__ = "Matthew Horton"
__copyright__ = "Copyright 2017, The Materials Project"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Production"
__date__ = "October 2017"
class Mcsqs:
    """
    Handle input/output for the crystal definition format
    used by mcsqs and other ATAT codes (rndstr.in / lat.in).
    """

    def __init__(self, structure):
        """
        :param structure: input Structure
        """
        self.structure = structure

    def to_string(self):
        """
        Returns a structure in mcsqs rndstr.in format.
        :return (str):
        """
        # add lattice vectors, one per line
        m = self.structure.lattice.matrix
        output = ["{:6f} {:6f} {:6f}".format(*m[0]),
                  "{:6f} {:6f} {:6f}".format(*m[1]),
                  "{:6f} {:6f} {:6f}".format(*m[2])]

        # define coord system, use Cartesian (identity matrix)
        output.append("1.0 0.0 0.0")
        output.append("0.0 1.0 0.0")
        output.append("0.0 0.0 1.0")

        # add species: one line per site, fractional coords followed by a
        # comma-delimited "element=occupancy" list
        for site in self.structure:
            species_str = []
            for sp, occu in sorted(site.species.items()):
                if isinstance(sp, Specie):
                    # mcsqs only understands bare elements, strip oxidation state
                    sp = sp.element
                species_str.append("{}={}".format(sp, occu))
            species_str = ",".join(species_str)
            output.append("{:6f} {:6f} {:6f} {}".format(site.frac_coords[0],
                                                        site.frac_coords[1],
                                                        site.frac_coords[2],
                                                        species_str))

        return "\n".join(output)

    @staticmethod
    def structure_from_string(data):
        """
        Parses a rndstr.in or lat.in file into pymatgen's
        Structure format.

        :param data: contents of a rndstr.in or lat.in file
        :return: Structure object
        """
        data = data.splitlines()
        data = [x.split() for x in data if x]  # remove empty lines

        # following specification/terminology given in manual
        if len(data[0]) == 6:  # first line holds lattice parameters
            a, b, c, alpha, beta, gamma = map(float, data[0])
            coord_system = Lattice.from_parameters(a, b, c,
                                                   alpha, beta, gamma).matrix
            lattice_vecs = np.array([
                [data[1][0], data[1][1], data[1][2]],
                [data[2][0], data[2][1], data[2][2]],
                [data[3][0], data[3][1], data[3][2]]
            ], dtype=float)
            first_species_line = 4
        else:  # first three lines are the coordinate system matrix
            coord_system = np.array([
                [data[0][0], data[0][1], data[0][2]],
                [data[1][0], data[1][1], data[1][2]],
                [data[2][0], data[2][1], data[2][2]]
            ], dtype=float)
            lattice_vecs = np.array([
                [data[3][0], data[3][1], data[3][2]],
                [data[4][0], data[4][1], data[4][2]],
                [data[5][0], data[5][1], data[5][2]]
            ], dtype=float)
            first_species_line = 6

        # BUGFIX: the lattice vectors are given as row fractional coordinates
        # of the coordinate system, so Cartesian rows are
        # lattice_vecs @ coord_system. The previous code multiplied in the
        # reverse order (coord_system @ lattice_vecs), which is only correct
        # when the coordinate system is the identity matrix.
        scaled_matrix = np.matmul(lattice_vecs, coord_system)
        lattice = Lattice(scaled_matrix)

        all_coords = []
        all_species = []
        for line in data[first_species_line:]:
            all_coords.append(np.array([line[0], line[1], line[2]], dtype=float))

            species_strs = "".join(line[3:])  # join multiple strings back together
            species_strs = species_strs.replace(" ", "")  # trim any white space
            species_strs = species_strs.split(",")  # comma-delimited

            species = {}
            for species_str in species_strs:
                species_str = species_str.split('=')
                if len(species_str) == 1:
                    # no "=occupancy" given, assume full occupancy
                    species_str = [species_str[0], 1.0]
                try:
                    species[Specie(species_str[0])] = float(species_str[1])
                except Exception:
                    # not a known element symbol (e.g. a vacancy placeholder)
                    species[DummySpecie(species_str[0])] = float(species_str[1])
            all_species.append(species)

        return Structure(lattice, all_species, all_coords)
|
fraricci/pymatgen
|
pymatgen/io/atat.py
|
Python
|
mit
| 4,449
|
[
"CRYSTAL",
"pymatgen"
] |
82e80b2190fdaf9e127996107bacde4a24f98edc8919e9edf6b1db4fb67c63fe
|
import numpy as np
# Convert a tomography_model.xyz grid model into an ASCII VTK structured
# grid file (point coordinates plus a Vp scalar field).
filename = 'tomography_model.xyz'

# Header layout: line 1 = x,y,z min/max; line 2 = dx dy dz;
# line 3 = number of grid nodes along x,y,z; line 4 = min/max vp, vs, rho.
with open(filename, 'r') as ftop:
    ftop.readline()                   # x,y,z min; x,y,z max
    ftop.readline()                   # dx dy dz
    numdim = ftop.readline().split()  # num elements x,y,z
    ftop.readline()                   # min/max vp, vs, rho
numpoints = int(numdim[0]) * int(numdim[1]) * int(numdim[2])

# columns 0,1,2 are x,y,z; 3 is vp (skip the 4 header lines)
fin = np.loadtxt(filename, skiprows=4, usecols=(0, 1, 2, 3))

with open('tomography_model.vtk', 'w') as f:
    f.write('# vtk DataFile Version 3.8.1\n')
    f.write('material model VTK file\n')
    f.write('ASCII\n')
    f.write('DATASET STRUCTURED_GRID\n')
    f.write('DIMENSIONS ' + numdim[0] + ' ' + numdim[1] + ' ' + numdim[2] + '\n')
    f.write('POINTS ' + str(numpoints) + ' float\n')
    # point coordinates, one grid node per line
    for p in range(len(fin)):
        f.write('%f %f %f \n' % (fin[p, 0], fin[p, 1], fin[p, 2]))
    f.write('\n')
    # Vp as a scalar attribute attached to the same points
    f.write('POINT_DATA ' + str(numpoints) + '\n')
    f.write('SCALARS Vp float 1\n')
    f.write('LOOKUP_TABLE default\n')
    for p in range(len(fin)):
        f.write('%f \n' % (fin[p, 3]))
|
alanschiemenz/specfem_FWI_workflow
|
libraries/xyztovtk.py
|
Python
|
gpl-3.0
| 1,012
|
[
"VTK"
] |
9858c3988668253b9d3e235955df197efd074d702493c19d8716188eb4fedfce
|
import os
import sys
import pickle
import matplotlib
matplotlib.use('Agg')
import numpy as np
from ase.atoms import Atoms
from ase.utils import devnull
from ase.data import atomic_numbers, atomic_names, covalent_radii
from gpaw.atom.generator import Generator
from gpaw.atom.configurations import parameters
from gpaw.atom.analyse_setup import analyse
from gpaw import GPAW, ConvergenceError, Mixer, FermiDirac
import gpaw.mpi as mpi
# Reference dimer bond lengths d0, per element symbol, used by
# MakeSetupPageData.prepare_box to size the cubic box and the dimer curve.
# Elements missing here fall back to 2 * covalent_radii[Z].
# NOTE(review): units are presumably Angstrom (ASE convention) - confirm.
b0 = {
    'H': 0.753,
    'He': 2.7,
    'Li': 2.99,
    'Be': 2.618,
    'B': 1.694,
    'C': 1.279,
    'N': 1.102,
    'O': 1.234,
    'F': 1.413,
    'Ne': 2.9,
    'Na': 3.289,
    'Mg': 3.5,
    'Al': 2.868,
    'Si': 2.218,
    'P': 1.878,
    'S': 1.893,
    'Cl': 1.989,
    'Ar': 3.7,
    'K': 4.108,
    'Ca': 2.805,
    'Sc': 2.3,
    'Ti': 2.0,
    'V': 1.82,
    'Cr': 1.75,
    'Mn': 1.78,
    'Fe': 1.850,
    'Co': 2.0,
    'Ni': 2.1,
    'Cu': 2.281,
    'Zn': 3.4,
    'Ga': 2.837,
    'Ge': 2.319,
    'As': 2.071,
    'Se': 2.154,
    'Br': 2.281,
    'Kr': 4.8,
    'Rb': 4.360,
    'Sr': 4.5,
    'Y': 2.67,
    'Zr': 2.36,
    'Nb': 2.14,
    'Mo': 1.95,
    'Ru': 2.1,
    'Rh': 2.20,
    'Pd': 2.485,
    'Ag': 2.626,
    'Cd': 3.6,
    'In': 3.1,
    'Sb': 2.5,
    'Xe': 4.5,
    'Cs': 4.819,
    'Ba': 4.60,
    'La': 2.872,
    'Hf': 2.38,
    'Ta': 2.2,
    'W': 2.1,
    'Re': 2.1,
    'Os': 2.2,
    'Ir': 2.227,
    'Pt': 2.373,
    'Au': 2.555,
    'Hg': 3.6,
    'Pb': 2.85,
    'Bi': 2.6,
    'Rn': 4.7,
}
class MakeSetupPageData:
    """Generate the benchmark data used on a setup documentation page for one
    element (setup metadata, eggbox errors, dimer energy/force curves) and
    pickle everything to <symbol>.pckl."""

    def __init__(self, symbol):
        self.symbol = symbol
        self.Z = atomic_numbers[symbol]
        self.name = atomic_names[self.Z]
        # GPAW parameters shared by all calculations below
        self.parameters = dict(occupations=FermiDirac(width=0.1), xc='PBE')
        # only the MPI master rank writes progress messages
        if mpi.rank == 0:
            self.log = sys.stdout
        else:
            self.log = devnull

    def run(self):
        """Run the whole pipeline unless <symbol>.pckl already exists."""
        if os.path.isfile(self.symbol + '.pckl'):
            self.log.write('Skipping %s\n' % self.symbol)
            return
        mpi.world.barrier()
        if mpi.rank == 0:
            # BUGFIX: pickle with a binary protocol (HIGHEST_PROTOCOL) needs a
            # binary file; the original opened in text mode ('w').
            self.file = open(self.symbol + '.pckl', 'wb')
        self.generate_setup()
        self.prepare_box()
        self.eggbox()
        self.dimer()
        self.pickle()

    def generate_setup(self):
        """Generate the PAW setup (PBE, scalar-relativistic) on the master
        rank and collect its metadata into self.data."""
        if mpi.rank == 0:
            gen = Generator(self.symbol, 'PBE', scalarrel=True)
            gen.run(logderiv=True, **parameters[self.symbol])
            # called for its side effects (setup analysis/plots); the return
            # value was never used
            analyse(gen, show=False)
            g = np.arange(gen.N)
            r_g = gen.r
            dr_g = gen.beta * gen.N / (gen.N - g)**2
            rcutcomp = gen.rcutcomp
            rcutfilter = gen.rcutfilter

            # Find cutoff for core density: integrate inward until the tail
            # holds at least 1e-7 electrons.
            if gen.Nc == 0:
                rcore = 0.5
            else:
                N = 0.0
                g = gen.N - 1
                while N < 1e-7:
                    N += np.sqrt(4 * np.pi) * gen.nc[g] * r_g[g]**2 * dr_g[g]
                    g -= 1
                rcore = r_g[g]

            # (n, l, f, eps, rcut) tuples: core states get rcut=0.0
            nlfer = []
            for j in range(gen.njcore):
                nlfer.append((gen.n_j[j], gen.l_j[j], gen.f_j[j], gen.e_j[j],
                              0.0))
            for n, l, f, eps in zip(gen.vn_j, gen.vl_j, gen.vf_j, gen.ve_j):
                nlfer.append((n, l, f, eps, gen.rcut_l[l]))

            self.data = dict(Z=gen.Z,
                             Nv=gen.Nv,
                             Nc=gen.Nc,
                             rcutcomp=rcutcomp,
                             rcutfilter=rcutfilter,
                             rcore=rcore,
                             Ekin=gen.Ekin,
                             Epot=gen.Epot,
                             Exc=gen.Exc,
                             nlfer=nlfer)

    def prepare_box(self):
        """Choose the dimer distance d0, the cubic cell size a and the list
        of grid spacings to scan."""
        # BUGFIX: the original read the module-level ``symbol`` variable
        # (only defined by the __main__ loop) instead of self.symbol.
        if self.symbol in b0:
            self.d0 = b0[self.symbol]
        else:
            self.d0 = 2 * covalent_radii[self.Z]
        # box sized as a multiple of 4 * 0.2 so all grid spacings divide it
        if self.symbol in ['He', 'Ne', 'Ar', 'Kr']:
            self.a = round(2 / np.sqrt(3) * self.d0 / 0.2 / 4) * 4 * 0.2
        else:
            self.a = round(max(2.5 * self.d0, 5.5) / 0.2 / 4) * 4 * 0.2
        # grid sizes (multiples of 4) spanning spacings of roughly 0.12-0.22
        gmin = 4 * int(self.a / 0.22 / 4 + 0.5)
        gmax = 4 * int(self.a / 0.12 / 4 + 0.5)
        self.ng = (gmax + 4 - gmin) // 4
        self.gridspacings = self.a / np.arange(gmin, gmax + 4, 4)

    def _eigensolver(self):
        """rmm-diis struggles to converge for a few elements; use cg there."""
        if self.symbol in ['Ti', 'Sn', 'Te', 'Ba']:
            return 'cg'
        return 'rmm-diis'

    def eggbox(self):
        """Translate a single atom across half a grid spacing at every grid
        spacing, recording energies and forces (the eggbox error)."""
        atom = Atoms(self.symbol, pbc=True, cell=(self.a, self.a, self.a))
        negg = 25
        self.Eegg = np.zeros((self.ng, negg))
        self.Fegg = np.zeros((self.ng, negg))
        eigensolver = self._eigensolver()
        for i in range(self.ng):
            h = self.gridspacings[i]
            calc = GPAW(h=h, txt='%s-eggbox-%.3f.txt' % (self.symbol, h),
                        mixer=Mixer(beta=0.1, nmaxold=5, weight=50),
                        eigensolver=eigensolver,
                        maxiter=300,
                        nbands=-10,
                        **self.parameters)
            atom.set_calculator(calc)
            for j in range(negg):
                # displace from 0 to h/2 in negg steps
                x = h * j / (2 * negg - 2)
                atom[0].x = x
                # a ConvergenceError is intentionally fatal here
                self.Eegg[i, j] = calc.get_potential_energy(
                    atom, force_consistent=True)
                self.Fegg[i, j] = atom.get_forces()[0, 0]

    def dimer(self):
        """Scan the dimer energy/force curve (+/-6% around d0) at every grid
        spacing."""
        dimer = Atoms([self.symbol, self.symbol],
                      pbc=True, cell=(self.a, self.a, self.a))
        self.Edimer = np.zeros((self.ng, 7))
        self.Fdimer = np.zeros((self.ng, 7, 2))
        # second atom moves along the cube diagonal, so divide by sqrt(3)
        q0 = self.d0 / np.sqrt(3)
        eigensolver = self._eigensolver()
        for i in range(self.ng):
            h = self.gridspacings[i]
            calc = GPAW(h=h, txt='%s-dimer-%.3f.txt' % (self.symbol, h),
                        mixer=Mixer(beta=0.1, nmaxold=5, weight=50),
                        eigensolver=eigensolver,
                        maxiter=300,
                        nbands=-10,
                        **self.parameters)
            dimer.set_calculator(calc)
            for j in range(-3, 4):
                q = q0 * (1 + j * 0.02)
                dimer.positions[1] = (q, q, q)
                # a ConvergenceError is intentionally fatal here
                self.Edimer[i, j + 3] = calc.get_potential_energy(
                    dimer, force_consistent=True)
                self.Fdimer[i, j + 3] = dimer.get_forces()[:, 0]

    def pickle(self):
        """Merge the box/eggbox/dimer results into self.data and dump it to
        the pickle file opened in run() (master rank only)."""
        if mpi.rank == 0:
            self.data.update({'d0': self.d0,
                              'a': self.a,
                              'gridspacings': self.gridspacings,
                              'Eegg': self.Eegg,
                              'Fegg': self.Fegg,
                              'Edimer': self.Edimer,
                              'Fdimer': self.Fdimer})
            pickle.dump(self.data, self.file, pickle.HIGHEST_PROTOCOL)
            # BUGFIX: close the handle so the data is flushed to disk
            self.file.close()
if __name__ == '__main__':
    # With no command-line arguments, process every element for which
    # generator parameters exist; otherwise only the listed symbols.
    args = sys.argv[1:]
    if len(args) == 0:
        args = parameters.keys()
    for symbol in args:
        MakeSetupPageData(symbol).run()
|
qsnake/gpaw
|
doc/setups/make_setup_pages_data.py
|
Python
|
gpl-3.0
| 7,442
|
[
"ASE",
"GPAW"
] |
70edc6c61e789d9d4edfd6c2e9134d8a7df9c5c86e8010db22cadf126f0ab149
|
# $HeadURL$
"""
Context
This module contains the helpers needed to fetch the contextualisation setup
configuration. The usage is always the same. Instance a new ContextConfig object,
and it will make sure is the right one.
This module provides Context helpers for the contextualisation methods:
* cloudinit
* occiOpenNebula
* ssh
* adhoc
* amiconfig
"""
# DIRAC
from DIRAC import gConfig, gLogger, S_ERROR, S_OK
__RCSID__ = '$Id: $'
class ContextConfig( object ):
  """
  Main and base class for the contextualisation configuration classes.
  Instantiating ContextConfig acts as a factory: the returned object is the
  subclass matching the requested context method.
  """

  # CS path where the context configuration lies
  CONTEXT_PATH = '/Resources/VirtualMachines/Images/%s/%s'
  # Mandatory keys for the basic Context Configuration. These options are
  # used by other components; validate() checks them as a sanity guard
  # against a miss-configured ConfigurationService.
  MANDATORY_KEYS = [ 'bootImageName', 'flavorName', 'contextMethod' ]

  def __new__( cls, _imageName, contextName ):
    """
    Uses the contextName parameter to decide which subclass to instantiate:
    `cloudinit` (currently served by SSHContext), `ssh`, `adhoc`,
    `amiconfig` or `occi_opennebula`.

    :Parameters:
      **_imageName** - `string`
        name of the image on the CS ( unused )
      **contextName** - `string`
        string with the type of context on the CS. It decides which class to load.

    :raises: NotImplementedError
    """
    if cls is ContextConfig:
      contextMap = { 'cloudinit'       : SSHContext,
                     'ssh'             : SSHContext,
                     'adhoc'           : AdHocContext,
                     'amiconfig'       : AmiconfigContext,
                     'occi_opennebula' : OcciOpennebulaContext }
      cls = contextMap.get( contextName )
      if cls is None:
        raise NotImplementedError( "No context implemented for %s" % contextName )
    return super( ContextConfig, cls ).__new__( cls )

  def __init__( self, imageName, contextName ):
    """
    Constructor. Gets section from <CONTEXT_PATH>/<imageName>/<contextName>
    or an empty dictionary in case the CS lookup fails.

    :Parameters:
      **imageName** - `string`
        name of the image on the CS
      **contextName** - `string`
        string with the type of context on the CS
    """
    # sublogger named after the concrete class chosen in __new__
    self.log = gLogger.getSubLogger( self.__class__.__name__ )

    lookup = gConfig.getOptionsDict( self.CONTEXT_PATH % ( imageName, contextName ) )
    if lookup[ 'OK' ]:
      self.__context = lookup[ 'Value' ]
    else:
      self.log.error( lookup[ 'Message' ] )
      self.__context = {}

  def config( self ):
    """
    Returns a defensive copy of the context dictionary.

    :return: dictionary
    """
    return dict( self.__context )

  def validate( self ):
    """
    Validates the context configuration obtained from the CS. Returns
    S_ERROR when any of <MANDATORY_KEYS> is absent from the configuration,
    S_OK otherwise.

    :return: S_OK | S_ERROR
    """
    missingKeys = set( self.MANDATORY_KEYS ).difference( set( self.config() ) )
    if missingKeys:
      return S_ERROR( 'Missing mandatory keys on endpointConfig %s' % str( missingKeys ) )
    return S_OK()
#...............................................................................
# SSH Context
class SSHContext( ContextConfig ):
  """
  SSHContext defines the following mandatory keys:

    * vmCertPath : the virtualmachine cert to be copied on the VM of a specific endpoint
    * vmKeyPath : the virtualmachine key to be copied on the VM of a specific endpoint
    * vmContextualizeScriptPath
    * vmCvmfsContextURL : the cvmfs context URL
    * vmDiracContextURL : the dirac specific context URL
    * vmRunJobAgentURL : the runsvdir run file for JobAgent
    * vmRunVmMonitorAgentURL : the runsvdir run file vmMonitorAgent
    * vmRunVmUpdaterAgentURL : the runsvdir run file VirtualMachineConfigUpdater agent
    * vmRunLogAgentURL : the runsvdir run.log file
  """
  # NOTE(review): the original docstring also listed flavorName, cpuTime and
  # cloudDriver as mandatory, and spelled the updater key
  # 'vmRunVmUpdaterJobAgentURL'. MANDATORY_KEYS below is what validate()
  # actually checks -- confirm which list is intended.
  MANDATORY_KEYS = [ 'vmCertPath',
                     'vmKeyPath', 'vmContextualizeScriptPath', 'vmCvmfsContextURL',
                     'vmDiracContextURL', 'vmRunJobAgentURL', 'vmRunVmMonitorAgentURL',
                     'vmRunVmUpdaterAgentURL', 'vmRunLogAgentURL']
#...............................................................................
# AdHoc Context
class AdHocContext( ContextConfig ):
  """
  AdHocContext does not define any mandatory key; it inherits MANDATORY_KEYS
  from ContextConfig (defined outside this view -- presumably empty, so
  validate() always succeeds; TODO confirm).
  """
  pass
#...............................................................................
# AmiconfigContext
class AmiconfigContext( ContextConfig ):
  """
  AmiconfigContext defines the following mandatory keys:

    * ex_size
    * ex_image
    * ex_security_groups
    * ex_userdata
  """
  MANDATORY_KEYS = [ 'ex_size', 'ex_image',
                     'ex_security_groups', 'ex_userdata' ]

  def __init__( self, imageName, contextName ):
    """
    Constructor. Gets section from <CONTEXT_PATH>/<imageName>/<contextName> or
    empty dictionary in case it fails.
    Extends ContextConfig constructor adding ex_metadata.

    :Parameters:
      **imageName** - `string`
        name of the image on the CS
      **contextName** - `string`
        string with the type of context on the CS. It decides which class to load.
        Either `cloudinit`, `ssh`,`adhoc`,`amiconfig`, `occi_opennebula`.
    """
    super( AmiconfigContext, self ).__init__( imageName, contextName )
    # ex_metadata is an optional sub-section on the CS; merge it into the
    # context only when present.
    metadata = gConfig.getOptionsDict( ( self.CONTEXT_PATH % ( imageName, contextName ) ) + '/ex_metadata' )
    if metadata[ 'OK' ]:
      # Deliberately reaches the base class' name-mangled private dict
      # (__context is private to ContextConfig).
      self._ContextConfig__context[ 'ex_metadata' ] = metadata[ 'Value' ]
#...............................................................................
# OcciOpennebulaContext
class OcciOpennebulaContext( ContextConfig ):
  """
  OcciOpennebulaContext defines the following mandatory keys:

    * hdcImageName
    * context_files_url
  """
  # (Original docstring mistakenly said "AmiconfigContext" and listed only
  # a misspelled 'hdcImangeName'.)
  MANDATORY_KEYS = [ 'hdcImageName','context_files_url' ]
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
vmendez/VMDIRAC
|
WorkloadManagementSystem/Utilities/Context.py
|
Python
|
gpl-3.0
| 7,046
|
[
"DIRAC"
] |
43adeec0e171ac7c79de800cdc6055454fef7e3ae5f74559fe5cf5fc5842e028
|
# -*- coding: utf-8 -*-
import ntpath
import egads
import os
import inspect
import logging
from PyQt5 import QtWidgets, QtCore, QtGui
from other_functions import check_compatibility_netcdf
from algorithm_window_functions import MyAlgorithmDisplay
def gui_position(self):
    """Resize the main window to 1150x544 and center it on the screen.

    Uses floor division when computing the top-left corner:
    ``QWidget.setGeometry`` requires ``int`` coordinates, and ``/`` yields
    ``float`` under Python 3 (the original raised ``TypeError`` in PyQt5).
    """
    logging.debug('gui - gui_functions.py - gui_position')
    self.resize(1150, 544)
    screen_height = QtWidgets.QDesktopWidget().screenGeometry().height()
    screen_width = QtWidgets.QDesktopWidget().screenGeometry().width()
    _, _, w, h = self.geometry().getRect()
    x = screen_width // 2 - w // 2
    y = screen_height // 2 - h // 2
    self.setGeometry(x, y, w, h)
def gui_initialization(self):
    """Put the main window into its initial (no file loaded) state."""
    logging.debug('gui - gui_functions.py - gui_initialization')
    # Separator actions carry no text of their own.
    self.actionSeparator.setText('')
    self.actionSeparator2.setText('')
    self.actionSeparator3.setText('')
    self.actionSeparator4.setText('')
    # Reset every tab label color to black.
    for i in range(self.tabWidget.count()):
        self.tabWidget.tabBar().setTabTextColor(i, QtGui.QColor(0,0,0))
    # Remove the 'New variables' tab (re-inserted by add_new_variable_gui)
    # and hide the tab widget until a file is opened.
    self.tabWidget.removeTab(2)
    self.tabWidget.setEnabled(False)
    self.tabWidget.setVisible(False)
def netcdf_gui_initialization(self):
    """Configure the metadata tabs for a NetCDF file."""
    logging.debug('gui - gui_functions.py - netcdf_gui_initialization')
    self.tabWidget.setEnabled(True)
    self.tabWidget.setVisible(True)
    # NetCDF shows the convention-compatibility indicator.
    self.gm_details_lb.setVisible(True)
    self.gm_compatibility_lb.setVisible(True)
    # Single history box (NASA Ames uses two comment boxes instead).
    self.gm_history_ln.setMinimumSize(QtCore.QSize(400, 150))
    self.gm_history_ln.setMaximumSize(QtCore.QSize(16777215, 150))
    self.gm_history_ln_2.setVisible(False)
    self.gm_history_lb_2.setVisible(False)
    self.gm_button_6.setVisible(False)
    # NOTE(review): other labels end with ':' ('History:') -- confirm
    # whether 'Project' should be 'Project:'.
    self.gm_project_lb.setText('Project')
    self.gm_history_lb.setText('History:')
    # NetCDF variables expose long_name/Category/Processor fields.
    self.va_longName_lb.setVisible(True)
    self.va_category_lb.setVisible(True)
    self.va_egadsProcessor_lb.setVisible(True)
    self.va_longName_ln.setVisible(True)
    self.va_category_ln.setVisible(True)
    self.va_egadsProcessor_ln.setVisible(True)
    self.va_button_2.setVisible(True)
    self.va_button_3.setVisible(True)
def nasaames_gui_initialization(self):
    """Configure the metadata tabs for a NASA Ames file."""
    logging.debug('gui - gui_functions.py - nasaames_gui_initialization')
    self.tabWidget.setEnabled(True)
    self.tabWidget.setVisible(True)
    # No NetCDF-convention compatibility indicator for NASA Ames.
    self.gm_details_lb.setVisible(False)
    self.gm_compatibility_lb.setVisible(False)
    # Two comment boxes: normal (NCOM) and special (SCOM) comments.
    self.gm_history_ln.setMinimumSize(QtCore.QSize(400, 140))
    self.gm_history_ln.setMaximumSize(QtCore.QSize(16777215, 140))
    self.gm_history_ln_2.setMinimumSize(QtCore.QSize(400, 140))
    self.gm_history_ln_2.setMaximumSize(QtCore.QSize(16777215, 140))
    self.gm_history_lb.setText('<html><head/><body><p>Normal<br>comments:</p></body></html>')
    self.gm_project_lb.setText('Author(s):')
    # NASA Ames variables have no long_name/Category/Processor attributes.
    self.va_longName_lb.setVisible(False)
    self.va_category_lb.setVisible(False)
    self.va_egadsProcessor_lb.setVisible(False)
    self.va_longName_ln.setVisible(False)
    self.va_category_ln.setVisible(False)
    self.va_egadsProcessor_ln.setVisible(False)
    self.va_button_2.setVisible(False)
    self.va_button_3.setVisible(False)
def icons_initialization(self):
    """Reset every toolbar action to the no-file-open state."""
    logging.debug('gui - gui_functions.py - icons_initialization')
    # Only opening a file and creating an algorithm are possible initially.
    self.actionOpenBar.setEnabled(True)
    self.actionSaveBar.setEnabled(False)
    self.actionSaveAsBar.setEnabled(False)
    self.actionCloseBar.setEnabled(False)
    self.actionAlgorithmsBar.setEnabled(False)
    self.actionCreatealgorithmBar.setEnabled(True)
    self.actionCreateVariableBar.setEnabled(False)
    self.actionDeleteVariableBar.setEnabled(False)
    self.actionMigrateVariableBar.setEnabled(False)
    self.actionGlobalAttributesBar.setEnabled(False)
    self.actionVariableAttributesBar.setEnabled(False)
    self.actionDisplayBar.setEnabled(False)
    self.actionPlotBar.setEnabled(False)
def algorithm_list_menu_initialization(self):
    """Rebuild the 'Embedded algorithms' and 'User-defined algorithms' menus.

    Scans the egads algorithm package on disk for category folders, lists
    the algorithm classes in each, and creates one submenu per category
    with one action per algorithm. Each action's objectName encodes
    '<origin>_<category>_<algorithm>' for display_algorithm_information().
    """
    logging.debug('gui - gui_functions.py - algorithm_list_menu_initialization')
    # Start from empty menus; keep references to menus/actions alive on self
    # so Qt does not garbage-collect them.
    self.menuEmbedded_algorithms.clear()
    self.menuUser_defined_algorithms.clear()
    self.algorithm_folder_menu = []
    self.algorithm_folder_actions = []
    self.user_folder_menu = []
    self.user_folder_actions = []
    font = QtGui.QFont()
    font.setFamily("fonts/SourceSansPro-Regular.ttf")
    font.setPointSize(10)
    font.setKerning(True)
    font.setStyleStrategy(QtGui.QFont.PreferAntialias)
    # icon1: embedded algorithms, icon2: user-defined algorithms.
    icon1 = QtGui.QIcon()
    icon1.addPixmap(QtGui.QPixmap("icons/new_algo_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    icon2 = QtGui.QIcon()
    icon2 .addPixmap(QtGui.QPixmap("icons/create_algo_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    algorithm_path = egads.__path__[0] + '/algorithms'
    user_algorithm_path = egads.__path__[0] + '/algorithms/user'
    folder_list = []
    algorithm_structure = []
    user_algorithm_structure = []
    # Collect embedded category folders, skipping 'file_templates' and the
    # 'user' subtree (handled separately below).
    for item in os.walk(algorithm_path):
        index = item[0].find('algorithms')
        if item[0][index + 11:]:
            if not 'file_templates' in item[0][index + 11:] and not 'user' in item[0][index + 11:]:
                folder_list.append(item[0][index + 11:])
    for folder in folder_list:
        algorithm_tmp_list = dir(getattr(egads.algorithms, folder))
        algorithm_list = []
        for item in algorithm_tmp_list:
            # Keep only class attributes: each algorithm is a class.
            if isinstance(getattr(getattr(egads.algorithms, folder), item), type):
                algorithm_list.append(item)
        algorithm_structure.append([folder, sorted(algorithm_list)])
    # Same scan for the user-defined tree.
    folder_list = []
    for item in os.walk(user_algorithm_path):
        index = item[0].find('user')
        if item[0][index + 5:]:
            if not 'file_templates' in item[0][index + 5:]:
                folder_list.append(item[0][index + 5:])
    for folder in folder_list:
        algorithm_tmp_list = dir(getattr(egads.algorithms.user, folder))
        algorithm_list = []
        for item in algorithm_tmp_list:
            if isinstance(getattr(getattr(egads.algorithms.user, folder), item), type):
                algorithm_list.append(item)
        user_algorithm_structure.append([folder, sorted(algorithm_list)])
    # Build the embedded menu tree: one submenu per category.
    i = 0
    for sublist in sorted(algorithm_structure):
        self.algorithm_folder_menu.append(QtWidgets.QMenu(self.menuEmbedded_algorithms))
        self.algorithm_folder_menu[i].setObjectName('embedded_category_' + sublist[0])
        self.algorithm_folder_menu[i].setTitle(sublist[0].title())
        self.menuEmbedded_algorithms.addAction(self.algorithm_folder_menu[i].menuAction())
        self.algorithm_folder_actions.append([])
        j = 0
        for algorithm in sublist[1]:
            self.algorithm_folder_actions[i].append(QtWidgets.QAction(self))
            self.algorithm_folder_actions[i][j].setIcon(icon1)
            self.algorithm_folder_actions[i][j].setFont(font)
            self.algorithm_folder_actions[i][j].setObjectName('embedded_' + sublist[0] + '_' + algorithm)
            self.algorithm_folder_actions[i][j].setText(algorithm)
            # The slot identifies which algorithm was clicked via sender(),
            # so a single shared lambda is fine here.
            self.algorithm_folder_actions[i][j].triggered.connect(lambda: display_algorithm_information(self))
            self.algorithm_folder_menu[i].addAction(self.algorithm_folder_actions[i][j])
            j += 1
        i += 1
    # Build the user-defined menu tree, skipping empty categories.
    i = 0
    for sublist in sorted(user_algorithm_structure):
        if sublist[1]:
            self.user_folder_menu.append(QtWidgets.QMenu(self.menuUser_defined_algorithms))
            self.user_folder_menu[i].setObjectName('user_category_' + sublist[0])
            self.user_folder_menu[i].setTitle(sublist[0].title())
            self.menuUser_defined_algorithms.addAction(self.user_folder_menu[i].menuAction())
            self.user_folder_actions.append([])
            j = 0
            for algorithm in sublist[1]:
                self.user_folder_actions[i].append(QtWidgets.QAction(self))
                self.user_folder_actions[i][j].setIcon(icon2)
                self.user_folder_actions[i][j].setFont(font)
                self.user_folder_actions[i][j].setObjectName('user_' + sublist[0] + '_' + algorithm)
                self.user_folder_actions[i][j].setText(algorithm)
                self.user_folder_actions[i][j].triggered.connect(lambda: display_algorithm_information(self))
                self.user_folder_menu[i].addAction(self.user_folder_actions[i][j])
                j += 1
            i += 1
def display_algorithm_information(self):
    """Open a modal window describing the algorithm behind the clicked menu entry.

    The sender action's objectName encodes the origin, category and name as
    'embedded_<category>_<algorithm>' or 'user_<category>_<algorithm>'
    (set by algorithm_list_menu_initialization).
    """
    logging.debug('gui - gui_functions.py - display_algorithm_information')
    object_name = self.sender().objectName()
    # Guard against an unexpected prefix: original left second_index unbound.
    second_index = -1
    if 'embedded' in object_name:
        second_index = object_name.find('_', 9)
    elif 'user' in object_name:
        second_index = object_name.find('_', 5)
    first_index = object_name.find('_')
    algorithm_category = object_name[first_index + 1: second_index]
    algorithm_name = object_name[second_index + 1:]
    try:
        algorithm = getattr(getattr(egads.algorithms, algorithm_category), algorithm_name)
    except AttributeError:
        # Not an embedded algorithm: look it up in the user-defined tree.
        algorithm = getattr(getattr(egads.algorithms.user, algorithm_category), algorithm_name)
    # BUGFIX: only strip the trailing 'c' when inspect.getfile() returned a
    # compiled '.pyc' path. The original always dropped the last character,
    # which mangles a plain '.py' path (the usual case under Python 3).
    file_path = inspect.getfile(algorithm)
    if file_path.endswith('.pyc'):
        file_path = file_path[:-1]
    algorithm_metadata = algorithm().metadata
    output_metadata = algorithm().output_metadata
    algorithm_dict = {}
    algorithm_dict['Name'] = algorithm_metadata['Processor']
    algorithm_dict['File'] = algorithm_name + '.py'
    algorithm_dict['Purpose'] = algorithm_metadata['Purpose']
    algorithm_dict['Description'] = algorithm_metadata['Description']
    algorithm_dict['Source'] = algorithm_metadata['Source']
    algorithm_dict['References'] = algorithm_metadata['References']
    algorithm_dict['Version'] = algorithm_metadata['ProcessorVersion']
    algorithm_dict['Date'] = algorithm_metadata['ProcessorDate']
    # One dict per declared input/output, aligned by position.
    algorithm_inputs = []
    for index, symbol in enumerate(algorithm_metadata['Inputs']):
        algorithm_inputs.append({
            'Symbol': symbol,
            'Units': algorithm_metadata['InputUnits'][index],
            'Type': algorithm_metadata['InputTypes'][index],
            'Description': algorithm_metadata['InputDescription'][index],
        })
    algorithm_outputs = []
    for index, symbol in enumerate(algorithm_metadata['Outputs']):
        output_dict = {
            'Symbol': symbol,
            'Units': algorithm_metadata['OutputUnits'][index],
            'Type': algorithm_metadata['OutputTypes'][index],
            'Description': algorithm_metadata['OutputDescription'][index],
        }
        # output_metadata is a list for multi-output algorithms and a single
        # mapping for single-output ones (indexing the mapping with an int
        # raises KeyError, which selects the fallback branch).
        try:
            output_dict['StandardName'] = output_metadata[index]['standard_name']
            output_dict['LongName'] = output_metadata[index]['long_name']
            output_dict['Category'] = output_metadata[index]['Category']
        except KeyError:
            output_dict['StandardName'] = output_metadata['standard_name']
            output_dict['LongName'] = output_metadata['long_name']
            output_dict['Category'] = output_metadata['Category']
        algorithm_outputs.append(output_dict)
    algorithm_dict['Input'] = algorithm_inputs
    algorithm_dict['Output'] = algorithm_outputs
    # Extract the body of the _algorithm() method for display.
    lines = []
    read = False
    # BUGFIX: use a context manager so the source file is always closed.
    with open(file_path, 'r') as source_file:
        for line in source_file:
            if 'def _algorithm' in line:
                read = True
                continue
            if read:
                lines.append(line)
    algorithm_dict['Algorithm'] = ''.join(lines)
    self.displayAlgorithmWindow = MyAlgorithmDisplay(algorithm_dict)
    # Centre the dialog on the main window; floor division because
    # setGeometry() requires int coordinates under Python 3.
    x1, y1, w1, h1 = self.geometry().getRect()
    _, _, w2, h2 = self.displayAlgorithmWindow.geometry().getRect()
    self.displayAlgorithmWindow.setGeometry(x1 + w1 // 2 - w2 // 2, y1 + h1 // 2 - h2 // 2, w2, h2)
    self.displayAlgorithmWindow.exec_()
def modify_attribute_gui(self, string):
    """Handle a click on an attribute edit/save tool button.

    :param string: which mouse button was used -- 'left' toggles edit mode
        and commits the new value; 'right' cancels and restores the value
        held in the attribute store.
    """
    logging.debug('gui - gui_functions.py - modify_attribute_gui : sender().objectName() '
                  + str(self.sender().objectName()))
    if self.sender().objectName() != "":
        # value: [line-edit name, associated QListWidget (or None), attribute store]
        value = self.buttons_lines_dict[str(self.sender().objectName())]
        widget = self.findChildren(QtWidgets.QLineEdit, value[0])
        if not widget:
            widget = self.findChildren(QtWidgets.QPlainTextEdit, value[0])
        list_widget = value[1]
        var_attr_list = value[2]
        if widget[0].isEnabled() == False:
            if string == 'left':
                # Enter edit mode: enable the field and show the "save" icon.
                widget[0].setEnabled(True)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("icons/save_as_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                self.sender().setIcon(icon)
        else:
            if string == 'left':
                # Commit: mark the file as modified and write the edited text
                # back into the attribute store.
                self.modified = True
                self.make_window_title()
                widget[0].setEnabled(False)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("icons/edit_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                self.sender().setIcon(icon)
                value = self.objects_metadata_dict[str(widget[0].objectName())]
                if isinstance(value, list):
                    # [NetCDF attribute name, NASA Ames attribute name]
                    if self.open_file_ext == 'NetCDF Files (*.nc)':
                        value = value[0]
                    elif self.open_file_ext == 'NASA Ames Files (*.na)':
                        value = value[1]
                if list_widget is not None:
                    # Variable attribute: keyed by the selected variable name.
                    try:
                        var_attr_list[str(list_widget.currentItem().text())][1][value] = str(widget[0].text())
                    except AttributeError:
                        # QPlainTextEdit has no text(); read with toPlainText().
                        var_attr_list[str(list_widget.currentItem().text())][1][value] = str(widget[0].toPlainText())
                    if value == "var_name":
                        # Renaming a variable: keep the store key and the list
                        # widget label in sync with the new name.
                        if self.open_file_ext == 'NASA Ames Files (*.na)':
                            var_attr_list[str(list_widget.currentItem().text())][1]['standard_name'] = str(widget[0].text())
                        var_attr_list[str(widget[0].text())] = var_attr_list.pop(str(list_widget.currentItem().text()))
                        list_widget.currentItem().setText(str(widget[0].text()))
                else:
                    # Global attribute: keyed directly by the attribute name.
                    try:
                        var_attr_list[value] = str(widget[0].text())
                    except AttributeError:
                        var_attr_list[value] = str(widget[0].toPlainText())
            elif string == 'right':
                # Cancel: disable the field and restore the stored value.
                widget[0].setEnabled(False)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap("icons/edit_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
                self.sender().setIcon(icon)
                value = self.objects_metadata_dict[str(widget[0].objectName())]
                if isinstance(value, list):
                    if self.open_file_ext == 'NetCDF Files (*.nc)':
                        value = value[0]
                    elif self.open_file_ext == 'NASA Ames Files (*.na)':
                        value = value[1]
                if list_widget is not None:
                    try:
                        widget[0].setText(var_attr_list[str(list_widget.currentItem().text())][1][value])
                        widget[0].setCursorPosition(0)
                    except AttributeError:
                        # BUGFIX: the original called toPlainText(value), which is a
                        # getter; restoring a QPlainTextEdit needs setPlainText().
                        widget[0].setPlainText(var_attr_list[str(list_widget.currentItem().text())][1][value])
                else:
                    if isinstance(var_attr_list[value], list):
                        # Flatten list values for display: ints joined with '-',
                        # strings joined with ', ' (same format as read_set_attribute_gui).
                        long_string = ''
                        for element in var_attr_list[value]:
                            if isinstance(element, int):
                                long_string += str(element) + '-'
                            else:
                                long_string += element + ', '
                        if long_string[-1:] == '-':
                            text = long_string[:-1]
                        else:
                            text = long_string[:-2]
                    else:
                        text = var_attr_list[value]
                    try:
                        widget[0].setText(text)
                        widget[0].setCursorPosition(0)
                    except AttributeError:
                        widget[0].setPlainText(text)
def update_global_attribute_gui(self, source):
    """Populate the global-metadata line edits from self.list_of_global_attributes.

    :param source: 'NetCDF' or 'NASA Ames' -- selects the attribute names
        to read for each widget.
    """
    logging.debug('gui - gui_functions.py - update_global_attribute_gui : source ' + str(source))
    if source == 'NetCDF':
        widget_attribute_pairs = [
            (self.gm_title_ln, 'title'),
            (self.gm_institution_ln, 'institution'),
            (self.gm_source_ln, 'source'),
            (self.gm_project_ln, 'project'),
            (self.gm_dateCreation_ln, 'date_created'),
            (self.gm_history_ln, 'history'),
        ]
    elif source == 'NASA Ames':
        widget_attribute_pairs = [
            (self.gm_title_ln, 'MNAME'),
            (self.gm_institution_ln, 'ORG'),
            (self.gm_source_ln, 'SNAME'),
            (self.gm_dateCreation_ln, 'DATE'),
            (self.gm_history_ln, 'NCOM'),
            (self.gm_history_ln_2, 'SCOM'),
            (self.gm_project_ln, 'ONAME'),
        ]
    else:
        widget_attribute_pairs = []
    for target_widget, attribute_name in widget_attribute_pairs:
        read_set_attribute_gui(self, target_widget, attribute_name, self.list_of_global_attributes)
def update_variable_attribute_gui(self, index=None):
    """Refresh the variable-attribute widgets from the selected variable.

    :param index: optional tab index override (1 = file variables,
        2 = newly created variables); defaults to the current tab.
    """
    logging.debug('gui - gui_functions.py - update_variable_attribute_gui : index ' + str(index))
    if self.tabWidget.currentIndex() == 1 or index == 1:
        list_object = self.listWidget
        variables_and_attributes = self.list_of_variables_and_attributes
        varName_ln = self.va_varName_ln
        longName_ln = self.va_longName_ln
        category_ln = self.va_category_ln
        processor_ln = self.va_egadsProcessor_ln
        units_ln = self.va_units_ln
        fillValue_ln = self.va_fillValue_ln
        dimensions_ln = self.va_dimensions_ln
    elif self.tabWidget.currentIndex() == 2 or index == 2:
        list_object = self.new_listwidget
        variables_and_attributes = self.list_of_new_variables_and_attributes
        varName_ln = self.new_varName_ln
        longName_ln = self.new_longName_ln
        category_ln = self.new_category_ln
        processor_ln = self.new_egadsProcessor_ln
        units_ln = self.new_units_ln
        fillValue_ln = self.new_fillValue_ln
        dimensions_ln = self.new_dimensions_ln
    # sublist: [display name, attribute dict, dimensions dict, ...]
    sublist = variables_and_attributes[str(list_object.currentItem().text())]
    read_set_attribute_gui(self, varName_ln, 'var_name', sublist[1])
    read_set_attribute_gui(self, longName_ln, 'long_name', sublist[1])
    read_set_attribute_gui(self, units_ln, 'units', sublist[1])
    read_set_attribute_gui(self, category_ln, 'Category', sublist[1])
    read_set_attribute_gui(self, processor_ln, 'Processor', sublist[1])
    read_set_attribute_gui(self, fillValue_ln, '_FillValue', sublist[1])
    if not fillValue_ln.text():
        # Fall back to the alternative attribute name for the fill value.
        read_set_attribute_gui(self, fillValue_ln, 'missing_value', sublist[1])
    dimensions_str = ''
    # BUGFIX: dict.iteritems() does not exist in Python 3 (this file uses
    # PyQt5 and .items() elsewhere); the original raised AttributeError here.
    for key, value in sublist[2].items():
        dimensions_str = dimensions_str + str(value) + ' (' + key + '), '
    # No attribute dict passed: the third argument is displayed verbatim.
    read_set_attribute_gui(self, dimensions_ln, dimensions_str[:-2])
def update_new_variable_list_gui(self):
    """Rebuild the 'new variables' list widget from the attribute store."""
    logging.debug('gui - gui_functions.py - update_new_variable_list_gui')
    self.new_listwidget.clear()
    # Each store entry is [display name, attribute dict, ...]; show the name.
    for entry in self.list_of_new_variables_and_attributes.values():
        self.new_listwidget.addItem(entry[0])
def add_new_variable_gui(self):
    """Show the 'New variables' tab and hook up its list widget."""
    logging.debug('gui - gui_functions.py - add_new_variable_gui')
    # Re-insert the tab removed in gui_initialization().
    self.tabWidget.insertTab(2, self.tab_3, 'New variables')
    self.tabWidget.tabBar().setTabTextColor(2, QtGui.QColor(0,0,0))
    # NOTE(review): connect() adds a new connection on every call -- if this
    # function can run more than once, new_var_reading will fire repeatedly
    # per click; confirm callers invoke it only once.
    self.new_listwidget.itemClicked.connect(lambda: new_var_reading(self))
def new_var_reading(self):
    """Display the attributes of the selected newly-created variable."""
    logging.debug('gui - gui_functions.py - new_var_reading : variable ' + str(self.new_listwidget.currentItem().text()))
    update_icons_state(self, 'new_var_reading')
    clear_gui(self, 'new_variable')
    # Lock every edit field and reset all tool buttons to the "edit" icon.
    all_lines_edit = self.tab_3.findChildren(QtWidgets.QLineEdit)
    for widget in all_lines_edit:
        widget.setEnabled(False)
    all_text_edit = self.tab_3.findChildren(QtWidgets.QPlainTextEdit)
    for widget in all_text_edit:
        widget.setEnabled(False)
    all_buttons = self.tab_3.findChildren(QtWidgets.QToolButton)
    icon = QtGui.QIcon()
    icon.addPixmap(QtGui.QPixmap("icons/edit_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    for widget in all_buttons:
        widget.setIcon(icon)
    # sublist: [display name, attribute dict, dimensions dict, ...]
    sublist = self.list_of_new_variables_and_attributes[self.new_listwidget.currentItem().text()]
    read_set_attribute_gui(self, self.new_varName_ln, 'var_name', sublist[1])
    read_set_attribute_gui(self, self.new_longName_ln, 'long_name', sublist[1])
    read_set_attribute_gui(self, self.new_units_ln, 'units', sublist[1])
    read_set_attribute_gui(self, self.new_category_ln, 'Category', sublist[1])
    read_set_attribute_gui(self, self.new_egadsProcessor_ln, 'Processor', sublist[1])
    read_set_attribute_gui(self, self.new_fillValue_ln, '_FillValue', sublist[1])
    if not self.new_fillValue_ln.text():
        # Fall back to the alternative attribute name for the fill value.
        read_set_attribute_gui(self, self.new_fillValue_ln, 'missing_value', sublist[1])
    dimensions_str = ''
    # BUGFIX: dict.iteritems() does not exist in Python 3; use items().
    for key, value in sublist[2].items():
        dimensions_str = dimensions_str + str(value) + ' (' + key + '), '
    read_set_attribute_gui(self, self.new_dimensions_ln, dimensions_str[:-2])
def statusBar_loading(self):
    """Create the filename label and add it to the status bar."""
    logging.debug('gui - gui_functions.py - statusBar_loading')
    self.sb_filename_lb = QtWidgets.QLabel()
    sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.sb_filename_lb.sizePolicy().hasHeightForWidth())
    self.sb_filename_lb.setSizePolicy(sizePolicy)
    # Fixed 20px height; width free to grow.
    self.sb_filename_lb.setMinimumSize(QtCore.QSize(0, 20))
    self.sb_filename_lb.setMaximumSize(QtCore.QSize(16777215, 20))
    font = QtGui.QFont()
    # NOTE(review): other widgets use "fonts/SourceSansPro-Regular.ttf";
    # "font/..." here may be a typo -- confirm the resource path.
    font.setFamily("font/SourceSansPro-Regular.ttf")
    font.setPointSize(9)
    font.setItalic(True)
    font.setKerning(True)
    font.setStyleStrategy(QtGui.QFont.PreferAntialias)
    self.sb_filename_lb.setFont(font)
    self.sb_filename_lb.setObjectName("sb_filename_lb")
    # Empty until statusBar_updating() fills it in.
    self.sb_filename_lb.setText("")
    self.statusBar.addWidget(self.sb_filename_lb)
def statusBar_updating(self, filetype):
    """Refresh the status-bar label with name, size, type and conventions
    of the open file, or blank it when the file is closed.

    :param filetype: 'close_file', 'NASA Ames', or any other file type label.
    """
    logging.debug('gui - gui_functions.py - statusBar_updating : filetype ' + str(filetype))
    if filetype == 'close_file':
        self.sb_filename_lb.setText('')
        return
    base_name, extension = ntpath.splitext(ntpath.basename(self.open_file_name))
    readable_size = humansize(self, ntpath.getsize(self.open_file_name))
    try:
        conventions = self.list_of_global_attributes['Conventions']
    except KeyError:
        logging.exception('gui - gui_functions.py - statusBar_updating : an exception occured')
        conventions = 'no conventions'
    if filetype == 'NASA Ames':
        # NASA Ames files have no 'Conventions' attribute worth showing.
        conventions = 'NASA Ames file conventions'
    label_text = (base_name + extension) + ' | ' + readable_size + ' | ' + filetype + ' | ' + conventions
    self.sb_filename_lb.setText(label_text)
def update_icons_state(self, string=None):
    """Enable/disable toolbar actions according to the GUI event that occurred.

    :param string: one of 'close_file', 'open_file', 'var_reading',
        'new_var_reading', or None (tab change / selection refresh).
    """
    logging.debug('gui - gui_functions.py - update_icons_state : string ' + str(string))
    if string == 'close_file':
        # Back to the initial no-file state.
        icons_initialization(self)
    if string == 'open_file':
        self.actionSaveAsBar.setEnabled(True)
        self.actionCloseBar.setEnabled(True)
        self.actionCreateVariableBar.setEnabled(True)
        self.actionGlobalAttributesBar.setEnabled(True)
    if string == 'var_reading':
        # A file variable is selected: enable its attribute edit buttons.
        self.va_button_1.setEnabled(True)
        self.va_button_2.setEnabled(True)
        self.va_button_3.setEnabled(True)
        self.va_button_4.setEnabled(True)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("icons/edit_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.va_button_1.setIcon(icon)
        self.va_button_2.setIcon(icon)
        self.va_button_3.setIcon(icon)
        self.va_button_4.setIcon(icon)
        self.actionAlgorithmsBar.setEnabled(True)
        self.actionDeleteVariableBar.setEnabled(True)
        self.actionVariableAttributesBar.setEnabled(True)
        self.actionPlotBar.setEnabled(True)
        self.actionDisplayBar.setEnabled(True)
    if string == 'new_var_reading':
        # A newly created variable is selected.
        self.new_button_1.setEnabled(True)
        self.new_button_2.setEnabled(True)
        self.new_button_3.setEnabled(True)
        self.new_button_4.setEnabled(True)
        self.actionAlgorithmsBar.setEnabled(True)
        self.actionDeleteVariableBar.setEnabled(True)
        self.actionVariableAttributesBar.setEnabled(True)
        self.actionPlotBar.setEnabled(True)
        self.actionDisplayBar.setEnabled(True)
        self.actionMigrateVariableBar.setEnabled(True)
    if string == None:
        # No explicit event: recompute from the current tab and selection.
        self.actionAlgorithmsBar.setEnabled(False)
        self.actionDeleteVariableBar.setEnabled(False)
        self.actionVariableAttributesBar.setEnabled(False)
        self.actionPlotBar.setEnabled(False)
        self.actionDisplayBar.setEnabled(False)
        self.actionMigrateVariableBar.setEnabled(False)
        if self.tabWidget.currentIndex() == 1:
            # File-variables tab.
            self.actionAlgorithmsBar.setEnabled(True)
            self.actionPlotBar.setEnabled(True)
            try:
                if self.listWidget.currentItem().text() == "":
                    self.actionDisplayBar.setEnabled(False)
                    self.actionVariableAttributesBar.setEnabled(False)
                    self.actionDeleteVariableBar.setEnabled(False)
                else:
                    self.actionDisplayBar.setEnabled(True)
                    self.actionVariableAttributesBar.setEnabled(True)
                    self.actionDeleteVariableBar.setEnabled(True)
            except AttributeError:
                # currentItem() is None: nothing selected.
                self.actionDisplayBar.setEnabled(False)
                self.actionVariableAttributesBar.setEnabled(False)
                self.actionDeleteVariableBar.setEnabled(False)
        elif self.tabWidget.currentIndex() == 2:
            # New-variables tab.
            self.actionAlgorithmsBar.setEnabled(True)
            self.actionPlotBar.setEnabled(True)
            try:
                if self.new_listwidget.currentItem().text() == "":
                    self.actionDisplayBar.setEnabled(False)
                    self.actionVariableAttributesBar.setEnabled(False)
                    self.actionDeleteVariableBar.setEnabled(False)
                    self.actionMigrateVariableBar.setEnabled(False)
                else:
                    self.actionDisplayBar.setEnabled(True)
                    self.actionVariableAttributesBar.setEnabled(True)
                    self.actionDeleteVariableBar.setEnabled(True)
                    self.actionMigrateVariableBar.setEnabled(True)
            except AttributeError:
                # currentItem() is None: nothing selected.
                self.actionDisplayBar.setEnabled(False)
                self.actionVariableAttributesBar.setEnabled(False)
                self.actionDeleteVariableBar.setEnabled(False)
                self.actionMigrateVariableBar.setEnabled(False)
def clear_gui(self, part=None):
    """Blank the metadata widgets for the requested part of the GUI.

    :param part: 'global', 'variable', 'new_variable', or None to clear
        everything including the two variable list widgets.
    """
    logging.debug('gui - gui_functions.py - clear_gui : part ' + str(part))
    if part is None or part == 'global':
        for line_edit in (self.gm_filename_ln, self.gm_title_ln, self.gm_institution_ln,
                          self.gm_source_ln, self.gm_project_ln, self.gm_dateCreation_ln):
            line_edit.setText('')
        self.gm_history_ln.setPlainText('')
        update_compatibility_label(self, 'clear')
    if part is None or part == 'variable':
        for line_edit in (self.va_varName_ln, self.va_longName_ln, self.va_category_ln,
                          self.va_units_ln, self.va_fillValue_ln, self.va_dimensions_ln):
            line_edit.setText('')
        self.va_egadsProcessor_ln.setPlainText('')
    if part is None or part == 'new_variable':
        for line_edit in (self.new_varName_ln, self.new_longName_ln, self.new_category_ln,
                          self.new_units_ln, self.new_fillValue_ln, self.new_dimensions_ln):
            line_edit.setText('')
        self.new_egadsProcessor_ln.setPlainText('')
    if part is None:
        self.listWidget.clear()
        self.new_listwidget.clear()
def update_compatibility_label(self, string=None):
    """Update the NetCDF convention-compatibility indicator.

    :param string: None to (re)compute the compatibility for the open file;
        any other value (e.g. 'clear') hides the indicator.
    """
    logging.debug('gui - gui_functions.py - update_compatibility_label : string ' + str(string))
    if string is None:
        result = check_compatibility_netcdf(self, self.list_of_global_attributes, self.list_of_variables_and_attributes)
        # compatibility_level maps the check result to a sublist whose
        # indices 1..3 are [pixmap path, detail text, show-fix-button flag]
        # (index 0 unused here -- TODO confirm its meaning).
        sublist = self.compatibility_level[result]
        self.gm_compatibility_lb.setEnabled(True)
        self.gm_compatibility_lb.setVisible(True)
        self.gm_compatibility_lb.setPixmap(QtGui.QPixmap(sublist[1]))
        self.gm_details_lb.setText(sublist[2])
        self.gm_button_7.setEnabled(sublist[3])
        self.gm_button_7.setVisible(sublist[3])
    else:
        # Hide the indicator (used when clearing the GUI).
        self.gm_compatibility_lb.setEnabled(False)
        self.gm_compatibility_lb.setVisible(False)
        self.gm_details_lb.setText('')
        self.gm_button_7.setEnabled(False)
        self.gm_button_7.setVisible(False)
def read_set_attribute_gui(self, gui_object, attr_name, attr_dict=None):
    """Display an attribute value (or a literal string) in a GUI widget.

    :param gui_object: target QLabel, QLineEdit or QPlainTextEdit.
    :param attr_name: key to look up in *attr_dict*; when *attr_dict* is
        None, *attr_name* itself is the text to display.
    :param attr_dict: optional mapping of attribute names to values.
    """
    logging.debug('gui - gui_functions.py - read_set_attribute_gui : gui_object ' + str(gui_object)
                  + ', attr_name ' + str(attr_name) + ', attr_dict ' + str(attr_dict))
    if attr_dict is not None:
        try:
            value = attr_dict[attr_name]
        except KeyError:
            value = ''
        # 'deleted' is the sentinel used for removed attributes.
        if value == 'deleted':
            value = ''
        if isinstance(value, list):
            # Flatten list values for display: ints joined with '-',
            # strings joined with ', '.
            long_string = ''
            for string in value:
                if isinstance(string, int):
                    long_string += str(string) + '-'
                else:
                    long_string += string + ', '
            if long_string[-1:] == '-':
                value = long_string[:-1]
            else:
                value = long_string[:-2]
        try:
            # QLabel/QLineEdit path; QPlainTextEdit has no setText() and
            # falls through to setPlainText() via AttributeError.
            gui_object.setText(str(value))
            if not isinstance(gui_object, QtWidgets.QLabel):
                gui_object.setCursorPosition(0)
        except AttributeError:
            gui_object.setPlainText(str(value))
    else:
        # Literal mode: attr_name is the text itself.
        if attr_name == 'deleted':
            attr_name = ''
        try:
            gui_object.setText(str(attr_name))
            if not isinstance(gui_object, QtWidgets.QLabel):
                gui_object.setCursorPosition(0)
        except AttributeError:
            gui_object.setPlainText(str(attr_name))
def clear_layout(self, layout):
    """Recursively delete every widget and sub-layout contained in *layout*."""
    logging.debug('gui - gui_functions.py - clear_layout')
    # Iterate in reverse so removing items does not shift pending indices.
    for i in reversed(range(layout.count())):
        item = layout.itemAt(i)
        if isinstance(item, QtWidgets.QWidgetItem):
            # deleteLater() defers destruction to the Qt event loop.
            item.widget().deleteLater()
        elif isinstance(item, QtWidgets.QLayout):
            clear_layout(self, item.layout())
        layout.removeItem(item)
def humansize(self, nbytes):
    """Return *nbytes* formatted as a human-readable size string (e.g. '1.5 KB').

    Divides by 1024 up to petabytes; trailing zeros and the decimal point
    are stripped from the two-decimal representation.
    """
    logging.debug('gui - gui_functions.py - humansize : nbytes ' + str(nbytes))
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    if nbytes == 0:
        return '0 B'
    size = nbytes
    rank = 0
    while size >= 1024 and rank < len(units) - 1:
        size /= 1024.
        rank += 1
    formatted = ('%.2f' % size).rstrip('0').rstrip('.')
    return '%s %s' % (formatted, units[rank])
|
eufarn7sp/egads-gui
|
functions/gui_functions.py
|
Python
|
gpl-3.0
| 32,231
|
[
"NetCDF"
] |
1eea2560df359cfba3a9c08b9732e6202e8a599134e024a973bfeafa5b41ee1e
|
from context import blast
from context import pytest
import os
import glob
def test_make_BLAST_database(tmpdir):
    """
    Test the make_BLAST_database() function

    Function signature::

        make_BLAST_database(fasta_file)
    """
    # NOTE(review): resolved relative to the pytest invocation directory
    # *before* the chdir below -- confirm this holds for all test runners.
    fasta = os.path.abspath("../../example/Antibiotic_markers.fa")
    tmpdir = str(tmpdir)
    os.chdir(str(tmpdir))
    # make_BLAST_database is expected to write its output into ./DBs.
    os.mkdir("DBs")
    blast.make_BLAST_database(fasta)
    infs = glob.glob(os.path.join(tmpdir, "DBs")+"/*")
    # Keep only basenames for the membership checks below.
    tidied = []
    for f in infs:
        tidied.append(f.split('/')[-1])
    # Expect the copied FASTA plus the three BLAST nucleotide index files.
    assert len(infs) == 4
    assert "Antibiotic_markers.fa.nhr" in tidied
    assert "Antibiotic_markers.fa.nin" in tidied
    assert "Antibiotic_markers.fa.nsq" in tidied
    assert "Antibiotic_markers.fa" in tidied
def test_run_BLAST():
    """
    Test the run_BLAST() function

    Function signature::

        run_BLAST(query, database, args)
    """
    # TODO: not yet implemented.
    pass
def test_parse_BLAST():
    """
    Test the parse_BLAST() function

    Function signature::

        parse_BLAST(blast_results, tol, careful)
    """
    # TODO: not yet implemented.
    pass
|
nbenzakour/SeqFindR
|
tests/unittests/test_blast.py
|
Python
|
apache-2.0
| 1,075
|
[
"BLAST"
] |
46cc152e8c502d57d9957208921a762d513591df53af27048d4acc973cf19d94
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ry_to_eV
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.lmto import LMTOCopl, LMTOCtrl
from pymatgen.util.num import round_to_sigfigs
from pymatgen.util.testing import PymatgenTest
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"

# Directory holding the CTRL/COPL fixture files used by these tests.
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "cohp")
# Directory of this test module; tearDown() chdirs back here.
this_dir = os.path.dirname(os.path.abspath(__file__))
class CtrlTest(unittest.TestCase):
    """Tests for LMTOCtrl parsing, serialisation and structure round-trips."""

    def setUp(self):
        # Fixture CTRL files live in the cohp test-file directory.
        os.chdir(test_dir)
        self.ctrl_bise = LMTOCtrl.from_file(filename="CTRL.BiSe")
        # No filename: from_file() uses its default (presumably "CTRL" --
        # TODO confirm against the LMTOCtrl signature).
        self.ctrl_fe = LMTOCtrl.from_file()

    def tearDown(self):
        # Restore the working directory changed in setUp().
        os.chdir(this_dir)

    def test_dict(self):
        # as_dict()/from_dict() must round-trip to an equal object.
        self.assertEqual(self.ctrl_bise, LMTOCtrl.from_dict(self.ctrl_bise.as_dict()))

    def test_structure(self):
        # The parsed structure must match the reference POSCAR, and a CTRL
        # built from that structure plus the header must compare equal.
        bise_poscar = Structure.from_file("POSCAR.BiSe")
        self.assertTrue(bise_poscar.matches(self.ctrl_bise.structure))
        self.assertEqual(
            self.ctrl_bise,
            LMTOCtrl(self.ctrl_bise.structure, header="Bi6Se6, hexagonal"),
        )

    def test_read_write(self):
        # Writing and re-reading a CTRL file must preserve the structure.
        self.ctrl_bise.write_file(filename="CTRL.tmp")
        ctrl_tmp = LMTOCtrl.from_file(filename="CTRL.tmp")
        self.assertTrue(self.ctrl_bise.structure.matches(ctrl_tmp.structure))
        os.remove("CTRL.tmp")
class CoplTest(PymatgenTest):
    """Tests for LMTOCopl: parsing COPL (COHP) output files."""

    def setUp(self):
        # Fixture files live in test_dir; chdir so relative filenames resolve.
        os.chdir(test_dir)
        self.copl_bise = LMTOCopl("COPL.BiSe")
        # Same file, with energies converted from Ry to eV on read.
        self.copl_bise_eV = LMTOCopl(filename="COPL.BiSe", to_eV=True)
        # No filename: relies on LMTOCopl's default (presumably "COPL");
        # this data set is spin-polarized -- TODO confirm default name.
        self.copl_fe = LMTOCopl()

    def tearDown(self):
        os.chdir(this_dir)

    def test_attributes(self):
        self.assertFalse(self.copl_bise.is_spin_polarized)
        self.assertTrue(self.copl_fe.is_spin_polarized)
        self.assertEqual(len(self.copl_bise.energies), 801)
        self.assertEqual(len(self.copl_fe.energies), 801)
        self.assertEqual(len(self.copl_bise.cohp_data), 7)
        self.assertEqual(len(self.copl_fe.cohp_data), 8)

    def test_cohp_data(self):
        # Expected bond lengths and (site_i, site_j) index pairs per label.
        lengths_sites_bise = {
            "Bi1-Se7": (2.882, (0, 6)),
            "Bi1-Se9": (3.102, (0, 8)),
            "Bi3-Se11": (2.917, (2, 10)),
            "Bi3-Se9": (3.050, (2, 8)),
            "Bi5-Bi6": (3.073, (4, 5)),
            "Bi5-Se11": (3.375, (4, 10)),
            "Se7-Se8": (3.364, (6, 7)),
        }
        for bond in self.copl_bise.cohp_data:
            self.assertEqual(self.copl_bise.cohp_data[bond]["length"], lengths_sites_bise[bond][0])
            self.assertEqual(self.copl_bise.cohp_data[bond]["sites"], lengths_sites_bise[bond][1])
        # Fe data: one base label plus 7 numbered duplicates of the same bond.
        labels_fe = ["Fe1-Fe1"] + ["Fe1-Fe1-%d" % i for i in range(1, 8)]
        self.assertEqual(sorted(self.copl_fe.cohp_data.keys()), labels_fe)
        for bond in labels_fe:
            self.assertEqual(self.copl_fe.cohp_data[bond]["length"], 2.482)
            self.assertEqual(self.copl_fe.cohp_data[bond]["sites"], (0, 0))

    def test_energies(self):
        # Fermi levels: Ry for the raw parse, eV for the converted parse.
        self.assertEqual(self.copl_bise.efermi, -0.17223)
        self.assertEqual(self.copl_bise_eV.efermi, -2.3433)
        self.assertEqual(self.copl_fe.efermi, -0.085683)
        # Converting the Ry energies by hand (rounded to 5 significant
        # figures) must reproduce the to_eV=True parse exactly.
        ener_eV = np.array(
            [round_to_sigfigs(energy, 5) for energy in self.copl_bise.energies * Ry_to_eV],
            dtype=float,
        )
        self.assertArrayEqual(ener_eV, self.copl_bise_eV.energies)
        # Same check for an ICOHP curve of one bond.
        copl_icohp = self.copl_bise.cohp_data["Bi1-Se7"]["ICOHP"][Spin.up]
        icohp = np.array([round_to_sigfigs(i, 5) for i in copl_icohp * Ry_to_eV], dtype=float)
        icohp_eV = self.copl_bise_eV.cohp_data["Bi1-Se7"]["ICOHP"][Spin.up]
        self.assertArrayEqual(icohp, icohp_eV)
if __name__ == "__main__":
    # Allow running this test module directly: `python test_lmto.py`.
    unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/io/tests/test_lmto.py
|
Python
|
mit
| 3,983
|
[
"pymatgen"
] |
70342583ae24dc96aef30538c5312c9bfdb9d4b86c8e71628bb2b5dbbb9c9d1a
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Analytical nuclear gradients for 1-electron spin-free x2c method
Ref.
JCP 135, 084114 (2011); DOI:10.1063/1.3624397
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import x2c
def hcore_grad_generator(x2cobj, mol=None):
    '''nuclear gradients of 1-component X2c hcore Hamiltonian (spin-free part only)

    Returns a function hcore_deriv(atm_id) giving the derivative of the X2C
    core Hamiltonian with respect to the coordinates of atom atm_id, in the
    contracted computational basis.
    '''
    if mol is None: mol = x2cobj.mol
    xmol, contr_coeff = x2cobj.get_xmol(mol)
    if x2cobj.basis is not None:
        # A custom x2c basis was supplied: build the projection from the
        # xmol basis onto the original basis via the overlap metric
        # (least-squares fit through a Cholesky solve).
        s22 = xmol.intor_symmetric('int1e_ovlp')
        s21 = gto.intor_cross('int1e_ovlp', xmol, mol)
        contr_coeff = lib.cho_solve(s22, s21)
    get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)

    def hcore_deriv(atm_id):
        # Derivative evaluated in the (possibly uncontracted) xmol basis ...
        h1 = get_h1_xmol(atm_id)
        if contr_coeff is not None:
            # ... contracted back to the computational basis on both indices.
            h1 = lib.einsum('pi,xpq,qj->xij', contr_coeff, h1, contr_coeff)
        return numpy.asarray(h1)
    return hcore_deriv
def gen_sf_hfw(mol, approx='1E'):
    '''Generate a function returning the nuclear derivative of the spin-free
    X2C Foldy-Wouthuysen-transformed core Hamiltonian for one atom.

    If *approx* contains 'ATOM', the decoupling matrix X is assembled
    atom-block-diagonally from isolated atomic integrals; otherwise it is
    built from the full molecular eigenvectors.
    '''
    approx = approx.upper()
    c = lib.param.LIGHT_SPEED
    # Zeroth-order (2*nao x 2*nao) Hamiltonian/metric and their eigenstates.
    h0, s0 = _get_h0_s0(mol)
    e0, c0 = scipy.linalg.eigh(h0, s0)
    aoslices = mol.aoslice_by_atom()
    nao = mol.nao_nr()
    if 'ATOM' in approx:
        # Atomic X: one diagonal block per atom, computed from that atom's
        # own kinetic/overlap integrals and its own nuclear attraction only
        # (z carries the -1 charge sign expected by the rinv integrals).
        x0 = numpy.zeros((nao,nao))
        for ia in range(mol.natm):
            ish0, ish1, p0, p1 = aoslices[ia]
            shls_slice = (ish0, ish1, ish0, ish1)
            t1 = mol.intor('int1e_kin', shls_slice=shls_slice)
            s1 = mol.intor('int1e_ovlp', shls_slice=shls_slice)
            with mol.with_rinv_at_nucleus(ia):
                z = -mol.atom_charge(ia)
                v1 = z * mol.intor('int1e_rinv', shls_slice=shls_slice)
                w1 = z * mol.intor('int1e_prinvp', shls_slice=shls_slice)
            x0[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
    else:
        # Molecular X = Cs * Cl^{-1} from the upper (electronic) half of the
        # eigenvector spectrum: large (cl0) and small (cs0) components.
        cl0 = c0[:nao,nao:]
        cs0 = c0[nao:,nao:]
        x0 = scipy.linalg.solve(cl0.T, cs0.T).T
    # NESC metric and renormalization matrix R; c_fw0 maps the 2-component
    # picture back into the full (large, small) component space.
    s_nesc0 = s0[:nao,:nao] + reduce(numpy.dot, (x0.T, s0[nao:,nao:], x0))
    R0 = x2c._get_r(s0[:nao,:nao], s_nesc0)
    c_fw0 = numpy.vstack((R0, numpy.dot(x0, R0)))
    h0_fw_half = numpy.dot(h0, c_fw0)
    get_h1_etc = _gen_first_order_quantities(mol, e0, c0, x0, approx)

    def hcore_deriv(ia):
        # Product rule on c_fw^T h c_fw: two transpose-related terms from
        # the transformation response plus one from the integral derivative.
        h1_ao, s1_ao, e1, c1, x1, s_nesc1, R1, c_fw1 = get_h1_etc(ia)
        hfw1 = lib.einsum('xpi,pj->xij', c_fw1, h0_fw_half)
        hfw1 = hfw1 + hfw1.transpose(0,2,1)
        hfw1 += lib.einsum('pi,xpq,qj->xij', c_fw0, h1_ao, c_fw0)
        return hfw1
    return hcore_deriv
def _get_h0_s0(mol):
    """Build the zeroth-order (2*nao x 2*nao) Hamiltonian and metric.

    The upper-left quadrant is the large-component AO block; the lower-right
    quadrant is the small-component block scaled by powers of 1/c.
    Returns the pair (hamiltonian, metric).
    """
    light_speed = lib.param.LIGHT_SPEED
    ovlp = mol.intor_symmetric('int1e_ovlp')
    kin = mol.intor_symmetric('int1e_kin')
    nuc = mol.intor_symmetric('int1e_nuc')
    pnucp = mol.intor_symmetric('int1e_pnucp')
    nao = ovlp.shape[0]
    dim = nao * 2
    # Named slices for the large/small component quadrants.
    large = slice(0, nao)
    small = slice(nao, dim)
    hmat = numpy.zeros((dim, dim), dtype=nuc.dtype)
    smat = numpy.zeros((dim, dim), dtype=nuc.dtype)
    hmat[large, large] = nuc
    hmat[large, small] = kin
    hmat[small, large] = kin
    hmat[small, small] = pnucp * (.25 / light_speed**2) - kin
    smat[large, large] = ovlp
    smat[small, small] = kin * (.5 / light_speed**2)
    return hmat, smat
def _gen_h1_s1(mol):
    '''Generate a function returning the first-order integral matrices
    (h1, m1) in the 2*nao space for the displacement of one atom.

    The ip* integrals carry the derivative on the bra index only; the full
    derivative is assembled from bra term plus its transpose, and the
    operator (Hellmann-Feynman) term enters via rinv integrals centered on
    the moving nucleus.
    '''
    c = lib.param.LIGHT_SPEED
    # Bra-derivative integrals, comp=3 for the x,y,z components.
    s1 = mol.intor('int1e_ipovlp', comp=3)
    t1 = mol.intor('int1e_ipkin', comp=3)
    v1 = mol.intor('int1e_ipnuc', comp=3)
    w1 = mol.intor('int1e_ippnucp', comp=3)
    aoslices = mol.aoslice_by_atom()
    nao = s1.shape[1]
    n2 = nao * 2

    def get_h1_s1(ia):
        h1 = numpy.zeros((3,n2,n2), dtype=v1.dtype)
        m1 = numpy.zeros((3,n2,n2), dtype=v1.dtype)
        ish0, ish1, i0, i1 = aoslices[ia]
        # Operator derivative: the nucleus at ia moves, contributing the
        # negative-charge-weighted rinv derivative integrals ...
        with mol.with_rinv_origin(mol.atom_coord(ia)):
            z = mol.atom_charge(ia)
            rinv1 = -z*mol.intor('int1e_iprinv', comp=3)
            prinvp1 = -z*mol.intor('int1e_ipprinvp', comp=3)
        # ... minus the basis-derivative rows of the full-molecule potential
        # for AOs centered on atom ia (those already move with the atom).
        rinv1[:,i0:i1,:] -= v1[:,i0:i1]
        prinvp1[:,i0:i1,:] -= w1[:,i0:i1]
        for i in range(3):
            # Total derivative = bra term + ket term (transpose); the ip
            # integrals follow the -d/dr bra convention, hence the signs.
            s1cc = numpy.zeros((nao,nao))
            t1cc = numpy.zeros((nao,nao))
            s1cc[i0:i1,:] = -s1[i,i0:i1]
            s1cc[:,i0:i1] -= s1[i,i0:i1].T
            t1cc[i0:i1,:] = -t1[i,i0:i1]
            t1cc[:,i0:i1] -= t1[i,i0:i1].T
            v1cc = rinv1[i] + rinv1[i].T
            w1cc = prinvp1[i] + prinvp1[i].T
            # Same 2x2 block layout as _get_h0_s0.
            h1[i,:nao,:nao] = v1cc
            h1[i,:nao,nao:] = t1cc
            h1[i,nao:,:nao] = t1cc
            h1[i,nao:,nao:] = w1cc * (.25/c**2) - t1cc
            m1[i,:nao,:nao] = s1cc
            m1[i,nao:,nao:] = t1cc * (.5/c**2)
        return h1, m1
    return get_h1_s1
def _gen_first_order_quantities(mol, e0, c0, x0, approx='1E'):
    '''Generate a function computing, for one atom, all first-order pieces
    needed for the FW-Hamiltonian derivative: AO integral derivatives,
    eigenvalue/eigenvector responses, the response of the decoupling matrix
    X, of the NESC metric, of R, and of the FW transformation matrix.
    '''
    c = lib.param.LIGHT_SPEED
    nao = e0.size // 2
    n2 = nao * 2
    # Perturbation-theory denominators e_p - e_q; (near-)degenerate pairs
    # are effectively excluded by pushing their denominator to 1e200.
    epq = e0[:,None] - e0
    degen_mask = abs(epq) < 1e-7
    epq[degen_mask] = 1e200
    cl0 = c0[:nao,nao:]
    # cs0 = c0[nao:,nao:]
    s0 = mol.intor('int1e_ovlp')
    t0 = mol.intor('int1e_kin')
    t0x0 = numpy.dot(t0, x0) * (.5/c**2)
    s_nesc0 = s0[:nao,:nao] + numpy.dot(x0.T, t0x0)
    # Diagonalize the overlap once; everything R-related is handled in the
    # eigenbasis of s0 so that square roots are cheap diagonal scalings.
    w_s, v_s = scipy.linalg.eigh(s0)
    w_sqrt = numpy.sqrt(w_s)
    s_nesc0_vbas = reduce(numpy.dot, (v_s.T, s_nesc0, v_s))
    R0_mid = numpy.einsum('i,ij,j->ij', 1./w_sqrt, s_nesc0_vbas, 1./w_sqrt)
    wr0, vr0 = scipy.linalg.eigh(R0_mid)
    wr0_sqrt = numpy.sqrt(wr0)
    # R0 in v_s basis
    R0 = numpy.dot(vr0/wr0_sqrt, vr0.T)
    R0 *= w_sqrt
    R0 /= w_sqrt[:,None]
    # Transform R0 back
    R0 = reduce(numpy.dot, (v_s, R0, v_s.T))
    get_h1_s1 = _gen_h1_s1(mol)

    def get_first_order(ia):
        h1ao, s1ao = get_h1_s1(ia)
        h1mo = lib.einsum('pi,xpq,qj->xij', c0.conj(), h1ao, c0)
        s1mo = lib.einsum('pi,xpq,qj->xij', c0.conj(), s1ao, c0)
        if 'ATOM' in approx:
            # Atomic approximation: X is frozen, so no eigenvector/X response.
            e1 = c1_ao = x1 = None
            s_nesc1 = lib.einsum('pi,xpq,qj->xij', x0, s1ao[:,nao:,nao:], x0)
            s_nesc1 += s1ao[:,:nao,:nao]
        else:
            # First-order response of the electronic (upper-half) states.
            f1 = h1mo[:,:,nao:] - s1mo[:,:,nao:] * e0[nao:]
            c1 = f1 / -epq[:,nao:]
            e1 = f1[:,nao:]
            # Keep only the (near-)degenerate couplings in e1; everything
            # else belongs to the eigenvector response c1.
            e1[:,~degen_mask[nao:,nao:]] = 0
            c1_ao = lib.einsum('pq,xqi->xpi', c0, c1)
            cl1 = c1_ao[:,:nao]
            cs1 = c1_ao[:,nao:]
            # X1 from dCs = X1*Cl0 + X0*dCl  =>  X1 = (Cs1 - X0*Cl1)*Cl0^{-1}
            tmp = cs1 - lib.einsum('pq,xqi->xpi', x0, cl1)
            x1 = scipy.linalg.solve(cl0.T, tmp.reshape(-1,nao).T)
            x1 = x1.T.reshape(3,nao,nao)
            # NESC metric response: X-response term (symmetrized) plus the
            # integral-derivative terms.
            s_nesc1 = lib.einsum('xpi,pj->xij', x1, t0x0)
            s_nesc1 = s_nesc1 + s_nesc1.transpose(0,2,1)
            s_nesc1 += lib.einsum('pi,xpq,qj->xij', x0, s1ao[:,nao:,nao:], x0)
            s_nesc1 += s1ao[:,:nao,:nao]
        R1 = numpy.empty((3,nao,nao))
        c_fw1 = numpy.empty((3,n2,nao))
        for i in range(3):
            R1[i] = _get_r1((w_sqrt,v_s), s_nesc0_vbas,
                            s1ao[i,:nao,:nao], s_nesc1[i], (wr0_sqrt,vr0))
            c_fw1[i,:nao] = R1[i]
            c_fw1[i,nao:] = numpy.dot(x0, R1[i])
            if 'ATOM' not in approx:
                c_fw1[i,nao:] += numpy.dot(x1[i], R0)
        return h1ao, s1ao, e1, c1_ao, x1, s_nesc1, R1, c_fw1
    return get_first_order
def _get_r1(s0_roots, s_nesc0, s1, s_nesc1, r0_roots):
    # See JCP 135, 084114 (2011); DOI:10.1063/1.3624397, Eq (34)
    # First-order response of the renormalization matrix R, given the
    # precomputed eigendecompositions of s0 (s0_roots) and of the R0
    # mid-matrix (r0_roots).  All intermediates live in the s0 eigenbasis;
    # the result is transformed back at the end.
    w_sqrt, v_s = s0_roots
    w_invsqrt = 1. / w_sqrt
    wr0_sqrt, vr0 = r0_roots
    wr0_invsqrt = 1. / wr0_sqrt
    s1 = reduce(numpy.dot, (v_s.T, s1, v_s))
    s_nesc1 = reduce(numpy.dot, (v_s.T, s_nesc1, v_s))
    # Responses of s^{1/2} and s^{-1/2} via the Sylvester-type identity
    # d(s^{1/2})_{ij} = s1_{ij} / (w_i^{1/2} + w_j^{1/2}) in the eigenbasis.
    s1_sqrt = s1 / (w_sqrt[:,None] + w_sqrt)
    s1_invsqrt = (numpy.einsum('i,ij,j->ij', w_invsqrt**2, s1, w_invsqrt**2)
                  / -(w_invsqrt[:,None] + w_invsqrt))
    # Response of the mid-matrix, symmetrized, then solved in the vr0 basis.
    R1_mid = numpy.dot(s1_invsqrt, s_nesc0) * w_invsqrt
    R1_mid = R1_mid + R1_mid.T
    R1_mid += numpy.einsum('i,ij,j->ij', w_invsqrt, s_nesc1, w_invsqrt)
    R1_mid = reduce(numpy.dot, (vr0.T, R1_mid, vr0))
    R1_mid /= -(wr0_invsqrt[:,None] + wr0_invsqrt)
    R1_mid = numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, R1_mid, wr0_invsqrt**2)
    # Assemble R1 from the chain rule: mid-matrix term plus the two
    # s^{+/-1/2}-response terms, then transform back to the AO basis.
    vr0_wr0_sqrt = vr0 * wr0_invsqrt
    vr0_s0_sqrt = vr0.T * w_sqrt
    vr0_s0_invsqrt = vr0.T * w_invsqrt
    R1 = reduce(numpy.dot, (vr0_s0_invsqrt.T, R1_mid, vr0_s0_sqrt))
    R1 += reduce(numpy.dot, (s1_invsqrt, vr0_wr0_sqrt, vr0_s0_sqrt))
    R1 += reduce(numpy.dot, (vr0_s0_invsqrt.T, vr0_wr0_sqrt.T, s1_sqrt))
    R1 = reduce(numpy.dot, (v_s, R1, v_s.T))
    return R1
if __name__ == '__main__':
    # Finite-difference validation of the analytical gradient.  The speed of
    # light is artificially lowered to 10 a.u. to amplify relativistic
    # effects, and restored afterwards.
    bak = lib.param.LIGHT_SPEED
    lib.param.LIGHT_SPEED = 10

    def get_h(mol):
        # Spin-free X2C core Hamiltonian built directly from AO integrals.
        c = lib.param.LIGHT_SPEED
        t = mol.intor_symmetric('int1e_kin')
        v = mol.intor_symmetric('int1e_nuc')
        s = mol.intor_symmetric('int1e_ovlp')
        w = mol.intor_symmetric('int1e_pnucp')
        return x2c._x2c1e_get_hcore(t, v, w, s, c)

    # Water with the O displaced by +0.0001 Angstrom along z ...
    mol = gto.M(
        verbose = 0,
        atom = [["O" , (0. , 0.     , 0.0001)],
                [1   , (0. , -0.757 , 0.587)],
                [1   , (0. , 0.757  , 0.587)]],
        basis = '3-21g',
    )
    h_1 = get_h(mol)
    # ... and by -0.0001 Angstrom along z.
    mol = gto.M(
        verbose = 0,
        atom = [["O" , (0. , 0.     ,-0.0001)],
                [1   , (0. , -0.757 , 0.587)],
                [1   , (0. , 0.757  , 0.587)]],
        basis = '3-21g',
    )
    h_2 = get_h(mol)
    # Central difference over 0.0002 Angstrom; BOHR converts Angstrom to
    # atomic units so h_ref is d(hcore)/d(z_O) in a.u.
    h_ref = (h_1 - h_2) / 0.0002 * lib.param.BOHR
    mol = gto.M(
        verbose = 0,
        atom = [["O" , (0. , 0.     , 0.   )],
                [1   , (0. , -0.757 , 0.587)],
                [1   , (0. , 0.757  , 0.587)]],
        basis = '3-21g',
    )
    hcore_deriv = gen_sf_hfw(mol)
    # h1[2] is the z component of the gradient for atom 0 (the oxygen).
    h1 = hcore_deriv(0)
    print(abs(h1[2]-h_ref).max())
    lib.param.LIGHT_SPEED = bak
    # Regression fingerprints; both should print values close to zero.
    print(lib.finger(h1) - -1.4618392662849411)
    hcore_deriv = gen_sf_hfw(mol, approx='atom1e')
    h1 = hcore_deriv(0)
    print(lib.finger(h1) - -1.3596826558976405)
|
sunqm/pyscf
|
pyscf/x2c/sfx2c1e_grad.py
|
Python
|
apache-2.0
| 10,187
|
[
"PySCF"
] |
6e09e2fed6aa8ec31155526038fa042ab604b4d2e7f65d09be54bdee314240ca
|
"""Wrappers for ast.Name nodes."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ast
from ..asttools import references as reftools
from ..asttools import name as nametools
from ..asttools import scope as scopetools
from ..asttools import visitor as visitortools
class NameGenerator(visitortools.NodeVisitorIter, nametools.NameVisitorMixin):
    """Visitor which produces Name objects."""

    def visit(self):
        """Yield a Name wrapper for each node found by NameVisitorMixin."""
        raw_nodes = super(NameGenerator, self).visit()
        return (Name(raw_node) for raw_node in raw_nodes)
class Name(object):
    """Wrapper for an ast.Name node for ease of use.

    Exposes the raw node, its first declaration, its source classification,
    and iterables of uses/assignments.  Ordering comparisons are keyed on
    the string token; equality additionally requires the same declaration.
    """

    __slots__ = (
        '_node',
        '_scope',
        '_declaration',
        '_declaration_scope',
        '_source',
    )

    def __init__(self, node):
        """Initialize the wrapper with an ast.Name node.

        Raises:
            TypeError: If *node* is not an ast.Name.
        """
        if not isinstance(node, ast.Name):
            raise TypeError("Node must be an ast.Name.")
        self._node = node
        self._scope = scopetools.parent_scope(self._node)
        self._declaration = nametools.declaration(self._node)
        self._source = nametools.name_source(self._node, self._declaration)
        # Builtins have no declaration site, hence no declaration scope.
        self._declaration_scope = None
        if self._source is not nametools.NAME_SOURCE.BUILTIN:
            self._declaration_scope = scopetools.parent_scope(
                self._declaration,
            )

    @property
    def node(self):
        """Get the raw ast.Name node."""
        return self._node

    @property
    def declaration(self):
        """Get the first declaration of the Name."""
        return self._declaration

    @property
    def source(self):
        """Get the asttools.name.NAME_SOURCE of the Name."""
        return self._source

    @property
    def token(self):
        """Get the string which represents the Name."""
        return self._node.id

    @property
    def uses(self):
        """Get an iterable of all uses of the name.

        If the source is asttools.name.NAME_SOURCE.BUILTIN this iterable will
        contain all uses of the name in the module. Otherwise only uses within
        the lexical scope of the declaration are contained within the iterable.
        """
        search_path = self._declaration_scope
        if search_path is None:
            search_path = reftools.get_top_node(self._node)
        return (
            n for n in NameGenerator(search_path).visit()
            if n == self
        )

    @property
    def assignments(self):
        """Get an iterable of all assignments to a name.

        The scoping rules for this method are identical to that which produces
        an iterable of name uses.
        """
        return (
            n for n in self.uses
            if isinstance(n.node.ctx, ast.Store) or
            isinstance(n.node.ctx, ast.Param)
        )

    @property
    def constant(self):
        """True if name is assigned to only once within its lexical scope."""
        # Built in names are never assigned to. Using == 1 to compensate for
        # those names with 0 assignments.
        return len(tuple(self.assignments)) == 1

    def __repr__(self):
        """Get a string repr of the Name."""
        return '<Name {0} - {1}>'.format(
            self._node.id,
            self._source,
        )

    def __lt__(self, other):
        """Less than for Name which compares the string token.

        Input may be an ast.Name node or another Name object.
        """
        if isinstance(other, ast.Name):
            return self._node.id < other.id
        if not isinstance(other, Name):
            raise TypeError("Other must be a Name object or ast.Name node.")
        return self.token < other.token

    def __gt__(self, other):
        """Greater than for Name which compares the string token.

        Input may be an ast.Name node or another Name object.

        NOTE: previously implemented as ``not self.__lt__(other)``, which is
        >= rather than > and made ``a > a`` True for equal tokens.  Fixed to
        a strict token comparison, symmetric with __lt__.
        """
        if isinstance(other, ast.Name):
            return self._node.id > other.id
        if not isinstance(other, Name):
            raise TypeError("Other must be a Name object or ast.Name node.")
        return self.token > other.token

    def __eq__(self, other):
        """Equal for Name which compares the string token and declaration.

        Input may be an ast.Name node or another Name object.
        """
        if isinstance(other, ast.Name):
            other = Name(other)
        if not isinstance(other, Name):
            raise TypeError("Other must be a Name object or ast.Name node.")
        return (
            self.token == other.token and
            self.declaration == other.declaration
        )

    def __ne__(self, other):
        """Not equal for Name which negates the equal implementation."""
        return not self.__eq__(other)

    def __le__(self, other):
        """Less than equal for Name."""
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        """Greater than equal for Name."""
        return self.__gt__(other) or self.__eq__(other)

    def __hash__(self):
        """Hash for Name which combines the token and declaration."""
        return hash((self.token, self.declaration))
|
kevinconway/pycc
|
pycc/astwrappers/name.py
|
Python
|
apache-2.0
| 5,032
|
[
"VisIt"
] |
4d2bef27306a90447d441bca4211b5d8df7df2e603cb44dfd1fd12d5ac66e213
|
from __future__ import absolute_import, division, print_function
from examples.helpers import authenticate
from trakt import Trakt
from pprint import pprint
import logging
import os
import time
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
    # Configure the Trakt client from environment credentials.
    Trakt.configuration.defaults.client(
        id=os.environ.get('CLIENT_ID'),
        secret=os.environ.get('CLIENT_SECRET')
    )
    # Authenticate (interactive OAuth flow from examples.helpers).
    Trakt.configuration.defaults.oauth.from_response(
        authenticate()
    )
    # Example movie payload (unused; kept for reference):
    # movie = {
    #     'title': 'Guardians of the Galaxy',
    #     'year': 2014,
    #     'ids': {
    #         'trakt': 28,
    #         'slug': 'guardians-of-the-galaxy-2014',
    #         'imdb': 'tt2015381',
    #         'tmdb': 118340
    #     }
    # }
    # Identify the show and episode via their cross-service ids.
    show = {
        'title': 'Breaking Bad',
        'year': 2008,
        'ids': {
            'trakt': 1,
            'slug': 'breaking-bad',
            'tvdb': 81189,
            'imdb': 'tt0903747',
            'tmdb': 1396,
            'tvrage': 18164
        }
    }
    episode = {
        'season': 1,
        'number': 1,
        'title': 'Pilot',
        'ids': {
            'trakt': 16,
            'tvdb': 349232,
            'imdb': 'tt0959621',
            'tmdb': 62085,
            'tvrage': 637041
        }
    }
    # - Start watching
    pprint(Trakt['scrobble'].start(
        show=show,
        episode=episode,
        progress=45
    ))
    time.sleep(10)
    # Stop at 90% -- presumably past Trakt's "watched" scrobble threshold;
    # confirm against the Trakt API documentation.
    pprint(Trakt['scrobble'].stop(
        show=show,
        episode=episode,
        progress=90
    ))
|
fuzeman/trakt.py
|
examples/scrobbler.py
|
Python
|
mit
| 1,585
|
[
"Galaxy"
] |
346664e7e0d2c1235f651f454ddacc70ea6fba0a92c05dc5317548e68e876cb7
|
# Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for the PhyloXML and PhyloXMLIO modules.
"""
import os
import tempfile
import unittest
from itertools import chain
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from Bio.Phylo import PhyloXML as PX, PhyloXMLIO
# Example PhyloXML files
EX_APAF = 'PhyloXML/apaf.xml'
EX_BCL2 = 'PhyloXML/bcl_2.xml'
EX_MADE = 'PhyloXML/made_up.xml'
EX_PHYLO = 'PhyloXML/phyloxml_examples.xml'
EX_DOLLO = 'PhyloXML/o_tol_332_d_dollo.xml'
# Temporary file name for Writer tests below.
# tempfile.mktemp() is deprecated and racy (another process can claim the
# name before we use it); mkstemp() atomically creates the file and returns
# a path the Writer tests can safely overwrite.
_dummy_fd, DUMMY = tempfile.mkstemp()
os.close(_dummy_fd)
# ---------------------------------------------------------
# Parser tests
def _test_read_factory(source, count):
"""Generate a test method for read()ing the given source.
The generated function reads an example file to produce a phyloXML object,
then tests for existence of the root node, and counts the number of
phylogenies under the root.
"""
fname = os.path.basename(source)
def test_read(self):
phx = PhyloXMLIO.read(source)
self.assertTrue(phx)
self.assertEqual(len(phx), count[0])
self.assertEqual(len(phx.other), count[1])
test_read.__doc__ = "Read %s to produce a phyloXML object." % fname
return test_read
def _test_parse_factory(source, count):
"""Generate a test method for parse()ing the given source.
The generated function extracts each phylogenetic tree using the parse()
function and counts the total number of trees extracted.
"""
fname = os.path.basename(source)
def test_parse(self):
trees = PhyloXMLIO.parse(source)
self.assertEqual(len(list(trees)), count)
test_parse.__doc__ = "Parse the phylogenies in %s." % fname
return test_parse
def _test_shape_factory(source, shapes):
"""Generate a test method for checking tree shapes.
Counts the branches at each level of branching in a phylogenetic tree, 3
clades deep.
"""
fname = os.path.basename(source)
def test_shape(self):
trees = PhyloXMLIO.parse(source)
for tree, shape_expect in zip(trees, shapes):
self.assertEqual(len(tree.clade), len(shape_expect))
for clade, sub_expect in zip(tree.clade, shape_expect):
self.assertEqual(len(clade), sub_expect[0])
for subclade, len_expect in zip(clade, sub_expect[1]):
self.assertEqual(len(subclade), len_expect)
test_shape.__doc__ = "Check the branching structure of %s." % fname
return test_shape
class ParseTests(unittest.TestCase):
    """Tests for proper parsing of example phyloXML files."""

    # Each attribute below is a generated test method bound to one example
    # file; see the _test_*_factory helpers above.  The count tuples are
    # (number of phylogenies, number of 'other' elements).
    test_read_apaf = _test_read_factory(EX_APAF, (1, 0))
    test_read_bcl2 = _test_read_factory(EX_BCL2, (1, 0))
    test_read_made = _test_read_factory(EX_MADE, (6, 0))
    test_read_phylo = _test_read_factory(EX_PHYLO, (13, 1))
    test_read_dollo = _test_read_factory(EX_DOLLO, (1, 0))

    test_parse_apaf = _test_parse_factory(EX_APAF, 1)
    test_parse_bcl2 = _test_parse_factory(EX_BCL2, 1)
    test_parse_made = _test_parse_factory(EX_MADE, 6)
    test_parse_phylo = _test_parse_factory(EX_PHYLO, 13)
    test_parse_dollo = _test_parse_factory(EX_DOLLO, 1)

    test_shape_apaf = _test_shape_factory(
        EX_APAF,
        # lvl-2 clades, sub-clade counts, lvl-3 clades
        (
            (
                (2, (2, 2)),
                (2, (2, 2)),
            ),
        ),
    )
    test_shape_bcl2 = _test_shape_factory(
        EX_BCL2,
        (
            (
                (2, (2, 2)),
                (2, (2, 2)),
            ),
        ),
    )
    test_shape_phylo = _test_shape_factory(
        EX_PHYLO,
        (
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (0, ()),
                (2, (0, 0)),
            ),
            (
                (3, (0, 0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
            (
                (2, (0, 0)),
                (0, ()),
            ),
        ),
    )
    test_shape_dollo = _test_shape_factory(
        EX_DOLLO,
        (
            (
                (2, (2, 2)),
                (2, (2, 2)),
            ),
        ),
    )
class TreeTests(unittest.TestCase):
"""Tests for instantiation and attributes of each complex type."""
# ENH: also test check_str() regexps wherever they're used
def test_Phyloxml(self):
"""Instantiation of Phyloxml objects."""
phx = PhyloXMLIO.read(EX_PHYLO)
self.assertTrue(isinstance(phx, PX.Phyloxml))
for tree in phx:
self.assertTrue(isinstance(tree, PX.Phylogeny))
for otr in phx.other:
self.assertTrue(isinstance(otr, PX.Other))
def test_Other(self):
"""Instantiation of Other objects."""
phx = PhyloXMLIO.read(EX_PHYLO)
otr = phx.other[0]
self.assertTrue(isinstance(otr, PX.Other))
self.assertEqual(otr.tag, 'alignment')
self.assertEqual(otr.namespace, 'http://example.org/align')
self.assertEqual(len(otr.children), 3)
for child, name, value in zip(otr, ('A', 'B', 'C'), (
'acgtcgcggcccgtggaagtcctctcct', 'aggtcgcggcctgtggaagtcctctcct',
'taaatcgc--cccgtgg-agtccc-cct')):
self.assertEqual(child.tag, 'seq')
self.assertEqual(child.attributes['name'], name)
self.assertEqual(child.value, value)
def test_Phylogeny(self):
"""Instantiation of Phylogeny objects."""
trees = list(PhyloXMLIO.parse(EX_PHYLO))
# Monitor lizards
self.assertEqual(trees[9].name, 'monitor lizards')
self.assertEqual(trees[9].description,
'a pylogeny of some monitor lizards')
self.assertEqual(trees[9].rooted, True)
# Network (unrooted)
tree6 = trees[6]
self.assertEqual(trees[6].name,
'network, node B is connected to TWO nodes: AB and C')
self.assertEqual(trees[6].rooted, False)
def test_Clade(self):
"""Instantiation of Clade objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[6]
clade_ab, clade_c = tree.clade.clades
clade_a, clade_b = clade_ab.clades
for clade, id_source, name, blen in zip(
(clade_ab, clade_a, clade_b, clade_c),
('ab', 'a', 'b', 'c'),
('AB', 'A', 'B', 'C'),
(0.06, 0.102, 0.23, 0.4)):
self.assertTrue(isinstance(clade, PX.Clade))
self.assertEqual(clade.id_source, id_source)
self.assertEqual(clade.name, name)
self.assertAlmostEqual(clade.branch_length, blen)
def test_Annotation(self):
"""Instantiation of Annotation objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[3]
ann = tree.clade[1].sequences[0].annotations[0]
self.assertTrue(isinstance(ann, PX.Annotation))
self.assertEqual(ann.desc, 'alcohol dehydrogenase')
self.assertAlmostEqual(ann.confidence.value, 0.67)
self.assertEqual(ann.confidence.type, 'probability')
def test_BinaryCharacters(self):
"""Instantiation of BinaryCharacters objects."""
# Because we short circult interation, must close handle explicitly
# to avoid a ResourceWarning
handle = open(EX_DOLLO)
tree = next(PhyloXMLIO.parse(handle))
handle.close()
bchars = tree.clade[0, 0].binary_characters
self.assertTrue(isinstance(bchars, PX.BinaryCharacters))
self.assertEqual(bchars.type, 'parsimony inferred')
for name, count, value in (
('gained', 2, ['Cofilin_ADF', 'Gelsolin']),
('lost', 0, []),
('present', 2, ['Cofilin_ADF', 'Gelsolin']),
('absent', None, []),
):
self.assertEqual(getattr(bchars, name+'_count'), count)
self.assertEqual(getattr(bchars, name), value)
# TODO: BranchColor -- see made_up.xml
def test_CladeRelation(self):
"""Instantiation of CladeRelation objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[6]
crel = tree.clade_relations[0]
self.assertTrue(isinstance(crel, PX.CladeRelation))
self.assertEqual(crel.id_ref_0, 'b')
self.assertEqual(crel.id_ref_1, 'c')
self.assertEqual(crel.type, 'network_connection')
def test_Confidence(self):
"""Instantiation of Confidence objects."""
# Because we short circult interation, must close handle explicitly
handle = open(EX_MADE)
tree = next(PhyloXMLIO.parse(handle))
handle.close()
self.assertEqual(tree.name, 'testing confidence')
for conf, type, val in zip(tree.confidences,
('bootstrap', 'probability'),
(89.0, 0.71)):
self.assertTrue(isinstance(conf, PX.Confidence))
self.assertEqual(conf.type, type)
self.assertAlmostEqual(conf.value, val)
self.assertEqual(tree.clade.name, 'b')
self.assertAlmostEqual(tree.clade.width, 0.2)
for conf, val in zip(tree.clade[0].confidences,
(0.9, 0.71)):
self.assertTrue(isinstance(conf, PX.Confidence))
self.assertEqual(conf.type, 'probability')
self.assertAlmostEqual(conf.value, val)
def test_Date(self):
"""Instantiation of Date objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[11]
silurian = tree.clade[0, 0].date
devonian = tree.clade[0, 1].date
ediacaran = tree.clade[1].date
for date, desc, val in zip(
(silurian, devonian, ediacaran),
# (10, 20, 30), # range is deprecated
('Silurian', 'Devonian', 'Ediacaran'),
(425, 320, 600)):
self.assertTrue(isinstance(date, PX.Date))
self.assertEqual(date.unit, 'mya')
# self.assertAlmostEqual(date.range, rang)
self.assertEqual(date.desc, desc)
self.assertAlmostEqual(date.value, val)
def test_Distribution(self):
"""Instantiation of Distribution objects.
Also checks Point type and safe Unicode handling (?).
"""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[10]
hirschweg = tree.clade[0, 0].distributions[0]
nagoya = tree.clade[0, 1].distributions[0]
eth_zurich = tree.clade[0, 2].distributions[0]
san_diego = tree.clade[1].distributions[0]
for dist, desc, lati, longi, alti in zip(
(hirschweg, nagoya, eth_zurich, san_diego),
('Hirschweg, Winterthur, Switzerland',
'Nagoya, Aichi, Japan',
u'ETH Z\xfcrich',
'San Diego'),
(47.481277, 35.155904, 47.376334, 32.880933),
(8.769303, 136.915863, 8.548108, -117.217543),
(472, 10, 452, 104)):
self.assertTrue(isinstance(dist, PX.Distribution))
self.assertEqual(dist.desc, desc)
point = dist.points[0]
self.assertTrue(isinstance(point, PX.Point))
self.assertEqual(point.geodetic_datum, 'WGS84')
self.assertEqual(point.lat, lati)
self.assertEqual(point.long, longi)
self.assertEqual(point.alt, alti)
def test_DomainArchitecture(self):
"""Instantiation of DomainArchitecture objects.
Also checks ProteinDomain type.
"""
# Because we short circult interation, must close handle explicitly
handle = open(EX_APAF)
tree = next(PhyloXMLIO.parse(handle))
handle.close()
clade = tree.clade[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
darch = clade.sequences[0].domain_architecture
self.assertTrue(isinstance(darch, PX.DomainArchitecture))
self.assertEqual(darch.length, 1249)
for domain, start, end, conf, value in zip(darch.domains,
(6, 109, 605, 647, 689, 733, 872, 993, 1075, 1117, 1168),
(90, 414, 643, 685, 729, 771, 910, 1031, 1113, 1155, 1204),
(7.0e-26, 7.2e-117, 2.4e-6, 1.1e-12, 2.4e-7, 4.7e-14, 2.5e-8,
4.6e-6, 6.3e-7, 1.4e-7, 0.3),
('CARD', 'NB-ARC', 'WD40', 'WD40', 'WD40', 'WD40', 'WD40',
'WD40', 'WD40', 'WD40', 'WD40')):
self.assertTrue(isinstance(domain, PX.ProteinDomain))
self.assertEqual(domain.start + 1, start)
self.assertEqual(domain.end, end)
self.assertAlmostEqual(domain.confidence, conf)
self.assertEqual(domain.value, value)
def test_Events(self):
"""Instantiation of Events objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[4]
event_s = tree.clade.events
self.assertTrue(isinstance(event_s, PX.Events))
self.assertEqual(event_s.speciations, 1)
event_d = tree.clade[0].events
self.assertTrue(isinstance(event_d, PX.Events))
self.assertEqual(event_d.duplications, 1)
def test_Polygon(self):
"""Instantiation of Polygon objects."""
tree = PhyloXMLIO.read(EX_MADE).phylogenies[1]
self.assertEqual(tree.name, 'testing polygon')
dist = tree.clade[0].distributions[0]
for poly in dist.polygons:
self.assertTrue(isinstance(poly, PX.Polygon))
self.assertEqual(len(poly.points), 3)
self.assertEqual(dist.polygons[0].points[0].alt_unit, 'm')
for point, lati, longi, alti in zip(
chain(dist.polygons[0].points, dist.polygons[1].points),
(47.481277, 35.155904, 47.376334, 40.481277, 25.155904,
47.376334),
(8.769303, 136.915863, 8.548108, 8.769303, 136.915863,
7.548108),
(472, 10, 452, 42, 10, 452),
):
self.assertTrue(isinstance(point, PX.Point))
self.assertEqual(point.geodetic_datum, 'WGS84')
self.assertEqual(point.lat, lati)
self.assertEqual(point.long, longi)
self.assertEqual(point.alt, alti)
def test_Property(self):
"""Instantiation of Property objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[8]
for prop, id_ref, value in zip(
tree.properties,
('id_a', 'id_b', 'id_c'),
('1200', '2300', '200')):
self.assertTrue(isinstance(prop, PX.Property))
self.assertEqual(prop.id_ref, id_ref)
self.assertEqual(prop.datatype, "xsd:integer")
self.assertEqual(prop.ref, "NOAA:depth")
self.assertEqual(prop.applies_to, "node")
self.assertEqual(prop.unit, "METRIC:m")
self.assertEqual(prop.value, value)
def test_Reference(self):
"""Instantiation of Reference objects."""
# Because we short circult interation, must close handle explicitly
# to avoid a ResourceWarning
handle = open(EX_DOLLO)
tree = next(PhyloXMLIO.parse(handle))
handle.close()
reference = tree.clade[0, 0, 0, 0, 0, 0].references[0]
self.assertTrue(isinstance(reference, PX.Reference))
self.assertEqual(reference.doi, '10.1038/nature06614')
self.assertEqual(reference.desc, None)
def test_Sequence(self):
"""Instantiation of Sequence objects.
Also checks Accession and Annotation types.
"""
trees = list(PhyloXMLIO.parse(EX_PHYLO))
# Simple element with id_source
seq0 = trees[4].clade[1].sequences[0]
self.assertTrue(isinstance(seq0, PX.Sequence))
self.assertEqual(seq0.id_source, 'z')
self.assertEqual(seq0.symbol, 'ADHX')
self.assertEqual(seq0.accession.source, 'ncbi')
self.assertEqual(seq0.accession.value, 'Q17335')
self.assertEqual(seq0.name, 'alcohol dehydrogenase')
self.assertEqual(seq0.annotations[0].ref, 'InterPro:IPR002085')
# More complete elements
seq1 = trees[5].clade[0, 0].sequences[0]
seq2 = trees[5].clade[0, 1].sequences[0]
seq3 = trees[5].clade[1].sequences[0]
for seq, sym, acc, name, mol_seq, ann_refs in zip(
(seq1, seq2, seq3),
('ADHX', 'RT4I1', 'ADHB'),
('P81431', 'Q54II4', 'Q04945'),
('Alcohol dehydrogenase class-3',
'Reticulon-4-interacting protein 1 homolog, '
'mitochondrial precursor',
'NADH-dependent butanol dehydrogenase B'),
('TDATGKPIKCMAAIAWEAKKPLSIEEVEVAPPKSGEVRIKILHSGVCHTD',
'MKGILLNGYGESLDLLEYKTDLPVPKPIKSQVLIKIHSTSINPLDNVMRK',
'MVDFEYSIPTRIFFGKDKINVLGRELKKYGSKVLIVYGGGSIKRNGIYDK'),
(("EC:1.1.1.1", "GO:0004022"),
("GO:0008270", "GO:0016491"),
("GO:0046872", "KEGG:Tetrachloroethene degradation")),
):
self.assertTrue(isinstance(seq, PX.Sequence))
self.assertEqual(seq.symbol, sym)
self.assertEqual(seq.accession.source, 'UniProtKB')
self.assertEqual(seq.accession.value, acc)
self.assertEqual(seq.name, name)
self.assertEqual(seq.mol_seq.value, mol_seq)
self.assertEqual(seq.annotations[0].ref, ann_refs[0])
self.assertEqual(seq.annotations[1].ref, ann_refs[1])
def test_SequenceRelation(self):
"""Instantiation of SequenceRelation objects."""
tree = list(PhyloXMLIO.parse(EX_PHYLO))[4]
for seqrel, id_ref_0, id_ref_1, type in zip(
tree.sequence_relations,
('x', 'x', 'y'), ('y', 'z', 'z'),
('paralogy', 'orthology', 'orthology')):
self.assertTrue(isinstance(seqrel, PX.SequenceRelation))
self.assertEqual(seqrel.id_ref_0, id_ref_0)
self.assertEqual(seqrel.id_ref_1, id_ref_1)
self.assertEqual(seqrel.type, type)
def test_Taxonomy(self):
"""Instantiation of Taxonomy objects.
Also checks Id type.
"""
trees = list(PhyloXMLIO.parse(EX_PHYLO))
# Octopus
tax5 = trees[5].clade[0, 0].taxonomies[0]
self.assertTrue(isinstance(tax5, PX.Taxonomy))
self.assertEqual(tax5.id.value, '6645')
self.assertEqual(tax5.id.provider, 'NCBI')
self.assertEqual(tax5.code, 'OCTVU')
self.assertEqual(tax5.scientific_name, 'Octopus vulgaris')
# Nile monitor
tax9 = trees[9].clade[0].taxonomies[0]
self.assertTrue(isinstance(tax9, PX.Taxonomy))
self.assertEqual(tax9.id.value, '62046')
self.assertEqual(tax9.id.provider, 'NCBI')
self.assertEqual(tax9.scientific_name, 'Varanus niloticus')
self.assertEqual(tax9.common_names[0], 'Nile monitor')
self.assertEqual(tax9.rank, 'species')
def test_Uri(self):
    """Instantiation of Uri objects."""
    last_tree = list(PhyloXMLIO.parse(EX_PHYLO))[9]
    link = last_tree.clade.taxonomies[0].uri
    self.assertTrue(isinstance(link, PX.Uri))
    self.assertEqual(link.desc, 'EMBL REPTILE DATABASE')
    self.assertEqual(
        link.value,
        'http://www.embl-heidelberg.de/~uetz/families/Varanidae.html')
# ---------------------------------------------------------
# Serialization tests
class WriterTests(unittest.TestCase):
    """Tests for serialization of objects to phyloXML format.

    Modifies the globally defined filenames in order to run the other parser
    tests on files (re)generated by PhyloXMLIO's own writer.
    """

    def _rewrite_and_call(self, orig_fname, test_cases):
        """Parse, rewrite and retest a phyloXML example file.

        Reads orig_fname, serializes it back out to DUMMY, then re-runs the
        given (TestCase class, [test method names]) pairs; the callers point
        those tests at DUMMY by temporarily reassigning the global filename.
        """
        # Context managers guarantee the handles are closed even when
        # parsing or serialization raises (the originals leaked on error).
        with open(orig_fname, 'r') as infile:
            phx = PhyloXMLIO.read(infile)
        with open(DUMMY, 'w') as outfile:
            PhyloXMLIO.write(phx, outfile)
        for cls, tests in test_cases:
            inst = cls('setUp')
            for test in tests:
                getattr(inst, test)()

    def test_apaf(self):
        """Round-trip parsing and serialization of apaf.xml."""
        global EX_APAF
        orig_fname = EX_APAF
        try:
            EX_APAF = DUMMY
            self._rewrite_and_call(orig_fname, (
                (ParseTests, [
                    'test_read_apaf', 'test_parse_apaf', 'test_shape_apaf']),
                (TreeTests, ['test_DomainArchitecture']),
                ))
        finally:
            EX_APAF = orig_fname

    def test_bcl2(self):
        """Round-trip parsing and serialization of bcl_2.xml."""
        global EX_BCL2
        orig_fname = EX_BCL2
        try:
            EX_BCL2 = DUMMY
            self._rewrite_and_call(orig_fname, (
                (ParseTests, [
                    'test_read_bcl2', 'test_parse_bcl2', 'test_shape_bcl2']),
                (TreeTests, ['test_Confidence']),
                ))
        finally:
            EX_BCL2 = orig_fname

    def test_made(self):
        """Round-trip parsing and serialization of made_up.xml."""
        global EX_MADE
        orig_fname = EX_MADE
        try:
            EX_MADE = DUMMY
            self._rewrite_and_call(orig_fname, (
                (ParseTests, ['test_read_made', 'test_parse_made']),
                (TreeTests, ['test_Confidence', 'test_Polygon']),
                ))
        finally:
            EX_MADE = orig_fname

    def test_phylo(self):
        """Round-trip parsing and serialization of phyloxml_examples.xml."""
        global EX_PHYLO
        orig_fname = EX_PHYLO
        try:
            EX_PHYLO = DUMMY
            self._rewrite_and_call(orig_fname, (
                (ParseTests, [
                    'test_read_phylo', 'test_parse_phylo', 'test_shape_phylo']),
                (TreeTests, [
                    'test_Phyloxml', 'test_Other',
                    'test_Phylogeny', 'test_Clade',
                    'test_Annotation', 'test_CladeRelation',
                    'test_Date', 'test_Distribution',
                    'test_Events', 'test_Property',
                    'test_Sequence', 'test_SequenceRelation',
                    'test_Taxonomy', 'test_Uri',
                    ]),
                ))
        finally:
            EX_PHYLO = orig_fname

    def test_dollo(self):
        """Round-trip parsing and serialization of o_tol_332_d_dollo.xml."""
        global EX_DOLLO
        orig_fname = EX_DOLLO
        try:
            EX_DOLLO = DUMMY
            self._rewrite_and_call(orig_fname, (
                (ParseTests, ['test_read_dollo', 'test_parse_dollo']),
                (TreeTests, ['test_BinaryCharacters']),
                ))
        finally:
            EX_DOLLO = orig_fname
# ---------------------------------------------------------
# Method tests
class MethodTests(unittest.TestCase):
    """Tests for methods on specific classes/objects."""

    def setUp(self):
        self.phyloxml = PhyloXMLIO.read(EX_PHYLO)

    # Type conversions

    def test_clade_to_phylogeny(self):
        """Convert a Clade object to a new Phylogeny."""
        clade = self.phyloxml.phylogenies[0].clade[0]
        tree = clade.to_phylogeny(rooted=True)
        self.assertTrue(isinstance(tree, PX.Phylogeny))

    def test_phylogeny_to_phyloxml(self):
        """Convert a Phylogeny object to a new Phyloxml."""
        tree = self.phyloxml.phylogenies[0]
        doc = tree.to_phyloxml_container()
        self.assertTrue(isinstance(doc, PX.Phyloxml))

    def test_sequence_conversion(self):
        """Round-trip a Sequence through SeqRecord and back."""
        pseq = PX.Sequence(
            type='protein',
            # id_ref=None,
            # id_source=None,
            symbol='ADHX',
            accession=PX.Accession('P81431', source='UniProtKB'),
            name='Alcohol dehydrogenase class-3',
            # location=None,
            mol_seq=PX.MolSeq(
                'TDATGKPIKCMAAIAWEAKKPLSIEEVEVAPPKSGEVRIKILHSGVCHTD'),
            uri=None,
            annotations=[PX.Annotation(ref='EC:1.1.1.1'),
                         PX.Annotation(ref='GO:0004022')],
            domain_architecture=PX.DomainArchitecture(
                length=50,
                domains=[PX.ProteinDomain(*args) for args in (
                    # value, start, end, confidence
                    ('FOO', 0, 5, 7.0e-26),
                    ('BAR', 8, 13, 7.2e-117),
                    ('A-OK', 21, 34, 2.4e-06),
                    ('WD40', 40, 50, 0.3))],
                ))
        srec = pseq.to_seqrecord()
        # TODO: check seqrec-specific traits (see args)
        #   Seq(letters, alphabet), id, name, description, features
        pseq2 = PX.Sequence.from_seqrecord(srec)
        # TODO: check the round-tripped attributes again (pseq2 vs pseq)

    def test_to_alignment(self):
        """Convert a tree's tip sequences to a MultipleSeqAlignment."""
        tree = self.phyloxml.phylogenies[0]
        aln = tree.to_alignment()
        self.assertTrue(isinstance(aln, MultipleSeqAlignment))
        self.assertEqual(len(aln), 0)
        # Add sequences to the terminals
        alphabet = Alphabet.Gapped(Alphabet.generic_dna)
        for tip, seqstr in zip(tree.get_terminals(),
                               ('AA--TTA', 'AA--TTG', 'AACCTTC')):
            tip.sequences.append(PX.Sequence.from_seqrecord(
                SeqRecord(Seq(seqstr, alphabet), id=str(tip))))
        # Check the alignment
        aln = tree.to_alignment()
        self.assertTrue(isinstance(aln, MultipleSeqAlignment))
        self.assertEqual(len(aln), 3)
        self.assertEqual(aln.get_alignment_length(), 7)

    # Syntax sugar

    def test_clade_getitem(self):
        """Clade.__getitem__: get sub-clades by extended indexing."""
        tree = self.phyloxml.phylogenies[3]
        self.assertEqual(tree.clade[0, 0], tree.clade.clades[0].clades[0])
        self.assertEqual(tree.clade[0, 1], tree.clade.clades[0].clades[1])
        self.assertEqual(tree.clade[1], tree.clade.clades[1])
        self.assertEqual(len(tree.clade[:]), len(tree.clade.clades))
        self.assertEqual(len(tree.clade[0, :]),
                         len(tree.clade.clades[0].clades))

    def test_phyloxml_getitem(self):
        """Phyloxml.__getitem__: get phylogenies by name or index."""
        self.assertTrue(self.phyloxml.phylogenies[9] is self.phyloxml[9])
        self.assertTrue(self.phyloxml['monitor lizards'] is self.phyloxml[9])
        self.assertEqual(len(self.phyloxml[:]), len(self.phyloxml))

    def test_events(self):
        """Events: Mapping-type behavior."""
        evts = self.phyloxml.phylogenies[4].clade.events
        # Container behavior: __len__, __contains__
        self.assertEqual(len(evts), 1)
        self.assertEqual('speciations' in evts, True)
        self.assertEqual('duplications' in evts, False)
        # Attribute access: __get/set/delitem__
        self.assertEqual(evts['speciations'], 1)
        self.assertRaises(KeyError, lambda k: evts[k], 'duplications')
        evts['duplications'] = 3
        self.assertEqual(evts.duplications, 3)
        self.assertEqual(len(evts), 2)
        del evts['speciations']
        self.assertEqual(evts.speciations, None)
        self.assertEqual(len(evts), 1)
        # Iteration: __iter__, keys, values, items
        self.assertEqual(list(iter(evts)), ['duplications'])
        self.assertEqual(list(evts.keys()), ['duplications'])
        self.assertEqual(list(evts.values()), [3])
        self.assertEqual(list(evts.items()), [('duplications', 3)])

    # NOTE: renamed from the original misspelling 'test_singlular';
    # unittest discovers any 'test_*' name, so this stays discoverable.
    def test_singular(self):
        """Clade, Phylogeny: Singular properties for plural attributes."""
        conf = PX.Confidence(0.9, 'bootstrap')
        taxo = PX.Taxonomy(rank='genus')
        # Clade.taxonomy, Clade.confidence
        clade = PX.Clade(confidences=[conf], taxonomies=[taxo])
        self.assertEqual(clade.confidence.type, 'bootstrap')
        self.assertEqual(clade.taxonomy.rank, 'genus')
        # raise if len > 1
        clade.confidences.append(conf)
        self.assertRaises(AttributeError, getattr, clade, 'confidence')
        clade.taxonomies.append(taxo)
        self.assertRaises(AttributeError, getattr, clade, 'taxonomy')
        # None if []
        clade.confidences = []
        self.assertEqual(clade.confidence, None)
        clade.taxonomies = []
        self.assertEqual(clade.taxonomy, None)
        # Phylogeny.confidence
        tree = PX.Phylogeny(True, confidences=[conf])
        self.assertEqual(tree.confidence.type, 'bootstrap')
        tree.confidences.append(conf)
        self.assertRaises(AttributeError, getattr, tree, 'confidence')
        tree.confidences = []
        self.assertEqual(tree.confidence, None)

    # Other methods

    def test_color_hex(self):
        """BranchColor: to_hex() method."""
        black = PX.BranchColor(0, 0, 0)
        self.assertEqual(black.to_hex(), '#000000')
        white = PX.BranchColor(255, 255, 255)
        self.assertEqual(white.to_hex(), '#ffffff')
        green = PX.BranchColor(14, 192, 113)
        self.assertEqual(green.to_hex(), '#0ec071')
# ---------------------------------------------------------
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    try:
        # unittest.main() calls sys.exit() when it finishes, so without
        # try/finally the cleanup below would be unreachable dead code.
        unittest.main(testRunner=runner)
    finally:
        # Clean up the temporary file
        if os.path.exists(DUMMY):
            os.remove(DUMMY)
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_PhyloXML.py
|
Python
|
gpl-2.0
| 30,329
|
[
"Biopython",
"Octopus"
] |
19d57a0b4458cc11e29b553cd09b079a0eec9fd707866a4f25a31d63cdd0b736
|
import numpy as np
from matplotlib import pyplot
import rft1d

# --- (0) Parameters ---------------------------------------------------
np.random.seed(0)
nTestStatFields = 3
nNodes = 101
nIterations = 10000
FWHM = 10.0
# RFT calculator for the theoretical survival function:
rftcalc = rft1d.prob.RFTCalculator(STAT='Z', nodes=nNodes, FWHM=FWHM,
                                   n=nTestStatFields)

# --- (1) Simulate Gaussian 1D fields; record the conjunction maximum --
generator = rft1d.random.Generator1D(nTestStatFields, nNodes, FWHM)
Zmax = []
for _ in range(nIterations):
    fields = generator.generate_sample()
    # conjunction: pointwise minimum across fields, then the field maximum
    Zmax.append(fields.min(axis=0).max())
Zmax = np.asarray(Zmax)

# --- (2) Survival functions -------------------------------------------
heights = np.linspace(0.5, 2, 21)
sf = np.array([(Zmax > h).mean() for h in heights])
sfE = rftcalc.sf(heights)  # theoretical

# --- (3) Plot results --------------------------------------------------
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P(z_\mathrm{conj} > u)$', size=20)
ax.legend()
ax.set_title('Conjunction validation (Gaussian fields)', size=20)
pyplot.show()
|
0todd0000/rft1d
|
rft1d/examples/val_conj_0_gauss.py
|
Python
|
gpl-3.0
| 1,159
|
[
"Gaussian"
] |
9895dac88abb2afcb0bb0b5650f1bb12bb465aac0ed2b5855ae8b992a83f86e0
|
# $Id: HDF.py,v 1.3 2005-07-14 01:36:41 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.2 2004/08/02 15:36:04 gosselin
# pyhdf-0.7-1
#
# Revision 1.1 2004/08/02 15:22:59 gosselin
# Initial revision
#
# Author: Andre Gosselin
# Maurice-Lamontagne Institute
# gosselina@dfo-mpo.gc.ca
"""
Basic API (:mod:`pyhdf.HDF`)
============================
A module of the pyhdf package implementing the basic API of the
NCSA HDF4 library.
(see: hdf.ncsa.uiuc.edu)
Introduction
------------
The role of the HDF module is to provide support to other modules of the
pyhdf package. It defines constants specifying file opening modes and
various data types, methods for accessing files, plus a few utility
functions to query library version and check if a file is an HDF one.
It should be noted that, among the modules of the pyhdf package, SD is
special in the sense that it is self-contained and does not need support
from the HDF module. For example, SD provides its own file opening and
closing methods, whereas VS uses methods of the HDF.HDF class for that.
Functions and classes summary
-----------------------------
The HDF module provides the following classes.
HC
The HC class holds constants defining opening modes and
various data types.
HDF
The HDF class provides methods to open and close an HDF file,
and return instances of the major HDF APIs (except SD).
To instantiate an HDF class, call the HDF() constructor.
methods:
constructors:
HDF() open an HDF file, creating the file if necessary,
and return an HDF instance
vstart() initialize the VS (Vdata) API over the HDF file and
return a VS instance
vgstart() initialize the V (Vgroup) interface over the HDF file
and return a V instance.
closing file
close() close the HDF file
inquiry
getfileversion() return info about the version of the HDF file
The HDF module also offers the following functions.
inquiry
getlibversion() return info about the version of the library
ishdf() determine whether a file is an HDF file
"""
import os, sys, types
from . import hdfext as _C
from .six.moves import xrange
from .HC import HC
# NOTE: The vstart() and vgstart() modules need to access the
# VS and V modules, resp. We could simply import those
# two modules, but then they would always be loaded and this
# may not be what the user wants. Instead of forcing the
# systematic import, we import the package `pyhdf',
# and access the needed constructors by writing
# 'pyhdf.VS.VS()' and 'pyhdf.V.V()'. Until the VS or
# V modules are imported, those statements will give an
# error (undefined attribute). Once the user has imported
# the modules, the error will disappear.
import pyhdf
from .error import HDF4Error, _checkErr
# List of names we want to be imported by an "from pyhdf.HDF import *"
# statement
__all__ = ['HDF', 'HDF4Error',
'HC',
'getlibversion', 'ishdf']
def getlibversion():
    """Return version info for the HDF library.

    Takes no argument. Returns a 4-element tuple:
      -major version number (int)
      -minor version number (int)
      -complete library version number (int)
      -additional information (string)

    C library equivalent : Hgetlibversion
    """
    result = _C.Hgetlibversion()
    # First element is the C status code; the rest are the version fields.
    _checkErr('getlibversion', result[0], "cannot get lib version")
    return tuple(result[1:])
def ishdf(filename):
    """Determine whether a file is an HDF file.

    Args:
      filename  name of the file to check
    Returns:
      1 if the file is an HDF file, 0 otherwise

    C library equivalent : Hishdf
    """
    # Thin wrapper: the C extension performs the magic-number check itself.
    return _C.Hishdf(filename)
class HDF(object):
    """The HDF class encapsulates the basic HDF functions.
    Its main use is to open and close an HDF file, and return
    instances of the major HDF APIs (except for SD).
    To instantiate an HDF class, call the HDF() constructor. """

    def __init__(self, path, mode=HC.READ, nblocks=0):
        """HDF constructor: open an HDF file, creating the file if
        necessary.

        Args:
          path    name of the HDF file to open
          mode    file opening mode; this mode is a set of binary flags
                  which can be ored together

                      HC.CREATE  combined with HC.WRITE to create file
                                 if it does not exist
                      HC.READ    open file in read-only access (default)
                      HC.TRUNC   if combined with HC.WRITE, overwrite
                                 file if it already exists
                      HC.WRITE   open file in read-write mode; if file
                                 exists it is updated, unless HC.TRUNC is
                                 set, in which case it is erased and
                                 recreated; if file does not exist, an
                                 error is raised unless HC.CREATE is set,
                                 in which case the file is created

                   Note an important difference in the way CREATE is
                   handled by the HDF C library and the pyhdf package.
                   In the C library, CREATE indicates that a new file should
                   always be created, overwriting an existing one if
                   any. For pyhdf, CREATE indicates a new file should be
                   created only if it does not exist, and the overwriting
                   of an already existing file must be explicitly asked
                   for by setting the TRUNC flag.

                   Those differences were introduced so as to harmonize
                   the way files are opened in the pycdf and pyhdf
                   packages. Also, this solves a limitation in the
                   hdf (and netCDF) library, where there is no easy way
                   to implement the frequent requirement that an existent
                   file be opened in read-write mode, or created
                   if it does not exist.

          nblocks  number of data descriptor blocks in a block wit which
                   to create the file; the parameter is ignored if the file
                   is not created; 0 asks to use the default

        Returns:
          an HDF instance

        C library equivalent : Hopen
        """
        # Private attributes:
        #  _id:       file id (NOTE: not compatible with the SD file id)

        # See if file exists.
        exists = os.path.exists(path)

        # Translate the pyhdf flag combination into the single mode value
        # that the C-level Hopen() expects (see docstring for semantics).
        if HC.WRITE & mode:
            if exists:
                if HC.TRUNC & mode:
                    # Emulate truncation by deleting, then re-creating.
                    try:
                        os.remove(path)
                    except Exception as msg:
                        raise HDF4Error(msg)
                    mode = HC.CREATE
                else:
                    mode = HC.WRITE
            else:
                if HC.CREATE & mode:
                    mode = HC.CREATE
                else:
                    raise HDF4Error("HDF: no such file")
        else:
            if exists:
                if mode & HC.READ:
                    mode = HC.READ      # clean mode
                else:
                    raise HDF4Error("HDF: invalid mode")
            else:
                raise HDF4Error("HDF: no such file")

        id = _C.Hopen(path, mode, nblocks)
        _checkErr('HDF', id, "cannot open %s" % path)
        self._id = id

    def __del__(self):
        """Delete the instance, first calling the end() method
        if not already done. """
        # Swallow all errors: __del__ may run at interpreter shutdown,
        # when globals/modules may already be gone.
        try:
            if self._id:
                self.close()
        except:
            pass

    def close(self):
        """Close the HDF file.

        Args:
          no argument
        Returns:
          None

        C library equivalent : Hclose
        """
        _checkErr('close', _C.Hclose(self._id), "cannot close file")
        # Mark as closed so __del__ does not try to close twice.
        self._id = None

    def getfileversion(self):
        """Get file version info.

        Args:
          no argument
        Returns:
          4-element tuple with the following components:
            -major version number (int)
            -minor version number (int)
            -complete library version number (int)
            -additional information (string)

        C library equivalent : Hgetfileversion
        """
        status, major_v, minor_v, release, info = _C.Hgetfileversion(self._id)
        _checkErr('getfileversion', status, "cannot get file version")
        return major_v, minor_v, release, info

    def vstart(self):
        """Initialize the VS API over the file and return a VS instance.

        Args:
          no argument
        Returns:
          VS instance

        C library equivalent : Vstart (in fact: Vinitialize)
        """
        # See note at top of file.
        return pyhdf.VS.VS(self)

    def vgstart(self):
        """Initialize the V API over the file and return a V instance.

        Args:
          no argument
        Returns:
          V instance

        C library equivalent : Vstart (in fact: Vinitialize)
        """
        # See note at top of file.
        return pyhdf.V.V(self)
###########################
# Support functions
###########################
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
# Strip NULL at end
if chrs[-1] == '\0':
del chrs[-1]
return ''.join(chrs)
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/pyhdf/HDF.py
|
Python
|
mit
| 10,551
|
[
"NetCDF"
] |
8e53dc1d1c9b90f638d200a49c4dd1960e166eb6e2cfd194100ddeed828345b5
|
#!/usr/bin/env python
import numpy as np
import scipy.interpolate as interp
from qharv.reel import ascii_out
def get_dsk_amat(floc):
    """ extract A matrix from qmcfinitesize output

    k->0 behavior of 3D structure factor S(k) is fitted to a Gaussian
      S(k) = k^T A k

    Args:
      floc (str): location of qmcfinitesize output
    Returns:
      np.array: A matrix (3x3)
    """
    mm = ascii_out.read(floc)
    axis_index = {'x': 0, 'y': 1, 'z': 2}
    amat = np.zeros([3, 3])
    # Read the six independent entries (diagonal first, then upper
    # triangle) — the read order matters for the sequential parser.
    for key in ('a_xx', 'a_yy', 'a_zz', 'a_xy', 'a_xz', 'a_yz'):
        row, col = (axis_index[axis] for axis in key.split('_')[-1])
        amat[row, col] = ascii_out.name_sep_val(mm, key)
    # Symmetrize: mirror the upper triangle into the lower one.
    for row in range(3):
        for col in range(row):
            amat[row, col] = amat[col, row]
    return amat
def get_volume(fout):
    """Return the simulation-cell volume parsed from a qmcfinitesize output."""
    return ascii_out.name_sep_val(ascii_out.read(fout), 'Vol', pos=1)
def get_data_block(floc, name, nhead=0):
    """Extract a numeric table delimited by #<name>_START#/#<name>_STOP# tags.

    Args:
      floc (str): location of the output file
      name (str): block name, e.g. 'VK' or 'SK_SPLINE'
      nhead (int): number of header lines to skip inside the block
    Returns:
      np.array: 2D float array, one row per data line
    """
    start_tag = '#' + name + '_START#'
    stop_tag = '#' + name + '_STOP#'
    mm = ascii_out.read(floc)
    text = ascii_out.block_text(mm, start_tag, stop_tag)
    lines = text.split('\n')[nhead:-1]  # empty entry after the last \n
    # List comprehension instead of map(): under Python 3, map() returns
    # an iterator, and np.array([<map objects>], dtype=float) raises.
    data = np.array([[float(tok) for tok in line.split()] for line in lines],
                    dtype=float)
    return data
def add_mixed_vint(df2):
    """ add mixed vint (\int vk Sk) column to extrapolated entries

    df2 must have columns ['subdir', 'timestep', 'vint']; every subdir must
    have a timestep=0 (extrapolated) entry and a timestep > 0 entry.

    Args:
      df2 (pd.DataFrame): DMC database; modified in place
    Returns:
      None
    """
    df2['vmixed'] = np.nan
    ts0_sel = (df2.timestep == 0)  # loop-invariant: pure/extrapolated rows
    for subdir in df2.subdir.unique():
        sel = (df2.subdir == subdir)
        # !!!! assume smallest non-zero timestep is best DMC
        min_ts = df2.loc[sel & (~ts0_sel), 'timestep'].min()
        ts1_sel = (df2.timestep == min_ts)
        # get mixed vint entry
        entry = df2.loc[sel & ts1_sel, 'vint']
        assert len(entry) == 1
        vmixed = entry.values[0]
        # Transfer to the pure entry of THIS subdir only. The original
        # assigned with ts0_sel alone, clobbering every subdir's pure
        # entry with whichever subdir happened to be iterated last.
        df2.loc[sel & ts0_sel, 'vmixed'] = vmixed
# ================= reproduce QMCPACK implementation ================= #
# step 1: get long-range Coulomb pair potential vk
def get_vk(fout):
    """ long-range coulomb pair potential """
    kmags, vvals = get_data_block(fout, 'VK').T
    # QMCPACK stores vk divided by the cell volume; multiply it back.
    vvals = vvals * get_volume(fout)
    return kmags, vvals
def get_fvk(fout):
    """ interpolated long-range coulomb pair potential """
    kmags, vvals = get_vk(fout)
    spline = interp.splrep(kmags, vvals)
    return lambda k: interp.splev(k, spline)
# step 2: get raw static structure factor S(k)
def get_dsk(fjson, obs='dsk'):
    """ raw structure factor """
    import pandas as pd
    # All quantities live in row 0 of the JSON table.
    row = pd.read_json(fjson).loc[0]
    kvecs = np.array(row['kvecs'])
    skm = np.array(row['%s_mean' % obs])
    ske = np.array(row['%s_error' % obs])
    return kvecs, skm, ske
# step 3: get sum
def get_vsum(vk, skm, omega):
    """Return (1/omega) * sum_k 0.5 * vk * S(k).

    skm should contain S(k) values at ALL supercell reciprocal vectors used;
    vk must be the same length as skm and NOT divided by volume omega.
    """
    weighted = 0.5 * vk * skm
    return 1 / omega * weighted.sum()
def get_qmcpack_vsum(fjson, fout):
    """Reproduce QMCPACK's finite-size sum: S(k) from fjson, vk spline from fout."""
    kvecs, skm, _ske = get_dsk(fjson)
    kmags = np.linalg.norm(kvecs, axis=1)
    vk = get_fvk(fout)(kmags)
    return get_vsum(vk, skm, get_volume(fout))
# step 4: get sphericall averaged Savg(k) spline
def get_fsk(fout):
    """ interpolated spherically-averaged structure factor """
    kmags, svals = get_data_block(fout, 'SK_SPLINE').T
    spline = interp.splrep(kmags, svals)
    return lambda k: interp.splev(k, spline)
# step 4: get 1D integrand
def get_intx_inty(fout):
    """Return the 1D integrand 0.5 * k^2 * vk * S(k) sampled on the vk grid."""
    sk_of = get_fsk(fout)
    kmags, vvals = get_vk(fout)
    return kmags, 0.5 * kmags ** 2 * vvals * sk_of(kmags)
# step 5: interpolate 1D integrand
def get_fint(fout):
    """Spline of the 1D integrand, padded so it passes through (0, 0)."""
    intx, inty = get_intx_inty(fout)
    # Prepend the k=0 point; the integrand vanishes there.
    xs = np.concatenate([np.array([0.0]), intx])
    ys = np.concatenate([np.array([0.0]), inty])
    spline = interp.splrep(xs, ys)
    return lambda k: interp.splev(k, spline)
# step 6: get integral
def get_vint(fout):
    """Integrate the spherically-averaged integrand: 1/(2 pi^2) * int_0^kmax f."""
    from scipy.integrate import quad
    kmags, _vvals = get_vk(fout)
    integrand = get_fint(fout)
    value = quad(integrand, 0, max(kmags))[0]
    return 1. / (2 * np.pi ** 2) * value
|
Paul-St-Young/solid_hydrogen
|
qmcpack_fs_reader.py
|
Python
|
mit
| 6,461
|
[
"Gaussian",
"QMCPACK"
] |
dc4d2a7aae216124f6b85470fd536a1d802ef8f79fecd0c2ad139ce7ac832379
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import six
import testtools
from neutron.agent.common import config as a_cfg
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
    """Shared fixture: an IptablesFirewallDriver wired to mock iptables."""

    def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP')
        cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        # Stub out shelling to the real binaries.
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        iptables_cls = self.iptables_cls_p.start()
        self.iptables_inst = mock.Mock()
        self.v4filter_inst = mock.Mock()
        self.v6filter_inst = mock.Mock()
        # The same mock serves both 'filter' and 'raw' tables per family,
        # so a single assert_has_calls can cover both.
        self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
                                   'raw': self.v4filter_inst
                                   }
        self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
                                   'raw': self.v6filter_inst
                                   }
        iptables_cls.return_value = self.iptables_inst

        # Canned raw-table dump so conntrack-zone parsing has data to read.
        self.iptables_inst.get_rules_for_table.return_value = (
            RAW_TABLE_OUTPUT.splitlines())
        self.firewall = iptables_firewall.IptablesFirewallDriver()
        self.firewall.iptables = self.iptables_inst
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
    """Return a minimal port dict with one IPv4 and one IPv6 fixed IP."""
    return {
        'device': 'tapfake_dev',
        'mac_address': 'ff:ff:ff:ff:ff:ff',
        'network_id': 'fake_net',
        'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']],
    }
def test_prepare_port_filter_with_no_sg(self):
    """Port with no security group: everything falls through to sg-fallback DROP."""
    port = self._fake_port()
    self.firewall.prepare_port_filter(port)
    # Expected iptables programming, in order: the shared fallback chain,
    # the per-port ingress chain (ifake_dev), the per-port egress chain
    # (ofake_dev) with its anti-spoofing chain (sfake_dev) and DHCP
    # special-casing, then the final sg-chain ACCEPT.
    calls = [mock.call.add_chain('sg-fallback'),
             mock.call.add_rule(
                 'sg-fallback', '-j DROP',
                 comment=ic.UNMATCH_DROP),
             mock.call.remove_chain('sg-chain'),
             mock.call.add_chain('sg-chain'),
             mock.call.add_chain('ifake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-out tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $ifake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-m state --state INVALID -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ifake_dev',
                 '-j $sg-fallback', comment=None),
             mock.call.add_chain('ofake_dev'),
             mock.call.add_rule('FORWARD',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged '
                                '-j $sg-chain', comment=ic.VM_INT_SG),
             mock.call.add_rule('sg-chain',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.SG_TO_VM_SG),
             mock.call.add_rule('INPUT',
                                '-m physdev --physdev-in tapfake_dev '
                                '--physdev-is-bridged -j $ofake_dev',
                                comment=ic.INPUT_TO_SG),
             mock.call.add_chain('sfake_dev'),
             # Anti-spoofing: only the port's own IP/MAC pair may send.
             mock.call.add_rule(
                 'sfake_dev',
                 '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                 '-j RETURN',
                 comment=ic.PAIR_ALLOW),
             mock.call.add_rule(
                 'sfake_dev', '-j DROP',
                 comment=ic.PAIR_DROP),
             # DHCP discovery is allowed out; rogue DHCP replies are dropped.
             mock.call.add_rule(
                 'ofake_dev',
                 '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                 '--sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state RELATED,ESTABLISHED -j RETURN',
                 comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-m state --state INVALID -j DROP', comment=None),
             mock.call.add_rule(
                 'ofake_dev',
                 '-j $sg-fallback',
                 comment=None),
             mock.call.add_rule('sg-chain', '-j ACCEPT')]
    self.v4filter_inst.assert_has_calls(calls)
def test_filter_ipv4_ingress(self):
    """Ingress rule with no protocol: plain RETURN jump."""
    sg_rule = dict(ethertype='IPv4', direction='ingress')
    expected = mock.call.add_rule('ifake_dev', '-j RETURN',
                                  comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_prefix(self):
    """Ingress rule restricted to a source prefix."""
    prefix = FAKE_PREFIX['IPv4']
    sg_rule = dict(ethertype='IPv4', direction='ingress',
                   source_ip_prefix=prefix)
    expected = mock.call.add_rule(
        'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_tcp(self):
    """Ingress TCP rule with no port restriction."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='tcp')
    expected = mock.call.add_rule(
        'ifake_dev', '-p tcp -j RETURN', comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_tcp_prefix(self):
    """Ingress TCP rule restricted to a source prefix."""
    prefix = FAKE_PREFIX['IPv4']
    sg_rule = dict(ethertype='IPv4', direction='ingress',
                   protocol='tcp', source_ip_prefix=prefix)
    expected = mock.call.add_rule(
        'ifake_dev', '-s %s -p tcp -j RETURN' % prefix, comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_icmp(self):
    """Ingress ICMP rule."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='icmp')
    expected = mock.call.add_rule(
        'ifake_dev', '-p icmp -j RETURN', comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_icmp_prefix(self):
    """Ingress ICMP rule restricted to a source prefix."""
    prefix = FAKE_PREFIX['IPv4']
    sg_rule = dict(ethertype='IPv4', direction='ingress',
                   protocol='icmp', source_ip_prefix=prefix)
    expected = mock.call.add_rule(
        'ifake_dev', '-s %s -p icmp -j RETURN' % prefix, comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_tcp_port(self):
    """Ingress TCP rule with a single destination port."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='tcp',
                   port_range_min=10, port_range_max=10)
    expected = mock.call.add_rule(
        'ifake_dev', '-p tcp -m tcp --dport 10 -j RETURN', comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_tcp_mport(self):
    """Ingress TCP rule with a destination port range."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='tcp',
                   port_range_min=10, port_range_max=100)
    expected = mock.call.add_rule(
        'ifake_dev',
        '-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
        comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_tcp_mport_prefix(self):
    """Ingress TCP rule with a port range and a source prefix."""
    prefix = FAKE_PREFIX['IPv4']
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='tcp',
                   port_range_min=10, port_range_max=100,
                   source_ip_prefix=prefix)
    expected = mock.call.add_rule(
        'ifake_dev',
        '-s %s -p tcp -m tcp -m multiport --dports 10:100 -j RETURN'
        % prefix, comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_udp(self):
    """Ingress UDP rule with no port restriction."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='udp')
    expected = mock.call.add_rule(
        'ifake_dev', '-p udp -j RETURN', comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_udp_prefix(self):
    """Ingress UDP rule restricted to a source prefix."""
    prefix = FAKE_PREFIX['IPv4']
    sg_rule = dict(ethertype='IPv4', direction='ingress',
                   protocol='udp', source_ip_prefix=prefix)
    expected = mock.call.add_rule(
        'ifake_dev', '-s %s -p udp -j RETURN' % prefix, comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_udp_port(self):
    """Ingress UDP rule with a single destination port."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='udp',
                   port_range_min=10, port_range_max=10)
    expected = mock.call.add_rule(
        'ifake_dev', '-p udp -m udp --dport 10 -j RETURN', comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_udp_mport(self):
    """Ingress UDP rule with a destination port range."""
    sg_rule = dict(ethertype='IPv4', direction='ingress', protocol='udp',
                   port_range_min=10, port_range_max=100)
    expected = mock.call.add_rule(
        'ifake_dev',
        '-p udp -m udp -m multiport --dports 10:100 -j RETURN',
        comment=None)
    self._test_prepare_port_filter(sg_rule, expected, None)
def test_filter_ipv4_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule('ofake_dev', '-p icmp -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p icmp -m icmp --icmp-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_port(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport(self):
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv4_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv4']
rule = {'ethertype': 'IPv4',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress'}
ingress = mock.call.add_rule('ifake_dev', '-j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p tcp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p ipv6-icmp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def _test_filter_ingress_tcp_min_port_0(self, ethertype):
rule = {'ethertype': ethertype,
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 0,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p tcp -m tcp -m multiport --dports 0:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def test_filter_ingress_tcp_min_port_0_for_ipv4(self):
        # port_range_min of 0 is falsy; ensure it is not treated as unset.
        self._test_filter_ingress_tcp_min_port_0('IPv4')
    def test_filter_ingress_tcp_min_port_0_for_ipv6(self):
        # Same falsy-zero check as the IPv4 variant, for IPv6 rules.
        self._test_filter_ingress_tcp_min_port_0('IPv6')
def test_filter_ipv6_ingress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp'}
ingress = mock.call.add_rule(
'ifake_dev', '-p udp -j RETURN', comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'source_ip_prefix': prefix}
ingress = mock.call.add_rule('ifake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
ingress = mock.call.add_rule('ifake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
ingress = mock.call.add_rule(
'ifake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_ingress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'ingress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
ingress = mock.call.add_rule(
'ifake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
egress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress'}
egress = mock.call.add_rule('ofake_dev', '-j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp'}
egress = mock.call.add_rule(
'ofake_dev', '-p tcp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p tcp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp'}
egress = mock.call.add_rule(
'ofake_dev', '-p ipv6-icmp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev', '-s %s -p ipv6-icmp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_name(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 'echo-request',
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type echo-request '
'-j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_icmp_type_code(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'icmp',
'source_port_range_min': 8,
'source_port_range_max': 0,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p ipv6-icmp -m icmp6 --icmpv6-type 8/0 -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p tcp -m tcp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p tcp -m tcp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_tcp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'tcp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p tcp -m tcp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp'}
egress = mock.call.add_rule(
'ofake_dev', '-p udp -j RETURN', comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'source_ip_prefix': prefix}
egress = mock.call.add_rule('ofake_dev',
'-s %s -p udp -j RETURN' % prefix,
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_port(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 10}
egress = mock.call.add_rule('ofake_dev',
'-p udp -m udp --dport 10 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport(self):
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100}
egress = mock.call.add_rule(
'ofake_dev',
'-p udp -m udp -m multiport --dports 10:100 -j RETURN',
comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
def test_filter_ipv6_egress_udp_mport_prefix(self):
prefix = FAKE_PREFIX['IPv6']
rule = {'ethertype': 'IPv6',
'direction': 'egress',
'protocol': 'udp',
'port_range_min': 10,
'port_range_max': 100,
'source_ip_prefix': prefix}
egress = mock.call.add_rule(
'ofake_dev',
'-s %s -p udp -m udp -m multiport --dports 10:100 '
'-j RETURN' % prefix, comment=None)
ingress = None
self._test_prepare_port_filter(rule, ingress, egress)
    def _test_prepare_port_filter(self,
                                  rule,
                                  ingress_expected_call=None,
                                  egress_expected_call=None):
        """Prepare a filter for a port with one SG rule; verify all calls.

        :param rule: security group rule dict installed on the fake port.
        :param ingress_expected_call: mock.call expected in the per-port
            ingress chain, or None for egress-only rules.
        :param egress_expected_call: mock.call expected in the per-port
            egress chain, or None for ingress-only rules.
        """
        port = self._fake_port()
        ethertype = rule['ethertype']
        prefix = utils.ip_to_cidr(FAKE_IP[ethertype])
        # IPv4 defaults for the filter table and the DHCP discovery rule;
        # both are replaced below when the rule is IPv6.
        filter_inst = self.v4filter_inst
        dhcp_rule = [mock.call.add_rule(
            'ofake_dev',
            '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
            '--sport 68 --dport 67 -j RETURN',
            comment=None)]
        if ethertype == 'IPv6':
            filter_inst = self.v6filter_inst
            # ICMPv6 from the unspecified address (::/128) is allowed per
            # type in ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES.
            dhcp_rule = [mock.call.add_rule('ofake_dev',
                                            '-s ::/128 -d ff02::/16 '
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j RETURN' %
                                            icmp6_type,
                                            comment=None) for icmp6_type
                         in constants.ICMPV6_ALLOWED_UNSPEC_ADDR_TYPES]
        sg = [rule]
        port['security_group_rules'] = sg
        self.firewall.prepare_port_filter(port)
        # Base chain wiring common to every filtered port.
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule('FORWARD',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $sg-chain', comment=ic.VM_INT_SG),
                 mock.call.add_rule('sg-chain',
                                    '-m physdev --physdev-out tapfake_dev '
                                    '--physdev-is-bridged '
                                    '-j $ifake_dev',
                                    comment=ic.SG_TO_VM_SG)
                 ]
        if ethertype == 'IPv6':
            # The firewall's always-allowed ICMPv6 types come first in the
            # ingress chain.
            for icmp6_type in firewall.ICMPV6_ALLOWED_TYPES:
                calls.append(
                    mock.call.add_rule('ifake_dev',
                                       '-p ipv6-icmp -m icmp6 --icmpv6-type '
                                       '%s -j RETURN' %
                                       icmp6_type, comment=None))
        calls += [
            mock.call.add_rule(
                'ifake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None
            )
        ]
        # The SG-rule-specific ingress rule lands between the stateful
        # RETURN and the INVALID drop.
        if ingress_expected_call:
            calls.append(ingress_expected_call)
        calls += [mock.call.add_rule(
            'ifake_dev',
            '-m state --state INVALID -j DROP', comment=None),
            mock.call.add_rule('ifake_dev',
                               '-j $sg-fallback', comment=None),
            mock.call.add_chain('ofake_dev'),
            mock.call.add_rule('FORWARD',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged '
                               '-j $sg-chain', comment=ic.VM_INT_SG),
            mock.call.add_rule('sg-chain',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               comment=ic.SG_TO_VM_SG),
            mock.call.add_rule('INPUT',
                               '-m physdev --physdev-in tapfake_dev '
                               '--physdev-is-bridged -j $ofake_dev',
                               comment=ic.INPUT_TO_SG),
            mock.call.add_chain('sfake_dev'),
            mock.call.add_rule(
                'sfake_dev',
                '-s %s -m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN'
                % prefix,
                comment=ic.PAIR_ALLOW)]
        if ethertype == 'IPv6':
            # The link-local address paired with the port MAC is also an
            # allowed source.
            calls.append(mock.call.add_rule('sfake_dev',
                '-s fe80::fdff:ffff:feff:ffff/128 -m mac '
                '--mac-source FF:FF:FF:FF:FF:FF -j RETURN',
                comment=ic.PAIR_ALLOW))
        calls.append(mock.call.add_rule('sfake_dev', '-j DROP',
                                        comment=ic.PAIR_DROP))
        calls += dhcp_rule
        calls.append(mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                        comment=None))
        if ethertype == 'IPv4':
            # DHCP client traffic (68->67) out is allowed; acting as a DHCP
            # server (67->68) is dropped.
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                comment=None))
        if ethertype == 'IPv6':
            # Outbound router advertisements are dropped, other ICMPv6 is
            # allowed; DHCPv6 client (546->547) allowed, server (547->546)
            # dropped.
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -m icmp6 '
                                            '--icmpv6-type %s -j DROP' %
                                            constants.ICMPV6_TYPE_RA,
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev',
                                            '-p ipv6-icmp -j RETURN',
                                            comment=None))
            calls.append(mock.call.add_rule('ofake_dev', '-p udp -m udp '
                                            '--sport 546 -m udp --dport 547 '
                                            '-j RETURN', comment=None))
            calls.append(mock.call.add_rule(
                'ofake_dev',
                '-p udp -m udp --sport 547 -m udp --dport 546 -j DROP',
                comment=None))
        calls += [
            mock.call.add_rule(
                'ofake_dev',
                '-m state --state RELATED,ESTABLISHED -j RETURN',
                comment=None),
        ]
        # The SG-rule-specific egress rule mirrors the ingress placement.
        if egress_expected_call:
            calls.append(egress_expected_call)
        calls += [mock.call.add_rule(
            'ofake_dev',
            '-m state --state INVALID -j DROP', comment=None),
            mock.call.add_rule('ofake_dev',
                               '-j $sg-fallback', comment=None),
            mock.call.add_rule('sg-chain', '-j ACCEPT')]
        # Compare pairwise first so a mismatch reports the first diverging
        # call instead of an unreadable assert_has_calls failure.
        comb = zip(calls, filter_inst.mock_calls)
        for (l, r) in comb:
            self.assertEqual(l, r)
        filter_inst.assert_has_calls(calls)
    def _test_remove_conntrack_entries(self, ethertype, protocol,
                                       direction):
        """Deleting an SG rule must flush the matching conntrack entries.

        Registers a port with one rule, drops the rule across a
        defer-apply cycle, then asserts the resulting ``conntrack -D``
        invocation for the given ethertype/protocol/direction.
        """
        port = self._fake_port()
        port['security_groups'] = 'fake_sg_id'
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_rule_sg_ids = set(['fake_sg_id'])
        self.firewall.sg_rules['fake_sg_id'] = [
            {'direction': direction, 'ethertype': ethertype,
             'protocol': protocol}]
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_rules['fake_sg_id'] = []
        self.firewall.filter_defer_apply_off()
        # Build the conntrack deletion command expected for this combo:
        # -p only when a protocol was set, family flag, and the port IP on
        # the destination (ingress) or source (egress) side.
        cmd = ['conntrack', '-D']
        if protocol:
            cmd.extend(['-p', protocol])
        if ethertype == 'IPv4':
            cmd.extend(['-f', 'ipv4'])
            if direction == 'ingress':
                cmd.extend(['-d', '10.0.0.1'])
            else:
                cmd.extend(['-s', '10.0.0.1'])
        else:
            cmd.extend(['-f', 'ipv6'])
            if direction == 'ingress':
                cmd.extend(['-d', 'fe80::1'])
            else:
                cmd.extend(['-s', 'fe80::1'])
        # initial data has 1, 2, and 9 in use, CT zone will start at 10.
        cmd.extend(['-w', 10])
        calls = [
            mock.call(cmd, run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_remove_conntrack_entries_for_delete_rule_ipv4(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv4', pro, direction)
def test_remove_conntrack_entries_for_delete_rule_ipv6(self):
for direction in ['ingress', 'egress']:
for pro in [None, 'tcp', 'icmp', 'udp']:
self._test_remove_conntrack_entries(
'IPv6', pro, direction)
    def test_remove_conntrack_entries_for_port_sec_group_change(self):
        """Moving a port to a different SG flushes its conntrack entries.

        Both directions for both of the port's fixed IPs (IPv4 and IPv6)
        must be purged.
        """
        port = self._fake_port()
        port['security_groups'] = ['fake_sg_id']
        self.firewall.filtered_ports[port['device']] = port
        self.firewall.updated_sg_members = set(['tapfake_dev'])
        self.firewall.filter_defer_apply_on()
        # Swap the port's security group during the deferred window.
        new_port = copy.deepcopy(port)
        new_port['security_groups'] = ['fake_sg_id2']
        self.firewall.filtered_ports[port['device']] = new_port
        self.firewall.filter_defer_apply_off()
        calls = [
            # initial data has 1, 2, and 9 in use, CT zone will start at 10.
            mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv4', '-s', '10.0.0.1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1]),
            mock.call(['conntrack', '-D', '-f', 'ipv6', '-s', 'fe80::1',
                       '-w', 10],
                      run_as_root=True, check_exit_code=True,
                      extra_ok_codes=[1])]
        self.utils_exec.assert_has_calls(calls)
def test_user_sg_rules_deduped_before_call_to_iptables_manager(self):
port = self._fake_port()
port['security_group_rules'] = [{'ethertype': 'IPv4',
'direction': 'ingress'}] * 2
self.firewall.prepare_port_filter(port)
rules = [''.join(c[1]) for c in self.v4filter_inst.add_rule.mock_calls]
self.assertEqual(len(set(rules)), len(rules))
    def test_update_delete_port_filter(self):
        """Update then remove a port filter, verifying every iptables call.

        Covers: prepare with an ingress rule, update to an egress rule,
        update/remove of an unknown device (no-ops), and final removal.
        """
        port = self._fake_port()
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'ingress'}]
        self.firewall.prepare_port_filter(port)
        port['security_group_rules'] = [{'ethertype': 'IPv4',
                                         'direction': 'egress'}]
        self.firewall.update_port_filter(port)
        # Unknown devices must be ignored without raising.
        self.firewall.update_port_filter({'device': 'no-exist-device'})
        self.firewall.remove_port_filter(port)
        self.firewall.remove_port_filter({'device': 'no-exist-device'})
        # First section: calls produced by prepare_port_filter (ingress).
        calls = [mock.call.add_chain('sg-fallback'),
                 mock.call.add_rule(
                     'sg-fallback',
                     '-j DROP',
                     comment=ic.UNMATCH_DROP),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ifake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev', '-m state --state INVALID -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # Second section: update_port_filter rebuilds the per-port
                 # chains, now with the egress rule instead of ingress.
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain'),
                 mock.call.add_chain('ifake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-out tapfake_dev '
                     '--physdev-is-bridged -j $ifake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule(
                     'ifake_dev',
                     '-j $sg-fallback', comment=None),
                 mock.call.add_chain('ofake_dev'),
                 mock.call.add_rule(
                     'FORWARD',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $sg-chain',
                     comment=ic.VM_INT_SG),
                 mock.call.add_rule(
                     'sg-chain',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.SG_TO_VM_SG),
                 mock.call.add_rule(
                     'INPUT',
                     '-m physdev --physdev-in tapfake_dev '
                     '--physdev-is-bridged -j $ofake_dev',
                     comment=ic.INPUT_TO_SG),
                 mock.call.add_chain('sfake_dev'),
                 mock.call.add_rule(
                     'sfake_dev',
                     '-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
                     '-j RETURN',
                     comment=ic.PAIR_ALLOW),
                 mock.call.add_rule(
                     'sfake_dev', '-j DROP',
                     comment=ic.PAIR_DROP),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
                     '--sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j $sfake_dev',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 68 --dport 67 -j RETURN',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
                     comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state RELATED,ESTABLISHED -j RETURN',
                     comment=None),
                 mock.call.add_rule('ofake_dev', '-j RETURN',
                                    comment=None),
                 mock.call.add_rule(
                     'ofake_dev',
                     '-m state --state INVALID -j DROP', comment=None),
                 mock.call.add_rule('ofake_dev',
                                    '-j $sg-fallback',
                                    comment=None),
                 mock.call.add_rule('sg-chain', '-j ACCEPT'),
                 # Third section: remove_port_filter tears the chains down.
                 mock.call.remove_chain('ifake_dev'),
                 mock.call.remove_chain('ofake_dev'),
                 mock.call.remove_chain('sfake_dev'),
                 mock.call.remove_chain('sg-chain'),
                 mock.call.add_chain('sg-chain')]
        self.v4filter_inst.assert_has_calls(calls)
def test_remove_unknown_port(self):
port = self._fake_port()
self.firewall.remove_port_filter(port)
# checking no exception occurs
self.assertFalse(self.v4filter_inst.called)
def test_defer_apply(self):
with self.firewall.defer_apply():
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
def test_filter_defer_with_exception(self):
try:
with self.firewall.defer_apply():
raise Exception("same exception")
except Exception:
pass
self.iptables_inst.assert_has_calls([mock.call.defer_apply_on(),
mock.call.defer_apply_off()])
    def _mock_chain_applies(self):
        """Replace the firewall's chain-apply hooks with a recording mock."""
        class CopyingMock(mock.MagicMock):
            """Copies arguments so mutable arguments can be asserted on.

            Copied verbatim from unittest.mock documentation.
            """
            def __call__(self, *args, **kwargs):
                args = copy.deepcopy(args)
                kwargs = copy.deepcopy(kwargs)
                return super(CopyingMock, self).__call__(*args, **kwargs)
        # Need to use CopyingMock because _{setup,remove}_chains_apply are
        # usually called with an argument that's modified between calls
        # (i.e., self.firewall.filtered_ports).
        chain_applies = CopyingMock()
        self.firewall._setup_chains_apply = chain_applies.setup
        self.firewall._remove_chains_apply = chain_applies.remove
        return chain_applies
def test_mock_chain_applies(self):
chain_applies = self._mock_chain_applies()
port_prepare = {'device': 'd1', 'mac_address': 'prepare'}
port_update = {'device': 'd1', 'mac_address': 'update'}
self.firewall.prepare_port_filter(port_prepare)
self.firewall.update_port_filter(port_update)
self.firewall.remove_port_filter(port_update)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({'d1': port_prepare}, {}),
mock.call.remove({'d1': port_prepare}, {}),
mock.call.setup({'d1': port_update}, {}),
mock.call.remove({'d1': port_update}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_need_pre_defer_copy(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
device2port = {port['device']: port}
self.firewall.prepare_port_filter(port)
with self.firewall.defer_apply():
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {}),
mock.call.remove(device2port, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_simple(self):
chain_applies = self._mock_chain_applies()
port = self._fake_port()
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port)
self.firewall.update_port_filter(port)
self.firewall.remove_port_filter(port)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup({}, {})])
def test_defer_chain_apply_coalesce_multiple_ports(self):
chain_applies = self._mock_chain_applies()
port1 = {'device': 'd1', 'mac_address': 'mac1', 'network_id': 'net1'}
port2 = {'device': 'd2', 'mac_address': 'mac2', 'network_id': 'net1'}
device2port = {'d1': port1, 'd2': port2}
with self.firewall.defer_apply():
self.firewall.prepare_port_filter(port1)
self.firewall.prepare_port_filter(port2)
chain_applies.assert_has_calls([mock.call.remove({}, {}),
mock.call.setup(device2port, {})])
def test_ip_spoofing_filter_with_multiple_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': ['10.0.0.1', 'fe80::1', '10.0.0.2']}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.2/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ofake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
def test_ip_spoofing_no_fixed_ips(self):
port = {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': []}
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule('ifake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-m mac --mac-source FF:FF:FF:FF:FF:FF -j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sg-fallback',
comment=None),
mock.call.add_rule('sg-chain', '-j ACCEPT')]
self.v4filter_inst.assert_has_calls(calls)
class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase):
    """Tests for the iptables firewall driver backed by ipset."""

    def setUp(self):
        super(IptablesFirewallEnhancedIpsetTestCase, self).setUp()
        self.firewall.ipset = mock.Mock()
        self.firewall.ipset.get_name.side_effect = (
            ipset_manager.IpsetManager.get_name)
        self.firewall.ipset.set_name_exists.return_value = True

    def _fake_port(self, sg_id=FAKE_SGID):
        """A port bound to one security group with one v4 and one v6 IP."""
        return {'device': 'tapfake_dev',
                'mac_address': 'ff:ff:ff:ff:ff:ff',
                'network_id': 'fake_net',
                'fixed_ips': [FAKE_IP['IPv4'], FAKE_IP['IPv6']],
                'security_groups': [sg_id],
                'security_group_source_groups': [sg_id]}

    def _fake_sg_rule_for_ethertype(self, ethertype, remote_group):
        """One ingress rule referencing a remote security group."""
        return {'direction': 'ingress',
                'remote_group_id': remote_group,
                'ethertype': ethertype}

    def _fake_sg_rules(self, sg_id=FAKE_SGID, remote_groups=None):
        """Rules dict for sg_id: one rule per (ip version, remote group)."""
        remote_groups = remote_groups or {_IPv4: [FAKE_SGID],
                                          _IPv6: [FAKE_SGID]}
        rules = [self._fake_sg_rule_for_ethertype(version, remote)
                 for version, remote_list in six.iteritems(remote_groups)
                 for remote in remote_list]
        return {sg_id: rules}

    def _fake_sg_members(self, sg_ids=None):
        """Members dict: each sg id maps to its own copy of FAKE_IP."""
        members = {}
        for sg_id in (sg_ids or [FAKE_SGID]):
            members[sg_id] = copy.copy(FAKE_IP)
        return members

    def test_prepare_port_filter_with_new_members(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.1', '10.0.0.2'],
                          'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {}
        self.firewall.prepare_port_filter(self._fake_port())
        expected = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.2']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
        ]
        self.firewall.ipset.assert_has_calls(expected, any_order=True)

    def _setup_fake_firewall_members_and_rules(self, firewall):
        """Seed a firewall with identical current and previous SG state."""
        firewall.sg_rules = self._fake_sg_rules()
        firewall.pre_sg_rules = self._fake_sg_rules()
        firewall.sg_members = self._fake_sg_members()
        firewall.pre_sg_members = firewall.sg_members

    def _prepare_rules_and_members_for_removal(self):
        """Make OTHER_SGID share FAKE_SGID's previous member list."""
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.pre_sg_members[OTHER_SGID] = (
            self.firewall.pre_sg_members[FAKE_SGID])

    def test_determine_remote_sgs_to_remove(self):
        self._prepare_rules_and_members_for_removal()
        expected = {_IPv4: set([OTHER_SGID]), _IPv6: set([OTHER_SGID])}
        self.assertEqual(
            expected,
            self.firewall._determine_remote_sgs_to_remove(
                [self._fake_port()]))

    def test_determine_remote_sgs_to_remove_ipv6_unreferenced(self):
        self._prepare_rules_and_members_for_removal()
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID, FAKE_SGID],
                           _IPv6: [FAKE_SGID]})
        expected = {_IPv4: set(), _IPv6: set([OTHER_SGID])}
        self.assertEqual(
            expected,
            self.firewall._determine_remote_sgs_to_remove(
                [self._fake_port()]))

    def test_get_remote_sg_ids_by_ipversion(self):
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [OTHER_SGID]})
        expected = {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}
        self.assertEqual(
            expected,
            self.firewall._get_remote_sg_ids_sets_by_ipversion(
                [self._fake_port()]))

    def test_get_remote_sg_ids(self):
        # duplicate remote groups in the rules must collapse into a set
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID] * 3,
                           _IPv6: [OTHER_SGID] * 3})
        expected = {_IPv4: set([FAKE_SGID]), _IPv6: set([OTHER_SGID])}
        self.assertEqual(
            expected,
            self.firewall._get_remote_sg_ids(self._fake_port()))

    def test_determine_sg_rules_to_remove(self):
        self.firewall.pre_sg_rules = self._fake_sg_rules(sg_id=OTHER_SGID)
        self.assertEqual(
            set([OTHER_SGID]),
            self.firewall._determine_sg_rules_to_remove(
                [self._fake_port()]))

    def test_get_sg_ids_set_for_ports(self):
        sg_ids = set([FAKE_SGID, OTHER_SGID])
        ports = [self._fake_port(sg_id) for sg_id in sg_ids]
        self.assertEqual(
            sg_ids, self.firewall._get_sg_ids_set_for_ports(ports))

    def test_remove_sg_members(self):
        self.firewall.sg_members = self._fake_sg_members(
            [FAKE_SGID, OTHER_SGID])
        to_remove = {_IPv4: set([FAKE_SGID]),
                     _IPv6: set([FAKE_SGID, OTHER_SGID])}
        self.firewall._remove_sg_members(to_remove)
        self.assertIn(OTHER_SGID, self.firewall.sg_members)
        self.assertNotIn(FAKE_SGID, self.firewall.sg_members)

    def test_remove_unused_security_group_info_clears_unused_rules(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.prepare_port_filter(self._fake_port())
        # create another SG which won't be referenced by any filtered port
        shared_rules = self.firewall.sg_rules['fake_sgid']
        self.firewall.pre_sg_rules[OTHER_SGID] = shared_rules
        self.firewall.sg_rules[OTHER_SGID] = shared_rules
        # the cleanup must drop the unreferenced rules
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_rules)

    def test_remove_unused_security_group_info(self):
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
        self.firewall._remove_unused_security_group_info()
        self.assertNotIn(OTHER_SGID, self.firewall.sg_members)

    def test_not_remove_used_security_group_info(self):
        self.firewall.sg_members = {OTHER_SGID: {_IPv4: [], _IPv6: []}}
        self.firewall.pre_sg_members = self.firewall.sg_members
        self.firewall.sg_rules = self._fake_sg_rules(
            remote_groups={_IPv4: [OTHER_SGID], _IPv6: [OTHER_SGID]})
        self.firewall.pre_sg_rules = self.firewall.sg_rules
        self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
        self.firewall._remove_unused_security_group_info()
        self.assertIn(OTHER_SGID, self.firewall.sg_members)

    def test_remove_all_unused_info(self):
        self._setup_fake_firewall_members_and_rules(self.firewall)
        self.firewall.filtered_ports = {}
        self.firewall._remove_unused_security_group_info()
        self.assertFalse(self.firewall.sg_members)
        self.assertFalse(self.firewall.sg_rules)

    def test_single_fallback_accept_rule(self):
        first, second = self._fake_port(), self._fake_port()
        self.firewall._setup_chains_apply(dict(p1=first, p2=second), {})
        accept = mock.call('sg-chain', '-j ACCEPT')
        v4_calls = self.firewall.iptables.ipv4['filter'].add_rule.mock_calls
        v6_calls = self.firewall.iptables.ipv6['filter'].add_rule.mock_calls
        self.assertEqual(1, v4_calls.count(accept))
        self.assertEqual(1, v6_calls.count(accept))

    def test_prepare_port_filter_with_deleted_member(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.1', '10.0.0.3', '10.0.0.4',
                                   '10.0.0.5'],
                          'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.2'],
                          'IPv6': ['fe80::1']}}
        self.firewall.prepare_port_filter(self._fake_port())
        expected = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.3', '10.0.0.4',
                                   '10.0.0.5']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
        self.firewall.ipset.assert_has_calls(expected, True)

    def test_remove_port_filter_with_destroy_ipset_chain(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        port = self._fake_port()
        self.firewall.sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {
            'fake_sgid': {'IPv4': [], 'IPv6': []}}
        self.firewall.prepare_port_filter(port)
        self.firewall.filter_defer_apply_on()
        self.firewall.sg_members = {
            'fake_sgid': {'IPv4': [], 'IPv6': []}}
        self.firewall.pre_sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']}}
        self.firewall.remove_port_filter(port)
        self.firewall.filter_defer_apply_off()
        expected = [
            mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']),
            mock.call.get_name('fake_sgid', 'IPv4'),
            mock.call.set_name_exists('NIPv4fake_sgid'),
            mock.call.get_name('fake_sgid', 'IPv6'),
            mock.call.set_name_exists('NIPv6fake_sgid'),
            mock.call.destroy('fake_sgid', 'IPv4'),
            mock.call.destroy('fake_sgid', 'IPv6')]
        self.firewall.ipset.assert_has_calls(expected, any_order=True)

    def test_prepare_port_filter_with_sg_no_member(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.sg_rules[FAKE_SGID].append(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid2',
             'ethertype': 'IPv4'})
        self.firewall.sg_rules.update()
        self.firewall.sg_members['fake_sgid'] = {
            'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}
        self.firewall.pre_sg_members = {}
        port = self._fake_port()
        port['security_group_source_groups'].append('fake_sgid2')
        self.firewall.prepare_port_filter(port)
        expected = [
            mock.call.set_members('fake_sgid', 'IPv4',
                                  ['10.0.0.1', '10.0.0.2']),
            mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])]
        self.firewall.ipset.assert_has_calls(expected, any_order=True)

    def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self):
        self.firewall.sg_rules = self._fake_sg_rules()
        self.firewall.pre_sg_rules = self._fake_sg_rules()
        self.firewall.ipset_chains = {'IPv4fake_sgid': ['10.0.0.2'],
                                      'IPv6fake_sgid': ['fe80::1']}
        self.firewall.sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}}
        self.firewall.pre_sg_members = {
            'fake_sgid': {'IPv4': ['10.0.0.2'], 'IPv6': ['fe80::1']}}
        self.firewall.sg_rules['fake_sgid'].remove(
            {'direction': 'ingress', 'remote_group_id': 'fake_sgid',
             'ethertype': 'IPv4'})
        self.firewall.sg_rules.update()
        self.firewall._defer_apply = True
        self.firewall.filtered_ports['tapfake_dev'] = self._fake_port()
        self.firewall._pre_defer_filtered_ports = {}
        self.firewall._pre_defer_unfiltered_ports = {}
        self.firewall.filter_defer_apply_off()
        self.firewall.ipset.assert_has_calls(
            [mock.call.destroy('fake_sgid', 'IPv4')], True)

    def test_sg_rule_expansion_with_remote_ips(self):
        other_ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4']
        self.firewall.sg_members = {'fake_sgid': {
            'IPv4': [FAKE_IP['IPv4']] + other_ips,
            'IPv6': [FAKE_IP['IPv6']]}}
        port = self._fake_port()
        rule = self._fake_sg_rule_for_ethertype(_IPv4, FAKE_SGID)
        rules = self.firewall._expand_sg_rule_with_remote_ips(
            rule, port, 'ingress')
        # the port's own IP is excluded; the rest become /32 prefixes
        expected = []
        for ip in other_ips:
            expanded = dict(rule)
            expanded['source_ip_prefix'] = '%s/32' % ip
            expected.append(expanded)
        self.assertEqual(list(rules), expected)

    def test_build_ipv4v6_mac_ip_list(self):
        raw_mac = 'ffff-ff0f-ffff'
        unix_mac = 'FF:FF:FF:0F:FF:FF'
        ipv4 = FAKE_IP['IPv4']
        ipv6 = FAKE_IP['IPv6']
        expected_v4 = [(unix_mac, ipv4)]
        # the v6 list also gains the EUI-64 link-local derived from the MAC
        expected_v6 = [(unix_mac, ipv6),
                       (unix_mac, 'fe80::fdff:ffff:fe0f:ffff')]
        v4_pairs = []
        v6_pairs = []
        self.firewall._build_ipv4v6_mac_ip_list(raw_mac, ipv4,
                                                v4_pairs, v6_pairs)
        self.assertEqual(expected_v4, v4_pairs)
        self.firewall._build_ipv4v6_mac_ip_list(raw_mac, ipv6,
                                                v4_pairs, v6_pairs)
        self.assertEqual(expected_v6, v6_pairs)

    def test_update_ipset_members(self):
        self.firewall.sg_members[FAKE_SGID][_IPv4] = []
        self.firewall.sg_members[FAKE_SGID][_IPv6] = []
        self.firewall._update_ipset_members({constants.IPv4: [FAKE_SGID]})
        self.firewall.ipset.assert_has_calls(
            [mock.call.set_members(FAKE_SGID, constants.IPv4, [])])
class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
    """Conntrack-zone allocation tests for the OVS hybrid driver."""

    def setUp(self):
        super(OVSHybridIptablesFirewallTestCase, self).setUp()
        self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
        # initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
        self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
                              '95c24827-02': 2, 'e804433b-61': 1}

    def test__populate_initial_zone_map(self):
        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)

    def test__generate_device_zone(self):
        # zones 1, 2 and 9 are in use; allocation fills from the top first
        self.assertEqual(10, self.firewall._generate_device_zone('test'))
        # once the top is taken, allocation scans for gaps
        self.firewall._device_zone_map['someport'] = (
            iptables_firewall.MAX_CONNTRACK_ZONES)
        for zone in range(3, 9):
            self.assertEqual(zone,
                             self.firewall._generate_device_zone(zone))
        # 9 and 10 are taken, so the next free zone is 11
        self.assertEqual(11, self.firewall._generate_device_zone('p11'))
        # freeing zone 1 makes it eligible again
        self.firewall._device_zone_map.pop('e804433b-61')
        self.assertEqual(1, self.firewall._generate_device_zone('p1'))
        # exhaust every zone, then check the scan raises
        for zone in range(1, 65536):
            self.firewall._device_zone_map['dev-%s' % zone] = zone
        with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
            self.firewall._find_open_zone()
        # with it full, a fresh request triggers a cleanup and returns 1
        self.assertEqual(1, self.firewall._generate_device_zone('p12'))
        self.assertEqual({'p12': 1}, self.firewall._device_zone_map)

    def test_get_device_zone(self):
        # zones 1, 2 and 9 are in use, so the new device gets 10
        self.assertEqual(
            10, self.firewall.get_device_zone('12345678901234567'))
        # the device id should have been truncated to 11 characters
        self._dev_zone_map.update({'12345678901': 10})
        self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
|
bigswitch/neutron
|
neutron/tests/unit/agent/linux/test_iptables_firewall.py
|
Python
|
apache-2.0
| 85,885
|
[
"FEFF"
] |
edba7db50de50644d6c06fce734373b7888643fdeda773562032e910ae8f9c3e
|
from __future__ import print_function
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import json
import string
import random
import pickle
from time import time
from sys import maxsize
from DBNlogic.nets import DBN, RBM
from DBNlogic.sets import DataSet
from DBNlogic.util import Configuration, heatmap
# password checking:
PASSWORD = 'user'
def authorized(password):
    """Return True when the supplied password matches the server password."""
    # NOTE(review): plain equality is not constant-time; if timing attacks
    # are a concern for this service, switch to hmac.compare_digest.
    return PASSWORD == password
# pending training jobs on the server:
# maps a random 10-char job id -> {'birthday', 'network', 'generator'}
training_jobs = {}
# actual datasets, for caching input examples:
datasets_cache = {}
# datasets available on the server:
datasets_info = {} # datasets shapes (visible-layer size per dataset)
datasets_name = sorted(DataSet.allSets(), key = lambda s: s.lower())
for d in datasets_name:
    try:
        # shape[1] is the number of features (visible units) per example
        datasets_info[d] = DataSet.fromWhatever(d).shape[1]
    except (pickle.UnpicklingError, IndexError):
        # skip files that cannot be loaded as valid datasets
        pass
def index(request):
    """Render the main page with the default configuration and the
    datasets known to the server."""
    return render(request, 'DBNtrain/index.html', {
        'config': Configuration(),   # default configuration
        'datasets': datasets_info,   # available datasets
    })
@csrf_exempt
def train(request):
    """Set up a network to be trained according to
    the parameters in the HTTP request.

    Expects POST parameters: 'pass', 'dataset', 'num_hid_layers',
    'std_dev', 'vis_sz', 'hid_sz_<n>' (one per hidden layer), 'epochs'
    ('inf' for unbounded), 'batch_size', 'learn_rate', 'momentum',
    'spars_target', 'last_job_id'.  Registers a new training job and
    returns its random 10-character id.
    """
    # NOTE(review): this prints the client's password to the server log;
    # probably leftover debugging — confirm before keeping.
    print(request.POST['pass'])
    if not authorized(request.POST['pass']):
        return HttpResponse(status = 401)
    trainset_name = request.POST['dataset']
    trainset = DataSet.fromWhatever(trainset_name)
    try:
        # +1 accounts for the visible layer
        num_layers = 1 + int(request.POST['num_hid_layers'])
    except ValueError:
        num_layers = 1
        # return HttpResponse({'error': 'you haven\'t specified [...]'})
    std_dev = float(request.POST['std_dev'])
    net = DBN(name = trainset_name)
    vis_size = int(request.POST['vis_sz'])
    # stack one RBM per hidden layer; each layer's hidden size becomes
    # the visible size of the next RBM
    for layer in range(1, num_layers):
        hid_size = int(request.POST['hid_sz_' + str(layer)])
        print('creating a', vis_size, 'x', hid_size, 'RBM...')
        net.append(RBM(vis_size, hid_size, std_dev = std_dev))
        vis_size = hid_size # for constructing the next RBM
    epochs = request.POST['epochs']
    config = {
        'max_epochs' : int(epochs if (epochs != 'inf') else maxsize),
        'batch_size' : int(request.POST['batch_size']),
        'learn_rate' : float(request.POST['learn_rate']),
        'momentum' : float(request.POST['momentum']),
        'std_dev' : std_dev,
        'spars_target' : float(request.POST['spars_target'])
    }
    # sanity check for batch size: shrink it until it evenly divides the
    # dataset size (worst case it degrades to 1)
    if len(trainset) % config['batch_size'] != 0:
        print('encountered batch size', config['batch_size'], 'for dataset with', len(trainset), 'examples: adjusting batch size to', end = ' ')
        while len(trainset) % config['batch_size'] != 0:
            config['batch_size'] -= 1
        print(config['batch_size'])
    random_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for i in range(10))
    training_jobs[random_id] = {
        'birthday': time(),
        'network': net,
        'generator': net.learn(trainset, Configuration(**config))
    }
    # delete the old client job that is being replaced (if any):
    last_job = request.POST['last_job_id']
    if last_job in training_jobs:
        del training_jobs[last_job]
    # delete a random pending job older than five hours (18000 s):
    random_old_job = random.choice(list(training_jobs.keys()))
    if time() - training_jobs[random_old_job]['birthday'] > 18000:
        print('deleting old job n.', random_old_job)
        del training_jobs[random_old_job] # risky...
    return HttpResponse(random_id)
@csrf_exempt
def getError(request):
    """Run one training iteration for a particular
    network and return the reconstruction error.

    POST parameters: 'pass' (password), 'job_id', and 'goto_next_rbm'
    ('yes' aborts the currently training RBM and moves to the next one).

    Returns JSON: {'curr_rbm': index or None, 'error': float or None,
    'stop': bool} — 'stop' becomes True once the generator is exhausted.
    """
    if not authorized(request.POST['pass']):
        return HttpResponse(status = 401)
    job = request.POST['job_id']
    train_gen = training_jobs[job]['generator']
    net = training_jobs[job]['network']
    # fix: test for None with identity ("is not None"), not equality,
    # per PEP 8 — "!= None" invokes __ne__ and can misbehave.
    if request.POST['goto_next_rbm'] == 'yes' and net.curr_trainer is not None:
        net.curr_trainer.handbrake = True
    curr_rbm = None
    next_err = None
    stop = False
    try:
        train_info = next(train_gen)
        curr_rbm = train_info['rbm']
        next_err = round(train_info['err'], 3)
    except StopIteration:
        # training finished: drop the generator so the job cannot resume
        del training_jobs[job]['generator']
        stop = True
    response = {
        'curr_rbm': curr_rbm,
        'error': next_err,
        'stop': stop
    }
    return HttpResponse(json.dumps(response), content_type = 'application/json')
def getInput(request):
    """Return a specific input image of a specific dataset.

    A negative 'index' selects a random example; datasets are cached
    after first load.
    """
    if not authorized(request.GET['pass']):
        return HttpResponse(status = 401)
    name = request.GET['dataset']
    if name not in datasets_cache:
        datasets_cache[name] = DataSet.fromWhatever(name)
        print('cached', name, 'dataset')
    dataset = datasets_cache[name]
    idx = int(request.GET['index'])
    if idx < 0:
        idx = random.randint(0, len(dataset) - 1)
    payload = heatmap(dataset[idx].tolist())
    return HttpResponse(json.dumps(payload),
                        content_type = 'application/json')
def getReceptiveField(request):
    """Return the receptive field of a specific
    neuron in a specific layer of a DBN, as heatmap JSON."""
    if 'job_id' not in request.GET:
        return HttpResponse('', content_type = 'application/json')
    network = training_jobs[request.GET['job_id']]['network']
    layer = int(request.GET['layer'])
    neuron = int(request.GET['neuron'])
    field = network.receptiveField(layer, neuron).tolist()
    return HttpResponse(json.dumps(heatmap(field)),
                        content_type = 'application/json')
def analyseReceptiveField(request):
    """Render a standalone HTML page showing the receptive field
    of a specific neuron in a specific layer of a DBN."""
    if 'job_id' not in request.GET:
        return HttpResponse('')
    network = training_jobs[request.GET['job_id']]['network']
    layer = int(request.GET['layer'])
    neuron = int(request.GET['neuron'])
    field = network.receptiveField(layer, neuron).tolist()
    context = {'data': json.dumps(heatmap(field))}
    return render(request, 'DBNtrain/receptive_field.html', context)
def getHistogram(request):
    """Return a histogram of the distribution of the weights
    of a specific RBM inside a specific DBN, as JSON."""
    if 'job_id' not in request.GET:
        return HttpResponse('', content_type = 'application/json')
    network = training_jobs[request.GET['job_id']]['network']
    rbm_index = int(request.GET['rbm'])
    histogram = network[rbm_index].weightsHistogram()
    return HttpResponse(json.dumps(histogram),
                        content_type = 'application/json')
def saveNet(request):
    """Return a Pickle file (protocol 2) containing the desired network."""
    if 'job_id' not in request.GET:
        return HttpResponse('', content_type = 'application/json')
    network = training_jobs[request.GET['job_id']]['network']
    response = HttpResponse(content_type = 'application/octet-stream')
    response['Content-Disposition'] = 'attachment; filename="network.pkl"'
    # serialize straight into the response body
    pickle.dump(network, response, protocol = 2)
    return response
@csrf_exempt
def getArchFromNet(request):
    """Given a pickle file containing a network, return
    the architecture specifications for that network.

    NOTE(review): unfinished stub — it only logs the request and always
    returns an empty JSON object; request.POST['netfile'] is never
    unpickled.  When implementing, remember that unpickling an
    untrusted upload is unsafe (arbitrary code execution).
    """
    print('>>>', request)
    print('>>>', request.POST)
    print('>>>', request.POST['netfile'])
    pass
    response = {}
    json_response = json.dumps(response)
    return HttpResponse(json_response, content_type = 'application/json')
|
ggiuffre/DBNsim
|
DBNsite/DBNtrain/views.py
|
Python
|
mit
| 7,977
|
[
"NEURON"
] |
a77f3812ac786b1ba56db033675129f9664c2e0b45c7c82b177644169019e562
|
#!/usr/bin/env python3
#
# Copyright (C) 2011, 2012, 2014, 2015, 2016, 2017, 2018, 2019, 2020 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import PISM
from petsc4py import PETSc
import os
# Global PISM context: MPI communicator, unit system and config database.
context = PISM.Context()
ctx = context.ctx
config = context.config
# Abort cleanly on Ctrl-C instead of leaving the run in a bad state.
PISM.set_abort_on_sigint(True)
usage = """\
sia_forward.py -i IN.nc [-o file.nc]
where:
-i IN.nc is input file in NetCDF format: contains PISM-written model state
notes:
* -i is required
"""
PISM.show_usage_check_req_opts(ctx.log(), "sia_forward.py", ["-i"], usage)
input_filename = config.get_string("input.file")
# show_usage_check_req_opts enforces -i; bail out if it is still empty.
if len(input_filename) == 0:
    import sys
    sys.exit(1)
# Default output name: prepend "sia_" to the input file's basename.
config.set_string("output.file_name", "sia_" + os.path.basename(input_filename), PISM.CONFIG_DEFAULT)
output_file = config.get_string("output.file_name")
is_regional = PISM.OptionBool("-regional", "Compute SIA using regional model semantics")
# Regional runs use corner-registered grids, whole-sheet runs use centers.
registration = PISM.CELL_CENTER
if is_regional:
    registration = PISM.CELL_CORNER
input_file = PISM.File(ctx.com(), input_filename, PISM.PISM_NETCDF3, PISM.PISM_READONLY)
# Build the computational grid from the input file's "enthalpy" variable.
grid = PISM.IceGrid.FromFile(ctx, input_file, "enthalpy", registration)
config.set_flag("basal_resistance.pseudo_plastic.enabled", False)
enthalpyconverter = PISM.EnthalpyConverter(config)
modeldata = PISM.model.ModelData(grid)
modeldata.setPhysics(enthalpyconverter)
vecs = modeldata.vecs
# Allocate the model-state fields the SIA solver needs.
vecs.add(PISM.model.createIceSurfaceVec(grid))
vecs.add(PISM.model.createIceThicknessVec(grid))
vecs.add(PISM.model.createBedrockElevationVec(grid))
vecs.add(PISM.model.createEnthalpyVec(grid))
vecs.add(PISM.model.createIceMaskVec(grid))
# Read in the PISM state variables that are used directly in the SSA solver
for v in [vecs.thk, vecs.topg, vecs.enthalpy]:
    v.regrid(input_file, critical=True)
# variables mask and surface are computed from the geometry previously read
sea_level = PISM.model.createSeaLevelVec(grid)
sea_level.set(0.0)
gc = PISM.GeometryCalculator(config)
gc.compute(sea_level, vecs.topg, vecs.thk, vecs.mask, vecs.surface_altitude)
# If running in regional mode, load in regional variables
if is_regional:
    vecs.add(PISM.model.createNoModelMask(grid))
    vecs.no_model_mask.regrid(input_file, critical=True)
    # use the stored surface elevation if present, else alias the
    # computed surface under the 'usurfstore' name
    if PISM.util.fileHasVariable(input_file, 'usurfstore'):
        vecs.add(PISM.model.createIceSurfaceStoreVec(grid))
        vecs.usurfstore.regrid(input_file, critical=True)
    else:
        vecs.add(vecs.surface, 'usurfstore')
    solver = PISM.SIAFD_Regional
else:
    solver = PISM.SIAFD
PISM.verbPrintf(2, context.com, "* Computing SIA velocities...\n")
vel_sia = PISM.sia.computeSIASurfaceVelocities(modeldata, siasolver=solver)
PISM.verbPrintf(2, context.com, "* Saving results to %s...\n" % output_file)
# NOTE(review): prepare_output presumably creates the file and writes
# metadata; the handle is closed immediately and the writes below
# append to the same file — confirm against PISM.util docs.
pio = PISM.util.prepare_output(output_file)
pio.close()
# Save time & command line & results
PISM.util.writeProvenance(output_file)
vel_sia.write(output_file)
|
pism/pism
|
examples/python/sia_forward.py
|
Python
|
gpl-3.0
| 3,592
|
[
"NetCDF"
] |
be45a880f32fc9c2dbb0602aed4cb19f9d9b0c039be0422dae085ceeec533074
|
#!/homes/dthybert/software/Python-2.7.5/python
import pysam
import scipy.stats
import sys
import argparse
import math
def Z_score(val, mean, std):
    """Standard score: how many standard deviations `val` lies from `mean`."""
    deviation = float(val) - float(mean)
    return deviation / float(std)
def isGoodRead(read,threshold_pval,dicoStats,bamFile):
    """Classify a pileup read by the geometry of its mate pair.

    Args:
        read: a pysam PileupRead whose mate is mapped on the same
            reference (callers are expected to filter for this).
        threshold_pval: p-value below which the insert size is anomalous.
        dicoStats: {bam file name: [insert mean, insert std]}.
        bamFile: key into dicoStats for the current BAM file.

    Returns:
        -1 if read and mate have the same orientation (inversion signal),
         0 if the pair geometry or insert size looks anomalous,
         1 if the pair is consistent with the insert-size distribution.
    """
    if read.alignment.is_reverse ==read.alignment.mate_is_reverse:#read and mate are in the same orientation so inversion
        return -1
    rend=read.alignment.reference_end
    startMate=read.alignment.next_reference_start
    d=startMate-rend
    if d < 0:
        if not read.alignment.is_reverse:#in this case the read and the mate are not facing each other but are bottom to bottom
            return 0
    delta=abs(startMate-rend)
    mean=dicoStats[bamFile][0]
    std=dicoStats[bamFile][1]
    z=Z_score(delta,mean,std)
    # survival function of |z| under the normal model: small p-value
    # means the observed insert size is an outlier
    p_value = scipy.stats.norm.sf([abs(z)])[0]
    # print delta,mean, std, p_value
    if p_value < threshold_pval:
        return 0
    return 1
def loadStatistics(strconfigFile):
    """Parse a whitespace-separated stats file into {file: [mean, std]}.

    Each data line must look like: <bam_file> <mean> <std>.  Lines
    starting with '#' and blank lines are skipped.

    Fixes over the original: the file handle is closed deterministically
    (it was leaked), the local no longer shadows the `file` builtin, and
    blank lines no longer raise IndexError.

    Args:
        strconfigFile: path to the statistics/config file.

    Returns:
        dict mapping BAM file name -> [mean (float), std (float)].
    """
    statByFile = {}
    with open(strconfigFile) as config_file:
        for line in config_file:
            # skip comments and blank lines
            if line.startswith("#") or not line.strip():
                continue
            fields = line.split()
            statByFile[fields[0]] = [float(fields[1]), float(fields[2])]
    return statByFile
def getPositionInTabCoordinate(start, CurrPosition):
    """Convert a reference coordinate into a 0-based support-table index."""
    offset = CurrPosition - start
    return offset
def addSupport(tabSupport, start, end):
    """Increment tabSupport[start..end] (inclusive) in place.

    Negative bounds leave the table untouched; positions past the end of
    the table are ignored.  Returns the (mutated) table.
    """
    if start < 0 or end < 0:
        return tabSupport
    upper = min(end, len(tabSupport) - 1)
    for pos in range(start, upper + 1):
        tabSupport[pos] += 1
    return tabSupport
def MergeList(lstOfList):
    """Element-wise sum of a list of equally long numeric lists.

    Returns a new list whose i-th element is the sum of the i-th
    elements of every input list.  All input lists are expected to have
    the same length (the original indexed them in lockstep).

    Replaces the original hand-rolled double loop with the idiomatic
    zip/sum form, which runs at C speed.
    """
    return [sum(column) for column in zip(*lstOfList)]
def getWrongRegions(tabGood, tabWrong, start, threshold):
    """Collect regions where the log-ratio of good to wrong support is low.

    Walks the two per-base support tracks in parallel and gathers
    [regStart, regEnd] intervals (in reference coordinates, i.e. offset
    by `start`) over which log(good/wrong) stays <= threshold.

    Bug fix: the original loop bound was `len(tab)`, an undefined name
    that raised NameError at runtime; it now iterates over `tabGood`.

    Args:
        tabGood: per-position counts of concordant read-pair support.
        tabWrong: per-position counts of anomalous read-pair support.
        start: reference coordinate of index 0 of both tracks.
        threshold: log-ratio above which a position ends a region.

    Returns:
        List of [regStart, regEnd] low-confidence intervals.
    """
    i = 0
    CurStart = 0
    CurEnd = 0
    lstPosition = []
    while i < len(tabGood):  # was: len(tab) -> NameError
        good = tabGood[i]
        wrong = tabWrong[i]
        if good == 0:
            ratio = 0.0
        elif wrong == 0:
            # no wrong support: confidence grows with the good count
            ratio = math.log(good)
        else:
            ratio = math.log(float(good) / float(wrong))
        if ratio > threshold:
            # high-confidence position: close any open low-confidence region
            if CurStart != CurEnd:
                lstPosition.append([CurStart + start, i + start])
            CurStart = i
            CurEnd = i
        else:
            CurEnd = i
        i = i + 1
    # flush the trailing open region, if any
    if CurStart != CurEnd:
        lstPosition.append([CurStart + start, CurEnd + start])
    return lstPosition
def getConfidentRegions(tab, start, threshold):
    """Collect regions whose per-position support stays >= threshold.

    Positions with support below the threshold break the current region;
    intervals are returned as [regStart, regEnd] in reference
    coordinates (offset by `start`).
    """
    regions = []
    seg_start = 0
    seg_end = 0
    for idx, support in enumerate(tab):
        if support < threshold:
            # weak position: close any open region and restart here
            if seg_start != seg_end:
                regions.append([seg_start + start, idx + start])
            seg_start = idx
            seg_end = idx
        else:
            seg_end = idx
    # flush the trailing open region, if any
    if seg_start != seg_end:
        regions.append([seg_start + start, seg_end + start])
    return regions
def defineRegionFile(bamFile,dicoStats,chr,start,end,threshold_pval,readLength, bin,buffer,f,mult,map_qual):
    """Scan a BAM pileup over [start, end] on `chr` and accumulate two
    per-position counters: tabSupport (read pairs whose insert size is
    consistent, per isGoodRead) and tabWrong (pairs that fail the test
    but whose insert stays below mean*mult).

    Returns (tabSupport, tabWrong), both of length end-start+buffer;
    `buffer` extends the tables so a mate that maps past `end` can
    still be counted.  Only every f-th pileup column is evaluated.
    """
    samfile = pysam.AlignmentFile(bamFile, "rb")
    size=end-start+buffer
    tabSupport=[0]*(size)
    tabWrong=[0]*(size)
    CurStart=start
    CurEnd=start+bin
    if CurEnd > end:
        CurEnd=end
    while CurStart < end: #Parse the genomic region to analyse
        i=0
        for pileupcolumn in samfile.pileup(chr,CurStart,CurEnd):#the analysis is divided in bins for memory purposes
            position=pileupcolumn.reference_pos
            lst=[]
            # pileup() may return columns outside the requested window: clamp.
            if position < start:
                continue
            if position > end:
                break
            posTab=position-start
            if i % f==0:  # evaluate only every f-th column (sampling)
                for pReads in pileupcolumn.pileups:#analyse each read of a position
                    if pReads.alignment.mate_is_unmapped:
                        continue
                    elif samfile.getrname(pReads.alignment.next_reference_id) != chr:
                        # mate on another chromosome: insert size meaningless
                        continue
                    elif pReads.alignment.mapping_quality < map_qual:
                        continue
                    else:
                        tag=isGoodRead(pReads,threshold_pval,dicoStats,bamFile)
                        if tag==1:#the read satisfies the insert constraint, so count it
                            rend=pReads.alignment.reference_end
                            startMate=pReads.alignment.next_reference_start
                            delta=startMate-rend
                            if delta > 0:# only count when the mate is downstream, so each pair is counted once
                                startTab=getPositionInTabCoordinate(start,pReads.alignment.reference_start)
                                endMate=startMate+readLength
                                endTab=getPositionInTabCoordinate(start,endMate)
                                tabSupport=addSupport(tabSupport,startTab,endTab)
                        else:
                            rend=pReads.alignment.reference_end
                            startMate=pReads.alignment.next_reference_start
                            delta=startMate-rend
                            if delta > 0:
                                # cap at mean*mult so far-away mates are not counted as "wrong"
                                if delta < dicoStats[bamFile][0]*mult:
                                    startTab=getPositionInTabCoordinate(start,pReads.alignment.reference_start)
                                    endMate=startMate+readLength
                                    endTab=getPositionInTabCoordinate(start,endMate)
                                    tabWrong=addSupport(tabWrong,startTab,endTab)
            i=i+1
        CurStart=CurEnd+1
        CurEnd=CurStart+bin
        if CurEnd > end:
            CurEnd=end
    #print tabSupport
    return tabSupport,tabWrong
def saveRegions(outfile, bedList, chr):
    """Write regions as BED-like lines: "<chr>\\t<start>\\t<end>".

    Fixes: the file is closed via a context manager even on error, and
    the loop variable no longer shadows the builtin ``list``.
    """
    with open(outfile, "w") as objFile:
        for region in bedList:
            string = chr + "\t" + str(region[0]) + "\t" + str(region[1])
            objFile.write(string + "\n")
def saveScore(outfile, ListPosition, ListWrong, chr, start):
    """Write one line per position: chr, position, good count, wrong
    count, and the log-ratio score (0.0 if good == 0, log(good) if
    wrong == 0, log(good/wrong) otherwise).

    Fixes: context manager for the output file, enumerate instead of a
    manual index loop.
    """
    with open(outfile, "w") as objFile:
        for i, val in enumerate(ListPosition):
            wrong = ListWrong[i]
            if val == 0:
                ratio = 0.0
            elif wrong == 0:
                ratio = math.log(val)
            else:
                ratio = math.log(float(val) / float(wrong))
            string = chr + "\t" + str(start + i) + "\t" + str(val) + "\t" + str(wrong) + "\t" + str(ratio)
            objFile.write(string + "\n")
def main(param):
    """Analyse every BAM file given in param.lstBamFiles, merge the
    per-file good/wrong support tables, and write three outputs:
    <out>.score (per-position scores), <out>.bed (confident regions)
    and <out>.wrong.bed (wrong regions).

    NOTE: this file uses Python 2 print statements.
    """
    dicoStats=loadStatistics(param.strConfigFile)
    lstBams=param.lstBamFiles.split(",")
    lstLstGood=[]
    lstLstWrong=[]
    for bam in lstBams:
        print "start analysing "+bam+ " file"
        ###Analyse a bam file
        lstG,lstW=defineRegionFile(bam,dicoStats,param.chr,param.start,param.end,param.pvalMate,param.readLength,param.bin, param.buffer,param.frequency,param.mult,param.mapQual)
        lstLstGood.append(lstG)
        lstLstWrong.append(lstW)
        print bam +" file treated"
    ###merge all data from the different bam files
    FinalListGood=MergeList(lstLstGood)
    FinalListWrong=MergeList(lstLstWrong)
    ## save the results
    outScore=param.outFile+".score"
    saveScore(outScore, FinalListGood,FinalListWrong, param.chr, param.start)
    outregions=param.outFile+".bed"
    wrongRegion=param.outFile+".wrong.bed"
    bedList=getConfidentRegions(FinalListGood,param.start,param.threshold)
    saveRegions(outregions,bedList,param.chr)
    wrgonList=getConfidentRegions(FinalListWrong,param.start,param.thrWrong)
    saveRegions(wrongRegion,wrgonList,param.chr)
# Command-line interface: the script analyses one genomic region of one
# chromosome across several BAM libraries and writes score/region files.
parser = argparse.ArgumentParser()
parser.add_argument('--bam_files', action='store', dest='lstBamFiles', default ="", help='liste of bam file to analyse format : bam1,bam2,...,bamN',required=True)
parser.add_argument('--config', action='store', dest='strConfigFile', help='configuration file describing the mean and std of the insert per library', required=True)
parser.add_argument('--out', action='store', dest='outFile', help='output file prefix where the data will be stored ', required=True)
parser.add_argument('--chr', action='store', dest='chr', help='chromosome to analyse',required=True)
parser.add_argument('--start', action='store', dest='start', help='start of the region to analyse',required=True, type=int)
parser.add_argument('--end', action='store', dest='end', help='end of the region to analyse\n',required=True,type=int)
parser.add_argument('--pval_mate', action='store', dest='pvalMate', help='pval threshold that two mates are in a good distance [0.01]', default=0.01, type=float)
parser.add_argument('--threshold', action='store', dest='threshold', help='coverage threshold to define a "good" region [1]', default=1, type=int)
#parser.add_argument('--min_freq', action='store', dest='minFreq', help='frequency threshold of reads satisfying the pair-end constraints to have a good regions [0.1]', default=0.1, type=float)
parser.add_argument('--bin', action='store', dest='bin', help='number of position evaluated before storing in file (this is for performances issues) [30000]', default=30000, type=int)
parser.add_argument('--read_length', action='store', dest='readLength', help='the length of the mapped read [100]', default=100, type=int)
parser.add_argument('--buffer', action='store', dest='buffer', help='the buffer size define the what is the distance after the last postion we can take into account the a the mate of the read treated.Because of the good regions can go beyond the end of the end position. Need to be at least the size of the insert [20000]', default=20000, type=int)
parser.add_argument('--f', action='store', dest='frequency', help='positon will be evaluated at every f nt[100]', default=100, type=int)
parser.add_argument('--thrWrong', action='store', dest='thrWrong', help='all region with a score below the threshold is define as wrong region (log(good/wrong) [0.0]', default=0.0, type=float)
parser.add_argument('--multSize', action='store', dest='mult', help='define the upper size to considere wrongly apparied reads. It multiply by multSize the mean of the insert [10]', default=10, type=int)
parser.add_argument('--map_qual', action='store', dest='mapQual', help='mapping quality threshold [0]', default=0, type=int)
param = parser.parse_args()
main(param)
|
tk2/assembly-eval
|
consistent/GetRegionWellAssembled_v20150722.py
|
Python
|
mit
| 10,308
|
[
"pysam"
] |
0fcb318df843b15d5aec6c17a994fb23bd2a311c1a88bac46900d477be156f18
|
from __future__ import print_function
import re
import argparse
import os.path
import io
parser = argparse.ArgumentParser()
parser.add_argument('version', help='file version')
parser.add_argument('outfile', help='outfile with extension .c/.h')
parser.add_argument('inputs', nargs='*', action='store', help='input filenames')
args = parser.parse_args()
outname = args.outfile.split("/")[-1]
is_c = False
if outname[-2:] == ".c":
is_c = True
pos = outname.find(".")
if pos > 0:
outname = outname[:pos]
include_re = re.compile("^#include (\".*\").*$")
guard_re = re.compile("^#(?:(?:ifndef|define) [A-Z_]+_H_|endif /\* [A-Z_]+_H_ \*/)")
includes = []
print ("Starting amalgamating file "+ args.outfile)
file = io.open(args.outfile, 'w')
file.write(u'''/* THIS IS A SINGLE-FILE DISTRIBUTION CONCATENATED FROM THE OPEN62541 SOURCES
* visit http://open62541.org/ for information about this software
* Git-Revision: %s
*/
/*
* Copyright (C) 2015 the contributors as stated in the AUTHORS file
*
* This file is part of open62541. open62541 is free software: you can
* redistribute it and/or modify it under the terms of the GNU Lesser General
* Public License, version 3 (as published by the Free Software Foundation) with
* a static linking exception as stated in the LICENSE file provided with
* open62541.
*
* open62541 is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*/\n\n''' % args.version)
if not is_c:
file.write(u'''#ifndef %s
#define %s
#ifdef __cplusplus
extern "C" {
#endif\n\n''' % (outname.upper() + u"_H_", outname.upper() + u"_H_") )
if not is_c:
for inc in includes:
file.write(u"#include " + inc + "\n")
else:
file.write(u'''#ifndef UA_DYNAMIC_LINKING
# define UA_DYNAMIC_LINKING
#endif
#ifndef UA_INTERNAL
#define UA_INTERNAL
#endif
\n''')
file.write(u"#include \"" + outname + ".h\"\n")
for fname in args.inputs:
with io.open(fname, encoding="utf8") as infile:
file.write(u"\n/*********************************** amalgamated original file \"" + fname + u"\" ***********************************/\n\n")
print ("Integrating file '" + fname + "'...", end=""),
for line in infile:
inc_res = include_re.match(line)
guard_res = guard_re.match(line)
if not inc_res and not guard_res:
file.write(line)
print ("done."),
if not is_c:
file.write(u'''
#ifdef __cplusplus
} // extern "C"
#endif
#endif /* %s */''' % (outname.upper() + u"_H_"))
file.close()
print ("The size of "+args.outfile+" is "+ str(os.path.getsize(args.outfile))+" Bytes.")
|
joyhope/open62541
|
tools/amalgamate.py
|
Python
|
lgpl-3.0
| 2,785
|
[
"VisIt"
] |
5de490a49ef06a53813a5bf6ca6b846b7d1f7ef460d7132e0945a213ae796c76
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2007 Philippe LAWRENCE
#
# This file is part of pyBar.
# pyBar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyBar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
# revoir sys.platform dans Const !!!!
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, Gtk, Gdk, Pango, GObject, GdkPixbuf, GLib
print(Gtk.MAJOR_VERSION, Gtk.MINOR_VERSION, Gtk.MICRO_VERSION)
import cairo
#import gio # debug py2exe windows remettre?
import classEditor
import classDrawing
import classRdm
import classLigneInflu
import classDialog
import Const
import classProfilManager
import classPrefs
import threading
import copy
import os
#import pickle
import function
import file_tools
#from time import sleep
import xml.etree.ElementTree as ET
#import signal
#signal.signal(signal.SIGINT, signal.SIG_DFL)
# ------------- module-level startup -------------
# Create/locate the per-user configuration directory.
file_tools.set_user_dir()
# On Windows, redirect stdout/stderr to log files (no console for py2exe builds).
if Const.SYS == "win32":
    path = os.path.join(Const.PATH, "stdout.log")
    sys.stdout = open(path, "w")
    path = os.path.join(Const.PATH, "stderr.log")
    sys.stderr = open(path, "w")
#GObject.threads_init()
__version__ = Const.VERSION
__author__ = Const.AUTHOR
__date__ = "2014-06-01"
__file__ = Const.SOFT # redefined for py2exe, pending a better solution
print("%s%s Copyright (C) 2007 %s\nThis program comes with ABSOLUTELY NO WARRANTY\nThis is free software, and you are welcome to redistribute it under certain conditions." % (Const.SOFT, __version__, __author__))
# Install the application-wide CSS style sheet on the default screen.
screen = Gdk.Screen.get_default()
css_provider = Gtk.CssProvider()
css_provider.load_from_path('gtk-widgets.css')
context = Gtk.StyleContext()
context.add_provider_for_screen(screen, css_provider,
    Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
class CombiButton(Gtk.CheckButton):
    """Check button for a load case or combination in the side panel."""
    def __init__(self, label):
        Gtk.CheckButton.__init__(self, label=label)
        #self.n_type = n_type
def About():
    """Show the modal "About" dialog (logo, version, authors, license)."""
    dialog = Gtk.AboutDialog()
    dialog.set_icon_from_file("glade/logo.png")
    pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size("glade/logo.png", 25, 25)
    dialog.set_logo(pixbuf)
    dialog.set_program_name(Const.SOFT)
    dialog.set_version(Const.VERSION)
    dialog.set_authors([Const.AUTHOR])
    dialog.set_website(Const.SITE_URL)
    dialog.set_comments("%s est un logiciel libre de calcul de structures planes, basé sur la méthode des déplacements, écrit en Python et GTK\n%s" % (Const.SOFT, Const.CONTACT))
    dialog.set_license("Vous pouvez modifier et redistribuer ce programme\nsous les conditions énoncées\npar la licence GNU GPL (version 2 ou ultérieure).\nUne copie de la licence GPL\nest dans le fichier « COPYING » fourni avec %s.\nAucune garantie n'est fournie pour l'utilisation de ce programme." % Const.SOFT)
    # run() blocks until the user closes the dialog; the result is unused.
    result = dialog.run()
    dialog.destroy()
class CombiBox(Gtk.VBox):
    """Vertical box listing one CombiButton per load case and combination."""
    # TODO(review, original note): rework main_win / study coupling
    def __init__(self, **kwargs):
        Gtk.VBox.__init__(self, **kwargs)
        self.set_name("combi")
    def fill_box(self, study, main_win):
        """Populate the box from the study's cases and combinations.

        Buttons are wired to main_win.event_combi_button; cases get
        indices 0..n_cases-1, combinations continue from n_cases.
        """
        self.handler_list = []
        rdm = study.rdm
        #try:/
        # status = rdm.status
        #except AttributeError: # for EmptyRdm
        # status = -1
        #if status == -1:
        # return
        Cases = rdm.Cases
        n_cases = len(Cases)
        try:
            ErrorCases = rdm.char_error
        except AttributeError:
            # rdm may not define char_error; treat as "no failing cases"
            ErrorCases = []
        CombiCoef = rdm.CombiCoef
        combis = function.sortedDictKeys(CombiCoef)
        n_combi = len(combis)
        # Build the list of load cases.
        label = Gtk.Label(label="Cas de charge:")
        #label.set_alignment(0.2, 0.7)
        self.pack_start(label, False, False, 0)
        for i, val in enumerate(Cases):
            button = CombiButton(val)
            id = button.connect("clicked", main_win.event_combi_button, i)
            self.handler_list.append(id)
            button.set_name(str(i))
            if val in ErrorCases:
                # Cases whose loads failed are shown but not selectable.
                button.set_sensitive(False)
            self.pack_start(button, False, False, 0)
        # Build the list of combinations.
        if not n_combi == 0:
            label = Gtk.Label(label="Combinaisons:")
            #label.set_alignment(0.2, 0.7)
            self.pack_start(label, False, False, 0)
            for i, val in enumerate(combis):
                button = CombiButton(val)
                n = i + n_cases
                # combination indices continue after the plain cases
                id = button.connect("clicked", main_win.event_combi_button, n)
                self.handler_list.append(id)
                button.set_name(str(n))
                self.pack_start(button, False, False, 0)
        self.show_all()
#####################################################################
#
# CLASSE PRINCIPALE
#
#####################################################################
class MainWindow(object):
    def __init__(self):
        """Build the main window from the Glade file and show the home page."""
        builder = self.builder = Gtk.Builder()
        builder.add_from_file("glade/main.glade")
        # XXX (original note) drop the signal mapping, as done in Editor
        builder.connect_signals(self)
        self.window = builder.get_object("window1")
        self.main_box = builder.get_object("main_box")
        self._ini_first_page()
        self.window.show() # after the resize done in _ini_first_page
        self._handler_id = {}   # GTK handler ids, keyed by widget role
        self._tabs = []         # one Tab object per notebook page
        self.studies = {}       # open studies, keyed by study id
        self.message = classDialog.Message()
        self.is_press = False   # state of the left mouse button (or press coords)
        self.key_press = False  # state of the Control_L key
    def on_state_event(self, widget, event):
        """Window-state handler (iconify/fullscreen).

        NOTE(review): the early return deliberately disables the code
        below, which is kept as a debugging aid and is unreachable.
        """
        #print event.type
        return
        if event.changed_mask & Gdk.WindowState.ICONIFIED:
            if event.new_window_state & Gdk.WindowState.ICONIFIED:
                print('Window was minimized!')
            else:
                print('Window was unminimized!')
    def on_w1_configure(self, widget, event):
        """Handle resize events of the main window (currently a no-op)."""
        #print("Main::_configure_event")
        pass
    def on_w1_destroy(self, widget, event=None):
        """Closing main window - Save user preferences.

        Persists window geometry, prompts for unsaved editor studies,
        saves per-study drawing preferences, then quits the GTK loop.
        Returns True (keeping the window open) if the user cancels the
        save prompt.
        """
        self.new_version = False
        menu_button = self.builder.get_object("menu_cas")
        display_combi = menu_button.get_active() == True and 'on' or 'off'
        w, h = self.window.get_size()
        self.UP.save_w1_config(w, h, display_combi, self.options)
        if hasattr(self, "editor"):
            changes = self.editor.get_modified_studies()
            must_save = self._get_record_id(changes)
            if must_save is None:
                # must return True to prevent window closing
                return True
            ed_data = self.editor.data_editors
            for id in must_save:
                self._set_name(id)
                ed_study = ed_data[id]
                if not ed_study.path is None:
                    self.editor.save_study(ed_study)
            self.UP.save_w2_config(self.editor._w, self.editor._h)
        # Save the drawing preferences of every open study.
        studies = self.studies
        for study in studies.values():
            self.save_drawing_prefs(study)
        Gtk.main_quit()
    def expose_first_page(self, widget, cr):
        """'draw' handler for the home-page drawing area: blit the cached surface."""
        #print("expose_first_page", cr)
        cr.set_source_surface(self.surface, 0, 0)
        cr.paint()
        #w_alloc = widget.get_allocated_width()
        #h_alloc = widget.get_allocated_height()
        #widget.move(self.tools, 200, 500)
        #self.draw_first_tools(widget, x+20, max(h_alloc-70, 0))
    def configure_first_page(self, widget, event):
        """'size-allocate' handler for the home page: rebuild the cached
        cairo surface (white background, centered logo, version text)
        and reposition the open/new buttons."""
        #print("configure_first_page")
        w_alloc = widget.get_allocated_width()
        h_alloc = widget.get_allocated_height()
        self.surface = cairo.ImageSurface(cairo.FORMAT_RGB24, w_alloc, h_alloc)
        cr = cairo.Context(self.surface)
        # White background.
        cr.save()
        cr.set_source_rgb(1.0, 1.0, 1.0)
        cr.rectangle(0, 0, w_alloc, h_alloc)
        cr.fill()
        cr.restore()
        # Centered home image.
        img = cairo.ImageSurface.create_from_png("glade/home.png")
        w, h = img.get_width(), img.get_height()
        x, y = (w_alloc-w)/2, (h_alloc-h)/2
        cr.set_source_surface(img, x, y)
        cr.paint()
        # Version number drawn over the logo.
        cr.save()
        cr.set_source_rgb(0.0, 0.0, 1.0)
        cr.set_font_size(60)
        cr.move_to(x+280, y+180)
        cr.show_text(Const.VERSION)
        cr.stroke()
        cr.restore()
        self.draw_first_tools(widget, int(x+20), max(h_alloc-70, 0))
    def draw_first_tools(self, layout, x, y):
        """Create (once) or move the home-page open/new buttons at (x, y)."""
        #print("draw_first_tools", x, y)
        if hasattr(self, 'tools'):
            # Already built: just reposition inside the layout.
            layout.move(self.tools, x, y)
            return
        hbox = Gtk.HBox(homogeneous=False, spacing=10)
        b = Gtk.Button.new_from_icon_name('document-open', Gtk.IconSize.DIALOG)
        b.set_relief(Gtk.ReliefStyle.NONE)
        b.set_tooltip_text("Ouvrir une étude existante")
        b.connect('clicked', self.on_open_file)
        hbox.pack_start(b, False, False, 0)
        b = Gtk.Button.new_from_icon_name('document-new', Gtk.IconSize.DIALOG)
        b.set_relief(Gtk.ReliefStyle.NONE)
        b.set_tooltip_text("Ouvrir une nouvelle étude")
        b.connect('clicked', self.on_new_file)
        hbox.pack_start(b, False, False, 0)
        layout.put(hbox, x, y)
        hbox.show_all()
        self.tools = hbox
    def _ini_first_page(self):
        """Draw the application launch page and read user preferences
        (window size, combination panel state, display options)."""
        self.new_version = None
        #self._set_user_dir()
        self.UP = classPrefs.UserPrefs()
        menu_button = self.builder.get_object("menu_cas")
        tag = self.UP.get_w1_box()
        menu_button.set_active(tag)
        menu_button.connect('activate', self._manage_combi_window)
        sizes = self.UP.get_w1_size()
        if sizes is None:
            # No saved geometry: use 60% x 80% of the screen.
            height = Gdk.Screen.height()
            width = Gdk.Screen.width()
            self.window.resize(int(0.6*width), int(0.8*height))
        else:
            w, h = sizes
            self.window.resize(w, h)
        layout = Gtk.Layout()
        layout.connect("size-allocate", self.configure_first_page)
        layout.connect("draw", self.expose_first_page)
        self.main_box.add(layout)
        self.draw_first_tools(layout, 0, 0)
        layout.show()
        # New-version check, throttled by a countdown stored in the prefs.
        opt = self.UP.get_version()
        if opt == 0:
            try:
                GLib.timeout_add(1000, self._get_info_version, opt) # destroy if callback return False
            except:
                # NOTE(review): broad except kept as best-effort — the
                # version check must never block startup.
                pass
        else:
            self.UP.save_version(opt-1)
        # Display options (original note: to be moved?).
        self.options = self.UP.get_w1_options()
# -----------------------------------------------------------
#
# Méthodes relatives au notebook des drawings
#
# -----------------------------------------------------------
    def _ini_application(self):
        """drawings notebook initialisation - button setup
        Replaces the home page with the drawings notebook plus a message
        bar; returns the new Gtk.Notebook."""
        # Remove the home-page layout and release its cairo surface.
        self.main_box.remove(self.main_box.get_children()[0])
        self.surface.finish()
        del(self.surface)
        del(self.tools)
        # Notebook for the drawing tabs; the last "tab" is a [+] button.
        book = Gtk.Notebook()
        book.set_scrollable(True)
        #book.set_show_tabs(False)
        b = Gtk.Button.new_from_icon_name('list-add', Gtk.IconSize.MENU)
        b.set_relief(Gtk.ReliefStyle.NONE)
        b.connect('clicked', self.on_new_tab)
        page = Gtk.HBox()
        book.append_page(page, b)
        self.book = book
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        # Message line at the bottom of the window.
        hbox = Gtk.HBox()
        hbox.set_size_request(-1, 40)
        hbox.set_property('border_width', 4)
        self.message.ini_message(hbox) # avoids an issue with the singleton
        self.main_box.pack_start(book, True, True, 0)
        self.main_box.pack_start(hbox, False, True, 0)
        self.main_box.show_all()
        return book
    def _ini_drawing_page(self, position):
        """Initialise a drawing tab at `position`, creating the notebook
        first if the home page is still displayed, and wire all mouse /
        keyboard events of the tab's layout."""
        #print "Main::_ini_drawing_page"
        book = self.main_box.get_children()[0]
        if not isinstance(book, Gtk.Notebook):
            # First drawing: swap the home page for the notebook UI.
            book = self._ini_application()
            self._set_buttons_ini()
        # Disconnect switch-page while inserting to avoid spurious events.
        book.disconnect(self._handler_id["book"])
        self._add_book_page(book, position)
        tab = self.active_tab
        # scrolling arrows for notebook
        n_pages = book.get_n_pages()
        #if n_pages >= 2:
        # book.set_show_tabs(True)
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        layout = tab.layout
        layout.set_can_focus(True)
        layout.grab_focus()
        layout.connect("size-allocate", tab.configure_event)
        layout.connect("draw", tab.draw_event)
        tab.handler_layout = layout.connect("motion-notify-event", self.motion_notify_event)
        layout.connect("leave-notify-event", self.leave_notify_event)
        layout.connect("button-press-event", self.button_press_event)
        layout.connect("button-release-event", self._button_release_event)
        layout.connect("key-press-event", self._key_press_event)
        layout.connect("key-release-event", self._key_release_event)
    def _add_book_page(self, book, position):
        """Add a drawing page to the notebook at `position`: an HPaned
        with the drawing area on the left and (optionally) the load-case
        panel on the right, plus a closable tab label."""
        #print("Main::_add_book_page")
        tab = classDrawing.Tab(self)
        self.active_tab = tab
        vbox = Gtk.VBox(homogeneous=False, spacing=0)
        # Reserve ~280px on the right for the combination panel.
        w = self.window.get_allocated_width()-280
        hpaned = Gtk.HPaned()
        hpaned.set_position(w)
        hpaned.add1(tab.sw)
        menu_button = self.builder.get_object("menu_cas")
        if menu_button.get_active() == True:
            sw = self._make_combi_box(tab)
            hpaned.pack2(sw, False)
        else:
            tab.right_menu = None
        hpaned.show()
        vbox.pack_start(hpaned, True, True, 0)
        vbox.show()
        # Tab header: title label + close button.
        tab_box = Gtk.HBox(homogeneous=False, spacing=2)
        tab_label = Gtk.Label() # manage according to the available width
        tab.title = tab_label
        close_b = Gtk.Button.new_from_icon_name('window-close', Gtk.IconSize.MENU)
        close_b.set_relief(Gtk.ReliefStyle.NONE)
        close_b.connect('clicked', self._on_remove_page, book, vbox)
        tab_box.pack_start(tab_label, False, True, 0)
        tab_box.pack_start(close_b, False, True, 0)
        tab_box.show_all()
        self._tabs.insert(position, tab)
        book.insert_page(vbox, tab_box, position)
        # to try: works for widgets but obviously not for Tab objects
        #book.set_tab_reorderable(vbox, True)
        book.set_current_page(position)
    def _remove_page(self, book, n_page):
        """Remove notebook page `n_page`, dropping studies and editors
        that are not referenced by any other page.

        Note (original): the notebook is not refreshed before the next
        page switch.
        """
        studies = self.studies
        tabs = self._tabs
        closed_tab = tabs[n_page]
        opened_studies = [] # studies still open on another page
        for tab in tabs:
            if tab is closed_tab:
                continue
            drawings = tab.drawings
            for drawing in drawings.values():
                id_study = drawing.id_study
                if not id_study in opened_studies:
                    opened_studies.append(id_study)
        # Drop studies only used by the closed tab.
        drawings = closed_tab.drawings
        for drawing in drawings.values():
            id_study = drawing.id_study
            if id_study in opened_studies:
                continue
            try:
                del (studies[id_study])
            except KeyError:
                continue
        # Drop the matching editors as well.
        if hasattr(self, "editor"):
            ed_data = self.editor.data_editors
            for id in list(ed_data):
                if id in opened_studies:
                    continue
                del(self.editor.data_editors[id])
        del(self._tabs[n_page])
        frame = book.get_nth_page(n_page)
        # Disconnect page switching to avoid a stale page number.
        book.disconnect(self._handler_id["book"])
        book.remove(frame)
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        n_page = book.get_current_page()
        n_pages = book.get_n_pages()
        if n_page == n_pages-1:
            # Never leave the [+] pseudo-tab selected.
            book.set_current_page(n_page-1)
    def _on_remove_page(self, button, book, frame):
        """Close-button handler for a tab: offer to save modified studies
        shown on that tab, then remove the page (unless it is the last
        real page — the notebook always keeps one plus the [+] tab)."""
        n_page = book.page_num(frame)
        n_pages = self.book.get_n_pages()
        if n_pages == 2:
            return
        tab = self._tabs[n_page]
        if hasattr(self, "editor"):
            for drawing in tab.drawings.values():
                try:
                    ed_data = self.editor.data_editors[drawing.id_study]
                except KeyError:
                    continue
                if ed_data.is_changed:
                    if file_tools.exit_as_ok_func2("Enregistrer le fichier '%s'?" % ed_data.name):
                        if ed_data.path is None:
                            # No path yet: ask the user where to save.
                            path = file_tools.recursive_file_select(self.UP.get_default_path())
                            if not path is None:
                                ed_data.path = path
                        self.editor.save_study(ed_data)
        self._remove_page(book, n_page)
    def on_switch_page(self, widget=None, page=None, n=0):
        """Handle notebook page changes: sync active tab, editor state,
        toolbar sensitivity, titles and the message bar."""
        book = self.book
        n_pages = book.get_n_pages()
        #print ('Main::on_switch_page', n, n_pages)
        if n == n_pages-1:
            # Last "page" is the [+] button: ignore.
            # removed 15/6/2019 debug -------
            # book.stop_emission("switch-page")
            #-----------------------------------
            return
        self.active_tab = tab = self._tabs[n]
        drawing = tab.active_drawing
        if drawing is None:
            rdm_status = 0
            errors = []
        else:
            id_study = drawing.id_study
            study = self.studies[id_study]
            rdm_status = study.rdm.status
            errors = study.rdm.errors
        # Refresh the editor window (drop it if it was closed).
        if hasattr(self, "editor"):
            if self.editor.w2.get_window() is None:
                del (self.editor)
            else:
                self._update_editor()
        self._set_buttons_rdm(rdm_status)
        self._update_titles()
        self._show_message(errors, False)
# -----------------------------------------------------------
#
# Méthodes relatives aux évènements
#
# -----------------------------------------------------------
    def _key_release_event(self, widget, event):
        """Track release of Control_L and refresh the hover state."""
        tab = self.active_tab
        key = Gdk.keyval_name (event.keyval)
        if key == 'Control_L':
            self.key_press = False
            tab.layout_motion_event(tab.layout, event)
    # NOTE: if the pyBar window does not have focus, keyboard events are
    # not intercepted whereas mouse events are.
def _key_press_event(self, widget, event):
key = Gdk.keyval_name (event.keyval)
tab = self.active_tab
is_selected = tab.is_selected
if key == 'Control_L':
self.key_press = True
elif key == 'Escape':
tab.is_selected = False
tab.remove_tools_box()
watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.ARROW)
screen = Gdk.Screen.get_default()
window = screen.get_root_window()
window.set_cursor(watch)
tab.set_surface(tab.area_w, tab.area_h)
cr = cairo.Context(tab.surface)
tab.paint_all_struct(cr, None, 1.)
tab.layout.queue_draw()
elif key == 'Return':
if not is_selected:
return
selected = is_selected[0]
if selected == 'draw':
drawing = is_selected[1]
tab.active_drawing = drawing
tab.do_new_drawing(False)
self._update_combi_box()
elif key == 'Delete':
if not is_selected:
return
selected = is_selected[0]
if selected == 'value':
drawing, n_case, legend = is_selected[1:]
self.on_hide_value(None, drawing, n_case, legend)
    def _button_release_event(self, widget, event):
        """Finish any drag-and-drop in progress and clear the press state."""
        #print('_button_release_event')
        self.active_tab.finish_dnd(event, self.is_press)
        self.is_press = False
    def motion_notify_event(self, area, event):
        """Forward pointer motion to the active tab's drag handling."""
        #if Gtk.events_pending():
        # return
        self.active_tab.start_dnd(area, event, self.is_press)
        # put an info message here
    def leave_notify_event(self, layout, event):
        """Event: the cursor leaves the layout area — redraw the scene
        and restore the arrow cursor (ignored while a drag is active)."""
        #print("leave_notify_event")
        if not self.is_press is False:
            return
        tab = self.active_tab
        tab.set_surface(tab.area_w, tab.area_h)
        cr = cairo.Context(tab.surface)
        tab.paint_all_struct(cr, None, 1.)
        layout.queue_draw()
        self.is_press = False
        watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.ARROW)
        screen = Gdk.Screen.get_default()
        window = screen.get_root_window()
        window.set_cursor(watch)
# Double clic : génére : press -> release -> press -> press -> 2Button -> release
    def button_press_event(self, widget, event):
        """Mouse-button dispatcher for the drawing layout.

        Left click acts on the currently hovered object (tab.is_selected,
        a tagged tuple like ('curve', drawing, ...)); right click opens
        the matching context menu; double click edits/opens the object.

        Double click generates: press -> release -> press -> press ->
        2BUTTON -> release (original note).
        """
        #print("button_press_event")
        tab = self.active_tab
        screen = Gdk.Screen.get_default()
        window = screen.get_root_window()
        try:
            obj_selected = tab.is_selected
        except AttributeError:
            obj_selected = False
        if event.type == Gdk.EventType.BUTTON_PRESS:
            watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.FLEUR)
            if event.get_button()[1] == 1:
                # Left button: remember press position for drag detection.
                self.is_press = (event.x, event.y)
                tab.motion = (0, 0) # provisional, pending something better
                if obj_selected is False:
                    return
                drawing = obj_selected[1]
                status = drawing.status
                if obj_selected[0] == 'entry':
                    entry = obj_selected[2]
                    tab.remove_entry_box()
                    tab.remove_tools_box()
                    tab.is_selected = ('draw', drawing)
                    tab.layout_motion_event(tab.layout, event)
                    return
                if obj_selected[0] == 'curve':
                    self._select_curve(drawing, obj_selected[2])
                    return
                elif obj_selected[0] == 'draw':
                    window.set_cursor(watch)
                    self._select_drawing(obj_selected[1])
                    return
                elif obj_selected[0] == 'info':
                    window.set_cursor(watch)
                    return
                elif obj_selected[0] == 'value':
                    window.set_cursor(watch)
                    return
                elif obj_selected[0] == 'node':
                    content = tab.get_message()
                    self.message.set_message(content)
                    return
                elif obj_selected[0] == 'bar':
                    content = tab.get_message()
                    self.message.set_message(content)
                    return
            elif event.get_button()[1] == 3:
                # Right button: context menus.
                #self.is_press = (event.x, event.y)
                x, y = event.x, event.y
                if obj_selected is False:
                    self._create_menu5(event)
                    return True
                drawing = obj_selected[1]
                if obj_selected[0] == 'value':
                    self._create_menu7(event, obj_selected[1], obj_selected[2], obj_selected[3])
                    return
                if obj_selected[0] == 'curve':
                    self._create_menu6(event, obj_selected[1], obj_selected[2], obj_selected[4])
                    return
                if obj_selected[0] == 'node':
                    watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.ARROW)
                    window.set_cursor(watch)
                    node = obj_selected[2]
                    # node-specific menus go here
                    self._create_menu3(event, drawing, node)
                    return
                if obj_selected[0] == 'bar':
                    watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.ARROW)
                    window.set_cursor(watch)
                    self._create_menu2(event, obj_selected[2])
                    return
                self._create_menu1(event, obj_selected[1])
                return
        elif event.type == Gdk.EventType._2BUTTON_PRESS:
            # Double click: open/edit the hovered object.
            if obj_selected is False:
                return
            drawing = obj_selected[1]
            status = drawing.status
            if obj_selected[0] == 'node':
                return
            # node-specific menus go here
            if obj_selected[0] == 'bar':
                self.on_bar_select(None, barre=obj_selected[2])
                return
            if obj_selected[0] == 'curve':
                self._select_curve(drawing, obj_selected[2])
                return
            if obj_selected[0] == 'info':
                if not drawing.title_id == obj_selected[2]:
                    return
                self._on_edit_title(drawing, obj_selected[2])
                return
            if obj_selected[0] == 'value':
                self._on_edit_value(drawing, obj_selected[2], obj_selected[3])
                return
# double clic
    def on_delete_value(self, widget, data):
        """Delete a value label on a curve; `data` is the tuple
        (drawing, n_curve, legend). Rebuilds the drawing afterwards."""
        drawing, n_curve, legend = data
        drawing.delete_value(n_curve, legend)
        drawing.s_case = n_curve
        drawing.del_patterns()
        tab = self.active_tab
        #tab.del_surface()
        tab.configure_event(tab.layout)
        tab.layout.queue_draw()
    def on_hide_value(self, widget, data):
        """Hide a value label on a curve; `data` is the tuple
        (drawing, n_curve, legend). Mirrors on_delete_value but only
        marks the value hidden instead of removing it."""
        drawing, n_curve, legend = data
        drawing.set_hide_value(n_curve, legend)
        drawing.s_case = n_curve
        drawing.del_patterns()
        tab = self.active_tab
        #tab.del_surface()
        tab.configure_event(tab.layout)
        tab.layout.queue_draw()
    def on_set_anchor(self, widget, data):
        """Anchor a value on the drawing; `data` is (drawing, n_curve, obj).

        Registers the value position in drawing.user_values under the
        nested keys [status][n_curve][barre][pos], then redraws.
        """
        drawing, n_curve, obj = data
        user_values = drawing.user_values
        tab = self.active_tab
        is_selected = tab.is_selected
        barre = is_selected[3]
        # Build the nested dict levels on demand.
        if not drawing.status in user_values:
            user_values[drawing.status] = {}
        values = user_values[drawing.status]
        if not n_curve in values:
            values[n_curve] = {}
        if not barre in values[n_curve]:
            values[n_curve][barre] = {}
        value = values[n_curve][barre]
        pos = obj.is_selected[2]
        if pos is None:
            # No explicit abscissa: compute it from the curve geometry.
            id_study = drawing.id_study
            study = self.studies[id_study]
            rdm = study.rdm
            arc = rdm.struct.Curves[barre]
            pos = arc.get_curve_abs(obj.is_selected[1], obj.is_selected[0], rdm.struct.Lengths)
            #pos = arc.pos[obj.is_selected[1]]
        value[pos] = {0: (0, 0, False)} # dx, dy, hidden
        drawing.del_patterns()
        #tab.del_surface()
        tab.configure_event(tab.layout)
        tab.layout.queue_draw()
    def on_display_value(self, widget, data):
        """Toggle value display for curve n_curve; `data` is
        (drawing, n_curve) and `widget` is the toggling check item."""
        drawing, n_curve = data
        if widget.get_active():
            drawing.s_values.append(n_curve)
            drawing.restore_values(n_curve)
        else:
            # provisional trick: re-apply the values of the selected curve
            if drawing.s_curve == n_curve:
                drawing.restore_values(n_curve)
            try:
                drawing.s_values.remove(n_curve)
            except ValueError:
                pass
        self._do_new_drawing()
def on_display_char(self, widget, data):
"""Ouvre un dessin du chargement"""
drawing, n_curve, curve = data
tab = self.active_tab
#drawing = tab.active_drawing
drawing.s_case = n_curve
tab.add_char_drawing(drawing)
def on_select_curve(self, widget, data):
"""Sélectionne une courbe sur un dessin depuis un menu"""
drawing, n, curve = data
self._select_curve(drawing, n)
    def _select_curve(self, drawing, n_curve):
        """Select curve *n_curve* on *drawing* and refresh the display.

        For influence-line drawings (status == 8) only the message area is
        updated; otherwise the curve becomes the active one, any attached
        load-case child drawing is synchronised, and the tab is repainted.
        """
        tab = self.active_tab
        tab.active_drawing = drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        if drawing.status == 8:
            # Influence line: just report information about the curve.
            drawing.s_influ = n_curve
            content = drawing.get_influ_message(study, n_curve)
            self.message.set_message(content)
            return
        drawing.s_curve = n_curve
        drawing.del_patterns()
        # Refresh the load-case child drawing if one exists.
        key = drawing.get_char_drawing()
        if not key is None:
            child = drawing.childs[key]
            child.s_case = n_curve
            child.del_patterns()
        tab.configure_event(tab.layout)
        tab.layout.queue_draw()
        content = tab.get_char_message(rdm, n_curve)
        self.message.set_message(content)
def _on_edit_value(self, drawing, n_case, legend):
"""Modification de la position en x (sur la barre) de la légende"""
tab = self.active_tab
tab.on_show_value_box(drawing, n_case, legend)
def _on_edit_title(self, drawing, info_id):
"""Action de modification du titre d'un dessin"""
#print "_on_edit_title", info_id
tab = self.active_tab
tab.on_show_title_box(drawing)
def on_select_drawing(self, widget, drawing):
"""Sélectionne le diagramme"""
#print "on_select_drawing"
drawing.options['Select'] = widget.get_active()
self._select_drawing(drawing)
    def _select_drawing(self, drawing):
        """Make *drawing* the active drawing of the current tab and refresh
        menus, editor, titles, messages and button sensitivity."""
        tab = self.active_tab
        prec_drawing = tab.active_drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        if drawing.get_is_char_drawing():
            # Load-case child drawing: repaint and report, nothing to select.
            tab.do_new_drawing(False)
            content = tab.get_char_message(rdm, drawing.s_case)
            self.message.set_message(content)
            return
        tab.active_drawing = drawing
        tab.paint_drawings()
        self._fill_right_menu()
        self._update_combi_box()
        # Update the editor only when the active drawing actually changed.
        if hasattr(self, "editor") and not (prec_drawing is drawing):
            self._update_editor()
        self._update_titles()
        self._show_message(rdm.errors, False)
        self._set_buttons_rdm(rdm.status)
def on_select_bars(self, widget, drawing):
"""Lance l'ouverture de la fenetre de choix des barres et remplace l'set s_influ_bars"""
#tab = self.active_tab
#drawing = tab.active_drawing
id_study = drawing.id_study
study = self.studies[id_study]
rdm = study.rdm
bars = rdm.struct.GetBarsNames()
# trier barre ?? XXX
#bars.sort()
try:
s_influ_bars = drawing.s_influ_bars
except AttributeError:
s_influ_bars = []
bars = file_tools.open_dialog_bars(bars, s_influ_bars)
if bars is False or bars == []:
return
drawing.s_influ_bars = bars
self._fill_right_menu()
self._do_new_drawing()
def on_node_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Node'] = widget.get_active()
self._do_new_drawing()
def on_barre_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Barre'] = widget.get_active()
self._do_new_drawing()
def on_axis_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Axis'] = widget.get_active()
self._do_new_drawing()
def on_title_display(self, widget, drawing):
"""Affichage du titre du dessin"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.set_title_visibility(widget.get_active())
drawing.options['Title'] = widget.get_active()
self._do_new_drawing()
def on_series_display(self, widget, drawing):
"""Affiche les légendes des courbes"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.set_series_visibility(widget.get_active())
drawing.options['Series'] = widget.get_active()
self._do_new_drawing()
    def on_synchronise(self, widget, drawing):
        """Toggle synchronisation of *drawing* with its parent drawing.

        When enabled the child shares the parent's case selection objects;
        when disabled it keeps an independent copy of the selected cases.
        """
        drawing.options['Sync'] = widget.get_active()
        if widget.get_active():
            drawing.s_cases = drawing.parent.s_cases
            drawing.s_case = drawing.parent.s_case
        else:
            # Detach: copy so later edits do not touch the parent's list.
            drawing.s_cases = copy.copy(drawing.parent.s_cases)
        self._do_new_drawing()
        self._fill_right_menu()
        self._update_combi_box()
def on_add_sigma_drawing(self, widget, drawing):
"""Ajoute un dessin des contraintes normales"""
tab = self.active_tab
id_study = drawing.id_study
study = self.studies[id_study]
tab.add_sigma_drawing(drawing, study)
def on_add_drawing(self, widget, drawing):
"""Ajoute un diagramme à partir du diagramme sélectionné"""
#print "on_add_drawing"
tab = self.active_tab
id_study = drawing.id_study
study = self.studies[id_study]
tab.add_drawing(drawing, study)
self._fill_right_menu()
self._update_combi_box()
def save_drawing_prefs(self, study):
""""Sauve les préférences du dessin de l'étude study"""
#print "save_drawing_prefs"
id_study = study.id
tab = self.active_tab
rdm = study.rdm
if isinstance(rdm, classRdm.EmptyRdm):
return
xml = rdm.struct.XML
root = xml.getroot()
node = xml.find('draw')
if not node is None:
root.remove(node)
drawing_pref = ET.SubElement(root, "draw", {"id": "prefs"})
for id in tab.drawings:
drawing = tab.drawings[id]
if not drawing.id_study == id_study:
continue
if not drawing.get_is_parent():
continue
node1 = drawing.get_xml_prefs(drawing_pref)
for key in drawing.childs:
d = drawing.childs[key]
node2 = d.get_xml_prefs(node1)
path = study.path
if path is None:
return
function.indent(root)
#print ET.tostring(root)
#return
try:
xml.write(path, encoding="UTF-8", xml_declaration=True)
except IOError:
print("Ecriture impossible dans %s" % path)
    def on_save_drawings(self, widget, drawing):
        """Save the study state (drawing preferences) and close its drawings.

        After removal, editor data, titles and buttons are refreshed to
        reflect whichever drawing (if any) becomes active.
        """
        tab = self.active_tab
        id_study = drawing.id_study
        study = self.studies[id_study]
        self.save_drawing_prefs(study)
        tab.remove_drawings_by_study(drawing)
        self._fill_right_menu()
        self._update_combi_box()
        if hasattr(self, "editor"):
            try:
                del(self.editor.data_editors[id_study])
            except KeyError:
                pass
            self._update_editor()
        self._update_titles()
        drawing = tab.active_drawing
        if drawing is None:
            status = 2  # no drawing left — presumably a neutral status; confirm
        else:
            status = study.rdm.status
        self._set_buttons_rdm(status)
    def on_del_drawing(self, widget, drawing):
        """Delete the selected diagram and refresh the whole UI state."""
        tab = self.active_tab
        id_study = drawing.id_study
        study = self.studies[id_study]
        if drawing.get_is_parent():
            # Persist preferences before removing a parent drawing.
            self.save_drawing_prefs(study)
        tab.remove_drawing(drawing)
        self._fill_right_menu()
        self._update_combi_box()
        if hasattr(self, "editor"):
            try:
                del(self.editor.data_editors[id_study])
            except KeyError:
                pass
            self._update_editor()
        self._update_titles()
        drawing = tab.active_drawing
        if drawing is None:
            status = 2  # no drawing left — presumably a neutral status; confirm
        else:
            status = study.rdm.status
        self._set_buttons_rdm(status)
def on_bar_select(self, widget, barre):
tab = self.active_tab
drawing = tab.active_drawing
id_study = drawing.id_study
study = self.studies[id_study]
li = drawing.get_bar_drawings()
for key in li:
child = drawing.childs[key]
child.draw_new_bar(tab, study.rdm.struct, barre.name)
drawing.s_bar = barre.name
def on_del_influ(self, widget, data):
"""Efface une courbe de ligne d'influence donnée par n"""
drawing, n, curve = data
try:
del(drawing.user_values[drawing.status][n])
except KeyError:
pass
del(drawing.influ_list[n])
drawing.s_influ = None
self._do_new_drawing()
    def open_node_dialog(self, widget, node):
        """Right-click handler on a node (finish or delete); not implemented yet."""
        pass
# --------------------------------------------------
#
# Menus contextuels
#
# --------------------------------------------------
    def _create_menu1(self, event, drawing):
        """Build and pop up the context menu shown over a drawing area.

        Items are added according to the option flags exposed by the
        active drawing; when the case menu button is inactive the menu
        ends with one check item per load case and combination.
        NOTE(review): the *drawing* argument is immediately replaced by
        the active drawing of the active tab — confirm this is intended.
        """
        drawing = self.active_tab.active_drawing
        options = drawing.get_menu_options()
        menu1 = Gtk.Menu()
        if 'Node' in options:
            menuitem1 = Gtk.CheckMenuItem(label="Afficher les noeuds", active=options['Node'])
            menuitem1.connect("activate", self.on_node_display, drawing)
            menu1.append(menuitem1)
        if 'Barre' in options:
            menuitem2 = Gtk.CheckMenuItem(label="Afficher les barres", active=options['Barre'])
            menuitem2.connect("activate", self.on_barre_display, drawing)
            menu1.append(menuitem2)
        if 'Axis' in options:
            menuitem3 = Gtk.CheckMenuItem(label="Afficher les repères", active=options['Axis'])
            menuitem3.connect("activate", self.on_axis_display, drawing)
            menu1.append(menuitem3)
        if 'Title' in options:
            menuitem4 = Gtk.CheckMenuItem(label="Afficher le titre", active=options['Title'])
            menuitem4.connect("activate", self.on_title_display, drawing)
            menu1.append(menuitem4)
        if 'Series' in options:
            menuitem5 = Gtk.CheckMenuItem(label="Afficher les légendes", active=options['Series'])
            menuitem5.connect("activate", self.on_series_display, drawing)
            menu1.append(menuitem5)
        if "Sync" in options:
            menuitem6 = Gtk.CheckMenuItem(label="Synchroniser", active=options['Sync'])
            menuitem6.connect("activate", self.on_synchronise, drawing)
            menu1.append(menuitem6)
        if 'Select' in options:
            menuitem7 = Gtk.CheckMenuItem(label="Sélectionner le diagramme", active=options['Select'])
            menuitem7.connect("activate", self.on_select_drawing, drawing)
            menu1.append(menuitem7)
            # NOTE(review): a delete action rendered as a CheckMenuItem
            # (active tied to 'Select') — probably should be a plain
            # Gtk.MenuItem; confirm before changing.
            menuitem8 = Gtk.CheckMenuItem(label="Supprimer le diagramme", active=options['Select'])
            menuitem8.connect("activate", self.on_del_drawing, drawing)
            menu1.append(menuitem8)
        if 'Save' in options:
            menuitem9 = Gtk.CheckMenuItem(label="Fermer et enregistrer l\'étude", active=options['Save'])
            menuitem9.connect("activate", self.on_save_drawings, drawing)
            menu1.append(menuitem9)
        if 'Add' in options:
            menuitem10 = Gtk.MenuItem(label="Ajouter un diagramme")
            menuitem10.connect("activate", self.on_add_drawing, drawing)
            menu1.append(menuitem10)
        if 'Sigma' in options:
            menuitem11 = Gtk.MenuItem(label="Diagramme de contraintes")
            menuitem11.connect("activate", self.on_add_sigma_drawing, drawing)
            menu1.append(menuitem11)
        if 'InfluB' in options:
            menuitem12 = Gtk.MenuItem(label="Choix des barres")
            menuitem12.connect("activate", self.on_select_bars, drawing)
            menu1.append(menuitem12)
        menu_button = self.builder.get_object("menu_cas")
        if not menu_button.get_active():
            menu1.append(Gtk.SeparatorMenuItem())
            if 'Case' in options:
                id_study = drawing.id_study
                study = self.studies[id_study]
                rdm = study.rdm
                cases = rdm.Cases
                CombiCoef = rdm.CombiCoef
                combis = list(CombiCoef.keys())
                combis.sort()
                n_cases = len(cases)
                n_combis = len(combis)
                view = drawing.get_combi_view(rdm)
                # Load cases: one check item per visible case.
                for i, val in enumerate(cases):
                    etat = view[i]
                    if etat[1] == 0:
                        continue
                    menuitem = Gtk.CheckMenuItem(label=val, active=etat[0])
                    menuitem.connect("activate", self.event_menu_button, (drawing, i))
                    menu1.append(menuitem)
                # Combinations: their indices continue after the cases.
                if not n_combis == 0:
                    for i, val in enumerate(combis):
                        n = i + n_cases
                        etat = view[n]
                        if etat[1] == 0:
                            continue
                        menuitem = Gtk.CheckMenuItem(label=val, active=etat[0])
                        menuitem.connect("activate", self.event_menu_button, (drawing, n))
                        menu1.append(menuitem)
        menu1.show_all()
        menu1.popup_at_pointer(event)
        return True
def _create_menu2(self, event, barre):
"""Crée et affiche le menu contextuel survol barre"""
tab = self.active_tab
drawing = tab.active_drawing
id_study = drawing.id_study
study = self.studies[id_study]
rdm = study.rdm
n_barres = rdm.struct.GetBars()
menu1 = Gtk.Menu()
if not n_barres == 1:
menuitem1 = Gtk.MenuItem(label="Sélectionner la barre")
menuitem1.connect("activate", self.on_bar_select, barre)
menu1.append(menuitem1)
menu1.show_all()
menu1.popup_at_pointer(event)
return True
    # disabled, do not delete
    def _create_menu3(self, event, drawing, node):
        """Context menu for hovering nodes; currently disabled (no-op)."""
        pass
    def _create_menu4(self, event, chart):
        """Context menu for hovering a chart; currently a no-op."""
        pass
def _create_menu5(self, event):
"""Menu contextuel survol zone vide"""
menu1 = Gtk.Menu()
menuitem1 = Gtk.MenuItem(label="Ouvrir une étude")
menuitem1.connect("activate", self.on_open_file)
menu1.append(menuitem1)
menuitem2 = Gtk.MenuItem(label="Nouvelle étude")
menuitem2.connect("activate", self.on_new_study)
menu1.append(menuitem2)
menu1.show_all()
menu1.popup_at_pointer(event)
return True
def _create_menu6(self, event, drawing, n_curve, curve):
"""Menu contextuel survol courbe"""
menu1 = Gtk.Menu()
menuitem1 = Gtk.MenuItem(label="Sélectionner la courbe")
menuitem1.connect("activate", self.on_select_curve, (drawing, n_curve, curve))
menu1.append(menuitem1)
menuitem2 = Gtk.MenuItem(label="Ancrer une valeur")
menuitem2.connect("activate", self.on_set_anchor, (drawing, n_curve, curve))
menu1.append(menuitem2)
char_drawing_id = drawing.get_char_drawing()
if char_drawing_id is None:
menuitem3 = Gtk.MenuItem(label="Afficher le chargement")
menuitem3.connect("activate", self.on_display_char, (drawing, n_curve, curve))
menu1.append(menuitem3)
s_values = drawing.s_values
if n_curve in s_values or drawing.s_curve == n_curve:
has_values = True
else:
has_values = False
menuitem4 = Gtk.CheckMenuItem(label="Afficher les valeurs", active=has_values)
menuitem4.connect("activate", self.on_display_value, (drawing, n_curve))
menu1.append(menuitem4)
menu1.show_all()
menu1.popup_at_pointer(event)
return True
def _create_menu7(self, event, drawing, n_curve, legend):
menu1 = Gtk.Menu()
menuitem1 = Gtk.MenuItem(label="Masquer")
menuitem1.connect("activate", self.on_hide_value, (drawing, n_curve, legend))
menu1.append(menuitem1)
menuitem2 = Gtk.MenuItem(label="Supprimer")
menuitem2.connect("activate", self.on_delete_value, (drawing, n_curve, legend))
menu1.append(menuitem2)
menu1.show_all()
menu1.popup_at_pointer(event)
return True
# -----------------------------------------------------------
#
# Méthodes relatives au menu des combinaisons et cas
#
# -----------------------------------------------------------
def _click_close_combi(self, widget):
"""Gère l'évènement de fermeture de la boite des combinaisons"""
#print 'Main::_click_close_combi'
menu_button = self.builder.get_object("menu_cas")
menu_button.set_active(False)
    def _close_combi(self):
        """Close the combinations management box on every page.

        Iterates over all notebook pages except the last one (which only
        holds the "new tab" button) and detaches the scrolled window that
        was added next to the drawing area.
        """
        book = self.book
        n_pages = book.get_n_pages()
        for i in range(n_pages-1):  # last tab = button
            page = book.get_nth_page(i)
            hbox = page.get_children()[0]
            child = hbox.get_children()
            if not len(child) == 2:
                # No combi box attached: nothing more to remove.
                break
            sw = child[1]
            hbox.remove(sw)
            tab = self._tabs[i]
            tab.right_menu = None
    def _open_combi(self, widget=None):
        """Open the combinations management box on every page."""
        book = self.book
        n_pages = book.get_n_pages()
        for i in range(n_pages-1):  # last tab holds the "new tab" button
            book_page = book.get_nth_page(i)
            paned = book_page.get_children()[0]
            tab = self._tabs[i]
            sw = self._make_combi_box(tab)
            paned.add2(sw)
            self._fill_right_menu(i)
        self._update_combi_box()
def _manage_combi_window(self, widget):
"""Gère l'évènement d'ouverture ou de fermeture de la fenetre des combi"""
#print "Main::_manage_combi_window"
if not widget.get_active():
self._close_combi()
else:
self._open_combi()
def _make_combi_box(self, tab):
"""Crée la boite pour les combi (sw, bouton fermeture, box pour contenu"""
#print("_make_combi_box")
sw = Gtk.ScrolledWindow()
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
pbox = Gtk.VBox(homogeneous=False, spacing=0)
# close button
button = Gtk.Button.new_from_icon_name('window-close', Gtk.IconSize.MENU)
button.set_property('halign', Gtk.Align.END)
button.set_relief(Gtk.ReliefStyle.NONE)
button.connect('clicked', self._click_close_combi)
pbox.pack_start(button, False, False, 2)
# combi and cas in box
sw.add(pbox)
sw.show_all()
tab.right_menu = pbox
return sw
def _fill_right_menu(self, n=None):
"""Supprime et crée un nouveau contenu dans la boite des menu de droite"""
#print "_fill_right_menu"
if n is None:
tab = self.active_tab
else:
tab = self._tabs[n]
box = tab.right_menu
if box is None:
return
drawing = tab.active_drawing
if drawing is None:
self._fill_combi_menu(tab, box)
elif drawing.status == 8:
self._fill_influ_menu(tab, box)
else:
self._fill_combi_menu(tab, box)
    def _fill_combi_menu(self, tab, box):
        """Replace the content of the combinations box for *tab*."""
        drawing = tab.active_drawing
        childs = box.get_children()
        try:
            # Child 0 is the close button; child 1 (if any) is old content.
            child = childs[1]
            box.remove(child)
        except IndexError:
            pass
        if drawing is None:
            return
        pbox = CombiBox(homogeneous=False, spacing=0)
        box.pack_start(pbox, False, False, 0)
        study = self.studies[drawing.id_study]
        pbox.fill_box(study, self)
    # TODO: rename
    def _update_combi_box(self):
        """Update sensitivity/activity of the case and combination buttons
        according to the drawing status and any solver errors."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        box = tab.right_menu
        if box is None:
            return
        if drawing.status == 8:
            # Influence lines: the whole box stays usable.
            box.set_sensitive(True)
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        view = drawing.get_combi_view(rdm, self._get_has_textview())
        if view is None:
            box.set_sensitive(False)
            return
        box.set_sensitive(True)
        pbox = box.get_children()[1]
        buttons = pbox.get_children()
        i = 0
        for button in buttons:
            if not isinstance(button, CombiButton):
                continue
            ind = int(button.get_name())
            # Block the handler so set_active() does not re-enter the callback.
            button.handler_block(pbox.handler_list[ind])
            etat = view[i]
            button.set_active(etat[0])
            button.set_sensitive(etat[1])
            button.handler_unblock(pbox.handler_list[ind])
            i += 1
    def _event_combi_radio(self, widget):
        """Radio-style behaviour for the combination buttons.

        When *widget* becomes active, every other CombiButton is switched
        off (with its handler blocked to avoid re-entrancy) and the case
        it names becomes the selected one.  Returns True when the state
        changed, False when the click tried to de-activate the current
        button (which is then forced back on).
        """
        tab = self.active_tab
        box = tab.right_menu.get_children()[1]
        drawing = tab.active_drawing
        if widget.get_active():
            buttons = box.get_children()
            for button in buttons:
                if not isinstance(button, CombiButton):
                    continue
                ind = int(button.get_name())
                if widget is button:
                    drawing.s_case = ind
                    drawing.s_curve = ind
                    continue
                button.handler_block(box.handler_list[ind])
                button.set_active(False)
                button.handler_unblock(box.handler_list[ind])
            return True
        # Refuse de-activation: silently force the button back on.
        ind = int(widget.get_name())
        widget.handler_block(box.handler_list[ind])
        widget.set_active(True)
        widget.handler_unblock(box.handler_list[ind])
        return False
    def _event_combi_check(self, widget, n_case):
        """Checkbox-style behaviour for the combination buttons.

        Keeps ``drawing.s_cases`` (sorted) in sync with the toggles, makes
        the last-toggled case the selected curve and updates the message
        area.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        s_cases = drawing.s_cases
        if widget.get_active():
            if not n_case in s_cases:
                s_cases.append(n_case)
            drawing.s_curve = n_case
        else:
            if n_case in s_cases:
                s_cases.remove(n_case)
            try:
                drawing.s_curve = s_cases[0]
            except IndexError:
                # Nothing left checked.
                drawing.s_curve = None
        s_cases.sort()
        content = tab.get_char_message(rdm, drawing.s_curve)
        self.message.set_message(content)
def event_menu_button(self, widget, data):
"""Evènement sur un bouton à cocher de combinaisons depuis le menu contextuel"""
drawing, n_case = data
#drawing = tab.active_drawing
status = drawing.status
if status in [0, 2, 3]:
drawing.s_case = n_case
else:
self._event_combi_check(widget, n_case)
self._do_new_drawing()
    def event_combi_button(self, widget, n_case):
        """Handle a toggle on a combination button from the side box.

        Uses radio behaviour when a textview is displayed or for the
        exclusive statuses [0, 2, 3]; checkbox behaviour otherwise.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        status = drawing.status
        study = self.studies[drawing.id_study]
        sw = tab.sw
        if self._get_has_textview() == True:
            is_drawing = False
        else:
            is_drawing = True
        # Radio mode.
        if not is_drawing or status in [0, 2, 3]:
            if not self._event_combi_radio(widget):
                return
        # Checkbox mode.
        else:
            self._event_combi_check(widget, n_case)
        if is_drawing:
            self._do_new_drawing()
        else:
            # A textview is displayed: re-print its content instead.
            textview = sw.get_child()
            self._print_message(textview)
        # Refresh button sensitivity.
        self._set_buttons_rdm(study.rdm.status)
# -----------------------------------------------------------
#
# Méthodes relatives à la mise à jour des boutons et titre
#
# -----------------------------------------------------------
def _update_titles(self):
"""Affichage du titre de la zone de dessin"""
#print "Main::update_titles"
book = self.book
w1 = self.window
tab = self.active_tab
tab_label = tab.title
drawing = tab.active_drawing
if drawing is None:
w1.set_title("%s" % Const.SOFT)
tab_label.set_text("(Vide)")
tab_label.set_tooltip_text("")
return
status = drawing.status
study = self.studies[drawing.id_study]
name = study.name
path = study.path
tab_label.set_text(name)
if not path is None:
tab_label.set_tooltip_text(path)
titre = "%s - " % name
if status == 0:
titre += "Noeuds"
elif status == 1:
titre += "Barres"
elif status == 2:
titre += "Chargement"
elif status == 3:
titre += "Réaction d'appuis"
elif status == 4:
titre += "Effort normal"
elif status == 5:
titre += "Effort tranchant"
elif status == 6:
titre += "Moment fléchissant"
elif status == 7:
titre += "Déformée"
elif status == 8:
titre += "Ligne d'influence"
w1.set_title("%s - %s" % (Const.SOFT, titre))
def _set_buttons_ini(self):
"""Activation des boutons après la page d'accueil"""
items = [
"menu_save",
"menu_save_as",
"menu_save_copy",
"menu_export",
"menu_reload",
"menu_cas",
"button_export",
"button_zoom_best",
"button_zoom_more",
"button_zoom_less",
"button_chart_less",
"button_chart_more",
]
# activation des boutons
for item in items:
widget = self.builder.get_object(item)
widget.set_sensitive(True)
def _set_buttons_rdm(self, rdm_status):
"""Fonction qui sert à modifier la sensibilité des boutons
en fonction de l'état de l'objet rdm"""
#print "_set_buttons_rdm", rdm_status
items1 = ["button_ddl",
"menu_ddl",
"button_eq",
"menu_eq",
"button_barre",
"menu_barre",
"menu_char",
"button_char",
"menu_degree",
"menu_reac",
"button_reac",
"menu_n",
"button_n",
"menu_v",
"button_v",
"menu_m",
"button_m",
"menu_defo",
"button_defo",
"menu_influ",
"button_influ",
]
items2 = ["button_build",
"menu_build"
]
items3 = ["button_editor",
"button_error",
"menu_editor",
"menu_error",
]
# activation des boutons
if rdm_status == 1:
status = True
else:
status = False
for item in items1:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
if rdm_status == -1:
status = False
else:
status = True
for item in items2:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
for item in items3:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
# -----------------------------------------------------------
#
# Méthodes relatives aux actions sur des boutons
#
# -----------------------------------------------------------
def on_zoom_more(self, widget):
"""Agrandit la taille du drawing_area"""
tab = self.active_tab
if not tab.status == 0:
return
drawing = tab.active_drawing
if drawing is None:
return
study = self.studies[drawing.id_study]
drawing.set_zoom("+")
drawing.set_scale(study.rdm.struct)
tab.get_layout_size([drawing])
drawing.del_patterns()
#tab.del_surface()
tab.configure_event(tab.layout)
tab.layout.queue_draw()
def on_zoom_100(self, widget):
"""Agrandit la taille du drawing_area"""
tab = self.active_tab
if not tab.status == 0:
return
drawing = tab.active_drawing
if drawing is None:
return
status = drawing.status
study = self.studies[drawing.id_study]
w, h = drawing.width, drawing.height
m = Const.AREA_MARGIN_MIN
sw = tab.sw
sw_w = float(sw.get_hadjustment().get_page_size()) - 2*m
sw_h = float(sw.get_vadjustment().get_page_size()) - 2*m
if w == 0 and h == 0:
return
if w == 0:
coef = sw_h/h
elif h == 0:
coef = sw_w/w
else:
coef = min(sw_w/w, sw_h/h)
drawing.zoom_best(coef, study.rdm.struct)
tab.get_layout_size(list(tab.drawings.values()))
drawing.del_patterns()
tab.configure_event(tab.layout)
tab.layout.queue_draw()
def on_zoom_less(self, widget):
"""Agrandit la taille du drawing_area"""
tab = self.active_tab
if not tab.status == 0:
return
drawing = tab.active_drawing
if drawing is None:
return
status = drawing.status
study = self.studies[drawing.id_study]
drawing.set_zoom("-")
drawing.set_scale(study.rdm.struct)
drawing.del_patterns()
tab.configure_event(tab.layout)
tab.layout.queue_draw()
def on_chart_zoom_more(self, widget):
tab = self.active_tab
if not tab.status == 0:
return
self._set_chart_zoom(None, 'more')
def on_chart_zoom_less(self, widget):
tab = self.active_tab
if not tab.status == 0:
return
self._set_chart_zoom(None, 'less')
def _set_chart_zoom(self, widget=None, tag='more'):
"""Augmente ou diminue la valeur du zoom du graphe"""
tab = self.active_tab
drawing = tab.active_drawing
if drawing is None:
return
#zoom = drawing.chart_zoom
if tag == "more":
zoom = 1.2
else:
zoom = 1 / 1.2
status = drawing.status
if status >= 4:
if status in drawing.chart_zoom:
drawing.chart_zoom[status] *= zoom
else:
drawing.chart_zoom[status] = zoom
self._do_new_drawing()
def on_recents(self, widget=None):
import urllib
try:
book = self.book
except AttributeError:
self._ini_drawing_page(0)
path = widget.get_current_uri()
p = urllib.parse.urlparse(path)
path = os.path.abspath(os.path.join(p.netloc, p.path))
path = urllib.parse.unquote(path)
GLib.idle_add(self._on_open_study, path) # permet à la zone de dessin de se mettre en place
    def _on_open_study(self, path):
        """Open the study at *path*, then refresh titles and the editor."""
        self._open_study(path)
        self._update_titles()
        if hasattr(self, "editor"):
            self._update_editor()
def on_open_file(self, widget=None):
"""Evènement d'ouverture d'une étude existante"""
try:
book = self.book
except AttributeError:
self._ini_drawing_page(0)
path = self.UP.get_default_path()
path = file_tools.file_selection(path, self.window)
#print(path)
self._on_open_study(path)
#self._update_titles()
#if hasattr(self, "editor"):
# self._update_editor()
def on_new_tab(self, widget):
"""Evènement d'ouverture d'un onglet"""
#print "on_new_tab"
try:
book = self.book
pos = book.get_n_pages() - 1
except AttributeError:
pos = 0
self._ini_drawing_page(pos)
if hasattr(self, "editor"):
self._update_editor()
self._update_titles()
def _write_save_file(self, file):
"""Ecriture du fichier de sauvegarde"""
#print "_write_save_file"
tab = self.active_tab
study = self.studies[tab.active_drawing.id_study]
content = study.rdm.struct.RawReadFile()
try:
f = open(file, 'w')
f.write(content)
f.close()
except IOError as e:
content = ("%s" % e, 0) # formatage obligatoire
classDialog.Message().set_message(content)
except:
content = ("Enregistrement impossible", 0)
classDialog.Message().set_message(content)
    def _open_study(self, path):
        """Open the study at *path* in the active tab.

        Remembers the directory as the default path, refuses to open a
        file that is already open, and reports parsing/solver errors in
        the message area.
        """
        def ConvertDXF2XML(path):  # test stub for DXF conversion
            return path[-3:]+"dat"
        book = self.book
        page = book.get_current_page()
        tab = self.active_tab
        if not path:
            return False
        if os.path.splitext(path)[1].lower() == '.dxf':
            # DXF conversion is disabled.  NOTE(review): *path* cannot be
            # None here, so this branch currently never returns — confirm.
            if path is None:
                return False
        self.UP.save_default_path(os.path.dirname(path))
        if self._file_is_closed(path):
            study, drawings = tab.add_study(path, self.options)
            if drawings is None:
                self._show_message([("Une erreur s'est produite dans %s" % path, 0)])
                return
            if drawings == []:
                # Parsed but nothing drawable: show the solver errors.
                self._show_message(study.rdm.errors)
                return
            tab.get_layout_size(drawings)
            tab.configure_event(tab.layout)
            tab.layout.queue_draw()
            rdm = study.rdm
            self._fill_right_menu()
            self._update_combi_box()
            rdm_status = rdm.status
            self._set_buttons_rdm(rdm_status)
            self._show_message(rdm.errors)
        else:
            # Already open elsewhere: delegate to "open as" handling.
            file_tools.open_as_ok_func(path)
    def on_save(self, widget=None):
        """Save a modified study via the editor, if one is open.

        The three guard checks detect "nothing to save" situations:
        no editor, no editor window, or window already destroyed.
        """
        if not hasattr(self, 'editor'):
            content = ("Etude déjà enregistrée ou vide", 2)
            classDialog.Message().set_message(content)
            return
        if not hasattr(self.editor, 'w2'):
            content = ("Etude déjà enregistrée", 2)
            classDialog.Message().set_message(content)
            return
        win = self.editor.w2.get_window()
        if win is None:
            content = ("Etude déjà enregistrée", 2)
            classDialog.Message().set_message(content)
            return
        self.update_from_editor()
def on_save_as(self, widget):
"""Enregistre une étude et l'ouvre à la place de l'étude précédente"""
#print "on_save_as"
tab = self.active_tab
drawing = tab.active_drawing
if drawing is None:
return
study = self.studies[drawing.id_study]
rdm = study.rdm
if isinstance(rdm, classRdm.EmptyRdm):
self.on_save()
return
content = rdm.struct.RawReadFile()
path = file_tools.file_save(self.UP.get_default_path())
if not path:
return
self.UP.save_default_path(os.path.dirname(path))
if not file_tools.save_as_ok_func(path):
return
try:
f = open(path, 'w')
f.write(content)
f.close()
except IOError as e:
content = ("%s" % e, 0) # formatage obligatoire
classDialog.Message().set_message(content)
return
name = os.path.basename(path)
if self._file_is_closed(path):
rdm.struct.RenameObject(path)
study.path = path
study.name = name
drawing.set_status(1)
self._do_new_drawing()
else:
content = ("Enregistrement impossible: étude déjà ouverte", 0)
classDialog.Message().set_message(content)
# mise à jour de la fenetre de l'éditeur
if hasattr(self, "editor"):
self._update_editor()
self._update_titles()
self._update_combi_box()
def on_save_copy(self, widget):
"""Enregistre une étude et l'ouvre à la place de l'étude précédente"""
#print "on_save_copy"
tab = self.active_tab
drawing = tab.active_drawing
if drawing is None:
return
study = self.studies[drawing.id_study]
rdm = study.rdm
if isinstance(rdm, classRdm.EmptyRdm):
content = ("Impossible de copier une étude vide", 1)
classDialog.Message().set_message(content)
return
content = rdm.struct.RawReadFile()
path = file_tools.file_save(self.UP.get_default_path())
if not path:
return
self.UP.save_default_path(os.path.dirname(path))
if not file_tools.save_as_ok_func(path):
return
try:
f = open(path, 'w')
f.write(content)
f.close()
except IOError as e:
content = ("%s" % e, 0)
classDialog.Message().set_message(content)
    def on_reload(self, widget):
        """Reload the active study from its file on disk."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        if isinstance(rdm, classRdm.EmptyRdm):
            return
        structure = classRdm.StructureFile(study.path)
        if structure.status == -1:  # file deleted or load error
            content = ("Une erreur est survenue durant le chargement", 0)
            classDialog.Message().set_message(content)
            return
        study.rdm = classRdm.R_Structure(structure)
        self._do_new_drawing()
        if hasattr(self, "editor"):
            self._update_editor()
        self._update_titles()
        self._fill_right_menu()
        self._update_combi_box()
def on_new_file(self, widget):
"""Ouverture d'une nouvelle étude dans l'onglet actif"""
try:
book = self.book
except AttributeError:
self._ini_drawing_page(0)
GLib.idle_add(self.on_new_study) # permet à la zone de dessin de se mettre en place
    def on_new_study(self, widget=None, x=None, y=None):
        """Create a new empty study in the current tab and open the editor."""
        book = self.book
        current_page = book.get_current_page()
        tab = self._tabs[current_page]
        study, drawing = tab.add_empty_study(self.options, x, y)
        # Defer the configure event so the drawing area is in place first.
        GLib.idle_add(tab.configure_event, tab.layout)
        tab.layout.queue_draw()
        name = study.name
        if hasattr(self, "editor"):
            self._update_editor()
        else:
            self.editor = classEditor.Editor(study, self)
            self.editor.w2.connect("delete-event", self._destroy_editor)
        self._update_titles()
        self._set_buttons_rdm(0)
        self._fill_right_menu()
        self._update_combi_box()
def _file_is_closed(self, path):
"""Vérifie si une étude de chemin path est déjà ouverte"""
#print "Main::_file_is_closed"
#for tab in self._tabs:
for study in self.studies.values():
if study.path == path:
return False
return True
def on_open_editor(self, widget):
"""Ouverture de l'éditeur"""
#print "Main::_open_editor"
if hasattr(self, 'editor') and not self.editor.w2 is None:
self.editor.w2.present()
else:
book = self.book
n_pages = book.get_n_pages()
current_page = book.get_current_page()
tab = self.active_tab
drawing = tab.active_drawing
if drawing is None:
return
study = self.studies[drawing.id_study]
self.editor = classEditor.Editor(study, self)
self.editor.w2.connect("delete-event", self._destroy_editor)
#self.editor.record_button.connect("clicked", self.update_from_editor)
    def on_edit_ddl(self, widget):
        """Toggle the textview showing the numerical results (tab status 1).

        A second activation (status already 1) switches back to the drawing.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        if tab.status == 1:
            # Already showing results: restore the drawing view.
            tab.status = 0
            self._expose_commun(drawing.status)
            self._update_combi_box()
            return
        tab.status = 1
        self._textview_commun()
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
        self._update_combi_box()
    def on_edit_error(self, widget):
        """Toggle the textview showing the error messages (tab status 2).

        A second activation (status already 2) switches back to the drawing.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        if tab.status == 2:
            # Already showing errors: restore the drawing view.
            tab.status = 0
            self._expose_commun(drawing.status)
            self._update_combi_box()
            return
        self._textview_commun()
        tab.status = 2
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
    def on_edit_eq(self, widget):
        """Toggle the textview showing the curve equations (tab status 3).

        Only meaningful for drawing statuses 4-8 (internal forces, deflection
        or influence lines); status 8 additionally needs a selected influence
        line.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        if tab.status == 3:
            # Already showing equations: restore the drawing view.
            tab.status = 0
            self._expose_commun(drawing.status)
            self._update_combi_box()
            return
        status = drawing.status
        if not status in [4, 5, 6, 7, 8]:
            return
        if status == 8 and drawing.s_influ is None:
            return
        if not drawing.status == 8:
            self._textview_commun()
        tab.status = 3
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
    def _textview_commun(self):
        """Common pre-textview step: rebuild the combi menu when the right
        menu currently shows the influence-line box."""
        tab = self.active_tab
        box = tab.right_menu
        if box is None:
            return
        pbox = box.get_children()[1]
        if pbox.get_name() == 'influ':
            self._fill_combi_menu(tab, box)
    # not supported
    def _export_jpg(self, file, reso):
        """Export the drawing as a JPEG file.

        NOTE(review): marked "not supported"; it prints a notice and then
        still delegates to the tab's draw_jpg_file — confirm whether the
        trailing call is intentional.
        """
        print("not implemented yet")
        #return
        self.active_tab.draw_jpg_file(file)
    def _export_jpgsauv(self, file, reso):
        """Export the drawing as a JPEG file (legacy backup implementation).

        NOTE(review): this uses GTK2-era APIs (`area.bin_window`,
        `Gdk.colormap_get_system`, `Pixbuf.get_from_drawable`) which look
        unavailable under GTK3 — presumably dead code kept for reference;
        confirm before reviving.
        """
        tab = self.active_tab
        area = tab.layout
        width = tab.area_w
        height = tab.area_h
        pixbuf = GdkPixbuf.Pixbuf.new(GdkPixbuf.Colorspace.RGB, True, 8, width, height)
        rect = (0, 0, width, height)
        tab.draw_event(area, rect)
        try:
            drawable = area.bin_window
        except AttributeError:
            return
        colormap = Gdk.colormap_get_system()
        pixbuf.get_from_drawable(drawable, colormap, 0, 0, 0, 0, width, height)
        pixbuf.save(file, "jpeg", {"quality": str(reso)})
def _export_svg(self, file):
"""Exporte le tracé au format svg"""
self.active_tab.draw_svg_file(file)
    def _export_png(self, file):
        """Export the drawing as a PNG file."""
        self.active_tab.draw_png_file(file)
def on_export(self, widget):
"""Effectue une sauvegarde de l'écran au format jpg ou svg"""
tab = self.active_tab
if not tab.status == 0:
return
drawing = tab.active_drawing
try:
status = drawing.status
except AttributeError:
status = -1
if status == -1:
return
data = file_tools.file_export(self.UP.get_default_path())
if data is None:
return
file = data[0]
format = data[1]
if not file_tools.save_as_ok_func(file):
return
watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.WATCH)
screen = Gdk.Screen.get_default()
window = screen.get_root_window()
window.set_cursor(watch)
if format == 'JPEG': # ne fonctionne plus
reso = file_tools.open_dialog_resol()
if reso == False:
return
self._export_jpg(file, reso)
if format == 'PNG':
self._export_png(file)
elif format == 'SVG':
self._export_svg(file)
watch = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.ARROW)
screen = Gdk.Screen.get_default()
window = screen.get_root_window()
window.set_cursor(watch)
    def on_about(self, widget):
        """Show the application's About dialog."""
        About()
def _get_info_version(self, value):
"""Vérifie la dernière version et lance Dialog - Return False"""
#print "_get_info_version", self.new_version, value
if self.new_version is None:
return True
if self.new_version is False:
return False
self._open_dialog_version(self.new_version)
return False
    def _open_dialog_version(self, last):
        """Open the modal "new version available" dialog.

        *last* is the newer version number to advertise; the dialog offers a
        download link and a "remind me later" checkbox.
        """
        dialog = Gtk.Dialog("Nouvelle version",
            None,
            Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
            (Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
        dialog.set_icon_from_file("glade/logo.png")
        text = "La version %s de %s est disponible." % (last, Const.SOFT)
        button = Gtk.LinkButton(Const.DOWNLOAD_URL, text)
        # TODO: does not work under Windows
        button.set_relief(Gtk.ReliefStyle.NONE)
        button.connect('clicked', self._dialog_destroy)
        button.set_border_width(20)
        vbox = dialog.vbox
        vbox.add(button)
        button = Gtk.CheckButton(label="Me le rappeler plus tard")
        button.connect('clicked', self._set_version_pref)
        vbox.add(button)
        vbox.show_all()
        result = dialog.run()  # NOTE(review): result unused — only CLOSE is possible
        dialog.destroy()
def _set_version_pref(self, widget):
"""Enregistre la préférence pour la recherche de la nouvelle version"""
if widget.get_active():
self.UP.save_version(10)
else:
self.UP.save_version(0)
    def _dialog_destroy(self, widget):
        """Close the "new version" dialog from its link button.

        Walks two levels up the widget hierarchy (button -> vbox -> dialog)
        and destroys the dialog.
        """
        widget.get_parent().get_parent().destroy()
def on_open_help(self, widget):
import webbrowser
try:
webbrowser.open(Const.HELP_URL)
except:
classDialog.Message().set_message("Erreur avec le navigateur", 0)
    def on_edit_degree(self, widget):
        """Show the degree of static indeterminacy of the active structure
        in a message dialog (error message when it cannot be computed)."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        try:
            deg = str(rdm.struct.CalculDegreH())
            state = 2
        except AttributeError:
            deg = 'Une erreur est survenue'
            state = 0
        content = ("Degré d'hyperstaticité de la structure: %s" % deg, state)
        classDialog.Message().set_message(content)
def _show_message(self, content, dialog=True):
#print("_show_message", content)
errors = [i[0] for i in content if i[1] == 0]
warnings = [i[0] for i in content if i[1] == 1]
if errors:
self.message.set_message((errors[0], 0))
if dialog:
classDialog.Dialog(errors)
elif warnings:
self.message.set_message(('', 1))
else:
self.message.set_message(None)
# -----------------------------------------------------------
#
# Méthodes relatives au dessin
#
# -----------------------------------------------------------
    def _do_new_drawing(self):
        """Refresh the drawing area without recomputing the rdm instance.

        Swaps a textview back to the drawing widget if one is currently
        shown, then asks the tab to redraw.
        """
        #print("Main::_do_new_drawing")
        tab = self.active_tab
        sw = tab.sw
        if isinstance(sw.get_child(), Gtk.TextView):
            self._clear_sw_content()
            self._add_drawing_widget()
        tab.do_new_drawing(True)
    def update_drawing(self, case_page=None):
        """Update a status-0 drawing from the data editor.

        Only runs when the editor flagged that a redraw is needed and the
        (parent) drawing is in status 0.  Without *case_page* the XML is
        re-read into an EmptyRdm; with it only the selected load case
        changes.
        """
        #print ("update_drawing")
        if self.editor.data_editor.need_drawing == False:
            return
        tab = self.active_tab
        drawing = tab.active_drawing
        if not drawing.parent is None:
            drawing = drawing.parent
        if not drawing.status == 0:
            return
        study = self.studies[drawing.id_study]
        if case_page is None:
            if not self.editor.xml_status == -1:
                self.editor.data_editor.set_xml_structure()
            study.rdm = classRdm.EmptyRdm(self.editor.data_editor.XML, self.editor.data_editor.name)
            self._fill_right_menu()
            self._update_combi_box()
            self._set_buttons_rdm(study.rdm.status)
            self.editor.data_editor.need_drawing = False
        else:
            drawing.s_case = case_page
            self._fill_right_menu()
            self._update_combi_box()
        tab.do_new_drawing2(study, drawing)
        self.message.set_message(("Enregistrer l'étude pour continuer", 1))
    def on_dynamic_expose(self, widget):
        """Switch the active drawing back to status 0 (structure view).

        Clamps the selected load case to the available range, and when an
        editor with pending changes exists, routes through update_drawing
        instead of a plain redraw.
        """
        drawing = self.active_tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        try:
            n_cases = rdm.n_cases
        except AttributeError:
            n_cases = 1
        if drawing.s_case > n_cases-1:
            # Selected case no longer exists: fall back to the first one.
            drawing.s_case = 0
        if hasattr(self, "editor"):
            ed_data = self.editor.data_editors[drawing.id_study]
            drawing.status = 0
            if ed_data.is_changed:
                self.update_drawing()
            else:
                self._expose_commun(0)
                self._update_combi_box()
                self._update_titles()
        else:
            self._expose_commun(0)
            self._update_combi_box()
            self._update_titles()
def on_bar_expose(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(1)
self._update_combi_box()
self._update_titles()
def on_char_expose(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(2)
self._update_combi_box()
self._update_titles()
def on_expose_reac(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(3)
self._update_combi_box()
self._update_titles()
def on_expose_n(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(4)
self._update_combi_box()
self._update_titles()
def on_expose_v(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(5)
self._update_combi_box()
self._update_titles()
def on_expose_m(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(6)
self._update_combi_box()
self._update_titles()
def on_expose_defo(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(7)
self._update_combi_box()
self._update_titles()
    def _expose_commun(self, new_status):
        """Common status-switch logic shared by the expose callbacks.

        Moves the active drawing (and its bar children, and its parent when
        the active drawing is itself a bar child) to *new_status*, drops the
        cached drawing patterns, and makes sure the drawing widget (not a
        textview) is displayed before triggering a redraw.  Order of the
        mutations matters: child/parent bookkeeping happens before the
        pattern invalidation of the active drawing.
        """
        #print("_expose_commun")
        tab = self.active_tab
        tab.status = 0
        drawing = tab.active_drawing
        old_status = drawing.status
        drawing.set_status(new_status)
        if old_status == 8:
            # Leaving influence-line mode: rebuild the right-hand menu.
            self._fill_right_menu()
        if not new_status in [4, 5, 6, 7]:
            # Outside internal-force/deflection views, the auxiliary
            # characteristic drawing (if any) is removed entirely.
            key = drawing.get_char_drawing()
            if not key is None:
                char_drawing = tab.drawings[key]
                del(drawing.childs[key])
                char_drawing.mapping.remove_map(key)
                del(tab.drawings[key])
        li = drawing.get_bar_drawings()
        for key in li:
            child = drawing.childs[key]
            sync = child.options['Sync']
            if sync:
                child.del_patterns()
            child.set_status(new_status)
        if not drawing.get_is_parent():
            # Active drawing is a bar child: propagate to its parent too.
            parent = drawing.parent
            sync = drawing.options['Sync']
            if sync:
                parent.del_patterns()
            parent.set_status(new_status)
        drawing.del_patterns()
        layout = tab.layout
        sw = tab.sw
        if isinstance(sw.get_child(), Gtk.TextView):
            self._clear_sw_content()
            self._add_drawing_widget()
        tab.configure_event(layout)
        tab.layout.queue_draw()
# -----------------------------------------------------------
#
# Méthodes relatives au basculement de mode (graphe/info)
#
# -----------------------------------------------------------
# pas très utile
def _get_has_textview(self):
"""Return True if screen is textview"""
tab = self.active_tab
if tab.status == 0:
return False
return True
    def _add_drawing_widget(self, tab=None):
        """Put the drawing layout back into the tab's scrolled window.

        Defaults to the active tab and resets its status to 0.
        """
        #print("_add_drawing_widget finir")
        if tab is None:
            tab = self.active_tab
        tab.status = 0
        sw = tab.sw
        area = tab.layout
        sw.add(area) # triggers one configure_event
    def _add_textview(self):
        """Create a textview, add it to the active tab's scrolled window
        and return it."""
        tab = self.active_tab
        sw = tab.sw
        textview = Gtk.TextView()
        textview.show()
        #sw.add_with_viewport(textview)
        sw.add(textview)
        return textview
    # TODO: rename
    def _clear_sw_content(self, tab=None):
        """Remove the current child (layout or textview) from the tab's
        scrolled window.  Defaults to the active tab."""
        #print "_clear_sw_content"
        if tab is None:
            tab = self.active_tab
        sw = tab.sw
        #viewport = sw.get_children()[0]
        child = sw.get_child()
        sw.remove(child)
    def _do_buffer_error(self, textview):
        """Build and install a formatted buffer listing the parser messages
        of the active study.

        Unlike the other _do_buffer_* helpers this one sets the buffer on
        *textview* itself, because it must anchor one status icon widget per
        message after the buffer is attached.
        """
        #print 'Main::_do_buffer_error'
        textbuffer = Gtk.TextBuffer()
        end_iter = textbuffer.get_end_iter()
        h1 = textbuffer.create_tag("h1", weight = Pango.Weight.BOLD,
            size_points = 12.0, foreground = "purple")
        h2 = textbuffer.create_tag("h2", weight = Pango.Weight.BOLD,
            size_points = 11.0)  # NOTE(review): h2 tag is created but unused here
        p = textbuffer.create_tag("p", weight = Pango.Weight.NORMAL,
            size_points = 9.0)
        # Maps message code -> icon name. # TODO: finish XXX
        id_image = {0 : 'dialog-error', 1 : 'dialog-warning', 2 : 'dialog-information', 3 : 'dialog-information'}
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        try:
            errors = rdm.errors
        except AttributeError:
            errors = None
        text = "Messages pour l'étude \"%s\"\n" % study.name
        textbuffer.insert_with_tags(end_iter, text, h1)
        # li_anchor always ends up with at least one element
        li_anchor = []
        if errors is None:
            anchor = textbuffer.create_child_anchor(end_iter)
            li_anchor.append((anchor, 3))
            text = " Veuillez enregistrer l'étude en cours.\n"
            textbuffer.insert_with_tags(end_iter, text, p)
        elif len(errors) == 0:
            anchor = textbuffer.create_child_anchor(end_iter)
            li_anchor.append((anchor, 3))
            text = " Aucune erreur a été détectée pendant la lecture des données.\n"
            textbuffer.insert_with_tags(end_iter, text, p)
        else:
            for elem in errors:
                code = elem[1]
                text = elem[0]
                anchor = textbuffer.create_child_anchor(end_iter)
                li_anchor.append((anchor, code))
                text = ' %s' % text
                textbuffer.insert_with_tags(end_iter, '%s\n' % text, p)
        textview.set_buffer(textbuffer)
        # Insert one status icon per message at its anchor.
        for elem in li_anchor:
            code = elem[1]
            image = Gtk.Image()
            image.set_from_icon_name(id_image[code], Gtk.IconSize.MENU)
            image.show()
            textview.add_child_at_anchor(image, elem[0])
        #textview.scroll_to_iter(end_iter, 0)  # does not work
        #return textbuffer
    def _do_buffer_eq(self):
        """Build and return the text buffer holding the curve equations.

        Dispatches to fill_buffer1 for influence lines (drawing status 8)
        and fill_buffer2 for internal forces / deflection.
        """
        textbuffer = Gtk.TextBuffer()
        #pixbuf = GdkPixbuf.Pixbuf.new_from_xpm_data(book_closed_xpm)
        tab = self.active_tab
        drawing = tab.active_drawing
        status = drawing.status
        if status == 8:
            self.fill_buffer1(textbuffer, drawing)
        else:
            self.fill_buffer2(textbuffer, drawing)
        return textbuffer
    def fill_buffer1(self, textbuffer, drawing):
        """Fill *textbuffer* with the equations of an influence line.

        Reads the selected influence object from the drawing, then formats,
        for every bar considered, the piecewise polynomial equations
        returned by rdm.InfluBarre via set_equation_string.
        """
        h1 = textbuffer.create_tag("h1", weight=Pango.Weight.BOLD, size_points=12.0, foreground="purple")
        h2 = textbuffer.create_tag("h2", weight=Pango.Weight.BOLD, size_points=11.0)
        end_iter = textbuffer.get_end_iter()
        study = self.studies[drawing.id_study]
        rdm = study.influ_rdm
        struct = rdm.struct
        units = struct.units
        factor_F = units['F']
        factor_L = units['L']
        unit_F = study.get_unit_name('F')
        unit_L = study.get_unit_name('L')
        if drawing.s_influ is None:
            return
        obj = drawing.influ_list[drawing.s_influ]
        status = obj.status
        u = obj.u
        elem = obj.elem
        # Unit kind of the influence quantity: force, moment or length.
        if status == 1 or status == 4:
            type = "F"
        elif status == 2:
            type = "M"
        elif status == 3:
            type = "L"
        texts = {1: "Effort tranchant", 2: "Moment fléchissant", 3: "Déformée", 4: "Réaction d'appui"}
        text = "Equations des courbes d'influence :\n%s\n" % texts[status]
        textbuffer.insert_with_tags(end_iter, text, h1)
        if status == 4:
            text = "Noeud : %s\n" % elem
        else:
            text = "Barre : %s, position x=%s\n" % (elem, u)
        textbuffer.insert(end_iter, text)
        try:
            bars = drawing.s_influ_bars
        except AttributeError:
            bars = rdm.struct.Barres
        for barre in bars:
            data = rdm.InfluBarre(barre, elem, u, status, True)
            text = "\tBarre = %s\n" % barre
            textbuffer.insert_with_tags(end_iter, text, h2)
            text2 = ''
            xprec = 0.
            # data is a sequence of (x_end, coefficients) segments.
            for tu in data:
                x, coefs = tu[0], tu[1]
                x /= factor_L
                text2 += "x compris entre %s et %s %s\n" % (xprec, x, unit_L)
                text2 += self.set_equation_string(coefs, factor_L, factor_F, unit_L, unit_F, type)
                xprec = x
            textbuffer.insert(end_iter, text2)
    def fill_buffer2(self, textbuffer, drawing):
        """Fill *textbuffer* with the equations of internal forces or the
        deflected shape for the drawing's selected load case.

        Drawing status selects the quantity: 4 normal force, 5 shear,
        6 bending moment, 7 deflection.
        """
        h1 = textbuffer.create_tag("h1", weight=Pango.Weight.BOLD, size_points=12.0, foreground="purple")
        h2 = textbuffer.create_tag("h2", weight=Pango.Weight.BOLD, size_points=11.0)
        end_iter = textbuffer.get_end_iter()
        study = self.studies[drawing.id_study]
        status = drawing.status
        rdm = study.rdm
        struct = rdm.struct
        units = struct.units
        factor_F = units['F']
        factor_L = units['L']
        unit_F = study.get_unit_name('F')
        unit_L = study.get_unit_name('L')
        # Unit kind of the plotted quantity (NOTE(review): `type` stays
        # undefined for any other status — callers only pass 4-7).
        if status == 4 or status == 5:
            type = "F"
        elif status == 6:
            type = "M"
        elif status == 7:
            type = "L"
        n_case = drawing.s_curve
        if n_case is None:
            textbuffer.insert_with_tags(end_iter, "Données indisponibles", h1)
            return
        Char = rdm.GetCharByNumber(n_case)
        name = rdm.GetCharNameByNumber(n_case)
        text = "Equations des courbes pour \"%s\"\n" % name
        textbuffer.insert_with_tags(end_iter, text, h1)
        texts = {4: "Effort normal", 5: "Effort tranchant", 6: "Moment fléchissant", 7: "Déformée"}
        text = texts[status]
        textbuffer.insert(end_iter, "(%s)\n" % text)
        for barre in rdm.struct.Barres:
            text = "\tBarre = %s\n" % barre
            textbuffer.insert_with_tags(end_iter, text, h2)
            data = rdm.GetDataEq(barre, Char, status)
            if data == []:
                text = "\tpas d'équation disponible\n"
                textbuffer.insert(end_iter, text)
                continue
            text2 = ''
            xprec = 0.
            # data is a sequence of (x_end, coefficients) segments.
            for tu in data:
                x, coefs = tu[0], tu[1]
                x /= factor_L
                text2 += "x compris entre %s et %s %s\n" % (xprec, x, unit_L)
                text2 += self.set_equation_string(coefs, factor_L, factor_F, unit_L, unit_F, type)
                xprec = x
            textbuffer.insert(end_iter, text2)
# XXX suppression des 0 à faire
def set_equation_string(self, coefs, factor_L, factor_F, name_L, name_F, type):
"""Met en forme l'équation donnée par les coefficients"""
n_coefs = len(coefs)
li = []
if type == 'F':
name = name_F
conv = 1./factor_F
elif type == 'M':
name = "%s.%s" % (name_F, name_L)
conv = 1./factor_F/factor_L
elif type == 'L':
name = name_L
conv = 1./factor_L
for c in reversed(coefs):
c *= conv
conv *= factor_L
li.append(c)
li.reverse()
coefs = li
text = ""
if n_coefs == 2:
a, b = coefs
text += "y(%s)=%s*x+%s\n" % (name, a, b)
elif n_coefs == 4:
a, b, c, d = coefs
if a == 0.:
text += "y(%s)=%s*x^2+%s*x+%s\n" % (name, b, c, d)
else:
text += "y(%s)=%s*x^3+%s*x^2+%s*x+%s\n" % (name, a, b, c, d)
elif len(coefs) == 5:
a, b, c, d, e = coefs
if a == 0.:
text += "y(%s)=%s*x^3+%s*x^2+%s*x+%s\n" % (name, b, c, d, e)
else:
text += "y(%s)=%s*x^4+%s*x^3+%s*x^2 +%s*x+%s\n" % (name, a, b, c, d, e)
elif len(coefs) == 6:
a, b, c, d, e, f = coefs
if a == 0.:
text += "y(%s)=%s*x^4+%s*x^3+%s*x^2 +%s*x+%s\n" % (name, b, c, d, e, f)
else:
text += "y(%s)=%s*x^5+%s*x^4+%s*x^3+%s*x^2+%s*x+%s\n" % (name, a, b, c, d, e, f)
else:
print('debug in do_buffer_eq',len(tu))
text = text.replace('+-', '-')
return text
    def _do_buffer_resu(self):
        """Build and return the text buffer with the main numerical results
        of the selected load case.

        Sections: degree-of-freedom values per node, member-end forces per
        bar, support reactions.  Falls back to a "no values available"
        message when no load case can be resolved or the case is unsolved.
        """
        textbuffer = Gtk.TextBuffer()
        #pixbuf = GdkPixbuf.Pixbuf.new_from_xpm_data(function.book_closed_xpm)
        end_iter = textbuffer.get_end_iter()
        h1 = textbuffer.create_tag("h1", weight=Pango.Weight.BOLD, size_points=12.0, foreground="purple")
        h2 = textbuffer.create_tag("h2", weight=Pango.Weight.BOLD, size_points=11.0)
        h3 = textbuffer.create_tag("h3", weight=Pango.Weight.BOLD, size_points=10.0)
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        struct = rdm.struct
        units = struct.units
        RotuleElast = struct.RotuleElast
        # Resolve the load case: explicit selection, first of the selected
        # set, then first available.
        case = drawing.s_case
        if case is None:
            try:
                case = drawing.s_cases[0]
            except IndexError:
                case = drawing.get_first_case(rdm)
        if case is None:
            textbuffer.insert_with_tags(end_iter, "Aucune valeur disponible", h1)
            return textbuffer
        Char = rdm.GetCharByNumber(case)
        if Char.status == 0:
            textbuffer.insert_with_tags(end_iter, "Aucune valeur disponible", h1)
            return textbuffer
        factor_F = units['F']
        factor_L = units['L']
        unit_F = study.get_unit_name('F')
        unit_L = study.get_unit_name('L')
        text = "Principales valeurs numériques\npour le chargement \"%s\"\n" % Char.name
        textbuffer.insert_with_tags(end_iter, text, h1)
        text = "Valeurs des degrés de liberté\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        w_relax = Char.GetBarreRotation()
        texts = ['u', 'v', 'w']
        if Char.KS.n_ddl == 0:
            # NOTE(review): the per-node loop below still runs in this case
            # — confirm that is intended.
            text = '\tAucun degré de liberté non nul\n'
            textbuffer.insert(end_iter, text)
        for node in struct.Nodes:
            ddls = Char.ddlValue[node]
            text = '\tNoeud %s\n' % node
            textbuffer.insert_with_tags(end_iter, text, h3)
            for i, ddl in enumerate(ddls):
                if i == 0 or i == 1:
                    # Translations u, v (length units).
                    name = texts[i]
                    unit = unit_L
                    ddl /= factor_L
                    textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
                elif i == 2:
                    # Rotation w (radians), with special handling for
                    # elastic hinges and bar-end releases.
                    name = texts[2]
                    unit = 'rad'
                    if node in RotuleElast:
                        barre = RotuleElast[node][0]
                        textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
                        textbuffer.insert(end_iter, '\t\tw=%s %s (%s)\n' % (ddls[3], unit, barre))
                    elif node in w_relax:
                        for barre, w in w_relax[node].items():
                            textbuffer.insert(end_iter, '\t\tw=%s %s (%s)\n' % (w, unit, barre))
                    else:
                        textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
        text = "Sollicitations aux extrémités des barres\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        di = Char.GetSollicitationBarre(rdm.conv)
        texts = ['N', 'V', 'M']
        #unit = function.return_key(Const.UNITS['F'], factor)
        for barre, nodes in di.items():
            text = '\tBarre %s\n' % barre
            textbuffer.insert_with_tags(end_iter, text, h3)
            for node, forces in nodes.items():
                text = '\t\tNoeud %s\n' % node
                textbuffer.insert(end_iter, text)
                for i, force in enumerate(forces):
                    if force == 0:
                        continue  # zero components are not listed
                    force /= factor_F
                    name = texts[i]
                    if i == 2:
                        # Moment: extra length factor and compound unit.
                        force /= factor_L
                        textbuffer.insert(end_iter, '\t\t\t%s=%s %s.%s\n' % (name, force, unit_F, unit_L))
                    else:
                        textbuffer.insert(end_iter, '\t\t\t%s=%s %s\n' % (name, force, unit_F))
        text = "Calcul des réactions d'appui\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        try:
            di = Char.Reactions
        except AttributeError:
            di = Char.GetCombiReac()
        for node, forces in di.items():
            text = '\t\tNoeud %s\n' % node
            textbuffer.insert(end_iter, text)
            for name, force in forces.items():
                force /= factor_F
                if name == 'Mz':
                    force /= factor_L
                    textbuffer.insert(end_iter, '\t\t\t%s=%s %s.%s\n' % (name, force, unit_F, unit_L))
                else:
                    textbuffer.insert(end_iter, '\t\t\t%s=%s %s\n' % (name, force, unit_F))
        return textbuffer
def _print_message(self, textview):
"""Affichage des messages écrits et mise en forme
type = 0 : errors
type = 1 : numerical values"""
#print "Main::print_message"
status = self.active_tab.status
textview.set_left_margin(10)
textview.set_pixels_above_lines(10)
textbuffer = Gtk.TextBuffer()
if status == 1:
textbuffer = self._do_buffer_resu()
textview.set_buffer(textbuffer)
elif status == 2:
self._do_buffer_error(textview)
elif status == 3:
textbuffer = self._do_buffer_eq()
textview.set_buffer(textbuffer)
# -----------------------------------------------------------
#
# Méthodes en relation avec les charges roulantes
#
# -----------------------------------------------------------
    def on_expose_move(self, widget):
        """Switch the active drawing to status 9 (moving loads) and redraw."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        #drawing.status = 9
        drawing.set_status(9)
        id_study = drawing.id_study
        study = self.studies[id_study]  # NOTE(review): unused local
        self._do_new_drawing()
# -----------------------------------------------------------
#
# Méthodes en relation avec les lignes d'influence
#
# -----------------------------------------------------------
    def on_expose_influ(self, widget):
        """Switch the active drawing to status 8 (influence lines).

        A bar child drawing is first replaced by its parent, since influence
        lines are handled on the parent drawing.
        """
        #print "on_expose_influ"
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        if drawing.get_is_bar_drawing():
            drawing = tab.active_drawing = drawing.parent
        drawing.set_status(8)
        id_study = drawing.id_study
        study = self.studies[id_study]  # NOTE(review): unused local
        self._fill_right_menu()
        self._do_new_drawing()
        self._update_combi_box()
        self._update_titles()
    def _fill_influ_menu(self, tab, box):
        """(Re)build the influence-line box inside the right-hand menu.

        Removes the previous second child of *box* (if any), warns when the
        structure has no straight bars, and packs a fresh LigneInfluBox
        initialised from the drawing's current influence selection.
        """
        #print "_fill_influ_menu"
        childs = box.get_children()
        try:
            child = childs[1]
            box.remove(child)
        except IndexError:
            pass
        drawing = tab.active_drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        struct = rdm.struct
        barres = struct.UserBars
        if len(barres) == 0 and len(struct.SuperBars) == 0:
            self.message.set_message(("Les lignes d'influence ne fonctionnent que sur des barres rectilignes", 0))
        try:
            obj = drawing.influ_list[drawing.s_influ]
        except (KeyError, AttributeError, TypeError):
            obj = None
        # Make sure s_influ exists on the drawing even when unset.
        try:
            drawing.s_influ
        except AttributeError:
            drawing.s_influ = None
        try:
            bars = drawing.s_influ_bars
        except AttributeError:
            bars = []
        tab.influ_menu = classLigneInflu.LigneInfluBox(self, study, obj, bars)
        tab.right_menu.pack_start(tab.influ_menu.get_box(), False, False, 0)
    def area_expose_influ(self, widget, reset=True):
        """Launch an influence-line computation.

        Reads the parameters from the influence menu, stores them in a new
        InfluParams object (with the first free integer id), selects it and
        redraws.  With reset=True any previously computed curves are
        discarded first.
        """
        #print "area_expose_influ"
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing.get_is_bar_drawing():
            drawing = tab.active_drawing = drawing.parent
        if reset:
            drawing.influ_list = {}
        params = tab.influ_menu.get_data()
        if params is None:
            self.message.set_message(("Choisir un élément", 1))
            return
        influ_list = drawing.influ_list
        # Find the first unused integer key.
        id = 0
        while True:
            if not id in influ_list:
                break
            id += 1
        Obj = classDrawing.InfluParams(id)
        Obj.add(params)
        influ_list[Obj.id] = Obj
        drawing.s_influ = Obj.id
        self._do_new_drawing()
        self._update_combi_box()
        self._update_titles()
        self.message.set_message(None) # TODO: show a more useful message
    def on_del_influs(self, widget):
        """Delete all influence-line curves of the active drawing and
        redraw."""
        tab = self.active_tab
        drawing = tab.active_drawing
        drawing.influ_list = {}
        try:
            del(drawing.user_values[drawing.status])
        except (KeyError, AttributeError):
            pass
        drawing.s_influ = None
        self._do_new_drawing()
# -----------------------------------------------------------
#
# Méthodes en relation avec l'éditeur de données
#
# -----------------------------------------------------------
    def _set_name(self, id_study):
        """Make sure the study has a file path, asking the user if needed.

        Returns True when the study already had a path or the user chose
        one (the editor's data and the study are updated accordingly),
        False when the user cancelled the file dialog.
        """
        study = self.studies[id_study]
        path = study.path
        if not path is None:
            return True
        path = file_tools.recursive_file_select(self.UP.get_default_path())
        if path is None:
            return False
        ed_data = self.editor.data_editors[id_study]
        ed_data.path = path
        name = os.path.basename(path)
        ed_data.name = name
        study.name = name
        study.path = path
        return True
    def update_from_editor(self, widget=None):
        """Handle a save triggered from the data editor.

        Saves the study (asking for a file name if needed), rebuilds the rdm
        instance, refreshes all drawings of the study (removing children
        whose bars disappeared), and updates menus, titles, messages and
        buttons.
        """
        #print("Main::update_from_editor")
        tab = self.active_tab
        drawing = tab.active_drawing
        drawings = tab.drawings
        id_study = drawing.id_study
        ed_data = self.editor.data_editors[id_study]
        study = self.studies[id_study]
        book = self.book  # NOTE(review): unused local
        status = drawing.status  # NOTE(review): unused local (see status_prec below)
        old_path = study.path
        if not self._set_name(id_study):
            return
        if old_path is None: # first save: update the drawing title
            drawing.mapping.infos[drawing.id][drawing.title_id].text = study.name
        resize = False
        if ed_data.size_changed:
            resize = True
        if hasattr(study, "influ_rdm"):
            # Influence results are invalidated by any data change.
            del(study.influ_rdm)
        self._save_rdm_instance(id_study)
        rdm = study.rdm # must be read after _save_rdm_instance
        p_drawings = tab.get_parent_drawings()
        Barres = rdm.struct.GetBars()
        reset = False # whether all child drawings must be removed
        del_drawings = []
        if len(Barres) == 0:
            reset = True
        for d in p_drawings:
            if not d.id_study == id_study:
                continue
            d.update_s_data(rdm, Barres)
            if reset:
                # NOTE(review): iterating d.childs yields keys, yet child.id
                # is accessed — confirm keys are drawing objects here.
                for child in d.childs:
                    del(tab.drawings[child.id])
                d.childs = {}
                continue
            childs = d.childs
            for key in childs:
                child = d.childs[key]
                resu = child.update_s_data(rdm, Barres)
                if resu is False:
                    # Bar no longer exists: schedule the child for removal.
                    del_drawings.append(child.id)
                #del(d.childs[key])
        for key in del_drawings:
            d = tab.drawings[key]
            tab.remove_drawing(d)
        status_prec = drawing.status
        self.editor.update_editor_title()
        self.editor.set_is_changed  # NOTE(review): attribute access only —
        # probably a missing call `self.editor.set_is_changed(...)`; confirm.
        rdm_status = rdm.status
        if not rdm_status == 1:
            # Computation failed: fall back to the structure view / errors.
            drawing.status = 0
            if tab.status == 1:
                tab.status = 2
        if tab.status == 0:
            drawings = tab.drawings
            for id, drawing1 in drawings.items():
                if not drawing1.id_study == id_study:
                    continue
                if resize:
                    drawing1.set_scale(rdm.struct)
                drawing1.del_patterns()
            self._fill_right_menu()
            #tab.del_surface()
            tab.configure_event(tab.layout)
            tab.layout.queue_draw()
        else: # tab status 1 or 2: refresh the textview instead
            layout = tab.sw.get_child()
            tab.sw.remove(layout)
            textview = Gtk.TextView()
            self._print_message(textview)
            textview.show()
            tab.sw.add(textview)
            self._fill_right_menu()
        self._update_combi_box()
        self._update_titles()
        self._show_message(rdm.errors, False)
        # Update the toolbar buttons for the new rdm status.
        self._set_buttons_rdm(rdm_status)
    def _save_rdm_instance(self, id_study):
        """Recompute the study's rdm instance after editor changes.

        Writes the study file first, then rebuilds the Structure from the
        editor's XML and wraps it in a fresh R_Structure.
        """
        # file writing
        data_editor = self.editor.data_editors[id_study]
        self.editor.save_study(data_editor)
        study = self.studies[id_study]
        path = data_editor.path
        xml = data_editor.get_xml()
        structure = classRdm.Structure(xml, path)
        study.rdm = classRdm.R_Structure(structure)
    def _restore_rdm_instance(self, id_study):
        """Recompute the study's rdm instance by re-reading its file,
        discarding unsaved editor changes."""
        data_editor = self.editor.data_editors[id_study]
        study = self.studies[id_study]
        path = data_editor.path
        if path is None: # XXX the study is not removed from the drawing
            return
        structure = classRdm.StructureFile(path)
        study.rdm = classRdm.R_Structure(structure)
    def _update_editor(self):
        """Sync the editor window after the active study changed.

        Destroys the editor when no drawing and no editor data remain,
        toggles its sensitivity according to the drawing status, and opens
        the editor page of the current study.
        """
        #print("_update_editor")
        if self.editor.w2.get_window() is None:
            return
        tab = self.active_tab
        drawing = tab.active_drawing
        ed_data = self.editor.data_editors
        #print len(self.studies), len(ed_data)
        if drawing is None:
            if len(ed_data) == 0:
                self.editor.w2.destroy()
                del (self.editor)
                return
            self.editor.w2.set_sensitive(False)
            return
        id_study = drawing.id_study
        study = self.studies[id_study]
        try:
            status = drawing.status
        except AttributeError:
            status = 0
        if status == -1:
            self.editor.w2.set_sensitive(False)
        else:
            self.editor.w2.set_sensitive(True)
        self.editor.new_page_editor(study)
        #assert len(self.studies) == len(ed_data)
def _get_record_id(self, changes):
"""Récupère la liste des études qui doivent être enregistrées à partir des réponses de l'utilisateur"""
ed_data = self.editor.data_editors
must_save = []
action = 0
for i, id in enumerate(changes):
study = ed_data[id]
action = file_tools.exit_as_ok_func(study.name)
if action == -1:
return None
elif action == 1:
must_save.append(id)
elif action == 2:
must_save.extend(changes[i:])
return must_save
return must_save
    def _destroy_editor(self, widget, event):
        """Handle the editor window's delete-event.

        Asks which modified studies to save; returning True keeps the
        window open (user cancelled).  Otherwise saves/restores each study,
        refreshes the active tab's drawings, and schedules the background
        update of the other tabs.
        """
        tab = self.active_tab
        studies = self.studies
        if not tab.status == 0:
            # Put the drawing widget back before refreshing.
            self._clear_sw_content()
            self._add_drawing_widget()
            tab.status = 0
        # Update the studies shown in the active page.
        changes = self.editor.get_modified_studies()
        must_save = self._get_record_id(changes) # studies that must be saved
        if must_save is None:
            return True # keep True: cancels the window close
        else:
            self.editor.w2.destroy()
        for id in changes:
            if id in must_save:
                self._set_name(id)
                self._save_rdm_instance(id)
            else:
                self._restore_rdm_instance(id)
        for id in tab.drawings: # refresh the drawings of the active tab
            drawing = tab.drawings[id]
            if drawing.id_study in changes:
                struct = studies[drawing.id_study].rdm.struct
                drawing.set_scale(struct)
                drawing.del_patterns()
        self._fill_right_menu()
        if not tab.active_drawing is None:
            study = studies[tab.active_drawing.id_study]
            self._set_buttons_rdm(study.rdm.status)
        #tab.del_surface()
        tab.configure_event(tab.layout)
        tab.layout.queue_draw()
        self._update_titles()
        self._update_combi_box()
        #self._show_message(study.rdm.errors, False)
        GLib.idle_add(self._bg_from_editor_update, changes)
    def _bg_from_editor_update(self, changes):
        """Idle callback: refresh the drawings of the non-visible tabs after
        the editor closed, then drop the editor reference."""
        studies = self.studies
        for tab in self._tabs:
            if tab is self.active_tab:
                continue # already done in _destroy_editor
            if not tab.status == 0:
                self._clear_sw_content(tab)
                self._add_drawing_widget(tab)
                tab.status = 0
            for id in tab.drawings:
                drawing = tab.drawings[id]
                if drawing.id_study in changes:
                    struct = studies[drawing.id_study].rdm.struct
                    drawing.set_scale(struct)
                    drawing.del_patterns()
            #tab.del_surface()
            tab.configure_event(tab.layout)
        del (self.editor) # remove if switching to gobject
# -----------------------------------------------------------
#
# Méthodes en relation la fenetre Library
#
# -----------------------------------------------------------
    def on_open_lib(self, widget):
        """Open the profile library from the main window."""
        lib = classProfilManager.ProfilManager()
        lib.window.connect("delete_event", self._close_library, lib)
    def _close_library(self, widget, event, lib):
        """Close the profile library and re-enable the editor's
        profile-manager button when applicable."""
        #print "Main::_close_library"
        lib.destroy()
        #lib.window = None
        del(lib)
        if hasattr(self, 'editor'):
            self.editor._active_selection_button(False)
            if hasattr(self.editor, 'profil_manager'):
                self.editor.profil_manager.button.set_sensitive(True)
                del(self.editor.profil_manager)
# ------------ tools ---------------------
def print_rdm_status(self, rdm):
"""Affichage des status des classes de Rdm :: debug"""
print("Structure::status=", rdm.struct.status)
print("R_Structure::status=", rdm.status)
if rdm.status == -1: return
for i in rdm.Chars:
Char = rdm.Chars[i]
print("Case Name=%s Status lecture=%s Status Inv=%s" % (Char.name, Char.status, Char.status))
class MyThread(threading.Thread):
    """Background daemon thread that checks for a newer pyBar release.

    Starts itself on construction and stores the result in
    ``main.new_version``: the newest version number as a float, or
    False when no update is available or the check failed.
    """

    def __init__(self, main):
        super(MyThread, self).__init__()
        self.main = main
        self.daemon = True  # do not keep the interpreter alive on exit
        self.start()

    def run(self):
        self.main.new_version = self._get_next_version()
        #print("new_version=", self.main.new_version)

    def _get_next_version(self):
        """Check the latest version online; return it, or False.

        Returns False on any network, decoding or parsing error, or when
        the online version is not newer than Const.VERSION.
        """
        import urllib.request
        try:
            # context manager guarantees the socket is closed even if
            # read() fails (the original leaked it in that case)
            with urllib.request.urlopen(Const.VERSION_URL) as sock:
                raw = sock.read()
        except (IOError, EOFError):
            return False
        try:
            # bug fix: sock.read() returns bytes and float() rejects
            # bytes in Python 3 — decode before parsing
            latest = float(raw.decode('ascii', 'ignore').strip())
        except ValueError:
            return False
        if latest > float(Const.VERSION):
            return latest
        return False
if __name__ == "__main__":
    #try:
    MyApp = MainWindow()  # build the GUI
    MyThread(MyApp)       # background check for a newer version
    Gtk.main()            # enter the GTK main loop
    #except KeyboardInterrupt:
    #   print('eeee')
    #   sys.exit(0)
|
Philippe-Lawrence/pyBar
|
pyBar.py
|
Python
|
gpl-3.0
| 101,841
|
[
"FLEUR"
] |
37d3f64d505130fcf0c1eea8779de4cddccae61d59eb45a1da143ab1e8b83bd9
|
# -----------------------------------------------------------------------------
# Download data:
# - Browser:
# http://midas3.kitware.com/midas/folder/10409 => VisibleMale/vm_head_frozenct.mha
# - Terminal
# curl "http://midas3.kitware.com/midas/download?folders=&items=235235" -o vm_head_frozenct.mha
# -----------------------------------------------------------------------------
from vtk import *
from tonic.vtk import *
from tonic.vtk.dataset_builder import *
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/head_ct_4_features'  # output directory for the generated dataset
file_path = '/Users/seb/Downloads/vm_head_frozenct.mha'  # input CT volume (see download notes above)
field = 'MetaImage'
fieldRange = [0.0, 4095.0]  # full scalar range of the CT volume
# (min, max) scalar intervals isolating each anatomical feature
features = [
    (100, 800),   # Fluid 450 => 0.10
    (900, 1250),  # Skin 1075 => 0.26
    (1400, 2525), # Skull 1962.5 => 0.47
    (2525, 4000)  # Teeth 3262.5 => 0.79
]
# lookup table shipped with the dataset: piecewise-constant color per feature
# (x values are normalized positions inside fieldRange)
sections = {
    'LookupTables': {
        "VolumeScalar": {
            "controlpoints": [
                {"x": 0.00, "r": 0.5, "g": 0.5, "b": 0.5}, # Fluid
                {"x": 0.11, "r": 0.5, "g": 0.5, "b": 0.5}, # Fluid
                {"x": 0.12, "r": 1.0, "g": 0.8, "b": 0.4}, # Skin
                {"x": 0.27, "r": 1.0, "g": 0.8, "b": 0.4}, # Skin
                {"x": 0.28, "r": 1.0, "g": 1.0, "b": 1.0}, # Skull
                {"x": 0.48, "r": 1.0, "g": 1.0, "b": 1.0}, # Skull
                {"x": 0.49, "r": 1.0, "g": 0.8, "b": 0.6}, # Teeth
                {"x": 1.00, "r": 1.0, "g": 0.8, "b": 0.6}  # Teeth
            ],
            "discrete" : True
        }
    }
}
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWiseAsStep(pwf, dataRange, start, end):
    """Configure *pwf* as a step opacity function over [start, end].

    Opacity is 1.0 inside [start, end] and 0.0 elsewhere in dataRange;
    the one-unit ramps at start-1 and end+1 keep the transfer function
    well defined between consecutive control points.

    Bug fix: the original ignored the *pwf* argument and mutated the
    global scalarOpacity object; it now operates on *pwf* (behavior is
    unchanged for the existing call site, which passes scalarOpacity).
    """
    pwf.RemoveAllPoints()
    pwf.AddPoint(dataRange[0], 0.0)
    pwf.AddPoint(start - 1, 0.0)
    pwf.AddPoint(start, 1.0)
    pwf.AddPoint(end, 1.0)
    pwf.AddPoint(end + 1, 0.0)
    pwf.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
# read the CT volume from disk
reader = vtkMetaImageReader()
reader.SetFileName(file_path)
# GPU ray-cast volume rendering; RenderToImage lets the builder capture layers
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(reader.GetOutputPort())
mapper.RenderToImageOn()
# constant white color map — the per-feature colors come from `sections`
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(fieldRange[0], 1.0, 1.0, 1.0)
colorFunction.AddRGBPoint(fieldRange[1], 1.0, 1.0, 1.0)
# opacity is reconfigured per feature by updatePieceWiseAsStep
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(512, 512)
renderer = vtkRenderer()
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
# Camera setting
camera = {
    'position': [-0.264, -890.168, -135.0],
    'focalPoint': [-0.264, -30.264, -135.0],
    'viewUp': [0,0,1]
}
update_camera(renderer, camera)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
phi = range(0, 360, 30)     # azimuth camera angles (degrees)
theta = range(-60, 61, 30)  # elevation camera angles (degrees)
vcdsb = SortedCompositeDataSetBuilder(dataset_destination_path, {'type': 'spherical', 'phi': phi, 'theta': theta}, sections=sections)
idx = 0
vcdsb.start(window, renderer)
for feature in features:
    idx += 1
    # isolate the current feature with a step opacity function
    updatePieceWiseAsStep(scalarOpacity, fieldRange, feature[0], feature[1])
    # Capture layer, keyed by the feature's mid-range scalar value
    vcdsb.activateLayer(field, (feature[0] + feature[1])/2)
    # Write data
    vcdsb.writeData(mapper)
vcdsb.stop()
|
Kitware/tonic-data-generator
|
scripts/vtk/medical/head-ct-volume-step-func.py
|
Python
|
bsd-3-clause
| 4,130
|
[
"VTK"
] |
fe87ee6e98f9def48a12a3680fd934996c9fd24a205c66b9083dfa430f33d626
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 2 03:58:30 2017
This file contains multiple functions that contribute to userinput
@author: Simon Moe Sørensen, moe.simon@gmail.com
"""
import numpy as np
def inputStr(prompt):
    """Prompt the user and return the entered line as a string.

    INPUT:
        prompt: String shown to the user
    OUTPUT:
        The entered string
    USAGE:
        inputStr("Please enter a string: ")
    """
    # input() always returns a str and never raises ValueError, so the
    # original retry loop was dead code; it also shadowed the builtin
    # `str`. A single call is behaviorally equivalent.
    return input(prompt)
def displayMenu(options):
    """Display a numbered menu and return the user's validated choice.

    INPUT:
        options: A sequence of strings
    OUTPUT:
        menu: the chosen item number (float) in 1..len(options)
    USAGE:
        menu = displayMenu(options)
    """
    # Print menu
    for i, option in enumerate(options, start=1):
        print("{}. {}".format(i, option))
    # Initial variable
    choice = 0
    # Get menu choice. range.__contains__ handles the float returned by
    # inputNumber (e.g. 3.0 in range(1, 4) is True), so the numpy arange
    # lookup was unnecessary.
    while choice not in range(1, len(options) + 1):
        choice = inputNumber("Please choose a menu item: ")
        if choice > len(options) or choice <= 0:
            print("\nChoice out of menu range")
    return choice
def inputNumber(prompt):
    """Keep prompting until the user enters a valid number.

    INPUT:
        prompt: text shown to the user
    OUTPUT:
        num: the entered value converted to float
    USAGE:
        inputNumber("Please enter a number: ")
    """
    while True:
        raw = input(prompt)
        try:
            return float(raw)
        except ValueError:
            print("Not valid number. Please try again")
|
danmark2312/Project-Electricity
|
functions/userinput.py
|
Python
|
gpl-3.0
| 1,617
|
[
"MOE"
] |
cdedc9389e0d3ec1238e2f94b861f5a11a9e78d2948e5eda54a08bcfacc778d0
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin ok
classe per la gestione delle quote
-------------------
begin : 2014-02-20
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.core import *
from qgis.gui import *
import qgis.utils
import os
import codecs
import math
import sys
from .qad_msg import QadMsg
from . import qad_utils
from .qad_line import getBoundingPtsOnOnInfinityLine, QadLine
from .qad_arc import QadArc
from .qad_geom_relations import *
from . import qad_stretch_fun
from . import qad_layer
from . import qad_label
from .qad_entity import *
from .qad_variables import QadVariables
from .qad_multi_geom import fromQgsGeomToQadGeom
"""
La classe quotatura é composta da tre layer: testo, linea, simbolo con lo stesso sistema di coordinate.
Il layer testo deve avere tutte le caratteristiche del layer testo di QAD ed in più:
- il posizionamento dell'etichetta con modalita "Intorno al punto" con distanza = 0
(che vuol dire punto di inserimento in basso a sx)
- la dimensione del testo in unità mappa (la dimensione varia a seconda dello zoom).
- dimStyleFieldName = "dim_style"; nome del campo che contiene il nome dello stile di quota (opzionale)
- dimTypeFieldName = "dim_type"; nome del campo che contiene il tipo dello stile di quota (opzionale)
- l'opzione "Mostra etichette capovolte" deve essere su "sempre" nel tab "Etichette"->"Visualizzazione"
- rotFieldName = "rot"; nome del campo che contiene la rotazione del testo
- la rotazione deve essere letta dal campo indicato da rotFieldName
- idFieldName = "id"; nome del campo che contiene il codice della quota (opzionale)
- la rotazione deve essere derivata dal campo rotFieldName
- il font del carattere può essere derivata da un campo
- la dimensione del carattere può essere derivata da un campo
- il colore del testo può essere derivato da un campo (opzionale)
Il layer simbolo deve avere tutte le caratteristiche del layer simbolo di QAD ed in più:
- il simbolo freccia con rotazione 0 deve essere orizzontale con la freccia rivolta verso destra
ed il suo punto di inserimento deve essere sulla punta della freccia
- la dimensione del simbolo in unità mappa (la dimensione varia a seconda dello zoom),
impostare la dimensione del simbolo in modo che la larghezza della freccia sia 1 unità di mappa.
- componentFieldName = "type"; nome del campo che contiene il tipo di componente della quota (vedi QadDimComponentEnum) (opzionale)
- symbolFieldName = "block"; nome del campo che contiene il nome del simbolo (opzionale)
- idParentFieldName = "id_parent"; nome del campo che contiene il codice del testo della quota (opzionale)
- scaleFieldName = "scale"; nome del campo che contiene il fattore di scala del simbolo (opzionale)
se usato usare lo stile "singolo simbolo" (unico che consente di impostare la scala come diametro scala)
la scala deve essere impostata su attraverso Stile->avanzato->campo di dimensione della scala-><nome del campo scala>
la modalità di scala deve essere impostata su attraverso Stile->avanzato->campo di dimensione della scala->diametro scala
- rotFieldName = "rot"; nome del campo che contiene la rotazione del simbolo
la rotazione deve essere letta dal campo indicato da rotFieldName (360-rotFieldName)
Il layer linea deve avere tutte le caratteristiche del layer linea ed in più:
- componentFieldName = "type"; nome del campo che contiene il tipo di componente della quota (vedi QadDimComponentEnum) (opzionale)
- lineTypeFieldName = "line_type"; nome del campo che contiene il tipolinea (opzionale)
- colorFieldName = "color"; nome del campo che contiene il colore 'r,g,b,alpha'; alpha é opzionale (0=trasparente, 255=opaco) (opzionale)
- idParentFieldName = "id_parent"; nome del campo che contiene il codice del testo della quota (opzionale)
"""
#===============================================================================
# QadDimTypeEnum class.
#===============================================================================
class QadDimTypeEnum():
    """Dimension type codes."""
    ALIGNED = "AL"    # linear dimension aligned with the origin points of the extension lines
    ANGULAR = "AN"    # angular dimension: measures the angle between 3 points or between the selected objects
    BASE_LINE = "BL"  # linear, angular or ordinate dimension starting from the baseline of the previous (or a selected) dimension
    DIAMETER = "DI"   # dimension for the diameter of a circle or of an arc
    LEADER = "LD"     # creates a line that connects an annotation to a feature
    LINEAR = "LI"     # linear dimension with a horizontal or vertical dimension line
    RADIUS = "RA"     # radial dimension: measures the radius of a selected circle or arc and shows the text with a radius symbol in front
    ARC_LENTGH = "AR" # dimension for the length of an arc
#===============================================================================
# QadDimComponentEnum class.
#===============================================================================
class QadDimComponentEnum():
    """Codes identifying which component of a dimension a feature represents.

    NOTE(review): DIM_LINE1/DIM_PT1 share the value "D1" and
    DIM_LINE2/DIM_PT2 share "D2" — confirm the overlap is intentional.
    """
    DIM_LINE1 = "D1"          # dimension line ("Dimension line 1")
    DIM_LINE2 = "D2"          # dimension line ("Dimension line 2")
    DIM_LINE_EXT1 = "X1"      # extension of the dimension line ("Dimension line eXtension 1")
    DIM_LINE_EXT2 = "X2"      # extension of the dimension line ("Dimension line eXtension 2")
    EXT_LINE1 = "E1"          # first extension line ("Extension line 1")
    EXT_LINE2 = "E2"          # second extension line ("Extension line 2")
    LEADER_LINE = "L"         # leader line used when the text is outside the dimension ("Leader")
    ARC_LEADER_LINE = "AL"    # leader line connecting the dimension text to the arc being dimensioned (see "dimarc", "leader" option)
    BLOCK1 = "B1"             # first arrow block ("Block 1")
    BLOCK2 = "B2"             # second arrow block ("Block 2")
    LEADER_BLOCK = "LB"       # arrow block in the leader case ("Leader Block")
    ARC_BLOCK = "AB"          # arc symbol ("Arc Block")
    DIM_PT1 = "D1"            # first point being dimensioned ("Dimension point 1")
    DIM_PT2 = "D2"            # second point being dimensioned ("Dimension point 2")
    TEXT_PT = "T"             # dimension text point ("Text")
    CENTER_MARKER_LINE = "CL" # line defining the center marker of an arc or circle
#===============================================================================
# QadDimStyleAlignmentEnum class.
#===============================================================================
class QadDimStyleAlignmentEnum():
    """Dimension alignment modes."""
    HORIZONTAL = 0      # horizontal
    VERTICAL = 1        # vertical
    ALIGNED = 2         # aligned
    FORCED_ROTATION = 3 # forced rotation
#===============================================================================
# QadDimStyleTxtVerticalPosEnum class.
#===============================================================================
class QadDimStyleTxtVerticalPosEnum():
    """Vertical position of the text relative to the dimension line."""
    CENTERED_LINE = 0 # text centered on the dimension line
    ABOVE_LINE = 1    # text above the dimension line; if the dimension line is not horizontal
                      # and the text is between the extension lines and forced horizontal, the text becomes centered
    EXTERN_LINE = 2   # text placed on the side opposite to the dimensioned points
    BELOW_LINE = 4    # text below the dimension line; if the dimension line is not horizontal
                      # and the text is between the extension lines and forced horizontal, the text becomes centered
#===============================================================================
# QadDimStyleTxtHorizontalPosEnum class.
#===============================================================================
class QadDimStyleTxtHorizontalPosEnum():
    """Horizontal position of the text relative to the dimension line."""
    CENTERED_LINE = 0      # text centered on the dimension line
    FIRST_EXT_LINE = 1     # text near the first extension line
    SECOND_EXT_LINE = 2    # text near the second extension line
    FIRST_EXT_LINE_UP = 3  # text above and aligned with the first extension line
    SECOND_EXT_LINE_UP = 4 # text above and aligned with the second extension line
#===============================================================================
# QadDimStyleTxtRotEnum class.
#===============================================================================
class QadDimStyleTxtRotModeEnum():
    """Text rotation modes."""
    HORIZONTAL = 0      # horizontal text
    ALIGNED_LINE = 1    # text aligned with the dimension line
    ISO = 2             # text aligned with the dimension line when between the extension lines,
                        # otherwise horizontal text
    FORCED_ROTATION = 3 # text with forced rotation
#===============================================================================
# QadDimStyleArcSymbolPosEnum class.
#===============================================================================
class QadDimStyleArcSymbolPosEnum():
    """Position of the arc-length symbol relative to the text."""
    BEFORE_TEXT = 0 # symbol before the text
    ABOVE_TEXT = 1  # symbol above the text
    NONE = 2        # no symbol
#===============================================================================
# QadDimStyleArcSymbolPosEnum class.
#===============================================================================
class QadDimStyleTxtDirectionEnum():
    """Reading direction of the dimension text."""
    SX_TO_DX = 0 # from left to right
    DX_TO_SX = 1 # from right to left
#===============================================================================
# QadDimStyleTextBlocksAdjustEnum class.
#===============================================================================
class QadDimStyleTextBlocksAdjustEnum():
    """Strategies to fit the text and the arrows between the extension lines."""
    BOTH_OUTSIDE_EXT_LINES = 0 # move both text and arrows outside the extension lines
    FIRST_BLOCKS_THEN_TEXT = 1 # move the arrows first, then the text too if not enough
    FIRST_TEXT_THEN_BLOCKS = 2 # move the text first, then the arrows too if not enough
    WHICHEVER_FITS_BEST = 3    # move either text or arrows, whichever fits best
#===============================================================================
# QadDim dimension style class
#===============================================================================
class QadDimStyle():
def __init__(self, dimStyle = None):
    """Initialize a dimension style with default settings.

    If *dimStyle* is given, every setting is copied from it via set().
    """
    self.name = "standard" # style name
    self.description = ""
    self.path = "" # path and name of the file the style was saved to / loaded from
    self.dimType = QadDimTypeEnum.ALIGNED # dimension type
    # dimension text
    self.textPrefix = "" # prefix for the dimension text
    self.textSuffix = "" # suffix for the dimension text
    self.textSuppressLeadingZeros = False # whether to suppress leading zeros in the text
    self.textDecimalZerosSuppression = True # suppress trailing zeros in the decimals
    self.textHeight = 1.0 # text height (DIMTXT) in map units
    self.textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE # vertical position of the text relative to the dimension line (DIMTAD)
    self.textHorizontalPos = QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE # horizontal position of the text relative to the dimension line (DIMTAD)
    self.textOffsetDist = 0.5 # extra gap around the text when the dimension line is broken to fit it (DIMGAP)
    self.textRotMode = QadDimStyleTxtRotModeEnum.ALIGNED_LINE # text rotation mode (DIMTIH and DIMTOH)
    self.textForcedRot = 0.0 # forced text rotation
    self.textDecimals = 2 # number of decimals (DIMDEC)
    self.textDecimalSep = "." # decimal separator (DIMDSEP)
    self.textFont = "Arial" # text font name (DIMTXSTY)
    self.textColor = "255,255,255,255" # color of the dimension texts (DIMCLRT); white, fully opaque
    self.textDirection = QadDimStyleTxtDirectionEnum.SX_TO_DX # direction of the dimension text (DIMTXTDIRECTION) 0 = left to right, 1 = right to left
    self.arcSymbPos = QadDimStyleArcSymbolPosEnum.BEFORE_TEXT # whether/where to draw the arc symbol with DIMARC (DIMARCSYM).
    # dimension lines
    self.dimLine1Show = True # show or hide the first dimension line (DIMSD1)
    self.dimLine2Show = True # show or hide the second dimension line (DIMSD2)
    self.dimLineLineType = "continuous" # linetype for the dimension lines (DIMLTYPE)
    self.dimLineColor = "255,255,255,255" # color for the dimension lines (DIMCLRD); white, fully opaque
    self.dimLineSpaceOffset = 3.75 # spacing between dimension lines in baseline dimensions (DIMDLI)
    self.dimLineOffsetExtLine = 0.0 # distance of the dimension line beyond the extension line (DIMDLE)
    # symbols for dimension lines
    # the arrow block is a right-pointing arrow whose insertion point is on the tip of the arrow
    self.block1Name = "triangle2" # symbol used as arrowhead on the first dimension line (DIMBLK1)
    self.block2Name = "triangle2" # symbol used as arrowhead on the second dimension line (DIMBLK2)
    self.blockLeaderName = "triangle2" # symbol used as arrowhead on the leader line (DIMLDRBLK)
    self.blockWidth = 0.5 # horizontal width of the symbol when its size in map units = 1 (see "triangle2")
    self.blockScale = 1.0 # scale factor for the symbol size (DIMASZ)
    self.centerMarkSize = 0.0 # whether to draw the center mark or the center lines for dimensions created with
                              # DIMCENTER, DIMDIAMETER, and DIMRADIUS (DIMCEN).
                              # 0 = nothing, > 0 center mark size, < 0 center line size
    # fitting of the text and the arrows
    self.textBlockAdjust = QadDimStyleTextBlocksAdjustEnum.WHICHEVER_FITS_BEST # (DIMATFIT)
    self.blockSuppressionForNoSpace = False # suppress the arrowheads if there is not enough room inside the extension lines (DIMSOXD)
    # extension lines
    self.extLine1Show = True # show or hide the first extension line (DIMSE1)
    self.extLine2Show = True # show or hide the second extension line (DIMSE2)
    self.extLine1LineType = "continuous" # linetype for the first extension line (DIMLTEX1)
    self.extLine2LineType = "continuous" # linetype for the second extension line (DIMLTEX2)
    self.extLineColor = "255,255,255,255" # color for the extension lines (DIMCLRE); white, fully opaque
    self.extLineOffsetDimLine = 0.0 # distance of the extension line beyond the dimension line (DIMEXE)
    self.extLineOffsetOrigPoints = 0.0 # offset of the extension line from the points being dimensioned (DIMEXO)
    self.extLineIsFixedLen = False # enable fixed-length extension lines (DIMFXLON)
    self.extLineFixedLen = 1.0 # fixed length of the extension lines (DIMFXL), from the dimension line
                               # to the dimensioned point shifted by extLineOffsetOrigPoints
                               # (the extension line never goes beyond the dimensioned point)
    # layers and their characteristics
    # fields must be allocated at QadDimStyle class level because QgsFeature.setFields only keeps a pointer to the fields list
    # which, if allocated locally inside any function, would be destroyed when the function returns
    self.textualLayerName = None # name of the layer storing the dimension text
    self.__textualLayer = None # layer storing the dimension text
    self.__textFields = None
    self.__textualFeaturePrototype = None
    self.linearLayerName = None # name of the layer storing the dimension lines
    self.__linearLayer = None # layer storing the dimension lines
    self.__lineFields = None
    self.__linearFeaturePrototype = None
    self.symbolLayerName = None # name of the layer storing the arrow blocks of the dimension
    self.__symbolLayer = None # layer storing the arrow blocks of the dimension
    self.__symbolFields = None
    self.__symbolFeaturePrototype = None
    self.componentFieldName = "type" # field holding the dimension component type (see QadDimComponentEnum)
    self.lineTypeFieldName = "line_type" # field holding the linetype
    self.colorFieldName = "color" # field holding the color 'r,g,b,alpha'; alpha is optional (0=transparent, 255=opaque)
    self.idFieldName = "id" # field holding the dimension ID in the text layer
    self.idParentFieldName = "id_parent" # field holding the dimension ID in the symbol and line layers
    self.dimStyleFieldName = "dim_style" # field holding the dimension style name
    self.dimTypeFieldName = "dim_type" # field holding the dimension style type
    self.symbolFieldName = "block" # field holding the symbol name
    self.scaleFieldName = "scale" # field holding the size
    self.rotFieldName = "rot" # field holding the rotation in degrees
    if dimStyle is None:
        return
    self.set(dimStyle)
#============================================================================
# FUNZIONI GENERICHE - INIZIO
#============================================================================
def set(self, dimStyle):
    """Copy every setting from *dimStyle* into this style."""
    self.name = dimStyle.name
    self.description = dimStyle.description
    self.path = dimStyle.path
    self.dimType = dimStyle.dimType
    # dimension text
    self.textPrefix = dimStyle.textPrefix
    self.textSuffix = dimStyle.textSuffix
    self.textSuppressLeadingZeros = dimStyle.textSuppressLeadingZeros
    # bug fix: the target attribute was misspelled ("textDecimaZerosSuppression"),
    # so the trailing-zero-suppression flag was never actually copied
    self.textDecimalZerosSuppression = dimStyle.textDecimalZerosSuppression
    self.textHeight = dimStyle.textHeight
    self.textVerticalPos = dimStyle.textVerticalPos
    self.textHorizontalPos = dimStyle.textHorizontalPos
    self.textOffsetDist = dimStyle.textOffsetDist
    self.textRotMode = dimStyle.textRotMode
    self.textForcedRot = dimStyle.textForcedRot
    self.textDecimals = dimStyle.textDecimals
    self.textDecimalSep = dimStyle.textDecimalSep
    self.textFont = dimStyle.textFont
    self.textColor = dimStyle.textColor
    self.textDirection = dimStyle.textDirection
    self.arcSymbPos = dimStyle.arcSymbPos
    # dimension lines
    self.dimLine1Show = dimStyle.dimLine1Show
    self.dimLine2Show = dimStyle.dimLine2Show
    self.dimLineLineType = dimStyle.dimLineLineType
    self.dimLineColor = dimStyle.dimLineColor
    self.dimLineSpaceOffset = dimStyle.dimLineSpaceOffset
    self.dimLineOffsetExtLine = dimStyle.dimLineOffsetExtLine
    # symbols for dimension lines
    self.block1Name = dimStyle.block1Name
    self.block2Name = dimStyle.block2Name
    self.blockLeaderName = dimStyle.blockLeaderName
    self.blockWidth = dimStyle.blockWidth
    self.blockScale = dimStyle.blockScale
    self.blockSuppressionForNoSpace = dimStyle.blockSuppressionForNoSpace
    self.centerMarkSize = dimStyle.centerMarkSize
    # fitting of the text and the arrows
    self.textBlockAdjust = dimStyle.textBlockAdjust
    # extension lines
    self.extLine1Show = dimStyle.extLine1Show
    self.extLine2Show = dimStyle.extLine2Show
    self.extLine1LineType = dimStyle.extLine1LineType
    self.extLine2LineType = dimStyle.extLine2LineType
    self.extLineColor = dimStyle.extLineColor
    self.extLineOffsetDimLine = dimStyle.extLineOffsetDimLine
    self.extLineOffsetOrigPoints = dimStyle.extLineOffsetOrigPoints
    self.extLineIsFixedLen = dimStyle.extLineIsFixedLen
    self.extLineFixedLen = dimStyle.extLineFixedLen
    # layers and their characteristics
    self.textualLayerName = dimStyle.textualLayerName
    self.__textualLayer = dimStyle.__textualLayer
    self.__textFields = dimStyle.__textFields
    self.__textualFeaturePrototype = dimStyle.__textualFeaturePrototype
    self.linearLayerName = dimStyle.linearLayerName
    self.__linearLayer = dimStyle.__linearLayer
    self.__lineFields = dimStyle.__lineFields
    self.__linearFeaturePrototype = dimStyle.__linearFeaturePrototype
    self.symbolLayerName = dimStyle.symbolLayerName
    self.__symbolLayer = dimStyle.__symbolLayer
    self.__symbolFields = dimStyle.__symbolFields
    self.__symbolFeaturePrototype = dimStyle.__symbolFeaturePrototype
    self.componentFieldName = dimStyle.componentFieldName
    self.symbolFieldName = dimStyle.symbolFieldName
    self.lineTypeFieldName = dimStyle.lineTypeFieldName
    self.colorFieldName = dimStyle.colorFieldName
    self.idFieldName = dimStyle.idFieldName
    self.idParentFieldName = dimStyle.idParentFieldName
    self.dimStyleFieldName = dimStyle.dimStyleFieldName
    self.dimTypeFieldName = dimStyle.dimTypeFieldName
    self.scaleFieldName = dimStyle.scaleFieldName
    self.rotFieldName = dimStyle.rotFieldName
#============================================================================
# getPropList
#============================================================================
def getPropList(self):
    """Return the style's properties as a dict: name -> [description, value]."""
    proplist = dict() # dictionary: name -> list [description, value]
    propDescr = QadMsg.translate("Dimension", "Name")
    proplist["name"] = [propDescr, self.name]
    propDescr = QadMsg.translate("Dimension", "Description")
    proplist["description"] = [propDescr, self.description]
    propDescr = QadMsg.translate("Dimension", "File path")
    proplist["path"] = [propDescr, self.path]
    # dimension text
    value = self.textPrefix
    if len(self.textPrefix) > 0:
        value += "<>"
    value += self.textSuffix
    propDescr = QadMsg.translate("Dimension", "Text prefix and suffix")
    proplist["textPrefix"] = [propDescr, value]
    propDescr = QadMsg.translate("Dimension", "Leading zero suppression")
    proplist["textSuppressLeadingZeros"] = [propDescr, self.textSuppressLeadingZeros]
    propDescr = QadMsg.translate("Dimension", "Trailing zero suppression")
    proplist["textDecimalZerosSuppression"] = [propDescr, self.textDecimalZerosSuppression]
    propDescr = QadMsg.translate("Dimension", "Text height")
    proplist["textHeight"] = [propDescr, self.textHeight]
    propDescr = QadMsg.translate("Dimension", "Vertical text position")
    proplist["textVerticalPos"] = [propDescr, self.textVerticalPos]
    propDescr = QadMsg.translate("Dimension", "Horizontal text position")
    proplist["textHorizontalPos"] = [propDescr, self.textHorizontalPos]
    propDescr = QadMsg.translate("Dimension", "Text offset")
    proplist["textOffsetDist"] = [propDescr, self.textOffsetDist]
    propDescr = QadMsg.translate("Dimension", "Text alignment")
    proplist["textRotMode"] = [propDescr, self.textRotMode]
    propDescr = QadMsg.translate("Dimension", "Fixed text rotation")
    proplist["textForcedRot"] = [propDescr, self.textForcedRot]
    propDescr = QadMsg.translate("Dimension", "Precision")
    proplist["textDecimals"] = [propDescr, self.textDecimals]
    propDescr = QadMsg.translate("Dimension", "Decimal separator")
    proplist["textDecimalSep"] = [propDescr, self.textDecimalSep]
    propDescr = QadMsg.translate("Dimension", "Text font")
    proplist["textFont"] = [propDescr, self.textFont]
    propDescr = QadMsg.translate("Dimension", "Text color")
    proplist["textColor"] = [propDescr, self.textColor]
    if self.textDirection == QadDimStyleTxtDirectionEnum.SX_TO_DX:
        value = QadMsg.translate("Dimension", "From left to right")
    else:
        value = QadMsg.translate("Dimension", "From right to left")
    propDescr = QadMsg.translate("Dimension", "Text direction")
    proplist["textDirection"] = [propDescr, value]
    propDescr = QadMsg.translate("Dimension", "Arc len. symbol")
    proplist["arcSymbPos"] = [propDescr, self.arcSymbPos]
    # dimension lines
    propDescr = QadMsg.translate("Dimension", "Dim line 1 visible")
    proplist["dimLine1Show"] = [propDescr, self.dimLine1Show]
    propDescr = QadMsg.translate("Dimension", "Dim line 2 visible")
    proplist["dimLine2Show"] = [propDescr, self.dimLine2Show]
    propDescr = QadMsg.translate("Dimension", "Dim line linetype")
    proplist["dimLineLineType"] = [propDescr, self.dimLineLineType]
    propDescr = QadMsg.translate("Dimension", "Dim line color")
    proplist["dimLineColor"] = [propDescr, self.dimLineColor]
    propDescr = QadMsg.translate("Dimension", "Offset from origin")
    proplist["dimLineSpaceOffset"] = [propDescr, self.dimLineSpaceOffset]
    propDescr = QadMsg.translate("Dimension", "Dim line extension")
    proplist["dimLineOffsetExtLine"] = [propDescr, self.dimLineOffsetExtLine]
    # symbols for dimension lines
    propDescr = QadMsg.translate("Dimension", "Arrow 1")
    proplist["block1Name"] = [propDescr, self.block1Name]
    propDescr = QadMsg.translate("Dimension", "Arrow 2")
    proplist["block2Name"] = [propDescr, self.block2Name]
    propDescr = QadMsg.translate("Dimension", "Leader arrow")
    proplist["blockLeaderName"] = [propDescr, self.blockLeaderName]
    propDescr = QadMsg.translate("Dimension", "Arrowhead width")
    proplist["blockWidth"] = [propDescr, self.blockWidth]
    propDescr = QadMsg.translate("Dimension", "Arrowhead scale")
    proplist["blockScale"] = [propDescr, self.blockScale]
    propDescr = QadMsg.translate("Dimension", "Center mark size")
    proplist["centerMarkSize"] = [propDescr, self.centerMarkSize]
    # fitting of the text and the arrows
    propDescr = QadMsg.translate("Dimension", "Fit: arrows and text")
    proplist["textBlockAdjust"] = [propDescr, self.textBlockAdjust]
    propDescr = QadMsg.translate("Dimension", "Suppress arrows for lack of space")
    proplist["blockSuppressionForNoSpace"] = [propDescr, self.blockSuppressionForNoSpace]
    # extension lines
    propDescr = QadMsg.translate("Dimension", "Ext. line 1 visible")
    proplist["extLine1Show"] = [propDescr, self.extLine1Show]
    propDescr = QadMsg.translate("Dimension", "Ext. line 2 visible")
    proplist["extLine2Show"] = [propDescr, self.extLine2Show]
    propDescr = QadMsg.translate("Dimension", "Ext. line 1 linetype")
    proplist["extLine1LineType"] = [propDescr, self.extLine1LineType]
    propDescr = QadMsg.translate("Dimension", "Ext. line 2 linetype")
    proplist["extLine2LineType"] = [propDescr, self.extLine2LineType]
    propDescr = QadMsg.translate("Dimension", "Ext. line color")
    proplist["extLineColor"] = [propDescr, self.extLineColor]
    propDescr = QadMsg.translate("Dimension", "Ext. line extension")
    proplist["extLineOffsetDimLine"] = [propDescr, self.extLineOffsetDimLine]
    propDescr = QadMsg.translate("Dimension", "Ext. line offset")
    proplist["extLineOffsetOrigPoints"] = [propDescr, self.extLineOffsetOrigPoints]
    propDescr = QadMsg.translate("Dimension", "Fixed length ext. line activated")
    proplist["extLineIsFixedLen"] = [propDescr, self.extLineIsFixedLen]
    propDescr = QadMsg.translate("Dimension", "Fixed length ext. line")
    proplist["extLineFixedLen"] = [propDescr, self.extLineFixedLen]
    # layers and their characteristics
    propDescr = QadMsg.translate("Dimension", "Layer for dim texts")
    proplist["textualLayerName"] = [propDescr, self.textualLayerName]
    propDescr = QadMsg.translate("Dimension", "Layer for dim lines")
    proplist["linearLayerName"] = [propDescr, self.linearLayerName]
    propDescr = QadMsg.translate("Dimension", "Layer for dim arrows")
    proplist["symbolLayerName"] = [propDescr, self.symbolLayerName]
    propDescr = QadMsg.translate("Dimension", "Field for component type")
    proplist["componentFieldName"] = [propDescr, self.componentFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for linetype")
    proplist["lineTypeFieldName"] = [propDescr, self.lineTypeFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for color")
    proplist["colorFieldName"] = [propDescr, self.colorFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for dim ID in texts")
    proplist["idFieldName"] = [propDescr, self.idFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for dim ID in lines and arrows")
    proplist["idParentFieldName"] = [propDescr, self.idParentFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for dim style name")
    proplist["dimStyleFieldName"] = [propDescr, self.dimStyleFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for dim type")
    proplist["dimTypeFieldName"] = [propDescr, self.dimTypeFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for symbol name")
    proplist["symbolFieldName"] = [propDescr, self.symbolFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for arrows scale")
    proplist["scaleFieldName"] = [propDescr, self.scaleFieldName]
    propDescr = QadMsg.translate("Dimension", "Field for arrows rotation")
    proplist["rotFieldName"] = [propDescr, self.rotFieldName]
    return proplist
#============================================================================
# getLayer
#============================================================================
def getLayer(self, layerName):
if layerName is not None:
layerList = QgsProject.instance().mapLayersByName(layerName)
if len(layerList) == 1:
return layerList[0]
return None
#============================================================================
# layer testuale
def getTextualLayer(self):
if self.__textualLayer is None:
self.__textualLayer = self.getLayer(self.textualLayerName)
return self.__textualLayer
def getTextualLayerFields(self):
if self.__textFields is None:
self.__textFields = None if self.getTextualLayer() is None else self.getTextualLayer().fields()
return self.__textFields
def getTextualFeaturePrototype(self):
if self.__textualFeaturePrototype is None:
if self.getTextualLayerFields() is not None:
self.__textualFeaturePrototype = QgsFeature(self.getTextualLayerFields())
self.initFeatureToDefautlValues(self.getTextualLayer(), self.__textualFeaturePrototype)
return self.__textualFeaturePrototype
#============================================================================
# layer lineare
def getLinearLayer(self):
if self.__linearLayer is None:
self.__linearLayer = self.getLayer(self.linearLayerName)
return self.__linearLayer
def getLinearLayerFields(self):
if self.__lineFields is None:
self.__lineFields = None if self.getLinearLayer() is None else self.getLinearLayer().fields()
return self.__lineFields
def getLinearFeaturePrototype(self):
if self.__linearFeaturePrototype is None:
if self.getLinearLayerFields() is not None:
self.__linearFeaturePrototype = QgsFeature(self.getLinearLayerFields())
self.initFeatureToDefautlValues(self.getLinearLayer(), self.__linearFeaturePrototype)
return self.__linearFeaturePrototype
#============================================================================
# layer simbolo
def getSymbolLayer(self):
if self.__symbolLayer is None:
self.__symbolLayer = self.getLayer(self.symbolLayerName)
return self.__symbolLayer
def getSymbolLayerFields(self):
if self.__symbolFields is None:
self.__symbolFields = None if self.getSymbolLayer() is None else self.getSymbolLayer().fields()
return self.__symbolFields
def getSymbolFeaturePrototype(self):
if self.__symbolFeaturePrototype is None:
if self.getSymbolLayerFields() is not None:
self.__symbolFeaturePrototype = QgsFeature(self.getSymbolLayerFields())
self.initFeatureToDefautlValues(self.getSymbolLayer(), self.__symbolFeaturePrototype)
return self.__symbolFeaturePrototype
#============================================================================
# initFeatureToDefautlValues
#============================================================================
def initFeatureToDefautlValues(self, layer, f):
# assegno i valori di default
provider = layer.dataProvider()
fields = f.fields()
for field in fields.toList():
i = fields.indexFromName(field.name())
f[field.name()] = provider.defaultValue(i)
#============================================================================
# getDefaultDimFilePath
#============================================================================
def getDefaultDimFilePath(self):
# ottiene il percorso automatico dove salvare/caricare il file della quotatura
# se esiste un progetto caricato il percorso è quello del progetto
prjFileInfo = QFileInfo(QgsProject.instance().fileName())
path = prjFileInfo.absolutePath()
if len(path) == 0:
# se non esiste un progetto caricato uso il percorso di installazione di qad
path = QDir.cleanPath(QgsApplication.qgisSettingsDirPath() + "python/plugins/qad")
return path + "/"
#============================================================================
# save
#============================================================================
def save(self, path = "", overwrite = True):
"""
Salva le impostazioni dello stile di quotatura in un file.
"""
if path == "" and self.path != "":
_path = self.path
else:
dir, base = os.path.split(path) # ritorna percorso e nome file con estensione
if dir == "":
dir = self.getDefaultDimFilePath()
else:
dir = QDir.cleanPath(dir) + "/"
name, ext = os.path.splitext(base)
if name == "":
name = self.name
if ext == "": # se non c'è estensione la aggiungo
ext = ".dim"
_path = dir + name + ext
if overwrite == False: # se non si vuole sovrascrivere
if os.path.exists(_path):
return False
dir = QFileInfo(_path).absoluteDir()
if not dir.exists():
os.makedirs(dir.absolutePath())
config = qad_utils.QadRawConfigParser(allow_no_value=True)
config.add_section("dimension_options")
config.set("dimension_options", "name", str(self.name))
config.set("dimension_options", "description", self.description)
config.set("dimension_options", "dimType", str(self.dimType))
# testo di quota
config.set("dimension_options", "textPrefix", str(self.textPrefix))
config.set("dimension_options", "textSuffix", str(self.textSuffix))
config.set("dimension_options", "textSuppressLeadingZeros", str(self.textSuppressLeadingZeros))
config.set("dimension_options", "textDecimalZerosSuppression", str(self.textDecimalZerosSuppression))
config.set("dimension_options", "textHeight", str(self.textHeight))
config.set("dimension_options", "textVerticalPos", str(self.textVerticalPos))
config.set("dimension_options", "textHorizontalPos", str(self.textHorizontalPos))
config.set("dimension_options", "textOffsetDist", str(self.textOffsetDist))
config.set("dimension_options", "textRotMode", str(self.textRotMode))
config.set("dimension_options", "textForcedRot", str(self.textForcedRot))
config.set("dimension_options", "textDecimals", str(self.textDecimals))
config.set("dimension_options", "textDecimalSep", str(self.textDecimalSep))
config.set("dimension_options", "textFont", str(self.textFont))
config.set("dimension_options", "textColor", str(self.textColor))
config.set("dimension_options", "textDirection", str(self.textDirection))
config.set("dimension_options", "arcSymbPos", str(self.arcSymbPos))
# linee di quota
config.set("dimension_options", "dimLine1Show", str(self.dimLine1Show))
config.set("dimension_options", "dimLine2Show", str(self.dimLine2Show))
config.set("dimension_options", "dimLineLineType", str(self.dimLineLineType))
config.set("dimension_options", "dimLineColor", str(self.dimLineColor))
config.set("dimension_options", "dimLineSpaceOffset", str(self.dimLineSpaceOffset))
config.set("dimension_options", "dimLineOffsetExtLine", str(self.dimLineOffsetExtLine))
# simboli per linee di quota
config.set("dimension_options", "block1Name", str(self.block1Name))
config.set("dimension_options", "block2Name", str(self.block2Name))
config.set("dimension_options", "blockLeaderName", str(self.blockLeaderName))
config.set("dimension_options", "blockWidth", str(self.blockWidth))
config.set("dimension_options", "blockScale", str(self.blockScale))
config.set("dimension_options", "blockSuppressionForNoSpace", str(self.blockSuppressionForNoSpace))
config.set("dimension_options", "centerMarkSize", str(self.centerMarkSize))
# adattamento del testo e delle frecce
config.set("dimension_options", "textBlockAdjust", str(self.textBlockAdjust))
# linee di estensione
config.set("dimension_options", "extLine1Show", str(self.extLine1Show))
config.set("dimension_options", "extLine2Show", str(self.extLine2Show))
config.set("dimension_options", "extLine1LineType", str(self.extLine1LineType))
config.set("dimension_options", "extLine2LineType", str(self.extLine2LineType))
config.set("dimension_options", "extLineColor", str(self.extLineColor))
config.set("dimension_options", "extLineOffsetDimLine", str(self.extLineOffsetDimLine))
config.set("dimension_options", "extLineOffsetOrigPoints", str(self.extLineOffsetOrigPoints))
config.set("dimension_options", "extLineIsFixedLen", str(self.extLineIsFixedLen))
config.set("dimension_options", "extLineFixedLen", str(self.extLineFixedLen))
# layer e loro caratteristiche
config.set("dimension_options", "textualLayerName", "" if self.textualLayerName is None else self.textualLayerName)
config.set("dimension_options", "linearLayerName", "" if self.linearLayerName is None else self.linearLayerName)
config.set("dimension_options", "symbolLayerName", "" if self.symbolLayerName is None else self.symbolLayerName)
config.set("dimension_options", "componentFieldName", str(self.componentFieldName))
config.set("dimension_options", "symbolFieldName", str(self.symbolFieldName))
config.set("dimension_options", "lineTypeFieldName", str(self.lineTypeFieldName))
config.set("dimension_options", "colorFieldName", str(self.colorFieldName))
config.set("dimension_options", "idFieldName", str(self.idFieldName))
config.set("dimension_options", "idParentFieldName", str(self.idParentFieldName))
config.set("dimension_options", "dimStyleFieldName", str(self.dimStyleFieldName))
config.set("dimension_options", "dimTypeFieldName", str(self.dimTypeFieldName))
config.set("dimension_options", "scaleFieldName", str(self.scaleFieldName))
config.set("dimension_options", "rotFieldName", str(self.rotFieldName))
with codecs.open(_path, 'w', 'utf-8') as configFile:
config.write(configFile)
self.path = _path
return True
#============================================================================
# load
#============================================================================
def load(self, path):
"""
Carica le impostazioni dello stile di quotatura da un file.
"""
if path is None or path == "":
return False
if os.path.dirname(path) == "": # path contiene solo il nome del file (senza dir)
_path = self.getDefaultDimFilePath()
_path = _path + path
else:
_path = path
if not os.path.exists(_path):
return False
config = qad_utils.QadRawConfigParser(allow_no_value=True)
config.readfp(codecs.open(_path, "r", "utf-8"))
#config.read(_path)
value = config.get("dimension_options", "name")
if value is not None:
self.name = value
value = config.get("dimension_options", "description")
if value is not None:
self.description = value
value = config.get("dimension_options", "dimType")
if value is not None:
self.dimType = value
# testo di quota
value = config.get("dimension_options", "textPrefix")
if value is not None:
self.textPrefix = value
value = config.get("dimension_options", "textSuffix")
if value is not None:
self.textSuffix = value
value = config.getboolean("dimension_options", "textSuppressLeadingZeros")
if value is not None:
self.textSuppressLeadingZeros = value
value = config.getboolean("dimension_options", "textDecimalZerosSuppression")
if value is not None:
self.textDecimalZerosSuppression = value
value = config.getfloat("dimension_options", "textHeight")
if value is not None:
self.textHeight = value
value = config.getint("dimension_options", "textVerticalPos")
if value is not None:
self.textVerticalPos = value
value = config.getint("dimension_options", "textHorizontalPos")
if value is not None:
self.textHorizontalPos = value
value = config.getfloat("dimension_options", "textOffsetDist")
if value is not None:
self.textOffsetDist = value
value = config.getint("dimension_options", "textRotMode")
if value is not None:
self.textRotMode = value
value = config.getfloat("dimension_options", "textForcedRot")
if value is not None:
self.textForcedRot = value
value = config.getint("dimension_options", "textDecimals")
if value is not None:
self.textDecimals = value
value = config.get("dimension_options", "textDecimalSep")
if value is not None:
self.textDecimalSep = value
value = config.get("dimension_options", "textFont")
if value is not None:
self.textFont = value
value = config.get("dimension_options", "textColor")
if value is not None:
self.textColor = value
value = config.getint("dimension_options", "textDirection")
if value is not None:
self.textDirection = value
value = config.getint("dimension_options", "arcSymbPos")
if value is not None:
self.arcSymbPos = value
# linee di quota
value = config.getboolean("dimension_options", "dimLine1Show")
if value is not None:
self.dimLine1Show = value
value = config.getboolean("dimension_options", "dimLine2Show")
if value is not None:
self.dimLine2Show = value
value = config.get("dimension_options", "dimLineLineType")
if value is not None:
self.dimLineLineType = value
value = config.get("dimension_options", "dimLineColor")
if value is not None:
self.dimLineColor = value
value = config.getfloat("dimension_options", "dimLineSpaceOffset")
if value is not None:
self.dimLineSpaceOffset = value
value = config.getfloat("dimension_options", "dimLineOffsetExtLine")
if value is not None:
self.dimLineOffsetExtLine = value
# simboli per linee di quota
value = config.get("dimension_options", "block1Name")
if value is not None:
self.block1Name = value
value = config.get("dimension_options", "block2Name")
if value is not None:
self.block2Name = value
value = config.get("dimension_options", "blockLeaderName")
if value is not None:
self.blockLeaderName = value
value = config.getfloat("dimension_options", "blockWidth")
if value is not None:
self.blockWidth = value
value = config.getfloat("dimension_options", "blockScale")
if value is not None:
self.blockScale = value
value = config.getboolean("dimension_options", "blockSuppressionForNoSpace")
if value is not None:
self.blockSuppressionForNoSpace = value
value = config.getfloat("dimension_options", "centerMarkSize")
if value is not None:
self.centerMarkSize = value
# adattamento del testo e delle frecce
value = config.getint("dimension_options", "textBlockAdjust")
if value is not None:
self.textBlockAdjust = value
# linee di estensione
value = config.getboolean("dimension_options", "extLine1Show")
if value is not None:
self.extLine1Show = value
value = config.getboolean("dimension_options", "extLine2Show")
if value is not None:
self.extLine2Show = value
value = config.get("dimension_options", "extLine1LineType")
if value is not None:
self.extLine1LineType = value
value = config.get("dimension_options", "extLine2LineType")
if value is not None:
self.extLine2LineType = value
value = config.get("dimension_options", "extLineColor")
if value is not None:
self.extLineColor = value
value = config.getfloat("dimension_options", "extLineOffsetDimLine")
if value is not None:
self.extLineOffsetDimLine = value
value = config.getfloat("dimension_options", "extLineOffsetOrigPoints")
if value is not None:
self.extLineOffsetOrigPoints = value
value = config.getboolean("dimension_options", "extLineIsFixedLen")
if value is not None:
self.extLineIsFixedLen = value
value = config.getfloat("dimension_options", "extLineFixedLen")
if value is not None:
self.extLineFixedLen = value
# layer e loro caratteristiche
value = config.get("dimension_options", "textualLayerName")
if value is not None:
self.textualLayerName = value
value = config.get("dimension_options", "linearLayerName")
if value is not None:
self.linearLayerName = value
value = config.get("dimension_options", "symbolLayerName")
if value is not None:
self.symbolLayerName = value
value = config.get("dimension_options", "componentFieldName")
if value is not None:
self.componentFieldName = value
value = config.get("dimension_options", "symbolFieldName")
if value is not None:
self.symbolFieldName = value
value = config.get("dimension_options", "lineTypeFieldName")
if value is not None:
self.lineTypeFieldName = value
value = config.get("dimension_options", "colorFieldName")
if value is not None:
self.colorFieldName = value
value = config.get("dimension_options", "idFieldName")
if value is not None:
self.idFieldName = value
value = config.get("dimension_options", "idParentFieldName")
if value is not None:
self.idParentFieldName = value
value = config.get("dimension_options", "dimStyleFieldName")
if value is not None:
self.dimStyleFieldName = value
value = config.get("dimension_options", "dimTypeFieldName")
if value is not None:
self.dimTypeFieldName = value
value = config.get("dimension_options", "scaleFieldName")
if value is not None:
self.scaleFieldName = value
value = config.get("dimension_options", "rotFieldName")
if value is not None:
self.rotFieldName = value
self.path = _path
return True
#============================================================================
# remove
#============================================================================
def remove(self):
"""
Cancella il file delle impostazioni dello stile di quotatura.
"""
currDimStyleName = QadVariables.get(QadMsg.translate("Environment variables", "DIMSTYLE"))
if self.name == currDimStyleName: # lo stile da cancellare è quello corrente
return False
if self.path is not None and self.path != "":
if os.path.exists(self.path):
try:
os.remove(self.path)
except:
return False
return True
#============================================================================
# rename
#============================================================================
def rename(self, newName):
"""
Rinomina il nome dello stile e del file delle impostazioni dello stile di quotatura.
"""
if newName == self.name: # nome uguale
return True
oldName = self.name
if self.path is not None or self.path != "":
if os.path.exists(self.path):
try:
dir, base = os.path.split(self.path)
dir = QDir.cleanPath(dir) + "/"
name, ext = os.path.splitext(base)
newPath = dir + "/" + newName + ext
os.rename(self.path, newPath)
self.path = newPath
self.name = newName
self.save()
except:
return False
else:
self.name = newName
currDimStyleName = QadVariables.get(QadMsg.translate("Environment variables", "DIMSTYLE"))
if oldName == currDimStyleName: # lo stile da rinominare è quello corrente
QadVariables.set(QadMsg.translate("Environment variables", "DIMSTYLE"), newName)
self.name = newName
return True
#============================================================================
# getInValidErrMsg
#============================================================================
def getInValidErrMsg(self):
"""
Verifica se lo stile di quotatura é invalido e in caso affermativo ritorna il messaggio di errore.
Se la quotatura é valida ritorna None.
"""
prefix = QadMsg.translate("Dimension", "\nThe dimension style \"{0}\" ").format(self.name)
if self.getTextualLayer() is None:
return prefix + QadMsg.translate("Dimension", "has not the textual layer for dimension.\n")
if qad_layer.isTextLayer(self.getTextualLayer()) == False:
errPartial = QadMsg.translate("Dimension", "has the textual layer for dimension ({0}) which is not a textual layer.")
errMsg = prefix + errPartial.format(self.getTextualLayer().name())
errMsg = errMsg + QadMsg.translate("QAD", "\nA textual layer is a vector punctual layer having a label and the symbol transparency no more than 10%.\n")
return errMsg
if self.getSymbolLayer() is None:
return prefix + QadMsg.translate("Dimension", "has not the symbol layer for dimension.\n")
if qad_layer.isSymbolLayer(self.getSymbolLayer()) == False:
errPartial = QadMsg.translate("Dimension", "has the symbol layer for dimension ({0}) which is not a symbol layer.")
errMsg = prefix + errPartial.format(self.getSymbolLayer().name())
errMsg = errMsg + QadMsg.translate("QAD", "\nA symbol layer is a vector punctual layer without label.\n")
return errMsg
if self.getLinearLayer() is None:
return prefix + QadMsg.translate("Dimension", "has not the linear layer for dimension.\n")
# deve essere un VectorLayer di tipo linea
if (self.getLinearLayer().type() != QgsMapLayer.VectorLayer) or (self.getLinearLayer().geometryType() != QgsWkbTypes.LineGeometry):
errPartial = QadMsg.translate("Dimension", "has the linear layer for dimension ({0}) which is not a linear layer.")
errMsg = prefix + errPartial.format(self.getSymbolLayer().name())
return errMsg
# i layer devono avere lo stesso sistema di coordinate
if not (self.getTextualLayer().crs() == self.getLinearLayer().crs() and self.getLinearLayer().crs() == self.getSymbolLayer().crs()):
errMsg = prefix + QadMsg.translate("Dimension", "has not the layers with the same coordinate reference system.")
return errMsg
return None
#============================================================================
# isValid
#============================================================================
def isValid(self):
"""
Verifica se lo stile di quotatura é valido e in caso affermativo ritorna True.
Se la quotatura non é valida ritorna False.
"""
return True if self.getInValidErrMsg() is None else False
#===============================================================================
# getNotGraphEditableErrMsg
#===============================================================================
def getNotGraphEditableErrMsg(self):
"""
Verifica se i layer dello stile di quotatura sono in sola lettura e in caso affermativo ritorna il messaggio di errore.
Se i layer dello stile di quotatura sono modificabili ritorna None.
"""
prefix = QadMsg.translate("Dimension", "\nThe dimension style \"{0}\" ").format(self.name)
# layer dei testi
textualLayer = self.getTextualLayer()
if textualLayer is None:
errPartial = QadMsg.translate("Dimension", "hasn't the textual layer ({0}).")
return prefix + errPartial.format(self.textualLayerName)
provider = textualLayer.dataProvider()
if not (provider.capabilities() & QgsVectorDataProvider.EditingCapabilities):
errPartial = QadMsg.translate("Dimension", "has the textual layer ({0}) not editable.")
return prefix + errPartial.format(self.textualLayerName)
if not textualLayer.isEditable():
errPartial = QadMsg.translate("Dimension", "has the textual layer ({0}) not editable.")
return prefix + errPartial.format(self.textualLayerName)
# layer dei simboli
symbolLayer = self.getSymbolLayer()
if symbolLayer is None:
errPartial = QadMsg.translate("Dimension", "hasn't the symbol layer ({0}).")
return prefix + errPartial.format(self.symbolLayerName)
provider = symbolLayer.dataProvider()
if not (provider.capabilities() & QgsVectorDataProvider.EditingCapabilities):
errPartial = QadMsg.translate("Dimension", "has the symbol layer ({0}) not editable.")
return prefix + errPartial.format(self.symbolLayerName)
if not symbolLayer.isEditable():
errPartial = QadMsg.translate("Dimension", "has the symbol layer ({0}) not editable.")
return prefix + errPartial.format(self.symbolLayerName)
# layer delle linee
linearLayer = self.getLinearLayer()
if linearLayer is None:
errPartial = QadMsg.translate("Dimension", "hasn't the symbol layer ({0}).")
return prefix + errPartial.format(self.linearLayerName)
provider = linearLayer.dataProvider()
if not (provider.capabilities() & QgsVectorDataProvider.EditingCapabilities):
errPartial = QadMsg.translate("Dimension", "has the linear layer ({0}) not editable.")
return prefix + errPartial.format(self.linearLayerName)
if not linearLayer.isEditable():
errPartial = QadMsg.translate("Dimension", "has the linear layer ({0}) not editable.")
return prefix + errPartial.format(self.linearLayerName)
return None
#============================================================================
# adjustLineAccordingTextRect
#============================================================================
   def adjustLineAccordingTextRect(self, textRect, line, textLinearDimComponentOn):
      """
      Given a line, the kind of dimension component it represents
      (textLinearDimComponentOn) and a rectangle representing the space taken
      by the dimension text (as a QadPolyline), return 2 lines (either may be
      None) so that the text does not overlap the line and the dimension
      settings are honored (dimLine1Show, dimLine2Show, extLine1Show,
      extLine2Show).
      """
      line1 = None
      line2 = None
      # Intersection points between the text rectangle <textRect> (QadPolyline)
      # and the segment <line>, ordered by distance from line's start point.
      intPts = QadIntersections.getOrderedPolylineIntersectionPtsWithBasicGeom(textRect, line, True)[0] # orderByStartPtOfLinearObject = True
      if textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1: # dimension line ("Dimension line")
         if len(intPts) == 2: # the text rectangle lies on the line: split around it
            if self.dimLine1Show:
               line1 = QadLine().set(line.getStartPt(), intPts[0])
            if self.dimLine2Show:
               line2 = QadLine().set(intPts[1], line.getEndPt())
         else: # the text rectangle is not on the line
            if self.dimLine1Show and self.dimLine2Show:
               line1 = line.copy()
            else:
               # shrink the line by the space the arrow blocks occupy at each end
               space1, space2 = self.getSpaceForBlock1AndBlock2OnLine(textRect, line)
               rot = qad_utils.getAngleBy2Pts(line.getStartPt(), line.getEndPt()) # angle of the dimension line
               intPt1 = qad_utils.getPolarPointByPtAngle(line.getStartPt(), rot, space1)
               intPt2 = qad_utils.getPolarPointByPtAngle(line.getEndPt(), rot - math.pi, space2)
               # NOTE(review): line1 is built with intPt2 and line2 with intPt1,
               # which looks swapped with respect to the point names — confirm intended.
               if self.dimLine1Show:
                  line1 = QadLine().set(line.getStartPt(), intPt2)
               elif self.dimLine2Show:
                  line2 = QadLine().set(line.getEndPt(), intPt1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE1: # first extension line ("Extension line 1")
         if self.extLine1Show:
            if len(intPts) > 0:
               line1 = QadLine().set(line.getStartPt(), intPts[0])
            else:
               line1 = line.copy()
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE2: # second extension line ("Extension line 2")
         if self.extLine2Show:
            if len(intPts) > 0:
               line1 = QadLine().set(line.getStartPt(), intPts[0])
            else:
               line1 = line.copy()
      elif textLinearDimComponentOn == QadDimComponentEnum.LEADER_LINE: # leader line, used when the text is outside the dimension ("Leader")
         if len(intPts) > 0:
            line1 = QadLine().set(line.getEndPt(), intPts[0])
         else:
            line1 = line.copy()
      return line1, line2
#============================================================================
# adjustArcAccordingTextRect
#============================================================================
   def adjustArcAccordingTextRect(self, textRect, arc, textLinearDimComponentOn):
      """
      Given an arc (<arc>), the kind of dimension component it represents
      (textLinearDimComponentOn) and a rectangle representing the space taken
      by the dimension text, return two arcs (either may be None) so that the
      text does not overlap the arc and the dimension settings are honored
      (dimLine1Show, dimLine2Show, extLine1Show, extLine2Show).
      """
      # intersection points between the text rectangle and the arc,
      # ordered from the arc's start point
      intPts = QadIntersections.getOrderedPolylineIntersectionPtsWithBasicGeom(textRect, arc, True)[0] # orderByStartPtOfPart = True
      arc1 = None
      arc2 = None
      if textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1: # dimension line ("Dimension line")
         if len(intPts) >= 2: # the text rectangle lies on the arc: split around it
            if self.dimLine1Show:
               arc1 = QadArc(arc)
               arc1.setEndAngleByPt(intPts[0])
            if self.dimLine2Show:
               arc2 = QadArc(arc)
               arc2.setStartAngleByPt(intPts[-1])# last point
         else: # the text rectangle is not on the arc
            if self.dimLine1Show and self.dimLine2Show:
               arc1 = QadArc(arc)
            else:
               # shrink the arc by the space the arrow blocks occupy at each end
               space1, space2 = self.getSpaceForBlock1AndBlock2OnArc(textRect, arc)
               if self.dimLine1Show:
                  arc1 = QadArc(arc)
                  pt, dummyTg = arc1.getPointFromStart(space1)
                  arc1.setEndAngleByPt(pt)
               elif self.dimLine2Show:
                  arc2 = QadArc(arc)
                  pt, dummyTg = arc2.getPointFromStart(arc2.length() - space2)
                  arc2.setStartAngleByPt(pt)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE1: # first extension line ("Extension line 1")
         if self.extLine1Show:
            if len(intPts) > 0:
               arc1 = QadArc(arc)
               arc1.setEndAngleByPt(intPts[0])
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE2: # second extension line ("Extension line 2")
         if self.extLine2Show:
            if len(intPts) > 0:
               arc1 = QadArc(arc)
               arc1.setEndAngleByPt(intPts[0])
      elif textLinearDimComponentOn == QadDimComponentEnum.LEADER_LINE: # leader line, used when the text is outside the dimension ("Leader")
         if len(intPts) > 0:
            arc1 = QadArc(arc)
            arc1.setEndAngleByPt(intPts[0])
      return arc1, arc2
#============================================================================
# setDimId
#============================================================================
def setDimId(self, dimId, features, parentId = False):
"""
Setta tutte le feature passate nella lista <features> con il codice della quota.
"""
fieldName = self.idParentFieldName if parentId else self.idFieldName
if len(fieldName) == 0:
return True
i = 0
tot = len(features)
while i < tot:
try:
f = features[i]
if f is not None:
# imposto il codice della quota
f.setAttribute(fieldName, dimId)
except:
return False
i = i + 1
return True
#============================================================================
# recodeDimIdOnFeatures
#============================================================================
def recodeDimIdOnFeatures(self, oldDimId, newDimId, features, parentId = False):
"""
Cerca tutte le feature passate nella lista <features> con il codice della
quota oldDimId e le ricodifica con newDimId.
"""
fieldName = self.idParentFieldName if parentId else self.idFieldName
if len(fieldName) == 0:
return True
i = 0
tot = len(features)
while i < tot:
try:
f = features[i]
if f is not None:
if f.attribute(fieldName) == oldDimId:
# imposto il codice della quota
f.setAttribute(fieldName, newDimId)
except:
return False
i = i + 1
return True
def textCommitChangesOnSave(self, plugIn):
"""
Salva i testi delle quote per ottenere i nuovi ID
e richiamare updateTextReferencesOnSave tramite il segnale committedFeaturesAdded.
"""
# salvo i testi per avere la codifica definitiva
if self.getTextualLayer() is not None:
# segno che questo layer è salvato da QAD
plugIn.layerStatusList.setStatus(self.getTextualLayer().id(), qad_layer.QadLayerStatusEnum.COMMIT_BY_INTERNAL)
res = self.getTextualLayer().commitChanges()
plugIn.layerStatusList.remove(self.getTextualLayer().id())
return res
else:
return False
#============================================================================
# updateTextReferencesOnSave
#============================================================================
   def updateTextReferencesOnSave(self, plugIn, textAddedEntitySet):
      """
      Update and save the references of the dimension style entities contained
      in textAddedEntitySet: each committed text entity's provisional dimension
      ID is recoded to the feature's definitive ID.
      Returns False when editing cannot be started or a recode fails.
      """
      if self.startEditing() == False:
         return False
      plugIn.beginEditCommand("Dimension recoded", [self.getSymbolLayer(), self.getLinearLayer(), self.getTextualLayer()])
      # NOTE(review): this QadEntity() is immediately shadowed by the loop
      # variable below and looks dead — confirm before removing.
      entity = QadEntity()
      entityIterator = textAddedEntitySet.getEntities()
      for entity in entityIterator:
         oldDimId = entity.getAttribute(self.idFieldName) # provisional dimension ID
         newDimId = entity.getFeature().id() # definitive feature ID after commit
         # NOTE(review): returning here leaves the edit command opened by
         # beginEditCommand without a matching endEditCommand — confirm intended.
         if oldDimId is None or self.recodeDimId(plugIn, oldDimId, newDimId) == False:
            return False
      plugIn.endEditCommand()
      return True
#============================================================================
# startEditing
#============================================================================
def startEditing(self):
if self.getTextualLayer() is not None and self.getTextualLayer().isEditable() == False:
if self.getTextualLayer().startEditing() == False:
return False
if self.getLinearLayer() is not None and self.getLinearLayer().isEditable() == False:
if self.getLinearLayer().startEditing() == False:
return False
if self.getSymbolLayer() is not None and self.getSymbolLayer().isEditable() == False:
if self.getSymbolLayer().startEditing() == False:
return False
#============================================================================
# commitChanges
#============================================================================
def commitChanges(self, plugIn):
if self.startEditing() == False:
return False
excludedLayer = plugIn.beforeCommitChangesDimLayer
if (excludedLayer is None) or excludedLayer.id() != self.getTextualLayer().id():
# segno che questo layer è salvato da QAD
plugIn.layerStatusList.setStatus(self.getTextualLayer().id(), qad_layer.QadLayerStatusEnum.COMMIT_BY_INTERNAL)
# salvo le entità testuali
self.getTextualLayer().commitChanges()
plugIn.layerStatusList.remove(self.getTextualLayer().id())
if (excludedLayer is None) or excludedLayer.id() != self.getLinearLayer().id():
# segno che questo layer è salvato da QAD
plugIn.layerStatusList.setStatus(self.getLinearLayer().id(), qad_layer.QadLayerStatusEnum.COMMIT_BY_INTERNAL)
# salvo le entità lineari
self.getLinearLayer().commitChanges()
plugIn.layerStatusList.remove(self.getLinearLayer().id())
if (excludedLayer is None) or excludedLayer.id() != self.getSymbolLayer().id():
# segno che questo layer è salvato da QAD
plugIn.layerStatusList.setStatus(self.getSymbolLayer().id(), qad_layer.QadLayerStatusEnum.COMMIT_BY_INTERNAL)
# salvo le entità puntuali
self.getSymbolLayer().commitChanges()
plugIn.layerStatusList.remove(self.getSymbolLayer().id())
#============================================================================
# recodeDimId
#============================================================================
def getEntitySet(self, dimId):
    """
    Build a QadEntitySet with all the features of dimension dimId:
    the textual feature (matched on the id field) plus the linear and
    symbol features (matched on the parent-id field).
    """
    result = QadEntitySet()
    if len(self.idFieldName) == 0 or len(self.idParentFieldName) == 0:
        return result
    if self.isValid() == False:
        return result
    ownIdExpr = "\"" + self.idFieldName + "\"=" + str(dimId)
    parentIdExpr = "\"" + self.idParentFieldName + "\"=" + str(dimId)
    for layer, expression in ((self.getTextualLayer(), ownIdExpr),
                              (self.getLinearLayer(), parentIdExpr),
                              (self.getSymbolLayer(), parentIdExpr)):
        layerEntitySet = QadLayerEntitySet()
        featureIter = layer.getFeatures(QgsFeatureRequest().setFilterExpression(expression))
        layerEntitySet.set(layer)
        layerEntitySet.addFeatures(featureIter)
        result.addLayerEntitySet(layerEntitySet)
    return result
#============================================================================
# recodeDimId
#============================================================================
def recodeDimId(self, plugIn, oldDimId, newDimId):
    """
    Recode every feature of dimension oldDimId with the new code newDimId.
    The textual feature gets the id field updated; linear and symbol
    features get the parent-id field updated. Returns False on the first
    failed update, True otherwise.
    """
    if len(self.idFieldName) == 0 or len(self.idParentFieldName) == 0:
        return True
    entitySet = self.getEntitySet(oldDimId)
    # (layer, isParentId): textual carries the id itself, the others the parent id
    for layer, isParentId in ((self.getTextualLayer(), False),
                              (self.getLinearLayer(), True),
                              (self.getSymbolLayer(), True)):
        layerEntitySet = entitySet.findLayerEntitySet(layer)
        if layerEntitySet is None:
            continue
        features = layerEntitySet.getFeatureCollection()
        if self.setDimId(newDimId, features, isParentId) == False:
            return False
        # plugIn, layer, features, refresh, check_validity
        if qad_layer.updateFeaturesToLayer(plugIn, layer, features, False, False) == False:
            return False
    return True
#============================================================================
# addDimEntityToLayers
#============================================================================
def addDimEntityToLayers(self, plugIn, dimEntity):
    """
    Add a dimension entity to the pertinent layers, recoding its components.

    plugIn = the QAD plug-in instance (provides the edit-command transaction)
    dimEntity = dimension entity exposing textualFeature, symbolFeatures and
                linearFeatures
    Returns True on success; on any failure the whole edit command is
    destroyed (rolled back) and False is returned.
    """
    if dimEntity is None:
        return False
    plugIn.beginEditCommand("Dimension added", [self.getSymbolLayer(), self.getLinearLayer(), self.getTextualLayer()])
    # the textual feature must be inserted first: its feature id becomes
    # the dimension id used to link the other components
    # plugIn, layer, feature, coordTransform, refresh, check_validity
    if qad_layer.addFeatureToLayer(plugIn, self.getTextualLayer(), dimEntity.textualFeature, None, False, False) == False:
        plugIn.destroyEditCommand()
        return False
    dimId = dimEntity.textualFeature.id()
    if self.setDimId(dimId, [dimEntity.textualFeature], False) == True:  # set the id
        # plugIn, layer, feature, refresh, check_validity
        if qad_layer.updateFeatureToLayer(plugIn, self.getTextualLayer(), dimEntity.textualFeature, False, False) == False:
            plugIn.destroyEditCommand()
            return False
    # punctual (symbol) features
    self.setDimId(dimId, dimEntity.symbolFeatures, True)  # set id_parent
    # plugIn, layer, features, coordTransform, refresh, check_validity
    if qad_layer.addFeaturesToLayer(plugIn, self.getSymbolLayer(), dimEntity.symbolFeatures, None, False, False) == False:
        plugIn.destroyEditCommand()
        return False
    # linear features
    self.setDimId(dimId, dimEntity.linearFeatures, True)  # set id_parent
    # plugIn, layer, features, coordTransform, refresh, check_validity
    if qad_layer.addFeaturesToLayer(plugIn, self.getLinearLayer(), dimEntity.linearFeatures, None, False, False) == False:
        plugIn.destroyEditCommand()
        return False
    plugIn.endEditCommand()
    return True
#============================================================================
# getDimIdByEntity
#============================================================================
def getDimIdByEntity(self, entity):
    """
    Given an entity, check whether it belongs to this dimension style and,
    on success, return the dimension id, otherwise None.
    As a side effect, the dimension type (self.dimType) is set when possible.
    """
    if entity.layer.name() == self.textualLayerName:
        # textual component: the dimension id is stored directly on it
        dimId = entity.getAttribute(self.idFieldName)
        if dimId is None:
            return None
        f = entity.getFeature()
    elif entity.layer.name() == self.linearLayerName or \
         entity.layer.name() == self.symbolLayerName:
        # linear/symbol component: follow id_parent back to the textual feature
        textualLayer = self.getTextualLayer()
        if textualLayer is None: return None
        dimId = entity.getAttribute(self.idParentFieldName)
        if dimId is None:
            return None
        # look up the textual entity that carries the style information
        expression = "\"" + self.idFieldName + "\"=" + str(dimId)
        f = QgsFeature()
        if textualLayer.getFeatures(QgsFeatureRequest().setFilterExpression(expression)).nextFeature(f) == False:
            return None
    else:
        return None
    try:
        # read the dimension style name; the bare except makes a missing or
        # invalid field mean "not part of this style" rather than an error
        dimName = f.attribute(self.dimStyleFieldName)
        if dimName != self.name:
            return None
    except:
        return None
    try:
        # read the dimension style type (best effort, failures ignored)
        self.dimType = f.attribute(self.dimTypeFieldName)
    except:
        pass
    return dimId
#============================================================================
# isDimLayer
#============================================================================
def isDimLayer(self, layer):
    """
    Return True if <layer> is one of the three layers (textual, linear,
    symbol) making up this dimension style, False otherwise.
    """
    # idiom: return the boolean expression directly instead of an if/else
    # returning literal True/False
    return layer.name() in (self.textualLayerName, self.linearLayerName, self.symbolLayerName)
#============================================================================
# getFilteredLayerEntitySet
#============================================================================
def getFilteredLayerEntitySet(self, layerEntitySet):
    """
    Given a QadLayerEntitySet, return a new QadLayerEntitySet containing
    only the entities belonging to this dimension style (i.e. those for
    which getDimIdByEntity returns a dimension id).
    """
    result = QadLayerEntitySet()
    # fix: dropped the original's dead "entity = QadEntity()" local, which
    # was immediately shadowed by the loop variable
    for entity in layerEntitySet.getEntities():
        if self.getDimIdByEntity(entity) is not None:
            result.addEntity(entity)
    return result
#============================================================================
# FUNZIONI PER I BLOCCHI - INIZIO
#============================================================================
#============================================================================
# getBlock1Size
#============================================================================
def getBlock1Size(self):
    """
    Return the size of arrow block 1 in map units (0 when no block is set).
    """
    if self.block1Name == "":
        return 0
    # blockWidth = symbol width when the map-unit size is 1,
    # blockScale = symbol size scale factor (DIMASZ)
    return self.blockWidth * self.blockScale
#============================================================================
# getBlock2Size
#============================================================================
def getBlock2Size(self):
    """
    Return the size of arrow block 2 in map units (0 when no block is set).
    """
    if self.block2Name == "":
        return 0
    # blockWidth = symbol width (horizontal) when the map-unit size is 1
    # (see "triangle2"); blockScale = symbol size scale factor (DIMASZ)
    return self.blockWidth * self.blockScale
#============================================================================
# getBlocksRotOnLine
#============================================================================
def getBlocksRotOnLine(self, dimLine, inside):
    """
    Return a pair describing the rotations of the two arrow blocks:
    - first element: rotation of block 1
    - second element: rotation of block 2
    dimLine = dimension line
    inside = mode flag; True = arrows inside, False = arrows outside
    """
    baseRot = dimLine.getTanDirectionOnPt()  # direction of the dimension line
    if inside:
        rotBlock1, rotBlock2 = baseRot + math.pi, baseRot
    else:
        rotBlock1, rotBlock2 = baseRot, baseRot + math.pi
    return qad_utils.normalizeAngle(rotBlock1), qad_utils.normalizeAngle(rotBlock2)
#============================================================================
# getBlocksRotOnArc
#============================================================================
def getBlocksRotOnArc(self, dimLineArc, inside):
    """
    Return a pair describing the rotations of the two arrow blocks:
    - first element: rotation of block 1 (at the arc start)
    - second element: rotation of block 2 (at the arc end)
    dimLineArc = arc representing the dimension line (QadArc)
    inside = mode flag; True = arrows inside, False = arrows outside
    """
    rotBlock1 = dimLineArc.getTanDirectionOnPt(dimLineArc.getStartPt())
    rotBlock2 = dimLineArc.getTanDirectionOnPt(dimLineArc.getEndPt())
    if inside:
        rotBlock1 += math.pi
    else:
        rotBlock2 += math.pi
    return qad_utils.normalizeAngle(rotBlock1), qad_utils.normalizeAngle(rotBlock2)
#============================================================================
# getSpaceForBlock1AndBlock2OnLine
#============================================================================
def getSpaceForBlock1AndBlock2OnLineAuxiliary(self, dimLine, rectCorner):
    # Project one corner of the text rectangle onto the infinite line
    # through dimLine and return the (start-side, end-side) distances.
    perpPt = QadPerpendicularity.fromPointToInfinityLine(rectCorner, dimLine)
    if dimLine.containsPt(perpPt):
        # projection falls within the segment: split the line at the projection
        return qad_utils.getDistance(dimLine.getStartPt(), perpPt), qad_utils.getDistance(dimLine.getEndPt(), perpPt)
    # projection falls outside the segment: the whole length goes to one side
    if qad_utils.getDistance(dimLine.getStartPt(), perpPt) < qad_utils.getDistance(dimLine.getEndPt(), perpPt):
        # beyond the start point of dimLine
        return 0, dimLine.length()
    # beyond the end point of dimLine
    return dimLine.length(), 0
def getSpaceForBlock1AndBlock2OnLine(self, txtRect, dimLine):
    """
    txtRect = bounding rectangle of the text (QadPolyline) or None when there is no text
    dimLine = dimension line
    Return the space available for block 1 and block 2 considering the
    rectangle (QadPolyline) representing the text and the dimension line.
    """
    if txtRect is None:
        # no text on the dimension line: each block gets half of it
        half = dimLine.length() / 2
        return half, half
    # project all four rectangle corners onto dimLine and keep, for each
    # side, the smallest available space
    spaceForBlock1 = None
    spaceForBlock2 = None
    for cornerIndex in range(4):
        corner = txtRect.getLinearObjectAt(cornerIndex).getStartPt()
        partial1, partial2 = self.getSpaceForBlock1AndBlock2OnLineAuxiliary(dimLine, corner)
        if spaceForBlock1 is None or partial1 < spaceForBlock1:
            spaceForBlock1 = partial1
        if spaceForBlock2 is None or partial2 < spaceForBlock2:
            spaceForBlock2 = partial2
    return spaceForBlock1, spaceForBlock2
#============================================================================
# getSpaceForBlock1AndBlock2OnArc
#============================================================================
def getSpaceForBlock1AndBlock2OnArcAuxiliary(self, dimLineArc, rectCorner):
    # Radially project one corner of the text rectangle onto the arc
    # dimLineArc and return the (start-side, end-side) arc lengths.
    angle = qad_utils.getAngleBy2Pts(dimLineArc.center, rectCorner)
    projPt = qad_utils.getPolarPointByPtAngle(dimLineArc.center, angle, dimLineArc.radius)
    if dimLineArc.containsPt(projPt):
        # projection falls on the arc: split the arc at the projection point
        firstPart = QadArc(dimLineArc)
        firstPart.setEndAngleByPt(projPt)
        secondPart = QadArc(dimLineArc)
        secondPart.setStartAngleByPt(projPt)
        return firstPart.length(), secondPart.length()
    # projection falls outside the arc: use chord distances to decide the side
    if qad_utils.getDistance(dimLineArc.getStartPt(), projPt) < qad_utils.getDistance(dimLineArc.getEndPt(), projPt):
        # beyond the start point
        return 0, dimLineArc.length()
    # beyond the end point
    return dimLineArc.length(), 0
def getSpaceForBlock1AndBlock2OnArc(self, txtRect, dimLineArc):
    """
    txtRect = bounding rectangle of the text or None when there is no text
    dimLineArc = arc representing the dimension line
    Return the space available for block 1 and block 2 considering the
    rectangle (QadPolyline) representing the text and the dimension arc.
    """
    if txtRect is None:  # no text (it was moved off the dimension line)
        spaceForBlock1 = dimLineArc.length() / 2
        spaceForBlock2 = spaceForBlock1
    else:
        # corners of the text rectangle
        p1 = txtRect.getLinearObjectAt(0).getStartPt()
        p2 = txtRect.getLinearObjectAt(1).getStartPt()
        p3 = txtRect.getLinearObjectAt(2).getStartPt()
        p4 = txtRect.getLinearObjectAt(3).getStartPt()
        rect1 = QgsGeometry.fromPolygonXY([[p1, p2, p3, p4, p1]])
        # square occupied by block 1 at the arc start point
        pt = dimLineArc.getStartPt()
        lineRot = dimLineArc.getTanDirectionOnPt(pt)
        p1 = qad_utils.getPolarPointByPtAngle(pt, lineRot + math.pi / 2, self.getBlock1Size() / 2)
        p2 = qad_utils.getPolarPointByPtAngle(p1, lineRot, self.getBlock1Size())
        p3 = qad_utils.getPolarPointByPtAngle(p2, lineRot - math.pi / 2, self.getBlock1Size())
        p4 = qad_utils.getPolarPointByPtAngle(p3, lineRot, - self.getBlock1Size())
        rect2 = QgsGeometry.fromPolygonXY([[p1, p2, p3, p4, p1]])
        # if the text overlaps the block square there is no room on that side
        if rect1.intersects(rect2):
            spaceForBlock1 = 0
        else:
            spaceForBlock1 = dimLineArc.length() / 2
        # square occupied by block 2 at the arc end point
        # (the original comment said "first block"; this is the second)
        pt = dimLineArc.getEndPt()
        # NOTE(review): the "- 2 * math.pi" offsets here and below should be
        # no-ops for periodic polar math, and this p4 step differs from the
        # block-1 construction above — confirm they are intentional
        lineRot = dimLineArc.getTanDirectionOnPt(pt) - 2 * math.pi
        p1 = qad_utils.getPolarPointByPtAngle(pt, lineRot + math.pi / 2, self.getBlock2Size() / 2)
        p2 = qad_utils.getPolarPointByPtAngle(p1, lineRot, self.getBlock2Size())
        p3 = qad_utils.getPolarPointByPtAngle(p2, lineRot - math.pi / 2, self.getBlock2Size())
        p4 = qad_utils.getPolarPointByPtAngle(p3, lineRot - 2 * math.pi, self.getBlock2Size())
        rect2 = QgsGeometry.fromPolygonXY([[p1, p2, p3, p4, p1]])
        if rect1.intersects(rect2):
            spaceForBlock2 = 0
        else:
            spaceForBlock2 = dimLineArc.length() / 2
        # disabled earlier variant based on corner projections, kept for reference:
        #      # calcolo la proiezione dei quattro vertici del rettangolo sulla linea dimLinePt1, dimLinePt2
        #      linearObject = txtRect.getLinearObjectAt(0)
        #      partial1SpaceForBlock1, partial1SpaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArcAuxiliary(dimLineArc, \
        #                                                                                                    linearObject.getStartPt())
        #      linearObject = txtRect.getLinearObjectAt(1)
        #      partial2SpaceForBlock1, partial2SpaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArcAuxiliary(dimLineArc, \
        #                                                                                                    linearObject.getStartPt())
        #      spaceForBlock1 = partial1SpaceForBlock1 if partial1SpaceForBlock1 < partial2SpaceForBlock1 else partial2SpaceForBlock1
        #      spaceForBlock2 = partial1SpaceForBlock2 if partial1SpaceForBlock2 < partial2SpaceForBlock2 else partial2SpaceForBlock2
        #
        #      linearObject = txtRect.getLinearObjectAt(2)
        #      partial3SpaceForBlock1, partial3SpaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArcAuxiliary(dimLineArc, \
        #                                                                                                    linearObject.getStartPt())
        #      if partial3SpaceForBlock1 < spaceForBlock1:
        #         spaceForBlock1 = partial3SpaceForBlock1
        #      if partial3SpaceForBlock2 < spaceForBlock2:
        #         spaceForBlock2 = partial3SpaceForBlock2
        #
        #      linearObject = txtRect.getLinearObjectAt(3)
        #      partial4SpaceForBlock1, partial4SpaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArcAuxiliary(dimLineArc, \
        #                                                                                                    linearObject.getStartPt())
        #      if partial4SpaceForBlock1 < spaceForBlock1:
        #         spaceForBlock1 = partial4SpaceForBlock1
        #      if partial4SpaceForBlock2 < spaceForBlock2:
        #         spaceForBlock2 = partial4SpaceForBlock2
    return spaceForBlock1, spaceForBlock2
#============================================================================
# getSymbolFeature
#============================================================================
def getSymbolFeature(self, insPt, rot, isBlock1, textLinearDimComponentOn):
    """
    Return the feature for the arrow symbol, or None when it must not be drawn.
    insPt = insertion point
    rot = rotation in radians
    isBlock1 = True for block 1, False for block 2
    textLinearDimComponentOn = dimension component the text sits on (QadDimComponentEnum)
    """
    # no dimension symbol at all
    if insPt is None or rot is None:
        return None
    # symbol 1
    if isBlock1 == True:
        # dimension line 1 is hidden (only applies when the text is on the dimension line)
        if self.dimLine1Show == False and \
           (textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1 or textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE2):
            return None
    else:  # symbol 2
        # dimension line 2 is hidden (only applies when the text is on the dimension line)
        if self.dimLine2Show == False and \
           (textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1 or textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE2):
            return None
    f = QgsFeature(self.getSymbolFeaturePrototype())
    # transform the point geometry into the symbol layer CRS
    g = fromQadGeomToQgsGeom(QadPoint().set(insPt), self.getSymbolLayer().crs())
    f.setGeometry(g)
    # set the block scale (best effort: missing/invalid fields are ignored)
    try:
        if len(self.scaleFieldName) > 0:
            f.setAttribute(self.scaleFieldName, self.blockScale)
    except:
        pass
    # set the rotation
    try:
        if len(self.rotFieldName) > 0:
            f.setAttribute(self.rotFieldName, qad_utils.toDegrees(rot))  # convert radians to degrees
    except:
        pass
    # set the color
    try:
        if len(self.colorFieldName) > 0:
            f.setAttribute(self.colorFieldName, self.dimLineColor)
    except:
        pass
    # set the dimension component type
    if self.dimType == QadDimTypeEnum.RADIUS:  # radius dimension: use the leader block
        try:
            if len(self.componentFieldName) > 0:
                f.setAttribute(self.componentFieldName, QadDimComponentEnum.LEADER_BLOCK)
        except:
            pass
        try:
            if len(self.symbolFieldName) > 0:
                f.setAttribute(self.symbolFieldName, self.blockLeaderName)
        except:
            pass
    else:
        try:
            if len(self.componentFieldName) > 0:
                f.setAttribute(self.componentFieldName, QadDimComponentEnum.BLOCK1 if isBlock1 else QadDimComponentEnum.BLOCK2)
        except:
            pass
        try:
            if len(self.symbolFieldName) > 0:
                f.setAttribute(self.symbolFieldName, self.block1Name if isBlock1 else self.block2Name)
        except:
            pass
    return f
#============================================================================
# getDimPointFeature
#============================================================================
def getDimPointFeature(self, insPt, isDimPt1):
    """
    Return the feature marking a dimension point, or None when no
    symbol-feature prototype is available.
    insPt = insertion point
    isDimPt1 = True for dimension point 1, False for dimension point 2
    """
    symbolFeaturePrototype = self.getSymbolFeaturePrototype()
    if symbolFeaturePrototype is None:
        return None
    f = QgsFeature(symbolFeaturePrototype)
    g = fromQadGeomToQgsGeom(QadPoint().set(insPt), self.getSymbolLayer().crs())  # transform the geometry
    f.setGeometry(g)
    # set the dimension component type; consistency fix: guard on the field
    # name length like the sibling feature builders (getSymbolFeature etc.)
    try:
        if len(self.componentFieldName) > 0:
            f.setAttribute(self.componentFieldName, QadDimComponentEnum.DIM_PT1 if isDimPt1 else QadDimComponentEnum.DIM_PT2)
    except:
        pass
    try:
        # set the color (best effort: missing fields are ignored)
        if len(self.colorFieldName) > 0:
            f.setAttribute(self.colorFieldName, self.dimLineColor)
    except:
        pass
    return f
#============================================================================
# getLeaderSymbolFeature
#============================================================================
def getLeaderSymbolFeature(self, insPt, rot):
    """
    Return the feature for the leader-line arrow symbol, or None when
    there is no symbol to draw.
    insPt = insertion point
    rot = rotation in radians
    """
    # no dimension symbol
    if insPt is None or rot is None:
        return None
    f = QgsFeature(self.getSymbolFeaturePrototype())
    g = fromQadGeomToQgsGeom(QadPoint().set(insPt), self.getSymbolLayer().crs())  # transform the geometry
    f.setGeometry(g)
    # set the block scale (best effort: missing/invalid fields are ignored)
    try:
        if len(self.scaleFieldName) > 0:
            f.setAttribute(self.scaleFieldName, self.blockScale)
    except:
        pass
    # set the rotation
    try:
        if len(self.rotFieldName) > 0:
            f.setAttribute(self.rotFieldName, qad_utils.toDegrees(rot))  # convert radians to degrees
    except:
        pass
    # set the color
    try:
        if len(self.colorFieldName) > 0:
            f.setAttribute(self.colorFieldName, self.dimLineColor)
    except:
        pass
    # set the dimension component type
    try:
        if len(self.componentFieldName) > 0:
            f.setAttribute(self.componentFieldName, QadDimComponentEnum.LEADER_BLOCK)
    except:
        pass
    # set the leader block symbol name
    try:
        if len(self.symbolFieldName) > 0:
            f.setAttribute(self.symbolFieldName, self.blockLeaderName)
    except:
        pass
    return f
#============================================================================
# getArcSymbolLineFeature
#============================================================================
def getArcSymbolLineFeature(self, arc):
    """
    Return the feature for the arc symbol, or None when arc is None.
    arc = the arc geometry
    """
    # no arc
    if arc is None:
        return None
    f = QgsFeature(self.getLinearFeaturePrototype())
    # NOTE(review): the geometry is transformed with the *symbol* layer CRS
    # even though the feature prototype comes from the linear layer —
    # presumably all dimension layers share one CRS; confirm.
    g = fromQadGeomToQgsGeom(arc, self.getSymbolLayer().crs())  # transform the geometry
    f.setGeometry(g)
    try:
        # set the dimension component type (best effort: failures ignored)
        if len(self.componentFieldName) > 0:
            f.setAttribute(self.componentFieldName, QadDimComponentEnum.ARC_BLOCK)
    except:
        pass
    try:
        # set the line type
        if len(self.lineTypeFieldName) > 0:
            f.setAttribute(self.lineTypeFieldName, self.dimLineLineType)
    except:
        pass
    try:
        # set the color
        if len(self.colorFieldName) > 0:
            f.setAttribute(self.colorFieldName, self.dimLineColor)
    except:
        pass
    return f
#============================================================================
# FUNZIONI PER I BLOCCHI - FINE
# FUNZIONI PER IL TESTO - INIZIO
#============================================================================
#============================================================================
# getFormattedText
#============================================================================
def getFormattedText(self, measure):
    """
    Return the dimension measure formatted as text.
    Numeric measures are formatted with the style's decimal settings,
    prefix and suffix; string measures are returned unchanged; anything
    else yields "".
    """
    # bug fix: the original tested ``type(measure) == unicode``, which
    # raises NameError on Python 3 (this file targets QGIS 3); use
    # isinstance checks instead.
    if isinstance(measure, (int, float)):
        return qad_utils.numToStringFmt(measure, self.textDecimals, self.textDecimalSep, \
                                        self.textSuppressLeadingZeros, self.textDecimalZerosSuppression, \
                                        self.textPrefix, self.textSuffix)
    elif isinstance(measure, str):
        return measure
    else:
        return ""
#============================================================================
# getNumericText
#============================================================================
def getNumericText(self, text):
    """
    Return the numeric value of a formatted dimension-measure text:
    strip the style's prefix/suffix, normalize the decimal separator to
    "." and convert with qad_utils.str2float.
    """
    # bug fix: the original used str.lstrip/str.rstrip, which strip any
    # *characters* contained in the prefix/suffix (a character set) rather
    # than removing the prefix/suffix as whole strings — e.g. a suffix
    # "mm" would also eat trailing "m" characters of the remaining text.
    textToConvert = text
    if self.textPrefix and textToConvert.startswith(self.textPrefix):
        textToConvert = textToConvert[len(self.textPrefix):]
    if self.textSuffix and textToConvert.endswith(self.textSuffix):
        textToConvert = textToConvert[:-len(self.textSuffix)]
    if self.textDecimalSep:
        # guard against an empty separator, which str.replace would
        # interpret as "insert between every character"
        textToConvert = textToConvert.replace(self.textDecimalSep, ".")
    return qad_utils.str2float(textToConvert)
#============================================================================
# textRectToQadPolyline
#============================================================================
def textRectToQadPolyline(self, ptBottomLeft, textWidth, textHeight, rot):
    """
    Return the rectangle representing the text as a QadPolyline.
       <2>----width----<3>
        |               |
      height          height
        |               |
       <1>----width----<4>
    ptBottomLeft = corner <1>; rot = rectangle rotation in radians.
    """
    topLeft = qad_utils.getPolarPointByPtAngle(ptBottomLeft, rot + (math.pi / 2), textHeight)
    topRight = qad_utils.getPolarPointByPtAngle(topLeft, rot, textWidth)
    bottomRight = qad_utils.getPolarPointByPtAngle(ptBottomLeft, rot, textWidth)
    polyline = QadPolyline()
    polyline.fromPolyline([ptBottomLeft, topLeft, topRight, bottomRight, ptBottomLeft])
    return polyline
#============================================================================
# getBoundingPointsTextRectProjectedToLine
#============================================================================
def getBoundingPointsTextRectProjectedToLine(self, line, textRect):
    """
    Return the two extreme points of the projection of the four corners of
    the text rectangle onto the line <line>.
    """
    rectCorners = textRect.asPolyline()
    # project each of the four corners onto the infinite line, keeping
    # only distinct projection points
    perpPts = []
    for corner in rectCorners[:4]:
        projection = QadPerpendicularity.fromPointToInfinityLine(corner, line)
        qad_utils.appendUniquePointToList(perpPts, projection)
    return getBoundingPtsOnOnInfinityLine(perpPts)
#============================================================================
# getTextPositionOnLine
#============================================================================
def getTextPositionOnLine(self, pt1, pt2, textWidth, textHeight, horizontalPos, verticalPos, rotMode):
    """
    pt1 = first point of the line
    pt2 = second point of the line
    textWidth = text width
    textHeight = text height
    Returns the insertion point and the rotation of the text along the line
    pt1-pt2 according to:
    horizontalPos = QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE (centered on the line)
                    QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE (near point pt1)
                    QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE (near point pt2)
    verticalPos = QadDimStyleTxtVerticalPosEnum.CENTERED_LINE (centered on the line)
                  QadDimStyleTxtVerticalPosEnum.ABOVE_LINE (above the line)
                  QadDimStyleTxtVerticalPosEnum.BELOW_LINE (below the line)
    rotMode = QadDimStyleTxtRotModeEnum.HORIZONTAL (horizontal text)
              QadDimStyleTxtRotModeEnum.ALIGNED_LINE (text aligned with the line)
              QadDimStyleTxtRotModeEnum.FORCED_ROTATION (text with forced rotation)
    NOTE(review): for unsupported horizontalPos/verticalPos/rotMode values
    some locals may stay unbound — callers are assumed to pass valid enums.
    """
    lineRot = qad_utils.getAngleBy2Pts(pt1, pt2)  # angle of the line
    # Decide whether the text insertion point is closer to pt1 (line read
    # from left to right) or to pt2 (line read from right to left).
    if (lineRot > math.pi * 3 / 2 and lineRot <= math.pi * 2) or \
       (lineRot >= 0 and lineRot <= math.pi / 2):  # from left to right
        textInsPtCloseToPt1 = True
    else:  # from right to left
        textInsPtCloseToPt1 = False

    if rotMode == QadDimStyleTxtRotModeEnum.ALIGNED_LINE:  # text aligned with the line
        if lineRot > (math.pi / 2) and lineRot <= math.pi * 3 / 2:  # if the text would be upside down, flip it
            textRot = lineRot - math.pi
        else:
            textRot = lineRot

        # horizontal alignment
        #=========================
        if horizontalPos == QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE:  # text centered on the line
            middlePt = qad_utils.getMiddlePoint(pt1, pt2)
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(middlePt, lineRot - math.pi, textWidth / 2)
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(middlePt, lineRot, textWidth / 2)
        elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE:  # text near pt1
            # textOffsetDist is used twice because one is the distance from pt1
            # plus one offset around the text
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(pt1, lineRot, self.textOffsetDist + self.textOffsetDist)
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(pt1, lineRot, textWidth + self.textOffsetDist + self.textOffsetDist)
        elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE:  # text near pt2
            # textOffsetDist is used twice because one is the distance from pt1
            # plus one offset around the text
            lineLen = qad_utils.getDistance(pt1, pt2)
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(pt1, lineRot, lineLen - textWidth - (self.textOffsetDist + self.textOffsetDist))
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(pt1, lineRot, lineLen - (self.textOffsetDist + self.textOffsetDist))

        # vertical alignment
        #=========================
        if verticalPos == QadDimStyleTxtVerticalPosEnum.CENTERED_LINE:  # text centered on the line
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot - math.pi / 2, textHeight / 2)
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot + math.pi / 2, textHeight / 2)
        elif verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
            # textOffsetDist is used twice because one is the distance from the
            # line plus one offset around the text
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot + math.pi / 2, self.textOffsetDist + self.textOffsetDist)
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot - math.pi / 2, self.textOffsetDist + self.textOffsetDist)
        elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
            # textOffsetDist is used twice because one is the distance from the
            # line plus one offset around the text
            if textInsPtCloseToPt1:  # the text insertion point is near pt1
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot - math.pi / 2, textHeight + (self.textOffsetDist + self.textOffsetDist))
            else:  # the text insertion point is near pt2
                insPt = qad_utils.getPolarPointByPtAngle(insPt, lineRot + math.pi / 2, textHeight + (self.textOffsetDist + self.textOffsetDist))
    # horizontal text or text with forced rotation
    elif rotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL or rotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:
        lineLen = qad_utils.getDistance(pt1, pt2)  # length of the line
        textRot = 0.0 if rotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL else self.textForcedRot
        # find which corner of the rectangle is closest to the line
        # <2>----width----<3>
        #  |               |
        # height         height
        #  |               |
        # <1>----width----<4>
        # build the rectangle enclosing the text and place it with its
        # bottom-left corner on point pt1
        textRect = self.textRectToQadPolyline(pt1, textWidth, textHeight, textRot)
        # get the extreme points of the projection of the rectangle onto the line
        pts = self.getBoundingPointsTextRectProjectedToLine(QadLine().set(pt1, pt2), textRect)
        projectedTextWidth = qad_utils.getDistance(pts[0], pts[1])

        # horizontal alignment
        #=========================
        if horizontalPos == QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE:  # text centered on the line
            closestPtToPt1 = qad_utils.getPolarPointByPtAngle(pt1, lineRot, (lineLen - projectedTextWidth) / 2)
        elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE:  # text near pt1
            closestPtToPt1 = qad_utils.getPolarPointByPtAngle(pt1, lineRot, self.textOffsetDist)
        elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE:  # text near pt2
            closestPtToPt1 = qad_utils.getPolarPointByPtAngle(pt1, lineRot, lineLen - self.textOffsetDist - projectedTextWidth)

        # if the line angle is in (0-90] degrees (first quadrant)
        if lineRot > 0 and lineRot <= math.pi / 2:
            # the point closest to pt1 corresponds to the bottom-left corner of
            # the rectangle enclosing the text; derive the text insertion point
            # (bottom-left corner)
            insPt = QgsPointXY(closestPtToPt1)
            textRect = self.textRectToQadPolyline(insPt, textWidth, textHeight, textRot)
            rectCorners = textRect.asPolyline()
            # vertical alignment
            #=========================
            if verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
                # corner 4 must be above the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[3]
            elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
                # corner 2 must be below the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[1]
        # if the line angle is in (90-180] degrees (second quadrant)
        elif lineRot > math.pi / 2 and lineRot <= math.pi:
            # the point closest to pt1 corresponds to the bottom-right corner of
            # the rectangle enclosing the text; derive the text insertion point
            # (bottom-left corner)
            insPt = QgsPointXY(closestPtToPt1.x() - textWidth, closestPtToPt1.y())
            textRect = self.textRectToQadPolyline(insPt, textWidth, textHeight, textRot)
            rectCorners = textRect.asPolyline()
            # vertical alignment
            #=========================
            if verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
                # corner 1 must be above the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[0]
            elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
                # corner 3 must be below the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[2]
        # if the line angle is in (180-270] degrees (third quadrant)
        elif lineRot > math.pi and lineRot <= math.pi * 3 / 2:
            # the point closest to pt1 corresponds to the top-right corner of
            # the rectangle enclosing the text; derive the text insertion point
            # (bottom-left corner)
            insPt = QgsPointXY(closestPtToPt1.x() - textWidth, closestPtToPt1.y() - textHeight)
            textRect = self.textRectToQadPolyline(insPt, textWidth, textHeight, textRot)
            rectCorners = textRect.asPolyline()
            # vertical alignment
            #=========================
            if verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
                # corner 4 must be above the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[3]
            elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
                # corner 2 must be below the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[1]
        # if the line angle is in (270-360] degrees (fourth quadrant)
        # BUGFIX: the upper bound used to be the literal 360 (degrees) while
        # lineRot is in radians; the intended bound is 2*pi. Harmless in
        # practice (radians are always < 360) but wrong and misleading.
        elif (lineRot > math.pi * 3 / 2 and lineRot <= math.pi * 2) or lineRot == 0:
            # the point closest to pt1 corresponds to the top-left corner of
            # the rectangle enclosing the text; derive the text insertion point
            # (top-left corner)
            insPt = QgsPointXY(closestPtToPt1.x(), closestPtToPt1.y() - textHeight)
            textRect = self.textRectToQadPolyline(insPt, textWidth, textHeight, textRot)
            rectCorners = textRect.asPolyline()
            # vertical alignment
            #=========================
            if verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
                # corner 1 must be above the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[0]
            elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
                # corner 3 must be below the line, at distance self.textOffsetDist from it
                rectPt = rectCorners[2]

        # vertical alignment
        #=========================
        if verticalPos == QadDimStyleTxtVerticalPosEnum.CENTERED_LINE:  # text centered on the line
            # the center of the rectangle must lie on the line
            centerPt = qad_utils.getPolarPointByPtAngle(rectCorners[0], \
                                                        qad_utils.getAngleBy2Pts(rectCorners[0], rectCorners[2]), \
                                                        qad_utils.getDistance(rectCorners[0], rectCorners[2]) / 2)
            perpPt = qad_utils.getPerpendicularPointOnInfinityLine(pt1, pt2, centerPt)
            offsetAngle = qad_utils.getAngleBy2Pts(centerPt, perpPt)
            offsetDist = qad_utils.getDistance(centerPt, perpPt)
        elif verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
            # the corner must be above the line, at distance self.textOffsetDist from it
            perpPt = qad_utils.getPerpendicularPointOnInfinityLine(pt1, pt2, rectPt)
            # if the line angle is in (90-270] degrees
            if lineRot > math.pi / 2 and lineRot <= math.pi * 3 / 2:
                offsetAngle = lineRot - math.pi / 2
            else:  # if the line angle is in (270-90] degrees
                offsetAngle = lineRot + math.pi / 2
            offsetDist = qad_utils.getDistance(rectPt, perpPt) + self.textOffsetDist
        elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
            # the corner must be below the line, at distance self.textOffsetDist from it
            perpPt = qad_utils.getPerpendicularPointOnInfinityLine(pt1, pt2, rectPt)
            # if the line angle is in (90-270] degrees
            if lineRot > math.pi / 2 and lineRot <= math.pi * 3 / 2:
                offsetAngle = lineRot + math.pi / 2
            else:  # if the line angle is in (270-90] degrees
                offsetAngle = lineRot - math.pi / 2
            offsetDist = qad_utils.getDistance(rectPt, perpPt) + self.textOffsetDist
        # translate the rectangle
        insPt = qad_utils.getPolarPointByPtAngle(insPt, offsetAngle, offsetDist)
        textRect = self.textRectToQadPolyline(insPt, textWidth, textHeight, textRot)
    return insPt, textRot
#============================================================================
# getTextPositionOnArc
#============================================================================
def getTextPositionOnArc(self, arc, textWidth, textHeight, horizontalPos, verticalPos, rotMode):
    """
    arc = QadArc object
    textWidth = text width including the offset (2 x offset, before and after the text)
    textHeight = text height including the offset (2 x offset, above and below the text)
    Returns the insertion point and the rotation of the text along the arc
    <arc> according to:
    horizontalPos = QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE (centered on the line)
                    QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE (near point pt1)
                    QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE (near point pt2)
    verticalPos = QadDimStyleTxtVerticalPosEnum.CENTERED_LINE (centered on the line)
                  QadDimStyleTxtVerticalPosEnum.ABOVE_LINE (above the line)
                  QadDimStyleTxtVerticalPosEnum.BELOW_LINE (below the line)
    rotMode = QadDimStyleTxtRotModeEnum.HORIZONTAL (horizontal text)
              QadDimStyleTxtRotModeEnum.ALIGNED_LINE (text aligned with the line)
              QadDimStyleTxtRotModeEnum.FORCED_ROTATION (text with forced rotation)
    """
    arcLength = arc.length()
    # compute the length of the text (with its offsets) developed along the arc
    # (the text itself is a straight chord)
    myArc = QadArc()
    if myArc.fromStartCenterPtsChord(arc.getStartPt(), arc.center, textWidth):
        TextWidthOnArc = myArc.length()
    else:
        # chord longer than the diameter: fall back to the straight width
        TextWidthOnArc = textWidth
    # compute the length of the offset developed along the arc
    # (the offset too is a straight chord)
    if myArc.fromStartCenterPtsChord(arc.getStartPt(), arc.center, self.textOffsetDist):
        textOffsetDistOnArc = myArc.length()
    else:
        textOffsetDistOnArc = self.textOffsetDist

    if rotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL:  # horizontal text
        textRot = 0.0
    elif rotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:  # text with forced rotation
        textRot = self.textForcedRot

    # horizontal alignment
    #=========================
    if horizontalPos == QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE:  # text centered on the line
        insPtCenterTxt = arc.getMiddlePt()
        lineRot = arc.getTanDirectionOnPt(insPtCenterTxt)
        if rotMode == QadDimStyleTxtRotModeEnum.ALIGNED_LINE:  # text aligned with the line
            textRot = lineRot
            if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down, flip it
                textRot = textRot - math.pi
    elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE:  # text near pt1
        # textOffsetDist is used twice because one is the distance from point pt1
        # plus one offset around the text
        insPtCenterTxt, dummyTg = arc.getPointFromStart(textOffsetDistOnArc + textOffsetDistOnArc + TextWidthOnArc / 2)
        lineRot = arc.getTanDirectionOnPt(insPtCenterTxt)
        if rotMode == QadDimStyleTxtRotModeEnum.ALIGNED_LINE:  # text aligned with the line
            textRot = lineRot
            if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down, flip it
                textRot = textRot - math.pi
    elif horizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE:  # text near pt2
        # textOffsetDist is used twice because one is the distance from point pt1
        # plus one offset around the text
        insPtCenterTxt, dummyTg = arc.getPointFromStart(arcLength - TextWidthOnArc / 2 - textOffsetDistOnArc - textOffsetDistOnArc)
        lineRot = arc.getTanDirectionOnPt(insPtCenterTxt)
        if rotMode == QadDimStyleTxtRotModeEnum.ALIGNED_LINE:  # text aligned with the line
            textRot = lineRot
            if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down, flip it
                textRot = textRot - math.pi

    # angle of the line joining the arc center to the text center
    angleOnCenterTxt = qad_utils.getAngleBy2Pts(arc.center, insPtCenterTxt)
    # normalize the angle
    textRot = qad_utils.normalizeAngle(textRot)
    # move from the text center to the text insertion point (start of the baseline)
    if (textRot > math.pi * 3 / 2 and textRot <= math.pi * 2) or \
       (textRot >= 0 and textRot < math.pi / 2):  # from left to right
        insPt = qad_utils.getPolarPointByPtAngle(insPtCenterTxt, textRot, -textWidth / 2)
    else:
        insPt = qad_utils.getPolarPointByPtAngle(insPtCenterTxt, textRot, textWidth / 2)

    # vertical alignment: shift the insertion point radially (along the line
    # from the arc center through the text center)
    #=========================
    angleOnCenterTxt = qad_utils.getAngleBy2Pts(arc.center, insPtCenterTxt)
    if verticalPos == QadDimStyleTxtVerticalPosEnum.CENTERED_LINE:  # text centered on the line
        if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, textHeight / 2)
            else:  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -textHeight / 2)
        else:  # the text is upright
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -textHeight / 2)
            else:  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, textHeight / 2)
    elif verticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE:  # above the line
        if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -self.textOffsetDist)
            else:  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, self.textOffsetDist)
        else:  # the text is upright
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, self.textOffsetDist)
            else:  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -self.textOffsetDist)
    elif verticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE:  # below the line
        if textRot > (math.pi / 2) and textRot <= math.pi * 3 / 2:  # if the text is upside down
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, (textHeight + self.textOffsetDist))
            else:  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -(textHeight + self.textOffsetDist))
        else:  # the text is upright
            if (angleOnCenterTxt > 0 and angleOnCenterTxt <= math.pi):  # the text goes toward the arc start point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, -(textHeight + self.textOffsetDist))
            else:  # the text goes toward the arc end point
                insPt = qad_utils.getPolarPointByPtAngle(insPt, angleOnCenterTxt, (textHeight + self.textOffsetDist))
    return insPt, textRot
#============================================================================
# getTextPosAndLinesOutOfDimLines
#============================================================================
def getTextPosAndLinesOutOfDimLines(self, dimLinePt1, dimLinePt2, textWidth, textHeight):
    """
    Returns 3 elements used when the text had to be moved outside the
    extension lines because it was too big:
    - the first element is the insertion point
    - the second element is the text rotation
    - the third element is a list of lines to be used as a leader
    The text is placed beside extension line 2.
    dimLinePt1 = first point of the dimension line (QgsPointXY)
    dimLinePt2 = second point of the dimension line (QgsPointXY)
    textWidth = text width
    textHeight = text height
    """
    # Build the leader lines for the external text; the last segment is the
    # one that carries the text itself.
    leaderLines = self.getLeaderLinesOnLine(dimLinePt1, dimLinePt2, textWidth, textHeight)
    textSegment = leaderLines.getLinearObjectAt(-1)
    # Honor a forced rotation if the style requires one, otherwise align the
    # text with the leader segment.
    rotationMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION \
        if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION \
        else QadDimStyleTxtRotModeEnum.ALIGNED_LINE
    insertionPt, rotation = self.getTextPositionOnLine(textSegment.getStartPt(), textSegment.getEndPt(), \
                                                       textWidth, textHeight, \
                                                       QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                       self.textVerticalPos, rotationMode)
    return insertionPt, rotation, leaderLines
#============================================================================
# getTextPosAndLinesOutOfDimArc
#============================================================================
def getTextPosAndLinesOutOfDimArc(self, dimLineArc, textWidth, textHeight):
    """
    Returns 3 elements used when the text had to be moved outside the
    extension lines because it was too big:
    - the first element is the text insertion point
    - the second element is the text rotation
    - the third element is a list of lines to be used as a leader
    The text is placed beside extension line 2.
    dimLineArc = arc representing the dimension line (QadArc)
    textWidth = text width
    textHeight = text height
    """
    # Build the leader lines for the external text; the last segment is the
    # one that carries the text itself.
    leaderLines = self.getLeaderLinesOnArc(dimLineArc, textWidth, textHeight)
    textSegment = leaderLines.getLinearObjectAt(-1)
    # Honor a forced rotation if the style requires one, otherwise align the
    # text with the leader segment.
    rotationMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION \
        if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION \
        else QadDimStyleTxtRotModeEnum.ALIGNED_LINE
    insertionPt, rotation = self.getTextPositionOnLine(textSegment.getStartPt(), textSegment.getEndPt(), \
                                                       textWidth, textHeight, \
                                                       QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                       self.textVerticalPos, rotationMode)
    return insertionPt, rotation, leaderLines
#============================================================================
# getLinearTextAndBlocksPosition
#============================================================================
def getLinearTextAndBlocksPosition(self, dimPt1, dimPt2, dimLine, textWidth, textHeight):
    """
    dimPt1 = first point being dimensioned
    dimPt2 = second point being dimensioned
    dimLine = dimension line (QadLine)
    textWidth = text width
    textHeight = text height
    Returns a list of 4 elements:
    - the first element is a list with the insertion point of the dimension text and its rotation
    - the second element is a list with a flag indicating the kind of line the text was placed on
      (see QadDimComponentEnum) and a list of "leader" lines when the text is outside the dimension
    - the third element is the rotation of the first arrow block; may be None if not visible
    - the fourth element is the rotation of the second arrow block; may be None if not visible
    """
    textInsPt = None  # text insertion point
    textRot = None  # text rotation
    textLinearDimComponentOn = None  # code of the linear component the text is placed on
    txtLeaderLines = None  # list of "leader" lines when the text is outside the dimension
    block1Rot = None  # rotation of the first arrow block
    block2Rot = None  # rotation of the second arrow block

    # if the text is between the extension lines of the dimension
    if self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE or \
       self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE or \
       self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE:
        dimLineRot = qad_utils.getAngleBy2Pts(dimLine.getStartPt(), dimLine.getEndPt())  # dimension line angle
        # shift the dimension line endpoints to account for the space taken by the blocks
        dimLinePt1Offset = qad_utils.getPolarPointByPtAngle(dimLine.getStartPt(), dimLineRot, self.getBlock1Size())
        dimLinePt2Offset = qad_utils.getPolarPointByPtAngle(dimLine.getEndPt(), dimLineRot + math.pi, self.getBlock2Size())
        # text above or below the dimension line: when the dimension line is not
        # horizontal, the text is inside the extension lines and forced horizontal,
        # the text becomes centered
        if (self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE or self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE) and \
           (dimLineRot != 0 and dimLineRot != math.pi) and self.textRotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.CENTERED_LINE
        # text placed on the side opposite to the dimension points
        elif self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            # angle from the first dimension point to the first point of the dimension line
            dimPtToDimLinePt_rot = qad_utils.getAngleBy2Pts(dimPt1, dimLine.getStartPt())
            if dimPtToDimLinePt_rot > 0 and \
               (dimPtToDimLinePt_rot < math.pi or qad_utils.doubleNear(dimPtToDimLinePt_rot, math.pi)):
                textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
            else:
                textVerticalPos = QadDimStyleTxtVerticalPosEnum.BELOW_LINE
        else:
            textVerticalPos = self.textVerticalPos

        # ISO mode behaves as aligned-to-line here
        if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
            textInsPt, textRot = self.getTextPositionOnLine(dimLinePt1Offset, dimLinePt2Offset, textWidth, textHeight, \
                                                            self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
        else:
            textInsPt, textRot = self.getTextPositionOnLine(dimLinePt1Offset, dimLinePt2Offset, textWidth, textHeight, \
                                                            self.textHorizontalPos, textVerticalPos, self.textRotMode)
        rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
        spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(rect, dimLine)
        # if there is not enough room to fit both text and symbols inside the
        # extension lines; qad_utils.doubleSmaller is used because sometimes
        # the two numbers are almost equal
        if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
           qad_utils.doubleSmaller(spaceForBlock1, self.getBlock1Size() + self.textOffsetDist) or \
           qad_utils.doubleSmaller(spaceForBlock2, self.getBlock2Size() + self.textOffsetDist):
            if self.blockSuppressionForNoSpace:  # suppress the symbols if there is not enough room inside the extension lines
                block1Rot = None
                block2Rot = None
                # consider the text without arrows
                if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                    textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), textWidth, textHeight, \
                                                                    self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
                else:
                    textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), textWidth, textHeight, \
                                                                    self.textHorizontalPos, textVerticalPos, self.textRotMode)
                rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
                spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(rect, dimLine)
                # if there is no room even for the text without arrows
                if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                   spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                    # move the text outside the extension lines
                    textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                             textWidth, textHeight)
                    textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                else:
                    textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
            else:  # must not suppress the symbols
                # what gets moved outside first is:
                if self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.BOTH_OUTSIDE_EXT_LINES:
                    # move both text and arrows outside the extension lines
                    textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                             textWidth, textHeight)
                    textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                    block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)  # external arrows
                # move the arrows first and then, if that is not enough, the text too
                elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.FIRST_BLOCKS_THEN_TEXT:
                    block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)  # external arrows
                    # consider the text without arrows
                    if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                        textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                        textWidth, textHeight, \
                                                                        self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
                    else:
                        textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                        textWidth, textHeight, \
                                                                        self.textHorizontalPos, textVerticalPos, self.textRotMode)
                    rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
                    spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(rect, dimLine)
                    # if there is no room even for the text without arrows
                    if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                       spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                        # move the text outside the extension lines
                        textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                                 textWidth, textHeight)
                        textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                    else:
                        textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
                # move the text first and then, if that is not enough, the arrows too
                elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.FIRST_TEXT_THEN_BLOCKS:
                    # move the text outside the extension lines
                    textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                             textWidth, textHeight)
                    textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                    # if not even the arrows fit
                    if dimLine.length() <= self.getBlock1Size() + self.getBlock2Size():
                        block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)  # external arrows
                    else:
                        block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, True)  # internal arrows
                # move either the text or the arrows (whichever fits best)
                elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.WHICHEVER_FITS_BEST:
                    # move the bulkier one outside
                    if self.getBlock1Size() + self.getBlock2Size() > textWidth:  # the arrows are bulkier than the text
                        textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
                        block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)  # external arrows
                        # consider the text without arrows
                        if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                            textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                            textWidth, textHeight, \
                                                                            self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
                        else:
                            textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                            textWidth, textHeight, \
                                                                            self.textHorizontalPos, textVerticalPos, self.textRotMode)
                        rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
                        spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(rect, dimLine)
                        # if there is no room even for the text without arrows
                        if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                           spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                            # move the text outside the extension lines
                            textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                                     textWidth, textHeight)
                            textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                        else:
                            textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
                    else:  # the text is bulkier than the symbols
                        # move the text outside the extension lines
                        textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimLines(dimLine.getStartPt(), dimLine.getEndPt(), \
                                                                                                 textWidth, textHeight)
                        textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                        # if not even the arrows fit
                        if dimLine.length() <= self.getBlock1Size() + self.getBlock2Size():
                            block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)  # external arrows
                        else:
                            block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, True)  # internal arrows
        else:  # there is enough room to fit text and symbols inside the extension lines
            textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
            block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, True)  # internal arrows
    # the text is above and aligned with the first extension line
    elif self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE_UP:
        # angle of the line going from the dimension point to the start of the dimension line
        rotLine = qad_utils.getAngleBy2Pts(dimPt1, dimLine.getStartPt())
        pt = qad_utils.getPolarPointByPtAngle(dimLine.getStartPt(), rotLine, self.textOffsetDist + textWidth)
        if self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
        else:
            textVerticalPos = self.textVerticalPos
        if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:
            textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
        else:
            textRotMode = QadDimStyleTxtRotModeEnum.ALIGNED_LINE
        textInsPt, textRot = self.getTextPositionOnLine(dimLine.getStartPt(), pt, textWidth, textHeight, \
                                                        QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                        textVerticalPos, textRotMode)
        textLinearDimComponentOn = QadDimComponentEnum.EXT_LINE1
        # compute the space for the blocks in the absence of the text
        spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(None, dimLine)
        # if there is no room for the blocks
        if spaceForBlock1 < self.getBlock1Size() or spaceForBlock2 < self.getBlock2Size():
            if self.blockSuppressionForNoSpace:  # the blocks are suppressed
                block1Rot = None
                block2Rot = None
            else:  # move the arrows outside
                block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)
        else:  # there is room for the blocks
            block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, True)  # internal arrows
    # the text is above and aligned with the second extension line
    elif self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE_UP:
        # angle of the line going from the dimension point to the start of the dimension line
        rotLine = qad_utils.getAngleBy2Pts(dimPt2, dimLine.getEndPt())
        pt = qad_utils.getPolarPointByPtAngle(dimLine.getEndPt(), rotLine, self.textOffsetDist + textWidth)
        if self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
        else:
            textVerticalPos = self.textVerticalPos
        if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:
            textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
        else:
            textRotMode = QadDimStyleTxtRotModeEnum.ALIGNED_LINE
        textInsPt, textRot = self.getTextPositionOnLine(dimLine.getEndPt(), pt, textWidth, textHeight, \
                                                        QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                        textVerticalPos, textRotMode)
        textLinearDimComponentOn = QadDimComponentEnum.EXT_LINE2
        # compute the space for the blocks in the absence of the text
        spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnLine(None, dimLine)
        # if there is no room for the blocks
        if spaceForBlock1 < self.getBlock1Size() or spaceForBlock2 < self.getBlock2Size():
            if self.blockSuppressionForNoSpace:  # the blocks are suppressed
                block1Rot = None
                block2Rot = None
            else:  # move the arrows outside
                block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, False)
        else:  # there is room for the blocks
            block1Rot, block2Rot = self.getBlocksRotOnLine(dimLine, True)  # internal arrows

    if self.textDirection == QadDimStyleTxtDirectionEnum.DX_TO_SX:  # right-to-left text
        # the insertion point becomes the top-right corner of the rectangle
        textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot, textWidth)
        textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, textHeight)
        # the rotation is flipped
        textRot = qad_utils.normalizeAngle(textRot + math.pi)
    return [[textInsPt, textRot], [textLinearDimComponentOn, txtLeaderLines], block1Rot, block2Rot]
#============================================================================
# getArcTextAndBlocksPosition
#============================================================================
   def getArcTextAndBlocksPosition(self, dimArc, dimLineArc, textWidth, textHeight):
      """
      Compute the placement of the dimension text and of the arrow blocks for
      an arc-length dimension.
      dimArc = arc being dimensioned
      dimLineArc = dimension line expressed as an arc
      textWidth = text width
      textHeight = text height
      Returns a list of 4 elements:
      - the first element is a list with the insertion point of the dimension text and its rotation
      - the second element is a list with a flag identifying the kind of line the text was placed on (see QadDimComponentEnum)
        and a list of "leader" lines in case the text lies outside the dimension
      - the third element is the rotation of the first arrow block; may be None when not visible
      - the fourth element is the rotation of the second arrow block; may be None when not visible
      """
      textInsPt = None # insertion point of the text
      textRot = None # rotation of the text
      textLinearDimComponentOn = None # code of the linear component the text is placed on
      txtLeaderLines = None # list of "leader" lines in case the text lies outside the dimension
      block1Rot = None # rotation of the first arrow block
      block2Rot = None # rotation of the second arrow block
      dimLineArcPt1 = dimLineArc.getStartPt()
      dimLineArcPt2 = dimLineArc.getEndPt()
      dimLineArcLen = dimLineArc.length()
      # if the text is between the extension lines of the dimension
      if self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.CENTERED_LINE or \
         self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE or \
         self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE:
         dimLineArcMiddlePt = dimLineArc.getMiddlePt()
         dimLineRot = dimLineArc.getTanDirectionOnPt(dimLineArcMiddlePt) # angle at the middle point of the arc
         # NOTE(review): the two offset points below are computed but never used in this method
         dimLineArcPt1Offset, dummyTg = dimLineArc.getPointFromStart(self.getBlock1Size())
         dimLineArcPt2Offset, dummyTg = dimLineArc.getPointFromStart(dimLineArcLen - self.getBlock2Size())
         # text above or below the dimension line: when the dimension line is not horizontal
         # and the text is inside the extension lines and forced horizontal, the text becomes centered
         if (self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.ABOVE_LINE or self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.BELOW_LINE) and \
            (dimLineRot != 0 and dimLineRot != math.pi) and self.textRotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.CENTERED_LINE
         # text placed on the side opposite to the dimension points
         elif self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            # temporarily set it centered just to obtain the text position
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.CENTERED_LINE
         else:
            textVerticalPos = self.textVerticalPos
         if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
            textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                           self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
         else:
            textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                           self.textHorizontalPos, textVerticalPos, self.textRotMode)
         # text placed on the side opposite to the dimension points
         if self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            # central point of the dimension text
            insPtCenterTxt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot, textWidth / 2)
            # angle from the center of the arc to the central point of the dimension text
            dimCenterToTextInsPt_rot = qad_utils.getAngleBy2Pts(dimArc.center, insPtCenterTxt)
            if dimCenterToTextInsPt_rot > 0 and \
               (dimCenterToTextInsPt_rot <= math.pi or qad_utils.doubleNear(dimCenterToTextInsPt_rot, math.pi)):
               if dimLineArc.radius >= dimArc.radius:
                  textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
               else:
                  textVerticalPos = QadDimStyleTxtVerticalPosEnum.BELOW_LINE
            else:
               if dimLineArc.radius >= dimArc.radius:
                  textVerticalPos = QadDimStyleTxtVerticalPosEnum.BELOW_LINE
               else:
                  textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
            if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
               textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                              self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
            else:
               textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                              self.textHorizontalPos, textVerticalPos, self.textRotMode)
         rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
         spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(rect, dimLineArc)
         # if the space is not enough to fit text and symbols inside the extension lines;
         # qad_utils.doubleSmaller is used because sometimes the two numbers are almost equal
         if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
            qad_utils.doubleSmaller(spaceForBlock1, self.getBlock1Size() + self.textOffsetDist) or \
            qad_utils.doubleSmaller(spaceForBlock2, self.getBlock2Size() + self.textOffsetDist):
            if self.blockSuppressionForNoSpace: # suppress the symbols when there is not enough space inside the extension lines
               block1Rot = None
               block2Rot = None
               # consider the text without arrows
               if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                  textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                 self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
               else:
                  textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                 self.textHorizontalPos, textVerticalPos, self.textRotMode)
               rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
               spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(rect, dimLineArc)
               # if there is no room even for the text without the arrows
               if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                  spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                  # move the text outside the extension lines
                  textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                  textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
               else:
                  textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
            else: # the symbols must not be suppressed
               # the first thing to move outside is:
               if self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.BOTH_OUTSIDE_EXT_LINES:
                  # move text and arrows outside the extension lines
                  textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                  textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                  block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False) # arrows outside
               # move the arrows first and then, if that is not enough, the text too
               elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.FIRST_BLOCKS_THEN_TEXT:
                  block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False) # arrows outside
                  # consider the text without arrows
                  if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                     textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                    self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
                  else:
                     textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                    self.textHorizontalPos, textVerticalPos, self.textRotMode)
                  rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
                  spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(rect, dimLineArc)
                  # if there is no room even for the text without the arrows
                  if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                     spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                     # move the text outside the extension lines
                     textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                     textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                  else:
                     textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
               # move the text first and then, if that is not enough, the arrows too
               elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.FIRST_TEXT_THEN_BLOCKS:
                  # move the text outside the extension lines
                  textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                  textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                  # if not even the arrows fit
                  if dimLineArcLen <= self.getBlock1Size() + self.getBlock2Size():
                     block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False) # arrows outside
                  else:
                     block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, True) # arrows inside
               # move either the text or the arrows (whichever fits best)
               elif self.textBlockAdjust == QadDimStyleTextBlocksAdjustEnum.WHICHEVER_FITS_BEST:
                  # move the bulkier one
                  if self.getBlock1Size() + self.getBlock2Size() > textWidth: # the arrows are bulkier than the text
                     textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
                     block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False) # arrows outside
                     # consider the text without arrows
                     if self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
                        textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                       self.textHorizontalPos, textVerticalPos, QadDimStyleTxtRotModeEnum.ALIGNED_LINE)
                     else:
                        textInsPt, textRot = self.getTextPositionOnArc(dimLineArc, textWidth, textHeight, \
                                                                       self.textHorizontalPos, textVerticalPos, self.textRotMode)
                     rect = self.textRectToQadPolyline(textInsPt, textWidth, textHeight, textRot)
                     spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(rect, dimLineArc)
                     # if there is no room even for the text without the arrows
                     if spaceForBlock1 == 0 or spaceForBlock2 == 0 or \
                        spaceForBlock1 < self.textOffsetDist or spaceForBlock2 < self.textOffsetDist:
                        # move the text outside the extension lines
                        textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                        textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                     else:
                        textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
                  else: # the text is bulkier than the symbols
                     # move the text outside the extension lines
                     textInsPt, textRot, txtLeaderLines = self.getTextPosAndLinesOutOfDimArc(dimLineArc, textWidth, textHeight)
                     textLinearDimComponentOn = QadDimComponentEnum.LEADER_LINE
                     # if not even the arrows fit
                     if dimLineArcLen <= self.getBlock1Size() + self.getBlock2Size():
                        block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False) # arrows outside
                     else:
                        block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, True) # arrows inside
         else: # the space is enough to fit text and symbols inside the extension lines
            textLinearDimComponentOn = QadDimComponentEnum.DIM_LINE1
            block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, True) # arrows inside
      # the text is above and aligned with the first extension line
      elif self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE_UP:
         # angle of the line going from the dimension point to the start of the dimension line
         if dimArc.startAngle == dimLineArc.startAngle:
            rotLine = qad_utils.getAngleBy2Pts(dimArc.getStartPt(), dimLineArcPt1)
         else:
            rotLine = qad_utils.getAngleBy2Pts(dimArc.getEndPt(), dimLineArcPt1)
         pt = qad_utils.getPolarPointByPtAngle(dimLineArcPt1, rotLine, self.textOffsetDist + textWidth)
         if self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
         else:
            textVerticalPos = self.textVerticalPos
         if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:
            textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         else:
            textRotMode = QadDimStyleTxtRotModeEnum.ALIGNED_LINE
         textInsPt, textRot = self.getTextPositionOnLine(dimLineArcPt1, pt, textWidth, textHeight, \
                                                         QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                         textVerticalPos, textRotMode)
         textLinearDimComponentOn = QadDimComponentEnum.EXT_LINE1
         # compute the space available for the blocks in the absence of the text
         spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(None, dimLineArc)
         # if there is no room for the blocks
         if spaceForBlock1 < self.getBlock1Size() or spaceForBlock2 < self.getBlock2Size():
            if self.blockSuppressionForNoSpace: # the blocks are suppressed
               block1Rot = None
               block2Rot = None
            else: # move the arrows outside
               block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False)
         else: # there is room for the blocks
            block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, True) # arrows inside
      # the text is above and aligned with the second extension line
      elif self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.SECOND_EXT_LINE_UP:
         # angle of the line going from the dimension point to the start of the dimension line
         if dimArc.startAngle == dimLineArc.startAngle:
            rotLine = qad_utils.getAngleBy2Pts(dimArc.getEndPt(), dimLineArcPt2)
         else:
            rotLine = qad_utils.getAngleBy2Pts(dimArc.getStartPt(), dimLineArcPt2)
         pt = qad_utils.getPolarPointByPtAngle(dimLineArcPt2, rotLine, self.textOffsetDist + textWidth)
         if self.textVerticalPos == QadDimStyleTxtVerticalPosEnum.EXTERN_LINE:
            textVerticalPos = QadDimStyleTxtVerticalPosEnum.ABOVE_LINE
         else:
            textVerticalPos = self.textVerticalPos
         if self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION:
            textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         else:
            textRotMode = QadDimStyleTxtRotModeEnum.ALIGNED_LINE
         # NOTE(review): FIRST_EXT_LINE is passed here even for the second extension
         # line, mirroring the linear variant of this method — confirm intended
         textInsPt, textRot = self.getTextPositionOnLine(dimLineArcPt2, pt, textWidth, textHeight, \
                                                         QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE, \
                                                         textVerticalPos, textRotMode)
         textLinearDimComponentOn = QadDimComponentEnum.EXT_LINE2
         # compute the space available for the blocks in the absence of the text
         spaceForBlock1, spaceForBlock2 = self.getSpaceForBlock1AndBlock2OnArc(None, dimLineArc)
         # if there is no room for the blocks
         if spaceForBlock1 < self.getBlock1Size() or spaceForBlock2 < self.getBlock2Size():
            if self.blockSuppressionForNoSpace: # the blocks are suppressed
               block1Rot = None
               block2Rot = None
            else: # move the arrows outside
               block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, False)
         else: # there is room for the blocks
            block1Rot, block2Rot = self.getBlocksRotOnArc(dimLineArc, True) # arrows inside
      if self.textDirection == QadDimStyleTxtDirectionEnum.DX_TO_SX:
         # the insertion point becomes the upper-right corner of the rectangle
         textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot, textWidth)
         textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, textHeight)
         # the rotation is flipped
         textRot = qad_utils.normalizeAngle(textRot + math.pi)
      return [[textInsPt, textRot], [textLinearDimComponentOn, txtLeaderLines], block1Rot, block2Rot]
#============================================================================
# getRadiusTextAndBlocksPosition
#============================================================================
def getRadiusTextAndBlocksPosition(self, dimLine, textWidth, textHeight):
"""
dimLine = linea di quota (QadLine)
textWidth = larghezza testo
textHeight = altezza testo
Restituisce una lista di 4 elementi:
- il primo elemento é una lista con il punto di inserimento del testo della quota e la sua rotazione
- il secondo elemento é una lista con flag che indica il tipo della linea sulla quale é stato messo il testo; vedi QadDimComponentEnum
e una lista di linee "leader" nel caso il testo sia all'esterno della quota
- il terzo elemento é la rotazione del primo blocco delle frecce; può essere None se non visibile
- il quarto elemento é la rotazione del secondo blocco delle frecce; può essere None se non visibile
"""
textInsPt = None # punto di inserimento del testo
textRot = None # rotazione del testo
textLinearDimComponentOn = None # codice del componente lineare sul quale é posizionato il testo
txtLeaderLines = None # lista di linee "leader" nel caso il testo sia all'esterno della quota
# cambio alcui parametri di quotatura
block1Name = self.block1Name
self.block1Name = "" # nessuna freccia al punto 1 di quotatura
block2Name = self.block2Name
self.block2Name = "" # nessuna freccia al punto 2 di quotatura
textBlockAdjust = self.textBlockAdjust
self.textBlockAdjust = QadDimStyleTextBlocksAdjustEnum.FIRST_TEXT_THEN_BLOCKS # se il testo non ci sta va fuori dalla linea di quotatura
textHorizontalPos = self.textHorizontalPos
self.textHorizontalPos = QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE
res = self.getLinearTextAndBlocksPosition(dimLine.getStartPt(), dimLine.getEndPt(), dimLine, textWidth, textHeight)
# ripristino i valori originali
self.block1Name = block1Name
self.block2Name = block2Name
self.textBlockAdjust = textBlockAdjust
self.textHorizontalPos = textHorizontalPos
return res
#============================================================================
# getTextFeature
#============================================================================
def getTextFeature(self, measure, pt = None, rot = None):
"""
Restituisce la feature per il testo della quota.
La rotazione é espressa in radianti.
"""
_pt = QgsPointXY(0,0) if pt is None else pt
_rot = 0 if rot is None else rot
textualFeaturePrototype = self.getTextualFeaturePrototype()
if textualFeaturePrototype is None:
return None
f = QgsFeature(textualFeaturePrototype)
g = fromQadGeomToQgsGeom(QadPoint().set(_pt), self.getTextualLayer().crs()) # trasformo la geometria
f.setGeometry(g)
# se il testo dipende da un solo campo
labelFieldNames = qad_label.get_labelFieldNames(self.getTextualLayer())
if len(labelFieldNames) == 1 and len(labelFieldNames[0]) > 0:
f.setAttribute(labelFieldNames[0], self.getFormattedText(measure))
# se l'altezza testo dipende da un solo campo
sizeFldNames = qad_label.get_labelSizeFieldNames(self.getTextualLayer())
if len(sizeFldNames) == 1 and len(sizeFldNames[0]) > 0:
f.setAttribute(sizeFldNames[0], self.textHeight) # altezza testo
# se la rotazione dipende da un solo campo
rotFldNames = qad_label.get_labelRotationFieldNames(self.getTextualLayer())
if len(rotFldNames) == 1 and len(rotFldNames[0]) > 0:
f.setAttribute(rotFldNames[0], qad_utils.toDegrees(_rot)) # Converte da radianti a gradi
# se il font dipende da un solo campo
fontFamilyFldNames = qad_label.get_labelFontFamilyFieldNames(self.getTextualLayer())
if len(fontFamilyFldNames) == 1 and len(fontFamilyFldNames[0]) > 0:
f.setAttribute(fontFamilyFldNames[0], self.textFont) # nome del font di testo
# imposto il colore
try:
if len(self.colorFieldName) > 0:
f.setAttribute(self.colorFieldName, self.textColor)
except:
pass
# imposto lo stile di quotatura
try:
if len(self.dimStyleFieldName) > 0:
f.setAttribute(self.dimStyleFieldName, self.name)
if len(self.dimTypeFieldName) > 0:
f.setAttribute(self.dimTypeFieldName, self.dimType)
except:
pass
return f
#============================================================================
# FUNZIONI PER IL TESTO - FINE
# FUNZIONI PER LA LINEA DI LEADER - INIZIO
#============================================================================
#============================================================================
# getAuxiliarySecondLeaderLine
#============================================================================
def getAuxiliarySecondLeaderLine(self, pt1, rotLine, textWidth, textHeight):
"""
Funzione interna di ausilio per le successive che si occupano di leader line.
Restituisce la seconda linea porta quota (quella più vicina al testo).
pt1 = punto da cui iniziare la linea (QgsPointXY)
rotLine = angolo della prima linea porta quota (QgsPointXY)
textWidth = larghezza testo
textHeight = altezza testo
"""
# modalità di rotazione del testo orizzontale o
# testo allineato con la linea di quota se tra le linee di estensione, altrimenti testo orizzontale
if self.textRotMode == QadDimStyleTxtRotModeEnum.HORIZONTAL or \
self.textRotMode == QadDimStyleTxtRotModeEnum.ISO:
if qad_utils.doubleNear(rotLine, math.pi / 2): # verticale dal basso verso l'alto
pt2 = qad_utils.getPolarPointByPtAngle(pt1, 0, self.textOffsetDist + textWidth)
elif qad_utils.doubleNear(rotLine, math.pi * 3 / 2): # verticale dall'alto verso il basso
pt2 = qad_utils.getPolarPointByPtAngle(pt1, math.pi, self.textOffsetDist + textWidth)
elif (rotLine > math.pi * 3 / 2 and rotLine <= math.pi * 2) or \
(rotLine >= 0 and rotLine < math.pi / 2): # da sx a dx
pt2 = qad_utils.getPolarPointByPtAngle(pt1, 0, self.textOffsetDist + textWidth)
else: # da dx a sx
pt2 = qad_utils.getPolarPointByPtAngle(pt1, math.pi, self.textOffsetDist + textWidth)
elif self.textRotMode == QadDimStyleTxtRotModeEnum.ALIGNED_LINE: # testo allineato con la linea di quota
pt2 = qad_utils.getPolarPointByPtAngle(pt1, rotLine, self.textOffsetDist + textWidth)
elif self.textRotMode == QadDimStyleTxtRotModeEnum.FORCED_ROTATION: # testo con rotazione forzata
pt2 = qad_utils.getPolarPointByPtAngle(pt1, self.textForcedRot, self.textOffsetDist + textWidth)
return QadLine().set(pt1, pt2)
#============================================================================
# getLeaderLinesOnLine
#============================================================================
def getLeaderLinesOnLine(self, dimLinePt1, dimLinePt2, textWidth, textHeight):
"""
Restituisce una polilinea (QadPolyline) che forma il porta quota nel caso il testo venga spostato
fuori dalle linee di estensione perché era troppo grosso.
dimLinePt1 = primo punto della linea di quota (QgsPointXY)
dimLinePt2 = secondo punto della linea di quota (QgsPointXY)
textWidth = larghezza testo
textHeight = altezza testo
"""
res = QadPolyline()
# le linee sono a lato della linea di estensione 1
if self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE:
rotLine = qad_utils.getAngleBy2Pts(dimLinePt2, dimLinePt1) # angolo della linea porta quota
pt1 = qad_utils.getPolarPointByPtAngle(dimLinePt1, rotLine, self.getBlock1Size())
res.append(QadLine().set(dimLinePt1, pt1))
# le linee sono a lato della linea di estensione 2
else:
rotLine = qad_utils.getAngleBy2Pts(dimLinePt1, dimLinePt2) # angolo della linea porta quota
pt1 = qad_utils.getPolarPointByPtAngle(dimLinePt2, rotLine, self.getBlock2Size())
res.append(QadLine().set(dimLinePt2, pt1))
# ricavo la seconda linea di porta quota
line2 = self.getAuxiliarySecondLeaderLine(pt1, rotLine, textWidth, textHeight)
res.append(line2)
return res
#============================================================================
# getLeaderLinesOnArc
#============================================================================
def getLeaderLinesOnArc(self, dimLineArc, textWidth, textHeight):
"""
Restituisce una polilinea (QadPolyline) che forma il porta quota nel caso il testo venga spostato
fuori dalle linee di estensione perché era troppo grosso.
dimLineArc = arco rappresentante l'arco di quota (QadArc)
textWidth = larghezza testo
textHeight = altezza testo
"""
res = QadPolyline()
# le linee sono a lato della linea di estensione 1
if self.textHorizontalPos == QadDimStyleTxtHorizontalPosEnum.FIRST_EXT_LINE:
startPt = dimLineArc.getStartPt()
rotLine = dimLineArc.getTanDirectionOnPt(startPt) + math.pi # angolo della linea porta quota sul punto iniziale
pt1 = qad_utils.getPolarPointByPtAngle(startPt, rotLine, self.getBlock1Size())
res.append(QadLine().set(startPt, pt1))
# le linee sono a lato della linea di estensione 2
else:
endPt = dimLineArc.getEndPt()
rotLine = dimLineArc.getTanDirectionOnPt(endPt) # angolo della linea porta quota sul punto finale
pt1 = qad_utils.getPolarPointByPtAngle(endPt, rotLine, self.getBlock2Size())
res.append(QadLine().set(endPt, pt1))
# ricavo la seconda linea di porta quota
line2 = self.getAuxiliarySecondLeaderLine(pt1, rotLine, textWidth, textHeight)
res.append(line2)
return res
#============================================================================
# getLeaderFeature
#============================================================================
def getLeaderFeature(self, leaderLines, leaderLineType = QadDimComponentEnum.LEADER_LINE):
"""
Restituisce la feature per la linea di estensione.
leaderLines = polilinea leader (QadPolyline)
leaderLineType = tipo di linea porta quota (LEADER_LINE, ARC_LEADER_LINE, ...)
"""
if leaderLines is None:
return None
linearFeaturePrototype = self.getLinearFeaturePrototype()
if linearFeaturePrototype is None:
return None
f = QgsFeature(linearFeaturePrototype)
g = fromQadGeomToQgsGeom(leaderLines, self.getLinearLayer().crs())
f.setGeometry(g)
try:
# imposto il tipo di componente della quotatura
if len(self.componentFieldName) > 0:
f.setAttribute(self.componentFieldName, leaderLineType)
except:
pass
try:
# imposto il tipo di linea
if len(self.lineTypeFieldName) > 0:
f.setAttribute(self.lineTypeFieldName, self.dimLineLineType)
except:
pass
try:
# imposto il colore
if len(self.colorFieldName) > 0:
f.setAttribute(self.colorFieldName, self.dimLineColor)
except:
pass
return f
#============================================================================
# getArcLeaderLine
#============================================================================
def getArcLeaderLine(self, pt, arc):
"""
Restituisce la linea che congiunge il testo all'arco da quotare.
"""
intPts = QadIntersections.infinityLineWithArc(QadLine().set(pt, arc.center), arc)
if len(intPts) == 1:
return [pt, intPts[0]]
elif len(intPts) == 2:
# scelgo il più vicino
if qad_utils.getDistance(pt, intPts[0]) < qad_utils.getDistance(pt, intPts[1]):
return QadLine().set(pt, intPts[0])
else:
return QadLine().set(pt, intPts[1])
else:
return None
#============================================================================
# FUNZIONI PER LA LINEA DI LEADER - FINE
# FUNZIONI PER LE LINEE DI ESTENSIONE - INIZIO
#============================================================================
#============================================================================
# getExtLine
#============================================================================
def getExtLine(self, dimPt, dimLinePt):
"""
dimPt = punto da quotare
dimLinePt = corrispondente punto della linea di quotatura
ritorna una linea di estensione modificata secondo lo stile di quotatura
il primo punto é vicino alla linea di quota, il secondo al punto da quotare
"""
angle = qad_utils.getAngleBy2Pts(dimPt, dimLinePt)
# distanza della linea di estensione oltre la linea di quota
pt1 = qad_utils.getPolarPointByPtAngle(dimLinePt, angle, self.extLineOffsetDimLine)
# distanza della linea di estensione dai punti da quotare
pt2 = qad_utils.getPolarPointByPtAngle(dimPt, angle, self.extLineOffsetOrigPoints)
if self.extLineIsFixedLen == True: # attivata lunghezza fissa delle line di estensione
if qad_utils.getDistance(pt1, pt2) > self.extLineFixedLen:
# lunghezza fissa delle line di estensione (DIMFXL) dalla linea di quota
# al punto da quotare spostato di extLineOffsetOrigPoints
# (la linea di estensione non va oltre il punto da quotare)
d = qad_utils.getDistance(dimLinePt, dimPt)
if d > self.extLineFixedLen:
d = self.extLineFixedLen
pt2 = qad_utils.getPolarPointByPtAngle(dimLinePt, angle + math.pi, d)
return QadLine().set(pt1, pt2)
#============================================================================
# getExtArc
#============================================================================
   def getExtArc(self, arc, linePosPt):
      """
      arc = arc being dimensioned
      linePosPt = point corresponding to where the dimension should be placed
      Return an extension arc (QadArc) for the DIMRADIUS dimension, or None
      when no extension arc is needed / possible.
      """
      # if the point lies within the angular span of the arc no extension is needed
      angle = qad_utils.getAngleBy2Pts(arc.center, linePosPt)
      if qad_utils.isAngleBetweenAngles(arc.startAngle, arc.endAngle, angle) == True:
         return None
      myArc = QadArc()
      pt = qad_utils.getPolarPointByPtAngle(arc.center, angle, arc.radius) # point on the curve
      # on the side of the arc's start point
      if qad_utils.getDistance(pt, arc.getStartPt()) < qad_utils.getDistance(pt, arc.getEndPt()):
         myArc.set(arc.center, arc.radius, angle, arc.startAngle)
         # the gap from the original points would consume the whole extension arc
         if myArc.length() <= self.extLineOffsetOrigPoints:
            return None
         myArc.setStartAngleByPt(pt)
         # extend the arc backwards beyond the dimension line position
         dummyPt, dummyTg = myArc.getPointFromStart(-self.extLineOffsetDimLine)
         myArc.setStartAngleByPt(dummyPt)
         # pull the end back to keep the gap from the original points
         dummyPt, dummyTg = arc.getPointFromStart(-self.extLineOffsetOrigPoints)
         myArc.setEndAngleByPt(dummyPt) # change the end point
      else: # on the side of the arc's end point
         myArc.set(arc.center, arc.radius, arc.endAngle, angle)
         # the gap from the original points would consume the whole extension arc
         if myArc.length() <= self.extLineOffsetOrigPoints:
            return None
         # push the start forward to keep the gap from the original points
         dummyPt, dummyTg = arc.getPointFromEnd(self.extLineOffsetOrigPoints)
         myArc.setStartAngleByPt(dummyPt) # change the start point
         myArc.setEndAngleByPt(pt)
         # extend the arc beyond the dimension line position
         dummyPt, dummyTg = myArc.getPointFromEnd(self.extLineOffsetDimLine)
         myArc.setEndAngleByPt(dummyPt)
      return myArc
#============================================================================
# getExtLineFeature
#============================================================================
def getExtLineFeature(self, extLine, isExtLine1):
"""
Restituisce la feature per la linea di estensione.
extLine = linea di estensione QadLine o QadArc
isExtLine1 = se True si tratta della linea di estensione 1 altrimenti della linea di estensione 2
"""
if (isExtLine1 == True and self.extLine1Show == False) or \
(isExtLine1 == False and self.extLine2Show == False):
return None
f = QgsFeature(self.getLinearFeaturePrototype())
g = fromQadGeomToQgsGeom(extLine, self.getLinearLayer().crs()) # trasformo la geometria
f.setGeometry(g)
try:
# imposto il tipo di componente della quotatura
if len(self.componentFieldName) > 0:
f.setAttribute(self.componentFieldName, QadDimComponentEnum.EXT_LINE1 if isExtLine1 else QadDimComponentEnum.EXT_LINE2)
except:
pass
try:
# imposto il tipo di linea
if len(self.lineTypeFieldName) > 0:
f.setAttribute(self.lineTypeFieldName, self.extLine1LineType if isExtLine1 else self.extLine2LineType)
except:
pass
try:
# imposto il colore
if len(self.colorFieldName) > 0:
f.setAttribute(self.colorFieldName, self.extLineColor)
except:
pass
return f
#============================================================================
# FUNZIONI PER LE LINEE DI ESTENSIONE - FINE
# FUNZIONI PER LA LINEA DI QUOTA - INIZIO
#============================================================================
#============================================================================
# getDimLine
#============================================================================
def getDimLine(self, dimPt1, dimPt2, linePosPt, preferredAlignment = QadDimStyleAlignmentEnum.HORIZONTAL,
dimLineRotation = 0.0):
"""
Restituisce la linea di quotatura entro le linee di estensione (eventuali estensioni saranno calcolate
dalla funzione: getDimLineExtensions)
dimPt1 = primo punto da quotare
dimPt2 = secondo punto da quotare
linePosPt = punto per indicare dove deve essere posizionata la linea di quota
preferredAlignment = indica se ci si deve allineare ai punti di quota in modo orizzontale o verticale
(se i punti di quota formano una linea obliqua). Usato solo per le quotature lineari
dimLineRotation = angolo della linea di quotatura (default = 0). Usato solo per le quotature lineari
"""
if self.dimType == QadDimTypeEnum.ALIGNED:
# calcolo la proiezione perpendicolare del punto <linePosPt> sulla linea che congiunge <dimPt1> a <dimPt2>
ptPerp = qad_utils.getPerpendicularPointOnInfinityLine(dimPt1, dimPt2, linePosPt)
d = qad_utils.getDistance(linePosPt, ptPerp)
angle = qad_utils.getAngleBy2Pts(dimPt1, dimPt2)
if qad_utils.leftOfLine(linePosPt, dimPt1, dimPt2) < 0: # a sinistra della linea che congiunge <dimPt1> a <dimPt2>
angle = angle + (math.pi / 2)
else:
angle = angle - (math.pi / 2)
return QadLine().set(qad_utils.getPolarPointByPtAngle(dimPt1, angle, d), \
qad_utils.getPolarPointByPtAngle(dimPt2, angle, d))
elif self.dimType == QadDimTypeEnum.LINEAR:
if preferredAlignment == QadDimStyleAlignmentEnum.HORIZONTAL:
ptDummy = qad_utils.getPolarPointByPtAngle(dimPt1, dimLineRotation + math.pi / 2, 1)
pt1 = qad_utils.getPerpendicularPointOnInfinityLine(dimPt1, ptDummy, linePosPt)
ptDummy = qad_utils.getPolarPointByPtAngle(dimPt2, dimLineRotation + math.pi / 2, 1)
pt2 = qad_utils.getPerpendicularPointOnInfinityLine(dimPt2, ptDummy, linePosPt)
return QadLine().set(pt1, pt2)
elif preferredAlignment == QadDimStyleAlignmentEnum.VERTICAL:
ptDummy = qad_utils.getPolarPointByPtAngle(dimPt1, dimLineRotation, 1)
pt1 = qad_utils.getPerpendicularPointOnInfinityLine(dimPt1, ptDummy, linePosPt)
ptDummy = qad_utils.getPolarPointByPtAngle(dimPt2, dimLineRotation, 1)
pt2 = qad_utils.getPerpendicularPointOnInfinityLine(dimPt2, ptDummy, linePosPt)
return QadLine().set(pt1, pt2)
#============================================================================
# getDimLineForArc
#============================================================================
def getDimLineForArc(self, arc, linePosPt):
"""
Restituisce la linea di quotatura (sottoforma di un arco) per la l'ampiezza di un arco +
un flag per avvisare se l'arco è stato invertito
Restituisce la linea di quotatura entro le linee di estensione (eventuali estensioni saranno calcolate
dalla funzione: getDimArcExtensions)
arc = oggetto arco QadArc (in unita di mappa)
linePosPt = punto per indicare dove deve essere posizionata la linea di quota
"""
if self.dimType == QadDimTypeEnum.ARC_LENTGH:
myArc = QadArc(arc)
# calcolo la distanza tra <linePosPt> e il centro dell'arco
d = qad_utils.getDistance(linePosPt, myArc.center)
myArc.radius = d # cambio il raggio
# se il punto non è all'interno dell'arco considero l'inverso dell'arco
angle = qad_utils.getAngleBy2Pts(myArc.center, linePosPt)
if qad_utils.isAngleBetweenAngles(myArc.startAngle, myArc.endAngle, angle) == False:
myArc.inverseAngles()
return myArc
return None
#============================================================================
# getDimLineFeature
#============================================================================
def getDimLineFeature(self, dimLine, isDimLine1, textLinearDimComponentOn):
"""
Restituisce la feature per la linea di quota.
dimLine = linea di quota (QadLine o QadArc)
isDimLine1 = se True si tratta della linea di quota 1 altrimenti della linea di quota 2
textLinearDimComponentOn = indica il componente della quota dove é situato il testo di quota (QadDimComponentEnum)
"""
# se non c'é la linea di quota
if dimLine is None:
return None
if isDimLine1 == True: # se si tratta della linea di quota 1
# se la linea di quota 1 deve essere invisibile (vale solo se il testo é sulla linea di quota)
if self.dimLine1Show == False and \
(textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1 or textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE2):
return None
else: # se si tratta della linea di quota 2
# se la linea di quota 2 deve essere invisibile (vale solo se il testo é sulla linea di quota)
if self.dimLine2Show == False and \
(textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1 or textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE2):
return None
f = QgsFeature(self.getLinearFeaturePrototype())
g = fromQadGeomToQgsGeom(dimLine, self.getLinearLayer().crs()) # trasformo la geometria
f.setGeometry(g)
try:
# imposto il tipo di componente della quotatura
if len(self.componentFieldName) > 0:
f.setAttribute(self.componentFieldName, QadDimComponentEnum.DIM_LINE1 if isDimLine1 else QadDimComponentEnum.DIM_LINE2)
except:
pass
try:
# imposto il tipo di linea
if len(self.lineTypeFieldName) > 0:
f.setAttribute(self.lineTypeFieldName, self.dimLineLineType)
except:
pass
try:
# imposto il colore
if len(self.colorFieldName) > 0:
f.setAttribute(self.colorFieldName, self.dimLineColor)
except:
pass
return f
#============================================================================
# FUNZIONI PER LA LINEA DI QUOTA - FINE
# FUNZIONI PER LE ESTENSIONI DELLA LINEA DI QUOTATURA - INIZIO
#============================================================================
#============================================================================
# getDimLineExtensions
#============================================================================
def getDimLineExtensions(self, dimLine1, dimLine2):
"""
Restituisce le estensioni delle linee di quotatura a inizio e fine (vedi variabile dimLineOffsetExtLine)
"""
# se non è maggiore di 0 oppure se non ci sono linee di dimensione
if self.dimLineOffsetExtLine <= 0 or (dimLine1 is None and dimLine2 is None):
return None, None
extDimLine1 = None
extDimLine2 = None
# imposto le linee nello stesso verso della linea di dimensione
rot = qad_utils.getAngleBy2Pts(dimLine1.getStartPt(), dimLine1.getEndPt())
if dimLine1 is not None:
# cambio punto iniziale
extDimLine1 = QadLine().set(qad_utils.getPolarPointByPtAngle(dimLine1.getStartPt(), rot + math.pi, self.dimLineOffsetExtLine), \
dimLine1.getStartPt())
if dimLine2 is None: # se la linea di quotatura è composta solo di una linea
# cambio punto finale
extDimLine2 = QadLine().set(dimLine1.getEndPt(), \
qad_utils.getPolarPointByPtAngle(dimLine1.getEndPt(), rot, self.dimLineOffsetExtLine))
if dimLine2 is not None:
rot = qad_utils.getAngleBy2Pts(dimLine2.getStartPt(), dimLine2.getEndPt())
# cambio punto finale
extDimLine2 = QadLine().set(dimLine2.getEndPt(), \
qad_utils.getPolarPointByPtAngle(dimLine2.getEndPt(), rot, self.dimLineOffsetExtLine))
return extDimLine1, extDimLine2
#============================================================================
# getDimArcExtension
#============================================================================
def getDimArcExtensions(self, dimLineArc1, dimLineArc2):
"""
Restituisce le estensioni degli archi di quotatura applicando a inizio e fine (vedi variabile dimLineOffsetExtLine)
"""
# se non è maggiore di 0 oppure se non ci sono linee di dimensione
if self.dimLineOffsetExtLine <= 0 or (dimLineArc1 is None and dimLineArc2 is None):
return None, None
extDimArc1 = None
extDimArc2 = None
if dimLineArc1 is not None:
extDimArc1 = QadArc(dimLineArc1)
extDimArc1.endAngle = dimLineArc1.startAngle
dummyPt, dummyTg = dimLineArc1.getPointFromStart(-self.dimLineOffsetExtLine)
extDimArc1.setStartAngleByPt(dummyPt) # cambio punto iniziale
if dimLineArc2 is None: # se la linea di quotatura è composta solo di un arco
extDimArc2 = QadArc(dimLineArc1)
extDimArc2.startAngle = dimLineArc1.endAngle
dummyPt, dummtTg = dimLineArc1.getPointFromEnd(self.dimLineOffsetExtLine)
extDimArc2.setEndAngleByPt(dummyPt) # cambio punto finale
if dimLineArc2 is not None:
extDimArc2 = QadArc(dimLineArc2)
extDimArc2.startAngle = dimLineArc1.endAngle
dummyPt, dummtTg = dimLineArc2.getPointFromEnd(self.dimLineOffsetExtLine)
dimLineArc2.setEndAngleByPt(dummyPt) # cambio punto finale
return extDimArc1, extDimArc2
#============================================================================
# getDimLineExtFeature
#============================================================================
def getDimLineExtFeature(self, extLine, isExtLine1):
"""
Restituisce la feature per l'estensione della linea di quotatura.
extLine = linea di estensione (QadLine o QadArc)
isExtLine1 = se True si tratta della estensione della linea di quotatura 1 altrimenti della linea di quotatura 2
"""
if extLine is None:
return None
f = QgsFeature(self.getLinearFeaturePrototype())
g = fromQadGeomToQgsGeom(extLine, self.getLinearLayer().crs()) # trasformo la geometria
f.setGeometry(g)
try:
# imposto il tipo di componente della quotatura
if len(self.componentFieldName) > 0:
f.setAttribute(self.componentFieldName, QadDimComponentEnum.DIM_LINE_EXT1 if isExtLine1 else QadDimComponentEnum.DIM_LINE_EXT2)
except:
pass
try:
# imposto il tipo di linea
if len(self.lineTypeFieldName) > 0:
f.setAttribute(self.lineTypeFieldName, self.extLine1LineType if isExtLine1 else self.extLine2LineType)
except:
pass
try:
# imposto il colore
if len(self.colorFieldName) > 0:
f.setAttribute(self.colorFieldName, self.extLineColor)
except:
pass
return f
#============================================================================
# FUNZIONI PER LE ESTENSIONI DELLA LINEA DI QUOTATURA - FINE
# FUNZIONI PER LA QUOTATURA LINEARE - INIZIO
#============================================================================
#============================================================================
# getLinearDimFeatures
#============================================================================
   def getLinearDimFeatures(self, canvas, dimPt1, dimPt2, linePosPt, measure = None, \
                            preferredAlignment = QadDimStyleAlignmentEnum.HORIZONTAL, \
                            dimLineRotation = 0.0):
      """
      Build all the features making up a linear dimension (horizontal or
      vertical dimension line).
      canvas = map canvas, used to measure the rendered label size
      dimPt1 = first point to dimension (map units)
      dimPt2 = second point to dimension (map units)
      linePosPt = point giving the position of the dimension line (map units)
      measure = predetermined measure text, or None to compute it from the geometry
      preferredAlignment = whether to align to the dimension points horizontally
                           or vertically (when the points form an oblique line)
      dimLineRotation = rotation angle of the dimension line (default = 0)
      Returns (dimEntity, textRectGeom):
      - dimEntity = QadDimEntity collecting the textual feature, the linear
        features (dimension lines and their extensions, extension lines, leader
        line) and the symbol features (dimension points, arrow blocks)
      - textRectGeom = QgsGeometry of the rectangle occupied by the text
      """
      self.dimType = QadDimTypeEnum.LINEAR
      # dimension points
      dimPt1Feature = self.getDimPointFeature(dimPt1, True) # True = first dimension point
      dimPt2Feature = self.getDimPointFeature(dimPt2, False) # False = second dimension point
      # dimension line between the extension lines
      dimLine1 = self.getDimLine(dimPt1, dimPt2, linePosPt, preferredAlignment, dimLineRotation)
      dimLine2 = None
      # text and blocks
      if measure is None:
         textValue = dimLine1.length()
      else:
         textValue = unicode(measure) # NOTE(review): unicode() is Python 2 only - confirm target runtime
      textFeature = self.getTextFeature(textValue)
      textWidth, textHeight = qad_label.calculateLabelSize(self.getTextualLayer(), textFeature, canvas)
      # build a rectangle around the text with a buffer = self.textOffsetDist
      textWidthOffset = textWidth + self.textOffsetDist * 2
      textHeightOffset = textHeight + self.textOffsetDist * 2
      # getLinearTextAndBlocksPosition returns 4 elements:
      # - 1st: list with the insertion point of the dimension text and its rotation
      # - 2nd: list with a flag telling which component the text was placed on
      #   (see QadDimComponentEnum) and a list of "leader" lines used when the
      #   text falls outside the dimension
      # - 3rd: rotation of the first arrow block; None when not visible
      # - 4th: rotation of the second arrow block; None when not visible
      dummy1, dummy2, block1Rot, block2Rot = self.getLinearTextAndBlocksPosition(dimPt1, dimPt2, \
                                                                                 dimLine1, \
                                                                                 textWidthOffset, textHeightOffset)
      textOffsetRectInsPt = dummy1[0]
      textRot = dummy1[1]
      textLinearDimComponentOn = dummy2[0]
      txtLeaderLines = dummy2[1]
      # find the real text insertion point, accounting for the buffer around it
      textInsPt = qad_utils.getPolarPointByPtAngle(textOffsetRectInsPt, textRot, self.textOffsetDist)
      textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, self.textOffsetDist)
      # text
      textGeom = QgsGeometry.fromPointXY(textInsPt)
      textFeature = self.getTextFeature(textValue, textInsPt, textRot)
      # arrow blocks
      block1Feature = self.getSymbolFeature(dimLine1.getStartPt(), block1Rot, True, textLinearDimComponentOn) # True = first dimension point
      block2Feature = self.getSymbolFeature(dimLine1.getEndPt(), block2Rot, False, textLinearDimComponentOn) # False = second dimension point
      extLine1 = self.getExtLine(dimPt1, dimLine1.getStartPt())
      extLine2 = self.getExtLine(dimPt2, dimLine1.getEndPt())
      # build a rectangle around the text with an offset
      textOffsetRect = self.textRectToQadPolyline(textOffsetRectInsPt, textWidthOffset, textHeightOffset, textRot)
      if textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1: # text on the dimension line ("Dimension line")
         dimLine1, dimLine2 = self.adjustLineAccordingTextRect(textOffsetRect, dimLine1, QadDimComponentEnum.DIM_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE1: # text on the first extension line ("Extension line 1")
         if extLine1 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt1, dimLine1.getStartPt())
            extLine1 = self.getExtLine(dimPt1, qad_utils.getPolarPointByPtAngle(dimLine1.getStartPt(), extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine1 = extLine1.copy().reverse()
            extLine1, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine1, QadDimComponentEnum.EXT_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE2: # text on the second extension line ("Extension line 2")
         if extLine2 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt2, dimLine1.getEndPt())
            extLine2 = self.getExtLine(dimPt2, qad_utils.getPolarPointByPtAngle(dimLine1.getEndPt(), extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine2 = extLine2.copy().reverse()
            extLine2, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine2, QadDimComponentEnum.EXT_LINE2)
      elif textLinearDimComponentOn == QadDimComponentEnum.LEADER_LINE: # leader line used when the text is outside the dimension ("Leader")
         lastLine = txtLeaderLines.getLinearObjectAt(-1)
         lastLine, dummy = self.adjustLineAccordingTextRect(textOffsetRect, lastLine, QadDimComponentEnum.LEADER_LINE)
         txtLeaderLines.remove(-1) # replace the last element
         txtLeaderLines.append(lastLine)
      # dimension lines
      dimLine1Feature = self.getDimLineFeature(dimLine1, True, textLinearDimComponentOn) # True = first dimension line
      dimLine2Feature = self.getDimLineFeature(dimLine2, False, textLinearDimComponentOn) # False = second dimension line
      # extensions of the dimension lines
      dimLineExt1, dimLineExt2 = self.getDimLineExtensions(dimLine1, dimLine2)
      dimLineExt1Feature = self.getDimLineExtFeature(dimLineExt1, True)
      dimLineExt2Feature = self.getDimLineExtFeature(dimLineExt2, False)
      # extension lines
      extLine1Feature = self.getExtLineFeature(extLine1, True) # True = first extension line
      extLine2Feature = self.getExtLineFeature(extLine2, False) # False = second extension line
      # leader line
      txtLeaderLineFeature = self.getLeaderFeature(txtLeaderLines)
      dimEntity = QadDimEntity()
      dimEntity.dimStyle = self
      # textual features
      dimEntity.textualFeature = textFeature
      # linear features
      if dimLine1Feature is not None:
         dimEntity.linearFeatures.append(dimLine1Feature)
      if dimLine2Feature is not None:
         dimEntity.linearFeatures.append(dimLine2Feature)
      if dimLineExt1Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt1Feature)
      if dimLineExt2Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt2Feature)
      if extLine1Feature is not None:
         dimEntity.linearFeatures.append(extLine1Feature)
      if extLine2Feature is not None:
         dimEntity.linearFeatures.append(extLine2Feature)
      if txtLeaderLineFeature is not None:
         dimEntity.linearFeatures.append(txtLeaderLineFeature)
      # point features
      dimEntity.symbolFeatures.extend([dimPt1Feature, dimPt2Feature])
      if block1Feature is not None:
         dimEntity.symbolFeatures.append(block1Feature)
      if block2Feature is not None:
         dimEntity.symbolFeatures.append(block2Feature)
      return dimEntity, QgsGeometry.fromPolygonXY([textOffsetRect.asPolyline()])
#============================================================================
# addLinearDimToLayers
#============================================================================
def addLinearDimToLayers(self, plugIn, dimPt1, dimPt2, linePosPt, measure = None, \
preferredAlignment = QadDimStyleAlignmentEnum.HORIZONTAL, \
dimLineRotation = 0.0):
"""
Aggiunge ai layers le features che compongono una quota lineare.
"""
dimEntity, textOffsetRect = self.getLinearDimFeatures(plugIn.canvas, \
dimPt1, \
dimPt2, \
linePosPt, \
measure, \
preferredAlignment, \
dimLineRotation)
return self.addDimEntityToLayers(plugIn, dimEntity)
#============================================================================
# FUNZIONI PER LA QUOTATURA LINEARE - FINE
# FUNZIONI PER LA QUOTATURA ALLINEATA - INIZIO
#============================================================================
#============================================================================
# getAlignedDimFeatures
#============================================================================
   def getAlignedDimFeatures(self, canvas, dimPt1, dimPt2, linePosPt, measure = None):
      """
      Build all the features making up an aligned dimension (dimension line
      parallel to the segment dimPt1-dimPt2).
      canvas = map canvas, used to measure the rendered label size
      dimPt1 = first point to dimension (map units)
      dimPt2 = second point to dimension (map units)
      linePosPt = point giving the position of the dimension line (map units)
      measure = predetermined measure text, or None to compute it from the geometry
      Returns (dimEntity, textRectGeom):
      - dimEntity = QadDimEntity collecting the textual feature, the linear
        features (dimension lines and their extensions, extension lines, leader
        line) and the symbol features (dimension points, arrow blocks)
      - textRectGeom = QgsGeometry of the rectangle occupied by the text
      """
      self.dimType = QadDimTypeEnum.ALIGNED
      # dimension points
      dimPt1Feature = self.getDimPointFeature(dimPt1, True) # True = first dimension point
      dimPt2Feature = self.getDimPointFeature(dimPt2, False) # False = second dimension point
      # dimension line between the extension lines
      dimLine1 = self.getDimLine(dimPt1, dimPt2, linePosPt)
      dimLine2 = None
      # text and blocks
      if measure is None:
         textValue = dimLine1.length()
      else:
         textValue = unicode(measure) # NOTE(review): unicode() is Python 2 only - confirm target runtime
      textFeature = self.getTextFeature(textValue)
      textWidth, textHeight = qad_label.calculateLabelSize(self.getTextualLayer(), textFeature, canvas)
      # build a rectangle around the text with a buffer = self.textOffsetDist
      textWidthOffset = textWidth + self.textOffsetDist * 2
      textHeightOffset = textHeight + self.textOffsetDist * 2
      # getLinearTextAndBlocksPosition returns 4 elements:
      # - 1st: list with the insertion point of the dimension text and its rotation
      # - 2nd: list with a flag telling which component the text was placed on
      #   (see QadDimComponentEnum) and a list of "leader" lines used when the
      #   text falls outside the dimension
      # - 3rd: rotation of the first arrow block; None when not visible
      # - 4th: rotation of the second arrow block; None when not visible
      dummy1, dummy2, block1Rot, block2Rot = self.getLinearTextAndBlocksPosition(dimPt1, dimPt2, \
                                                                                 dimLine1, \
                                                                                 textWidthOffset, textHeightOffset)
      textOffsetRectInsPt = dummy1[0]
      textRot = dummy1[1]
      textLinearDimComponentOn = dummy2[0]
      txtLeaderLines = dummy2[1]
      # find the real text insertion point, accounting for the buffer around it
      textInsPt = qad_utils.getPolarPointByPtAngle(textOffsetRectInsPt, textRot, self.textOffsetDist)
      textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, self.textOffsetDist)
      # text
      textGeom = QgsGeometry.fromPointXY(textInsPt)
      textFeature = self.getTextFeature(textValue, textInsPt, textRot)
      # arrow blocks
      block1Feature = self.getSymbolFeature(dimLine1.getStartPt(), block1Rot, True, textLinearDimComponentOn) # True = first dimension point
      block2Feature = self.getSymbolFeature(dimLine1.getEndPt(), block2Rot, False, textLinearDimComponentOn) # False = second dimension point
      extLine1 = self.getExtLine(dimPt1, dimLine1.getStartPt())
      extLine2 = self.getExtLine(dimPt2, dimLine1.getEndPt())
      # build a rectangle around the text with an offset
      textOffsetRect = self.textRectToQadPolyline(textOffsetRectInsPt, textWidthOffset, textHeightOffset, textRot)
      if textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1: # text on the dimension line ("Dimension line")
         dimLine1, dimLine2 = self.adjustLineAccordingTextRect(textOffsetRect, dimLine1, QadDimComponentEnum.DIM_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE1: # text on the first extension line ("Extension line 1")
         if extLine1 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt1, dimLine1.getStartPt())
            extLine1 = self.getExtLine(dimPt1, qad_utils.getPolarPointByPtAngle(dimLine1.getStartPt(), extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine1 = extLine1.copy().reverse()
            extLine1, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine1, QadDimComponentEnum.EXT_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE2: # text on the second extension line ("Extension line 2")
         if extLine2 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt2, dimLine1.getEndPt())
            extLine2 = self.getExtLine(dimPt2, qad_utils.getPolarPointByPtAngle(dimLine1.getEndPt(), extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine2 = extLine2.copy().reverse()
            extLine2, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine2, QadDimComponentEnum.EXT_LINE2)
      elif textLinearDimComponentOn == QadDimComponentEnum.LEADER_LINE: # leader line used when the text is outside the dimension ("Leader")
         lastLine = txtLeaderLines.getLinearObjectAt(-1)
         lastLine, dummy = self.adjustLineAccordingTextRect(textOffsetRect, lastLine, QadDimComponentEnum.LEADER_LINE)
         txtLeaderLines.remove(-1) # replace the last element
         txtLeaderLines.append(lastLine)
      # dimension lines
      dimLine1Feature = self.getDimLineFeature(dimLine1, True, textLinearDimComponentOn) # True = first dimension line
      dimLine2Feature = self.getDimLineFeature(dimLine2, False, textLinearDimComponentOn) # False = second dimension line
      # extensions of the dimension lines
      dimLineExt1, dimLineExt2 = self.getDimLineExtensions(dimLine1, dimLine2)
      dimLineExt1Feature = self.getDimLineExtFeature(dimLineExt1, True)
      dimLineExt2Feature = self.getDimLineExtFeature(dimLineExt2, False)
      # extension lines
      extLine1Feature = self.getExtLineFeature(extLine1, True) # True = first extension line
      extLine2Feature = self.getExtLineFeature(extLine2, False) # False = second extension line
      # leader line
      txtLeaderLineFeature = self.getLeaderFeature(txtLeaderLines)
      dimEntity = QadDimEntity()
      dimEntity.dimStyle = self
      # textual features
      dimEntity.textualFeature = textFeature
      # linear features
      if dimLine1Feature is not None:
         dimEntity.linearFeatures.append(dimLine1Feature)
      if dimLine2Feature is not None:
         dimEntity.linearFeatures.append(dimLine2Feature)
      if dimLineExt1Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt1Feature)
      if dimLineExt2Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt2Feature)
      if extLine1Feature is not None:
         dimEntity.linearFeatures.append(extLine1Feature)
      if extLine2Feature is not None:
         dimEntity.linearFeatures.append(extLine2Feature)
      if txtLeaderLineFeature is not None:
         dimEntity.linearFeatures.append(txtLeaderLineFeature)
      # point features
      dimEntity.symbolFeatures.extend([dimPt1Feature, dimPt2Feature])
      if block1Feature is not None:
         dimEntity.symbolFeatures.append(block1Feature)
      if block2Feature is not None:
         dimEntity.symbolFeatures.append(block2Feature)
      return dimEntity, QgsGeometry.fromPolygonXY([textOffsetRect.asPolyline()])
#============================================================================
# addAlignedDimToLayers
#============================================================================
def addAlignedDimToLayers(self, plugIn, dimPt1, dimPt2, linePosPt, measure = None, \
preferredAlignment = QadDimStyleAlignmentEnum.HORIZONTAL, \
dimLineRotation = 0.0):
"""
dimPt1 = primo punto da quotare (in unita di mappa)
dimPt2 = secondo punto da quotare (in unita di mappa)
linePosPt = punto per indicare dove deve essere posizionata la linea di quota (in unita di mappa)
measure = indica se la misura é predeterminata oppure (se = None) deve essere calcolata
preferredAlignment = se lo stile di quota é lineare, indica se ci si deve allienare ai punti di quota
in modo orizzontale o verticale (se i punti di quota formano una linea obliqua)
dimLineRotation = angolo della linea di quotatura (default = 0)
Aggiunge ai layers le features che compongono una quota allineata.
"""
dimEntity, textOffsetRect = self.getAlignedDimFeatures(plugIn.canvas, \
dimPt1, \
dimPt2, \
linePosPt, \
measure)
return self.addDimEntityToLayers(plugIn, dimEntity)
#============================================================================
# FUNZIONI PER LA QUOTATURA ALLINEATA - FINE
# FUNZIONI PER LA QUOTATURA ARCO - INIZIO
#============================================================================
#============================================================================
# getArcDimFeatures
#============================================================================
   def getArcDimFeatures(self, canvas, dimArc, linePosPt, measure = None, arcLeader = None):
      """
      Build all the features making up an arc-length dimension (measures the
      length of an arc or part of it).
      canvas = map canvas, used to measure the rendered label size
      dimArc = QadArc object to dimension (map units)
      linePosPt = point giving the position of the dimension line (map units)
      measure = predetermined measure text, or None to compute it from the geometry
      arcLeader = whether to draw the leader line joining the dimension to the arc
      Returns (dimEntity, textRectGeom):
      - dimEntity = QadDimEntity collecting the textual feature, the linear
        features (dimension arcs and their extensions, extension lines, leader
        lines, arc symbol) and the symbol features (dimension points, arrow
        blocks, arc-leader block)
      - textRectGeom = QgsGeometry of the rectangle occupied by the text
      """
      self.dimType = QadDimTypeEnum.ARC_LENTGH
      # dimension line in the form of an arc
      dimLineArc1 = self.getDimLineForArc(dimArc, linePosPt)
      dimLineArc1StartPt = dimLineArc1.getStartPt()
      dimLineArc1EndPt = dimLineArc1.getEndPt()
      dimLineArc2 = None
      dimPt1 = dimArc.getStartPt()
      dimPt2 = dimArc.getEndPt()
      # dimension points
      dimPt1Feature = self.getDimPointFeature(dimPt1, True) # True = first dimension point
      dimPt2Feature = self.getDimPointFeature(dimPt2, False) # False = second dimension point
      # text and blocks
      if measure is None:
         textValue = dimArc.length()
      else:
         textValue = unicode(measure) # NOTE(review): unicode() is Python 2 only - confirm target runtime
      textFeature = self.getTextFeature(textValue)
      textWidth, textHeight = qad_label.calculateLabelSize(self.getTextualLayer(), textFeature, canvas)
      # build a rectangle around the text with a buffer = self.textOffsetDist
      textWidthOffset = textWidth + self.textOffsetDist * 2
      textHeightOffset = textHeight + self.textOffsetDist * 2
      # radius of the arc symbol drawn next to/above the text (half the text height)
      arcSymbRadius = textHeight * 2 / 4
      if self.arcSymbPos == QadDimStyleArcSymbolPosEnum.BEFORE_TEXT:
         textWidthOffset = textWidthOffset + self.textOffsetDist + 2 * arcSymbRadius
      elif self.arcSymbPos == QadDimStyleArcSymbolPosEnum.ABOVE_TEXT:
         textHeightOffset = textHeightOffset + self.textOffsetDist + arcSymbRadius
      # getArcTextAndBlocksPosition returns 4 elements:
      # - 1st: list with the insertion point of the dimension text and its rotation
      # - 2nd: list with a flag telling which component the text was placed on
      #   (see QadDimComponentEnum) and a list of "leader" lines used when the
      #   text falls outside the dimension
      # - 3rd: rotation of the first arrow block; None when not visible
      # - 4th: rotation of the second arrow block; None when not visible
      dummy1, dummy2, block1Rot, block2Rot = self.getArcTextAndBlocksPosition(dimArc, dimLineArc1, \
                                                                              textWidthOffset, textHeightOffset)
      textOffsetRectInsPt = dummy1[0]
      textRot = dummy1[1]
      textLinearDimComponentOn = dummy2[0]
      txtLeaderLines = dummy2[1]
      # find the real text insertion point, accounting for the buffer around it
      # (when the arc symbol precedes the text, also skip the symbol's width)
      if self.arcSymbPos == QadDimStyleArcSymbolPosEnum.BEFORE_TEXT:
         textInsPt = qad_utils.getPolarPointByPtAngle(textOffsetRectInsPt, textRot, self.textOffsetDist + self.textOffsetDist + 2 * arcSymbRadius)
         textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, self.textOffsetDist)
      else:
         textInsPt = qad_utils.getPolarPointByPtAngle(textOffsetRectInsPt, textRot, self.textOffsetDist)
         textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, self.textOffsetDist)
      # text
      textGeom = QgsGeometry.fromPointXY(textInsPt)
      textFeature = self.getTextFeature(textValue, textInsPt, textRot)
      # arrow blocks
      block1Feature = self.getSymbolFeature(dimLineArc1StartPt, block1Rot, True, textLinearDimComponentOn) # True = first dimension point
      block2Feature = self.getSymbolFeature(dimLineArc1EndPt, block2Rot, False, textLinearDimComponentOn) # False = second dimension point
      extLine1 = self.getExtLine(dimPt1, dimLineArc1StartPt)
      extLine2 = self.getExtLine(dimPt2, dimLineArc1EndPt)
      # build a rectangle around the text with an offset
      textOffsetRect = self.textRectToQadPolyline(textOffsetRectInsPt, textWidthOffset, textHeightOffset, textRot)
      if textLinearDimComponentOn == QadDimComponentEnum.DIM_LINE1: # text on the dimension line ("Dimension line")
         dimLineArc1, dimLineArc2 = self.adjustArcAccordingTextRect(textOffsetRect, dimLineArc1, QadDimComponentEnum.DIM_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE1: # text on the first extension line ("Extension line 1")
         if extLine1 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt1, dimLineArc1StartPt)
            extLine1 = self.getExtLine(dimPt1, qad_utils.getPolarPointByPtAngle(dimLineArc1StartPt, extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine1 = extLine1.copy().reverse()
            extLine1, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine1, QadDimComponentEnum.EXT_LINE1)
      elif textLinearDimComponentOn == QadDimComponentEnum.EXT_LINE2: # text on the second extension line ("Extension line 2")
         if extLine2 is not None:
            extLineRot = qad_utils.getAngleBy2Pts(dimPt2, dimLineArc1EndPt)
            extLine2 = self.getExtLine(dimPt2, qad_utils.getPolarPointByPtAngle(dimLineArc1EndPt, extLineRot, textWidth + self.textOffsetDist))
            # reverse the line because getExtLine returns a line going from the dimension line toward the dimension point
            reverseExtLine2 = extLine2.copy().reverse()
            extLine2, dummy = self.adjustLineAccordingTextRect(textOffsetRect, reverseExtLine2, QadDimComponentEnum.EXT_LINE2)
      elif textLinearDimComponentOn == QadDimComponentEnum.LEADER_LINE: # leader line used when the text is outside the dimension ("Leader")
         lastLine = txtLeaderLines.getLinearObjectAt(-1)
         lastLine, dummy = self.adjustLineAccordingTextRect(textOffsetRect, lastLine, QadDimComponentEnum.LEADER_LINE)
         txtLeaderLines.remove(-1) # replace the last element
         txtLeaderLines.append(lastLine)
      # dimension lines
      if dimLineArc1 is None:
         dimLine1Feature = None
      else:
         dimLine1Feature = self.getDimLineFeature(dimLineArc1, True, textLinearDimComponentOn) # True = first dimension line
      if dimLineArc2 is None:
         dimLine2Feature = None
      else:
         dimLine2Feature = self.getDimLineFeature(dimLineArc2, False, textLinearDimComponentOn) # False = second dimension line
      # extensions of the dimension lines
      dimArcExt1, dimArcExt2 = self.getDimArcExtensions(dimLineArc1, dimLineArc2)
      if dimArcExt1 is None:
         dimLineExt1Feature = None
      else:
         dimLineExt1Feature = self.getDimLineExtFeature(dimArcExt1, True)
      if dimArcExt2 is None:
         dimLineExt2Feature = None
      else:
         dimLineExt2Feature = self.getDimLineExtFeature(dimArcExt2, False)
      # extension lines
      extLine1Feature = self.getExtLineFeature(extLine1, True) # True = first extension line
      extLine2Feature = self.getExtLineFeature(extLine2, False) # False = second extension line
      # leader line for the text
      txtLeaderLineFeature = self.getLeaderFeature(txtLeaderLines, QadDimComponentEnum.ARC_LEADER_LINE)
      # arc leader line
      arcLeaderLineFeature = None
      arcLeaderBlockFeature = None
      if arcLeader: # if the line joining the text to the dimensioned arc is wanted
         arcLeaderLine = self.getArcLeaderLine(textOffsetRectInsPt, dimArc)
         if arcLeaderLine is not None:
            arcLeaderLines = QadPolyline()
            arcLeaderLines.append(arcLeaderLine)
            arcLeaderLineFeature = self.getLeaderFeature(arcLeaderLines)
            arcLeaderBlockFeature = self.getLeaderSymbolFeature(arcLeaderLine.getEndPt(), \
                                                                arcLeaderLine.getTanDirectionOnPt())
      # arc symbol next to/above the text
      arcSymbolLineFeature = None
      if self.arcSymbPos == QadDimStyleArcSymbolPosEnum.BEFORE_TEXT:
         arc = QadArc()
         arcPt1 = qad_utils.getPolarPointByPtAngle(textInsPt, textRot, - self.textOffsetDist)
         arcCenter = qad_utils.getPolarPointByPtAngle(arcPt1, textRot, - arcSymbRadius)
         arcPt2 = qad_utils.getPolarPointByPtAngle(arcCenter, textRot, - arcSymbRadius)
         arc.fromStartCenterEndPts(arcPt1, arcCenter, arcPt2)
         arcSymbolLineFeature = self.getArcSymbolLineFeature(arc)
      elif self.arcSymbPos == QadDimStyleArcSymbolPosEnum.ABOVE_TEXT:
         arc = QadArc()
         arcCenter = qad_utils.getPolarPointByPtAngle(textInsPt, textRot, textWidth / 2)
         arcCenter = qad_utils.getPolarPointByPtAngle(arcCenter, textRot + math.pi / 2, arcSymbRadius + self.textOffsetDist)
         arcPt1 = qad_utils.getPolarPointByPtAngle(arcCenter, textRot, arcSymbRadius)
         arcPt2 = qad_utils.getPolarPointByPtAngle(arcCenter, textRot, - arcSymbRadius)
         arc.fromStartCenterEndPts(arcPt1, arcCenter, arcPt2)
         arcSymbolLineFeature = self.getArcSymbolLineFeature(arc)
      dimEntity = QadDimEntity()
      dimEntity.dimStyle = self
      # textual features
      dimEntity.textualFeature = textFeature
      # linear features
      if dimLine1Feature is not None:
         dimEntity.linearFeatures.append(dimLine1Feature)
      if dimLine2Feature is not None:
         dimEntity.linearFeatures.append(dimLine2Feature)
      if dimLineExt1Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt1Feature)
      if dimLineExt2Feature is not None:
         dimEntity.linearFeatures.append(dimLineExt2Feature)
      if extLine1Feature is not None:
         dimEntity.linearFeatures.append(extLine1Feature)
      if extLine2Feature is not None:
         dimEntity.linearFeatures.append(extLine2Feature)
      if txtLeaderLineFeature is not None:
         dimEntity.linearFeatures.append(txtLeaderLineFeature)
      if arcLeaderLineFeature is not None:
         dimEntity.linearFeatures.append(arcLeaderLineFeature)
      if arcSymbolLineFeature is not None:
         dimEntity.linearFeatures.append(arcSymbolLineFeature)
      # point features
      dimEntity.symbolFeatures.extend([dimPt1Feature, dimPt2Feature])
      if block1Feature is not None:
         dimEntity.symbolFeatures.append(block1Feature)
      if block2Feature is not None:
         dimEntity.symbolFeatures.append(block2Feature)
      if arcLeaderBlockFeature is not None:
         dimEntity.symbolFeatures.append(arcLeaderBlockFeature)
      return dimEntity, QgsGeometry.fromPolygonXY([textOffsetRect.asPolyline()])
#============================================================================
# addArcDimToLayers
#============================================================================
def addArcDimToLayers(self, plugIn, dimArc, linePosPt, measure = None, arcLeader = False):
    """
    Add to the layers the features composing an arc-length dimension.
    dimArc = arc to dimension (in map units)
    linePosPt = point indicating where the dimension line has to be placed (in map units)
    measure = predetermined measure, or None to have it computed
    arcLeader = whether the leader line from the dimension to the arc must be drawn
    Returns the result of addDimEntityToLayers.
    """
    # build the whole dimension entity first, then commit it to the layers
    dimEntity, textOffsetRect = self.getArcDimFeatures(plugIn.canvas, dimArc,
                                                       linePosPt, measure, arcLeader)
    return self.addDimEntityToLayers(plugIn, dimEntity)
#============================================================================
# FUNZIONI PER LA QUOTATURA ARCO - FINE
# FUNZIONI PER LA QUOTATURA RAGGIO - INIZIO
#============================================================================
#============================================================================
# getCenterMarkerLinesFeature
#============================================================================
def getCenterMarkerLinesFeature(self, canvas, dimObj, linePosPt):
    """
    Return the list of features drawing the center-mark lines of an arc/circle dimension.
    canvas = map canvas (unused here, kept for interface symmetry)
    dimObj = arc or circle being dimensioned (in map units); provides .center and .radius
    linePosPt = point indicating where the dimension line is placed (in map units)
    centerMarkSize semantics: 0 = no marker, > 0 = center-mark half size,
    < 0 = centerline mode (marker plus axis lines crossing the curve).
    """
    if self.centerMarkSize == 0.0:  # 0 = nothing to draw
        return []
    # no center marker when the dimension line position falls inside the curve
    if qad_utils.getDistance(dimObj.center, linePosPt) < dimObj.radius:
        return []
    cx = dimObj.center.x()
    cy = dimObj.center.y()
    geoms = []
    if self.centerMarkSize > 0.0:  # positive: plain center mark
        halfSize = self.centerMarkSize
        geoms.append(QadLine().set(QgsPointXY(cx - halfSize, cy),
                                   QgsPointXY(cx + halfSize, cy)))
        geoms.append(QadLine().set(QgsPointXY(cx, cy - halfSize),
                                   QgsPointXY(cx, cy + halfSize)))
    else:  # negative: centerline mode
        halfSize = -self.centerMarkSize
        geoms.append(QadLine().set(QgsPointXY(cx - halfSize, cy),
                                   QgsPointXY(cx + halfSize, cy)))
        geoms.append(QadLine().set(QgsPointXY(cx, cy - halfSize),
                                   QgsPointXY(cx, cy + halfSize)))
        # axis lines are added only when there is room between mark and curve
        if (2 * halfSize) < dimObj.radius:
            geoms.append(QadLine().set(QgsPointXY(cx - (2 * halfSize), cy),
                                       QgsPointXY(cx - dimObj.radius - halfSize, cy)))
            geoms.append(QadLine().set(QgsPointXY(cx + (2 * halfSize), cy),
                                       QgsPointXY(cx + dimObj.radius + halfSize, cy)))
            geoms.append(QadLine().set(QgsPointXY(cx, cy - (2 * halfSize)),
                                       QgsPointXY(cx, cy - dimObj.radius - halfSize)))
            geoms.append(QadLine().set(QgsPointXY(cx, cy + (2 * halfSize)),
                                       QgsPointXY(cx, cy + dimObj.radius + halfSize)))
    features = []
    for g in geoms:
        f = QgsFeature(self.getLinearFeaturePrototype())
        f.setGeometry(fromQadGeomToQgsGeom(g, self.getLinearLayer().crs()))  # transform the geometry
        try:
            # set the dimension component type
            if len(self.componentFieldName) > 0:
                f.setAttribute(self.componentFieldName, QadDimComponentEnum.CENTER_MARKER_LINE)
        except:
            pass
        try:
            # set the line type
            if len(self.lineTypeFieldName) > 0:
                f.setAttribute(self.lineTypeFieldName, self.dimLineLineType)
        except:
            pass
        try:
            # set the color
            if len(self.colorFieldName) > 0:
                f.setAttribute(self.colorFieldName, self.dimLineColor)
        except:
            pass
        features.append(f)
    return features
#============================================================================
# getRadiusDimFeatures
#============================================================================
def getRadiusDimFeatures(self, canvas, dimObj, linePosPt, measure = None):
    """
    Radius dimension: measures the radius of an arc or of a circle.
    canvas = map canvas (used for label size computation)
    dimObj = arc or circle object to dimension (in map units)
    linePosPt = point indicating where the dimension line has to be placed (in map units)
    measure = predetermined measure, or None to have it computed
    Returns (QadDimEntity, QgsGeometry) where the geometry is the polygon of the
    text occupation rectangle.
    """
    self.dimType = QadDimTypeEnum.RADIUS
    # center marker lines
    dimCenterMarkers = self.getCenterMarkerLinesFeature(canvas, dimObj, linePosPt)
    # dimension points
    dimPt1 = dimObj.center
    angle = qad_utils.getAngleBy2Pts(dimPt1, linePosPt)
    dimPt2 = qad_utils.getPolarPointByPtAngle(dimPt1, angle, dimObj.radius)  # point on the curve
    dimPt1Feature = self.getDimPointFeature(dimPt1, True)   # True = first dimension point
    dimPt2Feature = self.getDimPointFeature(dimPt2, False)  # False = second dimension point
    # if at least one of the two dimension blocks is visible
    # (bug fix: the original tested self.block1Name twice, so the block1/block2
    # selection passed to getSymbolFeature below could never pick block 2)
    if self.block1Name != "" or self.block2Name != "":
        blockRot = qad_utils.getAngleBy2Pts(linePosPt, dimPt2)
        # push the dimension line start out if it is too close to the curve
        if qad_utils.getDistance(linePosPt, dimPt2) <= 2 * self.getBlock2Size():
            linePosPt = qad_utils.getPolarPointByPtAngle(dimPt2, blockRot + math.pi, 2 * self.getBlock2Size())
        # arrow block
        blockFeature = self.getSymbolFeature(dimPt2, blockRot,
                                             True if self.block1Name != "" else False,
                                             QadDimComponentEnum.LEADER_LINE)
    else:
        blockFeature = None
    # dimension line
    dimLine = QadLine().set(linePosPt, dimPt2)
    # dimension line 1 or dimension line 2 must be visible
    if self.dimLine1Show == True or self.dimLine2Show == True:
        dimLineFeature = self.getDimLineFeature(dimLine, self.dimLine1Show, QadDimComponentEnum.LEADER_LINE)
    else:  # the dimension line is invisible
        dimLineFeature = None
    # extension line
    extLineFeature = None
    if dimObj.whatIs() == "ARC":
        extArc = self.getExtArc(dimObj, linePosPt)
        # extension lines
        if extArc is not None:
            extLineFeature = self.getExtLineFeature(extArc, True)  # True = first extension line
    # text and blocks
    if measure is None:
        textValue = QadMsg.translate("Command_DIM", "R") + self.getFormattedText(dimObj.radius)  # prepend the R of Radius
    else:
        # bug fix: unicode() does not exist on Python 3 (QGIS 3 API is used
        # throughout this file); str() is the equivalent there
        textValue = str(measure)
    textFeature = self.getTextFeature(textValue)
    textWidth, textHeight = qad_label.calculateLabelSize(self.getTextualLayer(), textFeature, canvas)
    # build a rectangle around the text with a buffer = self.textOffsetDist
    textWidthOffset = textWidth + self.textOffsetDist * 2
    textHeightOffset = textHeight + self.textOffsetDist * 2
    # build a fictitious line for the text placement;
    # half as long as the text, to force the text outside the fictitious line
    pt = qad_utils.getPolarPointByPtAngle(linePosPt, angle + math.pi, textWidthOffset / 2)
    # getRadiusTextAndBlocksPosition returns:
    # - a list with the insertion point of the dimension text and its rotation
    # - a list with a flag telling on which line type the text was placed (see
    #   QadDimComponentEnum) and a list of "leader" lines in case the text is
    #   outside the dimension
    # - the rotations of block 1 and block 2
    dummy1, dummy2, block1Rot, block2Rot = self.getRadiusTextAndBlocksPosition(QadLine().set(linePosPt, pt),
                                                                               textWidthOffset, textHeightOffset)
    textOffsetRectInsPt = dummy1[0]
    textRot = dummy1[1]
    textLinearDimComponentOn = dummy2[0]
    txtLeaderLines = dummy2[1]
    # find the real text insertion point keeping the surrounding buffer into account
    textInsPt = qad_utils.getPolarPointByPtAngle(textOffsetRectInsPt, textRot, self.textOffsetDist)
    textInsPt = qad_utils.getPolarPointByPtAngle(textInsPt, textRot + math.pi / 2, self.textOffsetDist)
    # text
    textFeature = self.getTextFeature(textValue, textInsPt, textRot)
    # build a rectangle around the text with an offset
    textOffsetRect = self.textRectToQadPolyline(textOffsetRectInsPt, textWidthOffset, textHeightOffset, textRot)
    # shorten the last leader segment so that it stops at the text rectangle
    lastLine = txtLeaderLines.getLinearObjectAt(-1)
    lastLine, dummy = self.adjustLineAccordingTextRect(textOffsetRect, lastLine, QadDimComponentEnum.LEADER_LINE)
    txtLeaderLines.remove(-1)  # replace the last element
    txtLeaderLines.append(lastLine)
    # leader line
    txtLeaderLineFeature = self.getLeaderFeature(txtLeaderLines)
    dimEntity = QadDimEntity()
    dimEntity.dimStyle = self
    # textual features
    dimEntity.textualFeature = textFeature
    # linear features
    if dimLineFeature is not None:
        dimEntity.linearFeatures.append(dimLineFeature)
    if extLineFeature is not None:
        dimEntity.linearFeatures.append(extLineFeature)
    if txtLeaderLineFeature is not None:
        dimEntity.linearFeatures.append(txtLeaderLineFeature)
    for dimCenterMarker in dimCenterMarkers:
        dimEntity.linearFeatures.append(dimCenterMarker)
    # point features
    dimEntity.symbolFeatures.extend([dimPt1Feature, dimPt2Feature])
    if blockFeature is not None:
        dimEntity.symbolFeatures.append(blockFeature)
    return dimEntity, QgsGeometry.fromPolygonXY([textOffsetRect.asPolyline()])
#============================================================================
# addRadiusDimToLayers
#============================================================================
def addRadiusDimToLayers(self, plugIn, dimObj, linePosPt, measure = None):
    """
    Add to the layers the features composing a radius dimension.
    dimObj = arc or circle object to dimension (in map units)
    linePosPt = point indicating where the dimension line has to be placed (in map units)
    measure = predetermined measure, or None to have it computed
    Returns the result of addDimEntityToLayers.
    """
    # build the whole dimension entity first, then commit it to the layers
    dimEntity, textOffsetRect = self.getRadiusDimFeatures(plugIn.canvas, dimObj,
                                                          linePosPt, measure)
    return self.addDimEntityToLayers(plugIn, dimEntity)
#============================================================================
# FUNZIONI PER LA QUOTATURA RAGGIO - FINE
#============================================================================
#===============================================================================
# QadDimStylesClass list of dimension styles
#===============================================================================
class QadDimStylesClass():
    """
    Container managing the list of available dimension styles (QadDimStyle):
    lookup by name, add/remove/rename, loading from disk, and queries mapping
    entities/layers back to the dimension style they belong to.
    """
    def __init__(self, dimStyleList = None):
        if dimStyleList is None:
            self.dimStyleList = []  # list of QadDimStyle
        else:
            self.set(dimStyleList)

    def set(self, dimStyleList):
        """Store a copy of every style in dimStyleList."""
        # bug fix: __init__ called self.set() but no such method existed;
        # copy each style, mirroring what addDimStyle does
        self.dimStyleList = [QadDimStyle(dimStyle) for dimStyle in dimStyleList]

    def __del__(self):
        # bug fix: the original referenced the undefined global "dimStyleList"
        # (with an inverted condition), raising NameError on destruction
        self.dimStyleList = []

    def isEmpty(self):
        """Return True when no dimension style is loaded."""
        return True if self.count() == 0 else False

    def count(self):
        """Return the number of loaded dimension styles."""
        return len(self.dimStyleList)

    def clear(self):
        """Empty the style list in place."""
        del self.dimStyleList[:]

    def findDimStyle(self, dimStyleName):
        """
        Given a dimension style name, search it in the list and, on success,
        return the dimension style; return None when not found.
        """
        for dimStyle in self.dimStyleList:
            if dimStyle.name == dimStyleName:
                return dimStyle
        return None

    def addDimStyle(self, dimStyle, toFile = False, filePath = ""):
        """
        Append a copy of dimStyle to the list, optionally saving it to file.
        Return True on success, False when a style with the same name already
        exists or saving fails.
        """
        # bug fix: findDimStyle expects a style *name*, not a style object
        # (passing the object made the lookup always fail, allowing duplicates)
        d = self.findDimStyle(dimStyle.name)
        if d is None:
            self.dimStyleList.append(QadDimStyle(dimStyle))
            if toFile:
                if dimStyle.save(filePath, False) == False:  # without overwriting the file
                    return False
            return True
        return False

    #============================================================================
    # removeDimStyle
    #============================================================================
    def removeDimStyle(self, dimStyleName, toFile = False):
        """
        Remove the style named dimStyleName (optionally deleting its file too).
        Return True on success, False when not found.
        """
        i = 0
        for dimStyle in self.dimStyleList:
            if dimStyle.name == dimStyleName:
                del self.dimStyleList[i]
                if toFile:
                    dimStyle.remove()
                return True
            else:
                i = i + 1
        return False

    #============================================================================
    # renameDimStyle
    #============================================================================
    def renameDimStyle(self, dimStyleName, newDimStyleName):
        """
        Rename the style dimStyleName to newDimStyleName.
        Return True on success (or when the two names are equal), False when the
        new name is already in use or the style does not exist.
        """
        if dimStyleName == newDimStyleName:  # same name
            return True
        if self.findDimStyle(newDimStyleName) is not None:
            return False  # new name already in use
        dimStyle = self.findDimStyle(dimStyleName)
        if dimStyle is None:
            return False
        return dimStyle.rename(newDimStyleName)

    #============================================================================
    # load
    #============================================================================
    def load(self, dir = None, append = False):
        """
        Load the settings of every dimension style (*.dim file) found in the
        given directory.
        If dir is None, the search path is the loaded project's directory (when
        any) followed by the local QAD plugin directory.
        """
        if dir is None:
            if append == False:
                self.clear()
            # if a project is loaded its path is searched first
            prjFileInfo = QFileInfo(QgsProject.instance().fileName())
            path = prjFileInfo.absolutePath()
            if len(path) > 0:
                path += "/;"
            path += QgsApplication.qgisSettingsDirPath() + "python/plugins/qad/"
            # list of directories separated by ";"
            dirList = path.strip().split(";")
            for _dir in dirList:
                self.load(_dir, True)  # append mode
        else:
            _dir = QDir.cleanPath(dir)
            if _dir == "":
                return False
            if _dir.endswith("/") == False:
                _dir = _dir + "/"
            if not os.path.exists(_dir):
                return False
            if append == False:
                self.clear()
            dimStyle = QadDimStyle()
            fileNames = os.listdir(_dir)
            for fileName in fileNames:
                if fileName.endswith(".dim"):
                    path = _dir + fileName
                    if dimStyle.load(path) == True:
                        if self.findDimStyle(dimStyle.name) is None:
                            self.addDimStyle(dimStyle)
        # return True for both branches (the dir-is-None path previously
        # fell through returning None)
        return True

    #============================================================================
    # getDimIdByEntity
    #============================================================================
    def getDimIdByEntity(self, entity):
        """
        Given an entity, check whether it belongs to a dimension style of the
        list and, on success, return (dimStyle, dimId); otherwise (None, None).
        """
        for dimStyle in self.dimStyleList:
            dimId = dimStyle.getDimIdByEntity(entity)
            if dimId is not None:
                return dimStyle, dimId
        return None, None

    #============================================================================
    # isDimEntity
    #============================================================================
    def isDimEntity(self, entity):
        """
        Given an entity, return True when it belongs to a dimension style of
        the list, False otherwise.
        """
        dimStyle, dimId = self.getDimIdByEntity(entity)
        if dimStyle is None or dimId is None:
            return False
        else:
            return True

    #============================================================================
    # getDimEntity
    #============================================================================
    def getDimEntity(self, layer, fid = None):
        """
        Can be invoked in 2 ways:
        - with a single parameter of type QadEntity
        - with two parameters: a QgsVectorLayer and a feature id
        Return the QadDimEntity the entity belongs to, or None.
        """
        # check whether the entity belongs to a dimension style
        if type(layer) == QgsVectorLayer:
            entity = QadEntity()
            entity.set(layer, fid)
            dimStyle, dimId = self.getDimIdByEntity(entity)
        else:  # the layer parameter may be a QadEntity object
            dimStyle, dimId = self.getDimIdByEntity(layer)
        if (dimStyle is None) or (dimId is None):
            return None
        dimEntity = QadDimEntity()
        if dimEntity.initByDimId(dimStyle, dimId) == False:
            return None
        return dimEntity

    #============================================================================
    # getDimListByLayer
    #============================================================================
    def getDimListByLayer(self, layer):
        """
        Given a layer, return the list of dimension styles of which it is part
        (empty list when none).
        """
        result = []
        for dimStyle in self.dimStyleList:
            if dimStyle.isDimLayer(layer):
                if dimStyle not in result:
                    result.append(dimStyle)
        return result

    #============================================================================
    # addAllDimComponentsToEntitySet
    #============================================================================
    def addAllDimComponentsToEntitySet(self, entitySet, onlyEditableLayers):
        """
        Check whether the entities of entitySet are part of a dimension and, if
        so, add every component of that dimension to entitySet (or remove them
        all when onlyEditableLayers is True and any dimension layer is not
        editable).
        NOTE(review): entitySet is united/subtracted while iterating its
        layerEntitySetList — presumably those operations do not invalidate the
        iteration; confirm against QadEntitySet.
        """
        elaboratedDimEntitySet = QadEntitySet()  # dimension entities already processed
        entity = QadEntity()
        for layerEntitySet in entitySet.layerEntitySetList:
            # check whether the layer belongs to one or more dimension styles
            dimStyleList = self.getDimListByLayer(layerEntitySet.layer)
            for dimStyle in dimStyleList:  # for every dimension style
                if dimStyle is not None:
                    remove = False
                    if onlyEditableLayers == True:
                        # if even a single layer is not editable
                        if dimStyle.getTextualLayer().isEditable() == False or \
                           dimStyle.getSymbolLayer().isEditable() == False or \
                           dimStyle.getLinearLayer().isEditable() == False:
                            remove = True
                    features = layerEntitySet.getFeatureCollection()
                    for feature in features:
                        entity.set(layerEntitySet.layer, feature.id())
                        if not elaboratedDimEntitySet.containsEntity(entity):
                            dimId = dimStyle.getDimIdByEntity(entity)
                            if dimId is not None:
                                dimEntitySet = dimStyle.getEntitySet(dimId)
                                if remove == False:
                                    entitySet.unite(dimEntitySet)
                                else:
                                    entitySet.subtract(dimEntitySet)
                                elaboratedDimEntitySet.unite(dimEntitySet)

    #============================================================================
    # removeAllDimLayersFromEntitySet
    #============================================================================
    def removeAllDimLayersFromEntitySet(self, entitySet):
        """
        Remove from entitySet every entity belonging to the layers of the
        dimension styles in the list.
        """
        for dimStyle in self.dimStyleList:
            entitySet.removeLayerEntitySet(dimStyle.getTextualLayer())
            entitySet.removeLayerEntitySet(dimStyle.getSymbolLayer())
            entitySet.removeLayerEntitySet(dimStyle.getLinearLayer())
#===============================================================================
# QadDimEntity dimension entity class
#===============================================================================
class QadDimEntity():
#============================================================================
# __init__
#============================================================================
def __init__(self, dimEntity = None):
self.dimStyle = None
self.textualFeature = None
self.linearFeatures = []
self.symbolFeatures = []
if dimEntity is not None:
self.set(dimEntity)
def whatIs(self):
    # QAD entity-type identifier used to discriminate dimension entities
    return "DIMENTITY"
def isInitialized(self):
if (self.dimStyle is None) or (self.textualFeature is None):
return False
else:
return True
def __eq__(self, dimEntity):
"""self == other"""
if self.isInitialized() == False or dimEntity.isInitialized() == False :
return False
if self.getTextualLayer() == dimEntity.getTextualLayer() and self.getDimId() == dimEntity.getDimId():
return True
else:
return False
#============================================================================
# isValid
#============================================================================
def isValid(self):
"""
Verifica se lo stile di quotatura é valido e in caso affermativo ritorna True.
Se la quotatura non é valida ritorna False.
"""
if self.dimStyle is None:
return False
return self.dimStyle.isValid()
#============================================================================
# getTextualLayer
#============================================================================
def getTextualLayer(self):
if self.dimStyle is None:
return None
return self.dimStyle.getTextualLayer()
#============================================================================
# getLinearLayer
#============================================================================
def getLinearLayer(self):
if self.dimStyle is None:
return None
return self.dimStyle.getLinearLayer()
#============================================================================
# getSymbolLayer
#============================================================================
def getSymbolLayer(self):
if self.dimStyle is None:
return None
return self.dimStyle.getSymbolLayer()
#============================================================================
# set
#============================================================================
def set(self, dimEntity):
    """Copy style and features from another QadDimEntity (deep copies)."""
    self.dimStyle = QadDimStyle(dimEntity.dimStyle)
    self.textualFeature = QgsFeature(dimEntity.textualFeature)
    # replace the list contents in place so any external reference to the
    # lists stays valid (same effect as del lst[:] + append loop)
    self.linearFeatures[:] = [QgsFeature(f) for f in dimEntity.linearFeatures]
    self.symbolFeatures[:] = [QgsFeature(f) for f in dimEntity.symbolFeatures]
#============================================================================
# getLinearGeometryCollection
#============================================================================
def getLinearGeometryCollection(self):
result = []
for f in self.linearFeatures:
result.append(f.geometry())
return result
#============================================================================
# getSymbolGeometryCollection
#============================================================================
def getSymbolGeometryCollection(self):
result = []
for f in self.symbolFeatures:
result.append(f.geometry())
return result
#============================================================================
# getDimId
#============================================================================
def getDimId(self):
"""
La funzione restituisce il codice della quotatura altrimenti None.
"""
try:
return self.textualFeature.attribute(self.idFieldName)
except:
return None
def recodeDimIdToFeature(self, newDimId):
try:
# imposto il codice della quota
self.textualFeature.setAttribute(self.dimStyle.idFieldName, newDimId)
for f in self.linearFeatures:
f.setAttribute(self.dimStyle.idParentFieldName, newDimId)
for f in self.symbolFeatures:
f.setAttribute(self.dimStyle.idParentFieldName, newDimId)
except:
return False
return True
#============================================================================
# addToLayers
#============================================================================
def addToLayers(self, plugIn):
    """
    Add every feature of this dimension to its layers (no canvas refresh,
    no validity check). The text feature is inserted first so its new feature
    id can be used to re-code the whole dimension (recodeDimIdToFeature),
    after which the text feature is updated and the linear and symbol
    features are inserted. Returns False as soon as any step fails.
    """
    # first of all insert the dimension text, so the dimension can be re-coded
    # plugIn, layer, feature, coordTransform, refresh, check_validity
    if qad_layer.addFeatureToLayer(plugIn, self.getTextualLayer(), self.textualFeature, None, False, False) == False:
        return False
    # the freshly assigned feature id becomes the dimension id
    newDimId = self.textualFeature.id()
    if self.recodeDimIdToFeature(newDimId) == False:
        return False
    # plugIn, layer, feature, refresh, check_validity
    if qad_layer.updateFeatureToLayer(plugIn, self.getTextualLayer(), self.textualFeature, False, False) == False:
        return False
    # plugIn, layer, features, coordTransform, refresh, check_validity
    if qad_layer.addFeaturesToLayer(plugIn, self.getLinearLayer(), self.linearFeatures, None, False, False) == False:
        return False
    # plugIn, layer, features, coordTransform, refresh, check_validity
    if qad_layer.addFeaturesToLayer(plugIn, self.getSymbolLayer(), self.symbolFeatures, None, False, False) == False:
        return False
    return True
#============================================================================
# deleteToLayers
#============================================================================
def deleteToLayers(self, plugIn):
    """
    Delete every feature of this dimension from its layers (no canvas
    refresh). Returns False as soon as any deletion fails.
    """
    # text feature first
    # plugIn, layer, featureId, refresh
    if qad_layer.deleteFeatureToLayer(plugIn, self.getTextualLayer(), self.textualFeature.id(), False) == False:
        return False
    # then the linear component features
    linearIds = [f.id() for f in self.linearFeatures]
    # plugIn, layer, featureIds, refresh
    if qad_layer.deleteFeaturesToLayer(plugIn, self.getLinearLayer(), linearIds, False) == False:
        return False
    # finally the symbol component features
    symbolIds = [f.id() for f in self.symbolFeatures]
    # plugIn, layer, featureIds, refresh
    if qad_layer.deleteFeaturesToLayer(plugIn, self.getSymbolLayer(), symbolIds, False) == False:
        return False
    return True
#============================================================================
# initByEntity
#============================================================================
def initByEntity(self, dimStyle, entity):
dimId = dimStyle.getDimIdByEntity(entity)
if dimId is None:
return False
return self.initByDimId(dimStyle, dimId)
#============================================================================
# initByDimId
#============================================================================
def initByDimId(self, dimStyle, dimId):
    """
    Initialize this entity from a dimension style and a dimension id:
    collect the textual, linear and symbol features of that dimension.
    Return False when the dimension has no entities.
    """
    self.dimStyle = QadDimStyle(dimStyle)  # private copy of the style
    entitySet = self.dimStyle.getEntitySet(dimId)
    if entitySet.count() == 0: return False
    self.textualFeature = None
    layerEntitySet = entitySet.findLayerEntitySet(self.getTextualLayer())
    if layerEntitySet is not None:
        features = layerEntitySet.getFeatureCollection()
        # a dimension has a single text feature
        self.textualFeature = features[0]
    # linear entities
    layerEntitySet = entitySet.findLayerEntitySet(self.getLinearLayer())
    del self.linearFeatures[:]  # empty the list
    if layerEntitySet is not None:
        # NOTE: rebinds the list (external references keep the emptied one)
        self.linearFeatures = layerEntitySet.getFeatureCollection()
    # point entities
    layerEntitySet = entitySet.findLayerEntitySet(self.getSymbolLayer())
    del self.symbolFeatures[:]  # empty the list
    if layerEntitySet is not None:
        # NOTE: rebinds the list (external references keep the emptied one)
        self.symbolFeatures = layerEntitySet.getFeatureCollection()
    return True
#============================================================================
# getEntitySet
#============================================================================
def getEntitySet(self):
    """
    Return a QadEntitySet containing the textual, linear and symbol features
    of this dimension (empty set when the dimension is not valid).
    """
    result = QadEntitySet()
    if self.isValid() == False: return result;
    # one layer-entity-set per dimension layer
    for layer, features in ((self.getTextualLayer(), [self.textualFeature]),
                            (self.getLinearLayer(), self.linearFeatures),
                            (self.getSymbolLayer(), self.symbolFeatures)):
        layerEntitySet = QadLayerEntitySet()
        layerEntitySet.set(layer, features)
        result.addLayerEntitySet(layerEntitySet)
    return result
#============================================================================
# selectOnLayer
#============================================================================
def selectOnLayer(self, incremental = True):
    # select every feature of this dimension on its layers
    # (incremental = keep the current selection)
    self.getEntitySet().selectOnLayer(incremental)
#============================================================================
# deselectOnLayer
#============================================================================
def deselectOnLayer(self):
    # deselect every feature of this dimension on its layers
    self.getEntitySet().deselectOnLayer()
#============================================================================
# getDimPts
#============================================================================
def getDimPts(self, destinationCrs = None):
    """
    Return the two dimension points of this entity as QadPoint objects.
    destinationCrs = CRS in which the result is returned (None = symbol layer CRS)
    Returns (None, None) on any attribute/transform error.
    NOTE(review): when one of the two components is missing, QadPoint(None)
    is built from a None point — presumably callers only invoke this on
    complete dimensions; confirm.
    """
    dimPt1 = None
    dimPt2 = None
    if len(self.dimStyle.componentFieldName) > 0:
        # search among the point (symbol) elements
        for f in self.symbolFeatures:
            try:
                value = f.attribute(self.dimStyle.componentFieldName)
                if value == QadDimComponentEnum.DIM_PT1: # first point to dimension ("Dimension point 1")
                    g = f.geometry()
                    if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
                        g.transform(QgsCoordinateTransform(self.getSymbolLayer().crs(), \
                                                           destinationCrs,
                                                           QgsProject.instance())) # transform the geometry to map coordinates
                    dimPt1 = g.asPoint()
                elif value == QadDimComponentEnum.DIM_PT2: # second point to dimension ("Dimension point 2")
                    g = f.geometry()
                    if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
                        g.transform(QgsCoordinateTransform(self.getSymbolLayer().crs(), \
                                                           destinationCrs,
                                                           QgsProject.instance())) # transform the geometry to map coordinates
                    dimPt2 = g.asPoint()
            except:
                return None, None
    return QadPoint(dimPt1), QadPoint(dimPt2)
#============================================================================
# getDimLinePts
#============================================================================
def getDimLinePts(self, destinationCrs = None):
    """
    Return (dimLinePt1, dimLinePt2): the start and end points of the
    dimension line.
    destinationCrs = CRS in which the result is returned (None = layer CRS)
    When the dimension-line features do not provide both points, fall back to
    the insertion points of the arrow blocks. Returns (None, None) on error.
    """
    dimLinePt1 = None
    dimLinePt2 = None
    # look for the start/end points of the dimension line
    if len(self.dimStyle.componentFieldName) > 0:
        # first search among the linear elements
        for f in self.linearFeatures:
            try:
                value = f.attribute(self.dimStyle.componentFieldName)
                # "Dimension line 1" or "Dimension line 2"
                if value == QadDimComponentEnum.DIM_LINE1 or value == QadDimComponentEnum.DIM_LINE2:
                    g = f.geometry()
                    # bug fix: these are linear features, so the comparison must
                    # use the linear layer CRS (the original compared against the
                    # symbol layer CRS while transforming from the linear layer)
                    if (destinationCrs is not None) and destinationCrs != self.getLinearLayer().crs():
                        g.transform(QgsCoordinateTransform(self.getLinearLayer().crs(), \
                                                           destinationCrs, \
                                                           QgsProject.instance())) # transform the geometry to map coordinates
                    pts = g.asPolyline()
                    if value == QadDimComponentEnum.DIM_LINE1:
                        dimLinePt1 = pts[0]
                    else:
                        dimLinePt2 = pts[-1]
            except:
                return None, None
        if dimLinePt1 is None or dimLinePt2 is None:
            # then search among the point (symbol) elements
            for f in self.symbolFeatures:
                try:
                    value = f.attribute(self.dimStyle.componentFieldName)
                    # first arrow block ("Block 1")
                    if dimLinePt1 is None and value == QadDimComponentEnum.BLOCK1:
                        g = f.geometry()
                        if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
                            g.transform(QgsCoordinateTransform(self.getSymbolLayer().crs(), \
                                                               destinationCrs, \
                                                               QgsProject.instance())) # transform the geometry to map coordinates
                        dimLinePt1 = g.asPoint()
                    # second arrow block ("Block 2")
                    if dimLinePt2 is None and value == QadDimComponentEnum.BLOCK2:
                        g = f.geometry()
                        if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
                            g.transform(QgsCoordinateTransform(self.getSymbolLayer().crs(), \
                                                               destinationCrs, \
                                                               QgsProject.instance())) # transform the geometry to map coordinates
                        dimLinePt2 = g.asPoint()
                except:
                    return None, None
    return dimLinePt1, dimLinePt2
#============================================================================
# getDimArc
#============================================================================
def getDimArc(self, destinationCrs = None):
    """
    Rebuild the dimensioned arc (QadArc) from the dimension points and the
    dimension-line end points, or return None when it cannot be derived.
    destinationCrs = CRS in which the result is returned
    """
    # find the dimension points
    dimPt1, dimPt2 = self.getDimPts(destinationCrs)
    if dimPt1 is None or dimPt2 is None: return None
    # find the start and end point of the dimension line
    dimLinePt1, dimLinePt2 = self.getDimLinePts(destinationCrs)
    if dimLinePt1 is None or dimLinePt2 is None: return None
    ang1 = qad_utils.normalizeAngle(qad_utils.getAngleBy2Pts(dimPt1, dimLinePt1))
    ang2 = qad_utils.normalizeAngle(qad_utils.getAngleBy2Pts(dimLinePt2, dimPt2))
    if qad_utils.TanDirectionNear(ang1, ang2) == True: # 180-degree arc
        # the two radial directions are parallel: the center is the midpoint
        ptCenter = qad_utils.getMiddlePoint(dimPt1, dimPt2)
    else:
        # center = intersection of the two radial (infinite) lines
        ptCenter = qad_utils.getIntersectionPointOn2InfinityLines(dimPt1, dimLinePt1, dimPt2, dimLinePt2)
    arc = QadArc()
    if arc.fromStartCenterEndPts(dimPt1, ptCenter, dimPt2) == False:
        return None
    return arc
#============================================================================
# getDimLeaderLine
#============================================================================
def getDimLeaderLine(self, leaderLineType = None, destinationCrs = None):
"""
Trova la linea porta quota del tipo indicato (in destinationCrs tipicamente = map coordinate)
destinationCrs = sistema di coordinate in cui è espresso containerGeom e in cui verrà restituito il risultato
"""
if len(self.dimStyle.componentFieldName) > 0:
# prima cerco tra gli elementi lineari
for f in self.linearFeatures:
try:
value = f.attribute(self.dimStyle.componentFieldName)
if value == leaderLineType:
g = f.geometry()
if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
g.transform(QgsCoordinateTransform(self.getLinearLayer().crs(), \
destinationCrs, \
QgsProject.instance())) # trasformo la geometria in map coordinate
return g.asPolyline()
except:
return None
return None
#============================================================================
# getDimLinePosPt
#============================================================================
def getDimLinePosPt(self, containerGeom = None, destinationCrs = None):
"""
Trova fra i vari punti possibili un punto che indichi dove si trova la linea di quota (in destinationCrs tipicamente = map coordinate)
se containerGeom <> None il punto deve essere contenuto in containerGeom
containerGeom = può essere una QgsGeometry rappresentante un poligono (in destinationCrs tipicamente = map coordinate) contenente i punti di geom da stirare
oppure una lista dei punti da stirare (in destinationCrs tipicamente = map coordinate)
destinationCrs = sistema di coordinate in cui è espresso containerGeom e in cui verrà restituito il risultato
"""
if len(self.dimStyle.componentFieldName) > 0:
# prima cerco tra gli elementi lineari
for f in self.linearFeatures:
try:
value = f.attribute(self.dimStyle.componentFieldName)
# primo punto da quotare ("Dimension point 1") o secondo punto da quotare ("Dimension point 2")
if value == QadDimComponentEnum.DIM_LINE1 or value == QadDimComponentEnum.DIM_LINE2:
g = f.geometry()
if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
g.transform(QgsCoordinateTransform(self.getLinearLayer().crs(), \
destinationCrs, \
QgsProject.instance())) # trasformo la geometria in map coordinate
pts = g.asPolyline()
if containerGeom is not None: # verifico che il punto iniziale sia interno a containerGeom
if type(containerGeom) == QgsGeometry: # geometria
if containerGeom.contains(pts[0]) == True:
return QadPoint(pts[0])
else:
# verifico che il punto finale sia interno a containerGeom
if containerGeom.contains(pts[-1]) == True:
return QadPoint(pts[-1])
elif type(containerGeom) == list: # lista di punti
for containerPt in containerGeom:
if qad_utils.ptNear(containerPt, pts[0]): # se i punti sono sufficientemente vicini
return QadPoint(pts[0])
else:
# verifico il punto finale
if qad_utils.ptNear(containerPt,pts[-1]):
return QadPoint(pts[-1])
else:
return QadPoint(pts[0]) # punto iniziale
except:
return None
# poi cerco tra gli elementi puntuali
for f in self.symbolFeatures:
try:
value = f.attribute(self.dimStyle.componentFieldName)
# primo blocco della freccia ("Block 1") o secondo blocco della freccia ("Block 2")
if value == QadDimComponentEnum.BLOCK1 or value == QadDimComponentEnum.BLOCK2:
g = f.geometry()
if (destinationCrs is not None) and destinationCrs != self.getSymbolLayer().crs():
g.transform(QgsCoordinateTransform(self.getSymbolLayer().crs(), \
destinationCrs, \
QgsProject.instance())) # trasformo la geometria in map coordinate
dimLinePosPt = g.asPoint()
if containerGeom is not None: # verifico che il punto sia interno a containerGeom
if type(containerGeom) == QgsGeometry: # geometria
if containerGeom.contains(dimLinePosPt) == True:
return QadPoint(dimLinePosPt)
elif type(containerGeom) == list: # lista di punti
for containerPt in containerGeom:
if ptNear(containerPt, dimLinePosPt): # se i punti sono sufficientemente vicini
return QadPoint(dimLinePosPt)
else:
return QadPoint(dimLinePosPt)
except:
return None
return None
#============================================================================
# getDimLinearAlignment
#============================================================================
def getDimLinearAlignment(self):
dimLinearAlignment = None
dimLineRotation = None
Pts = []
if len(self.dimStyle.componentFieldName) > 0:
# prima cerco tra gli elementi lineari
for f in self.linearFeatures:
try:
value = f.attribute(self.dimStyle.componentFieldName)
if value == QadDimComponentEnum.DIM_LINE1: # primo punto da quotare ("Dimension point 1")
Pts = f.geometry().asPolyline()
break
elif value == QadDimComponentEnum.DIM_LINE2: # secondo punto da quotare ("Dimension point 2")
Pts = f.geometry().asPolyline()
break
except:
return None, None
if Pts is None:
# poi cerco tra gli elementi puntuali
for f in self.symbolFeatures:
try:
value = f.attribute(self.dimStyle.componentFieldName)
if value == QadDimComponentEnum.BLOCK1: # primo blocco della freccia ("Block 1")
Pts.append(f.geometry().asPoint())
elif value == QadDimComponentEnum.BLOCK2: # secondo blocco della freccia ("Block 1")
Pts.append(f.geometry().asPoint())
except:
return None, None
if len(Pts) > 1: # almeno 2 punti
if qad_utils.doubleNear(Pts[0].x(), Pts[-1].x()): # linea verticale (stessa x)
dimLinearAlignment = QadDimStyleAlignmentEnum.VERTICAL
dimLineRotation = 0
elif qad_utils.doubleNear(Pts[0].y(), Pts[-1].y()): # linea orizzontale (stessa y)
dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
dimLineRotation = 0
else:
dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
dimLineRotation = qad_utils.getAngleBy2Pts(Pts[0], Pts[-1])
return dimLinearAlignment, dimLineRotation
#============================================================================
# getDimCircle
#============================================================================
def getDimCircle(self, destinationCrs = None):
"""
destinationCrs = sistema di coordinate in cui verrà restituito il risultato
Ritorna un cerchio a cui si riferisce la quotatura DIMRADIUS
"""
# cerco i punti di quotatura
dimPt1, dimPt2 = self.getDimPts(destinationCrs)
if dimPt1 is None or dimPt2 is None: return None
circle = QadCircle()
circle.center = dimPt1
circle.radius = qad_utils.getDistance(dimPt1, dimPt2)
return circle
#============================================================================
# getTextRot
#============================================================================
def getTextRot(self):
textRot = None
if len(self.dimStyle.rotFieldName) > 0:
try:
textRot = self.textualFeature.attribute(self.dimStyle.rotFieldName)
except:
return None
return qad_utils.toRadians(textRot)
#============================================================================
# getTextValue
#============================================================================
def getTextValue(self):
textValue = None
if self.dimStyle.getTextualLayer() is None:
return None;
# se il testo dipende da un solo campo
labelFieldNames = qad_label.get_labelFieldNames(self.dimStyle.getTextualLayer())
if len(labelFieldNames) == 1 and len(labelFieldNames[0]) > 0:
try:
textValue = self.textualFeature.attribute(labelFieldNames[0])
except:
return None
return textValue
#============================================================================
# getTextPt
#============================================================================
def getTextPt(self, destinationCrs = None):
# destinationCrs = sistema di coordinate in cui verrà restituito il risultato
g = self.textualFeature.geometry()
if (destinationCrs is not None) and destinationCrs != self.getTextualLayer().crs():
g.transform(QgsCoordinateTransform(self.getTextualLayer().crs(), \
destinationCrs,
QgsProject.instance())) # trasformo la geometria in map coordinate
return g.asPoint()
#============================================================================
# isCalculatedText
#============================================================================
def isCalculatedText(self):
# la funzione verifica se il testo della quota è calcolato dalla grafica o se è stato forzato un testo diverso
measure = self.getTextValue()
if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # quota lineare allineata ai punti di origine delle linee di estensione
dimPt1, dimPt2 = self.getDimPts()
return measure == self.dimStyle.getFormattedText(qad_utils.getDistance(dimPt1, dimPt2))
elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # quota lineare con una linea di quota orizzontale o verticale
dimPt1, dimPt2 = self.getDimPts()
linePosPt = self.getDimLinePosPt()
preferredAlignment, dimLineRotation = self.getDimLinearAlignment()
# linea di quota entro le linee di estensione
dimLine = self.dimStyle.getDimLine(dimPt1, dimPt2, linePosPt, preferredAlignment, dimLineRotation)
if dimLine is None: return False
return measure == self.dimStyle.getFormattedText(dimLine.length())
elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # quota per la lunghezza di un arco
dimArc = self.getDimArc()
if dimArc is None: return False
return measure == self.dimStyle.getFormattedText(dimArc.length())
elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # quota radiale, misura il raggio di un cerchio o di un arco
dimPt1, dimPt2 = self.getDimPts()
return measure == self.dimStyle.getFormattedText(qad_utils.getDistance(dimPt1, dimPt2))
return True
#============================================================================
# isCalculatedTextRot
#============================================================================
   def isCalculatedTextRot(self):
      """
      Check whether the rotation of the dimension text is the one calculated
      from the graphics, or whether a different rotation was forced.
      Works by rebuilding the dimension features through the dimension style
      and comparing the resulting text rotation with the stored one.
      Returns True when the rotation is the calculated one (or when the
      dimension type is not handled / the components cannot be read).
      """
      measure = self.getTextValue()
      txtRot = self.getTextRot()
      canvas = qgis.utils.iface.mapCanvas()
      destinationCrs = canvas.mapSettings().destinationCrs()
      if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # linear dimension aligned with the extension-line origins
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None):
            # rebuild the dimension and compare its calculated text rotation
            dimEntity, textOffsetRect = self.dimStyle.getAlignedDimFeatures(canvas, \
                                                                            dimPt1, \
                                                                            dimPt2, \
                                                                            linePosPt, \
                                                                            measure)
            return txtRot == dimEntity.getTextRot()
      elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # linear dimension with a horizontal or vertical dimension line
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         dimLinearAlignment, dimLineRotation = self.getDimLinearAlignment()
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None) and \
            (dimLinearAlignment is not None) and (dimLineRotation is not None):
            # a vertical alignment is expressed as a horizontal one rotated by 90 degrees
            if dimLinearAlignment == QadDimStyleAlignmentEnum.VERTICAL:
               dimLineRotation = math.pi / 2
               dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
            dimEntity, textOffsetRect = self.dimStyle.getLinearDimFeatures(canvas, \
                                                                           dimPt1, \
                                                                           dimPt2, \
                                                                           linePosPt, \
                                                                           measure, \
                                                                           dimLinearAlignment, \
                                                                           dimLineRotation)
            return txtRot == dimEntity.getTextRot()
      elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # arc length dimension
         dimArc = self.getDimArc()
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimArc is not None) and (linePosPt is not None):
            dimEntity, textOffsetRect = self.dimStyle.getArcDimFeatures(canvas, dimArc, linePosPt, measure)
            return txtRot == dimEntity.getTextRot()
      elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # radial dimension (radius of a circle or an arc)
         dimCircle = self.getDimCircle()
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimCircle is not None) and (linePosPt is not None):
            dimEntity, textOffsetRect = self.dimStyle.getRadiusDimFeatures(canvas, dimCircle, linePosPt, measure)
            return txtRot == dimEntity.getTextRot()
      return True
#============================================================================
# move
#============================================================================
def move(self, offsetX, offsetY):
# offsetX = spostamento X in map coordinate
# offsetY = spostamento Y in map coordinate
if self.isValid() == False: return False;
canvas = qgis.utils.iface.mapCanvas()
destinationCrs = canvas.mapSettings().destinationCrs()
g = self.textualFeature.geometry()
qadGeom = fromQgsGeomToQadGeom(g, self.getTextualLayer().crs())
qadGeom.move(offsetX, offsetY)
g = fromQadGeomToQgsGeom(qadGeom, self.getTextualLayer().crs())
self.textualFeature.setGeometry(g)
for f in self.linearFeatures:
g = f.geometry()
qadGeom = fromQgsGeomToQadGeom(g, self.getLinearLayer().crs())
qadGeom.move(offsetX, offsetY)
g = fromQadGeomToQgsGeom(qadGeom, self.getLinearLayer().crs())
f.setGeometry(g)
for f in self.symbolFeatures:
g = f.geometry()
qadGeom = fromQgsGeomToQadGeom(g, self.getSymbolLayer().crs())
qadGeom.move(offsetX, offsetY)
g = fromQadGeomToQgsGeom(qadGeom, self.getSymbolLayer().crs())
f.setGeometry(g)
return False
#============================================================================
# rotate
#============================================================================
   def rotate(self, basePt, angle):
      """
      Rotate every component of the dimension around basePt by angle (radians),
      rebuilding the dimension features through the dimension style.
      basePt = base point expressed in map coordinates
      Returns True (also when the components could not be read and nothing was
      rebuilt); False when the dimension entity is not valid.
      """
      if self.isValid() == False: return False;
      canvas = qgis.utils.iface.mapCanvas()
      destinationCrs = canvas.mapSettings().destinationCrs()
      # keep the forced text / rotation only when they differ from the calculated ones
      measure = None if self.isCalculatedText() else self.getTextValue()
      textRot = None if self.isCalculatedTextRot() else self.getTextRot()
      if textRot is not None: # the rotation was forced, so re-impose it
         prevTextRotMode = self.dimStyle.textRotMode
         self.dimStyle.textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         self.dimStyle.textForcedRot = textRot
      if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # linear dimension aligned with the extension-line origins
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None):
            dimPt1 = qad_utils.rotatePoint(dimPt1, basePt, angle)
            dimPt2 = qad_utils.rotatePoint(dimPt2, basePt, angle)
            linePosPt = qad_utils.rotatePoint(linePosPt, basePt, angle)
            dimEntity, textOffsetRect = self.dimStyle.getAlignedDimFeatures(canvas, \
                                                                            dimPt1, \
                                                                            dimPt2, \
                                                                            linePosPt, \
                                                                            measure)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # linear dimension with a horizontal or vertical dimension line
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         preferredAlignment, dimLineRotation = self.getDimLinearAlignment()
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None) and \
            (preferredAlignment is not None) and (dimLineRotation is not None):
            dimPt1 = qad_utils.rotatePoint(dimPt1, basePt, angle)
            dimPt2 = qad_utils.rotatePoint(dimPt2, basePt, angle)
            linePosPt = qad_utils.rotatePoint(linePosPt, basePt, angle)
            dimLinearAlignment, dimLineRotation = self.getDimLinearAlignment()
            # a vertical alignment is expressed as a horizontal one rotated by 90 degrees
            if dimLinearAlignment == QadDimStyleAlignmentEnum.VERTICAL:
               dimLineRotation = math.pi / 2
               dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
            dimLineRotation = dimLineRotation + angle
            dimEntity, textOffsetRect = self.dimStyle.getLinearDimFeatures(canvas, \
                                                                           dimPt1, \
                                                                           dimPt2, \
                                                                           linePosPt, \
                                                                           measure, \
                                                                           dimLinearAlignment, \
                                                                           dimLineRotation)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # arc length dimension
         dimArc = self.getDimArc(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimArc is not None) and (linePosPt is not None):
            dimArc.rotate(basePt, angle)
            linePosPt = qad_utils.rotatePoint(linePosPt, basePt, angle)
            arcLeader = True if self.getDimLeaderLine(QadDimComponentEnum.ARC_LEADER_LINE) is not None else False
            dimEntity, textOffsetRect = self.dimStyle.getArcDimFeatures(canvas, \
                                                                        dimArc, \
                                                                        linePosPt, \
                                                                        measure, \
                                                                        arcLeader)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # radial dimension (radius of a circle or an arc)
         # cannot tell whether the dimension referred to a circle or an arc:
         # for now assume it always refers to a circle
         dimCircle = self.getDimCircle()
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimCircle is not None) and (linePosPt is not None):
            dimCircle.rotate(basePt, angle)
            linePosPt = qad_utils.rotatePoint(linePosPt, basePt, angle)
            dimEntity, textOffsetRect = self.dimStyle.getRadiusDimFeatures(canvas, dimCircle, linePosPt, measure)
            self.set(dimEntity)
      if textRot is not None:
         self.dimStyle.textRotMode = prevTextRotMode # restore the previous setting
      return True
#============================================================================
# scale
#============================================================================
   def scale(self, basePt, scale):
      """
      Scale every component of the dimension from basePt by the factor scale,
      rebuilding the dimension features through the dimension style.
      basePt = base point expressed in map coordinates
      Returns True (also when the components could not be read and nothing was
      rebuilt); False when the dimension entity is not valid.
      """
      if self.isValid() == False: return False;
      canvas = qgis.utils.iface.mapCanvas()
      destinationCrs = canvas.mapSettings().destinationCrs()
      # keep the forced text / rotation only when they differ from the calculated ones
      measure = None if self.isCalculatedText() else self.getTextValue()
      textRot = None if self.isCalculatedTextRot() else self.getTextRot()
      if textRot is not None: # the rotation was forced, so re-impose it
         prevTextRotMode = self.dimStyle.textRotMode
         self.dimStyle.textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         self.dimStyle.textForcedRot = textRot
      if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # linear dimension aligned with the extension-line origins
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None):
            dimPt1 = qad_utils.scalePoint(dimPt1, basePt, scale)
            dimPt2 = qad_utils.scalePoint(dimPt2, basePt, scale)
            linePosPt = qad_utils.scalePoint(linePosPt, basePt, scale)
            dimEntity, textOffsetRect = self.dimStyle.getAlignedDimFeatures(canvas, \
                                                                            dimPt1, \
                                                                            dimPt2, \
                                                                            linePosPt, \
                                                                            measure)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # linear dimension with a horizontal or vertical dimension line
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         preferredAlignment, dimLineRotation = self.getDimLinearAlignment()
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None) and \
            (preferredAlignment is not None) and (dimLineRotation is not None):
            textForcedRot = self.getTextRot()
            if textForcedRot is not None:
               self.dimStyle.textForcedRot = textForcedRot
            dimPt1 = qad_utils.scalePoint(dimPt1, basePt, scale)
            dimPt2 = qad_utils.scalePoint(dimPt2, basePt, scale)
            linePosPt = qad_utils.scalePoint(linePosPt, basePt, scale)
            dimLinearAlignment, dimLineRotation = self.getDimLinearAlignment()
            # a vertical alignment is expressed as a horizontal one rotated by 90 degrees
            if dimLinearAlignment == QadDimStyleAlignmentEnum.VERTICAL:
               dimLineRotation = math.pi / 2
               dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
            dimEntity, textOffsetRect = self.dimStyle.getLinearDimFeatures(canvas, \
                                                                           dimPt1, \
                                                                           dimPt2, \
                                                                           linePosPt, \
                                                                           measure, \
                                                                           dimLinearAlignment, \
                                                                           dimLineRotation)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # arc length dimension
         dimArc = self.getDimArc(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimArc is not None) and \
            (linePosPt is not None):
            dimArc.scale(basePt, scale)
            linePosPt = qad_utils.scalePoint(linePosPt, basePt, scale)
            arcLeader = True if self.getDimLeaderLine(QadDimComponentEnum.ARC_LEADER_LINE) is not None else False
            dimEntity, textOffsetRect = self.dimStyle.getArcDimFeatures(canvas, \
                                                                        dimArc, \
                                                                        linePosPt, \
                                                                        measure, \
                                                                        arcLeader)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # radial dimension (radius of a circle or an arc)
         # cannot tell whether the dimension referred to a circle or an arc:
         # for now assume it always refers to a circle
         dimCircle = self.getDimCircle()
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimCircle is not None) and (linePosPt is not None):
            dimCircle.scale(basePt, scale)
            linePosPt = qad_utils.scalePoint(linePosPt, basePt, scale)
            dimEntity, textOffsetRect = self.dimStyle.getRadiusDimFeatures(canvas, dimCircle, linePosPt, measure)
            self.set(dimEntity)
      if textRot is not None:
         self.dimStyle.textRotMode = prevTextRotMode # restore the previous setting
      return True
#============================================================================
# mirror
#============================================================================
   def mirror(self, mirrorPt, mirrorAngle):
      """
      Mirror every component of the dimension across the axis through mirrorPt
      with direction mirrorAngle, rebuilding the dimension features through the
      dimension style.
      mirrorPt = base point expressed in map coordinates
      Returns True (also when the components could not be read and nothing was
      rebuilt); False when the dimension entity is not valid.
      """
      if self.isValid() == False: return False;
      canvas = qgis.utils.iface.mapCanvas()
      destinationCrs = canvas.mapSettings().destinationCrs()
      # keep the forced text / rotation only when they differ from the calculated ones
      measure = None if self.isCalculatedText() else self.getTextValue()
      textRot = None if self.isCalculatedTextRot() else self.getTextRot()
      if textRot is not None: # the rotation was forced, so re-impose it
         prevTextRotMode = self.dimStyle.textRotMode
         self.dimStyle.textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         self.dimStyle.textForcedRot = textRot
      if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # linear dimension aligned with the extension-line origins
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None):
            dimPt1 = qad_utils.mirrorPoint(dimPt1, mirrorPt, mirrorAngle)
            dimPt2 = qad_utils.mirrorPoint(dimPt2, mirrorPt, mirrorAngle)
            linePosPt = qad_utils.mirrorPoint(linePosPt, mirrorPt, mirrorAngle)
            dimEntity, textOffsetRect = self.dimStyle.getAlignedDimFeatures(canvas, \
                                                                            dimPt1, \
                                                                            dimPt2, \
                                                                            linePosPt, \
                                                                            measure)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # linear dimension with a horizontal or vertical dimension line
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         preferredAlignment, dimLineRotation = self.getDimLinearAlignment()
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None) and \
            (preferredAlignment is not None) and (dimLineRotation is not None):
            textForcedRot = self.getTextRot()
            if textForcedRot is not None:
               self.dimStyle.textForcedRot = textForcedRot
            dimPt1 = qad_utils.mirrorPoint(dimPt1, mirrorPt, mirrorAngle)
            dimPt2 = qad_utils.mirrorPoint(dimPt2, mirrorPt, mirrorAngle)
            linePosPt = qad_utils.mirrorPoint(linePosPt, mirrorPt, mirrorAngle)
            dimLinearAlignment, dimLineRotation = self.getDimLinearAlignment()
            # a vertical alignment is expressed as a horizontal one rotated by 90 degrees
            if dimLinearAlignment == QadDimStyleAlignmentEnum.VERTICAL:
               dimLineRotation = math.pi / 2
               dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
            # mirror the dimension-line direction: mirror a probe point placed
            # along the direction and measure the resulting angle
            ptDummy = qad_utils.getPolarPointByPtAngle(mirrorPt, dimLineRotation, 1)
            ptDummy = qad_utils.mirrorPoint(ptDummy, mirrorPt, mirrorAngle)
            dimLineRotation = qad_utils.getAngleBy2Pts(mirrorPt, ptDummy)
            dimEntity, textOffsetRect = self.dimStyle.getLinearDimFeatures(canvas, \
                                                                           dimPt1, \
                                                                           dimPt2, \
                                                                           linePosPt, \
                                                                           measure, \
                                                                           dimLinearAlignment, \
                                                                           dimLineRotation)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # arc length dimension
         dimArc = self.getDimArc(destinationCrs)
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimArc is not None) and \
            (linePosPt is not None):
            dimArc.mirror(mirrorPt, mirrorAngle)
            linePosPt = qad_utils.mirrorPoint(linePosPt, mirrorPt, mirrorAngle)
            arcLeader = True if self.getDimLeaderLine(QadDimComponentEnum.ARC_LEADER_LINE) is not None else False
            dimEntity, textOffsetRect = self.dimStyle.getArcDimFeatures(canvas, \
                                                                        dimArc, \
                                                                        linePosPt, \
                                                                        measure, \
                                                                        arcLeader)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # radial dimension (radius of a circle or an arc)
         # cannot tell whether the dimension referred to a circle or an arc:
         # for now assume it always refers to a circle
         dimCircle = self.getDimCircle()
         linePosPt = self.getDimLinePosPt(None, destinationCrs)
         if (dimCircle is not None) and (linePosPt is not None):
            dimCircle.mirror(mirrorPt, mirrorAngle)
            linePosPt = qad_utils.mirrorPoint(linePosPt, mirrorPt, mirrorAngle)
            dimEntity, textOffsetRect = self.dimStyle.getRadiusDimFeatures(canvas, dimCircle, linePosPt, measure)
            self.set(dimEntity)
      if textRot is not None:
         self.dimStyle.textRotMode = prevTextRotMode # restore the previous setting
      return True
#============================================================================
# stretch
#============================================================================
   def stretch(self, containerGeom, offsetX, offsetY):
      """
      Stretch the dimension: points falling inside containerGeom are displaced
      by (offsetX, offsetY) and the dimension features are rebuilt through the
      dimension style.
      containerGeom = either a QgsGeometry polygon containing the points to
                      stretch, or a list of the points to stretch, expressed
                      in map coordinates
      offsetX = X displacement in map coordinates
      offsetY = Y displacement in map coordinates
      Returns True (also when the components could not be read and nothing was
      rebuilt); False when the dimension entity is not valid.
      """
      if self.isValid() == False: return False;
      canvas = qgis.utils.iface.mapCanvas()
      destinationCrs = canvas.mapSettings().destinationCrs()
      # keep the forced text / rotation only when they differ from the calculated ones
      measure = None if self.isCalculatedText() else self.getTextValue()
      textRot = None if self.isCalculatedTextRot() else self.getTextRot()
      if textRot is not None: # the rotation was forced, so re-impose it
         prevTextRotMode = self.dimStyle.textRotMode
         self.dimStyle.textRotMode = QadDimStyleTxtRotModeEnum.FORCED_ROTATION
         self.dimStyle.textForcedRot = textRot
      if self.dimStyle.dimType == QadDimTypeEnum.ALIGNED: # linear dimension aligned with the extension-line origins
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(containerGeom, destinationCrs)
         if dimPt1 is not None:
            newPt = qad_stretch_fun.stretchPoint(dimPt1, containerGeom, offsetX, offsetY)
            if newPt is not None:
               dimPt1 = newPt
         if dimPt2 is not None:
            newPt = qad_stretch_fun.stretchPoint(dimPt2, containerGeom, offsetX, offsetY)
            if newPt is not None:
               dimPt2 = newPt
         if linePosPt is not None:
            newPt = qad_stretch_fun.stretchPoint(linePosPt, containerGeom, offsetX, offsetY)
            if newPt is not None:
               linePosPt = newPt
         else:
            linePosPt = self.getDimLinePosPt(None, destinationCrs)
         # check whether the dimension text was involved in the stretch
         if qad_stretch_fun.isPtContainedForStretch(self.getTextPt(destinationCrs), containerGeom):
            if linePosPt is not None:
               linePosPt = qad_utils.movePoint(linePosPt, offsetX, offsetY)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None):
            dimEntity, textOffsetRect = self.dimStyle.getAlignedDimFeatures(canvas, \
                                                                            dimPt1, \
                                                                            dimPt2, \
                                                                            linePosPt, \
                                                                            measure)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.LINEAR: # linear dimension with a horizontal or vertical dimension line
         dimPt1, dimPt2 = self.getDimPts(destinationCrs)
         linePosPt = self.getDimLinePosPt(containerGeom, destinationCrs)
         dimLinearAlignment, dimLineRotation = self.getDimLinearAlignment()
         if dimPt1 is not None:
            newPt = qad_stretch_fun.stretchPoint(dimPt1, containerGeom, offsetX, offsetY)
            if newPt is not None:
               dimPt1 = newPt
         if dimPt2 is not None:
            newPt = qad_stretch_fun.stretchPoint(dimPt2, containerGeom, offsetX, offsetY)
            if newPt is not None:
               dimPt2 = newPt
         if linePosPt is not None:
            newPt = qad_stretch_fun.stretchPoint(linePosPt, containerGeom, offsetX, offsetY)
            if newPt is not None:
               linePosPt = newPt
         else:
            linePosPt = self.getDimLinePosPt(None, destinationCrs)
         # check whether the dimension text was involved in the stretch
         if qad_stretch_fun.isPtContainedForStretch(self.getTextPt(destinationCrs), containerGeom):
            if linePosPt is not None:
               linePosPt = qad_utils.movePoint(linePosPt, offsetX, offsetY)
         if (dimPt1 is not None) and (dimPt2 is not None) and \
            (linePosPt is not None) and \
            (dimLinearAlignment is not None) and (dimLineRotation is not None):
            # a vertical alignment is expressed as a horizontal one rotated by 90 degrees
            if dimLinearAlignment == QadDimStyleAlignmentEnum.VERTICAL:
               dimLineRotation = math.pi / 2
               dimLinearAlignment = QadDimStyleAlignmentEnum.HORIZONTAL
            dimEntity, textOffsetRect = self.dimStyle.getLinearDimFeatures(canvas, \
                                                                           dimPt1, \
                                                                           dimPt2, \
                                                                           linePosPt, \
                                                                           measure, \
                                                                           dimLinearAlignment, \
                                                                           dimLineRotation)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.ARC_LENTGH: # arc length dimension
         dimArc = self.getDimArc(destinationCrs)
         linePosPt = self.getDimLinePosPt(containerGeom, destinationCrs)
         if dimArc is not None:
            dimArc = qad_stretch_fun.stretchQadGeometry(dimArc, containerGeom, \
                                                        offsetX, offsetY)
            if linePosPt is not None:
               # keep the dimension-line point at the same distance from the arc center
               newPt = qad_utils.movePoint(linePosPt, offsetX, offsetY)
               linePosPt = qad_utils.getPolarPointBy2Pts(dimArc.center, linePosPt, qad_utils.getDistance(dimArc.center, newPt))
            else:
               linePosPt = self.getDimLinePosPt(None, destinationCrs)
            # check whether the dimension text was involved in the stretch
            textPt = self.getTextPt(destinationCrs)
            if qad_stretch_fun.isPtContainedForStretch(textPt, containerGeom):
               if linePosPt is not None:
                  newPt = qad_utils.movePoint(textPt, offsetX, offsetY)
                  linePosPt = qad_utils.getPolarPointBy2Pts(dimArc.center, linePosPt, qad_utils.getDistance(dimArc.center, newPt))
         if (dimArc is not None) and \
            (linePosPt is not None):
            arcLeader = True if self.getDimLeaderLine(QadDimComponentEnum.ARC_LEADER_LINE) is not None else False
            dimEntity, textOffsetRect = self.dimStyle.getArcDimFeatures(canvas, \
                                                                        dimArc, \
                                                                        linePosPt, \
                                                                        measure, \
                                                                        arcLeader)
            self.set(dimEntity)
      elif self.dimStyle.dimType == QadDimTypeEnum.RADIUS: # radial dimension (radius of a circle or an arc)
         # cannot tell whether the dimension referred to a circle or an arc:
         # for now assume it always refers to a circle
         dimCircle = self.getDimCircle()
         linePosPt = self.getDimLinePosPt(containerGeom, destinationCrs)
         if dimCircle is not None:
            dimCircle = qad_stretch_fun.stretchQadGeometry(dimCircle, containerGeom, \
                                                           offsetX, offsetY)
            if linePosPt is not None:
               # keep the dimension-line point at the same distance from the circle center
               newPt = qad_utils.movePoint(linePosPt, offsetX, offsetY)
               linePosPt = qad_utils.getPolarPointBy2Pts(dimCircle.center, linePosPt, qad_utils.getDistance(dimCircle.center, newPt))
            else:
               linePosPt = self.getDimLinePosPt(None, destinationCrs)
            # check whether the dimension text was involved in the stretch
            textPt = self.getTextPt(destinationCrs)
            if qad_stretch_fun.isPtContainedForStretch(textPt, containerGeom):
               if linePosPt is not None:
                  newPt = qad_utils.movePoint(textPt, offsetX, offsetY)
                  linePosPt = qad_utils.getPolarPointBy2Pts(dimCircle.center, linePosPt, qad_utils.getDistance(dimCircle.center, newPt))
         if (dimCircle is not None) and (linePosPt is not None):
            dimEntity, textOffsetRect = self.dimStyle.getRadiusDimFeatures(canvas, dimCircle, linePosPt, measure)
            self.set(dimEntity)
      if textRot is not None:
         self.dimStyle.textRotMode = prevTextRotMode # restore the previous setting
      return True;
#============================================================================
# getDimComponentByEntity
#============================================================================
def getDimComponentByEntity(self, entity):
"""
La funzione, data un'entità, restituisce il componente della quotatura.
"""
if entity.layer == self.getTextualLayer():
return QadDimComponentEnum.TEXT_PT
elif entity.layer == self.getLinearLayer() or \
entity.layer == self.getSymbolLayer():
try:
return entity.getFeature().attribute(self.dimStyle.componentFieldName)
except:
return None
return None
#============================================================================
# appendDimEntityIfNotExisting
#============================================================================
def appendDimEntityIfNotExisting(dimEntityList, dimEntity):
   """
   Utility for commands, to avoid processing dimension objects more than
   once. dimEntityList must be declared as a plain list (e.g.
   dimElaboratedList = []).

   If dimEntity is already present in dimEntityList return False,
   otherwise append dimEntity to the list and return True.
   """
   # The 'in' operator performs the same == comparison as an explicit
   # scan, but iterates at C speed.
   if dimEntity in dimEntityList:
      return False
   dimEntityList.append(dimEntity)
   return True
#===============================================================================
# = global variable
#===============================================================================
QadDimStyles = QadDimStylesClass() # list of the loaded dimension styles
|
gam17/QAD
|
qad_dim.py
|
Python
|
gpl-3.0
| 317,606
|
[
"ESPResSo"
] |
544bfa8c12d4cdd58146e60871c0167694e9108251caff8ebd9ce8cf69a2a813
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Raw string: '\.' is an invalid escape sequence in a plain string literal
# (DeprecationWarning, and a SyntaxWarning/SyntaxError on newer Pythons).
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'

# Well-known locations of the kubeconfig files this charm writes.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'

# Make the snap-installed binaries (kubelet, kubectl, ...) resolvable.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Unit-local key/value store used to persist credentials and label state.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Migrate state from older charm revisions and schedule a restart."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    # Force re-creation of config and manifests, and restart the daemons,
    # on the new charm revision.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag a snap upgrade if any attached charm resource file changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    resources = ['kubectl', 'kubelet', 'kube-proxy']
    paths = [hookenv.resource_get(resource) for resource in resources]
    if any_file_changed(paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the worker snaps as needing an upgrade.

    The upgrade is additionally marked as 'specified' (i.e. allowed to
    proceed) unless the operator set require-manual-upgrade and a channel
    was previously recorded — a fresh install always proceeds.
    """
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    require_manual = config.get('require-manual-upgrade')
    if previous_channel is None or not require_manual:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Tear down state, services and files left by pre-snap installs."""
    # Drop the reactive state used by the old (non-snap) install path.
    remove_state('kubernetes-worker.components.installed')

    # Stop the legacy systemd units.
    for legacy_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(legacy_service))
        service_stop(legacy_service)

    # Delete leftover unit files, defaults, binaries and config trees.
    leftovers = (
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes",
    )
    for leftover in leftovers:
        if os.path.isdir(leftover):
            hookenv.log("Removing directory: " + leftover)
            shutil.rmtree(leftover)
        elif os.path.isfile(leftover):
            hookenv.log("Removing file: " + leftover)
            os.remove(leftover)
@when('config.changed.channel')
def channel_changed():
    """A new snap channel was configured; schedule an upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block the unit until the operator runs the manual upgrade action."""
    msg = 'Needs manual upgrade, run the upgrade action'
    hookenv.status_set('blocked', msg)
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    # All three snaps need classic confinement; install in a fixed order
    # so kubectl is available before the daemons. The loop replaces three
    # copy-pasted stanzas with identical messages and ordering.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' When this unit is destroyed:
        - delete the current node
        - stop the worker services
    '''
    try:
        if os.path.isfile(kubeconfig_path):
            # Node names are registered lowercase, so match that here.
            kubectl('delete', 'node', gethostname().lower())
    except CalledProcessError:
        # Best effort: the master may already be gone during teardown.
        hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    # Copy the unpacked plugin binaries into place.
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)

    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    cmd = ['kubelet', '--version']
    version = check_output(cmd)
    # Output looks like b'Kubernetes v1.9.2\n'; keep only the bare version.
    hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    ''' Notify to the user we are in a transient state and the application
    is still converging. Potentially remotely, or we may be in a detached loop
    wait state '''
    # During deployment the worker has to start kubelet without cluster dns
    # configured. If this is the first unit online in a service pool waiting
    # to self host the dns pod, and configure itself to query the dns service
    # declared in the kube-system namespace
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    '''Update the status message with the current status of kubelet.'''
    update_kubelet_status()
def update_kubelet_status():
    """Set the unit status from the health of the worker daemons.

    'active' when both the kubelet and kube-proxy snap daemons report
    active in systemd, otherwise 'waiting' naming the failing services.
    """
    services = [
        'kubelet',
        'kube-proxy'
    ]
    # Comprehension + truthiness instead of append-loop and len() == 0.
    failing_services = [
        service for service in services
        if not _systemctl_is_active('snap.{}.daemon'.format(service))
    ]
    if not failing_services:
        hookenv.status_set('active', 'Kubernetes worker running.')
    else:
        msg = 'Waiting for {} to start.'.format(','.join(failing_services))
        hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
    """Return the address peers should use to reach this unit.

    Prefers the relation's first 'ingress-addresses' entry from
    network_get; falls back to the unit's private address on Juju
    versions without network spaces support.
    """
    try:
        network_info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        network_info = []

    if network_info and 'ingress-addresses' in network_info:
        # just grab the first one for now, maybe be more robust here?
        return network_info['ingress-addresses'][0]
    else:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    ingress_ip = get_ingress_address(kube_control)

    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        ingress_ip,
        gethostname()
    ]

    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')

    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    # data_changed persists the last-seen values, so a restart is only
    # requested when any of these actually differ from the previous run.
    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns) or
            data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    creds = db.get('credentials')
    # Record the creds we are about to use so catch_change_in_creds only
    # triggers a restart on a real change.
    data_changed('kube-control.creds', creds)

    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set worker configuration on the CNI relation. This lets the CNI
    subordinate know that we're the worker so it can respond accordingly. '''
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' Ingress is a toggled state. Remove ingress.available if set when
    toggled '''
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''The Software Defined Network changed on the container so restart the
    kubernetes services.'''
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress load
    balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()

    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
    """The labels config changed; re-apply node labels on the next pass."""
    set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node. '''
    # Get the user's configured labels.
    config = hookenv.config()
    user_labels = {}
    for item in config.get('labels').split(' '):
        if '=' in item:
            # partition (not split) so a value containing '=' does not raise
            # ValueError from unpacking; everything after the first '=' is
            # the value.
            key, _, val = item.partition('=')
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))

    # Collect the current label state.
    current_labels = db.get('current_labels') or {}

    # Remove any labels that the user has removed from the config.
    for key in list(current_labels.keys()):
        if key not in user_labels:
            try:
                remove_label(key)
                del current_labels[key]
                db.set('current_labels', current_labels)
            except ApplyNodeLabelFailed as e:
                hookenv.log(str(e))
                return

    # Add any new labels.
    for key, val in user_labels.items():
        try:
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)
        except ApplyNodeLabelFailed as e:
            hookenv.log(str(e))
            return

    # Set the juju-application label.
    try:
        set_label('juju-application', hookenv.service_name())
    except ApplyNodeLabelFailed as e:
        hookenv.log(str(e))
        return

    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args')
def extra_args_changed():
    """Extra daemon args changed; the snaps must be reconfigured/restarted."""
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Set a flag to handle new docker login options.

    If docker daemon options have also changed, set a flag to ensure the
    daemon is restarted prior to running docker login.
    """
    config = hookenv.config()

    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')

    set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
    """Login to a docker registry with configured credentials.

    Logs out of any registry that was present in the previous config but
    removed from the new one, then logs in to each configured registry.
    """
    config = hookenv.config()

    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)

    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): passing the password via -p exposes it in the process
        # list; 'docker login --password-stdin' would be safer — confirm the
        # deployed docker version supports it before changing.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg prints e.g. b'amd64\n'; decode and strip the trailing newline.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.decode('utf-8').rstrip()
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.

    server: URL of the chosen API server.
    creds: dict with 'client_token', 'kubelet_token' and 'proxy_token'.
    '''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    """Parse a space-separated 'key=value' / 'flag' option into a dict.

    Bare flags (tokens without '=') map to the string 'true'.
    """
    raw = hookenv.config().get(config_key, '')
    args = {}
    for token in raw.split():
        key, sep, value = token.partition('=')
        # When there is no '=', partition returns the whole token as key
        # and an empty sep — treat it as a boolean flag.
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Apply args to a kubernetes snap via 'snap set'.

    Previously-set args that are no longer present are cleared by setting
    them to 'null'; extra args from config override base args. The final
    arg set is persisted so the next run knows what to clear.
    """
    db = unitdata.kv()

    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}

    extra_args = parse_extra_args(extra_args_key)

    args = {}
    for arg in prev_args:
        # remove previous args by setting to null
        args[arg] = 'null'
    for k, v in base_args.items():
        args[k] = v
    for k, v in extra_args.items():
        args[k] = v

    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
    check_call(cmd)

    db.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
    """Write kubelet snap configuration.

    dns: dict from the kube-control relation ('domain', 'enable-kube-dns',
         'sdn-ip').
    ingress_ip: address to advertise as this node's IP.
    """
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    kubelet_opts = {}
    kubelet_opts['require-kubeconfig'] = 'true'
    kubelet_opts['kubeconfig'] = kubeconfig_path
    kubelet_opts['network-plugin'] = 'cni'
    kubelet_opts['v'] = '0'
    kubelet_opts['address'] = '0.0.0.0'
    kubelet_opts['port'] = '10250'
    kubelet_opts['cluster-domain'] = dns['domain']
    kubelet_opts['anonymous-auth'] = 'false'
    kubelet_opts['client-ca-file'] = ca_cert_path
    kubelet_opts['tls-cert-file'] = server_cert_path
    kubelet_opts['tls-private-key-file'] = server_key_path
    kubelet_opts['logtostderr'] = 'true'
    kubelet_opts['fail-swap-on'] = 'false'
    kubelet_opts['node-ip'] = ingress_ip

    if (dns['enable-kube-dns']):
        kubelet_opts['cluster-dns'] = dns['sdn-ip']

    # set --allow-privileged flag for kubelet
    kubelet_opts['allow-privileged'] = set_privileged()

    if is_state('kubernetes-worker.gpu.enabled'):
        hookenv.log('Adding '
                    '--feature-gates=DevicePlugins=true '
                    'to kubelet')
        kubelet_opts['feature-gates'] = 'DevicePlugins=true'

    if is_state('endpoint.aws.ready'):
        kubelet_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'gce'
        kubelet_opts['cloud-config'] = str(cloud_config_path)

    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    """Write kube-proxy snap configuration.

    api_servers: list of API server URLs; one is chosen at random.
    cluster_cidr: pod network CIDR from the CNI relation.
    """
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()

    # Conntrack limits cannot be tuned inside an LXC container; disable.
    # NOTE(review): this assumes the 'virt-what' binary is installed —
    # check_output raises if it is missing; confirm it ships on the base
    # image.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Exactly one authentication mechanism is required: key+certificate,
    password, or token (token and password are mutually exclusive).
    Raises ValueError when none or a conflicting combination is given.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Pick the default backend image for this architecture unless the
    # operator pinned one explicitly.
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        if context['arch'] == 's390x':
            context['ingress_image'] = \
                "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
        elif context['arch'] == 'arm64':
            context['ingress_image'] = \
                "k8s.gcr.io/nginx-ingress-controller-arm64:0.9.0-beta.15"
        else:
            context['ingress_image'] = \
                "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15"  # noqa
    # DaemonSet moved API groups in 1.9; render the right apiVersion.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    # Restart kube-proxy first, then kubelet (same order as before).
    for daemon in ('kube-proxy', 'kubelet'):
        service_restart('snap.{}.daemon'.format(daemon))
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server addresses and ports for this
    relationship as a list of "https://host:port" URLs.'''
    # Flatten every unit of every related service into one URL list.
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error (CalledProcessError) if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    # Deletions are a special case: remove the requested resources
    # immediately with --now.
    if operation == 'delete':
        return kubectl_success(operation, '-f', manifest, '--now')

    # Guard against an error re-creating the same manifest multiple times:
    # if the definition already exists, assume the earlier create succeeded.
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True

    # Execute the requested command for every remaining case.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """Run the first-time nagios (NRPE) configuration exactly once."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write NRPE checks for the worker snap daemons."""
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')

    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Remove NRPE checks when the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')

    # List of systemd services for which the checks will be removed
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)

    for service in services:
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Return 'true' if privileged containers are needed.

    This is when a) the user requested them
                 b) user does not care (auto) and GPUs are available in a
                    pre-1.9 era
    """
    privileged = hookenv.config('allow-privileged').lower()
    gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
                            get_version('kubelet') < (1, 9))

    if privileged == 'auto':
        privileged = 'true' if gpu_needs_privileged else 'false'

    if privileged == 'false' and gpu_needs_privileged:
        disable_gpu()
        remove_state('kubernetes-worker.gpu.enabled')
        # No need to restart kubernetes (set the restart-needed state)
        # because set-privileged is already in the restart path

    return privileged
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.

    Requires kubelet >= 1.9 (DevicePlugins); labels the node and schedules
    a restart so the new kubelet flags take effect.
    """
    if get_version('kubelet') < (1, 9):
        hookenv.status_set(
            'active',
            # Typo fix: 'suppport' -> 'support' in the user-facing message.
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    set_label('gpu', 'true')
    set_label('cuda', 'true')

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """Cuda departed, probably due to the docker layer switching to a
    non nvidia-docker."""
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node.

    Raises ApplyNodeLabelFailed (via remove_label) if the labels cannot
    be removed, e.g. when the master is unreachable.
    """
    hookenv.log('Disabling gpu mode')

    # Remove node labels
    remove_label('gpu')
    remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Notify kubernetes-master that we're gpu-enabled.
    """
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Notify kubernetes-master that we're not gpu-enabled.
    """
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """

    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    # Only act on credentials that were issued for this node's user.
    if creds and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        if data_changed('kube-control.creds', creds):
            set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    hookenv.status_set(
        'blocked',
        'Relate {}:kube-control kubernetes-master:kube-control'.format(
            hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # -w 300: wait up to 300s for the xtables lock instead of failing fast.
    cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
    check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
    """Return the name this node registers with in kubernetes.

    A ready cloud integration overrides any 'cloud-provider' found in
    kubelet-extra-args; AWS nodes register by FQDN, all others by bare
    hostname.
    """
    extra_args = parse_extra_args('kubelet-extra-args')
    provider = extra_args.get('cloud-provider', '')
    if is_state('endpoint.aws.ready'):
        provider = 'aws'
    elif is_state('endpoint.gcp.ready'):
        provider = 'gcp'
    return getfqdn() if provider == 'aws' else gethostname()
class ApplyNodeLabelFailed(Exception):
    """Raised when a kubectl label apply/remove retry loop times out."""
    pass
def persistent_call(cmd, retry_message):
    """Run ``cmd`` repeatedly until it succeeds or ~180 seconds elapse.

    Logs ``retry_message`` and sleeps one second between attempts.
    Returns True on the first zero exit status, False on timeout.
    """
    give_up_at = time.time() + 180
    while time.time() < give_up_at:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    """Apply (or overwrite) a kubernetes node label on this worker.

    Raises ApplyNodeLabelFailed if kubectl keeps failing past the
    persistent_call retry window.
    """
    node = get_node_name()
    command = ('kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
               .format(kubeconfig_path, node, label, value)).split()
    failure = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(command, failure):
        raise ApplyNodeLabelFailed(failure)
def remove_label(label):
    """Remove a kubernetes node label from this worker.

    Raises ApplyNodeLabelFailed if kubectl keeps failing past the
    persistent_call retry window.
    """
    node = get_node_name()
    command = ('kubectl --kubeconfig={0} label node {1} {2}-'
               .format(kubeconfig_path, node, label)).split()
    failure = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(command, failure):
        raise ApplyNodeLabelFailed(failure)
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud-request-sent')
def request_integration():
    """Ask the joined cloud integrator to tag/label this instance.

    Uses the cluster tag published over kube-control so cloud resources
    are grouped per-cluster. Runs once; the cloud-request-sent flag
    prevents re-sending.
    """
    hookenv.status_set('maintenance', 'requesting cloud integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    if is_state('endpoint.aws.joined'):
        # AWS: tag the instance, its security group, and subnet with the
        # cluster ownership tag, and allow S3 access to kubernetes-* buckets.
        cloud = endpoint_from_flag('endpoint.aws.joined')
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
    elif is_state('endpoint.gcp.joined'):
        # GCP: label the instance and enable the APIs kubelet needs.
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
        })
        cloud.enable_object_storage_management()
        cloud.enable_instance_inspection()
        cloud.enable_dns_management()
    set_state('kubernetes-worker.cloud-request-sent')
    hookenv.status_set('waiting', 'waiting for cloud integration')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined')
def clear_requested_integration():
    """Reset the request flag once no cloud integrator is related,
    so a future relation triggers a fresh integration request."""
    remove_state('kubernetes-worker.cloud-request-sent')
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready')
@when_not('kubernetes-worker.restarted-for-cloud')
def restart_for_cloud():
    """Restart kubelet once after a cloud integration becomes ready.

    GCP additionally needs credentials/cloud-config written before the
    restart picks them up.
    """
    if is_state('endpoint.gcp.ready'):
        _write_gcp_snap_config('kubelet')
    set_state('kubernetes-worker.restarted-for-cloud')
    set_state('kubernetes-worker.restart-needed')
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
    """Return the cloud-config.conf path under the snap's COMMON dir."""
    return Path('/var/snap', component, 'common', 'cloud-config.conf')
def _gcp_creds_path(component):
    """Return the gcp-creds.json path under the snap's COMMON dir."""
    return Path('/var/snap', component, 'common', 'gcp-creds.json')
def _daemon_env_path(component):
    """Return the daemon environment-file path under the snap's COMMON dir."""
    return Path('/var/snap', component, 'common', 'environment')
def _write_gcp_snap_config(component):
    """Write GCP credentials, cloud-config, and daemon env for a snap.

    Writes three files under the snap's COMMON dir: the service-account
    JSON (mode 0600), a cloud-config that disables the metadata-server
    token URL, and an environment file pointing at the credentials.
    """
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Tighten permissions before writing the secret material.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')

    # Append the creds env var to the daemon environment file, creating
    # the file (and parent dirs) only when the key is not already present.
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def get_first_mount(mount_relation):
    """Return the first NFS mount dict from a mount relation, or None.

    :param mount_relation: relation object whose ``mounts()`` returns a
        list of mount-layer relation dicts.
    :return: the first entry of the first relation whose leading mount is
        of fstype 'nfs', or None when no such mount exists.
    """
    mount_relation_list = mount_relation.mounts()
    if mount_relation_list:  # redundant len(...) > 0 check removed
        # mount relation list is a list of the mount layer relations
        # for now we just use the first one that is nfs
        for mount in mount_relation_list:
            # for now we just check the first mount and use that.
            # the nfs charm only supports one for now.
            entries = mount.get('mounts') or []
            # Guard against an empty 'mounts' list, which previously
            # raised IndexError on mount['mounts'][0].
            if entries and entries[0]['fstype'] == 'nfs':
                return entries[0]
    return None
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the nfs-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs '''
    mount_data = get_first_mount(mount)
    if mount_data:
        # Normalize the fields we care about so data_changed compares
        # only relevant relation data.
        nfs_relation_data = {
            'options': mount_data['options'],
            'host': mount_data['hostname'],
            'mountpoint': mount_data['mountpoint'],
            'fstype': mount_data['fstype']
        }
        # Re-execute the rendering if the data has changed.
        if data_changed('nfs-config', nfs_relation_data):
            hookenv.log('reconfiguring nfs')
            remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        # No NFS mount available yet; try again on the next relation event.
        return

    addon_path = '/root/cdk/addons/{}'
    # Render the NFS deployment
    manifest = addon_path.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Best-effort: leave nfs.configured unset so we retry next update.
        hookenv.log(e)
        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
        return

    set_state('nfs.configured')
|
wjiangjay/origin
|
vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 44,441
|
[
"CDK"
] |
11325ca23d9b98fd90e1084556a893527ca13c362b56f27d5a27003fbd0350dd
|
# Copyright (C) 2017 Matthew C. Zwier and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division; __metaclass__ = type
import numpy, h5py
from scipy.signal import fftconvolve
from westtools import WESTTool, WESTDataReader, IterRangeSelection
import westpa
from west.data_manager import (weight_dtype, n_iter_dtype, vstr_dtype)
from west.we_driver import NewWeightEntry
import mclib
from westpa import h5io
# Per-iteration recycling record: iteration number, total recycled
# probability ("flux"), and number of recycled walkers.
fluxentry_dtype = numpy.dtype([('n_iter', n_iter_dtype),
                               ('flux', weight_dtype),
                               ('count', numpy.uint)])

# One summary row per target state: label plus mean flux, confidence
# interval bounds, and autocorrelation length from the bootstrap analysis.
target_index_dtype = numpy.dtype([('target_label', vstr_dtype),
                                  ('mean_flux', weight_dtype),
                                  ('mean_flux_ci_lb', weight_dtype),
                                  ('mean_flux_ci_ub', weight_dtype),
                                  ('mean_flux_correl_len', numpy.uintc)])
from westtools.dtypes import iter_block_ci_dtype as ci_dtype
def _extract_fluxes_fileversion_lt_7(iter_start, iter_stop, data_manager):
    '''Extract fluxes from old format, where groups for iterations where recyling
    occurs contain a 'recycling' table.

    Returns a dict mapping target index -> 1-D fluxentry_dtype array with
    one row per iteration in [iter_start, iter_stop).
    '''
    assert data_manager.we_h5file_version < 7
    iter_count = iter_stop - iter_start
    # Target count is taken from the first iteration's recycling table.
    target_count = data_manager.get_iter_group(iter_start)['recycling'].shape[0]

    # File versions < 5 stored the recycled probability under 'weight'.
    if data_manager.we_h5file_version < 5:
        flux_field = 'weight'
    else:
        flux_field = 'flux'

    # NOTE: removed a dead `fluxdata = numpy.zeros(...)` assignment that was
    # immediately overwritten by this per-target dict.
    fluxdata = {itarget: numpy.zeros((iter_count,), dtype=fluxentry_dtype)
                for itarget in xrange(target_count)}

    for iiter, n_iter in enumerate(xrange(iter_start, iter_stop)):
        rdata = data_manager.get_iter_group(n_iter)['recycling']
        for itarget in xrange(target_count):
            fluxdata[itarget][iiter]['n_iter'] = n_iter
            fluxdata[itarget][iiter]['flux'] = rdata[itarget][flux_field]
            fluxdata[itarget][iiter]['count'] = rdata[itarget]['count']
        del rdata

    return fluxdata
def _extract_fluxes_fileversion_7(iter_start, iter_stop, data_manager):
    '''Extract fluxes from HDF5 file version 7, where recycling information is
    stored in the "new_weights" group of the iteration *following* recycling
    events.

    Returns a dict mapping target state label -> 1-D fluxentry_dtype array.
    '''
    assert data_manager.we_h5file_version >= 7
    iter_count = iter_stop - iter_start
    iters = numpy.arange(iter_start, iter_stop, dtype=n_iter_dtype)

    # for each target by name, collect the iterations, fluxes, and counts
    # This is not the most foolproof way to do this, but it's good enough, and fast.
    # The most correct way to do this is tracing trajectories,
    # and warning if the boundary conditions change during the trace,
    # but that's for another tool.
    by_target = {}
    for iiter, n_iter in enumerate(xrange(iter_start, iter_stop)):
        target_states = data_manager.get_target_states(n_iter)
        try:
            # Recycling events of iteration N are recorded under N+1.
            new_weight_index = data_manager.get_iter_group(n_iter+1)['new_weights']['index']
        except KeyError:
            # no recycling data available
            continue

        for tstate in target_states:
            try:
                target_info = by_target[tstate.label]
            except KeyError:
                # State not seen before
                target_info = by_target[tstate.label] = numpy.zeros((iter_count,), dtype=fluxentry_dtype)
                # If the target happens not to exist in an iteration (for whatever reason),
                # store a count of -1 as a sentinel
                target_info['count'][:] = -1
                target_info['n_iter'][:] = iters[:]

            # Select new-weight entries recycled from this target state.
            recycled_from_tstate = ( (new_weight_index['source_type'] == NewWeightEntry.NW_SOURCE_RECYCLED)
                                    &(new_weight_index['target_state_id'] == tstate.state_id)
                                    )
            recycle_count = recycled_from_tstate.sum()
            target_info['count'][iiter] = recycle_count
            if recycle_count:
                # flux is in units of per tau
                target_info['flux'][iiter] = new_weight_index[recycled_from_tstate]['weight'].sum()

        del new_weight_index, target_states

    # Find the last contiguous run where target is available
    # (drop the leading portion flagged with the -1 sentinel).
    for target_label in by_target:
        fluxdata = by_target[target_label]
        by_target[target_label] = fluxdata[numpy.searchsorted(fluxdata['count'],[0])[0]:]

    return by_target
def extract_fluxes(iter_start=None, iter_stop=None, data_manager=None):
    '''Extract flux values from the WEST HDF5 file for iterations >= iter_start
    and < iter_stop, optionally using another data manager instance instead of the
    global one returned by ``westpa.rc.get_data_manager()``.

    Returns a dictionary mapping target names (if available, target index otherwise)
    to a 1-D array of type ``fluxentry_dtype``, which contains columns for iteration
    number, flux, and count.
    '''
    data_manager = data_manager or westpa.rc.get_data_manager()
    iter_start = iter_start or 1
    iter_stop = iter_stop or data_manager.current_iteration

    # Dispatch on the HDF5 file format version.
    extractor = (_extract_fluxes_fileversion_lt_7
                 if data_manager.we_h5file_version < 7
                 else _extract_fluxes_fileversion_7)
    return extractor(iter_start, iter_stop, data_manager)
class WFluxanlTool(WESTTool):
    """Command-line tool computing target-state fluxes and their
    Monte Carlo bootstrap confidence intervals from a WEST HDF5 file."""
    prog='w_fluxanl'
    description = '''\
Extract fluxes into pre-defined target states from WEST data,
average, and construct confidence intervals. Monte Carlo bootstrapping
is used to account for the correlated and possibly non-Gaussian statistical
error in flux measurements.

All non-graphical output (including that to the terminal and HDF5) assumes that
the propagation/resampling period ``tau`` is equal to unity; to obtain results
in familiar units, divide all fluxes and multiply all correlation lengths by
the true value of ``tau``.
'''

    # Version stamp written to the output group's attrs.
    output_format_version = 2

    def __init__(self):
        super(WFluxanlTool,self).__init__()
        self.data_reader = WESTDataReader()
        self.iter_range = IterRangeSelection()
        self.output_h5file = None
        self.output_group = None
        # target label -> HDF5 group holding that target's datasets
        self.target_groups = {}

        # target label -> fluxentry array (filled by calc_store_flux_data)
        self.fluxdata = {}
        self.alpha = None
        self.autocorrel_alpha = None
        self.n_sets = None
        self.do_evol = False
        self.evol_step = 1

    def add_args(self, parser):
        """Register output and bootstrap-calculation options."""
        self.data_reader.add_args(parser)
        self.iter_range.add_args(parser)
        ogroup = parser.add_argument_group('output options')
        ogroup.add_argument('-o', '--output', default='fluxanl.h5',
                            help='Store intermediate data and analysis results to OUTPUT (default: %(default)s).')
        cgroup = parser.add_argument_group('calculation options')
        cgroup.add_argument('--disable-bootstrap', '-db', dest='bootstrap', action='store_const', const=False,
                            help='''Enable the use of Monte Carlo Block Bootstrapping.''')
        cgroup.add_argument('--disable-correl', '-dc', dest='correl', action='store_const', const=False,
                            help='''Disable the correlation analysis.''')
        cgroup.add_argument('-a', '--alpha', type=float, default=0.05,
                            help='''Calculate a (1-ALPHA) confidence interval on the average flux'
                            (default: %(default)s)''')
        cgroup.add_argument('--autocorrel-alpha', type=float, dest='acalpha', metavar='ACALPHA',
                            help='''Evaluate autocorrelation of flux to (1-ACALPHA) significance.
                            Note that too small an ACALPHA will result in failure to detect autocorrelation
                            in a noisy flux signal. (Default: same as ALPHA.)''')
        cgroup.add_argument('-N', '--nsets', type=int,
                            help='''Use NSETS samples for bootstrapping (default: chosen based on ALPHA)''')
        cgroup.add_argument('--evol', action='store_true', dest='do_evol',
                            help='''Calculate time evolution of flux confidence intervals (expensive).''')
        cgroup.add_argument('--evol-step', type=int, default=1, metavar='ESTEP',
                            help='''Calculate time evolution of flux confidence intervals every ESTEP
                            iterations (default: %(default)s)''')

    def process_args(self, args):
        """Open input/output files and resolve bootstrap parameters."""
        self.data_reader.process_args(args)
        self.data_reader.open()
        self.iter_range.data_manager = self.data_reader
        self.iter_range.process_args(args)

        self.output_h5file = h5py.File(args.output, 'w')

        self.alpha = args.alpha
        # Disable the bootstrap or the correlation analysis.
        self.mcbs_enable = args.bootstrap if args.bootstrap is not None else True
        self.do_correl = args.correl if args.correl is not None else True
        self.autocorrel_alpha = args.acalpha or self.alpha
        self.n_sets = args.nsets or mclib.get_bssize(self.alpha)

        self.do_evol = args.do_evol
        self.evol_step = args.evol_step or 1

    def calc_store_flux_data(self):
        """Extract per-target fluxes, compute bootstrap CIs, and write
        everything (per-iteration data, autocorrelation, summary index)
        to the output HDF5 file."""
        westpa.rc.pstatus('Calculating mean flux and confidence intervals for iterations [{},{})'
                          .format(self.iter_range.iter_start, self.iter_range.iter_stop))

        fluxdata = extract_fluxes(self.iter_range.iter_start, self.iter_range.iter_stop, self.data_reader)

        # Create a group to store data in
        output_group = h5io.create_hdf5_group(self.output_h5file, 'target_flux', replace=False, creating_program=self.prog)
        self.output_group = output_group
        output_group.attrs['version_code'] = self.output_format_version
        self.iter_range.record_data_iter_range(output_group)

        n_targets = len(fluxdata)
        index = numpy.empty((len(fluxdata),), dtype=target_index_dtype)
        avg_fluxdata = numpy.empty((n_targets,), dtype=ci_dtype)

        for itarget, (target_label, target_fluxdata) in enumerate(fluxdata.iteritems()):
            # Create group and index entry
            index[itarget]['target_label'] = str(target_label)
            target_group = output_group.create_group('target_{}'.format(itarget))

            self.target_groups[target_label] = target_group

            # Store per-iteration values
            target_group['n_iter'] = target_fluxdata['n_iter']
            target_group['count'] = target_fluxdata['count']
            target_group['flux'] = target_fluxdata['flux']
            h5io.label_axes(target_group['flux'], ['n_iter'], units=['tau^-1'])

            # Calculate flux autocorrelation
            fluxes = target_fluxdata['flux']
            mean_flux = fluxes.mean()
            fmm = fluxes - mean_flux
            acorr = fftconvolve(fmm,fmm[::-1])
            acorr = acorr[len(acorr)//2:]
            acorr /= acorr[0]
            acorr_ds = target_group.create_dataset('flux_autocorrel', data=acorr)
            h5io.label_axes(acorr_ds, ['lag'], ['tau'])

            # Calculate overall averages and CIs
            #avg, lb_ci, ub_ci, correl_len = mclib.mcbs_ci_correl(fluxes, numpy.mean, self.alpha, self.n_sets,
            #                                                     autocorrel_alpha=self.autocorrel_alpha, subsample=numpy.mean)
            avg, lb_ci, ub_ci, sterr, correl_len = mclib.mcbs_ci_correl({'dataset': fluxes}, estimator=(lambda stride, dataset: numpy.mean(dataset)), alpha=self.alpha, n_sets=self.n_sets,
                                                                        autocorrel_alpha=self.autocorrel_alpha, subsample=numpy.mean, do_correl=self.do_correl, mcbs_enable=self.mcbs_enable )
            avg_fluxdata[itarget] = (self.iter_range.iter_start, self.iter_range.iter_stop, avg, lb_ci, ub_ci, sterr, correl_len)
            westpa.rc.pstatus('target {!r}:'.format(target_label))
            westpa.rc.pstatus(' correlation length = {} tau'.format(correl_len))
            westpa.rc.pstatus(' mean flux and CI = {:e} ({:e},{:e}) tau^(-1)'.format(avg,lb_ci,ub_ci))
            index[itarget]['mean_flux'] = avg
            index[itarget]['mean_flux_ci_lb'] = lb_ci
            index[itarget]['mean_flux_ci_ub'] = ub_ci
            index[itarget]['mean_flux_correl_len'] = correl_len

        # Write index and summary
        index_ds = output_group.create_dataset('index', data=index)
        index_ds.attrs['mcbs_alpha'] = self.alpha
        index_ds.attrs['mcbs_autocorrel_alpha'] = self.autocorrel_alpha
        index_ds.attrs['mcbs_n_sets'] = self.n_sets

        self.fluxdata = fluxdata
        self.output_h5file['avg_flux'] = avg_fluxdata

    def calc_evol_flux(self):
        """Compute cumulative (time-evolution) flux CIs every evol_step
        iterations for each target and store them as 'flux_evolution'."""
        westpa.rc.pstatus('Calculating cumulative evolution of flux confidence intervals every {} iteration(s)'
                          .format(self.evol_step))

        for itarget, (target_label, target_fluxdata) in enumerate(self.fluxdata.iteritems()):
            fluxes = target_fluxdata['flux']
            target_group = self.target_groups[target_label]
            iter_start = target_group['n_iter'][0]
            iter_stop = target_group['n_iter'][-1]
            iter_count = iter_stop - iter_start
            n_blocks = iter_count // self.evol_step
            if iter_count % self.evol_step > 0: n_blocks += 1

            cis = numpy.empty((n_blocks,), dtype=ci_dtype)

            for iblock in xrange(n_blocks):
                block_iter_stop = min(iter_start + (iblock+1)*self.evol_step, iter_stop)
                istop = min((iblock+1)*self.evol_step, len(target_fluxdata['flux']))
                fluxes = target_fluxdata['flux'][:istop]

                #avg, ci_lb, ci_ub, correl_len = mclib.mcbs_ci_correl(fluxes, numpy.mean, self.alpha, self.n_sets,
                #                                                     autocorrel_alpha = self.autocorrel_alpha,
                #                                                     subsample=numpy.mean)
                avg, ci_lb, ci_ub, sterr, correl_len = mclib.mcbs_ci_correl({'dataset': fluxes}, estimator=(lambda stride, dataset: numpy.mean(dataset)), alpha=self.alpha, n_sets=self.n_sets,
                                                                            autocorrel_alpha = self.autocorrel_alpha,
                                                                            subsample=numpy.mean, do_correl=self.do_correl, mcbs_enable=self.mcbs_enable )
                cis[iblock]['iter_start'] = iter_start
                cis[iblock]['iter_stop'] = block_iter_stop
                cis[iblock]['expected'], cis[iblock]['ci_lbound'], cis[iblock]['ci_ubound'] = avg, ci_lb, ci_ub
                cis[iblock]['corr_len'] = correl_len
                cis[iblock]['sterr'] = sterr

                del fluxes

            cis_ds = target_group.create_dataset('flux_evolution', data=cis)
            cis_ds.attrs['iter_step'] = self.evol_step
            cis_ds.attrs['mcbs_alpha'] = self.alpha
            cis_ds.attrs['mcbs_autocorrel_alpha'] = self.autocorrel_alpha
            cis_ds.attrs['mcbs_n_sets'] = self.n_sets

    def go(self):
        """Run the analysis: averages always, evolution only if requested."""
        self.calc_store_flux_data()
        if self.do_evol:
            self.calc_evol_flux()
# Command-line entry point.
if __name__ == '__main__':
    WFluxanlTool().main()
|
ajoshpratt/westpa
|
lib/west_tools/w_fluxanl.py
|
Python
|
gpl-3.0
| 16,409
|
[
"Gaussian"
] |
6ea6cf1f9f066a69ed28185ed023cf4179bdf607df3dfb8d5f300b80640703cc
|
# This component calculates the humidity ratio from the ladybug weather file import parameters
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <chris@mackeyarchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#Conversion formulas are taken from the following publications:
#Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
#W. Wagner and A. Pruss: "The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use", Journal of Physical and Chemical Reference Data, June 2002, Volume 31, Issue 2, pp. 387-535
"""
Calculates the humidity ratio from the ladybug weather file import parameters
Conversion formulas are taken from the following publications:
Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
W. Wagner and A. Pruss: "The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use", Journal of Physical and Chemical Reference Data, June 2002, Volume 31, Issue 2, pp. 387-535
-
Provided by Ladybug 0.0.61
Args:
_dryBulbTemperature: The dry bulb temperature from the Import epw component.
_relativeHumidity: The relative humidity from the Import epw component.
_barometricPressure: The barometric pressure from the Import epw component.
Returns:
readMe!: ...
humidityRatio: The hourly humidity ratio (kg water / kg air).
enthalpy: The hourly enthalpy of the air (kJ / kg).
partialPressure: The hourly partial pressure of water vapor in the atmosphere (Pa).
saturationPressure: The saturation pressure of water vapor in the atmosphere (Pa).
"""
# Grasshopper component registration metadata.
ghenv.Component.Name = "Ladybug_Humidity Ratio Calculator"
ghenv.Component.NickName = 'CalcHumidityRatio'
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "1 | AnalyzeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
# Older Grasshopper builds lack this attribute; ignore failures.
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import math
import scriptcontext as sc
def checkTheData():
    """Validate the three component inputs look like hourly EPW streams.

    Each input's header is expected to carry its data-type name at index 2
    and the string 'Hourly' at index 4. Any missing/malformed input makes
    this return False.
    """
    try:
        temp = _dryBulbTemperature
        validTemp = ('Temperature' in temp[2]) and (temp[4] == 'Hourly')
        humid = _relativeHumidity
        validHumid = ('Relative Humidity' in humid[2]) and (humid[4] == 'Hourly')
        press = _barometricPressure
        validPress = ('Barometric Pressure' in press[2]) and (press[4] == 'Hourly')
        allValid = validTemp and validHumid and validPress
    except:
        # Inputs absent or not list-like: treat as invalid.
        allValid = False
    return allValid
def main():
    """Compute humidity ratio, enthalpy, and vapor/saturation pressures.

    Returns (HR, EN, VP, SP) lists in Ladybug header format, -1 on a
    version-compatibility failure, or (None, None, None, None) when the
    Ladybug core has not been loaded into the document yet.
    """
    # import the classes
    if sc.sticky.has_key('ladybug_release'):
        try:
            if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
        except:
            warning = "You need a newer version of Ladybug to use this compoent." + \
            "Use updateLadybug component to update userObjects.\n" + \
            "If you have already updated userObjects drag Ladybug_Ladybug component " + \
            "into canvas and try again."
            # NOTE(review): `gh` is not imported in this visible chunk —
            # presumably Grasshopper is imported as gh elsewhere; verify.
            w = gh.GH_RuntimeMessageLevel.Warning
            ghenv.Component.AddRuntimeMessage(w, warning)
            return -1
        lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()

        #Separate the numbers from the header strings
        Tnumbers = []
        Tstr = []
        for item in _dryBulbTemperature:
            try: Tnumbers.append(float(item))
            except: Tstr.append(item)

        Rnumbers = []
        Rstr = []
        for item in _relativeHumidity:
            try: Rnumbers.append(float(item))
            except: Rstr.append(item)

        Bnumbers = []
        Bstr = []
        for item in _barometricPressure:
            try: Bnumbers.append(float(item))
            except: Bstr.append(item)

        #Calculate the Humidity Ratio.
        HRCalc, ENCalc, vapPress, satPress = lb_comfortModels.calcHumidRatio(Tnumbers, Rnumbers, Bnumbers)

        #Build the strings and add it to the final calculation outputs
        # (first 7 entries are the Ladybug header copied from the input).
        HR = []
        HR.append(Tstr[0])
        HR.append(Tstr[1])
        HR.append('Humidity Ratio')
        HR.append('kg water / kg air')
        HR.append(Tstr[4])
        HR.append(Tstr[5])
        HR.append(Tstr[6])
        for item in HRCalc:
            HR.append(item)

        EN = []
        EN.append(Tstr[0])
        EN.append(Tstr[1])
        EN.append('Enthalpy')
        EN.append('kJ/kg')
        EN.append(Tstr[4])
        EN.append(Tstr[5])
        EN.append(Tstr[6])
        for item in ENCalc:
            EN.append(item)

        SP = []
        SP.append(Tstr[0])
        SP.append(Tstr[1])
        SP.append('Saturation Pressure')
        SP.append('Pa')
        SP.append(Tstr[4])
        SP.append(Tstr[5])
        SP.append(Tstr[6])
        # Convert hPa-scale values to Pa before output.
        satPress100 = []
        for item in satPress:
            satPress100.append(item*100)
        for item in satPress100:
            SP.append(item)

        VP = []
        VP.append(Tstr[0])
        VP.append(Tstr[1])
        VP.append('Vapor Pressure')
        VP.append('Pa')
        VP.append('Hourly')
        VP.append(Tstr[5])
        VP.append(Tstr[6])
        # Convert hPa-scale values to Pa before output.
        vapPress100 = []
        for item in vapPress:
            vapPress100.append(item*100)
        for item in vapPress100:
            VP.append(item)

        return HR, EN, VP, SP
    else:
        print "You should first let the Ladybug fly..."
        w = gh.GH_RuntimeMessageLevel.Warning
        ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
        return None, None, None, None
#Check the data to make sure it is the correct type
checkData = checkTheData()

# Run the calculation only when all three inputs validated; main() returns
# -1 on a Ladybug version mismatch, in which case outputs are left unset.
if checkData == True:
    res = main()
    if res!=-1:
        humidityRatio, enthalpy, partialPressure, saturationPressure = res
        print 'Humidity ratio calculation completed successfully!'
else:
    print 'Please provide all of the required annual data inputs.'
|
boris-p/ladybug
|
src/Ladybug_Humidity Ratio Calculator.py
|
Python
|
gpl-3.0
| 7,319
|
[
"EPW"
] |
d557dbe61bafdba770304a17485bd94404750aea2c34be561d7d1623747c2562
|
import datetime
import re
import smtplib
import time
import urllib
from typing import Any, List, Optional, Sequence
from unittest.mock import MagicMock, patch
from urllib.parse import urlencode
import orjson
from django.conf import settings
from django.contrib.auth.views import INTERNAL_RESET_URL_TOKEN
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.test import override_settings
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
ConfirmationKeyException,
MultiuseInvite,
confirmation_url,
create_confirmation_link,
generate_key,
get_object_from_key,
one_click_unsubscribe_link,
)
from zerver.context_processors import common_context
from zerver.decorator import do_two_factor_login
from zerver.forms import HomepageForm, check_subdomain_available
from zerver.lib.actions import (
add_new_user_history,
do_add_default_stream,
do_change_full_name,
do_change_user_role,
do_create_default_stream_group,
do_create_realm,
do_create_user,
do_deactivate_realm,
do_deactivate_user,
do_get_user_invites,
do_invite_users,
do_set_realm_property,
get_default_streams_for_realm,
get_stream,
)
from zerver.lib.email_notifications import enqueue_welcome_emails, followup_day2_email_delay
from zerver.lib.initial_password import initial_password
from zerver.lib.mobile_auth_otp import (
ascii_to_hex,
hex_to_ascii,
is_valid_otp,
otp_decrypt_api_key,
otp_encrypt_api_key,
xor_hex_strings,
)
from zerver.lib.name_restrictions import is_disposable_domain
from zerver.lib.rate_limiter import add_ratelimit_rule, remove_ratelimit_rule
from zerver.lib.send_email import FromAddress, deliver_email, send_future_email
from zerver.lib.stream_subscription import get_stream_subscriptions_for_user
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.subdomains import is_root_domain_available
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
avatar_disk_path,
cache_tries_captured,
find_key_by_email,
get_test_image_file,
load_subdomain_token,
message_stream_count,
most_recent_message,
most_recent_usermessage,
queries_captured,
reset_emails_in_zulip_realm,
)
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Message,
PreregistrationUser,
Realm,
Recipient,
ScheduledEmail,
Stream,
Subscription,
UserMessage,
UserProfile,
flush_per_request_caches,
get_realm,
get_system_bot,
get_user,
get_user_by_delivery_email,
)
from zerver.views.auth import redirect_and_log_into_subdomain, start_two_factor_auth
from zerver.views.development.registration import confirmation_key
from zerver.views.invite import get_invitee_emails_set
from zproject.backends import ExternalAuthDataDict, ExternalAuthResult
class RedirectAndLogIntoSubdomainTestCase(ZulipTestCase):
    """Tests for the token payload produced by redirect_and_log_into_subdomain."""

    def test_data(self) -> None:
        """The subdomain token reflects user identity, signup intent, and
        the multiuse invite key across the three auth-result shapes."""
        realm = get_realm("zulip")
        user_profile = self.example_user("hamlet")
        name = user_profile.full_name
        email = user_profile.delivery_email

        # Existing user, no extra data: plain login payload.
        response = redirect_and_log_into_subdomain(ExternalAuthResult(user_profile=user_profile))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': name,
                                    'email': email,
                                    'subdomain': realm.subdomain,
                                    'is_signup': False})

        data_dict = ExternalAuthDataDict(is_signup=True, multiuse_object_key='key')
        response = redirect_and_log_into_subdomain(ExternalAuthResult(user_profile=user_profile,
                                                                      data_dict=data_dict))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': name,
                                    'email': email,
                                    'subdomain': realm.subdomain,
                                    # the email has an account at the subdomain,
                                    # so is_signup get overridden to False:
                                    'is_signup': False,
                                    'multiuse_object_key': 'key',
                                    })

        # Pure signup (no existing user profile): is_signup stays True.
        data_dict = ExternalAuthDataDict(email=self.nonreg_email("alice"),
                                         full_name="Alice",
                                         subdomain=realm.subdomain,
                                         is_signup=True,
                                         full_name_validated=True,
                                         multiuse_object_key='key')
        response = redirect_and_log_into_subdomain(ExternalAuthResult(data_dict=data_dict))
        data = load_subdomain_token(response)
        self.assertDictEqual(data, {'full_name': "Alice",
                                    'email': self.nonreg_email("alice"),
                                    'full_name_validated': True,
                                    'subdomain': realm.subdomain,
                                    'is_signup': True,
                                    'multiuse_object_key': 'key',
                                    })
class DeactivationNoticeTestCase(ZulipTestCase):
    """How login/registration endpoints behave for (de)activated realms."""

    def deactivate_zulip_realm(self) -> None:
        # Shared setup: flip the "zulip" realm to deactivated.
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

    def test_redirection_for_deactivated_realm(self) -> None:
        # Deactivated realms bounce both pages to the deactivation notice.
        self.deactivate_zulip_realm()
        for url in ('/register/', '/login/'):
            result = self.client_get(url)
            self.assertEqual(result.status_code, 302)
            self.assertIn('deactivated', result.url)

    def test_redirection_for_active_realm(self) -> None:
        # An active realm serves both pages normally.
        for url in ('/register/', '/login/'):
            result = self.client_get(url)
            self.assertEqual(result.status_code, 200)

    def test_deactivation_notice_when_realm_is_active(self) -> None:
        # The notice page itself redirects active realms to login.
        result = self.client_get('/accounts/deactivated/')
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result.url)

    def test_deactivation_notice_when_deactivated(self) -> None:
        self.deactivate_zulip_realm()
        result = self.client_get('/accounts/deactivated/')
        self.assertIn("Zulip Dev, has been deactivated.", result.content.decode())
class AddNewUserHistoryTest(ZulipTestCase):
    def test_add_new_user_history_race(self) -> None:
        """Sends a message during user creation"""
        # Create a user who hasn't had historical messages added
        realm = get_realm('zulip')
        stream = Stream.objects.get(realm=realm, name='Denmark')
        DefaultStream.objects.create(stream=stream, realm=realm)
        # Make sure at least 3 messages are sent to Denmark and it's a default stream.
        message_id = self.send_stream_message(self.example_user('hamlet'), stream.name, "test 1")
        self.send_stream_message(self.example_user('hamlet'), stream.name, "test 2")
        self.send_stream_message(self.example_user('hamlet'), stream.name, "test 3")

        # Register the new user with add_new_user_history patched out, so we
        # can invoke it manually below after simulating the race.
        with patch("zerver.lib.actions.add_new_user_history"):
            self.register(self.nonreg_email('test'), "test")
        user_profile = self.nonreg_user('test')
        # Collect the streams the freshly registered user is subscribed to.
        subs = Subscription.objects.select_related("recipient").filter(
            user_profile=user_profile, recipient__type=Recipient.STREAM)
        streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])

        # Sent a message afterwards to trigger a race between message
        # sending and `add_new_user_history`.
        race_message_id = self.send_stream_message(self.example_user('hamlet'),
                                                   streams[0].name, "test")

        # Overwrite ONBOARDING_UNREAD_MESSAGES to 2
        ONBOARDING_UNREAD_MESSAGES = 2
        with patch("zerver.lib.actions.ONBOARDING_UNREAD_MESSAGES",
                   ONBOARDING_UNREAD_MESSAGES):
            add_new_user_history(user_profile, streams)

        # Our first message is in the user's history
        self.assertTrue(UserMessage.objects.filter(user_profile=user_profile,
                                                   message_id=message_id).exists())
        # The race message is in the user's history and marked unread.
        self.assertTrue(UserMessage.objects.filter(user_profile=user_profile,
                                                   message_id=race_message_id).exists())
        self.assertFalse(UserMessage.objects.get(user_profile=user_profile,
                                                 message_id=race_message_id).flags.read.is_set)

        # Verify that the ONBOARDING_UNREAD_MESSAGES latest messages
        # that weren't the race message are marked as unread.
        latest_messages = UserMessage.objects.filter(
            user_profile=user_profile,
            message__recipient__type=Recipient.STREAM,
        ).exclude(message_id=race_message_id).order_by('-message_id')[0:ONBOARDING_UNREAD_MESSAGES]
        self.assertEqual(len(latest_messages), 2)
        for msg in latest_messages:
            self.assertFalse(msg.flags.read.is_set)

        # Verify that older messages are correctly marked as read.
        older_messages = UserMessage.objects.filter(
            user_profile=user_profile,
            message__recipient__type=Recipient.STREAM,
        ).exclude(message_id=race_message_id).order_by(
            '-message_id')[ONBOARDING_UNREAD_MESSAGES:ONBOARDING_UNREAD_MESSAGES + 1]
        self.assertTrue(len(older_messages) > 0)
        for msg in older_messages:
            self.assertTrue(msg.flags.read.is_set)

    def test_auto_subbed_to_personals(self) -> None:
        """
        Newly created users are auto-subbed to the ability to receive
        personals.
        """
        test_email = self.nonreg_email('test')
        self.register(test_email, "test")
        user_profile = self.nonreg_user('test')
        old_messages_count = message_stream_count(user_profile)
        # A self-PM should land in the new user's message feed.
        self.send_personal_message(user_profile, user_profile)
        new_messages_count = message_stream_count(user_profile)
        self.assertEqual(new_messages_count, old_messages_count + 1)

        recipient = Recipient.objects.get(type_id=user_profile.id,
                                          type=Recipient.PERSONAL)
        message = most_recent_message(user_profile)
        self.assertEqual(message.recipient, recipient)

        # Check the repr()s of the Message and UserMessage rows; the display
        # recipient lookup is mocked out to avoid a database round trip.
        with patch('zerver.models.get_display_recipient', return_value='recip'):
            self.assertEqual(
                str(message),
                '<Message: recip /  / '
                '<UserProfile: {} {}>>'.format(user_profile.email, user_profile.realm))

            user_message = most_recent_usermessage(user_profile)
            self.assertEqual(
                str(user_message),
                f'<UserMessage: recip / {user_profile.email} ([])>',
            )
class InitialPasswordTest(ZulipTestCase):
    """Behavior of initial_password() when no salt is configured."""

    def test_none_initial_password_salt(self) -> None:
        # Without INITIAL_PASSWORD_SALT no deterministic development
        # password can be derived, so initial_password() returns None.
        with self.settings(INITIAL_PASSWORD_SALT=None):
            password = initial_password('test@test.com')
        self.assertIsNone(password)
class PasswordResetTest(ZulipTestCase):
    """
    Log in, reset password, log out, log in with new password.
    """
    def get_reset_mail_body(self, subdomain: str='zulip') -> str:
        """Return the body of the single email in the outbox, after checking
        its From address and List-Id header match a password reset email for
        the given subdomain."""
        from django.core.mail import outbox
        [message] = outbox
        self.assertRegex(
            message.from_email,
            fr"^Zulip Account Security <{self.TOKENIZED_NOREPLY_REGEX}>\Z",
        )
        self.assertIn(f"{subdomain}.testserver", message.extra_headers["List-Id"])
        return message.body

    def test_password_reset(self) -> None:
        """End-to-end reset: request the email, follow the link, set a new
        password, and confirm the old one no longer works."""
        user = self.example_user("hamlet")
        email = user.delivery_email
        old_password = initial_password(email)
        assert old_password is not None

        self.login_user(user)

        # test password reset template
        result = self.client_get('/accounts/password/reset/')
        self.assert_in_response('Reset your password', result)

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn("reset your password", body)

        # Visit the password reset link.
        password_reset_url = self.get_confirmation_url_from_outbox(
            email, url_pattern=settings.EXTERNAL_HOST + r"(\S\S+)")
        result = self.client_get(password_reset_url)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.endswith(f'/{INTERNAL_RESET_URL_TOKEN}/'))

        final_reset_url = result.url
        result = self.client_get(final_reset_url)
        self.assertEqual(result.status_code, 200)

        # Reset your password
        with self.settings(PASSWORD_MIN_LENGTH=3, PASSWORD_MIN_GUESSES=1000):
            # Verify weak passwords don't work.
            result = self.client_post(final_reset_url,
                                      {'new_password1': 'easy',
                                       'new_password2': 'easy'})
            self.assert_in_response("The password is too weak.",
                                    result)

            result = self.client_post(final_reset_url,
                                      {'new_password1': 'f657gdGGk9',
                                       'new_password2': 'f657gdGGk9'})
            # password reset succeeded
            self.assertEqual(result.status_code, 302)
            self.assertTrue(result["Location"].endswith("/password/done/"))

            # log back in with new password
            self.login_by_email(email, password='f657gdGGk9')
            user_profile = self.example_user('hamlet')
            self.assert_logged_in_user_id(user_profile.id)

            # make sure old password no longer works
            self.assert_login_failure(email, password=old_password)

    def test_password_reset_for_non_existent_user(self) -> None:
        """The web flow looks identical for an unknown address (no account
        existence leak); only the email content differs."""
        email = 'nonexisting@mars.com'

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('You do not have an account', body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('deactivated', body)

    def test_password_reset_for_deactivated_user(self) -> None:
        """A deactivated user gets an email explaining the account is
        deactivated rather than a working reset link."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        do_deactivate_user(user_profile)

        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        body = self.get_reset_mail_body()
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('has been deactivated', body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('not have an account', body)

    def test_password_reset_with_deactivated_realm(self) -> None:
        """For a deactivated realm, the web flow looks the same but no email
        is sent at all; the attempt is only logged."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        do_deactivate_realm(user_profile.realm)

        # start the password reset process by supplying an email address
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        # Check that the password reset email is from a noreply address.
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(RATE_LIMITING=True)
    def test_rate_limiting(self) -> None:
        """Reset emails are rate-limited per address: 2 per 10 seconds here.
        Other addresses are unaffected, and the limit expires with time."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        from django.core.mail import outbox

        add_ratelimit_rule(10, 2, domain='password_reset_form_by_email')
        start_time = time.time()
        # Freeze time so all requests land in the same rate-limit window.
        with patch('time.time', return_value=start_time):
            self.client_post('/accounts/password/reset/', {'email': email})
            self.client_post('/accounts/password/reset/', {'email': email})
            self.assert_length(outbox, 2)

            # Too many password reset emails sent to the address, we won't send more.
            with self.assertLogs(level='INFO') as info_logs:
                self.client_post('/accounts/password/reset/', {'email': email})
            self.assertEqual(info_logs.output, [
                'INFO:root:Too many password reset attempts for email hamlet@zulip.com'
            ])
            self.assert_length(outbox, 2)

            # Resetting for a different address works though.
            self.client_post('/accounts/password/reset/', {'email': self.example_email("othello")})
            self.assert_length(outbox, 3)
            self.client_post('/accounts/password/reset/', {'email': self.example_email("othello")})
            self.assert_length(outbox, 4)

        # After time, password reset emails can be sent again.
        with patch('time.time', return_value=start_time + 11):
            self.client_post('/accounts/password/reset/', {'email': email})
            self.client_post('/accounts/password/reset/', {'email': email})
            self.assert_length(outbox, 6)

        remove_ratelimit_rule(10, 2, domain='password_reset_form_by_email')

    def test_wrong_subdomain(self) -> None:
        """Requesting a reset on a subdomain where the address has no account
        sends an email pointing to the organizations where it does."""
        email = self.example_email("hamlet")

        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="zephyr")

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        body = self.get_reset_mail_body('zephyr')
        self.assertIn('Somebody (possibly you) requested a new password', body)
        self.assertIn('You do not have an account', body)
        self.assertIn("active accounts in the following organization(s).\nhttp://zulip.testserver",
                      body)
        self.assertIn('safely ignore', body)
        self.assertNotIn('reset your password', body)
        self.assertNotIn('deactivated', body)

    def test_invalid_subdomain(self) -> None:
        """A nonexistent subdomain 404s and sends no email."""
        email = self.example_email("hamlet")

        # start the password reset process by supplying an email address
        result = self.client_post(
            '/accounts/password/reset/', {'email': email},
            subdomain="invalid")

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 404)
        self.assert_in_response("There is no Zulip organization hosted at this subdomain.",
                                result)

        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_auth_only(self) -> None:
        """If the email auth backend is not enabled, password reset should do nothing"""
        email = self.example_email("hamlet")
        with patch('logging.info') as mock_logging:
            result = self.client_post('/accounts/password/reset/', {'email': email})
            mock_logging.assert_called_once()

        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])

        self.assert_in_response("Check your email in a few minutes to finish the process.", result)

        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_and_email_auth(self) -> None:
        """If both email and LDAP auth backends are enabled, limit password
        reset to users outside the LDAP domain"""
        # If the domain matches, we don't generate an email
        with self.settings(LDAP_APPEND_DOMAIN="zulip.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                mock_logging.assert_called_once_with("Password reset not allowed for user in LDAP domain")
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)

        # If the domain doesn't match, we do generate an email
        with self.settings(LDAP_APPEND_DOMAIN="example.com"):
            email = self.example_email("hamlet")
            with patch('logging.info') as mock_logging:
                result = self.client_post('/accounts/password/reset/', {'email': email})
                self.assertEqual(result.status_code, 302)
                self.assertTrue(result["Location"].endswith(
                    "/accounts/password/reset/done/"))
                result = self.client_get(result["Location"])

        body = self.get_reset_mail_body()
        self.assertIn('reset your password', body)

    def test_redirect_endpoints(self) -> None:
        '''
        These tests are mostly designed to give us 100% URL coverage
        in our URL coverage reports.  Our mechanism for finding URL
        coverage doesn't handle redirects, so we just have a few quick
        tests here.
        '''
        result = self.client_get('/accounts/password/reset/done/')
        self.assert_in_success_response(["Check your email"], result)

        result = self.client_get('/accounts/password/done/')
        self.assert_in_success_response(["We've reset your password!"], result)

        result = self.client_get('/accounts/send_confirm/alice@example.com')
        self.assert_in_success_response(["/accounts/home/"], result)

        result = self.client_get('/accounts/new/send_confirm/alice@example.com')
        self.assert_in_success_response(["/new/"], result)
class LoginTest(ZulipTestCase):
    """
    Logging in, registration, and logging out.
    """
    def test_login(self) -> None:
        """A basic successful login establishes a session for the user."""
        self.login('hamlet')
        user_profile = self.example_user('hamlet')
        self.assert_logged_in_user_id(user_profile.id)

    def test_login_deactivated_user(self) -> None:
        """Deactivated accounts cannot log in and see a clear error."""
        user_profile = self.example_user('hamlet')
        do_deactivate_user(user_profile)
        result = self.login_with_return(self.example_email("hamlet"), "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your account is no longer active.", result)
        self.assert_logged_in_user_id(None)

    def test_login_bad_password(self) -> None:
        """A wrong password re-renders the form without logging in."""
        user = self.example_user("hamlet")
        password: Optional[str] = "wrongpassword"
        result = self.login_with_return(user.delivery_email, password=password)
        self.assert_in_success_response([user.delivery_email], result)
        self.assert_logged_in_user_id(None)

        # Parallel test to confirm that the right password works using the
        # same login code, which verifies our failing test isn't broken
        # for some other reason.
        password = initial_password(user.delivery_email)
        result = self.login_with_return(user.delivery_email, password=password)
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(user.id)

    @override_settings(RATE_LIMITING_AUTHENTICATE=True)
    def test_login_bad_password_rate_limiter(self) -> None:
        """After 2 failed attempts within 10 seconds, even a correct
        password is blocked until the window expires."""
        user_profile = self.example_user("hamlet")
        email = user_profile.delivery_email
        add_ratelimit_rule(10, 2, domain='authenticate_by_username')

        start_time = time.time()
        # Freeze time so the attempts share one rate-limit window.
        with patch('time.time', return_value=start_time):
            self.login_with_return(email, password="wrongpassword")
            self.assert_logged_in_user_id(None)
            self.login_with_return(email, password="wrongpassword")
            self.assert_logged_in_user_id(None)

            # We're over the allowed limit, so the next attempt, even with the correct
            # password, will get blocked.
            result = self.login_with_return(email)
            self.assert_in_success_response(["Try again in 10 seconds"], result)

        # After time passes, we should be able to log in.
        with patch('time.time', return_value=start_time + 11):
            self.login_with_return(email)
            self.assert_logged_in_user_id(user_profile.id)

        remove_ratelimit_rule(10, 2, domain='authenticate_by_username')

    def test_login_nonexist_user(self) -> None:
        """Unknown email addresses get the generic form error."""
        result = self.login_with_return("xxx@zulip.com", "xxx")
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Please enter a correct email and password", result)
        self.assert_logged_in_user_id(None)

    def test_login_wrong_subdomain(self) -> None:
        """Logging in on a subdomain your account doesn't belong to fails
        with an explanatory message (and logs a warning)."""
        with patch("logging.warning") as mock_warning:
            result = self.login_with_return(self.mit_email("sipbtest"), "xxx")
            mock_warning.assert_called_once()
        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Your Zulip account is not a member of the "
                                "organization associated with this subdomain.", result)
        self.assert_logged_in_user_id(None)

    def test_login_invalid_subdomain(self) -> None:
        """A nonexistent subdomain 404s the login attempt."""
        result = self.login_with_return(self.example_email("hamlet"), "xxx",
                                        subdomain="invalid")
        self.assertEqual(result.status_code, 404)
        self.assert_in_response("There is no Zulip organization hosted at this subdomain.", result)
        self.assert_logged_in_user_id(None)

    def test_register(self) -> None:
        """Registration with many default streams: pin the query and cache
        counts to catch accidental O(streams) behavior."""
        reset_emails_in_zulip_realm()

        realm = get_realm("zulip")
        stream_names = [f"stream_{i}" for i in range(40)]
        for stream_name in stream_names:
            stream = self.make_stream(stream_name, realm=realm)
            DefaultStream.objects.create(stream=stream, realm=realm)

        # Clear all the caches.
        flush_per_request_caches()
        ContentType.objects.clear_cache()

        with queries_captured() as queries, cache_tries_captured() as cache_tries:
            self.register(self.nonreg_email('test'), "test")

        # Ensure the number of queries we make is not O(streams)
        self.assertEqual(len(queries), 72)

        # We can probably avoid a couple cache hits here, but there doesn't
        # seem to be any O(N) behavior. Some of the cache hits are related
        # to sending messages, such as getting the welcome bot, looking up
        # the alert words for a realm, etc.
        self.assertEqual(len(cache_tries), 15)

        user_profile = self.nonreg_user('test')
        self.assert_logged_in_user_id(user_profile.id)
        self.assertFalse(user_profile.enable_stream_desktop_notifications)

    def test_register_deactivated(self) -> None:
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.client_post('/accounts/home/', {'email': self.nonreg_email('test')},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)

        # No account should have been created.
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')

    def test_register_deactivated_partway_through(self) -> None:
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        email = self.nonreg_email('test')
        result = self.client_post('/accounts/home/', {'email': email},
                                  subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertNotIn('deactivated', result.url)

        # Deactivate the realm between the signup email and form submission.
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.submit_reg_form_for_user(email, "abcd1234", subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)

        # No account should have been created.
        with self.assertRaises(UserProfile.DoesNotExist):
            self.nonreg_user('test')

    def test_login_deactivated_realm(self) -> None:
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])

        result = self.login_with_return(self.example_email("hamlet"), subdomain="zulip")
        self.assertEqual(result.status_code, 302)
        self.assertEqual('/accounts/deactivated/', result.url)

    def test_logout(self) -> None:
        self.login('hamlet')
        # We use the logout API, not self.logout, to make sure we test
        # the actual logout code path.
        self.client_post('/accounts/logout/')
        self.assert_logged_in_user_id(None)

    def test_non_ascii_login(self) -> None:
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = self.nonreg_email('test')
        password = "hümbüǵ"

        # Registering succeeds.
        self.register(email, password)
        user_profile = self.nonreg_user('test')
        self.assert_logged_in_user_id(user_profile.id)
        self.logout()
        self.assert_logged_in_user_id(None)

        # Logging in succeeds.
        self.logout()
        self.login_by_email(email, password)
        self.assert_logged_in_user_id(user_profile.id)

    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=False)
    def test_login_page_redirects_logged_in_user(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        self.login('cordelia')
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "http://zulip.testserver")

    def test_options_request_to_login_page(self) -> None:
        # OPTIONS requests to the login page are allowed (CORS preflight).
        response = self.client_options('/login/')
        self.assertEqual(response.status_code, 200)

    @override_settings(TWO_FACTOR_AUTHENTICATION_ENABLED=True)
    def test_login_page_redirects_logged_in_user_under_2fa(self) -> None:
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        user_profile = self.example_user("cordelia")
        self.create_default_device(user_profile)

        self.login('cordelia')
        self.login_2fa(user_profile)

        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "http://zulip.testserver")

    def test_start_two_factor_auth(self) -> None:
        """start_two_factor_auth delegates to TwoFactorLoginView."""
        request = MagicMock(POST={})
        with patch('zerver.views.auth.TwoFactorLoginView') as mock_view:
            mock_view.as_view.return_value = lambda *a, **k: HttpResponse()
            response = start_two_factor_auth(request)
            self.assertTrue(isinstance(response, HttpResponse))

    def test_do_two_factor_login(self) -> None:
        """do_two_factor_login calls through to django_otp's login."""
        user_profile = self.example_user('hamlet')
        self.create_default_device(user_profile)
        request = MagicMock()
        with patch('zerver.decorator.django_otp.login') as mock_login:
            do_two_factor_login(request, user_profile)
            mock_login.assert_called_once()

    def test_zulip_default_context_does_not_load_inline_previews(self) -> None:
        """The realm description on the login page renders image URLs as
        plain links rather than inline image previews."""
        realm = get_realm("zulip")
        description = "https://www.google.com/images/srpr/logo4w.png"
        realm.description = description
        realm.save(update_fields=["description"])
        response = self.client_get("/login/")
        expected_response = """<p><a href="https://www.google.com/images/srpr/logo4w.png">\
https://www.google.com/images/srpr/logo4w.png</a></p>"""
        self.assertEqual(response.context_data["realm_description"], expected_response)
        self.assertEqual(response.status_code, 200)
class InviteUserBase(ZulipTestCase):
    def check_sent_emails(self, correct_recipients: List[str],
                          custom_from_name: Optional[str]=None) -> None:
        """Verify exactly the expected invitation emails were sent.

        Checks the recipient set (order-insensitive) and, on the first
        message, the From display name (when given), the tokenized noreply
        From address, and the List-Id header.
        """
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        email_recipients = [email.recipients()[0] for email in outbox]
        self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
        # Nothing further to check when no email was expected.
        if not outbox:
            return

        if custom_from_name is not None:
            self.assertIn(custom_from_name, outbox[0].from_email)

        self.assertRegex(outbox[0].from_email, fr" <{self.TOKENIZED_NOREPLY_REGEX}>\Z")
        self.assertEqual(outbox[0].extra_headers["List-Id"], "Zulip Dev <zulip.testserver>")

    def invite(self, invitee_emails: str, stream_names: Sequence[str], body: str='',
               invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> HttpResponse:
        """
        Invites the specified users to Zulip with the specified streams.

        users should be a string containing the users to invite, comma or
        newline separated.

        streams should be a list of strings.
        """
        # Resolve stream names to ids with a comprehension (was a manual
        # append loop).
        stream_ids = [self.get_stream_id(stream_name) for stream_name in stream_names]
        return self.client_post("/json/invites",
                                {"invitee_emails": invitee_emails,
                                 "stream_ids": orjson.dumps(stream_ids).decode(),
                                 "invite_as": invite_as})
class InviteUserTest(InviteUserBase):
def test_successful_invite_user(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login('hamlet')
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee], custom_from_name="Hamlet")
def test_newbie_restrictions(self) -> None:
user_profile = self.example_user('hamlet')
invitee = "alice-test@zulip.com"
stream_name = 'Denmark'
self.login_user(user_profile)
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
user_profile.date_joined = timezone_now() - datetime.timedelta(days=10)
user_profile.save()
with self.settings(INVITES_MIN_USER_AGE_DAYS=5):
result = self.invite(invitee, [stream_name])
self.assert_json_success(result)
with self.settings(INVITES_MIN_USER_AGE_DAYS=15):
result = self.invite(invitee, [stream_name])
self.assert_json_error_contains(result, "Your account is too new")
    def test_invite_limits(self) -> None:
        """Exercise the daily invitation limits for new realms, their expiry
        once the realm is old enough, a bumped per-realm max_invites, and the
        OPEN_REALM_CREATION=False bypass."""
        user_profile = self.example_user('hamlet')
        realm = user_profile.realm
        stream_name = 'Denmark'

        # These constants only need to be in descending order
        # for this test to trigger an InvitationError based
        # on max daily counts.
        site_max = 50
        realm_max = 40
        num_invitees = 30
        max_daily_count = 20

        # Rule: at most max_daily_count invites during the realm's first day.
        daily_counts = [(1, max_daily_count)]

        invite_emails = [
            f'foo-{i:02}@zulip.com'
            for i in range(num_invitees)
        ]
        invitees = ','.join(invite_emails)

        self.login_user(user_profile)

        # Make the realm brand new so the daily-count limits apply.
        realm.max_invites = realm_max
        realm.date_created = timezone_now()
        realm.save()

        def try_invite() -> HttpResponse:
            # Helper: send the batch invite under the limit settings above.
            with self.settings(OPEN_REALM_CREATION=True,
                               INVITES_DEFAULT_REALM_DAILY_MAX=site_max,
                               INVITES_NEW_REALM_LIMIT_DAYS=daily_counts):
                result = self.invite(invitees, [stream_name])
                return result

        # 30 invitees > 20/day limit for a day-old realm: rejected.
        result = try_invite()
        self.assert_json_error_contains(result, 'enough remaining invites')

        # Next show that aggregate limits expire once the realm is old
        # enough.
        realm.date_created = timezone_now() - datetime.timedelta(days=8)
        realm.save()

        with queries_captured() as queries:
            with cache_tries_captured() as cache_tries:
                result = try_invite()

        self.assert_json_success(result)

        # TODO: Fix large query count here.
        #
        # TODO: There is some test OTHER than this one
        #       that is leaking some kind of state change
        #       that throws off the query count here.  It
        #       is hard to investigate currently (due to
        #       the large number of queries), so I just
        #       use an approximate equality check.
        actual_count = len(queries)
        expected_count = 281
        if abs(actual_count - expected_count) > 1:
            raise AssertionError(f'''
                Unexpected number of queries:

                expected query count: {expected_count}
                actual: {actual_count}
                ''')

        # Almost all of these cache hits are to re-fetch each one of the
        # invitees.  These happen inside our queue processor for sending
        # confirmation emails, so they are somewhat difficult to avoid.
        #
        # TODO: Mock the call to queue_json_publish, so we can measure the
        # queue impact separately from the user-perceived impact.
        self.assert_length(cache_tries, 32)

        # Next get line coverage on bumping a realm's max_invites.
        realm.date_created = timezone_now()
        realm.max_invites = site_max + 10
        realm.save()

        result = try_invite()
        self.assert_json_success(result)

        # Finally get coverage on the case that OPEN_REALM_CREATION is False.
        with self.settings(OPEN_REALM_CREATION=False):
            result = self.invite(invitees, [stream_name])

        self.assert_json_success(result)
def test_cross_realm_bot(self) -> None:
inviter = self.example_user('hamlet')
self.login_user(inviter)
cross_realm_bot_email = 'emailgateway@zulip.com'
legit_new_email = 'fred@zulip.com'
invitee_emails = ','.join([cross_realm_bot_email, legit_new_email])
result = self.invite(invitee_emails, ['Denmark'])
self.assert_json_error(
result,
"Some of those addresses are already using Zulip," +
" so we didn't send them an invitation." +
" We did send invitations to everyone else!")
def test_invite_mirror_dummy_user(self) -> None:
'''
A mirror dummy account is a temporary account
that we keep in our system if we are mirroring
data from something like Zephyr or IRC.
We want users to eventually just sign up or
register for Zulip, in which case we will just
fully "activate" the account.
Here we test that you can invite a person who
has a mirror dummy account.
'''
inviter = self.example_user('hamlet')
self.login_user(inviter)
mirror_user = self.example_user('cordelia')
mirror_user.is_mirror_dummy = True
mirror_user.is_active = False
mirror_user.save()
self.assertEqual(
PreregistrationUser.objects.filter(email=mirror_user.email).count(),
0,
)
result = self.invite(mirror_user.email, ['Denmark'])
self.assert_json_success(result)
prereg_user = PreregistrationUser.objects.get(email=mirror_user.email)
self.assertEqual(
prereg_user.referred_by.email,
inviter.email,
)
def test_successful_invite_user_as_owner_from_owner_account(self) -> None:
self.login('desdemona')
invitee = self.nonreg_email('alice')
result = self.invite(invitee, ["Denmark"],
invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
self.assert_json_success(result)
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertTrue(invitee_profile.is_realm_owner)
self.assertFalse(invitee_profile.is_guest)
def test_invite_user_as_owner_from_admin_account(self) -> None:
self.login('iago')
invitee = self.nonreg_email('alice')
response = self.invite(invitee, ["Denmark"],
invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
self.assert_json_error(response, "Must be an organization owner")
def test_successful_invite_user_as_admin_from_admin_account(self) -> None:
self.login('iago')
invitee = self.nonreg_email('alice')
result = self.invite(invitee, ["Denmark"],
invite_as=PreregistrationUser.INVITE_AS['REALM_ADMIN'])
self.assert_json_success(result)
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertTrue(invitee_profile.is_realm_admin)
self.assertFalse(invitee_profile.is_realm_owner)
self.assertFalse(invitee_profile.is_guest)
def test_invite_user_as_admin_from_normal_account(self) -> None:
    """A regular member may not create realm-admin invitations."""
    self.login('hamlet')
    email_to_invite = self.nonreg_email('alice')
    result = self.invite(
        email_to_invite,
        ["Denmark"],
        invite_as=PreregistrationUser.INVITE_AS['REALM_ADMIN'],
    )
    self.assert_json_error(result, "Must be an organization administrator")
def test_invite_user_as_invalid_type(self) -> None:
"""
Test inviting a user as invalid type of user i.e. type of invite_as
is not in PreregistrationUser.INVITE_AS
"""
self.login('iago')
invitee = self.nonreg_email('alice')
# 10 is deliberately outside the PreregistrationUser.INVITE_AS values.
response = self.invite(invitee, ["Denmark"], invite_as=10)
self.assert_json_error(response, "Must be invited as an valid type of user")
def test_successful_invite_user_as_guest_from_normal_account(self) -> None:
"""A regular member can invite a new user as a guest; after signup the
invitee is a guest and not an admin."""
self.login('hamlet')
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, ["Denmark"],
invite_as=PreregistrationUser.INVITE_AS['GUEST_USER']))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertFalse(invitee_profile.is_realm_admin)
self.assertTrue(invitee_profile.is_guest)
def test_successful_invite_user_as_guest_from_admin_account(self) -> None:
"""An admin can invite a new user as a guest; after signup the invitee
is a guest and not an admin."""
self.login('iago')
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, ["Denmark"],
invite_as=PreregistrationUser.INVITE_AS['GUEST_USER']))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
self.assertFalse(invitee_profile.is_realm_admin)
self.assertTrue(invitee_profile.is_guest)
def test_successful_invite_user_with_name(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login('hamlet')
email = "alice-test@zulip.com"
# Invitee given in "Full Name <email>" form; the key is stored by email.
invitee = f"Alice Test <{email}>"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
# The invitation email's From name should be the inviter's full name.
self.check_sent_emails([email], custom_from_name="Hamlet")
def test_successful_invite_user_with_name_and_normal_one(self) -> None:
"""
A call to /json/invites with valid parameters causes an invitation
email to be sent.
"""
self.login('hamlet')
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
# Mixes the "Full Name <email>" form with a bare address in one request.
invitee = f"Alice Test <{email}>, {email2}"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2], custom_from_name="Hamlet")
def test_require_realm_admin(self) -> None:
"""
The invite_by_admins_only realm setting works properly.
"""
realm = get_realm('zulip')
realm.invite_by_admins_only = True
realm.save()
# A non-admin member is rejected when invites are admin-only.
self.login('hamlet')
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = f"Alice Test <{email}>, {email2}"
self.assert_json_error(self.invite(invitee, ["Denmark"]),
"Must be an organization administrator")
# Now verify an administrator can do it
self.login('iago')
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self) -> None:
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login('hamlet')
user_profile = self.example_user('hamlet')
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe(user_profile, private_stream_name)
# One message in a public stream, one in the private stream.
public_msg_id = self.send_stream_message(
self.example_user("hamlet"),
"Denmark",
topic_name="Public topic",
content="Public message",
)
secret_msg_id = self.send_stream_message(
self.example_user("hamlet"),
private_stream_name,
topic_name="Secret topic",
content="Secret message",
)
invitee = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user(invitee, "password")
invitee_profile = self.nonreg_user('alice')
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
# The new user sees the public message but not the private one.
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
self.assertFalse(invitee_profile.is_realm_admin)
# Test that exactly 2 new Zulip messages were sent, both notifications.
last_3_messages = list(reversed(list(Message.objects.all().order_by("-id")[0:3])))
first_msg = last_3_messages[0]
self.assertEqual(first_msg.id, secret_msg_id)
# The first, from notification-bot to the user who invited the new user.
second_msg = last_3_messages[1]
self.assertEqual(second_msg.sender.email, "notification-bot@zulip.com")
self.assertTrue(second_msg.content.startswith(
f"alice_zulip.com <`{invitee_profile.email}`> accepted your",
))
# The second, from welcome-bot to the user who was invited.
third_msg = last_3_messages[2]
self.assertEqual(third_msg.sender.email, "welcome-bot@zulip.com")
self.assertTrue(third_msg.content.startswith("Hello, and welcome to Zulip!"))
def test_multi_user_invite(self) -> None:
"""
Invites multiple users with a variety of delimiters.
"""
self.login('hamlet')
# Intentionally use a weird string: commas, newlines, and bare
# whitespace all act as address separators.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email(f"{user}-test@zulip.com"))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_max_invites_model(self) -> None:
"""The Realm.max_invites field defaults to the server setting and
persists explicit overrides."""
realm = get_realm("zulip")
self.assertEqual(realm.max_invites, settings.INVITES_DEFAULT_REALM_DAILY_MAX)
realm.max_invites = 3
realm.save()
self.assertEqual(get_realm("zulip").max_invites, 3)
# Restore the default so later tests are unaffected.
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.save()
def test_invite_too_many_users(self) -> None:
"""Exceeding the realm's daily invite quota returns an error."""
# Only a light test of this pathway; e.g. doesn't test that
# the limit gets reset after 24 hours
self.login('iago')
invitee_emails = "1@zulip.com, 2@zulip.com"
self.invite(invitee_emails, ["Denmark"])
# Enough additional addresses to exceed the remaining quota; the
# limit check counts addresses before validating them.
invitee_emails = ", ".join(str(i) for i in range(get_realm("zulip").max_invites - 1))
self.assert_json_error(self.invite(invitee_emails, ["Denmark"]),
"You do not have enough remaining invites. "
"Please contact desdemona+admin@zulip.com to have your limit raised. "
"No invitations were sent.")
def test_missing_or_invalid_params(self) -> None:
"""
Tests inviting with various missing or invalid parameters.
"""
realm = get_realm('zulip')
do_set_realm_property(realm, 'emails_restricted_to_domains', True)
self.login('hamlet')
invitee_emails = "foo@zulip.com"
# Missing stream list.
self.assert_json_error(self.invite(invitee_emails, []),
"You must specify at least one stream for invitees to join.")
# Malformed address and address outside the restricted domain.
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
# Empty email list.
self.assert_json_error(
self.invite("", ["Denmark"]),
"You must specify at least one email address.")
self.check_sent_emails([])
def test_guest_user_invitation(self) -> None:
    """
    Guest user can't invite new users
    """
    self.login('polonius')
    guest_invitee = "alice-test@zulip.com"
    result = self.invite(guest_invitee, ["Denmark"])
    self.assert_json_error(result, "Not allowed for guest users")
    # Neither a confirmation key nor an email should have been produced.
    self.assertEqual(find_key_by_email(guest_invitee), None)
    self.check_sent_emails([])
def test_invalid_stream(self) -> None:
    """
    Tests inviting to a non-existent stream.
    """
    self.login('hamlet')
    result = self.invite("iago-test@zulip.com", ["NotARealStream"])
    self.assert_json_error(
        result,
        f"Stream does not exist with id: {self.INVALID_STREAM_ID}. No invites were sent.",
    )
    self.check_sent_emails([])
def test_invite_existing_user(self) -> None:
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login('hamlet')
# Mixed case verifies the existing-account check is case-insensitive.
hamlet_email = 'hAmLeT@zUlIp.com'
result = self.invite(hamlet_email, ["Denmark"])
self.assert_json_error(result, "We weren't able to invite anyone.")
self.assertFalse(
PreregistrationUser.objects.filter(email__iexact=hamlet_email).exists(),
)
self.check_sent_emails([])
def normalize_string(self, s: str) -> str:
    """Collapse every run of whitespace in *s* to a single space and
    strip leading/trailing whitespace."""
    return re.sub(r'\s+', ' ', s.strip())
def test_invite_links_in_name(self) -> None:
"""
Invitation emails must not let an inviter's full name containing
HTML/links escape into linkifiable content in the email body.
"""
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
# Test we properly handle links in user full names
do_change_full_name(hamlet, "</a> https://www.google.com", hamlet)
result = self.invite('newuser@zulip.com', ["Denmark"])
self.assert_json_success(result)
self.check_sent_emails(['newuser@zulip.com'])
from django.core.mail import outbox
body = self.normalize_string(outbox[0].alternatives[0][0])
# Verify that one can't get Zulip to send invitation emails
# that third-party products will linkify using the full_name
# field, because we've included that field inside the mailto:
# link for the sender.
self.assertIn('<a href="mailto:hamlet@zulip.com" style="color:#46aa8f; text-decoration:underline"></a> https://www.google.com (hamlet@zulip.com)</a> wants', body)
# TODO: Ideally, this test would also test the Invitation
# Reminder email generated, but the test setup for that is
# annoying.
def test_invite_some_existing_some_new(self) -> None:
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login('hamlet')
existing = [self.example_email("hamlet"), "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
invitee_emails = "\n".join(existing + new)
self.assert_json_error(self.invite(invitee_emails, ["Denmark"]),
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = PreregistrationUser.objects.get(email='foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self) -> None:
    """
    In a realm with `emails_restricted_to_domains = True`, you can't invite people
    with a different domain from that of the realm or your e-mail address.
    """
    realm = get_realm("zulip")
    realm.emails_restricted_to_domains = True
    realm.save()
    self.login('hamlet')
    result = self.invite("foo@example.com", ["Denmark"])
    self.assert_json_error(
        result,
        "Some emails did not validate, so we didn't send any invitations.")
def test_invite_using_disposable_email(self) -> None:
    """
    In a realm with `disallow_disposable_email_addresses = True`, you can't invite
    people with a disposable domain.
    """
    realm = get_realm("zulip")
    realm.emails_restricted_to_domains = False
    realm.disallow_disposable_email_addresses = True
    realm.save()
    self.login('hamlet')
    result = self.invite("foo@mailnator.com", ["Denmark"])
    self.assert_json_error(
        result,
        "Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self) -> None:
    """
    In a realm with `emails_restricted_to_domains = False`, you can invite people
    with a different domain from that of the realm or your e-mail address.
    """
    realm = get_realm("zulip")
    realm.emails_restricted_to_domains = False
    realm.save()
    self.login('hamlet')
    outside_email = "foo@example.com"
    self.assert_json_success(self.invite(outside_email, ["Denmark"]))
    self.check_sent_emails([outside_email])
def test_invite_outside_domain_before_closing(self) -> None:
"""
If you invite someone with a different domain from that of the realm
when `emails_restricted_to_domains = False`, but `emails_restricted_to_domains` later
changes to true, the invitation should succeed but the invitee's signup
attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.save()
self.login('hamlet')
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
# Tighten the domain restriction after the invite was sent.
zulip_realm.emails_restricted_to_domains = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@example.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("only allows users with email addresses", result)
def test_disposable_emails_before_closing(self) -> None:
"""
If you invite someone with a disposable email when
`disallow_disposable_email_addresses = False`, but
later changes to true, the invitation should succeed
but the invitee's signup attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.disallow_disposable_email_addresses = False
zulip_realm.save()
self.login('hamlet')
external_address = "foo@mailnator.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
# Disallow disposable addresses after the invite was sent.
zulip_realm.disallow_disposable_email_addresses = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@mailnator.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("Please sign up using a real email address.", result)
def test_invite_with_email_containing_plus_before_closing(self) -> None:
"""
If you invite someone with an email containing plus when
`emails_restricted_to_domains = False`, but later change
`emails_restricted_to_domains = True`, the invitation should
succeed but the invitee's signup attempt should fail as
users are not allowed to signup using email containing +
when the realm is restricted to domain.
"""
zulip_realm = get_realm("zulip")
zulip_realm.emails_restricted_to_domains = False
zulip_realm.save()
self.login('hamlet')
external_address = "foo+label@zulip.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
# Restrict the realm to its domain after the invite was sent.
zulip_realm.emails_restricted_to_domains = True
zulip_realm.save()
result = self.submit_reg_form_for_user(external_address, "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("Zulip Dev, does not allow signups using emails\n that contains +", result)
def test_invalid_email_check_after_confirming_email(self) -> None:
"""If the PreregistrationUser's email becomes invalid after the
confirmation link was issued, signup must be rejected."""
self.login('hamlet')
email = "test@zulip.com"
self.assert_json_success(self.invite(email, ["Denmark"]))
obj = Confirmation.objects.get(confirmation_key=find_key_by_email(email))
prereg_user = obj.content_object
# Corrupt the stored email behind the already-issued confirmation.
prereg_user.email = "invalid.email"
prereg_user.save()
result = self.submit_reg_form_for_user(email, "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("The email address you are trying to sign up with is not valid", result)
def test_invite_with_non_ascii_streams(self) -> None:
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login('hamlet')
invitee = "alice-test@zulip.com"
stream_name = "hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe(self.example_user("hamlet"), stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_invitation_reminder_email(self) -> None:
"""Scheduled invitation-reminder emails get delivered, and signing
up clears any pending reminder for that invitee."""
from django.core.mail import outbox
# All users belong to zulip realm
referrer_name = 'hamlet'
current_user = self.example_user(referrer_name)
self.login_user(current_user)
invitee_email = self.nonreg_email('alice')
self.assert_json_success(self.invite(invitee_email, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee_email))
self.check_sent_emails([invitee_email])
data = {"email": invitee_email, "referrer_email": current_user.email}
invitee = PreregistrationUser.objects.get(email=data["email"])
referrer = self.example_user(referrer_name)
link = create_confirmation_link(invitee, Confirmation.INVITATION)
context = common_context(referrer)
context.update(
activate_url=link,
referrer_name=referrer.full_name,
referrer_email=referrer.email,
referrer_realm_name=referrer.realm.name,
)
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
email = data["email"]
send_future_email(
"zerver/emails/invitation_reminder", referrer.realm, to_emails=[email],
from_address=FromAddress.no_reply_placeholder, context=context)
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
deliver_email(job)
# Delivering the scheduled job adds exactly one email to the outbox,
# sent from the no-reply address.
self.assertEqual(len(outbox), email_count + 1)
self.assertIn(FromAddress.NOREPLY, outbox[-1].from_email)
# Now verify that signing up clears invite_reminder emails
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
email = data["email"]
send_future_email(
"zerver/emails/invitation_reminder", referrer.realm, to_emails=[email],
from_address=FromAddress.no_reply_placeholder, context=context)
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 1)
self.register(invitee_email, "test")
email_jobs_to_deliver = ScheduledEmail.objects.filter(
scheduled_timestamp__lte=timezone_now(), type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(len(email_jobs_to_deliver), 0)
def test_no_invitation_reminder_when_link_expires_quickly(self) -> None:
"""Reminder emails are only scheduled when the invitation link lives
long enough for the reminder to be useful."""
self.login('hamlet')
# Check invitation reminder email is scheduled with 4 day link expiry
with self.settings(INVITATION_LINK_VALIDITY_DAYS=4):
self.invite('alice@zulip.com', ['Denmark'])
self.assertEqual(ScheduledEmail.objects.filter(type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
# Check invitation reminder email is not scheduled with 3 day link expiry
# (the count stays at 1 — no new reminder was added).
with self.settings(INVITATION_LINK_VALIDITY_DAYS=3):
self.invite('bob@zulip.com', ['Denmark'])
self.assertEqual(ScheduledEmail.objects.filter(type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
# make sure users can't take a valid confirmation key from another
# pathway and use it with the invitation URL route
def test_confirmation_key_of_wrong_type(self) -> None:
"""A confirmation key issued for one Confirmation type must not be
accepted when presented as a different type."""
email = self.nonreg_email("alice")
realm = get_realm('zulip')
inviter = self.example_user('iago')
prereg_user = PreregistrationUser.objects.create(
email=email, referred_by=inviter, realm=realm)
url = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
# Mainly a test of get_object_from_key, rather than of the invitation pathway
with self.assertRaises(ConfirmationKeyException) as cm:
get_object_from_key(registration_key, Confirmation.INVITATION)
self.assertEqual(cm.exception.error_type, ConfirmationKeyException.DOES_NOT_EXIST)
# Verify that using the wrong type doesn't work in the main confirm code path
email_change_url = create_confirmation_link(prereg_user, Confirmation.EMAIL_CHANGE)
email_change_key = email_change_url.split('/')[-1]
url = '/accounts/do_confirm/' + email_change_key
result = self.client_get(url)
self.assert_in_success_response(["Whoops. We couldn't find your "
"confirmation link in the system."], result)
def test_confirmation_expired(self) -> None:
"""An expired confirmation link shows the expired-link error page."""
email = self.nonreg_email("alice")
realm = get_realm('zulip')
inviter = self.example_user('iago')
prereg_user = PreregistrationUser.objects.create(
email=email, referred_by=inviter, realm=realm)
url = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
conf = Confirmation.objects.filter(confirmation_key=registration_key).first()
# Backdate the confirmation by three weeks so the link has expired.
conf.date_sent -= datetime.timedelta(weeks=3)
conf.save()
target_url = '/' + url.split('/', 3)[3]
result = self.client_get(target_url)
self.assert_in_success_response(["Whoops. The confirmation link has expired "
"or been deactivated."], result)
def test_send_more_than_one_invite_to_same_user(self) -> None:
"""When a user was invited multiple times, accepting one invitation
marks it active and revokes all the others."""
self.user_profile = self.example_user('iago')
streams = []
for stream_name in ["Denmark", "Scotland"]:
streams.append(get_stream(stream_name, self.user_profile.realm))
do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
prereg_user = PreregistrationUser.objects.get(email="foo@zulip.com")
# Two further invitations to the same address.
do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
invites = PreregistrationUser.objects.filter(email__iexact="foo@zulip.com")
self.assertEqual(len(invites), 3)
do_create_user(
'foo@zulip.com',
'password',
self.user_profile.realm,
'full name',
prereg_user=prereg_user,
)
accepted_invite = PreregistrationUser.objects.filter(
email__iexact="foo@zulip.com", status=confirmation_settings.STATUS_ACTIVE)
revoked_invites = PreregistrationUser.objects.filter(
email__iexact="foo@zulip.com", status=confirmation_settings.STATUS_REVOKED)
# If a user was invited more than once, when it accepts one invite and register
# the others must be canceled.
self.assertEqual(len(accepted_invite), 1)
self.assertEqual(accepted_invite[0].id, prereg_user.id)
expected_revoked_invites = set(invites.exclude(id=prereg_user.id))
self.assertEqual(set(revoked_invites), expected_revoked_invites)
def test_confirmation_obj_not_exist_error(self) -> None:
    """ Since the key is a param input by the user to the registration endpoint,
    if it inserts an invalid value, the confirmation object won't be found. This
    tests if, in that scenario, we handle the exception by redirecting the user to
    the confirmation_link_expired_error page.
    """
    email = self.nonreg_email('alice')
    password = 'password'
    realm = get_realm('zulip')
    inviter = self.example_user('iago')
    prereg_user = PreregistrationUser.objects.create(
        email=email, referred_by=inviter, realm=realm)
    confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
    registration_key = 'invalid_confirmation_key'
    url = '/accounts/register/'
    # Fixed: the POST parameter is 'full_name' (the earlier 'full_nme'
    # typo meant no full name was ever actually submitted; see the
    # sibling test test_validate_email_not_already_in_realm).
    response = self.client_post(url, {'key': registration_key, 'from_confirmation': 1, 'full_name': 'alice'})
    self.assertEqual(response.status_code, 200)
    self.assert_in_success_response(['The registration link has expired or is not valid.'], response)
    # With the real key, the normal registration flow proceeds.
    registration_key = confirmation_link.split('/')[-1]
    response = self.client_post(url, {'key': registration_key, 'from_confirmation': 1, 'full_name': 'alice'})
    self.assert_in_success_response(['We just need you to do one last thing.'], response)
    response = self.submit_reg_form_for_user(email, password, key=registration_key)
    self.assertEqual(response.status_code, 302)
def test_validate_email_not_already_in_realm(self) -> None:
"""Re-using a registration key after the account already exists
redirects to the login page with the email prefilled."""
email = self.nonreg_email('alice')
password = 'password'
realm = get_realm('zulip')
inviter = self.example_user('iago')
prereg_user = PreregistrationUser.objects.create(
email=email, referred_by=inviter, realm=realm)
confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
registration_key = confirmation_link.split('/')[-1]
url = "/accounts/register/"
self.client_post(url, {"key": registration_key, "from_confirmation": 1, "full_name": "alice"})
self.submit_reg_form_for_user(email, password, key=registration_key)
# Second attempt with the same key: the account now exists.
url = "/accounts/register/"
response = self.client_post(url, {"key": registration_key, "from_confirmation": 1, "full_name": "alice"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('login') + '?' +
urlencode({"email": email}))
class InvitationsTestCase(InviteUserBase):
def test_do_get_user_invites(self) -> None:
"""do_get_user_invites returns the invitations visible to the given
user and excludes invitations from other realms."""
self.login('iago')
user_profile = self.example_user("iago")
hamlet = self.example_user('hamlet')
othello = self.example_user('othello')
prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
prereg_user_one.save()
prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
prereg_user_two.save()
prereg_user_three = PreregistrationUser(email="TestThree@zulip.com", referred_by=hamlet)
prereg_user_three.save()
prereg_user_four = PreregistrationUser(email="TestFour@zulip.com", referred_by=othello)
prereg_user_four.save()
# Invitation in a different realm; must not appear in any count below.
prereg_user_other_realm = PreregistrationUser(
email="TestOne@zulip.com", referred_by=self.mit_user("sipbtest"))
prereg_user_other_realm.save()
multiuse_invite = MultiuseInvite.objects.create(referred_by=user_profile, realm=user_profile.realm)
create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)
# NOTE(review): iago's count of 5 presumably reflects admin visibility
# into all realm invitations — confirm against do_get_user_invites.
self.assertEqual(len(do_get_user_invites(user_profile)), 5)
self.assertEqual(len(do_get_user_invites(hamlet)), 1)
self.assertEqual(len(do_get_user_invites(othello)), 1)
def test_successful_get_open_invitations(self) -> None:
"""
A GET call to /json/invites returns all unexpired invitations.
"""
realm = get_realm("zulip")
# Guard against the settings/constants being renamed out from under us.
days_to_activate = getattr(settings, 'INVITATION_LINK_VALIDITY_DAYS', "Wrong")
active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', "Wrong")
self.assertNotEqual(days_to_activate, "Wrong")
self.assertNotEqual(active_value, "Wrong")
self.login('iago')
user_profile = self.example_user("iago")
# Open invitation — should be listed.
prereg_user_one = PreregistrationUser(email="TestOne@zulip.com", referred_by=user_profile)
prereg_user_one.save()
expired_datetime = timezone_now() - datetime.timedelta(days=(days_to_activate+1))
# Expired invitation — should be filtered out.
prereg_user_two = PreregistrationUser(email="TestTwo@zulip.com", referred_by=user_profile)
prereg_user_two.save()
PreregistrationUser.objects.filter(id=prereg_user_two.id).update(invited_at=expired_datetime)
# Already-accepted invitation — should be filtered out.
prereg_user_three = PreregistrationUser(email="TestThree@zulip.com",
referred_by=user_profile, status=active_value)
prereg_user_three.save()
hamlet = self.example_user('hamlet')
othello = self.example_user('othello')
multiuse_invite_one = MultiuseInvite.objects.create(referred_by=hamlet, realm=realm)
create_confirmation_link(multiuse_invite_one, Confirmation.MULTIUSE_INVITE)
multiuse_invite_two = MultiuseInvite.objects.create(referred_by=othello, realm=realm)
create_confirmation_link(multiuse_invite_two, Confirmation.MULTIUSE_INVITE)
# Expire the second multiuse invite's confirmation.
confirmation = Confirmation.objects.last()
confirmation.date_sent = expired_datetime
confirmation.save()
result = self.client_get("/json/invites")
self.assertEqual(result.status_code, 200)
invites = orjson.loads(result.content)["invites"]
# Only the open email invite and the unexpired multiuse invite remain.
self.assertEqual(len(invites), 2)
self.assertFalse(invites[0]["is_multiuse"])
self.assertEqual(invites[0]["email"], "TestOne@zulip.com")
self.assertTrue(invites[1]["is_multiuse"])
self.assertEqual(invites[1]["invited_by_user_id"], hamlet.id)
def test_successful_delete_invitation(self) -> None:
"""
A DELETE call to /json/invites/<ID> should delete the invite and
any scheduled invitation reminder emails.
"""
self.login('iago')
invitee = "DeleteMe@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark']))
prereg_user = PreregistrationUser.objects.get(email=invitee)
# Verify that the scheduled email exists.
ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER)
result = self.client_delete('/json/invites/' + str(prereg_user.id))
self.assertEqual(result.status_code, 200)
# A second delete of the same invitation is an error.
error_result = self.client_delete('/json/invites/' + str(prereg_user.id))
self.assert_json_error(error_result, "No such invitation")
self.assertRaises(ScheduledEmail.DoesNotExist,
lambda: ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER))
def test_successful_member_delete_invitation(self) -> None:
"""
A DELETE call from member account to /json/invites/<ID> should delete the invite and
any scheduled invitation reminder emails.
"""
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
invitee = "DeleteMe@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark']))
# Verify that the scheduled email exists.
prereg_user = PreregistrationUser.objects.get(email=invitee,
referred_by=user_profile)
ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER)
# Verify another non-admin can't delete
result = self.api_delete(self.example_user("othello"),
'/api/v1/invites/' + str(prereg_user.id))
self.assert_json_error(result, "Must be an organization administrator")
# Verify that the scheduled email still exists.
prereg_user = PreregistrationUser.objects.get(email=invitee,
referred_by=user_profile)
ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER)
# Verify deletion works.
result = self.api_delete(user_profile,
'/api/v1/invites/' + str(prereg_user.id))
self.assertEqual(result.status_code, 200)
result = self.api_delete(user_profile,
'/api/v1/invites/' + str(prereg_user.id))
self.assert_json_error(result, "No such invitation")
self.assertRaises(ScheduledEmail.DoesNotExist,
lambda: ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER))
def test_delete_owner_invitation(self) -> None:
"""Only a realm owner may delete an invitation that was issued with
realm-owner permissions."""
self.login('desdemona')
owner = self.example_user('desdemona')
invitee = "DeleteMe@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark'],
invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER']))
prereg_user = PreregistrationUser.objects.get(email=invitee)
# An admin (non-owner) may not delete an owner invitation.
result = self.api_delete(self.example_user('iago'),
'/api/v1/invites/' + str(prereg_user.id))
self.assert_json_error(result, "Must be an organization owner")
result = self.api_delete(owner, '/api/v1/invites/' + str(prereg_user.id))
self.assert_json_success(result)
result = self.api_delete(owner, '/api/v1/invites/' + str(prereg_user.id))
self.assert_json_error(result, "No such invitation")
self.assertRaises(ScheduledEmail.DoesNotExist,
lambda: ScheduledEmail.objects.get(address__iexact=invitee,
type=ScheduledEmail.INVITATION_REMINDER))
def test_delete_multiuse_invite(self) -> None:
"""
A DELETE call to /json/invites/multiuse<ID> should delete the
multiuse_invite.
"""
self.login('iago')
zulip_realm = get_realm("zulip")
multiuse_invite = MultiuseInvite.objects.create(referred_by=self.example_user("hamlet"), realm=zulip_realm)
create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)
result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
self.assertEqual(result.status_code, 200)
self.assertIsNone(MultiuseInvite.objects.filter(id=multiuse_invite.id).first())
# Test that trying to double-delete fails
error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
self.assert_json_error(error_result, "No such invitation")
# Test deleting owner multiuse_invite.
multiuse_invite = MultiuseInvite.objects.create(referred_by=self.example_user("desdemona"), realm=zulip_realm,
invited_as=PreregistrationUser.INVITE_AS['REALM_OWNER'])
create_confirmation_link(multiuse_invite, Confirmation.MULTIUSE_INVITE)
# An admin (iago) may not delete an owner-level multiuse invite.
error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
self.assert_json_error(error_result, 'Must be an organization owner')
self.login('desdemona')
result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite.id))
self.assert_json_success(result)
self.assertIsNone(MultiuseInvite.objects.filter(id=multiuse_invite.id).first())
# Test deleting multiuse invite from another realm
mit_realm = get_realm("zephyr")
multiuse_invite_in_mit = MultiuseInvite.objects.create(referred_by=self.mit_user("sipbtest"), realm=mit_realm)
create_confirmation_link(multiuse_invite_in_mit, Confirmation.MULTIUSE_INVITE)
error_result = self.client_delete('/json/invites/multiuse/' + str(multiuse_invite_in_mit.id))
self.assert_json_error(error_result, "No such invitation")
def test_successful_resend_invitation(self) -> None:
"""
A POST call to /json/invites/<ID>/resend should send an invitation reminder email
and delete any scheduled invitation reminder email.
"""
self.login('iago')
invitee = "resend_me@zulip.com"
self.assert_json_success(self.invite(invitee, ['Denmark']))
prereg_user = PreregistrationUser.objects.get(email=invitee)
# Verify and then clear from the outbox the original invite email
self.check_sent_emails([invitee], custom_from_name="Zulip")
from django.core.mail import outbox
outbox.pop()
# Verify that the scheduled email exists.
scheduledemail_filter = ScheduledEmail.objects.filter(
address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
self.assertEqual(scheduledemail_filter.count(), 1)
original_timestamp = scheduledemail_filter.values_list('scheduled_timestamp', flat=True)
# Resend invite
result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
self.assertEqual(ScheduledEmail.objects.filter(
address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)
# Check that we have exactly one scheduled email, and that it is different
self.assertEqual(scheduledemail_filter.count(), 1)
self.assertNotEqual(original_timestamp,
scheduledemail_filter.values_list('scheduled_timestamp', flat=True))
self.assertEqual(result.status_code, 200)
# Resending a non-existent invitation id is an error.
error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
self.assert_json_error(error_result, "No such invitation")
self.check_sent_emails([invitee], custom_from_name="Zulip")
def test_successful_member_resend_invitation(self) -> None:
    """A POST call from a member account to /json/invites/<ID>/resend
    should send an invitation reminder email and delete any
    scheduled invitation reminder email if they sent the invite.
    """
    self.login('hamlet')
    user_profile = self.example_user('hamlet')
    invitee = "resend_me@zulip.com"
    self.assert_json_success(self.invite(invitee, ['Denmark']))
    # Verify hamlet has only one invitation (Member can resend invitations only sent by him).
    invitation = PreregistrationUser.objects.filter(referred_by=user_profile)
    self.assertEqual(len(invitation), 1)
    prereg_user = PreregistrationUser.objects.get(email=invitee)

    # Verify and then clear from the outbox the original invite email
    self.check_sent_emails([invitee], custom_from_name="Zulip")
    from django.core.mail import outbox
    outbox.pop()

    # Verify that the scheduled email exists.
    scheduledemail_filter = ScheduledEmail.objects.filter(
        address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
    self.assertEqual(scheduledemail_filter.count(), 1)
    # Materialize the timestamps NOW: values_list() is lazy and QuerySet has
    # no __eq__, so comparing two QuerySet objects with assertNotEqual would
    # pass vacuously by identity.
    original_timestamp = list(
        scheduledemail_filter.values_list('scheduled_timestamp', flat=True))

    # Resend invite
    result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
    self.assertEqual(ScheduledEmail.objects.filter(
        address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)

    # Check that we have exactly one scheduled email, and that its
    # scheduled timestamp actually changed.
    self.assertEqual(scheduledemail_filter.count(), 1)
    self.assertNotEqual(original_timestamp,
                        list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True)))

    self.assertEqual(result.status_code, 200)
    error_result = self.client_post('/json/invites/' + str(9999) + '/resend')
    self.assert_json_error(error_result, "No such invitation")
    self.check_sent_emails([invitee], custom_from_name="Zulip")

    # A member may not resend an invitation created by someone else.
    self.logout()
    self.login("othello")
    invitee = "TestOne@zulip.com"
    prereg_user_one = PreregistrationUser(email=invitee, referred_by=user_profile)
    prereg_user_one.save()
    prereg_user = PreregistrationUser.objects.get(email=invitee)
    error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
    self.assert_json_error(error_result, "Must be an organization administrator")
def test_resend_owner_invitation(self) -> None:
    """Only organization owners may resend an owner-level invitation."""
    self.login("desdemona")

    invitee = "resend_owner@zulip.com"
    self.assert_json_success(self.invite(invitee, ['Denmark'],
                                         invite_as=PreregistrationUser.INVITE_AS['REALM_OWNER']))
    self.check_sent_emails([invitee], custom_from_name="Zulip")
    scheduledemail_filter = ScheduledEmail.objects.filter(
        address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER)
    self.assertEqual(scheduledemail_filter.count(), 1)
    # Materialize the timestamps NOW: values_list() is lazy and QuerySet has
    # no __eq__, so assertNotEqual between two QuerySet objects would pass
    # vacuously by identity rather than comparing values.
    original_timestamp = list(
        scheduledemail_filter.values_list('scheduled_timestamp', flat=True))

    # Test only organization owners can resend owner invitation.
    self.login('iago')
    prereg_user = PreregistrationUser.objects.get(email=invitee)
    error_result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
    self.assert_json_error(error_result, "Must be an organization owner")

    self.login('desdemona')
    result = self.client_post('/json/invites/' + str(prereg_user.id) + '/resend')
    self.assert_json_success(result)

    self.assertEqual(ScheduledEmail.objects.filter(
        address__iexact=invitee, type=ScheduledEmail.INVITATION_REMINDER).count(), 1)

    # Check that we have exactly one scheduled email, and that its
    # scheduled timestamp actually changed.
    self.assertEqual(scheduledemail_filter.count(), 1)
    self.assertNotEqual(original_timestamp,
                        list(scheduledemail_filter.values_list('scheduled_timestamp', flat=True)))
def test_accessing_invites_in_another_realm(self) -> None:
    """An invitation belonging to a different realm must look nonexistent
    to this realm's admins, for both resend and revoke."""
    foreign_inviter = UserProfile.objects.exclude(realm=get_realm('zulip')).first()
    foreign_invite = PreregistrationUser.objects.create(
        email='email', referred_by=foreign_inviter, realm=foreign_inviter.realm)

    self.login('iago')
    resend_response = self.client_post(f'/json/invites/{foreign_invite.id}/resend')
    self.assert_json_error(resend_response, "No such invitation")
    revoke_response = self.client_delete(f'/json/invites/{foreign_invite.id}')
    self.assert_json_error(revoke_response, "No such invitation")
def test_prereg_user_status(self) -> None:
    """A PreregistrationUser starts at status 0 and is only marked
    STATUS_ACTIVE once the registration form is actually submitted."""
    email = self.nonreg_email("alice")
    password = "password"
    realm = get_realm('zulip')

    # NOTE(review): assumes UserProfile.objects.first() belongs to the
    # 'zulip' realm -- depends on test-database ordering; confirm.
    inviter = UserProfile.objects.first()
    prereg_user = PreregistrationUser.objects.create(
        email=email, referred_by=inviter, realm=realm)

    confirmation_link = create_confirmation_link(prereg_user, Confirmation.USER_REGISTRATION)
    registration_key = confirmation_link.split('/')[-1]

    # Merely visiting the confirmation step must NOT activate the invite.
    result = self.client_post(
        "/accounts/register/",
        {"key": registration_key,
         "from_confirmation": "1",
         "full_name": "alice"})
    self.assertEqual(result.status_code, 200)
    confirmation = Confirmation.objects.get(confirmation_key=registration_key)
    prereg_user = confirmation.content_object
    self.assertEqual(prereg_user.status, 0)

    # Submitting the registration form completes signup (302 redirect) and
    # flips the invite's status to STATUS_ACTIVE.
    result = self.submit_reg_form_for_user(email, password, key=registration_key)
    self.assertEqual(result.status_code, 302)
    prereg_user = PreregistrationUser.objects.get(
        email=email, referred_by=inviter, realm=realm)
    self.assertEqual(prereg_user.status, confirmation_settings.STATUS_ACTIVE)
    user = get_user_by_delivery_email(email, realm)
    self.assertIsNotNone(user)
    self.assertEqual(user.delivery_email, email)
class InviteeEmailsParserTests(ZulipTestCase):
    """Exercise get_invitee_emails_set() against the various formats users
    paste address lists in: comma-separated, newline-separated, and the
    email-client "Name <address>" style."""

    def setUp(self) -> None:
        super().setUp()
        self.email1 = "email1@zulip.com"
        self.email2 = "email2@zulip.com"
        self.email3 = "email3@zulip.com"

    def _assert_parses_to_all_three(self, emails_raw: str) -> None:
        # Every input variant should reduce to the same three bare addresses.
        self.assertEqual(get_invitee_emails_set(emails_raw),
                         {self.email1, self.email2, self.email3})

    def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self) -> None:
        self._assert_parses_to_all_three(f"{self.email1} ,{self.email2}, {self.email3}")

    def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self) -> None:
        self._assert_parses_to_all_three(f"{self.email1}\n {self.email2}\n {self.email3} ")

    def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self) -> None:
        self._assert_parses_to_all_three(
            f"Email One <{self.email1}>\nEmailTwo<{self.email2}>\nEmail Three<{self.email3}>")

    def test_if_emails_in_mixed_style_are_parsed_correctly(self) -> None:
        self._assert_parses_to_all_three(
            f"Email One <{self.email1}>,EmailTwo<{self.email2}>\n{self.email3}")
class MultiuseInviteTest(ZulipTestCase):
    """Tests for multiuse invitation links: generation, registration via
    the link, expiry, stream subscriptions, and the /json/invites/multiuse
    API endpoint's permission checks."""

    def setUp(self) -> None:
        super().setUp()
        self.realm = get_realm('zulip')
        # Require invitations so the multiuse link is actually exercised.
        self.realm.invite_required = True
        self.realm.save()

    def generate_multiuse_invite_link(self, streams: Optional[List[Stream]]=None,
                                      date_sent: Optional[datetime.datetime]=None) -> str:
        """Create a MultiuseInvite (referred by iago) plus its Confirmation
        row, and return the /join/... confirmation URL.

        date_sent lets callers backdate the link to test expiry.
        """
        invite = MultiuseInvite(realm=self.realm, referred_by=self.example_user("iago"))
        invite.save()

        if streams is not None:
            invite.streams.set(streams)

        if date_sent is None:
            date_sent = timezone_now()
        key = generate_key()
        Confirmation.objects.create(content_object=invite, date_sent=date_sent,
                                    confirmation_key=key, type=Confirmation.MULTIUSE_INVITE)

        return confirmation_url(key, self.realm, Confirmation.MULTIUSE_INVITE)

    def check_user_able_to_register(self, email: str, invite_link: str) -> None:
        """Walk the full signup flow via invite_link and assert each step:
        email submission, confirmation email, and registration form."""
        password = "password"

        result = self.client_post(invite_link, {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)

        # Drop the confirmation email so later outbox checks start clean.
        from django.core.mail import outbox
        outbox.pop()

    def test_valid_multiuse_link(self) -> None:
        # One link (still inside its validity window) should admit
        # multiple distinct users.
        email1 = self.nonreg_email("test")
        email2 = self.nonreg_email("test1")
        email3 = self.nonreg_email("alice")
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS - 1)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)

        self.check_user_able_to_register(email1, invite_link)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_able_to_register(email3, invite_link)

    def test_expired_multiuse_link(self) -> None:
        # Backdated exactly to the validity limit -> link is expired.
        email = self.nonreg_email('newuser')
        date_sent = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
        invite_link = self.generate_multiuse_invite_link(date_sent=date_sent)
        result = self.client_post(invite_link, {'email': email})

        self.assertEqual(result.status_code, 200)
        self.assert_in_response("The confirmation link has expired or been deactivated.", result)

    def test_invalid_multiuse_link(self) -> None:
        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"
        result = self.client_post(invite_link, {'email': email})

        self.assertEqual(result.status_code, 200)
        self.assert_in_response("Whoops. The confirmation link is malformed.", result)

    def test_invalid_multiuse_link_in_open_realm(self) -> None:
        # With invite_required off, an invalid key falls through to the
        # open-registration path and signup still succeeds.
        self.realm.invite_required = False
        self.realm.save()

        email = self.nonreg_email('newuser')
        invite_link = "/join/invalid_key/"

        with patch('zerver.views.registration.get_realm_from_request', return_value=self.realm):
            with patch('zerver.views.registration.get_realm', return_value=self.realm):
                self.check_user_able_to_register(email, invite_link)

    def test_multiuse_link_with_specified_streams(self) -> None:
        # Users registering via a link with streams attached end up
        # subscribed to exactly those streams.
        name1 = "newuser"
        name2 = "bob"
        email1 = self.nonreg_email(name1)
        email2 = self.nonreg_email(name2)

        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email1, invite_link)
        self.check_user_subscribed_only_to_streams(name1, streams)

        stream_names = ["Rome", "Verona"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        invite_link = self.generate_multiuse_invite_link(streams=streams)
        self.check_user_able_to_register(email2, invite_link)
        self.check_user_subscribed_only_to_streams(name2, streams)

    def test_create_multiuse_link_api_call(self) -> None:
        self.login('iago')

        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

    def test_create_multiuse_link_with_specified_streams_api_call(self) -> None:
        self.login('iago')
        stream_names = ["Rome", "Scotland", "Venice"]
        streams = [get_stream(stream_name, self.realm) for stream_name in stream_names]
        stream_ids = [stream.id for stream in streams]

        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": orjson.dumps(stream_ids).decode()})
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)
        self.check_user_subscribed_only_to_streams("test", streams)

    def test_only_admin_can_create_multiuse_link_api_call(self) -> None:
        self.login('iago')
        # Only admins should be able to create multiuse invites even if
        # invite_by_admins_only is set to False.
        self.realm.invite_by_admins_only = False
        self.realm.save()

        result = self.client_post('/json/invites/multiuse')
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

        self.login('hamlet')
        result = self.client_post('/json/invites/multiuse')
        self.assert_json_error(result, "Must be an organization administrator")

    def test_multiuse_link_for_inviting_as_owner(self) -> None:
        # Creating an owner-level multiuse invite requires owner, not
        # merely admin, privileges.
        self.login('iago')
        result = self.client_post('/json/invites/multiuse',
                                  {"invite_as": orjson.dumps(PreregistrationUser.INVITE_AS['REALM_OWNER']).decode()})
        self.assert_json_error(result, "Must be an organization owner")

        self.login('desdemona')
        result = self.client_post('/json/invites/multiuse',
                                  {"invite_as": orjson.dumps(PreregistrationUser.INVITE_AS['REALM_OWNER']).decode()})
        self.assert_json_success(result)

        invite_link = result.json()["invite_link"]
        self.check_user_able_to_register(self.nonreg_email("test"), invite_link)

    def test_create_multiuse_link_invalid_stream_api_call(self) -> None:
        self.login('iago')
        result = self.client_post('/json/invites/multiuse',
                                  {"stream_ids": orjson.dumps([54321]).decode()})
        self.assert_json_error(result, "Invalid stream id 54321. No invites were sent.")
class EmailUnsubscribeTests(ZulipTestCase):
    """Tests for the logged-out one-click unsubscribe endpoints
    (/accounts/unsubscribe/<type>/<token>) for each email category."""

    def test_error_unsubscribe(self) -> None:
        # An invalid unsubscribe token "test123" produces an error.
        result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
        self.assert_in_response('Unknown email unsubscribe request', result)

        # An unknown message type "fake" produces an error.
        user_profile = self.example_user('hamlet')
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assert_in_response('Unknown email unsubscribe request', result)

    def test_missedmessage_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_offline_email_notifications = True
        user_profile.save()

        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        self.assertEqual(result.status_code, 200)

        # Re-read from the DB; the view mutated the row, not our object.
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_offline_email_notifications)

    def test_welcome_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        user_profile = self.example_user('hamlet')
        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(user_profile)
        self.assertEqual(2, ScheduledEmail.objects.filter(users=user_profile).count())

        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, ScheduledEmail.objects.filter(users=user_profile).count())

    def test_digest_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.

        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        user_profile = self.example_user('hamlet')
        self.assertTrue(user_profile.enable_digest_emails)

        # Enqueue a fake digest email.
        context = {'name': '', 'realm_uri': '', 'unread_pms': [], 'hot_conversations': [],
                   'new_users': [], 'new_streams': {'plain': []}, 'unsubscribe_link': ''}
        send_future_email('zerver/emails/digest', user_profile.realm,
                          to_user_ids=[user_profile.id], context=context)

        self.assertEqual(1, ScheduledEmail.objects.filter(users=user_profile).count())

        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, ScheduledEmail.objects.filter(users=user_profile).count())

    def test_login_unsubscribe(self) -> None:
        """
        We provide one-click unsubscribe links in login
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = self.example_user('hamlet')
        user_profile.enable_login_emails = True
        user_profile.save()

        unsubscribe_link = one_click_unsubscribe_link(user_profile, "login")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)

        self.assertEqual(result.status_code, 200)

        user_profile.refresh_from_db()
        self.assertFalse(user_profile.enable_login_emails)
class RealmCreationTest(ZulipTestCase):
    """Tests for the /new/ realm-creation flow: email confirmation,
    subdomain validation, OPEN_REALM_CREATION gating, and post-creation
    realm/user defaults."""

    @override_settings(OPEN_REALM_CREATION=True)
    def check_able_to_create_realm(self, email: str, password: str="test") -> None:
        """Drive the full /new/ flow for `email` and assert the created
        realm's defaults, the owner role, and the welcome/signup messages."""
        notification_bot = get_system_bot(settings.NOTIFICATION_BOT)
        signups_stream, _ = create_stream_if_needed(notification_bot.realm, 'signups')

        string_id = "zuliptest"
        # Make sure the realm does not exist
        with self.assertRaises(Realm.DoesNotExist):
            get_realm(string_id)

        # Create new realm with the email
        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/new/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].startswith('http://zuliptest.testserver/accounts/login/subdomain/'))

        # Make sure the realm is created
        realm = get_realm(string_id)
        self.assertEqual(realm.string_id, string_id)
        user = get_user(email, realm)
        self.assertEqual(user.realm, realm)

        # Check that user is the owner.
        self.assertEqual(user.role, UserProfile.ROLE_REALM_OWNER)

        # Check defaults
        self.assertEqual(realm.org_type, Realm.CORPORATE)
        self.assertEqual(realm.emails_restricted_to_domains, False)
        self.assertEqual(realm.invite_required, True)

        # Check welcome messages
        for stream_name, text, message_count in [
                (Realm.DEFAULT_NOTIFICATION_STREAM_NAME, 'with the topic', 3),
                (Realm.INITIAL_PRIVATE_STREAM_NAME, 'private stream', 1)]:
            stream = get_stream(stream_name, realm)
            recipient = stream.recipient
            messages = Message.objects.filter(recipient=recipient).order_by('date_sent')
            self.assertEqual(len(messages), message_count)
            self.assertIn(text, messages[0].content)

        # Check signup messages
        recipient = signups_stream.recipient
        messages = Message.objects.filter(recipient=recipient).order_by('id')
        self.assertEqual(len(messages), 2)
        self.assertIn('Signups enabled', messages[0].content)
        self.assertIn('signed up', messages[1].content)
        self.assertEqual('zuliptest', messages[1].topic_name())

        # Piggyback a little check for how we handle
        # empty string_ids.
        realm.string_id = ''
        self.assertEqual(realm.display_subdomain, '.')

    def test_create_realm_non_existing_email(self) -> None:
        self.check_able_to_create_realm("user1@test.com")

    def test_create_realm_existing_email(self) -> None:
        # An address already registered in another realm may still create
        # a new realm.
        self.check_able_to_create_realm("hamlet@zulip.com")

    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_create_realm_ldap_email(self) -> None:
        self.init_default_ldap_database()

        with self.settings(LDAP_EMAIL_ATTR="mail"):
            self.check_able_to_create_realm("newuser_email@zulip.com",
                                            self.ldap_password("newuser_with_email"))

    def test_create_realm_as_system_bot(self) -> None:
        # System bot addresses are reserved and rejected up front.
        result = self.client_post('/new/', {'email': 'notification-bot@zulip.com'})
        self.assertEqual(result.status_code, 200)
        self.assert_in_response('notification-bot@zulip.com is reserved for system bots', result)

    def test_create_realm_no_creation_key(self) -> None:
        """
        Trying to create a realm without a creation_key should fail when
        OPEN_REALM_CREATION is false.
        """
        email = "user1@test.com"

        with self.settings(OPEN_REALM_CREATION=False):
            # Create new realm with the email, but no creation key.
            result = self.client_post('/new/', {'email': email})
            self.assertEqual(result.status_code, 200)
            self.assert_in_response('New organization creation disabled', result)

    @override_settings(OPEN_REALM_CREATION=True)
    def test_create_realm_with_subdomain(self) -> None:
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm_name = "Test"

        # Make sure the realm does not exist
        with self.assertRaises(Realm.DoesNotExist):
            get_realm(string_id)

        # Create new realm with the email
        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/new/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain=string_id,
                                               realm_name=realm_name)
        self.assertEqual(result.status_code, 302)

        result = self.client_get(result.url, subdomain=string_id)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, 'http://zuliptest.testserver')

        # Make sure the realm is created
        realm = get_realm(string_id)
        self.assertEqual(realm.string_id, string_id)
        self.assertEqual(get_user(email, realm).realm, realm)

        self.assertEqual(realm.name, realm_name)
        self.assertEqual(realm.subdomain, string_id)

    @override_settings(OPEN_REALM_CREATION=True, FREE_TRIAL_DAYS=30)
    def test_create_realm_during_free_trial(self) -> None:
        # With FREE_TRIAL_DAYS set, a fresh realm is redirected into the
        # billing upgrade/onboarding flow after creation.
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm_name = "Test"

        with self.assertRaises(Realm.DoesNotExist):
            get_realm(string_id)

        result = self.client_post('/new/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/new/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)

        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain=string_id,
                                               realm_name=realm_name)
        self.assertEqual(result.status_code, 302)

        result = self.client_get(result.url, subdomain=string_id)
        self.assertEqual(result.url, 'http://zuliptest.testserver/upgrade/?onboarding=true')

        result = self.client_get(result.url, subdomain=string_id)
        self.assert_in_success_response(["Not ready to start your trial?"], result)

        realm = get_realm(string_id)
        self.assertEqual(realm.string_id, string_id)
        self.assertEqual(get_user(email, realm).realm, realm)
        self.assertEqual(realm.name, realm_name)
        self.assertEqual(realm.subdomain, string_id)

    @override_settings(OPEN_REALM_CREATION=True)
    def test_mailinator_signup(self) -> None:
        # Disposable-email domains are rejected.
        result = self.client_post('/new/', {'email': "hi@mailinator.com"})
        self.assert_in_response('Please use your real email address.', result)

    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions(self) -> None:
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # Each invalid subdomain maps to the error fragment we expect in
        # the re-rendered form.
        errors = {'id': "length 3 or greater",
                  '-id': "cannot start or end with a",
                  'string-ID': "lowercase letters",
                  'string_id': "lowercase letters",
                  'stream': "unavailable",
                  'streams': "unavailable",
                  'about': "unavailable",
                  'abouts': "unavailable",
                  'zephyr': "unavailable"}
        for string_id, error_msg in errors.items():
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain=string_id,
                                                   realm_name=realm_name)
            self.assert_in_response(error_msg, result)

        # test valid subdomain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain='a-0',
                                               realm_name=realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://a-0.testserver/accounts/login/subdomain/'))

    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain(self) -> None:
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain='',
                                                   realm_name=realm_name)
            self.assert_in_response('unavailable', result)

        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain='',
                                               realm_name=realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))

    @override_settings(OPEN_REALM_CREATION=True)
    def test_subdomain_restrictions_root_domain_option(self) -> None:
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"

        result = self.client_post('/new/', {'email': email})
        self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        self.client_get(confirmation_url)

        # test root domain will fail with ROOT_DOMAIN_LANDING_PAGE
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain='abcdef',
                                                   realm_in_root_domain='true',
                                                   realm_name=realm_name)
            self.assert_in_response('unavailable', result)

        # test valid use of root domain
        result = self.submit_reg_form_for_user(email, password,
                                               realm_subdomain='abcdef',
                                               realm_in_root_domain='true',
                                               realm_name=realm_name)
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result.url.startswith('http://testserver/accounts/login/subdomain/'))

    def test_is_root_domain_available(self) -> None:
        self.assertTrue(is_root_domain_available())
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            self.assertFalse(is_root_domain_available())
        realm = get_realm("zulip")
        realm.string_id = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
        realm.save()
        self.assertFalse(is_root_domain_available())

    def test_subdomain_check_api(self) -> None:
        result = self.client_get("/json/realm/subdomain/zulip")
        self.assert_in_success_response(["Subdomain unavailable. Please choose a different one."], result)

        result = self.client_get("/json/realm/subdomain/zu_lip")
        self.assert_in_success_response(["Subdomain can only have lowercase letters, numbers, and \'-\'s."], result)

        result = self.client_get("/json/realm/subdomain/hufflepuff")
        self.assert_in_success_response(["available"], result)
        self.assert_not_in_success_response(["unavailable"], result)

    def test_subdomain_check_management_command(self) -> None:
        # Short names should work
        check_subdomain_available('aa', from_management_command=True)
        # So should reserved ones
        check_subdomain_available('zulip', from_management_command=True)
        # malformed names should still not
        with self.assertRaises(ValidationError):
            check_subdomain_available('-ba_d-', from_management_command=True)
class UserSignUpTest(InviteUserBase):
def _assert_redirected_to(self, result: HttpResponse, url: str) -> None:
    # A redirect response is a 302 whose Location header matches exactly.
    self.assertEqual(302, result.status_code)
    self.assertEqual(url, result['LOCATION'])
def test_bad_email_configuration_for_accounts_home(self) -> None:
    """
    Make sure we redirect for SMTP errors.
    """
    email = self.nonreg_email('newguy')

    # Force the confirmation email send to blow up with an SMTP error
    # and capture what gets logged.
    with patch('zerver.views.registration.send_confirm_registration_email',
               side_effect=smtplib.SMTPException('uh oh')), \
            patch('logging.error') as err:
        result = self.client_post('/accounts/home/', {'email': email})

    self._assert_redirected_to(result, '/config-error/smtp')
    self.assertEqual(
        err.call_args_list[0][0],
        ('Error in accounts_home: %s', 'uh oh'),
    )
def test_bad_email_configuration_for_create_realm(self) -> None:
    """
    Make sure we redirect for SMTP errors.
    """
    email = self.nonreg_email('newguy')

    # Force the confirmation email send to blow up with an SMTP error
    # and capture what gets logged.
    with patch('zerver.views.registration.send_confirm_registration_email',
               side_effect=smtplib.SMTPException('uh oh')), \
            patch('logging.error') as err:
        result = self.client_post('/new/', {'email': email})

    self._assert_redirected_to(result, '/config-error/smtp')
    self.assertEqual(
        err.call_args_list[0][0],
        ('Error in create_realm: %s', 'uh oh'),
    )
def test_user_default_language_and_timezone(self) -> None:
    """
    Check if the default language of new user is the default language
    of the realm.
    """
    email = self.nonreg_email('newguy')
    password = "newpassword"
    timezone = "US/Mountain"
    realm = get_realm('zulip')
    # Set the realm default so we can verify the new user inherits it.
    do_set_realm_property(realm, 'default_language', "de")

    result = self.client_post('/accounts/home/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        f"/accounts/send_confirm/{email}"))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # Visit the confirmation link.
    confirmation_url = self.get_confirmation_url_from_outbox(email)
    result = self.client_get(confirmation_url)
    self.assertEqual(result.status_code, 200)

    # Pick a password and agree to the ToS.
    result = self.submit_reg_form_for_user(email, password, timezone=timezone)
    self.assertEqual(result.status_code, 302)

    # Language comes from the realm default; timezone comes from the
    # value submitted on the registration form.
    user_profile = self.nonreg_user('newguy')
    self.assertEqual(user_profile.default_language, realm.default_language)
    self.assertEqual(user_profile.timezone, timezone)
    from django.core.mail import outbox
    outbox.pop()
def test_default_twenty_four_hour_time(self) -> None:
    """
    Check if the default twenty_four_hour_time setting of new user
    is the default twenty_four_hour_time of the realm.
    """
    email = self.nonreg_email('newguy')
    password = "newpassword"
    realm = get_realm('zulip')
    # Flip the realm default so inheritance is observable.
    do_set_realm_property(realm, 'default_twenty_four_hour_time', True)

    result = self.client_post('/accounts/home/', {'email': email})
    self.assertEqual(result.status_code, 302)
    self.assertTrue(result["Location"].endswith(
        f"/accounts/send_confirm/{email}"))
    result = self.client_get(result["Location"])
    self.assert_in_response("Check your email so we can get started.", result)

    # Visit the confirmation link.
    confirmation_url = self.get_confirmation_url_from_outbox(email)
    result = self.client_get(confirmation_url)
    self.assertEqual(result.status_code, 200)

    result = self.submit_reg_form_for_user(email, password)
    self.assertEqual(result.status_code, 302)

    user_profile = self.nonreg_user('newguy')
    self.assertEqual(user_profile.twenty_four_hour_time, realm.default_twenty_four_hour_time)
def test_signup_already_active(self) -> None:
    """
    Check if signing up with an active email redirects to a login page.
    """
    active_email = self.example_email("hamlet")
    response = self.client_post('/accounts/home/', {'email': active_email})

    # The signup attempt is bounced to the login flow...
    self.assertEqual(302, response.status_code)
    self.assertIn('login', response['Location'])

    # ...which tells the user the account already exists.
    followup = self.client_get(response.url)
    self.assert_in_response("You've already registered", followup)
    def test_signup_system_bot(self) -> None:
        """Attempting to sign up with a system bot's email address should be
        rejected and redirect to the login flow."""
        email = "notification-bot@zulip.com"
        result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result['Location'])
        result = self.client_get(result.url)
        # This is not really the right error message, but at least it's an error.
        self.assert_in_response("You've already registered", result)
    def test_signup_existing_email(self) -> None:
        """
        Check if signing up with an email used in another realm succeeds.
        """
        email = self.example_email('hamlet')
        password = "newpassword"
        realm = get_realm('lear')
        result = self.client_post('/accounts/home/', {'email': email}, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain="lear")
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain="lear")
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email, password, subdomain="lear")
        self.assertEqual(result.status_code, 302)
        # The account now exists in the lear realm (raises if missing)...
        get_user(email, realm)
        # ...and the same delivery email is used by two accounts overall.
        self.assertEqual(UserProfile.objects.filter(delivery_email=email).count(), 2)
    def test_signup_invalid_name(self) -> None:
        """
        Check if an invalid name during signup is handled properly.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Pick a password and agree to the ToS; the name contains characters
        # the form rejects, so we should land back on the form with an error.
        result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
        self.assert_in_success_response(["Invalid characters in name!"], result)
        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_without_password(self) -> None:
        """
        Check if signing up without a password works properly when
        password_auth_enabled is False.
        """
        email = self.nonreg_email('newuser')
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Simulate a deployment with no password-based auth backend; the
        # registration form is then submitted without any password field.
        with patch('zerver.views.registration.password_auth_enabled', return_value=False):
            result = self.client_post(
                '/accounts/register/',
                {'full_name': 'New User',
                 'key': find_key_by_email(email),
                 'terms': True})
        # User should now be logged in.
        self.assertEqual(result.status_code, 302)
        user_profile = self.nonreg_user('newuser')
        self.assert_logged_in_user_id(user_profile.id)
    def test_signup_without_full_name(self) -> None:
        """
        Check if signing up without a full name redirects to a registration
        form.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Submit the registration form with no full_name field at all.
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'from_confirmation': '1'})
        self.assert_in_success_response(["We just need you to do one last thing."], result)
        # Verify that the user is asked for name and password
        self.assert_in_success_response(['id_password', 'id_full_name'], result)
    def test_signup_email_message_contains_org_header(self) -> None:
        """The signup confirmation email should carry a List-Id header
        identifying the organization."""
        email = "newguy@zulip.com"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Inspect the message Django's test email backend captured.
        from django.core.mail import outbox
        self.assertEqual(outbox[0].extra_headers["List-Id"], "Zulip Dev <zulip.testserver>")
    def test_signup_with_full_name(self) -> None:
        """
        Check that submitting the confirmation step with a full name already
        provided still shows the final registration form.

        (The previous docstring was copy-pasted from the "without full name"
        test and did not match this test's behavior.)
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New Guy",
             'from_confirmation': '1'})
        self.assert_in_success_response(["We just need you to do one last thing."], result)
    def test_signup_with_weak_password(self) -> None:
        """
        Check that a password failing the configured strength requirements
        is rejected and no account is created.

        (The previous docstring was copy-pasted from the "without full name"
        test and did not match this test's behavior.)
        """
        email = "newguy@zulip.com"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # 'easy' is long enough for PASSWORD_MIN_LENGTH=6? No — it is 4
        # characters, and also below the PASSWORD_MIN_GUESSES threshold.
        with self.settings(PASSWORD_MIN_LENGTH=6, PASSWORD_MIN_GUESSES=1000):
            result = self.client_post(
                '/accounts/register/',
                {'password': 'easy',
                 'key': find_key_by_email(email),
                 'terms': True,
                 'full_name': "New Guy",
                 'from_confirmation': '1'})
            self.assert_in_success_response(["We just need you to do one last thing."], result)
            result = self.submit_reg_form_for_user(email,
                                                   'easy',
                                                   full_name="New Guy")
            self.assert_in_success_response(["The password is too weak."], result)
            with self.assertRaises(UserProfile.DoesNotExist):
                # Account wasn't created.
                get_user(email, get_realm("zulip"))
    def test_signup_with_default_stream_group(self) -> None:
        # Check if user is subscribed to the streams of default
        # stream group as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Set up two plain default streams...
        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)
        # ...and one default stream group that the user opts into below.
        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
        result = self.submit_reg_form_for_user(email, password, default_stream_groups=["group 1"])
        self.check_user_subscribed_only_to_streams("newguy", default_streams + group1_streams)
    def test_signup_two_confirmation_links(self) -> None:
        """Requesting signup twice yields two confirmation links; completing
        registration via the second one must invalidate the first."""
        email = self.nonreg_email('newguy')
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        first_confirmation_url = self.get_confirmation_url_from_outbox(email)
        first_confirmation_key = find_key_by_email(email)
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        second_confirmation_url = self.get_confirmation_url_from_outbox(email)
        # Sanity check:
        self.assertNotEqual(first_confirmation_url, second_confirmation_url)
        # Register the account (this will use the second confirmation url):
        result = self.submit_reg_form_for_user(email, password, full_name="New Guy",
                                               from_confirmation="1")
        self.assert_in_success_response(["We just need you to do one last thing.",
                                         "New Guy",
                                         email],
                                        result)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               full_name="New Guy")
        user_profile = UserProfile.objects.get(delivery_email=email)
        self.assertEqual(user_profile.delivery_email, email)
        # Now try to register using the first confirmation url:
        result = self.client_get(first_confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': first_confirmation_key,
             'terms': True,
             'full_name': "New Guy",
             'from_confirmation': '1'})
        # Error page should be displayed
        self.assert_in_success_response(["The registration link has expired or is not valid."], result)
        self.assertEqual(result.status_code, 200)
    def test_signup_with_multiple_default_stream_groups(self) -> None:
        # Check if user is subscribed to the streams of default
        # stream groups as well as default streams.
        email = self.nonreg_email('newguy')
        password = "newpassword"
        realm = get_realm("zulip")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"])
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        default_streams = []
        for stream_name in ["venice", "verona"]:
            stream = get_stream(stream_name, realm)
            do_add_default_stream(stream)
            default_streams.append(stream)
        group1_streams = []
        for stream_name in ["scotland", "denmark"]:
            stream = get_stream(stream_name, realm)
            group1_streams.append(stream)
        do_create_default_stream_group(realm, "group 1", "group 1 description", group1_streams)
        # "scotland" appears in both groups; the expected subscription list
        # below is deduplicated with set() for exactly this reason.
        group2_streams = []
        for stream_name in ["scotland", "rome"]:
            stream = get_stream(stream_name, realm)
            group2_streams.append(stream)
        do_create_default_stream_group(realm, "group 2", "group 2 description", group2_streams)
        result = self.submit_reg_form_for_user(email, password,
                                               default_stream_groups=["group 1", "group 2"])
        self.check_user_subscribed_only_to_streams(
            "newguy", list(set(default_streams + group1_streams + group2_streams)))
    def test_signup_without_user_settings_from_another_realm(self) -> None:
        """Registering in a second realm while declining to import settings
        should leave the new account with the defaults, not the values of
        the existing account in the other realm."""
        hamlet_in_zulip = self.example_user('hamlet')
        email = hamlet_in_zulip.delivery_email
        password = "newpassword"
        subdomain = "lear"
        realm = get_realm("lear")
        # Make an account in the Zulip realm, but we're not copying from there.
        hamlet_in_zulip.left_side_userlist = True
        hamlet_in_zulip.default_language = "de"
        hamlet_in_zulip.emojiset = "twitter"
        hamlet_in_zulip.high_contrast_mode = True
        hamlet_in_zulip.enter_sends = True
        hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
        hamlet_in_zulip.save()
        result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain=subdomain)
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain=subdomain)
        self.assertEqual(result.status_code, 200)
        # NOTE(review): source_realm="on" presumably corresponds to the
        # "do not import settings" form choice — confirm against the
        # registration form's field definition.
        result = self.submit_reg_form_for_user(email, password, source_realm="on",
                                               HTTP_HOST=subdomain + ".testserver")
        hamlet = get_user(self.example_email("hamlet"), realm)
        # All settings should be at their defaults in the new realm.
        self.assertEqual(hamlet.left_side_userlist, False)
        self.assertEqual(hamlet.default_language, "en")
        self.assertEqual(hamlet.emojiset, "google-blob")
        self.assertEqual(hamlet.high_contrast_mode, False)
        self.assertEqual(hamlet.enable_stream_audible_notifications, False)
        self.assertEqual(hamlet.enter_sends, False)
        self.assertEqual(hamlet.tutorial_status, UserProfile.TUTORIAL_WAITING)
    def test_signup_with_user_settings_from_another_realm(self) -> None:
        """Registering in a second realm with source_realm="zulip" should
        copy settings and the avatar from the existing Zulip-realm account."""
        hamlet_in_zulip = self.example_user('hamlet')
        email = hamlet_in_zulip.delivery_email
        password = "newpassword"
        subdomain = "lear"
        lear_realm = get_realm("lear")
        # Customize the existing account (avatar + several settings) so the
        # copy is distinguishable from the defaults.
        self.login('hamlet')
        with get_test_image_file('img.png') as image_file:
            self.client_post("/json/users/me/avatar", {'file': image_file})
        hamlet_in_zulip.refresh_from_db()
        hamlet_in_zulip.left_side_userlist = True
        hamlet_in_zulip.default_language = "de"
        hamlet_in_zulip.emojiset = "twitter"
        hamlet_in_zulip.high_contrast_mode = True
        hamlet_in_zulip.enter_sends = True
        hamlet_in_zulip.tutorial_status = UserProfile.TUTORIAL_FINISHED
        hamlet_in_zulip.save()
        result = self.client_post('/accounts/home/', {'email': email}, subdomain=subdomain)
        self.assertEqual(result.status_code, 302)
        result = self.client_get(result["Location"], subdomain=subdomain)
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url, subdomain=subdomain)
        self.assertEqual(result.status_code, 200)
        result = self.client_post(
            '/accounts/register/',
            {'password': password,
             'key': find_key_by_email(email),
             'from_confirmation': '1'},
            subdomain=subdomain)
        # The form should offer to import settings, with Zulip Dev selected.
        self.assert_in_success_response(["Import settings from existing Zulip account",
                                         "selected >\n Zulip Dev",
                                         "We just need you to do one last thing."], result)
        result = self.submit_reg_form_for_user(email, password, source_realm="zulip",
                                               HTTP_HOST=subdomain + ".testserver")
        hamlet_in_lear = get_user(email, lear_realm)
        # Settings were copied from the zulip-realm account...
        self.assertEqual(hamlet_in_lear.left_side_userlist, True)
        self.assertEqual(hamlet_in_lear.default_language, "de")
        self.assertEqual(hamlet_in_lear.emojiset, "twitter")
        self.assertEqual(hamlet_in_lear.high_contrast_mode, True)
        self.assertEqual(hamlet_in_lear.enter_sends, True)
        self.assertEqual(hamlet_in_lear.enable_stream_audible_notifications, False)
        self.assertEqual(hamlet_in_lear.tutorial_status, UserProfile.TUTORIAL_FINISHED)
        # ...including the avatar file on disk, byte for byte.
        zulip_path_id = avatar_disk_path(hamlet_in_zulip)
        lear_path_id = avatar_disk_path(hamlet_in_lear)
        with open(zulip_path_id, 'rb') as f:
            zulip_avatar_bits = f.read()
        with open(lear_path_id, 'rb') as f:
            lear_avatar_bits = f.read()
        self.assertTrue(len(zulip_avatar_bits) > 500)
        self.assertEqual(zulip_avatar_bits, lear_avatar_bits)
    def test_signup_invalid_subdomain(self) -> None:
        """
        Check if attempting to authenticate to the wrong subdomain logs an
        error and redirects.
        """
        email = "newuser@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)

        def invalid_subdomain(**kwargs: Any) -> Any:
            # Mimic an auth backend reporting, via the mutable return_data
            # dict, that the subdomain does not match the user's realm.
            return_data = kwargs.get('return_data', {})
            return_data['invalid_subdomain'] = True

        with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
            with patch('logging.error') as mock_error:
                result = self.client_post(
                    '/accounts/register/',
                    {'password': password,
                     'full_name': 'New User',
                     'key': find_key_by_email(email),
                     'terms': True})
                mock_error.assert_called_once()
                self.assertEqual(result.status_code, 302)
    def test_replace_subdomain_in_confirmation_link(self) -> None:
        """
        Check that manually changing the subdomain in a registration
        confirmation link doesn't allow you to register to a different realm.
        """
        email = "newuser@zulip.com"
        self.client_post('/accounts/home/', {'email': email})
        # The confirmation key was issued for the default realm; submitting
        # it on the "zephyr" subdomain must be rejected.
        result = self.client_post(
            '/accounts/register/',
            {'password': "password",
             'key': find_key_by_email(email),
             'terms': True,
             'full_name': "New User",
             'from_confirmation': '1'}, subdomain="zephyr")
        self.assert_in_success_response(["We couldn't find your confirmation link"], result)
def test_failed_signup_due_to_restricted_domain(self) -> None:
realm = get_realm('zulip')
do_set_realm_property(realm, 'invite_required', False)
do_set_realm_property(realm, 'emails_restricted_to_domains', True)
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn(f"Your email address, {email}, is not in one of the domains",
form.errors['email'][0])
def test_failed_signup_due_to_disposable_email(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = False
realm.disallow_disposable_email_addresses = True
realm.save()
email = 'abc@mailnator.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Please use your real email address", form.errors['email'][0])
def test_failed_signup_due_to_email_containing_plus(self) -> None:
realm = get_realm('zulip')
realm.emails_restricted_to_domains = True
realm.save()
email = 'iago+label@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn("Email addresses containing + are not allowed in this organization.", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self) -> None:
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
email = 'user@zulip.com'
form = HomepageForm({'email': email}, realm=realm)
self.assertIn(f"Please request an invite for {email} from",
form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self) -> None:
email = 'user@acme.com'
form = HomepageForm({'email': email}, realm=None)
self.assertIn("organization you are trying to join using {} does "
"not exist".format(email), form.errors['email'][0])
def test_access_signup_page_in_root_domain_without_realm(self) -> None:
result = self.client_get('/register', subdomain="", follow=True)
self.assert_in_success_response(["Find your Zulip accounts"], result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_from_confirmation(self) -> None:
        """With LDAP authentication enabled, the confirmation step should
        prefill the full name from LDAP and ask for the LDAP password
        rather than a new Zulip password."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                match = re.search(settings.EXTERNAL_HOST + r"(\S+)>", message.body)
                assert match is not None
                [confirmation_url] = match.groups()
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            # Full name should be set from LDAP
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "New LDAP fullname",
                                             "newuser@zulip.com"],
                                            result)
            # Verify that the user is asked for name
            self.assert_in_success_response(['id_full_name'], result)
            # Verify that user is asked for its LDAP/Active Directory password.
            self.assert_in_success_response(['Enter your LDAP/Active Directory password.',
                                             'ldap-password'], result)
            self.assert_not_in_success_response(['id_password'], result)
            # Test the TypeError exception handler
            with patch("zproject.backends.ZulipLDAPAuthBackendBase.get_mapped_name", side_effect=TypeError):
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       from_confirmation='1',
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "newuser@zulip.com"],
                                            result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipLDAPUserPopulator',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_populate_only_registration_from_confirmation(self) -> None:
        """When LDAP only populates user data (EmailAuthBackend handles
        authentication), the confirmation step should prefill the name
        from LDAP but still ask for a regular Zulip password."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                match = re.search(settings.EXTERNAL_HOST + r"(\S+)>", message.body)
                assert match is not None
                [confirmation_url] = match.groups()
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_BIND_PASSWORD='',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            # Full name should be set from LDAP
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "New LDAP fullname",
                                             "newuser@zulip.com"],
                                            result)
            # Verify that the user is asked for name
            self.assert_in_success_response(['id_full_name'], result)
            # Verify that user is NOT asked for its LDAP/Active Directory password.
            # LDAP is not configured for authentication in this test.
            self.assert_not_in_success_response(['Enter your LDAP/Active Directory password.',
                                                 'ldap-password'], result)
            # If we were using e.g. the SAML auth backend, there
            # shouldn't be a password prompt, but since it uses the
            # EmailAuthBackend, there should be password field here.
            self.assert_in_success_response(['id_password'], result)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_end_to_end(self) -> None:
        """Full LDAP signup flow: a wrong LDAP password must not create an
        account; the correct one completes registration with the
        LDAP-provided full name."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        full_name = 'New LDAP fullname'
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Full name should be set from LDAP
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             full_name,
                                             "newuser@zulip.com"],
                                            result)
            # Submit the final form with the wrong password.
            result = self.submit_reg_form_for_user(email,
                                                   'wrongpassword',
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Didn't create an account
            with self.assertRaises(UserProfile.DoesNotExist):
                user_profile = UserProfile.objects.get(delivery_email=email)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            # Submit the final form with the correct password.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name=full_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from form which was set by LDAP.
            self.assertEqual(user_profile.full_name, full_name)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_split_full_name_mapping(self) -> None:
        """Separate first_name/last_name LDAP attributes should be combined
        into a single Zulip full name."""
        self.init_default_ldap_database()
        ldap_user_attr_map = {'first_name': 'sn', 'last_name': 'cn'}
        subdomain = 'zulip'
        email = 'newuser_splitname@zulip.com'
        password = self.ldap_password("newuser_splitname")
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            # Test split name mapping.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from form which was set by LDAP.
            self.assertEqual(user_profile.full_name, "First Last")
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_auto_registration_on_login(self) -> None:
        """The most common way for LDAP authentication to be used is with a
        server that doesn't have a terms-of-service required, in which
        case we offer a complete single-sign-on experience (where the
        user just enters their LDAP username and password, and their
        account is created if it doesn't already exist).

        This test verifies that flow.
        """
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {
            'full_name': 'cn',
            'custom_profile_field__phone_number': 'homePhone',
        }
        full_name = 'New LDAP fullname'
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # A plain login should transparently create the account.
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from form which was set by LDAP.
            self.assertEqual(user_profile.full_name, full_name)
            # Test custom profile fields are properly synced.
            phone_number_field = CustomProfileField.objects.get(realm=user_profile.realm, name='Phone number')
            phone_number_field_value = CustomProfileFieldValue.objects.get(user_profile=user_profile,
                                                                           field=phone_number_field)
            self.assertEqual(phone_number_field_value.value, 'a-new-number')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
    def test_ldap_registration_multiple_realms(self) -> None:
        """The same LDAP user should be able to auto-register separate
        accounts in multiple realms."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        self.init_default_ldap_database()
        ldap_user_attr_map = {
            'full_name': 'cn',
        }
        do_create_realm('test', 'test', False)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            subdomain = "zulip"
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(
                delivery_email=email, realm=get_realm('zulip'))
            self.logout()
            # Test registration in another realm works.
            subdomain = "test"
            self.login_with_return(email, password,
                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(
                delivery_email=email, realm=get_realm('test'))
            self.assertEqual(user_profile.delivery_email, email)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_ldap_registration_when_names_changes_are_disabled(self) -> None:
        """With name changes disabled, the full name must come from the LDAP
        data stashed in the session, not from the submitted form."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            # Click confirmation link. This will set the
            # 'authenticated_full_name' session variable, which will be
            # used to set the fullname of the user.
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Ignore",
                                                   from_confirmation="1",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            with patch('zerver.views.registration.name_changes_disabled', return_value=True):
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from LDAP session.
            self.assertEqual(user_profile.full_name, 'New LDAP fullname')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_signup_with_ldap_and_email_enabled_using_email_with_ldap_append_domain(self) -> None:
        """With both LDAP and email auth enabled, signup for an address inside
        LDAP_APPEND_DOMAIN must authenticate against LDAP only; a non-LDAP
        account may be created only for an address outside that domain."""
        password = "nonldappassword"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # If the user's email is inside the LDAP directory and we just
        # have a wrong password, then we refuse to create an account.
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation="1",
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because password was wrong
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
        # For the rest of the test we delete the user from ldap.
        del self.mock_ldap.directory["uid=newuser,ou=users,dc=zulip,dc=com"]
        # If the user's email is not in the LDAP directory, but fits LDAP_APPEND_DOMAIN,
        # we refuse to create the account.
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ), self.assertLogs('zulip.ldap', 'DEBUG') as debug_log:
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because emails matching LDAP_APPEND_DOMAIN
            # aren't allowed to create non-LDAP accounts.
            self.assertEqual(result.url, "/accounts/login/?email=newuser%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
            self.assertEqual(debug_log.output, [
                'DEBUG:zulip.ldap:ZulipLDAPAuthBackend: No LDAP user matching django_to_ldap_username result: newuser. Input username: newuser@zulip.com'
            ])
        # If the email is outside of LDAP_APPEND_DOMAIN, we successfully create a non-LDAP account,
        # with the password managed in the Zulip database.
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='example.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            with patch('zerver.views.registration.logging.warning') as mock_warning:
                result = self.submit_reg_form_for_user(
                    email,
                    password,
                    from_confirmation="1",
                    # Pass HTTP_HOST for the target subdomain
                    HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(result.status_code, 200)
                mock_warning.assert_called_once_with(
                    "New account email %s could not be found in LDAP",
                    "newuser@zulip.com",
                )
            with self.assertLogs('zulip.ldap', 'DEBUG') as debug_log:
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       full_name="Non-LDAP Full Name",
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(debug_log.output, [
                    'DEBUG:zulip.ldap:ZulipLDAPAuthBackend: Email newuser@zulip.com does not match LDAP domain example.com.'
                ])
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from the POST request, not LDAP
            self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend',
                                                'zproject.backends.ZulipDummyBackend'))
    def test_signup_with_ldap_and_email_enabled_using_email_with_ldap_email_search(self) -> None:
        """Like the LDAP_APPEND_DOMAIN variant, but the LDAP user is located
        via an LDAP_EMAIL_ATTR search instead of a username-to-DN mapping."""
        # If the user's email is inside the LDAP directory and we just
        # have a wrong password, then we refuse to create an account
        password = "nonldappassword"
        email = "newuser_email@zulip.com"  # belongs to user uid=newuser_with_email in the test directory
        subdomain = "zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_EMAIL_ATTR='mail',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation="1",
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="Non-LDAP Full Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assertEqual(result.status_code, 302)
            # We get redirected back to the login page because password was wrong
            self.assertEqual(result.url, "/accounts/login/?email=newuser_email%40zulip.com")
            self.assertFalse(UserProfile.objects.filter(delivery_email=email).exists())
        # If the user's email is not in the LDAP directory, though, we
        # successfully create an account with a password in the Zulip
        # database.
        password = "nonldappassword"
        email = "nonexistent@zulip.com"
        subdomain = "zulip"
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_EMAIL_ATTR='mail',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
        ):
            with patch('zerver.views.registration.logging.warning') as mock_warning:
                result = self.submit_reg_form_for_user(
                    email,
                    password,
                    from_confirmation="1",
                    # Pass HTTP_HOST for the target subdomain
                    HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(result.status_code, 200)
                mock_warning.assert_called_once_with(
                    "New account email %s could not be found in LDAP",
                    "nonexistent@zulip.com",
                )
            with self.assertLogs('zulip.ldap', 'DEBUG') as debug_log:
                result = self.submit_reg_form_for_user(email,
                                                       password,
                                                       full_name="Non-LDAP Full Name",
                                                       # Pass HTTP_HOST for the target subdomain
                                                       HTTP_HOST=subdomain + ".testserver")
                self.assertEqual(debug_log.output, [
                    'DEBUG:zulip.ldap:ZulipLDAPAuthBackend: No LDAP user matching django_to_ldap_username result: nonexistent@zulip.com. Input username: nonexistent@zulip.com'
                ])
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, "http://zulip.testserver/")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # Name comes from the POST request, not LDAP
            self.assertEqual(user_profile.full_name, 'Non-LDAP Full Name')
def ldap_invite_and_signup_as(self, invite_as: int, streams: Sequence[str] = ['Denmark']) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn'}
subdomain = 'zulip'
email = 'newuser@zulip.com'
password = self.ldap_password("newuser")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
):
with self.assertLogs('zulip.ldap', 'DEBUG') as debug_log:
# Invite user.
self.login('iago')
self.assertEqual(debug_log.output, [
'DEBUG:zulip.ldap:ZulipLDAPAuthBackend: No LDAP user matching django_to_ldap_username result: iago. Input username: iago@zulip.com'
])
response = self.invite(invitee_emails='newuser@zulip.com',
stream_names=streams,
invite_as=invite_as)
self.assert_json_success(response)
self.logout()
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
from_confirmation="1",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
full_name="Ignore",
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend'))
    def test_ldap_invite_user_as_admin(self) -> None:
        # An invitation carrying REALM_ADMIN must yield an administrator
        # account after LDAP signup completes.
        self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['REALM_ADMIN'])
        user_profile = UserProfile.objects.get(
            delivery_email=self.nonreg_email('newuser'))
        self.assertTrue(user_profile.is_realm_admin)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend'))
    def test_ldap_invite_user_as_guest(self) -> None:
        # An invitation carrying GUEST_USER must yield a guest account
        # after LDAP signup completes.
        self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['GUEST_USER'])
        user_profile = UserProfile.objects.get(
            delivery_email=self.nonreg_email('newuser'))
        self.assertTrue(user_profile.is_guest)
    @override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
                                                'zproject.backends.EmailAuthBackend'))
    def test_ldap_invite_streams(self) -> None:
        """Streams named in an invitation are subscribed on signup, even when
        they are not default streams of the realm."""
        stream_name = 'Rome'
        realm = get_realm('zulip')
        stream = get_stream(stream_name, realm)
        default_streams = get_default_streams_for_realm(realm)
        default_streams_name = [stream.name for stream in default_streams]
        # Sanity check: 'Rome' must not already be a default stream, or the
        # subscription below would not prove anything.
        self.assertNotIn(stream_name, default_streams_name)
        # Invite user.
        self.ldap_invite_and_signup_as(PreregistrationUser.INVITE_AS['REALM_ADMIN'], streams=[stream_name])
        user_profile = UserProfile.objects.get(delivery_email=self.nonreg_email('newuser'))
        self.assertTrue(user_profile.is_realm_admin)
        # Exactly one subscription to the invited stream.
        sub = get_stream_subscriptions_for_user(user_profile).filter(recipient__type_id=stream.id)
        self.assertEqual(len(sub), 1)
    def test_registration_when_name_changes_are_disabled(self) -> None:
        """
        Test `name_changes_disabled` when we are not running under LDAP.
        Without LDAP there is no directory to supply a name, so the name
        from the POST data is used even though name changes are disabled.
        """
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        with patch('zerver.views.registration.name_changes_disabled', return_value=True):
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   full_name="New Name",
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            user_profile = UserProfile.objects.get(delivery_email=email)
            # 'New Name' comes from POST data; not from LDAP session.
            self.assertEqual(user_profile.full_name, 'New Name')
    def test_realm_creation_through_ldap(self) -> None:
        """Realm creation by an LDAP-authenticated user: the prereg entry is
        flipped to realm_creation before submitting the registration form."""
        password = self.ldap_password("newuser")
        email = "newuser@zulip.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        self.init_default_ldap_database()
        ldap_user_attr_map = {'full_name': 'cn'}
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link: scan the outbox (newest first) for the
        # message addressed to us and pull the URL out of its body.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                match = re.search(settings.EXTERNAL_HOST + r"(\S+)>", message.body)
                assert match is not None
                [confirmation_url] = match.groups()
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        with self.settings(
            POPULATE_PROFILE_VIA_LDAP=True,
            LDAP_APPEND_DOMAIN='zulip.com',
            AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
            AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
            TERMS_OF_SERVICE=False,
        ):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            # Mark the preregistration record as a realm-creation flow before
            # submitting the form.
            key = find_key_by_email(email)
            confirmation = Confirmation.objects.get(confirmation_key=key)
            prereg_user = confirmation.content_object
            prereg_user.realm_creation = True
            prereg_user.save()
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["We just need you to do one last thing.",
                                             "newuser@zulip.com"],
                                            result)
    @patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
    def test_registration_of_mirror_dummy_user(self, ignored: Any) -> None:
        """Registering a deactivated mirror-dummy user activates the existing
        account; attempting it while the dummy is active is a server error."""
        password = "test"
        subdomain = "zephyr"
        user_profile = self.mit_user("sipbtest")
        email = user_profile.delivery_email
        user_profile.is_mirror_dummy = True
        user_profile.is_active = False
        user_profile.save()
        result = self.client_post('/register/', {'email': email}, subdomain="zephyr")
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            f"/accounts/send_confirm/{email}"))
        result = self.client_get(result["Location"], subdomain="zephyr")
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                match = re.search(settings.EXTERNAL_HOST + r"(\S+)>", message.body)
                assert match is not None
                [confirmation_url] = match.groups()
                break
        else:
            raise AssertionError("Couldn't find a confirmation email.")
        result = self.client_get(confirmation_url, subdomain="zephyr")
        self.assertEqual(result.status_code, 200)
        # If the mirror dummy user is already active, attempting to
        # submit the registration form should raise an AssertionError
        # (this is an invalid state, so it's a bug we got here):
        user_profile.is_active = True
        user_profile.save()
        with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"), \
                self.assertLogs('django.request', 'ERROR') as error_log:
            result = self.submit_reg_form_for_user(
                email,
                password,
                from_confirmation='1',
                # Pass HTTP_HOST for the target subdomain
                HTTP_HOST=subdomain + ".testserver")
        self.assertTrue('ERROR:django.request:Internal Server Error: /accounts/register/' in error_log.output[0])
        self.assertTrue('raise AssertionError("Mirror dummy user is already active!' in error_log.output[0])
        self.assertTrue('AssertionError: Mirror dummy user is already active!' in error_log.output[0])
        # With the dummy deactivated again, registration succeeds and logs
        # the (now-activated) user in.
        user_profile.is_active = False
        user_profile.save()
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               from_confirmation='1',
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        self.assert_logged_in_user_id(user_profile.id)
    def test_registration_of_active_mirror_dummy_user(self) -> None:
        """
        Trying to activate an already-active mirror dummy user should
        raise an AssertionError.
        """
        user_profile = self.mit_user("sipbtest")
        email = user_profile.delivery_email
        user_profile.is_mirror_dummy = True
        user_profile.is_active = True
        user_profile.save()
        # The request 500s; verify both the request-log line and the
        # underlying assertion text appear in the captured output.
        with self.assertRaisesRegex(AssertionError, "Mirror dummy user is already active!"), \
                self.assertLogs('django.request', 'ERROR') as error_log:
            self.client_post('/register/', {'email': email}, subdomain="zephyr")
        self.assertTrue('ERROR:django.request:Internal Server Error: /register/' in error_log.output[0])
        self.assertTrue('raise AssertionError("Mirror dummy user is already active!' in error_log.output[0])
        self.assertTrue('AssertionError: Mirror dummy user is already active!' in error_log.output[0])
    @override_settings(TERMS_OF_SERVICE=False)
    def test_dev_user_registration(self) -> None:
        """Verify that /devtools/register_user creates a new user, logs them
        in, and redirects to the logged-in app."""
        # The dev endpoint derives the new email from the current user count.
        count = UserProfile.objects.count()
        email = f"user-{count}@zulip.com"
        result = self.client_post('/devtools/register_user/')
        user_profile = UserProfile.objects.all().order_by("id").last()
        self.assertEqual(result.status_code, 302)
        self.assertEqual(user_profile.delivery_email, email)
        self.assertEqual(result['Location'], "http://zulip.testserver/")
        self.assert_logged_in_user_id(user_profile.id)
    @override_settings(TERMS_OF_SERVICE=False)
    def test_dev_user_registration_create_realm(self) -> None:
        # /devtools/register_realm creates a realm whose subdomain is derived
        # from the current user count, then logs the creator in on it.
        count = UserProfile.objects.count()
        string_id = f"realm-{count}"
        result = self.client_post('/devtools/register_realm/')
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].startswith(
            f'http://{string_id}.testserver/accounts/login/subdomain'))
        # Follow the subdomain login redirect to land on the new realm.
        result = self.client_get(result["Location"], subdomain=string_id)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result["Location"], f'http://{string_id}.testserver')
        user_profile = UserProfile.objects.all().order_by("id").last()
        self.assert_logged_in_user_id(user_profile.id)
class DeactivateUserTest(ZulipTestCase):
    """Tests for self-deactivation via DELETE /json/users/me."""
    def test_deactivate_user(self) -> None:
        user = self.example_user('hamlet')
        email = user.email
        self.login_user(user)
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Re-fetch: the local object still carries the stale is_active flag.
        user = self.example_user('hamlet')
        self.assertFalse(user.is_active)
        password = initial_password(email)
        assert password is not None
        self.assert_login_failure(email, password=password)
    def test_do_not_deactivate_final_owner(self) -> None:
        user = self.example_user('desdemona')
        user_2 = self.example_user('iago')
        self.login_user(user)
        self.assertTrue(user.is_active)
        # The only organization owner cannot deactivate themselves.
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only organization owner.")
        user = self.example_user('desdemona')
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_realm_owner)
        # Once a second owner exists, self-deactivation succeeds.
        do_change_user_role(user_2, UserProfile.ROLE_REALM_OWNER)
        self.assertTrue(user_2.is_realm_owner)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        do_change_user_role(user, UserProfile.ROLE_REALM_OWNER)
    def test_do_not_deactivate_final_user(self) -> None:
        realm = get_realm('zulip')
        # Deactivate everyone except the owner(s), leaving a single user.
        UserProfile.objects.filter(realm=realm).exclude(
            role=UserProfile.ROLE_REALM_OWNER).update(is_active=False)
        user = self.example_user("desdemona")
        self.login_user(user)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only user.")
class TestLoginPage(ZulipTestCase):
    """Behavior of /login/ under root-domain landing-page configurations."""
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_alias(self, mock_get_host: MagicMock) -> None:
        # On a root-domain alias host ('www'), the login page redirects to
        # the realm picker at /accounts/go/, preserving any ?next= parameter.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/", {"next": "/upgrade/"})
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_domain(self, mock_get_host: MagicMock) -> None:
        # Same redirect on the bare root domain...
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/", {"next": "/upgrade/"})
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
        # ...and when the host equals a custom EXTERNAL_HOST.
        mock_get_host.return_value = 'www.testserver.com'
        with self.settings(ROOT_DOMAIN_LANDING_PAGE=True,
                           EXTERNAL_HOST='www.testserver.com',
                           ROOT_SUBDOMAIN_ALIASES=['test']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/')
            result = self.client_get("/en/login/", {"next": "/upgrade/"})
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/accounts/go/?next=%2Fupgrade%2F')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_works_without_subdomains(self, mock_get_host: MagicMock) -> None:
        # Without the landing page enabled, the login page renders normally
        # on both the 'www' alias and the bare root domain.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
    def test_login_page_registration_hint(self) -> None:
        # The invitation hint appears only once the realm requires invites.
        response = self.client_get("/login/")
        self.assert_not_in_success_response(["Don't have an account yet? You need to be invited to join this organization."], response)
        realm = get_realm("zulip")
        realm.invite_required = True
        realm.save(update_fields=["invite_required"])
        response = self.client_get("/login/")
        self.assert_in_success_response(["Don't have an account yet? You need to be invited to join this organization."], response)
class TestFindMyTeam(ZulipTestCase):
    """Tests for the /accounts/find/ account-finder form and its emails."""
    def test_template(self) -> None:
        result = self.client_get('/accounts/find/')
        self.assertIn("Find your Zulip accounts", result.content.decode('utf8'))
    def test_result(self) -> None:
        # We capitalize a letter in cordelia's email to test that the search is case-insensitive.
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,cordeliA@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2CcordeliA%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn("iago@zulip.com", content)
        self.assertIn("cordeliA@zulip.com", content)
        from django.core.mail import outbox
        # 3 = 1 + 2 -- Cordelia gets an email each for the "zulip" and "lear" realms.
        self.assertEqual(len(outbox), 3)
    def test_find_team_ignore_invalid_email(self) -> None:
        # A syntactically valid but unknown address is still echoed on the
        # page, but no email is sent for it.
        result = self.client_post('/accounts/find/',
                                  dict(emails="iago@zulip.com,invalid_email@zulip.com"))
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/accounts/find/?emails=iago%40zulip.com%2Cinvalid_email%40zulip.com")
        result = self.client_get(result.url)
        content = result.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", content)
        self.assertIn(self.example_email("iago"), content)
        self.assertIn("invalid_email@", content)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
    def test_find_team_reject_invalid_email(self) -> None:
        # A malformed address fails form validation outright.
        result = self.client_post('/accounts/find/',
                                  dict(emails="invalid_string"))
        self.assertEqual(result.status_code, 200)
        self.assertIn(b"Enter a valid email", result.content)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
        # Just for coverage on perhaps-unnecessary validation code.
        result = self.client_get("/accounts/find/", {"emails": "invalid"})
        self.assertEqual(result.status_code, 200)
    def test_find_team_zero_emails(self) -> None:
        data = {'emails': ''}
        result = self.client_post('/accounts/find/', data)
        self.assertIn('This field is required', result.content.decode('utf8'))
        self.assertEqual(result.status_code, 200)
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_one_email(self) -> None:
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 1)
    def test_find_team_deactivated_user(self) -> None:
        # Deactivated accounts are silently skipped: the redirect happens as
        # usual, but no email goes out.
        do_deactivate_user(self.example_user("hamlet"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_deactivated_realm(self) -> None:
        # Same for accounts in a deactivated realm.
        do_deactivate_realm(get_realm("zulip"))
        data = {'emails': self.example_email("hamlet")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=hamlet%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_bot_email(self) -> None:
        # Bot accounts never receive find-team emails.
        data = {'emails': self.example_email("webhook_bot")}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, '/accounts/find/?emails=webhook-bot%40zulip.com')
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
    def test_find_team_more_than_ten_emails(self) -> None:
        # The form enforces a cap of 10 addresses per request.
        data = {'emails': ','.join(f'hamlet-{i}@zulip.com' for i in range(11))}
        result = self.client_post('/accounts/find/', data)
        self.assertEqual(result.status_code, 200)
        self.assertIn("Please enter at most 10", result.content.decode('utf8'))
        from django.core.mail import outbox
        self.assertEqual(len(outbox), 0)
class ConfirmationKeyTest(ZulipTestCase):
    def test_confirmation_key(self) -> None:
        # The confirmation_key view echoes back the key stored in the
        # session; a mocked request with a plain dict session suffices.
        request = MagicMock()
        request.session = {
            'confirmation_key': {'confirmation_key': 'xyzzy'},
        }
        result = confirmation_key(request)
        self.assert_json_success(result)
        self.assert_in_response('xyzzy', result)
class MobileAuthOTPTest(ZulipTestCase):
    """Unit tests for the OTP helper functions used by mobile auth."""
    def test_xor_hex_strings(self) -> None:
        # Nibble-wise XOR of two equal-length hex strings.
        self.assertEqual(xor_hex_strings('1237c81ab', '18989fd12'), '0aaf57cb9')
        # Mismatched lengths are rejected with an assertion.
        with self.assertRaises(AssertionError):
            xor_hex_strings('1', '31')
    def test_is_valid_otp(self) -> None:
        self.assertFalse(is_valid_otp('1234'))            # too short
        self.assertTrue(is_valid_otp('1234abcd' * 8))     # 64 hex chars
        self.assertFalse(is_valid_otp('1234abcZ' * 8))    # non-hex digit
    def test_ascii_to_hex(self) -> None:
        # The two conversions are inverses of each other.
        self.assertEqual(ascii_to_hex('ZcdR1234'), '5a63645231323334')
        self.assertEqual(hex_to_ascii('5a63645231323334'), 'ZcdR1234')
    def test_otp_encrypt_api_key(self) -> None:
        api_key = '12ac' * 8
        otp = '7be38894' * 8
        encrypted = otp_encrypt_api_key(api_key, otp)
        self.assertEqual(encrypted, '4ad1e9f7' * 8)
        # Decryption with the same OTP round-trips to the original key.
        self.assertEqual(otp_decrypt_api_key(encrypted, otp), api_key)
class FollowupEmailTest(ZulipTestCase):
    def test_followup_day2_email(self) -> None:
        # followup_day2_email_delay picks a delay based on the signup
        # weekday in the user's timezone — presumably so the "day 2" email
        # never lands on a weekend (note the Friday case waits 3 days).
        # TODO(review): confirm rationale against the implementation.
        user_profile = self.example_user('hamlet')
        # Test date_joined == Sunday
        user_profile.date_joined = datetime.datetime(2018, 1, 7, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Tuesday
        user_profile.date_joined = datetime.datetime(2018, 1, 2, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=2, hours=-1))
        # Test date_joined == Thursday
        user_profile.date_joined = datetime.datetime(2018, 1, 4, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
        # Test date_joined == Friday
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=3, hours=-1))
        # Time offset of America/Phoenix is -07:00
        user_profile.timezone = 'America/Phoenix'
        # Test date_joined == Friday in UTC, but Thursday in the user's timezone
        user_profile.date_joined = datetime.datetime(2018, 1, 5, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        self.assertEqual(followup_day2_email_delay(user_profile), datetime.timedelta(days=1, hours=-1))
class NoReplyEmailTest(ZulipTestCase):
    def test_noreply_email_address(self) -> None:
        # With token support enabled (the default), the address matches the
        # tokenized pattern; disabled, a plain noreply@ address is returned.
        self.assertTrue(re.search(self.TOKENIZED_NOREPLY_REGEX, FromAddress.tokenized_no_reply_address()))
        with self.settings(ADD_TOKENS_TO_NOREPLY_ADDRESS=False):
            self.assertEqual(FromAddress.tokenized_no_reply_address(), "noreply@testserver")
class TwoFactorAuthTest(ZulipTestCase):
    @patch('two_factor.models.totp')
    def test_two_factor_login(self, mock_totp: MagicMock) -> None:
        """End-to-end two-step login: password auth, then the SMS'd TOTP token."""
        token = 123456
        email = self.example_email('hamlet')
        password = self.ldap_password('hamlet')
        user_profile = self.example_user('hamlet')
        user_profile.set_password(password)
        user_profile.save()
        self.create_default_device(user_profile)
        # Force the TOTP computation to always yield our fixed token.
        def totp(*args: Any, **kwargs: Any) -> int:
            return token
        mock_totp.side_effect = totp
        with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',),
                           TWO_FACTOR_CALL_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_SMS_GATEWAY='two_factor.gateways.fake.Fake',
                           TWO_FACTOR_AUTHENTICATION_ENABLED=True):
            # Step 1: username/password; the fake gateway "sends" the token.
            first_step_data = {"username": email,
                               "password": password,
                               "two_factor_login_view-current_step": "auth"}
            with self.assertLogs('two_factor.gateways.fake', 'INFO') as info_logs:
                result = self.client_post("/accounts/login/", first_step_data)
            self.assertEqual(info_logs.output, [
                'INFO:two_factor.gateways.fake:Fake SMS to +12125550100: "Your token is: 123456"'
            ])
            self.assertEqual(result.status_code, 200)
            # Step 2: submit the token and land in the app.
            second_step_data = {"token-otp_token": str(token),
                                "two_factor_login_view-current_step": "token"}
            result = self.client_post("/accounts/login/", second_step_data)
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result["Location"], "http://zulip.testserver")
            # Going to login page should redirect to '/' if user is already
            # logged in.
            result = self.client_get('/accounts/login/')
            self.assertEqual(result["Location"], "http://zulip.testserver")
class NameRestrictionsTest(ZulipTestCase):
    def test_whitelisted_disposable_domains(self) -> None:
        # Per the test name, OPayQ.com is on the disposable-domain
        # whitelist, so it must not be classified as disposable.
        self.assertFalse(is_disposable_domain('OPayQ.com'))
class RealmRedirectTest(ZulipTestCase):
    """Tests for the /accounts/go/ organization-URL redirect form."""
    def test_realm_redirect_without_next_param(self) -> None:
        result = self.client_get("/accounts/go/")
        self.assert_in_success_response(["Enter your organization's Zulip URL"], result)
        result = self.client_post("/accounts/go/", {"subdomain": "zephyr"})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result["Location"], "http://zephyr.testserver")
        # Unknown subdomains re-render the form with an error message.
        result = self.client_post("/accounts/go/", {"subdomain": "invalid"})
        self.assert_in_success_response(["We couldn't find that Zulip organization."], result)
    def test_realm_redirect_with_next_param(self) -> None:
        # The ?next= parameter survives the round trip and is appended to
        # the realm URL on redirect.
        result = self.client_get("/accounts/go/", {"next": "billing"})
        self.assert_in_success_response(["Enter your organization's Zulip URL", 'action="/accounts/go/?next=billing"'], result)
        result = self.client_post("/accounts/go/?next=billing", {"subdomain": "lear"})
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result["Location"], "http://lear.testserver/billing")
|
showell/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 203,718
|
[
"VisIt"
] |
34a3006bbd83e442b5463749c68f6938a9bfcd31e7e7fb89d126aed6fb4a3716
|
import seaborn # noqa
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler, Normalizer
# Empirically measure how much of a unit-normalized random point cloud falls
# inside axis-aligned bounding boxes of growing radius, for several
# dimensionalities -- an illustration of the curse of dimensionality.
num_vecs = 100000
# num_num_dims = 10
# dim_stride = 10
num_radii = 20
num_dims_list = [2, 4, 8, 18, 32, 64, 128]  # list(range(0, num_num_dims * dim_stride + 1, dim_stride))
rand_list = [np.random.rand, np.random.randn]  # uniform and Gaussian samplers
radii = np.array(list(range(1, num_radii + 1)))
radii = radii / len(radii)  # radii now span (0, 1] in equal steps
# counts[sampler, radius, dimensionality] = number of vectors fully inside the box
counts = np.zeros((len(rand_list), len(radii), len(num_dims_list)))
for k, rand in enumerate(rand_list):
    for j, num_dims in enumerate(tqdm(num_dims_list)):
        x = rand(num_vecs, num_dims)
        # scaler = MinMaxScaler(feature_range=(-1, 1))
        # x = scaler.fit_transform(x)
        # normalizer = Normalizer()
        # x = normalizer.fit_transform(x)
        # equivalent to normalizer:
        x *= (1. / np.linalg.norm(x, axis=1)).reshape(-1, 1).dot(np.ones((1, x.shape[1])))
        for i, r in enumerate(radii):
            # A vector is "inside" when every coordinate lies in (-r, r).
            mask = (-r < x) & (x < r)
            counts[k, i, j] = (mask.sum(axis=1) == mask.shape[1]).sum()
"""
>>> df = pd.DataFrame(counts[0], index=radii, columns=num_dims_list) / num_vecs
>>> df = df.round(2)
>>> df[df == 0] = ''
>>> df
2 4 8 18 32 64 128
0.05
0.10
0.15 0.36
0.20 0.1 1
0.25 1 1
0.30 0.55 1 1
0.35 0.12 0.98 1 1
0.40 0.62 1 1 1
0.45 0.03 0.92 1 1 1
0.50 0.2 0.99 1 1 1
0.55 0.01 0.5 1 1 1 1
0.60 0.08 0.74 1 1 1 1
0.65 0.24 0.89 1 1 1 1
0.70 0.44 0.96 1 1 1 1
0.75 0.12 0.64 0.98 1 1 1 1
0.80 0.25 0.78 1 1 1 1 1
0.85 0.38 0.88 1 1 1 1 1
0.90 0.52 0.94 1 1 1 1 1
0.95 0.67 0.98 1 1 1 1 1
1.00 1 1 1 1 1 1 1
"""
for k, rand in enumerate(rand_list):
    # Fraction of vectors inside the box, per radius (rows) and dimensionality (cols).
    df = pd.DataFrame(counts[k], index=radii, columns=num_dims_list) / num_vecs
    fig1 = plt.figure(k * 2 + 1)
    plt.clf()
    ax1 = fig1.subplots()
    df.plot(ax=ax1)
    distribution = 'continuous' if rand.__name__ == 'rand' else 'Gaussian'
    plt.title('Bounding Box ({} distribution)'.format(distribution))
    plt.legend([str(d) + 'D' for d in num_dims_list])
    plt.xlabel('Bounding Box Radius (z-score)')
    plt.ylabel('Portion Inside (%)')
    plt.grid(True)
    fig2 = plt.figure(k * 2 + 2)
    plt.clf()
    ax2 = fig2.subplots()
    # First differences across radii give the mass of each concentric shell.
    df.diff().plot(ax=ax2)
    plt.title('Bounding Shell Density ({} distribution)'.format(distribution))
    plt.legend([str(d) + 'D' for d in num_dims_list])
    plt.xlabel('Shell Inner Radius (z-score)')
    plt.ylabel('Portion Inside (%)')
    plt.grid(True)
    # NOTE(review): with block=False this just flushes the figures each pass;
    # original nesting of this call is ambiguous in the mangled source.
    plt.show(block=False)
|
totalgood/nlpia
|
src/nlpia/book/examples/ch_app_h.py
|
Python
|
mit
| 3,097
|
[
"Gaussian"
] |
6848451543663dee5158a8116d98e3269fb6b2472684cbebc1b85f355dda281c
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
import random
import scipy
import copy
from pysurvival import utils
from pysurvival.models import BaseModel
# %matplotlib inline
# List of Survival Distributions accepted by SimulationModel; matched
# case-insensitively (by prefix/suffix) in time/hazard/survival functions.
DISTRIBUTIONS = ['Exponential',
                 'Weibull',
                 'Gompertz',
                 'Log-Logistic',
                 'Log-Normal',]
# List of risk types accepted by SimulationModel.risk_function
RISK_TYPES = ['Linear', 'Square', 'Gaussian']
class SimulationModel(BaseModel):
    """
    A general framework for simulating right-censored survival data
    for proportional hazards models by incorporating
    * a baseline hazard function from a known survival distribution,
    * a set of covariates.
    The framework is based on "Generating Survival Times to Simulate
    Cox Proportional Hazards Models"
    https://www.ncbi.nlm.nih.gov/pubmed/22763916
    The formula for the different survival times and functions, and hazard
    functions can be found at :
    http://data.princeton.edu/pop509/ParametricSurvival.pdf
    Parameters:
    -----------
    * survival_distribution: string (default = 'exponential')
        Defines a known survival distribution. The available options are:
        - Exponential
        - Weibull
        - Gompertz
        - Log-Logistic
        - Log-Normal
    * risk_type: string (default='linear')
        Defines the type of risk function. The available options are:
        - Linear
        - Square
        - Gaussian
    * alpha: double (default = 1.)
        the scale parameter
    * beta: double (default = 1.)
        the shape parameter
    * bins: int (default=100)
        the number of bins of the time axis
    * censored_parameter: double (default = 1.)
        coefficient used to calculate the censored distribution. This
        distribution is a normal such that N(loc=censored_parameter, scale=5)
    * risk_parameter: double (default = 1.)
        Scaling coefficient of the risk score such that:
        - linear: r(x) = exp(<x, W>)
        - square: r(x) = exp(risk_parameter*(<x, W>)^2)
        - gaussian: r(x) = exp( exp(-(<x, W>)^2/risk_parameter) )
        <.,.> is the dot product
    """

    def __init__(self, survival_distribution = 'exponential',
                 risk_type = 'linear', censored_parameter = 1., alpha = 1, beta = 1.,
                 bins = 100, risk_parameter = 1.):
        # Saving the attributes
        self.censored_parameter = censored_parameter
        self.alpha = alpha
        self.beta = beta
        self.risk_parameter = risk_parameter
        self.bins = bins
        self.features = []
        # Checking risk_type against the supported RISK_TYPES
        # (case-insensitive); the original capitalization is kept.
        if any([risk_type.lower() == r.lower() for r in RISK_TYPES ]):
            self.risk_type = risk_type
        else:
            error = "{} isn't a valid risk type. "
            error += "Only {} are currently available."
            error = error.format(risk_type, ", ".join(RISK_TYPES))
            raise NotImplementedError(error)
        # Checking distribution against the supported DISTRIBUTIONS
        if any([survival_distribution.lower() == d.lower() \
                for d in DISTRIBUTIONS ]):
            self.survival_distribution = survival_distribution
        else:
            error = "{} isn't a valid survival distribution. "
            error += "Only {} are currently available."
            error = error.format(survival_distribution,", ".join(DISTRIBUTIONS))
            raise NotImplementedError(error)
        # Initializing the elements from BaseModel
        super(SimulationModel, self).__init__(auto_scaler = True)

    @staticmethod
    def random_data(N):
        """
        Generating a array of size N from a random distribution -- the available
        distributions are:
        * binomial,
        * chisquare,
        * exponential,
        * gamma,
        * normal,
        * uniform
        * laplace

        Returns a (name, samples) tuple where `name` is the key of the
        distribution that was drawn and `samples` is the generated array.
        """
        # Random index in [0, 4] used to pick one of the shuffled
        # distribution names below.
        index = np.random.binomial(n = 4, p = 0.5)
        distributions = {
            'binomial_a': np.random.binomial(n = 20, p = 0.6, size = N ),
            'binomial_b': np.random.binomial(n = 200, p = 0.6, size = N ),
            'chisquare': np.random.chisquare(df = 10, size = N ),
            'exponential_a': np.random.exponential(scale=0.1, size = N ),
            'exponential_b': np.random.exponential(scale=0.01, size = N ),
            'gamma': np.random.gamma(shape=2., scale=2., size = N ),
            'normal_a': np.random.normal(loc=-1.0, scale=5.0, size=N ),
            'normal_b': np.random.normal(loc=10.0, scale=10.0, size=N ),
            'uniform_a': np.random.uniform(low=-2.0, high=10.0, size=N ),
            'uniform_b': np.random.uniform(low=-20.0, high=100.0, size=N ),
            'laplace': np.random.laplace(loc=0.0, scale=1.0, size=N )
        }
        list_distributions = copy.deepcopy(list(distributions.keys()))
        random.shuffle(list_distributions)
        key = list_distributions[ index ]
        return key, distributions[key]

    def time_function(self, BX):
        """
        Calculating the survival times based on the given distribution
        T = H^(-1)( -log(U)/risk_score ), where:
        * H is the cumulative baseline hazard function
          (H^(-1) is the inverse function)
        * U is a random variable uniform - Uni[0,1].
        The method is inspired by https://gist.github.com/jcrudy/10481743
        """
        # Calculating scale coefficient using the features
        num_samples = BX.shape[0]
        lambda_exp_BX = np.exp(BX)*self.alpha
        lambda_exp_BX = lambda_exp_BX.flatten()
        # Generating random uniform variables
        U = np.random.uniform(0, 1, num_samples)
        # Each branch also normalizes self.survival_distribution to its
        # canonical spelling as a side effect.
        # Exponential
        if self.survival_distribution.lower().startswith('exp') :
            self.survival_distribution = 'Exponential'
            return - np.log( U )/( lambda_exp_BX )
        # Weibull
        elif self.survival_distribution.lower().startswith('wei') :
            self.survival_distribution = 'Weibull'
            return np.power( - np.log( U )/( lambda_exp_BX ), 1./self.beta )
        # Gompertz
        elif self.survival_distribution.lower().startswith('gom') :
            self.survival_distribution = 'Gompertz'
            return ( 1./self.beta)*\
                np.log( 1 - self.beta*np.log( U )/(lambda_exp_BX) )
        # Log-Logistic
        elif 'logistic' in self.survival_distribution.lower() :
            self.survival_distribution = 'Log-Logistic'
            return np.power( U/(1.-U), 1./self.beta )/(lambda_exp_BX )
        # Log-Normal
        elif 'normal' in self.survival_distribution.lower() :
            self.survival_distribution = 'Log-Normal'
            W = np.random.normal(0, 1, num_samples)
            return lambda_exp_BX*np.exp(self.beta*W)

    def hazard_function(self, t, BX):
        """ Calculating the hazard function based on the given distribution """
        # Calculating scale coefficient using the features
        _lambda = self.alpha*np.exp( BX )
        # Exponential: constant hazard over time
        if self.survival_distribution.lower().startswith( 'exp' ) :
            return np.repeat(_lambda, len(t))
        # Weibull
        elif self.survival_distribution.lower().startswith('wei'):
            return _lambda*self.beta*np.power( t, self.beta-1 )
        # Gompertz
        elif self.survival_distribution.lower().startswith('gom'):
            return _lambda*np.exp(self.beta*t )
        # Log-Logistic
        elif self.survival_distribution.lower().endswith('logistic'):
            numerator = _lambda*self.beta*np.power((_lambda*t), self.beta-1 )
            denominator = (1 + np.power( (_lambda*t), self.beta) )
            return numerator/denominator
        # Log-Normal
        elif self.survival_distribution.lower().endswith('normal'):
            arg_normal = (np.log(t) - np.log(_lambda))/self.beta
            numerator = (1./(t*self.beta))*scipy.stats.norm.pdf( arg_normal )
            denominator = 1. - scipy.stats.norm.cdf(arg_normal)
            return numerator/denominator

    def survival_function(self, t, BX):
        """
        Calculating the survival function based on the given
        distribution
        """
        # Calculating scale coefficient using the features
        _lambda = self.alpha*np.exp( BX )
        # Exponential
        if self.survival_distribution.lower().startswith( 'exp' ) :
            return np.exp( -t*_lambda )
        # Weibull
        elif self.survival_distribution.lower().startswith('wei'):
            return np.exp( -np.power(t, self.beta)*_lambda )
        # Gompertz
        elif self.survival_distribution.lower().startswith('gom'):
            return np.exp( -_lambda/self.beta*( np.exp(self.beta*t) - 1) )
        # Log-Logistic
        elif self.survival_distribution.lower().endswith('logistic'):
            return 1./(1.+ np.power(_lambda*t, self.beta) )
        # Log-Normal
        elif self.survival_distribution.lower().endswith('normal'):
            arg_cdf = (np.log(t) - np.log(_lambda))/self.beta
            return 1. - scipy.stats.norm.cdf(arg_cdf)

    def risk_function(self, x_std):
        """ Calculating the risk function based on the given risk type.

        Returns a column vector of shape (n_samples, 1).
        """
        # Dot product <x, W>
        risk = np.dot( x_std, self.feature_weights )
        # Choosing the type of risk
        if self.risk_type.lower() == 'linear' :
            return risk.reshape(-1, 1)
        elif self.risk_type.lower() == 'square' :
            risk = np.square(risk*self.risk_parameter)
        elif self.risk_type.lower() == 'gaussian' :
            risk = np.square(risk)
            risk = np.exp( - risk*self.risk_parameter)
        return risk.reshape(-1, 1)

    def generate_data(self, num_samples = 100, num_features = 3,
                      feature_weights = None):
        """
        Generating a dataset of simulated survival times from a given
        distribution through the hazard function using the Cox model
        Parameters:
        -----------
        * `num_samples`: **int** *(default=100)* --
            Number of samples to generate
        * `num_features`: **int** *(default=3)* --
            Number of features to generate
        * `feature_weights`: **array-like** *(default=None)* --
            list of the coefficients of the underlying Cox-Model.
            The features linked to each coefficient are generated
            from random distribution from the following list:
            * binomial
            * chisquare
            * exponential
            * gamma
            * normal
            * uniform
            * laplace
            If None then feature_weights = [1.]*num_features
        Returns:
        --------
        * dataset: pandas.DataFrame
            dataset of simulated survival times, event status and features
        Example:
        --------
        from pysurvival.models.simulations import SimulationModel
        # Initializing the simulation model
        sim = SimulationModel( survival_distribution = 'gompertz',
                               risk_type = 'linear',
                               censored_parameter = 5.0,
                               alpha = 0.01,
                               beta = 5., )
        # Generating N Random samples
        N = 1000
        dataset = sim.generate_data(num_samples = N, num_features=5)
        # Showing a few data-points
        dataset.head()
        """
        # Data parameters
        self.num_variables = num_features
        if feature_weights is None :
            self.feature_weights = [1.]*self.num_variables
            feature_weights = self.feature_weights
        else:
            feature_weights = utils.check_data(feature_weights)
            if num_features != len(feature_weights):
                error = "The length of feature_weights ({}) "
                error += "and num_features ({}) are not the same."
                error = error.format(len(feature_weights), num_features)
                raise ValueError(error)
            self.feature_weights = feature_weights
        # Generating random features
        # Creating the features
        X = np.zeros((num_samples, self.num_variables))
        columns = []
        for i in range( self.num_variables ) :
            # Each column is drawn from a randomly picked distribution.
            key, X[:, i] = self.random_data(num_samples)
            columns.append( 'x_' + str(i+1) )
        X_std = self.scaler.fit_transform( X )
        BX = self.risk_function( X_std )
        # Building the survival times
        T = self.time_function(BX)
        # Censoring times: N(censored_parameter, 5), truncated at 0.
        C = np.random.normal( loc = self.censored_parameter,
                              scale = 5, size = num_samples )
        C = np.maximum(C, 0.)
        time = np.minimum( T, C )
        # Event indicator: 1. when the true event time was observed (T <= C).
        E = 1.*(T == time)
        # Building dataset
        self.features = columns
        self.dataset = pd.DataFrame( data = np.c_[X, time, E],
                                     columns = columns + ['time', 'event'] )
        # Building the time axis and time buckets
        self.times = np.linspace(0., max(self.dataset['time']), self.bins)
        self.get_time_buckets()
        # Building baseline functions (risk score fixed at 0)
        self.baseline_hazard = self.hazard_function(self.times, 0)
        self.baseline_survival = self.survival_function(self.times, 0)
        # Printing summary message
        message_to_print = "Number of data-points: {} - Number of events: {}"
        print( message_to_print.format(num_samples, sum(E)) )
        return self.dataset

    def predict(self, x, t = None):
        """
        Predicting the hazard, density and survival functions
        Parameters:
        -----------
        * x: pd.Dataframe or np.ndarray or list
            x is the testing dataset containing the features
            x should not be standardized before, the model
            will take care of it
        * t: float (default=None)
            Time at which hazard, density and survival functions
            should be calculated. If None, the method returns
            the functions for all times t.
        """
        # Convert x into the right format
        x = utils.check_data(x)
        # Scaling the dataset
        if x.ndim == 1:
            x = self.scaler.transform( x.reshape(1, -1) )
        elif x.ndim == 2:
            x = self.scaler.transform( x )
        else:
            # NOTE(review): this branch only runs when ndim is neither 1 nor 2,
            # so the inner ndim == 1 check can never be True -- dead code; such
            # inputs currently pass through unscaled. Confirm intent.
            # Ensuring x has 2 dimensions
            if x.ndim == 1:
                x = np.reshape(x, (1, -1))
        # Calculating risk_score, hazard, density and survival
        BX = self.risk_function(x)
        hazard = self.hazard_function(self.times, BX.reshape(-1, 1))
        survival = self.survival_function(self.times, BX.reshape(-1, 1))
        density = (hazard*survival)
        if t is None:
            return hazard, density, survival
        else:
            # Map t to the nearest time-bucket lower edge and slice that column.
            min_abs_value = [abs(a_j_1-t) for (a_j_1, a_j) in self.time_buckets]
            index = np.argmin(min_abs_value)
            return hazard[:, index], density[:, index], survival[:, index]

    def predict_risk(self, x):
        """
        Predicting the risk score function
        Parameters:
        -----------
        * x: pd.Dataframe or np.ndarray or list
            x is the testing dataset containing the features
            x should not be standardized before, the model
            will take care of it
        """
        # Convert x into the right format
        x = utils.check_data(x)
        # Scaling the dataset
        if x.ndim == 1:
            x = self.scaler.transform( x.reshape(1, -1) )
        elif x.ndim == 2:
            x = self.scaler.transform( x )
        else:
            # NOTE(review): same unreachable fallback as in predict() -- the
            # inner ndim == 1 check can never be True here. Confirm intent.
            # Ensuring x has 2 dimensions
            if x.ndim == 1:
                x = np.reshape(x, (1, -1))
        # Calculating risk_score
        risk_score = self.risk_function(x)
        return risk_score
|
square/pysurvival
|
pysurvival/models/simulations.py
|
Python
|
apache-2.0
| 16,325
|
[
"Gaussian"
] |
f512606c14220a18cf830090c751bb5078bdacd5cfb30f8e461383200f844466
|
"""
This script screens genes against a reference database using megaBLAST for every input FASTA file.
Specifically, it takes as input a list of FASTA files and searches every DNA sequence against the reference database. Obviously,
it performs a targeted analysis for input sets of DNA sequences.
For example, you may want to find out all resistance genes in every bacterial genome, for which you may create FASTA files of
coding sequences for every genome and use this script to profile this kind of genes.
Number of options: 6 (2 compulsory and 4 optional)
Usage:
python screen_genes_blast.py --in *.fna --db [reference database] --strains [a comma-delimited string of strain names]
--genomes [a comma-delimited string of genome names] --opt [options and arguments for BLAST]
--outfmt [output format code] > [output file name]
Prerequisite: A BLAST nucleotide database should be made before using this script.
makeblastdb -in your.fasta -dbtype nucl -out db_name -logfile your.log
Options "--strains" and "--genomes" are optional.
A spreadsheet can be created beforehand to ensure the strain name and the genome name to match each FASTA file:
strain genome fasta_file
AH0650_Sm1 chr chr.fna
AH0650_Sm1 plasmid plasmid.fna
Author: Yu Wan (wanyuac@gmail.com, https://github.com/wanyuac)
Development history: 3 July 2016
Python version: 2.7.10
License: GNU GPL 2.1
"""
from argparse import ArgumentParser
import sys, os, subprocess
def parse_arguments():
    """Parse command-line options for the megaBLAST gene-screening script.

    Returns an argparse.Namespace with attributes: input (list of FASTA
    files), db, strains, genomes, opt and outfmt.
    """
    # The previous description ("Fix problems in SRST2's ARG-Annot database")
    # was copy-pasted from an unrelated script; fixed to match this tool.
    parser = ArgumentParser(description = "Screen genes against a reference database using megaBLAST")
    parser.add_argument("--in", "-i", dest = "input", nargs = "+", type = str, required = True, default = "", help = "A list of input FASTA files")
    parser.add_argument("--db", "-d", dest = "db", type = str, required = True, default = "", help="A reference nucleotide database for BLAST")
    parser.add_argument("--strains", "-s", dest = "strains", type = str, required = False, default = "", help = "(optional) Comma-delimited names of bacterial strains")
    parser.add_argument("--genomes", "-g", dest = "genomes", type = str, required = False, default = "", help = "(optional) Comma-delimited genome names")
    parser.add_argument("--opt", "-o", dest = "opt", type = str, required = False, default = "-evalue 0.001 -max_target_seqs 2 -perc_identity 98",\
        help = "Options and argument passed to BLAST")
    parser.add_argument("--outfmt", "-f", dest = "outfmt", type = str, required = False,\
        default = "6 qseqid sseqid qstart qend sstart send qlen slen length bitscore pident qcovs gaps evalue",\
        help = "The configuration of the 'outfmt' option for BLAST")
    return parser.parse_args()
def main():
args = parse_arguments()
n_fasta = len(args.input)
# parse strain information
if args.strains != "":
strains = args.strains.split(",")
n_str = len(strains)
else:
strains = None
n_str = 0
# parse genome information
if args.genomes != "":
genomes = args.genomes.split(",")
n_gen = len(genomes)
else:
genomes = None
n_gen = 0
# check whether strains, genomes and files match
if n_str != n_fasta:
sys.exit("Error: strain number is not equal to the number of FASTA files.")
if n_gen != n_fasta:
sys.exit("Error: genome number is not equal to the number of FASTA files.")
# get column names of the output file
colnames = args.outfmt.split(" ")[1 : ] # remove the first element -- the format id
# print the header line to the stdout
if n_gen > 0:
colnames = ["genome"] + colnames
if n_str > 0:
colnames = ["strain"] + colnames
print "\t".join(colnames)
# search every set of query sequences against the reference database
i = 0 # the counter of FASTA files
for fasta in args.input:
cmd = ["blastn", "-task", "megablast", "-db", args.db, "-query", fasta] + \
args.opt.split(" ") + ["-outfmt", args.outfmt] # Each pair of the option and its argument must be separated as elements of a list.
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
out = proc.communicate() # obtain the output of BLAST from the standard output
hits = out[0].splitlines() # stderr: out[1]
# print all lines in the current output
for line in hits:
if n_gen > 0:
line = genomes[i] + "\t" + line # add the genome name to each line
if n_str > 0:
line = strains[i] + "\t" + line # add the strain name to each line
print line
i += 1
# Run the screening pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
wanyuac/BINF_toolkit
|
screen_genes_blast.py
|
Python
|
gpl-3.0
| 4,849
|
[
"BLAST"
] |
93d9d2277877969ffaaaa9613ad7c70582fec10d893e0c0b9ee0f977345676fc
|
""" pytest for WorkflowTasks
"""
# pylint: disable=protected-access,missing-docstring,invalid-name
from mock import MagicMock
import pytest
from DIRAC.Interfaces.API.Job import Job
# sut
from DIRAC.TransformationSystem.Client.TaskManager import WorkflowTasks
# Mocked transformation client: task status/WMS-id updates always succeed.
mockTransClient = MagicMock()
mockTransClient.setTaskStatusAndWmsID.return_value = {'OK': True}
WMSClientMock = MagicMock()
jobMonitoringClient = MagicMock()
# System under test, wired up entirely with mocks.
wfTasks = WorkflowTasks(transClient=mockTransClient,
                        submissionClient=WMSClientMock,
                        jobMonitoringClient=jobMonitoringClient,
                        outputDataModule="mock")
# Output-data module stub whose execute() reports success with no files.
odm_o = MagicMock()
odm_o.execute.return_value = {'OK': True, 'Value': {}}
wfTasks.outputDataModule_o = odm_o
# Input fixtures: task 2 carries InputData, which forbids bulk submission.
taskDict = {1: {'TransformationID': 1, 'a1': 'aa1', 'b1': 'bb1', 'Site': 'MySite'},
            2: {'TransformationID': 1, 'a2': 'aa2', 'b2': 'bb2', 'InputData': ['a1', 'a2']},
            3: {'TransformationID': 2, 'a3': 'aa3', 'b3': 'bb3'}, }
taskDictNoInputs = {1: {'TransformationID': 1, 'a1': 'aa1', 'b1': 'bb1', 'Site': 'MySite'},
                    2: {'TransformationID': 1, 'a2': 'aa2', 'b2': 'bb2'},
                    3: {'TransformationID': 2, 'a3': 'aa3', 'b3': 'bb3'}, }
# Expected result for per-task (non-bulk) preparation.
expected = {'OK': True,
            'Value': {1: {'a1': 'aa1', 'TaskObject': '', 'TransformationID': 1,
                          'b1': 'bb1', 'Site': 'ANY', 'JobType': 'User'},
                      2: {'TaskObject': '', 'a2': 'aa2', 'TransformationID': 1,
                          'InputData': ['a1', 'a2'], 'b2': 'bb2', 'Site': 'ANY', 'JobType': 'User'},
                      3: {'TaskObject': '', 'a3': 'aa3', 'TransformationID': 2,
                          'b3': 'bb3', 'Site': 'ANY', 'JobType': 'User'}
                      }
            }
# Expected result for bulk submission (single BulkJobObject entry).
expectedBulk = {'OK': True,
                'Value': {'BulkJobObject': '',
                          1: {'a1': 'aa1', 'TransformationID': 1, 'b1': 'bb1',
                              'Site': 'MySite', 'JobType': 'User'},
                          2: {'a2': 'aa2', 'TransformationID': 1, 'b2': 'bb2',
                              'InputData': ['a1', 'a2'], 'JobType': 'User'},
                          3: {'TransformationID': 2, 'a3': 'aa3', 'b3': 'bb3',
                              'JobType': 'User'}}}
@pytest.mark.parametrize("taskDictionary, bulkSubmissionFlag, result, expectedRes", [
    (taskDict, False, True, expected),
    (taskDict, True, False, expectedBulk),
    (taskDictNoInputs, True, True, expectedBulk),
])
def test_prepareTranformationTasks(taskDictionary, bulkSubmissionFlag, result, expectedRes):
    """prepareTransformationTasks must succeed/fail as expected and fill every task."""
    outcome = wfTasks.prepareTransformationTasks('', taskDictionary, 'test_user', 'test_group', 'test_DN',
                                                 bulkSubmissionFlag=bulkSubmissionFlag)
    assert outcome['OK'] == result
    if not outcome['OK']:
        return
    for taskID, taskAttrs in outcome['Value'].iteritems():
        if taskID == 'BulkJobObject':
            continue
        assert taskID in expectedRes['Value']
        for attrName, attrValue in taskAttrs.iteritems():
            assert attrName in expectedRes['Value'][taskID]
            if attrName == 'TaskObject':
                # A freshly built Job is not comparable by value.
                assert isinstance(attrValue, Job)
            else:
                assert attrValue == expectedRes['Value'][taskID][attrName]
def ourgetSitesForSE(ses):
    """Deterministic stand-in for getSitesForSE used via mocker.patch.

    Accepts either a list of SE names or a comma-joined string; returns a
    DIRAC-style result dict for the known inputs, None otherwise.
    """
    site_map = {
        'pippo': ['Site1'],
        'pluto': ['Site2'],
        'pippo,pluto': ['Site1', 'Site2'],
    }
    key = ','.join(ses) if isinstance(ses, list) else ses
    if key in site_map:
        return {'OK': True, 'Value': site_map[key]}
@pytest.mark.parametrize("paramsDict, expected", [
    ({'Site': '', 'TargetSE': ''}, ['ANY']),
    ({'Site': 'ANY', 'TargetSE': ''}, ['ANY']),
    ({'TargetSE': 'Unknown'}, ['ANY']),
    ({'Site': 'Site2', 'TargetSE': ''}, ['Site2']),
    ({'Site': 'Site1;Site2', 'TargetSE': 'pippo'}, ['Site1']),
    ({'Site': 'Site1;Site2', 'TargetSE': 'pippo,pluto'}, ['Site1', 'Site2']),
    ({'Site': 'Site1;Site2;Site3', 'TargetSE': 'pippo,pluto'}, ['Site1', 'Site2']),
    ({'Site': 'Site2', 'TargetSE': 'pippo,pluto'}, ['Site2']),
    ({'Site': 'ANY', 'TargetSE': 'pippo,pluto'}, ['Site1', 'Site2']),
    ({'Site': 'Site1', 'TargetSE': 'pluto'}, [])
])
def test__handleDestination(mocker, paramsDict, expected):
    """_handleDestination must intersect the Site list with the TargetSE's sites."""
    # Replace the real SE-to-sites lookup with the deterministic stub above.
    mocker.patch('DIRAC.TransformationSystem.Client.TaskManagerPlugin.getSitesForSE',
                 side_effect=ourgetSitesForSE)
    destinations = wfTasks._handleDestination(paramsDict)
    assert sorted(destinations) == sorted(expected)
|
arrabito/DIRAC
|
TransformationSystem/Client/test/Test_Client_WorkflowTasks.py
|
Python
|
gpl-3.0
| 4,449
|
[
"DIRAC"
] |
e17d523acdc9e4d40bbcd940ac505a7048d890c6c2d1f1bb7a030250a0091c82
|
# -*- coding: utf-8 -*-
# @Author: Maria Elena Villalobos Ponte
# @Date: 2016-12-14 22:10:04
# @Last Modified by: Maria Elena Villalobos Ponte
# @Last Modified time: 2016-12-15 12:07:46
import rpy2
import rpy2.robjects.numpy2ri
import rpy2.robjects.pandas2ri
import numpy as np
from rpy2.robjects.packages import importr
# Enable automatic numpy/pandas <-> R object conversion for rpy2 calls.
rpy2.robjects.numpy2ri.activate()
rpy2.robjects.pandas2ri.activate()
# R packages wrapped for use from Python.
dHSIC_R = importr('dHSIC')
bnlearn = importr('bnlearn')
class dHSIC:
    """Wrapper around the R dHSIC independence test (dHSIC::dhsic.test).

    Runs the test on construction and exposes the statistic, the critical
    value and the p-value as plain Python scalars.
    """

    def __init__(self, X, Y, alpha=0.05, method="gamma",
                 kernel="gaussian", B=100, pairwise=False):
        self.res = dHSIC_R.dhsic_test(X, Y, alpha, method, kernel, B, pairwise)
        stat_vec, crit_vec, pval_vec = self.res[0], self.res[1], self.res[2]
        # Each component is a length-1 R vector; unwrap the scalar.
        self.statistic = tuple(stat_vec)[0]
        self.critic_value = tuple(crit_vec)[0]
        self.p_value = tuple(pval_vec)[0]
class CI:
    """Wrapper around bnlearn's conditional-independence test (ci.test)."""

    def __init__(self, X_var, Y_var, Z_vars, data, test='corr'):
        conditioning_set = np.array(Z_vars)
        self.res = bnlearn.ci_test(X_var, Y_var, conditioning_set, data, test=test)
        # Unwrap the length-1 R vectors into scalars.
        self.statistic = self.res[0][0]
        self.p_value = self.res[1][0]
# Module is meant to be imported; nothing runs standalone.
if __name__ == '__main__':
    pass
|
mvp291/dsga1005
|
code/r_independence.py
|
Python
|
apache-2.0
| 1,104
|
[
"Gaussian"
] |
28d697ff65e0a67b804d8819b07cb86a1a003f167f6684f27b144a458ecca16e
|
##
## Look up application name via package name
##
## 0. Searching app name other metadata by package name
## ex:python LookUPAppName 0 packageName.txt output.csv
## The input file contains app package names which are separated by \n
## com.asus.email
## com.asus.todo
## com.asus.launcher
##
## 1. Searching popular apps' metadata from google play via keyword.
## ex:python LookUPAppName 1 email output.csv
## Author Feiwen Cheng
## Date : 2014/7/28
import urllib,urllib2,sys,codecs,traceback
from bs4 import BeautifulSoup
def loadPackageFromFile(fileName):
    """Read app package names, one per line, from a text file.

    Returns a list of package-name strings with surrounding newlines removed.
    """
    # Open read-only: the previous 'r+' (read/write) mode required write
    # permission and would fail on read-only input files, although the
    # file is never written to here.
    with open(fileName, 'r') as inputFile:
        return [line.strip("\n") for line in inputFile]
def searchPackageNameViaKeyword(keyword):
    """Search Google Play for `keyword` and return the package names found.

    Posts the query to the Play store search page and scrapes every app
    card for its "data-docid" attribute (the package name).
    """
    url = "https://play.google.com/store/search"
    query = urllib.urlencode({"q": keyword, "c": "apps"})
    page = urllib2.urlopen(urllib2.Request(url, query))
    soup = BeautifulSoup(page)
    # Each result card carries the package name in its data-docid attribute.
    cards = soup.findAll("div", class_="card no-rationale square-cover apps small")
    return [card["data-docid"] for card in cards]
#Check arg: mode (0=file of packages, 1=keyword), input, output CSV
if len(sys.argv)!=4:
    print "Usage",sys.argv[0], "0 inputFile outputFileName"
    print "Usage",sys.argv[0], "1 keyword outputFileName"
    sys.exit(0);
#Get package name through inputfile or keyword
packages = []
if(sys.argv[1] == "0"):
    packages = loadPackageFromFile(sys.argv[2])
else:
    packages = searchPackageNameViaKeyword(sys.argv[2])
# Packages whose Google Play lookup failed; retried against Wandoujia below.
appSearchFromWDJ =[]
# utf-8-sig adds a BOM so spreadsheet tools open the CSV correctly.
with codecs.open(sys.argv[3], 'w', 'utf-8-sig') as outputFile:
    # CSV header row, written one cell at a time.
    outputFile.write("App Package Name")
    outputFile.write(",")
    outputFile.write("App Name")
    outputFile.write(",")
    outputFile.write("App Category")
    outputFile.write(",")
    outputFile.write("Rating Count")
    outputFile.write(",")
    outputFile.write("Download Count")
    outputFile.write(",")
    outputFile.write("Score")
    outputFile.write(",")
    outputFile.write("S5")
    outputFile.write(",")
    outputFile.write("S4")
    outputFile.write(",")
    outputFile.write("S3")
    outputFile.write(",")
    outputFile.write("S2")
    outputFile.write(",")
    outputFile.write("S1")
    outputFile.write(",")
    outputFile.write("Price")
    outputFile.write(",")
    outputFile.write("Vendor")
    outputFile.write("\n")
    #visit the webpage of the app on the google play
    for appPackage in packages:
        # Defaults used when a field cannot be scraped.
        appName = "Unknown"
        appCategory = "Unknown"
        rateCount = "Unknown"
        downloadCount = "Unknown"
        score = "UnKnown"
        s5 ="Unknown"
        s4 ="Unknown"
        s3 ="Unknown"
        s2 ="Unknown"
        s1 ="Unknown"
        vendor = "Unknown"
        price = "0"
        try:
            #page = urllib2.urlopen("https://play.google.com/store/apps/details?id=com.square_enix.android_googleplay.FFIV_GP")
            page = urllib2.urlopen("https://play.google.com/store/apps/details?id="+appPackage)
            soup = BeautifulSoup(page)
            # Commas are stripped/replaced because the output is a CSV.
            appName = soup.find("div", class_="document-title").div.string.replace(","," ")
            appCategory = soup.find("a", class_="document-subtitle category").span.string.replace(","," ")
            rateCount = soup.find("div", class_="reviews-stats").find("span", class_="reviews-num").string.replace(",","")
            s5 = soup.find("div", class_="rating-bar-container five").find("span", class_="bar-number").string.replace(",","")
            s4 = soup.find("div", class_="rating-bar-container four").find("span", class_="bar-number").string.replace(",","")
            s3 = soup.find("div", class_="rating-bar-container three").find("span", class_="bar-number").string.replace(",","")
            s2 = soup.find("div", class_="rating-bar-container two").find("span", class_="bar-number").string.replace(",","")
            s1 = soup.find("div", class_="rating-bar-container one").find("span", class_="bar-number").string.replace(",","")
            downloadCount = soup.find(itemprop="numDownloads").string.replace(",","")
            score = soup.find("div", class_="score").string
            try:
                # Price is optional (free apps may lack the attribute).
                price = soup.find(itemprop="price")["content"].replace(",","")
            except:
                pass
            vendor = soup.find("a", class_="document-subtitle primary").span.string.replace(","," ")
            outputFile.write(appPackage)
            outputFile.write(",")
            outputFile.write(appName)
            outputFile.write(",")
            outputFile.write(appCategory)
            outputFile.write(",")
            outputFile.write(rateCount)
            outputFile.write(",")
            outputFile.write(downloadCount)
            outputFile.write(",")
            outputFile.write(score)
            outputFile.write(",")
            outputFile.write(s5)
            outputFile.write(",")
            outputFile.write(s4)
            outputFile.write(",")
            outputFile.write(s3)
            outputFile.write(",")
            outputFile.write(s2)
            outputFile.write(",")
            outputFile.write(s1)
            outputFile.write(",")
            outputFile.write(price)
            outputFile.write(",")
            outputFile.write(vendor)
            outputFile.write("\n")
        except:
            # NOTE(review): broad except hides scraping/network errors; the
            # package is silently deferred to the Wandoujia fallback below.
            #print '-'*60
            #traceback.print_exc(file=sys.stdout)
            #print '-'*60
            appSearchFromWDJ.append(appPackage)
# Fallback lookup on Wandoujia, only meaningful in file-input mode.
if len(appSearchFromWDJ) > 0 and sys.argv[1] == "0" :
    print "Some app need to look up from Wandouja"
    with codecs.open("wandouja_" + sys.argv[3], 'w', 'utf-8-sig') as outputFile:
        outputFile.write("App Package Name")
        outputFile.write(",")
        outputFile.write("App Name")
        outputFile.write(",")
        outputFile.write("App Category")
        outputFile.write(",")
        outputFile.write("Rating Count")
        outputFile.write("\n")
        for appPackage in appSearchFromWDJ:
            try:
                page = urllib2.urlopen("http://www.wandoujia.com/apps/"+appPackage)
                soup = BeautifulSoup(page)
                appName = soup.body["data-title"]
                appCategory = soup.find("dd", class_="tag-box").a.string
                score = soup.find("span", class_="item love").i.string
                outputFile.write(appPackage)
                outputFile.write(",")
                outputFile.write(appName)
                outputFile.write(",")
                outputFile.write(appCategory)
                outputFile.write(",")
                outputFile.write(score)
                # NOTE(review): no trailing "\n" is written here, so all
                # Wandoujia rows end up on one CSV line -- confirm intent.
            except:
                print "Cannot query app information:" + appPackage
|
ChengFeiwen/mypython
|
LookupAppName4.py
|
Python
|
apache-2.0
| 5,988
|
[
"VisIt"
] |
2277df5d9675ea6928525056060d6f141810b2a23dd7d9d4ac432cee03a7efeb
|
# Filename: setup.py
import os
import sys
from os.path import join as pjoin
import pyeda
# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension
# Package metadata pulled from the pyeda package itself.
NAME = pyeda.__name__
VERSION = pyeda.__version__
AUTHOR = "Chris Drake"
AUTHOR_EMAIL = "cjdrake AT gmail DOT com"
DESCRIPTION = "Python Electronic Design Automation"
# PyPI search keywords.
KEYWORDS = [
    "binary decision diagram",
    "Boolean algebra",
    "Boolean satisfiability",
    "combinational logic",
    "combinatorial logic",
    "computer arithmetic",
    "digital arithmetic",
    "digital logic",
    "EDA",
    "electronic design automation",
    "Espresso",
    "Espresso-exact",
    "Espresso-signature",
    "logic",
    "logic minimization",
    "logic optimization",
    "logic synthesis",
    "math",
    "mathematics",
    "PicoSAT",
    "SAT",
    "satisfiability",
    "truth table",
    "Two-level logic minimization",
    "Two-level logic optimization",
]
# Long description and license text read from the repository files.
with open('README.rst') as fin:
    README = fin.read()
with open('LICENSE') as fin:
    LICENSE = fin.read()
URL = "https://github.com/cjdrake/pyeda"
DOWNLOAD_URL = "https://pypi.python.org/packages/source/p/pyeda"
# Trove classifiers for PyPI.
CLASSIFIERS = [
    "License :: OSI Approved :: BSD License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Topic :: Scientific/Engineering",
    "Topic :: Scientific/Engineering :: Mathematics",
]
# Library packages plus their test packages.
PYEDA_PKGS = [
    'pyeda',
    'pyeda.boolalg',
    'pyeda.logic',
    'pyeda.parsing',
]
TEST_PKGS = [
    'pyeda.test',
    'pyeda.boolalg.test',
    'pyeda.logic.test',
    'pyeda.parsing.test',
]
PACKAGES = PYEDA_PKGS + TEST_PKGS
# Espresso extension
ESPRESSO = dict(
define_macros = [],
include_dirs = [
pjoin('thirdparty', 'espresso', 'src'),
],
sources = [
pjoin('thirdparty', 'espresso', 'src', 'cofactor.c'),
pjoin('thirdparty', 'espresso', 'src', 'cols.c'),
pjoin('thirdparty', 'espresso', 'src', 'compl.c'),
pjoin('thirdparty', 'espresso', 'src', 'contain.c'),
pjoin('thirdparty', 'espresso', 'src', 'cubestr.c'),
pjoin('thirdparty', 'espresso', 'src', 'cvrin.c'),
pjoin('thirdparty', 'espresso', 'src', 'cvrm.c'),
pjoin('thirdparty', 'espresso', 'src', 'cvrmisc.c'),
pjoin('thirdparty', 'espresso', 'src', 'cvrout.c'),
pjoin('thirdparty', 'espresso', 'src', 'dominate.c'),
pjoin('thirdparty', 'espresso', 'src', 'espresso.c'),
pjoin('thirdparty', 'espresso', 'src', 'essen.c'),
pjoin('thirdparty', 'espresso', 'src', 'exact.c'),
pjoin('thirdparty', 'espresso', 'src', 'expand.c'),
pjoin('thirdparty', 'espresso', 'src', 'gasp.c'),
pjoin('thirdparty', 'espresso', 'src', 'gimpel.c'),
pjoin('thirdparty', 'espresso', 'src', 'globals.c'),
pjoin('thirdparty', 'espresso', 'src', 'hack.c'),
pjoin('thirdparty', 'espresso', 'src', 'indep.c'),
pjoin('thirdparty', 'espresso', 'src', 'irred.c'),
pjoin('thirdparty', 'espresso', 'src', 'matrix.c'),
pjoin('thirdparty', 'espresso', 'src', 'mincov.c'),
pjoin('thirdparty', 'espresso', 'src', 'opo.c'),
pjoin('thirdparty', 'espresso', 'src', 'pair.c'),
pjoin('thirdparty', 'espresso', 'src', 'part.c'),
pjoin('thirdparty', 'espresso', 'src', 'primes.c'),
pjoin('thirdparty', 'espresso', 'src', 'reduce.c'),
pjoin('thirdparty', 'espresso', 'src', 'rows.c'),
pjoin('thirdparty', 'espresso', 'src', 'set.c'),
pjoin('thirdparty', 'espresso', 'src', 'setc.c'),
pjoin('thirdparty', 'espresso', 'src', 'sharp.c'),
pjoin('thirdparty', 'espresso', 'src', 'sminterf.c'),
pjoin('thirdparty', 'espresso', 'src', 'solution.c'),
pjoin('thirdparty', 'espresso', 'src', 'sparse.c'),
pjoin('thirdparty', 'espresso', 'src', 'unate.c'),
pjoin('thirdparty', 'espresso', 'src', 'verify.c'),
pjoin('pyeda', 'boolalg', 'espressomodule.c'),
],
)
# exprnode C extension
EXPRNODE = dict(
define_macros = [
('NDEBUG', None),
],
include_dirs = [
pjoin('extension', 'boolexpr'),
],
sources = [
pjoin('extension', 'boolexpr', 'argset.c'),
pjoin('extension', 'boolexpr', 'array.c'),
pjoin('extension', 'boolexpr', 'binary.c'),
pjoin('extension', 'boolexpr', 'boolexpr.c'),
pjoin('extension', 'boolexpr', 'bubble.c'),
pjoin('extension', 'boolexpr', 'compose.c'),
pjoin('extension', 'boolexpr', 'dict.c'),
pjoin('extension', 'boolexpr', 'flatten.c'),
pjoin('extension', 'boolexpr', 'nnf.c'),
pjoin('extension', 'boolexpr', 'product.c'),
pjoin('extension', 'boolexpr', 'set.c'),
pjoin('extension', 'boolexpr', 'simple.c'),
pjoin('extension', 'boolexpr', 'util.c'),
pjoin('extension', 'boolexpr', 'vector.c'),
pjoin('pyeda', 'boolalg', 'exprnodemodule.c'),
],
extra_compile_args = ['--std=c99'],
)
# PicoSAT C extension
with open(pjoin('thirdparty', 'picosat', 'VERSION')) as fin:
PICOSAT_VERSION = '"' + fin.read().strip() + '"'
PICOSAT = dict(
define_macros = [
('NDEBUG', None),
],
include_dirs = [
pjoin('thirdparty', 'picosat'),
],
sources = [
pjoin('thirdparty', 'picosat', 'picosat.c'),
pjoin('pyeda', 'boolalg', 'picosatmodule.c'),
],
)
if sys.platform == 'win32':
PICOSAT['define_macros'] += [
('NGETRUSAGE', None),
('inline', '__inline'),
]
EXT_MODULES = [
Extension('pyeda.boolalg.espresso', **ESPRESSO),
Extension('pyeda.boolalg.exprnode', **EXPRNODE),
Extension('pyeda.boolalg.picosat', **PICOSAT),
]
SCRIPTS = [
pjoin('script', 'espresso'),
pjoin('script', 'picosat'),
]
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
keywords=KEYWORDS,
long_description=README,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
packages=PACKAGES,
ext_modules=EXT_MODULES,
scripts=SCRIPTS,
test_suite='nose.collector',
)
|
karissa/pyeda
|
setup.py
|
Python
|
bsd-2-clause
| 6,351
|
[
"ESPResSo"
] |
aaaa80db2dbc69b29095300ef6434fe2167f3debadfd4e5a73f735a520e08f6c
|
from __future__ import print_function, division
from typing import Union, Optional, List, Iterable, Dict, Any
from collections import defaultdict
from abc import ABCMeta
import copy
import re
import string
import math
import logging
import sys
import numpy as np
from sympy import (MatMul, MatAdd, Basic, MatrixExpr, MatrixSymbol, ZeroMatrix, Symbol, Identity, Transpose,
Inverse, Number, Rational, ln, Determinant, pi, sympify, srepr, S, Expr, Matrix)
from sympy.printing.latex import LatexPrinter
from sympy.core.evaluate import global_evaluate
from sympy.core.compatibility import iterable, ordered, default_sort_key
# GREEK symbols (unicode escapes) used when pretty-printing symbols.
SMALL_MU_GREEK = '\u03bc'  # mu
BIG_SIGMA_GREEK = '\u03a3'  # Sigma
SMALL_SIGMA_GREEK = '\u03c3'  # sigma
BIG_OMEGA_GREEK = '\u03a9'  # Omega
BIG_LAMBDA_GREEK = '\u039b'  # Lambda
SMALL_ETA_GREEK = '\u03b7'  # eta
# Module logger: logger accepts INFO and above; its stream handler writes
# DEBUG and above to stdout with a timestamped format.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
LOG.addHandler(ch)
######## Matrix operations with lists as matrices ########
def _mul_with_num(num, mat):
    """
    Scale every element of a list-based matrix/vector by a number.

    Helper for 'matmul'.

    Args:
        num: Scalar multiplier applied to each element of ``mat``.
        mat: A list (vector) or list of lists (matrix); see 'matmul'.

    Returns:
        A new list of the same shape whose elements are
        ``SuperMatMul(num, elem).doit()``.
    """
    from symgp.superexpressions import SuperMatMul

    if isinstance(mat[0], list):
        n_cols = len(mat[0])
        return [[SuperMatMul(num, mat[r][c]).doit() for c in range(n_cols)]
                for r in range(len(mat))]
    return [SuperMatMul(num, elem).doit() for elem in mat]
def _check_shape_matmul(mat, order):
"""
Checks size of 'mat' and reshapes if necessary.
Args:
mat - A list/list of lists representing a vector/matrix (see 'matmul')
order - Indicates whether mat is a left/right matrix/vector such that
we can broadcast appropriately.
Returns:
(m,n) - Tuple giving the shape of mat
broadcast - Boolean indicating whether we should broadcast list
"""
broadcast_list = False
if isinstance(mat[0],list):
m = len(mat)
n = len(mat[0])
elif order == 'left':
m = 1
n = len(mat)
broadcast_list = True
else: # order == 'right'
m = len(mat)
n = 1
broadcast_list = True
return m, n, broadcast_list
def matmul(list1, list2):
    """
    Multiply two lists in a matrix fashion.

    Similar to numpy's matrix multiplication of arrays:
        - If list1 has shape (m1,) (i.e. it is a 1-D list) it is broadcast to (1,m1).
          list2 must have shapes (m1,n2) or (m1,) otherwise an Exception is raised.
          A list of shape (n2,) or (1,) is returned.
        - If list2 has shape (m2,) it is broadcast to (m2,1).
          list1 must have shapes (m2,) or (m1,m2) otherwise an Exception is raised.
          A list of shape (1,) or (m1,) is returned.
        - Any other case requires the shapes to match.

    For example, we can call this as:
        - matmul([[A, B], [C, D]], [a, b])
        - matmul([[A, B], [C, D]], [[a], [b]])

    All elements (A, B, C, D, a, b) are all SuperMatSymbols where the shapes must match.

    Multiplying all elements in a list by a number is also supported e.g. matmul(a,5) or matmul(5,a).
    """
    from symgp.superexpressions import SuperMatMul, SuperMatAdd

    # Handle multiplication by integers (scalar * list): delegate to helper.
    if isinstance(list1, int):
        return _mul_with_num(list1, list2)
    if isinstance(list2, int):
        return _mul_with_num(list2, list1)

    # Check sizes and reshape if necessary (flat lists get broadcast).
    m1, n1, broadcast_list1 = _check_shape_matmul(list1, 'left')
    m2, n2, broadcast_list2 = _check_shape_matmul(list2, 'right')

    # Inner dimensions must agree.
    if n1 != m2:
        raise Exception("Shapes don't match: %s, %s" % ((m1, n1), (m2, n2)))

    # Multiply based on types of lists. Each output entry is a symbolic sum
    # of products: SuperMatAdd over SuperMatMul terms, simplified via .doit().
    if broadcast_list1 and broadcast_list2:  # (1,n1) x (m2,1) -> single element
        out_list = [SuperMatAdd(*[SuperMatMul(list1[i],list2[i]).doit() for i in range(n1)]).doit()]
    elif broadcast_list1:  # (1,n1) x (m2,n2) -> flat list of length n2
        out_list = [0 for _ in range(n2)]
        for i in range(n2):
            out_list[i] = SuperMatAdd(*[SuperMatMul(list1[j],list2[j][i]).doit() for j in range(m2)]).doit()
    elif broadcast_list2:  # (m1,n1) x (m2,1) -> flat list of length m1
        out_list = [0 for _ in range(m1)]
        for i in range(m1):
            out_list[i] = SuperMatAdd(*[SuperMatMul(list1[i][j],list2[j]).doit() for j in range(m2)]).doit()
    else:  # (m1,n1) x (m2,n2) -> full m1 x n2 matrix
        out_list = [[0 for _ in range(n2)] for _ in range(m1)]
        for i in range(m1):
            for j in range(n2):
                out_list[i][j] = SuperMatAdd(*[SuperMatMul(list1[i][k],list2[k][j]).doit() for k in range(n1)]).doit()

    return out_list
def _check_shape_matadd(mat):
"""
Determines matrix shape of given matrix (as defined in matmul) 'mat'
Args:
mat - A list/list of lists representing a vector/matrix (see 'matmul')
Returns:
m, n - The shape of mat
"""
if isinstance(mat[0],list):
m = len(mat)
n = len(mat[0])
else:
m = 0
n = len(mat)
return m, n
def _assert_shapes(m1,n1,m2,n2):
"""
Checks whether shapes match
"""
if m1 != m2 or n1 != n2:
raise Exception("Shapes don't match: %s, %s" % ((m1, n1), (m2, n2)))
def matadd(list1, list2):
    """
    Element-wise addition of two list-based matrices/vectors.

    Both arguments must have the same shape (a flat list of length m is
    treated as shape (0, m)).

    Returns:
        A new list of the same shape whose elements are
        ``SuperMatAdd(a, b).doit()``.
    """
    from symgp.superexpressions import SuperMatAdd

    # Shapes must agree exactly; flat lists compare as (0, n).
    m1, n1 = _check_shape_matadd(list1)
    m2, n2 = _check_shape_matadd(list2)
    _assert_shapes(m1, n1, m2, n2)

    if m1 == 0:
        # 1-D case.
        return [SuperMatAdd(list1[i], list2[i]).doit() for i in range(n1)]
    # 2-D case.
    return [[SuperMatAdd(list1[r][c], list2[r][c]).doit() for c in range(n1)]
            for r in range(m1)]
def mattrans(mat):
    """
    Transpose a list-based matrix/vector.

    For a flat list (shape (m,)) a same-shaped list is returned with every
    element transposed in place. For an m x n list of lists, an n x m list of
    lists of transposed elements is returned.

    Args:
        mat: A list/list of lists representing a vector/matrix (see 'matmul').

    Raises:
        Exception: if ``mat`` mixes list and non-list rows (not a regular
            matrix).
    """
    if all([not isinstance(elem, list) for elem in mat]):
        # (m,) case: transpose each element, keep the flat shape.
        return [elem.T.doit() for elem in mat]
    if any([not isinstance(elem, list) for elem in mat]):
        raise Exception("mat is not a regular matrix")
    rows, cols = len(mat), len(mat[0])
    # Result has shape cols x rows; each entry is the transposed element.
    return [[mat[r][c].T.doit() for r in range(rows)] for c in range(cols)]
def matinv(mat):
    """
    Inverts nxn matrices represented as lists of lists.

    Uses recursive blockwise inversion: the matrix is partitioned into
    [[P, Q], [R, S]] and the standard block-inverse formulas are applied,
    with sub-inverses computed by recursive calls.

    Args:
        mat: A list/list of lists representing a matrix (see 'matmul') of
            shape (n,n).

    Returns:
        The blockwise symbolic inverse as a list of lists. For n == 1 this is
        simply ``[[mat[0][0].I]]``.

    Raises:
        Exception: if ``mat`` is not a regular square list-of-lists matrix.
    """
    if any([not isinstance(e,list) for e in mat]):
        raise Exception("This is not a suitable matrix")
    if len(mat) != len(mat[0]):
        raise Exception("This isn't a square matrix.")

    n = len(mat)

    # Recursively calculate the inverse to get the large untidy expression
    if n == 1:
        return [[mat[0][0].I]]
    else:
        if n == 2:
            # Base 2x2 case: each block is a single element.
            P, Q = [[mat[0][0]]], [[mat[0][1]]]
            R, S = [[mat[1][0]]], [[mat[1][1]]]
        else:
            # Split off the last row/column as the [R, S] / [Q, S] blocks.
            P, Q, R, S = partition_block(mat,[len(mat)-1,len(mat[0])-1])

        # Block-inverse formulas:
        #   P_bar = (P - Q S^-1 R)^-1        (inverse of the Schur complement of S)
        #   Q_bar = -P_bar Q S^-1
        #   R_bar = -S^-1 R P_bar
        #   S_bar = S^-1 + S^-1 R P_bar Q S^-1
        P_bar = matinv(matadd(P,matmul(matmul(matmul(-1,Q),matinv(S)),R)))
        Q_bar = matmul(matmul(matmul(-1,P_bar),Q),matinv(S))
        R_bar = matmul(matmul(matmul(-1,matinv(S)),R),P_bar)
        S_bar = matadd(matinv(S),matmul(matmul(matmul(matmul(matinv(S),R),P_bar),Q),matinv(S)))

        # Create new matrix by top bottom method i.e. create top of matrix then create bottom
        top = []
        for row1, row2 in zip(P_bar,Q_bar):
            top.append(row1+row2)
        bottom = []
        for row1, row2 in zip(R_bar,S_bar):
            bottom.append(row1+row2)

        return top+bottom
def _copy_block(block):
"""
Makes a copy of block as used by 'partition_block'
"""
new_block = []
if isinstance(block[0], list):
for row in block:
new_block.append(list(row))
else: #isinstance(block, list)
new_block = list(block)
return new_block
def is_matrix(block):
    """
    Return True if ``block`` is a regular matrix.

    A matrix is a Python list of lists where every inner list has the same
    length as the first one.
    """
    rows_are_lists = all([isinstance(row, list) for row in block])
    return rows_are_lists and all([len(block[0]) == len(row) for row in block])
def is_1d_vector(block):
    """
    Return True if ``block`` is a 1-D list (no element is itself a list).

    :param block: A list.
    """
    return not any([isinstance(elem, list) for elem in block])
def is_2d_vector(block):
    """
    Return True if ``block`` is a 2-D column vector: a list of lists where
    every inner list has length 1, e.g. [[a], [b]].

    :param block: A list.
    """
    rows_are_lists = all([isinstance(row, list) for row in block])
    return rows_are_lists and all([len(row) == 1 for row in block])
def is_vector(block):
    """
    Return True if ``block`` is a vector in either supported form:

    - a flat Python list, e.g. [a, b, c]; or
    - a list of singleton lists, e.g. [[a], [b], [c]].
    """
    return is_1d_vector(block) or is_2d_vector(block)
def is_square(block):
    """Return True if ``block`` is a matrix with as many rows as columns."""
    return is_matrix(block) and len(block) == len(block[0])
def _move_cols_to_end(block, indices):
"""
Moves the columns given by indices to the end of block
preserving the order of the columns
For example if:
block = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,12,13]]
indices=[1,2]
we get
block = [[1, 4, 2, 3],
[5, 8, 6, 7],
[9,13,10,12]]
"""
num_rows, num_cols = len(block), len(block[0])
indices = sorted(indices,reverse=True)
new_block = _copy_block(block)
for idx, col in enumerate(indices):
if col == num_cols-1:
continue
else:
c = col
# Shifts column to last available column i.e.
while c < num_cols-(idx+1):
for row in range(num_rows):
temp = new_block[row][c]
new_block[row][c] = new_block[row][c+1]
new_block[row][c+1] = temp
c += 1
return new_block
def _move_elems_to_end(block, indices):
"""
Moves the elements in vector 'block' at locations given in 'indices' to the end
of the block whilst preserving the order of the elements
"""
indices = sorted(indices,reverse=True)
block_size = len(block)
new_block = _copy_block(block)
# Push elements corresponding to indices to end of list
for idx, k in enumerate(indices):
if k == block_size-1:
continue
else:
i = k
while i < block_size-(idx+1):
temp = new_block[i]
new_block[i] = new_block[i+1]
new_block[i+1] = temp
i += 1
return new_block
def partition_block(block, indices):
    """
    Partitions a list into four or two sections based on the indices.

    Args:
        block: The input block to be partitioned:
            - If block is 2-D, we partition it into [[P, Q], [R, S]]
            - If block is 1-D, we partition it into [a, b] (shape=(m,)) or
              [[a],[b]] (shape=(m,1))
        indices: The indices that form one partition:
            - If block is 2-D, this can be 2-D (e.g. [[1,2],[0,1]]) or 1-D (e.g. [1,2,3]).
              If 1-D, block needs to be square.
            - If block is 1-D, this should also be a vector (e.g. [1,2,3] or [[1],[2],[3]])

    Repeat indices are removed automatically. The order of the columns/rows are
    preserved.

    For example for block = [[A,B,C,D],[E,F,G,H],[I,J,K,L],[M,N,O,Z]] and
    indices = [[0,2],[1,3]], we get:
        P = [[E,G],[M,O]]   Q = [[F,H],[N,Z]]
        R = [[A,C],[I,K]]   S = [[B,D],[J,L]]

    Returns:
        Either
            P, Q, R, S - The four partitions for 2-D blocks
        Or
            a, b - The two partitions for 1-D blocks

    Raises:
        Exception: for malformed blocks/indices, out-of-range indices, or
            1x1 blocks (nothing to partition).
    """
    # Checks validity of indices values
    _is_valid_idx = lambda idx, max_idx: all([(i >= 0 and i < max_idx) for i in idx])

    # Check block is a correct block matrix/vector ([[...],[...]] or [...])
    if not (is_matrix(block) or is_vector(block)):
        raise Exception("The block to be partitioned must be a matrix ([[A,B], [C,D]]) or \
vector ([a,b] or [[a],[b]])")

    # Copy block so the caller's data is never mutated.
    new_block = _copy_block(block)

    if is_matrix(new_block) and not is_vector(new_block):
        num_rows, num_cols = len(new_block), len(new_block[0])

        # Check indices are appropriate for matrix: a flat index list is only
        # allowed for square matrices (same indices for rows and columns).
        if (all([isinstance(e,int) for e in indices]) and is_square(new_block)):
            indices = [indices, indices]  # Convert to 2-D
        else:
            if not all([isinstance(e,list) for e in indices]):
                raise Exception("Incorrect form for indices for a matrix. Must be a list of lists e.g.\
 [[1,2],[3]] or a 1-D list [1,2] if the matrix is square")

        # Remove repeat set of indices
        row_indices = list(set(indices[0]))
        col_indices = list(set(indices[1]))

        # Check for 1x1 case
        if num_rows == 1 and num_cols == 1:
            raise Exception("Can't partition a 1x1 block. Minimum size is 2x2")

        # Check that all indices are in appropriate range
        if not _is_valid_idx(row_indices,num_rows):
            raise Exception("Invalid row indices. Must be in range: [%s,%s]" % (0,num_rows-1))
        if not _is_valid_idx(col_indices,num_cols):
            raise Exception("Invalid column indices. Must be in range: [%s,%s]" % (0,num_cols-1))

        # First push columns indicated by indices to end
        new_block = _move_cols_to_end(new_block, col_indices)

        # Do same for rows: transpose, move "columns" (= rows), transpose back.
        new_block = list(map(list,zip(*new_block)))  # Flip rows and columns
        new_block = _move_cols_to_end(new_block, row_indices)
        new_block = list(map(list,zip(*new_block)))

        m = num_rows - len(row_indices)  # Number of rows of partition not given by indices
        n = num_cols - len(col_indices)  # Number of columns of partition not given by indices

        # Create partitions: after the moves, selected rows/cols sit at the end.
        P = [new_block[i][:n] for i in range(m)]            # No row and col indices
        Q = [new_block[i][n:] for i in range(m)]            # No row but col indices
        R = [new_block[i][:n] for i in range(m, num_rows)]  # No col but row indices
        S = [new_block[i][n:] for i in range(m, num_rows)]  # Intersection of row and col indices

        return P, Q, R, S
    else:  # Vector
        block_size = len(new_block)

        # Check for 1x1 case
        if block_size == 1:
            raise Exception("Can't partition a 1x1 block")

        # Check indices are appropriate for vector
        if is_vector(indices):
            if all([isinstance(e,list) for e in indices]):  # Convert to 1-D list
                indices = [e[0] for e in indices]
        else:
            raise Exception("Incorrect form of indices. Must be 1-D e.g. [1,2]")

        # Check that all indices are in appropriate range
        if not _is_valid_idx(indices,block_size):
            raise Exception("Invalid indices. Must be in range: [%s,%s]" % (0,block_size-1))

        # Remove duplicates
        indices = list(set(indices))

        new_block = _move_elems_to_end(new_block,indices)

        # Partition: selected elements now occupy the tail of the vector.
        m1 = block_size - len(indices)
        a = new_block[:m1]
        b = new_block[m1:]

        return a, b
######## MVG helper functions ########
def get_logZ(cov):
    """
    Return the symbolic log-normalising constant for a Gaussian with
    covariance ``cov``:  -(n/2)·ln(2π) - (1/2)·ln|cov|,  n = cov.shape[0].
    """
    n = cov.shape[0]
    half_log_det = Rational(1, 2) * ln(Determinant(cov))
    return -n / 2 * ln(2 * pi) - half_log_det
######### Search and replace functions ########
def replace_with_num(expr, d):
    """
    Replaces matrix symbols with numerical matrices using a DFS search through
    the expression tree.

    Args:
        expr: The matrix expression which we want to evaluate.
        d: A dictionary mapping matrix-symbol names to numerical matrices
            (these can be SymPy 'Matrix' objects or 'numpy.ndarray' arrays).

    Returns:
        A 'numpy.ndarray' that is the evaluation of ``expr`` with the
        numerical matrices. If a symbol has no entry in ``d``, an error is
        printed and None propagates (best-effort behaviour preserved from the
        original design).

    Raises:
        Exception: if ``expr`` is not a Number/MatrixSymbol/MatrixExpr.
    """
    import numpy as np

    # Determine what to return based on type(expr)
    if isinstance(expr, MatrixSymbol):
        try:
            return d[expr.name]
        except KeyError as e:
            # NOTE(review): deliberately non-fatal — prints and implicitly
            # returns None, which then propagates up to the caller.
            print("Error: No numerical matrix was specified for %s" % (e))
    elif isinstance(expr, Number):
        return expr
    elif isinstance(expr, MatrixExpr):
        # Post-order DFS: evaluate all children first, then combine below.
        sub_exprs = []
        for arg in expr.args:
            sub_exprs.append(replace_with_num(arg, d))

        if expr.is_MatMul:
            # Seed the running product with an identity sized from the first
            # non-scalar factor (a matrix product always has one, so `shape`
            # is assigned before use).
            for e in sub_exprs:
                if not isinstance(e, Number):
                    shape = e.shape[0]
                    break

            out = np.eye(shape)
            for e in sub_exprs:
                if isinstance(e, Number):
                    # BUG FIX: `np.float` was removed in NumPy 1.20+; the
                    # builtin `float` is the documented replacement.
                    out *= float(e)
                elif isinstance(e, Matrix):
                    out = np.dot(out, np.array(e.tolist(), dtype=np.float32))
                else:
                    out = np.dot(out, e)
            return out
        elif expr.is_MatAdd:
            # Accumulator shaped like the first summand (matrix or vector).
            if len(sub_exprs[0].shape) == 2:
                out = np.zeros(sub_exprs[0].shape)
            else:
                out = np.zeros(sub_exprs[0].shape[0])

            for e in sub_exprs:
                if isinstance(e, Matrix):
                    out += np.array(e.tolist(), dtype=np.float32).reshape(out.shape)
                else:
                    out += e.reshape(out.shape)
            return out
        elif expr.is_Inverse:
            if isinstance(sub_exprs[0], Matrix):
                out = np.linalg.inv(np.array(sub_exprs[0].tolist(), dtype=np.float32))
            else:
                out = np.linalg.inv(sub_exprs[0])
            return out
        else:  # expr.is_Transpose
            if isinstance(sub_exprs[0], Matrix):
                out = np.array(sub_exprs[0].T.tolist(), dtype=np.float32)
            else:
                out = sub_exprs[0].T
            return out
    else:
        raise Exception("Expression should be a MatrixExpr")
def evaluate_expr(expr, d):
    """
    Evaluate a symbolic matrix expression with the given numerical matrices.

    Args:
        expr: The symbolic matrix expression.
        d: A dictionary mapping matrix-symbol names to numerical matrices.

    Returns:
        The result of all the matrix calculations.
    """
    return replace_with_num(expr, d)
def replace_with_expanded(expr, done=True, excluded=None):
    """
    Similar to 'replace_with_num' above except we replace SuperMatrixSymbols
    with their expanded forms if they exist.

    Args:
        expr: The current MatrixExpr.
        done: Accumulator flag threaded through the recursion; it stays True
            only if no substitution was made anywhere in the subtree.
        excluded: Classes (and their subclasses) for which expanded forms are
            NOT substituted.

    Returns:
        expr: The expanded MatrixExpr.
        done: Boolean indicating whether no more expansions can be done.
    """
    from symgp.superexpressions import (SuperMatSymbol, SuperMatTranspose, SuperMatInverse, SuperMatAdd,
                                        SuperMatMul, SuperDiagMat, SuperBlockDiagMat)
    from symgp.kernels.kernel import KernelMatrix

    if (isinstance(expr, MatMul) or isinstance(expr, MatAdd) or
            isinstance(expr, Inverse) or isinstance(expr, Transpose)):
        # Composite node: recurse into children, rebuild with Super* classes.
        sub_exprs = []
        for arg in expr.args:
            expanded, done = replace_with_expanded(arg, done, excluded=excluded)
            sub_exprs.append(expanded)

        if expr.is_MatMul:
            e = SuperMatMul(*sub_exprs)
        elif expr.is_MatAdd:
            e = SuperMatAdd(*sub_exprs)
        elif expr.is_Inverse:
            e = SuperMatInverse(*sub_exprs)
        else:  # expr.is_Transpose
            e = SuperMatTranspose(*sub_exprs)

        return e, done
    elif excluded is not None and any([isinstance(expr, c) for c in excluded]):
        # Excluded class: leave untouched.
        return expr, done
    elif isinstance(expr, SuperMatSymbol) and expr.expanded is not None:
        # Substitution made: signal that another pass may be needed.
        return expr.expanded, False
    else:
        return expr, done
def expand_to_fullexpr(expr, num_passes=-1, excluded=None):
    """
    Expand a MatrixExpr composed of SuperMatSymbols by substituting each
    SuperMatSymbol with its 'expanded' form.

    Args:
        expr: The expression to expand.
        num_passes: The number of passes to make through the expression.
            -1 means iterate until no more substitutions can be made.
        excluded: Classes (and subclasses) to exclude substitution with
            expanded expressions for.

    Returns:
        The fully expanded expression.
    """
    result = expr

    if num_passes == -1:
        # Fixed point: keep passing until a pass makes no substitution.
        finished = False
        while not finished:
            result, finished = replace_with_expanded(result, True, excluded=excluded)
    else:
        for _ in range(num_passes):
            result, _ = replace_with_expanded(result, excluded=excluded)

    return result.doit().doit()
def _replace_with_MatSym(expr, rule):
    """
    Replaces the MatrixExpr expression in 'expr' given by the replacement rule.

    Args:
        expr: The expression which we want to replace sub-expressions in.
        rule: A tuple that matches the old expression (old_expr) to the
            replacement (repl) as (old_expr, repl).

    Returns:
        subbed_expr: 'expr' with the substitution made.
    """
    from collections import deque
    from symgp import SuperDiagMat, SuperBlockDiagMat

    old_expr, repl = rule
    len_old_expr = len(old_expr.args)  # Number of arguments. TODO: Check for cases where k is a single symbol

    # Table used to build back the tree.
    #
    # We pair a key of a sub_expression with an id 'k' that indicates sub_expr
    # was the k'th entry into the table, with either:
    #
    #  - A list of (sub_expr.args[i], level, k) tuples giving the keys from
    #    which to find the node's children, in order:
    #        {(sub_expr, level, j): [(sub_expr.args[0], level+1, m), ...]}
    #
    #  - An Expr that we substitute in for sub_expr when it is retrieved by
    #    higher nodes in the expression tree:
    #        {(sub_expr, level, j): sub_expr_repl}
    tree_table = defaultdict(list)

    queue = deque(((expr, 0, 0),))

    # An id to uniquely identify each sub-expression i.e. we can have similar
    # expressions at the same level.
    curr_id = 1

    while len(queue) > 0:
        sub_expr, level, old_id = queue.pop()

        if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
                isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
                isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
            # Match current rule to expressions in this sub expression.
            len_sub_expr = len(sub_expr.args)

            i = 0
            while i < len_sub_expr:
                # (Dead locals `j = 0` / `l = 0` removed: skipping inside
                # MatAdds is handled internally by _match_with_pat.)
                matched, skipped = _match_with_pat(sub_expr, i, old_expr)

                if matched:  # Match found: Replace match with pattern
                    # Determine the level of the new replacement expression in the expression tree
                    if len_old_expr == len_sub_expr:
                        new_level = level
                    else:
                        new_level = level + 1

                    queue.appendleft((repl, new_level, curr_id))

                    # We need to re-order sub_expr - mainly for matches in
                    # MatAdds with remainders e.g. matching A in A + B + C
                    if skipped:
                        old_sub_expr = sub_expr

                        # Get remainder after removing old_expr
                        rem = sub_expr
                        for c in old_expr.args:
                            rem -= c
                        rem = [rem] if not isinstance(rem, MatAdd) else list(rem.args)

                        # Create new expression with the matched args first.
                        new_args = list(old_expr.args) + rem
                        sub_expr = type(sub_expr)(*new_args)

                        # As we changed the sub_expr we have to reassign the elements of the old one
                        if tree_table.get((old_sub_expr, level, old_id)):
                            old_values = tree_table.pop((old_sub_expr, level, old_id))
                            tree_table[(sub_expr, level, old_id)] = old_values + [(repl, new_level, curr_id)]
                        else:
                            tree_table[(sub_expr, level, old_id)] = [(repl, new_level, curr_id)]

                        curr_id += 1
                    else:
                        # Check entry for sub_expr exists
                        tree_table[(sub_expr, level, old_id)].append((repl, new_level, curr_id))
                        curr_id += 1

                    # Start after pattern
                    i += len_old_expr
                else:
                    queue.appendleft((sub_expr.args[i], level + 1, curr_id))

                    # Check entry for sub_expr exists
                    tree_table[(sub_expr, level, old_id)].append((sub_expr.args[i], level + 1, curr_id))
                    curr_id += 1

                    # Start at next symbol
                    i += 1
        else:
            # Terminal node: substitute it for itself when rebuilding.
            tree_table[(sub_expr, level, old_id)] = sub_expr

    # Sort based on level in descending order so children are rebuilt first.
    sorted_tree_table = sorted(tree_table.items(), key=lambda elem: elem[0][1], reverse=True)

    # Create expression from table (bottom-up reconstruction).
    for p, c in sorted_tree_table:
        # Skip terminal nodes else update tree table for non-terminal nodes
        if p[0] == c:
            continue
        else:
            # Create MatrixExpr using the elements in the value c, which is a
            # list, for the key p and then update 'tree_table'.
            tree_table[p] = type(p[0])(*[tree_table[e] for e in c])

    # Rewrite full expression: the root is the last (level-0) entry.
    subbed_expr = tree_table[sorted_tree_table[-1][0]]

    return subbed_expr
def _match_with_pat(expr, start, pat):
    """
    Matches an expression or a portion of it to a pattern.

    Args:
        expr: The expression we want to match.
        start: The starting index into expr.args.
        pat: The pattern we want to find in 'expr'. This can be:
            - A MatrixExpr. Here we aim to find pat in 'expr'.
            - A Kernel. We aim to find KernelMatrix objects/MatrixExprs
              composed of KernelMatrix objects that match Kernel.

    Returns:
        matched: Indicates whether the pattern was found in 'expr'.
        skipped: Indicates whether we had to skip over symbols when matching
            in a MatAdd expression.
        (Only when pat is a Kernel, two extra values:)
        pattern: The concrete pattern that was matched.
        repl: The replacement expression.

    Examples:
        - expr = A*B*D, pat = A*B -> matched = True, skipped = False
        - expr = A + B + C, pat = A + C -> matched = True, skipped = True
          (as we had to skip over B; 'skipped' follows the order of expr.args)
        - expr = K(a,u)*K(u,u)*K(u,b), pat = Q (Q.sub_kernels=[K,K], Q.M=K(u,u))
          -> matched = True, skipped = True,
             pattern = K(a,u)*K(u,u)*K(u,b), repl = Q(a,b)
    """
    from symgp import Kernel

    len_expr = len(expr.args)
    matched, skipped = False, False

    if isinstance(pat, MatrixExpr):
        # Wrap a bare symbol so it has .args like a product would.
        if isinstance(pat, MatrixSymbol):
            from symgp import SuperMatMul
            pat = SuperMatMul(pat)

        len_pat = len(pat.args)

        # j walks the pattern args; l counts args of expr skipped over
        # (only allowed when both expr and pat are MatAdds).
        j = 0
        l = 0
        while j < len_pat and start + l + j < len_expr:
            if start + l + j >= len_expr:
                break
            if (expr.args[start+l+j].doit() != pat.args[j].doit()):
                # As additions may be stored in any order, we need to skip
                # symbols so that we can match the pattern.
                if isinstance(pat, MatAdd) and isinstance(expr, MatAdd):
                    l += 1
                else:
                    break
            else:
                j += 1

        if j == len_pat:
            matched = True
        if l > 0:
            skipped = True

        return matched, skipped
    elif isinstance(pat, Kernel):
        kern_vars = get_all_kernel_variables(expr)

        # Get all possible kernel patterns: pat applied to every ordered pair
        # of kernel variables found in expr.
        patterns = []
        for v1 in kern_vars:
            patterns.extend([pat(v1,v2) for v2 in kern_vars])

        # Sort patterns based on length of underlying expression, longest
        # first, so the most specific pattern is tried before shorter ones.
        def sort_func(e):
            e_full = e.to_full_expr()
            if isinstance(e_full, MatrixSymbol):
                return 1
            else:
                return len(e_full.args)

        patterns = sorted(patterns, key=sort_func)[::-1]

        # Find a match in our list of patterns (recurse with the concrete
        # MatrixExpr form of each candidate).
        for i, p in enumerate(patterns):
            matched, skipped = _match_with_pat(expr, start, p.to_full_expr())
            if matched:
                return matched, skipped, p.to_full_expr(), p

        return matched, skipped, None, None
    else:
        raise Exception("Invalid pattern 'pat': Must be a Kernel object or a MatrixExpr")
def _replace_with_Kernel(expr, kern):
    """
    Replaces the kernel expression in 'expr' given by the replacement rule

    Args:
        expr - The expression which we want to replace sub-expressions in.
        kern - The Kernel we want to replace an expression in 'expr' with. The expression
               belongs to the set of expression 'kern' represents.
               For example, if
                   M = Constant('M',n,n,full_expr=K(u,u).I)
                   kern = Kernel(sub_kernels=[K,K],kernel_type='mul',mat=M,name='Q')
               we replace all expressions of form K({v1},u)*K(u,u).I*K(u,{v2}) where {v1} and {v2} can be
               any variable.

    Returns:
        subbed_expr - 'expr' with the substitution made
    """
    from collections import deque
    from symgp import Kernel, SuperDiagMat, SuperBlockDiagMat

    # Table used to build back tree.
    #
    # We pair a key of a sub_expression with an id 'k' that indicates sub_expr was the k'th entry in the table with either:
    #
    #   - A list of (sub_expr.args[i], k) tuples indicating the keys from which to search for the
    #     next expressions in the tree in their correct order:
    #
    #         {(sub_expr, j): [(sub_expr.args[0],m),(sub_expr.args[1],l), ...]}
    #
    #   - A Expr that we substitute in for sub_expr when it is retrieved by higher nodes in the expression tree:
    #
    #         {(sub_expr, j): sub_expr_repl}
    #
    #     where sub_expr_repl is the expression that we replace sub_expr with. It can be sub_expr itself or a replacement
    #     we define.
    #
    # NOTE: keys are actually 3-tuples (sub_expr, level, id) — the level is included so
    # the final rebuild below can be done bottom-up by sorting on it.
    tree_table = defaultdict(list)

    if isinstance(kern, Kernel):
        # Breadth-first walk over the expression tree; each queue entry is
        # (expression, tree level, unique expression id).
        queue = deque(((expr, 0, 0),))
        # An id to uniquely identify each sub-expression i.e. we can have similar
        # expressions at the same level.
        curr_id = 1
        while len(queue) > 0:
            sub_expr, level, old_id = queue.pop()
            if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
                    isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
                    isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
                # TODO: Add functionality to replace expressions such as A+D in D + 2*A.
                len_sub_expr = len(sub_expr.args)
                # Scan the args left-to-right, trying to match the kernel pattern
                # starting at each position i.
                i = 0
                while i < len_sub_expr:
                    matched, skipped, pattern, repl = _match_with_pat(sub_expr, i, kern)
                    # Update 'tree_table'
                    if matched:  # Match found: Replace match with pattern
                        # Determine the level of the new replacement expression in the
                        # expression tree: if the pattern consumed all args it replaces
                        # this node, otherwise it becomes a child.
                        if len(pattern.args) == len_sub_expr:
                            new_level = level
                        else:
                            new_level = level + 1
                        queue.appendleft((repl, new_level, curr_id))

                        # We need to re-order sub_expr - mainly for matches in MatAdds with
                        # remainders e.g. matching A in A + B + C
                        if skipped:
                            old_sub_expr = sub_expr

                            # Get remainder after removing old_expr
                            rem = sub_expr
                            for c in pattern.args:
                                rem -= c
                            rem = [rem] if not isinstance(rem, MatAdd) else list(rem.args)

                            # Create new expression with the matched args first.
                            new_args = list(pattern.args) + rem
                            sub_expr = type(sub_expr)(*new_args)

                            # As we changed the sub_expr we have to reassign the elements of the old one
                            if tree_table.get((old_sub_expr, level, old_id)):
                                old_values = tree_table.pop((old_sub_expr, level, old_id))
                                tree_table[(sub_expr, level, old_id)] = old_values + [(repl, new_level, curr_id)]
                            else:
                                tree_table[(sub_expr, level, old_id)] = [(repl, new_level, curr_id)]
                        else:
                            # Check entry for sub_expr exists
                            tree_table[(sub_expr, level, old_id)].append((repl, new_level, curr_id))

                        # Start after pattern
                        if isinstance(pattern, MatrixSymbol):
                            i += 1
                        else:
                            i += len(pattern.args)
                    else:
                        # No match at position i: descend into this arg.
                        queue.appendleft((sub_expr.args[i], level + 1, curr_id))

                        # Check entry for sub_expr exists
                        tree_table[(sub_expr, level, old_id)].append((sub_expr.args[i], level + 1, curr_id))

                        # Start at next symbol
                        i += 1

                    # Each queued child gets a fresh unique id.
                    curr_id += 1
            else:
                # Leaf node: store the expression itself so the rebuild below can
                # substitute it back in directly.
                tree_table[(sub_expr, level, old_id)] = sub_expr
    else:
        raise Exception("Invalid 'old_expr': Should be a Kernel, MatMul or MatAdd object")

    # Sort based on level in descending order (deepest nodes first) so children are
    # rebuilt before their parents.
    sorted_tree_table = sorted(tree_table.items(), key=lambda elem: elem[0][1], reverse=True)

    # Create expression from table
    for p, c in sorted_tree_table:
        # Skip terminal nodes else update tree table for non-terminal nodes
        if p[0] == c:
            continue
        else:
            # Create MatrixExpr using the elements in the value c, which is a list, for the key p and
            # then update 'tree_table'
            tree_table[p] = type(p[0])(*[tree_table[e] for e in c])

    # The last (shallowest) key is the root of the rebuilt expression.
    subbed_expr = tree_table[sorted_tree_table[-1][0]]
    return subbed_expr
def replace(expr, rules):
    """
    Apply a sequence of substitution rules to ``expr``.

    Args:
        expr  - The input matrix expression.
        rules - An iterable whose elements are either:
                  * a ``(old_expr, new_sym)`` tuple pairing a MatrixExpr with the
                    MatrixSymbol that should replace it, or
                  * a ``Kernel`` object whose underlying matrix-expression family
                    is matched and replaced, e.g.
                        M = Constant('M',n,n,full_expr=K(u,u).I)
                        Q = Kernel(sub_kernels=[K,K],kernel_type='mul',mat=M,name='Q')
                    matches expressions of the form K({v1},u)*K(u,u).I*K(u,{v2}).

                N.B. An expression of the form -1*A must be replaced as a whole
                (-1*A -> -1*B), not A -> B.

    Returns:
        The expression with all substitutions applied (``doit()`` is called on
        the result).
    """
    from symgp import Kernel

    result = expr
    for rule in rules:
        if isinstance(rule, Kernel):
            result = _replace_with_Kernel(result, rule)
        elif (isinstance(rule, tuple)
              and isinstance(rule[0], MatrixExpr)
              and isinstance(rule[1], MatrixSymbol)):
            result = _replace_with_MatSym(result, rule)
        else:
            raise Exception("Invalid matching of expressions to replacements. Rule must be (old_expr,repl) or kern")
    return result.doit()
def replace_with_SuperMat(expr, d):
    """
    Recursively rebuild a scalar sympy expression as the equivalent
    SuperMat expression, substituting each Symbol with the SuperMatSymbol
    stored under its name in the mapping ``d``.
    """
    from symgp.superexpressions import SuperMatMul, SuperMatAdd, SuperMatInverse, SuperMatTranspose

    # Leaves: symbols are looked up in the mapping, numbers pass through.
    if isinstance(expr, Symbol) or isinstance(expr, Number):
        if isinstance(expr, Symbol):
            try:
                return d[expr.name]
            except KeyError as e:
                # Best-effort notification; execution deliberately falls
                # through to the generic handling below (matches legacy
                # behaviour).
                print("Error: No SuperMatSymbol substitute was specified for %s" % (e))
        else:
            return expr

    # Recurse into the children first, then rebuild with the matching
    # SuperMat node type.
    converted = [replace_with_SuperMat(child, d) for child in expr.args]

    if isinstance(expr, Expr):
        if expr.is_Mul:
            return SuperMatMul(*converted)
        elif expr.is_Add:
            return SuperMatAdd(*converted)
        elif expr.is_Inverse:
            return SuperMatInverse(*converted)
        else:  # expr.is_Transpose
            return SuperMatTranspose(*converted)
    else:
        raise Exception("Expression should be a MatrixExpr")
######## LaTeX printing ########
class matLatPrinter(LatexPrinter):
    """LaTeX printer specialised for symgp expressions.

    Extends SymPy's ``LatexPrinter`` with printing rules for the symgp
    matrix types (SuperMatSymbol and friends), MVG distributions and
    KernelMatrix objects.

    Changes vs. the original: escape-bearing strings are raw strings
    (``'\\sigma'`` etc. produced DeprecationWarnings), a large dead
    triple-quoted string and unused locals were removed from
    ``_print_SuperMatSymbol``, and ``_print_MVG`` now initialises
    ``mean_name``/``covar_name`` so a missing blockform no longer raises
    ``NameError``.
    """

    def _print_Symbol(self, expr):
        # Names starting with the small Greek sigma are rendered via the
        # LaTeX macro '\sigma' followed by the rest of the name.
        if expr.name[0] == SMALL_SIGMA_GREEK:
            return self._print(Symbol(r'\sigma' + expr.name[1:]))
        else:
            return LatexPrinter().doprint(expr)

    def _print_SuperMatSymbol(self, expr):
        # All matrix symbols are rendered in bold face.
        return r'\mathbf{' + expr.name + '}'

    def _print_SuperMatInverse(self, expr):
        return self._print(expr.args[0]) + '^{-1}'

    def _print_SuperMatTranspose(self, expr):
        return self._print(expr.args[0]) + '^T'

    def _print_SuperDiagMat(self, expr):
        return r"\text{diag}[" + self._print(expr.arg) + "]"

    def _print_SuperBlockDiagMat(self, expr):
        return r"\text{blockdiag}[" + self._print(expr.arg) + "]"

    def _print_SuperMatAdd(self, expr):
        terms = list(expr.args)
        # Fix to stop first negative term being rendered as -1 in LaTeX i.e. we want
        # A - BCB^{T} in LaTeX instead of -1BCB^{T} + A.
        # NOTE(review): terms[0].args[0] raises IndexError if the first term is a
        # bare MatrixSymbol (no args) — assumed not to occur in practice; confirm.
        if terms[0].args[0] == S.NegativeOne:
            terms = terms[1:] + terms[:1]
        return " + ".join(map(self._print, terms))

    def _print_MVG(self, expr):
        """Render an MVG as an align* block: p(vars|cond) = N(vars; m, Sigma),
        followed by the full expressions for m and Sigma when available."""
        from symgp.kernels.kernel import KernelMatrix

        # Kernel matrices are kept symbolic rather than expanded.
        excluded_classes = [KernelMatrix]

        # Header: p(x,y|a,b)
        latex_name = r'\begin{align*}' + "\n"
        latex_name += expr.prefix + r'\left('
        vars_name_pre = ','.join([self._print(v) for v in expr.variables])  # Name of vars
        if len(expr.cond_vars) > 0:
            vars_name_pre += '|' + ','.join([self._print(v) for v in expr.cond_vars])
        latex_name += vars_name_pre + r'\right)'

        # N(mean, covar)
        latex_name += r'&= \mathcal{N}\left('

        # Robustness: bind the full-expression names up front so the final
        # concatenation below never sees an unbound name (previously a missing
        # blockform raised NameError).
        mean_name = ''
        covar_name = ''

        if len(expr.variables) > 1:
            # Multivariate case: stack variables into a column smallmatrix.
            vars_name_N = r'\left[\begin{smallmatrix}'
            for i in range(len(expr.variables) - 1):
                vars_name_N += self._print(expr.variables[i]) + r'\\'
            vars_name_N += self._print(expr.variables[-1]) + r'\end{smallmatrix}\right]'

            # Mean: one smallmatrix row per block of the blockform.
            mean_short_name = r'\mathbf{m}_{' + vars_name_pre + r'}'
            if expr.mean.blockform is not None:
                mean_name = r'\left[\begin{smallmatrix}'
                for i in range(len(expr.mean.blockform) - 1):
                    mean_name += self._print(expand_to_fullexpr(
                        expr.mean.blockform[i], excluded=excluded_classes).doit()) + r'\\'
                mean_name += self._print(expand_to_fullexpr(
                    expr.mean.blockform[-1], excluded=excluded_classes).doit()) + r'\end{smallmatrix}\right]'

            # Covariance: rows of '&'-separated entries.
            covar_short_name = r'\mathbf{\Sigma}_{' + vars_name_pre + r'}'
            if expr.covar.blockform is not None:
                covar_name = r'\left[\begin{smallmatrix}'
                for i in range(len(expr.covar.blockform) - 1):
                    for j in range(len(expr.covar.blockform[i]) - 1):
                        covar_name += self._print(expand_to_fullexpr(
                            expr.covar.blockform[i][j], excluded=excluded_classes).doit()) + r'&'
                    covar_name += self._print(expand_to_fullexpr(
                        expr.covar.blockform[i][-1], excluded=excluded_classes).doit()) + r'\\'
                # Add last row, closing the smallmatrix.
                for j in range(len(expr.covar.blockform[-1]) - 1):
                    covar_name += self._print(expand_to_fullexpr(
                        expr.covar.blockform[-1][j], excluded=excluded_classes).doit()) + r'&'
                covar_name += self._print(expand_to_fullexpr(
                    expr.covar.blockform[-1][-1], excluded=excluded_classes).doit()) + r'\end{smallmatrix}\right]'

            # Write shortened distribution expression.
            latex_name += vars_name_N + r';' + mean_short_name + r',' + covar_short_name + r'\right)\\' + "\n"
        else:
            # Single-variable case: use the 'expanded' forms if available.
            mean_short_name = r'\mathbf{m}_{' + vars_name_pre + r'}'
            mean_name = self._print(expand_to_fullexpr(
                expr.mean.expanded, excluded=excluded_classes).doit()) if expr.mean.expanded is not None else ''
            covar_short_name = r'\mathbf{\Sigma}_{' + vars_name_pre + r'}'
            covar_name = self._print(expand_to_fullexpr(
                expr.covar.expanded, excluded=excluded_classes).doit()) if expr.covar.expanded is not None else ''

            # Write shortened distribution expression.
            var_name_N = self._print(expr.variables[0])
            latex_name += var_name_N + r';' + mean_short_name + r',' + covar_short_name + r'\right)\\' + "\n"

        # Add full expressions for mean and covariance below, when both exist.
        if mean_name != '' and covar_name != '':
            latex_name += mean_short_name + r' &= ' + mean_name + r'\\' + "\n" + \
                covar_short_name + r' &= ' + covar_name + r'\\' + "\n"
        latex_name += r'\end{align*}'
        return latex_name

    def _print_Identity(self, expr):
        return r'\mathbf{I}'

    def _print_ZeroMatrix(self, expr):
        return r'\mathbf{0}'

    def _print_KernelMatrix(self, expr):
        # K_{x1,x2} in bold, with the inputs rendered via the module-level matLatex.
        return (r'\mathbf{' + expr.K.name + '}_{' + matLatex(expr.inputs[0]) + ',' +
                matLatex(expr.inputs[1]) + '}')
def matLatex(expr, profile=None, **kwargs):
    """
    Returns the LaTeX code for the given expression.

    Args:
        expr    - Any expression matLatPrinter can print.
        profile - Optional dict of printer settings; entries in ``kwargs``
                  take precedence.

    Fixes vs. original: the caller's ``profile`` dict is no longer mutated
    in place, and the cleanup regex is a raw string (the old plain string
    carried invalid escape sequences).
    """
    # Work on a copy so the caller's settings dict is left untouched.
    settings = dict(profile) if profile is not None else {}
    settings.update(kwargs)
    out_latex = matLatPrinter(settings).doprint(expr)
    # Clean up string: change '+ -1' to '-' (the '.' matches the separator).
    out_latex = re.sub(r'(\+.\-1)', '-', out_latex)
    return out_latex
def updateLatexDoc(filename, expr):
    """
    Updates the latex filename with the given expression.

    This function is mainly used to typeset the LaTeX code that is produced by calling
    utils.matLatex(expr).

    We append the expression to the list of 'dmath' environments from the breqn package.
    For MVGs we also display the full expressions for the mean and covariance below the
    expression for the distribution.

    Args:
        filename - The '.tex' file to which we write the LaTeX code. Must already
                   contain a \\begin{document}...\\end{document} skeleton.
        expr     - The expression (or list of expressions) for which we want to generate LaTeX.
                   This can be any native SymPy expression (and the subclasses in this library)
                   or an MVG.

    Side effects: rewrites the file in place, then runs ``latexmk -pdf`` and
    opens the resulting PDF (macOS ``open``).
    """
    import subprocess
    # NOTE(review): imports a top-level 'MVG' module; elsewhere this package is
    # addressed as 'symgp' — confirm the import path is correct.
    from MVG import MVG

    with open(filename, 'r+') as f:
        contents = f.read()
        # Split into (preamble+\begin{document}, body, \end{document}+trailer);
        # index 2 of the split result is the document body.
        split_contents = re.split(r"(.+\\begin\{document\}\n)(.+)(\\end\{document\}.*)", contents, flags=re.DOTALL)

        edited_content = split_contents[2]
        if edited_content == '\n':
            edited_content = ''

        # Normalise to a list so single expressions and lists share one path.
        if not isinstance(expr, list):
            expr = [expr]

        for e in expr:
            # Write our expression to the end of the file.
            if isinstance(e, MVG):
                edited_content += r'\section{$' + matLatex(e.name) + r'$}' + "\n"
                # Font/size tweaks for the align* block produced by _print_MVG.
                edited_content += r'\begingroup\makeatletter\def\f@size{12}\check@mathfonts' + "\n" + \
                    r'\def\maketag@@@#1{\hbox{\m@th\large\normalfont#1}}' + "\n"
                edited_content += matLatex(e)
                edited_content += r'\endgroup' + "\n\n"
            else:
                edited_content += r'\section{expression}' + "\n"
                edited_content += "\\begin{align*}\n"
                edited_content += matLatex(e)
                edited_content += "\n\\end{align*}\n"

        # Rewrite the whole file with the extended body.
        split_contents[2] = edited_content
        f.seek(0)
        f.write(''.join(split_contents))
        f.truncate()

    # Compile and display the updated document.
    subprocess.check_call(["latexmk", "-pdf", str(filename)])
    subprocess.check_call(["open", filename.split(".")[0] + ".pdf"])
######## Expression conversion functions ########
def expand_mat_sums(sums):
    """
    Distribute a product of sums into a sum of products (divide-and-conquer).

    Helper for 'expand_matmul'; based on 'def _expandsums' in sympy.core.mul.
    """
    from symgp.superexpressions.supermatadd import SuperMatAdd, SuperMatMul

    n = len(sums)
    if n == 1:
        return sums[0]

    # Expand each half recursively, then take all pairwise products.
    left_args = expand_mat_sums(sums[:n // 2]).args
    right_args = expand_mat_sums(sums[n // 2:]).args
    products = [a * b for a in left_args for b in right_args]
    return SuperMatAdd(*products)
def expand_matmul(expr):
    """
    Expands MatMul objects e.g. C*(A+B) -> C*A + C*B.

    Based on 'def _eval_expand_mul' in sympy.core.mul.
    """
    from symgp.superexpressions import SuperMatAdd

    # Wrap non-sum factors in Basic so expand_mat_sums can treat every factor
    # uniformly via .args; remember whether any factor is actually a sum.
    factors = []
    found_add = False
    for factor in expr.args:
        if isinstance(factor, MatrixExpr) and factor.is_MatAdd:
            factors.append(factor)
            found_add = True
        else:
            factors.append(Basic(factor))

    if not found_add:
        return expr
    if not factors:
        return expr

    # Distribute, then recursively expand any product term that still
    # contains a sum.
    expanded_terms = []
    for term in expand_mat_sums(factors).args:
        if (isinstance(term, MatrixExpr) and term.is_MatMul and
                any(a.is_MatAdd if isinstance(a, MatrixExpr) else False for a in term.args)):
            term = expand_matmul(term)
        expanded_terms.append(term)
    return SuperMatAdd(*expanded_terms).doit()
def expand_matexpr(expr):
    """
    Expands matrix expressions (MatrixExpr): sums have their product terms
    expanded term-by-term, products are distributed over sums.
    """
    from symgp.superexpressions import SuperMatAdd

    if expr.is_MatAdd:
        # Expand each product term of the sum; leave other terms untouched.
        expanded = [expand_matexpr(a) if a.is_MatMul else a for a in expr.args]
        return SuperMatAdd(*expanded).doit()
    if expr.is_MatMul:
        return expand_matmul(expr).doit()
    return expr.doit()
def collect(expr, syms, muls, evaluate=None):
    """
    Collect additive terms of a matrix expression.

    Adapted from 'collect' function in SymPy library
    (https://github.com/sympy/sympy/blob/master/sympy/simplify/radsimp.py)

    Args:
        expr - The expression to collect terms for.
        syms + muls - List of 1 or 2 symbols corresponding to order of multiplication indicators in 'muls'.
                      e.g. syms=[B,A],muls=['left','right'] corresponds to collecting terms for expressions
                      of the form B*{W1}*A + B*{W2}*A + {W3} where {W1}, {W2} and {W3} are matrix
                      expressions to give B*({W1} + {W2})*A + {W3}
        evaluate - If true (default, taken from global_evaluate) return the rebuilt
                   expression; otherwise return the {collected_symbol: terms} dict.
    """
    from symgp.superexpressions import SuperMatMul, SuperMatAdd

    # Only sums can be collected.
    if not isinstance(expr, MatAdd):
        return expr

    if evaluate is None:
        evaluate = global_evaluate[0]

    def make_expression(terms):
        # Rebuild the non-matched factors of a product.
        product = [term for term in terms]
        return SuperMatMul(*product)

    def parse_expression(terms, pattern, mul):
        """Parse terms searching for a pattern.

        terms is a list of MatrixExprs
        pattern is an expression treated as a product of factors

        Returns tuple of (unmatched terms, matched terms), or None on no match.
        """
        # Composite patterns are decomposed into their factors; atomic ones
        # (symbol/transpose/inverse/sum) are matched whole.
        if (not isinstance(pattern, MatrixSymbol) and
                not isinstance(pattern, Transpose) and
                not isinstance(pattern, Inverse) and
                not isinstance(pattern, MatAdd)):
            pattern = pattern.args
        else:
            pattern = (pattern,)

        if len(terms) < len(pattern):
            # pattern is longer than matched product
            # so no chance for positive parsing result
            return None
        else:
            if not isinstance(pattern, MatAdd):
                pattern = [elem for elem in pattern]

            terms = terms[:]  # need a copy
            elems = []

            for elem in pattern:
                if elem.is_Number:
                    # a constant is a match for everything
                    continue

                for j in range(len(terms)):
                    # Search from right if we have a duplicate of 'pattern' in 'terms'. We only want to match one
                    # based on whether we collect terms on the right or left hand side given by 'mul'.
                    if mul == 'right':
                        k = len(terms) - 1 - j
                    else:
                        k = j

                    if terms[k] is None:
                        continue

                    term = terms[k]

                    # Either a structural .match() for non-atomic pattern elements,
                    # or exact equality.
                    if (((not (isinstance(term, elem.__class__) and (isinstance(elem, MatrixSymbol) or
                            isinstance(elem, Transpose) or isinstance(elem, Inverse))))
                            and term.match(elem) is not None) or
                            (term == elem)):
                        # found common term so remove it from the expression
                        # and try to match next element in the pattern
                        elems.append(terms[k])
                        terms[k] = None
                        break
                else:
                    # pattern element not found
                    return None

            return [_f for _f in terms if _f], elems

    # Check that syms is of length 1 or 2
    if iterable(syms):
        syms = [s for s in syms]
        if len(syms) > 2:
            raise Exception("Too many matching symbols. Maximum is 2")
    else:
        syms = [syms]

    # Check that muls is either a list of same length as syms or a string for which
    # syms only has one element
    if iterable(muls):
        muls = [m for m in muls]
        mul = muls[0]
        if len(muls) != len(syms):
            raise Exception("Number of muls should match syms.")
    else:
        mul = muls
        if not isinstance(mul, str) and len(syms) > 1:
            raise Exception("Number of muls should match syms.")

    expr = sympify(expr)

    # Get all expressions in summation.
    # If syms[0] is a MatAdd, collect terms in summa that are equal to the symbol:
    # the matched MatAdd pieces are grouped into one term, the rest kept separate.
    if isinstance(syms[0], MatAdd) and isinstance(expr, MatAdd):
        matched, rejected = ZeroMatrix(expr.shape[0], expr.shape[1]), expr
        for s in syms[0].args:
            for t in rejected.args:
                if s == t:
                    matched += t
                    rejected -= t
                    break

        summa = [matched]
        if matched != expr:
            if isinstance(rejected, MatAdd):
                summa += [i for i in rejected.args]
            else:
                summa += [rejected]
    else:
        summa = [i for i in expr.args]

    # collected maps the matched symbol-product ('index') to the leftover factors;
    # disliked accumulates terms where no match was found.
    collected, disliked = defaultdict(list), ZeroMatrix(expr.shape[0], expr.shape[1])

    # For each product in the summation, match the first symbol and update collected/
    # disliked depending on whether a match was/wasn't made.
    for product in summa:
        if isinstance(product, MatMul):
            terms = [i for i in product.args]
        else:
            terms = [product]

        # Only look at first symbol
        symbol = syms[0]
        result = parse_expression(terms, symbol, mul)

        # If symbol matched a pattern in terms, we collect the multiplicative terms for the
        # symbol into a dictionary 'collected'
        if result is not None:
            terms, elems = result

            # Rebuild the matched factor product (seeded with an Identity so a
            # single matched element keeps matrix shape).
            index = Identity(elems[0].shape[0])
            for elem in elems:
                index *= elem

            terms = make_expression(terms)
            if isinstance(terms, Number):
                # Pure-scalar leftover: promote to a matrix of the right shape.
                if mul == 'left':
                    terms = SuperMatMul(Identity(index.shape[1]), terms)
                else:
                    terms = SuperMatMul(Identity(index.shape[0]), terms)
            collected[index].append(terms)
        else:
            # none of the patterns matched
            disliked += product

    # add terms now for each key
    collected = {k: SuperMatAdd(*v) for k, v in collected.items()}

    # Two-symbol case: recursively collect the second symbol inside the first's bucket.
    if isinstance(syms, list) and isinstance(muls, list):
        second_mul = muls[1]
        first_sym, second_sym = syms
        collected[first_sym] = collect(collected[first_sym], [second_sym], second_mul)

    if not disliked.is_ZeroMatrix:
        # Unmatched remainder keyed by an Identity of matching shape.
        if mul == 'left':
            collected[Identity(disliked.shape[0])] = disliked
        else:
            collected[Identity(disliked.shape[1])] = disliked

    if evaluate:
        # Rebuild: key*val for left-collection, val*key for right-collection.
        if mul == 'left':
            if len(collected.items()) == 1:
                return [key * val for key, val in collected.items()][0]
            else:
                if expr.is_MatMul:
                    return SuperMatMul(*[key * val for key, val in collected.items()])
                else:
                    return SuperMatAdd(*[key * val for key, val in collected.items()])
        else:  # mul == 'right'
            if len(collected.items()) == 1:
                return [val * key for key, val in collected.items()][0]
            else:
                if expr.is_MatMul:
                    return SuperMatMul(*[val * key for key, val in collected.items()])
                else:
                    return SuperMatAdd(*[val * key for key, val in collected.items()])
    else:
        return collected
def accept_inv_lemma(e, start, end):
    """
    Checks whether ``e.args[start:end+1]`` (three consecutive factors) matches
    the matrix-inversion-lemma form E^{-1}*F*(H - G*E^{-1}*F)^{-1}.

    Returns True on a match, False otherwise.

    Fix vs. original: the four-factor (-1 coefficient) branch previously
    inspected ``arg_3.arg.args[1]`` unconditionally, although the MatMul term
    may have been selected from ``args[0]`` above; it now uses the already
    selected ``arg_3_args`` consistently.
    """
    def checkSym(a):
        # Leaf factor: a plain symbol or its inverse/transpose.
        return isinstance(a, MatrixSymbol) or isinstance(a, Inverse) or isinstance(a, Transpose)

    def checkMatExpr(a, class_name):
        return isinstance(a, class_name)

    if len(e.args) < 3:
        return False

    arg_1, arg_2, arg_3 = e.args[start:end + 1]

    # Match E^{-1}
    if not checkSym(arg_1):
        return False

    # Match E^{-1}F
    if not checkSym(arg_2):
        return False

    # Match E^{-1}F({MatExpr})^{-1}
    if not checkMatExpr(arg_3, Inverse):
        return False

    # Match E^{-1}F({MatAdd})^{-1}
    if not checkMatExpr(arg_3.arg, MatAdd):
        return False

    # Match E^{-1}F(A + B)^{-1}: exactly one of the two summands must be a
    # MatMul (the G*E^{-1}*F part), the other a plain symbol.
    if len(arg_3.arg.args) == 2:
        if checkSym(arg_3.arg.args[0]) and checkMatExpr(arg_3.arg.args[1], MatMul):
            arg_3_args = arg_3.arg.args[1].args
        elif checkSym(arg_3.arg.args[1]) and checkMatExpr(arg_3.arg.args[0], MatMul):
            arg_3_args = arg_3.arg.args[0].args
        else:
            return False
    else:
        return False

    # Match E^{-1}F(A + GCD)^{-1} or E^{-1}F(A + (-1)*GCD)^{-1}, where the
    # trailing C*D factors must equal E^{-1}*F.
    if len(arg_3_args) == 3 and not isinstance(arg_3_args[0], type(S.NegativeOne)):
        if not (arg_3_args[1] == arg_1 and arg_3_args[2] == arg_2):
            return False
    elif len(arg_3_args) == 4 and isinstance(arg_3_args[0], type(S.NegativeOne)):
        if not (arg_3_args[2] == arg_1 and arg_3_args[3] == arg_2):
            return False
    else:
        return False

    # Successful match
    return True
def check_inv_lemma(expr):
    """True iff ``expr`` consists of exactly three factors forming the
    matrix-inversion-lemma pattern (see accept_inv_lemma)."""
    return len(expr.args) == 3 and accept_inv_lemma(expr, 0, 2)
def simplify(expr):
    """
    A simplification algorithm: repeatedly substitutes repeated (or
    inversion-lemma-shaped) sub-expressions with fresh named symbols,
    preferring the substitution that minimises the symbol count.

    We return a tuple of (simps, subs):
        simps - candidate simplified expressions, sorted by symbol count
        subs  - {substituted_expression: replacement_symbol}
    """
    from symgp.superexpressions import SuperMatSymbol

    depth = get_max_depth(expand_to_fullexpr(expr))

    simps = []     # The simplified expressions we have obtained with the associated substitutions
    subs = {}      # Pairs substituted expressions with the substitutions made
    usedSubs = []  # The expressions we have substituted we have used so far

    usedNames = SuperMatSymbol.getUsedNames()

    # Track the best (fewest-symbols) expression found so far; each depth pass
    # works on this expression, not the original.
    min_expr = expr
    for d in range(depth, -1, -1):
        # Get the exprs at each depth for the new shortest expressions
        exprs_by_depth = get_exprs_at_depth(min_expr, range(depth + 1))
        sub_exprs = exprs_by_depth[d]

        min_syms = math.inf

        # For each sub expression at level d check for copies in other parts of expressions
        for s in sub_exprs:
            repetitions = 0

            # Find other similar expressions to s at every other depth.
            for k in exprs_by_depth.keys():
                if k == d:
                    continue
                if s in exprs_by_depth[k]:
                    repetitions += exprs_by_depth[k].count(s)

            # Make replacements if expression 's' appears more than twice throughout expression or
            # it corresponds to the special matrix inverse lemma
            if (repetitions > 0 or check_inv_lemma(s)) and s not in usedSubs:
                # Update the used substituted expressions
                usedSubs.append(s)

                # TODO: Allow for using best or range of simplified exprs from previous depths

                # Lower case for vectors and upper case for matrices
                if s.shape[0] != 1 and s.shape[1] != 1:
                    avail_prefixes = string.ascii_uppercase
                else:
                    avail_prefixes = string.ascii_lowercase

                # Keep on searching for available replacement names of the form
                # '<letter>_{<i>}', i in [0, 99).
                for c in avail_prefixes:
                    i = 0
                    r_name = c + '_{' + str(i) + '}'
                    while r_name in usedNames and i < 99:
                        i += 1
                        r_name = c + '_{' + str(i) + '}'

                    if not r_name in usedNames:
                        # Substitute s -> r throughout the current best expression.
                        r = SuperMatSymbol(s.shape[0], s.shape[1], r_name, expanded=s)
                        repl_list = [(s, r)]
                        simp_expr = replace(min_expr, repl_list).doit()

                        if not subs.get(s):
                            subs[s] = r
                        simps.append(simp_expr.doit())

                        # Adopt this substitution as the new baseline if it
                        # reduces the symbol count.
                        num_syms = get_num_symbols(simp_expr)
                        if num_syms < min_syms:
                            min_syms = num_syms
                            min_expr = simp_expr.doit()

                        # Check if we can collect any symbols on simp_expr. If we can add to simps.
                        if isinstance(simp_expr, MatAdd):
                            ends_of_expr_collection = get_ends(simp_expr)

                            for ends_of_expr in ends_of_expr_collection:
                                # Group the summand indices by their left end,
                                # right end, and (left, right) pair.
                                ends_dict_left = defaultdict(list)
                                ends_dict_right = defaultdict(list)
                                ends_dict_both = defaultdict(list)

                                # Collect left ends and right ends
                                for l in range(len(ends_of_expr)):
                                    if len(ends_of_expr[l]) == 2:
                                        ends_dict_left[ends_of_expr[l][0]].append(l)
                                        ends_dict_right[ends_of_expr[l][1]].append(l)
                                        ends_dict_both[ends_of_expr[l]].append(l)
                                    else:
                                        ends_dict_left[ends_of_expr[l][0]].append(l)
                                        ends_dict_right[ends_of_expr[l][0]].append(l)

                                # If there are two or more repetitions of a symbol, collect
                                for key, val in ends_dict_left.items():
                                    simped = collect(simp_expr, key, 'left').doit()
                                    if len(val) >= 2 and not simped in simps:
                                        simps.append(simped)

                                for key, val in ends_dict_right.items():
                                    simped = collect(simp_expr, key, 'right').doit()
                                    if len(val) >= 2 and not simped in simps:
                                        simps.append(simped)

                                # For cases where both ends are repeated two or more times (e.g. A*P*A + A*Q*A + B), collect
                                for key, val in ends_dict_both.items():
                                    simped = collect(simp_expr, [key[0], key[1]], ['left', 'right']).doit()
                                    if len(val) >= 2 and not simped in simps:
                                        simps.append(simped)

                        # A name was found and used; stop scanning prefixes.
                        break

    # Cheapest (fewest symbols) candidates first.
    simps = sorted(simps, key=lambda e: get_num_symbols(e))

    return simps, subs
######## Quick creation of variables/constants ########
def variables(var_names, var_shapes):
    """
    Creates a tuple of SuperMatSymbol Variables with the given names.

    Args:
        var_names - The names of each variable.
                    Can be a string, list or tuple.
                    For a string, the variable names are separated by spaces e.g. "u f fs" for variables with
                    names "u", "f" and "fs".
        var_shapes - The shapes of each variable.
                     Can be a list or tuple of tuples, e.g. [(m,n), (p,q), (i,j)].
                     If the variable is a column vector, a single dimension suffices,
                     e.g. [m, p, i] (Symbol or int) for shapes (m,1), (p,1) and (i,1).
                     Combinations also work, e.g. [m, (p,q), i].

    Returns:
        output_vars - A tuple of Variable objects, one per name.

    Fixes vs. original: the caller's ``var_shapes`` list is no longer mutated
    in place; a real tuple is returned (the old generator could only be
    consumed once, contradicting the docstring); plain-int dimensions are
    accepted in addition to Symbols.
    """
    from symgp.superexpressions import Variable

    if isinstance(var_names, str):
        var_names = var_names.split(" ")

    # Lists must be of same length
    assert(len(var_names) == len(var_shapes))

    # Normalise scalar dimensions to column-vector shapes without mutating
    # the caller's list.
    shapes = [(s, 1) if isinstance(s, (Symbol, int)) else s for s in var_shapes]

    return tuple(Variable(name, shape[0], shape[1])
                 for name, shape in zip(var_names, shapes))
def constants(const_names, const_shapes):
    """
    Creates a tuple of Constant symbols with the given names and shapes.

    Mirrors :func:`variables`: ``const_names`` may be a space-separated string
    or a list/tuple; each entry of ``const_shapes`` is a (rows, cols) tuple or
    a single dimension (Symbol or int) meaning a column vector (dim, 1).

    Fixes vs. original: the caller's ``const_shapes`` list is no longer mutated
    in place; a real tuple is returned instead of a one-shot generator;
    plain-int dimensions are accepted.
    """
    from symgp.superexpressions import Constant

    if isinstance(const_names, str):
        const_names = const_names.split(" ")

    # Lists must be of same length
    assert(len(const_names) == len(const_shapes))

    # Normalise scalar dimensions to column-vector shapes without mutating
    # the caller's list.
    shapes = [(s, 1) if isinstance(s, (Symbol, int)) else s for s in const_shapes]

    return tuple(Constant(name, shape[0], shape[1])
                 for name, shape in zip(const_names, shapes))
######## Useful functions to get info about expressions ########
def get_exprs_at_depth(expr, depths):
    """
    Returns the MatAdd and MatMul expressions in expr at levels given by 'depth' of the expression tree.

    The top expression is level 0.

    If no expressions at the levels exist, we simply return an empty dict
    (the returned defaultdict yields [] for absent levels).
    """
    from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat

    if isinstance(depths, int):
        depths = [depths]
    else:
        depths = list(depths)

    exprs_at_depths = defaultdict(list)
    # Depth-first traversal; each stack entry is a one-item {node: level} dict.
    stack = [{expr: 0}]
    while len(stack) > 0:
        sub_expr, level = list(stack.pop().items())[0]

        if level in depths and (isinstance(sub_expr, MatAdd) or isinstance(sub_expr, MatMul)):
            if isinstance(sub_expr, MatAdd) and len(sub_expr.args) > 2:  # Substitute all permutations of > 2 arg MatAdds
                sub_expr_perms = get_permutations(sub_expr)
                exprs_at_depths[level].extend(sub_expr_perms)
            elif isinstance(sub_expr, MatMul):  # Substitute
                # Remove number at head of expression
                if isinstance(sub_expr.args[0], Number):
                    sub_expr = type(sub_expr)(*sub_expr.args[1:])

                if len(sub_expr.args) > 2:
                    # Slide a 3-factor window over the product looking for the
                    # inversion-lemma pattern; record only that window if found,
                    # otherwise record the whole product.
                    l = len(sub_expr.args)
                    start, end = 0, 2
                    while end < l:
                        if (accept_inv_lemma(sub_expr, start, end)):
                            new_expr = type(sub_expr)(*sub_expr.args[start:end + 1])
                            exprs_at_depths[level].append(new_expr)
                            break
                        else:
                            start += 1
                            end += 1

                    if end == l:
                        exprs_at_depths[level].append(sub_expr)
                else:
                    exprs_at_depths[level].append(sub_expr)
            else:
                exprs_at_depths[level].append(sub_expr)

        # Recurse into composite nodes.
        if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
                isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
                isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
            # Reversed so children are popped (and hence visited) left-to-right.
            for arg in reversed(sub_expr.args):
                stack.append({arg: level + 1})

    return exprs_at_depths
def get_ends(expr):
    """
    Returns the left and right matrices of the args of the MatAdd expression, expr.

    For example for A*Q*B + 2*C + D*E, we return [(A,B), (C,), (D,E)]
    or for (Q+A)*R*(Q+A) + Q + A we return [(Q+A,Q+A), (Q+A,)]
    """
    from symgp.superexpressions import SuperMatMul, SuperMatAdd

    # The collections of 'ends' lists where each has different groupings of single symbols.
    # For example, for an expression (A+B)*Q*(B+C) + A + B + C, the two 'ends' lists we get are:
    #
    #     ends_collection = [[(A+B,B+C), (A+B), (C,)],
    #                        [(A+B,B+C), (A,), (B+C,)]]
    #
    ends_collection = []

    # The preliminary list of end arguments of args of expr
    ends = []

    expr_args = list(expr.doit().args)

    # Pairs a MatMul to the remainder keyed by the ends. We ignore expressions of form {Number}*A where
    # A is a MatSym, MatTrans or MatInv
    mmul_to_rem = {}
    for a in expr_args:
        # Strip any numeric coefficient before reading off the end factors.
        a_mmul = a.as_coeff_mmul()[1].doit()
        if isinstance(a, MatMul):
            ends.append((a_mmul.args[0], a_mmul.args[-1]))
            mmul_to_rem[(a_mmul.args[0], a_mmul.args[-1])] = (a, (expr - a).doit())
        else:
            ends.append((a_mmul,))

    ends_collection.append(ends)

    # For every product whose end is itself a sum, try to regroup the free
    # summands of the remainder into that same sum, producing an alternative
    # 'ends' list.
    for ends_mmul, val in mmul_to_rem.items():
        for end in ends_mmul:
            if isinstance(end, MatAdd):
                rem = val[1]
                match = [elem for elem in get_permutations(val[1]) if elem == end]

                if len(match) > 1:
                    raise Exception("More than one match found: %s" % (match))

                if len(match) > 0:
                    new_ends = [ends_mmul]
                    new_ends.append((match[0],))

                    # Subtract the matched summands out of the remainder.
                    for arg in match[0].args:
                        rem = (rem - arg).doit()

                    # Get remaining elements.
                    # NOTE(review): branching on MatMul and iterating its args here
                    # looks like it was written for a MatAdd remainder — confirm.
                    if isinstance(rem, MatMul):
                        for arg in rem.args:
                            if isinstance(arg, MatMul):
                                new_ends.append((arg.args[0], arg.args[-1]))
                            else:
                                new_ends.append((arg,))
                    else:
                        new_ends.append((rem,))

                    if not new_ends in ends_collection:
                        ends_collection.append(new_ends)

    return ends_collection
def get_num_symbols(expr):
    """Return the number of MatrixSymbol leaves in the expression tree."""
    from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat

    composite_types = (MatMul, MatAdd, Inverse, Transpose,
                       SuperDiagMat, SuperBlockDiagMat)

    count = 0
    to_visit = [expr]
    while to_visit:
        node = to_visit.pop()
        if isinstance(node, MatrixSymbol):
            count += 1
        # Descend into composite nodes only; everything else is a leaf.
        if isinstance(node, composite_types):
            to_visit.extend(reversed(node.args))
    return count
def display_expr_tree(expr):
    """Print an indented dump of the full expression tree of ``expr``."""
    from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat

    composite_types = (MatMul, MatAdd, Inverse, Transpose,
                       SuperDiagMat, SuperBlockDiagMat)

    # Depth-first walk over the fully expanded expression; pushing children
    # reversed makes them print in left-to-right order.
    stack = [(expand_to_fullexpr(expr), 0)]
    while stack:
        node, depth = stack.pop()
        print("-" + 4 * depth * "-", node)
        if isinstance(node, composite_types):
            for child in reversed(node.args):
                stack.append((child, depth + 1))
def get_max_depth(expr):
    """
    Return the maximum depth of the expression tree, counting the root as
    depth 0 and increasing by one for each composite node descended.
    """
    from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat

    composite_types = (MatMul, MatAdd, Inverse, Transpose,
                       SuperDiagMat, SuperBlockDiagMat)
    max_depth = 0
    nodes = [(expr, 0)]
    while nodes:
        node, depth = nodes.pop()
        if isinstance(node, composite_types):
            nodes.extend((child, depth + 1) for child in reversed(node.args))
            # Children of this node live one level deeper.
            max_depth = max(max_depth, depth + 1)
    return max_depth
def get_all_kernel_variables(expr):
    """
    Return the variables appearing as arguments of KernelMatrix objects in
    ``expr``, in discovery order and without duplicates.

    For example, for expr = K(a,u)*K(u,u)*K(u,b), we return [a, u, b].
    """
    from symgp import SuperDiagMat, SuperBlockDiagMat, KernelMatrix

    composite_types = (MatMul, MatAdd, Inverse, Transpose,
                       SuperDiagMat, SuperBlockDiagMat)
    kern_vars = []
    nodes = [(expr, 0)]
    while nodes:
        node, depth = nodes.pop()
        if isinstance(node, KernelMatrix):
            # Record both kernel arguments, preserving first-seen order.
            for inp in (node.inputs[0], node.inputs[1]):
                if inp not in kern_vars:
                    kern_vars.append(inp)
        if isinstance(node, composite_types):
            nodes.extend((child, depth + 1) for child in reversed(node.args))
    return kern_vars
def get_permutations(expr):
    """
    Return the combinations of the terms of a MatAdd expression, for sizes
    2 up to the number of terms.

    For example, for A + B + C + D, we return:
        [A+B, A+C, A+D, B+C, B+D, C+D, A+B+C, A+B+D, A+C+D, B+C+D, A+B+C+D]

    :raises Exception: If ``expr`` is neither an atomic expression
        (MatrixSymbol/Transpose/Inverse) nor a MatAdd.
    """
    from symgp.superexpressions import SuperMatAdd
    import itertools

    # Atomic expressions have no additive terms to combine.
    if isinstance(expr, (MatrixSymbol, Transpose, Inverse)):
        return [expr]
    if not isinstance(expr, MatAdd):
        raise Exception("Function only works for MatAdd expressions")
    terms = expr.args
    perms = []
    for size in range(2, len(terms) + 1):
        perms.extend(SuperMatAdd(*combo).doit()
                     for combo in itertools.combinations(terms, size))
    return perms
def get_var_coeffs(expr, var):
    """
    Returns the coeffs for the given variable and the remainder
    Args:
        - 'expr' - The expanded matrix expression
        - 'var' - List of variables for which we find the coefficients
    Returns:
        - 'coeffs' - A list of coeffs of the variables. Same size as 'var'
        - 'rem' - The remaining expression (when we subtract the terms corresponding to variables in 'var')
    """
    from symgp.superexpressions import SuperMatMul, SuperMatAdd
    # Default each coefficient to a zero matrix shaped (rows of expr, rows of v).
    coeffs = [ZeroMatrix(expr.shape[0],v.shape[0]) for v in var]
    # Search the expression tree for each variable in var then add coefficient to list
    if expr.is_MatAdd:
        for arg in expr.args:
            if arg in var:
                # Term is a bare variable: its coefficient is the scalar part.
                # NOTE(review): as_coeff_mmul()[0] yields 1 for a plain symbol,
                # not an Identity matrix — confirm this is the intended shape.
                for i, v in enumerate(var):
                    if arg == v:
                        coeffs[i] = arg.as_coeff_mmul()[0]
            else:
                # Term is a product: the coefficient of v is the product of
                # all the other factors in that term.
                for arg2 in arg.args:
                    if arg2 in var:
                        for i, v in enumerate(var):
                            if arg2 == v:
                                coeffs[i] = SuperMatMul(*[c for c in arg.args if c != arg2]).doit()
        # Remainder: additive terms not reproduced by any coeff*var product.
        rem = SuperMatAdd(*[c for c in expr.args if c not in [c*v for c,v in zip(coeffs,var)]]).doit()
    elif expr.is_MatMul:
        rem = expr
        for arg in expr.args:
            if arg in var:
                for i, v in enumerate(var):
                    if arg == v:
                        # Whole expression is coeff*v: remainder collapses to zero.
                        coeffs[i] = SuperMatMul(*[c for c in expr.args if c != v]).doit()
                        rem = ZeroMatrix(expr.shape[0], expr.shape[1])
    else:
        rem = expr  # If no match is made, we leave remainder as expr
        for i, v in enumerate(var):
            if expr == v:
                # expr is exactly one of the variables: identity coefficient.
                coeffs[i] = Identity(expr.shape[0])
                rem = ZeroMatrix(expr.shape[0],expr.shape[1])
    return coeffs, rem
def create_blockform(A, B, C, D):
    """
    Assemble the 2x2 block matrix [[A, B], [C, D]] from four partitions.

    :param A: Top-left partition; a 2-D list of lists, i.e. of form [[.]].
    :param B: Top-right partition (same form).
    :param C: Bottom-left partition (same form).
    :param D: Bottom-right partition (same form).
    :return: The full blockform i.e. [[A, B], [C, D]].
    """
    # Concatenate rows horizontally within each half, then stack halves.
    upper = [left + right for left, right in zip(A, B)]
    lower = [left + right for left, right in zip(C, D)]
    return upper + lower
def get_variables(expr):
    """
    Return all the distinct 'Variable' objects found in ``expr``, in
    discovery order.
    """
    from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat, Variable

    composite_types = (MatMul, MatAdd, Inverse, Transpose,
                       SuperDiagMat, SuperBlockDiagMat)
    found = []
    nodes = [(expr, 0)]
    while nodes:
        node, depth = nodes.pop()
        if isinstance(node, Variable) and node not in found:
            found.append(node)
        if isinstance(node, composite_types):
            # Reversed so children are visited left-to-right off the stack.
            nodes.extend((child, depth + 1) for child in reversed(node.args))
    return found
######### Other miscellaneous functions #########
def create_distr_name(dep_vars=None, cond_vars=None) -> str:
    """
    Build a distribution name from its variables, e.g. 'x|z' for p(x|z).

    :param dep_vars: The random variables of a distribution (x in p(x|z)).
        May also be a pair of lists, which are concatenated in order.
    :param cond_vars: The conditioned-on variables (z in p(x|z)).
    :return: A string of the name.
    """
    parts = []
    if dep_vars:
        if isinstance(dep_vars[0], list):
            # Two groups of dependent variables: flatten them in order.
            variables = dep_vars[0] + dep_vars[1]
        else:
            variables = dep_vars
        parts.append(','.join(v.name for v in variables))
    if cond_vars:
        parts.append('|' + ','.join(v.name for v in cond_vars))
    return ''.join(parts)
######## GUI lexer ########
class Token(object, metaclass=ABCMeta):
    """
    Abstract base class for parser tokens.

    Each token wraps a single value — normally the matched source string.
    """
    def __init__(self, *args) -> None:
        # The first positional argument is the token's value.
        self.value = args[0]
    def __eq__(self, other: 'Token'):
        # Tokens compare equal when their wrapped values compare equal.
        return self.value == other.value
    def __str__(self):
        return self.value
    def __repr__(self):
        return f"{type(self).__name__}(value={self.value})"
class DiagToken(Token):
    """Token for the (block-)diagonal operator keywords."""
    ALLOWED_VALUES = ['diag', 'blockdiag', 'blkdiag']
    def __init__(self, t: str) -> None:
        """
        Initialises token for diagonal symbols.

        :param t: The token text; one of 'diag', 'blockdiag' or 'blkdiag'.
        """
        assert t in DiagToken.ALLOWED_VALUES, "t must be one of {}".format(DiagToken.ALLOWED_VALUES)
        super().__init__(t)
class OperatorToken(Token):
    """Token for the binary arithmetic operator symbols."""
    ALLOWED_VALUES = ['+', '-', '*']
    def __init__(self, op: str) -> None:
        """
        Initialises token for operator symbols.

        :param op: The operator character; one of '+', '-' or '*'.
        """
        assert op in OperatorToken.ALLOWED_VALUES, "op must be one of {}".format(OperatorToken.ALLOWED_VALUES)
        super().__init__(op)
class PlusToken(OperatorToken):
    """Operator token for '+'."""
    def __init__(self):
        super(PlusToken, self).__init__('+')
class MinusToken(OperatorToken):
    """Operator token for '-'."""
    def __init__(self):
        super(MinusToken, self).__init__('-')
class StarToken(OperatorToken):
    """Operator token for '*'."""
    def __init__(self):
        super(StarToken, self).__init__('*')
class ParenToken(Token):
    """Token for parenthesis/bracket/brace characters."""
    ALLOWED_VALUES = ['(', ')', '[', ']', '{', '}']
    def __init__(self, paren: str) -> None:
        """
        Initialises token for parentheses symbols.

        :param paren: The paren character; one of '(', ')', '[', ']', '{', '}'.
        """
        assert paren in ParenToken.ALLOWED_VALUES, "paren must be one of {}".format(ParenToken.ALLOWED_VALUES)
        super().__init__(paren)
class LRoundParenToken(ParenToken):
    """Paren token for '('."""
    def __init__(self):
        super().__init__('(')
class RRoundParenToken(ParenToken):
    """Paren token for ')'."""
    def __init__(self):
        super().__init__(')')
class LBoxParenToken(ParenToken):
    """Paren token for '['."""
    def __init__(self):
        super().__init__('[')
class RBoxParenToken(ParenToken):
    """Paren token for ']'."""
    def __init__(self):
        super().__init__(']')
class LCurlyParenToken(ParenToken):
    """Paren token for '{'."""
    def __init__(self):
        super().__init__('{')
class RCurlyParenToken(ParenToken):
    """Paren token for '}'."""
    def __init__(self):
        super().__init__('}')
class MatIdentifierToken(Token):
    """Token for matrix variable identifiers."""
    def __init__(self, mat: str) -> None:
        """
        Initialises token for matrix variable identifier symbols.

        :param mat: The identifier text. Must start with an upper case letter
            and contain only alphanumeric characters and/or '_'.
        """
        super().__init__(mat)
class VecIdentifierToken(Token):
    """Token for vector variable identifiers."""
    def __init__(self, vec: str) -> None:
        """
        Initialises token for vector variable identifier symbols.

        :param vec: The identifier text. Must start with a lower case letter
            and contain only alphanumeric characters and/or '_'.
        """
        super().__init__(vec)
class KernelToken(Token):
    """Token for kernel function symbols, e.g. ``K(a,b)``."""
    def __init__(self, name: str, arg1: str, arg2: str) -> None:
        """
        Initialises token for kernel function symbols.

        :param name: The kernel name. Can start with lower or upper case letters.
        :param arg1: The first argument of the kernel.
        :param arg2: The second argument of the kernel.
        """
        super().__init__(name)
        self.arg1 = arg1
        self.arg2 = arg2
    def __eq__(self, other: Token):
        # Equal only to another KernelToken with identical name and arguments.
        if not isinstance(other, KernelToken):
            return False
        return (self.value, self.arg1, self.arg2) == (other.value, other.arg1, other.arg2)
    def __str__(self):
        return "{}({},{})".format(self.value, self.arg1, self.arg2)
    def __repr__(self):
        return "{}(value={}, arg1={}, arg2={})".format(
            type(self).__name__, self.value, self.arg1, self.arg2)
class GroupToken(Token):
    """
    Groups a sequence of tokens into a single composite token.
    """
    def __init__(self, tokens: List[Token]):
        """
        Initialises the token that groups a sequence of tokens together.

        :param tokens: The list of tokens to group.
        """
        super().__init__(tokens)
    def tokens(self) -> List[Token]:
        """Return the wrapped list of tokens."""
        return self.value
class InvToken(Token):
    """Token for the matrix-inverse operator suffixes."""
    ALLOWED_VALUES = ['.I', '^-1', '^{-1}']
    def __init__(self, op: str) -> None:
        """
        Initialises token representing the inverse operation.

        :param op: Must be one of '.I', '^-1', '^{-1}'.
        """
        assert op in InvToken.ALLOWED_VALUES, "op must be one of {}".format(InvToken.ALLOWED_VALUES)
        super().__init__(op)
class TransToken(Token):
    """Token for the matrix-transpose operator suffixes."""
    ALLOWED_VALUES = ['.T', '\'', '^t', '^T', '^{t}', '^{T}']
    def __init__(self, op: str) -> None:
        """
        Initialises token representing the transpose operation.

        :param op: Must be one of ".T", "'", "^t", "^T", "^{t}", "^{T}".
        """
        assert op in TransToken.ALLOWED_VALUES, "op must be one of {}".format(TransToken.ALLOWED_VALUES)
        super().__init__(op)
def get_tokens(expr: str) -> List[Token]:
    """
    Converts a string expression into a list of tokens. An exception is raised
    if the expression doesn't give a valid parse.

    :param expr: The expression which we want to turn into a list of tokens.
    :return: The list of tokens.
    :raises ValueError: If a matched sub-string is not a valid symbol, paren,
        operator or matrix operation.
    :raises RuntimeError: If an operator or symbol is missing mid-expression.
    :raises Exception: If the expression doesn't match the grammar at all.
    """
    # Useful functions
    def match_to_symbol(s: str) -> Union[MatIdentifierToken, VecIdentifierToken, KernelToken]:
        """
        Determines whether s matches to mat_identifier, vec_identifier or kernel.

        :param s: The expression which we want to match.
        :return: The corresponding token.
        :raises ValueError: If there is no match.
        """
        if mat_identifier.fullmatch(s):
            return MatIdentifierToken(s)
        elif vec_identifier.fullmatch(s):
            return VecIdentifierToken(s)
        elif kernel.fullmatch(s):
            # Break up 's' into the kernel name and the two arguments
            match = s.split("(")
            name = match[0]
            arg1, arg2 = match[1].strip(")").split(",")
            return KernelToken(name, arg1, arg2)
        else:
            # Bug fix: this ValueError used to be *returned* instead of raised,
            # so an invalid symbol silently became a token-list entry.
            raise ValueError("Invalid string: {}. Should match regexes: {}, {} or {}".format(
                s, mat_identifier.pattern, vec_identifier.pattern, kernel.pattern))
    def match_to_mat_op(s: str) -> Union[TransToken, InvToken]:
        """
        Determines whether s matches inv_sym or trans_sym.

        :param s: String to be matched.
        :return: A TransToken or InvToken depending on s.
        :raises ValueError: If there is no match.
        """
        if inv_sym.fullmatch(s):
            return InvToken(s)
        elif trans_sym.fullmatch(s):
            return TransToken(s)
        else:
            raise ValueError("Invalid string: {}. Should match regexes: {} or {}".format(s, inv_sym.pattern, trans_sym.pattern))
    def make_paren_token(s: str) -> ParenToken:
        # Map a single paren character to its token class.
        if s == '(':
            return LRoundParenToken()
        elif s == ')':
            return RRoundParenToken()
        elif s == '[':
            return LBoxParenToken()
        elif s == ']':
            return RBoxParenToken()
        elif s == '{':
            return LCurlyParenToken()
        elif s == '}':
            return RCurlyParenToken()
        else:
            raise ValueError("Invalid paren token. Must be one of '(',')','[',']','{','}'. Provided: %s" % (s))
    def make_operator_token(s: str) -> OperatorToken:
        # Map an operator character to its token class.
        if s == '+':
            return PlusToken()
        elif s == '-':
            return MinusToken()
        elif s == '*':
            return StarToken()
        else:
            raise ValueError('Invalid token. Must be one of "+", "-" or "*". Specified: {}'.format(s))
    # Remove meaningless spaces
    expr = expr.replace(" ", "")
    ## Regex expressions ##
    # Low-level expressions
    digit = re.compile(r"[0-9_]")
    lower_char = re.compile(r"[a-z]")
    upper_char = re.compile(r"[A-Z]")
    operators = re.compile(r"[\+\-\*]")
    diag_op = re.compile(r"diag|blkdiag|blockdiag")
    inv_sym = re.compile(r"\.I|\^\-1|\^\{\-1\}")
    trans_sym = re.compile(r"\.T|\'|\^t|\^T|\^\{t\}|\^\{T\}")
    # Matrices (upper-case-led) and vectors (lower-case-led)
    mat_identifier = re.compile(r"{1}(?:{0}|{1}|{2})*".format( \
        lower_char.pattern, upper_char.pattern, digit.pattern))
    vec_identifier = re.compile(r"{0}(?:{0}|{1}|{2})*".format( \
        lower_char.pattern, upper_char.pattern, digit.pattern))
    # Kernels: name(arg1,arg2)
    kernel = re.compile(r"(?:{0}|{1})\((?:{2}|{3}),(?:{2}|{3})\)".format( \
        lower_char.pattern, upper_char.pattern, vec_identifier.pattern, mat_identifier.pattern))
    # Matrices, vectors and kernels
    symbols = re.compile(r"{0}|{1}|{2}".format(
        mat_identifier.pattern, vec_identifier.pattern, kernel.pattern)
    )
    # Head of the expression: optional '(' / diag '[' / '(' prefix, a symbol,
    # a run of closers and inv/trans markers, the remainder, optional final ')'.
    expr_re = re.compile(
        r"^(\()?(?:({0})(\[))?(\()?({1})((?:(?:[\)\]])?(?:{3}|{4})?)*)((?:(?:{2})\(?(?:(?:{0})\[)?(?:{1})(?:(?:[\)\]])?(?:{3}|{4})?)*)*)(\))?".\
        format(diag_op.pattern, symbols.pattern, operators.pattern, inv_sym.pattern,
               trans_sym.pattern))
    # A run of ')'/']' interleaved with inverse/transpose markers. Compiled
    # once here instead of twice inside the scanning loops (as before).
    close_expr_pat = re.compile(r"([\)\]])?({0}|{1})?((?:[\)\]]?(?:{0}|{1})?)*)".format(
        inv_sym.pattern, trans_sym.pattern))
    tokens = []
    def consume_close_expr(close_expr: str) -> None:
        # Emit tokens for a run of closing parens and inv/trans markers.
        # (This loop used to be duplicated verbatim in both scanning phases.)
        while len(close_expr) > 0:
            paren, mat_op, rest = close_expr_pat.fullmatch(close_expr).groups()
            if paren:  # ) | ]
                tokens.append(make_paren_token(paren))
            if mat_op:  # inv_sym | trans_sym
                tokens.append(match_to_mat_op(mat_op))
            close_expr = rest
    # First match first part of expression then iteratively match remainder
    expr_match = expr_re.fullmatch(expr)
    if not expr_match:
        raise Exception("Invalid input")
    groups = expr_match.groups()
    if groups[0]:  # '('
        tokens.append(LRoundParenToken())
    if groups[1]:  # diag_op
        tokens.append(DiagToken(groups[1]))
    if groups[2]:  # '['
        tokens.append(make_paren_token(groups[2]))
    if groups[3]:  # '('
        tokens.append(make_paren_token(groups[3]))
    if groups[4]:  # mat_identifier|vec_identifier|kernel
        tokens.append(match_to_symbol(groups[4]))
    # Alternations between (inv_sym|trans_sym) and ']'|')'
    consume_close_expr(groups[5])
    ## Repeat for the rest of the expression
    right = groups[6]  # The remainder of the expression excluding last bracket
    right_regex = re.compile(
        r"^({0})(\()?(?:({4})(\[))?({1})((?:(?:[\)\]])?(?:{2}|{3})?)*)((?:(?:{0})\(?(?:(?:{4})\[)?(?:{1})(?:(?:[\)\]])?(?:{2}|{3})?)*)*)".format( \
            operators.pattern, symbols.pattern, inv_sym.pattern, trans_sym.pattern,
            diag_op.pattern))
    while len(right) > 0:
        subgroups = right_regex.fullmatch(right).groups()
        if subgroups[0]:  # operators
            tokens.append(make_operator_token(subgroups[0]))
        else:
            raise RuntimeError("Scanning error: Missing operator")
        if subgroups[1]:  # '('
            tokens.append(make_paren_token(subgroups[1]))
        if subgroups[2]:  # 'diag_op'
            tokens.append(DiagToken(subgroups[2]))
        if subgroups[3]:  # '['
            tokens.append(make_paren_token(subgroups[3]))
        if subgroups[4]:  # mat_identifier|vec_identifier|kernel
            tokens.append(match_to_symbol(subgroups[4]))
        else:
            raise RuntimeError("Scanning error: Missing mat_identifier, vec_identifier or kernel.")
        # Alternations between (inv_sym|trans_sym) and ']'|')'
        consume_close_expr(subgroups[5])
        right = subgroups[6]
    if groups[7]:  # final ')'
        tokens.append(RRoundParenToken())
    return tokens
def tokens_to_string(tokens: List[Token]) -> str:
    """
    Converts a list of tokens to the string they represent.

    :param tokens: The ordered list of tokens.
    :return: The string representation of the list of tokens.
    """
    plain_classes = (DiagToken, OperatorToken, ParenToken,
                     MatIdentifierToken, VecIdentifierToken)
    parts = []
    for token in tokens:
        if isinstance(token, plain_classes):
            parts.append(token.value)
        elif isinstance(token, (InvToken, TransToken)):
            # The inv/trans token's value may itself be a kernel token,
            # a group of tokens, or a plain symbol token.
            sym = token.value
            if isinstance(sym, KernelToken):
                parts.append(sym.value + "(" + sym.arg1 + "," + sym.arg2 + ")")
            elif isinstance(sym, GroupToken):
                parts.append(tokens_to_string(sym.tokens()))
            else:
                parts.append(sym.value)
            parts.append(".I" if isinstance(token, InvToken) else ".T")
        elif isinstance(token, GroupToken):
            parts.append(tokens_to_string(token.tokens()))
        else:
            # Remaining case: a KernelToken.
            parts.append(token.value + "(" + token.arg1 + "," + token.arg2 + ")")
    return "".join(parts)
######## GUI AST classes ########
## AST Printer stuff ##
class VisitorBase(object, metaclass=ABCMeta):
    """
    Abstract class for Visitor from the Visitor pattern.
    Subclasses implement one ``visit_*`` method per AST node type; each
    node's ``accept`` dispatches to the matching method here.
    """
    def visit_binary(self, binary: 'Binary'):
        # Handle a Binary node (left <op> right).
        raise NotImplementedError()
    def visit_unary(self, unary: 'Unary'):
        # Handle a Unary node (<op> right).
        raise NotImplementedError()
    def visit_literal(self, literal: 'Literal'):
        # Handle a plain identifier literal.
        raise NotImplementedError()
    def visit_kernel_literal(self, kern_lit: 'KernelLiteral'):
        # Handle a kernel call literal, e.g. K(a,b).
        raise NotImplementedError()
    def visit_grouping(self, grouping: 'Grouping'):
        # Handle a parenthesised sub-expression.
        raise NotImplementedError()
    def visit_diag(self, diag: 'Diag'):
        # Handle a diag/blockdiag-wrapped expression.
        raise NotImplementedError()
    def visit_matop(self, matop: 'MatOp'):
        # Handle a transpose/inverse suffix applied to an expression.
        raise NotImplementedError()
class ASTPrinter(VisitorBase):
    """Visitor that renders an AST as a parenthesised prefix string."""
    def print_ast(self, expr: 'ASTNode'):
        """Return the parenthesised string form of ``expr``."""
        return expr.accept(self)
    def visit_binary(self, binary: 'Binary'):
        return self.parenthesise(binary.operator, binary.left, binary.right)
    def visit_unary(self, unary: 'Unary'):
        return self.parenthesise(unary.operator, unary.right)
    def visit_literal(self, literal: 'Literal'):
        return self.parenthesise(literal.value)
    def visit_kernel_literal(self, kern_lit: 'KernelLiteral'):
        return self.parenthesise(kern_lit.name, kern_lit.arg1, kern_lit.arg2)
    def visit_grouping(self, grouping: 'Grouping'):
        return self.parenthesise("group", grouping.expr)
    def visit_diag(self, diag: 'Diag'):
        return self.parenthesise(diag.diag_op, diag.expr)
    def visit_matop(self, matop: 'MatOp'):
        return self.parenthesise(matop.mat_op, matop.expr)
    def parenthesise(self, name: str, *exprs: Iterable['ASTNode']):
        """Build '( name child child...)', recursing into AST children."""
        pieces = ["( " + name]
        for expr in exprs:
            # AST children are rendered recursively; raw strings verbatim.
            if isinstance(expr, ASTNode):
                pieces.append(expr.accept(self))
            else:
                pieces.append(expr)
        return " ".join(pieces) + ")"
## Node classes ##
class ASTNode(object, metaclass=ABCMeta):
    """Base class for AST nodes; equality compares all public attributes."""
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        public_keys = [k for k in self.__dict__.keys() if not k.startswith('_')]
        return all(self.__dict__[k] == other.__dict__[k] for k in public_keys)
    def accept(self, visitor: VisitorBase):
        # Subclasses dispatch to the matching visitor.visit_* method.
        raise NotImplementedError("Should be implemented by subclasses.")
class Binary(ASTNode):
    """AST node for a binary operation ``left <op> right``."""
    def __init__(self, left, operator: OperatorToken, right):
        self.left = left
        # Only the operator's string value is stored, not the token.
        self.operator = operator.value
        self.right = right
    def __str__(self):
        return "{}{}{}".format(self.left, self.operator, self.right)
    def __repr__(self):
        return "Binary(left={}, operator={}, right={})".format(self.left, self.operator, self.right)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_binary(self)
class Unary(ASTNode):
    """AST node for a unary operation ``<op> right``."""
    def __init__(self, operator: OperatorToken, right):
        # Only the operator's string value is stored, not the token.
        self.operator = operator.value
        self.right = right
    def __str__(self):
        return "{}{}".format(self.operator, self.right)
    def __repr__(self):
        return "Unary(operator={}, right={})".format(self.operator, self.right)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_unary(self)
class Literal(ASTNode):
    """AST leaf node for a plain identifier."""
    def __init__(self, value: str):
        self.value = value
    def __str__(self):
        return self.value
    def __repr__(self):
        return "Literal(value={})".format(repr(self.value))
    def accept(self, visitor: VisitorBase):
        return visitor.visit_literal(self)
class KernelLiteral(ASTNode):
    """AST leaf node for a kernel call such as ``K(a, b)``."""
    def __init__(self, name: str, arg1: str, arg2: str):
        self.name = name
        self.arg1 = arg1
        self.arg2 = arg2
    def __str__(self):
        return "{}({}, {})".format(self.name, self.arg1, self.arg2)
    def __repr__(self):
        return "KernelLiteral(name={}, arg1={}, arg2={})".format(self.name, self.arg1, self.arg2)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_kernel_literal(self)
class Grouping(ASTNode):
    """AST node for a parenthesised sub-expression."""
    def __init__(self, expr):
        self.expr = expr
    def __str__(self):
        return "({})".format(self.expr)
    def __repr__(self):
        return "Grouping(expr={})".format(self.expr)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_grouping(self)
class Diag(ASTNode):
    """AST node for a diag/blockdiag-wrapped expression."""
    def __init__(self, diag_op: DiagToken, expr):
        # Only the diag keyword's string value is stored, not the token.
        self.diag_op = diag_op.value
        self.expr = expr
    def __str__(self):
        return "{}[{}]".format(self.diag_op, self.expr)
    def __repr__(self):
        return "Diag(diag_op={}, expr={})".format(self.diag_op, self.expr)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_diag(self)
class MatOp(ASTNode):
    """AST node for a transpose/inverse suffix applied to an expression."""
    def __init__(self, expr, mat_op: Union[InvToken, TransToken]):
        self.expr = expr
        # Only the operator's string value is stored, not the token.
        self.mat_op = mat_op.value
    def __str__(self):
        return "{}{}".format(self.expr, self.mat_op)
    def __repr__(self):
        return "MatOp(expr={}, mat_op={})".format(self.expr, self.mat_op)
    def accept(self, visitor: VisitorBase):
        return visitor.visit_matop(self)
# Parsing functions
def parse(tokens):
    """
    Parses a list of tokens to produce an expression with a dictionary
    of the objects created

    Recursive-descent parser over ``tokens``; precedence (low to high):
    addition/subtraction, multiplication, unary minus / matrix-op suffix,
    primary (identifier, kernel, grouping, diag).
    """
    current = 0    # Index of current token.
    def previous() -> Token:
        # The most recently consumed token.
        return tokens[current-1]
    def advance() -> Token:
        # Consume and return the current token (no-op at end of input).
        nonlocal current
        if not is_at_end():
            current += 1
        return previous()
    def peek() -> Token:
        # Look at the current token without consuming it.
        return tokens[current]
    def is_at_end() -> bool:
        return current == len(tokens)
    def match(*token_types) -> bool:
        # Consume the current token if it is an instance of any given type.
        for token_type in token_types:
            if check(token_type):
                advance()
                return True
        return False
    def check(token_type) -> bool:
        if is_at_end():
            return False
        return isinstance(peek(), token_type)
    def consume(token_type, message: str) -> Token:
        # Consume a token of the expected type or raise a parse error.
        if check(token_type):
            return advance()
        raise error(peek(), message)
    def error(token: Token, message: str):
        # Builds (rather than raises) the error; callers `raise error(...)`.
        return RuntimeError(message + " Actual: " + token.value)
    def primary():
        # primary := identifier | kernel | '(' expression ')' | diag '[' expression ']'
        if match(MatIdentifierToken, VecIdentifierToken):
            return Literal(previous().value)
        elif match(KernelToken):
            kern_tok = previous() # type: KernelToken
            return KernelLiteral(kern_tok.value, kern_tok.arg1, kern_tok.arg2)
        elif match(LRoundParenToken):
            expr = expression()
            consume(RRoundParenToken, "Expect ')' after expression.")
            return Grouping(expr)
        elif match(DiagToken):
            diag_op = previous() # type: DiagToken
            consume(LBoxParenToken, "Expect '[' after diag_op and before expression.")
            expr = expression()
            consume(RBoxParenToken, "Expect ']' after expression.")
            return Diag(diag_op, expr)
        # NOTE(review): falls through returning None when nothing matches;
        # callers do not check for None — confirm this case is unreachable
        # for token lists produced by get_tokens.
    def unary():
        # unary := '-' unary | primary, optionally followed by a .T/.I suffix
        if match(MinusToken):
            operator = previous() # type: OperatorToken
            right = unary()
            out_expr = Unary(operator, right)
        else:
            out_expr = primary()
        if match(TransToken, InvToken):
            matop = previous() # type: Union[TransToken, InvToken]
            return MatOp(out_expr, matop)
        else:
            return out_expr
    def multiplication():
        # multiplication := unary ('*' unary)*   (left-associative)
        expr = unary()
        while match(StarToken):
            operator = previous() # type: OperatorToken
            right = unary()
            expr = Binary(expr, operator, right)
        return expr
    def addition():
        # addition := multiplication (('+'|'-') multiplication)*   (left-associative)
        expr = multiplication()
        while match(PlusToken, MinusToken):
            operator = previous() # type: OperatorToken
            right = multiplication()
            expr = Binary(expr, operator, right)
        return expr
    def expression():
        return addition()
    return expression()
def print_ast(ast: ASTNode):
    """Return the parenthesised prefix-string representation of ``ast``."""
    return ASTPrinter().print_ast(ast)
# Interpreter
class Interpreter(VisitorBase, metaclass=ABCMeta):
    # Visitor that evaluates an AST into concrete objects by resolving
    # names through a caller-supplied namespace.
    def __init__(self, namespace: Dict[str, Any]):
        """
        Initialises interpreter.
        :param namespace: Dictionary mapping names to Python objects that are used to evaluate
        expression. For example for a SymGP Constant named 'A', we would have the entry:
                namespace['A'] = Constant('A')
        For Kernels, we have to append '_kern' to the Kernel name to distinguish it from matrix
        symbols.
        We assume in all the 'visit*' functions below that all the required objects have been defined
        previously.
        """
        self._ns = namespace
    def interpret(self, expr: ASTNode):
        # Public entry point: evaluate the whole tree.
        return self.evaluate(expr)
    def evaluate(self, expr: ASTNode):
        # Dispatch to this visitor's matching visit_* method.
        return expr.accept(self)
    def visit_binary(self, expr: Binary):
        # Evaluate both operands, then apply the stored operator string.
        left = self.evaluate(expr.left)
        right = self.evaluate(expr.right)
        if expr.operator == '+':
            return left + right
        elif expr.operator == '-':
            return left - right
        elif expr.operator == '*':
            return left * right
        else:
            # Unknown operator: the caller receives None.
            return None
    def visit_unary(self, expr: Unary):
        right = self.evaluate(expr.right)
        if expr.operator == '-':
            return -right
        else:
            # Only unary minus is supported; anything else yields None.
            return None
    def visit_literal(self, expr: Literal):
        # Resolve an identifier directly from the namespace.
        return self._ns[expr.value]
    def visit_kernel_literal(self, expr: KernelLiteral):
        from symgp.kernels import Kernel
        # Kernel objects are stored under '<name>_kern'; arguments are
        # resolved from the namespace and the kernel is applied to them.
        arg1, arg2 = self._ns[expr.arg1], self._ns[expr.arg2]
        kern = self._ns[expr.name + '_kern'] # type: Kernel
        return kern(arg1, arg2)
    def visit_grouping(self, expr: Grouping):
        # Parentheses only group; evaluate the inner expression.
        return self.evaluate(expr.expr)
    def visit_diag(self, expr: Diag):
        from symgp.superexpressions.supermatbase import SuperMatBase
        from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
        arg = self.evaluate(expr.expr) # type: SuperMatBase
        if expr.diag_op == 'diag':
            return SuperDiagMat(arg)
        elif expr.diag_op == 'blkdiag' or expr.diag_op == 'blockdiag':
            return SuperBlockDiagMat(arg)
        else:
            # Unknown diag keyword: the caller receives None.
            return None
    def visit_matop(self, expr: MatOp):
        from symgp.superexpressions import SuperMatTranspose, SuperMatInverse
        arg = self.evaluate(expr.expr)
        # All accepted spellings of the transpose and inverse suffixes.
        trans_ops = [".T", "'", "^t", "^T", "^{t}", '^{T}']
        inv_ops = [".I", "^-1", "^{-1}"]
        if expr.mat_op in trans_ops:
            return SuperMatTranspose(arg)
        elif expr.mat_op in inv_ops:
            return SuperMatInverse(arg)
        else:
            # Unknown suffix: the caller receives None.
            return None
|
jna29/SymGP
|
symgp/utils/utils.py
|
Python
|
mit
| 115,211
|
[
"VisIt"
] |
2bf2d1007118aca6cec3dacc7fae4f2120223121537964726b9f953272c39ab7
|
# Copyright (C) 2011-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
    """Print a warning message prefixed with 'warning: '.

    Bug fix: the original used a Python 2 print statement
    (``print "warning: %s" % msg``), which is a SyntaxError on Python 3.
    """
    print("warning: %s" % msg)
def get_elinos_environment():
    """Return the ELinOS environment.
    If the ELinOS environment is properly set up, return a dictionary
    which contains:
      * The path to the ELinOS project at key 'project';
      * The path to the ELinOS CDK at key 'cdk';
      * The ELinOS target name at key 'target' (Eg. 'i486-linux');
      * A list of Xenomai install prefixes (which could be empty, if
        the ELinOS project does not include Xenomai) at key 'xenomai'.
    If one of these cannot be found, print a warning; the corresponding
    value in the returned dictionary will be None.
    """
    result = {}
    for key in ("project", "cdk", "target"):
        env_var = "ELINOS_" + key.upper()
        value = os.environ.get(env_var)
        if value is None:
            warn("%s not set" % env_var)
        result[key] = value
    # Xenomai prefixes can only be discovered inside a known project dir.
    project = result["project"]
    if project is None:
        result["xenomai"] = []
    else:
        result["xenomai"] = glob.glob(project + "/xenomai-[0-9.]*")
    return result
def elinos_init():
    """Initialize debugger environment for ELinOS.
    Let the debugger know where to find the ELinOS libraries on host. This
    assumes that an ELinOS environment is properly set up. If some environment
    variables are missing, warn about which library may be missing.
    """
    elinos_env = get_elinos_environment()
    solib_dirs = []
    # System libraries
    if None in (elinos_env[key] for key in ("cdk", "target")):
        warn("ELinOS system libraries will not be loaded")
    else:
        solib_prefix = "%s/%s" % (elinos_env["cdk"], elinos_env["target"])
        solib_dirs += ["%s/%s" % (solib_prefix, "lib")]
        gdb.execute("set solib-absolute-prefix %s" % solib_prefix)
    # Xenomai libraries. Those are optional, so have a lighter warning
    # if they cannot be located.
    if elinos_env["project"] is None:
        warn("Xenomai libraries may not be loaded")
    else:
        # Renamed loop variable from "dir" to avoid shadowing the builtin.
        for xenomai_dir in elinos_env['xenomai']:
            solib_dirs += ["%s/%s"
                           % (xenomai_dir, "xenomai-build/usr/realtime/lib")]
    if len(solib_dirs) != 0:
        gdb.execute("set solib-search-path %s" % ":".join(solib_dirs))
# Perform the ELinOS setup when this file is sourced as a script by GDB.
if __name__ == "__main__":
    elinos_init()
|
mattstock/binutils-bexkat1
|
gdb/system-gdbinit/elinos.py
|
Python
|
gpl-2.0
| 3,080
|
[
"CDK"
] |
604eaec2fb7fa49e8daa0088dba98b53404d33105dd8e6f5fd82685c51f2ec1c
|
""" Base classes for classification """
import numpy
import os
import cPickle
import warnings
import logging
import timeit
# base class
from pySPACE.missions.nodes.base_node import BaseNode
# representation of the linear classification vector
from pySPACE.resources.data_types.feature_vector import FeatureVector
# the output is a prediction vector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
class RegularizedClassifierBase(BaseNode):
    """ Basic class for regularized (kernel) classifiers with extra support in the linear case
    **Parameters**
    :class_labels:
        Sets the labels of the classes.
        This can be done automatically, but setting it will be better,
        if you want to have similar predictions values
        for classifiers trained on different sets.
        Otherwise this variable is built up by occurrence of labels.
        Furthermore the important class (ir_class) should get the
        second position in the list, such that it gets higher
        prediction values by the classifier.
        (*recommended, default: []*)
    :complexity:
        Complexity sets the weighting of punishment for misclassification
        in comparison to generalizing classification from the data.
        Value in the range from 0 to infinity.
        (*optional, default: 1*)
    :weight:
        Defines an array with two entries to give different complexity weight
        on the two used classes.
        Set the parameter C of class i to weight*C.
        (*optional, default: [1,1]*)
    :kernel_type:
        Defines the used kernel function.
        One of the following Strings: 'LINEAR', 'POLY','RBF', 'SIGMOID'.
        - LINEAR ::
            u'*v
        - POLY ::
            (gamma*u'*v + offset)^exponent
        - RBF ::
            exp(-gamma*|u-v|^2)
        - SIGMOID ::
            tanh(gamma*u'*v + offset)
        (*optional, default: 'LINEAR'*)
    :exponent:
        Defines parameter for the 'POLY'-kernel.
        Equals parameter /degree/ in libsvm-package.
        (*optional, default: 2*)
    :gamma:
        Defines parameter for 'POLY'-,'RBF'- and 'SIGMOID'-kernel.
        In libsvm-package it was set to 1/num_features.
        For RBF-Kernels we calculate it as described in:
        :Paper:
            A practical Approach to Model Selection for Support vector
            Machines with a Gaussian Kernel
        :Author: M. Varewyck and J.-P. Martens.
        :Formula: 15
        The quasi-optimal complexity should then be found in [0.5,2,8]
        or better to say log_2 C should be found in [-1,1,3].
        For testing a wider range, you may try: [-2,...,4].
        A less accurate version would be to use 1/(num_features*sqrt(2)).
        For the other kernels we set it to 1/num_features.
        .. warning::
            For the RBF-Parameter selection the
            :class:`~pySPACE.missions.nodes.postprocessing.feature_normalization.HistogramFeatureNormalizationNode`
            should be used before.
        (*optional, default: None*)
    :offset:
        Defines parameter for 'POLY'- and 'SIGMOID'-kernel.
        Equals parameter /coef0/ in libsvm-package.
        (*optional, default: 0*)
    :nu:
        Defines parameter for 'nu-SVC', 'one-class SVM' and 'nu-SVR'. It
        approximates the fraction of training errors and support vectors.
        Value in the range from 0 to 1.
        (*optional, default: 0.5*)
    :epsilon:
        Defines parameter for 'epsilon-SVR'.
        Set the epsilon in loss function of epsilon-SVR.
        Equals parameter /p/ in libsvm-package.
        (*optional, default: 0.1*)
    :tolerance:
        tolerance of termination criterion, same default as in libsvm.
        In the SOR implementation the tolerance may be reduced to
        one tenth of the complexity, if it is higher than this value.
        Otherwise it would be no valid stopping criterion.
        (*optional, default: 0.001*)
    :max_time:
        Time for the construction of the classifier
        For LibSVM we restrict the number of steps but for cvxopt
        we use a signal handling to stop processes.
        This may happen, when the parameters are bad chosen or
        the problem matrix is to large.
        Parameter is still in testing and implementation phase.
        The time is given in seconds and as a default, one hour is used.
        (*optional, default: 3600*)
    :keep_vectors:
        After training the training data is normally deleted,
        except this variable is set to True.
        (*optional, default: False*)
    :use_list:
        Switch to store samples as *list*. If set to *False* they are stored
        as arrays. Used for compatibility with LIBSVM. This parameter should
        not be changed by the user.
        (*optional, default False*)
    :multinomial:
        Accept more than two classes.
        (*optional, default: False*)
    .. note:: Not all parameter effects are implemented for all inheriting nodes.
        Kernels are available for LibSVMClassifierNode and
        partially for other nodes.
        The *tolerance* has only an effect on Liblinear, LibSVM and SOR classifier.
    :input: FeatureVector
    :output: PredictionVector
    :Author: Mario Krell (mario.krell@dfki.de)
    :Created: 2012/03/28
    """
    def __init__(self, regression = False,
                 complexity = 1, weight = [1,1], kernel_type = 'LINEAR',
                 exponent = 2, gamma = None, offset = 0, nu = 0.5, epsilon = 0.1,
                 class_labels = [], debug = False, max_time = 3600,
                 tolerance=0.001,
                 complexities_path = None,
                 keep_vectors=False, max_steps=1,forget_oldest=False,
                 keep_label=None, use_list=False,
                 multinomial=False,
                 **kwargs):
        """ Store all configuration values as permanent attributes.
        NOTE(review): ``weight=[1,1]`` and ``class_labels=[]`` are mutable
        defaults shared between instances, and ``self.classes`` is mutated
        via ``append`` in ``_train`` -- presumably harmless only if
        ``set_permanent_attributes`` copies its arguments; confirm.
        """
        super(RegularizedClassifierBase, self).__init__(**kwargs)
        # type conversion
        complexity=float(complexity)
        if complexity<1e-10:
            self._log("Complexity (%.42f) is very small. Try rescaling data or check this behavior."%complexity, level = logging.WARNING)
        # Retraining requires access to the old training vectors, so force
        # keeping them in that case.
        if self.is_retrainable():
            keep_vectors=True
        self.set_permanent_attributes(samples=None, labels=None,
                                      classes=class_labels,
                                      weight=weight,
                                      kernel_type=kernel_type,
                                      complexity=complexity,
                                      exponent=exponent, gamma=gamma,
                                      offset=offset, nu=nu,
                                      epsilon=epsilon, debug=debug,
                                      tolerance=tolerance,
                                      w=None, b=0, dim=None,
                                      feature_names= None,
                                      complexities_path=complexities_path,
                                      regression=regression,
                                      keep_vectors=keep_vectors,
                                      max_time=max_time,
                                      steps=0, max_steps=max_steps,
                                      forget_oldest=forget_oldest,
                                      keep_label=keep_label,
                                      retraining_needed=False,
                                      use_list=use_list,
                                      multinomial=multinomial,
                                      classifier_information={}
                                      )
    def stop_training(self):
        """ Wrapper around stop training for measuring times"""
        if self.samples is None or len(self.samples) == 0:
            self._log("No training data given to classification node (%s), "
                      % self.__class__.__name__ + "wrong class labels "
                      + "used or your classifier is not using samples.",
                      level=logging.CRITICAL)
        start_time_stamp = timeit.default_timer()
        super(RegularizedClassifierBase, self).stop_training()
        stop_time_stamp = timeit.default_timer()
        # NOTE(review): dict.has_key is Python-2-only; this file is Python 2
        # throughout (print statements, cPickle).
        if not self.classifier_information.has_key("Training_time(classifier)"):
            self.classifier_information["Training_time(classifier)"] = \
                stop_time_stamp - start_time_stamp
        else:
            self.classifier_information["Training_time(classifier)"] += \
                stop_time_stamp - start_time_stamp
    def is_trainable(self):
        """ Returns whether this node is trainable. """
        return True
    def is_supervised(self):
        """ Returns whether this node requires supervised training """
        return True
    def delete_training_data(self):
        """ Check if training data can be deleted to save memory """
        if not (self.keep_vectors or self.is_retrainable()):
            self.samples = []
            self.labels = []
    def __getstate__(self):
        """ Return a pickable state for this object """
        odict = super(RegularizedClassifierBase, self).__getstate__()
        # In the linear case the classification vector w/b is sufficient for
        # execution, so training data and the backend model are dropped
        # from the pickled state.
        if self.kernel_type == 'LINEAR':
            if 'labels' in odict:
                odict['labels'] = []
            if 'samples' in odict:
                odict['samples'] = []
            if 'model' in odict:
                del odict['model']
        else:
            if 'model' in odict:
                del odict['model']
        return odict
    def store_state(self, result_dir, index=None):
        """ Stores this node in the given directory *result_dir* """
        if self.store and self.kernel_type == 'LINEAR':
            node_dir = os.path.join(result_dir, self.__class__.__name__)
            from pySPACE.tools.filesystem import create_directory
            create_directory(node_dir)
            # If self.features was never set, reconstruct it from the
            # classification vector w.
            try:
                self.features
            except:
                if type(self.w) == FeatureVector:
                    self.features = self.w
                elif not self.w is None:
                    self.features = FeatureVector(self.w.T, self.feature_names)
                else:
                    self.features=None
            if not self.features is None:
                # This node stores the learned features
                name = "%s_sp%s.pickle" % ("features", self.current_split)
                result_file = open(os.path.join(node_dir, name), "wb")
                result_file.write(cPickle.dumps(self.features, protocol=2))
                result_file.close()
                name = "%s_sp%s.yaml" % ("features", self.current_split)
                result_file = open(os.path.join(node_dir, name), "wb")
                result_file.write(str(self.features))
                result_file.close()
                del self.features
    def __setstate__(self, sdict):
        """ Restore object from its pickled state"""
        super(RegularizedClassifierBase, self).__setstate__(sdict)
        if self.kernel_type != 'LINEAR':
            # Retraining the svm is not a semantically clean way of restoring
            # an object but its by far the most simple solution
            self._log("Requires retraining of the classifier")
            if self.samples != None:
                self._stop_training()
    def get_sensor_ranking(self):
        """ Transform the classification vector to a sensor ranking
        This method will fail, if the classification vector variable
        ``self.features`` is not existing.
        This is for example the case when using nonlinear classification with
        kernels.
        """
        if not "features" in self.__dict__:
            self.features = FeatureVector(
                numpy.atleast_2d(self.w).astype(numpy.float64),
                self.feature_names)
            self._log("No features variable existing to create generic sensor "
                      "ranking in %s."%self.__class__.__name__, level=logging.ERROR)
        # channel name is what comes after the first underscore
        feat_channel_names = [chnames.split('_')[1]
                              for chnames in self.features.feature_names]
        from collections import defaultdict
        ranking_dict = defaultdict(float)
        # Sum the absolute weights per channel; larger sum = more relevant.
        for i in range(len(self.features[0])):
            ranking_dict[feat_channel_names[i]] += abs(self.features[0][i])
        # Sorted ascending by accumulated weight.
        ranking = sorted(ranking_dict.items(),key=lambda t: t[1])
        return ranking
    def _train(self, data, class_label):
        """ Trains the classifier on the given data
        It is assumed that the class_label parameter
        contains information about the true class the data belongs to
        """
        if self.feature_names is None:
            try:
                self.feature_names = data.feature_names
            except AttributeError as e:
                warnings.warn("Use a feature generator node before a " +
                              "classification node.")
                raise e
        if self.dim is None:
            self.dim = data.shape[1]
        if self.samples is None:
            self.samples = []
        if self.labels is None:
            self.labels = []
        # Unknown label handling: warn, and register the new label when in
        # multinomial mode or while fewer than two classes are known.
        if class_label not in self.classes and not "REST" in self.classes and \
                not self.regression:
            warnings.warn("Please give the expected classes to the classifier! "
                          + "%s unknown. "%class_label
                          + "Therefore define the variable 'class_labels' in "
                          + "your spec file, where you use your classifier. "
                          + "For further info look at the node documentation.")
            if self.multinomial or not(len(self.classes) == 2):
                self.classes.append(class_label)
                self.set_permanent_attributes(classes=self.classes)
        # main step of appending data to the list *self.samples*
        if class_label in self.classes or self.regression:
            self.append_sample(data)
        if not self.regression and class_label in self.classes:
            self.labels.append(self.classes.index(class_label))
        elif not self.regression and "REST" in self.classes:
            self.labels.append(self.classes.index("REST"))
        elif self.regression: # regression!
            try:
                self.labels.append(float(class_label))
            except ValueError: # one-class-classification is regression-like
                self.labels.append(1)
        else: # case, where data is irrelevant
            pass
    def train(self,data,label):
        """ Special mapping for multi-class classification """
        #one vs. REST case
        if "REST" in self.classes and not label in self.classes:
            label = "REST"
        # one vs. one case
        if not self.multinomial and len(self.classes) == 2 and \
                not label in self.classes:
            return
        start_time_stamp = timeit.default_timer()
        super(RegularizedClassifierBase, self).train(data, label)
        stop_time_stamp = timeit.default_timer()
        # Accumulate wall-clock training time (see stop_training).
        if not self.classifier_information.has_key("Training_time(classifier)"):
            self.classifier_information["Training_time(classifier)"] = \
                stop_time_stamp - start_time_stamp
        else:
            self.classifier_information["Training_time(classifier)"] += \
                stop_time_stamp - start_time_stamp
    def append_sample(self,sample):
        """ Some methods need a list of arrays as lists and some prefer arrays
        """
        data_array = sample.view(numpy.ndarray)
        if self.use_list:
            self.samples.append(map(float, list(data_array[0, :])))
        else:
            self.samples.append(data_array[0, :])
    def _execute(self, x):
        """ Executes the classifier on the given data vector in the linear case
        prediction value = <w,data>+b
        """
        if self.kernel_type == 'LINEAR':
            data = x.view(numpy.ndarray)
            # Let the SVM classify the given data: <w,data>+b
            # An untrained classifier (w is None) predicts 0 with a zero
            # vector as w.
            if self.w is None:
                prediction_value = 0
                self.w = numpy.zeros(x.shape[1])
            else:
                prediction_value = float(numpy.dot(self.w.T, data[0, :]))+self.b
            # one-class multinomial handling of REST class
            if "REST" in self.classes and self.multinomial:
                if "REST" == self.classes[0]:
                    label = self.classes[1]
                elif "REST" == self.classes[1]:
                    label = self.classes[0]
                    prediction_value *= -1
            # Look up class label
            # prediction_value --> {-1,1} --> {0,1} --> Labels
            elif prediction_value > 0:
                label = self.classes[1]
            else:
                label = self.classes[0]
            return PredictionVector(label=label, prediction=prediction_value,
                                    predictor=self)
    def print_variables(self):
        """ Debug function for printing the classifier and the slack variables
        """
        # Precision does not work here because of the strange dtype.
        numpy.set_printoptions(edgeitems=50, precision=4, suppress=False,
                               threshold=50)
        # ...Setting the dtype to list doesn't work either.
        print self.print_w
        print 'This is the classification vector w and b=', self.b, '.'
        print self.num_retained_features, ' out of ', self.dim, \
            ' features have been used.'
        print self.num_sv, " vectors of ", self.num_samples, " have been used."
        # print self.t, "are the Slack variables."
        if not((numpy.array(self.t) >= 0).all()):
            print "There are negative slack variables! Classification failed?"
        print "%i vectors of %i have been used for the inner margin and" \
            % (self.inner_margin, self.num_samples)
        numpy.set_printoptions(edgeitems=100, linewidth=75, precision=5,
                               suppress=True, threshold=1000)
        print numpy.array(self.ti), "are the inner Slack variables."
        # Restore numpy's default print options.
        numpy.set_printoptions(edgeitems=3, infstr='Inf', linewidth=75,
                               nanstr='NaN', precision=8, suppress=False,
                               threshold=1000)
    def kernel_func(self, u, v):
        """ Returns the kernel function applied on x and y
            - POLY ::
                (gamma*u'*v + offset)^exponent
            - RBF ::
                exp(-gamma*|u-v|^2)
            - SIGMOID ::
                tanh(gamma*u'*v + offset)
        """
        if not self.kernel_type == "LINEAR" and self.gamma is None:
            self.calculate_gamma()
        if self.kernel_type == "LINEAR":
            return float(numpy.dot(u, v))
        elif self.kernel_type == "POLY":
            h = float(numpy.dot(u, v))
            return (self.gamma*h+self.offset)**self.exponent
        elif self.kernel_type == "RBF":
            return numpy.exp(-self.gamma*float(numpy.sum((u - v)**2)))
        elif self.kernel_type == "SIGMOID":
            h = float(numpy.dot(u, v))
            return numpy.tanh(self.gamma*h+self.offset)
        elif self.kernel_type.startswith("lambda "):
            # SECURITY NOTE(review): eval() executes arbitrary code from the
            # kernel_type spec string; only safe for trusted spec files.
            function = eval(self.kernel_type)
            return float(function(u, v))
    def calculate_gamma(self):
        """ Calculate default gamma
        This defines a parameter for 'POLY'-,'RBF'- and 'SIGMOID'-kernel.
        We calculate the parameter `gamma` as described in the base node
        description.
        """
        if (self.kernel_type == 'POLY' or self.kernel_type == 'SIGMOID') \
                and self.gamma is None:
            self.gamma = 1.0/self.dim
        elif self.kernel_type == 'RBF' and self.gamma is None and \
                not self.regression:
            a = self.labels.count(self.classes.index(self.classes[0]))
            b = self.labels.count(self.classes.index(self.classes[1]))
            if a > b:
                relevant = 1
            else:
                relevant = 0
            relevant_samples = []
            for i, label in enumerate(self.labels):
                if label == relevant:
                    relevant_samples.append(self.samples[i])
            # NOTE(review): relevant_samples is collected but never used --
            # the variance below is computed over *all* samples; confirm
            # whether the minority-class restriction was intended.
            variance = numpy.median(numpy.var(numpy.array(self.samples),
                                              axis=0))
            self.gamma = 0.5/(variance*self.dim)
            self._log(
                "No parameter gamma specified for the kernel. Using: %f."\
                % self.gamma,
                level=logging.WARNING)
        elif self.gamma is None:
            self.gamma = 0.001
class TimeoutException(Exception):
    """Raised to abort simplex iterations that run for too long."""
|
Crespo911/pyspace
|
pySPACE/missions/nodes/classification/base.py
|
Python
|
gpl-3.0
| 21,922
|
[
"Gaussian"
] |
4698ead4933d9cbe9a4a14682ba540258d89781f680957d8526e1fcd4f746829
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
import logging
import os
import gzip
# These imports add support for multistream BZ2 files
# This is a Python2 backport for bz2file from Python3
# Because of http://bugs.python.org/issue20781
from bz2file import BZ2File
from extractcode import EXTRACT_SUFFIX
DEBUG = False
logger = logging.getLogger(__name__)
# import sys
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
# logger.setLevel(logging.DEBUG)
def uncompress(location, target_dir, decompressor, suffix=EXTRACT_SUFFIX):
    """
    Uncompress a compressed file at location in the target_dir using the
    `decompressor` object. The uncompressed file is named after the original
    archive with a `suffix` added.
    Return a list of warning messages. Raise Exceptions on errors.
    """
    # FIXME: do not create a sub-directory and instead strip the "compression"
    # extension such gz, etc. or introspect the archive header to get the file
    # name when present.
    assert location
    assert target_dir
    assert decompressor
    if DEBUG:
        logger.debug('uncompress: ' + location)
    warnings = []
    target_location = os.path.join(target_dir, os.path.basename(location) + suffix)
    with decompressor(location, 'rb') as compressed:
        with open(target_location, 'wb') as uncompressed:
            # Copy in fixed-size chunks: the previous single read() loaded
            # the entire uncompressed payload in memory, which fails on
            # large archives.
            while True:
                chunk = compressed.read(1024 * 1024)
                if not chunk:
                    break
                uncompressed.write(chunk)
    # Bug fix: the trailing-garbage flag is set on the decompressor
    # *instance* (see GzipFileWithTrailing), not on the class passed in as
    # `decompressor`; checking the class always saw the default False, so
    # this warning could never fire.
    if getattr(compressed, 'has_trailing_garbage', False):
        warnings.append(location + ': Trailing garbage found and ignored.')
    return warnings
class GzipFileWithTrailing(gzip.GzipFile):
    """
    A subclass of gzip.GzipFile supporting files with trailing garbage. Ignore
    the garbage.
    """
    # TODO: what is first_file??
    # True until the first member header has been processed.
    first_file = True
    gzip_magic = '\037\213'
    # Set on the *instance* when trailing non-gzip data is detected.
    has_trailing_garbage = False

    def _read_gzip_header(self):
        """Detect trailing garbage, then delegate to the base header parser."""
        # read the first two bytes
        magic = self.fileobj.read(2)
        # rewind two bytes back
        self.fileobj.seek(-2, os.SEEK_CUR)
        # Bug fix (naming): the original called this `is_gzip`, but it is
        # True precisely when the magic does NOT match, i.e. when the next
        # bytes are not a gzip member header.
        not_gzip_member = magic != self.gzip_magic
        if not_gzip_member and not self.first_file:
            self.first_file = False
            self.has_trailing_garbage = True
            # Call-form raise works on Python 2 and 3; the previous
            # `raise EOFError, '...'` was Python-2-only syntax.
            raise EOFError('Trailing garbage found')
        self.first_file = False
        gzip.GzipFile._read_gzip_header(self)
def uncompress_gzip(location, target_dir):
    """
    Uncompress a gzip compressed file at location in the target_dir.
    Return a warnings mapping of path -> warning.
    """
    # Use the trailing-garbage-tolerant gzip reader.
    decompressor = GzipFileWithTrailing
    return uncompress(location, target_dir, decompressor)
def uncompress_bzip2(location, target_dir):
    """
    Uncompress a bzip2 compressed file at location in the target_dir.
    Return a warnings mapping of path->warning.
    """
    # BZ2File (bz2file backport) supports multistream archives.
    decompressor = BZ2File
    return uncompress(location, target_dir, decompressor)
|
vinodpanicker/scancode-toolkit
|
src/extractcode/uncompress.py
|
Python
|
apache-2.0
| 4,180
|
[
"VisIt"
] |
4cd64fb5b5d0c3301329e86af837c06227d5fb4ce65730f8605e8100119875b1
|
"""
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------
Ingester Test
A unit test for wikipedia_revisions_ingester.py and run_ingester.py
Run with python -m ingest_utils.ingester_test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import json
import xml.sax
import subprocess
import os
import signal
import sys
import copy
import time
from io import BytesIO
from os import path
from ingest_utils import wikipedia_revisions_ingester as wiki_ingester
import resource
import math
import os
# Sizing constants for the large-dump stress test.
test_length = 10000       # revisions generated per test page
text_length = 20000       # characters of filler text per revision
memory_boundary = 40000   # in KB
time_limit = 2            # seconds
# Load the shared <mediawiki> header used to prefix generated dumps.
# A single read() yields exactly the same string as the former line-by-line
# concatenation, but in linear instead of quadratic time.
with open("ingest_utils/testdata/mediawiki_header.xml", "r") as f:
    mediawiki_header = f.read()
non_talk_page_header = "<page>\n<title>This is not a talk page</title>\n<ns>6</ns>\n<id>111111</id>\n"
def generateInfiniteXML(length, w):
    """Write a large synthetic MediaWiki dump to the writable stream *w*.
    Emits the shared header (which opens the first page), *length* revisions
    of filler text, a second (non-talk) page with *length* revisions of
    five-fold larger text, and the closing tags.
    """
    revision_template = "<revision>\n<id>{id}</id>\n<text>{text}</text>\n</revision>\n"
    filler = 'x' * text_length
    w.write(mediawiki_header)
    for revision_id in range(length):
        w.write(revision_template.format(id=revision_id, text=filler))
    w.write("</page>\n")
    w.write(non_talk_page_header)
    # The second page carries five times as much text per revision.
    filler = filler * 5
    for revision_id in range(length):
        w.write(revision_template.format(id=revision_id, text=filler))
    w.write("</page>\n</mediawiki>")
class TestWikiIngester(unittest.TestCase):
    """Unit tests for wikipedia_revisions_ingester.parse_stream."""
    def test_ingester(self):
        """Check parsed fields on a small dump, then guard memory and time
        while streaming a generated multi-megabyte dump."""
        input_file = path.join('ingest_utils', 'testdata', 'test_wiki_dump.xml')
        for i, line in enumerate(wiki_ingester.parse_stream(input_file)):
            if i == 0:
                self.assertEqual(line['comment'], 'a test comment 1')
            if i == 1:
                self.assertEqual(line['page_title'], 'Third Page (namespace 1)')
                self.assertEqual(line['text'], ' The first revision on the third page. Written by Tinker JJ. Has a comment.')
            if i == 2 or i == 3:
                self.assertEqual(line['page_id'], '54197571')
        # The fourth revision includes a large text component.
        self.assertEqual(i, 4)
        # This is a test on parsing very large xml files to make sure the streaming
        # doesn't consume too much memory.
        if not path.exists("ingest_utils/testdata/gigantic_xml.xml"):
            with open("ingest_utils/testdata/gigantic_xml.xml", "w") as w:
                generateInfiniteXML(test_length, w)
        input_file = path.join('ingest_utils', 'testdata', 'gigantic_xml.xml')
        start = time.time()
        for i, line in enumerate(wiki_ingester.parse_stream(input_file)):
            if i % 5000 == 0:
                # ru_maxrss reports peak resident set size (KB on Linux).
                memory_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                self.assertLessEqual(memory_usage, memory_boundary)
        costed_time = time.time() - start
        self.assertLessEqual(costed_time, time_limit)
        print("Time spent on parsing: ", costed_time)
        # os.remove is portable and raises on failure, unlike shelling out
        # to `rm` via os.system, which silently ignored errors.
        os.remove("ingest_utils/testdata/gigantic_xml.xml")
# Run the test suite when invoked directly
# (python -m ingest_utils.ingester_test).
if __name__ == '__main__':
    unittest.main()
|
conversationai/wikidetox
|
experimental/extract_pov_edits/ingester_test.py
|
Python
|
apache-2.0
| 3,644
|
[
"TINKER"
] |
3468b5f16fc40e280515f87cf31d517695f3fe13b866ef1d03ce57a38bb25499
|
"""CSShellCmd class emulates the behaviour of a shell to edit the CS config.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import cmd
import os
from DIRAC.Core.Base.CLI import CLI, colorize
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.private.Modificator import Modificator
from DIRAC.ConfigurationSystem.Client.ConfigurationClient import ConfigurationClient
class CSShellCLI(CLI):
    """Interactive shell-like CLI to browse and edit the DIRAC Configuration
    Service, mimicking familiar commands (ls, cd, cat, mkdir, rm, set, ...).
    State: `root` is the current CS section path, `dirty` tracks uncommitted
    modifications, `modificator` holds the remote-config editing proxy.
    """
    def __init__(self):
        CLI.__init__(self)
        self.serverURL = ""
        self.serverName = ""
        self.modificator = None
        self.connected = False
        self.dirty = False
        self.root = "/"
        # Connect to the default CS immediately on startup.
        self.do_connect("")
    def update_prompt(self):
        # Prompt shows connection state (colored) and the current section.
        if self.connected:
            self.prompt = "[" + colorize(self.serverName, "green") + ":" + self.root + " ]% "
        else:
            self.prompt = "[" + colorize("disconnected", "red") + ":" + self.root + " ]% "
    def do_connect(self, line):
        """connect
        Connect to the CS
        Usage: connect <URL> (Connect to the CS at the specified URL)
               connect (Connect to the default CS URL of your config)
        """
        if line == "":
            self.serverURL = gConfigurationData.getMasterServer()
            self.serverName = gConfigurationData.getName()
        else:
            self.serverURL = self.serverName = line
        if self.serverURL is None:
            print("Unable to connect to the default server. Maybe you don't have a proxy ?")
            return self.do_disconnect("")
        print("Trying to connect to " + self.serverURL + "...", end=" ")
        self.modificator = Modificator(ConfigurationClient(url=self.serverURL))
        rv = self.modificator.loadFromRemote()
        rv2 = self.modificator.loadCredentials()
        # NOTE(review): `== False` here vs `is False` below -- inconsistent
        # comparison style for the same result dicts.
        if rv["OK"] == False or rv2["OK"] == False:
            print("failed: ", end=" ")
            if rv["OK"] is False:
                print(rv["Message"])
            else:
                print(rv2["Message"])
            self.connected = False
            self.update_prompt()
        else:
            self.connected = True
            self.update_prompt()
            print("done.")
    def do_disconnect(self, _line):
        """Disconnect from CS"""
        # Offer to commit pending changes before dropping the connection.
        if self.connected and self.dirty:
            res = input("Do you want to commit your changes into the CS ? [y/N] ")
            if res.lower() in ["y", "yes"]:
                self.do_commit("")
        self.serverURL = self.serverName = ""
        self.modificator = None
        self.connected = False
        self.update_prompt()
    def do_ls(self, line):
        """ls
        List the sections and options of CS of the current root"""
        if self.connected:
            secs = self.modificator.getSections(self.root)
            opts = self.modificator.getOptions(self.root)
            # "-l"-style flag: one entry per line; otherwise single line.
            if line.startswith("-") and "l" in line:
                for i in secs:
                    print(colorize(i, "blue") + " ")
                for i in opts:
                    print(i + " ")
            else:
                for i in secs:
                    print(colorize(i, "blue") + " ", end=" ")
                for i in opts:
                    print(i + " ", end=" ")
                print("")
    def do_cd(self, line):
        """cd
        Go one directory deeper in the CS"""
        # Check if invariant holds
        if self.connected:
            assert self.root == "/" or not self.root.endswith("/")
            assert self.root.startswith("/")
            secs = self.modificator.getSections(self.root)
            if line == "..":
                self.root = os.path.dirname(self.root)
                self.update_prompt()
            else:
                if os.path.normpath(line) in secs:
                    if self.root == "/":
                        self.root = self.root + os.path.normpath(line)
                    else:
                        self.root = self.root + "/" + os.path.normpath(line)
                    self.update_prompt()
                else:
                    print("cd: no such section: " + line)
    def complete_cd(self, text, _line, _begidx, _endidx):
        # Tab completion: child sections of the current root.
        secs = self.modificator.getSections(self.root)
        return [(s + "/") for s in secs if s.startswith(text)]
    def do_cat(self, line):
        """cat
        Read the content of an option in the CS"""
        if self.connected:
            opts = self.modificator.getOptionsDict(self.root)
            if line in opts:
                print(opts[line])
            else:
                print("cat: No such option")
    def complete_cat(self, text, _line, _begidx, _endidx):
        # Tab completion: options of the current root.
        opts = self.modificator.getOptions(self.root)
        return [o for o in opts if o.startswith(text)]
    do_less = do_cat
    complete_less = complete_cat
    def do_mkdir(self, line):
        """mkdir
        Create a new section in the CS"""
        if self.connected:
            self.modificator.createSection(self.root + "/" + line)
            self.dirty = True
    complete_mkdir = complete_cd
    def do_rmdir(self, line):
        """rmdir
        Delete a section in the CS"""
        if self.connected:
            self.modificator.removeSection(self.root + "/" + line)
            self.dirty = True
    complete_rmdir = complete_cd
    def do_rm(self, line):
        """rm
        Delete an option in the CS"""
        if self.connected:
            self.modificator.removeOption(self.root + "/" + line)
            self.dirty = True
    complete_rm = complete_cat
    def do_set(self, line):
        """set
        Set an option in the CS (or create it if it does not exists)
        Usage: set <str> to set a string option (will be stored as a string in CS)
               set <str>,<str>,... to set a list option (will be stored as a list in CS)
        """
        if self.connected:
            # NOTE: rebinds `line` from str to list; split(" ", 2) keeps at
            # most a key and the remainder as the value.
            line = line.split(" ", 2)
            if len(line) != 2:
                print("Usage: set <key> <value>")
            else:
                self.modificator.setOptionValue(self.root + "/" + line[0], line[1])
                self.dirty = True
    complete_set = complete_cat
    def do_unset(self, line):
        """unset
        Unset an option in the CS: Making the option equal to the
        empty string."""
        if self.connected:
            self.modificator.setOptionValue(self.root + "/" + line, "")
            self.dirty = True
    complete_unset = complete_cat
    def do_commit(self, _line):
        """commit
        Commit the modifications to the CS"""
        # NOTE(review): `dirty` is not reset to False after a successful
        # commit -- confirm whether that is intended.
        if self.connected and self.dirty:
            self.modificator.commit()
    def default(self, line):
        """Override [Cmd.default(line)] function."""
        # Treat EOF (Ctrl-D) as quit; everything else falls back to Cmd.
        if line == "EOF":
            if self.prompt:
                print()
            return self.do_quit(line)
        else:
            cmd.Cmd.default(self, line)
    def do_quit(self, _line):
        """quit
        Quit"""
        self.do_disconnect("")
        CLI.do_quit(self, _line)
|
ic-hep/DIRAC
|
src/DIRAC/ConfigurationSystem/Client/CSShellCLI.py
|
Python
|
gpl-3.0
| 7,156
|
[
"DIRAC"
] |
57c401c34fdf89cd36b172cdd209557fcdc0e1298adcd2201be78bd0f7333b07
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
import procmemory
def drawVertices(myscreen, weave, vertexType, vertexRadius, vertexColor):
pts = weave.getVertices( vertexType )
print " got ",len(pts)," of type ", vertexType
for p in pts:
myscreen.addActor( camvtk.Sphere(center=(p.x,p.y,p.z), radius=vertexRadius, color=vertexColor ) )
# Demo script: push-cutter fiber operation on an STL model, building a Weave
# from the resulting fibers and rendering vertices, loops and edges with VTK.
if __name__ == "__main__":
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    #stl = camvtk.STLSurf("../stl/demo.stl")
    stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
    myscreen.addActor(stl)
    stl.SetWireframe()
    #stl.SetSurface()
    stl.SetColor(camvtk.grey)
    # Convert the VTK polydata into an OpenCAMLib STL surface.
    polydata = stl.src.GetOutput()
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    cutter = ocl.CylCutter(0.3, 5)
    #cutter = ocl.BallCutter(0.4, 5)
    #cutter = ocl.BullCutter(0.4, 0.1, 5)
    print "fiber..."
    # Build Nmax+1 evenly spaced x- and y-coordinates spanning fiber_range,
    # centered on the origin, at a single z height.
    fiber_range=30
    Nmax = 400
    yvals = [float(n-float(Nmax)/2)/Nmax*float(fiber_range) for n in xrange(0,Nmax+1)]
    xvals = [float(n-float(Nmax)/2)/Nmax*float(fiber_range) for n in xrange(0,Nmax+1)]
    zvals=[ 1.6523]
    # Two batch push-cutters: one pushing along X, one along Y.
    bpc_x = ocl.BatchPushCutter()
    bpc_y = ocl.BatchPushCutter()
    bpc_x.setXDirection()
    bpc_y.setYDirection()
    bpc_x.setSTL(s)
    bpc_y.setSTL(s)
    bpc_x.setCutter(cutter)
    bpc_y.setCutter(cutter)
    # create fibers
    for zh in zvals:
        for y in yvals:
            f1 = ocl.Point(-15.5,y,zh) # start point of fiber
            f2 = ocl.Point(15.5,y,zh)  # end point of fiber
            f = ocl.Fiber( f1, f2)
            bpc_x.appendFiber(f)
        for x in xvals:
            f1 = ocl.Point(x,-15.5,zh) # start point of fiber
            f2 = ocl.Point(x,15.5,zh)  # end point of fiber
            f = ocl.Fiber( f1, f2)
            bpc_y.appendFiber(f)
    # run
    bpc_x.run()
    bpc_y.run()
    xfibers = bpc_x.getFibers()
    yfibers = bpc_y.getFibers()
    fibers = xfibers+yfibers
    print " got ",len(xfibers)," xfibers"
    print " got ",len(yfibers)," yfibers"
    print "rendering fibers and CL-points."
    # Assemble the weave from all fibers and measure build() memory usage.
    w = ocl.Weave()
    print "push fibers to Weave...",
    for f in fibers:
        w.addFiber(f)
    print "done."
    print "Weave build()...",
    mem1 = procmemory.resident()
    print "before ", mem1
    w.build()
    #w.build2()
    mem2 = procmemory.resident()
    print "after ", float(mem2)/float(1024*1024), " MB"
    print " build() memory: ",float(mem2-mem1)/float(1024*1024)," MB"
    print "done"
    print "face_traverse..."
    w.face_traverse()
    print "done."
    w_clpts = w.getCLVertices()
    w_ipts = w.getINTVertices()
    w_edges = w.getEdges()
    w_loop = w.getLoops()
    # Render each vertex class in its own color.
    vertexRadius = 0.007
    drawVertices(myscreen, w, ocl.WeaveVertexType.CL, vertexRadius, camvtk.red)
    drawVertices(myscreen, w, ocl.WeaveVertexType.INT, vertexRadius, camvtk.orange)
    drawVertices(myscreen, w, ocl.WeaveVertexType.FULLINT, vertexRadius, camvtk.yellow)
    drawVertices(myscreen, w, ocl.WeaveVertexType.ADJ, vertexRadius, camvtk.green)
    drawVertices(myscreen, w, ocl.WeaveVertexType.TWOADJ, vertexRadius, camvtk.lblue)
    print " got: ", len(w_edges), " edges"
    print " got: ", len(w_loop), " loops"
    # draw the loops
    nloop = 0
    for lop in w_loop:
        n = 0
        N = len(lop)
        first_point=ocl.Point(-1,-1,5)
        previous=ocl.Point(-1,-1,5)
        for p in lop:
            if n==0: # don't draw anything on the first iteration
                previous=p
                first_point = p
            elif n== (N-1): # the last point
                myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=camvtk.yellow) ) # the normal line
                # and a line from p to the first point
                myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=camvtk.yellow) )
            else:
                myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=camvtk.yellow) )
                previous=p
            n=n+1
        print "rendered loop ",nloop, " with ", len(lop), " points"
        nloop = nloop+1
    # draw edges of weave
    ne = 0
    zoffset=0.0 # 1
    dzoffset = 0.000 # 5
    for e in w_edges:
        p1 = e[0]
        p2 = e[1]
        myscreen.addActor( camvtk.Line( p1=( p1.x,p1.y,p1.z+zoffset+ne*dzoffset), p2=(p2.x,p2.y,p2.z+zoffset+ne*dzoffset) ) )
        ne = ne+1
    print "done."
    # Final camera setup and interactive render loop.
    myscreen.camera.SetPosition(0.8051, 0.8051, 3.5)
    myscreen.camera.SetFocalPoint(0.805, 0.805, 0)
    camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
|
JohnyEngine/CNC
|
opencamlib/scripts/fiber/fiber_16_weave2_STL.py
|
Python
|
apache-2.0
| 4,845
|
[
"VTK"
] |
c535739735962c55a2e00ad95bfddb8898d67e7b6c820d3a53a9febfa97eb352
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# textarea - CGI entry point for the shared textarea functionality
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Show detailed tracebacks in the browser on failure (CGI debugging aid).
cgitb.enable()
from shared.functionality.textarea import main
from shared.cgiscriptstub import run_cgi_script
# Hand the CGI request off to the shared runner with the textarea handler.
# NOTE(review): ``cgi`` appears unused in this stub — possibly kept for
# side effects or by convention with the other cgi-bin stubs; confirm
# before removing.
run_cgi_script(main)
|
heromod/migrid
|
mig/cgi-bin/textarea.py
|
Python
|
gpl-2.0
| 1,104
|
[
"Brian"
] |
3910098bcc138a450e553418ac15c42b7a4ec309431c341ea1e3ba849de6364f
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like",
"just",
"love",
"know",
"never",
"want",
"time",
"out",
"there",
"make",
"look",
"eye",
"down",
"only",
"think",
"heart",
"back",
"then",
"into",
"about",
"more",
"away",
"still",
"them",
"take",
"thing",
"even",
"through",
"long",
"always",
"world",
"too",
"friend",
"tell",
"try",
"hand",
"thought",
"over",
"here",
"other",
"need",
"smile",
"again",
"much",
"cry",
"been",
"night",
"ever",
"little",
"said",
"end",
"some",
"those",
"around",
"mind",
"people",
"girl",
"leave",
"dream",
"left",
"turn",
"myself",
"give",
"nothing",
"really",
"off",
"before",
"something",
"find",
"walk",
"wish",
"good",
"once",
"place",
"ask",
"stop",
"keep",
"watch",
"seem",
"everything",
"wait",
"got",
"yet",
"made",
"remember",
"start",
"alone",
"run",
"hope",
"maybe",
"believe",
"body",
"hate",
"after",
"close",
"talk",
"stand",
"own",
"each",
"hurt",
"help",
"home",
"god",
"soul",
"new",
"many",
"two",
"inside",
"should",
"true",
"first",
"fear",
"mean",
"better",
"play",
"another",
"gone",
"change",
"use",
"wonder",
"someone",
"hair",
"cold",
"open",
"best",
"any",
"behind",
"happen",
"water",
"dark",
"laugh",
"stay",
"forever",
"name",
"work",
"show",
"sky",
"break",
"came",
"deep",
"door",
"put",
"black",
"together",
"upon",
"happy",
"such",
"great",
"white",
"matter",
"fill",
"past",
"please",
"burn",
"cause",
"enough",
"touch",
"moment",
"soon",
"voice",
"scream",
"anything",
"stare",
"sound",
"red",
"everyone",
"hide",
"kiss",
"truth",
"death",
"beautiful",
"mine",
"blood",
"broken",
"very",
"pass",
"next",
"forget",
"tree",
"wrong",
"air",
"mother",
"understand",
"lip",
"hit",
"wall",
"memory",
"sleep",
"free",
"high",
"realize",
"school",
"might",
"skin",
"sweet",
"perfect",
"blue",
"kill",
"breath",
"dance",
"against",
"fly",
"between",
"grow",
"strong",
"under",
"listen",
"bring",
"sometimes",
"speak",
"pull",
"person",
"become",
"family",
"begin",
"ground",
"real",
"small",
"father",
"sure",
"feet",
"rest",
"young",
"finally",
"land",
"across",
"today",
"different",
"guy",
"line",
"fire",
"reason",
"reach",
"second",
"slowly",
"write",
"eat",
"smell",
"mouth",
"step",
"learn",
"three",
"floor",
"promise",
"breathe",
"darkness",
"push",
"earth",
"guess",
"save",
"song",
"above",
"along",
"both",
"color",
"house",
"almost",
"sorry",
"anymore",
"brother",
"okay",
"dear",
"game",
"fade",
"already",
"apart",
"warm",
"beauty",
"heard",
"notice",
"question",
"shine",
"began",
"piece",
"whole",
"shadow",
"secret",
"street",
"within",
"finger",
"point",
"morning",
"whisper",
"child",
"moon",
"green",
"story",
"glass",
"kid",
"silence",
"since",
"soft",
"yourself",
"empty",
"shall",
"angel",
"answer",
"baby",
"bright",
"dad",
"path",
"worry",
"hour",
"drop",
"follow",
"power",
"war",
"half",
"flow",
"heaven",
"act",
"chance",
"fact",
"least",
"tired",
"children",
"near",
"quite",
"afraid",
"rise",
"sea",
"taste",
"window",
"cover",
"nice",
"trust",
"lot",
"sad",
"cool",
"force",
"peace",
"return",
"blind",
"easy",
"ready",
"roll",
"rose",
"drive",
"held",
"music",
"beneath",
"hang",
"mom",
"paint",
"emotion",
"quiet",
"clear",
"cloud",
"few",
"pretty",
"bird",
"outside",
"paper",
"picture",
"front",
"rock",
"simple",
"anyone",
"meant",
"reality",
"road",
"sense",
"waste",
"bit",
"leaf",
"thank",
"happiness",
"meet",
"men",
"smoke",
"truly",
"decide",
"self",
"age",
"book",
"form",
"alive",
"carry",
"escape",
"damn",
"instead",
"able",
"ice",
"minute",
"throw",
"catch",
"leg",
"ring",
"course",
"goodbye",
"lead",
"poem",
"sick",
"corner",
"desire",
"known",
"problem",
"remind",
"shoulder",
"suppose",
"toward",
"wave",
"drink",
"jump",
"woman",
"pretend",
"sister",
"week",
"human",
"joy",
"crack",
"grey",
"pray",
"surprise",
"dry",
"knee",
"less",
"search",
"bleed",
"caught",
"clean",
"embrace",
"future",
"king",
"son",
"sorrow",
"chest",
"hug",
"remain",
"sat",
"worth",
"blow",
"daddy",
"final",
"parent",
"tight",
"also",
"create",
"lonely",
"safe",
"cross",
"dress",
"evil",
"silent",
"bone",
"fate",
"perhaps",
"anger",
"class",
"scar",
"snow",
"tiny",
"tonight",
"continue",
"control",
"dog",
"edge",
"mirror",
"month",
"suddenly",
"comfort",
"given",
"loud",
"quickly",
"gaze",
"plan",
"rush",
"stone",
"town",
"battle",
"ignore",
"spirit",
"stood",
"stupid",
"yours",
"brown",
"build",
"dust",
"hey",
"kept",
"pay",
"phone",
"twist",
"although",
"ball",
"beyond",
"hidden",
"nose",
"taken",
"fail",
"float",
"pure",
"somehow",
"wash",
"wrap",
"angry",
"cheek",
"creature",
"forgotten",
"heat",
"rip",
"single",
"space",
"special",
"weak",
"whatever",
"yell",
"anyway",
"blame",
"job",
"choose",
"country",
"curse",
"drift",
"echo",
"figure",
"grew",
"laughter",
"neck",
"suffer",
"worse",
"yeah",
"disappear",
"foot",
"forward",
"knife",
"mess",
"somewhere",
"stomach",
"storm",
"beg",
"idea",
"lift",
"offer",
"breeze",
"field",
"five",
"often",
"simply",
"stuck",
"win",
"allow",
"confuse",
"enjoy",
"except",
"flower",
"seek",
"strength",
"calm",
"grin",
"gun",
"heavy",
"hill",
"large",
"ocean",
"shoe",
"sigh",
"straight",
"summer",
"tongue",
"accept",
"crazy",
"everyday",
"exist",
"grass",
"mistake",
"sent",
"shut",
"surround",
"table",
"ache",
"brain",
"destroy",
"heal",
"nature",
"shout",
"sign",
"stain",
"choice",
"doubt",
"glance",
"glow",
"mountain",
"queen",
"stranger",
"throat",
"tomorrow",
"city",
"either",
"fish",
"flame",
"rather",
"shape",
"spin",
"spread",
"ash",
"distance",
"finish",
"image",
"imagine",
"important",
"nobody",
"shatter",
"warmth",
"became",
"feed",
"flesh",
"funny",
"lust",
"shirt",
"trouble",
"yellow",
"attention",
"bare",
"bite",
"money",
"protect",
"amaze",
"appear",
"born",
"choke",
"completely",
"daughter",
"fresh",
"friendship",
"gentle",
"probably",
"six",
"deserve",
"expect",
"grab",
"middle",
"nightmare",
"river",
"thousand",
"weight",
"worst",
"wound",
"barely",
"bottle",
"cream",
"regret",
"relationship",
"stick",
"test",
"crush",
"endless",
"fault",
"itself",
"rule",
"spill",
"art",
"circle",
"join",
"kick",
"mask",
"master",
"passion",
"quick",
"raise",
"smooth",
"unless",
"wander",
"actually",
"broke",
"chair",
"deal",
"favorite",
"gift",
"note",
"number",
"sweat",
"box",
"chill",
"clothes",
"lady",
"mark",
"park",
"poor",
"sadness",
"tie",
"animal",
"belong",
"brush",
"consume",
"dawn",
"forest",
"innocent",
"pen",
"pride",
"stream",
"thick",
"clay",
"complete",
"count",
"draw",
"faith",
"press",
"silver",
"struggle",
"surface",
"taught",
"teach",
"wet",
"bless",
"chase",
"climb",
"enter",
"letter",
"melt",
"metal",
"movie",
"stretch",
"swing",
"vision",
"wife",
"beside",
"crash",
"forgot",
"guide",
"haunt",
"joke",
"knock",
"plant",
"pour",
"prove",
"reveal",
"steal",
"stuff",
"trip",
"wood",
"wrist",
"bother",
"bottom",
"crawl",
"crowd",
"fix",
"forgive",
"frown",
"grace",
"loose",
"lucky",
"party",
"release",
"surely",
"survive",
"teacher",
"gently",
"grip",
"speed",
"suicide",
"travel",
"treat",
"vein",
"written",
"cage",
"chain",
"conversation",
"date",
"enemy",
"however",
"interest",
"million",
"page",
"pink",
"proud",
"sway",
"themselves",
"winter",
"church",
"cruel",
"cup",
"demon",
"experience",
"freedom",
"pair",
"pop",
"purpose",
"respect",
"shoot",
"softly",
"state",
"strange",
"bar",
"birth",
"curl",
"dirt",
"excuse",
"lord",
"lovely",
"monster",
"order",
"pack",
"pants",
"pool",
"scene",
"seven",
"shame",
"slide",
"ugly",
"among",
"blade",
"blonde",
"closet",
"creek",
"deny",
"drug",
"eternity",
"gain",
"grade",
"handle",
"key",
"linger",
"pale",
"prepare",
"swallow",
"swim",
"tremble",
"wheel",
"won",
"cast",
"cigarette",
"claim",
"college",
"direction",
"dirty",
"gather",
"ghost",
"hundred",
"loss",
"lung",
"orange",
"present",
"swear",
"swirl",
"twice",
"wild",
"bitter",
"blanket",
"doctor",
"everywhere",
"flash",
"grown",
"knowledge",
"numb",
"pressure",
"radio",
"repeat",
"ruin",
"spend",
"unknown",
"buy",
"clock",
"devil",
"early",
"false",
"fantasy",
"pound",
"precious",
"refuse",
"sheet",
"teeth",
"welcome",
"add",
"ahead",
"block",
"bury",
"caress",
"content",
"depth",
"despite",
"distant",
"marry",
"purple",
"threw",
"whenever",
"bomb",
"dull",
"easily",
"grasp",
"hospital",
"innocence",
"normal",
"receive",
"reply",
"rhyme",
"shade",
"someday",
"sword",
"toe",
"visit",
"asleep",
"bought",
"center",
"consider",
"flat",
"hero",
"history",
"ink",
"insane",
"muscle",
"mystery",
"pocket",
"reflection",
"shove",
"silently",
"smart",
"soldier",
"spot",
"stress",
"train",
"type",
"view",
"whether",
"bus",
"energy",
"explain",
"holy",
"hunger",
"inch",
"magic",
"mix",
"noise",
"nowhere",
"prayer",
"presence",
"shock",
"snap",
"spider",
"study",
"thunder",
"trail",
"admit",
"agree",
"bag",
"bang",
"bound",
"butterfly",
"cute",
"exactly",
"explode",
"familiar",
"fold",
"further",
"pierce",
"reflect",
"scent",
"selfish",
"sharp",
"sink",
"spring",
"stumble",
"universe",
"weep",
"women",
"wonderful",
"action",
"ancient",
"attempt",
"avoid",
"birthday",
"branch",
"chocolate",
"core",
"depress",
"drunk",
"especially",
"focus",
"fruit",
"honest",
"match",
"palm",
"perfectly",
"pillow",
"pity",
"poison",
"roar",
"shift",
"slightly",
"thump",
"truck",
"tune",
"twenty",
"unable",
"wipe",
"wrote",
"coat",
"constant",
"dinner",
"drove",
"egg",
"eternal",
"flight",
"flood",
"frame",
"freak",
"gasp",
"glad",
"hollow",
"motion",
"peer",
"plastic",
"root",
"screen",
"season",
"sting",
"strike",
"team",
"unlike",
"victim",
"volume",
"warn",
"weird",
"attack",
"await",
"awake",
"built",
"charm",
"crave",
"despair",
"fought",
"grant",
"grief",
"horse",
"limit",
"message",
"ripple",
"sanity",
"scatter",
"serve",
"split",
"string",
"trick",
"annoy",
"blur",
"boat",
"brave",
"clearly",
"cling",
"connect",
"fist",
"forth",
"imagination",
"iron",
"jock",
"judge",
"lesson",
"milk",
"misery",
"nail",
"naked",
"ourselves",
"poet",
"possible",
"princess",
"sail",
"size",
"snake",
"society",
"stroke",
"torture",
"toss",
"trace",
"wise",
"bloom",
"bullet",
"cell",
"check",
"cost",
"darling",
"during",
"footstep",
"fragile",
"hallway",
"hardly",
"horizon",
"invisible",
"journey",
"midnight",
"mud",
"nod",
"pause",
"relax",
"shiver",
"sudden",
"value",
"youth",
"abuse",
"admire",
"blink",
"breast",
"bruise",
"constantly",
"couple",
"creep",
"curve",
"difference",
"dumb",
"emptiness",
"gotta",
"honor",
"plain",
"planet",
"recall",
"rub",
"ship",
"slam",
"soar",
"somebody",
"tightly",
"weather",
"adore",
"approach",
"bond",
"bread",
"burst",
"candle",
"coffee",
"cousin",
"crime",
"desert",
"flutter",
"frozen",
"grand",
"heel",
"hello",
"language",
"level",
"movement",
"pleasure",
"powerful",
"random",
"rhythm",
"settle",
"silly",
"slap",
"sort",
"spoken",
"steel",
"threaten",
"tumble",
"upset",
"aside",
"awkward",
"bee",
"blank",
"board",
"button",
"card",
"carefully",
"complain",
"crap",
"deeply",
"discover",
"drag",
"dread",
"effort",
"entire",
"fairy",
"giant",
"gotten",
"greet",
"illusion",
"jeans",
"leap",
"liquid",
"march",
"mend",
"nervous",
"nine",
"replace",
"rope",
"spine",
"stole",
"terror",
"accident",
"apple",
"balance",
"boom",
"childhood",
"collect",
"demand",
"depression",
"eventually",
"faint",
"glare",
"goal",
"group",
"honey",
"kitchen",
"laid",
"limb",
"machine",
"mere",
"mold",
"murder",
"nerve",
"painful",
"poetry",
"prince",
"rabbit",
"shelter",
"shore",
"shower",
"soothe",
"stair",
"steady",
"sunlight",
"tangle",
"tease",
"treasure",
"uncle",
"begun",
"bliss",
"canvas",
"cheer",
"claw",
"clutch",
"commit",
"crimson",
"crystal",
"delight",
"doll",
"existence",
"express",
"fog",
"football",
"gay",
"goose",
"guard",
"hatred",
"illuminate",
"mass",
"math",
"mourn",
"rich",
"rough",
"skip",
"stir",
"student",
"style",
"support",
"thorn",
"tough",
"yard",
"yearn",
"yesterday",
"advice",
"appreciate",
"autumn",
"bank",
"beam",
"bowl",
"capture",
"carve",
"collapse",
"confusion",
"creation",
"dove",
"feather",
"girlfriend",
"glory",
"government",
"harsh",
"hop",
"inner",
"loser",
"moonlight",
"neighbor",
"neither",
"peach",
"pig",
"praise",
"screw",
"shield",
"shimmer",
"sneak",
"stab",
"subject",
"throughout",
"thrown",
"tower",
"twirl",
"wow",
"army",
"arrive",
"bathroom",
"bump",
"cease",
"cookie",
"couch",
"courage",
"dim",
"guilt",
"howl",
"hum",
"husband",
"insult",
"led",
"lunch",
"mock",
"mostly",
"natural",
"nearly",
"needle",
"nerd",
"peaceful",
"perfection",
"pile",
"price",
"remove",
"roam",
"sanctuary",
"serious",
"shiny",
"shook",
"sob",
"stolen",
"tap",
"vain",
"void",
"warrior",
"wrinkle",
"affection",
"apologize",
"blossom",
"bounce",
"bridge",
"cheap",
"crumble",
"decision",
"descend",
"desperately",
"dig",
"dot",
"flip",
"frighten",
"heartbeat",
"huge",
"lazy",
"lick",
"odd",
"opinion",
"process",
"puzzle",
"quietly",
"retreat",
"score",
"sentence",
"separate",
"situation",
"skill",
"soak",
"square",
"stray",
"taint",
"task",
"tide",
"underneath",
"veil",
"whistle",
"anywhere",
"bedroom",
"bid",
"bloody",
"burden",
"careful",
"compare",
"concern",
"curtain",
"decay",
"defeat",
"describe",
"double",
"dreamer",
"driver",
"dwell",
"evening",
"flare",
"flicker",
"grandma",
"guitar",
"harm",
"horrible",
"hungry",
"indeed",
"lace",
"melody",
"monkey",
"nation",
"object",
"obviously",
"rainbow",
"salt",
"scratch",
"shown",
"shy",
"stage",
"stun",
"third",
"tickle",
"useless",
"weakness",
"worship",
"worthless",
"afternoon",
"beard",
"boyfriend",
"bubble",
"busy",
"certain",
"chin",
"concrete",
"desk",
"diamond",
"doom",
"drawn",
"due",
"felicity",
"freeze",
"frost",
"garden",
"glide",
"harmony",
"hopefully",
"hunt",
"jealous",
"lightning",
"mama",
"mercy",
"peel",
"physical",
"position",
"pulse",
"punch",
"quit",
"rant",
"respond",
"salty",
"sane",
"satisfy",
"savior",
"sheep",
"slept",
"social",
"sport",
"tuck",
"utter",
"valley",
"wolf",
"aim",
"alas",
"alter",
"arrow",
"awaken",
"beaten",
"belief",
"brand",
"ceiling",
"cheese",
"clue",
"confidence",
"connection",
"daily",
"disguise",
"eager",
"erase",
"essence",
"everytime",
"expression",
"fan",
"flag",
"flirt",
"foul",
"fur",
"giggle",
"glorious",
"ignorance",
"law",
"lifeless",
"measure",
"mighty",
"muse",
"north",
"opposite",
"paradise",
"patience",
"patient",
"pencil",
"petal",
"plate",
"ponder",
"possibly",
"practice",
"slice",
"spell",
"stock",
"strife",
"strip",
"suffocate",
"suit",
"tender",
"tool",
"trade",
"velvet",
"verse",
"waist",
"witch",
"aunt",
"bench",
"bold",
"cap",
"certainly",
"click",
"companion",
"creator",
"dart",
"delicate",
"determine",
"dish",
"dragon",
"drama",
"drum",
"dude",
"everybody",
"feast",
"forehead",
"former",
"fright",
"fully",
"gas",
"hook",
"hurl",
"invite",
"juice",
"manage",
"moral",
"possess",
"raw",
"rebel",
"royal",
"scale",
"scary",
"several",
"slight",
"stubborn",
"swell",
"talent",
"tea",
"terrible",
"thread",
"torment",
"trickle",
"usually",
"vast",
"violence",
"weave",
"acid",
"agony",
"ashamed",
"awe",
"belly",
"blend",
"blush",
"character",
"cheat",
"common",
"company",
"coward",
"creak",
"danger",
"deadly",
"defense",
"define",
"depend",
"desperate",
"destination",
"dew",
"duck",
"dusty",
"embarrass",
"engine",
"example",
"explore",
"foe",
"freely",
"frustrate",
"generation",
"glove",
"guilty",
"health",
"hurry",
"idiot",
"impossible",
"inhale",
"jaw",
"kingdom",
"mention",
"mist",
"moan",
"mumble",
"mutter",
"observe",
"ode",
"pathetic",
"pattern",
"pie",
"prefer",
"puff",
"rape",
"rare",
"revenge",
"rude",
"scrape",
"spiral",
"squeeze",
"strain",
"sunset",
"suspend",
"sympathy",
"thigh",
"throne",
"total",
"unseen",
"weapon",
"weary",
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable, it depends on the previous word.
def mn_encode(message):
out = []
for i in range(len(message) / 8):
word = message[8 * i:8 * i + 8]
x = int(word, 16)
w1 = (x % n)
w2 = ((x / n) + w1) % n
w3 = ((x / n / n) + w2) % n
out += [words[w1], words[w2], words[w3]]
return out
def mn_decode(wlist):
out = ''
for i in range(len(wlist) / 3):
word1, word2, word3 = wlist[3 * i:3 * i + 3]
w1 = words.index(word1)
w2 = (words.index(word2)) % n
w3 = (words.index(word3)) % n
x = w1 + n * ((w2 - w1) % n) + n * n * ((w3 - w2) % n)
out += '%08x' % x
return out
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
print 'I need arguments: a hex string to encode, or a list of words to decode'
elif len(sys.argv) == 2:
print ' '.join(mn_encode(sys.argv[1]))
else:
print mn_decode(sys.argv[1:])
|
thirdkey-solutions/granary
|
granary/electrum_v1_mnemonic.py
|
Python
|
mit
| 18,829
|
[
"CRYSTAL",
"VisIt"
] |
7f8dd430b4725d34cfbc3dc860e35748e9ec4632333626c5f166f3be3bcf402c
|
#!/usr/bin/env python3
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
'''
Calculate the parameters for pointing model J.Condon (1992)
AltAz: Astropy N=0,E=pi/2, Libnova S=0,W=pi/2
'''
__author__ = 'wildi.markus@bluewin.ch'
import sys
import argparse
import logging
import os
import importlib
import numpy as np
import scipy.optimize
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord,EarthLocation
from astropy.coordinates import Longitude,Latitude,Angle
from astropy.coordinates.representation import SphericalRepresentation
from astropy.utils import iers
# astropy pre 1.2.1 may not work correctly
# wget http://maia.usno.navy.mil/ser7/finals2000A.all
# together with IERS_A_FILE
# Load the IERS-A table needed for precise time/coordinate transforms.
try:
    iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_FILE)
# ###########
except Exception:
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
    # not swallowed.  Any failure to open the table is treated as "file
    # missing": tell the user how to fetch it and bail out.
    print('download:')
    print('wget http://maia.usno.navy.mil/ser7/finals2000A.all')
    sys.exit(1)
from u_point.structures import Point,Parameter
from u_point.callback import AnnoteFinder
from u_point.callback import AnnotatedPlot
from u_point.script import Script
# find out how caching works
#from astropy.utils import iers
#iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_URL)
class PointingModel(Script):
    def __init__(self, lg=None,break_after=None, base_path=None, obs=None,analyzed_positions=None,fit_sxtr=None):
        """Pointing-model fitter (J.Condon 1992 parameters).

        fit_sxtr: if True, fit the SExtractor-based mount positions,
        otherwise the astrometry-based ones (see fetch_coordinates).
        All other arguments are passed through to Script unchanged.
        """
        Script.__init__(self,lg=lg,break_after=break_after,base_path=base_path,obs=obs,analyzed_positions=analyzed_positions)
        #
        self.fit_sxtr=fit_sxtr
        # both are filled from the first analyzed entry in fetch_coordinates
        self.transform_name=None
        self.refraction_method=None
def fetch_coordinates(self,ptfn=None):
self.fetch_positions(sys_exit=True,analyzed=True)
# old irait data do not enable
#self.fetch_mount_meteo(sys_exit=True,analyzed=True,with_nml_id=False)
cats=list()
mnts=list()
imgs=list()
nmls=list()
if len(self.sky_anl)==0:
self.lg.error('fetch_coordinates: nothing to analyze, exiting')
sys.exit(1)
self.eq_mount=False
if self.sky_anl[0].eq_mount:
self.eq_mount=True
self.transform_name=None
if self.sky_anl[0].transform_name:
self.transform_name=self.sky_anl[0].transform_name
logger.info('transformation done with: {}'.format(self.transform_name))
self.refraction_method=None
if self.sky_anl[0].refraction_method:
self.refraction_method=self.sky_anl[0].refraction_method
# ToDo
#logger.info('refraction_method: {}'.format(self.refraction_method))
for i,sky in enumerate(self.sky_anl):
if i > self.break_after:
break
if self.fit_sxtr:
if sky.mnt_ll_sxtr is None:
continue
mnt_ll=sky.mnt_ll_sxtr
else:
if sky.mnt_ll_astr is None:
continue
mnt_ll=sky.mnt_ll_astr
cats.append(sky.cat_ll_ap)
mnts.append(mnt_ll)
imgs.append(sky.image_fn)
nmls.append(sky.nml_id)
return cats,mnts,imgs,nmls
# fit projections with a gaussian
def fit_projection_helper(self,function=None, parameters=None, y=None, x = None):
def local_f(params):
for i,p in enumerate(parameters):
p.set(params[i])
return y - function(x)
p = [param() for param in parameters]
return scipy.optimize.leastsq(local_f, p)
def fit_gaussian(self,x):
return self.height() * np.exp(-(x-self.mu())**2/2./self.sigma()**2) #+ background()
    def fit_projection_and_plot(self,vals=None, bins=None, axis=None,fit_title=None,fn_frac=None,prefix=None,plt_no=None,plt=None):
        '''
        Histogram *vals*, fit a gaussian to the projection and save the
        figure as a PNG under base_path.  Used both for the differences
        (not corrected data) and for the residues (lat,lon coordinates).
        To compare differences ('1') and residues ('2') the plot titles
        are labeled with a letter and '1' and '2'.
        '''
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # NOTE(review): ``normed`` was removed in modern matplotlib
        # (replaced by ``density``); this code targets an older release.
        cnt_bins, rbins, patches = ax.hist(vals,bins,normed=True, facecolor='green', alpha=0.75)
        # drop the rightmost bin edge so bin positions and counts align
        bins=rbins[:-1] #ToDO why?
        # estimator, width: weighted RMS spread as the initial sigma guess
        wd=np.sqrt(np.abs(np.sum((bins-cnt_bins)**2*cnt_bins)/np.sum(cnt_bins)))
        # Parameters live on self because fit_gaussian reads them there.
        self.mu=Parameter(0.)
        self.sigma=Parameter(wd) #arcsec!
        self.height=Parameter(cnt_bins.max())
        background=Parameter(0.)  # unused: fit_gaussian has no background term
        parameters=[self.mu,self.sigma,self.height]# ToDo a bit a contradiction
        res,stat=self.fit_projection_helper(function=self.fit_gaussian,parameters=parameters, y=cnt_bins, x=bins)
        # leastsq status 1 means converged (2-4 are soft successes); warn otherwise
        if stat != 1:
            self.lg.warn('fit projection not converged, status: {}'.format(stat))
        y=self.fit_gaussian(bins)
        l = ax.plot(bins, y, 'r--', linewidth=2)
        ax.set_xlabel('{} {} [arcsec]'.format(prefix,axis))
        ax.set_ylabel('number of events normalized')
        # NOTE(review): substring test — any prefix contained in the word
        # 'difference' takes this branch; presumably callers pass exactly
        # 'difference' or 'residuum'.  Confirm before tightening to ==.
        if prefix in 'difference': # ToDo ugly
            # as of 2017-03-18 TeX does not work
            #ax.set_title(r'{0} {1} {2} $\mu$={3:.2f},$\sigma$={4:.2f} [arcsec] \n catalog_not_corrected - star'.format(plt_no,prefix,axis,self.mu(), self.sigma()))
            ax.set_title(r'{0} {1} {2} mu={3:.2f},sigma={4:.2f} [arcsec] \n catalog_not_corrected - star'.format(plt_no,prefix,axis,self.mu(), self.sigma()))
            # sanitize blanks and '+' so the pieces are file-name safe
            fn_ftmp=fn_frac.replace(' ','_').replace('+','_')
            axis_tmp=axis.replace(' ','_').replace('+','_')
            fig.savefig(os.path.join(self.base_path,'{}_catalog_not_corrected_projection_{}_{}.png'.format(prefix,axis_tmp,fn_ftmp)))
        else:
            #ax.set_title(r'{0} {1} {2} $\mu$={3:.2f},$\sigma$={4:.2f} [arcsec], fit: {5}'.format(plt_no,prefix,axis,self.mu(), self.sigma(),fit_title))
            ax.set_title(r'{0} {1} {2} mu={3:.2f},sigma={4:.2f} [arcsec], fit: {5}'.format(plt_no,prefix,axis,self.mu(), self.sigma(),fit_title))
            #ToDo ugly
            fn_ftmp=fn_frac.replace(' ','_').replace('+','_')
            axis_tmp=axis.replace(' ','_').replace('+','_')
            fig.savefig(os.path.join(self.base_path,'{}_projection_{}_{}.png'.format(prefix,axis_tmp,fn_ftmp)))
def prepare_plot(self, cats=None,mnts=None,imgs=None,nmls=None,selected=None,model=None):
stars=list()
for i, ct in enumerate(cats):
if not i in selected:
#self.lg.debug('star: {} dropped'.format(i))
continue
mt=mnts[i] # readability
mts=mt.represent_as(SphericalRepresentation)
# ToDo may not the end of the story
cts=ct.represent_as(SphericalRepresentation)
df_lon= Longitude(cts.lon.radian-mts.lon.radian,u.radian, wrap_angle=Angle(np.pi,u.radian))
df_lat= Latitude(cts.lat.radian-mts.lat.radian,u.radian)
#print(df_lat,df_lon)
#if df_lat.radian < 0./60./180.*np.pi:
# pass
#elif df_lat.radian > 20./60./180.*np.pi:
# pass
#else:
# continue
# residuum: difference st.cats(fit corrected) - st.star
#
res_lon=Longitude(
float(model.d_lon(cts.lon.radian,cts.lat.radian,cts.lon.radian-mts.lon.radian)),
u.radian,
wrap_angle=Angle(np.pi,u.radian))
res_lat=Latitude(
float(model.d_lat(cts.lon.radian,cts.lat.radian,cts.lat.radian-mts.lat.radian)),u.radian)
try:
image_fn=imgs[i]
except:
image_fn='no image file'
try:
nml_id=nmls[i]
except:
nml_id='no nml_id'
st=Point(
cat_lon=cts.lon,
cat_lat=cts.lat,
mnt_lon=mts.lon,
mnt_lat=mts.lat,
df_lat=df_lat,
df_lon=df_lon,
res_lat=res_lat,
res_lon=res_lon,
image_fn=image_fn,
nml_id=nml_id,
)
stars.append(st)
return stars
    def annotate_plot(self,fig=None,ax=None,aps=None,ds9_display=None,delete=None):
        """Create the AnnoteFinder pick-event callback for the leading plot.

        aps        : list of AnnotatedPlot instances to annotate.
        ds9_display: whether the callback displays the image in DS9.
        NOTE(review): ``fig`` and ``delete`` are unused here — confirm
        whether they can be dropped from the signature (callers permitting).
        """
        af = AnnoteFinder(
            ax=ax, # leading plot, put it on the list
            aps=aps,
            # pick tolerance around a point, in data units
            xtol=1.,
            ytol=1.,
            ds9_display=ds9_display,
            lg=self.lg,
            annotate_fn=False,
            analyzed=True,
            delete_one=self.delete_one_position,)
        return af
def create_plot(self,fig=None,ax=None,title=None,xlabel=None,ylabel=None,lon=None,lat=None,fn=None):
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
ax.scatter(lon,lat,picker=20)
fig.savefig(os.path.join(self.base_path,fn.replace(' ','_').replace('+','_')))
def plot_results(self, stars=None,args=None):
'''
Plot the differences and residues as a function of lon,lat or dlon,dlat or a combination of them.
'''
import matplotlib
# this varies from distro to distro:
matplotlib.rcParams["backend"] = "TkAgg"
import matplotlib.pyplot as plt
#matplotlib.rc('text', usetex = True)
#matplotlib.rc('font', **{'family':"sans-serif"})
#params = {'text.latex.preamble': [r'\usepackage{siunitx}',
# r'\usepackage{sfmath}', r'\sisetup{detect-family = true}',
# r'\usepackage{amsmath}']}
#plt.rcParams.update(params)
plt.ioff()
fit_title=model.fit_title
if args.fit_plus_poly:
fit_title +='C+PP'
frag='_' + self.transform_name.upper()[0:2]
fit_title += frag
sx='_AS'
if args.fit_sxtr:
sx='_SX'
fit_title += sx
frag= '_'+ self.refraction_method.upper()[0:2]
fit_title += frag
fn_frac=fit_title
if args.fit_plus_poly:
fn_frac+='c_plus_poly'
if self.eq_mount:
lon_label='hour angle'
lat_label='declination'
else:
lat_label='altitude'
lon_label='azimuth N=0,E=90'
if 'nova' in self.transform_name:
lon_label='azimuth S=0,W=90'
az_cat_deg=[x.cat_lon.degree for x in stars]
alt_cat_deg=[x.cat_lat.degree for x in stars]
plots=list()
elements=list()
elements.append('A1 difference: catalog_not_corrected - star')
elements.append('d({}) [arcmin]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=[x.df_lon.arcmin for x in stars]
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_catalog_not_corrected_star{0}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.df_lon.arcmin,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('B1 difference {}: catalog_not_corrected - star'.format(lon_label))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=az_cat_deg
lat=[x.df_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{0}_catalog_not_corrected_star{1}.png'.format(lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.df_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('C1 difference {}: catalog_not_corrected - star'.format(lat_label))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=az_cat_deg
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{1}_catalog_not_corrected_star{2}.png'.format(lon_label,lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('D1 difference {}: catalog_not_corrected - star'.format(lon_label))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=alt_cat_deg
lat=[x.df_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
# ToDo: think about that:
#ax.scatter(alt_cat_deg ,[x.df_lon.arcmin/ np.tan(x.mnt_lat.radian) for x in stars])
elements.append('difference_{0}_d{1}_catalog_not_corrected_star{2}.png'.format(lat_label,lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.df_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('E1 difference {}: catalog_not_corrected - star'.format(lat_label))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=alt_cat_deg
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{0}_catalog_not_corrected_star{1}.png'.format(lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
## residuum, ax05 is below
elements.append('A2 residuum: catalog_corrected - star {}'.format(fit_title))
elements.append('d({}) [arcmin]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=[x.res_lon.arcmin for x in stars]
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_catalog_corrected_star_{}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.res_lon.arcmin,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('B2 residuum {} catalog_corrected - star, fit: {}'.format(lon_label,fit_title))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=az_cat_deg
lat=[x.res_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{0}_catalog_corrected_star_{1}.png'.format(lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.res_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('D2 residuum {} catalog_corrected - star, fit: {}'.format(lon_label,fit_title))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=alt_cat_deg
lat=[x.res_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{1}_catalog_corrected_star_{2}.png'.format(lat_label,lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.res_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('C2 residuum {} catalog_corrected - star, fit: {}'.format(lat_label,fit_title))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=az_cat_deg
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{1}_catalog_corrected_star_{2}.png'.format(lon_label,lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('E2 residuum {} catalog_corrected - star, fit: {}'.format(lat_label,fit_title))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=alt_cat_deg
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{0}_catalog_corrected_star_{1}.png'.format(lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('K measurement locations catalog')
elements.append('{} [deg]'.format(lon_label))
elements.append('{} [deg]'.format(lat_label))
lon=[x.cat_lon.degree for x in stars]
lat=[x.cat_lat.degree for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('measurement_locations_catalog_{0}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.cat_lat.degree,x.image_fn) for x in stars])
plots.append(elements)
annotes_skycoords=['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.cat_lat.degree,x.image_fn) for x in stars]
figs=list()
axs=list()
aps=list()
nml_ids=[x.nml_id for x in stars]
# aps must be complete
for elements in plots:
lon=elements[3]
lat=elements[4]
fig = plt.figure()
ax = fig.add_subplot(111)
self.create_plot(fig=fig,ax=ax,title=elements[0],xlabel=elements[1],ylabel=elements[2],lon=lon,lat=lat,fn=elements[5])
figs.append(fig)
axs.append(ax)
# it deppends what is needed:
#annotes=elements[6]
annotes=annotes_skycoords
aps.append(AnnotatedPlot(xx=ax,nml_id=nml_ids,lon=lon,lat=lat,annotes=annotes))
afs=list()
for i,ax in enumerate(axs):
af=self.annotate_plot(fig=figs[i],ax=axs[i],aps=aps,ds9_display=args.ds9_display,delete=args.delete)
# ToDo ??removing this list inhibits call back on all but one plot
afs.append(af)
figs[i].canvas.mpl_connect('button_press_event',af.mouse_event)
if args.delete:
figs[i].canvas.mpl_connect('key_press_event',af.keyboard_event)
# ToDo why that?
#figs[0].canvas.mpl_connect('button_press_event',afs[0].mouse_event)
#if args.delete:
# figs[0].canvas.mpl_connect('key_press_event',afs[0].keyboard_event)
self.fit_projection_and_plot(vals=[x.df_lon.arcsec for x in stars], bins=args.bins,axis='{}'.format(lon_label), fit_title=fit_title,fn_frac=fn_frac,prefix='difference',plt_no='P1',plt=plt)
self.fit_projection_and_plot(vals=[x.df_lat.arcsec for x in stars], bins=args.bins,axis='{}'.format(lat_label),fit_title=fit_title,fn_frac=fn_frac,prefix='difference',plt_no='P2',plt=plt)
self.fit_projection_and_plot(vals=[x.res_lon.arcsec for x in stars],bins=args.bins,axis='{}'.format(lon_label), fit_title=fit_title,fn_frac=fn_frac,prefix='residuum',plt_no='Q1',plt=plt)
self.fit_projection_and_plot(vals=[x.res_lat.arcsec for x in stars],bins=args.bins,axis='{}'.format(lat_label),fit_title=fit_title,fn_frac=fn_frac,prefix='residuum',plt_no='Q2',plt=plt)
plt.show()
def select_stars(self, stars=None):
slctd=list()
drppd=list()
for i,st in enumerate(stars):
#st=Point(
# cat_lon=cat_aa.az,cat_lat=cat_aa.alt,
# mnt_lon=mnt_aa.az,mnt_lat=mnt_aa.alt,
# df_lat=df_alt,df_lon=df_az,
# res_lat=res_alt,res_lon=res_az
dist2 = st.res_lat.radian**2 + st.res_lon.radian**2
if dist2> (30./3600./180.*np.pi)**2:
slctd.append(i)
else:
drppd.append(i)
# ToDo not yet drop set()
selected=list(set(slctd))
dropped=list(set(drppd))
self.lg.debug('Number of selected stars: {} '.format(len(selected)))
return selected, dropped
# argparse cannot take option values that begin with '-', so a leading 'm'
# is used as a stand-in minus sign on the command line (e.g. 'm10.' == -10.).
def arg_float(value):
    """Parse a float where a *leading* 'm' denotes a minus sign.

    Fix: the original tested ``'m' in value`` which matched an 'm'
    anywhere in the string and then blindly stripped the first character;
    only a leading 'm' is the documented convention.
    """
    if value.startswith('m'):
        return -float(value[1:])
    return float(value)
if __name__ == "__main__":
parser= argparse.ArgumentParser(prog=sys.argv[0], description='Fit an AltAz or EQ pointing model')
parser.add_argument('--level', dest='level', default='INFO', help=': %(default)s, debug level')
parser.add_argument('--toconsole', dest='toconsole', action='store_true', default=False, help=': %(default)s, log to console')
parser.add_argument('--break_after', dest='break_after', action='store', default=10000000, type=int, help=': %(default)s, read max. positions, mostly used for debuging')
parser.add_argument('--base-path', dest='base_path', action='store', default='/tmp/u_point/',type=str, help=': %(default)s , directory where images are stored')
parser.add_argument('--analyzed-positions', dest='analyzed_positions', action='store', default='analyzed_positions.anl', help=': %(default)s, already observed positions')
#
parser.add_argument('--obs-longitude', dest='obs_lng', action='store', default=123.2994166666666,type=arg_float, help=': %(default)s [deg], observatory longitude + to the East [deg], negative value: m10. equals to -10.')
parser.add_argument('--obs-latitude', dest='obs_lat', action='store', default=-75.1,type=arg_float, help=': %(default)s [deg], observatory latitude [deg], negative value: m10. equals to -10.')
parser.add_argument('--obs-height', dest='obs_height', action='store', default=3237.,type=arg_float, help=': %(default)s [m], observatory height above sea level [m], negative value: m10. equals to -10.')
#
parser.add_argument('--fit-sxtr', dest='fit_sxtr', action='store_true', default=False, help=': %(default)s, True fit SExtractor results')
# group model
parser.add_argument('--model-class', dest='model_class', action='store', default='u_upoint', help=': %(default)s, specify your model, see e.g. model/altaz.py')
parser.add_argument('--fit-plus-poly', dest='fit_plus_poly', action='store_true', default=False, help=': %(default)s, True: Condon 1992 with polynom')
# group plot
parser.add_argument('--plot', dest='plot', action='store_true', default=False, help=': %(default)s, plot results')
parser.add_argument('--bins', dest='bins', action='store', default=40,type=int, help=': %(default)s, number of bins used in the projection histograms')
parser.add_argument('--ds9-display', dest='ds9_display', action='store_true', default=False, help=': %(default)s, inspect image and region with ds9')
parser.add_argument('--delete', dest='delete', action='store_true', default=False, help=': %(default)s, True: click on data point followed by keyboard <Delete> deletes selected ananlyzed point from file --analyzed-positions')
args=parser.parse_args()
if args.toconsole:
args.level='DEBUG'
if not os.path.exists(args.base_path):
os.makedirs(args.base_path)
pth, fn = os.path.split(sys.argv[0])
filename=os.path.join(args.base_path,'{}.log'.format(fn.replace('.py',''))) # ToDo datetime, name of the script
logformat= '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
logging.basicConfig(filename=filename, level=args.level.upper(), format= logformat)
logger = logging.getLogger()
if args.toconsole:
# http://www.mglerner.com/blog/?p=8
soh = logging.StreamHandler(sys.stdout)
soh.setLevel(args.level)
logger.addHandler(soh)
if not os.path.exists(args.base_path):
os.makedirs(args.base_path)
obs=EarthLocation(lon=float(args.obs_lng)*u.degree, lat=float(args.obs_lat)*u.degree, height=float(args.obs_height)*u.m)
# now load model class
md = importlib.import_module('model.'+args.model_class)
logger.info('model loaded: {}'.format(args.model_class))
# required methods: fit_model, d_lon, d_lat
model=md.Model(lg=logger)
pm= PointingModel(lg=logger,break_after=args.break_after,base_path=args.base_path,obs=obs,analyzed_positions=args.analyzed_positions,fit_sxtr=args.fit_sxtr)
# cat,mnt: AltAz, or HA,dec coordinates
cats,mnts,imgs,nmls=pm.fetch_coordinates()
# check model type, mount type
if pm.eq_mount:
if 'hadec' not in model.model_type():
logger.error('u_model: model: {}, type: {}'.format(args.model_class, model.model_type()))
logger.error('u_model: specify hadec model type, exiting')
sys.exit(1)
else:
if 'altaz' not in model.model_type():
logger.error('u_model: model: {}, type: {}'.format(args.model_class, model.model_type()))
logger.error('u_model: specify altaz model type, exiting')
sys.exit(1)
if cats is None or len(cats)==0:
logger.error('u_model: nothing to analyze, exiting')
sys.exit(1)
selected=list(range(0,len(cats))) # all
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,obs=pm.obs)
else:
try:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,fit_plus_poly=args.fit_plus_poly)
except TypeError as e:
ptfn=pm.expand_base_path(fn=args.analyzed_positions)
logger.error('u_model: presumably empty file: {}, exception: {},exiting'.format(ptfn,e))
sys.exit(1)
stars=pm.prepare_plot(cats=cats,mnts=mnts,imgs=imgs,nmls=nmls,selected=selected,model=model)
if args.plot:
pm.plot_results(stars=stars,args=args)
# for the moment
sys.exit(1)
selected,dropped=pm.select_stars(stars=stars)
logger.info('number of selected: {}, dropped: {} '.format(len(selected),len(dropped)))
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,obs=pm.obs)
else:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,fit_plus_poly=args.fit_plus_poly)
stars=pm.prepare_plot(cats=cats,mnts=mnts,nmls=nmls,selected=selected,model=model)
pm.plot_results(stars=stars,args=args)
logger.info('number of selected: {}, dropped: {} '.format(len(selected),len(dropped)))
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=dropped,obs=pm.obs)
else:
res=model.fit_model(cats=cats,mnts=mnts,selected=dropped,fit_plus_poly=args.fit_plus_poly)
stars=pm.prepare_plot(cats=cats,mnts=mnts,nmls=nmls,selected=dropped,model=model)
pm.plot_results(stars=stars,args=args)
sys.exit(0)
|
jstrobl/rts2
|
scripts/u_point/u_model.py
|
Python
|
lgpl-3.0
| 25,850
|
[
"Gaussian",
"VisIt"
] |
ae3c81239e7aaf9758dba6e70e86238670edd8db42119d6f0bd5eb7ad24f6a89
|
from basesynapse import BaseSynapse
import numpy as np
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
cuda_src = """
__global__ void dummy_synapse(
%(type)s *buffer,
int buffer_ld,
int buffer_curr,
int buffer_delay_steps,
int syn_num,
int *pre_neu_idx,
int *delay,
%(type)s *syn_state )
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int tot_threads = gridDim.x * blockDim.x;
int pre;
int dl;
int col;
for( int i=tid; i<syn_num; i+=tot_threads ){
dl = delay[i];
col = buffer_curr - dl;
if( col < 0 )
col += buffer_delay_steps;
pre = pre_neu_idx[i];
syn_state[i] = buffer[ col*buffer_ld+pre ];
}
return;
}
"""
class DummySynapse(BaseSynapse):
    """Pass-through synapse: copies the (optionally delayed) pre-synaptic
    neuron state directly into the synapse state array on the GPU.

    Fixes relative to the original:
    * ``s_dict.has_key('delay')`` was Python-2-only; replaced with the
      ``in`` operator (identical semantics on Python 2 and 3).
    * grid-size computation used ``/`` which is float division on
      Python 3; ``//`` preserves the intended integer result.
    """

    def __init__(self, s_dict, synapse_state, dt, debug=False):
        """Build GPU arrays from the synapse dictionary.

        s_dict: dict with keys 'id', 'pre' and optionally 'delay' (in ms,
        converted here to integer simulation steps of length dt).
        synapse_state: GPU pointer to the output state array.
        """
        self.debug = debug
        #self.dt = dt
        self.num = len( s_dict['id'] )

        # 'delay' is given in milliseconds; convert to integer time steps.
        if 'delay' in s_dict:
            self.delay = garray.to_gpu(np.round(np.asarray( s_dict['delay'])*1e-3/dt ).astype(np.int32) )
        else:
            # No delays specified: zero-step delay for every synapse.
            self.delay = garray.zeros( self.num, dtype=np.int32 )

        self.pre = garray.to_gpu( np.asarray( s_dict['pre'], dtype=np.int32 ))
        self.state = synapse_state

        self.update = self.get_gpu_kernel()

    @property
    def synapse_class(self): return int(0)

    def update_state(self, buffer, st = None):
        """Launch the kernel: copy delayed pre-synaptic values into state."""
        self.update.prepared_async_call(
            self.gpu_grid,\
            self.gpu_block,\
            st,\
            buffer.gpot_buffer.gpudata,\
            buffer.gpot_buffer.ld,
            buffer.gpot_current,
            buffer.gpot_delay_steps,
            self.num,\
            self.pre.gpudata,\
            self.delay.gpudata,\
            self.state)

    def get_gpu_kernel(self):
        """Compile the CUDA kernel and return the prepared function."""
        self.gpu_block = (128,1,1)
        # Cap the grid at 6 blocks per SM; '//' keeps the block count an
        # integer (the original '/' broke under Python 3).
        self.gpu_grid = (min( 6*cuda.Context.get_device().MULTIPROCESSOR_COUNT,\
                         (self.num-1)//self.gpu_block[0] + 1), 1)
        mod = SourceModule( \
                cuda_src % {"type": dtype_to_ctype(np.float64)},\
                options=["--ptxas-options=-v"])
        func = mod.get_function("dummy_synapse")
        func.prepare('PiiiiPPP')# [ np.intp,   # neuron state buffer
                     #   np.int32, # buffer width
                     #   np.int32, # buffer position
                     #   np.int32, # buffer delay steps
                     #   np.int32, # syn_num
                     #   np.intp,  # pre-synaptic neuron list
                     #   np.intp,  # delay step
                     #   np.intp ] ) # cond array
        return func
|
cerrno/neurokernel
|
neurokernel/LPU/synapses/DummySynapse.py
|
Python
|
bsd-3-clause
| 2,755
|
[
"NEURON"
] |
ea98e27dd5106c81cb906941f13f80532547b6427f0ce4bcbe42eae640b28eec
|
from __future__ import print_function
import numpy as np
from ase import Atom, Atoms
from ase.calculators.calculator import Calculator
from ase.optimize import FIRE, BFGS
from ase.data import atomic_numbers
from ase.data.vdw import vdw_radii
class RepulsivePotential(Calculator):
    """Purely repulsive Gaussian potential between a probe atom and the rest.

    The last atom in the Atoms object is treated as the moving probe; every
    other atom contributes a Gaussian repulsion whose width is set by its
    van der Waals radius (half-maximum at that radius).
    """
    implemented_properties = ['energy', 'forces']

    def calculate(self, atoms, properties, changes):
        # Per-atom van der Waals radii; also stored on the instance.
        radii = np.array([
            vdw_radii[atomic_numbers[atom.symbol]] for atom in atoms])
        self.radii_a = radii

        # last atom is the moving one
        total_energy = 0.0
        force_arr = np.zeros((len(atoms), 3))
        for idx in range(len(atoms) - 1):
            # Minimum-image vector from atom idx to the probe atom.
            vec = atoms.get_distance(idx, -1, mic=True, vector=True)
            separation = np.linalg.norm(vec)
            # Gaussian variance chosen so the value is 1/2 at the vdW radius.
            variance = radii[idx]**2 / (2 * np.log(2))
            gauss = np.exp(- separation**2 / (2 * variance))
            total_energy += gauss
            # Repulsive force on the probe (gradient of the Gaussian).
            force_arr[-1] += gauss * vec / variance

        self.results['energy'] = total_energy
        self.results['forces'] = force_arr
def voids(atoms_in):
    """Find location and size of voids in a given structure.

    Returns the voids as 'X' atoms. The atoms' charge is misused
    to contain the voids' radius.

    Strategy: append a probe 'X' atom, relax it under RepulsivePotential
    from several perturbed starting points near each real atom, and keep
    the probe positions that are farther from all atoms than any
    previously found void covering the same region.
    """
    trials = 6  # XXX do not hardwire

    atoms = atoms_in.copy()
    # append moving atom
    atoms.append(Atom('X'))
    atoms.set_calculator(RepulsivePotential())

    # Collected voids share the cell/PBC of the input structure.
    voids_a = Atoms()
    voids_a.set_cell(atoms.get_cell())
    voids_a.set_pbc(atoms.get_pbc())

    positions = atoms.get_positions()
    for pos in positions[:-1]:
        # Several randomized starts per atom to escape local minima.
        for c in range(trials):
            positions[-1] = pos + 0.1 * np.random.uniform(-1, 1, size=3)
            atoms.set_positions(positions)

            # XXX do not hardwire
            relax = FIRE(atoms,
                         logfile=None
                         )
            # XXX do not hardwire
            relax.run(fmax=0.001, steps=100)

            # get minimal distance: Rmin is the candidate void radius.
            Rmin = 100000
            for b in range(len(atoms) - 1):
                R = atoms.get_distance(b, -1, mic=True)
                if R < Rmin:
                    Rmin = R

            # check if new or better
            voids_a.append(Atom('X',
                                atoms.get_positions()[-1],
                                charge=Rmin))
            voids_a.set_positions(voids_a.get_positions(wrap=True))

            # Compare the new candidate (index `last`) against all existing
            # voids; whichever of an overlapping pair has the smaller radius
            # is scheduled for removal.
            remove = []
            last = len(voids_a) - 1
            for ia, a in enumerate(voids_a[:-1]):
                d = voids_a.get_distance(ia, -1, mic=True)
                if d < a.charge or d < Rmin:
                    if a.charge > Rmin:
                        remove.append(last)
                    else:
                        remove.append(ia)
            # NOTE(review): `last` may be appended multiple times here, and
            # popping duplicates below would remove unintended entries —
            # presumably overlaps are rare enough in practice; confirm.
            remove.sort()

            if last not in remove:
                p = voids_a.get_positions()[-1]
                print('found new void at [%g,%g,%g], R=%g' %
                      (p[0], p[1], p[2], Rmin))
            # Pop in reverse order so earlier indices stay valid.
            for a in remove[::-1]:
                if a != last:
                    p = voids_a.get_positions()[a]
                    print('removing void at [%g,%g,%g], R=%g' %
                          (p[0], p[1], p[2], voids_a[a].charge))
                voids_a.pop(a)

    return voids_a
|
grhawk/ASE
|
tools/ase/utils/voids.py
|
Python
|
gpl-2.0
| 3,344
|
[
"ASE",
"Gaussian"
] |
9bf8bf50581c534ea3f5c0138b9d3686970080043cbd771337e1063c106998ef
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.