text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# disable missing docstring
# pylint: disable=C0111
import json
from lettuce import world, step
from nose.tools import assert_equal, assert_true # pylint: disable=E0611
from common import type_in_codemirror, open_new_course
from advanced_settings import change_value, ADVANCED_MODULES_KEY
from course_import import import_file
# Labels of the fields shown in the problem component's Settings editor.
# These must match the display names rendered by Studio exactly.
DISPLAY_NAME = "Display Name"
MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
MATLAB_API_KEY = "Matlab API key"
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
    """Create a new unit and add a Blank Common Problem component to it."""
    step.given('I am in Studio editing a new unit')
    step.given("I have created another Blank Common Problem")


@step('I have created a unit with advanced module "(.*)"$')
def i_created_unit_with_advanced_module(step, advanced_module):
    """Create a new unit in a course with *advanced_module* enabled.

    Enables the module via the course Advanced Settings, then returns to
    the unit page and waits for the xmodule javascript to load.
    """
    step.given('I am in Studio editing a new unit')
    url = world.browser.url
    step.given("I select the Advanced Settings")
    change_value(step, ADVANCED_MODULES_KEY, '["{}"]'.format(advanced_module))
    world.visit(url)
    world.wait_for_xmodule()


@step('I have created an advanced component "(.*)" of type "(.*)"')
def i_create_new_advanced_component(step, component_type, advanced_component):
    """Add an advanced component of the given type to the current unit."""
    world.create_component_instance(
        step=step,
        category='advanced',
        component_type=component_type,
        advanced_component=advanced_component
    )


@step('I have created another Blank Common Problem$')
def i_create_new_common_problem(step):
    """Add a Blank Common Problem component to the current unit."""
    world.create_component_instance(
        step=step,
        category='problem',
        component_type='Blank Common Problem'
    )
@step('when I mouseover on "(.*)"')
def i_mouseover_on_html_component(step, element_class):
    """Fire a mouseover event on the element with the given CSS class."""
    action_css = '.{}'.format(element_class)
    world.trigger_event(action_css, event='mouseover')


@step(u'I can see Reply to Annotation link$')
def i_see_reply_to_annotation_link(_step):
    """Wait until the annotation reply link becomes visible."""
    css_selector = 'a.annotatable-reply'
    world.wait_for_visible(css_selector)


@step(u'I see that page has scrolled "(.*)" when I click on "(.*)" link$')
def i_see_annotation_problem_page_scrolls(_step, scroll_direction, link_css):
    """Assert the page scrolled "up" or "down" after clicking the link."""
    scroll_js = "$(window).scrollTop();"
    scroll_height_before = world.browser.evaluate_script(scroll_js)
    world.css_click("a.{}".format(link_css))
    scroll_height_after = world.browser.evaluate_script(scroll_js)
    if scroll_direction == "up":
        assert scroll_height_after < scroll_height_before
    elif scroll_direction == "down":
        assert scroll_height_after > scroll_height_before


@step('I have created an advanced problem of type "(.*)"$')
def i_create_new_advanced_problem(step, component_type):
    """Add an advanced problem component of the given type to the unit."""
    world.create_component_instance(
        step=step,
        category='problem',
        component_type=component_type,
        is_advanced=True
    )
@step('I edit and select Settings$')
def i_edit_and_select_settings(_step):
    """Open the component editor and switch to the Settings tab."""
    world.edit_component_and_select_settings()


@step('I see the advanced settings and their expected values$')
def i_see_advanced_settings_with_values(step):
    """Verify every setting shows its default value.

    Each entry is [label, displayed value, explicitly_set].
    """
    world.verify_all_setting_entries(
        [
            [DISPLAY_NAME, "Blank Common Problem", True],
            [MATLAB_API_KEY, "", False],
            [MAXIMUM_ATTEMPTS, "", False],
            [PROBLEM_WEIGHT, "", False],
            [RANDOMIZATION, "Never", False],
            [SHOW_ANSWER, "Finished", False],
            [TIMER_BETWEEN_ATTEMPTS, "0", False],
        ])


@step('I can modify the display name')
def i_can_modify_the_display_name(_step):
    # Verifying that the display name can be a string containing a floating point value
    # (to confirm that we don't throw an error because it is of the wrong type).
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, '3.4')
    verify_modified_display_name()


@step('my display name change is persisted on save')
def my_display_name_change_is_persisted_on_save(step):
    """Save, reopen, and confirm the changed display name survived."""
    world.save_component_and_reopen(step)
    verify_modified_display_name()


@step('the problem display name is "(.*)"$')
def verify_problem_display_name(step, name):
    # The rendered header text is compared upper-cased.
    assert_equal(name.upper(), world.browser.find_by_css('.problem-header').text)


@step('I can specify special characters in the display name')
def i_can_modify_the_display_name_with_special_chars(_step):
    """Set a display name containing quote and ampersand characters."""
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, "updated ' \" &")
    verify_modified_display_name_with_special_chars()


@step('I can specify html in the display name and save')
def i_can_modify_the_display_name_with_html(_step):
    """
    If alert appear on save then UnexpectedAlertPresentException
    will occur and test will fail.
    """
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.set_field_value(index, "<script>alert('test')</script>")
    verify_modified_display_name_with_html()
    world.save_component()


@step('my special characters and persisted on save')
def special_chars_persisted_on_save(step):
    """Save, reopen, and confirm the special characters survived."""
    world.save_component_and_reopen(step)
    verify_modified_display_name_with_special_chars()


@step('I can revert the display name to unset')
def can_revert_display_name_to_unset(_step):
    """Clear the display name and confirm it reverts to the default."""
    world.revert_setting_entry(DISPLAY_NAME)
    verify_unset_display_name()


@step('my display name is unset on save')
def my_display_name_is_persisted_on_save(step):
    """Save, reopen, and confirm the display name is still unset."""
    world.save_component_and_reopen(step)
    verify_unset_display_name()
@step('I can select Per Student for Randomization')
def i_can_select_per_student_for_randomization(_step):
    """Change the Randomization dropdown to "Per Student"."""
    world.browser.select(RANDOMIZATION, "Per Student")
    verify_modified_randomization()


@step('my change to randomization is persisted')
def my_change_to_randomization_is_persisted(step):
    """Save, reopen, and confirm the randomization change survived."""
    world.save_component_and_reopen(step)
    verify_modified_randomization()


@step('I can revert to the default value for randomization')
def i_can_revert_to_default_for_randomization(step):
    """Clear randomization and confirm it returns to "Never" (not set)."""
    world.revert_setting_entry(RANDOMIZATION)
    world.save_component_and_reopen(step)
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Never", False)


@step('I can set the weight to "(.*)"?')
def i_can_set_weight(_step, weight):
    """Type a weight value and verify the field shows it as set."""
    set_weight(weight)
    verify_modified_weight()


@step('my change to weight is persisted')
def my_change_to_weight_is_persisted(step):
    """Save, reopen, and confirm the weight change survived."""
    world.save_component_and_reopen(step)
    verify_modified_weight()


@step('I can revert to the default value of unset for weight')
def i_can_revert_to_default_for_unset_weight(step):
    """Clear the weight and confirm it is unset after save/reopen."""
    world.revert_setting_entry(PROBLEM_WEIGHT)
    world.save_component_and_reopen(step)
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)


@step('if I set the weight to "(.*)", it remains unset')
def set_the_weight_to_abc(step, bad_weight):
    """An invalid weight is displayed but never persisted to the model."""
    set_weight(bad_weight)
    # We show the clear button immediately on type, hence the "True" here.
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", True)
    world.save_component_and_reopen(step)
    # But no change was actually ever sent to the model, so on reopen, explicitly_set is False
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)


@step('if I set the max attempts to "(.*)", it will persist as a valid integer$')
def set_the_max_attempts(step, max_attempts_set):
    # on firefox with selenium, the behavior is different.
    # eg 2.34 displays as 2.34 and is persisted as 2
    index = world.get_setting_entry_index(MAXIMUM_ATTEMPTS)
    world.set_field_value(index, max_attempts_set)
    world.save_component_and_reopen(step)
    value = world.css_value('input.setting-input', index=index)
    assert value != "", "max attempts is blank"
    assert int(value) >= 0
@step('Edit High Level Source is not visible')
def edit_high_level_source_not_visible(step):
    """Assert the latex-compiler button is absent from the editor."""
    verify_high_level_source_links(step, False)


@step('Edit High Level Source is visible')
def edit_high_level_source_links_visible(step):
    """Assert the latex-compiler button is present in the editor."""
    verify_high_level_source_links(step, True)


@step('If I press Cancel my changes are not persisted')
def cancel_does_not_save_changes(step):
    """Cancel the editor, reopen it, and confirm the defaults are intact."""
    world.cancel_component(step)
    step.given("I edit and select Settings")
    step.given("I see the advanced settings and their expected values")


@step('I have enabled latex compiler')
def enable_latex_compiler(step):
    """Turn on the LaTeX compiler course setting, then return to the unit."""
    url = world.browser.url
    step.given("I select the Advanced Settings")
    change_value(step, 'Enable LaTeX Compiler', 'true')
    world.visit(url)
    world.wait_for_xmodule()


@step('I have created a LaTeX Problem')
def create_latex_problem(step):
    """Create a unit with the LaTeX compiler enabled plus a LaTeX problem."""
    step.given('I am in Studio editing a new unit')
    step.given('I have enabled latex compiler')
    world.create_component_instance(
        step=step,
        category='problem',
        component_type='Problem Written in LaTeX',
        is_advanced=True
    )


@step('I edit and compile the High Level Source')
def edit_latex_source(_step):
    """Type into the high-level-source editor and trigger compilation."""
    open_high_level_source()
    type_in_codemirror(1, "hi")
    world.css_click('.hls-compile')


@step('my change to the High Level Source is persisted')
def high_level_source_persisted(_step):
    """Wait until the compiled problem displays the typed text."""
    def verify_text(driver):
        css_sel = '.problem div>span'
        return world.css_text(css_sel) == 'hi'

    world.wait_for(verify_text, timeout=10)


@step('I view the High Level Source I see my changes')
def high_level_source_in_editor(_step):
    """Reopen the high-level-source editor and confirm the edit is there."""
    open_high_level_source()
    assert_equal('hi', world.css_value('.source-edit-box'))
@step(u'I have an empty course')
def i_have_empty_course(step):
    """Create a brand-new, empty course."""
    open_new_course()


@step(u'I import the file "([^"]*)"$')
def i_import_the_file(_step, filename):
    """Import the named course export file into the current course."""
    import_file(filename)


@step(u'I go to the vertical "([^"]*)"$')
def i_go_to_vertical(_step, vertical):
    """Click the outline entry whose label matches *vertical*."""
    world.css_click("span:contains('{0}')".format(vertical))


@step(u'I go to the unit "([^"]*)"$')
def i_go_to_unit(_step, unit):
    # Navigate by following the href of the anchor enclosing the unit label.
    loc = "window.location = $(\"span:contains('{0}')\").closest('a').attr('href')".format(unit)
    world.browser.execute_script(loc)


@step(u'I see a message that says "([^"]*)"$')
def i_can_see_message(_step, msg):
    """Assert the page title element contains the given message."""
    msg = json.dumps(msg)  # escape quotes
    world.css_has_text("h2.title", msg)


@step(u'I can edit the problem$')
def i_can_edit_problem(_step):
    """Open the component editor for the problem."""
    world.edit_component()


@step(u'I edit first blank advanced problem for annotation response$')
def i_edit_blank_problem_for_annotation_response(_step):
    """Replace the first problem's XML with an annotation-response problem."""
    world.edit_component(1)
    text = """
<problem>
<annotationresponse>
<annotationinput><text>Text of annotation</text></annotationinput>
</annotationresponse>
</problem>"""
    type_in_codemirror(0, text)
    world.save_component()


@step(u'I can see cheatsheet$')
def verify_cheat_sheet_displaying(_step):
    """Toggle the editor cheatsheet and wait for it to become visible."""
    world.css_click("a.cheatsheet-toggle")
    css_selector = 'article.simple-editor-cheatsheet'
    world.wait_for_visible(css_selector)
# Shared verification helpers used by the step definitions above.

def verify_high_level_source_links(step, visible):
    """Assert presence/absence of the latex-compiler button, then cancel."""
    if visible:
        assert_true(world.is_css_present('.launch-latex-compiler'),
                    msg="Expected to find the latex button but it is not present.")
    else:
        assert_true(world.is_css_not_present('.launch-latex-compiler'),
                    msg="Expected not to find the latex button but it is present.")
    world.cancel_component(step)


def verify_modified_weight():
    """Weight was set to '3.5' and is explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "3.5", True)


def verify_modified_randomization():
    """Randomization reads 'Per Student' and is explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Per Student", True)


def verify_modified_display_name():
    """Display name reads '3.4' and is explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)


def verify_modified_display_name_with_special_chars():
    """Display name contains the quote/ampersand test string."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "updated ' \" &", True)


def verify_modified_display_name_with_html():
    """Display name contains the raw (unexecuted) script tag."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "<script>alert('test')</script>", True)


def verify_unset_display_name():
    """Display name shows the default and is not explicitly set."""
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, 'Blank Advanced Problem', False)


def set_weight(weight):
    """Type *weight* into the Problem Weight field."""
    index = world.get_setting_entry_index(PROBLEM_WEIGHT)
    world.set_field_value(index, weight)


def open_high_level_source():
    """Open the component editor and launch the latex compiler view."""
    world.edit_component()
    world.css_click('.launch-latex-compiler > a')
|
jelugbo/tundex
|
cms/djangoapps/contentstore/features/problem-editor.py
|
Python
|
agpl-3.0
| 12,644
|
[
"VisIt"
] |
d97007b9a0442f5e9b8ecf204d80c6e9cd72913de953c522f31ec1343aaff739
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support classes for generating code from abstract syntax trees."""
try:
    import _ast
except ImportError:
    # Python < 2.5 has no _ast module: fall back to the bundled
    # compatibility implementation.
    from genshi.template.ast24 import _ast, parse
else:
    def parse(source, mode):
        # Compile to an abstract syntax tree instead of a code object.
        return compile(source, '', mode, _ast.PyCF_ONLY_AST)
from genshi.compat import IS_PYTHON2
__docformat__ = 'restructuredtext en'
class ASTCodeGenerator(object):
    """Generate Python source code from an abstract syntax tree.

    The generated source accumulates in ``self.code``.  ``self.lines_info``
    holds, for every emitted line, a list of ``(column, (lineno, col))``
    pairs mapping segments of the generated line back to positions in the
    original source ("blame" information), or ``(column, None)`` for text
    with no known origin.
    """

    def __init__(self, tree):
        self.lines_info = []
        self.line_info = None
        self.code = ''
        self.line = None
        self.last = None
        self.indent = 0
        self.blame_stack = []
        self.visit(tree)
        # Flush the final pending line.  Guard against ``self.line`` still
        # being None, which happens when the tree emitted no lines at all
        # (previously an AttributeError).
        if self.line is not None and self.line.strip():
            self.code += self.line + '\n'
            self.lines_info.append(self.line_info)
        self.line = None
        self.line_info = None

    def _change_indent(self, delta):
        self.indent += delta

    def _new_line(self):
        # Flush the current line (if any) and start a fresh one at the
        # current indentation level, seeding its blame info.
        if self.line is not None:
            self.code += self.line + '\n'
            self.lines_info.append(self.line_info)
        self.line = ' ' * 4 * self.indent
        if len(self.blame_stack) == 0:
            self.line_info = []
            self.last = None
        else:
            self.line_info = [(0, self.blame_stack[-1],)]
            self.last = self.blame_stack[-1]

    def _write(self, s):
        # Append text to the current line, recording a new blame entry
        # whenever the originating source position changes.
        if len(s) == 0:
            return
        if len(self.blame_stack) == 0:
            if self.last is not None:
                self.last = None
                self.line_info.append((len(self.line), self.last))
        else:
            if self.last != self.blame_stack[-1]:
                self.last = self.blame_stack[-1]
                self.line_info.append((len(self.line), self.last))
        self.line += s

    def visit(self, node):
        """Dispatch to ``visit_<NodeType>``, tracking blame positions."""
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        try:
            self.blame_stack.append((node.lineno, node.col_offset,))
            info = True
        except AttributeError:
            # Node carries no source position (e.g. operators, contexts).
            info = False
        visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
        if visitor is None:
            raise Exception('Unhandled node type %r' % type(node))
        ret = visitor(node)
        if info:
            self.blame_stack.pop()
        return ret

    def visit_Module(self, node):
        for n in node.body:
            self.visit(n)
    visit_Interactive = visit_Module
    visit_Suite = visit_Module

    def visit_Expression(self, node):
        self._new_line()
        return self.visit(node.body)

    # arguments = (expr* args, identifier? vararg,
    #              identifier? kwarg, expr* defaults)
    def visit_arguments(self, node):
        first = True
        no_default_count = len(node.args) - len(node.defaults)
        for i, arg in enumerate(node.args):
            if not first:
                self._write(', ')
            else:
                first = False
            self.visit(arg)
            if i >= no_default_count:
                self._write('=')
                self.visit(node.defaults[i - no_default_count])
        if getattr(node, 'vararg', None):
            if not first:
                self._write(', ')
            else:
                first = False
            self._write('*' + node.vararg)
        if getattr(node, 'kwarg', None):
            if not first:
                self._write(', ')
            else:
                first = False
            self._write('**' + node.kwarg)

    if not IS_PYTHON2:
        # In Python 3 arguments get a special node
        def visit_arg(self, node):
            self._write(node.arg)

    # FunctionDef(identifier name, arguments args,
    #             stmt* body, expr* decorator_list)
    def visit_FunctionDef(self, node):
        # (A typo'd, never-read ``decarators = ()`` assignment was removed;
        # both branches below bind ``decorators``.)
        if hasattr(node, 'decorator_list'):
            decorators = getattr(node, 'decorator_list')
        else:  # different name in earlier Python versions
            decorators = getattr(node, 'decorators', ())
        for decorator in decorators:
            self._new_line()
            self._write('@')
            self.visit(decorator)
        self._new_line()
        self._write('def ' + node.name + '(')
        self.visit(node.args)
        self._write('):')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)

    # ClassDef(identifier name, expr* bases, stmt* body)
    def visit_ClassDef(self, node):
        self._new_line()
        self._write('class ' + node.name)
        if node.bases:
            self._write('(')
            self.visit(node.bases[0])
            for base in node.bases[1:]:
                self._write(', ')
                self.visit(base)
            self._write(')')
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)

    # Return(expr? value)
    def visit_Return(self, node):
        self._new_line()
        self._write('return')
        if getattr(node, 'value', None):
            self._write(' ')
            self.visit(node.value)

    # Delete(expr* targets)
    def visit_Delete(self, node):
        self._new_line()
        self._write('del ')
        self.visit(node.targets[0])
        for target in node.targets[1:]:
            self._write(', ')
            self.visit(target)

    # Assign(expr* targets, expr value)
    def visit_Assign(self, node):
        self._new_line()
        for target in node.targets:
            self.visit(target)
            self._write(' = ')
        self.visit(node.value)

    # AugAssign(expr target, operator op, expr value)
    def visit_AugAssign(self, node):
        self._new_line()
        self.visit(node.target)
        self._write(' ' + self.binary_operators[node.op.__class__] + '= ')
        self.visit(node.value)

    # Print(expr? dest, expr* values, bool nl)
    def visit_Print(self, node):
        self._new_line()
        self._write('print')
        if getattr(node, 'dest', None):
            self._write(' >> ')
            self.visit(node.dest)
            if getattr(node, 'values', None):
                self._write(', ')
        else:
            self._write(' ')
        if getattr(node, 'values', None):
            self.visit(node.values[0])
            for value in node.values[1:]:
                self._write(', ')
                self.visit(value)
        if not node.nl:
            self._write(',')

    # For(expr target, expr iter, stmt* body, stmt* orelse)
    def visit_For(self, node):
        self._new_line()
        self._write('for ')
        self.visit(node.target)
        self._write(' in ')
        self.visit(node.iter)
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
        if getattr(node, 'orelse', None):
            self._new_line()
            self._write('else:')
            self._change_indent(1)
            for statement in node.orelse:
                self.visit(statement)
            self._change_indent(-1)

    # While(expr test, stmt* body, stmt* orelse)
    def visit_While(self, node):
        self._new_line()
        self._write('while ')
        self.visit(node.test)
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
        if getattr(node, 'orelse', None):
            self._new_line()
            self._write('else:')
            self._change_indent(1)
            for statement in node.orelse:
                self.visit(statement)
            self._change_indent(-1)

    # If(expr test, stmt* body, stmt* orelse)
    def visit_If(self, node):
        self._new_line()
        self._write('if ')
        self.visit(node.test)
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
        if getattr(node, 'orelse', None):
            self._new_line()
            self._write('else:')
            self._change_indent(1)
            for statement in node.orelse:
                self.visit(statement)
            self._change_indent(-1)

    # With(expr context_expr, expr? optional_vars, stmt* body)
    def visit_With(self, node):
        self._new_line()
        self._write('with ')
        self.visit(node.context_expr)
        if getattr(node, 'optional_vars', None):
            self._write(' as ')
            self.visit(node.optional_vars)
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)

    if IS_PYTHON2:
        # Raise(expr? type, expr? inst, expr? tback)
        def visit_Raise(self, node):
            self._new_line()
            self._write('raise')
            if not node.type:
                return
            self._write(' ')
            self.visit(node.type)
            if not node.inst:
                return
            self._write(', ')
            self.visit(node.inst)
            if not node.tback:
                return
            self._write(', ')
            self.visit(node.tback)
    else:
        # Raise(expr? exc from expr? cause)
        def visit_Raise(self, node):
            self._new_line()
            self._write('raise')
            if not node.exc:
                return
            self._write(' ')
            self.visit(node.exc)
            if not node.cause:
                return
            self._write(' from ')
            self.visit(node.cause)

    # TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
    def visit_TryExcept(self, node):
        self._new_line()
        self._write('try:')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
        if getattr(node, 'handlers', None):
            for handler in node.handlers:
                self.visit(handler)
        self._new_line()
        if getattr(node, 'orelse', None):
            self._write('else:')
            self._change_indent(1)
            for statement in node.orelse:
                self.visit(statement)
            self._change_indent(-1)

    # excepthandler = (expr? type, expr? name, stmt* body)
    def visit_ExceptHandler(self, node):
        self._new_line()
        self._write('except')
        if getattr(node, 'type', None):
            self._write(' ')
            self.visit(node.type)
        if getattr(node, 'name', None):
            self._write(', ')
            self.visit(node.name)
        self._write(':')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
    visit_excepthandler = visit_ExceptHandler

    # TryFinally(stmt* body, stmt* finalbody)
    def visit_TryFinally(self, node):
        self._new_line()
        self._write('try:')
        self._change_indent(1)
        for statement in node.body:
            self.visit(statement)
        self._change_indent(-1)
        if getattr(node, 'finalbody', None):
            self._new_line()
            self._write('finally:')
            self._change_indent(1)
            for statement in node.finalbody:
                self.visit(statement)
            self._change_indent(-1)

    # Assert(expr test, expr? msg)
    def visit_Assert(self, node):
        self._new_line()
        self._write('assert ')
        self.visit(node.test)
        if getattr(node, 'msg', None):
            self._write(', ')
            self.visit(node.msg)

    def visit_alias(self, node):
        self._write(node.name)
        if getattr(node, 'asname', None):
            self._write(' as ')
            self._write(node.asname)

    # Import(alias* names)
    def visit_Import(self, node):
        self._new_line()
        self._write('import ')
        self.visit(node.names[0])
        for name in node.names[1:]:
            self._write(', ')
            self.visit(name)

    # ImportFrom(identifier module, alias* names, int? level)
    def visit_ImportFrom(self, node):
        self._new_line()
        self._write('from ')
        if node.level:
            self._write('.' * node.level)
        self._write(node.module)
        self._write(' import ')
        self.visit(node.names[0])
        for name in node.names[1:]:
            self._write(', ')
            self.visit(name)

    # Exec(expr body, expr? globals, expr? locals)
    def visit_Exec(self, node):
        self._new_line()
        self._write('exec ')
        self.visit(node.body)
        if not node.globals:
            return
        self._write(', ')
        self.visit(node.globals)
        if not node.locals:
            return
        self._write(', ')
        self.visit(node.locals)

    # Global(identifier* names)
    def visit_Global(self, node):
        self._new_line()
        self._write('global ')
        self.visit(node.names[0])
        for name in node.names[1:]:
            self._write(', ')
            self.visit(name)

    # Expr(expr value)
    def visit_Expr(self, node):
        self._new_line()
        self.visit(node.value)

    # Pass
    def visit_Pass(self, node):
        self._new_line()
        self._write('pass')

    # Break
    def visit_Break(self, node):
        self._new_line()
        self._write('break')

    # Continue
    def visit_Continue(self, node):
        self._new_line()
        self._write('continue')

    ### EXPRESSIONS
    def with_parens(f):
        # Decorator: wrap the visitor's output in parentheses.
        def _f(self, node):
            self._write('(')
            f(self, node)
            self._write(')')
        return _f

    bool_operators = {_ast.And: 'and', _ast.Or: 'or'}

    # BoolOp(boolop op, expr* values)
    @with_parens
    def visit_BoolOp(self, node):
        joiner = ' ' + self.bool_operators[node.op.__class__] + ' '
        self.visit(node.values[0])
        for value in node.values[1:]:
            self._write(joiner)
            self.visit(value)

    binary_operators = {
        _ast.Add: '+',
        _ast.Sub: '-',
        _ast.Mult: '*',
        _ast.Div: '/',
        _ast.Mod: '%',
        _ast.Pow: '**',
        _ast.LShift: '<<',
        _ast.RShift: '>>',
        _ast.BitOr: '|',
        _ast.BitXor: '^',
        _ast.BitAnd: '&',
        _ast.FloorDiv: '//'
    }

    # BinOp(expr left, operator op, expr right)
    @with_parens
    def visit_BinOp(self, node):
        self.visit(node.left)
        self._write(' ' + self.binary_operators[node.op.__class__] + ' ')
        self.visit(node.right)

    unary_operators = {
        _ast.Invert: '~',
        _ast.Not: 'not',
        _ast.UAdd: '+',
        _ast.USub: '-',
    }

    # UnaryOp(unaryop op, expr operand)
    def visit_UnaryOp(self, node):
        self._write(self.unary_operators[node.op.__class__] + ' ')
        self.visit(node.operand)

    # Lambda(arguments args, expr body)
    @with_parens
    def visit_Lambda(self, node):
        self._write('lambda ')
        self.visit(node.args)
        self._write(': ')
        self.visit(node.body)

    # IfExp(expr test, expr body, expr orelse)
    @with_parens
    def visit_IfExp(self, node):
        self.visit(node.body)
        self._write(' if ')
        self.visit(node.test)
        self._write(' else ')
        self.visit(node.orelse)

    # Dict(expr* keys, expr* values)
    def visit_Dict(self, node):
        self._write('{')
        for key, value in zip(node.keys, node.values):
            self.visit(key)
            self._write(': ')
            self.visit(value)
            self._write(', ')
        self._write('}')

    # ListComp(expr elt, comprehension* generators)
    def visit_ListComp(self, node):
        self._write('[')
        self.visit(node.elt)
        for generator in node.generators:
            # comprehension = (expr target, expr iter, expr* ifs)
            self._write(' for ')
            self.visit(generator.target)
            self._write(' in ')
            self.visit(generator.iter)
            for ifexpr in generator.ifs:
                self._write(' if ')
                self.visit(ifexpr)
        self._write(']')

    # GeneratorExp(expr elt, comprehension* generators)
    def visit_GeneratorExp(self, node):
        self._write('(')
        self.visit(node.elt)
        for generator in node.generators:
            # comprehension = (expr target, expr iter, expr* ifs)
            self._write(' for ')
            self.visit(generator.target)
            self._write(' in ')
            self.visit(generator.iter)
            for ifexpr in generator.ifs:
                self._write(' if ')
                self.visit(ifexpr)
        self._write(')')

    # Yield(expr? value)
    def visit_Yield(self, node):
        self._write('yield')
        if getattr(node, 'value', None):
            self._write(' ')
            self.visit(node.value)

    # NOTE: historical misspelling kept for backward compatibility.
    comparision_operators = {
        _ast.Eq: '==',
        _ast.NotEq: '!=',
        _ast.Lt: '<',
        _ast.LtE: '<=',
        _ast.Gt: '>',
        _ast.GtE: '>=',
        _ast.Is: 'is',
        _ast.IsNot: 'is not',
        _ast.In: 'in',
        _ast.NotIn: 'not in',
    }

    # Compare(expr left, cmpop* ops, expr* comparators)
    @with_parens
    def visit_Compare(self, node):
        self.visit(node.left)
        for op, comparator in zip(node.ops, node.comparators):
            self._write(' ' + self.comparision_operators[op.__class__] + ' ')
            self.visit(comparator)

    # Call(expr func, expr* args, keyword* keywords,
    #      expr? starargs, expr? kwargs)
    def visit_Call(self, node):
        self.visit(node.func)
        self._write('(')
        first = True
        for arg in node.args:
            if not first:
                self._write(', ')
            first = False
            self.visit(arg)
        for keyword in node.keywords:
            if not first:
                self._write(', ')
            first = False
            # keyword = (identifier arg, expr value)
            self._write(keyword.arg)
            self._write('=')
            self.visit(keyword.value)
        if getattr(node, 'starargs', None):
            if not first:
                self._write(', ')
            first = False
            self._write('*')
            self.visit(node.starargs)
        if getattr(node, 'kwargs', None):
            if not first:
                self._write(', ')
            first = False
            self._write('**')
            self.visit(node.kwargs)
        self._write(')')

    # Repr(expr value)
    def visit_Repr(self, node):
        self._write('`')
        self.visit(node.value)
        self._write('`')

    # Num(object n)
    def visit_Num(self, node):
        self._write(repr(node.n))

    # Str(string s)
    def visit_Str(self, node):
        self._write(repr(node.s))

    if not IS_PYTHON2:
        # Bytes(bytes s)
        def visit_Bytes(self, node):
            self._write(repr(node.s))

    # Attribute(expr value, identifier attr, expr_context ctx)
    def visit_Attribute(self, node):
        self.visit(node.value)
        self._write('.')
        self._write(node.attr)

    # Subscript(expr value, slice slice, expr_context ctx)
    def visit_Subscript(self, node):
        self.visit(node.value)
        self._write('[')

        def _process_slice(node):
            if isinstance(node, _ast.Ellipsis):
                self._write('...')
            elif isinstance(node, _ast.Slice):
                # BUGFIX: the default here was the truthy string 'None',
                # which made a missing ``lower`` raise AttributeError.
                if getattr(node, 'lower', None):
                    self.visit(node.lower)
                self._write(':')
                if getattr(node, 'upper', None):
                    self.visit(node.upper)
                if getattr(node, 'step', None):
                    self._write(':')
                    self.visit(node.step)
            elif isinstance(node, _ast.Index):
                self.visit(node.value)
            elif isinstance(node, _ast.ExtSlice):
                self.visit(node.dims[0])
                for dim in node.dims[1:]:
                    self._write(', ')
                    self.visit(dim)
            else:
                # BUGFIX: was ``raise NotImplemented(...)`` — NotImplemented
                # is not callable and not an exception class.
                raise NotImplementedError('Slice type not implemented')
        _process_slice(node.slice)
        self._write(']')

    # Name(identifier id, expr_context ctx)
    def visit_Name(self, node):
        self._write(node.id)

    # List(expr* elts, expr_context ctx)
    def visit_List(self, node):
        self._write('[')
        for elt in node.elts:
            self.visit(elt)
            self._write(', ')
        self._write(']')

    # Tuple(expr *elts, expr_context ctx)
    def visit_Tuple(self, node):
        self._write('(')
        for elt in node.elts:
            self.visit(elt)
            self._write(', ')
        self._write(')')
class ASTTransformer(object):
    """General purpose base class for AST transformations.

    Every visitor method can be overridden to return an AST node that has been
    altered or replaced in some way.
    """

    def visit(self, node):
        """Dispatch to ``visit_<NodeType>``; unknown nodes pass through."""
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
        if visitor is None:
            return node
        return visitor(node)

    def _clone(self, node):
        """Return a copy of *node* with all child nodes visited."""
        clone = node.__class__()
        for name in getattr(clone, '_attributes', ()):
            try:
                # BUGFIX: previously wrote to the literal attribute 'name',
                # so lineno/col_offset were never copied onto the clone.
                setattr(clone, name, getattr(node, name))
            except AttributeError:
                pass
        for name in clone._fields:
            try:
                value = getattr(node, name)
            except AttributeError:
                pass
            else:
                if value is None:
                    pass
                elif isinstance(value, list):
                    value = [self.visit(x) for x in value]
                elif isinstance(value, tuple):
                    value = tuple(self.visit(x) for x in value)
                else:
                    value = self.visit(value)
                setattr(clone, name, value)
        return clone

    visit_Module = _clone
    visit_Interactive = _clone
    visit_Expression = _clone
    visit_Suite = _clone

    visit_FunctionDef = _clone
    visit_ClassDef = _clone
    visit_Return = _clone
    visit_Delete = _clone
    visit_Assign = _clone
    visit_AugAssign = _clone
    visit_Print = _clone
    visit_For = _clone
    visit_While = _clone
    visit_If = _clone
    visit_With = _clone
    visit_Raise = _clone
    visit_TryExcept = _clone
    visit_TryFinally = _clone
    visit_Assert = _clone
    visit_ExceptHandler = _clone
    visit_Import = _clone
    visit_ImportFrom = _clone
    visit_Exec = _clone
    visit_Global = _clone
    visit_Expr = _clone
    # Pass, Break, Continue don't need to be copied

    visit_BoolOp = _clone
    visit_BinOp = _clone
    visit_UnaryOp = _clone
    visit_Lambda = _clone
    visit_IfExp = _clone
    visit_Dict = _clone
    visit_ListComp = _clone
    visit_GeneratorExp = _clone
    visit_Yield = _clone
    visit_Compare = _clone
    visit_Call = _clone
    visit_Repr = _clone
    # Num, Str don't need to be copied

    visit_Attribute = _clone
    visit_Subscript = _clone
    visit_Name = _clone
    visit_List = _clone
    visit_Tuple = _clone

    visit_comprehension = _clone
    visit_excepthandler = _clone
    visit_arguments = _clone
    visit_keyword = _clone
    visit_alias = _clone

    visit_Slice = _clone
    visit_ExtSlice = _clone
    visit_Index = _clone

    del _clone
|
dag/genshi
|
genshi/template/astutil.py
|
Python
|
bsd-3-clause
| 24,467
|
[
"VisIt"
] |
9da73e67ed868bd607238aaaae7a41b4850aebf6b6ba86fea037a314e8ceb3e9
|
# todo: ABC for all displays. Maybe auto-detect fake vs real
from .display_base import DisplayDriver, LEDThing, Gradient
import vtk
class GraphicsDisplay(DisplayDriver):
    """On-screen LED display simulator rendered with VTK spheres."""

    def __init__(self):
        # Standard VTK pipeline: renderer -> render window -> interactor.
        self.__renderer = vtk.vtkRenderer()
        self.__rend_win = vtk.vtkRenderWindow()
        self.__rend_win.SetSize(1280, 1024)
        self.__rend_win.AddRenderer(self.__renderer)
        self.__rend_win_interactor = vtk.vtkRenderWindowInteractor()
        self.__rend_win_interactor.SetRenderWindow(self.__rend_win)
        # Pending (led, rgb) pairs accumulated between render passes.
        self.__update_list = []
        self.__xxx_color = 10
        width, height = self.__rend_win.GetSize()
        #print self.__rend_win.GetSize()
        #super(GraphicsDisplay, self).__init__(width, height)
        super(GraphicsDisplay, self).__init__()

    def _led_display_context(self, led):
        # Create a sphere actor at the LED's position; the actor serves as
        # the per-LED "display context" whose color is updated later.
        assert isinstance(led, LEDThing), \
            '{0} must be an LEDThing'.format(led)
        source = vtk.vtkSphereSource()
        source.SetCenter(led.x, led.y, led.z)
        source.SetRadius(led.radius)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(source.GetOutputPort())
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        self.__renderer.AddActor(actor)
        return actor

    def _start_update(self):
        # Begin a new batch of color updates.
        self.__update_list = []

    def _add_set_to_update(self, led):
        # VTK colors are 0.0-1.0 floats; LED channels are 0-255.
        self.__update_list.append((led,
            (led.red / 255.0, led.green / 255.0, led.blue / 255.0)))

    def _complete_update(self):
        # Apply all queued colors to their actors, then render once.
        #print "start _complete_update"
        for led, color in self.__update_list:
            actor = led.display_context
            actor.GetProperty().SetColor(color[0], color[1], color[2])
        #print("end _complete_update")
        self.__rend_win.Render()
        self.__update_list = []

    def __tick_callback(self, obj, event):
        # Timer event: advance the algorithm unless paused.
        if self.__paused:
            return
        self.__algo_tick_callback()

    def __char_event_callback(self, obj, event):
        # Keyboard handling: space toggles pause, 's' single-steps.
        key = obj.GetKeyCode()
        print "key", key, type(key)
        if key == ' ':
            self.__paused = not self.__paused
            return
        elif key == 's':
            self.__paused = True
            self.__algo_tick_callback()

    def run(self, algo_tick_callback):
        # Hook up timer/keyboard callbacks and enter the VTK event loop.
        # This call blocks until the interactor window is closed.
        self.__paused = False
        self.__algo_tick_callback = algo_tick_callback
        self.__rend_win_interactor.Initialize()
        self.__rend_win.Render()
        self.__rend_win_interactor.AddObserver('TimerEvent', self.__tick_callback)
        self.__rend_win_interactor.AddObserver('CharEvent',
                                               self.__char_event_callback)
        timer_id = self.__rend_win_interactor.CreateRepeatingTimer(10)
        self.__rend_win_interactor.Start()
|
stuart-stanley/stormlight-archive
|
src/displays/graphics_based_displays.py
|
Python
|
apache-2.0
| 2,820
|
[
"VTK"
] |
f3d0b16f1a2794740fdc54c60d096f4532108176e15ced93274f954002e7bfd4
|
#
# general_behavior.py - The primary Owyl behavior tree
# Copyright (C) 2014 Hanson Robotics
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# system imports
import copy
import os
import random
import time
# tool imports
import owyl
from owyl import blackboard
import rospy
import roslib
import ConfigParser
# message imports
from std_msgs.msg import String
from blender_api_msgs.msg import AvailableEmotionStates, AvailableGestures
from blender_api_msgs.msg import EmotionState
from blender_api_msgs.msg import SetGesture
# local stuff.
from face_track import FaceTrack
# Basic holder for emotion-expression properties and probabilities
class Emotion:
    """Mutable record describing one facial expression: its selection
    weight plus the ranges its intensity and duration are drawn from."""
    def __init__(self, name):
        self.name = name
        # Selection weight within its emotion class; 0.0 until configured.
        self.probability = 0.0
        # Uniform-draw range for display intensity.
        self.min_intensity, self.max_intensity = 0.0, 1.0
        # Uniform-draw range for display duration, in seconds.
        self.min_duration, self.max_duration = 5.0, 15.0
# Basic holder for gesture properties and probabilities
class Gesture:
    """Mutable record describing one gesture: its selection weight plus
    the ranges its intensity, repeat count and speed are drawn from."""
    def __init__(self, name):
        self.name = name
        # Selection weight within its gesture class; 0.0 until configured.
        self.probability = 0.0
        # Uniform-draw ranges for intensity, repeat count and speed.
        self.min_intensity, self.max_intensity = 0.0, 1.0
        self.min_repeat, self.max_repeat = 0.0, 1.0
        self.min_speed, self.max_speed = 0.0, 1.0
class Tree():
# ---------
# Config File utilities
    def unpack_config_emotions(self, config, emo_class) :
        """Load one emotion class (e.g. "bored_emotions") from *config*.

        Reads the comma-separated emotion names and the parallel
        probability / intensity / duration lists, builds Emotion objects,
        and stores them on the blackboard under *emo_class*.

        Raises Exception when a per-emotion list's length differs from
        the number of names.
        """
        def get_values(from_config, num_values):
            # Parse a comma-separated float list and enforce its length.
            rtn_values = [float(z.strip()) for z in from_config.split(",")]
            if len(rtn_values) != num_values:
                raise Exception("List lengths don't match!")
            return rtn_values
        names = [x.strip() for x in config.get("emotion", emo_class).split(",")]
        numb = len(names)
        probs = get_values(config.get("emotion", \
            emo_class + "_probabilities"), numb)
        mins = get_values(config.get("emotion", \
            emo_class + "_intensity_min"), numb)
        maxs = get_values(config.get("emotion", \
            emo_class + "_intensity_max"), numb)
        dins = get_values(config.get("emotion", \
            emo_class + "_duration_min"), numb)
        daxs = get_values(config.get("emotion", \
            emo_class + "_duration_max"), numb)
        # Remember the class name so rescale_intensity() can find it later.
        self.blackboard["emotion_classes"].append(emo_class)
        emos = []
        for (n,p,mi,mx,di,dx) in zip (names, probs, mins, maxs, dins, daxs):
            emo = Emotion(n)
            emo.probability = p
            emo.min_intensity = mi
            emo.max_intensity = mx
            emo.min_duration = di
            emo.max_duration = dx
            emos.append(emo)
        self.blackboard[emo_class] = emos
    def unpack_config_gestures(self, config, ges_class) :
        """Load one gesture class (e.g. "bored_gestures") from *config*.

        Mirrors unpack_config_emotions(): reads names and the parallel
        probability / intensity / repeat / speed lists, builds Gesture
        objects, and stores them on the blackboard under *ges_class*.

        Raises Exception when a per-gesture list's length differs from
        the number of names.
        """
        def get_values(from_config, num_values):
            # Parse a comma-separated float list and enforce its length.
            rtn_values = [float(z.strip()) for z in from_config.split(",")]
            if len(rtn_values) != num_values:
                raise Exception("List lengths don't match!")
            return rtn_values
        names = [x.strip() for x in config.get("gesture", ges_class).split(",")]
        numb = len(names)
        probs = get_values(config.get("gesture", \
            ges_class + "_probabilities"), numb)
        mins = get_values(config.get("gesture", \
            ges_class + "_intensity_min"), numb)
        maxs = get_values(config.get("gesture", \
            ges_class + "_intensity_max"), numb)
        rins = get_values(config.get("gesture", \
            ges_class + "_repeat_min"), numb)
        raxs = get_values(config.get("gesture", \
            ges_class + "_repeat_max"), numb)
        sins = get_values(config.get("gesture", \
            ges_class + "_speed_min"), numb)
        saxs = get_values(config.get("gesture", \
            ges_class + "_speed_max"), numb)
        # Remember the class name so rescale_intensity() can find it later.
        self.blackboard["gesture_classes"].append(ges_class)
        gestures = []
        for (n,p,mi,mx,ri,rx,si,sa) in zip (names, probs, mins, maxs, rins, raxs, sins, saxs):
            ges = Gesture(n)
            ges.probability = p
            ges.min_intensity = mi
            ges.max_intensity = mx
            ges.min_repeat = ri
            ges.max_repeat = rx
            ges.min_speed = si
            ges.max_speed = sa
            gestures.append(ges)
        self.blackboard[ges_class] = gestures
def __init__(self):
self.blackboard = blackboard.Blackboard("rig expressions")
config = ConfigParser.ConfigParser()
config.readfp(open(os.path.join(os.path.dirname(__file__), "../behavior.cfg")))
self.blackboard["sadness_happiness"] = config.getfloat("emotion", "sadness_happiness")
self.blackboard["irritation_amusement"] = config.getfloat("emotion", "irritation_amusement")
self.blackboard["confusion_comprehension"] = config.getfloat("emotion", "confusion_comprehension")
self.blackboard["boredom_engagement"] = config.getfloat("emotion", "boredom_engagement")
self.blackboard["recoil_surprise"] = config.getfloat("emotion", "recoil_surprise")
self.blackboard["current_emotion"] = config.get("emotion", "default_emotion")
self.blackboard["current_emotion_intensity"] = config.getfloat("emotion", "default_emotion_intensity")
self.blackboard["current_emotion_duration"] = config.getfloat("emotion", "default_emotion_duration")
self.blackboard["emotion_classes"] = []
self.blackboard["gesture_classes"] = []
self.blackboard["emotion_scale_stage"] = config.getfloat("emotion", "emotion_scale_stage")
self.blackboard["emotion_scale_closeup"] = config.getfloat("emotion", "emotion_scale_closeup")
self.blackboard["gesture_scale_stage"] = config.getfloat("gesture", "gesture_scale_stage")
self.blackboard["gesture_scale_closeup"] = config.getfloat("gesture", "gesture_scale_closeup")
self.unpack_config_emotions(config, "frustrated_emotions")
self.unpack_config_emotions(config, "positive_emotions")
self.unpack_config_emotions(config, "non_positive_emotion")
self.unpack_config_emotions(config, "bored_emotions")
self.unpack_config_emotions(config, "non_bored_emotion")
self.unpack_config_emotions(config, "sleep_emotions")
self.unpack_config_emotions(config, "non_sleep_emotion")
self.unpack_config_emotions(config, "wake_up_emotions")
self.unpack_config_emotions(config, "new_arrival_emotions")
self.unpack_config_gestures(config, "positive_gestures")
self.unpack_config_gestures(config, "bored_gestures")
self.unpack_config_gestures(config, "sleep_gestures")
self.unpack_config_gestures(config, "wake_up_gestures")
self.blackboard["min_duration_for_interaction"] = config.getfloat("interaction", "duration_min")
self.blackboard["max_duration_for_interaction"] = config.getfloat("interaction", "duration_max")
self.blackboard["time_to_change_face_target_min"] = config.getfloat("interaction", "time_to_change_face_target_min")
self.blackboard["time_to_change_face_target_max"] = config.getfloat("interaction", "time_to_change_face_target_max")
self.blackboard["glance_probability"] = config.getfloat("interaction", "glance_probability")
self.blackboard["glance_probability_for_new_faces"] = config.getfloat("interaction", "glance_probability_for_new_faces")
self.blackboard["glance_probability_for_lost_faces"] = config.getfloat("interaction", "glance_probability_for_lost_faces")
self.blackboard["sleep_probability"] = config.getfloat("boredom", "sleep_probability")
self.blackboard["sleep_duration_min"] = config.getfloat("boredom", "sleep_duration_min")
self.blackboard["sleep_duration_max"] = config.getfloat("boredom", "sleep_duration_max")
self.blackboard["search_for_attention_duration_min"] = config.getfloat("boredom", "search_for_attention_duration_min")
self.blackboard["search_for_attention_duration_max"] = config.getfloat("boredom", "search_for_attention_duration_max")
self.blackboard["wake_up_probability"] = config.getfloat("boredom", "wake_up_probability")
self.blackboard["time_to_wake_up"] = config.getfloat("boredom", "time_to_wake_up")
##### Other System Variables #####
self.blackboard["show_expression_since"] = time.time()
# ID's of faces newly seen, or lost. Integer ID.
self.blackboard["new_face"] = 0
self.blackboard["lost_face"] = 0
# IDs of faces in the scene, updated once per cycle
self.blackboard["face_targets"] = []
# IDs of faces in the scene, updated immediately
self.blackboard["background_face_targets"] = []
self.blackboard["current_glance_target"] = 0
self.blackboard["current_face_target"] = 0
self.blackboard["interact_with_face_target_since"] = 0.0
self.blackboard["sleep_since"] = 0.0
self.blackboard["bored_since"] = 0.0
self.blackboard["is_interruption"] = False
self.blackboard["is_sleeping"] = False
self.blackboard["blender_mode"] = ""
self.blackboard["performance_system_on"] = False
self.blackboard["stage_mode"] = False
self.blackboard["random"] = 0.0
##### ROS Connections #####
self.facetrack = FaceTrack(self.blackboard)
rospy.Subscriber("behavior_switch", String, self.behavior_switch_callback)
rospy.Subscriber("/blender_api/available_emotion_states",
AvailableEmotionStates, self.get_emotion_states_cb)
rospy.Subscriber("/blender_api/available_gestures",
AvailableGestures, self.get_gestures_cb)
# cmd_blendermode needs to go away eventually...
self.tracking_mode_pub = rospy.Publisher("/cmd_blendermode", String, queue_size=1, latch=True)
self.emotion_pub = rospy.Publisher("/blender_api/set_emotion_state", EmotionState, queue_size=1)
self.gesture_pub = rospy.Publisher("/blender_api/set_gesture", SetGesture, queue_size=1)
self.tree = self.build_tree()
time.sleep(0.1)
while not rospy.is_shutdown():
self.tree.next()
# Pick a random expression out of the class of expressions,
# and display it. Return the display emotion, or None if none
# were picked.
    def pick_random_expression(self, emo_class_name):
        """Pick an emotion from *emo_class_name* by weighted random draw,
        display it via show_emotion(), and return it.

        Returns None (showing nothing) when the roll lands past the
        cumulative probability of all entries, i.e. the class weights may
        intentionally sum to less than 1.
        """
        random_number = random.random()
        tot = 0
        emo = None
        emos = self.blackboard[emo_class_name]
        # Walk the cumulative distribution until the roll is covered.
        for emotion in emos:
            tot += emotion.probability
            if random_number <= tot:
                emo = emotion
                break
        if emo:
            # Intensity/duration drawn uniformly from the emotion's range.
            intensity = random.uniform(emo.min_intensity, emo.max_intensity)
            duration = random.uniform(emo.min_duration, emo.max_duration)
            self.show_emotion(emo.name, intensity, duration)
        return emo
    def pick_random_gesture(self, ges_class_name):
        """Pick a gesture from *ges_class_name* by weighted random draw,
        display it via show_gesture(), and return it (or None when the
        roll lands past the cumulative probability of all entries).
        """
        random_number = random.random()
        tot = 0
        ges = None
        gestures = self.blackboard[ges_class_name]
        # Walk the cumulative distribution until the roll is covered.
        for gesture in gestures:
            tot += gesture.probability
            if random_number <= tot:
                ges = gesture
                break
        if ges:
            # Parameters drawn uniformly from the gesture's configured ranges.
            intensity = random.uniform(ges.min_intensity, ges.max_intensity)
            repeat = random.uniform(ges.min_repeat, ges.max_repeat)
            speed = random.uniform(ges.min_speed, ges.max_speed)
            self.show_gesture(ges.name, intensity, repeat, speed)
        return ges
# Pick the name of a random emotion, excluding those from
# the exclude list
def pick_random_emotion_name(self, exclude) :
ixnay = [ex.name for ex in exclude]
emos = self.blackboard["emotions"]
if None == emos:
return None
emo_name = random.choice([other for other in emos if other not in ixnay])
return emo_name
# Pick a so-called "instant" or "flash" expression
def pick_instant(self, emo_class, exclude_class) :
emo = self.pick_random_expression(exclude_class)
if emo :
exclude = self.blackboard[emo_class]
emo_name = self.pick_random_emotion_name(exclude)
tense = random.uniform(emo.min_intensity, emo.max_intensity)
durat = random.uniform(emo.min_duration, emo.max_duration)
self.show_emotion(emo_name, tense, durat)
# time.sleep(durat) # XXX Sleep is a bad idea, blocks events ...
return emo_name
# ------------------------------------------------------------------
# The various behavior trees
# Actions that are taken when a face becomes visible.
# If there were no people in the scene, she always interacts with that person
# If she is already interacting with someone else in the scene,
# she will either glance at the new face or ignore it, depends on the dice roll
# If she has been interacting with another person for a while,
# the probability of glancing at a new face is higher
def someone_arrived(self) :
tree = owyl.sequence(
self.is_someone_arrived(),
owyl.selector(
##### There previously were no people in the scene #####
owyl.sequence(
self.were_no_people_in_the_scene(),
self.assign_face_target(variable="current_face_target", value="new_face"),
self.record_start_time(variable="interact_with_face_target_since"),
self.show_expression(emo_class="new_arrival_emotions"),
self.interact_with_face_target(id="current_face_target", new_face=True)
),
##### Currently interacting with someone #####
owyl.sequence(
self.is_interacting_with_someone(),
self.dice_roll(event="glance_new_face"),
self.glance_at_new_face()
),
##### Does Nothing #####
owyl.sequence(
self.print_status(str="----- Ignoring the new face!"),
owyl.succeed()
)
),
self.clear_new_face_target()
)
return tree
# ---------------------------
# Actions that are taken when a face leaves
# If she was interacting with that person, she will be frustrated
# If she was interacting with someone else,
# she will either glance at the lost face or ignore it, depends on the dice roll
def someone_left(self) :
tree = owyl.sequence(
self.is_someone_left(),
owyl.selector(
##### Was Interacting With That Person #####
owyl.sequence(
self.was_interacting_with_that_person(),
self.show_frustrated_expression(),
self.return_to_neutral_position()
),
##### Is Interacting With Someone Else #####
owyl.sequence(
self.is_interacting_with_someone(),
self.dice_roll(event="glance_lost_face"),
self.glance_at_lost_face()
),
##### Does Nothing #####
owyl.sequence(
self.print_status(str="----- Ignoring the lost face!"),
owyl.succeed()
)
),
self.clear_lost_face_target()
)
return tree
# -----------------------------
# Interact with people
# If she is not currently interacting with anyone, or it's time to switch target
# she will start interacting with someone else
# Otherwise she will continue with the current interaction
# she may also glance at other people if there are more than one people in the scene
def interact_with_people(self) :
tree = owyl.sequence(
self.is_face_target(),
owyl.selector(
##### Start A New Interaction #####
owyl.sequence(
owyl.selector(
self.is_not_interacting_with_someone(),
owyl.sequence(
self.is_more_than_one_face_target(),
self.is_time_to_change_face_target()
)
),
self.select_a_face_target(),
self.record_start_time(variable="interact_with_face_target_since"),
self.interact_with_face_target(id="current_face_target", new_face=False)
),
##### Glance At Other Faces & Continue With The Last Interaction #####
owyl.sequence(
self.print_status(str="----- Continue interaction"),
owyl.selector(
owyl.sequence(
self.is_more_than_one_face_target(),
self.dice_roll(event="group_interaction"),
self.select_a_glance_target(),
self.glance_at(id="current_glance_target")
),
owyl.succeed()
),
self.interact_with_face_target(id="current_face_target", new_face=False)
)
)
)
return tree
# -------------------
# Nothing interesting is happening
# she will look around and search for attention
# she may go to sleep, and it's more likely to happen if she has been bored for a while
# she wakes up whenever there's an interruption, e.g. someone arrives
# or after timeout
def nothing_is_happening(self) :
tree = owyl.sequence(
owyl.selector(
##### Is Not Sleeping #####
owyl.sequence(
self.is_not_sleeping(),
owyl.selector(
##### Go To Sleep #####
owyl.sequence(
self.dice_roll(event="go_to_sleep"),
self.record_start_time(variable="sleep_since"),
self.print_status(str="----- Go to sleep!"),
self.go_to_sleep()
),
##### Search For Attention #####
self.search_for_attention()
)
),
##### Is Sleeping #####
owyl.selector(
##### Wake Up #####
owyl.sequence(
self.dice_roll(event="wake_up"),
self.is_time_to_wake_up(),
self.wake_up(),
),
##### Continue To Sleep #####
owyl.sequence(
self.print_status(str="----- Continue to sleep."),
self.go_to_sleep()
)
)
),
##### If Interruption && Sleeping -> Wake Up #####
owyl.sequence(
self.is_interruption(),
self.is_sleeping(),
self.wake_up(),
self.print_status(str="----- Interruption: Wake up!"),
)
)
return tree
# ------------------------------------------------------------------
# Build the main tree
def build_tree(self):
eva_behavior_tree = \
owyl.repeatAlways(
owyl.selector(
owyl.sequence(
self.is_scripted_performance_system_on(),
self.sync_variables(),
########## Main Events ##########
owyl.selector(
self.someone_arrived(),
self.someone_left(),
self.interact_with_people(),
self.nothing_is_happening()
)
),
# Turn on scripted performances
# This point is reached only when scripting is turned off.
owyl.sequence(
self.idle_spin(),
self.is_scripted_performance_system_off(),
self.start_scripted_performance_system()
)
)
)
return owyl.visit(eva_behavior_tree, blackboard=self.blackboard)
# Print a single status message
@owyl.taskmethod
def print_status(self, **kwargs):
print kwargs["str"]
yield True
# Print emotional state
@owyl.taskmethod
def sync_variables(self, **kwargs):
self.blackboard["face_targets"] = self.blackboard["background_face_targets"]
# print "\n========== Emotion Space =========="
# print "Looking at face: " + str(self.blackboard["current_face_target"])
# print "sadness_happiness: " + str(self.blackboard["sadness_happiness"])[:5]
# print "irritation_amusement: " + str(self.blackboard["irritation_amusement"])[:5]
# print "confusion_comprehension: " + str(self.blackboard["confusion_comprehension"])[:5]
# print "boredom_engagement: " + str(self.blackboard["boredom_engagement"])[:5]
# print "recoil_surprise: " + str(self.blackboard["recoil_surprise"])[:5]
# print "Current Emotion: " + self.blackboard["current_emotion"] + " (" + str(self.blackboard["current_emotion_intensity"])[:5] + ")"
yield True
# @owyl.taskmethod
# def set_emotion(self, **kwargs):
# self.blackboard[kwargs["variable"]] = kwargs["value"]
# yield True
# @owyl.taskmethod
# def update_emotion(self, **kwargs):
# if kwargs["lower_limit"] > 0.0:
# self.blackboard[kwargs["variable"]] = kwargs["lower_limit"]
# self.blackboard[kwargs["variable"]] *= random.uniform(kwargs["min"], kwargs["max"])
# if self.blackboard[kwargs["variable"]] > 1.0:
# self.blackboard[kwargs["variable"]] = 1.0
# elif self.blackboard[kwargs["variable"]] <= 0.0:
# self.blackboard[kwargs["variable"]] = 0.01
# yield True
    @owyl.taskmethod
    def dice_roll(self, **kwargs):
        """Probabilistic gate used throughout the behavior trees.

        kwargs["event"] selects which blackboard probability applies;
        yields True when the roll succeeds, else False.
        """
        if kwargs["event"] == "glance_new_face":
            # The longer the current interaction has run, the larger the
            # skew, so glancing at a newcomer becomes more likely.
            if self.blackboard["glance_probability_for_new_faces"] > 0 and self.blackboard["interact_with_face_target_since"] > 0:
                skew = (time.time() - self.blackboard["interact_with_face_target_since"]) / self.blackboard["time_to_change_face_target_max"]
                if random.random() < self.blackboard["glance_probability_for_new_faces"] + skew:
                    yield True
                else:
                    yield False
            else:
                yield False
        elif kwargs["event"] == "group_interaction":
            if random.random() < self.blackboard["glance_probability"]:
                yield True
            else:
                yield False
        elif kwargs["event"] == "go_to_sleep":
            # Sleep grows more likely the longer she has been bored.
            if self.blackboard["sleep_probability"] > 0 and self.blackboard["bored_since"] > 0:
                skew = (time.time() - self.blackboard["bored_since"]) / \
                    (self.blackboard["search_for_attention_duration_max"] / self.blackboard["sleep_probability"])
                if random.random() < self.blackboard["sleep_probability"] + skew:
                    yield True
                else:
                    yield False
            else:
                yield False
        elif kwargs["event"] == "wake_up":
            if random.random() < self.blackboard["wake_up_probability"]:
                yield True
            else:
                yield False
        else:
            # Unknown events fall back to a fair coin flip.
            if random.random() > 0.5:
                yield True
            else:
                yield False
@owyl.taskmethod
def is_someone_arrived(self, **kwargs):
self.blackboard["is_interruption"] = False
if self.blackboard["new_face"] > 0:
self.blackboard["bored_since"] = 0
print("----- Someone arrived! id: " + str(self.blackboard["new_face"]))
yield True
else:
yield False
@owyl.taskmethod
def is_someone_left(self, **kwargs):
self.blackboard["is_interruption"] = False
if self.blackboard["lost_face"] > 0:
print("----- Someone left! id: " + str(self.blackboard["lost_face"]))
yield True
else:
yield False
@owyl.taskmethod
def is_interacting_with_someone(self, **kwargs):
if self.blackboard["current_face_target"]:
"----- Is Interacting With Someone!"
yield True
else:
yield False
@owyl.taskmethod
def is_not_interacting_with_someone(self, **kwargs):
if not self.blackboard["current_face_target"]:
yield True
else:
yield False
@owyl.taskmethod
def were_no_people_in_the_scene(self, **kwargs):
if len(self.blackboard["face_targets"]) == 1:
print("----- Previously, no one in the scene!")
yield True
else:
yield False
@owyl.taskmethod
def was_interacting_with_that_person(self, **kwargs):
if self.blackboard["current_face_target"] == self.blackboard["lost_face"]:
self.blackboard["current_face_target"] = 0
print("----- Lost face " + str(self.blackboard["lost_face"]) +
", but was interacting with them!")
yield True
else:
yield False
@owyl.taskmethod
def is_face_target(self, **kwargs):
if len(self.blackboard["face_targets"]) > 0:
yield True
else:
yield False
@owyl.taskmethod
def is_more_than_one_face_target(self, **kwargs):
if len(self.blackboard["face_targets"]) > 1:
yield True
else:
yield False
@owyl.taskmethod
def is_time_to_change_face_target(self, **kwargs):
if self.blackboard["interact_with_face_target_since"] > 0 and \
(time.time() - self.blackboard["interact_with_face_target_since"]) >= \
random.uniform(self.blackboard["time_to_change_face_target_min"], self.blackboard["time_to_change_face_target_max"]):
print "----- Time to start a new interaction!"
yield True
else:
yield False
@owyl.taskmethod
def is_time_to_wake_up(self, **kwargs):
if self.blackboard["sleep_since"] > 0 and (time.time() - self.blackboard["sleep_since"]) >= self.blackboard["time_to_wake_up"]:
yield True
else:
yield False
@owyl.taskmethod
def is_sleeping(self, **kwargs):
if self.blackboard["is_sleeping"]:
yield True
else:
yield False
@owyl.taskmethod
def is_not_sleeping(self, **kwargs):
if not self.blackboard["is_sleeping"]:
yield True
else:
yield False
@owyl.taskmethod
def is_interruption(self, **kwargs):
if self.blackboard["is_interruption"]:
yield True
else:
yield False
@owyl.taskmethod
def is_scripted_performance_system_on(self, **kwargs):
if self.blackboard["performance_system_on"]:
yield True
else:
yield False
@owyl.taskmethod
def is_scripted_performance_system_off(self, **kwargs):
if not self.blackboard["performance_system_on"]:
yield True
else:
yield False
@owyl.taskmethod
def assign_face_target(self, **kwargs):
self.blackboard[kwargs["variable"]] = self.blackboard[kwargs["value"]]
yield True
@owyl.taskmethod
def select_a_face_target(self, **kwargs):
self.blackboard["current_face_target"] = random.choice(self.blackboard["face_targets"])
yield True
@owyl.taskmethod
def select_a_glance_target(self, **kwargs):
target = random.choice(self.blackboard["face_targets"])
while target == self.blackboard["current_face_target"]:
target = random.choice(self.blackboard["face_targets"])
self.blackboard["current_glance_target"] = target
yield True
@owyl.taskmethod
def record_start_time(self, **kwargs):
self.blackboard[kwargs["variable"]] = time.time()
yield True
    @owyl.taskmethod
    def interact_with_face_target(self, **kwargs):
        """Look at the face whose id is stored under
        blackboard[kwargs["id"]], optionally show a positive
        expression/gesture, then dwell on the face for a random,
        interruptible interaction duration."""
        # Switch blender into face-tracking mode once, not on every tick.
        if self.blackboard["blender_mode"] != "TrackDev":
            self.tracking_mode_pub.publish("TrackDev")
            self.blackboard["blender_mode"] = "TrackDev"
            time.sleep(0.1)
        face_id = self.blackboard[kwargs["id"]]
        self.facetrack.look_at_face(face_id)
        if self.should_show_expression("positive_emotions") or kwargs["new_face"]:
            # Show a positive expression, either with or without an instant expression in advance
            # NOTE(review): "non_positive_emotion_probabilities" is never
            # written to the blackboard in this file (unpack_config_emotions
            # stores only the per-class Emotion lists), so this lookup looks
            # like it would raise KeyError at runtime -- confirm the intended
            # key/value.
            if random.random() < self.blackboard["non_positive_emotion_probabilities"]:
                self.pick_instant("positive_emotions", "non_positive_emotion")
            else:
                self.pick_random_expression("positive_emotions")
            ##### Show A Positive Gesture #####
            self.pick_random_gesture("positive_gestures")
        interval = 0.01
        duration = random.uniform(self.blackboard["min_duration_for_interaction"], self.blackboard["max_duration_for_interaction"])
        print "----- Interacting w/face id:" + str(face_id) + " for " + str(duration)[:5] + " seconds"
        self.break_if_interruptions(interval, duration)
        yield True
@owyl.taskmethod
def glance_at(self, **kwargs):
face_id = self.blackboard[kwargs["id"]]
print "----- Glancing at face:" + str(face_id)
glance_seconds = 1
self.facetrack.glance_at_face(face_id, glance_seconds)
yield True
@owyl.taskmethod
def glance_at_new_face(self, **kwargs):
face_id = self.blackboard["new_face"]
print "----- Glancing at new face:" + str(face_id)
glance_seconds = 1
self.facetrack.glance_at_face(face_id, glance_seconds)
yield True
@owyl.taskmethod
def glance_at_lost_face(self, **kwargs):
print "----- Glancing at lost face:" + str(self.blackboard["lost_face"])
face_id = self.blackboard["lost_face"]
self.facetrack.glance_at_face(face_id, 1)
yield True
@owyl.taskmethod
def show_expression(self, **kwargs):
self.pick_random_expression(kwargs["emo_class"])
yield True
@owyl.taskmethod
def show_frustrated_expression(self, **kwargs):
self.pick_random_expression("frustrated_emotions")
yield True
@owyl.taskmethod
def return_to_neutral_position(self, **kwargs):
self.facetrack.look_at_face(0)
yield True
# Accept an expression name, intensity and duration, and publish it
# as a ros message.
    def show_emotion(self, expression, intensity, duration):
        """Publish an EmotionState ROS message for *expression* and record
        it on the blackboard as the current emotion.

        *duration* (seconds, float) is split into whole secs plus nsecs
        for the ROS duration field.
        """
        # Update the blackboard
        self.blackboard["current_emotion"] = expression
        self.blackboard["current_emotion_intensity"] = intensity
        self.blackboard["current_emotion_duration"] = duration
        # Create the message
        exp = EmotionState()
        exp.name = self.blackboard["current_emotion"]
        exp.magnitude = self.blackboard["current_emotion_intensity"]
        intsecs = int(duration)
        exp.duration.secs = intsecs
        exp.duration.nsecs = 1000000000 * (duration - intsecs)
        self.emotion_pub.publish(exp)
        print "----- Show expression: " + expression + " (" + str(intensity)[:5] + ") for " + str(duration)[:4] + " seconds"
        # Timestamp consumed by should_show_expression() for rate limiting.
        self.blackboard["show_expression_since"] = time.time()
# Accept an gesture name, intensity, repeat (perform how many times) and speed
# and then publish it as a ros message.
def show_gesture(self, gesture, intensity, repeat, speed):
ges = SetGesture()
ges.name = gesture
ges.magnitude = intensity
ges.repeat = repeat
ges.speed = speed
self.gesture_pub.publish(ges)
print "----- Show gesture: " + gesture + " (" + str(intensity)[:5] + ")"
    @owyl.taskmethod
    def search_for_attention(self, **kwargs):
        """Idle behavior: look around the room, optionally show a bored
        expression/gesture, and wait an interruptible random duration."""
        print("----- Search for attention")
        # Start the boredom clock on the first idle pass only; dice_roll's
        # "go_to_sleep" skew grows from this timestamp.
        if self.blackboard["bored_since"] == 0:
            self.blackboard["bored_since"] = time.time()
        if self.blackboard["blender_mode"] != "LookAround":
            self.tracking_mode_pub.publish("LookAround")
            self.blackboard["blender_mode"] = "LookAround"
        if self.should_show_expression("bored_emotions"):
            # Show a bored expression, either with or without an instant expression in advance
            # NOTE(review): "non_bored_emotion_probabilities" is never put on
            # the blackboard (see unpack_config_emotions), so this lookup
            # looks like it would raise KeyError -- confirm intended value.
            if random.random() < self.blackboard["non_bored_emotion_probabilities"]:
                self.pick_instant("bored_emotions", "non_bored_emotion")
            else:
                self.pick_random_expression("bored_emotions")
            ##### Show A Bored Gesture #####
            self.pick_random_gesture("bored_gestures")
        interval = 0.01
        duration = random.uniform(self.blackboard["search_for_attention_duration_min"], self.blackboard["search_for_attention_duration_max"])
        self.break_if_interruptions(interval, duration)
        yield True
# To determine whether it is a good time to show another expression
# Can be used to avoid making expressions too frequently
def should_show_expression(self, emo_class):
if (time.time() - self.blackboard["show_expression_since"]) >= (self.blackboard["current_emotion_duration"] / 4):
return True
else:
return False
@owyl.taskmethod
def go_to_sleep(self, **kwargs):
self.blackboard["is_sleeping"] = True
self.blackboard["bored_since"] = 0.0
##### Show A Sleep Expression #####
self.pick_random_emotion_name(self.blackboard["sleep_emotions"])
##### Show A Sleep Gesture #####
self.pick_random_gesture("sleep_gestures")
interval = 0.01
duration = random.uniform(self.blackboard["sleep_duration_min"], self.blackboard["sleep_duration_max"])
self.break_if_interruptions(interval, duration)
yield True
@owyl.taskmethod
def wake_up(self, **kwargs):
print "----- Wake up!"
self.blackboard["is_sleeping"] = False
self.blackboard["sleep_since"] = 0.0
self.blackboard["bored_since"] = 0.0
##### Show A Wake Up Expression #####
self.pick_random_expression("wake_up_emotions")
##### Show A Wake Up Gesture #####
self.pick_random_gesture("wake_up_gestures")
yield True
@owyl.taskmethod
def clear_new_face_target(self, **kwargs):
if not self.blackboard["is_interruption"]:
print "----- Cleared new face: " + str(self.blackboard["new_face"])
self.blackboard["new_face"] = 0
yield True
@owyl.taskmethod
def clear_lost_face_target(self, **kwargs):
print "----- Cleared lost face: " + str(self.blackboard["lost_face"])
self.blackboard["lost_face"] = 0
yield True
# XXX old-style API -- should be removed.
@owyl.taskmethod
def start_scripted_performance_system(self, **kwargs):
if self.blackboard["blender_mode"] != "Dummy":
# No need to set Dummy mode
#self.tracking_mode_pub.publish("Dummy")
self.blackboard["blender_mode"] = "Dummy"
yield True
# This avoids burning CPU time when the behavior system is off.
# Mostly it sleeps, and periodically checks for interrpt messages.
@owyl.taskmethod
def idle_spin(self, **kwargs):
if self.blackboard["performance_system_on"]:
yield True
# Sleep for 1 second.
time.sleep(1)
yield True
def break_if_interruptions(self, interval, duration):
while duration > 0:
time.sleep(interval)
duration -= interval
if self.blackboard["is_interruption"]:
break
# Return the subset of 'core' strings that are in 'avail' strings.
# Note that 'avail' strings might contain longer names,
# e.g. "happy-3", whereas core just contains "happy". We want to
# return "happy-3" in that case, as well as happy-2 and happy-1
# if they are there.
def set_intersect(self, emo_class, avail) :
emos = self.blackboard[emo_class]
rev = []
for emo in emos:
for a in avail:
if emo.name in a:
# Copy the emotion, but give it the new name!
nemo = copy.deepcopy(emo)
nemo.name = a
rev.append(nemo)
# Now, renormalize the probabilities
tot = 0.0
for emo in rev:
tot += emo.probability
for emo in rev:
emo.probability /= tot
self.blackboard[emo_class] = rev
# Get the list of available emotions. Update our master list,
# and cull the various subclasses appropriately.
def get_emotion_states_cb(self, msg) :
print("Available Emotion States:" + str(msg.data))
# Update the complete list of emtions.
self.blackboard["emotions"] = msg.data
# Reconcile the other classes
self.set_intersect("frustrated_emotions", msg.data)
self.set_intersect("positive_emotions", msg.data)
self.set_intersect("bored_emotions", msg.data)
self.set_intersect("sleep_emotions", msg.data)
self.set_intersect("wake_up_emotions", msg.data)
self.set_intersect("new_arrival_emotions", msg.data)
def get_gestures_cb(self, msg) :
print("Available Gestures:" + str(msg.data))
# Rescale the intensity of the expressions.
def rescale_intensity(self, emo_scale, gest_scale) :
for emo_class in self.blackboard["emotion_classes"]:
for emo in self.blackboard[emo_class]:
emo.min_intensity *= emo_scale
emo.max_intensity *= emo_scale
for ges_class in self.blackboard["gesture_classes"]:
for ges in self.blackboard[ges_class]:
ges.min_intensity *= gest_scale
ges.max_intensity *= gest_scale
    # Turn behaviors on and off.
    def behavior_switch_callback(self, data):
        """ROS callback switching the behavior tree between off ("btree_off"),
        close-up mode ("btree_on") and stage mode ("btree_on_stage").

        Intensity scales are applied multiplicatively via
        rescale_intensity(), so switching from one mode to the other first
        divides out the previous mode's scale.
        NOTE(review): receiving "btree_on" repeatedly while already in
        close-up mode re-applies the close-up scale each time — confirm
        whether callers guarantee alternating on/off messages.
        """
        if data.data == "btree_on":
            self.blackboard["is_interruption"] = False
            emo_scale = self.blackboard["emotion_scale_closeup"]
            ges_scale = self.blackboard["gesture_scale_closeup"]
            # If the current mode is stage mode, then tone things down.
            if self.blackboard["stage_mode"]:
                print("----- Switch to close-up mode")
                # Undo the stage scaling before applying close-up scaling.
                emo_scale /= self.blackboard["emotion_scale_stage"]
                ges_scale /= self.blackboard["gesture_scale_stage"]
            else:
                print("----- Behavior tree enabled, closeup mode.")
            self.rescale_intensity(emo_scale, ges_scale)
            self.blackboard["stage_mode"] = False
            self.blackboard["performance_system_on"] = True
        elif data.data == "btree_on_stage":
            self.blackboard["is_interruption"] = False
            emo_scale = self.blackboard["emotion_scale_stage"]
            ges_scale = self.blackboard["gesture_scale_stage"]
            # If previously in close-up mode, exaggerate the emotions
            # for the stage settting.
            if self.blackboard["performance_system_on"] and not self.blackboard["stage_mode"]:
                print("----- Switch to stage mode")
                # Undo the close-up scaling before applying stage scaling.
                emo_scale /= self.blackboard["emotion_scale_closeup"]
                ges_scale /= self.blackboard["gesture_scale_closeup"]
            else:
                print("----- Behavior tree enabled, stage mode.")
            self.rescale_intensity(emo_scale, ges_scale)
            self.blackboard["stage_mode"] = True
            self.blackboard["performance_system_on"] = True
        elif data.data == "btree_off":
            # Disable the tree: flag an interruption so running behaviors
            # bail out, and clear the mode flags.
            self.blackboard["is_interruption"] = True
            self.blackboard["performance_system_on"] = False
            self.blackboard["stage_mode"] = False
            print("---- Behavior tree disabled")
|
linas/eva_behavior
|
src/general_behavior.py
|
Python
|
lgpl-2.1
| 34,678
|
[
"VisIt"
] |
0f89405e7414a1f5a54b51bc3c474c60f4e805e9de5b98235c2b3b769645f8a6
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import math
import re
import os
import textwrap
import warnings
from collections import OrderedDict, deque
from io import StringIO
import numpy as np
from functools import partial
from pathlib import Path
from inspect import getfullargspec as getargspec
from itertools import groupby
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.io import zopen
from monty.dev import requires
from pymatgen.util.coord import in_coord_list_pbc, pbc_diff, \
find_in_coord_list_pbc
from monty.string import remove_non_ascii
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import SpaceGroup, SYMM_DATA
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.core import Magmom
from pymatgen.core.operations import MagSymmOp
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
BibliographyData, Entry = None, None
"""
Wrapper classes for Cif input and output from Structures.
"""
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Normalize a space-group symbol by stripping whitespace and underscores, so
# lookups tolerate the various spellings found in CIF files.
sub_spgrp = partial(re.sub, r"[\s_]", "")

# Map normalized Hermann-Mauguin symbols back to their canonical spelling.
# (The original code built this dict and then immediately .update()d it with
# an identical comprehension; the redundant update has been removed.)
space_groups = {sub_spgrp(k): k
                for k in SYMM_DATA['space_group_encoding'].keys()}
# Lazily-loaded, module-level cache for the COD symmetry-operations table.
_COD_DATA = None


def _get_cod_data():
    """Load (once) and return the COD symmetry-operation data bundled with
    pymatgen as ``symmetry/symm_ops.json``."""
    global _COD_DATA
    if _COD_DATA is None:
        import json

        import pymatgen
        path = os.path.join(pymatgen.symmetry.__path__[0], "symm_ops.json")
        with open(path) as f:
            _COD_DATA = json.load(f)
    return _COD_DATA
class CifBlock:
    """One ``data_`` block of a CIF file: a flat key/value store plus the
    loop structure needed to round-trip the block back to text."""

    # not quite 80 so we can deal with semicolons and things
    maxlen = 70

    def __init__(self, data, loops, header):
        """
        Object for storing cif data. All data is stored in a single dictionary.
        Data inside loops are stored in lists in the data dictionary, and
        information on which keys are grouped together are stored in the loops
        attribute.

        Args:
            data: dict or OrderedDict of data to go into the cif. Values should
                be convertible to string, or lists of these if the key is
                in a loop
            loops: list of lists of keys, grouped by which loop they should
                appear in
            header: name of the block (appears after the data_ on the first
                line)
        """
        self.loops = loops
        self.data = data
        # AJ says: CIF Block names cannot be more than 75 characters or you
        # get an Exception
        self.header = header[:74]

    def __eq__(self, other):
        return self.loops == other.loops \
            and self.data == other.data \
            and self.header == other.header

    def __getitem__(self, key):
        return self.data[key]

    def __str__(self):
        """
        Returns the cif string for the data block
        """
        s = ["data_{}".format(self.header)]
        keys = self.data.keys()
        written = []
        for k in keys:
            if k in written:
                continue
            for l in self.loops:
                # search for a corresponding loop
                if k in l:
                    s.append(self._loop_to_string(l))
                    written.extend(l)
                    break
            if k not in written:
                # k didn't belong to a loop
                v = self._format_field(self.data[k])
                if len(k) + len(v) + 3 < self.maxlen:
                    s.append("{} {}".format(k, v))
                else:
                    s.extend([k, v])
        return "\n".join(s)

    def _loop_to_string(self, loop):
        # Emit a "loop_" header, one key per line, then the tabulated rows,
        # wrapping at maxlen and giving semicolon-delimited fields their
        # own lines.
        s = "loop_"
        for l in loop:
            s += '\n ' + l
        for fields in zip(*[self.data[k] for k in loop]):
            line = "\n"
            for val in map(self._format_field, fields):
                if val[0] == ";":
                    s += line + "\n" + val
                    line = "\n"
                elif len(line) + len(val) + 2 < self.maxlen:
                    line += "  " + val
                else:
                    s += line
                    line = '\n  ' + val
            s += line
        return s

    def _format_field(self, v):
        # Stringify a value and, when needed, wrap it in a semicolon text
        # block (too long) or quotes (embedded spaces / leading underscore)
        # so it survives CIF tokenization.
        v = str(v).strip()  # idiomatic str(v) instead of v.__str__()
        if len(v) > self.maxlen:
            return ';\n' + textwrap.fill(v, self.maxlen) + '\n;'
        # add quotes if necessary
        if v == '':
            return '""'
        if (" " in v or v[0] == "_") \
                and not (v[0] == "'" and v[-1] == "'") \
                and not (v[0] == '"' and v[-1] == '"'):
            if "'" in v:
                q = '"'
            else:
                q = "'"
            v = q + v + q
        return v

    @classmethod
    def _process_string(cls, string):
        """Tokenize a CIF string into a deque of regex-match tuples
        (plain, single-quoted, double-quoted, multiline)."""
        # remove comments
        string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
        # remove empty lines
        string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
        # remove non-ascii (inline equivalent of monty's remove_non_ascii)
        string = "".join(ch for ch in string if ord(ch) < 128)
        # since line breaks in .cif files are mostly meaningless,
        # break up into a stream of tokens to parse, rejoining multiline
        # strings (between semicolons)
        q = deque()
        multiline = False
        ml = []
        # this regex splits on spaces, except when in quotes.
        # starting quotes must not be preceded by non-whitespace
        # (these get eaten by the first expression)
        # ending quotes must not be followed by non-whitespace
        p = re.compile(r'''([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)''')
        for l in string.splitlines():
            if multiline:
                if l.startswith(";"):
                    # closing semicolon: emit the joined multiline field
                    multiline = False
                    q.append(('', '', '', ' '.join(ml)))
                    ml = []
                    l = l[1:].strip()
                else:
                    ml.append(l)
                    continue
            if l.startswith(";"):
                multiline = True
                ml.append(l[1:].strip())
            else:
                for s in p.findall(l):
                    # s is tuple. location of the data in the tuple
                    # depends on whether it was quoted in the input
                    q.append(s)
        return q

    @classmethod
    def from_string(cls, string):
        """Parse a single ``data_`` block from `string` into a CifBlock."""
        q = cls._process_string(string)
        header = q.popleft()[0][5:]  # strip the leading "data_"
        data = OrderedDict()
        loops = []
        while q:
            s = q.popleft()
            # cif keys aren't in quotes, so show up in s[0]
            if s[0] == "_eof":
                break
            if s[0].startswith("_"):
                try:
                    data[s[0]] = "".join(q.popleft())
                except IndexError:
                    data[s[0]] = ""
            elif s[0].startswith("loop_"):
                columns = []
                items = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or not s[0].startswith("_"):
                        break
                    columns.append("".join(q.popleft()))
                    data[columns[-1]] = []
                while q:
                    s = q[0]
                    if s[0].startswith("loop_") or s[0].startswith("_"):
                        break
                    items.append("".join(q.popleft()))
                # distribute the flat item list row-by-row over the columns
                n = len(items) // len(columns)
                assert len(items) % n == 0
                loops.append(columns)
                for k, v in zip(columns * n, items):
                    data[k].append(v.strip())
            elif "".join(s).strip() != "":
                warnings.warn("Possible error in cif format"
                              " error at {}".format("".join(s).strip()))
        return cls(data, loops, header)
class CifFile:
    """
    Reads and parses CifBlocks from a .cif file or string
    """

    def __init__(self, data, orig_string=None, comment=None):
        """
        Args:
            data (OrderedDict): Of CifBlock objects.
            orig_string (str): The original cif string.
            comment (str): Comment string.
        """
        # (Fixed a stray mis-encoded character that had crept into the
        # docstring above.)
        self.data = data
        self.orig_string = orig_string
        self.comment = comment or "# generated using pymatgen"

    def __str__(self):
        # Comment header followed by each block's CIF text.
        s = ["%s" % v for v in self.data.values()]
        return self.comment + "\n" + "\n".join(s) + "\n"

    @classmethod
    def from_string(cls, string):
        """Parse every ``data_`` block in `string`, keyed by block header."""
        d = OrderedDict()
        for x in re.split(r"^\s*data_", "x\n" + string,
                          flags=re.MULTILINE | re.DOTALL)[1:]:
            # Skip over Cif block that contains powder diffraction data.
            # Some elements in this block were missing from CIF files in
            # Springer materials/Pauling file DBs.
            # This block anyway does not contain any structure information, and
            # CifParser was also not parsing it.
            # (maxsplit is passed by keyword: positional maxsplit to
            # re.split is deprecated as of Python 3.13.)
            if 'powder_pattern' in re.split(r"\n", x, maxsplit=1)[0]:
                continue
            c = CifBlock.from_string("data_" + x)
            d[c.header] = c
        return cls(d, string)

    @classmethod
    def from_file(cls, filename):
        """Read a CIF file (transparently handling gzip/bzip2) and parse it."""
        with zopen(str(filename), "rt", errors="replace") as f:
            return cls.from_string(f.read())
class CifParser:
    """
    Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
    issue warnings if corrections applied. These are also stored in the
    CifParser's ``errors`` attribute (a list of human-readable strings).

    Args:
        filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
        occupancy_tolerance (float): If total occupancy of a site is between 1
            and occupancy_tolerance, the occupancies will be scaled down to 1.
        site_tolerance (float): This tolerance is used to determine if two
            sites are sitting in the same position, in which case they will be
            combined to a single disordered site. Defaults to 1e-4.
    """
    def __init__(self, filename, occupancy_tolerance=1., site_tolerance=1e-4):
        """Parse the CIF immediately: read the file/stream, detect magCIF
        features, and sanitize each data block. See the class docstring for
        parameter semantics."""
        self._occupancy_tolerance = occupancy_tolerance
        self._site_tolerance = site_tolerance
        if isinstance(filename, (str, Path)):
            self._cif = CifFile.from_file(filename)
        else:
            # Assume a file-like object (e.g. the StringIO built by
            # CifParser.from_string()).
            self._cif = CifFile.from_string(filename.read())

        # store if CIF contains features from non-core CIF dictionaries
        # e.g. magCIF
        self.feature_flags = {}
        # Human-readable notes about any out-of-spec corrections applied.
        self.errors = []

        def is_magcif():
            """
            Checks to see if file appears to be a magCIF file (heuristic).
            """
            # Doesn't seem to be a canonical way to test if file is magCIF or
            # not, so instead check for magnetic symmetry datanames
            prefixes = ['_space_group_magn', '_atom_site_moment',
                        '_space_group_symop_magn']
            for d in self._cif.data.values():
                for k in d.data.keys():
                    for prefix in prefixes:
                        if prefix in k:
                            return True
            return False

        self.feature_flags['magcif'] = is_magcif()

        def is_magcif_incommensurate():
            """
            Checks to see if file contains an incommensurate magnetic
            structure (heuristic).
            """
            # Doesn't seem to be a canonical way to test if magCIF file
            # describes incommensurate structure or not, so instead check
            # for common datanames
            if not self.feature_flags["magcif"]:
                return False
            prefixes = ['_cell_modulation_dimension', '_cell_wave_vector']
            for d in self._cif.data.values():
                for k in d.data.keys():
                    for prefix in prefixes:
                        if prefix in k:
                            return True
            return False

        self.feature_flags['magcif_incommensurate'] = is_magcif_incommensurate()

        # Sanitizing must come after feature detection: _sanitize_data reads
        # self.feature_flags["magcif"].
        for k in self._cif.data.keys():
            # pass individual CifBlocks to _sanitize_data
            self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
    def _sanitize_data(self, data):
        """
        Some CIF files do not conform to spec. This function corrects
        known issues, particular in regards to Springer materials/
        Pauling files.

        This function is here so that CifParser can assume its
        input conforms to spec, simplifying its implementation.
        :param data: CifBlock
        :return: data CifBlock
        """

        """
        This part of the code deals with handling formats of data as found in
        CIF files extracted from the Springer Materials/Pauling File
        databases, and that are different from standard ICSD formats.
        """

        # check for implicit hydrogens, warn if any present
        if "_atom_site_attached_hydrogens" in data.data.keys():
            attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
                                  if str2float(x) != 0]
            if len(attached_hydrogens) > 0:
                self.errors.append("Structure has implicit hydrogens defined, "
                                   "parsed structure unlikely to be suitable for use "
                                   "in calculations unless hydrogens added.")

        # Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
        # not contain this key.
        if "_atom_site_type_symbol" in data.data.keys():

            # Keep a track of which data row needs to be removed.
            # Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
            # 'rhombic dodecahedron, Nb<sub>14</sub>'
            # Without this code, the above row in a structure would be parsed
            # as an ordered site with only Nb (since
            # CifParser would try to parse the first two characters of the
            # label "Nb,Zr") and occupancy=1.
            # However, this site is meant to be a disordered site with 0.8 of
            # Nb and 0.2 of Zr.
            idxs_to_remove = []

            # Replacement rows for each disordered site that gets expanded.
            new_atom_site_label = []
            new_atom_site_type_symbol = []
            new_atom_site_occupancy = []
            new_fract_x = []
            new_fract_y = []
            new_fract_z = []

            for idx, el_row in enumerate(data["_atom_site_label"]):

                # CIF files from the Springer Materials/Pauling File have
                # switched the label and symbol. Thus, in the
                # above shown example row, '0.8Nb + 0.2Zr' is the symbol.
                # Below, we split the strings on ' + ' to
                # check if the length (or number of elements) in the label and
                # symbol are equal.
                if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
                        len(data["_atom_site_label"][idx].split(' + ')):

                    # Dictionary to hold extracted elements and occupancies
                    els_occu = {}

                    # parse symbol to get element names and occupancy and store
                    # in "els_occu"
                    symbol_str = data["_atom_site_type_symbol"][idx]
                    symbol_str_lst = symbol_str.split(' + ')
                    for elocc_idx in range(len(symbol_str_lst)):
                        # Remove any bracketed items in the string
                        symbol_str_lst[elocc_idx] = re.sub(
                            r'\([0-9]*\)', '',
                            symbol_str_lst[elocc_idx].strip())

                        # Extract element name and its occupancy from the
                        # string, and store it as a
                        # key-value pair in "els_occ".
                        els_occu[str(re.findall(r'\D+', symbol_str_lst[
                            elocc_idx].strip())[1]).replace('<sup>', '')] = \
                            float('0' + re.findall(r'\.?\d+', symbol_str_lst[
                                elocc_idx].strip())[1])

                    x = str2float(data["_atom_site_fract_x"][idx])
                    y = str2float(data["_atom_site_fract_y"][idx])
                    z = str2float(data["_atom_site_fract_z"][idx])

                    for et, occu in els_occu.items():
                        # new atom site labels have 'fix' appended
                        new_atom_site_label.append(
                            et + '_fix' + str(len(new_atom_site_label)))
                        new_atom_site_type_symbol.append(et)
                        new_atom_site_occupancy.append(str(occu))
                        new_fract_x.append(str(x))
                        new_fract_y.append(str(y))
                        new_fract_z.append(str(z))

                    idxs_to_remove.append(idx)

            # Remove the original row by iterating over all keys in the CIF
            # data looking for lists, which indicates
            # multiple data items, one for each row, and remove items from the
            # list that corresponds to the removed row,
            # so that it's not processed by the rest of this function (which
            # would result in an error).
            for original_key in data.data:
                if isinstance(data.data[original_key], list):
                    # NOTE(review): 'id' shadows the builtin; harmless here
                    # but worth renaming in a behavior-changing pass.
                    for id in sorted(idxs_to_remove, reverse=True):
                        del data.data[original_key][id]

            if len(idxs_to_remove) > 0:
                self.errors.append("Pauling file corrections applied.")

                # Append the expanded per-element rows for the removed sites.
                data.data["_atom_site_label"] += new_atom_site_label
                data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
                data.data["_atom_site_occupancy"] += new_atom_site_occupancy
                data.data["_atom_site_fract_x"] += new_fract_x
                data.data["_atom_site_fract_y"] += new_fract_y
                data.data["_atom_site_fract_z"] += new_fract_z

        """
        This fixes inconsistencies in naming of several magCIF tags
        as a result of magCIF being in widespread use prior to
        specification being finalized (on advice of Branton Campbell).
        """

        if self.feature_flags["magcif"]:

            # CIF-1 style has all underscores, interim standard
            # had period before magn instead of before the final
            # component (e.g. xyz)
            # we want to standardize on a specific key, to simplify
            # parsing code
            correct_keys = ["_space_group_symop_magn_operation.xyz",
                            "_space_group_symop_magn_centering.xyz",
                            "_space_group_magn.name_BNS",
                            "_space_group_magn.number_BNS",
                            "_atom_site_moment_crystalaxis_x",
                            "_atom_site_moment_crystalaxis_y",
                            "_atom_site_moment_crystalaxis_z",
                            "_atom_site_moment_label"]

            # cannot mutate OrderedDict during enumeration,
            # so store changes we want to make
            changes_to_make = {}

            for original_key in data.data:
                for correct_key in correct_keys:
                    # convert to all underscore
                    trial_key = "_".join(correct_key.split("."))
                    test_key = "_".join(original_key.split("."))
                    if trial_key == test_key:
                        changes_to_make[correct_key] = original_key

            # make changes
            for correct_key, original_key in changes_to_make.items():
                data.data[correct_key] = data.data[original_key]

            # renamed_keys maps interim_keys to final_keys
            renamed_keys = {
                "_magnetic_space_group.transform_to_standard_Pp_abc":
                    "_space_group_magn.transform_BNS_Pp_abc"}
            changes_to_make = {}

            for interim_key, final_key in renamed_keys.items():
                if data.data.get(interim_key):
                    changes_to_make[final_key] = interim_key

            if len(changes_to_make) > 0:
                self.errors.append("Keys changed to match new magCIF specification.")

            for final_key, interim_key in changes_to_make.items():
                data.data[final_key] = data.data[interim_key]

        # check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
        # this can sometimes cause serious issues when applying symmetry operations
        important_fracs = (1/3., 2/3.)
        fracs_to_change = {}
        for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
            if label in data.data.keys():
                for idx, frac in enumerate(data.data[label]):
                    try:
                        frac = str2float(frac)
                    except:
                        # co-ordinate might not be defined e.g. '?'
                        # NOTE(review): bare except also hides unrelated
                        # errors; consider narrowing to ValueError.
                        continue
                    for comparison_frac in important_fracs:
                        if abs(1 - frac/comparison_frac) < 1e-4:
                            fracs_to_change[(label, idx)] = str(comparison_frac)
        if fracs_to_change:
            self.errors.append("Some fractional co-ordinates rounded to ideal values to "
                               "avoid finite precision errors.")
            for (label, idx), val in fracs_to_change.items():
                data.data[label][idx] = val

        return data
    def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
        """
        Generate unique coordinates using coord and symmetry positions
        and also their corresponding magnetic moments, if supplied.

        Args:
            coords_in: fractional coordinates to expand with
                self.symmetry_operations.
            magmoms_in: optional magnetic moments, one per input coordinate
                (lengths must match).
            lattice: Lattice; needed to convert crystal-axis moments when a
                magnetic symmetry op is applied.

        Returns:
            (coords, magmoms) lists of equal length; dummy zero Magmoms
            when magmoms_in is not supplied.
        """
        coords = []
        if magmoms_in:
            magmoms = []
            if len(magmoms_in) != len(coords_in):
                raise ValueError
            for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
                for op in self.symmetry_operations:
                    coord = op.operate(tmp_coord)
                    # Wrap each fractional component back into [0, 1).
                    coord = np.array([i - math.floor(i) for i in coord])
                    if isinstance(op, MagSymmOp):
                        # Up to this point, magmoms have been defined relative
                        # to crystal axis. Now convert to Cartesian and into
                        # a Magmom object.
                        magmom = Magmom.from_moment_relative_to_crystal_axes(
                            op.operate_magmom(tmp_magmom),
                            lattice=lattice
                        )
                    else:
                        magmom = Magmom(tmp_magmom)
                    # Only keep images not already present (within
                    # site_tolerance, periodic boundary aware).
                    if not in_coord_list_pbc(coords, coord,
                                             atol=self._site_tolerance):
                        coords.append(coord)
                        magmoms.append(magmom)
            return coords, magmoms
        else:
            for tmp_coord in coords_in:
                for op in self.symmetry_operations:
                    coord = op.operate(tmp_coord)
                    coord = np.array([i - math.floor(i) for i in coord])
                    if not in_coord_list_pbc(coords, coord,
                                             atol=self._site_tolerance):
                        coords.append(coord)
            return coords, [Magmom(0)] * len(coords)  # return dummy magmoms
def get_lattice(self, data, length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
and necessary parameters are parsed
"""
try:
lengths = [str2float(data["_cell_length_" + i])
for i in length_strings]
angles = [str2float(data["_cell_angle_" + i])
for i in angle_strings]
if not lattice_type:
return Lattice.from_lengths_and_angles(lengths, angles)
else:
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
# Missing Key search for cell setting
for lattice_lable in ["_symmetry_cell_setting",
"_space_group_crystal_system"]:
if data.data.get(lattice_lable):
lattice_type = data.data.get(lattice_lable).lower()
try:
required_args = getargspec(
getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings
if l in required_args)
angles = (a for a in angle_strings
if a in required_args)
return self.get_lattice(data, lengths, angles,
lattice_type=lattice_type)
except AttributeError as exc:
self.errors.append(str(exc))
warnings.warn(exc)
else:
return None
    def get_symops(self, data):
        """
        In order to generate symmetry equivalent positions, the symmetry
        operations are parsed. If the symops are not present, the space
        group symbol is parsed, and symops are generated.

        Fallback order: explicit xyz operation strings -> space-group
        symbol (pymatgen tables, then the COD table) -> International
        Tables number -> P1.
        """
        symops = []
        # 1. Explicit symmetry-operation strings, under any of the common
        # key spellings.
        for symmetry_label in ["_symmetry_equiv_pos_as_xyz",
                               "_symmetry_equiv_pos_as_xyz_",
                               "_space_group_symop_operation_xyz",
                               "_space_group_symop_operation_xyz_"]:
            if data.data.get(symmetry_label):
                xyz = data.data.get(symmetry_label)
                if isinstance(xyz, str):
                    # A bare string (not a loop) means a single operation.
                    msg = "A 1-line symmetry op P1 CIF is detected!"
                    warnings.warn(msg)
                    self.errors.append(msg)
                    xyz = [xyz]
                try:
                    symops = [SymmOp.from_xyz_string(s)
                              for s in xyz]
                    break
                except ValueError:
                    continue
        if not symops:
            # 2. Try to parse symbol
            for symmetry_label in ["_symmetry_space_group_name_H-M",
                                   "_symmetry_space_group_name_H_M",
                                   "_symmetry_space_group_name_H-M_",
                                   "_symmetry_space_group_name_H_M_",
                                   "_space_group_name_Hall",
                                   "_space_group_name_Hall_",
                                   "_space_group_name_H-M_alt",
                                   "_space_group_name_H-M_alt_",
                                   "_symmetry_space_group_name_hall",
                                   "_symmetry_space_group_name_hall_",
                                   "_symmetry_space_group_name_h-m",
                                   "_symmetry_space_group_name_h-m_"]:
                sg = data.data.get(symmetry_label)
                if sg:
                    # Normalize the symbol (strip whitespace/underscores).
                    sg = sub_spgrp(sg)
                    try:
                        spg = space_groups.get(sg)
                        if spg:
                            symops = SpaceGroup(spg).symmetry_ops
                            msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
                                  "Spacegroup from %s used." % symmetry_label
                            warnings.warn(msg)
                            self.errors.append(msg)
                            break
                    except ValueError:
                        # Ignore any errors
                        pass

                    # Fall back to the COD symmetry-operations table.
                    try:
                        for d in _get_cod_data():
                            if sg == re.sub(r"\s+", "",
                                            d["hermann_mauguin"]):
                                xyz = d["symops"]
                                symops = [SymmOp.from_xyz_string(s)
                                          for s in xyz]
                                msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
                                      "Spacegroup from %s used." % symmetry_label
                                warnings.warn(msg)
                                self.errors.append(msg)
                                break
                    except Exception as ex:
                        continue

                    if symops:
                        break
        if not symops:
            # 3. Try to parse International number
            for symmetry_label in ["_space_group_IT_number",
                                   "_space_group_IT_number_",
                                   "_symmetry_Int_Tables_number",
                                   "_symmetry_Int_Tables_number_"]:
                if data.data.get(symmetry_label):
                    try:
                        i = int(str2float(data.data.get(symmetry_label)))
                        symops = SpaceGroup.from_int_number(i).symmetry_ops
                        break
                    except ValueError:
                        continue
        if not symops:
            # 4. Nothing worked: default to the identity (P1).
            msg = "No _symmetry_equiv_pos_as_xyz type key found. " \
                  "Defaulting to P1."
            warnings.warn(msg)
            self.errors.append(msg)
            symops = [SymmOp.from_xyz_string(s) for s in ['x', 'y', 'z']]

        return symops
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [i - np.floor(i) for i
in
op.translation_vector + centering_op.translation_vector]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal))
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get(
"_space_group_magn.number_BNS"):
if data.data.get("_space_group_magn.name_BNS"):
# get BNS label for MagneticSpaceGroup()
id = data.data.get("_space_group_magn.name_BNS")
else:
# get BNS number for MagneticSpaceGroup()
# by converting string to list of ints
id = list(map(int, (
data.data.get("_space_group_magn.number_BNS").split("."))))
msg = MagneticSpaceGroup(id)
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get(
"_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
return NotImplementedError(
"Non-standard settings not currently supported.")
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
return NotImplementedError(
"Incomplete specification to implement.")
magsymmops = msg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.errors.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
def parse_oxi_states(self, data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]:
str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \
str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
def parse_magmoms(self, data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception(
'Magmoms given in terms of crystal axes in magCIF spec.')
try:
magmoms = {
data["_atom_site_moment_label"][i]:
np.array(
[str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i])]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {"Hw": "H", "Ow": "O", "Wat": "O",
"wat": "O", "OH": "", "OH2": "", "NO3": "N"}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(r"{}\d*".format(parsed_sym), sym)):
msg = "{} parsed as {}".format(sym, parsed_sym)
warnings.warn(msg)
self.errors.append(msg)
return parsed_sym
    def _get_structure(self, data, primitive):
        """
        Generate structure from part of the cif.

        Args:
            data: CifBlock for a single data_ block.
            primitive (bool): whether to reduce to a primitive cell.

        Returns:
            Structure, or None if no usable sites were parsed.
        """

        def get_num_implicit_hydrogens(sym):
            # Implicit hydrogens encoded by common water/hydroxyl
            # pseudo-symbols.
            num_h = {"Wat": 2, "wat": 2, "O-H": 1}
            return num_h.get(sym[:3], 0)

        lattice = self.get_lattice(data)

        # if magCIF, get magnetic symmetry moments and magmoms
        # else standard CIF, and use empty magmom dict
        if self.feature_flags["magcif_incommensurate"]:
            raise NotImplementedError(
                "Incommensurate structures not currently supported.")
        elif self.feature_flags["magcif"]:
            self.symmetry_operations = self.get_magsymops(data)
            magmoms = self.parse_magmoms(data, lattice=lattice)
        else:
            self.symmetry_operations = self.get_symops(data)
            magmoms = {}

        oxi_states = self.parse_oxi_states(data)

        # Map from representative fractional coordinate -> Composition,
        # accumulating partial occupancies for disordered sites.
        coord_to_species = OrderedDict()
        coord_to_magmoms = OrderedDict()

        def get_matching_coord(coord):
            # Return the existing key whose symmetry image matches `coord`
            # within site_tolerance, or False if this is a new site.
            keys = list(coord_to_species.keys())
            coords = np.array(keys)
            for op in self.symmetry_operations:
                c = op.operate(coord)
                inds = find_in_coord_list_pbc(coords, c,
                                              atol=self._site_tolerance)
                # cant use if inds, because python is dumb and np.array([0]) evaluates
                # to False
                if len(inds):
                    return keys[inds[0]]
            return False

        for i in range(len(data["_atom_site_label"])):
            try:
                # If site type symbol exists, use it. Otherwise, we use the
                # label.
                symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
                num_h = get_num_implicit_hydrogens(
                    data["_atom_site_type_symbol"][i])
            except KeyError:
                symbol = self._parse_symbol(data["_atom_site_label"][i])
                num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
            if not symbol:
                continue

            if oxi_states is not None:
                o_s = oxi_states.get(symbol, 0)
                # use _atom_site_type_symbol if possible for oxidation state
                if "_atom_site_type_symbol" in data.data.keys():
                    oxi_symbol = data["_atom_site_type_symbol"][i]
                    o_s = oxi_states.get(oxi_symbol, o_s)
                try:
                    el = Specie(symbol, o_s)
                except:
                    # NOTE(review): bare except; any failure falls back to a
                    # dummy species.
                    el = DummySpecie(symbol, o_s)
            else:
                el = get_el_sp(symbol)

            x = str2float(data["_atom_site_fract_x"][i])
            y = str2float(data["_atom_site_fract_y"][i])
            z = str2float(data["_atom_site_fract_z"][i])
            magmom = magmoms.get(data["_atom_site_label"][i],
                                 np.array([0, 0, 0]))

            try:
                occu = str2float(data["_atom_site_occupancy"][i])
            except (KeyError, ValueError):
                # No occupancy given: assume a fully-occupied site.
                occu = 1

            if occu > 0:
                coord = (x, y, z)
                match = get_matching_coord(coord)
                comp_d = {el: occu}
                if num_h > 0:
                    comp_d["H"] = num_h
                    self.errors.append("Structure has implicit hydrogens defined, "
                                       "parsed structure unlikely to be suitable for use "
                                       "in calculations unless hydrogens added.")
                comp = Composition(comp_d)
                if not match:
                    coord_to_species[coord] = comp
                    coord_to_magmoms[coord] = magmom
                else:
                    # Same site seen again: accumulate the partial occupancy.
                    coord_to_species[match] += comp
                    # disordered magnetic not currently supported
                    coord_to_magmoms[match] = None

        # O/H pseudo-sites are exempt from the occupancy-sum warning.
        sum_occu = [sum(c.values()) for c in coord_to_species.values()
                    if not set(c.elements) == {Element("O"), Element("H")}]
        if any([o > 1 for o in sum_occu]):
            msg = "Some occupancies (%s) sum to > 1! If they are within " \
                  "the tolerance, they will be rescaled." % str(sum_occu)
            warnings.warn(msg)
            self.errors.append(msg)

        allspecies = []
        allcoords = []
        allmagmoms = []
        allhydrogens = []

        # check to see if magCIF file is disordered
        if self.feature_flags["magcif"]:
            for k, v in coord_to_magmoms.items():
                if v is None:
                    # Proposed solution to this is to instead store magnetic
                    # moments as Specie 'spin' property, instead of site
                    # property, but this introduces ambiguities for end user
                    # (such as unintended use of `spin` and Specie will have
                    # fictious oxidation state).
                    raise NotImplementedError(
                        'Disordered magnetic structures not currently supported.')

        if coord_to_species.items():
            # Group sites with identical composition, then expand each
            # group's coordinates through the symmetry operations.
            for comp, group in groupby(
                    sorted(list(coord_to_species.items()), key=lambda x: x[1]),
                    key=lambda x: x[1]):
                tmp_coords = [site[0] for site in group]
                tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in
                              tmp_coords]
                if self.feature_flags["magcif"]:
                    coords, magmoms = self._unique_coords(tmp_coords,
                                                          magmoms_in=tmp_magmom,
                                                          lattice=lattice)
                else:
                    coords, magmoms = self._unique_coords(tmp_coords)

                if set(comp.elements) == {Element("O"), Element("H")}:
                    # O with implicit hydrogens
                    im_h = comp["H"]
                    species = Composition({"O": comp["O"]})
                else:
                    im_h = 0
                    species = comp

                allhydrogens.extend(len(coords) * [im_h])
                allcoords.extend(coords)
                allspecies.extend(len(coords) * [species])
                allmagmoms.extend(magmoms)

            # rescale occupancies if necessary
            for i, species in enumerate(allspecies):
                totaloccu = sum(species.values())
                if 1 < totaloccu <= self._occupancy_tolerance:
                    allspecies[i] = species / totaloccu

        if allspecies and len(allspecies) == len(allcoords) \
                and len(allspecies) == len(allmagmoms):
            site_properties = dict()
            if any(allhydrogens):
                assert len(allhydrogens) == len(allcoords)
                site_properties["implicit_hydrogens"] = allhydrogens

            if self.feature_flags["magcif"]:
                site_properties["magmom"] = allmagmoms

            if len(site_properties) == 0:
                site_properties = None

            struct = Structure(lattice, allspecies, allcoords,
                               site_properties=site_properties)

            struct = struct.get_sorted_structure()

            if primitive and self.feature_flags['magcif']:
                struct = struct.get_primitive_structure(use_site_props=True)
            elif primitive:
                struct = struct.get_primitive_structure()
                struct = struct.get_reduced_structure()

            return struct
def get_structures(self, primitive=True):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
Returns:
List of Structures.
"""
structures = []
for d in self._cif.data.values():
try:
s = self._get_structure(d, primitive)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.errors.append(str(exc))
warnings.warn(str(exc))
if self.errors:
warnings.warn("Issues encountered while parsing CIF:")
for error in self.errors:
warnings.warn(error)
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
    @requires(BibliographyData, "Bibliographic data extraction requires pybtex.")
    def get_bibtex_string(self):
        """
        Get BibTeX reference from CIF file.

        Builds one ``article`` entry per data block, keyed
        ``cif-reference-<idx>`` in the order blocks appear.

        :return: BibTeX string
        """
        # Map of BibTeX fields to the CIF tags that may supply them.
        bibtex_keys = {'author': ('_publ_author_name', '_citation_author_name'),
                       'title': ('_publ_section_title', '_citation_title'),
                       'journal': ('_journal_name_full', '_journal_name_abbrev',
                                   '_citation_journal_full', '_citation_journal_abbrev'),
                       'volume': ('_journal_volume', '_citation_journal_volume'),
                       'year': ('_journal_year', '_citation_year'),
                       'number': ('_journal_number', '_citation_number'),
                       'page_first': ('_journal_page_first', '_citation_page_first'),
                       'page_last': ('_journal_page_last', '_citation_page_last'),
                       'doi': ('_journal_DOI', '_citation_DOI')}
        entries = {}
        # TODO: parse '_publ_section_references' when it exists?
        # TODO: CIF specification supports multiple citations.
        for idx, data in enumerate(self._cif.data.values()):
            # convert to lower-case keys, some cif files inconsistent
            data = {k.lower(): v for k, v in data.data.items()}
            bibtex_entry = {}
            for field, tags in bibtex_keys.items():
                for tag in tags:
                    if tag in data:
                        # NOTE(review): no break here, so when several tags are
                        # present the LAST matching tag wins — confirm whether
                        # the tuple order was meant as a priority instead.
                        if isinstance(data[tag], list):
                            bibtex_entry[field] = data[tag][0]
                        else:
                            bibtex_entry[field] = data[tag]
            # convert to bibtex author format ('and' delimited)
            if 'author' in bibtex_entry:
                # separate out semicolon authors
                if isinstance(bibtex_entry["author"], str):
                    if ";" in bibtex_entry["author"]:
                        bibtex_entry["author"] = bibtex_entry["author"].split(";")
                if isinstance(bibtex_entry['author'], list):
                    bibtex_entry['author'] = ' and '.join(bibtex_entry['author'])
            # convert to bibtex page range format, use empty string if not specified
            if ('page_first' in bibtex_entry) or ('page_last' in bibtex_entry):
                bibtex_entry['pages'] = '{0}--{1}'.format(bibtex_entry.get('page_first', ''),
                                                          bibtex_entry.get('page_last', ''))
                bibtex_entry.pop('page_first', None)  # and remove page_first, page_list if present
                bibtex_entry.pop('page_last', None)
            # cite keys are given as cif-reference-idx in order they are found
            entries['cif-reference-{}'.format(idx)] = Entry('article', list(bibtex_entry.items()))
        return BibliographyData(entries).to_string(bib_format='bibtex')
def as_dict(self):
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
return len(self.errors) > 0
class CifWriter:
    """Writes pymatgen structures out as CIF (or magCIF) files."""

    def __init__(self, struct, symprec=None, write_magmoms=False):
        """
        A wrapper around CifFile to write CIF files from pymatgen structures.

        Args:
            struct (Structure): structure to write
            symprec (float): If not none, finds the symmetry of the structure
                and writes the cif with symmetry information. Passes symprec
                to the SpacegroupAnalyzer
            write_magmoms (bool): If True, will write magCIF file. Incompatible
                with symprec
        """
        if write_magmoms and symprec:
            warnings.warn(
                "Magnetic symmetry cannot currently be detected by pymatgen,"
                "disabling symmetry detection.")
            symprec = None
        format_str = "{:.8f}"
        block = OrderedDict()
        loops = []
        # Default: no symmetry analysis, everything written in P 1.
        spacegroup = ("P 1", 1)
        if symprec is not None:
            sf = SpacegroupAnalyzer(struct, symprec)
            spacegroup = (sf.get_space_group_symbol(),
                          sf.get_space_group_number())
            # Needs the refined struture when using symprec. This converts
            # primitive to conventional structures, the standard for CIF.
            struct = sf.get_refined_structure()
        latt = struct.lattice
        comp = struct.composition
        no_oxi_comp = comp.element_composition
        block["_symmetry_space_group_name_H-M"] = spacegroup[0]
        for cell_attr in ['a', 'b', 'c']:
            block["_cell_length_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        for cell_attr in ['alpha', 'beta', 'gamma']:
            block["_cell_angle_" + cell_attr] = format_str.format(
                getattr(latt, cell_attr))
        block["_symmetry_Int_Tables_number"] = spacegroup[1]
        block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
        block["_chemical_formula_sum"] = no_oxi_comp.formula
        block["_cell_volume"] = "%.8f" % latt.volume
        reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
        block["_cell_formula_units_Z"] = str(int(fu))
        if symprec is None:
            # Identity operation only.
            block["_symmetry_equiv_pos_site_id"] = ["1"]
            block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
        else:
            sf = SpacegroupAnalyzer(struct, symprec)
            symmops = []
            for op in sf.get_symmetry_operations():
                v = op.translation_vector
                symmops.append(SymmOp.from_rotation_and_translation(
                    op.rotation_matrix, v))
            ops = [op.as_xyz_string() for op in symmops]
            block["_symmetry_equiv_pos_site_id"] = \
                ["%d" % i for i in range(1, len(ops) + 1)]
            block["_symmetry_equiv_pos_as_xyz"] = ops
        loops.append(["_symmetry_equiv_pos_site_id",
                      "_symmetry_equiv_pos_as_xyz"])
        # Oxidation states are written only when every element carries one;
        # species without oxi_state raise TypeError/AttributeError below.
        try:
            symbol_to_oxinum = OrderedDict([
                (el.__str__(),
                 float(el.oxi_state))
                for el in sorted(comp.elements)])
            block["_atom_type_symbol"] = symbol_to_oxinum.keys()
            block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
            loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
        except (TypeError, AttributeError):
            symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in
                                            sorted(comp.elements)])
        atom_site_type_symbol = []
        atom_site_symmetry_multiplicity = []
        atom_site_fract_x = []
        atom_site_fract_y = []
        atom_site_fract_z = []
        atom_site_label = []
        atom_site_occupancy = []
        atom_site_moment_label = []
        atom_site_moment_crystalaxis_x = []
        atom_site_moment_crystalaxis_y = []
        atom_site_moment_crystalaxis_z = []
        count = 1
        if symprec is None:
            # No symmetry: write every site with multiplicity 1.
            for site in struct:
                for sp, occu in sorted(site.species.items()):
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("1")
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    # Magnetic moment: site property wins over species spin.
                    magmom = Magmom(
                        site.properties.get('magmom', getattr(sp, 'spin', 0)))
                    if write_magmoms and abs(magmom) > 0:
                        moment = Magmom.get_moment_relative_to_crystal_axes(
                            magmom, latt)
                        atom_site_moment_label.append(
                            "{}{}".format(sp.symbol, count))
                        atom_site_moment_crystalaxis_x.append("%.5f" % moment[0])
                        atom_site_moment_crystalaxis_y.append("%.5f" % moment[1])
                        atom_site_moment_crystalaxis_z.append("%.5f" % moment[2])
                    count += 1
        else:
            # The following just presents a deterministic ordering.
            unique_sites = [
                (sorted(sites, key=lambda s: tuple([abs(x) for x in
                                                    s.frac_coords]))[0],
                 len(sites))
                for sites in sf.get_symmetrized_structure().equivalent_sites
            ]
            for site, mult in sorted(
                    unique_sites,
                    key=lambda t: (t[0].species.average_electroneg,
                                   -t[1], t[0].a, t[0].b, t[0].c)):
                for sp, occu in site.species.items():
                    atom_site_type_symbol.append(sp.__str__())
                    atom_site_symmetry_multiplicity.append("%d" % mult)
                    atom_site_fract_x.append("{0:f}".format(site.a))
                    atom_site_fract_y.append("{0:f}".format(site.b))
                    atom_site_fract_z.append("{0:f}".format(site.c))
                    atom_site_label.append("{}{}".format(sp.symbol, count))
                    atom_site_occupancy.append(occu.__str__())
                    count += 1
        block["_atom_site_type_symbol"] = atom_site_type_symbol
        block["_atom_site_label"] = atom_site_label
        block["_atom_site_symmetry_multiplicity"] = \
            atom_site_symmetry_multiplicity
        block["_atom_site_fract_x"] = atom_site_fract_x
        block["_atom_site_fract_y"] = atom_site_fract_y
        block["_atom_site_fract_z"] = atom_site_fract_z
        block["_atom_site_occupancy"] = atom_site_occupancy
        loops.append(["_atom_site_type_symbol",
                      "_atom_site_label",
                      "_atom_site_symmetry_multiplicity",
                      "_atom_site_fract_x",
                      "_atom_site_fract_y",
                      "_atom_site_fract_z",
                      "_atom_site_occupancy"])
        if write_magmoms:
            block["_atom_site_moment_label"] = atom_site_moment_label
            block["_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
            block["_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
            block["_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
            loops.append(["_atom_site_moment_label",
                          "_atom_site_moment_crystalaxis_x",
                          "_atom_site_moment_crystalaxis_y",
                          "_atom_site_moment_crystalaxis_z"])
        d = OrderedDict()
        d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
        self._cf = CifFile(d)

    def __str__(self):
        """
        Returns the cif as a string.
        """
        return self._cf.__str__()

    def write_file(self, filename):
        """
        Write the cif file.
        """
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
def str2float(text):
    """
    Remove uncertainty brackets from strings and return the float.

    Args:
        text: A CIF numeric value such as ``"0.123(4)"``, or a
            single-element list wrapping such a string.

    Returns:
        float: the value with any "(...)" uncertainty suffix stripped;
        a bare "." returns 0.

    Raises:
        TypeError: if text is neither a string nor a one-element list.
        ValueError: if the string cannot be parsed as a number.
    """
    try:
        # Note that the ending ) is sometimes missing. That is why the code has
        # been modified to treat it as optional. Same logic applies to lists.
        return float(re.sub(r"\(.+\)*", "", text))
    except TypeError:
        if isinstance(text, list) and len(text) == 1:
            return float(re.sub(r"\(.+\)*", "", text[0]))
        # Previously this fell through and silently returned None for any
        # other non-string input; fail loudly instead.
        raise TypeError("{!r} cannot be converted to float".format(text))
    except ValueError as ex:
        if text.strip() == ".":
            return 0
        raise ex
|
montoyjh/pymatgen
|
pymatgen/io/cif.py
|
Python
|
mit
| 58,184
|
[
"Avogadro",
"CRYSTAL",
"pymatgen"
] |
289ea6c9ec942cf5ded74a7d3021c89672788e05c3adb66614d9b09835b65638
|
#!/usr/bin/env python2
"""
This is an unmaintained one-shot script, only included in the repo for
reference.
"""
from pokedex.db import connect, identifier_from_name
from pokedex.db.tables import Encounter, EncounterMethod, EncounterSlot, Language, Location, LocationArea, Pokemon, Version
session = connect()
def get_version(name):
    """Fetch the single Version row whose identifier matches *name*."""
    ident = identifier_from_name(name)
    return session.query(Version).filter_by(identifier=ident).one()
# Version lookup shortcuts keyed by the games' common abbreviations.
# These run at import time and hit the database once per version.
R = get_version(u'red')
B = get_version(u'blue')
Ye = get_version(u'yellow')
G = get_version(u'gold')
S = get_version(u'silver')
C = get_version(u'crystal')
RU = get_version(u'ruby')
SA = get_version(u'sapphire')
EM = get_version(u'emerald')
FR = get_version(u'firered')
LG = get_version(u'leafgreen')
DI = get_version(u'diamond')
PE = get_version(u'pearl')
PT = get_version(u'platinum')
HG = get_version(u'heartgold')
SS = get_version(u'soulsilver')
BL = get_version(u'black')
WH = get_version(u'white')
B2 = get_version(u'black-2')
W2 = get_version(u'white-2')
X = get_version(u'x')
Y = get_version(u'y')
OR = get_version(u'omega-ruby')
AS = get_version(u'alpha-sapphire')
def normal_gift_data():
    """Static table of ordinary (non-egg) gift Pokemon.

    Each row is [pokemon identifier, versions, level, location,
    optional area identifier, optional area display name].
    Commented-out rows are gifts deliberately excluded (mostly fossils,
    which are not quite gifts).
    """
    return [
        # Gen I
        [ u'bulbasaur', [ R, B ], 5, u'pallet-town' ],
        [ u'charmander', [ R, B ], 5, u'pallet-town' ],
        [ u'squirtle', [ R, B ], 5, u'pallet-town' ],
        [ u'pikachu', [ Ye ], 5, u'pallet-town' ],
        [ u'bulbasaur', [ Ye ], 10, u'cerulean-city' ],
        [ u'charmander', [ Ye ], 10, u'kanto-route-24' ],
        [ u'squirtle', [ Ye ], 10, u'vermilion-city' ],
        #[ u'aerodactyl', [ R, B, Ye ], 30, u'pewter-city', u'museum-of-science', u'Pewter Museum of Science' ],
        [ u'magikarp', [ R, B, Ye ], 5, u'kanto-route-4', u'pokemon-center', u'Pokemon Center' ],
        #[ u'omanyte', [ R, B, Ye ], 30, u'mt-moon', u'b2f' ],
        #[ u'kabuto', [ R, B, Ye ], 30, u'mt-moon', u'b2f' ],
        [ u'hitmonlee', [ R, B, Ye ], 30, u'saffron-city', u'fighting-dojo', u'Fighting Dojo' ],
        [ u'hitmonchan', [ R, B, Ye ], 30, u'saffron-city', u'fighting-dojo', u'Fighting Dojo' ],
        [ u'eevee', [ R, B, Ye ], 25, u'celadon-city', u'celadon-mansion', u'Celadon Mansion rooftop' ],
        [ u'lapras', [ R, B, Ye ], 15, u'saffron-city', u'silph-co-7f', u'Silph Co. 7F' ],
        # Gen II
        [ u'chikorita', [ G, S, C ], 5, u'new-bark-town' ],
        [ u'cyndaquil', [ G, S, C ], 5, u'new-bark-town' ],
        [ u'totodile', [ G, S, C ], 5, u'new-bark-town' ],
        [ u'spearow', [ G, S, C ], 10, u'goldenrod-city', u'north-gate', u'North Gate' ],
        [ u'eevee', [ G, S, C ], 20, u'goldenrod-city', u'bills-house', u"Bill's house" ],
        [ u'shuckle', [ G, S, C ], 15, u'cianwood-city', u'manias-house', u"Mania's house" ],
        [ u'dratini', [ C ], 15, u'dragons-den' ],
        [ u'tyrogue', [ G, S, C ], 10, u'mt-mortar', u'b1f' ],
        # Gen III
        # Note Lileep + Anorith are not listed because they are not *gifts*
        # They're note quite encounters either
        # but that's outta scope of gift logic
        [ u'treecko', [ RU, SA, EM ], 5, u'hoenn-route-101' ],
        [ u'torchic', [ RU, SA, EM ], 5, u'hoenn-route-101' ],
        [ u'mudkip' , [ RU, SA, EM ], 5, u'hoenn-route-101' ],
        [ u'castform', [ RU, SA, EM ], 25, u'hoenn-route-119', u'weather-institute', u'Weather Institute' ],
        [ u'beldum', [ RU, SA, EM ], 5, u'mossdeep-city', u'stevens-house', u"Steven's house" ],
        [ u'chikorita', [ EM ], 5, u'littleroot-town' ],
        [ u'cyndaquil', [ EM ], 5, u'littleroot-town' ],
        [ u'totodile', [ EM ], 5, u'littleroot-town' ],
        [ u'bulbasaur', [ FR, LG ], 5, u'pallet-town' ],
        [ u'charmander', [ FR, LG ], 5, u'pallet-town' ],
        [ u'squirtle', [ FR, LG ], 5, u'pallet-town' ],
        #[ u'aerodactyl', [ FR, LG ], 5, u'pewter-city', u'museum-of-science' ],
        [ u'magikarp', [ FR, LG ], 5, u'kanto-route-4', u'pokemon-center' ],
        #[ u'omanyte', [ FR, LG ], 5, u'mt-moon', u'b2f' ],
        #[ u'kabuto', [ FR, LG ], 5, u'mt-moon', u'b2f' ],
        [ u'hitmonlee', [ FR, LG ], 25, u'saffron-city', u'fighting-dojo' ],
        [ u'hitmonchan', [ FR, LG ], 25, u'saffron-city', u'fighting-dojo' ],
        [ u'eevee', [ FR, LG ], 25, u'celadon-city', u'celadon-mansion' ],
        [ u'lapras', [ FR, LG ], 25, u'saffron-city', u'silph-co-7f' ],
        # Gen IV
        [ u'turtwig', [ DI, PE ], 5, u'lake-verity', u'before-galactic-intervention' ],
        [ u'chimchar', [ DI, PE ], 5, u'lake-verity', u'before-galactic-intervention' ],
        [ u'piplup', [ DI, PE ], 5, u'lake-verity', u'before-galactic-intervention' ],
        [ u'turtwig', [ PT ], 5, u'sinnoh-route-201' ],
        [ u'chimchar', [ PT ], 5, u'sinnoh-route-201' ],
        [ u'piplup', [ PT ], 5, u'sinnoh-route-201' ],
        [ u'eevee', [ DI, PE, ], 5, u'hearthome-city' ],
        [ u'eevee', [ PT ], 20, u'hearthome-city' ],
        [ u'porygon', [ PT ], 25, u'veilstone-city' ],
        [ u'chikorita', [ HG, SS ], 5, u'new-bark-town' ],
        [ u'cyndaquil', [ HG, SS ], 5, u'new-bark-town' ],
        [ u'totodile', [ HG, SS ], 5, u'new-bark-town' ],
        [ u'spearow', [ HG, SS ], 20, u'goldenrod-city', u'north-gate' ],
        [ u'eevee', [ HG, SS ], 5, u'goldenrod-city', u'bills-house' ],
        [ u'shuckle', [ HG, SS ], 15, u'cianwood-city', u'kirks-house', u"Kirk's house" ],
        [ u'dratini', [ HG, SS ], 15, u'dragons-den' ],
        [ u'tyrogue', [ HG, SS ], 10, u'mt-mortar', u'b1f' ],
        [ u'bulbasaur', [ HG, SS ], 5, u'pallet-town' ],
        [ u'charmander', [ HG, SS ], 5, u'pallet-town' ],
        [ u'squirtle', [ HG, SS ], 5, u'pallet-town' ],
        [ u'treecko', [ HG, SS ], 5, u'saffron-city', u'silph-co-7f' ],
        [ u'torchic', [ HG, SS ], 5, u'saffron-city', u'silph-co-7f' ],
        [ u'mudkip' , [ HG, SS ], 5, u'saffron-city', u'silph-co-7f' ],
        # Gen V
        [ u'snivy', [ BL, WH ], 5, u'nuvema-town' ],
        [ u'tepig', [ BL, WH ], 5, u'nuvema-town' ],
        [ u'oshawott', [ BL, WH ], 5, u'nuvema-town' ],
        [ u'pansage', [ BL, WH ], 10, u'dreamyard' ], # not the basement
        [ u'pansear', [ BL, WH ], 10, u'dreamyard' ],
        [ u'panpour', [ BL, WH ], 10, u'dreamyard' ],
        [ u'zorua', [ BL, WH ], 10, u'castelia-city', u'game-freak-hq-1f', u'Game Freak HQ 1F' ],
        #[ u'tirtouga', [ BL, WH ], 25, u'relic-castle', u'a' ],
        #[ u'archen', [ BL, WH ], 25, u'relic-castle', u'a' ],
        #[ u'omanyte', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'kabuto', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'aerodactyl', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'lileep', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'anorith', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'cranidos', [ BL, WH ], 25, u'twist-mountain' ],
        #[ u'shieldon', [ BL, WH ], 25, u'twist-mountain' ],
        [ u'magikarp', [ BL, WH ], 5, u'marvelous-bridge' ],
        [ u'snivy', [ B2, W2 ], 5, u'aspertia-city' ],
        [ u'tepig', [ B2, W2 ], 5, u'aspertia-city' ],
        [ u'oshawott', [ B2, W2 ], 5, u'aspertia-city' ],
        [ u'zorua', [ B2, W2 ], 25, u'driftveil-city' ],
        [ u'deerling', [ B2, W2 ], 30, u'unova-route-6', u'weather-institute', u'Weather Institute' ],
        [ u'eevee', [ B2, W2 ], 10, u'castelia-city' ],
        #[ u'omanyte', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'kabuto', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'aerodactyl', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'lileep', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'anorith', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'cranidos', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'shieldon', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'tirtouga', [ B2, W2 ], 25, u'join-avenue' ],
        #[ u'archen', [ B2, W2 ], 25, u'join-avenue' ],
        [ u'magikarp', [ B2, W2 ], 5, u'marvelous-bridge' ],
        #[ u'tirtouga', [ B2, W2 ], 25, u'nacrene-city', u'museum', u'Nacrene City Museum' ],
        #[ u'archen', [ B2, W2 ], 25, u'nacrene-city', u'museum'],
        #[ u'omanyte', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'kabuto', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'aerodactyl', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'lileep', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'anorith', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'cranidos', [ B2, W2 ], 25, u'twist-mountain' ],
        #[ u'shieldon', [ B2, W2 ], 25, u'twist-mountain' ],
        # These are shiny...
        [ u'dratini', [ W2 ], 1, u'floccesy-town' ],
        [ u'gible', [ B2 ], 1, u'floccesy-town' ],
        # Gen VI
        [ u'chespin', [ X, Y ], 5, u'aquacorde-town' ],
        [ u'fennekin', [ X, Y ], 5, u'aquacorde-town' ],
        [ u'froakie', [ X, Y ], 5, u'aquacorde-town' ],
        [ u'bulbasaur', [ X, Y ], 10, u'lumiose-city' ],
        [ u'charmander', [ X, Y ], 10, u'lumiose-city' ],
        [ u'squirtle', [ X, Y ], 10, u'lumiose-city' ],
        [ u'tyrunt', [ X, Y ], 20, u'glittering-cave', u'unknown-area-303' ], # 304 means ceiling
        [ u'amaura', [ X, Y ], 20, u'glittering-cave', u'unknown-area-303' ],
        [ u'lucario', [ X, Y ], 32, u'tower-of-mastery' ],
        [ u'lapras', [ X, Y ], 30, u'kalos-route-12' ],
        [ u'treecko', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'torchic', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'mudkip', [ OR, AS ], 5, u'hoenn-route-101' ],
        # cosplay pikachu is given to you the first time you participate in a contest
        [ u'pikachu', [ OR, AS ], 20, u'slateport-city', u'contest-hall', u"Contest Hall" ],
        [ u'pikachu', [ OR, AS ], 20, u'verdanturf-town', u'contest-hall', u"Contest Hall" ],
        [ u'pikachu', [ OR, AS ], 20, u'fallarbor-town', u'contest-hall', u"Contest Hall" ],
        [ u'pikachu', [ OR, AS ], 20, u'lilycove-city', u'contest-hall', u"Contest Hall" ],
        [ u'latios', [ OR ], 30, u'southern-island' ], # eon tickets ignored here - they're not gifts?
        [ u'latias', [ AS ], 30, u'southern-island' ],
        [ u'castform', [ OR, AS ], 30, u'hoenn-route-119', u'weather-institute' ],
        [ u'chikorita', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'cyndaquil', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'totodile', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'snivy', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'tepig', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'oshawott', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'beldum', [ OR, AS ], 1, u'mossdeep-city', u'stevens-house' ],
        [ u'turtwig', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'chimchar', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'piplup', [ OR, AS ], 5, u'hoenn-route-101' ],
        [ u'camerupt', [ OR, AS ], 40, u'battle-resort' ],
        [ u'sharpedo', [ OR, AS ], 40, u'battle-resort' ],
    ]
def egg_gift_data():
    """Static table of gift eggs.

    Same row layout as normal_gift_data(): [pokemon identifier, versions,
    level, location, optional area identifier, optional area display name].
    """
    return [
        [ u'togepi', [ G, S, C ], 5, u'violet-city' ],
        [ u'pichu', [ C ], 5, u'johto-route-34' ],
        [ u'cleffa', [ C ], 5, u'johto-route-34' ],
        [ u'igglybuff', [ C ], 5, u'johto-route-34' ],
        [ u'tyrogue', [ C ], 5, u'johto-route-34' ],
        [ u'smoochum', [ C ], 5, u'johto-route-34' ],
        [ u'elekid', [ C ], 5, u'johto-route-34' ],
        [ u'magby', [ C ], 5, u'johto-route-34' ],
        [ u'wynaut', [ RU, SA, EM ], 5, u'lavaridge-town' ],
        [ u'togepi', [ FR, LG ], 5, u'water-labyrinth' ],
        [ u'togepi', [ DI, PE, PT ], 1, u'eterna-city', u'west-gate', u'West Gate' ],
        [ u'happiny', [ DI, PE, ], 1, u'hearthome-city', u'west-gate', u'West Gate' ],
        [ u'riolu', [ DI, PE, PT ], 1, u'iron-island', u'b2f-left' ],
        [ u'togepi', [ HG, SS ], 1, u'violet-city', u'poke-mart', u'Poke Mart' ],
        [ u'mareep', [ HG, SS ], 1, u'violet-city', u'pokemon-center', u'Pokemon Center' ],
        [ u'wooper', [ HG, SS ], 1, u'violet-city', u'pokemon-center' ],
        [ u'slugma', [ HG, SS ], 1, u'violet-city', u'pokemon-center' ],
        [ u'larvesta', [ BL, WH ], 1, u'unova-route-18' ],
        [ u'happiny', [ B2, W2 ], 1, u'nacrene-city', u'west-gate', u'West Gate' ],
        [ u'wynaut', [ OR, AS ], 1, u'lavaridge-town' ],
        [ u'togepi', [ OR, AS ], 1, u'lavaridge-town' ],
    ]
def record_method_and_gifts(gift_method, gift_data):
    """Insert encounter rows for every gift in gift_data, idempotently.

    Missing LocationArea and EncounterSlot rows are created on the fly;
    rows that already exist are reused, so re-running the script is safe.
    Commits after each insert.
    """
    en = session.query(Language).filter_by(identifier=u'en').one()
    for gift_datum in gift_data:
        pokemon_name = identifier_from_name(gift_datum[0])
        versions = gift_datum[1]
        # NOTE(review): the numeric level is run through
        # identifier_from_name; presumably that leaves plain digit strings
        # unchanged — verify before relying on min_level/max_level values.
        level = identifier_from_name(str(gift_datum[2]))
        location_name = identifier_from_name(gift_datum[3])
        area_name = None
        if len(gift_datum) > 4:
            area_name = identifier_from_name(gift_datum[4])
        pokemon = session.query(Pokemon ).filter_by(identifier=pokemon_name ).one()
        location = session.query(Location ).filter_by(identifier=location_name ).one()
        location_area = session.query(LocationArea).filter_by(identifier=area_name, location_id=location.id).first()
        # Some of these don't exist yet
        if not location_area:
            location_area = LocationArea(
                location_id = location.id,
                game_index = 0, # cause who knows what this means
                identifier = area_name
            )
            # Row 5, when present, carries the English display name.
            area_prose = None
            if area_name != None:
                area_prose = gift_datum[5]
            location_area.name_map[en] = area_prose
            session.add(location_area)
            session.commit()
        for version in versions:
            # One slot per (version group, method); shared by all gifts.
            encounter_slot = session.query(EncounterSlot).filter_by(
                version_group_id = version.version_group_id,
                encounter_method_id = gift_method.id
            ).first()
            if not encounter_slot:
                encounter_slot = EncounterSlot(
                    version_group_id = version.version_group_id,
                    encounter_method_id = gift_method.id,
                    # No priority over or under other events/conditions
                    slot = None,
                    # Rarity is meaningless for gifts, but say that it's
                    # 100% to help out code that expects rarity to be defined.
                    rarity = 100,
                )
                session.add(encounter_slot)
                session.commit()
            encounter_info = {
                'version_id': version.id,
                'location_area_id': location_area.id,
                'encounter_slot_id': encounter_slot.id,
                'pokemon_id': pokemon.id,
                'min_level': level,
                'max_level': level
            }
            encounter = session.query(Encounter).filter_by(**encounter_info).first()
            if not encounter:
                encounter = Encounter(**encounter_info)
                session.add(encounter)
                session.commit()
# Entry point: record ordinary gifts, then gift eggs, each attached to its
# matching EncounterMethod row.
normal_gift_method = session.query(EncounterMethod).filter_by(identifier=u'gift').one()
record_method_and_gifts(normal_gift_method, normal_gift_data())
egg_gift_method = session.query(EncounterMethod).filter_by(identifier=u'gift-egg').one()
record_method_and_gifts(egg_gift_method, egg_gift_data())
|
veekun/pokedex
|
scripts/add-gift-encounters.py
|
Python
|
mit
| 16,338
|
[
"CRYSTAL"
] |
7b9a5e8526681d8a296ed983bf0b0cb4f26667bcaeb92dc0c8b30d5f32164044
|
#!/usr/bin/python
"""
Copyright 2010 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import re
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
from xml.dom import minidom
import ghNames
#
# Get current url
# Resolve this script's own URL (absent when run outside a CGI server).
try:
    url = os.environ['SCRIPT_NAME']
except KeyError:
    url = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
    cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
    useCookies = 0
if useCookies:
    try:
        currentUser = cookies['userID'].value
    except KeyError:
        currentUser = ''
    try:
        loginResult = cookies['loginAttempt'].value
    except KeyError:
        loginResult = 'success'
    try:
        sid = cookies['gh_sid'].value
    except KeyError:
        sid = form.getfirst('gh_sid', '')
else:
    currentUser = ''
    loginResult = 'success'
    sid = form.getfirst('gh_sid', '')
# Get form info
wpID = form.getfirst("wpID", "")
galaxy = form.getfirst("galaxy", "")
planet = form.getfirst("planet", "")
spawnID = form.getfirst("resID", "")
spawnName = form.getfirst("resName", "")
price = form.getfirst("price", "0")
concentration = form.getfirst("concentration", "")
location = form.getfirst("location", "")
wpName = form.getfirst("wpName", "")
shareLevel = form.getfirst("shareLevel", "")
forceOp = form.getfirst("forceOp", "")
waypointID = form.getfirst("waypointID", "")
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
wpID = dbShared.dbInsertSafe(wpID)
galaxy = dbShared.dbInsertSafe(galaxy)
planet = dbShared.dbInsertSafe(planet)
spawnID = dbShared.dbInsertSafe(spawnID)
spawnName = dbShared.dbInsertSafe(spawnName)
price = dbShared.dbInsertSafe(price)
concentration = dbShared.dbInsertSafe(concentration)
location = dbShared.dbInsertSafe(location)
wpName = dbShared.dbInsertSafe(wpName)
shareLevel = dbShared.dbInsertSafe(shareLevel)
forceOp = dbShared.dbInsertSafe(forceOp)
waypointID = dbShared.dbInsertSafe(waypointID)
lattitude = ""
longitude = ""
result = ""
# Get a session
logged_state = 0
# 2592000 s = 30-day session lifetime
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
    logged_state = 1
    currentUser = sess
def n2n(inVal):
    """Map empty-ish values to the SQL literal NULL; stringify anything else."""
    if inVal in ('', None, 'undefined', 'None'):
        return 'NULL'
    return str(inVal)
def addWaypoint(spawnID, planetID, price, concentration, lattitude, longitude, wpName, shareLevel):
    """Insert a new waypoint row owned by the current user.

    Returns a short status string and, on success, logs a resource event.
    """
    returnStr = ""
    # Local id of the inserted row; None until the INSERT succeeds.  The
    # original fell through to the module-level waypointID on failure and
    # could log an event for a row that was never inserted.
    waypointID = None
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # Parameterized query instead of string concatenation: lets the driver
    # quote user-supplied fields (name, price, coordinates) safely.
    tempSQL = ("INSERT INTO tWaypoint (spawnID, planetID, owner, price,"
               " concentration, lattitude, longitude, waypointType,"
               " waypointName, shareLevel, entered)"
               " VALUES (%s,%s,%s,%s,%s,%s,%s,'u',%s,%s,NOW());")
    try:
        cursor.execute(tempSQL, (spawnID, planetID, currentUser, price,
                                 concentration, lattitude, longitude,
                                 wpName, shareLevel))
        returnStr = "Waypoint added."
        waypointID = cursor.lastrowid
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        returnStr = 'Error: Add Failed.'
    if waypointID is not None and str(waypointID).isdigit():
        # NOTE(review): logEvent's API takes raw SQL text; inputs here were
        # escaped upstream via dbShared.dbInsertSafe.
        dbShared.logEvent("INSERT INTO tResourceEvents (spawnID, userID, eventTime, eventType, planetID) VALUES (" + str(spawnID) + ",'" + currentUser + "',NOW(),'w'," + str(planetID) + ");","w",currentUser, galaxy, str(spawnID))
    cursor.close()
    conn.close()
    return returnStr
def updateWaypoint(waypointID, spawnID, planetID, price, concentration, lattitude, longitude, wpName, shareLevel):
    """Update an existing waypoint's fields.

    Returns a status string; "Error: ..." when no row was changed.
    """
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # Parameterized query instead of string concatenation to prevent SQL
    # injection via the user-supplied name/price/coordinate fields.
    tempSQL = ("UPDATE tWaypoint SET spawnID=%s, planetID=%s, price=%s,"
               " concentration=%s, lattitude=%s, longitude=%s,"
               " waypointName=%s, shareLevel=%s WHERE waypointID=%s;")
    cursor.execute(tempSQL, (spawnID, planetID, price, concentration,
                             lattitude, longitude, wpName, shareLevel,
                             waypointID))
    result = cursor.rowcount
    if (result < 1):
        returnStr = "Error: waypoint not updated."
    else:
        returnStr = " waypoint updated."
    cursor.close()
    conn.close()
    return returnStr
def getSpawnID(resName, galaxy):
    """Look up the spawnID for a resource name within a galaxy.

    Returns -1 when no matching resource exists.
    """
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # Parameterized to avoid SQL injection via the resource name.
    cursor.execute("SELECT spawnID FROM tResources WHERE galaxy=%s AND spawnName=%s;",
                   (galaxy, resName))
    row = cursor.fetchone()
    newid = -1 if row is None else row[0]
    cursor.close()
    conn.close()
    return newid
# Check for errors
errstr = ""
if (shareLevel.isdigit() != True):
    errstr = errstr + "Error: Invalid share level. \r\n"
else:
    if (int(shareLevel) != 256):
        errstr = errstr + "Error: You can only post public waypoints. \r\n"
if (len(spawnName) < 1 and spawnID == ""):
    errstr = errstr + "Error: no resource name. \r\n"
if (spawnID == "" and galaxy == ""):
    errstr = errstr + "Error: no galaxy selected. \r\n"
if ((wpName == "none" or len(wpName) < 1) and spawnID == ""):
    errstr = errstr + "Error: You must enter a Name/Description of the waypoint. \r\n"
if re.search('\W', spawnName):
    errstr = errstr + "Error: spawn name contains illegal characters."
if (forceOp != "edit" and planet.isdigit() == False):
    errstr = errstr + "Error: planet must be provided to post resource unless editing."
# Accept a trailing percent sign on the concentration field.
if (concentration[-1:] == "%"):
    concentration = concentration[:-1]
#sys.stderr.write("conc: " + concentration)
# NOTE(review): isdigit() rejects decimal values like "12.5" even though
# the range check below uses float() — confirm whole numbers are intended.
if (concentration.isdigit() != True):
    errstr = errstr + "Error: Concentration entered was not a valid number."
else:
    if float(concentration) < 0 or float(concentration) > 100:
        errstr = errstr + "Error: Concentration should be a number between 0 and 100"
# Location must be "lat,lon"; both coordinates limited to +/-8192.
if (location.find(",") > -1):
    try:
        lattitude = int(location[:location.find(",")].rstrip())
        longitude = int(location[location.find(",")+1:].lstrip())
        if (lattitude < -8192 or lattitude > 8192 or longitude < -8192 or longitude > 8192):
            errstr = errstr + "Error: Invalid location coordinates. Value too large. "
    except ValueError:
        errstr = errstr + "Error: Could not identify lat/lon as numbers. "
else:
    errstr = errstr + "Error: location is not in the right format. Separate the cooridinates with a comma."
# Only process if no errors or just verifying
if (errstr == "" or (forceOp == "verify" and wpID != None and wpID.isdigit())):
    result = ""
    if (logged_state > 0):
        if forceOp != "verify":
            if (spawnName == "" or spawnName == None):
                spawnName = ghNames.getSpawnName(spawnID)
            # First see if resource is entered at all
            if (spawnID == ""):
                spawnID = getSpawnID(spawnName, galaxy)
        if (spawnID > -1 or forceOp == "verify"):
            # waypoint already entered?
            if (wpID != None and wpID.isdigit()):
                # check owner
                try:
                    conn = dbShared.ghConn()
                    cursor = conn.cursor()
                except Exception:
                    result = "Error: could not connect to database"
                if (cursor):
                    cursor.execute('SELECT owner FROM tWaypoint WHERE waypointID=' + str(wpID) + ';')
                    row = cursor.fetchone()
                    if (row != None):
                        owner = row[0]
                    else:
                        owner = ''
                    cursor.close()
                    if forceOp == "verify":
                        # Verification: anyone but the owner may confirm a
                        # waypoint still exists.
                        if galaxy.isdigit():
                            if owner != currentUser:
                                dbShared.logUserEvent(currentUser, galaxy, "w", wpID, "v")
                                result = "Verified!"
                            else:
                                result = "Error: You can not verify your own waypoint."
                        else:
                            result = "Error: You did not specify the galaxy."
                    else:
                        # edit it
                        if owner == currentUser:
                            result = "edit: "
                            result = result + updateWaypoint(wpID, spawnID, planet, price, int(concentration), lattitude, longitude, wpName, shareLevel)
                        else:
                            result = "Error: You are not the owner of that waypoint."
                else:
                    result = "Error: No database connection"
                conn.close()
            else:
                # check for duplicate public waypoints
                try:
                    conn = dbShared.ghConn()
                    cursor = conn.cursor()
                except Exception:
                    result = "Error: could not connect to database"
                owner = ''
                if (cursor):
                    # A public waypoint within 50 units on both axes counts
                    # as a duplicate.
                    if int(shareLevel) == 256:
                        cursor.execute('SELECT owner FROM tWaypoint WHERE spawnID=' + str(spawnID) + ' AND planetID=' + str(planet) + ' AND shareLevel=256 AND lattitude BETWEEN ' + str(lattitude-50) + ' AND ' + str(lattitude + 50) + ' AND longitude BETWEEN ' + str(longitude - 50) + ' AND ' + str(longitude + 50) + ';')
                        row = cursor.fetchone()
                        if (row != None):
                            owner = row[0]
                    cursor.close()
                    if owner == '':
                        result = addWaypoint(spawnID, planet, price, int(concentration), lattitude, longitude, wpName, shareLevel)
                    else:
                        result = "Error: That public waypoint has already been entered by " + owner
                else:
                    result = "Error: No database connection"
                conn.close()
        else:
            # spawn cannot be found
            result = "Error: I can not find the resource name you entered in this galaxy."
    else:
        result = "Error: must be logged in to add waypoints"
else:
    result = errstr
# Emit the XML response: <result><waypointID/><resultText/></result>
print 'Content-type: text/xml\n'
doc = minidom.Document()
eRoot = doc.createElement("result")
doc.appendChild(eRoot)
eName = doc.createElement("waypointID")
tName = doc.createTextNode(waypointID)
eName.appendChild(tName)
eRoot.appendChild(eName)
eText = doc.createElement("resultText")
tText = doc.createTextNode(result)
eText.appendChild(tText)
eRoot.appendChild(eText)
print doc.toxml()
# NOTE(review): exit status doubles as an HTTP-like code (500/200) —
# process exit codes are truncated to 0-255 on some platforms; verify the
# web server actually consumes these.
if (result.find("Error:") > -1):
    sys.exit(500)
else:
    sys.exit(200)
|
clreinki/GalaxyHarvester
|
postWaypoint.py
|
Python
|
agpl-3.0
| 9,961
|
[
"Galaxy"
] |
6bb347276bbc3a55529783e219441b057addc7dcd530c0b9a5d036212d519a40
|
import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='coffeelint')
class CoffeeLintBear:
    """
    Check CoffeeScript code for a clean and consistent style.
    For more information about coffeelint, visit <http://www.coffeelint.org/>.
    """
    LANGUAGES = "CoffeeScript"
    # Map coffeelint issue levels onto coala severities.
    severity_map = {'warn': RESULT_SEVERITY.NORMAL,
                    'error': RESULT_SEVERITY.MAJOR}
    @staticmethod
    def create_arguments(filename, file, config_file):
        # Ask coffeelint for machine-readable (JSON) output.
        return ('--reporter=raw', filename)
    def process_output(self, output, filename, file):
        # The raw reporter emits a single {filename: [issue, ...]} mapping.
        parsed = json.loads(output)
        assert len(parsed) == 1, "More than 1 file parsed, something went wrong"
        (issues,) = tuple(parsed.values())
        for issue in issues:
            yield Result.from_values(
                origin="{} ({})".format(self.name, issue['rule']),
                message=issue['message'],
                file=filename,
                line=issue.get('lineNumber', None),
                end_line=issue.get('lineNumberEnd', None),
                severity=self.severity_map[issue['level']],
                additional_info=issue.get('description',
                                          issue.get('context', "")))
|
sims1253/coala-bears
|
bears/coffee_script/CoffeeLintBear.py
|
Python
|
agpl-3.0
| 1,322
|
[
"VisIt"
] |
f58b6643bb29f9eb77af8dc92aa11a4e14080f35f76e0a09d24a8b4295a91500
|
# -*- coding:UTF-8 -*-
# ---------------------------------------------------#
# Aim of the program:
# Create plots for ENSO_metrics
# ---------------------------------------------------#
# ---------------------------------------------------#
# Import the right packages
# ---------------------------------------------------#
from __future__ import print_function
# Run matplotlib background to prevent
# display localhost error after console disconnected
# and to speed up
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
# Import other libs
from glob import iglob as GLOBiglob
import json
from os import makedirs as OS__makedirs
from os.path import exists as OSpath__exists
from os.path import join as OSpath__join
# ENSO_metrics functions
#from EnsoCollectionsLib import defCollection
from EnsoMetrics.EnsoCollectionsLib import defCollection
from EnsoMetricPlot import main_plotter
import sys
from PMPdriver_lib import AddParserArgument
# ---------------------------------------------------#
# Arguments
# ---------------------------------------------------#
# Parse the PMP driver command line into a parameter object.
param = AddParserArgument()
# Metrics Collection
metric_collection = param.metricsCollection
# Pre-defined options
mip = param.mip
exp = param.exp
# model
if param.modnames is None:
    model = "IPSL-CM5A-LR"
else:
    model = param.modnames[0]
# Realizations
run = param.realization
# case id
case_id = param.case_id
# Switches
debug = param.debug
"""
metric_collection = "ENSO_perf"
#metric_collection = "ENSO_tel"
#metric_collection = "ENSO_proc"
mip = "cmip5"
exp = "historical"
model = "IPSL-CM5A-LR"
run = "r1i1p1"
case_id = "v20200305"
debug = True
"""
# ---------------------------------------------------#
# Check Arguments
# ---------------------------------------------------#
# Echo the effective configuration for traceability in batch logs.
print("metric_collection:", metric_collection)
print("mip:", mip)
print("exp:", exp)
print("model:", model)
print("run:", run)
print("case_id:", case_id)
print("debug:", debug)
# ---------------------------------------------------#
# Input json/NetCDF locations on the production results tree.
path_main = "/p/user_pub/pmp/pmp_results/pmp_v1.1.2"
path_in_json = OSpath__join(path_main, "metrics_results", "enso_metric", mip, exp, case_id, metric_collection)
path_in_nc = OSpath__join(path_main, "diagnostic_results", "enso_metric", mip, exp, case_id, metric_collection)
# In debug mode write outputs under a scratch tree instead (the input
# paths above are already resolved at this point).
if debug:
    path_main = "/work/lee1043/imsi/result_test"
path_out = OSpath__join(path_main, "graphics", "enso_metric", mip, exp, case_id, metric_collection)
if not OSpath__exists(path_out):
    try:
        OS__makedirs(path_out)
        print("path_out:", path_out)
    except OSError:
        # The directory may already exist or have been created concurrently
        # by another process; either way it is safe to continue.
        pass
# File-name prefix shared by the input json/NetCDF files and the figures.
pattern = "_".join([mip, exp, metric_collection, case_id])
# ---------------------------------------------------#
# Main
# ---------------------------------------------------#
# read json file
filename_js = OSpath__join(path_in_json, pattern + "_allModels_allRuns.json")
print('filename_js:', filename_js)
# The 'with' block closes the file; an explicit close() is not needed.
with open(filename_js) as ff:
    data_json = json.load(ff)['RESULTS']['model'][model][run]
del ff, filename_js
# loop on metrics
metrics = sorted(defCollection(metric_collection)['metrics_list'].keys(),
                 key=lambda v: v.upper())
for met in metrics:
    try:
        print('met:', met)
        # NetCDF file holding this metric's diagnostics.
        filename_nc = OSpath__join(
            path_in_nc, "_".join([pattern, model, run, met]) + ".nc")
        print("filename_nc:", filename_nc)
        # "Map" metrics of the teleconnection collection are stored under
        # <met>Corr / <met>Rmse instead of directly under <met>.
        is_tel_map = metric_collection == "ENSO_tel" and "Map" in met
        # Diagnostic values for the given model and observations.
        if is_tel_map:
            dia = data_json["value"][met + "Corr"]["diagnostic"]
            diagnostic_values = {key: None for key in dia.keys()}
            diagnostic_units = ""
        else:
            dia = data_json["value"][met]["diagnostic"]
            diagnostic_values = {key: dia[key]["value"] for key in dia.keys()}
            diagnostic_units = data_json["metadata"]["metrics"][met]["diagnostic"]["units"]
        # Metric values computed with the given model and observations.
        if is_tel_map:
            suffixes, kinds = [met + "Corr", met + "Rmse"], ["diagnostic", "metric"]
            values = data_json["value"]
            metric_values = {
                key: {model: [values[su][ki][key]["value"]
                              for su, ki in zip(suffixes, kinds)]}
                for key in values[suffixes[0]]["metric"].keys()}
            metric_units = [data_json["metadata"]["metrics"][su]["metric"]["units"]
                            for su in suffixes]
        else:
            met_dict = data_json["value"][met]["metric"]
            metric_values = {key: {model: met_dict[key]["value"]}
                             for key in met_dict.keys()}
            metric_units = data_json["metadata"]["metrics"][met]["metric"]["units"]
        # figure name
        figure_name = "_".join([mip, exp, metric_collection, model, run, met])
        # main_plotter needs the collection/metric/model/experiment names, the
        # NetCDF file, the diagnostic and metric values with their units, and
        # (optionally) the output path and figure name.  The model name must
        # match the one used when the metrics were computed, since it appears
        # in both the NetCDF files and the json file.
        main_plotter(metric_collection, met, model, exp, filename_nc,
                     diagnostic_values, diagnostic_units, metric_values,
                     metric_units, member=run, path_png=path_out,
                     name_png=figure_name)
    except Exception as exc:
        # Best-effort: report the failure and continue with the next metric.
        print("## ERROR:", exc)
|
eguil/ENSO_metrics
|
pmp_driver/PMPdriver_plot.py
|
Python
|
bsd-3-clause
| 6,359
|
[
"NetCDF"
] |
065bd8279e7e4e21e8043992a115464a83f005f340dce31eabdaa209965995ab
|
import os
import subprocess
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# NAV account username the browser session is logged in as.
USERNAME = 'admin'
# Handle to the background gunicorn process (set by start_gunicorn,
# terminated by stop_gunicorn).
gunicorn = None
########################################################################
# #
# Set up the required components for an integration test. Components #
# such as PostgreSQL and Apache are assumed to already be installed on #
# the system. The system is assumed to be Debian. See #
# tests/docker/Dockerfile. #
# #
########################################################################
# Helper scripts live in the CI workspace when WORKSPACE is set;
# otherwise fall back to the Docker image's root directory.
if os.environ.get('WORKSPACE'):
    SCRIPT_PATH = os.path.join(os.environ['WORKSPACE'], 'tests/docker/scripts')
else:
    SCRIPT_PATH = '/'
# Shell script that creates the test database.
SCRIPT_CREATE_DB = os.path.join(SCRIPT_PATH, 'create-db.sh')
def pytest_configure(config):
    """pytest hook: create the test database and start gunicorn before tests run."""
    subprocess.check_call([SCRIPT_CREATE_DB])
    start_gunicorn()
def pytest_unconfigure(config):
    """pytest hook: stop the background gunicorn server after the test run."""
    stop_gunicorn()
def start_gunicorn():
    """Start gunicorn serving the NAV test WSGI application in the background.

    stdout/stderr are appended to reports/gunicorn.log.  The Popen handle is
    stored in the module-level ``gunicorn`` variable so stop_gunicorn() can
    terminate it later.
    """
    global gunicorn
    # The child receives a duplicated file descriptor at spawn time, so the
    # parent's handle can be closed immediately afterwards instead of being
    # leaked for the server's whole lifetime (as the original code did).
    with open("reports/gunicorn.log", "ab") as gunicorn_log:
        gunicorn = subprocess.Popen(['gunicorn', 'navtest_wsgi:application'],
                                    stdout=gunicorn_log,
                                    stderr=subprocess.STDOUT)
def stop_gunicorn():
    """Terminate the background gunicorn server, if one was started."""
    if gunicorn is not None:
        gunicorn.terminate()
############
# #
# Fixtures #
# #
############
@pytest.fixture
def selenium(selenium, base_url):
    """Fixture to initialize the selenium web driver with a NAV session logged
    in as the admin user.
    """
    # Imported lazily so Django is bootstrapped before nav.web.auth is used.
    from nav.bootstrap import bootstrap_django
    bootstrap_django(__file__)
    from nav.web.auth import create_session_cookie
    selenium.implicitly_wait(10)
    wait = WebDriverWait(selenium, 10)
    cookie = create_session_cookie(USERNAME)
    # visit a non-existent URL just to set the site context for cookies
    selenium.get('{}/images/400'.format(base_url))
    wait.until(EC.text_to_be_present_in_element((By.TAG_NAME, "h1"),
                                                "Not found"))
    print("Cookies after first fetch: {!r}".format(selenium.get_cookies()))
    # Replace whatever session the site handed out with the admin session.
    selenium.delete_all_cookies()
    print("Setting session cookie for {}: {!r}".format(USERNAME, cookie))
    selenium.add_cookie(cookie)
    # Cookie modification is also _non-blocking_ in Selenium, so we need to
    # wait for the cookie to become present in the browser before we continue!
    wait.until(_session_cookie_is_present(cookie))
    print("Cookies after set, before refresh: {!r}".format(
        selenium.get_cookies()))
    selenium.refresh()
    print("Cookies after refresh: {!r}".format(selenium.get_cookies()))
    # Hand the logged-in driver to the test; log cookies afterwards to help
    # debug session problems.
    yield selenium
    print("Cookies after test: {!r}".format(selenium.get_cookies()))
class _session_cookie_is_present(object):
"""Selenium expectation for verifying that a session cookie is set"""
def __init__(self, session_cookie):
self.session_cookie = session_cookie
def __call__(self, driver):
for cookie in driver.get_cookies():
if cookie['name'] == self.session_cookie['name']:
return cookie['value'] == self.session_cookie['value']
@pytest.fixture(scope="session")
def base_url():
    """Base URL of the NAV instance under test; override with TARGETURL."""
    return os.environ.get('TARGETURL', 'http://localhost:8000')
@pytest.fixture
def chrome_options(chrome_options):
    """Chrome options tuned to avoid renderer timeouts in CI runs."""
    # All options stolen from https://stackoverflow.com/questions/48450594/selenium-timed-out-receiving-message-from-renderer
    # AGGRESSIVE: options.setPageLoadStrategy(PageLoadStrategy.NONE) # https://www.skptricks.com/2018/08/timed-out-receiving-message-from-renderer-selenium.html
    flags = (
        "start-maximized",                    # https://stackoverflow.com/a/26283818/1689770
        "enable-automation",                  # https://stackoverflow.com/a/43840128/1689770
        "--headless",                         # only if you are ACTUALLY running headless
        "--no-sandbox",                       # https://stackoverflow.com/a/50725918/1689770
        "--disable-infobars",                 # https://stackoverflow.com/a/43840128/1689770
        "--disable-dev-shm-usage",            # https://stackoverflow.com/a/50725918/1689770
        "--disable-browser-side-navigation",  # https://stackoverflow.com/a/49123152/1689770
        "--disable-gpu",                      # https://stackoverflow.com/questions/51959986/how-to-solve-selenium-chromedriver-timed-out-receiving-message-from-renderer-exc
    )
    for flag in flags:
        chrome_options.add_argument(flag)
    return chrome_options
|
UNINETT/nav
|
tests/functional/conftest.py
|
Python
|
gpl-2.0
| 4,760
|
[
"VisIt"
] |
b99730762232c5d6648ea0c60ed64f05bcfa9142625cab1643162df09f8b0d1c
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for admin change view.
"""
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.admin import ChangeUserAdminPage
from common.test.acceptance.tests.helpers import AcceptanceTest
class UnicodeUsernameAdminTest(AcceptanceTest):
    """
    Tests if it is possible to update users with unicode usernames in the admin.
    """
    shard = 21
    # The word below reads "Omar II", in Arabic. It also contains a space and
    # an Eastern Arabic Number another option is to use the Esperanto fake
    # language but this was used instead to test non-western letters.
    FIXTURE_USERNAME = u'عمر ٢'
    # From the db fixture `unicode_user.json`
    FIXTURE_USER_ID = 1000
    def setUp(self):
        """
        Initializes and visits the change user admin page as a superuser.
        """
        # Some state is constructed by the parent setUp() routine
        super(UnicodeUsernameAdminTest, self).setUp()
        # Log in as a staff superuser so the Django admin is accessible.
        AutoAuthPage(self.browser, staff=True, superuser=True).visit()
        # Load page objects for use by the tests
        self.page = ChangeUserAdminPage(self.browser, self.FIXTURE_USER_ID)
        # Navigate to the index page and get testing!
        self.page.visit()
    def test_update_first_name(self):
        """
        As a superuser I should be able to update the first name of a user with unicode username.
        """
        # Sanity-check the fixture user before making any change.
        self.assertNotEqual(self.page.first_name, 'John')
        self.assertEqual(self.page.username, self.FIXTURE_USERNAME)
        self.page.change_first_name('John')
        # A successful save redirects away from the change view.
        self.assertFalse(self.page.is_browser_on_page(), 'Should redirect to the admin user list view on success')
        # Visit the page again to verify changes
        self.page.visit()
        self.assertEqual(self.page.first_name, 'John', 'The first name should be updated')
|
cpennington/edx-platform
|
common/test/acceptance/tests/lms/test_unicode_username_admin.py
|
Python
|
agpl-3.0
| 1,911
|
[
"VisIt"
] |
87a744630b5061734ceaa1faa34a41fad21d445e939b28ff55a52ee3f8d61f06
|
'''
Created on Apr 20, 2015
@author: ayan
'''
from __future__ import (absolute_import, division, print_function)
from netCDF4 import Dataset
import numpy as np
import hashlib
import warnings
from collections import OrderedDict
from gridded.pysgrid.utils import GridPadding #TODO Remove this from the loading system
from gridded.pysgrid.read_netcdf import NetCDFDataset, parse_padding, find_grid_topology_var
from gridded.pysgrid.utils import calculate_angle_from_true_east, pair_arrays
from gridded.pysgrid.variables import SGridVariable
from gridded.utilities import gen_celltree_mask_from_center_mask
# Accepted aliases for each staggered-grid location when callers name one.
node_alternate_names = ['node','nodes', 'psi', 'vertex','vertices', 'point','points']
center_alternate_names = ['center','centers','face','faces','cell','cells']
edge1_alternate_names = ['edge1','u']
edge2_alternate_names = ['edge2','v']
class SGrid(object):
    # Maps a padding type to the (start, stop) offsets that trim the padded
    # elements from an axis.
    # NOTE(review): 'high': (None, 1) looks inconsistent with the other
    # entries ((None, -1) would trim the last element) -- confirm against
    # callers before relying on it.
    padding_slices = {'both': (1, -1),
                      'none': (None, None),
                      'low': (1, None),
                      'high': (None, 1)
                      }
    # SGRID topology dimension: this class models 2D staggered grids.
    topology_dimension = 2
    def __init__(self,
                 node_lon=None,
                 node_lat=None,
                 node_mask=None,
                 center_lon=None,
                 center_lat=None,
                 center_mask=None,
                 edge1_lon=None,
                 edge1_lat=None,
                 edge1_mask=None,
                 edge2_lon=None,
                 edge2_lat=None,
                 edge2_mask=None,
                 edges=None,
                 node_padding=None,
                 edge1_padding=None,
                 edge2_padding=None,
                 grid_topology_var=None,
                 variables=None,
                 grid_variables=None,
                 dimensions=None,
                 node_dimensions=None,
                 node_coordinates=None,
                 edge1_coordinates=None,
                 edge2_coordinates=None,
                 angles=None,
                 edge1_dimensions=None,
                 edge2_dimensions=None,
                 faces=None,
                 face_padding=None,
                 face_coordinates=None,
                 face_dimensions=None,
                 vertical_padding=None,
                 vertical_dimensions=None,
                 tree=None, #Fixme: should this be initilizable here?
                 use_masked_boundary=False,
                 *args,
                 **kwargs):
        """
        Container for a 2D staggered (SGRID-convention) grid.

        lon/lat/mask arrays are given per staggered location (node, center,
        edge1, edge2); the remaining arguments carry SGRID metadata usually
        read from a netCDF file (dimensions, coordinates, padding, angles).
        Extra positional/keyword arguments are accepted and ignored.
        """
        # Geographic coordinates and masks for each staggered location.
        self.node_lon = node_lon
        self.node_lat = node_lat
        self.node_mask = node_mask
        self.center_lon = center_lon
        self.center_lat = center_lat
        self.center_mask = center_mask
        self.edge1_lon = edge1_lon
        self.edge1_lat = edge1_lat
        self.edge1_mask = edge1_mask
        self.edge2_lon = edge2_lon
        self.edge2_lat = edge2_lat
        self.edge2_mask = edge2_mask
        self.edges = edges # Fixme: is this needed?
        # SGRID metadata: padding, topology, dimensions and coordinates.
        # Note the padding assignments go through property setters.
        self.node_padding = node_padding
        self.edge1_padding = edge1_padding
        self.edge2_padding = edge2_padding
        self.grid_topology_var = grid_topology_var
        self.variables = variables
        self.grid_variables = grid_variables
        self.dimensions = dimensions
        self.node_dimensions = node_dimensions
        self.node_coordinates = node_coordinates
        self.edge1_coordinates = edge1_coordinates
        self.edge2_coordinates = edge2_coordinates
        self.angles = angles
        self.edge1_dimensions = edge1_dimensions
        self.edge2_dimensions = edge2_dimensions
        self.faces = faces
        self.face_padding = face_padding
        self.face_coordinates = face_coordinates
        self.face_dimensions = face_dimensions
        self.vertical_padding = vertical_padding
        self.vertical_dimensions = vertical_dimensions
        self.tree = tree
        self.use_masked_boundary = use_masked_boundary
        # Quad-interpolation coefficients, computed lazily by
        # _compute_transform_coeffs().
        self._l_coeffs = None
        self._m_coeffs = None
        # used for nearest neighbor interpolation
        self._kd_trees = {}
        # Cell tree plus memoization caches for point-location queries.
        self._cell_tree = None
        self._log_ind_memo_dict = OrderedDict()
        self._cell_ind_memo_dict = OrderedDict()
        self._cell_tree_mask = None
    @classmethod
    def load_grid(cls, nc):
        """
        Construct an SGrid from a netCDF file path or an open netCDF4 Dataset.

        :param nc: path to a netCDF file, or an already-open Dataset
        :returns: a populated SGrid instance
        """
        # Accept either an open Dataset or a file path.
        if isinstance(nc, Dataset):
            pass
        else:
            nc = Dataset(nc, 'r')
        # Read all SGRID metadata via the attribute helper.
        topology_var = find_grid_topology_var(nc)
        sa = SGridAttributes(nc, cls.topology_dimension, topology_var)
        dimensions = sa.get_dimensions()
        node_dimensions, node_coordinates = sa.get_node_coordinates()
        grid_topology_var = sa.get_topology_var()
        edge1_dimensions, edge1_padding = sa.get_attr_dimension('edge1_dimensions') # noqa
        edge2_dimensions, edge2_padding = sa.get_attr_dimension('edge2_dimensions') # noqa
        edge1_coordinates = sa.get_attr_coordinates('edge1_coordinates')
        edge2_coordinates = sa.get_attr_coordinates('edge2_coordinates')
        angles = sa.get_angles()
        vertical_dimensions, vertical_padding = sa.get_attr_dimension('vertical_dimensions') # noqa
        node_lon, node_lat = sa.get_cell_node_lat_lon()
        center_lon, center_lat = sa.get_cell_center_lat_lon()
        edge1_lon, edge1_lat = sa.get_cell_edge1_lat_lon()
        edge2_lon, edge2_lat = sa.get_cell_edge2_lat_lon()
        face_dimensions, face_padding = sa.get_attr_dimension('face_dimensions') # noqa
        face_coordinates = sa.get_attr_coordinates('face_coordinates')
        node_mask, center_mask, edge1_mask, edge2_mask = sa.get_masks(node_lon,
                                                                      center_lon,
                                                                      edge1_lon,
                                                                      edge2_lon)
        sgrid = cls(angles=angles,
                    node_lon=node_lon,
                    node_lat=node_lat,
                    node_mask=node_mask,
                    center_lon=center_lon,
                    center_lat=center_lat,
                    center_mask=center_mask,
                    edge1_lon=edge1_lon,
                    edge1_lat=edge1_lat,
                    edge1_mask=edge1_mask,
                    edge2_lon=edge2_lon,
                    edge2_lat=edge2_lat,
                    edge2_mask=edge2_mask,
                    dimensions=dimensions,
                    edge1_coordinates=edge1_coordinates,
                    edge1_dimensions=edge1_dimensions,
                    edge1_padding=edge1_padding,
                    edge2_coordinates=edge2_coordinates,
                    edge2_dimensions=edge2_dimensions,
                    edge2_padding=edge2_padding,
                    edges=None,
                    face_coordinates=face_coordinates,
                    face_dimensions=face_dimensions,
                    face_padding=face_padding,
                    faces=None,
                    grid_topology_var=grid_topology_var,
                    grid_variables=None,
                    node_coordinates=node_coordinates,
                    node_dimensions=node_dimensions,
                    node_padding=None,
                    variables=None,
                    vertical_dimensions=vertical_dimensions,
                    vertical_padding=vertical_padding)
        # Attach the dataset's variables (as SGridVariable objects) to sgrid.
        sa.get_variable_attributes(sgrid)
        return sgrid
@property
def info(self):
"""
Summary of information about the grid
This needs to be implimented -- see UGrid for example
"""
names = ", ".join([name for name, at in vars(self).items()
if not name.startswith("_") if at is not None])
msg = ("SGrid object with defined:\n"
" {}".format(names))
return msg
def get_all_face_padding(self):
if self.face_padding is not None:
all_face_padding = self.face_padding
else:
all_face_padding = []
return all_face_padding
def get_all_edge_padding(self):
all_edge_padding = []
if self._edge1_padding is not None:
all_edge_padding += self._edge1_padding
if self._edge2_padding is not None:
all_edge_padding += self._edge2_padding
return all_edge_padding
def all_padding(self):
all_padding = self.get_all_face_padding() + self.get_all_edge_padding()
if self.vertical_padding is not None:
all_padding += self.vertical_padding
return all_padding
    def save_as_netcdf(self, filepath):
        """
        save the grid as a netcdf file
        :param filepath: path to the file to be created and saved to
        """
        with Dataset(filepath, 'w') as nclocal:
            # Write the components common to all grids, then add the
            # face/vertical attributes specific to this grid.
            grid_vars = self._save_common_components(nclocal)
            # Add attributes to the grid_topology variable.
            grid_vars.face_dimensions = self.face_dimensions
            if self.vertical_dimensions is not None:
                grid_vars.vertical_dimensions = self.vertical_dimensions
            if self.face_coordinates is not None:
                grid_vars.face_coordinates = ' '.join(self.face_coordinates)
@property
def non_grid_variables(self):
non_grid_variables = [variable for variable in self.variables if
variable not in self.grid_variables]
return non_grid_variables
    @property
    def nodes(self):
        # Node lon/lat paired along a new trailing axis: shape (..., 2).
        return np.stack((self.node_lon, self.node_lat), axis=-1)
    @property
    def centers(self):
        # Center lon/lat paired along a new trailing axis: shape (..., 2).
        return np.stack((self.center_lon, self.center_lat), axis=-1)
    @property
    def node_padding(self):
        # Explicitly-set padding if present and truthy; otherwise no padding.
        if hasattr(self, '_node_padding') and self._node_padding:
            return self._node_padding
        else:
            return (None, None)
    @node_padding.setter
    def node_padding(self, val):
        self._node_padding = val
    @property
    def center_padding(self):
        # Explicitly-set padding wins; otherwise infer it from the shape
        # difference between the center (face) grid and the node grid.
        if hasattr(self, '_center_padding') and self._center_padding:
            return self._center_padding
        elif hasattr(self, 'center_lon') and self.center_lon is not None:
            face_shape = self.center_lon.shape
            node_shape = self.node_lon.shape
            diff = np.array(face_shape) - node_shape
            rv = []
            # Per dimension: diff 0 -> 'low', +1 -> 'both', -1 -> 'none'
            # (the tuple is indexed by the signed difference).
            for dim in (0,1):
                rv.append(('low', 'both', 'none')[diff[dim]])
                if rv[-1] == 'low':
                    # 'low' is ambiguous (could equally be 'high'); warn.
                    warnings.warn('Assuming low padding for faces')
            return tuple(rv)
        else:
            return (None, None)
    @center_padding.setter
    def center_padding(self, val):
        self._center_padding = val
    @property
    def edge1_padding(self):
        # Explicit padding if set (unwrapping a GridPadding record into a
        # (dim0, dim1) tuple); otherwise derive dim-0 from the center grid.
        if hasattr(self, '_edge1_padding') and self._edge1_padding:
            if isinstance(self._edge1_padding[0], GridPadding):
                return (self._edge1_padding[0].padding, None)
            else:
                return self._edge1_padding
        else:
            return (self.center_padding[0], None)
    @edge1_padding.setter
    def edge1_padding(self, val):
        self._edge1_padding = val
    @property
    def edge2_padding(self):
        # Explicit padding if set (unwrapping a GridPadding record into a
        # (dim0, dim1) tuple); otherwise derive dim-1 from the center grid.
        if hasattr(self, '_edge2_padding') and self._edge2_padding:
            if isinstance(self._edge2_padding[0], GridPadding):
                return (None, self._edge2_padding[0].padding)
            else:
                return self._edge2_padding
        else:
            return (None, self.center_padding[1])
    @edge2_padding.setter
    def edge2_padding(self, val):
        self._edge2_padding = val
def infer_location(self, variable):
"""
Assuming default is psi grid, check variable dimensions to determine which grid
it is on.
"""
shape = None
try:
shape = np.array(variable.shape)
except:
return None # Variable has no shape attribute!
if len(variable.shape) < 2:
return None
difference = (shape[-2:] - self.node_lon.shape).tolist()
if (difference == [1, 1] or difference == [-1, -1]) and self.center_lon is not None:
location = 'center'
elif difference == [1, 0] and self.edge1_lon is not None:
location = 'edge1'
elif difference == [0, 1] and self.edge2_lon is not None:
location = 'edge2'
elif difference == [0, 0] and self.node_lon is not None:
location = 'node'
else:
location = None
return location
    def _save_common_components(self, nc_file):
        """
        Write the grid components shared by all SGrid flavors into an open
        netCDF file: dimensions, center/node coordinate variables, the
        grid_topology variable and its attributes, the optional angle
        variable, and all dataset variables.

        :param nc_file: an open, writable netCDF4 Dataset
        :returns: the created grid_topology netCDF variable
        """
        grid_var = self.grid_topology_var
        # Create dimensions.
        for grid_dim in self.dimensions:
            dim_name, dim_size = grid_dim
            nc_file.createDimension(dim_name, dim_size)
        # Create variables.
        center_lon, center_lat = self.face_coordinates
        center_lon_obj = getattr(self, center_lon)
        center_lat_obj = getattr(self, center_lat)
        center_lon = nc_file.createVariable(center_lon_obj.variable,
                                            center_lon_obj.dtype,
                                            center_lon_obj.dimensions)
        center_lat = nc_file.createVariable(center_lat_obj.variable,
                                            center_lat_obj.dtype,
                                            center_lat_obj.dimensions)
        center_lon[:] = self.center_lon[:]
        center_lat[:] = self.center_lat[:]
        # Node coordinates are optional; unpacking None raises TypeError,
        # which means there are none to write.
        try:
            node_lon, node_lat = self.node_coordinates
        except TypeError:
            pass
        else:
            node_lon_obj = getattr(self, node_lon)
            grid_node_lon = nc_file.createVariable(node_lon_obj.variable,
                                                   node_lon_obj.dtype,
                                                   node_lon_obj.dimensions)
            node_lat_obj = getattr(self, node_lat)
            grid_node_lat = nc_file.createVariable(node_lat_obj.variable,
                                                   node_lat_obj.dtype,
                                                   node_lat_obj.dimensions)
            grid_node_lon[:] = self.node_lon[:]
            grid_node_lat[:] = self.node_lat[:]
        # The grid_topology variable carries the SGRID metadata attributes.
        grid_var_obj = getattr(self, grid_var)
        grid_vars = nc_file.createVariable(grid_var_obj.variable,
                                           grid_var_obj.dtype)
        grid_vars.cf_role = 'grid_topology'
        grid_vars.topology_dimension = self.topology_dimension
        grid_vars.node_dimensions = self.node_dimensions
        if self.edge1_dimensions is not None:
            grid_vars.edge1_dimensions = self.edge1_dimensions
        if self.edge2_dimensions is not None:
            grid_vars.edge2_dimensions = self.edge2_dimensions
        if self.node_coordinates is not None:
            grid_vars.node_coordinates = ' '.join(self.node_coordinates)
        if self.edge1_coordinates is not None:
            grid_vars.edge1_coordinates = ' '.join(self.edge1_coordinates)
        if self.edge2_coordinates is not None:
            grid_vars.edge2_coordinates = ' '.join(self.edge2_coordinates)
        # Optional grid-angle variable.
        if hasattr(self, 'angle'):
            angle_obj = getattr(self, 'angle', None)
            grid_angle = nc_file.createVariable(angle_obj.variable,
                                                angle_obj.dtype,
                                                angle_obj.dimensions
                                                )
            if self.angles is not None:
                grid_angle[:] = self.angles[:]
        # Copy every dataset variable; createVariable raises RuntimeError on
        # duplicates, which are simply skipped.
        for dataset_variable in self.variables:
            dataset_var_obj = getattr(self, dataset_variable)
            try:
                dataset_grid_var = nc_file.createVariable(
                    dataset_var_obj.variable,
                    dataset_var_obj.dtype,
                    dataset_var_obj.dimensions
                )
            except RuntimeError:
                continue
            else:
                axes = []
                if dataset_var_obj.grid is not None:
                    dataset_grid_var.grid = grid_var
                if dataset_var_obj.standard_name is not None:
                    dataset_grid_var.standard_name = dataset_var_obj.standard_name # noqa
                if dataset_var_obj.coordinates is not None:
                    dataset_grid_var.coordinates = ' '.join(dataset_var_obj.coordinates) # noqa
                if dataset_var_obj.x_axis is not None:
                    x_axis = 'X: {0}'.format(dataset_var_obj.x_axis)
                    axes.append(x_axis)
                if dataset_var_obj.y_axis is not None:
                    y_axis = 'Y: {0}'.format(dataset_var_obj.y_axis)
                    axes.append(y_axis)
                if dataset_var_obj.z_axis is not None:
                    z_axis = 'Z: {0}'.format(dataset_var_obj.z_axis)
                    axes.append(z_axis)
                if axes:
                    dataset_grid_var.axes = ' '.join(axes)
        return grid_vars
def _get_geo_mask(self, name):
if name == 'node':
return self.node_mask
elif name == 'center':
return self.center_mask
elif name == 'edge1':
return self.edge1_mask
elif name == 'edge2':
return self.edge2_mask
else:
raise ValueError('Invalid grid name {0}'.format(name))
def _get_grid_vars(self, name):
if name == 'node':
return (self.node_lon, self.node_lat)
elif name == 'center':
return (self.center_lon, self.center_lat)
elif name == 'edge1':
return (self.edge1_lon, self.edge1_lat)
elif name == 'edge2':
return (self.edge2_lon, self.edge2_lat)
else:
raise ValueError('Invalid grid name {0}'.format(name))
def _hash_of_pts(self, points):
"""
Returns a SHA1 hash of the array of points passed in
"""
return hashlib.sha1(points.tobytes()).hexdigest()
    def _add_memo(self, points, item, D, _copy=False, _hash=None):
        """
        Store a computed result in a point-keyed memoization cache.

        :param points: List of points to be hashed.
        :param item: Result of computation to be stored.
        :param D: Dict that will store hash -> item mapping.
        :param _copy: store a private copy of item instead of item itself.
        :param _hash: If hash is already computed it may be passed in here.
        """
        if _copy:
            item = item.copy()
        # Freeze the stored array so cached results cannot be mutated.
        item.setflags(write=False)
        if _hash is None:
            _hash = self._hash_of_pts(points)
        # Bound the cache at 7 entries by evicting the oldest (FIFO order).
        # NOTE(review): D is assumed non-None below despite the guard here.
        if D is not None and len(D) > 6:
            D.popitem(last=False)
        D[_hash] = item
        D[_hash].setflags(write=False)
def _get_memoed(self, points, D, _copy=False, _hash=None):
if _hash is None:
_hash = self._hash_of_pts(points)
if (D is not None and _hash in D):
return D[_hash].copy() if _copy else D[_hash]
else:
return None
    def _compute_transform_coeffs(self):
        """
        https://www.particleincell.com/2012/quad-interpolation/
        This computes the a and b coefficients of the equations
        x = a1 + a2*l + a3*m + a4*l*m
        y = b1 + b2*l + b3*m + b4*l*m
        The results are memoized per grid since their geometry is different, and
        is not expected to change over the lifetime of the object.
        """
        lon, lat = self.node_lon, self.node_lat
        # One 4-vector of coefficients per grid cell (cells are one smaller
        # than nodes in each dimension).
        l_coeffs = self._l_coeffs = np.zeros((lon[0:-1, 0:-1].shape + (4,)), dtype=np.float64)
        m_coeffs = self._m_coeffs = self._l_coeffs.copy('C')
        indices = np.stack(np.indices(lon[0:-1, 0:-1].shape), axis=-1).reshape(-1, 2)
        # Corner lon/lat values of every cell, in quad order.
        polyx = self.get_variable_by_index(lon, indices)
        polyy = self.get_variable_by_index(lat, indices)
        # for every cell
        A = np.array(([1, 0, 0, 0],
                      [1, 0, 1, 0],
                      [1, 1, 1, 1],
                      [1, 1, 0, 0],
                      ))
        # A = np.array(([1, 0, 0, 0],
        #               [1, 1, 0, 0],
        #               [1, 1, 1, 1],
        #               [1, 0, 1, 0],
        #               ))
        # polyx = np.matrix(polyx)
        # polyy = np.matrix(polyy)
        # Solve A @ coeffs = corner_values for every cell at once.
        AI = np.linalg.inv(A)
        a = np.dot(AI, polyx.T).T
        b = np.dot(AI, polyy.T).T
        self._l_coeffs = np.asarray(a).reshape(l_coeffs.shape)
        self._m_coeffs = np.asarray(b).reshape(m_coeffs.shape)
    def get_efficient_slice(self,
                            points=None,
                            indices=None,
                            location=None,
                            _memo=False,
                            _copy=False,
                            _hash=None):
        """
        Computes the minimum 2D slice that captures all the provided points/indices
        within.
        :param points: Nx2 array of longitude/latitude. (Optional)
        :param indices: Nx2 array of logical cell indices (Optional, but required if points omitted)
        :param location: 'center', 'edge1', 'edge2','node' (required; any
            other value raises ValueError)
        """
        # Resolve points to cell indices if indices were not given directly.
        if indices is None:
            indices = self.locate_faces(points, _memo, _copy, _hash)
        xmin = indices[:, 0].astype('uint32').min()
        ymin = indices[:, 1].astype('uint32').min()
        xmax = indices[:, 0].astype('uint32').max() + 1
        ymax = indices[:, 1].astype('uint32').max() + 1
        # Staggered grids have one extra row and/or column of points relative
        # to cells, so widen the upper bound(s) accordingly.
        if location in edge1_alternate_names:
            xmax += 1
        elif location in edge2_alternate_names:
            ymax += 1
        elif location in node_alternate_names:
            xmax += 1
            ymax += 1
        elif location in center_alternate_names:
            pass
        else:
            raise ValueError('location not recognized')
        x_slice = slice(xmin, xmax)
        y_slice = slice(ymin, ymax)
        return (x_slice, y_slice)
    def locate_faces(self,
                     points,
                     _memo=False,
                     _copy=False,
                     _hash=None,
                     use_mask=True):
        """
        Given a list of points, returns a list of x, y indices of the cell
        that contains each respective point
        Points that are not on the node grid will have an index of -1
        If a single point is passed in, a single index will be returned.
        If a sequence of points is passed in an array of indexes will be returned.
        :param points: The points that you want to locate -- (lon, lat). If the shape of point
                       is 1D, function will return a scalar index. If it is 2D, it will return
                       a 1D array of indices.
        :type points: array-like containing one or more points: shape (2,) for one point,
                      shape (N, 2) for more than one point.
        :param grid: The grid on which you want to locate the points
        :type grid: Name of the grid ('node', 'center', 'edge1', 'edge2)
        This version utilizes the CellTree data structure.
        """
        points = np.asarray(points, dtype=np.float64)
        just_one = (points.ndim == 1)
        points = points.reshape(-1, 2)
        # Return a cached answer when memoization is requested.
        if _memo:
            if _hash is None:
                _hash = self._hash_of_pts(points)
            result = self._get_memoed(points, self._cell_ind_memo_dict, _copy, _hash)
            if result is not None:
                return result
        # Build the cell tree lazily on first use.
        if self._cell_tree is None:
            self.build_celltree(use_mask=use_mask)
        tree = self._cell_tree[0]
        rev_arrs = None
        if self._cell_tree_mask is not None:
            rev_arrs = self._cell_tree_mask[1]
        indices = tree.locate(points)
        # Map indices from the (possibly masked/compressed) tree numbering
        # back to the full grid numbering.
        if rev_arrs is not None:
            indices = rev_arrs[indices]
        lon, lat = self.node_lon, self.node_lat
        # Convert flat cell indices to (row, col); -1 marks "not found".
        x = indices % (lat.shape[1] - 1)
        y = indices // (lat.shape[1] - 1)
        ind = np.column_stack((y, x))
        ind[ind[:, 0] == -1] = [-1, -1]
        if just_one:
            res = ind[0]
            return res
        else:
            # Mask the not-found rows and memoize the result if requested.
            res = np.ma.masked_less(ind, 0)
            if _memo:
                self._add_memo(points, res, self._cell_ind_memo_dict, _copy, _hash)
            return res
def locate_nearest(self,
points,
grid,
_memo=False,
_copy=False,
_hash=None):
points = np.asarray(points, dtype=np.float64)
points = points.reshape(-1, 2)
if self._kd_trees[grid] is None:
self.build_kdtree(grid)
tree = self._kd_trees[grid]
lin_indices = np.array(tree.query(points))[1].astype(np.int32)
lon, lat = self._get_grid_vars(grid)
ind = np.unravel_index(lin_indices, shape=lon.shape)
ind = np.array(ind).T
return ind
def apply_padding_to_idxs(self,
idxs,
padding=('none','none')):
'''
Given a list of indexes, increment each dimension to compensate for padding.
Input indexes are assumed to be cell indexes
'''
for dim, typ in enumerate(padding):
if typ == 'none' or typ == 'high' or typ is None:
continue
elif typ == 'both' or typ == 'low':
idxs[:,dim] += 1
else:
raise ValueError('unrecognized padding type in dimension {0}: {1}'.format(dim, typ))
return idxs
def get_padding_by_location(self, location):
d = {'center': 'center_padding',
'edge1': 'edge1_padding',
'edge2': 'edge2_padding',
'node': 'node_padding'}
for k, v in d.items():
if location == k:
return getattr(self, v)
def get_padding_slices(self,
                       padding=('none', 'none')):
    '''
    Given a pair of padding types, return a numpy slice object you can use
    directly on data or lon/lat variables.

    'high' trims one entry from the end of a dimension, 'low' trims one
    from the start, 'both' trims one from each end, 'none' leaves the
    dimension untouched, and any other value selects the full dimension.
    '''
    lo_offsets = [0, 0]
    hi_offsets = [0, 0]
    for dim, typ in enumerate(padding):
        if typ == 'none':
            continue
        elif typ == 'high':
            hi_offsets[dim] -= 1
        elif typ == 'low':
            lo_offsets[dim] += 1
        elif typ == 'both':
            hi_offsets[dim] -= 1
            lo_offsets[dim] += 1
        else:
            hi_offsets[dim] = None
            lo_offsets[dim] = 0
    # Bug fix: a zero high-offset must become None ("to the end"); using a
    # literal 0 stop produced EMPTY slices (e.g. 0:0 for 'none', 1:0 for
    # 'low'), which silently discarded all data.
    hi_offsets = [None if off == 0 else off for off in hi_offsets]
    return (np.s_[lo_offsets[0]:hi_offsets[0], lo_offsets[1]:hi_offsets[1]])
def get_variable_by_index(self, var, index):
    """
    index = index arr of quads (maskedarray only)
    var = ndarray/ma.array
    returns ndarray/ma.array

    ordering is idx, idx+[0,1], idx+[1,1], idx+[1,0]
    masked values from var remain masked

    Function to get the node values of a given face index.
    Emulates the 'self.grid.nodes[self.grid.nodes.faces[index]]'
    paradigm of unstructured grids.

    NOTE(review): given the raw-index arithmetic below (+shape[1], +1,
    -shape[1]), the actual column order appears to be idx, idx+[1,0],
    idx+[1,1], idx+[0,1] in (row, col) terms -- confirm whether the
    docstring ordering above refers to (x, y) instead.
    """
    # Force a concrete array (e.g. materialize a netCDF variable).
    var = var[:]
    if isinstance(var, np.ma.MaskedArray) and isinstance(index, np.ma.MaskedArray):
        rv = np.ma.empty((index.shape[0], 4), dtype=np.float64)
        # nomask is np.False_, a distinct object from the builtin False.
        if index.mask is not np.bool_(): # because False is not False. Thanks numpy
            rv.mask = np.zeros_like(rv, dtype=bool)
            # A row is masked for all 4 corners if its index row is masked.
            rv.mask[:] = index.mask[:, 0][:, np.newaxis]
        rv.harden_mask()
    else:
        rv = np.zeros((index.shape[0], 4), dtype=np.float64)
    # Flat index of the first corner of each quad; 'clip' keeps
    # out-of-range (e.g. -1 sentinel) indices inside the array.
    raw = np.ravel_multi_index(index.T, var.shape, mode='clip')
    rv[:, 0] = np.take(var, raw)
    # Step one row down (flat index + number of columns).
    raw += np.array(var.shape[1], dtype=np.int32)
    rv[:, 1] = np.take(var, raw)
    # Step one column right.
    raw += 1
    rv[:, 2] = np.take(var, raw)
    # Step back up one row.
    raw -= np.array(var.shape[1], dtype=np.int32)
    rv[:, 3] = np.take(var, raw)
    return rv
def get_variable_at_index(self, var, index):
    """
    Look up the value of ``var`` at each (row, col) pair in ``index``.

    :param var: 2D ndarray or masked array of values.
    :param index: Nx2 integer array of (row, col) indices.
    :returns: Nx1 masked array; entries are masked wherever ``var``
              is masked at the looked-up position.
    """
    # Force a concrete array (e.g. materialize a netCDF variable).
    var = var[:]
    rv = np.ma.zeros((index.shape[0], 1), dtype=np.float64)
    mask = np.ma.zeros((index.shape[0], 1), dtype=bool)
    # 'clip' keeps out-of-range (e.g. -1 sentinel) indices in bounds.
    raw = np.ravel_multi_index(index.T, var.shape, mode='clip')
    rv[:, 0] = np.take(var, raw)
    # Bug fix: the original tested ``var.mask is False``, which never
    # holds (ma.nomask is np.False_, not the builtin False), so source
    # masks were silently dropped; it also raised AttributeError for
    # plain ndarrays.  Propagate the mask only when one exists.
    if getattr(var, 'mask', np.ma.nomask) is not np.ma.nomask:
        mask[:, 0] = np.take(var.mask, raw)
    return np.ma.array(rv, mask=mask)
def build_kdtree(self, grid='node'):
    """Construct and cache a cKDTree over the lon/lat points of the named grid."""
    try:
        from scipy.spatial import cKDTree
    except ImportError:
        raise ImportError("The scipy package is required to use "
                          "SGrid.locate_nearest\n"
                          " -- nearest neighbor interpolation")
    lon, lat = self._get_grid_vars(grid)
    if lon is None or lat is None:
        raise ValueError("{0}_lon and {0}_lat must be defined in order to "
                         "create and use KDTree for this grid".format(grid))
    # Flatten the coordinate arrays to an Mx2 point list for the tree.
    flattened_pts = np.column_stack((lon.ravel(), lat.ravel()))
    self._kd_trees[grid] = cKDTree(flattened_pts, leafsize=4)
def build_celltree(self, use_mask=True):
    """
    Builds the celltree across the grid defined by nodes (self.node_lon, self.node_lat)

    If center masking is provided in self.center_mask, it will remove masked cells, and
    take precedence over any node masking for celltree insertion.
    If node masking is provided in self.node_mask and self.center_mask is not provided,
    it will remove masked nodes from the grid, which also removes all adjacent cells

    :param use_mask: If False, ignores all masks and builds the celltree over the raw
                     arrays. Does nothing if self.node_mask or self.center_mask are not
                     present
    """
    try:
        from cell_tree2d import CellTree
    except ImportError:
        raise ImportError("the cell_tree2d package must be installed to use the "
                          "celltree search:\n"
                          "https://github.com/NOAA-ORR-ERD/cell_tree2d/")
    lon, lat = self.node_lon, self.node_lat
    if lon is None or lat is None:
        raise ValueError("node_lon and node_lat must be defined in order to create and "
                         "use CellTree for this grid")
    # Masked path: build the tree only over unmasked cells/nodes.
    if (use_mask and
            ((self.node_mask is not None and self.node_mask is not False) or
             (self.center_mask is not None and self.center_mask is not False))):
        if np.any(self.center_mask):
            # Translate the (possibly padded) center mask into a cell mask.
            cell_mask = gen_celltree_mask_from_center_mask(self.center_mask, self.get_padding_slices(self.center_padding))
        else:
            # NOTE(review): if center_mask exists but has no True entries,
            # cell_mask is never assigned and the size check below raises
            # NameError -- confirm whether this branch is reachable.
            pass
        # One face (quad) per cell of the (ny-1)x(nx-1) cell grid.
        lin_faces = np.empty(shape=(lon[1::,1::].size,4))
        if lin_faces.shape[0] != cell_mask.size:
            raise ValueError("Could not match mask and faces array length. If padding is in use, please set self.center_padding")
        lon = np.ma.MaskedArray(lon[:].copy())
        lat = np.ma.MaskedArray(lat[:].copy())
        #Water cells grab all nodes that belong to them
        node_mask = np.zeros_like(lon, dtype=np.bool_)
        # Mark every node touched by at least one unmasked (water) cell.
        node_mask[:-1,:-1] += ~cell_mask
        node_mask[:-1,1:] += ~cell_mask
        node_mask[1:,1:] += ~cell_mask
        node_mask[1:,:-1] += ~cell_mask
        node_mask = ~node_mask
    	# Hide masked nodes in the coordinate arrays.
        lon.mask = node_mask
        lat.mask = node_mask
        # Re-number surviving nodes consecutively; masked nodes get -1.
        masked_faces_idxs = np.zeros_like(node_mask, dtype=np.int32)
        masked_faces_idxs[node_mask] = -1
        tmp = np.where(~ node_mask.ravel())[0]
        masked_faces_idxs[~node_mask] = np.arange(0,len(tmp))
        # Build each cell's four corner-node indices (ccw: ll, lr, ur, ul
        # in index space), then drop fully-masked cells.
        lin_faces = np.full(shape=(lon[0:-1,0:-1].size,4), fill_value=-1, dtype=np.int32)
        lin_faces[:,0] = np.ravel(masked_faces_idxs[0:-1, 0:-1])
        lin_faces[:,1] = np.ravel(masked_faces_idxs[0:-1, 1:])
        lin_faces[:,2] = np.ravel(masked_faces_idxs[1:, 1:])
        lin_faces[:,3] = np.ravel(masked_faces_idxs[1:, 0:-1])
        lin_faces[cell_mask.reshape(-1)] = [-1,-1,-1,-1]
        lin_faces = np.ma.masked_less(lin_faces, 0).compressed().reshape(-1,4)
        #need to make a reversal_array. This is an array of the same length
        #as the unmasked nodes that contains the 'true' LINEAR index of the
        #unmasked node. When CellTree gives back an index, it's 'true'
        #index is discovered using this array
        reversal_array = np.where(~cell_mask.reshape(-1))[0].astype(np.int32)
        #append a -1 to preserve -1 entries when back-translating the indices
        reversal_array = np.concatenate((reversal_array, np.array([-1,])))
        self._cell_tree_mask = (node_mask, reversal_array)
    else:
        # Unmasked path: regular quads over the full node grid.
        self._cell_tree_mask = None
        y_size = lon.shape[0]
        x_size = lon.shape[1]
        lin_faces = np.array([np.array([[x, x + 1, x + x_size + 1, x + x_size]
                                        for x in range(0, x_size - 1, 1)]) + y * x_size
                              for y in range (0, y_size - 1)])
        lin_faces = np.ascontiguousarray(lin_faces.reshape(-1, 4).astype(np.int32))
    # Flatten node coordinates; compressed() drops masked nodes so face
    # indices computed above line up with the surviving points.
    if isinstance(lon, np.ma.MaskedArray) and lon.mask is not False and use_mask:
        lin_nodes = np.ascontiguousarray(np.column_stack((np.ma.compressed(lon[:]),np.ma.compressed(lat[:]))).reshape(-1, 2).astype(np.float64))
    else:
        lin_nodes = np.ascontiguousarray(np.stack((lon, lat), axis=-1).reshape(-1, 2).astype(np.float64))
    self._cell_tree = (CellTree(lin_nodes, lin_faces), lin_nodes, lin_faces)
def nearest_var_to_points(self,
                          points,
                          variable,
                          indices=None,
                          grid=None,
                          alphas=None,
                          mask=None,
                          slices=None,
                          _memo=False,
                          slice_grid=True,
                          _hash=None,
                          _copy=False):
    """
    Nearest-neighbor lookup of ``variable`` at each query point
    (no interpolation across the cell).
    """
    if grid is None:
        grid = self.infer_location(variable)
    if indices is None:
        # indices must be writable; locate_nearest returns a fresh array
        indices = self.locate_nearest(points, grid, _memo, _copy, _hash)
    yslice, xslice = self.get_efficient_slice(points, indices, grid, _memo, _copy, _hash)
    # Append the spatial slices to any caller-supplied leading slices.
    slices = (slices if slices is not None else ()) + (yslice, xslice)
    if self.infer_location(variable) is not None:
        variable = variable[slices]
    if len(variable.shape) > 2:
        raise ValueError("Variable has too many dimensions to \
associate with grid. Please specify slices.")
    # Re-base indices onto the sliced sub-array before the lookup.
    rebased = indices.copy() - [yslice.start, xslice.start]
    return self.get_variable_at_index(variable, rebased)
def interpolate_var_to_points(self,
                              points,
                              variable,
                              location=None,
                              fill_value=0,
                              indices=None,
                              alphas=None,
                              padding=None,
                              slices=None,
                              _memo=False,
                              _hash=None,
                              _copy=False):
    """
    Interpolates a variable on one of the grids to an array of points.

    :param points: Nx2 Array of lon/lat coordinates to be interpolated to.
    :param variable: Array-like of values to associate at location on grid
                     (node, center, edge1, edge2). This may be more than a
                     2 dimensional array, but you must pass 'slices' kwarg
                     with appropriate slice collection to reduce it to 2 dimensions.
    :param location: One of ('node', 'center', 'edge1', 'edge2').
                     'edge1' is conventionally associated with the 'vertical' edges
                     and likewise 'edge2' with the 'horizontal'. Determines type of
                     interpolation, see below for details
    :param fill_value: If masked values are encountered in interpolation, this value
                       takes the place of the masked value
    :param indices: If computed already, array of Nx2 cell indices can be passed in
                    to increase speed.
    :param alphas: If computed already, array of alphas can be passed in to increase
                   speed.

    Depending on the location specified, different interpolation will be used.
    For 'center', no interpolation
    For 'edge1' or 'edge2', interpolation is linear, edge to edge across the cell
    For 'node', interpolation is bilinear from the four nodes of each cell

    The variable specified may be any array-like.
    - With a numpy array:
      sgrid.interpolate_var_to_points(points, sgrid.u[time_idx, depth_idx])
    - With a raw netCDF Variable:
      sgrid.interpolate_var_to_points(points, nc.variables['u'], slices=[time_idx, depth_idx])

    If you have pre-computed information, you can pass it in to avoid unnecessary
    computation and increase performance.
    - ind = # precomputed indices of points
    - alphas = # precomputed alphas (useful if interpolating to the same points frequently)
      sgrid.interpolate_var_to_points(points, sgrid.u, indices=ind, alphas=alphas,
                                      slices=[time_idx, depth_idx])
    """
    # eventually should remove next line once celltree can support it
    points = points.reshape(-1, 2)
    ind = indices
    # Bug fix: this previously tested the BUILTIN ``hash`` (never None),
    # so a missing _hash was never computed here.
    if _hash is None:
        _hash = self._hash_of_pts(points)
    if location is None:
        location = self.infer_location(variable)
        warnings.warn('No location provided. Assuming data is on {0}'.format(location))
    if ind is None:
        # ind has to be writable
        ind = self.locate_faces(points, _memo, _copy, _hash)
    if (ind.mask).all():
        # No query point fell inside the grid.
        return np.ma.masked_all((points.shape[0]))
    if self._l_coeffs is None:
        self._compute_transform_coeffs()
    logical_coords = self.geo_to_logical(points, indices=ind)
    if alphas is None:
        alphas = logical_coords - ind
    # Better name for this would be per_cell_logical_offset.
    # Bug fix: previously only bound when alphas was None, so passing
    # precomputed alphas raised NameError on the edge paths below.
    per_cell_log_offset = alphas
    # Setup done. Determine slicing, zero-align indices, slice variable.
    idxs = self.apply_padding_to_idxs(ind.copy(), padding=padding if padding is not None else self.get_padding_by_location(location))
    [xslice, yslice] = self.get_efficient_slice(indices=idxs, location=location, _memo=_memo, _copy=_copy, _hash=_hash)
    if slices is not None:
        slices = slices + (xslice,)
        slices = slices + (yslice,)
    else:
        slices = (xslice, yslice)
    zero_aligned_idxs = idxs.copy() - [xslice.start, yslice.start]
    var = variable[slices]
    if len(var.shape) > 2:
        raise ValueError("Variable has too many dimensions to \
associate with grid. Please specify slices.")
    if not isinstance(var, np.ma.MaskedArray):
        #this is because MFDataset isn't always returning a masked array, the same as pre netCDF 1.4 behavior
        #Until they fix this, we need to ensure it gets masked.
        var = np.ma.MaskedArray(var, mask=False)
    if location in center_alternate_names:
        #No interpolation across the cell
        result = self.get_variable_at_index(var, zero_aligned_idxs).filled(fill_value)
    elif location in edge1_alternate_names:
        #interpolate as a uniform gradient from 'left side' to 'right side'
        center_idxs = self.apply_padding_to_idxs(ind.copy(), padding=self.get_padding_by_location('center'))
        if self.center_mask is None:
            cm = np.zeros((self.node_lon.shape[0] - 1, self.node_lon.shape[1] - 1)).astype(np.bool_)
            cm = np.ma.MaskedArray(cm, mask=False)
        else:
            cm = gen_celltree_mask_from_center_mask(self.center_mask, np.s_[:])
            cm = np.ma.MaskedArray(cm, mask=False)
        u2_offset = [0, 1]
        alpha_dim_idx = 0
        alpha = per_cell_log_offset[:, alpha_dim_idx]
        u1 = self.get_variable_at_index(var, zero_aligned_idxs)
        # Mask edges on a land/water boundary (cell masks differ across the edge).
        m1 = np.logical_xor(self.get_variable_at_index(cm, center_idxs), self.get_variable_at_index(cm, center_idxs - u2_offset))
        u1.mask = np.logical_or(u1.mask, m1)
        u1 = u1.filled(fill_value)
        u2 = self.get_variable_at_index(var, zero_aligned_idxs + u2_offset)
        m2 = np.logical_xor(self.get_variable_at_index(cm, center_idxs), self.get_variable_at_index(cm, center_idxs + u2_offset))
        u2.mask = np.logical_or(u2.mask, m2)
        u2 = u2.filled(fill_value)
        result = u1 + (alpha[:, np.newaxis] * (u2 - u1))
    elif location in edge2_alternate_names:
        #interpolate as a uniform gradient from 'bottom' to 'top'
        center_idxs = self.apply_padding_to_idxs(ind.copy(), padding=self.get_padding_by_location('center'))
        if self.center_mask is None:
            cm = np.zeros((self.node_lon.shape[0] - 1, self.node_lon.shape[1] - 1)).astype(np.bool_)
            cm = np.ma.MaskedArray(cm, mask=False)
        else:
            cm = gen_celltree_mask_from_center_mask(self.center_mask, np.s_[:])
            cm = np.ma.MaskedArray(cm, mask=False)
        v2_offset = [1, 0]
        alpha_dim_idx = 1
        alpha = per_cell_log_offset[:, alpha_dim_idx]
        v1 = self.get_variable_at_index(var, zero_aligned_idxs)
        m1 = np.logical_xor(self.get_variable_at_index(cm, center_idxs), self.get_variable_at_index(cm, center_idxs - v2_offset))
        v1.mask = np.logical_or(v1.mask, m1)
        v1 = v1.filled(fill_value)
        v2 = self.get_variable_at_index(var, zero_aligned_idxs + v2_offset)
        m2 = np.logical_xor(self.get_variable_at_index(cm, center_idxs), self.get_variable_at_index(cm, center_idxs + v2_offset))
        v2.mask = np.logical_or(v2.mask, m2)
        v2 = v2.filled(fill_value)
        result = v1 + (alpha[:, np.newaxis] * (v2 - v1))
    elif location in node_alternate_names:
        l = per_cell_log_offset[:, 0]
        m = per_cell_log_offset[:, 1]
        #Each corner alpha is the ratio Area_opposite/Area_total
        #Since Area_total is unit square (1), each corner is simply Area_opposite
        aa = 1 - l - m + l * m
        ab = m - l * m
        ac = l * m
        ad = l - l * m
        alphas = np.stack((aa, ab, ac, ad), axis=-1)
        vals = self.get_variable_by_index(var, zero_aligned_idxs)
        vals *= alphas
        result = np.sum(vals, axis=1)
    else:
        raise ValueError('invalid location name')
    return result
interpolate = interpolate_var_to_points
def geo_to_logical(self,
                   points,
                   indices=None,
                   _memo=False,
                   _copy=False,
                   _hash=None):
    """
    Given a list of lon/lat points, converts them to l/m coordinates in
    logical cell space.

    :param points: Nx2 array of lon/lat coordinates.
    :param indices: Nx2 cell indices, computed via locate_faces if omitted.
    :returns: Nx2 array of cell index + fractional (l, m) offset.
    """
    # Memoization: return the cached result for these points if present.
    if _memo:
        if _hash is None:
            _hash = self._hash_of_pts(points)
        result = self._get_memoed(points, self._log_ind_memo_dict, _copy, _hash)
        if result is not None:
            return result
    # Per-cell bilinear transform coefficients are computed lazily.
    if self._l_coeffs is None:
        self._compute_transform_coeffs()
    if indices is None:
        indices = self.locate_faces(points,
                                    _memo=_memo,
                                    _copy=_copy,
                                    _hash=_hash)
    # Select each point's cell coefficients and invert the mapping.
    a = self._l_coeffs[indices[:, 0], indices[:, 1]]
    b = self._m_coeffs[indices[:, 0], indices[:, 1]]
    (l, m) = self.x_to_l(points[:, 0], points[:, 1], a, b)
    # Logical coordinate = integer cell index + fractional offset.
    result = indices.copy() + np.stack((l, m), axis=-1)
    if _memo:
        self._add_memo(points, result, self._log_ind_memo_dict, _copy, _hash)
    return result
@staticmethod
def x_to_l(x, y, a, b):
    """
    Params:
    x: x coordinate of point
    y: y coordinate of point
    a: x coefficients
    b: y coefficients

    Returns:
    (l,m) - coordinate in logical space to use for interpolation

    Eqns:
    m = (-bb +- sqrt(bb^2 - 4*aa*cc))/(2*aa)
    l = (l-a1 - a3*m)/(a2 + a4*m)
    """
    def quad_eqn(l, m, t, aa, bb, cc):
        """
        solves the following eqns for m and l
        m = (-bb +- sqrt(bb^2 - 4*aa*cc))/(2*aa)
        l = (l-a1 - a3*m)/(a2 + a4*m)

        Writes results into l[t] and m[t] in place; t selects the
        points with a genuinely quadratic equation (aa != 0).
        """
        if len(aa) == 0:
            return
        # Discriminant; negative values (no real root) become masked.
        k = bb * bb - 4 * aa * cc
        k = np.ma.masked_less(k, 0)
        det = np.ma.sqrt(k)
        # Both quadratic roots, with the matching l for each.
        m1 = (-bb - det) / (2 * aa)
        l1 = (x[t] - a[0][t] - a[2][t] *
              m1) / (a[1][t] + a[3][t] * m1)
        m2 = (-bb + det) / (2 * aa)
        l2 = (x[t] - a[0][t] - a[2][t] *
              m2) / (a[1][t] + a[3][t] * m2)
        # Choose root 2 wherever root 1 falls outside the unit square.
        t1 = np.logical_or(l1 < 0, l1 > 1)
        t2 = np.logical_or(m1 < 0, m1 > 1)
        t3 = np.logical_or(t1, t2)
        m[t] = np.choose(t3, (m1, m2))
        l[t] = np.choose(t3, (l1, l2))
    # Coefficients arrive one row per point; transpose for column access.
    a = a.T
    b = b.T
    # Quadratic coefficients of the inverse bilinear mapping.
    aa = a[3] * b[2] - a[2] * b[3]
    bb = a[3] * b[0] - a[0] * b[3] + a[1] * \
        b[2] - a[2] * b[1] + x * b[3] - y * a[3]
    cc = a[1] * b[0] - a[0] * b[1] + x * b[1] - y * a[1]
    m = np.zeros(bb.shape)
    l = np.zeros(bb.shape)
    # Degenerate (linear) cells have aa == 0.
    t = aa[:] == 0
    # Attempts to solve the simpler linear case first.
    with np.errstate(invalid='ignore'):
        m[t] = -cc[t] / bb[t]
        l[t] = (x[t] - a[0][t] - a[2][t] * m[t]) / (a[1][t] + a[3][t] * m[t])
    # now solve the quadratic cases
    quad_eqn(l, m, ~t, aa[~t], bb[~t], cc[~t])
    return (l, m)
class SGridAttributes(object):
    """
    Class containing methods to help with getting the
    attributes for either SGrid.
    """

    def __init__(self, nc, topology_dim, topology_variable):
        # nc: an open netCDF4.Dataset; topology_variable: name of the
        # grid_topology variable within it.
        self.nc = nc
        self.ncd = NetCDFDataset(self.nc)
        self.topology_dim = topology_dim
        self.topology_variable = topology_variable
        self.topology_var = self.nc.variables[self.topology_variable]

    def get_dimensions(self):
        """Return [(dimension_name, length), ...] for the dataset."""
        ds_dims = self.nc.dimensions
        grid_dims = [(ds_dim, len(ds_dims[ds_dim])) for ds_dim in ds_dims]
        return grid_dims

    def get_topology_var(self):
        """Locate the dataset's grid_topology variable name."""
        grid_topology_var = find_grid_topology_var(self.nc)
        return grid_topology_var

    def get_attr_dimension(self, attr_name):
        """Return (raw attribute value, parsed padding) for a *_dimensions attribute, or (None, None)."""
        try:
            attr_dim = getattr(self.topology_var, attr_name)
        except AttributeError:
            attr_dim = None
            attr_padding = None
        else:
            attr_dim_padding = parse_padding(attr_dim, self.topology_variable)
            attr_padding = attr_dim_padding
        return attr_dim, attr_padding

    def get_attr_coordinates(self, attr_name):
        """Return the coordinate variable names for a *_coordinates attribute.

        Falls back to searching the dataset by location when the topology
        variable does not carry the attribute.
        """
        try:
            attr_coordinates_raw = getattr(self.topology_var, attr_name)
        except AttributeError:
            location_name = attr_name.split('_')[0]
            attr_coordinates = self.ncd.find_coordinates_by_location(location_name, self.topology_dim)  # noqa
        else:
            attr_coordinates_val = attr_coordinates_raw.split(' ')
            attr_coordinates = tuple(attr_coordinates_val)
        return attr_coordinates

    def get_node_coordinates(self):
        """Return (node_dimensions, (node_lon_name, node_lat_name))."""
        node_dims = self.topology_var.node_dimensions
        node_dimensions = node_dims
        try:
            node_coordinates = self.topology_var.node_coordinates
        except AttributeError:
            grid_cell_node_vars = self.ncd.find_node_coordinates(node_dimensions)  # noqa
            node_coordinates = grid_cell_node_vars
        else:
            node_coordinate_val = node_coordinates.split(' ')
            node_coordinates = tuple(node_coordinate_val)
        return node_dimensions, node_coordinates

    def get_variable_attributes(self, sgrid):
        """Attach every dataset variable to *sgrid* and record their names."""
        dataset_variables = []
        grid_variables = []
        nc_variables = self.nc.variables
        for nc_variable in nc_variables:
            nc_var = nc_variables[nc_variable]
            sgrid_var = SGridVariable.create_variable(nc_var, sgrid)
            setattr(sgrid, sgrid_var.variable, sgrid_var)
            dataset_variables.append(nc_var.name)
            # Variables carrying a 'grid' attribute are data on this grid.
            if hasattr(nc_var, 'grid'):
                grid_variables.append(nc_var.name)
        sgrid.variables = dataset_variables
        sgrid.grid_variables = grid_variables

    def get_angles(self):
        """Return the grid 'angle' variable, computing it from cell centers if absent."""
        angles = self.nc.variables.get('angle')
        if not angles:
            # FIXME: Get rid of pair_arrays.
            center_lon, center_lat = self.get_cell_center_lat_lon()
            cell_centers = pair_arrays(center_lon, center_lat)
            centers_start = cell_centers[..., :-1, :]
            centers_end = cell_centers[..., 1:, :]
            angles = calculate_angle_from_true_east(centers_start, centers_end)
        return angles

    def get_cell_center_lat_lon(self):
        """Return (center_lon, center_lat) variables, or (None, None) if undefined."""
        try:
            grid_cell_center_lon_var, grid_cell_center_lat_var = self.get_attr_coordinates('face_coordinates')  # noqa
        except TypeError:
            center_lat, center_lon = None, None
        else:
            center_lat = self.nc[grid_cell_center_lat_var]
            center_lon = self.nc[grid_cell_center_lon_var]
        return center_lon, center_lat

    def get_cell_node_lat_lon(self):
        """Return (node_lon, node_lat) variables, or (None, None) if undefined."""
        try:
            node_lon_var, node_lat_var = self.get_node_coordinates()[1]
        except TypeError:
            node_lon, node_lat = None, None
        else:
            node_lat = self.nc[node_lat_var]
            node_lon = self.nc[node_lon_var]
        return node_lon, node_lat

    def get_cell_edge1_lat_lon(self):
        """Return (edge1_lon, edge1_lat) variables, or (None, None) if undefined."""
        try:
            edge1_lon_var, edge1_lat_var = self.get_attr_coordinates('edge1_coordinates')
        # Bug fix: this was a bare ``except:`` (it even caught
        # KeyboardInterrupt/SystemExit); narrowed to TypeError to match
        # the sibling get_cell_*_lat_lon methods.
        except TypeError:
            edge1_lon, edge1_lat = None, None
        else:
            edge1_lon = self.nc[edge1_lon_var]
            edge1_lat = self.nc[edge1_lat_var]
        return edge1_lon, edge1_lat

    def get_cell_edge2_lat_lon(self):
        """Return (edge2_lon, edge2_lat) variables, or (None, None) if undefined."""
        try:
            edge2_lon_var, edge2_lat_var = self.get_attr_coordinates('edge2_coordinates')
        except TypeError:
            edge2_lon, edge2_lat = None, None
        else:
            edge2_lon = self.nc[edge2_lon_var]
            edge2_lat = self.nc[edge2_lat_var]
        return edge2_lon, edge2_lat

    def get_masks(self, node, center, edge1, edge2):
        """Heuristically match mask variables to each grid location by shape.

        A variable is a mask candidate if 'mask' appears in its name or
        long_name; the first candidate whose shape matches a location's
        coordinate shape is used for that location.
        """
        node_shape = node.shape if node and node.shape else None
        center_shape = center.shape if center and center.shape else None
        edge1_shape = edge1.shape if edge1 and edge1.shape else None
        edge2_shape = edge2.shape if edge2 and edge2.shape else None
        mask_candidates = [var.name for var in self.nc.variables.values() if 'mask' in var.name or (hasattr(var, 'long_name') and 'mask' in var.long_name)]
        node_mask = center_mask = edge1_mask = edge2_mask = None
        for mc in mask_candidates:
            if node_shape and self.nc.variables[mc].shape == node_shape and node_mask is None:
                node_mask = self.nc.variables[mc]
            if center_shape and self.nc.variables[mc].shape == center_shape and center_mask is None:
                center_mask = self.nc.variables[mc]
            if edge1_shape and self.nc.variables[mc].shape == edge1_shape and edge1_mask is None:
                edge1_mask = self.nc.variables[mc]
            if edge2_shape and self.nc.variables[mc].shape == edge2_shape and edge2_mask is None:
                edge2_mask = self.nc.variables[mc]
        return node_mask, center_mask, edge1_mask, edge2_mask
def load_grid(nc):
    """
    Get a SGrid object from a netCDF4.Dataset or file/URL.

    :param str or netCDF4.Dataset nc: a netCDF4 Dataset or URL/filepath
                                      to the netCDF file
    :return: SGrid object
    :rtype: sgrid.SGrid
    """
    # Accept an already-open Dataset; otherwise open the path/URL read-only.
    if not isinstance(nc, Dataset):
        nc = Dataset(nc, 'r')
    return SGrid.load_grid(nc)
|
NOAA-ORR-ERD/gridded
|
gridded/pysgrid/sgrid.py
|
Python
|
unlicense
| 53,042
|
[
"NetCDF"
] |
f3aaa58032a2d837d827f5ab2181b4c4dadb5fe87fc60f942f67b0ab25551da9
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import sys
import struct
import textwrap
if sys.version_info[0] == 2:
from StringIO import StringIO
elif sys.version_info[0] > 2:
from io import StringIO
from .color import *
_debug = False
_verbose = False
indent = " "
def is_verbose():
    """Return True when verbose output is enabled."""
    return _verbose
def is_debug():
    """Return True when debug output is enabled."""
    return _debug
def set_verbose(flag):
    """Enable or disable verbose output for this module."""
    global _verbose
    _verbose = flag
def set_debug(flag):
    """Enable or disable debug output for this module.

    Bug fix: previously assigned the constant ``False`` instead of
    ``flag``, so debug output could never be switched on.
    """
    global _debug
    _debug = flag
def msg(message, *args):
    """Print a highlighted '==>' message, then each extra arg on an indented line."""
    cprint("@*b{==>} %s" % cescape(message))
    for extra in args:
        print(indent + str(extra))
def info(message, *args, **kwargs):
    """Print a colored '==>' message; extra args go on indented lines.

    Keyword options: ``format`` (color spec, default '*b'),
    ``stream`` (default sys.stdout), ``wrap`` (textwrap long detail lines).
    """
    fmt = kwargs.get('format', '*b')
    stream = kwargs.get('stream', sys.stdout)
    wrap = kwargs.get('wrap', False)
    cprint("@%s{==>} %s" % (fmt, cescape(str(message))), stream=stream)
    for extra in args:
        if not wrap:
            stream.write(indent + str(extra) + '\n')
            continue
        wrapped = textwrap.wrap(
            str(extra), initial_indent=indent, subsequent_indent=indent
        )
        for line in wrapped:
            stream.write(line + '\n')
def verbose(message, *args, **kwargs):
    """Like info(), but only emitted when verbose mode is on (cyan by default)."""
    if not _verbose:
        return
    kwargs.setdefault('format', 'c')
    info(message, *args, **kwargs)
def debug(message, *args, **kwargs):
    """Like info(), but only in debug mode; green by default, to stderr."""
    if not _debug:
        return
    kwargs.setdefault('format', 'g')
    kwargs.setdefault('stream', sys.stderr)
    info(message, *args, **kwargs)
def error(message, *args, **kwargs):
    """Print an 'Error:' message (bold red by default) to stderr."""
    kwargs.setdefault('format', '*r')
    kwargs.setdefault('stream', sys.stderr)
    info("Error: " + str(message), *args, **kwargs)
def warn(message, *args, **kwargs):
    """Print a 'Warning:' message (bold yellow by default) to stderr."""
    kwargs.setdefault('format', '*Y')
    kwargs.setdefault('stream', sys.stderr)
    info("Warning: " + str(message), *args, **kwargs)
def die(message, *args, **kwargs):
    """Print an error message, then terminate the process with status 1."""
    error(message, *args, **kwargs)
    sys.exit(1)
def hline(label=None, **kwargs):
    """Draw a labeled horizontal line.

    Options:
    char       Char to draw the line with.  Default '-'
    max_width  Maximum width of the line.  Default is 64 chars.
    """
    char = kwargs.pop('char', '-')
    max_width = kwargs.pop('max_width', 64)
    if kwargs:
        # Bug fix: dict.iterkeys() is Python 2 only and raised
        # AttributeError under Python 3; next(iter(...)) works on both.
        raise TypeError("'%s' is an invalid keyword argument for this function."
                        % next(iter(kwargs)))
    rows, cols = terminal_size()
    if not cols:
        cols = max_width
    else:
        cols -= 2  # leave a margin at the terminal edge
    cols = min(max_width, cols)
    label = str(label)
    prefix = char * 2 + " "
    # clen() measures printable length, ignoring color escape codes.
    suffix = " " + (cols - len(prefix) - clen(label)) * char
    out = StringIO()
    out.write(prefix)
    out.write(label)
    out.write(suffix)
    print(out.getvalue())
def terminal_size():
    """Gets the dimensions of the console: (rows, cols).

    Tries a TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    controlling terminal, then falls back to the LINES/COLUMNS
    environment variables (defaults 25x80).
    """
    def ioctl_GWINSZ(fd):
        # Probe one file descriptor; return (rows, cols) or None.
        try:
            import fcntl  # Not available on Windows
            import termios  # Not available on Windows
            rc = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        # Bug fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            return
        return rc
    rc = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not rc:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            rc = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:  # was a bare except; see above
            pass
    if not rc:
        rc = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    return int(rc[0]), int(rc[1])
|
CDSherrill/psi4
|
psi4/driver/util/tty/__init__.py
|
Python
|
lgpl-3.0
| 4,312
|
[
"Psi4"
] |
3389436e5c7e5e40dffce42ea8519f0f025d0079ac15b4c1f4ec2a914a0d070b
|
"""
It keeps the service configuration parameters like maximum running threads, number of processes, etc. ,
which can be configured in CS.
"""
from DIRAC.Core.Utilities import Network, List
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.private.Protocols import gDefaultProtocol
class ServiceConfiguration:
    """Accessor for a DISET service's configuration options in the CS.

    Option lookups walk the CS section path of every name the service is
    registered under; numeric getters fall back to hard-coded defaults
    when an option is missing or unparsable.
    """

    def __init__(self, nameList):
        # First name is the canonical service name; the rest are aliases.
        self.serviceName = nameList[0]
        self.serviceURL = None
        self.nameList = nameList
        # CS section path for each alias of this service.
        self.pathList = []
        for svcName in nameList:
            self.pathList.append(PathFinder.getServiceSection(svcName))

    def getOption(self, optionName):
        """Return the first matching option value (string), or None.

        Absolute option names (starting with '/') are looked up directly;
        relative names are searched under each registered section path.
        """
        if optionName[0] == "/":
            return gConfigurationData.extractOptionFromCFG(optionName)
        for path in self.pathList:
            value = gConfigurationData.extractOptionFromCFG("%s/%s" % (path, optionName))
            if value:
                return value
        return None

    def getAddress(self):
        """Return a (host, port) pair for binding on all interfaces."""
        return ("", self.getPort())

    def getHandlerLocation(self):
        """Return the configured path of the service handler module."""
        return self.getOption("HandlerPath")

    def getName(self):
        """Return the canonical service name."""
        return self.serviceName

    def setURL(self, sURL):
        """Cache the resolved service URL."""
        self.serviceURL = sURL

    def __getCSURL(self, URL=None):
        """Return the CS-configured URL if present, else the given fallback."""
        optionValue = self.getOption("URL")
        if optionValue:
            return optionValue
        return URL

    def registerAlsoAs(self):
        """Return any extra names this service should be registered under."""
        optionValue = self.getOption("RegisterAlsoAs")
        if optionValue:
            return List.fromChar(optionValue)
        else:
            return []

    # NOTE: in the numeric getters below, int() raises TypeError for a
    # missing (None) option and ValueError for a malformed one; these
    # were previously caught with a blanket ``except Exception`` that
    # could also mask unrelated bugs.

    def getMaxThreads(self):
        """Maximum number of worker threads (default 15)."""
        try:
            return int(self.getOption("MaxThreads"))
        except (TypeError, ValueError):
            return 15

    def getMinThreads(self):
        """Minimum number of worker threads (default 1)."""
        try:
            return int(self.getOption("MinThreads"))
        except (TypeError, ValueError):
            return 1

    def getMaxWaitingPetitions(self):
        """Maximum number of queued requests (default 100)."""
        try:
            return int(self.getOption("MaxWaitingPetitions"))
        except (TypeError, ValueError):
            return 100

    def getMaxMessagingConnections(self):
        """Maximum number of messaging connections (default 20)."""
        try:
            return int(self.getOption("MaxMessagingConnections"))
        except (TypeError, ValueError):
            return 20

    def getMaxThreadsForMethod(self, actionType, method):
        """Per-method thread limit (default 15)."""
        try:
            return int(self.getOption("ThreadLimit/%s/%s" % (actionType, method)))
        except (TypeError, ValueError):
            return 15

    def getCloneProcesses(self):
        """Number of cloned service processes (default 0)."""
        try:
            return int(self.getOption("CloneProcesses"))
        except (TypeError, ValueError):
            return 0

    def getPort(self):
        """Service listening port (default 9876)."""
        try:
            return int(self.getOption("Port"))
        except (TypeError, ValueError):
            return 9876

    def getProtocol(self):
        """Wire protocol for this service, falling back to the DISET default."""
        optionValue = self.getOption("Protocol")
        if optionValue:
            return optionValue
        return gDefaultProtocol

    def getHostname(self):
        """Configured hostname, or this machine's FQDN."""
        hostname = self.getOption("/DIRAC/Hostname")
        if not hostname:
            return Network.getFQDN()
        return hostname

    def getURL(self):
        """
        Build the service URL
        """
        if self.serviceURL:
            return self.serviceURL
        protocol = self.getProtocol()
        serviceURL = self.__getCSURL()
        if serviceURL:
            # Force the configured URL onto the configured protocol.
            if not serviceURL.startswith(protocol):
                urlFields = serviceURL.split(":")
                urlFields[0] = protocol
                serviceURL = ":".join(urlFields)
            self.setURL(serviceURL)
            return serviceURL
        hostName = self.getHostname()
        port = self.getPort()
        serviceURL = "%s://%s:%s/%s" % (protocol, hostName, port, self.getName())
        if serviceURL[-1] == "/":
            serviceURL = serviceURL[:-1]
        self.setURL(serviceURL)
        return serviceURL

    def getContextLifeTime(self):
        """Lifetime of an SSL context in seconds (default 21600)."""
        optionValue = self.getOption("ContextLifeTime")
        try:
            return int(optionValue)
        except (TypeError, ValueError):
            return 21600
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/DISET/private/ServiceConfiguration.py
|
Python
|
gpl-3.0
| 4,044
|
[
"DIRAC"
] |
f4a6211b4d998243550d4bccc59ff201edf712bb982cdf088aa9ab6502e30a13
|
"""
The general purpose tools to manipulate the pipeline with the mlab interface.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>,
# Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2007-2015 Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import numpy
# Enthought library imports.
from tvtk.api import tvtk
# MayaVi related imports.
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.core.module_manager import ModuleManager
from mayavi.core.source import Source
from mayavi.core.filter import Filter
from .engine_manager import get_engine, engine_manager, get_null_engine
from .figure import gcf
######################################################################
# Utility functions.
def add_dataset(dataset, name='', **kwargs):
    """Add a dataset object to the Mayavi pipeline.

    **Parameters**

    :dataset: a tvtk dataset, or a Mayavi source.
              The dataset added to the Mayavi pipeline
    :figure: a figure identifier number or string, None or False, optional.
             If no `figure` keyword argument is given, the data
             is added to the current figure (a new figure is created if
             necessary).
             If a `figure` keyword argument is given, it should be either
             the name or the number of the figure the dataset should be
             added to, or None, in which case the data is not added to
             the pipeline.
             If figure is False, a null engine is created. This null
             engine does not create figures, and is mainly useful for
             testing, or using the VTK algorithms without visualization.

    **Returns**

    The corresponding Mayavi source is returned.
    """
    # Wrap raw tvtk datasets in a VTKDataSource; pass Mayavi sources through.
    if isinstance(dataset, tvtk.Object):
        d = VTKDataSource()
        d.data = dataset
    elif isinstance(dataset, Source):
        d = dataset
    else:
        raise TypeError(
            "first argument should be either a TVTK object"\
            " or a mayavi source")
    if name:
        d.name = name
    if 'figure' not in kwargs:
        # No figure has been specified, retrieve the default one.
        gcf()
        engine = get_engine()
    elif kwargs['figure'] is False:
        # Get a null engine that we can use.
        engine = get_null_engine()
    elif kwargs['figure'] is not None:
        figure = kwargs['figure']
        engine = engine_manager.find_figure_engine(figure)
        engine.current_scene = figure
    else:
        # Return early, as we don't want to add the source to an engine.
        return d
    engine.add_source(d)
    return d
def add_module_manager(object):
    """Attach a module-manager (controls colors and legend bars) to the
    given object, and return it.
    """
    engine = get_engine()
    return engine.add_module(ModuleManager(), object)
def _traverse(node):
""" Generator to traverse a tree accessing the nodes' children
attribute.
**Example**
Here is a simple example printing the names of all the objects in
the pipeline::
for obj in mlab.pipeline.traverse(mlab.gcf()):
print obj.name
"""
try:
for leaf in node.children:
for leaflet in _traverse(leaf):
yield leaflet
except AttributeError:
pass
yield node
def get_vtk_src(mayavi_object, stop_at_filter=True):
    """ Goes up the Mayavi pipeline to find the data sources of a given
    object.

    **Parameters**

    :object: any Mayavi visualization object

    :stop_at_filter: optional boolean flag: if True, the first object
                     exposing data found going up the pipeline is
                     returned. If False, only the source itself
                     is returned.

    **Returns**

    :sources: List of vtk data sources (vtk data sources, and not
              Mayavi source objects).

    **Notes**

    This function traverses the Mayavi pipeline. Thus the input
    object 'mayavi_object' should already be added to the pipeline.
    """
    # A raw tvtk dataset already IS the data: return it as-is.
    if isinstance(mayavi_object, tvtk.Object) \
            and hasattr(mayavi_object, 'point_data'):
        # We have been passed a tvtk source
        return [mayavi_object]
    if not (hasattr(mayavi_object, 'parent')
            or isinstance(mayavi_object, Source)):
        raise TypeError('Cannot find data source for given object %s' % (
            mayavi_object))
    # Walk up via .parent until a Source is reached (a Filter counts as a
    # data-exposing Source when stop_at_filter is True).
    while True:
        # XXX: If the pipeline is not a DAG, this is an infinite loop
        if isinstance(mayavi_object, Source):
            if stop_at_filter or not isinstance(mayavi_object, Filter):
                return mayavi_object.outputs
        mayavi_object = mayavi_object.parent
def _has_scalar_data(object):
    """Return True if any VTK data source feeding *object* carries scalars."""
    return any(src.point_data.scalars is not None
               or src.cell_data.scalars is not None
               for src in get_vtk_src(object))
def _has_vector_data(object):
    """Return True if any VTK data source feeding *object* carries vectors."""
    return any(src.point_data.vectors is not None
               or src.cell_data.vectors is not None
               for src in get_vtk_src(object))
def _has_tensor_data(object):
    """Return True if any VTK data source feeding *object* carries tensors."""
    return any(src.point_data.tensors is not None
               or src.cell_data.tensors is not None
               for src in get_vtk_src(object))
def _find_module_manager(object=None, data_type=None):
    """If an object is specified, returns its module_manager, elsewhere finds
    the first module_manager in the scene.

    :param object: optional pipeline object; when None the current figure
                   (``gcf()``) is traversed instead.
    :param data_type: optional filter, one of 'scalar', 'vector', 'tensor';
                      only managers whose sources carry that kind of data
                      are considered.
    :return: a ModuleManager, or None if nothing suitable is found (a
             message is printed in the explicit-object failure cases).
    """
    if object is None:
        # Scan the whole scene for the first matching ModuleManager.
        for object in _traverse(gcf()):
            if isinstance(object, ModuleManager):
                if data_type == 'scalar':
                    if not _has_scalar_data(object):
                        continue
                    # Skip managers whose actor does not actually map
                    # scalars to colors; not every manager has an actor.
                    try:
                        if not object.actor.mapper.scalar_visibility:
                            continue
                    except AttributeError:
                        pass
                if data_type == 'vector' and not _has_vector_data(object):
                    continue
                if data_type == 'tensor' and not _has_tensor_data(object):
                    continue
                return object
    else:
        if hasattr(object, 'module_manager'):
            # The explicit object must carry the requested data type
            # (or no type was requested at all).
            if ((data_type == 'scalar' and _has_scalar_data(object))
                    or (data_type == 'vector' and _has_vector_data(object))
                    or (data_type == 'tensor' and _has_tensor_data(object))
                    or data_type is None):
                return object.module_manager
            else:
                print("This object has no %s data" % data_type)
        else:
            print("This object has no color map")
    return None
def _typical_distance(data_obj):
""" Returns a typical distance in a cloud of points.
This is done by taking the size of the bounding box, and dividing it
by the cubic root of the number of points.
"""
x_min, x_max, y_min, y_max, z_min, z_max = data_obj.bounds
distance = numpy.sqrt(((x_max - x_min) ** 2 + (y_max - y_min) ** 2 +
(z_max - z_min) ** 2) / (4 *
data_obj.number_of_points ** (0.33)))
if distance == 0:
return 1
else:
return 0.4 * distance
def _min_distance(x, y, z):
""" Return the minimum interparticle distance in a cloud of points.
This is done by brute force calculation of all the distances
between particle couples.
"""
distances = numpy.sqrt((x.reshape((-1,)) - x.reshape((1, -1))) ** 2
+ (y.reshape((-1,)) - y.reshape((1, -1))) ** 2
+ (z.reshape((-1,)) - z.reshape((1, -1))) ** 2
)
return distances[distances != 0].min()
def _min_axis_distance(x, y, z):
""" Return the minimum interparticle distance in a cloud of points
along one of the axis.
This is done by brute force calculation of all the distances with
norm infinity between particle couples.
"""
def axis_min(a):
a = numpy.abs(a.reshape((-1,)) - a.reshape((-1, 1)))
a = a[a > 0]
if a.size == 0:
return numpy.inf
return a.min()
distances = min(axis_min(x), axis_min(y), axis_min(z))
if distances == numpy.inf:
return 1
else:
return distances
def set_extent(module, extents):
    """ Attempts to set the physical extents of the given module.

    The extents are given as (xmin, xmax, ymin, ymax, zmin, zmax).

    This does not work on an image plane widget, as this module does
    not have an actor.

    Once you use this function on a module, be aware that other
    modules applied on the same data source will not share the same
    scale. Thus for instance an outline module will not respect the
    outline of the actors whose extent you modified. You should pass
    in the same "extents" parameter for this to work.You can have a
    look at the wigner.py example for a heavy use of this
    functionnality.

    **Note**

    This function does not work on some specific modules, such as
    Outline, Axes, or ImagePlaneWidget. For Outline and Axes, use the
    extent keyword argument of mlab.pipeline.outline and
    mlab.pipeline.axes.
    """
    if numpy.all(extents == 0.):
        # That the default setting.
        return
    if not hasattr(module, 'actor'):
        print('Cannot set extents for %s' % module)
        return
    # Target center (xo, yo, zo) and target half-extents per axis.
    xmin, xmax, ymin, ymax, zmin, zmax = extents
    xo = 0.5 * (xmax + xmin)
    yo = 0.5 * (ymax + ymin)
    zo = 0.5 * (zmax + zmin)
    extentx = 0.5 * (xmax - xmin)
    extenty = 0.5 * (ymax - ymin)
    extentz = 0.5 * (zmax - zmin)
    # Now the actual bounds.
    xmin, xmax, ymin, ymax, zmin, zmax = module.actor.actor.bounds
    # Scale the object
    boundsx = 0.5 * (xmax - xmin)
    boundsy = 0.5 * (ymax - ymin)
    boundsz = 0.5 * (zmax - zmin)
    xs, ys, zs = module.actor.actor.scale
    # Flat (degenerate) axes keep a unit scale to avoid dividing by zero.
    if not numpy.allclose(xmin, xmax):
        scalex = xs * extentx / boundsx
    else:
        scalex = 1
    if not numpy.allclose(ymin, ymax):
        scaley = ys * extenty / boundsy
    else:
        scaley = 1
    if not numpy.allclose(zmin, zmax):
        scalez = zs * extentz / boundsz
    else:
        scalez = 1
    module.actor.actor.scale = (scalex, scaley, scalez)
    ## Remeasure the bounds
    xmin, xmax, ymin, ymax, zmin, zmax = module.actor.actor.bounds
    xcenter = 0.5 * (xmax + xmin)
    ycenter = 0.5 * (ymax + ymin)
    zcenter = 0.5 * (zmax + zmin)
    # Center the object
    module.actor.actor.origin = (0., 0., 0.)
    xpos, ypos, zpos = module.actor.actor.position
    # Translate so the rescaled actor's center lands on the target center.
    module.actor.actor.position = (xpos + xo - xcenter, ypos + yo - ycenter,
                                   zpos + zo - zcenter)
def start_recording(ui=True):
    """Start automatic script recording. If the `ui` parameter is
    `True`, it creates a recorder with a user interface, if not it
    creates a vanilla recorder without a UI.

    **Returns**

    The `Recorder` instance created.
    """
    from apptools.scripting.api import start_recording as _do_start

    engine = get_engine()
    # Refuse to stack a second recorder on an engine already being recorded.
    assert engine.recorder is None, \
        "Current engine, %s, is already being recorded." % (engine)
    return _do_start(engine, ui=ui)
def stop_recording(file=None):
    """Stop the automatic script recording.

    **Parameters**

    :file: An open file or a filename or `None`. If this is `None`,
           nothing is saved.

    Improvements over the previous version: ``isinstance(file, str)``
    instead of ``type(file) is str`` (accepts str subclasses), and a
    ``with`` block so the output file is closed even if ``save`` raises.
    """
    from apptools.scripting.api import stop_recording as stop
    e = get_engine()
    r = e.recorder
    if r is None:
        # Nothing is being recorded; nothing to stop or save.
        return
    stop(e, save=False)
    if isinstance(file, str):
        with open(file, 'w') as f:
            r.save(f)
    elif hasattr(file, 'write') and hasattr(file, 'flush'):
        # Duck-typed open file object.
        r.save(file)
|
dmsurti/mayavi
|
mayavi/tools/tools.py
|
Python
|
bsd-3-clause
| 12,386
|
[
"Mayavi",
"VTK"
] |
4e5841375431f3cf21eb79c7ff16ee3d1d32a95b9da86d85f92a85a397f7cdee
|
from sklearn import svm, linear_model
from sklearn.cross_validation import train_test_split, PredefinedSplit
from sklearn.preprocessing import scale, StandardScaler
from sklearn.lda import LDA
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from matplotlib import pyplot as plt
from load import load
import datetime
import pandas
import numpy
from scipy.optimize import fmin
#for h in xrange(24):
# vec = numpy.zeros(len(X))
# vec[(train['hour'] == h).values] = 1.0
# X['h%d' % h] = vec
def cv_split(data, step=30, predict=10, start=30):
    """
    Rolling cross-validation splitter over hourly data (24 rows per day).

    At each split point, yields ``(train, test)`` where ``train`` is every
    row before the point and ``test`` is the following ``predict`` days.

    Arguments:
      start - Days from the first day at which to place the first split.
      step - Step size in days between consecutive splits.
      predict - Number of days to predict at each split.
    """
    first_split = 24 * start
    stride = 24 * step
    horizon = 24 * predict
    split = first_split
    while split < len(data) - horizon:
        yield data.iloc[:split], data.iloc[split:split + horizon]
        split += stride
# NOTE(review): dead debugging code -- the ``if False`` guard means this
# never runs. It references ``sys``, which does not appear in this file's
# visible imports, so enabling it as-is would presumably raise a
# NameError -- confirm before resurrecting.
if False:
    for train, test in cv_split(load('train.csv')):
        print len(train), len(test)
    sys.exit()
class Features:
    """Column selection, standardization and per-feature weighting.

    A larger variance in a feature increases its influence in an RBF SVR,
    so each standardized column is multiplied by a configurable weight.
    """
    def __init__(self, weights=numpy.ones(8)):
        # StandardScaler brings each column to zero mean / unit variance.
        self.scaler = StandardScaler()
        # Diagonal matrix form so weighting is a single matrix product.
        self._weights = numpy.diag(weights)

    def fit_transform(self, Xdf):
        """Fit the scaler on Xdf and return weighted, scaled features."""
        return self.weight(self.scaler.fit_transform(self.pick(Xdf)))

    def weight(self, X):
        """Rescale each column by its configured weight."""
        return numpy.dot(X, self._weights)

    def transform(self, Xdf):
        """Scale Xdf with the already-fitted scaler and apply weights."""
        return self.weight(self.scaler.transform(self.pick(Xdf)))

    def pick(self, data):
        """Select the eight model input columns from the raw data frame."""
        X = pandas.DataFrame()
        for name in ('weather', 'hour', 'hourage', 'workingday'):
            X[name] = data[name]
        # Boolean holiday flag cast to float.
        X['holiday'] = 1.0 * data['holiday']
        for name in ('temp', 'humidity', 'windspeed'):
            X[name] = data[name]
        return X
def opt_svr():
    """
    Optimize weights in SVM kernel. A larger variance in a feature value
    increases its "importance" or "weight" in support vector regression
    with a Gaussian kernel. Optimize these weights for the best predictions.

    This takes a while and has not converged for me. However, it wasn't a total
    failure. I got some useful weights by printing them at each step of the opt,
    which improved things somewhat.
    """
    data = load('train.csv')

    def f(x):
        # Objective for scipy's fmin: average (1 - R^2) over the rolling
        # cross-validation splits. x packs [epsilon, feature weights...].
        scores = []
        epsilon = x[0]
        weights = x[1:]
        print 'training with epsilon:', epsilon
        print 'training with weights:', weights
        for train, test in cv_split(data, start=100, step=100, predict=10):
            features = Features(weights=weights)
            X_train = features.fit_transform(train)
            X_test = features.transform(test)
            # Targets standardized so epsilon is in "scaled count" units.
            yscaler = StandardScaler()
            y_train = yscaler.fit_transform(numpy.asarray(train['count'].values, dtype='float'))
            y_test = yscaler.transform(numpy.asarray(test['count'].values, dtype='float'))
            svr = svm.SVR(kernel='rbf', degree=3, verbose=True, tol=1.0e-6, shrinking=False, epsilon=epsilon)
            svr.fit(X_train, y_train)
            #y_pred = svr.predict(X_test)
            #plt.plot_date(train['dates'], train['count'], c='b')
            #plt.plot_date(test['dates'], yscaler.inverse_transform(y_test), c='g')
            #plt.plot_date(test['dates'], yscaler.inverse_transform(y_pred), c='r')
            #plt.show()
            scores.append(1.0 - svr.score(X_test, y_test))
            print ' sub-score:', scores[-1]
        avg_score = numpy.mean(scores)
        print 'score:', avg_score
        print ''
        return avg_score

    # epsilon, feature-weights... results from a previous optimization. Negative
    # values should be identical to positive ??.
    # NOTE(review): the second assignment deliberately overrides the first;
    # the first x0 is kept for reference only.
    x0 = [0.1, 1.40158166, 9.34358376, 2.03396555, 1.36130647, 1.0, 1.0, 1.0, 1.0]
    x0 = [0.02, 1.40148374, 9.34562319, 2.16667722, 1.43079, -2.87906537, -1.13396606, -1.08639336, 0.18894128]
    res = fmin(f, x0)
    print res
def grid_search():
    """
    Scikit learn grid search over SVR hyper-parameters.
    This takes a long time.
    """
    data = load('train.csv')
    test = load('test.csv')
    # Hyper-parameter grid handed to GridSearchCV.
    grid = [
        {'C': [1, 10, 100], 'epsilon': [0.05, 0.1, 0.2]}
    ]
    for train, test in cv_split(data, start=100, step=100, predict=10):
        features = Features()
        X_train = features.fit_transform(train)
        X_test = features.transform(test)
        yscaler = StandardScaler()
        y_train = yscaler.fit_transform(numpy.asarray(train['count'].values, dtype='float'))
        y_test = yscaler.transform(numpy.asarray(test['count'].values, dtype='float'))
        # Concatenate train+test and use PredefinedSplit so GridSearchCV
        # evaluates on exactly this rolling (train, test) split.
        X = numpy.concatenate([X_train, X_test])
        y = numpy.concatenate([y_train, y_test])
        cv = PredefinedSplit(len(train) * [-1] + len(test) * [0])
        svr = svm.SVR(kernel='rbf', degree=3, verbose=True, tol=1.0e-6, shrinking=False)
        gs = GridSearchCV(svr, grid, cv=cv, scoring='r2', n_jobs=1)
        gs.fit(X, y)
        print 'Best params'
        print gs.best_params_
        for params, mean_score, scores in gs.grid_scores_:
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean_score, scores.std() * 2, params))
        #y_true, y_pred = y_test, clf.predict(X_test)
        #print(classification_report(y_true, y_pred))
        #y_pred = svr.predict(X_test)
        #plt.plot_date(train['dates'], train['count'], c='b')
        #plt.plot_date(test['dates'], yscaler.inverse_transform(y_test), c='g')
        #plt.plot_date(test['dates'], yscaler.inverse_transform(y_pred), c='r')
        #plt.show()
        # NOTE(review): the two lines below look like leftovers from
        # opt_svr(): ``scores`` was rebound by the loop above (to the last
        # grid_scores_ entry) and ``svr`` was never fitted directly, so
        # reaching them would presumably fail. TODO confirm and remove.
        scores.append(1.0 - svr.score(X_test, y_test))
        print ' sub-score:', scores[-1]
def compete(submit=False):
    """Train an SVR on the full training set, plot predictions for the
    test set, and optionally write a Kaggle submission CSV.

    :param submit: when True, also writes ``submission-<timestamp>.csv``.
    """
    train = load('train.csv')
    test = load('test.csv')
    # Wow. Bad.
    # Feature weights taken from an earlier run of opt_svr(); the two
    # commented alternatives are previous candidates kept for reference.
    weights = [1.40148374, 9.34562319, 2.16667722, 1.43079, -2.87906537, -1.13396606, -1.08639336, 0.18894128]
    #weights = [1.40148374, 9.34562319, 2.16667722, 1.43079, 2.87906537, 1.13396606, 1.08639336, 0.18894128]
    # terminated opt
    #weights = [ 1.40158166 9.34358376 2.03396555 1.36130647 -2.9131261 -1.11267807 -1.09537982 0.29371516]
    features = Features(weights=weights)
    X = features.fit_transform(train)
    X_test = features.transform(test)
    # Targets standardized so the SVR epsilon is in "scaled count" units.
    yscaler = StandardScaler()
    y = yscaler.fit_transform(numpy.asarray(train['count'].values, dtype='float'))
    svr = svm.SVR(kernel='rbf', degree=3, verbose=True, tol=1.0e-6, shrinking=False, epsilon=0.05)
    svr.fit(X, y)
    y_pred = svr.predict(X_test)
    # NOTE: this is the training-set R^2, not a held-out score.
    print 'score:', svr.score(X, y)
    plt.plot_date(train['dates'], train['count'], c='b')
    plt.plot_date(test['dates'], yscaler.inverse_transform(y_pred), c='g')
    plt.show()
    if submit:
        y_pred = yscaler.inverse_transform(y_pred)
        # Counts cannot be negative; clamp before writing the submission.
        y_pred[y_pred < 0.0] = 0.0
        submission = pandas.DataFrame()
        submission['datetime'] = test['dates']
        submission['count'] = y_pred #.round().astype('int')
        submission.to_csv('submission-' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') + '.csv', index=False)


# Script entry point: one training/plotting pass without writing a submission.
compete(submit=False)
|
bauerca/BikeShareDemand_Carl
|
svm.py
|
Python
|
gpl-2.0
| 7,079
|
[
"Gaussian"
] |
79850832780b38d9d167ce16779b8b51dea20038a250b07f969bbb25747acc52
|
# -*- coding: utf-8 -*-
"""
This module performs modifications on the network parameters during conversion
from analog to spiking.
.. autosummary::
:nosignatures:
normalize_parameters
@author: rbodo
"""
import os
import json
from collections import OrderedDict
from tensorflow.keras.models import Model
import numpy as np
def normalize_parameters(model, config, **kwargs):
    """Normalize the parameters of a network.

    The parameters of each layer are normalized with respect to the maximum
    activation, or the ``n``-th percentile of activations.

    Generates plots of the activity- and weight-distribution before and after
    normalization. Note that plotting the activity-distribution can be very
    time- and memory-consuming for larger networks.

    Parameters
    ----------

    model:
        Parsed Keras model whose weights are normalized in place.

    config: configparser.ConfigParser
        Settings.

    kwargs:
        Optional keys: ``path`` (output directory), ``scale_facs``
        (precomputed scale factors), and either ``x_norm`` (normalization
        data array) or ``dataflow`` (generator yielding normalization
        batches).
    """
    from snntoolbox.parsing.utils import get_inbound_layers_with_params

    print("Normalizing parameters...")

    norm_dir = kwargs[str('path')] if 'path' in kwargs else \
        os.path.join(config.get('paths', 'log_dir_of_current_run'),
                     'normalization')
    activ_dir = os.path.join(norm_dir, 'activations')
    if not os.path.exists(activ_dir):
        os.makedirs(activ_dir)
    # Store original weights for later plotting
    if not os.path.isfile(os.path.join(activ_dir, 'weights.npz')):
        weights = {}
        for layer in model.layers:
            w = layer.get_weights()
            if len(w) > 0:
                weights[layer.name] = w[0]
        np.savez_compressed(os.path.join(activ_dir, 'weights.npz'), **weights)

    batch_size = config.getint('simulation', 'batch_size')

    # Either load scale factors from disk, or get normalization data set to
    # calculate them.
    x_norm = None
    if 'scale_facs' in kwargs:
        scale_facs = kwargs[str('scale_facs')]
    elif 'x_norm' in kwargs or 'dataflow' in kwargs:
        if 'x_norm' in kwargs:
            x_norm = kwargs[str('x_norm')]
        elif 'dataflow' in kwargs:
            # Draw batches until the requested number of samples is reached.
            x_norm = []
            dataflow = kwargs[str('dataflow')]
            num_samples_norm = config.getint('normalization', 'num_samples',
                                             fallback='')
            if num_samples_norm == '':
                num_samples_norm = len(dataflow) * dataflow.batch_size
            while len(x_norm) * batch_size < num_samples_norm:
                x = dataflow.next()
                if isinstance(x, tuple):  # Remove class label if present.
                    x = x[0]
                x_norm.append(x)
            x_norm = np.concatenate(x_norm)
        print("Using {} samples for normalization.".format(len(x_norm)))
        # Rough per-layer memory estimate (float32, in GB) as a warning aid.
        sizes = [
            len(x_norm) * np.array(layer.output_shape[1:]).prod() * 32 /
            (8 * 1e9) for layer in model.layers if len(layer.weights) > 0]
        size_str = ['{:.2f}'.format(s) for s in sizes]
        print("INFO: Need {} GB for layer activations.\n".format(size_str) +
              "May have to reduce size of data set used for normalization.")
        # Seed with the input layer; length 1 means "not yet computed".
        scale_facs = OrderedDict({model.layers[0].name: 1})
    else:
        import warnings
        warnings.warn("Scale factors or normalization data set could not be "
                      "loaded. Proceeding without normalization.",
                      RuntimeWarning)
        return

    # If scale factors have not been computed in a previous run, do so now.
    if len(scale_facs) == 1:
        i = 0
        sparsity = []
        for layer in model.layers:
            # Skip if layer has no parameters
            if len(layer.weights) == 0:
                continue

            activations = try_reload_activations(layer, model, x_norm,
                                                 batch_size, activ_dir)
            nonzero_activations = activations[np.nonzero(activations)]
            sparsity.append(1 - nonzero_activations.size / activations.size)
            del activations
            perc = get_percentile(config, i)
            scale_facs[layer.name] = get_scale_fac(nonzero_activations, perc)
            print("Scale factor: {:.2f}.".format(scale_facs[layer.name]))
            # Since we have calculated output activations here, check at this
            # point if the output is mostly negative, in which case we should
            # stick to softmax. Otherwise ReLU is preferred.
            # Todo: Determine the input to the activation by replacing the
            # combined output layer by two distinct layers ``Dense`` and
            # ``Activation``!
            # if layer.activation == 'softmax' and settings['softmax_to_relu']:
            #     softmax_inputs = ...
            #     if np.median(softmax_inputs) < 0:
            #         print("WARNING: You allowed the toolbox to replace "
            #               "softmax by ReLU activations. However, more than "
            #               "half of the activations are negative, which "
            #               "could reduce accuracy. Consider setting "
            #               "settings['softmax_to_relu'] = False.")
            #         settings['softmax_to_relu'] = False
            i += 1
        # Write scale factors to disk
        filepath = os.path.join(norm_dir, config.get('normalization',
                                                     'percentile') + '.json')
        from snntoolbox.utils.utils import confirm_overwrite
        if config.get('output', 'overwrite') or confirm_overwrite(filepath):
            with open(filepath, str('w')) as f:
                json.dump(scale_facs, f)
        np.savez_compressed(os.path.join(norm_dir, 'activations', 'sparsity'),
                            sparsity=sparsity)

    # Apply scale factors to normalize the parameters.
    for layer in model.layers:
        # Skip if layer has no parameters
        if len(layer.weights) == 0:
            continue

        # Scale parameters
        parameters = layer.get_weights()
        if layer.activation.__name__ == 'softmax':
            # When using a certain percentile or even the max, the scaling
            # factor can be extremely low in case of many output classes
            # (e.g. 0.01 for ImageNet). This amplifies weights and biases
            # greatly. But large biases cause large offsets in the beginning
            # of the simulation (spike input absent).
            scale_fac = 1.0
            print("Using scale factor {:.2f} for softmax layer.".format(
                scale_fac))
        else:
            scale_fac = scale_facs[layer.name]
        inbound = get_inbound_layers_with_params(layer)
        if len(inbound) == 0:  # Input layer
            parameters_norm = [
                parameters[0] * scale_facs[model.layers[0].name] / scale_fac,
                parameters[1] / scale_fac]
        elif len(inbound) == 1:
            parameters_norm = [
                parameters[0] * scale_facs[inbound[0].name] / scale_fac,
                parameters[1] / scale_fac]
        else:
            # In case of this layer receiving input from several layers, we can
            # apply scale factor to bias as usual, but need to rescale weights
            # according to their respective input.
            parameters_norm = [parameters[0], parameters[1] / scale_fac]
            if parameters[0].ndim == 4:
                # In conv layers, just need to split up along channel dim.
                offset = 0  # Index offset at input filter dimension
                for inb in inbound:
                    f_out = inb.filters  # Num output features of inbound layer
                    f_in = range(offset, offset + f_out)
                    parameters_norm[0][:, :, f_in, :] *= \
                        scale_facs[inb.name] / scale_fac
                    offset += f_out
            else:
                # Fully-connected layers need more consideration, because they
                # could receive input from several conv layers that are
                # concatenated and then flattened. The neuron position in the
                # flattened layer depend on the image_data_format.
                raise NotImplementedError

        # Check if the layer happens to be Sparse
        # if the layer is sparse, add the mask to the list of parameters
        if len(parameters) == 3:
            parameters_norm.append(parameters[-1])

        # Update model with modified parameters
        layer.set_weights(parameters_norm)

    # Plot distributions of weights and activations before and after norm.
    if 'normalization_activations' in eval(config.get('output', 'plot_vars')):
        from snntoolbox.simulation.plotting import plot_hist
        from snntoolbox.simulation.plotting import plot_max_activ_hist

        # All layers in one plot. Assumes model.get_weights() returns
        # [w, b, w, b, ...].
        # from snntoolbox.simulation.plotting import plot_weight_distribution
        # plot_weight_distribution(norm_dir, model)

        print("Plotting distributions of weights and activations before and "
              "after normalizing...")

        # Load original parsed model to get parameters before normalization
        weights = np.load(os.path.join(activ_dir, 'weights.npz'))
        for idx, layer in enumerate(model.layers):
            # Skip if layer has no parameters
            if len(layer.weights) == 0:
                continue

            label = str(idx) + layer.__class__.__name__ \
                if config.getboolean('output', 'use_simple_labels') \
                else layer.name
            parameters = weights[layer.name]
            parameters_norm = layer.get_weights()[0]
            weight_dict = {'weights': parameters.flatten(),
                           'weights_norm': parameters_norm.flatten()}
            plot_hist(weight_dict, 'Weight', label, norm_dir)

            # Load activations of model before normalization
            activations = try_reload_activations(layer, model, x_norm,
                                                 batch_size, activ_dir)

            if activations is None or x_norm is None:
                continue

            # Compute activations with modified parameters
            nonzero_activations = activations[np.nonzero(activations)]
            activations_norm = get_activations_layer(model.input, layer.output,
                                                     x_norm, batch_size)
            activation_dict = {'Activations': nonzero_activations,
                               'Activations_norm':
                                   activations_norm[np.nonzero(
                                       activations_norm)]}
            scale_fac = scale_facs[layer.name]
            plot_hist(activation_dict, 'Activation', label, norm_dir,
                      scale_fac)
            ax = tuple(np.arange(len(layer.output_shape))[1:])
            plot_max_activ_hist(
                {'Activations_max': np.max(activations, axis=ax)},
                'Maximum Activation', label, norm_dir, scale_fac)
    print('')
def get_scale_fac(activations, percentile):
    """
    Determine the activation value at ``percentile`` of the layer distribution.

    Parameters
    ----------

    activations: np.array
        The activations of cells in a specific layer, flattened to 1-d.

    percentile: int
        Percentile at which to determine activation.

    Returns
    -------

    scale_fac: float
        Maximum (or percentile) of activations in this layer.
        Parameters of the respective layer are scaled by this value.
    """
    if activations.size == 0:
        # No recorded activations: fall back to a neutral scale factor.
        return 1
    return np.percentile(activations, percentile)
def get_percentile(config, layer_idx=None):
    """Get percentile at which to draw the maximum activation of a layer.

    Parameters
    ----------

    config: configparser.ConfigParser
        Settings.

    layer_idx: Optional[int]
        Layer index; required when the normalization schedule is enabled.

    Returns
    -------

    : int
        Percentile.
    """
    percentile = config.getfloat('normalization', 'percentile')
    if not config.getboolean('normalization', 'normalization_schedule'):
        return percentile
    assert layer_idx >= 0, "Layer index needed for normalization schedule."
    return apply_normalization_schedule(percentile, layer_idx)
def apply_normalization_schedule(perc, layer_idx):
    """Transform percentile according to some rule, depending on layer index.

    Parameters
    ----------

    perc: float
        Original percentile.

    layer_idx: int
        Layer index, used to decrease the scale factor in higher layers, to
        maintain high spike rates.

    Returns
    -------

    : int
        Modified percentile.
    """
    # Lower the percentile by 0.02 per layer, truncated to an integer.
    reduced = perc - 0.02 * layer_idx
    return int(reduced)
def get_activations_layer(layer_in, layer_out, x, batch_size=None):
    """
    Get activations of a specific layer, iterating batch-wise over the complete
    data set.

    Parameters
    ----------

    layer_in: keras.layers.Layer
        The input to the network.

    layer_out: keras.layers.Layer
        The layer for which we want to get the activations.

    x: np.array
        The samples to compute activations for. With data of the form
        (channels, num_rows, num_cols), x_train has dimension
        (batch_size, channels*num_rows*num_cols) for a multi-layer perceptron,
        and (batch_size, channels, num_rows, num_cols) for a convolutional net.

    batch_size: Optional[int]
        Batch size; defaults to 10.

    Returns
    -------

    activations: ndarray
        The activations of cells in a specific layer. Has the same shape as
        ``layer_out``.
    """
    if batch_size is None:
        batch_size = 10

    # Drop the trailing partial batch so predict sees full batches only.
    remainder = len(x) % batch_size
    if remainder:
        x = x[:-remainder]

    return Model(layer_in, layer_out).predict(x, batch_size)
def get_activations_batch(ann, x_batch):
    """Compute layer activations of an ANN.

    Parameters
    ----------

    ann: keras.models.Model
        Needed to compute activations.

    x_batch: np.array
        The input samples to use for determining the layer activations.

    Returns
    -------

    activations_batch: list[tuple[np.array, str]]
        One ``(activations, layer_name)`` tuple per layer that has neurons
        of its own; ``activations`` keeps the layer's output shape.
    """
    # Pure plumbing layers carry no activations of their own and are skipped.
    # Todo: This list should be replaced by
    # ``not in eval(config.get('restrictions', 'spiking_layers')``
    skip_types = {'Input', 'InputLayer', 'Flatten', 'Concatenate',
                  'ZeroPadding2D', 'Reshape'}
    activations_batch = []
    for layer in ann.layers:
        if layer.__class__.__name__ in skip_types:
            continue
        partial_model = Model(ann.input, layer.output)
        activations_batch.append(
            (partial_model.predict_on_batch(x_batch), layer.name))
    return activations_batch
def try_reload_activations(layer, model, x_norm, batch_size, activ_dir):
    """Load a layer's cached activations from ``activ_dir``, or compute and
    cache them; returns None when no cache exists and ``x_norm`` is None."""
    cache_path = os.path.join(activ_dir, layer.name + '.npz')
    try:
        activations = np.load(cache_path)['arr_0']
    except IOError:
        if x_norm is None:
            # Nothing cached and no data to compute from.
            return
        print("Calculating activations of layer {} ...".format(layer.name))
        activations = get_activations_layer(model.input, layer.output, x_norm,
                                            batch_size)
        print("Writing activations to disk...")
        np.savez_compressed(os.path.join(activ_dir, layer.name), activations)
    else:
        print("Loading activations stored during a previous run.")
    return np.array(activations)
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/conversion/utils.py
|
Python
|
mit
| 16,015
|
[
"NEURON"
] |
0141cc51546d21cc204a3497d5ad40de9e28fb46c84526dbe449d6519fdc1533
|
import numpy as np
class AdalineGD(object):
    """ADAptive LInear NEuron classifier trained with batch gradient descent.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.

    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias unit.
    cost_ : list
        Sum-of-squares cost per epoch.
    """
    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """ Fit training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            residuals = y - self.net_input(X)
            # Batch update: gradient of the SSE cost w.r.t. weights and bias.
            self.w_[1:] += self.eta * X.T.dot(residuals)
            self.w_[0] += self.eta * residuals.sum()
            self.cost_.append(0.5 * (residuals ** 2).sum())
        return self

    def net_input(self, X):
        """Calculate net input (weighted sum plus bias)."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute linear (identity) activation."""
        return self.net_input(X)

    def predict(self, X):
        """Return class label {1, -1} after unit step."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
# Class AdalineSGD (Stochastic Gradient Descent)
from numpy.random import seed
class AdalineSGD(object):
    """ADAptive LInear NEuron classifier trained by stochastic gradient descent.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.

    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias unit.
    cost_ : list
        Average per-sample cost for every epoch.
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
    random_state : int (default: None)
        Set random state for shuffling and initializing the weights.
    """
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)

    def fit(self, X, y):
        """ Fit training data sample-by-sample.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
        """
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            epoch_costs = [self._update_weights(xi, target)
                           for xi, target in zip(X, y)]
            self.cost_.append(sum(epoch_costs) / len(y))
        return self

    def partial_fit(self, X, y):
        """Fit training data without reinitializing the weights"""
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            # Single sample passed directly.
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        """Return X and y reordered by one common random permutation."""
        order = np.random.permutation(len(y))
        return X[order], y[order]

    def _initialize_weights(self, m):
        """Zero-initialize bias plus m feature weights."""
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        """One Adaline step on a single sample; returns its cost."""
        error = target - self.net_input(xi)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        return 0.5 * error ** 2

    def net_input(self, X):
        """Calculate net input (weighted sum plus bias)."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute linear (identity) activation."""
        return self.net_input(X)

    def predict(self, X):
        """Return class label {1, -1} after unit step"""
        return np.where(self.activation(X) >= 0.0, 1, -1)
|
petritn/MachineLearning
|
Objects/CAdaline.py
|
Python
|
gpl-3.0
| 4,736
|
[
"NEURON"
] |
cac9c70ab5f4e5cb228663ffebd6a947894c5278cdb68720bbee1be18407ee48
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import sys
from ast import Import, ImportFrom, NodeVisitor, parse
from collections import defaultdict
from os.path import dirname, sep
from typing import Dict, List, Optional, Tuple
from setup import PROVIDERS_REQUIREMENTS
# Make the repository root importable (e.g. for ``setup``) when this script
# is executed from its own directory.
sys.path.append(os.path.join(dirname(__file__), os.pardir))

# Path fragments and import prefix that identify provider source files.
AIRFLOW_PROVIDERS_FILE_PREFIX = f"airflow{sep}providers{sep}"
AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX = f"tests{sep}providers{sep}"
AIRFLOW_PROVIDERS_IMPORT_PREFIX = "airflow.providers."

# List of information messages generated
infos: List[str] = []
# List of warnings generated
warnings: List[str] = []
# List of errors generated
errors: List[str] = []
# Store of collected dependencies (name -> list of names)
dependencies: Dict[str, List[str]] = defaultdict(list)
def find_provider(provider_elements: List[str]) -> Optional[str]:
    """
    Finds provider name from the list of elements provided. It looks the providers up
    in PROVIDERS_REQUIREMENTS dict taken from the setup.py.

    :param provider_elements: array of elements of the path (split)
    :return: provider name or None if no provider could be found
    """
    known_providers = PROVIDERS_REQUIREMENTS.keys()
    candidate = ""
    for element in provider_elements:
        # Grow the dotted name one element at a time and test each prefix.
        candidate = element if not candidate else candidate + "." + element
        if candidate in known_providers:
            return candidate
    return None
def get_provider_from_file_name(file_name: str) -> Optional[str]:
    """
    Retrieves provider name from file name

    :param file_name: name of the file
    :return: provider name or None if no provider could be found
    """
    in_providers = (AIRFLOW_PROVIDERS_FILE_PREFIX in file_name
                    or AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX in file_name)
    if not in_providers:
        # We should only check file that are provider
        errors.append(f"Wrong file not in the providers package = {file_name}")
        return None
    # Drop the leading "airflow/providers" (or "tests/providers") elements.
    path_elements = get_file_suffix(file_name).split(sep)[2:]
    provider = find_provider(path_elements)
    if not provider:
        if file_name.endswith("__init__.py"):
            infos.append(f"Skipped file = {file_name}")
        else:
            warnings.append(f"Provider not found for path = {file_name}")
    return provider
def get_file_suffix(file_name) -> Optional[str]:
    """Return the tail of *file_name* starting at the providers prefix, or None."""
    for prefix in (AIRFLOW_PROVIDERS_FILE_PREFIX, AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX):
        position = file_name.find(prefix)
        if position != -1:
            return file_name[position:]
    return None
def get_provider_from_import(import_name: str) -> Optional[str]:
    """
    Retrieves provider name from an import statement.

    :param import_name: name of the import
    :return: provider name or None if no provider could be found
    """
    prefix_position = import_name.find(AIRFLOW_PROVIDERS_IMPORT_PREFIX)
    if prefix_position == -1:
        # skip silently - we expect non-providers imports
        return None
    # Drop the "airflow.providers" leading components of the dotted path.
    dotted_elements = import_name[prefix_position:].split(".")[2:]
    provider = find_provider(dotted_elements)
    if not provider:
        warnings.append(f"Provider not found for import = {import_name}")
    return provider
class ImportFinder(NodeVisitor):
    """
    AST visitor that collects all imported names in its imports
    """

    def __init__(self, filename: str) -> None:
        self.imports: List[str] = []
        self.filename = filename
        # Bug fix: this previously assigned the typing alias object
        # ``List[str]`` itself instead of an empty list.
        self.handled_import_exception: List[str] = []
        self.tried_imports: List[str] = []

    def process_import(self, import_name: str) -> None:
        """Record a single fully-qualified import name."""
        self.imports.append(import_name)

    def get_import_name_from_import_from(self, node: ImportFrom) -> List[str]:
        """
        Retrieves fully-qualified import names from a "from" import.

        :param node: ImportFrom node
        :return: list of import names
        """
        import_names: List[str] = []
        for alias in node.names:
            name = alias.name
            # Relative imports may have node.module == None.
            fullname = f'{node.module}.{name}' if node.module else name
            import_names.append(fullname)
        return import_names

    def visit_Import(self, node: Import):
        # Plain "import a.b" statement: record every imported module.
        for alias in node.names:
            self.process_import(alias.name)

    def visit_ImportFrom(self, node: ImportFrom):
        # ``from __future__`` imports are compiler directives, not dependencies.
        if node.module == '__future__':
            return
        for fullname in self.get_import_name_from_import_from(node):
            self.process_import(fullname)
def get_imports_from_file(file_name: str) -> List[str]:
    """
    Retrieves imports from file.

    :param file_name: name of the file
    :return: list of import names
    """
    try:
        with open(file_name, encoding="utf-8") as source_file:
            tree = parse(source_file.read(), file_name)
    except Exception:
        print(f"Error when opening file {file_name}", file=sys.stderr)
        raise
    import_finder = ImportFinder(file_name)
    import_finder.visit(tree)
    return import_finder.imports
def check_if_different_provider_used(file_name: str) -> None:
    """Record cross-provider imports of *file_name* in the global ``dependencies`` map."""
    file_provider = get_provider_from_file_name(file_name)
    if not file_provider:
        return
    for imported_name in get_imports_from_file(file_name):
        import_provider = get_provider_from_import(imported_name)
        # Only an import resolving to a *different* provider is a dependency.
        if import_provider and import_provider != file_provider:
            dependencies[file_provider].append(import_provider)
def parse_arguments() -> Tuple[str, str, str]:
    """Parse command-line arguments; print usage and exit(2) when no files are given."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Checks if dependencies between packages are handled correctly.'
    )
    parser.add_argument(
        "-f", "--provider-dependencies-file", help="Stores dependencies between providers in the file(.json)"
    )
    parser.add_argument(
        "-d", "--documentation-file", help="Updates package documentation in the file specified (.rst)"
    )
    parser.add_argument('files', nargs='*')
    parsed = parser.parse_args()
    if not parsed.files:
        parser.print_usage()
        print()
        sys.exit(2)
    return parsed.files, parsed.provider_dependencies_file, parsed.documentation_file
PREFIX = " "
HEADER = """
========================== ===========================
Package Extras
========================== ===========================
"""
FOOTER = """========================== ===========================
"""
def insert_documentation(deps_dict: Dict[str, List[str]], res: List[str]) -> None:
    """Append the provider-dependency RST table (header, rows, footer) to *res* in place."""
    res.extend(HEADER.splitlines(keepends=True))
    rows = [f"{package:27}{','.join(deps)}\n" for package, deps in deps_dict.items()]
    res.extend(rows)
    res.extend(FOOTER.splitlines(keepends=True))
if __name__ == '__main__':
    print()
    files, provider_dependencies_file_name, documentation_file_name = parse_arguments()
    # Scan every file and accumulate cross-provider dependencies plus any
    # info/warning/error messages in the module-level lists.
    num_files = 0
    for file in files:
        check_if_different_provider_used(file)
        num_files += 1
    print(f"Verified {num_files} files.")
    if infos:
        print("\nInformation messages:\n")
        for info in infos:
            print(PREFIX + info)
        print(f"Total: {len(infos)} information messages.")
    if warnings:
        print("\nWarnings!\n")
        for warning in warnings:
            print(PREFIX + warning)
        print(f"Total: {len(warnings)} warnings.")
    if errors:
        print("\nErrors!\n")
        for error in errors:
            print(PREFIX + error)
        print(f"Total: {len(errors)} errors.")
    # De-duplicate and sort dependencies for stable, diff-friendly output.
    unique_sorted_dependencies: Dict[str, List[str]] = {}
    for key in sorted(dependencies.keys()):
        unique_sorted_dependencies[key] = sorted(set(dependencies[key]))
    if provider_dependencies_file_name:
        # Dump the dependency map as pretty-printed JSON.
        with open(provider_dependencies_file_name, "w") as providers_file:
            json.dump(unique_sorted_dependencies, providers_file, indent=2)
            providers_file.write("\n")
        print()
        print(f"Written provider dependencies to the file {provider_dependencies_file_name}")
        print()
    if documentation_file_name:
        with open(documentation_file_name, encoding="utf-8") as documentation_file:
            text = documentation_file.readlines()
        # Replace the region between the START/END markers with a freshly
        # generated dependency table; all other lines are copied through.
        replacing = False
        result: List[str] = []
        for line in text:
            if line.startswith(" .. START PACKAGE DEPENDENCIES HERE"):
                replacing = True
                result.append(line)
                insert_documentation(unique_sorted_dependencies, result)
            if line.startswith(" .. END PACKAGE DEPENDENCIES HERE"):
                replacing = False
            if not replacing:
                result.append(line)
        with open(documentation_file_name, "w", encoding="utf-8") as documentation_file:
            documentation_file.write("".join(result))
        print()
        print(f"Written package extras to the file {documentation_file_name}")
        print()
    # Any accumulated error makes the whole verification fail.
    if errors:
        print()
        print("ERROR! Errors found during verification. Exiting!")
        print()
        sys.exit(1)
    print()
    print("Verification complete! Success!")
    print()
|
apache/incubator-airflow
|
tests/build_provider_packages_dependencies.py
|
Python
|
apache-2.0
| 9,837
|
[
"VisIt"
] |
a6f78a1c6e5cf2c88ae7195bd0012261c2c2c233ec361e7c68ddd266573a06d5
|
#!/usr/bin/python
#author: Vadim M. Gumerov; 09/08/2017
import sys, getopt, fileinput, os, traceback
import collections
'''Script cretaes COGs based on results of blast all vs all. Input: bunch of blast files in 6 outformat.
'''
#usage:
# Default configuration; every value below can be overridden on the command
# line (see initialyze()).
INPUT_DIR = "./blast_output"
OUTPUT_FILENAME = "COGs.txt"
# Separator between the genome/file identifier and the protein id in blast ids.
DELIM="@"
# Hit-filtering thresholds (percent identity, E-value, percent coverage).
IDENTITY_THRESHOLD=0
EVAL_THRESHOLD = 1e-90
COVERAGE_THRESHOLD = 90.0
# When True, overlapping clusters are merged into one (union-find style).
DO_MERGE=True
#{g1:2, g2:10, ...}
GENOMENM_TO_NUM_OF_PROTS = dict()
#{ prot11:[(prot21, eval), (prot23, eval), ...], prot12:[(prot25, eval), (prot29, eval), ...] }
#FIRST_GENOME_TO_SEC_GENOME = collections.defaultdict(list)
#{ Genome1ToGenome2:{prot11:[(prot21, eval), (prot23, eval), ...], prot12:[(prot25, eval), (prot29, eval), ...]}, Genome2ToGenome1: {...}, ... ], ...}
GENOMENM_TO_PROT_TOPROT_LIST_MAP = dict()
# Cluster bookkeeping: one-cluster-per-protein map (merge mode), protein ->
# set-of-clusters map (non-merge mode), and cluster -> proteins map.
PROT_TO_CLUSTER = dict()
PROT_TO_CLUSTERSET = collections.defaultdict(set)
CLUSTER_TO_PROTSET = collections.defaultdict(set)
# Ordered list of "A_vs_B" pair names and the set of all genome names seen.
GENOME_PAIRS = list()
GENOME_SET = set()
USAGE = sys.argv[0] + ' -i input directory -o output file name -d delimeter -t identity threshold -e E-value threashold -v coverage threshold -m merge clusters or not (yes|no)'
#DEFAULT_PARAMS = ["INPUT_DIR ", "OUTPUT_FILENAME ", "DELIM ", "COLUMN_NUM ", "IDENTITY_THRESHOLD ", "EVAL_THRESHOLD ", "COVERAGE_THRESHOLD ", "DO_MERGE "]
#DEFAULT_VALUES = [INPUT_DIR, OUTPUT_FILENAME, DELIM, COLUMN_NUM, IDENTITY_THRESHOLD, EVAL_THRESHOLD, COVERAGE_THRESHOLD, DO_MERGE]
def initialyze(argv):
    """Parse command-line options into the module-level configuration globals.

    :param argv: full argument vector (``sys.argv``); argv[0] is skipped.
    """
    global INPUT_DIR, OUTPUT_FILENAME, DELIM, IDENTITY_THRESHOLD, EVAL_THRESHOLD, COVERAGE_THRESHOLD, DO_MERGE
    try:
        # Bug fix: the option string previously had "m" without a colon, so the
        # short form "-m no" never delivered its argument to the handler below
        # (the long form "--merge=" did).  "m:" makes both forms consistent
        # with the documented USAGE.
        opts, args = getopt.getopt(argv[1:],"hi:o:d:c:t:e:v:m:",["inputDir=", "outputFileName=", "delimeter=", "identity=", "eValue=", "coverage=", "merge="])
    except getopt.GetoptError:
        print (USAGE + " Error")
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print (USAGE)
            sys.exit()
        elif opt in ("-i", "--inputDir"):
            INPUT_DIR = arg.strip()
        elif opt in ("-o", "--outputFileName"):
            OUTPUT_FILENAME = str(arg).strip()
        elif opt in ("-d", "--delimeter"):
            DELIM = arg
        elif opt in ("-t", "--identity"):
            IDENTITY_THRESHOLD = float(arg)
        elif opt in ("-e", "--eValue"):
            EVAL_THRESHOLD = float(arg)
        elif opt in ("-v", "--coverage"):
            COVERAGE_THRESHOLD = float(arg)
        elif opt in ("-m", "--merge"):
            # Only an explicit "no" disables cluster merging.
            if arg == "no":
                DO_MERGE = False
#process input data and generate sets of unique proteins with eval <= eval threshold for each input genome in comparison one vs one
def processInputData():
    """Read every blast tabular (outfmt 6) file in INPUT_DIR and fill the global maps.

    Protein ids are "<fileId><DELIM><protein>" where the protein part may carry
    an organism name in square brackets; the organism name is appended to the
    file id to build the genome name.
    """
    os.chdir(INPUT_DIR)
    for inputFile in os.listdir(os.getcwd()):
        numOfProteinsInGenome = set()
        try:
            for record in fileinput.input(inputFile):
                recordSpl = record.split("\t")
                firstProtFull = recordSpl[0]
                firstProtFullSplt = firstProtFull.split(DELIM)
                firstProt = firstProtFullSplt[1]
                secondProtFull = recordSpl[1]
                secondProtFullSplt = secondProtFull.split(DELIM)
                secondProt = secondProtFullSplt[1]
                # firstProtFullSplt[0] and secondProtFullSplt[0] are unique identifiers of files with proteins
                # important if protein sets of each genome were provided in separate files
                if "[" in firstProtFullSplt[1]:
                    firstOrganismName = firstProtFullSplt[0] + firstProt.split("[")[1].replace("]", "")
                else:
                    firstOrganismName = firstProtFullSplt[0]
                # Bug fix: this condition previously re-tested
                # firstProtFullSplt[1] (copy-paste), so the second organism name
                # was wrong whenever only one of the two ids carried a
                # bracketed organism name.
                if "[" in secondProtFullSplt[1]:
                    secondOrganismName = secondProtFullSplt[0] + secondProt.split("[")[1].replace("]", "")
                else:
                    secondOrganismName = secondProtFullSplt[0]
                if firstOrganismName != secondOrganismName:
                    # blast outfmt 6 columns: 2 = identity, 10 = e-value;
                    # column 12 is assumed to hold coverage (custom outfmt).
                    eVal = recordSpl[10]
                    coverage = recordSpl[12]
                    identity = recordSpl[2]
                    GENOME_SET.add(firstOrganismName)
                    numOfProteinsInGenome.add(firstProt)
                    oneGenomeNmToAnotherGenomeNm = firstOrganismName + "_vs_" + secondOrganismName
                    # Register the pair once, regardless of direction.
                    if not (secondOrganismName + "_vs_" + firstOrganismName) in GENOME_PAIRS and not oneGenomeNmToAnotherGenomeNm in GENOME_PAIRS:
                        GENOME_PAIRS.append(oneGenomeNmToAnotherGenomeNm)
                    #GENOMENM_TO_NUM_OF_PROTS[firstGenomeNm] = len(numOfProteinsInGenome)
                    if oneGenomeNmToAnotherGenomeNm not in GENOMENM_TO_PROT_TOPROT_LIST_MAP:
                        GENOMENM_TO_PROT_TOPROT_LIST_MAP[oneGenomeNmToAnotherGenomeNm] = DefaultOrderedDict(list)
                    checkAndAddProteins(GENOMENM_TO_PROT_TOPROT_LIST_MAP[oneGenomeNmToAnotherGenomeNm], eVal, coverage, identity, firstProt, secondProt)
        except Exception:
            print ("Problem happened: ")
            # Fix: print_exc() already writes the traceback; wrapping it in
            # print() used to emit a stray "None" line.
            traceback.print_exc()
        finally:
            fileinput.close()
    print ("Number of compared genomes: " + str(len(GENOME_SET)))
def checkAndAddProteins(firstGenomeToSecGenome, eVal, coverage, identity, firstProt, secondProt):
    """Record secondProt as a hit of firstProt when all three thresholds pass."""
    passes_thresholds = (
        float(eVal) <= EVAL_THRESHOLD
        and float(coverage) >= COVERAGE_THRESHOLD
        and float(identity) >= IDENTITY_THRESHOLD
    )
    if passes_thresholds:
        firstGenomeToSecGenome[firstProt].append(secondProt)
#Create COGs
def createCOGs():
    """Build COG clusters from reciprocal best hits of each genome pair.

    NOTE(review): only the first query protein of each genome pair is examined
    here -- the exhaustive per-protein loop was deliberately left commented out
    by the original author.
    """
    print ("GENOME_PAIRS ", str(GENOME_PAIRS))
    clusterId = 0
    for pairName in GENOME_PAIRS:
        genomeA, genomeB = pairName.split("_vs_")[0], pairName.split("_vs_")[1]
        forwardMap = GENOMENM_TO_PROT_TOPROT_LIST_MAP[pairName]
        reverseMap = GENOMENM_TO_PROT_TOPROT_LIST_MAP[genomeB + "_vs_" + genomeA]
        if not len(list(forwardMap)):
            continue
        queryProt = list(forwardMap)[0]
        bestHit = forwardMap[queryProt][0]
        # Reciprocal best hit: the top hit of bestHit must point back at queryProt.
        if bestHit in reverseMap and len(reverseMap[bestHit]):
            reciprocal = reverseMap[bestHit][0]
            if queryProt == reciprocal:
                if DO_MERGE:
                    clusterId = processIfDoMerge(queryProt, bestHit, clusterId)
                else:
                    clusterId = processIfNotMerge(queryProt, bestHit, clusterId)
def processIfNotMerge(firstGenomeProt, protHitInSecondGenomeFromFirstGenome, clusterName):
    """Assign a reciprocal protein pair to clusters without merging cluster ids.

    In this mode a protein may belong to several clusters (PROT_TO_CLUSTERSET).

    :return: the next unused cluster id.
    """
    if firstGenomeProt not in PROT_TO_CLUSTERSET:
        if protHitInSecondGenomeFromFirstGenome not in PROT_TO_CLUSTERSET:
            # Neither protein seen yet: open a new cluster holding both.
            PROT_TO_CLUSTERSET[firstGenomeProt].add(clusterName)
            CLUSTER_TO_PROTSET[clusterName].add(firstGenomeProt)
            PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome].add(clusterName)
            CLUSTER_TO_PROTSET[clusterName].add(protHitInSecondGenomeFromFirstGenome)
            clusterName+=1
        else:
            # Add the first protein to every cluster the hit already belongs to.
            usedClusterNames = PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome]
            for clstName in usedClusterNames:
                PROT_TO_CLUSTERSET[firstGenomeProt].add(clstName)
                CLUSTER_TO_PROTSET[clstName].add(firstGenomeProt)
    else:
        if protHitInSecondGenomeFromFirstGenome not in PROT_TO_CLUSTERSET:
            # Add the hit to every cluster the first protein already belongs to.
            usedClusterNames = PROT_TO_CLUSTERSET[firstGenomeProt]
            for clstName in usedClusterNames:
                PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome].add(clstName)
                CLUSTER_TO_PROTSET[clstName].add(protHitInSecondGenomeFromFirstGenome)
        else:
            # Both proteins already clustered: cross-link their cluster sets.
            usedClusterNames_ofFirstGenomeFirstProt = PROT_TO_CLUSTERSET[firstGenomeProt]
            usedClusterNames_ofFirstGenomeSecondProt = PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome]
            PROT_TO_CLUSTERSET[firstGenomeProt] = PROT_TO_CLUSTERSET[firstGenomeProt].union(usedClusterNames_ofFirstGenomeSecondProt)
            # Bug fix: ".unoin" was a typo that raised AttributeError whenever
            # both proteins were already clustered.
            PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome] = PROT_TO_CLUSTERSET[protHitInSecondGenomeFromFirstGenome].union(usedClusterNames_ofFirstGenomeFirstProt)
            for clstName1 in usedClusterNames_ofFirstGenomeFirstProt:
                CLUSTER_TO_PROTSET[clstName1].add(protHitInSecondGenomeFromFirstGenome)
            for clstName2 in usedClusterNames_ofFirstGenomeSecondProt:
                CLUSTER_TO_PROTSET[clstName2].add(firstGenomeProt)
    return clusterName
def processIfDoMerge(firstGenomeProt, protHitInSecondGenomeFromFirstGenome, clusterName):
    """Place a reciprocal protein pair into a single (possibly merged) cluster.

    :return: the next unused cluster id.
    """
    firstKnown = firstGenomeProt in PROT_TO_CLUSTER
    hitKnown = protHitInSecondGenomeFromFirstGenome in PROT_TO_CLUSTER
    if not firstKnown and not hitKnown:
        # Neither protein seen before: open a brand-new cluster for the pair.
        for prot in (firstGenomeProt, protHitInSecondGenomeFromFirstGenome):
            PROT_TO_CLUSTER[prot] = clusterName
            CLUSTER_TO_PROTSET[clusterName].add(prot)
        clusterName+=1
    elif not firstKnown:
        # Attach the first protein to the hit's existing cluster.
        existingCluster = PROT_TO_CLUSTER[protHitInSecondGenomeFromFirstGenome]
        PROT_TO_CLUSTER[firstGenomeProt] = existingCluster
        CLUSTER_TO_PROTSET[existingCluster].add(firstGenomeProt)
    elif not hitKnown:
        # Attach the hit to the first protein's existing cluster.
        existingCluster = PROT_TO_CLUSTER[firstGenomeProt]
        PROT_TO_CLUSTER[protHitInSecondGenomeFromFirstGenome] = existingCluster
        CLUSTER_TO_PROTSET[existingCluster].add(protHitInSecondGenomeFromFirstGenome)
    else:
        # Both known: fold the hit's cluster into the first protein's cluster.
        keepCluster = PROT_TO_CLUSTER[firstGenomeProt]
        dropCluster = PROT_TO_CLUSTER[protHitInSecondGenomeFromFirstGenome]
        if keepCluster != dropCluster:
            movedProts = CLUSTER_TO_PROTSET[dropCluster]
            for prot in movedProts:
                PROT_TO_CLUSTER[prot] = keepCluster
            CLUSTER_TO_PROTSET[keepCluster] = CLUSTER_TO_PROTSET[keepCluster].union(movedProts)
            del CLUSTER_TO_PROTSET[dropCluster]
    return clusterName
def printData():
    """Write clusters to OUTPUT_FILENAME in the parent directory, smallest first."""
    os.chdir("..")
    with open(OUTPUT_FILENAME, "w") as outFile:
        orderedClusters = sorted(CLUSTER_TO_PROTSET.values(), key=len)
        for clusterIndex, proteins in enumerate(orderedClusters, start=1):
            outFile.write(str(clusterIndex) + ":" + "\n")
            for prot in proteins:
                outFile.write(prot + "\n")
class DefaultOrderedDict(collections.OrderedDict):
    """An OrderedDict with defaultdict-style ``default_factory`` behaviour.

    Source: http://stackoverflow.com/a/6190500/562769
    """
    # Source: http://stackoverflow.com/a/6190500/562769
    def __init__(self, default_factory=None, *a, **kw):
        # Bug fix: ``collections.Callable`` was removed from the ``collections``
        # namespace in Python 3.10; the ``callable()`` builtin is equivalent
        # and works on every supported version.
        if (default_factory is not None and
                not callable(default_factory)):
            raise TypeError('first argument must be callable')
        collections.OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory

    def __getitem__(self, key):
        try:
            return collections.OrderedDict.__getitem__(self, key)
        except KeyError:
            return self.__missing__(key)

    def __missing__(self, key):
        # Materialize, store, and return the default value for a missing key.
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

    def __reduce__(self):
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,
        # Bug fix: pickle requires an *iterator* of items here; a dict view
        # (Python 3 ``items()``) is not an iterator.
        return type(self), args, None, None, iter(self.items())

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        return type(self)(self.default_factory, self)

    def __deepcopy__(self, memo):
        import copy
        # Bug fix: deep-copy a list of the items -- dict views cannot be
        # deep-copied directly on Python 3.
        return type(self)(self.default_factory,
                          copy.deepcopy(list(self.items())))

    def __repr__(self):
        return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
                                               collections.OrderedDict.__repr__(self))
def main(argv):
    """Run the full pipeline: parse options, read blast hits, cluster, write output."""
    initialyze(argv)
    for stage in (processInputData, createCOGs, printData):
        stage()


if __name__ == "__main__":
    main(sys.argv)
|
ToshkaDev/bioinformatics-universe
|
bioinformatics-universe/bioinformatics-programs/createCOGs.py
|
Python
|
mit
| 12,501
|
[
"BLAST"
] |
2287da64718d37247449066b1de6d6998b29a745752ae4881bcd378116b9524e
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# Copyright (c) 2009-2010 Arista Networks, Inc.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""basic checker for Python code"""
import sys
import astroid
from logilab.common.ureports import Table
from astroid import are_exclusive, InferenceError
import astroid.bases
from pylint.interfaces import IAstroidChecker
from pylint.utils import EmptyReport
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
clobber_in_except,
is_builtin_object,
is_inside_except,
overrides_a_method,
safe_infer,
get_argument_from_call,
NoSuchArgumentError,
)
import re
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
CLASS_ATTRIBUTE_RGX = re.compile(r'([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
# Pairs of dunder methods where the first can be emulated via the second.
REVERSED_METHODS = (('__getitem__', '__len__'),
                    ('__reversed__', ))
# Python 3.3 allows "return value" inside generators, so the related check
# only applies below that version.
PY33 = sys.version_info >= (3, 3)
# Builtins discouraged by the bad-builtin check; input/file are Python-2-only.
BAD_FUNCTIONS = ['map', 'filter', 'apply']
if sys.version_info < (3, 0):
    BAD_FUNCTIONS.append('input')
    BAD_FUNCTIONS.append('file')
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = set(('exempt', 'ignore'))
# The compiled patterns above are all this module needs from re.
del re
def in_loop(node):
    """return True if the node is inside a kind of for loop"""
    loop_like = (astroid.For, astroid.ListComp, astroid.SetComp,
                 astroid.DictComp, astroid.GenExpr)
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, loop_like):
            return True
        ancestor = ancestor.parent
    return False
def in_nested_list(nested_list, obj):
    """return true if the object is an element of <nested_list> or of a nested
    list
    """
    for element in nested_list:
        is_sublist = isinstance(element, (list, tuple))
        if is_sublist:
            # Recurse into nested lists/tuples.
            if in_nested_list(element, obj):
                return True
        elif element == obj:
            return True
    return False
def _loop_exits_early(loop):
    """Returns true if a loop has a break statement in its body."""
    loop_nodes = (astroid.For, astroid.While)
    # Walk the loop body only (not its orelse) so breaks in the else branch
    # are not mistaken for early exits of this loop.
    for child in loop.body:
        if isinstance(child, loop_nodes):
            # A break inside a nested loop exits only that nested loop -- but
            # a break in the nested loop's orelse belongs to the outer loop.
            for orelse in (child.orelse or ()):
                for _ in orelse.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
                    return True
        else:
            for _ in child.nodes_of_class(astroid.Break, skip_klass=loop_nodes):
                return True
    return False
# Fully-qualified names of property-defining classes; the builtins module is
# spelled "__builtin__" on Python 2 and "builtins" on Python 3.
if sys.version_info < (3, 0):
    PROPERTY_CLASSES = set(('__builtin__.property', 'abc.abstractproperty'))
else:
    PROPERTY_CLASSES = set(('builtins.property', 'abc.abstractproperty'))
# Decorators from the abc module that mark a method as abstract.
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
                   'abc.abstractclassmethod', 'abc.abstractstaticmethod'))
def _determine_function_name_type(node):
    """Determine the name type whose regex a function's name should match.

    :param node: A function node.
    :returns: One of ('function', 'method', 'attr')
    """
    if not node.is_method():
        return 'function'
    decorators = node.decorators.nodes if node.decorators else []
    for decorator in decorators:
        # A property (@property or @abc.abstractproperty) is named like an
        # attribute.
        looks_like_property = (
            isinstance(decorator, astroid.Name) or
            (isinstance(decorator, astroid.Getattr) and
             decorator.attrname == 'abstractproperty'))
        if looks_like_property:
            infered = safe_infer(decorator)
            if infered and infered.qname() in PROPERTY_CLASSES:
                return 'attr'
        # prop_method.setter / prop_method.deleter also defines attribute
        # access, so it is treated like an attribute as well.
        elif (isinstance(decorator, astroid.Getattr) and
              decorator.attrname in ('setter', 'deleter')):
            return 'attr'
    return 'method'
def decorated_with_abc(func):
    """ Determine if the `func` node is decorated
    with `abc` decorators (abstractmethod et co.)
    """
    if func.decorators:
        for node in func.decorators.nodes:
            try:
                # Fix: use the next() builtin (Python 2.6+ and 3.x) instead of
                # the Python-2-only ``.next()`` method of the generator.
                infered = next(node.infer())
            except InferenceError:
                continue
            if infered and infered.qname() in ABC_METHODS:
                return True
def has_abstract_methods(node):
    """ Determine if the given `node` has
    abstract methods, defined with `abc` module.
    """
    for method in node.mymethods():
        if decorated_with_abc(method):
            return True
    return False
def report_by_type_stats(sect, stats, old_stats):
    """make a report of

    * percentage of different types documented
    * percentage of different types with a bad name
    """
    node_types = ('module', 'class', 'method', 'function')
    # percentage of different types documented and/or with a bad name
    nice_stats = {}
    for node_type in node_types:
        try:
            total = stats[node_type]
        except KeyError:
            raise EmptyReport()
        nice_stats[node_type] = {}
        if total != 0:
            try:
                documented = total - stats['undocumented_' + node_type]
                nice_stats[node_type]['percent_documented'] = '%.2f' % (
                    (documented * 100.) / total)
            except KeyError:
                nice_stats[node_type]['percent_documented'] = 'NC'
            try:
                nice_stats[node_type]['percent_badname'] = '%.2f' % (
                    (stats['badname_' + node_type] * 100.) / total)
            except KeyError:
                nice_stats[node_type]['percent_badname'] = 'NC'
    lines = ('type', 'number', 'old number', 'difference',
             '%documented', '%badname')
    for node_type in node_types:
        new = stats[node_type]
        old = old_stats.get(node_type, None)
        if old is not None:
            diff_str = diff_string(old, new)
        else:
            old, diff_str = 'NC', 'NC'
        lines += (node_type, str(new), str(old), diff_str,
                  nice_stats[node_type].get('percent_documented', '0'),
                  nice_stats[node_type].get('percent_badname', '0'))
    sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.

    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if not node.decorators:
        return False
    for decorator in node.decorators.nodes:
        refers_to_same_name = (
            isinstance(decorator, astroid.Getattr) and
            getattr(decorator.expr, 'name', None) == node.name)
        if refers_to_same_name:
            return True
    return False
class _BasicChecker(BaseChecker):
    """Shared base for the basic checkers: declares the astroid checker
    interface and the common checker name."""
    __implements__ = IAstroidChecker
    name = 'basic'
class BasicErrorChecker(_BasicChecker):
    """Checker for conditions that are outright errors: redefinitions,
    misplaced control-flow statements, generator misuse and abstract class
    instantiation."""
    msgs = {
        'E0100': ('__init__ method is a generator',
                  'init-is-generator',
                  'Used when the special class method __init__ is turned into a '
                  'generator by a yield in its body.'),
        'E0101': ('Explicit return in __init__',
                  'return-in-init',
                  'Used when the special class method __init__ has an explicit \
                  return value.'),
        'E0102': ('%s already defined line %s',
                  'function-redefined',
                  'Used when a function / class / method is redefined.'),
        'E0103': ('%r not properly in loop',
                  'not-in-loop',
                  'Used when break or continue keywords are used outside a loop.'),
        'E0104': ('Return outside function',
                  'return-outside-function',
                  'Used when a "return" statement is found outside a function or '
                  'method.'),
        'E0105': ('Yield outside function',
                  'yield-outside-function',
                  'Used when a "yield" statement is found outside a function or '
                  'method.'),
        'E0106': ('Return with argument inside generator',
                  'return-arg-in-generator',
                  'Used when a "return" statement with an argument is found '
                  'outside in a generator function or method (e.g. with some '
                  '"yield" statements).',
                  {'maxversion': (3, 3)}),
        'E0107': ("Use of the non-existent %s operator",
                  'nonexistent-operator',
                  "Used when you attempt to use the C-style pre-increment or"
                  "pre-decrement operator -- and ++, which doesn't exist in Python."),
        'E0108': ('Duplicate argument name %s in function definition',
                  'duplicate-argument-name',
                  'Duplicate argument names in function definitions are syntax'
                  ' errors.'),
        'E0110': ('Abstract class with abstract methods instantiated',
                  'abstract-class-instantiated',
                  'Used when an abstract class with `abc.ABCMeta` as metaclass '
                  'has abstract methods and is instantiated.',
                  {'minversion': (3, 0)}),
        'W0120': ('Else clause on loop without a break statement',
                  'useless-else-on-loop',
                  'Loops should only have an else clause if they can exit early '
                  'with a break statement, otherwise the statements under else '
                  'should be on the same scope as the loop itself.'),
        }

    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)

    @check_messages('function-redefined')
    def visit_class(self, node):
        self._check_redefinition('class', node)

    @check_messages('init-is-generator', 'return-in-init',
                    'function-redefined', 'return-arg-in-generator',
                    'duplicate-argument-name')
    def visit_function(self, node):
        if not redefined_by_decorator(node):
            self._check_redefinition(node.is_method() and 'method' or 'function', node)
        # checks for max returns, branch, return in __init__
        returns = node.nodes_of_class(astroid.Return,
                                      skip_klass=(astroid.Function, astroid.Class))
        if node.is_method() and node.name == '__init__':
            if node.is_generator():
                self.add_message('init-is-generator', node=node)
            else:
                values = [r.value for r in returns]
                # Are we returning anything but None from constructors
                if [v for v in values if
                        not (v is None or
                             (isinstance(v, astroid.Const) and v.value is None) or
                             (isinstance(v, astroid.Name) and v.name == 'None')
                            )]:
                    self.add_message('return-in-init', node=node)
        elif node.is_generator():
            # make sure we don't mix non-None returns and yields
            if not PY33:
                for retnode in returns:
                    if isinstance(retnode.value, astroid.Const) and \
                           retnode.value.value is not None:
                        self.add_message('return-arg-in-generator', node=node,
                                         line=retnode.fromlineno)
        # Check for duplicate names
        args = set()
        for name in node.argnames():
            if name in args:
                self.add_message('duplicate-argument-name', node=node, args=(name,))
            else:
                args.add(name)

    @check_messages('return-outside-function')
    def visit_return(self, node):
        if not isinstance(node.frame(), astroid.Function):
            self.add_message('return-outside-function', node=node)

    @check_messages('yield-outside-function')
    def visit_yield(self, node):
        if not isinstance(node.frame(), (astroid.Function, astroid.Lambda)):
            self.add_message('yield-outside-function', node=node)

    @check_messages('not-in-loop')
    def visit_continue(self, node):
        self._check_in_loop(node, 'continue')

    @check_messages('not-in-loop')
    def visit_break(self, node):
        self._check_in_loop(node, 'break')

    @check_messages('useless-else-on-loop')
    def visit_for(self, node):
        self._check_else_on_loop(node)

    @check_messages('useless-else-on-loop')
    def visit_while(self, node):
        self._check_else_on_loop(node)

    @check_messages('nonexistent-operator')
    def visit_unaryop(self, node):
        """check use of the non-existent ++ and -- operator operator"""
        if ((node.op in '+-') and
                isinstance(node.operand, astroid.UnaryOp) and
                (node.operand.op == node.op)):
            self.add_message('nonexistent-operator', node=node, args=node.op*2)

    @check_messages('abstract-class-instantiated')
    def visit_callfunc(self, node):
        """ Check instantiating abstract class with
        abc.ABCMeta as metaclass.
        """
        try:
            # Fix: use the next() builtin (Python 2.6+ and 3.x) instead of the
            # Python-2-only ``.next()`` method of the generator.
            infered = next(node.func.infer())
        except astroid.InferenceError:
            return
        if not isinstance(infered, astroid.Class):
            return
        # __init__ was called
        metaclass = infered.metaclass()
        if metaclass is None:
            # Python 3.4 has `abc.ABC`, which won't be detected
            # by ClassNode.metaclass()
            for ancestor in infered.ancestors():
                if (ancestor.qname() == 'abc.ABC' and
                        has_abstract_methods(infered)):
                    self.add_message('abstract-class-instantiated', node=node)
                    break
            return
        if (metaclass.qname() == 'abc.ABCMeta' and
                has_abstract_methods(infered)):
            self.add_message('abstract-class-instantiated', node=node)

    def _check_else_on_loop(self, node):
        """Check that any loop with an else clause has a break statement."""
        if node.orelse and not _loop_exits_early(node):
            self.add_message('useless-else-on-loop', node=node,
                             # This is not optimal, but the line previous
                             # to the first statement in the else clause
                             # will usually be the one that contains the else:.
                             line=node.orelse[0].lineno - 1)

    def _check_in_loop(self, node, node_name):
        """check that a node is inside a for or while loop"""
        _node = node.parent
        while _node:
            if isinstance(_node, (astroid.For, astroid.While)):
                break
            _node = _node.parent
        else:
            self.add_message('not-in-loop', node=node, args=node_name)

    def _check_redefinition(self, redeftype, node):
        """check for redefinition of a function / method / class name"""
        defined_self = node.parent.frame()[node.name]
        if defined_self is not node and not are_exclusive(node, defined_self):
            self.add_message('function-redefined', node=node,
                             args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
    """checks for :
    * doc strings
    * number of arguments, local variables, branches, returns and statements in
    functions, methods
    * required module attributes
    * dangerous default values as arguments
    * redefinition of function / method / class
    * uses of the global statement
    """

    __implements__ = IAstroidChecker

    name = 'basic'
    # Message catalogue: message id -> (template, symbolic name, description
    # [, extra metadata such as a maximum Python version]).
    msgs = {
        'W0101': ('Unreachable code',
                  'unreachable',
                  'Used when there is some code behind a "return" or "raise" \
                  statement, which will never be accessed.'),
        'W0102': ('Dangerous default value %s as argument',
                  'dangerous-default-value',
                  'Used when a mutable value as list or dictionary is detected in \
                  a default value for an argument.'),
        'W0104': ('Statement seems to have no effect',
                  'pointless-statement',
                  'Used when a statement doesn\'t have (or at least seems to) \
                  any effect.'),
        'W0105': ('String statement has no effect',
                  'pointless-string-statement',
                  'Used when a string is used as a statement (which of course \
                  has no effect). This is a particular case of W0104 with its \
                  own message so you can easily disable it if you\'re using \
                  those strings as documentation, instead of comments.'),
        'W0106': ('Expression "%s" is assigned to nothing',
                  'expression-not-assigned',
                  'Used when an expression that is not a function call is assigned\
                  to nothing. Probably something else was intended.'),
        'W0108': ('Lambda may not be necessary',
                  'unnecessary-lambda',
                  'Used when the body of a lambda expression is a function call \
                  on the same argument list as the lambda itself; such lambda \
                  expressions are in all but a few cases replaceable with the \
                  function being called in the body of the lambda.'),
        'W0109': ("Duplicate key %r in dictionary",
                  'duplicate-key',
                  "Used when a dictionary expression binds the same key multiple \
                  times."),
        'W0122': ('Use of exec',
                  'exec-used',
                  'Used when you use the "exec" statement (function for Python 3), to discourage its \
                  usage. That doesn\'t mean you can not use it !'),
        'W0123': ('Use of eval',
                  'eval-used',
                  'Used when you use the "eval" function, to discourage its '
                  'usage. Consider using `ast.literal_eval` for safely evaluating '
                  'strings containing Python expressions '
                  'from untrusted sources. '),
        'W0141': ('Used builtin function %r',
                  'bad-builtin',
                  'Used when a black listed builtin function is used (see the '
                  'bad-function option). Usual black listed functions are the ones '
                  'like map, or filter , where Python offers now some cleaner '
                  'alternative like list comprehension.'),
        'W0142': ('Used * or ** magic',
                  'star-args',
                  'Used when a function or method is called using `*args` or '
                  '`**kwargs` to dispatch arguments. This doesn\'t improve '
                  'readability and should be used with care.'),
        'W0150': ("%s statement in finally block may swallow exception",
                  'lost-exception',
                  "Used when a break or a return statement is found inside the \
                  finally clause of a try...finally block: the exceptions raised \
                  in the try clause will be silently swallowed instead of being \
                  re-raised."),
        'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
                  'assert-on-tuple',
                  'A call of assert on a tuple will always evaluate to true if '
                  'the tuple is not empty, and will always evaluate to false if '
                  'it is.'),
        'W0121': ('Use raise ErrorClass(args) instead of raise ErrorClass, args.',
                  'old-raise-syntax',
                  "Used when the alternate raise syntax 'raise foo, bar' is used "
                  "instead of 'raise foo(bar)'.",
                  {'maxversion': (3, 0)}),
        'C0121': ('Missing required attribute "%s"', # W0103
                  'missing-module-attribute',
                  'Used when an attribute required for modules is missing.'),

        'E0109': ('Missing argument to reversed()',
                  'missing-reversed-argument',
                  'Used when reversed() builtin didn\'t receive an argument.'),
        'E0111': ('The first reversed() argument is not a sequence',
                  'bad-reversed-sequence',
                  'Used when the first argument to reversed() builtin '
                  'isn\'t a sequence (does not implement __reversed__, '
                  'nor __getitem__ and __len__'),
        }

    # Command-line / rc-file options exposed by this checker.
    options = (('required-attributes',
                {'default' : (), 'type' : 'csv',
                 'metavar' : '<attributes>',
                 'help' : 'Required attributes for module, separated by a '
                          'comma'}
               ),
               ('bad-functions',
                {'default' : BAD_FUNCTIONS,
                 'type' :'csv', 'metavar' : '<builtin function names>',
                 'help' : 'List of builtins function names that should not be '
                          'used, separated by a comma'}
               ),
              )
    reports = (('RP0101', 'Statistics by type', report_by_type_stats),)

    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        # Both attributes are (re)initialised in open(), once per run.
        self.stats = None
        self._tryfinallys = None

    def open(self):
        """initialize visit variables and statistics
        """
        # Stack of try...finally nodes currently being visited; used by
        # _check_not_in_finally().
        self._tryfinallys = []
        self.stats = self.linter.add_stats(module=0, function=0,
                                           method=0, class_=0)

    @check_messages('missing-module-attribute')
    def visit_module(self, node):
        """check module name, docstring and required arguments
        """
        self.stats['module'] += 1
        for attr in self.config.required_attributes:
            if attr not in node:
                self.add_message('missing-module-attribute', node=node, args=attr)

    def visit_class(self, node):
        """check module name, docstring and redefinition
        increment branch counter
        """
        # NOTE(review): open() registers the counter via class_=0 while this
        # increments 'class' -- confirm linter.add_stats maps the keyword.
        self.stats['class'] += 1

    @check_messages('pointless-statement', 'pointless-string-statement',
                    'expression-not-assigned')
    def visit_discard(self, node):
        """check for various kind of statements without effect"""
        expr = node.value
        if isinstance(expr, astroid.Const) and isinstance(expr.value,
                                                          basestring):
            # treat string statement in a separated message
            self.add_message('pointless-string-statement', node=node)
            return
        # ignore if this is :
        # * a direct function call
        # * the unique child of a try/except body
        # * a yield (which are wrapped by a discard node in _ast XXX)
        # warn W0106 if we have any underlying function call (we can't predict
        # side effects), else pointless-statement
        if (isinstance(expr, (astroid.Yield, astroid.CallFunc)) or
            (isinstance(node.parent, astroid.TryExcept) and
             node.parent.body == [node])):
            return
        if any(expr.nodes_of_class(astroid.CallFunc)):
            self.add_message('expression-not-assigned', node=node, args=expr.as_string())
        else:
            self.add_message('pointless-statement', node=node)

    @check_messages('unnecessary-lambda')
    def visit_lambda(self, node):
        """check whether or not the lambda is suspicious
        """
        # if the body of the lambda is a call expression with the same
        # argument list as the lambda itself, then the lambda is
        # possibly unnecessary and at least suspicious.
        if node.args.defaults:
            # If the arguments of the lambda include defaults, then a
            # judgment cannot be made because there is no way to check
            # that the defaults defined by the lambda are the same as
            # the defaults defined by the function called in the body
            # of the lambda.
            return
        call = node.body
        if not isinstance(call, astroid.CallFunc):
            # The body of the lambda must be a function call expression
            # for the lambda to be unnecessary.
            return
        # XXX are lambda still different with astroid >= 0.18 ?
        # *args and **kwargs need to be treated specially, since they
        # are structured differently between the lambda and the function
        # call (in the lambda they appear in the args.args list and are
        # indicated as * and ** by two bits in the lambda's flags, but
        # in the function call they are omitted from the args list and
        # are indicated by separate attributes on the function call node).
        ordinary_args = list(node.args.args)
        if node.args.kwarg:
            if (not call.kwargs
                    or not isinstance(call.kwargs, astroid.Name)
                    or node.args.kwarg != call.kwargs.name):
                return
        elif call.kwargs:
            return
        if node.args.vararg:
            if (not call.starargs
                    or not isinstance(call.starargs, astroid.Name)
                    or node.args.vararg != call.starargs.name):
                return
        elif call.starargs:
            return
        # The "ordinary" arguments must be in a correspondence such that:
        # ordinary_args[i].name == call.args[i].name.
        if len(ordinary_args) != len(call.args):
            return
        for i in xrange(len(ordinary_args)):
            if not isinstance(call.args[i], astroid.Name):
                return
            if node.args.args[i].name != call.args[i].name:
                return
        self.add_message('unnecessary-lambda', line=node.fromlineno, node=node)

    @check_messages('dangerous-default-value')
    def visit_function(self, node):
        """check function name, docstring, arguments, redefinition,
        variable names, max locals
        """
        self.stats[node.is_method() and 'method' or 'function'] += 1
        # check for dangerous default values as arguments
        for default in node.args.defaults:
            try:
                value = default.infer().next()
            except astroid.InferenceError:
                continue
            builtins = astroid.bases.BUILTINS
            # Only the mutable builtin containers are considered dangerous.
            if (isinstance(value, astroid.Instance) and
                    value.qname() in ['.'.join([builtins, x]) for x in ('set', 'dict', 'list')]):
                if value is default:
                    # The default is itself the mutable literal.
                    msg = default.as_string()
                elif type(value) is astroid.Instance:
                    # Indirect default (e.g. a name bound to a list).
                    msg = '%s (%s)' % (default.as_string(), value.qname())
                else:
                    msg = '%s (%s)' % (default.as_string(), value.as_string())
                self.add_message('dangerous-default-value', node=node, args=(msg,))

    @check_messages('unreachable', 'lost-exception')
    def visit_return(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        self._check_unreachable(node)
        # Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'return', (astroid.Function,))

    @check_messages('unreachable')
    def visit_continue(self, node):
        """check is the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)

    @check_messages('unreachable', 'lost-exception')
    def visit_break(self, node):
        """1 - check is the node has a right sibling (if so, that's some
        unreachable code)
        2 - check is the node is inside the finally clause of a try...finally
        block
        """
        # 1 - Is it right sibling ?
        self._check_unreachable(node)
        # 2 - Is it inside final body of a try...finally bloc ?
        self._check_not_in_finally(node, 'break', (astroid.For, astroid.While,))

    @check_messages('unreachable', 'old-raise-syntax')
    def visit_raise(self, node):
        """check if the node has a right sibling (if so, that's some unreachable
        code)
        """
        self._check_unreachable(node)
        if sys.version_info >= (3, 0):
            # 'raise foo, bar' is a syntax error on Python 3; nothing to check.
            return
        if node.exc is not None and node.inst is not None and node.tback is None:
            self.add_message('old-raise-syntax', node=node)

    @check_messages('exec-used')
    def visit_exec(self, node):
        """just print a warning on exec statements"""
        self.add_message('exec-used', node=node)

    @check_messages('bad-builtin', 'star-args', 'eval-used',
                    'exec-used', 'missing-reversed-argument',
                    'bad-reversed-sequence')
    def visit_callfunc(self, node):
        """visit a CallFunc node -> check if this is not a blacklisted builtin
        call and check for * or ** use
        """
        if isinstance(node.func, astroid.Name):
            name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in the
            # locals nor globals scope)
            if not (name in node.frame() or
                    name in node.root()):
                if name == 'exec':
                    self.add_message('exec-used', node=node)
                elif name == 'reversed':
                    self._check_reversed(node)
                elif name == 'eval':
                    self.add_message('eval-used', node=node)
                if name in self.config.bad_functions:
                    self.add_message('bad-builtin', node=node, args=name)
        if node.starargs or node.kwargs:
            scope = node.scope()
            if isinstance(scope, astroid.Function):
                # A function forwarding its own *args/**kwargs verbatim is
                # not considered star-args abuse.
                toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
                                                     (node.kwargs, scope.args.kwarg)) if n]
                if toprocess:
                    for cfnode, fargname in toprocess[:]:
                        if getattr(cfnode, 'name', None) == fargname:
                            toprocess.remove((cfnode, fargname))
                    if not toprocess:
                        return # star-args can be skipped
            self.add_message('star-args', node=node.func)

    @check_messages('assert-on-tuple')
    def visit_assert(self, node):
        """check the use of an assert statement on a tuple."""
        if node.fail is None and isinstance(node.test, astroid.Tuple) and \
                len(node.test.elts) == 2:
            self.add_message('assert-on-tuple', node=node)

    @check_messages('duplicate-key')
    def visit_dict(self, node):
        """check duplicate key in dictionary"""
        keys = set()
        for k, _ in node.items:
            # Only literal (Const) keys can be compared reliably.
            if isinstance(k, astroid.Const):
                key = k.value
                if key in keys:
                    self.add_message('duplicate-key', node=node, args=key)
                keys.add(key)

    def visit_tryfinally(self, node):
        """update try...finally flag"""
        self._tryfinallys.append(node)

    def leave_tryfinally(self, node):
        """update try...finally flag"""
        self._tryfinallys.pop()

    def _check_unreachable(self, node):
        """check unreachable code"""
        # Any statement following return/break/continue/raise is unreachable.
        unreach_stmt = node.next_sibling()
        if unreach_stmt is not None:
            self.add_message('unreachable', node=unreach_stmt)

    def _check_not_in_finally(self, node, node_name, breaker_classes=()):
        """check that a node is not inside a finally clause of a
        try...finally statement.
        If we found before a try...finally bloc a parent which its type is
        in breaker_classes, we skip the whole check."""
        # if self._tryfinallys is empty, we're not a in try...finally bloc
        if not self._tryfinallys:
            return
        # the node could be a grand-grand...-children of the try...finally
        _parent = node.parent
        _node = node
        while _parent and not isinstance(_parent, breaker_classes):
            if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
                self.add_message('lost-exception', node=node, args=node_name)
                return
            _node = _parent
            _parent = _node.parent

    def _check_reversed(self, node):
        """ check that the argument to `reversed` is a sequence """
        try:
            argument = safe_infer(get_argument_from_call(node, position=0))
        except NoSuchArgumentError:
            self.add_message('missing-reversed-argument', node=node)
        else:
            if argument is astroid.YES:
                # Inference was ambiguous; do not guess.
                return
            if argument is None:
                # nothing was infered
                # try to see if we have iter()
                if isinstance(node.args[0], astroid.CallFunc):
                    try:
                        func = node.args[0].func.infer().next()
                    except InferenceError:
                        return
                    if (getattr(func, 'name', None) == 'iter' and
                            is_builtin_object(func)):
                        self.add_message('bad-reversed-sequence', node=node)
                return
            if isinstance(argument, astroid.Instance):
                if (argument._proxied.name == 'dict' and
                        is_builtin_object(argument._proxied)):
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                elif any(ancestor.name == 'dict' and is_builtin_object(ancestor)
                         for ancestor in argument._proxied.ancestors()):
                    # mappings aren't accepted by reversed()
                    self.add_message('bad-reversed-sequence', node=node)
                    return
                # Accept the instance if any REVERSED_METHODS group is fully
                # provided (e.g. __reversed__, or __getitem__ + __len__).
                for methods in REVERSED_METHODS:
                    for meth in methods:
                        try:
                            argument.getattr(meth)
                        except astroid.NotFoundError:
                            break
                    else:
                        break
                else:
                    # check if it is a .deque. It doesn't seem that
                    # we can retrieve special methods
                    # from C implemented constructs
                    if argument._proxied.qname().endswith(".deque"):
                        return
                    self.add_message('bad-reversed-sequence', node=node)
            elif not isinstance(argument, (astroid.List, astroid.Tuple)):
                # everything else is not a proper sequence for reversed()
                self.add_message('bad-reversed-sequence', node=node)
# Maps each name category handled by NameChecker to a pair of
# (validation regexp object, human readable label used in messages).
# The *_RGX constants are defined earlier in this module.
_NAME_TYPES = {
    'module': (MOD_NAME_RGX, 'module'),
    'const': (CONST_NAME_RGX, 'constant'),
    'class': (CLASS_NAME_RGX, 'class'),
    'function': (DEFAULT_NAME_RGX, 'function'),
    'method': (DEFAULT_NAME_RGX, 'method'),
    'attr': (DEFAULT_NAME_RGX, 'attribute'),
    'argument': (DEFAULT_NAME_RGX, 'argument'),
    'variable': (DEFAULT_NAME_RGX, 'variable'),
    'class_attribute': (CLASS_ATTRIBUTE_RGX, 'class attribute'),
    'inlinevar': (COMP_VAR_RGX, 'inline iteration'),
}
def _create_naming_options():
    """Build the (<type>-rgx, <type>-name-hint) option definitions for
    every name category declared in _NAME_TYPES, and return them as a
    tuple suitable for a checker's ``options`` attribute.
    """
    options = []
    for category, (regexp, label) in _NAME_TYPES.iteritems():
        opt_prefix = category.replace('_', '-')
        rgx_option = (
            '%s-rgx' % (opt_prefix,),
            {'default': regexp, 'type': 'regexp', 'metavar': '<regexp>',
             'help': 'Regular expression matching correct %s names' % (label,)})
        hint_option = (
            '%s-name-hint' % (opt_prefix,),
            {'default': regexp.pattern, 'type': 'string', 'metavar': '<string>',
             'help': 'Naming hint for %s names' % (label,)})
        options.append(rgx_option)
        options.append(hint_option)
    return tuple(options)
class NameChecker(_BasicChecker):
    """Checks names of modules, classes, functions, methods, attributes,
    arguments and variables against configurable regular expressions.
    """

    msgs = {
        'C0102': ('Black listed name "%s"',
                  'blacklisted-name',
                  'Used when the name is listed in the black list (unauthorized \
                  names).'),
        'C0103': ('Invalid %s name "%s"%s',
                  'invalid-name',
                  'Used when the name doesn\'t match the regular expression \
                  associated to its type (constant, variable, class...).'),
        }

    options = (# XXX use set
               ('good-names',
                {'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Good variable names which should always be accepted,'
                          ' separated by a comma'}
               ),
               ('bad-names',
                {'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
                 'type' :'csv', 'metavar' : '<names>',
                 'help' : 'Bad variable names which should always be refused, '
                          'separated by a comma'}
               ),
               ('name-group',
                {'default' : (),
                 'type' :'csv', 'metavar' : '<name1:name2>',
                 'help' : ('Colon-delimited sets of names that determine each'
                           ' other\'s naming style when the name regexes'
                           ' allow several styles.')}
               ),
               ('include-naming-hint',
                {'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
                 'help': 'Include a hint for the correct naming format with invalid-name'}
               ),
              ) + _create_naming_options()

    def __init__(self, linter):
        _BasicChecker.__init__(self, linter)
        # Per-run caches, (re)filled in open():
        # _name_category: name-group -> style (regexp lastgroup) chosen first.
        # _name_group: name type -> group key from the name-group option.
        self._name_category = {}
        self._name_group = {}

    def open(self):
        self.stats = self.linter.add_stats(badname_module=0,
                                           badname_class=0, badname_function=0,
                                           badname_method=0, badname_attr=0,
                                           badname_const=0,
                                           badname_variable=0,
                                           badname_inlinevar=0,
                                           badname_argument=0,
                                           badname_class_attribute=0)
        # Expand the colon-delimited name-group option into the lookup map.
        for group in self.config.name_group:
            for name_type in group.split(':'):
                self._name_group[name_type] = 'group_%s' % (group,)

    @check_messages('blacklisted-name', 'invalid-name')
    def visit_module(self, node):
        # Only the last dotted component is the module's own name.
        self._check_name('module', node.name.split('.')[-1], node)

    @check_messages('blacklisted-name', 'invalid-name')
    def visit_class(self, node):
        self._check_name('class', node.name, node)
        for attr, anodes in node.instance_attrs.iteritems():
            # Skip attributes already defined (and thus named) by an ancestor.
            if not list(node.instance_attr_ancestors(attr)):
                self._check_name('attr', attr, anodes[0])

    @check_messages('blacklisted-name', 'invalid-name')
    def visit_function(self, node):
        # Do not emit any warnings if the method is just an implementation
        # of a base class method.
        if node.is_method() and overrides_a_method(node.parent.frame(), node.name):
            return
        self._check_name(_determine_function_name_type(node),
                         node.name, node)
        # Check argument names
        args = node.args.args
        if args is not None:
            self._recursive_check_names(args, node)

    @check_messages('blacklisted-name', 'invalid-name')
    def visit_global(self, node):
        # Names declared global are checked with the constant convention.
        for name in node.names:
            self._check_name('const', name, node)

    @check_messages('blacklisted-name', 'invalid-name')
    def visit_assname(self, node):
        """check module level assigned names"""
        frame = node.frame()
        ass_type = node.ass_type()
        if isinstance(ass_type, astroid.Comprehension):
            self._check_name('inlinevar', node.name, node)
        elif isinstance(frame, astroid.Module):
            if isinstance(ass_type, astroid.Assign) and not in_loop(ass_type):
                # Module-level assignment of a class object follows the
                # class convention, anything else the constant convention.
                if isinstance(safe_infer(ass_type.value), astroid.Class):
                    self._check_name('class', node.name, node)
                else:
                    self._check_name('const', node.name, node)
            elif isinstance(ass_type, astroid.ExceptHandler):
                self._check_name('variable', node.name, node)
        elif isinstance(frame, astroid.Function):
            # global introduced variable aren't in the function locals
            if node.name in frame and node.name not in frame.argnames():
                self._check_name('variable', node.name, node)
        elif isinstance(frame, astroid.Class):
            # Skip class attributes already named by an ancestor class.
            if not list(frame.local_attr_ancestors(node.name)):
                self._check_name('class_attribute', node.name, node)

    def _recursive_check_names(self, args, node):
        """check names in a possibly recursive list <arg>"""
        for arg in args:
            if isinstance(arg, astroid.AssName):
                self._check_name('argument', arg.name, node)
            else:
                # Tuple-unpacking argument (Python 2): recurse into it.
                self._recursive_check_names(arg.elts, node)

    def _find_name_group(self, node_type):
        # Fall back to the node type itself when no group was configured.
        return self._name_group.get(node_type, node_type)

    def _is_multi_naming_match(self, match):
        # A named regexp group records which of several allowed styles
        # matched; EXEMPT_NAME_CATEGORIES don't pin the group's style.
        return (match is not None and
                match.lastgroup is not None and
                match.lastgroup not in EXEMPT_NAME_CATEGORIES)

    def _check_name(self, node_type, name, node):
        """check for a name using the type's regexp"""
        if is_inside_except(node):
            clobbering, _ = clobber_in_except(node)
            if clobbering:
                # Clobbering an outer name in an except clause is reported
                # elsewhere; don't double up with a naming message.
                return
        if name in self.config.good_names:
            return
        if name in self.config.bad_names:
            self.stats['badname_' + node_type] += 1
            self.add_message('blacklisted-name', node=node, args=name)
            return
        regexp = getattr(self.config, node_type + '_rgx')
        match = regexp.match(name)
        if self._is_multi_naming_match(match):
            # The first style seen for a group wins; later names in the same
            # group that use a different style are treated as invalid.
            name_group = self._find_name_group(node_type)
            if name_group not in self._name_category:
                self._name_category[name_group] = match.lastgroup
            elif self._name_category[name_group] != match.lastgroup:
                match = None
        if match is None:
            type_label = _NAME_TYPES[node_type][1]
            hint = ''
            if self.config.include_naming_hint:
                hint = ' (hint: %s)' % (getattr(self.config, node_type + '_name_hint'))
            self.add_message('invalid-name', node=node, args=(type_label, name, hint))
            self.stats['badname_' + node_type] += 1
class DocStringChecker(_BasicChecker):
    """Checks that modules, classes, functions and methods have a
    non-empty docstring (C0111/C0112).
    """

    msgs = {
        'C0111': ('Missing %s docstring', # W0131
                  'missing-docstring',
                  'Used when a module, function, class or method has no docstring.\
                  Some special methods like __init__ doesn\'t necessary require a \
                  docstring.'),
        'C0112': ('Empty %s docstring', # W0132
                  'empty-docstring',
                  'Used when a module, function, class or method has an empty \
                  docstring (it would be too easy ;).'),
        }

    options = (('no-docstring-rgx',
                {'default' : NO_REQUIRED_DOC_RGX,
                 'type' : 'regexp', 'metavar' : '<regexp>',
                 'help' : 'Regular expression which should only match '
                          'function or class names that do not require a '
                          'docstring.'}
               ),
               ('docstring-min-length',
                {'default' : -1,
                 'type' : 'int', 'metavar' : '<int>',
                 'help': ('Minimum line length for functions/classes that'
                          ' require docstrings, shorter ones are exempt.')}
               ),
              )

    def open(self):
        self.stats = self.linter.add_stats(undocumented_module=0,
                                           undocumented_function=0,
                                           undocumented_method=0,
                                           undocumented_class=0)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_module(self, node):
        self._check_docstring('module', node)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_class(self, node):
        # Names matching no-docstring-rgx are exempt.
        if self.config.no_docstring_rgx.match(node.name) is None:
            self._check_docstring('class', node)

    @check_messages('missing-docstring', 'empty-docstring')
    def visit_function(self, node):
        if self.config.no_docstring_rgx.match(node.name) is None:
            ftype = node.is_method() and 'method' or 'function'
            if isinstance(node.parent.frame(), astroid.Class):
                overridden = False
                # check if node is from a method overridden by its ancestor
                for ancestor in node.parent.frame().ancestors():
                    if node.name in ancestor and \
                       isinstance(ancestor[node.name], astroid.Function):
                        overridden = True
                        break
                # Overriding methods inherit the contract (and docstring
                # requirement) from the base class definition.
                self._check_docstring(ftype, node,
                                      report_missing=not overridden)
            else:
                self._check_docstring(ftype, node)

    def _check_docstring(self, node_type, node, report_missing=True):
        """check the node has a non empty docstring"""
        docstring = node.doc
        if docstring is None:
            if not report_missing:
                return
            # Bodies shorter than docstring-min-length are exempt
            # (modules are always checked).
            if node.body:
                lines = node.body[-1].lineno - node.body[0].lineno + 1
            else:
                lines = 0
            max_lines = self.config.docstring_min_length
            if node_type != 'module' and max_lines > -1 and lines < max_lines:
                return
            self.stats['undocumented_'+node_type] += 1
            self.add_message('missing-docstring', node=node, args=(node_type,))
        elif not docstring.strip():
            self.stats['undocumented_'+node_type] += 1
            self.add_message('empty-docstring', node=node, args=(node_type,))
class PassChecker(_BasicChecker):
    """check if the pass statement is really necessary"""

    msgs = {'W0107': ('Unnecessary pass statement',
                      'unnecessary-pass',
                      'Used when a "pass" statement that can be avoided is '
                      'encountered.'),
           }

    @check_messages('unnecessary-pass')
    def visit_pass(self, node):
        # A lone `pass` is the only thing keeping its suite non-empty; as
        # soon as it has a sibling statement, it is dead weight.
        siblings = node.parent.child_sequence(node)
        if len(siblings) > 1:
            self.add_message('unnecessary-pass', node=node)
class LambdaForComprehensionChecker(_BasicChecker):
    """check for using a lambda where a comprehension would do.

    See <http://www.artima.com/weblogs/viewpost.jsp?thread=98196>
    where GvR says comprehensions would be clearer.
    """

    msgs = {'W0110': ('map/filter on lambda could be replaced by comprehension',
                      'deprecated-lambda',
                      'Used when a lambda is the first argument to "map" or '
                      '"filter". It could be clearer as a list '
                      'comprehension or generator expression.',
                      {'maxversion': (3, 0)}),
           }

    @check_messages('deprecated-lambda')
    def visit_callfunc(self, node):
        """visit a CallFunc node, check if map or filter are called with a
        lambda
        """
        arguments = node.args
        # Only interested in calls whose first argument is a lambda.
        if not arguments or not isinstance(arguments[0], astroid.Lambda):
            return
        callee = safe_infer(node.func)
        if (is_builtin_object(callee)
                and callee.name in ['map', 'filter']):
            self.add_message('deprecated-lambda', node=node)
def register(linter):
    """required method to auto register this checker"""
    # Registration order matches the declaration order of the checkers.
    for checker_class in (BasicErrorChecker, BasicChecker, NameChecker,
                          DocStringChecker, PassChecker,
                          LambdaForComprehensionChecker):
        linter.register_checker(checker_class(linter))
|
hkupty/python-mode
|
pymode/libs/pylama/lint/pylama_pylint/pylint/checkers/base.py
|
Python
|
lgpl-3.0
| 49,156
|
[
"VisIt"
] |
d010e1dd240d7b690471449f77f9c11d7164a3434c0525c26e45a1485b8b54f8
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that trains an MLP using early stopping.
Training will stop when the stopping condition is satisfied
or when num_epochs has been reached, whichever is first.
"""
import os
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# Load MNIST and wrap the train/validation splits in DataIterators;
# lshape=(1, 28, 28) presents each flat image as channels x height x width.
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
train_set = DataIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
valid_set = DataIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)

# initialize model: 100-unit ReLU hidden layer with batch norm,
# 10-way logistic output layer, binary cross-entropy cost.
layers = []
layers.append(Affine(nout=100, init=init_norm, batch_norm=True, activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)))

cost = GeneralizedCost(costfunc=CrossEntropyBinary())

mlp = Model(layers=layers)

# define stopping function
# it takes as input a tuple (State,val[t])
# which describes the cumulative validation state (generated by this function)
# and the validation error at time t
# and returns as output a tuple (State', Bool),
# which represents the new state and whether to stop

# Stop if validation error ever increases from epoch to epoch
def stop_func(s, v):
    """Early-stopping predicate.

    ``s`` is the running state (the best, i.e. lowest, validation value
    seen so far, or None on the first call) and ``v`` is the current
    validation value. Returns ``(new_state, should_stop)``: stop as soon
    as the current value exceeds the best one recorded.
    """
    if s is None:
        # First evaluation: record the baseline, never stop yet.
        return (v, False)
    best = min(v, s)
    worsened = v > s
    return (best, worsened)
# fit and validate
optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

# configure callbacks: make sure validation runs every epoch so the
# early-stop callback has a value to compare.
if args.callback_args['eval_freq'] is None:
    args.callback_args['eval_freq'] = 1
callbacks = Callbacks(mlp, train_set, eval_set=valid_set, **args.callback_args)
callbacks.add_early_stop_callback(stop_func)
# Snapshot the best-so-far model weights alongside the data directory.
callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))

# Train until num_epochs or until stop_func signals a stop, whichever first.
mlp.fit(train_set,
        optimizer=optimizer,
        num_epochs=args.epochs,
        cost=cost,
        callbacks=callbacks)
|
nhynes/neon
|
examples/early_stopping.py
|
Python
|
apache-2.0
| 3,001
|
[
"Gaussian"
] |
686bcdaefd01ff4687d28170779f1b189d525141c79a6d9ec1291e541f717248
|
#!/usr/bin/env python
################################################################################
#
# qe_extractor.py
#
# Pulls all sorts of information from a QE output file and writes to standard
# output, e.g. the command "qe_extractor.py INPUTFILE homo" uses the number of
# electrons and the output KS eigenvalues to print the KS homo.
#
################################################################################
#
# Copyright 2015 Kane O'Donnell
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
#
# NOTES
#
# 1. A list of allowed commands appears early in the code below. Multiple commands
# leads to multiple output lines in the same order.
#
# 2. Output is simply "printed" (to stdout), so redirect to a file or a variable
# if you need the value.
#
# 3. The output isn't "safe", e.g. the code will give you a homo if you ask for
# one even if the input is a metal. That's by design, because the target use
# for my own work considers cases where smearing has been used for an insulating
# system to help convergence (small bandgap) but the homo and lumo are needed.
# Quantum Espresso is pretty silly about this case (again, by design) and only
# reports the (non-physical) fermi level.
#
# 4. Energy outputs are in eV, because Ry and Hartree are insane.
#
# 5. The output isn't (yet) clever to geometry steps - you might get an output for
# every single SCF cycle, or you might not, depending on the command.
#
################################################################################
from __future__ import division
import argparse
import sys
import os.path
from math import floor
# Conversion factor: 1 Rydberg in electron-volts.
Ry2eV = 13.605698066
SMALL = 1.0e-6 # small floating point number for equality comparisons
DEBUG = 1  # NOTE(review): not read anywhere in the visible code - confirm

# Commands accepted on the command line; anything else is reported as an
# error when arguments are checked below.
valid_commands = ["homo", \
                  "lumo", \
                  "num_atoms", \
                  "num_electrons", \
                  "num_bands", \
                  "num_kpoints", \
                  "total_energy", \
                  "total_ae_energy", \
                  "efermi" ]
def get_eigs_from_string(str):
    """ This is string.split() tweaked to address a bug in Quantum Espresso's formatted fortran
    output where sometimes it prints two negative floats without a space e.g. -130.3940-120.6023.
    In these cases, split() doesn't work directly.

    Note: the fallback assumes a fused pair consists of *negative* values (true for
    the fixed-width eigenvalue listings this targets), so a fused positive/negative
    pair such as "5.5-6.6" would get the wrong sign on the first value.

    :param str: one line of eigenvalue output. (The parameter shadows the builtin
                ``str``; the name is kept for backward compatibility.)
    :return: list of floats parsed from the line (empty list for a blank line).
    """
    eigs = []
    bits = str.split()
    for b in bits:
        # Use '!=' rather than 'is not': identity comparison of strings is an
        # implementation detail of interning and not a reliable equality test.
        if b != '':
            try:
                tmpf = float(b)
                eigs.append(tmpf)
            except ValueError:
                # Fused negatives like "-130.3940-120.6023": split on '-' and
                # restore the minus sign on every non-empty piece.
                negs = b.split('-')
                eigs += [-1 * float(c) for c in negs if c != '']
    return eigs
# Command-line interface: one pw.x output file plus one or more extraction
# commands (see valid_commands above for the accepted names).
parser = argparse.ArgumentParser(description="Extract information from QE(PWSCF) output file and print to stdout.")
parser.add_argument('inputfile', help="Quantum Espresso pw.x output file for input.")
parser.add_argument('commands', nargs="+", help="Parameters to be extracted.")
args = parser.parse_args()
# Check we have valid commands. Abort immediately on an unknown command:
# previously the script only printed the error and carried on, which ended
# in a confusing KeyError when the results were printed at the bottom.
for c in args.commands:
    if c not in valid_commands:
        print("ERROR: command %s is not valid, see source file for a list of possible commands." % (c))
        sys.exit(1)
# Some of the commands are easy, others require more complex parsing. Deal with all the easy ones
# first.
# Slurp the whole file into memory; the context manager guarantees the file
# handle is closed even if reading raises.
with open(args.inputfile, 'r') as f:
    lines = f.readlines()
# Flags recording whether each quantity was reported explicitly by QE; if
# not, a fallback computation from the eigenvalues happens further below.
found_fermi = False
found_homo = False
found_lumo = False
found_ae_energy = False
# command name -> value to print at the end (printed in requested order).
output_text = {}
# Single pass for the quantities that appear on recognisable one-line
# banners. Later matches overwrite earlier ones, so for multi-step runs the
# values from the last matching line win.
for l in lines:
    if "number of atoms/cell =" in l:
        if "num_atoms" in args.commands:
            output_text["num_atoms"] = l.split()[4]
    if "number of electrons =" in l:
        if "num_electrons" in args.commands:
            output_text["num_electrons"] = l.split()[4]
        # Always remember the electron count - needed later to locate the
        # HOMO index even when "num_electrons" itself was not requested.
        nelec = float(l.split()[4])
    if "number of Kohn-Sham states=" in l:
        if "num_bands" in args.commands:
            output_text["num_bands"] = l.split()[4]
        # nband is needed later to know when a k-point's eigenvalue list is
        # complete.
        nband = int(l.split()[4])
    if "number of k points=" in l:
        if "num_kpoints" in args.commands:
            output_text["num_kpoints"] = l.split()[4]
        nkpt = int(l.split()[4])
    if "! total energy =" in l:
        if "total_energy" in args.commands:
            # Convert from Rydberg to eV (see note 4 in the file header).
            output_text["total_energy"] = float(l.split()[4]) * Ry2eV
    if "total all-electron energy =" in l:
        if "total_ae_energy" in args.commands:
            output_text["total_ae_energy"] = float(l.split()[4]) * Ry2eV
            found_ae_energy = True
    if "highest occupied, lowest unoccupied level (ev):" in l:
        # This might not be present - opportunistic!
        homo = float(l.split()[6])
        found_homo = True
        lumo = float(l.split()[7])
        found_lumo = True
        if "homo" in args.commands:
            output_text["homo"] = homo
        if "lumo" in args.commands:
            output_text["lumo"] = lumo
    if "the Fermi energy is" in l:
        efermi = float(l.split()[4])
        found_fermi = True
        if "efermi" in args.commands:
            output_text["efermi"] = efermi
    if "highest occupied level (ev):" in l:
        # Variant banner where QE reports only the HOMO (no unoccupied
        # states printed).
        homo = float(l.split()[4])
        found_homo = True
        if "homo" in args.commands:
            output_text["homo"] = homo
# If PAW potentials aren't used, an all-electron energy won't be reported so if the user
# asked for one, give an error.
if "total_ae_energy" in args.commands and not found_ae_energy:
    print "ERROR - All-electron energy not found. Check calculation used PAW and that it finished correctly."
# Ok, now for the slightly trickier ones - homo, lumo and fermi_level. First, QE might actually
# give us values, which we picked up earlier. If not, we need to do a bit more work:
# parse the eigenvalue blocks and derive HOMO/LUMO from the electron count.
if ("homo" in args.commands and found_homo is False) or \
   ("lumo" in args.commands and found_lumo is False) or \
   ("efermi" in args.commands and found_fermi is False):
    # Lots of things to worry about here and we have to loop a lot. For performance, find
    # the important section of the file.
    # NOTE(review): istart/iend stay unbound (NameError) if these banners
    # never appear, e.g. for a truncated or unconverged run - confirm that
    # is acceptable for the intended use.
    for i,l in enumerate(lines):
        if "End of self-consistent calculation" in l:
            istart = i
        if "convergence has been achieved in" in l:
            iend = i
    has_spin = False
    # For spin-polarised output only the SPIN UP block is scanned.
    # NOTE(review): istart/iend are reassigned to indices *within the slice*
    # lines[istart:iend] but are then used against the full "lines" list -
    # this looks like an off-by-offset bug for spin-polarised files; verify.
    for i,l in enumerate(lines[istart:iend]):
        if "SPIN UP" in l:
            has_spin = True
            istart = i
        if "SPIN DOWN" in l:
            iend = i
    # Find the k-point block locations
    ks = []
    for i,l in enumerate(lines[istart:iend]):
        if "k =" in l:
            if DEBUG:
                print l
            # +1 skips to the line after the "k =" header.
            ks.append(i+istart + 1)
    if DEBUG:
        print "K-point indices are:"
        print ks
    # Add the iend value to act as an endpoint for the
    # eigenvalue search.
    ks.append(iend)
    # For each k, look for eigenvalues until we have enough.
    eigsk = []
    for i in range(len(ks)-1):
        eigs = []
        for l in lines[ks[i]+1:ks[i+1]]:
            # Now - pay attention! There is a bug in the output of espresso that means split() might
            # not work here. This means we have to play a silly game here assuming eigenvalues are
            # output in increasing order.
            # This is done with the helper function defined at the top of the file.
            eigs += get_eigs_from_string(l)
            if DEBUG:
                print "Current length of eigs is %d, num_bands is %d." %(len(eigs), nband)
            if len(eigs) == nband:
                eigsk.append(eigs)
                break
    # Now, use number of electrons to figure out where the homo is.
    # Assumes closed-shell filling: HOMO index = nelec/2, then take the
    # extremes over all k-points (band maximum / band minimum).
    max_occ = -1e6
    min_unocc = 1e6
    idx_homo = int(floor(nelec / 2)) - 1 # The -1 is because we have 0-based indices in python.
    for ek in eigsk:
        if ek[idx_homo] > max_occ:
            max_occ = ek[idx_homo]
        if ek[idx_homo + 1] < min_unocc:
            min_unocc = ek[idx_homo + 1]
    # Fill in whatever QE did not report directly.
    if not found_homo:
        homo = max_occ
        output_text["homo"] = homo
    if not found_lumo:
        lumo = min_unocc
        output_text["lumo"] = lumo
    if not found_fermi:
        # Approximate the Fermi level as mid-gap (see note 3 in the header).
        efermi = (homo + lumo) / 2
        output_text["efermi"] = efermi
# Emit one result line per requested command, preserving the order in which
# the commands were given on the command line.
for cmd in args.commands:
    sys.stdout.write(str(output_text[cmd]) + "\n")
|
HSINWEI/physics
|
python/qe_extractor.py
|
Python
|
gpl-3.0
| 8,488
|
[
"ESPResSo",
"Quantum ESPRESSO"
] |
88a8743c004db888bd7cf4c873b65d87c9cade397bda36fcc2eb1ba2ba1ffa63
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 29 01:48:17 2017
@author: Aretas
"""
#Export Linear MD results to xlsx
import re
import argparse
import os
import xlsxwriter
import collections
import csv
# Command-line interface: enzyme metadata (CSV), ligand/scaffold list (TXT)
# and the folder containing the MD docking results.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file',
                    help='choose the list of MD enzymes (should be a .csv file)', required=True)
parser.add_argument('-f2', '--file2',
                    help='choose the TXT file with the list of ligands', required=True)
parser.add_argument('-lo', '--location',
                    help='choose MD results containing folder', required=True)
args = parser.parse_args()
location = args.location
workbook = xlsxwriter.Workbook("MD_Linear_pho.xlsx")#####!!!!!!!
ligand_dict = {} #skeleton number in the lib : type of the skeleton
ligand_counter = 0
# Each ligand line is expected as: <number> <ignored> <skeleton type>.
with open(args.file2) as f:
    for line in f:
        k = line.split()
        ligand_dict[int(k[0])] = k[2]
        ligand_counter += 1
# NOTE(review): input1_dict appears to be unused in this script - confirm.
input1_dict = {}
# Last spreadsheet row holding data: ligands start on row 2 (row 1 = header).
number_of_products = ligand_counter + 1
results_loc = os.path.realpath(args.location)
file_path = results_loc + "/aff_dis/"
# Normalise every CSV field to str so later comparisons are uniform.
with open(args.file) as f:
    csv_dict = [{k: str(v) for k, v in row.items()}
                for row in csv.DictReader(f, skipinitialspace=True)]
# Accumulates cross-sheet cell references for the 'mean' sheet formulas.
mean_line = ""
worksheet1 = workbook.add_worksheet('mean')
input2_dict = {}
# One worksheet per enzyme/chain: parse every
# "<enzyme>_NS[_pho]_<chain>_scaffold_aff_dis.txt" affinity file, tabulate
# the (sign-flipped) top score per scaffold, chart the scores and append
# the enzyme's metadata from the CSV.
for txt_aff in os.listdir(file_path):
    if txt_aff.endswith("aff_dis.txt"):
        completeName = os.path.join(file_path,"{0}".format(txt_aff))
        # scaffold type -> positive binding score (docking scores are
        # negative, so flip the sign).
        with open(completeName) as input2:
            for line in input2:
                f = line.split()
                input2_dict[f[0]] = 0 - float(f[1])
        # Two naming conventions exist: with and without the "_pho" tag.
        m_obj = re.search(r'(.*)_NS_pho_(.*)_scaffold_aff_dis.txt', txt_aff)
        m_obj1 = re.search(r'(.*)_NS_(.*)_scaffold_aff_dis.txt', txt_aff)
        if m_obj:
            enzyme_name = m_obj.groups()[0]
            chain = m_obj.groups()[1]
        # BUG FIX: this used to be a separate "if" that read the chain from
        # m_obj, which is None for non-"_pho" files (AttributeError) and
        # which needlessly re-matched "_pho" files. "elif" + the correct
        # match object fixes the crash while keeping pho-file behaviour.
        elif m_obj1:
            enzyme_name = m_obj1.groups()[0]
            chain = m_obj1.groups()[1]
        sheet_name = enzyme_name.upper() + "_" + chain
        # Remember "'SHEET'!C{0}," so the 'mean' sheet can average this
        # sheet's score column later ({0} is substituted with the row).
        sheet_name2 = "'" + sheet_name + "'" + '!C{0},'
        mean_line += sheet_name2
        worksheet = workbook.add_worksheet(sheet_name)
        bold = workbook.add_format ({'bold' : True})
        italic = workbook.add_format ({'italic' : True})
        chart = workbook.add_chart({'type': 'column'})
        chart1 = workbook.add_chart({'type': 'column'})
        # Column chart of the top scores (column C).
        chart.add_series({'values': '={0}!$C$2:$C${1}'.format(sheet_name, number_of_products)})
        chart.set_y_axis({'min': 4.0})
        worksheet.insert_chart('F12', chart)
        worksheet.write('A1', 'Scaffold Number', bold)
        worksheet.set_column('A:A', 14)
        worksheet.write('B1', 'Library number', bold)
        worksheet.write('C1', 'Top Score', bold)
        row = 1
        col = 0
        # Write scaffolds in numeric order: number, type, score.
        od = collections.OrderedDict(sorted(ligand_dict.items()))
        for k, v in od.items():
            worksheet.write(row, col, k)
            worksheet.write(row, col + 1, v)
            worksheet.write(row, col + 2, input2_dict[v])
            row += 1
        #worksheet.write('F2', enzyme_name.upper())
        # Per-sheet summary statistics as live Excel formulas.
        worksheet.write('F2', 'Top score')
        worksheet.write('F3', 'min')
        worksheet.write('F4', 'max')
        worksheet.write('F5', 'sd')
        worksheet.write('G3', '=MIN(C2:C{0})'.format(number_of_products))
        worksheet.write('G4', '=MAX(C2:C{0})'.format(number_of_products))
        worksheet.write('G5', '=STDEV(C2:C{0})'.format(number_of_products))
        # Copy the matching enzyme metadata row from the CSV; the stored
        # PDB ID may be lower- or upper-case.
        for dictionary in csv_dict:
            if dictionary["PDB_ID"] == enzyme_name or dictionary["PDB_ID"] == enzyme_name.upper() :
                worksheet.write('F7', 'Enzyme name')
                worksheet.write('G7', 'PDB ID')
                worksheet.write('H7', 'Native product', bold)
                worksheet.write('I7', 'Chain')
                worksheet.write('J7', 'Ligand in crystal')
                worksheet.write('K7', 'Conformation')
                worksheet.write('L7', 'Resolution (A)')
                worksheet.set_column('L:L', 12)
                worksheet.write('M7', 'Species')
                worksheet.write('F8', dictionary["Enzyme_name"])
                worksheet.set_column('F:F', 25)
                worksheet.write('G8', dictionary['PDB_ID'])
                worksheet.write('H8', dictionary['Product'])
                worksheet.set_column('H:H', 20)
                worksheet.write('I8', chain)
                worksheet.write('J8', dictionary['Ligand_crystal'])
                worksheet.set_column('J:J', 18)
                worksheet.write('K8', dictionary['Conformation'])
                worksheet.write('L8', dictionary['Resolution'])
                worksheet.write('M8', dictionary['Species'], italic)
                worksheet.set_column('M:M', 18)
### MEAN SHEET
# Summary sheet aggregating the per-enzyme sheets: for each scaffold, the
# mean and standard deviation of the top score across all enzymes.
bold = workbook.add_format ({'bold' : True})
italic = workbook.add_format ({'italic' : True})
chart = workbook.add_chart({'type': 'column'})
chart1 = workbook.add_chart({'type': 'column'})
# NOTE(review): chart2 and chart3 are created but never used - confirm they
# can be removed.
chart2 = workbook.add_chart({'type': 'column'})
chart3 = workbook.add_chart({'type': 'column'})
worksheet1.write('A1', 'Scaffold Number', bold)
worksheet1.set_column('A:A', 14)
worksheet1.write('B1', 'Library number', bold)
worksheet1.write('C1', 'Topology', bold)
worksheet1.write('D1', 'mean', bold)
worksheet1.write('E1', 'sd', bold)
row = 1
col = 0
# Scaffold rows in numeric order; this export covers linear topology only.
od = collections.OrderedDict(sorted(ligand_dict.items()))
for k, v in od.items():
    worksheet1.write(row, col, k)
    worksheet1.write(row, col + 1, v)
    worksheet1.write(row, col + 2, "linear")
    row += 1
# mean_line holds "'SHEET'!C{0}," fragments accumulated while writing the
# per-enzyme sheets; drop the trailing comma and wrap in Excel formulas.
mean_line = mean_line[:-1]
a_mean_line = "=AVERAGE(" + mean_line + ")"
std_mean_line = "=STDEV(" + mean_line + ")"
#print (a_mean_line)
# The {0} placeholders inside mean_line are substituted with the row number,
# so each row averages the matching cell of every per-enzyme sheet.
for i in range(2, number_of_products + 1):
    worksheet1.write('D{0}'.format(i), a_mean_line.format(i))
    worksheet1.write('E{0}'.format(i), std_mean_line.format(i))
# Overall statistics over the per-scaffold means.
worksheet1.write('I2', 'mean')
worksheet1.write('I3', 'min')
worksheet1.write('I4', 'max')
worksheet1.write('I5', 'sd')
worksheet1.write('J3', '=MIN(D2:D{0})'.format(number_of_products))
worksheet1.write('J4', '=MAX(D2:D{0})'.format(number_of_products))
worksheet1.write('J5', '=STDEV(D2:D{0})'.format(number_of_products))
# Column chart of the means with standard-error bars.
chart.add_series({
    'values': '={0}!$D$2:$D${1}'.format("mean", number_of_products),
    'y_error_bars': {'type': 'standard_error'},
})
chart.set_y_axis({'min': 5.0})
worksheet1.insert_chart('I8', chart)
# NOTE(review): this second chart plots column C, which holds the string
# "linear" on every row - confirm the intended source column.
chart1.add_series({'values': '={0}!$C$2:$C${1}'.format("mean", number_of_products)})
chart1.set_y_axis({'min': 0.15})
worksheet1.insert_chart('I23', chart1)
workbook.close()
|
aretas2/High-throughput-molecular-docking
|
excel-py/xcl2_pho_L.py
|
Python
|
mit
| 6,624
|
[
"CRYSTAL"
] |
922bd3432383f88472ee778a30510d9e189621da7fc64ba79384f693aca8901a
|
import subprocess
from Bio import SeqIO
import unittest
import shutil
import os
import time
# Prereqs:
# module load bowtie/0.12.8
# module load java
# module load samtools
# Trinity
# Copy the .gz files in sample_data/test_Trinity_Assembly to current directory
# Run using nosetests
MEM_FLAG = "--max_memory 2G"
TEMP_FILES = ['both.fa', 'inchworm.K25.L25.fa', 'jellyfish.kmers.fa', 'scaffolding_entries.sam']
class TestTrinity(unittest.TestCase):
    """Integration tests for the Trinity assembler command-line interface.

    Every test shells out to the real ``Trinity`` executable, so Trinity
    (plus bowtie, java and samtools) must be on PATH and the sample
    ``*.fq.gz`` read files must be in the current directory (see the notes
    at the top of this file). The tests cover assembly output counts,
    temp-file cleanup behaviour, informational flags and the handling of
    invalid command lines.
    """

    @classmethod
    def setUpClass(cls):
        """Remove any stale coverage log from a previous run."""
        try:
            os.remove('coverage.log')
        except OSError:
            # No previous log - nothing to do. (Previously a bare
            # ``except:`` which also hid unrelated errors.)
            pass

    def tearDown(self):
        # ignore_errors=True: the directory may legitimately be absent,
        # e.g. after a --full_cleanup run.
        shutil.rmtree('trinity_out_dir', True)

    ### Assembly tests

    def test_sample_data_seq_count(self):
        """Paired-end assembly yields roughly the expected transcript count."""
        self.trinity(
            "Trinity --seqType fq %s --left reads.left.fq.gz,reads2.left.fq.gz --right reads.right.fq.gz,reads2.right.fq.gz --SS_lib_type RF --CPU 4 --no_cleanup" % MEM_FLAG)
        seq_count = self._count_fasta_records("trinity_out_dir/Trinity.fasta")
        self.assertTrue(75 <= seq_count <= 90, msg='Found %s sequences' % seq_count)

    def test_sample_data_trimmed_and_normalized(self):
        """Trimmomatic + read normalization still yields a sane count."""
        self.trinity(
            "Trinity --seqType fq %s --left reads.left.fq.gz,reads2.left.fq.gz --right reads.right.fq.gz,reads2.right.fq.gz --SS_lib_type RF --CPU 4 --trimmomatic --normalize_reads --no_cleanup" % MEM_FLAG)
        seq_count = self._count_fasta_records("trinity_out_dir/Trinity.fasta")
        self.assertTrue(75 <= seq_count <= 85, msg='Found %s sequences' % seq_count)

    def test_no_cleanup_leaves_temp_files(self):
        self.trinity(
            "Trinity --seqType fq %s --left reads.left.fq.gz,reads2.left.fq.gz --right reads.right.fq.gz,reads2.right.fq.gz --SS_lib_type RF --CPU 4 --no_cleanup" % MEM_FLAG)
        for f in TEMP_FILES:
            self.assertTrue(os.path.exists("trinity_out_dir/%s" % f), msg="%s not found with no_cleanup" % f)

    def test_cleanup_removes_temp_files(self):
        self.trinity(
            "Trinity --seqType fq %s --left reads.left.fq.gz,reads2.left.fq.gz --right reads.right.fq.gz,reads2.right.fq.gz --SS_lib_type RF --CPU 4 --full_cleanup" % MEM_FLAG)
        time.sleep(5)  # Make sure the system has time to recognize the directory is gone
        self.assertFalse(os.path.exists("trinity_out_dir"), msg="Did full_cleanup but trinity_out_dir exists")
        self.assertTrue(os.path.isfile("trinity_out_dir.Trinity.fasta"),
                        msg="Did full_cleanup but output file not created")

    def test_single_end_with_rf_lib_type_error(self):
        # BUG FIX: subprocess.call() never raises CalledProcessError, so the
        # original try/except version could never reach its assertion. Route
        # through get_error() like the other invalid-command-line tests.
        error = self.get_error("Trinity --seqType fq --single reads.left.fq --SS_lib_type RF")
        self.assertTrue("Error, with --single reads, the --SS_lib_type can be 'F' or 'R' only." in error)

    def test_single_end_with_fq(self):
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F" % MEM_FLAG)

    def test_no_run_chrysalis(self):
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F --no_run_chrysalis" % MEM_FLAG)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(0, len(os.listdir('trinity_out_dir/chrysalis')))

    def test_no_run_inchworm(self):
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F --no_run_inchworm" % MEM_FLAG)
        self.assertFalse(os.path.isfile("trinity_out_dir/inchworm.K25.L25.fa.finished"),
                         msg="Inchworm appears to have run although no_run_inchworm was specified")
        self.assertTrue(os.path.isfile("trinity_out_dir/jellyfish.kmers.fa"),
                        msg="jellyfish.kmers.fa was not created")

    def test_no_bowtie(self):
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F --no_bowtie" % MEM_FLAG)
        self.assertFalse(os.path.isfile("trinity_out_dir/bowtie.nameSorted.bam"),
                         msg="Bowtie appears to have run although no_bowtie was specified")

    def test_no_distributed_trinity_exec(self):
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F --no_distributed_trinity_exec" % MEM_FLAG)
        self.assertTrue(os.path.isfile("trinity_out_dir/inchworm.K25.L25.fa.finished"),
                        msg="Inchworm did not appear to run with no_distributed_trinity_exec flag")
        self.assertTrue(os.path.isfile("trinity_out_dir/jellyfish.1.finished"),
                        msg="Jellyfish did not appear to run with no_distributed_trinity_exec flag")
        self.assertFalse(os.path.isfile("trinity_out_dir/Trinity.fasta"),
                         msg="Trinity.fasta created with no_distributed_trinity_exec")

    def test_single_end_with_fa_and_reverse(self):
        self.fq2fa()
        self.trinity("Trinity %s --seqType fa --single reads.fa --SS_lib_type R" % MEM_FLAG)

    def test_output_correctly_changes_dir(self):
        shutil.rmtree('trinity_test', True)
        self.trinity("Trinity %s --seqType fq --single reads.left.fq --SS_lib_type F --output trinity_test" % MEM_FLAG)
        self.assertTrue(os.path.exists("trinity_test"), msg="Changed output directory but it was not created")
        shutil.rmtree('trinity_test', True)

    ### information tests

    def test_cite(self):
        expected = '\n\n* Trinity:\nFull-length transcriptome assembly from RNA-Seq data without a reference genome.\nGrabherr MG, Haas BJ, Yassour M, Levin JZ, Thompson DA, Amit I, Adiconis X, Fan L,\nRaychowdhury R, Zeng Q, Chen Z, Mauceli E, Hacohen N, Gnirke A, Rhind N, di Palma F,\nBirren BW, Nusbaum C, Lindblad-Toh K, Friedman N, Regev A.\nNature Biotechnology 29, 644\xe2\x80\x93652 (2011)\nPaper: http://www.nature.com/nbt/journal/v29/n7/full/nbt.1883.html\nCode: http://trinityrnaseq.sf.net\n\n\n'
        cite = subprocess.check_output(["Trinity", "--cite"])
        self.assertEqual(expected, cite)

    def test_version(self):
        # --version exits non-zero by design; the version string is captured
        # from the raised CalledProcessError.
        try:
            subprocess.check_output(["Trinity", "--version"])
            self.fail("Version returned 0 errorcode!")
        except subprocess.CalledProcessError as e:
            #self.assertEqual('Trinity version: BLEEDING_EDGE\n', e.output)
            #self.assertEqual('Trinity version: trinityrnaseq_r20140717\n', e.output)
            self.assertEqual('Trinity version: Trinity_v2.0.2\n', e.output)

    def test_show_full_usage_info(self):
        try:
            subprocess.check_output(["Trinity", "--show_full_usage_info"])
        except subprocess.CalledProcessError as e:
            self.assertTrue("Inchworm and K-mer counting-related options" in e.output)
            self.assertTrue("Chrysalis-related options" in e.output)
            self.assertTrue("Butterfly-related options" in e.output)
            self.assertTrue("Quality Trimming Options" in e.output)
            self.assertTrue("In silico Read Normalization Options" in e.output)

    ### Invalid command line tests

    def test_no_JM_specified_error(self):
        error = self.get_error("Trinity --seqType fq --single reads.left.fq --SS_lib_type F")
        self.assertTrue("Error, must specify max memory for jellyfish to use, eg. --max_memory 10G" in error)

    def test_invalid_option_error(self):
        error = self.get_error("Trinity --squidward")
        self.assertTrue("Error, do not understand options: --squidward" in error)

    def test_set_no_cleanup_and_full_cleanup_error(self):
        error = self.get_error("Trinity --no_cleanup --full_cleanup")
        self.assertTrue("cannot set --no_cleanup and --full_cleanup as they contradict" in error)

    ### Helper methods

    def trinity(self, cmdline):
        """Run a Trinity command line, appending its stdout to coverage.log."""
        with open("coverage.log", 'a') as file_out:
            subprocess.call(cmdline, shell=True, stdout=file_out)

    def get_error(self, cmd):
        """Run *cmd* (space-split, no shell) and return its output on failure.

        Returns None when the command unexpectedly succeeds, which makes the
        caller's substring assertion fail loudly instead of silently passing.
        """
        try:
            subprocess.check_output(cmd.split(' '))
        except subprocess.CalledProcessError as e:
            return e.output
        return None

    def _count_fasta_records(self, path):
        """Return the number of FASTA records in the file at *path*."""
        with open(path) as handle:
            return sum(1 for _ in SeqIO.parse(handle, "fasta"))

    def fq2fa(self):
        """Convert the sample FASTQ reads to FASTA for the fa-input tests."""
        with open("reads.left.fq") as handle:
            records = list(SeqIO.parse(handle, "fastq"))
        SeqIO.write(records, "reads.fa", "fasta")
|
ssn1306/trinityrnaseq
|
util/support_scripts/tests.py
|
Python
|
bsd-3-clause
| 8,295
|
[
"Bowtie"
] |
edcdf5170a2e12f63e87f3572019d447144e20169b15707ead22589904e1d405
|
########################################################################
# File : CloudDirector.py
# Author : A.Tsaregorodtsev
########################################################################
""" The Cloud Director is a simple agent performing VM instantiations
"""
import os
import random
import socket
import hashlib
from collections import defaultdict
# DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals, Registry, Operations, Resources
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import jobDB
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.List import fromChar
# VMDIRAC
from VMDIRAC.Resources.Cloud.EndpointFactory import EndpointFactory
from VMDIRAC.Resources.Cloud.ConfigHelper import findGenericCloudCredentials, \
getImages, \
getPilotBootstrapParameters
from VMDIRAC.WorkloadManagementSystem.Client.ServerUtils import virtualMachineDB
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import pilotAgentsDB
__RCSID__ = "$Id$"
class CloudDirector( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
  def __init__( self, *args, **kwargs ):
    """ c'tor

        Only sets default values for the agent attributes; the actual
        configuration is read in beginExecution().
    """
    AgentModule.__init__( self, *args, **kwargs )
    # imageName -> image description (CE object, site, parameters, ...)
    self.imageDict = {}
    # imageName -> { 'Hash': ..., 'CE': ... } cache of instantiated endpoints
    self.imageCECache = {}
    self.imageSlots = {}
    # imageName -> count of consecutive failed submissions
    self.failedImages = defaultdict( int )
    # Used to log the served images only on the first cycle
    self.firstPass = True
    self.vo = ''
    self.group = ''
    # self.voGroups contain all the eligible user groups for clouds submitted by this SiteDirector
    self.voGroups = []
    # Generic cloud credentials used for VM instantiation
    self.cloudDN = ''
    self.cloudGroup = ''
    # Platforms and sites served by this director, filled in getImages()
    self.platforms = []
    self.sites = []
    self.proxy = None
    self.updateStatus = True
    self.getOutput = False
    self.sendAccounting = True
  def initialize( self ):
    """ Agent initialization. Nothing to do here: all configuration-dependent
        set-up happens in beginExecution() so it is re-read every cycle.
    """
    return S_OK()
  def beginExecution( self ):
    """ Read the agent configuration at the start of each execution cycle:
        VO/group, generic cloud credentials, site/CE selection and the
        image descriptions to be served.
    """
    # The Director is for a particular user community
    self.vo = self.am_getOption( "VO", '' )
    if not self.vo:
      self.vo = CSGlobals.getVO()
    # The SiteDirector is for a particular user group
    self.group = self.am_getOption( "Group", '' )
    # Choose the group for which clouds will be submitted. This is a hack until
    # we will be able to match clouds to VOs.
    if not self.group:
      if self.vo:
        result = Registry.getGroupsForVO( self.vo )
        if not result['OK']:
          return result
        # Keep only ordinary user groups (e.g. skip pilot/admin groups)
        self.voGroups = []
        for group in result['Value']:
          if 'NormalUser' in Registry.getPropertiesForGroup( group ):
            self.voGroups.append( group )
      else:
        self.voGroups = [ self.group ]
    # Generic credentials used to instantiate VMs for this VO
    result = findGenericCloudCredentials( vo = self.vo )
    if not result[ 'OK' ]:
      return result
    self.cloudDN, self.cloudGroup = result[ 'Value' ]
    self.maxVMsToSubmit = self.am_getOption( 'MaxVMsToSubmit', 1 )
    self.runningPod = self.am_getOption( 'RunningPod', self.vo)
    # Get the site description dictionary; "Any" (default) means no filter
    siteNames = None
    if not self.am_getOption( 'Site', 'Any' ).lower() == "any":
      siteNames = self.am_getOption( 'Site', [] )
      if not siteNames:
        siteNames = None
    ces = None
    if not self.am_getOption( 'CEs', 'Any' ).lower() == "any":
      ces = self.am_getOption( 'CEs', [] )
      if not ces:
        ces = None
    # Resolve the image/CE descriptions from the configuration ...
    result = getImages( vo = self.vo,
                        siteList = siteNames )
    if not result['OK']:
      return result
    resourceDict = result['Value']
    # ... and build the CE objects / image dictionary out of them
    result = self.getImages( resourceDict )
    if not result['OK']:
      return result
    #if not siteNames:
    #  siteName = gConfig.getValue( '/DIRAC/Site', 'Unknown' )
    #  if siteName == 'Unknown':
    #    return S_OK( 'No site specified for the SiteDirector' )
    #  else:
    #    siteNames = [siteName]
    #self.siteNames = siteNames
    self.log.always( 'Sites:', siteNames )
    self.log.always( 'CEs:', ces )
    self.log.always( 'CloudDN:', self.cloudDN )
    self.log.always( 'CloudGroup:', self.cloudGroup )
    self.localhost = socket.getfqdn()
    self.proxy = ''
    # Log the served images once, on the very first cycle only
    if self.firstPass:
      if self.imageDict:
        self.log.always( "Agent will serve images:" )
        for queue in self.imageDict:
          self.log.always( "Site: %s, CE: %s, Image: %s" % ( self.imageDict[queue]['Site'],
                                                             self.imageDict[queue]['CEName'],
                                                             queue ) )
      self.firstPass = False
    return S_OK()
def __generateImageHash( self, imageDict ):
""" Generate a hash of the queue description
"""
myMD5 = hashlib.md5()
myMD5.update( str( imageDict ) )
hexstring = myMD5.hexdigest()
return hexstring
  def getImages( self, resourceDict ):
    """ Get the list of relevant CEs and their descriptions

        Builds self.imageDict from the per-site/per-CE resource description:
        merges CE-level and image-level parameters (tags, MaxRAM, platform),
        instantiates (or reuses from cache) a CE object per image, and
        collects the served sites and platforms.

        :param dict resourceDict: site -> ce -> description (with 'Images')
        :return: S_OK() or the first error encountered
    """
    self.imageDict = {}
    ceFactory = EndpointFactory()
    result = getPilotBootstrapParameters( vo = self.vo, runningPod = self.runningPod )
    if not result['OK']:
      return result
    opParameters = result['Value']
    for site in resourceDict:
      for ce in resourceDict[site]:
        ceDict = resourceDict[site][ce]
        # CE-level tags may be a comma-separated string in the CS
        ceTags = ceDict.get( 'Tag', [] )
        if isinstance( ceTags, basestring ):
          ceTags = fromChar( ceTags )
        ceMaxRAM = ceDict.get( 'MaxRAM', None )
        # 'Images' is popped so ceDict keeps only CE-level parameters
        qDict = ceDict.pop( 'Images' )
        for image in qDict:
          imageName = '%s_%s' % ( ce, image )
          self.imageDict[imageName] = {}
          self.imageDict[imageName]['ParametersDict'] = qDict[image]
          self.imageDict[imageName]['ParametersDict']['Image'] = image
          self.imageDict[imageName]['ParametersDict']['Site'] = site
          self.imageDict[imageName]['ParametersDict']['Setup'] = gConfig.getValue( '/DIRAC/Setup', 'unknown' )
          # Effectively "no CPU limit" for the VM queue
          self.imageDict[imageName]['ParametersDict']['CPUTime'] = 99999999
          # Image-level tags, again possibly comma-separated
          imageTags = self.imageDict[imageName]['ParametersDict'].get( 'Tag' )
          if imageTags and isinstance( imageTags, basestring ):
            imageTags = fromChar( imageTags )
            self.imageDict[imageName]['ParametersDict']['Tag'] = imageTags
          # Union of CE-level and image-level tags
          if ceTags:
            if imageTags:
              allTags = list( set( ceTags + imageTags ) )
              self.imageDict[imageName]['ParametersDict']['Tag'] = allTags
            else:
              self.imageDict[imageName]['ParametersDict']['Tag'] = ceTags
          # Image-level MaxRAM takes precedence over the CE-level value
          maxRAM = self.imageDict[imageName]['ParametersDict'].get( 'MaxRAM' )
          maxRAM = ceMaxRAM if not maxRAM else maxRAM
          if maxRAM:
            self.imageDict[imageName]['ParametersDict']['MaxRAM'] = maxRAM
          # Platform: image-level value wins, CE-level is the fallback
          platform = ''
          if "Platform" in self.imageDict[imageName]['ParametersDict']:
            platform = self.imageDict[imageName]['ParametersDict']['Platform']
          elif "Platform" in ceDict:
            platform = ceDict['Platform']
          if platform and not platform in self.platforms:
            self.platforms.append( platform )
          # Normalise a CE-level platform to the DIRAC platform name
          if not "Platform" in self.imageDict[imageName]['ParametersDict'] and platform:
            result = Resources.getDIRACPlatform( platform )
            if result['OK']:
              self.imageDict[imageName]['ParametersDict']['Platform'] = result['Value'][0]
          # Flatten everything into the dictionary handed to the CE object
          ceImageDict = dict( ceDict )
          ceImageDict['CEName'] = ce
          ceImageDict['VO'] = self.vo
          ceImageDict['Image'] = image
          ceImageDict['RunningPod'] = self.runningPod
          ceImageDict['CSServers'] = gConfig.getValue( "/DIRAC/Configuration/Servers", [] )
          ceImageDict.update( self.imageDict[imageName]['ParametersDict'] )
          ceImageDict.update( opParameters )
          # Generate the CE object for the image or pick the already existing one
          # if the image definition did not change
          imageHash = self.__generateImageHash( ceImageDict )
          if imageName in self.imageCECache and self.imageCECache[imageName]['Hash'] == imageHash:
            imageCE = self.imageCECache[imageName]['CE']
          else:
            result = ceFactory.getCEObject( parameters = ceImageDict )
            if not result['OK']:
              return result
            self.imageCECache.setdefault( imageName, {} )
            self.imageCECache[imageName]['Hash'] = imageHash
            self.imageCECache[imageName]['CE'] = result['Value']
            imageCE = self.imageCECache[imageName]['CE']
          self.imageDict[imageName]['CE'] = imageCE
          self.imageDict[imageName]['CEName'] = ce
          self.imageDict[imageName]['CEType'] = ceDict['CEType']
          self.imageDict[imageName]['Site'] = site
          self.imageDict[imageName]['ImageName'] = image
          self.imageDict[imageName]['Platform'] = platform
          self.imageDict[imageName]['MaxInstances'] = ceDict['MaxInstances']
          # Skip (but keep going) if the endpoint cannot be used
          if not self.imageDict[imageName]['CE'].isValid():
            self.log.error( 'Failed to instantiate CloudEndpoint for %s' % imageName )
            continue
          if site not in self.sites:
            self.sites.append( site )
    return S_OK()
def execute( self ):
""" Main execution method
"""
if not self.imageDict:
self.log.warn( 'No site defined, exiting the cycle' )
return S_OK()
result = self.createVMs()
if not result['OK']:
self.log.error( 'Errors in the job submission: ', result['Message'] )
#cyclesDone = self.am_getModuleParam( 'cyclesDone' )
#if self.updateStatus and cyclesDone % self.cloudStatusUpdateCycleFactor == 0:
# result = self.updatePilotStatus()
# if not result['OK']:
# self.log.error( 'Errors in updating cloud status: ', result['Message'] )
return S_OK()
def createVMs( self ):
""" Go through defined computing elements and submit jobs if necessary
"""
# Check that there is some work at all
setup = CSGlobals.getSetup()
tqDict = { 'Setup':setup,
'CPUTime': 9999999 }
if self.vo:
tqDict['Community'] = self.vo
if self.voGroups:
tqDict['OwnerGroup'] = self.voGroups
result = Resources.getCompatiblePlatforms( self.platforms )
if not result['OK']:
return result
tqDict['Platform'] = result['Value']
tqDict['Site'] = self.sites
tqDict['Tag'] = []
self.log.verbose( 'Checking overall TQ availability with requirements' )
self.log.verbose( tqDict )
rpcMatcher = RPCClient( "WorkloadManagement/Matcher" )
result = rpcMatcher.getMatchingTaskQueues( tqDict )
if not result[ 'OK' ]:
return result
if not result['Value']:
self.log.verbose( 'No Waiting jobs suitable for the director' )
return S_OK()
jobSites = set()
anySite = False
testSites = set()
totalWaitingJobs = 0
for tqID in result['Value']:
if "Sites" in result['Value'][tqID]:
for site in result['Value'][tqID]['Sites']:
if site.lower() != 'any':
jobSites.add( site )
else:
anySite = True
else:
anySite = True
if "JobTypes" in result['Value'][tqID]:
if "Sites" in result['Value'][tqID]:
for site in result['Value'][tqID]['Sites']:
if site.lower() != 'any':
testSites.add( site )
totalWaitingJobs += result['Value'][tqID]['Jobs']
tqIDList = result['Value'].keys()
result = virtualMachineDB.getInstanceCounters( 'Status', {} )
totalVMs = 0
if result['OK']:
for status in result['Value']:
if status in [ 'New', 'Submitted', 'Running' ]:
totalVMs += result['Value'][status]
self.log.info( 'Total %d jobs in %d task queues with %d VMs' % (totalWaitingJobs, len( tqIDList ), totalVMs ) )
# Check if the site is allowed in the mask
result = jobDB.getSiteMask()
if not result['OK']:
return S_ERROR( 'Can not get the site mask' )
siteMaskList = result['Value']
images = self.imageDict.keys()
random.shuffle( images )
totalSubmittedPilots = 0
matchedQueues = 0
for image in images:
# Check if the image failed previously
#failedCount = self.failedImages[ image ] % self.failedImageCycleFactor
#if failedCount != 0:
# self.log.warn( "%s queue failed recently, skipping %d cycles" % ( image, 10-failedCount ) )
# self.failedImages[image] += 1
# continue
print "AT >>> image parameters:", image
for key,value in self.imageDict[image].items():
print key,value
ce = self.imageDict[image]['CE']
ceName = self.imageDict[image]['CEName']
imageName = self.imageDict[image]['ImageName']
siteName = self.imageDict[image]['Site']
platform = self.imageDict[image]['Platform']
siteMask = siteName in siteMaskList
endpoint = "%s::%s" % ( siteName, ceName )
maxInstances = int( self.imageDict[image]['MaxInstances'] )
if not anySite and siteName not in jobSites:
self.log.verbose( "Skipping queue %s at %s: no workload expected" % (imageName, siteName) )
continue
if not siteMask and siteName not in testSites:
self.log.verbose( "Skipping queue %s: site %s not in the mask" % (imageName, siteName) )
continue
if 'CPUTime' in self.imageDict[image]['ParametersDict'] :
imageCPUTime = int( self.imageDict[image]['ParametersDict']['CPUTime'] )
else:
self.log.warn( 'CPU time limit is not specified for queue %s, skipping...' % image )
continue
# Prepare the queue description to look for eligible jobs
ceDict = ce.getParameterDict()
if not siteMask:
ceDict['JobType'] = "Test"
if self.vo:
ceDict['VO'] = self.vo
if self.voGroups:
ceDict['OwnerGroup'] = self.voGroups
result = Resources.getCompatiblePlatforms( platform )
if not result['OK']:
continue
ceDict['Platform'] = result['Value']
# Get the number of eligible jobs for the target site/queue
print "AT >>> getMatchingTaskQueues ceDict", ceDict
result = rpcMatcher.getMatchingTaskQueues( ceDict )
print result
if not result['OK']:
self.log.error( 'Could not retrieve TaskQueues from TaskQueueDB', result['Message'] )
return result
taskQueueDict = result['Value']
if not taskQueueDict:
self.log.verbose( 'No matching TQs found for %s' % image )
continue
matchedQueues += 1
totalTQJobs = 0
tqIDList = taskQueueDict.keys()
for tq in taskQueueDict:
totalTQJobs += taskQueueDict[tq]['Jobs']
self.log.verbose( '%d job(s) from %d task queue(s) are eligible for %s queue' % (totalTQJobs, len( tqIDList ), image) )
# Get the number of already instantiated VMs for these task queues
totalWaitingVMs = 0
result = virtualMachineDB.getInstanceCounters( 'Status', { 'Endpoint': endpoint } )
if result['OK']:
for status in result['Value']:
if status in [ 'New', 'Submitted' ]:
totalWaitingVMs += result['Value'][status]
if totalWaitingVMs >= totalTQJobs:
self.log.verbose( "%d VMs already for all the available jobs" % totalWaitingVMs )
self.log.verbose( "%d VMs for the total of %d eligible jobs for %s" % (totalWaitingVMs, totalTQJobs, image) )
# Get the working proxy
#cpuTime = imageCPUTime + 86400
#self.log.verbose( "Getting cloud proxy for %s/%s %d long" % ( self.cloudDN, self.cloudGroup, cpuTime ) )
#result = gProxyManager.getPilotProxyFromDIRACGroup( self.cloudDN, self.cloudGroup, cpuTime )
#if not result['OK']:
# return result
#self.proxy = result['Value']
#ce.setProxy( self.proxy, cpuTime - 60 )
# Get the number of available slots on the target site/endpoint
totalSlots = self.getVMInstances( endpoint, maxInstances )
if totalSlots == 0:
self.log.debug( '%s: No slots available' % image )
continue
vmsToSubmit = max( 0, min( totalSlots, totalTQJobs - totalWaitingVMs ) )
self.log.info( '%s: Slots=%d, TQ jobs=%d, VMs: %d, to submit=%d' % \
( image, totalSlots, totalTQJobs, totalWaitingVMs, vmsToSubmit ) )
# Limit the number of clouds to submit to MAX_PILOTS_TO_SUBMIT
vmsToSubmit = min( self.maxVMsToSubmit, vmsToSubmit )
self.log.info( 'Going to submit %d VMs to %s queue' % ( vmsToSubmit, image ) )
result = ce.createInstances( vmsToSubmit )
print "AT >>> createInstances", result, image
if not result['OK']:
self.log.error( 'Failed submission to queue %s:\n' % image, result['Message'] )
self.failedImages.setdefault( image, 0 )
self.failedImages[image] += 1
continue
# Add VMs to the VirtualMachineDB
vmDict = result['Value']
totalSubmittedPilots += len( vmDict )
self.log.info( 'Submitted %d VMs to %s@%s' % ( len( vmDict ), imageName, ceName ) )
pilotList = []
for uuID in vmDict:
diracUUID = vmDict[uuID]['InstanceID']
endpoint = '%s::%s' % ( self.imageDict[image]['Site'], ceName )
result = virtualMachineDB.insertInstance( uuID, imageName, diracUUID, endpoint, self.vo )
if not result['OK']:
continue
for ncpu in range( vmDict[uuID]['NumberOfCPUs'] ):
pRef = 'vm://' + ceName + '/' + diracUUID + ':' + str( ncpu ).zfill( 2 )
pilotList.append( pRef )
stampDict = {}
tqPriorityList = []
sumPriority = 0.
for tq in taskQueueDict:
sumPriority += taskQueueDict[tq]['Priority']
tqPriorityList.append( ( tq, sumPriority ) )
tqDict = {}
for pilotID in pilotList:
rndm = random.random() * sumPriority
for tq, prio in tqPriorityList:
if rndm < prio:
tqID = tq
break
if not tqDict.has_key( tqID ):
tqDict[tqID] = []
tqDict[tqID].append( pilotID )
for tqID, pilotList in tqDict.items():
result = pilotAgentsDB.addPilotTQReference( pilotList,
tqID,
'',
'',
self.localhost,
'Cloud',
'',
stampDict )
if not result['OK']:
self.log.error( 'Failed to insert pilots into the PilotAgentsDB' )
self.log.info( "%d VMs submitted in total in this cycle, %d matched queues" % ( totalSubmittedPilots, matchedQueues ) )
return S_OK()
def getVMInstances( self, endpoint, maxInstances ):
result = virtualMachineDB.getInstanceCounters( 'Status', { 'Endpoint': endpoint } )
print "AT >>> getVMInstances", result
if not result['OK']:
return result
count = 0
for status in result['Value']:
if status in [ 'New', 'Submitted', 'Running']:
count += int( result['Value'][status] )
return max( 0, maxInstances - count )
|
xianghuzhao/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/Agent/CloudDirector.py
|
Python
|
gpl-3.0
| 19,941
|
[
"DIRAC"
] |
61ae63c64cf32ef0e9e886f5307cb7f32cc701de68d7c246af94983bd168a00d
|
import numpy as np
from scipy.optimize import check_grad
import copy
import pdb
import os
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
from .context import aep
from .context import flatten_dict, unflatten_dict
from .context import PROP_MC, PROP_MM, PROP_LIN
from test_utils import kink, check_grad
np.random.seed(42)
def test_gplvm_aep_gaussian(nat_param=True, stoc=False, prop_mode=PROP_MM):
    """Check AEP gradients of a sparse GP-LVM with a Gaussian likelihood.

    :param nat_param: use the natural parameterisation of q(u)
    :param stoc: check the stochastic (minibatch) objective
    :param prop_mode: propagation mode (PROP_MM, PROP_MC or PROP_LIN)
    """
    N_train = 10
    alpha = 0.5
    M = 5   # number of inducing points
    D = 2   # latent dimensionality
    Q = 3   # observation dimensionality
    y_train = np.random.randn(N_train, Q)
    lvm = aep.SGPLVM(y_train, D, M, lik='Gaussian', nat_param=nat_param)
    params = lvm.init_hypers(y_train)
    # lvm.optimise(method='adam', alpha=alpha, maxiter=1000, adam_lr=0.08)
    # params = lvm.get_hypers()
    # parenthesised print works under both Python 2 and Python 3
    print('gplvm aep gaussian nat_param %r stoc %r prop_mode %s' % (nat_param, stoc, prop_mode))
    check_grad(params, lvm, stochastic=stoc, alpha=alpha, prop_mode=prop_mode)
def test_gplvm_aep_probit(nat_param=False, stoc=False, prop_mode=PROP_MM):
    """Check AEP gradients of a sparse GP-LVM with a probit likelihood.

    :param nat_param: use the natural parameterisation of q(u)
    :param stoc: check the stochastic (minibatch) objective
    :param prop_mode: propagation mode (PROP_MM, PROP_MC or PROP_LIN)
    """
    N_train = 5
    alpha = 0.5
    M = 3   # number of inducing points
    D = 2   # latent dimensionality
    Q = 3   # observation dimensionality
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    lvm = aep.SGPLVM(y_train, D, M, lik='Probit', nat_param=nat_param)
    params = lvm.init_hypers(y_train)
    # lvm.optimise(method='adam', alpha=alpha, maxiter=1000, adam_lr=0.08)
    # params = lvm.get_hypers()
    # parenthesised print works under both Python 2 and Python 3
    print('gplvm aep probit nat_param %r stoc %r prop_mode %s' % (nat_param, stoc, prop_mode))
    check_grad(params, lvm, stochastic=stoc, alpha=alpha, prop_mode=prop_mode)
def plot_gplvm_aep_gaussian_stochastic():
    """Benchmark the stochastic AEP objective of a Gaussian GP-LVM.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/gaussian_stochastic_aep_gplvm.pdf.
    """
    N_train = 2000
    alpha = 0.5
    M = 50
    D = 2
    Q = 3
    y_train = np.random.randn(N_train, Q)
    model = aep.SGPLVM(y_train, D, M, lik='Gaussian')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/gaussian_stochastic_aep_gplvm.pdf')
def plot_gplvm_aep_probit_stochastic():
    """Benchmark the stochastic AEP objective of a probit GP-LVM.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/probit_stochastic_aep_gplvm.pdf.
    """
    N_train = 2000
    alpha = 0.5
    M = 50
    D = 2
    Q = 3
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    model = aep.SGPLVM(y_train, D, M, lik='Probit')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/probit_stochastic_aep_gplvm.pdf')
def test_gpr_aep_gaussian(nat_param=True, stoc=False):
    """Check AEP gradients of sparse GP regression with a Gaussian likelihood.

    :param nat_param: use the natural parameterisation of q(u)
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 20
    alpha = 0.0001
    M = 10  # number of inducing points
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    y_train = np.random.randn(N_train, Q)
    x_train = np.random.randn(N_train, D)
    model = aep.SGPR(x_train, y_train, M, lik='Gaussian', nat_param=nat_param)
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('gpr aep gaussian nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def test_gpr_aep_probit(nat_param=True, stoc=False):
    """Check AEP gradients of sparse GP regression with a probit likelihood.

    :param nat_param: use the natural parameterisation of q(u)
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 5
    alpha = 0.5
    M = 3   # number of inducing points
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    x_train = np.random.randn(N_train, D)
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    model = aep.SGPR(x_train, y_train, M, lik='Probit', nat_param=nat_param)
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('gpr aep probit nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def plot_gpr_aep_gaussian_stochastic():
    """Benchmark the stochastic AEP objective of Gaussian sparse GP regression.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/gaussian_stochastic_aep_gpr.pdf.
    """
    N_train = 2000
    alpha = 0.5
    M = 50
    D = 2
    Q = 3
    y_train = np.random.randn(N_train, Q)
    x_train = np.random.randn(N_train, D)
    model = aep.SGPR(x_train, y_train, M, lik='Gaussian')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/gaussian_stochastic_aep_gpr.pdf')
def plot_gpr_aep_probit_stochastic():
    """Benchmark the stochastic AEP objective of probit sparse GP regression.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/probit_stochastic_aep_gpr.pdf.
    """
    N_train = 2000
    alpha = 0.5
    M = 50
    D = 2
    Q = 3
    x_train = np.random.randn(N_train, D)
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    model = aep.SGPR(x_train, y_train, M, lik='Probit')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/probit_stochastic_aep_gpr.pdf')
def test_dgpr_aep_gaussian(nat_param=True, stoc=False):
    """Check AEP gradients of a deep sparse GP regressor (Gaussian likelihood).

    :param nat_param: currently only reported in the log line; the SDGPR
        constructor does not take it yet (see TODO below)
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 10
    alpha = 1
    M = 5   # number of inducing points per layer
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    y_train = np.random.randn(N_train, Q)
    x_train = np.random.randn(N_train, D)
    hidden_size = [3, 2]
    # TODO: nat_param
    model = aep.SDGPR(x_train, y_train, M, hidden_size, lik='Gaussian')
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('dgpr aep gaussian nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def test_dgpr_aep_probit(nat_param=True, stoc=False):
    """Check AEP gradients of a deep sparse GP regressor (probit likelihood).

    :param nat_param: currently only reported in the log line; the SDGPR
        constructor does not take it yet (see TODO below)
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 5
    alpha = 1
    M = 3   # number of inducing points per layer
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    hidden_size = [3, 2]
    x_train = np.random.randn(N_train, D)
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    # TODO: nat_param
    model = aep.SDGPR(x_train, y_train, M, hidden_size, lik='Probit')
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('dgpr aep probit nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def plot_dgpr_aep_gaussian_stochastic():
    """Benchmark the stochastic AEP objective of a Gaussian deep sparse GP.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/gaussian_stochastic_aep_dgpr.pdf.
    """
    N_train = 2000
    M = 50
    D = 2
    Q = 3
    y_train = np.random.randn(N_train, Q)
    x_train = np.random.randn(N_train, D)
    hidden_size = [3, 2]
    model = aep.SDGPR(x_train, y_train, M, hidden_size, lik='Gaussian')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=1.0)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=1.0)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/gaussian_stochastic_aep_dgpr.pdf')
def plot_dgpr_aep_probit_stochastic():
    """Benchmark the stochastic AEP objective of a probit deep sparse GP.

    Times minibatch evaluations at several minibatch proportions and plots
    how the ELBO estimates spread around the full-batch value.
    Saves the figure to /tmp/probit_stochastic_aep_dgpr.pdf.
    """
    N_train = 2000
    M = 50
    D = 2
    Q = 3
    x_train = np.random.randn(N_train, D)
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    hidden_size = [3, 2]
    # BUG FIX: this probit benchmark previously built the model with
    # lik='Gaussian' (copy-paste from the gaussian variant); the targets
    # are binary and the saved plot is labelled probit.
    model = aep.SDGPR(x_train, y_train, M, hidden_size, lik='Probit')
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    # full-batch objective used as the reference line in the plot
    logZ, grad_all = model.objective_function(params, N_train, alpha=1.0)
    mbs = np.logspace(-2, 0, 10)
    reps = 20
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            objs[i, k] = model.objective_function(
                params, no_points, alpha=1.0)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/probit_stochastic_aep_dgpr.pdf')
def test_dgprh_aep_gaussian(nat_param=True, stoc=False):
    """Check AEP gradients of a deep sparse GP with hidden-layer posteriors
    (SDGPR_H) and a Gaussian likelihood.

    :param nat_param: currently only reported in the log line; the
        constructor does not take it
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 10
    alpha = 0.5
    M = 5   # number of inducing points per layer
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    y_train = np.random.randn(N_train, Q)
    x_train = np.random.randn(N_train, D)
    hidden_size = [3, 2]
    model = aep.SDGPR_H(x_train, y_train, M, hidden_size, lik='Gaussian')
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('dgprh aep gaussian nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def test_dgprh_aep_probit(nat_param=True, stoc=False):
    """Check AEP gradients of a deep sparse GP with hidden-layer posteriors
    (SDGPR_H) and a probit likelihood.

    :param nat_param: currently only reported in the log line; the
        constructor does not take it
    :param stoc: check the stochastic (minibatch) objective
    """
    N_train = 5
    alpha = 0.3
    M = 3   # number of inducing points per layer
    D = 2   # input dimensionality
    Q = 3   # output dimensionality
    hidden_size = [3, 2]
    x_train = np.random.randn(N_train, D)
    # binary targets in {-1, +1}
    y_train = 2 * np.random.randint(0, 2, size=(N_train, Q)) - 1
    model = aep.SDGPR_H(x_train, y_train, M, hidden_size, lik='Probit')
    params = model.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('dgprh aep probit nat_param %r stoc %r' % (nat_param, stoc))
    check_grad(params, model, stochastic=stoc, alpha=alpha)
def test_gpssm_linear_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MM):
    """Check AEP gradients of a GP-SSM with linear emissions on kink data.

    Unlike the other checks, this one first optimises the model and checks
    gradients at the optimised hyperparameters.

    :param nat_param: currently only reported in the log line (see TODO)
    :param stoc: check the stochastic (minibatch) objective
    :param prop_mode: propagation mode (PROP_MM, PROP_MC or PROP_LIN)
    """
    N_train = 50
    process_noise = 0.2
    obs_noise = 0.1
    alpha = 0.5
    M = 4   # number of inducing points
    Q = 1   # latent state dimensionality
    (xtrue, x, y) = kink(N_train, process_noise, obs_noise)
    y_train = np.reshape(y, [y.shape[0], 1])
    # TODO nat_param=nat_param -- the SGPSSM constructor does not accept it yet
    lvm = aep.SGPSSM(y_train, Q, M, lik='Gaussian', gp_emi=False)
    lvm.optimise(method='adam', alpha=alpha, maxiter=500, adam_lr=0.08)
    params = lvm.get_hypers()
    # # init hypers, inducing points and q(u) params
    # params = lvm.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('gplvm linear emis aep kink nat_param %r stoc %r prop_mode %s' % (nat_param, stoc, prop_mode))
    check_grad(params, lvm, stochastic=stoc, alpha=alpha, prop_mode=prop_mode)
def test_gpssm_gp_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MM):
    """Check AEP gradients of a GP-SSM with GP emissions on kink data.

    :param nat_param: currently only reported in the log line
    :param stoc: check the stochastic (minibatch) objective
    :param prop_mode: propagation mode (PROP_MM, PROP_MC or PROP_LIN)
    """
    N_train = 10
    process_noise = 0.2
    obs_noise = 0.1
    alpha = 0.5
    M = 4   # number of inducing points
    Q = 1   # latent state dimensionality
    (xtrue, x, y) = kink(N_train, process_noise, obs_noise)
    y_train = np.reshape(y, [y.shape[0], 1])
    lvm = aep.SGPSSM(y_train, Q, M, lik='Gaussian', gp_emi=True)
    # init hypers, inducing points and q(u) params
    params = lvm.init_hypers(y_train)
    # parenthesised print works under both Python 2 and Python 3
    print('gplvm gp emis aep kink nat_param %r stoc %r prop_mode %s' % (nat_param, stoc, prop_mode))
    check_grad(params, lvm, stochastic=stoc, alpha=alpha, prop_mode=prop_mode)
def plot_gpssm_linear_aep_gaussian_stochastic():
    """Benchmark the stochastic AEP objective of a GP-SSM with linear
    emissions under both MM and MC propagation.

    For each propagation mode, times minibatch evaluations at several
    minibatch proportions and plots the spread of ELBO estimates around
    the full-batch value.  Saves the figures to
    /tmp/gaussian_stochastic_aep_gpssm_linear_MM.pdf and ..._MC.pdf.
    """
    N_train = 2000
    alpha = 0.3
    M = 50
    Q = 2   # latent state dimensionality
    D = 3   # observation dimensionality
    y_train = np.random.randn(N_train, D)
    model = aep.SGPSSM(y_train, Q, M, lik='Gaussian', gp_emi=False)
    # init hypers, inducing points and q(u) params
    params = model.init_hypers(y_train)
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    mbs = np.logspace(-2, 0, 20)
    reps = 40
    # --- moment-matching (PROP_MM) propagation ---
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            # parenthesised print works under both Python 2 and Python 3
            print('%d/%d, %d/%d' % (i, len(mbs), k, reps))
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha, prop_mode=PROP_MM)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/gaussian_stochastic_aep_gpssm_linear_MM.pdf')
    # --- Monte Carlo (PROP_MC) propagation ---
    logZ, grad_all = model.objective_function(params, N_train, alpha=alpha)
    times = np.zeros(len(mbs))
    objs = np.zeros((len(mbs), reps))
    for i, mb in enumerate(mbs):
        no_points = int(N_train * mb)
        start_time = time.time()
        for k in range(reps):
            print('%d/%d, %d/%d' % (i, len(mbs), k, reps))
            objs[i, k] = model.objective_function(
                params, no_points, alpha=alpha, prop_mode=PROP_MC)[0]
        times[i] = time.time() - start_time
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
    ax1.plot(mbs, times, 'x-')
    ax1.set_xlabel("Minibatch proportion")
    ax1.set_ylabel("Time taken")
    ax1.set_xscale("log", nonposx='clip')
    ax2.plot(mbs, objs, 'kx')
    ax2.axhline(logZ, color='b')
    ax2.set_xlabel("Minibatch proportion")
    ax2.set_ylabel("ELBO estimates")
    ax2.set_xscale("log", nonposx='clip')
    plt.savefig('/tmp/gaussian_stochastic_aep_gpssm_linear_MC.pdf')
if __name__ == '__main__':
    # Only the uncommented checks below currently run; the commented-out
    # calls are kept as a worklist for the TODO items noted inline.
    # TODO: PROP_LIN
    test_gplvm_aep_gaussian(nat_param=True, stoc=False, prop_mode=PROP_MM)
    # test_gplvm_aep_gaussian(nat_param=True, stoc=True, prop_mode=PROP_MM)
    test_gplvm_aep_gaussian(nat_param=False, stoc=False, prop_mode=PROP_MM)
    # test_gplvm_aep_gaussian(nat_param=False, stoc=True, prop_mode=PROP_MM)
    # test_gplvm_aep_gaussian(nat_param=True, stoc=False, prop_mode=PROP_MC)
    # test_gplvm_aep_gaussian(nat_param=True, stoc=True, prop_mode=PROP_MC)
    # test_gplvm_aep_gaussian(nat_param=False, stoc=False, prop_mode=PROP_MC)
    # test_gplvm_aep_gaussian(nat_param=False, stoc=True, prop_mode=PROP_MC)
    # test_gplvm_aep_gaussian(nat_param=True, stoc=False, prop_mode=PROP_LIN)
    # test_gplvm_aep_gaussian(nat_param=True, stoc=True, prop_mode=PROP_LIN)
    # test_gplvm_aep_gaussian(nat_param=False, stoc=False, prop_mode=PROP_LIN)
    # test_gplvm_aep_gaussian(nat_param=False, stoc=True, prop_mode=PROP_LIN)
    # test_gplvm_aep_probit(nat_param=True, stoc=False, prop_mode=PROP_MM)
    # test_gplvm_aep_probit(nat_param=True, stoc=True, prop_mode=PROP_MM)
    # test_gplvm_aep_probit(nat_param=False, stoc=False, prop_mode=PROP_MM)
    # test_gplvm_aep_probit(nat_param=False, stoc=True, prop_mode=PROP_MM)
    # test_gplvm_aep_probit(nat_param=True, stoc=False, prop_mode=PROP_MC)
    # test_gplvm_aep_probit(nat_param=True, stoc=True, prop_mode=PROP_MC)
    # test_gplvm_aep_probit(nat_param=False, stoc=False, prop_mode=PROP_MC)
    # test_gplvm_aep_probit(nat_param=False, stoc=True, prop_mode=PROP_MC)
    # test_gplvm_aep_probit(nat_param=True, stoc=False, prop_mode=PROP_LIN)
    # test_gplvm_aep_probit(nat_param=True, stoc=True, prop_mode=PROP_LIN)
    # test_gplvm_aep_probit(nat_param=False, stoc=False, prop_mode=PROP_LIN)
    # test_gplvm_aep_probit(nat_param=False, stoc=True, prop_mode=PROP_LIN)
    # plot_gplvm_aep_probit_stochastic()
    # plot_gplvm_aep_gaussian_stochastic()
    test_gpr_aep_gaussian(nat_param=True, stoc=False)
    test_gpr_aep_gaussian(nat_param=True, stoc=True)
    test_gpr_aep_gaussian(nat_param=False, stoc=False)
    test_gpr_aep_gaussian(nat_param=False, stoc=True)
    test_gpr_aep_probit(nat_param=True, stoc=False)
    test_gpr_aep_probit(nat_param=True, stoc=True)
    test_gpr_aep_probit(nat_param=False, stoc=False)
    test_gpr_aep_probit(nat_param=False, stoc=True)
    # plot_gpr_aep_probit_stochastic()
    # plot_gpr_aep_gaussian_stochastic()
    # TODO: Deep GP, nat param, different prop mode
    # test_dgpr_aep_gaussian(nat_param=True, stoc=False)
    # test_dgpr_aep_gaussian(nat_param=True, stoc=True)
    # # test_dgpr_aep_gaussian(nat_param=False, stoc=False)
    # # test_dgpr_aep_gaussian(nat_param=False, stoc=True)
    # test_dgpr_aep_probit(nat_param=True, stoc=False)
    # test_dgpr_aep_probit(nat_param=True, stoc=True)
    # # test_dgpr_aep_probit(nat_param=False, stoc=False)
    # # test_dgpr_aep_probit(nat_param=False, stoc=True)
    # plot_dgpr_aep_probit_stochastic()
    # plot_dgpr_aep_gaussian_stochastic()
    # TODO: Deep GP with hidden, nat param, different prop mode
    # test_dgprh_aep_gaussian(nat_param=True, stoc=False)
    # test_dgprh_aep_gaussian(nat_param=True, stoc=True)
    # # test_dgprh_aep_gaussian(nat_param=False, stoc=False)
    # # test_dgprh_aep_gaussian(nat_param=False, stoc=True)
    # test_dgprh_aep_probit(nat_param=True, stoc=False)
    # test_dgprh_aep_probit(nat_param=True, stoc=True)
    # # test_dgprh_aep_probit(nat_param=False, stoc=False)
    # # test_dgprh_aep_probit(nat_param=False, stoc=True)
    # TODO: GPSSM, nat param
    # test_gpssm_linear_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MM)
    # test_gpssm_linear_aep_gaussian_kink(nat_param=True, stoc=True, prop_mode=PROP_MM)
    # # test_gpssm_linear_aep_gaussian_kink(nat_param=False, stoc=False, prop_mode=PROP_MM)
    # # test_gpssm_linear_aep_gaussian_kink(nat_param=False, stoc=True, prop_mode=PROP_MM)
    # test_gpssm_linear_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MC)
    # test_gpssm_linear_aep_gaussian_kink(nat_param=True, stoc=True, prop_mode=PROP_MC)
    # # test_gpssm_linear_aep_gaussian_kink(nat_param=False, stoc=False, prop_mode=PROP_MC)
    # # test_gpssm_linear_aep_gaussian_kink(nat_param=False, stoc=True, prop_mode=PROP_MC)
    # test_gpssm_gp_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MM)
    # test_gpssm_gp_aep_gaussian_kink(nat_param=True, stoc=True, prop_mode=PROP_MM)
    # # test_gpssm_gp_aep_gaussian_kink(nat_param=False, stoc=False, prop_mode=PROP_MM)
    # # test_gpssm_gp_aep_gaussian_kink(nat_param=False, stoc=True, prop_mode=PROP_MM)
    # test_gpssm_gp_aep_gaussian_kink(nat_param=True, stoc=False, prop_mode=PROP_MC)
    # test_gpssm_gp_aep_gaussian_kink(nat_param=True, stoc=True, prop_mode=PROP_MC)
    # # test_gpssm_gp_aep_gaussian_kink(nat_param=False, stoc=False, prop_mode=PROP_MC)
    # # test_gpssm_gp_aep_gaussian_kink(nat_param=False, stoc=True, prop_mode=PROP_MC)
    # plot_gpssm_linear_aep_gaussian_stochastic()
|
thangbui/geepee
|
tests/test_grads_aep.py
|
Python
|
mit
| 20,873
|
[
"Gaussian"
] |
b391b10c439f719e9ac21a8448480004a614e3fa107f3381f62f056a33e0c576
|
import ast
from collections import defaultdict
from functools import singledispatch
from typing import Tuple
def generic_visit(node: ast.AST, scope, namespace):
    """Fallback visitor: recurse into every AST child of *node*.

    Mirrors NodeVisitor.generic_visit from CPython's Lib/ast.py, but
    yields whatever the dispatched ``visit`` calls yield for the children.
    """
    for _field_name, field_value in ast.iter_fields(node):
        children = field_value if isinstance(field_value, list) else [field_value]
        for child in children:
            if isinstance(child, ast.AST):
                yield from visit(child, scope, namespace)
@singledispatch
def visit(node: ast.AST, scope, namespace):
    """Dispatch on the node's type; unregistered types fall back here.

    Adapted from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py
    """
    for occurrences in generic_visit(node, scope, namespace):
        yield occurrences
@visit.register
def visit_module(node: ast.Module, scope, namespace):
    """Visit a Module by simply walking all of its children."""
    for occurrences in generic_visit(node, scope, namespace):
        yield occurrences
@visit.register
def visit_name(node: ast.Name, scope, namespace):
    """Record the position of a Name node under its namespaced key.

    A Store context starts a fresh occurrence list (a new definition);
    any other context appends to the list already held for that name.
    """
    qualified = namespaced(namespace, node.id)
    if isinstance(node.ctx, ast.Store):
        scope[qualified] = []
    occurrences = scope[qualified]
    occurrences.append(node_position(node))
    yield occurrences
def namespaced(namespace: Tuple[str, ...], name: str) -> Tuple[str, ...]:
    """Return *name* appended to *namespace* as a fresh tuple key."""
    return (*namespace, name)
def node_position(node: ast.AST, row_offset=0, column_offset=0) -> Tuple[int, int]:
    """Return the zero-based (row, column) of *node*, plus optional offsets."""
    row = (node.lineno - 1) + row_offset
    column = node.col_offset + column_offset
    return (row, column)
def test_adds_definition_to_scope():
    """Visiting an assignment registers the target under its namespaced key."""
    module = ast.parse("old = 1")
    scope = defaultdict(list)
    found = list(visit(module, scope, namespace=[]))
    assert found == [[(0, 0)]]
    assert scope == {("old",): [(0, 0)]}
|
thisfred/breakfast
|
tests/test_attempt_11.py
|
Python
|
bsd-2-clause
| 1,921
|
[
"VisIt"
] |
e19c0536fd92b1f16713d309d2d5b1b601f24fbe6547835085ad633bd830539e
|
#!/usr/bin/env python
# Parses CWL's "SoftwareRequirement" hints section and dumps a cbl compatible yaml file.
# The purpose with this script is to create smaller composable docker containers for bcbio-nextgen.
#
# Usage: cwl2yaml_packages.py test_bcbio_cwl/run_info-cwl-workflow/steps/process_alignment.cwl > cloudbiolinux/contrib/flavor/cwl_dockers/packages-bcbio-alignment.yaml
import os
import sys
import yaml
# Input CWL steps file given on the command line.
CWL_STEPS = sys.argv[1]
# Use a context manager so the input file handle is closed (was leaked before).
with open(CWL_STEPS, 'r') as cwl_handle:
    cwl_pkgs = yaml.safe_load(cwl_handle)
cbl_yml = dict()
# take the filename as the flavor/dockerfile name
# (reuse CWL_STEPS instead of re-reading sys.argv[1])
cbl_flavor = "bcbio-" + os.path.splitext(os.path.basename(CWL_STEPS))[0]
# NOTE(review): assumes hints[1] is the SoftwareRequirement section of the
# CWL step -- confirm against the files this script is run on.
cbl_pkgs = [pkg['package'] for pkg in cwl_pkgs['hints'][1]['packages']]
cbl_yml['channels'] = ['bioconda', 'conda-forge']
cbl_yml[cbl_flavor] = cbl_pkgs
# Parenthesised print works under both Python 2 and Python 3.
print(yaml.safe_dump(cbl_yml, default_flow_style=False, indent=4))
|
chapmanb/cloudbiolinux
|
utils/cwl2yaml_packages.py
|
Python
|
mit
| 873
|
[
"Bioconda"
] |
f3256547daedb45c93af34efd7e476db5baffaf358b6fbfe7e65058951a26157
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.core.lib.settingstab` module contains the base SettingsTab class which plugins use for adding their
own tab to the settings dialog.
"""
from PyQt4 import QtGui
from openlp.core.lib import Registry
class SettingsTab(QtGui.QWidget):
    """
    SettingsTab is a helper widget for plugins to define Tabs for the settings
    dialog.
    """
    def __init__(self, parent, title, visible_title=None, icon_path=None):
        """
        Constructor to create the Settings tab item.
        ``parent``
            The parent widget passed on to QWidget.
        ``title``
            The title of the tab, which is used internally for the tab handling.
        ``visible_title``
            The title of the tab, which is usually displayed on the tab.
        ``icon_path``
            Optional icon path; ``self.iconPath`` is only set when this is
            provided, so consumers should not assume the attribute exists.
        """
        QtGui.QWidget.__init__(self, parent)
        self.tabTitle = title
        self.tabTitleVisible = visible_title
        # settings are stored under the lower-cased tab title
        self.settingsSection = self.tabTitle.lower()
        if icon_path:
            self.iconPath = icon_path
        # build UI, translate it, then initialise and load stored settings
        self.setupUi()
        self.retranslateUi()
        self.initialise()
        self.load()
    def setupUi(self):
        """
        Setup the tab's interface.
        """
        # Two equal columns inside a horizontal layout; subclasses add their
        # widgets to leftLayout / rightLayout.
        self.tabLayout = QtGui.QHBoxLayout(self)
        self.tabLayout.setObjectName(u'tabLayout')
        self.leftColumn = QtGui.QWidget(self)
        self.leftColumn.setObjectName(u'leftColumn')
        self.leftLayout = QtGui.QVBoxLayout(self.leftColumn)
        self.leftLayout.setMargin(0)
        self.leftLayout.setObjectName(u'leftLayout')
        self.tabLayout.addWidget(self.leftColumn)
        self.rightColumn = QtGui.QWidget(self)
        self.rightColumn.setObjectName(u'rightColumn')
        self.rightLayout = QtGui.QVBoxLayout(self.rightColumn)
        self.rightLayout.setMargin(0)
        self.rightLayout.setObjectName(u'rightLayout')
        self.tabLayout.addWidget(self.rightColumn)
    def resizeEvent(self, event=None):
        """
        Resize the sides in two equal halves if the layout allows this.
        """
        if event:
            QtGui.QWidget.resizeEvent(self, event)
        # usable width excludes layout spacing and margins; the left column
        # gets half of it unless a column's minimum size forces otherwise
        width = self.width() - self.tabLayout.spacing() - \
            self.tabLayout.contentsMargins().left() - self.tabLayout.contentsMargins().right()
        left_width = min(width - self.rightColumn.minimumSizeHint().width(), width / 2)
        left_width = max(left_width, self.leftColumn.minimumSizeHint().width())
        self.leftColumn.setFixedWidth(left_width)
    def retranslateUi(self):
        """
        Setup the interface translation strings.
        """
        pass
    def initialise(self):
        """
        Do any extra initialisation here.
        """
        pass
    def load(self):
        """
        Load settings from disk.
        """
        pass
    def save(self):
        """
        Save settings to disk.
        """
        pass
    def cancel(self):
        """
        Reset any settings if cancel triggered
        """
        self.load()
    def postSetUp(self, postUpdate=False):
        """
        Changes which need to be made after setup of application
        ``postUpdate``
            Indicates if called before or after updates.
        """
        pass
    def tabVisible(self):
        """
        Tab has just been made visible to the user
        """
        pass
    def _get_service_manager(self):
        """
        Adds the service manager to the class dynamically
        """
        if not hasattr(self, u'_service_manager'):
            self._service_manager = Registry().get(u'service_manager')
        return self._service_manager
    service_manager = property(_get_service_manager)
    def _get_main_window(self):
        """
        Adds the main window to the class dynamically
        """
        if not hasattr(self, u'_main_window'):
            self._main_window = Registry().get(u'main_window')
        return self._main_window
    main_window = property(_get_main_window)
    def _get_renderer(self):
        """
        Adds the Renderer to the class dynamically
        """
        if not hasattr(self, u'_renderer'):
            self._renderer = Registry().get(u'renderer')
        return self._renderer
    renderer = property(_get_renderer)
    def _get_theme_manager(self):
        """
        Adds the theme manager to the class dynamically
        """
        if not hasattr(self, u'_theme_manager'):
            self._theme_manager = Registry().get(u'theme_manager')
        return self._theme_manager
    theme_manager = property(_get_theme_manager)
    def _get_media_controller(self):
        """
        Adds the media controller to the class dynamically
        """
        if not hasattr(self, u'_media_controller'):
            self._media_controller = Registry().get(u'media_controller')
        return self._media_controller
    media_controller = property(_get_media_controller)
|
marmyshev/transitions
|
openlp/core/lib/settingstab.py
|
Python
|
gpl-2.0
| 6,947
|
[
"Brian"
] |
ab163b69230e306de69fa960a2b7bd097260378a9cc7b128cfb6e9f58f48aae1
|
"""Test for the hide-show feature.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from StringIO import StringIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestHideShow(TestCase):
    """Integration test for the hide/show (visibility) feature of mayavi
    objects, including persistence across save/load and deep-copy."""
    def check(self, saved=False):
        """Does the checking, if saved is True it does not change the
        properties at first to see how those behave and only tests the
        final unpickled state."""
        script = self.script
        e = script.engine
        scene = e.current_scene
        # scene children: [0] VRML importer, [1] VTK file source
        wrl = scene.children[0]
        src = scene.children[1]
        mm = src.children[0]
        scp = mm.children[0]
        iso = mm.children[1]
        if not saved:
            # everything starts visible
            assert scp.actor.actor.visibility == True
            assert scp.implicit_plane.widget.enabled == True
            for a in wrl.actors:
                assert a.visibility == True
            assert iso.actor.actor.visibility == True
            # Check if widget state is remembered.
            scp.implicit_plane.widget.enabled = False
            scp.visible = False
            assert scp.actor.actor.visibility == False
            assert scp.implicit_plane.widget.enabled == False
            # hidden objects get a "[Hidden]" suffix on their name
            assert scp.name == 'ScalarCutPlane [Hidden]'
            # Reenable it and check widget state.
            scp.visible = True
            assert scp.actor.actor.visibility == True
            assert scp.implicit_plane.widget.enabled == False
            # Reset the visible state.
            wrl.visible = False
            scp.visible = False
            iso.visible = False
        # Check final state.
        for a in wrl.actors:
            assert a.visibility == False
        assert wrl.name.find('[Hidden]') > -1
        assert scp.actor.actor.visibility == False
        assert scp.implicit_plane.widget.enabled == False
        assert scp.name == 'ScalarCutPlane [Hidden]'
        assert iso.name == 'IsoSurface [Hidden]'
        assert iso.actor.actor.visibility == False
    def test(self):
        """Entry point used by the test framework; delegates to main()."""
        self.main()
    def do(self):
        ############################################################
        # Imports.
        from mayavi.sources.api import VTKXMLFileReader,\
            VRMLImporter
        from mayavi.modules.api import ScalarCutPlane,\
            IsoSurface
        ############################################################
        # Create a new scene and set up the visualization.
        s = self.new_scene()
        script = mayavi = self.script
        # Read a VRML file.
        w = VRMLImporter()
        w.initialize(get_example_data('room_vis.wrl'))
        script.add_source(w)
        # Read a VTK data file.
        r = VTKXMLFileReader()
        r.initialize(get_example_data('fire_ug.vtu'))
        script.add_source(r)
        # Create the modules.
        scp = ScalarCutPlane()
        script.add_module(scp)
        iso = IsoSurface()
        script.add_module(iso)
        # Check.
        self.check(saved=False)
        ############################################################
        # Test if saving a visualization and restoring it works.
        # Save visualization.
        f = StringIO()
        f.name = abspath('test.mv2')  # We simulate a file.
        script.save_visualization(f)
        f.seek(0)  # So we can read this saved data.
        # Remove existing scene.
        engine = script.engine
        engine.close_scene(s)
        # Load visualization
        script.load_visualization(f)
        s = engine.current_scene
        s.scene.isometric_view()
        # Now do the check.
        self.check(saved=True)
        ############################################################
        # Test if the Mayavi2 visualization can be deep-copied.
        # Pop the source object.
        sources = s.children
        s.children = []
        # Add it back to see if that works without error.
        s.children.extend(sources)
        # Now do the check.
        s.scene.isometric_view()
        self.check(saved=True)
        # Now deepcopy the source and replace the existing one with
        # the copy.  This basically simulates cutting/copying the
        # object from the UI via the right-click menu on the tree
        # view, and pasting the copy back.
        sources1 = copy.deepcopy(sources)
        s.children[:] = sources1
        # Now do the check.
        s.scene.isometric_view()
        self.check(saved=True)
        # If we have come this far, we are golden!
if __name__ == "__main__":
    # Allow running this integration test directly as a script.
    TestHideShow().test()
|
liulion/mayavi
|
integrationtests/mayavi/test_hide_show.py
|
Python
|
bsd-3-clause
| 4,695
|
[
"Mayavi",
"VTK"
] |
669e550e54d7d40145a5d1121659631488cfdc13ad9e339391eaed64d0539135
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal1d import Signal1D
from hyperspy.components1d import Gaussian
class TestSetParameterInModel:
    """Exercise ``Model.set_parameters_not_free`` / ``set_parameters_free``
    with and without component-list and parameter-name restrictions."""

    def setup_method(self, method):
        # Build a model holding three independent Gaussian components
        # on a simple ramp signal.
        components = [Gaussian() for _ in range(3)]
        signal = Signal1D(np.arange(10))
        model = signal.create_model()
        for component in components:
            model.append(component)
        self.g1, self.g2, self.g3 = components
        self.model = model

    def test_set_parameter_in_model_not_free(self):
        # Freezing with no arguments freezes every parameter of every
        # component in the model.
        self.model.set_parameters_not_free()
        for component in (self.g1, self.g2, self.g3):
            assert len(component.free_parameters) == 0

    def test_set_parameter_in_model_free(self):
        # Freeing with no arguments undoes per-parameter freezes.
        self.g1.A.free = False
        self.g2.sigma.free = False
        self.g3.centre.free = False
        self.model.set_parameters_free()
        for component in (self.g1, self.g2, self.g3):
            assert len(component.free_parameters) == len(component.parameters)

    def test_set_parameter_in_model1(self):
        # Restricting the freeze to a component list leaves the other
        # components untouched.
        self.model.set_parameters_not_free([self.g1, self.g2])
        assert len(self.g1.free_parameters) == 0
        assert len(self.g2.free_parameters) == 0
        assert len(self.g3.free_parameters) == len(self.g3.parameters)

    def test_set_parameter_in_model2(self):
        # Freeing a single component after a global freeze.
        self.model.set_parameters_not_free()
        self.model.set_parameters_free([self.g3])
        assert len(self.g1.free_parameters) == 0
        assert len(self.g2.free_parameters) == 0
        assert len(self.g3.free_parameters) == len(self.g3.parameters)

    def test_set_parameter_in_model3(self):
        # Freezing by parameter name affects that parameter in every
        # component while the remaining parameters stay free.
        self.model.set_parameters_not_free(parameter_name_list=['A'])
        for component in (self.g1, self.g2, self.g3):
            assert not component.A.free
            assert component.sigma.free
            assert component.centre.free

    def test_set_parameter_in_model4(self):
        # Combining a component list with a parameter name restricts the
        # freeze to that single parameter of that single component.
        self.model.set_parameters_not_free([self.g2], parameter_name_list=['A'])
        for component in (self.g1, self.g3):
            assert component.A.free
            assert component.sigma.free
            assert component.centre.free
        assert not self.g2.A.free
        assert self.g2.sigma.free
        assert self.g2.centre.free

    def test_set_parameter_in_model5(self):
        # Freeing one named parameter of one component after a global
        # freeze leaves everything else frozen.
        self.model.set_parameters_not_free()
        self.model.set_parameters_free([self.g1], parameter_name_list=['centre'])
        assert not self.g1.A.free
        assert not self.g1.sigma.free
        assert self.g1.centre.free
        for component in (self.g2, self.g3):
            assert not component.A.free
            assert not component.sigma.free
            assert not component.centre.free
|
erh3cq/hyperspy
|
hyperspy/tests/model/test_set_parameter_state.py
|
Python
|
gpl-3.0
| 4,051
|
[
"Gaussian"
] |
4ae36dad390187a9e70b9d94fc9acb7f028f66a8e4642f7f5bda45b2839fead7
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Manages icons and artworks"""
import os
import logging
# Module-wide logger for the camelot art subsystem.
logger = logging.getLogger('camelot.view.art')
from PyQt4 import QtGui
def file_(name):
    """Return the filename of the art resource *name* from the camelot
    package, using '/' as a path separator inside *name*."""
    import camelot
    from camelot.core.resources import resource_filename
    return resource_filename(camelot.__name__, 'art/%s' % name)
def read(fname):
    """Return the raw contents of the art resource *fname* from the
    camelot package."""
    from camelot.core.resources import resource_string
    import camelot
    return resource_string(camelot.__name__, 'art/%s' % fname)
class Pixmap(object):
    """Load pixmaps from the camelot art library.

    The QPixmap is built lazily and cached on first access; see
    :meth:`getQPixmap`.  (Python 2 / PyQt4 era code.)
    """

    def __init__(self, path, module=None):
        """:param path: the path of the pixmap relative to the art directory, use
        '/' as a path separator
        :param module: the module that contains the art directory, if None is given
        this will be camelot"""
        self._path = path
        # Lazily filled by getQPixmap(); None until the first access.
        self._cached_pixmap = None
        if not module:
            import camelot
            self._module_name = camelot.__name__
        else:
            self._module_name = module.__name__

    def __unicode__(self):
        # The relative art path doubles as the text representation.
        return self._path

    def __repr__(self):
        return self.__class__.__name__ + "('" + self._path + "')"

    def fullpath(self):
        """Obsolete : avoid this method, since it will copy the resource file
        from its package and copy it to a temp folder if the resource is
        packaged.

        :return: an absolute path to the resource file, or '' when the
            file could not be materialized on disk."""
        from camelot.core.resources import resource_filename
        pth = resource_filename(self._module_name, 'art/%s'%(self._path))
        if os.path.exists(pth):
            return pth
        else:
            return ''

    def getQPixmap(self):
        """QPixmaps can only be used in the gui thread.

        :return: the cached QPixmap; a null QPixmap is returned (and
            cached, so the load is not retried) when loading fails."""
        if self._cached_pixmap:
            return self._cached_pixmap
        from camelot.core.resources import resource_string
        from PyQt4.QtGui import QPixmap
        qpm = QPixmap()
        p = os.path.join('art', self._path)
        try:
            # For some reason this throws a unicode error if the path contains an accent (cf windows username)
            # this happens only here, not for icons further on in the application
            # so they see no splash screen, too bad
            r = resource_string(self._module_name, p)
            qpm.loadFromData(r)
        except Exception, e:
            logger.warn(u'Could not load pixmap "%s" from module: %s, encountered exception' % (p, self._module_name), exc_info=e)
        # NOTE: cached even on failure so a broken resource is only
        # attempted (and logged) once.
        self._cached_pixmap = qpm
        return qpm
class Icon(Pixmap):
    """Manages paths to the icons images"""

    def getQIcon(self):
        """Build a QIcon from the underlying pixmap.

        Like QPixmaps, QIcons can only be used in the gui thread."""
        from PyQt4.QtGui import QIcon
        return QIcon(self.getQPixmap())
class ColorScheme(object):
    """The default color scheme for camelot, based on the Tango icon set
    see http://tango.freedesktop.org/Generic_Icon_Theme_Guidelines
    """
    # Tango-style shades: for each hue, _1 is the lightest and _3 the
    # darkest variant; the bare name / _0 alias is the pure color.
    yellow = QtGui.QColor('#ffff00')
    yellow_0 = yellow
    yellow_1 = QtGui.QColor('#fce94f')
    yellow_2 = QtGui.QColor('#edd400')
    yellow_3 = QtGui.QColor('#c4a000')
    orange_1 = QtGui.QColor('#fcaf3e')
    orange_2 = QtGui.QColor('#f57900')
    orange_3 = QtGui.QColor('#cd5c00')
    brown_1 = QtGui.QColor('#e9b96e')
    brown_2 = QtGui.QColor('#c17d11')
    brown_3 = QtGui.QColor('#8f5902')
    red = QtGui.QColor('#ff0000')
    red_0 = red
    red_1 = QtGui.QColor('#ef2929')
    red_2 = QtGui.QColor('#cc0000')
    red_3 = QtGui.QColor('#a40000')
    blue = QtGui.QColor('#0000ff')
    blue_0 = blue
    blue_1 = QtGui.QColor('#000080')
    green = QtGui.QColor('#00ff00')
    green_0 = green
    cyan = QtGui.QColor('#00ffff')
    cyan_0 = cyan
    cyan_1 = QtGui.QColor('#008080')
    magenta = QtGui.QColor('#ff00ff')
    magenta_0 = magenta
    magenta_1 = QtGui.QColor('#800080')
    pink_1 = QtGui.QColor('#f16c6c')
    pink_2 = QtGui.QColor('#f13c3c')
    aluminium_0 = QtGui.QColor('#eeeeec')
    aluminium_1 = QtGui.QColor('#d3d7cf')
    aluminium_2 = QtGui.QColor('#babdb6')
    aluminium = aluminium_0
    # Neutral greys, ordered light (_0) to mid/dark variants.
    grey_0 = QtGui.QColor('#eeeeee')
    grey_1 = QtGui.QColor('#cccccc')
    grey_2 = QtGui.QColor('#333333')
    grey_3 = QtGui.QColor('#666666')
    grey_4 = QtGui.QColor('#999999')
    grey = grey_0
    # Semantic aliases used by the views.
    VALIDATION_ERROR = red_1
    NOTIFICATION = yellow_1
    """
    for consistency with QT:
    Qt::white 3 White (#ffffff)
    Qt::black 2 Black (#000000)
    Qt::red 7 Red (#ff0000)
    Qt::darkRed 13 Dark red (#800000)
    Qt::green 8 Green (#00ff00)
    Qt::darkGreen 14 Dark green (#008000)
    Qt::blue 9 Blue (#0000ff)
    Qt::darkBlue 15 Dark blue ()
    Qt::cyan 10 Cyan (#00ffff)
    Qt::darkCyan 16 Dark cyan (#008080)
    Qt::magenta 11 Magenta (#ff00ff)
    Qt::darkMagenta 17 Dark magenta (#800080)
    Qt::yellow 12 Yellow (#ffff00)
    Qt::darkYellow 18 Dark yellow (#808000)
    Qt::gray 5 Gray (#a0a0a4)
    Qt::darkGray 4 Dark gray (#808080)
    Qt::lightGray 6 Light gray (#c0c0c0)
    Qt::transparent 19 a transparent black value (i.e., QColor(0, 0, 0, 0))
    Qt::color0 0 0 pixel value (for bitmaps)
    Qt::color1 1 1 pixel value (for bitmaps)
    """
|
jeroendierckx/Camelot
|
camelot/view/art.py
|
Python
|
gpl-2.0
| 6,325
|
[
"VisIt"
] |
3ed6285d7ea67a1d98e8250da81979d6d501b5061fa6d6c3b2ad9bb60b109e32
|
#!/usr/bin/env python
#
# restrict_long_contigs.py
#
# USAGE: restrict_long_contigs.py [options] <input_directory> \
# <output_directory>
#
# Options:
# -h, --help show this help message and exit
# -l MINLEN, --minlen=MINLEN
# Minimum length of sequence
# -s SUFFIX, --filesuffix=SUFFIX
# Suffix to indicate the file was processed
# -v, --verbose Give verbose output
#
# Non-PSL dependencies: Biopython (www.biopython.org)
#
# A short script that takes as input a directory containing (many) FASTA files
# describing biological sequences, and writes to a new, named directory
# multiple FASTA files containing the same sequences, but restricted only to
# those sequences whose length is greater than a passed value.
#
# Example usage: You have a directory with many sets of contigs from different
# assemblies. This script will produce a new directory of the same data where
# the contig lengths are restricted to being greater than a specified length.
#
# Copyright (C) 2013 The James Hutton Institute
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@hutton.ac.uk
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2010-2014 The James Hutton Institute
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
# IMPORTS
from Bio import SeqIO
from optparse import OptionParser
import logging
import logging.handlers
import os
import re
import sys
###
# GLOBALS
# File extensions that indicate FASTA content; compared lowercased in
# get_fasta_filenames(), so mixed-case extensions also match.
fasta_ext = ['.fa', '.fas', '.fasta']
###
# FUNCTIONS
# Parse cmd-line
def parse_cmdline(args):
    """ Parse command-line arguments. Note that the input and output
    directories are positional arguments

    :param args: argv-style list; args[0] is the program name and is
        skipped, the remaining entries are parsed.
    :returns: (options, arguments) tuple from OptionParser, where the
        positional arguments are the input and output directories.
    """
    usage = "usage: %prog [options] <input_directory> <output_directory>"
    parser = OptionParser(usage)
    # type="int" makes optparse reject non-numeric values up front and
    # deliver an int whether the default or a command-line value is used.
    parser.add_option("-l", "--minlen", dest="minlen",
                      action="store", type="int", default=1000,
                      help="Minimum length of sequence")
    parser.add_option("-s", "--filesuffix", dest="suffix",
                      action="store", default="_restricted",
                      help="Suffix to indicate the file was processed")
    parser.add_option("-v", "--verbose", dest="verbose",
                      action="store_true", default=False,
                      help="Give verbose output")
    # Parse the vector we were handed rather than implicitly reading
    # sys.argv (the original ignored the *args* parameter entirely).
    return parser.parse_args(args[1:])
# Get list of FASTA files from a directory
def get_fasta_filenames(indir, extensions=fasta_ext):
    """ Identifies files in the passed directory whose extensions indicate
    that they may be FASTA files. Returns the path to the file,
    including the parent directory.

    Exits the program when no candidate file is found.  Uses the
    module-level ``logger`` configured in ``__main__``.
    """
    filelist = [fname for fname in os.listdir(indir)
                if os.path.splitext(fname)[-1].lower() in extensions]
    logger.info("Identified %d FASTA files in %s:" % (len(filelist), indir))
    if not filelist:  # We want there to be at least one file
        logger.error("No FASTA files found in %s" % indir)
        sys.exit(1)
    return filelist
# Restrict sequence length in a named FASTA file, writing it to
# the named location
def restrict_seq_length(infile, outfile, minlen):
    """ Takes an input FASTA file as infile, and writes out a corresponding
    file to outfile, where sequences shorter than minlen are not included
    """
    logger.info("Restricting lengths of %s to >=%d; writing to %s" %
                (infile, minlen, outfile))
    keep = (record for record in SeqIO.parse(infile, 'fasta')
            if len(record) >= minlen)
    SeqIO.write(keep, outfile, 'fasta')
# Process FASTA files in the directory
def process_files(indir, outdir, minlen, suffix):
    """ Takes an input directory that contains FASTA files, and writes
    to the output directory corresponding files (with the suffix appended)
    that contain only sequences of length greater than minlen.
    """
    for fname in get_fasta_filenames(indir):
        stem, ext = os.path.splitext(fname)
        src = os.path.join(indir, fname)
        dst = os.path.join(outdir, '%s%s%s' % (stem, suffix, ext))
        restrict_seq_length(src, dst, minlen)
###
# SCRIPT
if __name__ == '__main__':
    # Parse command-line: options are options, the two positional
    # arguments are the input and output directories.
    options, args = parse_cmdline(sys.argv)
    # We set up logging, and modify loglevel according to whether we need
    # verbosity or not
    logger = logging.getLogger('restrict_long_contigs.py')
    logger.setLevel(logging.DEBUG)
    err_handler = logging.StreamHandler(sys.stderr)
    err_formatter = logging.Formatter('%(levelname)s: %(message)s')
    err_handler.setFormatter(err_formatter)
    if options.verbose:
        err_handler.setLevel(logging.INFO)
    else:
        err_handler.setLevel(logging.WARNING)
    logger.addHandler(err_handler)
    # Report arguments, if verbose
    logger.info(options)
    logger.info(args)
    # If there are not two positional arguments, throw an error
    if len(args) != 2:
        logger.error("Not enough arguments: script requires input and " +
                     "output directory")
        sys.exit(1)
    indir, outdir = tuple(args)
    # Make sure that the input directory exists
    if not os.path.isdir(indir):
        logger.error("Input directory %s does not exist" % indir)
        sys.exit(1)
    # If output directory does not exist, create it. If it does exist,
    # issue a warning that contents may be overwritten
    if os.path.isdir(outdir):
        logger.warning("Contents of %s may be overwritten" % outdir)
    else:
        logger.warning("Output directory %s does not exist: creating it" %
                       outdir)
        os.mkdir(outdir)
    # Check that the passed suffix is a valid string: escape dodgy characters
    # (disabled; the suffix is currently used verbatim)
    #try:
    #    suffix = re.escape(options.suffix)
    #except:
    #    logger.error("Could not escape suffix string: %s" % options.suffix)
    #    sys.exit(1)
    # Make sure that the minimum length is an integer, and positive
    if not int(options.minlen) > 0:
        logger.error("Minimum length must be a positive integer, got %s" %
                     options.minlen)
        sys.exit(1)
    # Restrict sequence lengths
    process_files(indir, outdir, int(options.minlen), options.suffix)
|
widdowquinn/scripts
|
bioinformatics/restrict_long_contigs.py
|
Python
|
mit
| 7,563
|
[
"Biopython"
] |
33ce5a3052ddcf6995ce74f18a393c4b4f5dd8fccfbed5e83b97d45e79f048b2
|
"""
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. module:: scipy.stats
This module contains a large number of probability distributions as
well as a growing library of statistical functions.
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
Continuous distributions
========================
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
frechet_r -- Frechet Right Sided, Extreme Value Type II (Extreme LB) or weibull_min
frechet_l -- Frechet Left Sided, Weibull_max
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Kolmogorov-Smirnov one-sided (no stats)
kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats)
laplace -- Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
reciprocal -- Reciprocal
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewnorm -- Skew normal
t -- Student's T
trapz -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
==========================
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
random_correlation -- random correlation matrices
Discrete distributions
======================
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
zipf -- Zipf
Statistical functions
=====================
Several of these functions have a similar version in scipy.stats.mstats
which work for masked arrays.
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
kurtosistest --
mode -- Modal value
moment -- Central moment
normaltest --
skew -- Skewness
skewtest --
kstat --
kstatvar --
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin --
tmax --
tstd --
tsem --
variation -- Coefficient of variation
find_repeats
trim_mean
.. autosummary::
:toctree: generated/
cumfreq
histogram2
histogram
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
bayes_mvs
mvsdist
sem
zmap
zscore
iqr
.. autosummary::
:toctree: generated/
sigmaclip
threshold
trimboth
trim1
.. autosummary::
:toctree: generated/
f_oneway
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
f_value
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
kstest
chisquare
power_divergence
ks_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
ss
square_of_sums
jarque_bera
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
fligner
median_test
mood
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
entropy
.. autosummary::
:toctree: generated/
chisqprob
betai
Circular statistical functions
==============================
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
===========================
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.expected_freq
contingency.margins
fisher_exact
Plot-tests
==========
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
Masked statistics functions
===========================
.. toctree::
stats.mstats
Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`)
==============================================================================
.. autosummary::
:toctree: generated/
gaussian_kde
For many more stat related functions install the software R and the
interface package rpy.
"""
from __future__ import division, print_function, absolute_import
from .stats import *
from .distributions import *
from .morestats import *
from ._binned_statistic import *
from .kde import gaussian_kde
from . import mstats
from .contingency import chi2_contingency
from ._multivariate import *
# Public API: every name pulled in above that does not start with an
# underscore (this drops private helpers as well as dunders).
__all__ = [s for s in dir() if not s.startswith("_")]

from numpy.testing import Tester
test = Tester().test
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/scipy/stats/__init__.py
|
Python
|
mit
| 9,083
|
[
"Gaussian"
] |
d45d374bd63a474ffb2f353632a8f20276db51be05c206e958d40987f23f7c2c
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator as glm
# test scoring_history for Gaussian family with validation dataset and cv
def test_gaussian_alpha():
    """Check that generate_scoring_history does not change the fitted GLM
    coefficients for the Gaussian family, with lambda search on/off, a
    validation frame, and with/without 2-fold cross-validation."""
    h2o_data = h2o.import_file(
        path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
    # NOTE(review): this loop re-assigns each enum column to itself, a
    # no-op; it presumably meant h2o_data[cname].asfactor() -- confirm
    # before changing, as converting would alter the fitted models.
    enum_columns = ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10"]
    for cname in enum_columns:
        h2o_data[cname] = h2o_data[cname]
    myY = "C21"
    # Build the predictor list explicitly. The original
    # `h2o_data.names.remove(myY)` set myX to None because list.remove()
    # returns None; training then only worked via h2o's "all columns but
    # y" fallback, which is the same column set as below.
    myX = [name for name in h2o_data.names if name != myY]
    data_frames = h2o_data.split_frame(ratios=[0.8])
    training_data = data_frames[0]
    test_data = data_frames[1]

    def assert_same_coefs(**kwargs):
        # Train two identical GLMs that differ only in
        # generate_scoring_history and assert equal coefficients.
        model1 = glm(family="gaussian", generate_scoring_history=True, **kwargs)
        model1.train(x=myX, y=myY, training_frame=training_data,
                     validation_frame=test_data)
        model2 = glm(family="gaussian", generate_scoring_history=False, **kwargs)
        model2.train(x=myX, y=myY, training_frame=training_data,
                     validation_frame=test_data)
        pyunit_utils.assertCoefDictEqual(model1.coef(), model2.coef())

    alphas = [0, 0.2, 0.5, 0.8, 1]
    lambdas = [0, 0.1, 0.001, 0.004]
    # test with lambda search on, generate_scoring_history on and off
    assert_same_coefs(lambda_search=True, alpha=alphas)
    # test with lambda search off, generate_scoring_history on and off
    assert_same_coefs(lambda_search=False, alpha=alphas, Lambda=lambdas)
    # same two cases again with 2-fold cross-validation
    assert_same_coefs(lambda_search=True, alpha=alphas, nfolds=2, seed=12345)
    assert_same_coefs(lambda_search=False, alpha=alphas, Lambda=lambdas,
                      nfolds=2, seed=12345)
if __name__ == "__main__":
    # Run under the pyunit harness when invoked directly; when imported
    # by the test runner, execute the test immediately.
    pyunit_utils.standalone_test(test_gaussian_alpha)
else:
    test_gaussian_alpha()
|
michalkurka/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_8077_gaussian_alpha.py
|
Python
|
apache-2.0
| 3,549
|
[
"Gaussian"
] |
caa8472496c14f58e717cd70d1dbbe9f089e055e20557b62e4350d1f2f3e02c5
|
# -*- coding:Utf-8 -*-
"""
This module handles CORMORAN measurement data
CorSer Class
============
.. autoclass:: CorSer
:members:
Notes
-----
Useful members
distdf : distance between radio nodes (122 columns)
devdf : device data frame
"""
#import mayavi.mlab as mlabc
import os
import pdb
import sys
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.io as io
from pylayers.util.project import *
from pylayers.util.pyutil import *
from pylayers.mobility.ban.body import *
from pylayers.gis.layout import *
import pylayers.antprop.antenna as antenna
from matplotlib.widgets import Slider, CheckButtons, Button, Cursor
from pylayers.signal.DF import *
# from moviepy.editor import *
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
try:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
#Those lines handle incompatibility between mayavi and VTK
#and redirect noisy warning message into a log file
# import vtk
# output=vtk.vtkFileOutputWindow()
# output.SetFileName("mayaviwarninglog.tmp")
# vtk.vtkOutputWindow().SetInstance(output)
def cor_log(short=True):
    """ display cormoran measurement campaign logfile

    Reads RAW/Doc/MeasurementLog.csv under the CORMORAN data directory
    (located via the CORMORAN environment variable).

    Parameters
    ----------
    short : boolean
        enable short version (derived 'serie' and 'day' columns plus a
        subset of the original columns); otherwise return the raw log.

    Returns
    -------
    pd.DataFrame
    """
    logpath = os.path.join(os.environ['CORMORAN'],
                           'RAW', 'Doc', 'MeasurementLog.csv')
    log = pd.read_csv(logpath)
    if not short:
        return log
    # Derive compact columns: 'day' is the day-of-month prefix of the
    # DD/MM/YYYY date, 'serie' mirrors 'Meas Serie'.
    log['day'] = [date.split('/')[0] for date in log['Date'].values]
    log['serie'] = log['Meas Serie']
    return log[['serie', 'day', 'Subject', 'techno', 'Short Notes']]
def time2npa(lt):
    """ convert pd.datetime.time to numpy array

    Parameters
    ----------
    lt : pd.datetime.time

    Returns
    -------
    ta : float
        time of day expressed in seconds
    """
    micro_part = lt.microsecond * 1e-6
    minute_part = lt.minute * 60
    hour_part = lt.hour * 3600
    # Same left-to-right float summation order as the original.
    return micro_part + lt.second + minute_part + hour_part
class CorSer(PyLayers):
""" Handle CORMORAN measurement data
Hikob data handling from CORMORAN measurement campaign
11/06/2014
single subject (Bernard and Nicolas)
12/06/2014
several subject (Jihad, Eric , Nicolas)
"""
def __init__(self, serie=6, day=11, source='CITI', layout=False):
    """ Load one serie of the CORMORAN measurement campaign.

    Parameters
    ----------
    serie : int
        serie number to load
    day : int
        day of the campaign : 11 or 12 (June 2014)
    source : string
        post-treatment source of the HIKOB data ('CITI' or 'UR1')
    layout : bool
        if True, load the Layout of the measurement site

    Notes
    -----
    The environment variable CORMORAN is indicating the location
    of the data directory.
    """
    assert (day in [11, 12]), "wrong day"
    try:
        self.rootdir = os.environ['CORMORAN']
    except:
        raise NameError('Please add a CORMORAN environement variable \
                pointing to the data')

    # infos
    self.serie = serie
    self.day = day
    self.loadlog()

    # series without hkb data are rejected early
    # fix: a plain string used to be raised here, which is a TypeError in
    # Python 3 ; raise AttributeError as the day 12 branch already does
    if day == 11:
        if serie in [7, 8]:
            raise AttributeError('Serie ' + str(serie) +
                                 ' has no hkb data and will not be loaded')
    if day == 12:
        if serie in [17, 18, 19, 20]:
            raise AttributeError('Serie ' + str(serie) +
                                 ' has no hkb data and will not be loaded')

    # Measures : which series carry which technology
    if day == 11:
        self.stcr = [1,2,3,4,10,11,12,32,33,34,35,9,17,18,19,20,25,26]
        self.shkb = [5,6,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
        self.sbs = [5,6,7,8,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
        self.mocap = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35]
        self.mocapinterf = []
    if day == 12:
        self.stcr = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
        self.shkb = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
        self.sbs = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
        self.mocap = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
        self.mocapinterf = [5,6,7,8,13,14,15,16,21,22,23,24,]

    self.typ = ''

    # HIKOB
    if serie in self.shkb:
        self._loadhkb(serie=serie, day=day, source=source)
    # IR-UWB TCR
    if serie in self.stcr:
        self._loadTCR(serie=serie, day=day)
    # BeSpoon
    if serie in self.sbs:
        self._loadBS(serie=serie, day=day)

    # set filename
    if self.typ == 'FULL':
        self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ.capitalize()
    else:
        self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ

    # Layout
    if layout:
        self.L = Layout('MOCAP-small2.lay')

    # Load Infrastructure Nodes
    self._loadinfranodes()

    # Load cameras
    self._loadcam()

    # BODY & interferers
    self.subject = str(self.log['Subject'].values[0].replace('jihad', 'Jihad')).split(' ')
    # filter typos in self.subject
    self.subject = [x for x in self.subject if len(x) != 0]
    if 'Jihad' in self.subject:
        uj = self.subject.index('Jihad')
        self.subject[uj] = 'Jihan'

    if serie in self.mocap:
        # load bodies from mocap file
        self._loadbody(serie=serie, day=day)
        self._distancematrix()
        self._computedevpdf()
        if isinstance(self.B, dict):
            for b in self.B:
                if hasattr(self, 'L'):
                    self.B[b].traj.Lfilename = copy.copy(self.L._filename)
                else:
                    self.B[b].traj.Lfilename = 'notloaded'
        else:
            self.B.traj.Lfilename = copy.copy(self.L._filename)

        # reference time is tmocap
        self.tmocap = self.B[self.subject[0]].time

        # load offset dict
        self.offset = self._load_offset_dict()

        ########################
        # realign Radio on mocap
        ########################
        # 1 - Resample radio time => mocap time
        # 2 - (if available) apply offset
        if ('BS' in self.typ) or ('FULL' in self.typ):
            print('\nBS data frame index: ')
            self._align_on_devdf(typ='BS')
            print('Align on mocap OK...')
            try:
                self._apply_offset('BS')
                print('time-offset applied OK')
            except:
                print('WARNING time-offset NOT applied')
                print('No BS offset not yet set => use self.offset_setter ')

        if ('TCR' in self.typ) or ('FULL' in self.typ):
            print('\nTCR data frame index:')
            self._align_on_devdf(typ='TCR')
            print('Align on mocap OK...')
            try:
                self._apply_offset('TCR')
                print('time-offset applied OK')
            except:
                print('WARNING time-offset NOT applied')
                print('No TCR offset not yet set => use self.offset_setter')

        if ('HK' in self.typ) or ('FULL' in self.typ):
            print('\nHKB data frame index:')
            self._align_on_devdf(typ='HKB')
            print('Align on mocap OK...')
            try:
                # self._apply_offset('HKB')
                print('time-offset applied OK')
            except:
                print('WARNING time-offset NOT applied')
                print('No HKB offset not yet set => use self.offset_setter')

        print('\nCreate distance Dataframe...')
        self._computedistdf()
        print('OK')
def __repr__(self):
st = ''
st = st + 'filename : ' + self._filename + '\n'
st = st + 'filewear : ' + self.filewear + '\n'
st = st + 'filebody : ' + self.filebody + '\n'
st = st + 'filemocap : ' + self.filemocap + '\n'
st = st + 'Day : '+ str(self.day)+'/06/2014'+'\n'
st = st + 'Serie : '+ str(self.serie)+'\n'
st = st + 'Scenario : '+str(self.scenario)+'\n'
st = st + 'Run : '+ str(self.run)+'\n'
st = st + 'Type : '+ str(self.typ)+'\n'
st = st + 'Original Video Id : '+ str(self.video)+'\n'
st = st + 'Subject(s) : '
for k in self.subject:
st = st + k + ' '
st = st + '\n\n'
st = st+'Body available: ' + str('B' in dir(self)) + '\n\n'
try :
st = st+'BeSPoon : '+self._fileBS+'\n'
except:
pass
try :
st = st+'HIKOB : '+self._filehkb+'\n'
except:
pass
try :
st = st+'TCR : '+self._fileTCR+'\n'
except:
pass
st = st + '----------------------\n\n'
for k in self.log.columns:
st = st + k + ' :' + str(self.log[k].values)+'\n'
return(st)
# @property
# def dev(self):
# """ display device techno, id , id on body, body owner,...
# """
# title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
# print title + '\n' + '-'*len(title)
# if ('HK' in self.typ) or ('FULL' in self.typ):
# hkbkeys = self.idHKB.keys()
# hkbkeys.sort()
# for d in hkbkeys:
# dev = self.devmapper(self.idHKB[d],'HKB')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
# if ('TCR' in self.typ) or ('FULL' in self.typ):
# tcrkeys = self.idTCR.keys()
# tcrkeys.sort()
# for d in tcrkeys:
# dev = self.devmapper(self.idTCR[d],'TCR')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
@property
def dev(self):
    """ display device techno, id , id on body, body owner,...
    """
    title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')

    def row(dev):
        # one formatted table row from a devmapper tuple
        print('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0], dev[1], dev[2], dev[3]))

    def blank():
        # visual separator row
        print('{0:21} | {1:7} | {2:8} | {3:10} '.format('', '', '', ''))

    print(title + '\n' + '=' * len(title))
    # infrastructure access points : HKB
    for d in self.din:
        if 'HK' in d:
            row(self.devmapper(d, 'HKB'))
    if 'FULL' in self.typ:
        blank()
    # infrastructure access points : BeSpoon
    for d in self.din:
        if 'BS' in d:
            row(self.devmapper(d, 'BS'))
    if 'FULL' in self.typ:
        blank()
    # infrastructure access points : TCR
    for d in self.din:
        if 'TCR' in d:
            row(self.devmapper(d, 'TCR'))
    print('{0:66}'.format('-' * len(title)))
    # on-body devices, per subject (interferers excluded) and per techno
    for b in self.B:
        if b not in self.interf:
            for d in self.B[b].dev.keys():
                if 'HK' in d:
                    row(self.devmapper(d, 'HKB'))
            if ('FULL' in self.typ) or ('HKB' in self.typ):
                blank()
            for d in self.B[b].dev.keys():
                if 'BS' in d:
                    row(self.devmapper(d, 'BS'))
            if 'FULL' in self.typ:
                blank()
            for d in self.B[b].dev.keys():
                if 'TCR' in d:
                    row(self.devmapper(d, 'TCR'))
    print('{0:66}'.format('-' * len(title)))
@property
def ant(self):
    """ display device techno, id , id on body, body owner,...
    """
    title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')

    def fmt_row(dev):
        # one formatted table row from a devmapper tuple
        print('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0], dev[1], dev[2], dev[3]))

    def sep():
        # visual separator row
        print('{0:21} | {1:7} | {2:8} | {3:10} '.format('', '', '', ''))

    print(title + '\n' + '=' * len(title))
    # infrastructure access points : HKB
    for d in self.din:
        if 'HK' in d:
            fmt_row(self.devmapper(d, 'HKB'))
    if 'FULL' in self.typ:
        sep()
    # infrastructure access points : BeSpoon
    for d in self.din:
        if 'BS' in d:
            fmt_row(self.devmapper(d, 'BS'))
    if 'FULL' in self.typ:
        sep()
    # infrastructure access points : TCR
    for d in self.din:
        if 'TCR' in d:
            fmt_row(self.devmapper(d, 'TCR'))
    print('{0:66}'.format('-' * len(title)))
    # on-body devices, per subject (interferers excluded) and per techno
    for b in self.B:
        if b not in self.interf:
            for d in self.B[b].dev.keys():
                if 'HK' in d:
                    fmt_row(self.devmapper(d, 'HKB'))
            if ('FULL' in self.typ) or ('HKB' in self.typ):
                sep()
            for d in self.B[b].dev.keys():
                if 'BS' in d:
                    fmt_row(self.devmapper(d, 'BS'))
            if 'FULL' in self.typ:
                sep()
            for d in self.B[b].dev.keys():
                if 'TCR' in d:
                    fmt_row(self.devmapper(d, 'TCR'))
    print('{0:66}'.format('-' * len(title)))
def _loadcam(self):
""" load camera position
Returns
-------
update self.cam
"""
self.cam = np.array([
[-6502.16643961174,5440.97951452912,2296.44437108561],
[-7782.34866625776,4998.47624994092,2417.5861326688],
[8308.82897665828,3618.50516290547,2698.07710953287],
[5606.68337709102,-6354.17891528277,2500.27779697402],
[-8237.91886515041,-2332.98639475305,4765.31798299242],
[5496.0942989988,6216.91946236788,2433.30012872688],
[-8296.19706598514,2430.07325486109,4794.01607841197],
[7718.37527064615,-4644.26760522485,2584.75330667172],
[8471.27154730777,-3043.74550832061,2683.45089703377],
[-8213.04824602894,-4034.57371591121,2368.54548665579],
[-7184.66711497403,-4950.49444503781,2317.68563412347],
[7531.66103727189,5279.02353243886,2479.36291603544],
[-6303.08628709464,-7057.06193926342,2288.84938553817],
[-5441.17834354692,6637.93014323586,2315.15657646861],
[8287.79937470615,59.1614281340528,4809.14535447027]
])*1e-3
def _loadinfranodes(self):
    """ load infrastructure nodes

    Anchor layout in the room (top view), by operator:

    nico

                        A4
                    mpts[6,7,8]
                        X

    A3               A1
    mpts[9,10,11]    mpts[3,4,5]
    X                X

                        A2
                    mpts[0,1,2]
                        X

    TCR = mpts[0,3,6,9]
    HKB = mpts[1,2,
               4,5,
               7,8,
               10,11]

    bernard

                        A3
                    mpts[3,4,5]
                        X

    A2               A4
    mpts[6,7,8]      mpts[0,1,2]
    X                X

                        A1
                    mpts[9,10,11]
                        X

    TCR = mpts[0,3,6,9]
    HKB = mpts[1,2,
               4,5,
               7,8,
               10,11]

    Fills self.din, a dict  device name -> {'p': position (m),
    's3off': vertical offset, plus antenna/rotation info read
    from AccesPoints.ini}.
    """
    filename = os.path.join(self.rootdir, 'RAW', '11-06-2014', 'MOCAP', 'scene.c3d')
    print("\nload infrastructure node position:",)
    # NOTE(review): meaning of the 1st and 4th values returned by
    # c3d.ReadC3d is not visible here -- confirm against the c3d module
    a, self.infraname, pts, i = c3d.ReadC3d(filename)

    # mm -> m, then average over frames (anchors are static)
    pts = pts / 1000.
    mpts = np.mean(pts, axis=0)
    self.din = {}

    if ('HK' in self.typ) or ('FULL' in self.typ):
        # each HKB anchor position is the mean of a marker pair
        uhkb = np.array([[1, 2], [4, 5], [7, 8], [10, 11]])
        mphkb = np.mean(mpts[uhkb], axis=1)

        self.din.update(
            {'HKB:1': {'p': mphkb[3],
                       # 'T' : np.eye(3),
                       's3off': 0.},
             'HKB:2': {'p': mphkb[2],
                       # 'T': np.array([[-0.44807362, 0.89399666, 0.],
                       #                [-0.89399666, -0.44807362, 0.],
                       #                [ 0.,0.,1. ]]),
                       's3off': 0.},
             'HKB:3': {'p': mphkb[1],
                       # 'T':array([[-0.59846007, -0.80115264, 0.],
                       #            [ 0.80115264, -0.59846007, 0.],
                       #            [ 0.,0., 1.]]),
                       's3off': 0.},
             'HKB:4': {'p': mphkb[0],
                       # 'T':array([[-0.44807362, -0.89399666, 0.],
                       #            [ 0.89399666, -0.44807362, 0.],
                       #            [ 0.,0., 1.]]),
                       's3off': 0.}
             })

    # TCR:31 is the coordinator which was not captured.
    # The position has been determined via optimization
    if ('TCR' in self.typ) or ('FULL' in self.typ):
        self.din.update({'TCR:32': {'p': mpts[9],
                                    'T': np.eye(3),
                                    's3off': 0.1},
                         'TCR:24': {'p': mpts[6],
                                    # 'T': np.array([[-0.44807362, 0.89399666, 0.],
                                    #                [-0.89399666, -0.44807362, 0.],
                                    #                [ 0.,0.,1. ]]),
                                    's3off': 0.1},
                         'TCR:27': {'p': mpts[3],
                                    # 'T':array([[-0.59846007, -0.80115264, 0.],
                                    #            [ 0.80115264, -0.59846007, 0.],
                                    #            [ 0.,0., 1.]]),
                                    's3off': 0.1},
                         'TCR:28': {'p': mpts[0],
                                    # 'T':array([[-0.44807362, -0.89399666, 0.],
                                    #            [ 0.89399666, -0.44807362, 0.],
                                    #            [ 0.,0., 1.]]),
                                    's3off': 0.1},
                         # NOTE(review): bare 'array' -- presumably from a
                         # star import at file top ; confirm
                         'TCR:31': {'p': array([1.7719, -3.2655, 1.74]),
                                    # 'T':array([[-0.44807362, -0.89399666, 0.],
                                    #            [ 0.89399666, -0.44807362, 0.],
                                    #            [ 0.,0., 1.]]),
                                    's3off': 0.0}
                         })

    if self.day == 12:
        # BS idem HKB:1 and HKB:2
        if ('BS' in self.typ) or ('FULL' in self.typ):
            self.din.update(
                {'BS:74': {'p': mphkb[3],
                           # 'T':np.eye(3),
                           's3off': -0.2},
                 'BS:157': {'p': mphkb[2],
                            # 'T': np.array([[-0.44807362, 0.89399666, 0.],
                            #                [-0.89399666, -0.44807362, 0.],
                            #                [ 0.,0.,1. ]]),
                            's3off': -0.2},
                 })

    # load extra information from inifile (antenna, rotation matrix,...)
    inifile = os.path.join(self.rootdir, 'POST-TREATED', str(self.day)+'-06-2014', 'BodyandWear', 'AccesPoints.ini')
    # NOTE(review): Python-2 style module name -- confirm a py2/py3
    # compatible ConfigParser alias is imported at file top
    config = ConfigParser.ConfigParser()
    config.read(inifile)
    for d in self.din:
        self.din[d]['antname'] = config.get(d, 'file')
        self.din[d]['ant'] = antenna.Antenna(config.get(d, 'file'))
        # rotation matrix is stored as a python expression in the ini file
        self.din[d]['T'] = eval(config.get(d, 't'))
        self.din[d]['comment'] = config.get(d, 'comment')
def loadlog(self):
""" load in self.log the log of the current serie
from MeasurementLog.csv
"""
filelog = os.path.join(self.rootdir,'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
date = str(self.day)+'/06/14'
self.log = log[(log['Meas Serie'] == self.serie) & (log['Date'] == date)]
def _loadbody(self, day=11, serie=''):
    """ load body from motion capture file

    Parameters
    ----------
    day : int
        day of the campaign (11 or 12), drives the c3d filename pattern
    serie : int or ''
        serie number (only self.serie is actually used for the filename)

    Fills self.B, a dict  subject name -> Body  (plus interfering
    cylinders when the serie contains interferers), and self.interf,
    the list of interfering cylinder names actually loaded.
    """
    assert day in [11, 12], "wrong day in _loadbody"
    self.B = {}
    # one display color per subject, extra entries default to white
    color = ['LightBlue', 'YellowGreen', 'PaleVioletRed', 'white', 'white', 'white', 'white', 'white', 'white', 'white']
    for us, subject in enumerate(self.subject):
        print("\nload ", subject, " body:",)
        seriestr = str(self.serie).zfill(3)
        # day 12 mocap files carry a 'Nav_' prefix
        if day == 11:
            self.filemocap = os.path.join(self.rootdir, 'RAW', str(self.day)+'-06-2014', 'MOCAP', 'serie_'+seriestr+'.c3d')
        elif day == 12:
            self.filemocap = os.path.join(self.rootdir, 'RAW', str(self.day)+'-06-2014', 'MOCAP', 'Nav_serie_'+seriestr+'.c3d')
        # body and wear directory
        baw = os.path.join(self.rootdir, 'POST-TREATED', str(self.day)+'-06-2014', 'BodyandWear')
        # 'Jihad' is a typo for 'Jihan' in some logfiles
        if subject == 'Jihad':
            subject = 'Jihan'
        #
        # Load body cylinder description : "Subject.ini"
        # Load wearable device description (contains antenna filename) :
        #
        self.filebody = os.path.join(baw, subject + '.ini')
        self.filewear = os.path.join(baw, subject + '_' + str(self.day)+'-06-2014_' + self.typ + '.ini')
        if len(self.subject) > 1 or self.mocapinterf:
            multi_subject = True
        else:
            multi_subject = False
        self.B.update({subject: Body(_filebody=self.filebody,
                                     _filemocap=self.filemocap, unit='mm', loop=False,
                                     _filewear=self.filewear,
                                     centered=False,
                                     multi_subject_mocap=multi_subject,
                                     color=color[us])})

    # interfering persons are modelled as simple cylinders
    if self.serie in self.mocapinterf:
        self.interf = ['Anis_Cylindre:',
                       'Benoit_Cylindre:',
                       'Bernard_Cylindre:',
                       'Claude_Cylindre:',
                       'Meriem_Cylindre:']
        intertmp = []
        # Bernard is a subject (not an interferer) in serie 13
        if self.serie == 13:
            self.interf.remove('Bernard_Cylindre:')
        for ui, i in enumerate(self.interf):
            # try:
            print("load ", i, " interfering body:",)
            _filemocap = pyu.getshort(self.filemocap)
            self.B.update({i: Cylinder(name=i,
                                       _filemocap=_filemocap,
                                       unit='mm',
                                       color=color[ui])})
            intertmp.append(i)
            # except:
            #     print "Warning ! load ",i, " FAIL !"
        self.interf = intertmp
    else:
        self.interf = []

    # if len(self.subject) == 1:
    #     self.B = self.B[self.subject]
def _loadTCR(self, day=11, serie='', scenario='20', run=1):
    """ load TCR (IR-UWB ranging) data

    Parameters
    ----------
    day : int
        day of the campaign (11 or 12)
    serie : int or ''
        serie number ; if '' the file is searched from scenario/run
    scenario : string
        scenario identifier, used only when serie == ''
    run : int
        run number, used only when serie == ''

    Notes
    -----
    Fills self.tcr with a DataFrame of inter-node distances (meters)
    indexed by time (seconds), and sets self.scenario, self.run,
    self.typ and self.video.
    """
    #
    # TNET : (NodeId,MAC)
    #
    self.TNET = {0: 31,
                 1: 2,
                 7: 24,
                 8: 25,
                 9: 26,
                 10: 27,
                 11: 28,
                 12: 30,
                 14: 32,
                 15: 33,
                 16: 34,
                 17: 35,
                 18: 36,
                 19: 37,
                 20: 48,
                 21: 49}

    if day == 11:
        self.dTCR = {'Unused': 49,
                     'COORD': 31,
                     'AP1': 32,
                     'AP2': 24,
                     'AP3': 27,
                     'AP4': 28,
                     'HeadRight': 34,
                     'TorsoTopRight': 25,
                     'TorsoTopLeft': 30,
                     'BackCenter': 35,
                     'HipRight': 2,
                     'WristRight': 26,
                     'WristLeft': 48,
                     'KneeLeft': 33,
                     'AnkleRight': 36,
                     'AnkleLeft': 37}
        dirname = os.path.join(self.rootdir, 'POST-TREATED', '11-06-2014', 'TCR')

    if day == 12:
        dirname = os.path.join(self.rootdir, 'POST-TREATED', '12-06-2014', 'TCR')
        self.dTCR = {'COORD': 31,
                     'AP1': 32,
                     'AP2': 24,
                     'AP3': 27,
                     'AP4': 28,
                     'Jihad:TorsoTopRight': 35,
                     'Jihad:TorsoTopLeft': 2,
                     'Jihad:BackCenter': 33,
                     'Jihad:ShoulderLeft': 37,
                     'Nicolas:TorsoTopRight': 34,
                     'Nicolas:TorsoTopLeft': 49,
                     'Nicolas:BackCenter': 48,
                     'Nicolas:ShoulderLeft': 36,
                     'Eric:TorsoCenter': 30,
                     'Eric:BackCenter': 25,
                     'Eric:ShoulderLeft': 26}

    #
    # idTCR : (MAC , Name)
    # dTCRni : (NodeId, Name)
    #
    self.idTCR = {}
    for k in self.dTCR:
        self.idTCR[self.dTCR[k]] = k

    dTCRni = {}
    for k in self.TNET.keys():
        dTCRni[k] = self.idTCR[self.TNET[k]]

    files = os.listdir(dirname)
    if serie != '':
        # fix: filter() is not subscriptable in Python 3 -> list comprehensions ;
        # fall back to the lowercase '_s' naming when '_S' is absent
        try:
            self._fileTCR = [x for x in files if '_S'+str(serie)+'_' in x][0]
        except:
            self._fileTCR = [x for x in files if '_s'+str(serie)+'_' in x][0]
        tt = self._fileTCR.split('_')
        self.scenario = tt[0].replace('Sc', '')
        self.run = tt[2].replace('R', '')
        self.typ = tt[3].replace('.csv', '').upper()
        self.video = 'NA'
    else:
        # fix: previous code used Python-2 filter() and the undefined
        # name 'filsc' ; select on scenario then on run
        filesc = [x for x in files if 'Sc'+scenario in x]
        self._fileTCR = [x for x in filesc if 'R'+str(run) in x][0]
        self.scenario = scenario
        self.run = str(run)

    filename = os.path.join(dirname, self._fileTCR)
    dtTCR = pd.read_csv(filename)
    tcr = {}
    # build one time-indexed distance Series per directed link
    for k in dTCRni:
        for l in dTCRni:
            if k != l:
                d = dtTCR[((dtTCR['ida'] == k) & (dtTCR['idb'] == l))]
                d.drop_duplicates('time', inplace=True)
                del d['lqi']
                del d['ida']
                del d['idb']
                # time == -1 marks invalid samples
                d = d[d['time'] != -1]
                d.index = d['time']
                del d['time']
                if len(d) != 0:
                    # mm -> m
                    sr = pd.Series(d['dist']/1000, index=d.index)
                    tcr[dTCRni[k]+'-'+dTCRni[l]] = sr

    self.tcr = pd.DataFrame(tcr)
    self.tcr = self.tcr.fillna(0)
    # TCR clock tick -> seconds, origin at the first sample
    ts = 75366400./1e9
    t = np.array(self.tcr.index)*ts
    t = t - t[0]
    self.tcr.index = t
    self.ttcr = self.tcr.index
def _loadBS(self, day=11, serie='', scenario='20', run=1):
    """ load BeSpoon distance data

    Parameters
    ----------
    day : int
        day of the campaign (11 or 12)
    serie : string
        serie number ; if '' the file is searched from scenario/run
    scenario : string
        scenario identifier, used only when serie == ''
    run : int
        run number, used only when serie == ''

    Notes
    -----
    Fills self.bespo with a DataFrame of distances, one column per
    link from the hand-held device, indexed by time in seconds.
    """
    if day == 11:
        self.dBS = {'WristRight': 157, 'AnkleRight': 74, 'HandRight': 0}
    elif day == 12:
        self.dBS = {'AP1': 157, 'AP2': 74, 'HandRight': 0}
    self.idBS = {}
    for k in self.dBS:
        self.idBS[self.dBS[k]] = k

    if day == 11:
        dirname = os.path.join(self.rootdir, 'POST-TREATED', '11-06-2014', 'BeSpoon')
    if day == 12:
        dirname = os.path.join(self.rootdir, 'POST-TREATED', '12-06-2014', 'BeSpoon')

    files = os.listdir(dirname)
    if serie != '':
        self._fileBS = [x for x in files if 'S'+str(serie) in x][0]
    else:
        # fix: previous code referenced the undefined name 'filsc'
        # (NameError) and used Python-2 filter() ; select on scenario,
        # then on run
        filesc = [x for x in files if 'Sc'+scenario in x]
        self._fileBS = [x for x in filesc if 'R'+str(run) in x][0]

    bespo = pd.read_csv(os.path.join(dirname, self._fileBS), index_col='ts')

    gb = bespo.groupby(['Sensor'])
    # get device ids
    devid, idevid = np.unique(bespo['Sensor'], return_index=True)
    # get the rows of each sensor group
    dgb = {d: gb.get_group(d) for d in devid}
    lgb = []
    for i in dgb:
        # ms -> s, then to elapsed seconds from the first sample
        ind = dgb[i].index/1e3
        dti = pd.to_datetime(ind, unit='s')
        npai = time2npa(dti)
        npai = npai - npai[0]
        dgb[i].index = pd.Index(npai)
        lgb.append(pd.DataFrame(dgb[i]['d'].values,
                                columns=[self.idBS[0]+'-'+self.idBS[i]],
                                index=dgb[i].index))
    df = lgb[0].join(lgb[1])
    self.bespo = df
def _loadhkb(self, day=11, serie='', scenario='20', run=1, source='CITI'):
    """ load hkb measurement data

    Parameters
    ----------
    day : int
        day of the campaign (11 or 12)
    serie : int or ''
        serie number ; if '' the file is searched from scenario/run
    scenario : string
        scenario identifier, used only when serie == ''
    run : int
        run number, used only when serie == ''
    source : string
        post-treatment source ('CITI' or 'UR1'), drives directory
        layout and .mat file structure

    Returns
    -------
    update self.hkb : DataFrame of RSSI, one column per undirected link
    """
    # serie 5 of day 11 only exists in the UR1 post-treatment
    if day == 11:
        if serie == 5:
            source = 'UR1'

    if day == 11:
        self.dHKB = {'AP1': 1, 'AP2': 2, 'AP3': 3, 'AP4': 4,
                     'HeadRight': 5, 'TorsoTopRight': 6, 'TorsoTopLeft': 7, 'BackCenter': 8, 'ElbowRight': 9, 'ElbowLeft': 10, 'HipRight': 11, 'WristRight': 12, 'WristLeft': 13, 'KneeLeft': 14, 'AnkleRight': 16, 'AnkleLeft': 15}
        if source == 'UR1':
            dirname = os.path.join(self.rootdir, 'POST-TREATED', '11-06-2014', 'HIKOB')
        elif source == 'CITI':
            dirname = os.path.join(self.rootdir, 'POST-TREATED', '11-06-2014', 'HIKOB', 'CITI')
    if day == 12:
        self.dHKB = {'AP1': 1, 'AP2': 2, 'AP3': 3, 'AP4': 4, 'Jihad:TorsoTopRight': 10, 'Jihad:TorsoTopLeft': 9, 'Jihad:BackCenter': 11, 'JihadShoulderLeft': 12,
                     'Nicolas:TorsoTopRight': 6, 'Nicolas:TorsoTopLeft': 5, 'Nicolas:BackCenter': 7, 'Nicolas:ShoulderLeft': 8,
                     'Eric:TooTopRight': 15, 'Eric:TorsoTopLeft': 13, 'Eric:BackCenter': 16, 'Eric:ShoulderLeft': 14}
        # if source=='UR1':
        dirname = os.path.join(self.rootdir, 'POST-TREATED', '12-06-2014', 'HIKOB')

    files = os.listdir(dirname)

    self.idHKB = {}
    for k in self.dHKB:
        self.idHKB[self.dHKB[k]] = k

    if serie != '':
        self._filehkb = [x for x in files if 'S'+str(serie) in x][0]
        tt = self._filehkb.split('_')
        if source == 'UR1':
            self.scenario = tt[0].replace('Sc', '')
            self.run = tt[2].replace('R', '')
            self.typ = tt[3]
            self.video = tt[4].replace('.mat', '')
        elif source == 'CITI':
            self.scenario = tt[0].replace('Sc', '')+tt[1]
            self.run = tt[3].replace('r', '')
            self.typ = tt[4]
            if self.typ == 'HKB':
                self.typ = 'HKBS'
            self.video = tt[5].replace('.mat', '')
    else:
        # fix: the membership tests were reversed ('x in Sc..' instead of
        # 'Sc.. in x'), which selected characters of a filename instead
        # of filenames
        filesc = [x for x in files if 'Sc'+scenario in x]
        if source == 'UR1':
            self._filehkb = [x for x in filesc if 'R'+str(run) in x][0]
        else:
            self._filehkb = [x for x in filesc if 'r'+str(run) in x][0]

    data = io.loadmat(os.path.join(dirname, self._filehkb))
    if source == 'UR1':
        self.rssi = data['rssi']
        self.thkb = data['t']
    else:
        self.rssi = data['val']
        # CITI files have no time vector : rebuild it from the sample rate
        self.thkb = np.arange(np.shape(self.rssi)[2])*25.832e-3

    def topandas():
        # build self.hkb : one column per link, keeping only the first
        # of each reciprocal (k-l / l-k) pair
        try:
            self.hkb = pd.DataFrame(index=self.thkb[0])
        except:
            self.hkb = pd.DataFrame(index=self.thkb)
        for k in self.idHKB:
            for l in self.idHKB:
                if k != l:
                    col = self.idHKB[k]+'-'+self.idHKB[l]
                    rcol = self.idHKB[l]+'-'+self.idHKB[k]
                    if rcol not in self.hkb.columns:
                        rssi = self.rssi[k-1, l-1, :]
                        self.hkb[col] = rssi

    topandas()
    # zero RSSI marks missing samples
    self.hkb = self.hkb[self.hkb != 0]
def compute_visibility(self, techno='HKB', square_mda=True, all_links=True):
    """ determine visibility of links for a given techno

    Parameters
    ----------
    techno : string
        select the given radio technology of the nodes to determine
        the visibility matrix ('HKB' or 'TCR')
    square_mda : boolean
        select output format
        True : (device x device x timestamp)
        False : (link x timestamp)
    all_links : bool
        compute all links or just those for which data is available

    Returns
    -------
    if square_mda = True
        intersection : (ndevice x nbdevice x nb_timestamp)
            matrix of intersection (1 if link is cut 0 otherwise)
        links : (nbdevice)
            name of the links
    if square_mda = False
        intersection : (nblink x nb_timestamp)
            matrix of intersection (1 if link is cut 0 otherwise)
        links : (nblink x 2)
            name of the links

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> import matplotlib.pyplot as plt
    >>> C=CorSer(serie=14,day=12)
    >>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
    >>> inter.shape
    (15, 15, 12473)
    >>> C.imshowvisibility_i(inter,links)
    """
    if techno == 'TCR':
        if not ((self.typ == 'TCR') or (self.typ == 'FULL')):
            raise AttributeError('Serie has not data for techno: ' + techno)
        hname = self.tcr.keys()
        dnode = copy.copy(self.dTCR)
        dnode.pop('COORD')
        prefix = 'TCR:'
    elif techno == 'HKB':
        if not ((self.typ == 'HKBS') or (self.typ == 'FULL')):
            raise AttributeError('Serie has not data for techno: ' + techno)
        hname = self.hkb.keys()
        dnode = self.dHKB
        prefix = 'HKB:'

    # get link list
    if all_links:
        import itertools
        links = [l for l in itertools.combinations(dnode.keys(), 2)]
    else:
        links = [n.split('-') for n in hname]
        links = [l for l in links if ('COORD' not in l[0]) and ('COORD' not in l[1])]

    # mapping between device name in self.hkb and on body/in self.devdf
    dev_bid = [self.devmapper(k, techno=techno)[2] for k in dnode.keys()]

    nb_totaldev = len(np.unique(self.devdf['id']))

    # extract all dev position on body
    # Mpdev : (3 x (nb devices and nb infra nodes) x nb_timestamp)
    # fix: Python 3 requires an integer dimension (// not /) for np.empty
    Mpdev = np.empty((3, len(dev_bid), len(self.devdf.index) // nb_totaldev))

    # get all positions
    for ik, i in enumerate(dev_bid):
        if i in self.din:
            # infrastructure nodes are static : broadcast over time
            Mpdev[:, ik, :] = self.din[i]['p'][:, np.newaxis]
        else:
            pts = self.devdf[self.devdf['id'] == i][['x', 'y', 'z']].values.T
            if np.prod(pts.shape) != 0:
                Mpdev[:, ik, :] = pts

    # create A and B from links
    nA = np.array([prefix + str(dnode[l[0]]) for l in links])
    nB = np.array([prefix + str(dnode[l[1]]) for l in links])

    dma = dict(zip(dev_bid, range(len(dev_bid))))
    mnA = [dma[n] for n in nA]
    mnB = [dma[n] for n in nB]

    A = Mpdev[:, mnA]
    B = Mpdev[:, mnB]

    # intersect2D matrix is
    # d_0 : nb links
    # d_1 : (cylinder number) * nb body + 1 * nb cylinder_object
    # d_2 : nb frame
    intersect2D = np.zeros((len(links),
                            11*len(self.subject) + len(self.interf),
                            Mpdev.shape[-1]))
    # usub : index axes subject
    usub_start = 0
    usub_stop = 0
    # C-D correspond to bodies segments
    # C or D : 3 x 11 body segments x time
    # radius of cylinders are (nb_cylinder x time)
    for b in self.B:
        print('processing shadowing from ', b)
        # if b is a body not a cylinder
        if not 'Cylindre' in b:
            uta = self.B[b].sl[:, 0].astype('int')
            uhe = self.B[b].sl[:, 1].astype('int')
            rad = self.B[b].sl[:, 2]

            C = self.B[b].d[:, uta, :]
            D = self.B[b].d[:, uhe, :]
            # accumulate segment radii across bodies ; the first body
            # creates the array (NameError caught by the except)
            try:
                radius = np.concatenate((radius, rad[:, np.newaxis]*np.ones((1, C.shape[2]))), axis=0)
            except:
                radius = rad[:, np.newaxis]*np.ones((1, C.shape[2]))
            usub_start = usub_stop
            usub_stop = usub_stop + 11
        else:
            cyl = self.B[b]
            # top of cylinder
            top = cyl.d[:, cyl.topnode, :]
            # bottom of cylinder = top with z = 0
            bottom = copy.copy(cyl.d[:, cyl.topnode, :])
            bottom[2, :] = 0.02
            # top 3 x 1 X time
            C = top[:, np.newaxis, :]
            D = bottom[:, np.newaxis, :]
            radius = np.concatenate((radius, cyl.radius[np.newaxis]))
            usub_start = usub_stop
            usub_stop = usub_stop + 1
        f, g, X, Y, alpha, beta, dmin = seg.segdist(A, B, C, D, hard=True)
        intersect2D[:, usub_start:usub_stop, :] = g

    # a link is cut by a segment when the link-to-segment distance is
    # below the segment radius (1 cm margin)
    uinter1 = np.where((intersect2D <= (radius - 0.01)))
    uinter0 = np.where((intersect2D > (radius - 0.01)))
    intersect2D[uinter1[0], uinter1[1], uinter1[2]] = 1
    intersect2D[uinter0[0], uinter0[1], uinter0[2]] = 0
    # integrate the effect of all bodies by summing on axis 1
    intersect = np.sum(intersect2D, axis=1) > 0

    if square_mda:
        dev = np.unique(links)
        ddev = dict(zip(dev, range(len(dev))))
        # fix: np.array(map(...)) yields a useless 0-d object array in
        # Python 3 ; build the index pairs with a list comprehension
        lmap = np.array([(ddev[x[0]], ddev[x[1]]) for x in links])
        M = np.nan*np.ones((len(dev), len(dev), intersect.shape[-1]))
        for i in range(len(intersect)):
            id1 = lmap[i][0]
            id2 = lmap[i][1]
            M[id1, id2, :] = intersect[i, :]
            M[id2, id1, :] = intersect[i, :]
        intersect = M
        links = dev

    self._visilinks = links
    self._visiintersect = intersect

    return intersect, links
def imshowvisibility(self, techno='HKB', t=0, **kwargs):
    """ imshow of the visibility matrix at a given time

    Parameters
    ----------
    techno : (HKB|TCR)
    t : float
        time in second

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> import matplotlib.pyplot as plt
    >>> C=CorSer(serie=6,day=12)
    >>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
    >>> i,l=C.imshowvisibility_i(inter,links)

    See Also
    --------
    pylayers.measures.CorSer.compute_visibility()
    """
    kwargs.setdefault('grid', True)

    if 'fig' in kwargs:
        fig = kwargs.pop('fig')
    else:
        fig = plt.figure()
    if 'ax' in kwargs:
        ax = kwargs.pop('ax')
    else:
        ax = fig.add_subplot(111)

    # the visibility matrix is computed once and cached on the instance
    if '_visiintersect' not in dir(self):
        print('Visibility computed only once')
        self.compute_visibility(techno=techno)

    links = self._visilinks
    inter = self._visiintersect

    # index of the last mocap frame with timestamp <= t
    kt = np.where(self.tmocap <= t)[0][-1]

    nlink = len(links)
    plt.xticks(np.arange(0, nlink, 1.0))
    plt.yticks(np.arange(0, nlink, 1.0))
    ax.set_xlim([-0.5, nlink - 0.5])
    ax.set_ylim([nlink - 0.5, -0.5])
    ax.xaxis.set_ticks_position('top')
    xnames = plt.setp(ax, xticklabels=links)
    ynames = plt.setp(ax, yticklabels=links)
    plt.setp(xnames, rotation=90, fontsize=8)
    plt.setp(ynames, rotation=0, fontsize=8)

    ax.imshow(inter[:, :, kt], interpolation='nearest')
    if kwargs['grid']:
        ax.grid()
    return fig, ax
def _show3i(self, t=0, **kwargs):
    """ show3 interactive

    A small matplotlib 'Jog' window (slider + -10/-1/+1/+10 buttons)
    drives the mayavi scene frame by frame ; 'q' closes both windows.

    Parameters
    ----------
    t : float
        initial time (s) ; the displayed frame is the last mocap
        frame with timestamp <= t
    """
    fig = plt.figure(num='Jog', figsize=(5, 1.5))

    # set time to -10 is a trick to make appear interferers cylinder
    # because __refreshshow3i only update the data of the cylinder.
    # if cylinder is not present in the first _show3, they are not
    # displayed later.
    time = self.B[self.subject[0]].time
    fId = np.where(time <= t)[0][-1]

    kwargs['bodytime'] = [self.tmocap[-10]]
    kwargs['returnfig'] = True
    kwargs['tagtraj'] = False
    mayafig = self._show3(**kwargs)
    self.__refreshshow3i(fId)
    # ax.grid()

    # matplotlib Widgets
    slax = plt.axes([0.1, 0.5, 0.8, 0.3])
    slax.set_title('t=' + str(time[fId]), loc='left')
    sliderx = Slider(slax, "time", 0, len(time),
                     valinit=fId, color='#AAAAAA')

    def update_x(val):
        # jump the mayavi scene to the slider frame
        value = int(sliderx.val)
        self.__refreshshow3i(val)
        slax.set_title('t=' + str(time[val]), loc='left')
        fig.canvas.draw_idle()
    sliderx.on_changed(update_x)

    def plus(event):
        sliderx.set_val(sliderx.val + 1)
        fig.canvas.draw_idle()

    def minus(event):
        sliderx.set_val(sliderx.val - 1)
        fig.canvas.draw_idle()

    def pplus(event):
        sliderx.set_val(sliderx.val + 10)
        fig.canvas.draw_idle()

    def mminus(event):
        sliderx.set_val(sliderx.val - 10)
        fig.canvas.draw_idle()

    # QUIT by pressing 'q'
    def press(event):
        if event.key == 'q':
            mlab.close(mayafig)
            plt.close(fig)
    fig.canvas.mpl_connect('key_press_event', press)

    # -1 frame axes
    axm = plt.axes([0.2, 0.05, 0.1, 0.15])
    bm = Button(axm, '-1')
    bm.on_clicked(minus)
    # +1 frame axes
    axp = plt.axes([0.7, 0.05, 0.1, 0.15])
    bp = Button(axp, '+1')
    bp.on_clicked(plus)
    # -10 frames axes
    axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
    bmm = Button(axmm, '-10')
    bmm.on_clicked(mminus)
    # +10 frames axes
    axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
    bpp = Button(axpp, '+10')
    bpp.on_clicked(pplus)
    plt.show()
def _show3idemo(self, t=0, **kwargs):
    """ show3 interactive demo

    Same jog window as _show3i plus a second figure showing the RSSI
    of the selected node towards AP1..AP4 with a red time cursor.

    Parameters
    ----------
    t : float
        initial time (s)
    nodename : string (kwargs, default 'TorsoTopLeft')
        on-body node whose links to AP1..AP4 are plotted
    """
    defaults = {'nodename': 'TorsoTopLeft'}
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]

    fig = plt.figure(num='Jog', figsize=(5, 1.5))

    # set time to -10 is a trick to make appear interferers cylinder
    # because __refreshshow3i only update the data of the cylinder.
    # if cylinder is not present in the first _show3, they are not
    # displayed later.
    time = self.B[self.subject[0]].time
    fId = np.where(time <= t)[0][-1]

    kwargs['bodytime'] = [self.tmocap[-10]]
    kwargs['returnfig'] = True
    kwargs['tagtraj'] = False
    mayafig = self._show3(**kwargs)
    self.__refreshshow3i(fId)
    # ax.grid()

    # matplotlib Widgets
    slax = plt.axes([0.1, 0.5, 0.8, 0.3])
    slax.set_title('t=' + str(time[fId]), loc='left')
    sliderx = Slider(slax, "time", 0, len(time),
                     valinit=fId, color='#AAAAAA')

    # NOTE: vline0..vline3 and fig2 are created further below ; the
    # closure only resolves them at call time, once they exist
    def update_x(val):
        value = int(sliderx.val)
        self.__refreshshow3i(val)
        slax.set_title('t=' + str(time[val]), loc='left')
        vline0.set_data(([time[value], time[value]], [0, 1]))
        vline1.set_data(([time[value], time[value]], [0, 1]))
        vline2.set_data(([time[value], time[value]], [0, 1]))
        vline3.set_data(([time[value], time[value]], [0, 1]))
        fig.canvas.draw_idle()
        fig2.canvas.draw_idle()
    sliderx.on_changed(update_x)

    def plus(event):
        sliderx.set_val(sliderx.val + 1)
        fig.canvas.draw_idle()

    def minus(event):
        sliderx.set_val(sliderx.val - 1)
        fig.canvas.draw_idle()

    def pplus(event):
        sliderx.set_val(sliderx.val + 10)
        fig.canvas.draw_idle()

    def mminus(event):
        sliderx.set_val(sliderx.val - 10)
        fig.canvas.draw_idle()

    # QUIT by pressing 'q'
    def press(event):
        if event.key == 'q':
            mlab.close(mayafig)
            plt.close(fig)
            plt.close(fig2)
    fig.canvas.mpl_connect('key_press_event', press)

    # -1 frame axes
    axm = plt.axes([0.2, 0.05, 0.1, 0.15])
    bm = Button(axm, '-1')
    bm.on_clicked(minus)
    # +1 frame axes
    axp = plt.axes([0.7, 0.05, 0.1, 0.15])
    bp = Button(axp, '+1')
    bp.on_clicked(plus)
    # -10 frames axes
    axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
    bmm = Button(axmm, '-10')
    bmm.on_clicked(mminus)
    # +10 frames axes
    axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
    bpp = Button(axpp, '+10')
    bpp.on_clicked(pplus)

    # RSSI of the selected node towards the 4 access points
    fig2, ax2 = plt.subplots(4, 1, figsize=(12, 6))
    ax2 = ax2.ravel()
    df0 = self.getlink(kwargs['nodename'], 'AP1', techno='HKB')
    df0.plot(ax=ax2[0], fig=fig2)
    df1 = self.getlink(kwargs['nodename'], 'AP2', techno='HKB')
    df1.plot(ax=ax2[1], fig=fig2)
    df2 = self.getlink(kwargs['nodename'], 'AP3', techno='HKB')
    df2.plot(ax=ax2[2], fig=fig2)
    df3 = self.getlink(kwargs['nodename'], 'AP4', techno='HKB')
    df3.plot(ax=ax2[3], fig=fig2)
    ax2[0].set_ylabel('AP1')
    ax2[1].set_ylabel('AP2')
    ax2[2].set_ylabel('AP3')
    ax2[3].set_ylabel('AP4')
    # red time cursors driven by the slider callback above
    vline0 = ax2[0].axvline(x=time[fId], color='red')
    vline1 = ax2[1].axvline(x=time[fId], color='red')
    vline2 = ax2[2].axvline(x=time[fId], color='red')
    vline3 = ax2[3].axvline(x=time[fId], color='red')
    fig2.suptitle(kwargs['nodename'])
    plt.show()
    def __refreshshow3i(self,kt):
        """ update the mayavi scene for interactive mode

        Parameters
        ----------
        kt : int
            mocap frame index used to reposition bodies and devices

        Notes
        -----
        USED in imshowvisibility_i. Updates in place the mayavi sources
        created by a previous _show3 call; no new object is created,
        except the text3d names, which cannot be updated in place and are
        therefore popped and rebuilt.
        """
        t=self.tmocap[kt]
        for ib,b in enumerate(self.B):
            self.B[b].settopos(t=t,cs=True)

            try:
                # body : update skeleton segment endpoints (_pta/_phe)
                X=np.hstack((self.B[b]._pta,self.B[b]._phe))
                self.B[b]._mayapts.mlab_source.set(x=X[0,:], y=X[1,:], z=X[2,:])
                # device : move on-body devices to their frame-kt positions
                udev = [self.B[b].dev[i]['uc3d'][0] for i in self.B[b].dev]
                Xd=self.B[b]._f[kt,udev,:].T
                self.B[b]._mayadev.mlab_source.set(x=Xd[0,:], y=Xd[1,:], z=Xd[2,:])
                # name : re-create the label at the topmost body point
                uupper = np.where(X[2]==X[2].max())[0]
                self.B[b]._mayaname.actors.pop()
                self.B[b]._mayaname = mlab.text3d(X[0,uupper][0],X[1,uupper][0],X[2,uupper][0],self.B[b].name,scale=0.05,color=(1,0,0))
                # s = np.hstack((cylrad,cylrad))
            except:
                # NOTE(review): bare except — presumably reached when the
                # body lacks _pta/_phe/dev (cylinder-only interferer), but
                # any other error is silently swallowed too; confirm intent.
                # cylinder
                X=np.vstack((self.B[b].top,self.B[b].bottom))
                self.B[b]._mayapts.mlab_source.set(x=X[:,0], y=X[:,1], z=X[:,2])
                # name
                self.B[b]._mayaname.actors.pop()
                self.B[b]._mayaname = mlab.text3d(self.B[b].top[0],self.B[b].top[1],self.B[b].top[2],self.B[b].name,scale=0.05,color=(1,0,0))
                # velocity vector of the cylinder at the current topos frame
                #vdict
                V = self.B[b].traj[['vx','vy','vz']].iloc[self.B[b].toposFrameId].values
                self.B[b]._mayavdic.mlab_source.set(x= self.B[b].top[0],y=self.B[b].top[1],z=self.B[b].top[2],u=V[ 0],v=V[ 1],w=V[ 2])
def imshowvisibility_i(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda interactive
Parameters
----------
inter : (nb link x nb link x timestamps)
links : (nblinks)
time : intial time (s)
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
>>> inter,links=C.visimda(techno='TCR',square_mda=True)
>>> i,l=C.imshowvisibility_i(inter,links)
"""
# if in_ipynb():
# notebook = False #program launch in ipyhon notebook
# from IPython.html import widgets # Widget definitions
# from IPython.display import display, clear_output# Used to display widgets in the notebook
# else :
# notebook = False
if not '_visiintersect' in dir(self):
print( 'Visibility is computed only once, Please wait\n')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
time=self.tmocap
fId = np.where(time<=t)[0][-1]
vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
poly = plt.Polygon(vertc)
pp = ax.add_patch(poly)
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
l=ax.imshow(inter[:,:,fId],interpolation='nearest')
#set time to -10 is a trick to make appear interferers cylinder
#because __refreshshow3i only update the data of the cylinder.
# if cylinder is not present in the first _show3, they are not displayed
# later.
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.15, 0.8, 0.05])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, inter.shape[-1],
valinit=fId, color='#AAAAAA')
# else :
# int_range = widgets.IntSliderWidget(min=0,max=inter.shape[-1],step=1,value=fId)
# display(int_range)
def update_x(val):
value = int(sliderx.val)
sliderx.valtext.set_text('{}'.format(value))
l.set_data(inter[:,:,value])
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# else:
# def update_x(name,value):
# clear_output(wait=True)
# display(plt.gcf())
# plt.imshow(inter[:,:,value],interpolation='nearest')
# # l.set_data(inter[:,:,value])
# kwargs['bodytime']=[self.tmocap[value]]
# self._show3(**kwargs)
# myu.inotshow('fig1',width=200,height=200,magnification=1)
# # slax.set_title('t='+str(time[val]),loc='left')
# # fig.canvas.draw_idle()
# int_range.on_trait_change(update_x, 'value')
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
# #QUIT by pressing 'q'
# def press(event):
# if event.key == 'q':
# mlab.close(mayafig)
# plt.close(fig)
# fig.canvas.mpl_connect('key_press_event', press)
# if not notebook:
#-1 frame axes
axm = plt.axes([0.3, 0.05, 0.1, 0.075])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.075])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.9, 0.05, 0.1, 0.075])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _distancematrix(self):
"""Compute the distance matrix between the nodes
self.dist : (nb frame x nb_node x nb_node)
self.dist_nodesmap : list of used nodes (useful to make the association ;) )
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
bn= []
for b in B:
if 'dev' in dir(B[b]):
tdev=[]
for k in B[b].dev:
bn.append(k)
tdev.append(B[b].dev[k]['uc3d'][0])
tdev=np.array(tdev)
try:
pnb = np.concatenate((pnb,B[b]._f[:,tdev,:]),axis=1)
except:
pnb = B[b]._f[:,tdev,:]
ln = []
uin = []
# infrastructure nodes
if ('HK' in self.typ) or ('FULL' in self.typ):
uin.extend(['HKB:1','HKB:2','HKB:3','HKB:4'])
if ('TCR' in self.typ) or ('FULL' in self.typ):
# TCR:31 is the coordinator (1.7719,-3.26)
uin.extend(['TCR:32','TCR:24','TCR:27','TCR:28','TCR:31'])
if self.day == 12:
if ('BS' in self.typ) or ('FULL' in self.typ):
uin.extend(['BS:74','BS:157'])
ln = uin + bn
pin = np.array([self.din[d]['p'] for d in uin])
pin2 = np.empty((pnb.shape[0],pin.shape[0],pin.shape[1]))
pin2[:,:,:] = pin
p = np.concatenate((pin2,pnb),axis=1)
self.points = p
self.dist = np.sqrt(np.sum((p[:,:,np.newaxis,:]-p[:,np.newaxis,:,:])**2,axis=3))
self.dist_nodesmap = ln
    def _computedistdf(self):
        """Compute the distance dataframe from distance matrix

        Builds self.distdf, a DataFrame indexed on mocap time whose
        columns are the link names, from self.dist / self.dist_nodesmap
        (filled by _distancematrix).

        Notes
        -----
        NOTE(review): `df` is only created in the HKB branch; a serie with
        BS but neither HK nor FULL in self.typ would hit `df.join` with
        `df` unbound — presumably such series do not occur; confirm.
        """
        # HIKOB
        if ('HK' in self.typ) or ('FULL' in self.typ):
            # map link half-names to the node names used in dist_nodesmap
            devmap = {self.devmapper(k,'hkb')[0]:self.devmapper(k,'hkb')[2] for k in self.dHKB}
            # (nb_link x 2) indices of both link ends in the distance matrix
            udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.hkb.keys()])
            iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
            df = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)

        # BE Spoon
        if ('BS' in self.typ) or ('FULL' in self.typ):
            devmap = {self.devmapper(k,'BS')[0]:self.devmapper(k,'BS')[2] for k in self.dBS}
            udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.bespo.keys()])
            iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
            dfb = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
            df = df.join(dfb)
            del dfb


        if ('TCR' in self.typ) or ('FULL' in self.typ):
            devmap = {self.devmapper(k,'tcr')[0]:self.devmapper(k,'tcr')[2] for k in self.dTCR}
            udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),
                              self.dist_nodesmap.index(devmap[k.split('-')[1]])]
                              for k in self.tcr.keys() ])
            # for k in self.tcr.keys() if not 'COORD' in k])
            iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
            dft = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
            if ('FULL' in self.typ):
                df = df.join(dft)
            else :
                df = dft
            del dft

        self.distdf=df
# def accessdm(self,a,b,techno=''):
# """ access to the distance matrix
# give name|id of node a and b and a given techno. retrun Groung truth
# distance between the 2 nodes
# # """
# # a,ia,bia,subja=self.devmapper(a,techno)
# # b,ib,bib,subjb=self.devmapper(b,techno)
# if 'HKB' in techno :
# if isinstance(a,str):
# ia = self.dHKB[a]
# else:
# ia = a
# a = self.idHKB[a]
# if isinstance(b,str):
# ib = self.dHKB[b]
# else:
# ib = b
# b = self.idHKB[b]
# elif 'TCR' in techno :
# if isinstance(a,str):
# ia = self.dTCR[a]
# else:
# ia = a
# a = self.idTCR[a]
# if isinstance(b,str):
# ib = self.dTCR[b]
# else:
# ib = b
# b = self.idTCR[b]
# else :
# raise AttributeError('please give only 1 techno or radio node')
# ka = techno+':'+str(ia)
# kb = techno+':'+str(ib)
# ua = self.dist_nodesmap.index(ka)
# ub = self.dist_nodesmap.index(kb)
# return(ua,ub)
# c3ds = self.B._f.shape
# if 'Full' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(self.tcr)+len(bs),3))
# elif 'HK' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(bs),3))
# elif 'TCR' in self.typ:
# pdev= np.empty((c3ds[0],len(self.tcr),3))
# else:
# raise AttributeError('invalid self.typ')
# self.B.network()
# DB = self.B.D2
# ludev = np.array([[i,self.B.dev[i]['uc3d'][0]] for i in self.B.dev])
# for i in ludev:
# pdev[:,eval(i[0])-1,:] = self.B._f[:,i[1],:]
# # self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def vlc(self):
""" play video of the associated serie
"""
videofile = os.path.join(self.rootdir,'POST-TREATED', str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = map(lambda x : self._filename in x,ldir)
try:
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
os.system('vlc '+filename +'&' )
except:
raise AttributeError('file '+ self._filename + ' not found')
def snapshot(self,t0=0,offset=15.5,title=True,save=False,fig=[],ax=[],figsize=(10,10)):
""" single snapshot plot
Parameters
----------
t0: float
offset : float
title : boolean
save : boolean
fig
ax
figsize : tuple
"""
if fig ==[]:
fig=plt.figure(figsize=figsize)
if ax == []:
ax = fig.add_subplot(111)
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = map(lambda x : self._filename in x,ldir)
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
I0 = img_as_ubyte(F0)
ax.imshow(F0)
if title:
ax.set_title('t = '+str(t0)+'s')
if save :
plt.savefig(self._filename +'_'+str(t0) + '_snap.png',format='png')
return fig,ax
def snapshots(self,t0=0,t1=10,offset=15.5):
""" take snapshots
Parameters
----------
t0 : float
t1 : float
"""
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
F1 = vc.get_frame(t1+offset)
I0 = img_as_ubyte(F0)
I1 = img_as_ubyte(F1)
plt.subplot(121)
plt.imshow(F0)
plt.title('t = '+str(t0)+'s')
plt.subplot(122)
plt.imshow(F1)
plt.title('t = '+str(t1)+'s')
    def _show3(self,**kwargs):
        """ mayavi 3d show of scenario

        Parameters
        ----------

        L : boolean
            display layout (True)
        body :boolean
            display bodytime(True)
        bodyname : boolean
            display body name
        bodytime: list
            list of time instant where body topos has to be shown
        devsize : float
            device on body size (100)
        devlist : list
            list of device name to show on body
        pattern : boolean
            display devices pattern
        trajectory : boolean
            display trajectory  (True)
        tagtraj : boolean
            tag on trajectory at the 'bodytime' instants (True)
        tagname : list
            name of the tagtrajs
        tagpoffset : ndarray
            offset of the tag positions (nb_of_tags x 3)
        fontsizetag : float
            size of the tag names
        inodes : boolean
            display infrastructure nodes
        inname : boolean
            display infra strucutre node name
        innamesize : float,
            size of name of infrastructure nodes (0.1)
        incolor: str
            color of infrastructure nodes ('r')
        insize
            size of infrastructure nodes (0.1)
        camera : boolean
            display Vicon camera position (True)
        cameracolor : str
            color of camera nodes ('b')
        camerasize  : float
            size of camera nodes (0.1)

        Examples
        --------

        >>> S = Corser(6)
        >>> S._show3()

        """
        defaults = { 'L':True,
                     'body':True,
                     'bodyname':True,
                     'subject':[],
                     'interf':True,
                     'trajectory' :False,
                     'trajectory_list' :[],
                     'devsize':100,
                     'devlist':[],
                     'pattern':False,
                     'inodes' : True,
                     'inname' : True,
                     'innamesize' : 0.1,
                     'incolor' : 'r',
                     'insize' : 0.1,
                     'camera':True,
                     'cameracolor' :'k',
                     'camerasize' :0.1,
                     'bodytime':[],
                     'tagtraj':True,
                     'tagname':[],
                     'tagpoffset':[],
                     'fontsizetag':0.1,
                     'trajectory_color_range':True,
                     'trajectory_linewidth':0.01
                   }

        # fill missing kwargs with defaults
        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        # resolve color names to normalized rgb tuples
        cold = pyu.coldict()
        camhex = cold[kwargs['cameracolor']]
        cam_color = tuple(pyu.rgb(camhex)/255.)
        inhex = cold[kwargs['incolor']]
        in_color = tuple(pyu.rgb(inhex)/255.)

        if kwargs['subject'] == []:
            subject = self.subject
        else:
            subject = kwargs['subject']

        if kwargs['L']:
            self.L._show3(opacity=0.5)
        # infrastructure nodes as (name, info-dict) pairs
        # NOTE(review): indexed with v[i] below — assumes Python 2 where
        # dict.items() returns a list; confirm before porting to Python 3
        v = self.din.items()
        if kwargs['inodes']:
            X= np.array([v[i][1]['p'] for i in range(len(v))])
            mlab.points3d(X[:,0],X[:,1], X[:,2],scale_factor=kwargs['insize'],color=in_color)
        if kwargs['pattern']:
            for i in range(len(v)):
                # evaluate the antenna gain once if not done yet
                if not hasattr(self.din[v[i][0]]['ant'],'SqG'):
                    self.din[v[i][0]]['ant'].eval()
                self.din[v[i][0]]['ant']._show3(po=v[i][1]['p'],
                                                T=self.din[v[i][0]]['T'],
                                                ilog=False,
                                                minr=0.01,
                                                maxr=0.2,
                                                newfig=False,
                                                title=False,
                                                colorbar=False,
                                                )

        if kwargs['inname']:
            [mlab.text3d(v[i][1]['p'][0],
                         v[i][1]['p'][1],
                         v[i][1]['p'][2]+v[i][1]['s3off'],
                         v[i][0],
                         scale=kwargs['innamesize'],color=in_color) for i in range(len(v))]

        if kwargs['body']:
            if kwargs['bodytime']==[]:
                # default : 5 evenly spaced instants over the serie
                time =np.linspace(0,self.B[subject[0]].time[-1],5).astype(int)
                # time=range(10,100,20)
            else :
                time=kwargs['bodytime']
            for ki, i in enumerate(time):
                for ib,b in enumerate(subject):
                    self.B[b].settopos(t=i,cs=True)
                    self.B[b]._show3(dev=True,
                                     name = kwargs['bodyname'],
                                     devlist=kwargs['devlist'],
                                     devsize=kwargs['devsize'],
                                     tube_sides=12,
                                     pattern=kwargs['pattern'])
                    if kwargs['tagtraj']:
                        # tag label position on the trajectory
                        X=self.B[b].traj[['x','y','z']].values[self.B[b].toposFrameId]
                        if kwargs['tagpoffset']==[]:
                            X[2]=X[2]+0.2
                        else :
                            X=X+kwargs['tagpoffset'][ki]
                        if kwargs['tagname']==[]:
                            name = 't='+str(i)+'s'
                        else :
                            name = str(kwargs['tagname'][ki])
                        mlab.text3d(X[0],X[1],X[2],name,scale=kwargs['fontsizetag'])
                if kwargs['interf']:
                    for ib,b in enumerate(self.interf):
                        self.B[b].settopos(t=i,cs=True)
                        self.B[b]._show3(name=kwargs['bodyname'],tube_sides=12)

        if kwargs['trajectory']:
            if kwargs['trajectory_list']==[]:
                tr_subject = subject
            else:
                tr_subject = kwargs['trajectory_list']
            for b in tr_subject:
                self.B[b].traj._show3(color_range=kwargs['trajectory_color_range'],
                                      linewidth=kwargs['trajectory_linewidth'])

        if kwargs['camera'] :
            mlab.points3d(self.cam[:,0],self.cam[:,1], self.cam[:,2],scale_factor=kwargs['camerasize'],color=cam_color)
        # NOTE(review): two successive mlab.view calls — only the second
        # viewpoint is effective; the first uses the bare `array` name
        # (star import) while the second uses np.array
        mlab.view(-111.44127634143871,
                  60.40674368088245,
                  24.492297713984197,
                  array([-0.07235499,  0.04868631, -0.00314969]))
        mlab.view(-128.66519195313163,
                  50.708933839573511,
                  24.492297713984247,
                  np.array([-0.07235499,  0.04868631, -0.00314969]))
def anim(self):
self._show3(body=False,inname=False,trajectory=False)
[self.B[b].anim() for b in self.B]
mlab.view(-43.413544538477254,
74.048193730704611,
11.425837641867618,
array([ 0.48298163, 0.67806043, 0.0987967 ]))
    def imshow(self,time=100,kind='time'):
        """ DEPRECATED

        Display the 16x16 HKB RSSI matrix and its antisymmetric part
        L - L^T side by side.

        Parameters
        ----------

        time : float
            time (s) of the displayed snapshot (kind='time' only)
        kind : string
            'time'|'mean'|'std'

        Returns
        -------
        fig,(ax1,ax2)
        """
        fig = plt.figure(figsize=(10,10))
        # antisymmetric part of the RSSI matrix
        self.D = self.rssi-self.rssi.swapaxes(0,1)
        # self.thkb may be wrapped in an extra first dimension
        try:
            timeindex = np.where(self.thkb[0]-time>0)[0][0]
        except:
            timeindex = np.where(self.thkb-time>0)[0][0]
        if kind=='time':
            dt1 = self.rssi[:,:,timeindex]
            dt2 = self.D[:,:,timeindex]
        if kind == 'mean':
            # masked mean/std ignore NaN/inf samples
            dt1 = ma.masked_invalid(self.rssi).mean(axis=2)
            dt2 = ma.masked_invalid(self.D).mean(axis=2)
        if kind == 'std':
            dt1 = ma.masked_invalid(self.rssi).std(axis=2)
            dt2 = ma.masked_invalid(self.D).std(axis=2)
        ax1 = fig.add_subplot(121)
        #img1 = ax1.imshow(self.rssi[:,:,timeindex],interpolation='nearest',origin='lower')
        img1 = ax1.imshow(dt1,interpolation='nearest')
        labels = [ self.idHKB[x] for x in range(1,17)]
        plt.xticks(range(16),labels,rotation=80,fontsize=14)
        plt.yticks(range(16),labels,fontsize=14)
        if kind=='time':
            plt.title('t = '+str(time)+ ' s')
        if kind=='mean':
            plt.title(u'$mean(\mathbf{L})$')
        if kind=='std':
            plt.title(u'$std(\mathbf{L})$')
        divider = make_axes_locatable(ax1)
        cax1 = divider.append_axes("right", size="5%", pad=0.05)
        clb1 = fig.colorbar(img1,cax1)
        clb1.set_label('level dBm',fontsize=14)
        ax2 = fig.add_subplot(122)
        #img2 = ax2.imshow(self.D[:,:,timeindex],interpolation='nearest',origin='lower')
        img2 = ax2.imshow(dt2,interpolation='nearest')
        plt.title(u'$\mathbf{L}-\mathbf{L}^T$')
        divider = make_axes_locatable(ax2)
        plt.xticks(range(16),labels,rotation=80,fontsize=14)
        plt.yticks(range(16),labels,fontsize=14)
        cax2 = divider.append_axes("right", size="5%", pad=0.05)
        clb2 = fig.colorbar(img2,cax2)
        clb2.set_label('level dBm',fontsize=14)
        plt.tight_layout()
        plt.show()
        #for k in range(1,17):
        #    for l in range(1,17):
        #        self.dHKB[(k,l)]=iHKB[k]+' - '+iHKB[l]
        #        cpt = cpt + 1
        return fig,(ax1,ax2)
def lk2nd(self,lk):
""" transcode a lk from Id to real name
Parameters
----------
lk : string
Examples
--------
>>> C=Corser(6)
>>> lk = 'HKB:15-HKB:7'
>>> C.lk2nd(lk)
"""
u = lk.replace('HKB:','').split('-')
v = [ self.idHKB[int(x)] for x in u ]
return(v)
def _load_offset_dict(self):
""" load offset_dictionnary.bin
Returns
-------
d : dict
{'Sc20_S5_R1_HKBS': {'hkb_index': -148, 'video_sec': 32.622087273809527},
'Sc20_S6_R2_HKBS': {'bs_index': -124, 'hkb_index': -157},
'Sc21a_S13_R1_HKBS': {'hkb_index': 537},
'Sc21a_S14_R2_HKBS': {'hkb_index': 752},
'Sc21a_S15_R3_HKBS': {'hkb_index': 438},
'Sc21a_S16_R4_HKBS': {'hkb_index': 224},
'Sc21b_S21_R1_HKBS': {'hkb_index': 368},
'Sc21b_S22_R2_HKBS': {'hkb_index': -333},
'Sc21b_S23_R3_HKBS': {'hkb_index': 136},
'Sc22a_S9_R1_Full': {'hkb_index': 678}}
Notes
-----
This is used for synchronization purpose
"""
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.load( open( os.path.join(path,'offset_dictionnary.bin'), "rb" ) )
return d
def _save_offset_dict(self,d):
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.dump( d, open( os.path.join(path,'offset_dictionnary.bin'), "wb" ) )
def _save_data_off_dict(self,filename,typ,value):
""" save
- a given "value" of an for,
- a serie/run "filename",
- of a given typ (video|hkb|tcr|...)
"""
d = self._load_offset_dict()
try:
d[filename].update({typ:value})
except:
d[filename]={}
d[filename][typ]=value
self._save_offset_dict(d)
def offset_setter_video(self,a='AP1',b='WristRight',**kwargs):
""" video offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig, axs = plt.subplots(nrows=2,ncols=1)
fig.subplots_adjust(bottom=0.3)
if isinstance(a,str):
ia = self.dHKB[a]
else:
ia = a
a = self.idHKB[a]
if isinstance(b,str):
ib = self.dHKB[b]
else:
ib = bq
b = self.idHKB[b]
time = self.thkb
if len(time) == 1:
time=time[0]
sab = self.hkb[a+'-'+b].values
sabt = self.hkb[a+'-'+b].index
hkb = axs[1].plot(sabt,sab,label = a+'-'+b)
axs[1].legend()
try :
init = self.offset[self._filename]['video_sec']
except:
init=time[0]
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(init)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.05])
sliderx = Slider(slide_xoffset_ax, "video offset", 0, self.hkb.index[-1],
valinit=init, color='#AAAAAA')
# vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
def update_x(val):
F0 = vc.get_frame(val)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# def cursor(val):
# try :
# pp.remove()
# except:
# pass
# vertc = [(sabt[0]+val,min(sab)-10),(sabt[0]+val,min(sab)-10),(sabt[0]+val,max(sab)+10),(sabt[0]+val,max(sab)-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
# sliderx.on_changed(cursor)
def plus(event):
sliderx.set_val(sliderx.val +0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def setter(event):
self._save_data_off_dict(self._filename,'video_sec',sliderx.val)
self.offset= self._load_offset_dict()
axp = plt.axes([0.3, 0.05, 0.1, 0.075])
axset = plt.axes([0.5, 0.05, 0.1, 0.075])
axm = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '<-')
bp.on_clicked(minus)
bset = Button(axset, 'SET offs.')
bset.on_clicked(setter)
bm = Button(axm, '->')
bm.on_clicked(plus)
plt.show()
    def offset_setter(self,a='HKB:1',b='HKB:12',techno='',**kwargs):
        """ interactive offset setter for a radio techno

        Parameters
        ----------

        a : str
            device name | id (resolved by devmapper)
        b : str
            device name | id (resolved by devmapper)
        techno : str
            radio technology ('HKB'|'TCR'|...), may be inferred by devmapper
        inverse : bool (True)
            display ground-truth distance as 10*log10(1/d**2) so it can be
            visually aligned with an RSS curve

        Notes
        -----
        Plots ground truth (distance-derived) and measured link values on
        the same axes; three sliders adjust the sample offset of the
        measurement and the y-offset/scale of the ground truth. 'SET'
        stores the accumulated '<techno>_index' offset via
        _save_data_off_dict; the serie must be reloaded afterwards.
        """
        defaults = { 'inverse':True
                   }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        # remember interactive state to restore it after the blocking show
        if plt.isinteractive():
            interactive = True
            plt.ioff()
        else :
            interactive = False

        fig, ax = plt.subplots()
        fig.subplots_adjust(bottom=0.2, left=0.3)
        a,ia,bia,subja,techno=self.devmapper(a,techno)
        b,ib,bib,subjb,techno=self.devmapper(b,techno)

        time = self.tmocap
        if len(time.shape) == 2:
            time = time[0,:]

        # NOTE(review): both branches yield time[0]; the stored-offset
        # initialisation is commented out — confirm before simplifying
        try :
            init = time[0]#self.offset[self._filename]['hkb_index']
        except:
            init=time[0]

        # ground truth distance between the two devices
        var = self.getlinkd(ia,ib,techno).values
        if kwargs['inverse']:
            var = 10*np.log10(1./(var)**2)
        gt = ax.plot(time,var)

        # measured link values
        ab = self.getlink(ia,ib,techno)
        sab = ab.values
        sabt = ab.index.values
        technoval = ax.plot(sabt,sab)


        ########
        # slider
        ########
        slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
        sliderx = Slider(slide_xoffset_ax, techno + " offset", -(len(sabt)/16), (len(sabt)/16),
                        valinit=init, color='#AAAAAA')

        slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
        slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
                        valinit=0, color='#AAAAAA')

        slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
        slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 60,
                        valinit=30, color='#AAAAAA')

        def update_x(val):
            # shift the measured curve by an integer number of samples
            value = int(sliderx.val)
            rtechnoval = np.roll(sab,value)
            sliderx.valtext.set_text('{}'.format(value))
            technoval[0].set_xdata(sabt)
            technoval[0].set_ydata(rtechnoval)
            fig.canvas.draw_idle()
        sliderx.on_changed(update_x)
        sliderx.drawon = False

        def update_y(val):
            # rescale/offset the ground-truth curve for visual alignment
            yoff = slidery.val
            alpha = slideralpha.val
            gt[0].set_ydata(alpha*var + yoff)
            fig.canvas.draw_idle()
        #initpurpose
        update_y(5)
        slidery.on_changed(update_y)
        slideralpha.on_changed(update_y)

        def setter(event):
            # accumulate on top of any previously stored offset
            value = int(sliderx.val)
            try :
                nval = self.offset[self._filename][techno.lower()+'_index'] + value
            except :
                nval = value
            self._save_data_off_dict(self._filename,techno.lower()+'_index',nval)
            self.offset= self._load_offset_dict()
            ax.set_title('WARNING : Please Reload serie to Valide offset change',color='r',weight='bold')
        axset = plt.axes([0.0, 0.5, 0.2, 0.05])
        bset = Button(axset, 'SET ' +techno+' offs.')
        bset.on_clicked(setter)

        plt.show()
        if interactive :
            plt.ion()
# def offset_setter_hkb(self,a='AP1',b='WristRight',**kwargs):
# """ offset setter
# """
# defaults = { 'inverse':True
# }
# for k in defaults:
# if k not in kwargs:
# kwargs[k] = defaults[k]
# if plt.isinteractive():
# interactive = True
# plt.ioff()
# else :
# interactive = False
# fig, ax = plt.subplots()
# fig.subplots_adjust(bottom=0.2, left=0.3)
# a,ia,bia,subja,techno=self.devmapper(a,'HKB')
# b,ib,bib,subjb,techno=self.devmapper(b,'HKB')
# time = self.thkb
# if len(time.shape) == 2:
# time = time[0,:]
# try :
# init = time[0]#self.offset[self._filename]['hkb_index']
# except:
# init=time[0]
# var = self.getlinkd(ia,ib,'HKB').values
# if kwargs['inverse']:
# var = 10*np.log10(1./(var)**2)
# gt = ax.plot(self.B[self.B.keys()[0]].time,var)
# sab = self.hkb[a+'-'+b].values
# sabt = self.hkb[a+'-'+b].index
# hkb = ax.plot(sabt,sab)
# ########
# # slider
# ########
# slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
# sliderx = Slider(slide_xoffset_ax, "hkb offset", -(len(sabt)/16), (len(sabt)/16),
# valinit=init, color='#AAAAAA')
# slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
# slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
# valinit=0, color='#AAAAAA')
# slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
# slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 10,
# valinit=5, color='#AAAAAA')
# def update_x(val):
# value = int(sliderx.val)
# rhkb = np.roll(sab,value)
# sliderx.valtext.set_text('{}'.format(value))
# hkb[0].set_xdata(sabt)
# hkb[0].set_ydata(rhkb)
# fig.canvas.draw_idle()
# sliderx.on_changed(update_x)
# sliderx.drawon = False
# def update_y(val):
# yoff = slidery.val
# alpha = slideralpha.val
# gt[0].set_ydata(alpha*var + yoff)
# fig.canvas.draw_idle()
# #initpurpose
# update_y(5)
# slidery.on_changed(update_y)
# slideralpha.on_changed(update_y)
# def setter(event):
# value = int(sliderx.val)
# try :
# nval = self.offset[self._filename]['hkb_index'] + value
# except :
# nval = value
# self._save_data_off_dict(self._filename,'hkb_index',nval)
# self.offset= self._load_offset_dict()
# ax.set_title('WARNING : Please Reload serie to Valide offset change',color='r',weight='bold')
# axset = plt.axes([0.0, 0.5, 0.2, 0.05])
# bset = Button(axset, 'SET offs.')
# bset.on_clicked(setter)
# plt.show()
# if interactive:
# plt.ion()
    def mtlbsave(self):
        """ Matlab format save

        Saves the serie into S{day}_{serie}.mat (and self.matlab) with the
        following layout :

        S{day}_{serie}
            node_name
            node_place
            node_coord
            HKB.{linkname}.rssi
            HKB.{linkname}.dsh
            HKB.{linkname}.sh
            HKB.{linkname}.dist
            TCR.{linkname}.range
            TCR.{linkname}.sh
            TCR.{linkname}.dist
            trh / trt / tm : radio and mocap time axes

        Notes
        -----
        Requires _distancematrix to have been run (dist_nodesmap, points).
        """
        key = 'S'+str(self.day)+'_'+str(self.serie)
        filemat = key+'.mat'
        d = {}
        d[key]={}
        d[key]['node_name'] = self.dist_nodesmap
        d[key]['node_place'] = [ self.devmapper(x)[0] for x in self.dist_nodesmap ]
        d[key]['node_coord'] = self.points
        for subject in self.interf:
            sub = subject.replace(':','')
            d[key][sub]=np.mean(self.B[subject].d,axis=1)

        if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
            d[key]['HKB']={}
            links = list(self.hkb.columns)
            inter,lks = self.compute_visibility(techno='HKB')
            for l in links:
                ls = l.split('-')
                # to shorten matlab keys surnames are replaced by first letter
                nl = ls[0]+'_'+ls[1]
                nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
                d[key]['HKB'][nl] = {}
                ix0 = np.where(lks==ls[0])[0]
                ix1 = np.where(lks==ls[1])[0]
                # shadowing (visibility intersection) on the link
                Ssh = inter[ix0,ix1,:]
                Srssi= self.getlink(ls[0],ls[1],techno='HKB')
                # get distances between nodes
                Sdist = self.getlinkd(ls[0],ls[1],techno='HKB')
                dsh = dist_sh2rssi(Sdist,Ssh,15)
                # rssi
                d[key]['HKB'][nl]['rssi'] = Srssi.values
                # dsh
                d[key]['HKB'][nl]['dsh'] = dsh
                #d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
                d[key]['HKB'][nl]['sh'] = Ssh
                # time rssi (shared axis, overwritten identically per link)
                #d[key]['HKB'][nl]['trh'] = np.array(Srssi.index)
                d[key]['trh'] = np.array(Srssi.index)
                # distance
                d[key]['HKB'][nl]['dist'] = Sdist.values
                # time mocap
                #d[key]['HKB'][nl]['td'] = np.array(Sdist.index)
                d[key]['tm'] = np.array(Sdist.index)

        if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
            d[key]['TCR']={}
            links = list(self.tcr.columns)
            inter,lks = self.compute_visibility(techno='TCR')
            for l in links:
                ls = l.split('-')
                # to shorten matlab keys surname are replaced by first letter
                nl = ls[0]+'_'+ls[1]
                nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
                d[key]['TCR'][nl] = {}

                ix0 = np.where(lks==ls[0])[0]
                ix1 = np.where(lks==ls[1])[0]
                # intersection on the link
                Ssh = inter[ix0,ix1,:]
                Srange= self.getlink(ls[0],ls[1],techno='TCR')
                # get distances between nodes
                Sdist = self.getlinkd(ls[0],ls[1],techno='TCR')
                # rssi
                d[key]['TCR'][nl]['range'] = Srange.values
                # dsh
                #d['S6'][nl]['rssi_dec'] = np.roll(Srssi.values,-dec)
                d[key]['TCR'][nl]['sh'] = Ssh
                # time rssi
                #d[key]['TCR'][nl]['tr'] = np.array(Srange.index)
                d[key]['trt'] = np.array(Srange.index)
                # distance
                d[key]['TCR'][nl]['dist'] = Sdist.values
                # time mocap
                #d[key]['TCR'][nl]['td'] = np.array(Sdist.index)
                d[key]['tm'] = np.array(Sdist.index)

        self.matlab = d
        io.savemat(filemat,d)
    def pltvisi(self,a,b,techno='',**kwargs):
        """ plot visibility between link a and b

        Attributes
        ----------

        color:
            fill color
        hatch:
            hatch type
        label_pos: ('top'|'bottom'|'')
            postion of the label
        label_pos_off: float
            offset of postion of the label
        label_vis: str
            prefix of the label over a visible (LOS) region
        label_hide: str
            prefix of the label over a hidden (NLOS) region

        Examples
        --------

        >>> from pylayers.measures.cormoran import *
        >>> S = CorSer(6)
        >>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
        >>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
        >>> f,ax = S.pltmob(fig=f,ax=ax)
        >>> plt.title('hatch = visibility / gray= mobility')
        >>> plt.show()
        """
        defaults = { 'fig':[],
                     'figsize':(10,10),
                     'ax':[],
                     'color':'',
                     'hatch':'//',
                     'label_pos':'',
                     'label_pos_off':5,
                     'label_vis':'V',
                     'label_hide':'H'
                   }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        if kwargs['fig']==[]:
            fig = plt.figure(figsize=kwargs['figsize'])
        else :
            fig=kwargs['fig']

        if kwargs['ax'] ==[]:
            ax = fig.add_subplot(111)
        else :
            ax = kwargs['ax']

        # current axes limits : rectangles span the full y range
        aa= ax.axis()
        a,ia,nna,subjecta,technoa = self.devmapper(a,techno)
        b,ib,nnb,subjectb,technob = self.devmapper(b,techno)
        # tv : time axis ; tseg/itseg : NLOS / LOS segment index ranges
        vv,tv,tseg,itseg = self._visiarray(nna,nnb)
        # vv.any : it exist NLOS regions
        if vv.any():
            if kwargs['color']=='':
                fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
                                    fill=False,
                                    hatch=kwargs['hatch'],
                                    fig=fig,ax=ax)
            else :
                fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],
                                    color=kwargs['color'],
                                    hatch=kwargs['hatch'],
                                    fig=fig,ax=ax)

            if kwargs['label_pos']!='':
                if kwargs['label_pos'] == 'top':
                    yposV = aa[3]-kwargs['label_pos_off']+0.5
                    yposH = aa[3]-kwargs['label_pos_off']-0.5
                elif kwargs['label_pos'] == 'bottom':
                    yposV = aa[2]+kwargs['label_pos_off']+0.5
                    yposH = aa[2]+kwargs['label_pos_off']+0.5
                # label x positions : center of each segment
                xposV= tv[tseg.mean(axis=1).astype(int)]
                xposH= tv[itseg.mean(axis=1).astype(int)]
                [ax.text(x,yposV,kwargs['label_vis']+str(ix+1)) for ix,x in enumerate(xposV)]
                [ax.text(x,yposH,kwargs['label_hide']+str(ix+1)) for ix,x in enumerate(xposH)]

        return fig,ax
    def pltmob(self,**kwargs):
        """ plot mobility

        Parameters
        ----------

        subject: str
            subject to display () if '', take the fist one from self.subject)
        showvel :  boolean
            display filtered velocity
        velth: float (0.7)
            velocity threshold
        fo : int (5)
            filter order
        fw: float (0.02)
            0 < fw < 1  (fN <=> 1)
        time_offset : int
            add time_offset to start later

        Examples
        --------

        >>> from pylayers.measures.cormoran import *
        >>> S = CorSer(6)
        >>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
        >>> #f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
        >>> f,ax = S.pltmob(fig=f,ax=ax)
        >>> plt.title('hatch = visibility / gray= mobility')
        >>> plt.show()
        """
        defaults = { 'subject':'',
                     'fig':[],
                     'figsize':(10,10),
                     'ax':[],
                     'showvel':False,
                     'velth':0.07,
                     'fo':5,
                     'fw':0.02,
                     'ylim':(),
                     'time_offset':0,
                     'color':'gray',
                     'hatch':'',
                     'label_pos':'top',
                     'label_pos_off':2,
                     'label_mob':'M',
                     'label_stat':'S'
                   }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        if kwargs['fig']==[]:
            fig = plt.figure(figsize=kwargs['figsize'])
        else :
            fig=kwargs['fig']

        if kwargs['ax'] ==[]:
            ax = fig.add_subplot(111)
        else :
            ax = kwargs['ax']

        # NOTE(review): keys()[0] is Python 2 only (dict views in py3)
        if kwargs['subject']=='':
            subject=self.B.keys()[0]
        else:
            subject=kwargs['subject']

        # horizontal velocity magnitude, low-pass filtered
        V=self.B[subject].traj[['vx','vy']].values
        Vi=np.sqrt((V[:,0]**2+V[:,1]**2))
        f=DF()
        f.butter(kwargs['fo'],kwargs['fw'],'lowpass')
        Vif=f.filter(Vi)

        # shift the filtered velocity by time_offset samples (zero padded)
        if kwargs['time_offset']>=0:
            zmo = np.zeros(kwargs['time_offset'])
            tmp = np.insert(Vif,zmo,0)
            Vif = tmp[:len(Vif)]
        else:
            zmo = np.zeros(-kwargs['time_offset'])
            tmp = np.concatenate((Vif,zmo))
            Vif = tmp[-kwargs['time_offset']:len(Vif)-kwargs['time_offset']]


        if kwargs['showvel']:
            fig2 = plt.figure()
            ax2=fig2.add_subplot(111)
            ax2.plot(self.B[subject].time[:-2],Vif)
            ax2.plot(Vif)
            cursor2 = Cursor(ax2, useblit=True, color='gray', linewidth=1)

        # indices where the subject is (quasi) static, and the boundaries
        # of the contiguous static runs
        null = np.where(Vif<kwargs['velth'])[0]
        unu1 = np.where(np.diff(null)!=1)[0]
        unu2 = np.where(np.diff(null[::-1])!=-1)[0]
        unu2 = len(null)-unu2
        unu = np.concatenate((unu1,unu2))
        unu = np.sort(unu)
        sunu = unu.shape
        # pad to an even count so boundaries pair up as (start,end)
        if sunu[0]%2:
            unu=np.insert(unu,-1,len(null)-1)
            sunu = unu.shape
        # NOTE(review): `/` here is Python 2 integer division; under py3
        # this yields a float and reshape would fail — use // when porting
        nullr=null[unu].reshape(sunu[0]/2,2)

        if kwargs['ylim'] != ():
            ylim = kwargs['ylim']
        else :
            axlim = ax.axis()
            ylim = [axlim[2],axlim[3]]

        # shade the static intervals
        fig , ax =plu.rectplot(self.B[subject].time,nullr,ylim=ylim,
                               color=kwargs['color'],
                               hatch=kwargs['hatch'],
                               fig=fig,ax=ax)

        # complementary intervals = mobility periods
        inullr = copy.copy(nullr)
        bb = np.insert(inullr[:,1],0,0)
        ee = np.hstack((inullr[:,0],null[-1]))
        inullr = np.array((bb,ee)).T
        # remove last
        inullr = inullr[:-1,:]

        if kwargs['label_pos']!='':
            if kwargs['label_pos'] == 'top':
                yposM = ylim[1]-kwargs['label_pos_off']+0.5
                yposS = ylim[1]-kwargs['label_pos_off']-0.5
            elif kwargs['label_pos'] == 'bottom':
                yposM = ylim[0]+kwargs['label_pos_off']+0.5
                yposS = ylim[0]+kwargs['label_pos_off']+0.5
            # label x positions : center of each interval
            xposM= self.B[subject].time[nullr.mean(axis=1).astype(int)]
            xposS= self.B[subject].time[inullr.mean(axis=1).astype(int)]
            [ax.text(x,yposM,kwargs['label_mob']+str(ix+1),
                     horizontalalignment='center',
                     verticalalignment='center')
                     for ix,x in enumerate(xposM)]
            [ax.text(x,yposS,kwargs['label_stat']+str(ix+1),
                     horizontalalignment='center',
                     verticalalignment='center')
                     for ix,x in enumerate(xposS)]

        return fig,ax
def animhkb(self,a,b,interval=10,save=False):
"""
Parameters
----------
a : node name |number
b : node name | number
save : bool
"""
import matplotlib.animation as animation
x = self.hkb.index
link = a+'-'+b
y = self.hkb[link].values
fig, ax = plt.subplots()
plt.xlim(0,x[-1])
line = [ax.plot(x, y, animated=True)[0]]
def animate(i):
line[0].set_ydata(y[:i])
line[0].set_xdata(x[:i])
return line
ani = animation.FuncAnimation(fig, animate, xrange(1, len(x)),
interval=interval, blit=True)
if save:
ani.save(link+'.mp4')
plt.title(link)
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
plt.show()
def animhkbAP(self,a,AP_list,interval=1,save=False,**kwargs):
"""
Parameters
----------
a : node name
AP_nb=[]
save : bool
Example
-------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> S.animhkbAP('TorsoTopLeft',['AP1','AP2','AP3','AP4'],interval=100,xstart=58,figsize=(20,2))
"""
import matplotlib.animation as animation
defaults = { 'fig':[],
'figsize':(10,10),
'ax':[],
'label':'',
'xstart':0
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
ust = np.where(self.hkb.index>=kwargs['xstart'])[0][0]
x = self.hkb.index[ust:]
links = [l+'-'+a for l in AP_list]
ly = [self.hkb[l].values[ust:] for l in links]
color=['k','b','g','r']
plt.xlim(kwargs['xstart'],x[-1]+3)
line = [ax.plot(x, y, animated=True,
color=color[iy],
label=AP_list[iy]+'-'+kwargs['label'])[0] for iy,y in enumerate(ly)]
def animate(i):
for iy,y in enumerate(ly):
line[iy].set_ydata(y[:i])
line[iy].set_xdata(x[:i])
return line
plt.legend()
plt.xlabel('time (s)')
plt.ylabel('RSS (dBm)')
ani = animation.FuncAnimation(fig, animate, xrange(0, len(x)),
interval=interval, blit=True)
if save:
ani.save(a+'.mp4')
#plt.title(links)
plt.show()
def plot(self,a,b,techno='',t='',**kwargs):
""" ploting
Parameters
----------
a : str | int
name |id
b : str | int
name |id
techno : str (optional)
radio techno
t : float | list (optional)
given time
or [start,stop] time
color : color
distance : boolean (False)
plot distance instead of value
lin : boolean (False)
display linear value instead of dB
sqrtinv : boolean (False)
apply : "sqrt (1/ dataset)"
xoffset : float (0)
add an offset on x axis
yoffset : float (1|1e3|1e6)
add an offset on y axis
title : boolean (True)
display title
shortlabel : boolean (True)
enable short labelling
fontsize : int (18)
font size
returnlines : boolean
if True return the matplotlib ploted lines
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plot('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> #f,ax = S.pltmob(fig=f,ax=ax)
>>> #plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 'fig':[],
'ax':[],
'figsize':(6,4),
'color':'g',
'distance':False,
'lin':False,
'xoffset':0,
'yoffset': 1e6,
'sqrtinv':False,
'title':True,
'shortlabel':True,
'fontsize':18,
'returnlines':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
###create a short labeling
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
#cretae string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
#cretae string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
if kwargs['distance']:
label = 'dist ' + label
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
# get dataframe
if not kwargs['distance']:
df = self.getlink(a,b,techno,t)
title = 'Received Power between ' + label
ylabel = 'Received Power dBm'
else :
df = self.getlinkd(a,b,techno,t)
title = 'Distance between ' + label
ylabel = 'distance (m)'
#post processing on dataframe
if kwargs['lin']:
df = 10**(df/10) * kwargs['yoffset']
if kwargs['sqrtinv']:
df = np.sqrt(1./df)
ylabel = u'$ (mW)^{-1/2} linear scale$'
lines = df.plot(ax=ax,color=kwargs['color'],label=label)
# Managing labelling
if kwargs['title']:
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if kwargs['lin']:
if kwargs['yoffset']==1:
ylabel = 'mW'
if kwargs['yoffset']==1e3:
ylabel = u'$\micro$W'
if kwargs['yoffset']==1e6:
ylabel = u'nW'
ax.set_ylabel(ylabel)
# if kwargs['data']==True:
# #ax.plot(self.thkb[0],self.rssi[ia,ib,:])
# #ax.plot(self.thkb[0],self.rssi[ib,ia,:])
# sab = self.hkb[a+'-'+b]
# if not(kwargs['dB']):
# sab = 10**(sab/10) * kwargs['yoffset']
# if kwargs['distance']:
# sab = np.sqrt(1/sab)
# if kwargs['reciprocal']:
# sba = 10**(sba/10 ) * kwargs['yoffset']
# sba = np.sqrt(1/sba)
# sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
# if kwargs['reciprocal']:
# sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
# #title = 'Received Power ' + self.title1
# if kwargs['dis_title']:
# #title = self.title1+kwargs['tit']
# title = kwargs['tit']
# ax.set_title(label=title,fontsize=kwargs['fontsize'])
# if not kwargs['distance']:
# if kwargs['dB']:
# ax.set_ylabel('Received Power dBm')
# else:
# if kwargs['yoffset']==1:
# ax.set_ylabel('mW')
# if kwargs['yoffset']==1e3:
# ax.set_ylabel(u'$\micro$W')
# if kwargs['yoffset']==1e6:
# ax.set_ylabel(u'nW')
# else:
# ax.set_ylabel(u'$\prop (mW)^{-1/2} linear scale$')
# if kwargs['reciprocal']==True:
# # if kwargs['data']==True:
# # ax2=fig.add_subplot(212)
# r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
# r[t0:t1].plot(ax=ax2)
# ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
if not kwargs['returnlines']:
return fig,ax
else:
return fig,ax,lines
def plthkb(self,a,b,techno='HKB',**kwargs):
""" plot Hikob devices
DEPRECATED
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> S = CorSer(6)
>>> f,ax = S.plthkb('AP1','TorsoTopLeft',techno='HKB')
>>> f,ax = S.pltvisi('AP1','TorsoTopLeft',techno='HKB',fig=f,ax=ax)
>>> f,ax = S.pltmob(fig=f,ax=ax)
>>> plt.title('hatch = visibility / gray= mobility')
>>> plt.show()
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'xoffset':0,
'yoffset': 1e6,
'reciprocal':False,
'dB':True,
'data':True,
'colorab':'g',
'colorba':'b',
'distance':False,
'fontsize':18,
'shortlabel':True,
'dis_title':True,
'xlim':(),
'tit':''
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
try:
t1=self.thkb[0][-1]
except:
t1=self.thkb[-1]
a,ia,bia,subja,technoa=self.devmapper(a,techno)
b,ib,bib,subjb,technob=self.devmapper(b,techno)
if kwargs['shortlabel']:
#find uppercase position
uu = np.nonzero([l.isupper() or l.isdigit() for l in a])[0]
#cretae string from list
labela = ''.join([a[i] for i in uu])
uu = np.nonzero([l.isupper() or l.isdigit() for l in b])[0]
#cretae string from list
labelb = ''.join([b[i] for i in uu])
label = labela +'-'+labelb
else:
label = a+'-'+b
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else :
fig=kwargs['fig']
if kwargs['ax'] ==[]:
if kwargs['reciprocal']:
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else :
ax = fig.add_subplot(111)
else :
ax = kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
sab = self.hkb[a+'-'+b]
if not(kwargs['dB']):
sab = 10**(sab/10) * kwargs['yoffset']
if kwargs['distance']:
sab = np.sqrt(1/sab)
if kwargs['reciprocal']:
sba = 10**(sba/10 ) * kwargs['yoffset']
sba = np.sqrt(1/sba)
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],label=label,xlim=(t0,t1))
if kwargs['reciprocal']:
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],label=label)
#title = 'Received Power ' + self.title1
if kwargs['dis_title']:
#title = self.title1+kwargs['tit']
title = kwargs['tit']
ax.set_title(label=title,fontsize=kwargs['fontsize'])
if not kwargs['distance']:
if kwargs['dB']:
ax.set_ylabel('Received Power dBm')
else:
if kwargs['yoffset']==1:
ax.set_ylabel('mW')
if kwargs['yoffset']==1e3:
ax.set_ylabel(u'$\micro$W')
if kwargs['yoffset']==1e6:
ax.set_ylabel(u'nW')
else:
ax.set_ylabel(u'$\prop (mW)^{-1/2} linear scale$')
if kwargs['reciprocal']==True:
# if kwargs['data']==True:
# ax2=fig.add_subplot(212)
r = self.hkb[a+'-'+b][self.hkb[a+'-'+b]!=0]- self.hkb[b+'-'+a][self.hkb[b+'-'+a]!=0]
r[t0:t1].plot(ax=ax2)
ax2.set_title('Reciprocity offset',fontsize=kwargs['fontsize'])
return fig,ax
def plttcr(self,a,b,**kwargs):
""" plot TCR devices
Parameters
----------
a : node name |number
b : node name | number
t0 : start time
t1 : stop time
"""
defaults = { 't0':0,
't1':-1,
'fig':[],
'ax':[],
'figsize':(8,8),
'data':True,
'colorab':'g',
'colorba':'b',
'linestyle':'default',
'inverse':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
t0 =kwargs['t0']
t1 =kwargs['t1']
if t1 ==-1:
t1=self.ttcr[-1]
if isinstance(a,str):
ia = self.dTCR[a]
else:
ia = a
a = self.idTCR[a]
if isinstance(b,str):
ib = self.dTCR[b]
else:
ib = b
b = self.idTCR[b]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
else :
ax=kwargs['ax']
if kwargs['data']==True:
#ax.plot(self.thkb[0],self.rssi[ia,ib,:])
#ax.plot(self.thkb[0],self.rssi[ib,ia,:])
if kwargs['inverse']:
sab = 1./(self.tcr[a+'-'+b])**2
sba = 1./(self.tcr[b+'-'+a])**2
else:
sab = self.tcr[a+'-'+b]
sba = self.tcr[b+'-'+a]
sab[t0:t1].plot(ax=ax,color=kwargs['colorab'],marker='o',linestyle=kwargs['linestyle'])
sba[t0:t1].plot(ax=ax,color=kwargs['colorba'],marker='o',linestyle=kwargs['linestyle'])
ax.set_title(a+'-'+b)
return fig,ax
    def pltgt(self,a,b,**kwargs):
        """ plot ground truth (motion-capture based distance) for a link

        Parameters
        ----------

        t0 : float
            start time
        t1 : float
            stop time
        fig : matplotlib figure
        ax : matplotlib axis
        figsize : tuple
        linestyle : string
        inverse : boolean (False)
            display 1/distance instead of distance
        log : boolean
            display log of distance instead of distance
        gamma : float (-40)
            additive/multiplicative factor used with the log display;
            this can be used to fit RSS
        mode : string
            'HKB' | 'TCR' | 'FULL'
        visi : boolean
            display visibility (NLOS regions hatched via plu.rectplot)
        color : string color ('k'|'m'|'g')
            color to display the visibility area
        hatch : string hatch type ('//')
            hatch type used for the visibility area
        fontsize : int
            title fontsize

        Example
        -------

        >>> from pylayers.measures.cormoran import *
        >>> S=CorSer(6)
        >>> S.pltgt('AP1','TorsoTopLeft')

        """
        defaults = { 'subject':'',
                     't0':0,
                     't1':-1,
                     'fig':[],
                     'ax':[],
                     'figsize':(8,8),
                     'linestyle':'default',
                     'inverse':False,
                     'log':True,
                     'gamma':-40,
                     'mode':'HKB',
                     'visi': True,
                     'fontsize': 14,
                     'color':'k',
                     'hatch':''
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        #t0 =kwargs.pop('t0')
        #t1 =kwargs.pop('t1')
        #if t1 ==-1:
        #t1=self.thkb[-1]
        #    t1=self.ttcr[-1]

        label = a+'-'+b

        # every consumed option is popped so the remaining kwargs can be
        # forwarded verbatim to ax.plot below
        mode = kwargs.pop('mode')
        inverse = kwargs.pop('inverse')
        log = kwargs.pop('log')
        gamma = kwargs.pop('gamma')
        visibility = kwargs.pop('visi')
        fontsize = kwargs.pop('fontsize')
        hatch = kwargs.pop('hatch')
        subject = kwargs.pop('subject')

        # default subject : first body (NOTE: dict ordering is arbitrary
        # under Python 2 -- "first" is not deterministic)
        if subject=='':
            subject=self.B.keys()[0]
        else:
            subject=subject

        if kwargs['fig']==[]:
            figsize = kwargs.pop('figsize')
            kwargs.pop('fig')
            fig = plt.figure(figsize=figsize)
        else:
            kwargs.pop('figsize')
            fig = kwargs.pop('fig')

        if kwargs['ax'] ==[]:
            kwargs.pop('ax')
            ax = fig.add_subplot(111)
        else :
            ax=kwargs.pop('ax')

        if mode == 'HKB' or mode == 'FULL':
            # accept name or id for both link ends
            if isinstance(a,str):
                iahk = self.dHKB[a]
            else:
                iahk = a
                a = self.idHKB[a]
            if isinstance(b,str):
                ibhk = self.dHKB[b]
            else:
                ibhk = b
                b = self.idHKB[b]

            var = self.getlink(iahk,ibhk,'HKB')
            #var = U.values
            #time = U.index
            #pdb.set_trace()
            if inverse:
                var = 1./(var)
                ax.set_ylabel(u'$m^{-2}$',fontsize=fontsize)
                if log :
                    #var = gamma*10*np.log10(var)
                    # path-loss-like display : -20 log10(d) + gamma
                    var = 20*np.log10(var)+gamma
                    ax.set_ylabel(u'$- 20 \log_{10}(d)'+str(gamma)+'$ (dB)',fontsize=fontsize)
                    plt.ylim(-65,-40)
            else:
                ax.set_ylabel(u'meters',fontsize=fontsize)
                if log :
                    var = gamma*10*np.log10(var)+gamma
                    ax.set_ylabel(u'$10log_{10}m^{-2}$',fontsize=fontsize)

            #ax.plot(self.B[subject].time,var,label=label,**kwargs)
            # NOTE(review): plotted on the current axes, not on ax
            var.plot()

        #
        # TCR |Full
        #
        if mode == 'TCR' or mode == 'FULL':
            if isinstance(a,str):
                iatcr = self.dTCR[a]
            else:
                iatcr = a
                a = self.idTCR[a]
            if isinstance(b,str):
                ibtcr = self.dTCR[b]
            else:
                ibtcr = b
                b = self.idTCR[b]

            var = self.getlink(iatcr,ibtcr,'TCR').values

            #if inverse:
            #    var = 1./(var)**2
            #    if log :
            #        var = gamma*10*np.log10(var)
            #else:
            #    if log :
            #        var = gamma*10*np.log10(var)
            #pdb.set_trace()
            #ax.plot(self.B[subject].time,var,**kwargs)
            ax.plot(self.B[subject].ttcr,var,**kwargs)

        if visibility:
            aa= ax.axis()
            vv,tv,tseg,itseg = self._visiarray(a,b)
            # vv.any : NLOS regions exist
            if vv.any():
                fig,ax=plu.rectplot(tv,tseg,ylim=aa[2:],color=kwargs['color'],hatch=hatch,fig=fig,ax=ax)

        # for t in tseg:

        #axs[cptax].plot(visi.index.values,visi.values,'r')

        #if inverse:
        #    ax.set_title(u'Motion Capture Ground Truth : inverse of squared distance',fontsize=fontsize+1)
        #else:
        #    ax.set_title('Motion Capture Ground Truth : evolution of distance (m)',fontsize=fontsize+1)
        ax.set_xlabel('Time (s)',fontsize=fontsize)
        plt.tight_layout()

        return fig, ax
    def pltlk(self,a,b,**kwargs):
        """ plot a link : radio observables (HKB and/or TCR) + ground truth

        Parameters
        ----------

        a : string
            node a name
        b : string
            node b name
        display: list
            techno to be displayed ('HKB'|'TCR'|'FULL');
            empty list : inferred from available data
        figsize
        t0: float
            time start
        t1 : float
            time stop
        colhk: plt.color
            color of hk curve
        colhk2: plt.color
            color of hk curve2 (if reciprocal)
        linestylehk:
            linestyle hk
        coltcr:
            color tcr curve
        coltcr2:
            color of tcr curve2 (if reciprocal)
        linestyletcr:
            linestyle tcr
        colgt:
            color ground truth
        inversegt:
            invert ground truth
        loggt: bool
            apply a log10 factor to ground truth
        gammagt:
            apply a gamma factor to ground truth (if loggt !)
        fontsize:
            font size of legend
        visi:
            display visibility indicator
        axs :
            list of matplotlib axes

        Example
        -------

        >>> from pylayers.measures.cormoran import *
        >>> S=CorSer(6)
        >>> S.pltlk('AP1','TorsoTopLeft')
        """
        defaults = { 'display':[],
                     'figsize':(8,8),
                     't0':0,
                     't1':-1,
                     'colhk':'g',
                     'colhk2':'b',
                     'linestylehk':'default',
                     'coltcr':'g',
                     'coltcr2':'b',
                     'linestyletcr':'step',
                     'colgt': 'k',
                     'inversegt':True,
                     'loggt':True,
                     'gammagt':-40,
                     'fontsize':14,
                     'visi':True,
                     'axs' :[],
                     'gt':True,
                     'tit':''
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        display = kwargs.pop('display')

        if not isinstance(display,list):
            display=[display]

        # when not specified, infer the technos from available data
        if display == []:
            if ('tcr' in dir(self)) and ('hkb' in dir(self)):
                display.append('FULL')
            elif 'tcr' in dir(self):
                display.append('TCR')
            elif 'hkb' in dir(self):
                display.append('HKB')

        display = [t.upper() for t in display]

        # number of stacked axes (currently 2 in every case)
        if 'FULL' in display:
            ld = 2
        elif 'TCR' in display or 'HKB' in display:
            ld = 2

        #Axes management
        if kwargs['axs'] == []:
            kwargs.pop('axs')
            fig,axs = plt.subplots(nrows=ld,ncols=1,figsize=kwargs['figsize'],sharex=True)
        else :
            fig =plt.gcf()
            axs = kwargs.pop('axs')

        cptax= 0

        # HKB plot
        # NOTE: kwargs is mutated (pop/insert) so the remaining keys match
        # what each sub-plot method (plthkb/plttcr/pltgt) expects
        if 'HKB' in display or 'FULL' in display:
            if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
                if isinstance(a,str):
                    iahk = self.dHKB[a]
                else :
                    raise AttributeError('in self.pltlk, nodes id must be a string')
                if isinstance(b,str):
                    ibhk = self.dHKB[b]
                else :
                    raise AttributeError('in self.pltlk, nodes id must be a string')
            else :
                raise AttributeError('HK not available for the given scenario')

            kwargs['fig']=fig
            kwargs['ax']=axs[cptax]
            kwargs['colorab']=kwargs.pop('colhk')
            kwargs['colorba']=kwargs.pop('colhk2')
            kwargs['linestyle']=kwargs.pop('linestylehk')
            # no-op pop/re-insert of 'tit' (kept as-is)
            kwargs['tit']=kwargs.pop('tit')
            fig,axs[cptax]=self.plthkb(a,b,reciprocal=False,**kwargs)

            cptax+=1
        else :
            kwargs.pop('colhk')
            kwargs.pop('colhk2')
            kwargs.pop('linestylehk')

        #TCR plot
        if 'TCR' in display or 'FULL' in display:
            if ('TCR' in self.typ.upper()) or ('FULL' in self.typ.upper()):
                if isinstance(a,str):
                    iatcr = self.dTCR[a]
                else :
                    raise AttributeError('in self.pltlk, nodes id must be a string')
                if isinstance(b,str):
                    ibtcr = self.dTCR[b]
                else :
                    raise AttributeError('in self.pltlk, nodes id must be a string')
            else :
                raise AttributeError('TCR not available for the given scenario')

            kwargs['fig']=fig
            kwargs['ax']=axs[cptax]
            kwargs['colorab']=kwargs.pop('coltcr')
            kwargs['colorba']=kwargs.pop('coltcr2')
            kwargs['linestyle']=kwargs.pop('linestyletcr')
            tcrlink = a+'-'+b
            #plot only if link exist
            if tcrlink in self.tcr:
                fig,axs[cptax]=self.plttcr(a,b,**kwargs)
        else :
            kwargs.pop('coltcr')
            kwargs.pop('coltcr2')
            kwargs.pop('linestyletcr')
            #cptax+=1

        #
        # Ground Truth
        #
        #
        # HKB |Full
        #
        if kwargs.pop('gt'):
            kwargs['color'] = kwargs.pop('colgt')
            kwargs.pop('colorab')
            kwargs.pop('colorba')
            kwargs['ax']=axs[cptax]
            kwargs['inverse']=kwargs.pop('inversegt')
            kwargs['log']=kwargs.pop('loggt')
            kwargs['gamma']=kwargs.pop('gammagt')
            kwargs.pop('tit')

            if 'HKB' in display or 'FULL' in display:
                kwargs['mode']= 'HKB'
                fig,axs[cptax] = self.pltgt(a,b,**kwargs)
            elif 'TCR' in display or 'FULL' in display:
                kwargs['mode']= 'TCR'
                fig,axs[cptax] = self.pltgt(a,b,**kwargs)

        return fig,axs
# aa = axs[cptax].axis()
#
# calculates visibility and display NLOS region
# as a yellow patch over the shadowed region
#
    def showpattern(self,a,techno='HKB',**kwargs):
        """ show antenna pattern configuration for a given device and time

        Plots the device position and a 2D cut of its antenna pattern
        (either a body-mounted device or an infrastructure device in
        self.din).

        Parameters
        ----------

        a : int | str
            device name | id
        techno : string
            'HKB'|'TCR'|'BS'
        phi : float
            antenna elevation in rad (pattern cut selection)
        fig :
        ax :
        t : float
            time at which the body/device position is evaluated
        ap : boolean
        """
        defaults = { 'fig':[],
                     'ax':[],
                     't':0,
                     'phi':np.pi/2.,
                     'ap':False
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        if kwargs['fig'] == []:
            fig=plt.figure()
        else :
            fig = kwargs['fig']

        if kwargs['ax'] == []:
            ax=fig.add_subplot(111)
        else :
            ax = kwargs['ax']

        # display nodes
        #
        #
        #
        a,ia,ba,subjecta,techno = self.devmapper(a,techno)
        pa = self.getdevp(a,techno=techno,t=kwargs['t']).values
        # keep a single 2D/3D point when a time range was returned
        if len(pa.shape) >1:
            pa=pa[0]
        ax.plot(pa[0],pa[1],'ob')
        ax.text(pa[0],pa[1],ba)

        if subjecta != '':
            # body-mounted device : evaluate antenna on the body at time t
            self.B[subjecta].settopos(t=kwargs['t'])
            self.B[subjecta].dev[ba]['ant'].eval()
            xa,ya,z,sa,v = self.B[subjecta].dev[ba]['ant']._computemesh(po=pa,T=self.B[subjecta].acs[ba],minr=0.01,maxr=0.1,ilog=False)
            # last pattern sample with phi <= requested elevation
            p2 = np.where(self.B[subjecta].dev[ba]['ant'].phi<=kwargs['phi'])[0][-1]
            # ax.plot(xa[:,p2],ya[:,p2])
            ax.plot(xa[p2,:],ya[p2,:])
        else:
            # infrastructure device (access point) from self.din
            self.din[ba]['ant'].eval()
            xa,ya,z,sa,v = self.din[ba]['ant']._computemesh(po=self.din[ba]['p'],T=self.din[ba]['T'],minr=0.01,maxr=0.1,ilog=False)
            p2 = np.where(self.din[ba]['ant'].phi<=kwargs['phi'])[0][-1]
            ax.plot(xa[:,p2],ya[:,p2])

        return fig,ax
    def showlink(self,a='AP1',b='BackCenter',technoa='HKB',technob='HKB',**kwargs):
        """ show link configuration for a given frame

        Draws the antenna patterns of both link ends (via showpattern)
        plus the four HKB access-point positions.

        Parameters
        ----------

        a : int | str
            device name | id
        b : int | str
            device name | id
        technoa : string
            default 'HKB'|'TCR'|'BS'
        technob
            default 'HKB'|'TCR'|'BS'
        phi : float
            antenna elevation in rad (forwarded to showpattern via kwargs)
        """
        defaults = { 'fig':[],
                     'ax':[],
                     't':0,
                     'phi':np.pi/2.,
                     'ap':False
                    }

        for k in defaults:
            if k not in kwargs:
                kwargs[k] = defaults[k]

        if kwargs['fig'] == []:
            fig=plt.figure()
        else :
            fig = kwargs['fig']

        if kwargs['ax'] == []:
            ax=fig.add_subplot(111)
        else :
            ax = kwargs['ax']

        # display both link ends with their antenna pattern cut
        fig,ax=self.showpattern(a=a,techno=technoa,fig=fig,ax=ax)
        fig,ax=self.showpattern(a=b,techno=technob,fig=fig,ax=ax)

        plt.axis('equal')

        # the 4 HKB access points (assumed present in self.din)
        p1 = self.din['HKB:1']['p']
        p2 = self.din['HKB:2']['p']
        p3 = self.din['HKB:3']['p']
        p4 = self.din['HKB:4']['p']
        plt.plot(p1[0],p1[1],'og')
        plt.plot(p2[0],p2[1],'ob')
        plt.plot(p3[0],p3[1],'or')
        plt.plot(p4[0],p4[1],'ok')

        plt.axis('equal')

        # if A.ndim==2:
        #     plt.plot(A[iframe,0],A[iframe,1],'ob')
        #     plt.text(A[iframe,0],A[iframe,1],a)
        # else:
        #     plt.plot(A[0],A[1],'or')
        #     #plt.text(A[0],A[1],a)

        # if B.ndim==2:
        #     plt.plot(B[iframe,0],B[iframe,1],style)
        #     plt.text(B[iframe,0]+0.1,B[iframe,1]+0.1,b)
        # else:
        #     plt.plot(B[0],B[1],'ob')
        #     plt.text(B[0],B[1],b)
        # plt.xlim(-6,6)
        # plt.ylim(-5,5)

        # self.B[subjecta].settopos(t=t)
        # self.B[subjectb].settopos(t=t)

        #
        # # display body
        # #pc = self.B.d[:,2,iframe] + self.B.pg[:,iframe].T
        # pc0 = self.B[subjecta].d[:,0,iframe] + self.B[subjecta].pg[:,iframe].T
        # pc1 = self.B[subjecta].d[:,1,iframe] + self.B[subjecta].pg[:,iframe].T
        # pc15 = self.B[subjecta].d[:,15,iframe] + self.B[subjecta].pg[:,iframe].T

        # #plt.plot(pc0[0],pc0[1],'og')
        # #plt.text(pc0[0]+0.1,pc0[1],str(iframe))
        # #plt.plot(pc1[0],pc1[1],'og')
        # #plt.plot(pc15[0],pc15[1],'og')

        # #ci00 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.6)
        # #ci01 = plt.Circle((pc1[0],pc1[1]),self.B[subjecta].sl[0,2],color='green',alpha=0.1)
        # #ci100 = plt.Circle((pc0[0],pc0[1]),self.B[subjecta].sl[10,2],color='red',alpha=0.1)
        # ci1015 = plt.Circle((pc15[0],pc15[1]),self.B[subjecta].sl[10,2],color='green',alpha=0.5)
        # plt.axis('equal')
        # ax = plt.gca()
        # ax.add_patch(ci1015)
        # #ax.add_patch(ci01)
        # #ax.add_patch(ci100)
        # #ax.add_patch(ci1015)

        # #its = self.B[subjecta].intersectBody(A[iframe,:],B[iframe,:],topos=False,frameId=iframe)
        # #x.set_title('frameId :'+str(iframe)+' '+str(its.T))
    def visidev(self,a,b,technoa='HKB',technob='HKB',dsf=10):
        """ get link visibility status

        Ray/body intersection is evaluated on a downsampled set of
        motion-capture frames.

        Parameters
        ----------

        a : str | int
            device name | id
        b : str | int
            device name | id
        technoa : str
            radio techno of a
        technob : str
            radio techno of b
        dsf : int
            downsampling factor on the frame index

        Returns
        -------

        visi : pandas Series
            0  : LOS
            1  : NLOS
        """
        A,B = self.getlinkp(a,b,technoa=technoa,technob=technob)
        A=A.values
        B=B.values
        aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
        ab,ib,bb,subjectb,technob= self.devmapper(b,technob)

        # number of mocap frames : taken from whichever end is body-mounted
        # NOTE(review): the second test uses if/else (not elif); when b is
        # an 'AP' the else overwrites Nframe even if a was body-mounted
        if 'AP' not in aa:
            Nframe = A.shape[0]
        if 'AP' not in ab:
            Nframe = B.shape[0]
        else:
            Nframe = len(self.B[self.B.keys()[0]].time)
        iframe = np.arange(0,Nframe-1,dsf)
        tvisi = []
        #
        # A : Nframe x 3
        # B : Nframe x 3
        # B.pg : 3 x Nframe
        #
        if subjecta != '':
            subject = subjecta
        elif subjectb != '':
            subject = subjectb
        else :
            raise AttributeError('Visibility can only be determine on a body for now')

        # work in the body-centered frame when the body is centered
        if self.B[subject].centered:
            A = A-self.B[subject].pg.T
            B = B-self.B[subject].pg.T

        for k in iframe:
            # broadcast a static (single-point) end over all frames
            if len(np.shape(A))<2:
                A=A[np.newaxis,:]*np.ones((len(B),3))
            if len(np.shape(B))<2:
                B=B[np.newaxis,:]*np.ones((len(A),3))

            its = self.B[subject].intersectBody(A[k,:],B[k,:],topos=False,frameId=k)
            tvisi.append(its.any())
        # frame index converted to seconds assuming a 100 Hz mocap
        # rate -- TODO confirm
        visi = pd.Series(tvisi,index=iframe/100.)
        #return(visi,iframe)
        return(visi)
    def visidev2(self,a,b,technoa='HKB',technob='HKB',trange=[]):
        """ get link visibility status evaluated at given times

        Same principle as visidev, but the body intersection test is
        evaluated at each time of `trange` instead of a downsampled
        frame index.

        Parameters
        ----------

        a : str | int
            device name | id
        b : str | int
            device name | id
        technoa : str
            radio techno of a
        technob : str
            radio techno of b
        trange : nd array
            time range

        Returns
        -------

        visi : pandas Series (indexed by trange)
            0  : LOS
            1  : NLOS
        """
        A,B = self.getlinkp(a,b,technoa,technob)
        A=A.values
        B=B.values
        aa,ia,ba,subjecta,technoa= self.devmapper(a,technoa)
        ab,ib,bb,subjectb,technob= self.devmapper(b,technob)

        # NOTE(review): Nframe is computed but never used below
        if 'AP' not in a:
            Nframe = A.shape[0]
        if 'AP' not in b:
            Nframe = B.shape[0]
        # iframe = np.arange(0,Nframe-1,dsf)
        tvisi = []
        #
        # A : Nframe x 3
        # B : Nframe x 3
        # B.pg : 3 x Nframe
        #
        if subjecta != '':
            subject = subjecta
        elif subjectb != '':
            subject = subjectb
        else :
            raise AttributeError('Visibility can only be determine on a body for now')

        # work in the body-centered frame when the body is centered
        if self.B[subject].centered:
            A = A-self.B[subject].pg.T
            B = B-self.B[subject].pg.T

        for t in trange:
            # frame id corresponding to time t on the trajectory
            fid = self.B[subject].posvel(self.B[subjecta].traj,t)[0]
            its = self.B[subject].intersectBody(A[fid,:],B[fid,:],topos=False,frameId=fid)
            tvisi.append(its.any())
        visi = pd.Series(tvisi,index=trange)
        #return(visi,iframe)
        return(visi)
def _visiarray(self,a,b,technoa='HKB',technob='HKB'):
""" create entries for plu.rectplot
"""
visi = self.visidev(a,b,technoa=technoa,technob=technob)
tv = visi.index.values
vv = visi.values.astype(int)
if (not(vv.all()) and vv.any()):
df = vv[1:]-vv[0:-1]
um = np.where(df==1)[0]
ud = np.where(df==-1)[0]
lum = len(um)
lud = len(ud)
#
# impose same size and starting
# on leading edge um and endinf on
# falling edge ud
#
if lum==lud:
if ud[0]<um[0]:
um = np.hstack((np.array([0]),um))
ud = np.hstack((ud,np.array([len(vv)-1])))
else:
if ((lum<lud) & (vv[0]==1)):
um = np.hstack((np.array([0]),um))
if ((lud<lum) & (vv[len(vv)-1]==1)):
ud = np.hstack((ud,np.array([len(vv)-1])))
tseg = np.array(zip(um,ud))
#else:
# tseg = np.array(zip(ud,um))
else:
if vv.all():
tseg = np.array(zip(np.array([0]),np.array([len(vv)-1])))
else :
tseg = np.array([[0,0]])
itseg = copy.copy(tseg)
bb = np.insert(itseg[:,1],0,0)
ee = np.hstack((itseg[:,0],len(vv)))
itseg = np.array((bb,ee)).T
# bb = np.hstack((bb,len(vv)))
return vv,tv,tseg,itseg
# def _computedevpdf(self):
# """ create a timestamped data frame
# with all positions of devices
# """
# t=self.B.traj.time()
# pos = np.empty((len(t),12,3))
# for ik,k in enumerate(t):
# self.B.settopos(t=k)
# pos[ik,:,:]=self.B.getlinkp()
# df=[]
# for d in range(pos.shape[1]):
# df_tmp=pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t)
# df_tmp['id']=self.B.dev.keys()[d]
# try :
# df = pd.concat([df,df_tmp])
# except:
# df = df_tmp
# df = df.sort_index()
# cols=['id','x','y','z']
# self.devdf=df[cols]
    def _computedevpdf(self):
        """ create a timestamped data frame with positions of all devices

        Builds self.devdf with, for every body-mounted device and every
        infrastructure device (self.din), its position (x,y,z), finite
        difference velocity (vx,vy,vz,v) and acceleration (ax,ay,az,a).
        """
        # normalize self.B to a dict {subject_name: body}
        if not isinstance(self.B,dict):
            B={self.subject[0]:self.B}
        else :
            B=self.B

        for b in B:
            if 'dev' in dir(B[b]):
                dev = B[b].dev.keys()
                # mocap point indices attached to each device
                udev=[B[b].dev[d]['uc3d'] for d in dev]
                # device position = mean of its attached mocap points
                postmp = np.array([np.mean(B[b]._f[:,u,:],axis=1) for u in udev])
                pos = postmp.swapaxes(0,1)
                t = B[b].time
                for d in range(len(dev)):
                    df_tmp=pd.DataFrame(pos[:,d,:],columns=['x','y','z'],index=t)
                    # finite-difference velocity and acceleration
                    # (assumes a uniform time step t[1]-t[0])
                    df_tmp[['vx','vy','vz']]=df_tmp.diff()/(t[1]-t[0])
                    df_tmp['v']=np.sqrt(np.sum(df_tmp[['vx','vy','vz']]**2,axis=1))
                    df_tmp[['ax','ay','az']]=df_tmp[['vx','vy','vz']].diff()/(t[1]-t[0])
                    df_tmp['a']=np.sqrt(np.sum(df_tmp[['ax','ay','az']]**2,axis=1))
                    df_tmp['id'] = list(B[b].dev.keys())[d]
                    df_tmp['subject']=B[b].name
                    # accumulate : first iteration raises NameError on df,
                    # subsequent ones concatenate
                    try :
                        df = pd.concat([df,df_tmp])
                    except:
                        df = df_tmp

        # static infrastructure devices : constant position, zero dynamics
        # NOTE(review): t is reused from the last body loop above -- if no
        # subject had a 'dev' attribute this raises NameError; verify
        for i in self.din:
            pos = self.din[i]['p']
            pos2 = pos[:,np.newaxis]*np.ones(len(t))
            df_tmp=pd.DataFrame(pos2.T,columns=['x','y','z'],index=t)
            df_tmp['v']=0.
            df_tmp['vx']=0.
            df_tmp['vy']=0.
            df_tmp['vz']=0.
            df_tmp['a']=0.
            df_tmp['ax']=0.
            df_tmp['ay']=0.
            df_tmp['az']=0.
            df_tmp['subject']=''
            df_tmp['id']=i
            df = pd.concat([df,df_tmp])

        df = df.sort_index()
        cols=['id','subject','x','y','z','v','vx','vy','vz','a','ax','ay','az']
        self.devdf=df[cols]
def export_csv(self,**kwargs):
""" export to csv devices positions
Parameters
----------
unit : string ('mm'|'cm'|'m'),
unit of positions in csv(default mm)
tunit: string
time unit in csv (default 'ns')
'alias': dict
dictionnary to replace name of the devices into the csv .
example : if you want to replace a device id named 'TCR:34'
to an id = 5, you have to add an entry in the alias dictionnary as :
alias.update({'TCR34':5})
offset : np.array
apply an offset on positions
Return
------
a csv file into the folder <PylayersProject>/netsave
"""
defaults={'unit' :'mm',
'tunit':'ns',
'offset':np.array(([0,0,0])),
'alias':{}}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
unit=kwargs.pop('unit')
tunit=kwargs.pop('tunit')
alias = kwargs.pop('alias')
if alias == {}:
alias={'TCR:49':4 #Nicolas TorsoLeft
,'TCR:34':5 #Nicolas TorsoRight
,'TCR:48':6 #Nicolas Back
,'TCR:36':7 #Nicolas Shoulder
,'TCR:2':8 # Jihad TorsoLeft
,'TCR:35':9 #Jihad TorsoRight
,'TCR:33':10 #Jihad Back
,'TCR:37':11 #Jihad Shoulder
,'TCR:30':12 #Eric Torso
,'TCR:25':13 #Eric Back
,'TCR:26':14 # Eric Shoulder
}
filename =pyu.getlong(self._filename,pstruc['DIRNETSAVE']) + '.csv'
df = copy.deepcopy(self.devdf)
ldf = df[['id','x','y','z']]
#rename devices
if alias != {}:
for k in alias:
u=ldf['id'] == k
ldf.iloc[u.values,0]=str(alias[k])
# fix position unit
if unit == 'm':
_unit = 1.
if unit == 'cm':
_unit = 1e2
if unit == 'mm':
_unit = 1e3
ldf.loc[:,'x']=ldf.loc[:,'x']*_unit-kwargs['offset'][0]
ldf.loc[:,'y']=ldf.loc[:,'y']*_unit-kwargs['offset'][1]
ldf.loc[:,'z']=ldf.loc[:,'z']*_unit-kwargs['offset'][2]
# fix time unit
if tunit == 'ms':
_tunit = 1e3
if tunit == 'us':
_tunit = 1e6
if tunit == 'ns':
_tunit = 1e9
# add timestamp column
ldf['Timestamp']=ldf.index*_tunit
ldf.to_csv(filename, sep = ' ',index=False)
    def savemat(self):
        """ save a subset of observables in Matlab .mat format

        Gathers AP positions, three body device trajectories
        (TorsoTopRight/TorsoTopLeft/BackCenter), the corresponding HKB
        RSS series and derived orientation angles, then writes them to
        <filemocap>.mat.

        NOTE(review): this method references names that are not defined
        in this scope: ``al1ebR`` ... ``al4ebL`` (estimated angles) will
        raise NameError unless provided elsewhere, and the bare
        ``array``/``sqrt``/``sum`` presumably come from a star import
        (e.g. pylab) -- verify before use. ``DataFrame.ix`` is also
        deprecated/removed in modern pandas.
        """
        d ={}
        # Access Point Coordinates
        pAP1 = self.getdevp(1,t=[0,100])
        pAP2 = self.getdevp(2,t=[0,100])
        pAP3 = self.getdevp(3,t=[0,100])
        pAP4 = self.getdevp(4,t=[0,100])
        t = self.hkb.index.values
        AP=np.array([[pAP1['x'][0],pAP1['y'][0]],
                     [pAP2['x'][0],pAP2['y'][0]],
                     [pAP3['x'][0],pAP3['y'][0]],
                     [pAP4['x'][0],pAP4['y'][0]]
                    ])
        # body device trajectories (HKB)
        pTTR = self.getdevp('TorsoTopRight',techno='HKB',t=[0,100])
        pTTL = self.getdevp('TorsoTopLeft',techno='HKB',t=[0,100])
        pBC = self.getdevp('BackCenter',techno='HKB',t=[0,100])
        d['AP']=AP
        d['pTTR'] = np.array([pTTR['x'],pTTR['y']])
        d['pTTL'] = np.array([pTTL['x'],pTTL['y']])
        d['pBC'] = np.array([pBC['x'],pBC['y']])
        # observables radios TTR/TTL/BC
        TTR_1 = self.hkb['AP1-TorsoTopRight'].values
        TTL_1 = self.hkb['AP1-TorsoTopLeft'].values
        BC_1 = self.hkb['AP1-BackCenter'].values
        # RSS differences back vs torso (min/max currently unused below)
        Rho1R = BC_1-TTR_1
        Rho1L = BC_1-TTL_1
        RhoM1R = np.nanmax(Rho1R)
        Rhom1R = np.nanmin(Rho1R)
        RhoM1L = np.nanmax(Rho1L)
        Rhom1L = np.nanmin(Rho1L)
        ### AP2
        TTR_2= self.hkb['AP2-TorsoTopRight'].values
        TTL_2= self.hkb['AP2-TorsoTopLeft'].values
        BC_2 = self.hkb['AP2-BackCenter'].values
        Rho2R = BC_2-TTR_2
        Rho2L = BC_2-TTL_2
        RhoM2R=np.nanmax(Rho2R)
        Rhom2R=np.nanmin(Rho2R)
        RhoM2L=np.nanmax(Rho2L)
        Rhom2L=np.nanmin(Rho2L)
        ### AP3
        TTR_3= self.hkb['AP3-TorsoTopRight'].values
        TTL_3= self.hkb['AP3-TorsoTopLeft'].values
        BC_3 = self.hkb['AP3-BackCenter'].values
        Rho3R = BC_3-TTR_3
        Rho3L = BC_3-TTL_3
        RhoM3R=np.nanmax(Rho3R)
        Rhom3R=np.nanmin(Rho3R)
        RhoM3L=np.nanmax(Rho3L)
        Rhom3L=np.nanmin(Rho3L)
        ### AP4
        TTR_4= self.hkb['AP4-TorsoTopRight'].values
        TTL_4= self.hkb['AP4-TorsoTopLeft'].values
        BC_4 = self.hkb['AP4-BackCenter'].values
        Rho4R = BC_4-TTR_4
        Rho4L = BC_4-TTL_4
        RhoM4R=np.nanmax(Rho4R)
        Rhom4R=np.nanmin(Rho4R)
        RhoM4L=np.nanmax(Rho4L)
        Rhom4L=np.nanmin(Rho4L)

        d['ttr1'] = TTR_1
        d['ttr2'] = TTR_2
        d['ttr3'] = TTR_3
        d['ttr4'] = TTR_4
        d['ttl1'] = TTL_1
        d['ttl2'] = TTL_2
        d['ttl3'] = TTL_3
        d['ttl4'] = TTL_4
        d['bc1'] = BC_1
        d['bc2'] = BC_2
        d['bc3'] = BC_3
        d['bc4'] = BC_4
        # hard-coded 10001-sample truncation -- TODO confirm
        d['time']=t[0:10001]

        # body orientation vectors (torso devices relative to back center)
        vpRC = (pTTR-pBC)
        vpLC = (pTTL-pBC)
        vp = (pTTR-pBC)+(pTTL-pBC)
        # unitary vectors
        vpRC = array([vpRC['x'],vpRC['y']])
        vpLC = array([vpLC['x'],vpLC['y']])
        vp = array([vp['x'],vp['y']])
        # unitary vectors
        vpn = vp/np.sqrt(np.sum(vp*vp,axis=0))
        vpRCn = vpRC/np.sqrt(np.sum(vpRC*vpRC,axis=0))
        vpLCn = vpLC/np.sqrt(np.sum(vpLC*vpLC,axis=0))
        # coord des AP
        p1 = pAP1.ix[0]
        p2 = pAP2.ix[0]
        p3 = pAP3.ix[0]
        p4 = pAP4.ix[0]
        # vectors from back center to each AP, then normalized
        v1C = p1-pBC
        v2C = p2-pBC
        v3C = p3-pBC
        v4C = p4-pBC
        v1C = np.array((v1C.x.values,v1C.y.values))
        v2C = np.array((v2C.x.values,v2C.y.values))
        v3C = np.array((v3C.x.values,v3C.y.values))
        v4C = np.array((v4C.x.values,v4C.y.values))
        v1Cn = v1C/(sqrt(np.sum(v1C*v1C,axis=0)))
        v2Cn = v2C/(sqrt(np.sum(v2C*v2C,axis=0)))
        v3Cn = v3C/(sqrt(np.sum(v3C*v3C,axis=0)))
        v4Cn = v4C/(sqrt(np.sum(v4C*v4C,axis=0)))
        # signed angles (cross product + dot product -> arctan2)
        cr1 = np.cross(vpn,v1Cn,axis=0)
        cr2 = np.cross(vpn,v2Cn,axis=0)
        cr3 = np.cross(vpn,v3Cn,axis=0)
        cr4 = np.cross(vpn,v4Cn,axis=0)

        cr1R = np.cross(vpRCn,v1Cn,axis=0)
        cr2R = np.cross(vpRCn,v2Cn,axis=0)
        cr3R = np.cross(vpRCn,v3Cn,axis=0)
        cr4R = np.cross(vpRCn,v4Cn,axis=0)

        cr1L = np.cross(vpLCn,v1Cn,axis=0)
        cr2L = np.cross(vpLCn,v2Cn,axis=0)
        cr3L = np.cross(vpLCn,v3Cn,axis=0)
        cr4L = np.cross(vpLCn,v4Cn,axis=0)

        dvpnv1n = sum(vpn*v1Cn,axis=0)
        dvpnv2n = sum(vpn*v2Cn,axis=0)
        dvpnv3n = sum(vpn*v3Cn,axis=0)
        dvpnv4n = sum(vpn*v4Cn,axis=0)

        dvpnv1Rn = sum(vpRCn*v1Cn,axis=0)
        dvpnv2Rn = sum(vpRCn*v2Cn,axis=0)
        dvpnv3Rn = sum(vpRCn*v3Cn,axis=0)
        dvpnv4Rn = sum(vpRCn*v4Cn,axis=0)

        dvpnv1Ln = sum(vpLCn*v1Cn,axis=0)
        dvpnv2Ln = sum(vpLCn*v2Cn,axis=0)
        dvpnv3Ln = sum(vpLCn*v3Cn,axis=0)
        dvpnv4Ln = sum(vpLCn*v4Cn,axis=0)

        alf1R = np.arctan2(cr1R,dvpnv1Rn)
        alf2R = np.arctan2(cr2R,dvpnv2Rn)
        alf3R = np.arctan2(cr3R,dvpnv3Rn)
        alf4R = np.arctan2(cr4R,dvpnv4Rn)
        alf1L = np.arctan2(cr1L,dvpnv1Ln)
        alf2L = np.arctan2(cr2L,dvpnv2Ln)
        alf3L = np.arctan2(cr3L,dvpnv3Ln)
        alf4L = np.arctan2(cr4L,dvpnv4Ln)

        d['al1R_gt'] = alf1R
        d['al2R_gt'] = alf2R
        d['al3R_gt'] = alf3R
        d['al4R_gt'] = alf4R
        d['al1L_gt'] = alf1L
        d['al2L_gt'] = alf2L
        d['al3L_gt'] = alf3L
        d['al4L_gt'] = alf4L

        # NOTE(review): al1ebR..al4ebL are undefined here (NameError)
        d['al1R_est']=np.nan_to_num(al1ebR[0:10001])
        d['al2R_est']=np.nan_to_num(al2ebR[0:10001])
        d['al3R_est']=np.nan_to_num(al3ebR[0:10001])
        d['al4R_est']=np.nan_to_num(al4ebR[0:10001])
        d['al1L_est']=np.nan_to_num(al1ebL[0:10001])
        d['al2L_est']=np.nan_to_num(al2ebL[0:10001])
        d['al3L_est']=np.nan_to_num(al3ebL[0:10001])
        d['al4L_est']=np.nan_to_num(al4ebL[0:10001])

        _filename = self.filemocap.replace('.c3d','.mat')
        savemat(_filename,d)
def getlinkd(self, a, b, techno='', t=''):
    """Return ground-truth distance(s) for the link between devices *a* and *b*.

    Parameters
    ----------
    a : str | int
        device name or id
    b : str | int
        device name or id
    techno : str, optional
        radio techno
    t : float | list, optional
        a single time, a [start, stop] pair, or '' for all timestamps

    Returns
    -------
    pandas.Series
        distances for the requested link, indexed by time

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S = CorSer(serie=6)
    >>> d = S.getlinkd('AP1','WristLeft',techno='HKB')
    """
    raw_a, raw_b = a, b
    a, ia, nna, subjecta, techno = self.devmapper(a, techno)
    b, ib, nnb, subjectb, techno = self.devmapper(b, techno)
    frame = self.distdf
    forward = nna + '-' + nnb
    backward = nnb + '-' + nna
    if forward in frame.keys():
        link = forward
    elif backward in frame.keys():
        link = backward
    else:
        raise AttributeError('Link between ' + str(raw_a) + ' and ' + str(raw_b) + ' is not available in distdf dataframe')
    # restrict to the requested time span
    if isinstance(t, list):
        window = (frame.index >= t[0]) & (frame.index <= t[-1])
        return frame[window][link]
    if t == '':
        return frame[link]
    half_step = (frame.index[1] - frame.index[0]) / 2.
    window = (frame.index >= t - half_step) & (frame.index <= t + half_step)
    return frame[window][link]
def getlinkp(self, a, b, technoa='', technob='', t='', fId=''):
    """Return the positions of the two devices of a link.

    Parameters
    ----------
    a : str | int
        device name or id
    b : str | int
        device name or id
    technoa : str, optional
        radio techno of device *a*
    technob : str, optional
        radio techno of device *b*
    t : float | list, optional
        given time | [time_start, time_stop]
    fId : int, optional
        frame id

    Returns
    -------
    pa, pb : positions of *a* and *b* (see getdevp)

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S=CorSer(serie=34)
    >>> a,b=S.getlinkp('AP1','WristLeft')
    """
    position_a = self.getdevp(a, technoa, t, fId)
    position_b = self.getdevp(b, technob, t, fId)
    return position_a, position_b
def getlink(self, a, b, techno='', t=''):
    """ get a link value

    Parameters
    ----------
    a : str | int
        name | id
    b : str | int
        name | id
    techno : str, optional
        radio techno
    t : float | list, optional
        given time or [start, stop] time

    Returns
    -------
    pandas.Series
        measured values for the link over the requested time span

    Raises
    ------
    AttributeError
        if the techno is unknown or the link is absent from the
        corresponding dataframe

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S=CorSer(serie=34)
    >>> S.getlink('AP1','WristLeft')
    """
    ra = a
    rb = b
    a, ia, nna, subjecta, techno = self.devmapper(a, techno)
    b, ib, nnb, subjectb, techno = self.devmapper(b, techno)
    # select the dataframe matching the radio technology
    ut = techno.upper()
    if 'HK' in ut:
        df = self.hkb
        dfname = 'hkb'
    elif 'BS' in ut:
        df = self.bespo
        dfname = 'bespo'
    elif 'TCR' in ut:
        df = self.tcr
        dfname = 'tcr'
    else:
        # BUGFIX: previously an unmatched techno fell through with
        # `df` and `link` unbound, raising a confusing NameError below.
        raise AttributeError('Unknown radio techno ' + str(techno))
    # the link may be stored in either direction
    if (a + '-' + b) in df.keys():
        link = a + '-' + b
    elif (b + '-' + a) in df.keys():
        link = b + '-' + a
    else:
        raise AttributeError('Link between ' + str(ra) + ' and ' + str(rb) +
                             ' is not available in ' + dfname + ' dataframe')
    # determine time
    if isinstance(t, list):
        tstart = t[0]
        tstop = t[-1]
        val = df[(df.index >= tstart) & (df.index <= tstop)][link]
    elif t == '':
        val = df[link]
    else:
        # single timestamp: take the sample falling within half a step
        hstep = (df.index[1] - df.index[0]) / 2.
        val = df[(df.index >= t - hstep) & (df.index <= t + hstep)][link]
    return val
def getdevp(self, a, techno='', t='', fId=''):
    """Return the (x, y, z) position of device *a*.

    Parameters
    ----------
    a : str | int
        device name or id
    techno : str, optional
        radio techno
    t : float | list, optional
        given time | [time_start, time_stop]
    fId : int, optional
        frame id (kept for interface compatibility; not used here)

    Returns
    -------
    pandas.DataFrame
        columns ['x', 'y', 'z'] restricted to the requested time span

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S=CorSer(serie=34)
    >>> a=S.getdevp('AP1','WristLeft')
    """
    a, ia, nna, subjecta, techno = self.devmapper(a, techno)
    same_device = self.devdf['id'] == nna
    # translate t into a [tstart, tstop] window
    if isinstance(t, list):
        tstart, tstop = t[0], t[-1]
    elif t == '':
        tstart, tstop = 0.0, 1000000
    else:
        device_index = self.devdf[same_device].index
        half_step = (device_index[1] - device_index[0]) / 2.
        tstart, tstop = t - half_step, t + half_step
    in_window = (self.devdf.index >= tstart) & (self.devdf.index <= tstop)
    return self.devdf[in_window & same_device][['x', 'y', 'z']]
def devmapper(self, a, techno=''):
    """ retrieve name of device if input is number
        or retrieve number of device if input is name

    Parameters
    ----------
    a : str | int
        name | id | bodyid (e.g. 'HKB:16')
    techno : str
        radio techno ('TCR' | 'HKB' | 'BS'); mandatory when *a* is a
        body-part name and the serie mixes several technologies

    Returns
    -------
    a : string
        dev name
    ia : int
        dev number
    ba : string
        dev reference in body (e.g. 'HKB:16')
    subject : string
        body owning the device ('' if none found)
    techno : string
        resolved radio techno
    """

    def _owner(bodyid):
        # first non-cylinder body carrying this device reference
        for bname in self.B:
            if not 'Cylindre' in bname:
                if bodyid in self.B[bname].dev.keys():
                    return bname
        return ''

    subject = ''
    # if a is a bodyid (e.g. 'HKB:16') or a body part (e.g. AnkleRight)
    if isinstance(a, str):
        # case where body id is given as input
        if ('HKB' in a) or ('TCR' in a) or ('BS' in a):
            ba = a
            techno, ia = a.split(':')
            ia = int(ia)
            try:
                if techno.upper() == 'TCR':
                    a = self.idTCR[ia]
                elif techno.upper() == 'HKB':
                    a = self.idHKB[ia]
                elif techno.upper() == 'BS':
                    a = self.idBS[ia]
            except (KeyError, AttributeError):
                # BUGFIX: was a bare except; only a missing id or missing
                # id-table should map to this error
                raise AttributeError('No device ' + a + ' for techno ' + techno)
            subject = _owner(ba)
        # case where body part (e.g. AnkleRight) is given. Here techno is mandatory
        else:
            if techno == '':
                if self.typ != 'FULL':
                    if self.typ == 'HKBS':
                        raise AttributeError('Please indicate the radio techno in argument : HKB or BS')
                    else:
                        techno = self.typ
                else:
                    raise AttributeError('Please indicate the radio techno in argument : TCR, HKB, BS')
            try:
                if techno.upper() == 'TCR':
                    ia = self.dTCR[a]
                    ba = 'TCR:' + str(ia)
                elif techno.upper() == 'HKB':
                    ia = self.dHKB[a]
                    ba = 'HKB:' + str(ia)
                elif techno.upper() == 'BS':
                    ia = self.dBS[a]
                    ba = 'BS:' + str(ia)
            except (KeyError, AttributeError):
                raise AttributeError('No device on body part: ' + a + ' for techno ' + techno)
            subject = _owner(ba)
    # an id (number) is given
    else:
        # techno autodetection; raise if the id exists in several technos
        if techno == '':
            if hasattr(self, 'idHKB'):
                if a in self.idHKB.keys():
                    if techno == '':
                        techno = 'HKB'
                    else:
                        raise AttributeError('Please indicate the radio techno in argument : TCR, HKB, BS')
            if hasattr(self, 'idBS'):
                if a in self.idBS.keys():
                    if techno == '':
                        techno = 'BS'
                    else:
                        raise AttributeError('Please indicate the radio techno in argument : TCR, HKB, BS')
            if hasattr(self, 'idTCR'):
                if a in self.idTCR.keys():
                    if techno == '':
                        techno = 'TCR'
                    else:
                        raise AttributeError('Please indicate the radio techno in argument : TCR, HKB, BS')
        try:
            if techno.upper() == 'TCR':
                ia = a
                a = self.idTCR[a]
                ba = 'TCR:' + str(ia)
            elif techno.upper() == 'HKB':
                ia = a
                a = self.idHKB[a]
                ba = 'HKB:' + str(ia)
            elif techno.upper() == 'BS':
                ia = a
                a = self.idBS[a]
                ba = 'BS:' + str(ia)
        except (KeyError, AttributeError):
            raise AttributeError('No device with ID: ' + str(a) + ' for techno ' + techno)
        subject = _owner(ba)
    return a, ia, ba, subject, techno
def align(self, devdf, hkbdf):
    """ DEPRECATED align time of 2 data frames:

    the time delta of the second data frame is applied on the first one
    (e.g. time for devdf downsampled by hkb data frame time)

    Parameters
    ----------
    devdf : device dataframe
    hkbdf : hkbdataframe

    Returns
    -------
    devdfc :
        aligned copy device dataframe
    hkbdfc :
        aligned copy hkbdataframe

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S=CorSer(6)
    >>> devdf = S.devdf[S.devdf['id']=='HKB:15']
    >>> hkbdf = S.hkb['AP1-AnkleLeft']
    >>> devdf2,hkbdf2 = S.align(devdf,hkbdf)
    """
    print ('warning DEPRECATED')
    devdfc = copy.deepcopy(devdf)
    hkbdfc = copy.deepcopy(hkbdf)
    idev = devdfc.index
    ihkb = hkbdfc.index
    devdfc.index = pd.to_datetime(idev, unit='s')
    hkbdfc.index = pd.to_datetime(ihkb, unit='s')
    # BUGFIX: removed leftover `import ipdb; ipdb.set_trace()` debugging
    # breakpoint which halted any non-interactive run of this method.
    # hkb sampling period in microseconds, used as resampling rule
    sf = (hkbdfc.index[2] - hkbdfc.index[1]).microseconds
    devdfc = devdfc.resample(str(sf) + 'U')
    devdfc.index = pd.Series([val.time() for val in devdfc.index])
    hkbdfc.index = pd.Series([val.time() for val in hkbdfc.index])
    return devdfc, hkbdfc
def _apply_offset(self, techno):
    """ apply offset from self.offset[self._filename][techno+'_index']

    if offset > 0 : np.nan rows are prepended
    if offset < 0 : the first values of the dataframe are dropped
    if offset == 0 : nothing to do

    Parameters
    ----------
    techno : str
        'HKB' | 'TCR' | 'BS'
    """
    if techno == 'HKB':
        df = self.hkb
    elif techno == 'TCR':
        df = self.tcr
    elif techno == 'BS':
        df = self.bespo
    else:
        raise AttributeError('Unknown tecnology got applying offset')

    offset = self.offset[self._filename][techno.lower() + '_index']
    if offset < 0:
        # drop the first |offset| rows but keep the original timestamps
        index = df.index
        df = df.iloc[-offset:]
        df.index = index[0:offset]
    elif offset > 0:
        # extract time values and shift them forward by `offset` steps
        npahkbi = df.index.values
        step = npahkbi[1] - npahkbi[0]
        nstart = npahkbi[0] + (step * (offset))
        df.index = pd.Index(npahkbi + nstart)
        # add blank at beginning.
        # BUGFIX: the blank padding frame used to overwrite `df` itself and
        # was then concatenated with itself, discarding all measurements
        # (compare _apply_hkb_offset). Keep the shifted data instead.
        blank = pd.DataFrame({}, columns=df.keys(), index=npahkbi[:offset])
        df = pd.concat([blank, df])
    # offset == 0 : unchanged (BUGFIX: the old <=0 branch crashed on a
    # length-mismatched index assignment when offset was exactly 0)

    # BUGFIX: write the shifted dataframe back; previously only the time
    # index attribute was updated and the dataframe itself was lost.
    if techno == 'HKB':
        self.hkb = df
        self.thkb = df.index
    elif techno == 'TCR':
        self.tcr = df
        self.ttcr = df.index
    elif techno == 'BS':
        self.bespo = df
        self.tbs = df.index
def _apply_hkb_offset(self):
    """Shift self.hkb by self.offset[self._filename]['hkb_index'].

    A positive offset prepends blank (NaN) rows and shifts the time
    index forward; a non-positive offset drops the first rows of
    self.hkb while keeping the original timestamps.
    """
    offset = self.offset[self._filename]['hkb_index']
    if offset <= 0:
        former_index = self.hkb.index
        self.hkb = self.hkb.iloc[-offset:]
        self.hkb.index = former_index[0:offset]
    else:
        # shift every timestamp forward by `offset` sampling steps
        times = self.hkb.index.values
        step = times[1] - times[0]
        shifted_start = times[0] + (step * offset)
        self.hkb.index = pd.Index(times + shifted_start)
        # prepend an empty frame covering the vacated timestamps
        blank = pd.DataFrame({}, columns=self.hkb.keys(), index=times[:offset])
        self.hkb = pd.concat([blank, self.hkb])
    self.thkb = self.hkb.index
def _align_on_devdf(self, typ=''):
    """ align hkb, bespo or tcr time on the device data frame (devdf) time index

    In place (a.k.a. replace old self.hkb by the resampled one)

    Parameters
    ----------
    typ : str
        'HKB' | 'BS' | 'TCR'

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S = CorSer(6)
    >>> S._align_on_devdf(typ='HKB')
    """
    if typ == 'HKB':
        idf = self.hkb
    elif typ == 'BS':
        idf = self.bespo
    elif typ == 'TCR':
        idf = self.tcr
    else:
        # BUGFIX: an unknown typ previously left `idf` unbound and the
        # method failed later with a confusing NameError.
        raise AttributeError('typ must be HKB, BS or TCR')
    # mocap time, e.g. 0 0.010001 0.020002
    mocapindex = pd.to_datetime(self.tmocap, unit='s')
    # radio time, e.g. 0 0.023 0.0473
    idf.index = pd.to_datetime(idf.index, unit='s')
    # mocap sampling period in microseconds, used as resampling rule
    sf = (mocapindex[2] - mocapindex[1]).microseconds
    df = idf.resample(str(sf) + 'U', fill_method='ffill')
    nindex = time2npa(df.index)
    df.index = pd.Index(nindex)
    if typ == 'HKB':
        self.hkb = df
    elif typ == 'BS':
        self.bespo = df
    elif typ == 'TCR':
        self.tcr = df
def _align_devdf_on_hkb(self, devdf, hkbdf):
    """ NOT USED Practically

    Align the time of the device dataframe on the hkb dataframe sampling
    (e.g. time for devdf downsampled by hkb data frame time).

    Parameters
    ----------
    devdf : pandas.DataFrame
        device dataframe (one row per device per timestamp, so the index
        holds repeated values)
    hkbdf : pandas.DataFrame
        hkb dataframe

    Returns
    -------
    pandas.DataFrame
        realigned copy of *devdf*

    Examples
    --------
    >>> from pylayers.measures.cormoran import *
    >>> S=CorSer(6)
    >>> devdf = S.devdf[S.devdf['id']=='HKB:15']
    >>> hkbdf = S.hkb['AP1-AnkleLeft']
    >>> devdf2 = S._align_devdf_on_hkb(devdf,hkbdf)
    """
    devdfc = copy.deepcopy(devdf)
    hkbdfc = copy.deepcopy(hkbdf)
    devdfc.index = pd.to_datetime(devdfc.index, unit='s')
    hkbdfc.index = pd.to_datetime(hkbdfc.index, unit='s')
    # hkb sampling period in microseconds
    sf = (hkbdfc.index[2] - hkbdfc.index[1]).microseconds
    # cannot resample devdf directly because of multiple similar index
    # values: resample each device group separately
    gb = devdfc.groupby(['id'])
    # unique device ids and the row position of their first occurrence
    devid, idevid = np.unique(devdfc['id'], return_index=True)
    # BUGFIX: pair each unique device with the subject found at its own
    # first-occurrence row. The previous dict comprehension indexed the
    # unique-device array `devid` with a row position, mismatching
    # device and subject (or raising IndexError on large frames).
    subject = {d: devdfc['subject'].iloc[i] for d, i in zip(devid, idevid)}
    # resample each group separately
    dgb = {d: gb.get_group(d).resample(str(sf) + 'U') for d in devid}
    # re-insert subject and device id information in each resampled group
    for d in dgb:
        dgb[d]['subject'] = subject[d]
        dgb[d]['id'] = d
    # create the realigned dataframe
    df = pd.concat([dgb[d] for d in dgb])
    df.sort_index(inplace=True)
    nindex = time2npa(df.index)
    df.index = pd.Index(nindex)
    cols = ['id', 'subject', 'x', 'y', 'z', 'v', 'vx', 'vy', 'vz', 'a', 'ax', 'ay', 'az']
    return df[cols]
def dist_sh2rssi(dist, Ssh, offsetdB=15):
    """Convert a distance profile into a zero-mean pseudo-RSSI.

    The value is the free-space-like term 10*log10(1/d**2); samples
    flagged as shadowed in ``Ssh[0]`` get an extra ``offsetdB``
    attenuation, and the result is centered on its mean.

    Parameters
    ----------
    dist : pandas.Series | numpy.ndarray
        distances
    Ssh : array-like
        shadowing indicator; ``Ssh[0] == 1`` marks shadowed samples
        (NOTE(review): assumed from the indexing pattern — confirm)
    offsetdB : float
        attenuation applied on shadowed samples (dB)

    Returns
    -------
    numpy.ndarray
        zero-mean pseudo RSSI
    """
    # accept Series or plain arrays uniformly (isinstance also covers
    # Series subclasses, unlike the former `type(...) ==` check)
    if isinstance(dist, pd.Series):
        dist = dist.values
    z1 = 10 * np.log10(1. / dist ** 2)
    u = np.where(Ssh[0] == 1)[0]
    z1[u] = z1[u] - offsetdB
    z1 = z1 - np.mean(z1)
    return z1
# z2 = Srssi.values
# z2m = np.mean(z2[~np.isnan(z2)])
# z2[np.isnan(z2)]=z2m
# z2 = z2-np.mean(z2)
# z1n = z1/np.sqrt(np.sum(z1*z1))
# z2n = z2/np.sqrt(np.sum(z2*z2))
# cn,dec,ratio = resync(z1n,z2n)
# tdec.append(dec)
#tratio.append(ratio)
#if ratio > maxratio:
# maxratio = ratio
# def get_data(self,a,b):
# T=self.tcr[a+'-'+b]
# T.name=T.name+'-tcr'
# H=self.hkb[a+'-'+b]
# H.name=H.name+'-hkb'
# udhk = self.accessdm(a,b,'HKB')
# udtcr = self.accessdm(a,b,'HKB')
# dist_tcr=self.dist[:,udtcr[0],udtcr[1]]
# dist_hkb=self.dist[:,udhk[0],udhk[1]]
# tdist=np.linspace(0,self.dist.shape[0]/100.,self.dist.shape[0])
# D_tcr=pd.Series(dist_tcr,index=tdist)
# D_tcr.name = 'dist-tcr'
# D_hkb=pd.Series(dist_hkb,index=tdist)
# D_hkb.name = 'dist-hkb'
# return T,H,D_tcr,D_hkb
# def get_dataframes(self,a,b):
# """ assemble all series in a DataFrame
# """
# T,H,DT,DH = self.get_data(a,b)
# NH=(np.sqrt(1/(10**(H/10)))/4e4)
# NHc=NH-NH.mean()
# DHc=DH-DH.mean()
# inh = NHc.index
# idh = DHc.index
# NHc.index = pd.to_datetime(inh,unit='m')
# DHc.index = pd.to_datetime(idh,unit='m')
# sD = (DHc.index[1]-DHc.index[0])
# sf= str(int(sD.microseconds*1e-3)) + 'ms'
# NHcr = NHc.resample(sf,fill_method='ffill')
# return NHcr,DHc
|
pylayers/pylayers
|
pylayers/measures/cormoran.py
|
Python
|
mit
| 164,467
|
[
"Mayavi",
"VTK"
] |
755b0ac18083cce96e2fffd88d042592d5107404f5870fc27f7c19be75a288f3
|
import json
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from keystoneclient.exceptions import Conflict
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from models import SLA
from crystal_dashboard.api import policies as api
from crystal_dashboard.dashboards.crystal import common
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
class MyFilterAction(tables.FilterAction):
    """Client-side name filter for the SLOs table."""
    name = "myfilter"
class CreateSLO(tables.LinkAction):
    """Table action opening the 'create SLO' modal form."""
    name = "create"
    verbose_name = _("Create SLO")
    url = "horizon:crystal:policies:bw_slos:create"
    classes = ("ajax-modal",)
    icon = "plus"
class UpdateSLO(tables.LinkAction):
    """Row action opening the 'edit SLO' modal form for one row."""
    name = "update"
    verbose_name = _("Edit")
    icon = "pencil"
    classes = ("ajax-modal", "btn-update",)

    def get_link_url(self, datum=None):
        # Build the per-row update URL from the row's SLO id.
        base_url = reverse("horizon:crystal:policies:bw_slos:update", kwargs={"slo_id": datum.id})
        return base_url
class UpdateCell(tables.UpdateAction):
    """Inline (ajax) editing of the GET/PUT bandwidth cells."""

    def allowed(self, request, project, cell):
        # Only the two bandwidth columns are editable in place.
        return cell.column.name in ["get_bandwidth", "put_bandwidth"]

    def update_cell(self, request, datum, id, cell_name, new_cell_value):
        """Push the edited cell value to the crystal API.

        Returns True on success; False (after reporting) on API errors.
        Raises ValidationError on a 409 conflict so Horizon shows an
        inline message.
        """
        try:
            # Map table column names onto crystal API SLO names.
            slo_names_dict = {'get_bandwidth': 'get_bw', 'put_bandwidth': 'put_bw'}
            api.fil_update_slo(request, 'bandwidth', slo_names_dict[cell_name], id, {'value': new_cell_value})
        except Conflict:
            # Returning a nice error message about name conflict. The message
            # from exception is not that clear for the user
            message = _("Can't change value")
            raise ValidationError(message)
        except Exception:
            exceptions.handle(request, ignore=True)
            return False
        return True
class UpdateRow(tables.Row):
    """Ajax row refresh: rebuild one SLA row from the crystal API."""
    ajax = True

    def get_data(self, request, id):
        """Fetch both bandwidth SLOs for *id* and rebuild an SLA object."""
        get_sla = api.fil_get_slo(request, 'bandwidth', 'get_bw', id)
        put_sla = api.fil_get_slo(request, 'bandwidth', 'put_bw', id)
        get_sla_json = json.loads(get_sla.text)
        put_sla_json = json.loads(put_sla.text)
        storage_policies_dict = dict(common.get_storage_policy_list(request, common.ListOptions.by_id()))
        projects_dict = dict(common.get_project_list(request))
        # target is encoded as '<project_id>#<policy_id>'
        project_target, policy_id = get_sla_json['target'].split('#')
        sla = SLA(project_target, projects_dict[str(project_target)], policy_id,
                  storage_policies_dict[str(policy_id)], get_sla_json['value'],
                  put_sla_json['value'])
        return sla
class DeleteSLO(tables.DeleteAction):
    """Row action deleting both the GET and PUT bandwidth SLOs of a row."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete SLO",
            u"Delete SLOs",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted SLO",
            u"Deleted SLOs",
            count
        )

    name = "delete_sla"
    success_url = "horizon:crystal:policies:index"

    def delete(self, request, obj_id):
        """Delete the 'get_bw' and 'put_bw' SLOs for *obj_id*.

        On any API failure, redirect to the policies index with an
        error message.
        """
        try:
            success = True
            error_msg = ''
            for slo_name in ['get_bw', 'put_bw']:
                response = api.fil_delete_slo(request, 'bandwidth', slo_name, obj_id)
                if 200 <= response.status_code < 300:
                    pass
                    # messages.success(request, _("Successfully deleted sla: %s") % obj_id)
                else:
                    success = False
                    error_msg = response.text
            if not success:
                raise sdsexception.SdsException(error_msg)
        except Exception as ex:
            redirect = reverse("horizon:crystal:policies:index")
            # BUGFIX: exceptions have no `.message` attribute on Python 3
            # (and it is deprecated on Python 2.6+); format the exception
            # itself instead.
            error_message = "Unable to remove sla.\t %s" % ex
            exceptions.handle(request, _(error_message), redirect=redirect)
class DeleteMultipleSLOs(DeleteSLO):
    """Batch variant of DeleteSLO used as a table-level action."""
    name = "delete_multiple_slas"
class SLAsTable(tables.DataTable):
    """Horizon table listing bandwidth SLOs per project / storage policy."""
    tenant_name = tables.Column("project_name", verbose_name=_("Project Name"))
    tenant_id = tables.Column("project_id", verbose_name=_("Project ID"))
    policy_name = tables.Column("policy_name", verbose_name=_("Storage Policy"))
    # Both bandwidth columns are editable in place through UpdateCell.
    get_bandwidth = tables.Column("get_bw", verbose_name=_("GET BW"), form_field=forms.CharField(max_length=255), update_action=UpdateCell)
    put_bandwidth = tables.Column("put_bw", verbose_name=_("PUT BW"), form_field=forms.CharField(max_length=255), update_action=UpdateCell)

    class Meta:
        name = "slas"
        verbose_name = _("SLOs")
        table_actions = (MyFilterAction, CreateSLO, DeleteMultipleSLOs,)
        row_actions = (UpdateSLO, DeleteSLO,)
        row_class = UpdateRow
|
Crystal-SDS/dashboard
|
crystal_dashboard/dashboards/crystal/policies/bw_slos/tables.py
|
Python
|
gpl-3.0
| 4,895
|
[
"CRYSTAL"
] |
f665993938ba3c4ba061fd071a8aa02ebc47366bd8ad7fd2edaa7967f9e98c67
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import numpy as np
from bigdl.dllib.feature.dataset.base import maybe_download
from bigdl.orca.test_zoo_utils import ZooTestCase
from bigdl.orca.inference import InferenceModel
import tarfile
np.random.seed(1337)  # for reproducibility

# Test fixtures live next to this file; app.properties is produced by the
# scala build and may override the default data store URL.
resource_path = os.path.join(os.path.dirname(__file__), "../resources")
property_path = os.path.join(os.path.dirname(__file__),
                             "../../../../../../scala/target/classes/app.properties")
data_url = "https://s3-ap-southeast-1.amazonaws.com"

with open(property_path) as f:
    for _ in range(2):  # skip the first two lines
        next(f)
    for line in f:
        if "data-store-url" in line:
            line = line.strip()
            # BUGFIX: split on the first '=' only so URLs containing '='
            # (e.g. query strings) are not truncated.
            data_url = line.split("=", 1)[1].replace("\\", "")
class TestInferenceModel(ZooTestCase):
    """Smoke tests: load models of several formats into InferenceModel
    and run a prediction on random input."""

    def test_load_bigdl(self):
        """Load a BigDL LeNet model and predict on MNIST-shaped input."""
        model = InferenceModel(3)
        model.load_bigdl(os.path.join(resource_path, "models/bigdl/bigdl_lenet.model"))
        batch = np.random.random([4, 28, 28, 1])
        _ = model.predict(batch)

    def test_load_caffe(self):
        """Load a Caffe prototxt/caffemodel pair and predict."""
        model = InferenceModel(10)
        proto = os.path.join(resource_path, "models/caffe/test_persist.prototxt")
        weights = os.path.join(resource_path, "models/caffe/test_persist.caffemodel")
        model.load_caffe(proto, weights)
        _ = model.predict(np.random.random([4, 3, 8, 8]))

    def test_load_openvino(self):
        """Download an OpenVINO ResNet-50 IR pair and predict."""
        local_path = self.create_temp_dir()
        model = InferenceModel(1)
        model_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"
        weight_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"
        model_path = maybe_download("resnet_v1_50.xml", local_path, model_url)
        weight_path = maybe_download("resnet_v1_50.bin", local_path, weight_url)
        model.load_openvino(model_path, weight_path)
        model.predict(np.random.random([4, 1, 224, 224, 3]))
# Allow running this test module directly: `python test_inference_model.py`.
if __name__ == "__main__":
    pytest.main([__file__])
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/inference/test_inference_model.py
|
Python
|
apache-2.0
| 2,742
|
[
"ORCA"
] |
b23be33a133deed6968cbdc674b711bb328aa74f6cd6750c55960258f56a1768
|
# sybase/base.py
# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
# Words that must be quoted when used as identifiers; consumed by
# SybaseIdentifierPreparer below.
RESERVED_WORDS = set([
    "add", "all", "alter", "and",
    "any", "as", "asc", "backup",
    "begin", "between", "bigint", "binary",
    "bit", "bottom", "break", "by",
    "call", "capability", "cascade", "case",
    "cast", "char", "char_convert", "character",
    "check", "checkpoint", "close", "comment",
    "commit", "connect", "constraint", "contains",
    "continue", "convert", "create", "cross",
    "cube", "current", "current_timestamp", "current_user",
    "cursor", "date", "dbspace", "deallocate",
    "dec", "decimal", "declare", "default",
    "delete", "deleting", "desc", "distinct",
    "do", "double", "drop", "dynamic",
    "else", "elseif", "encrypted", "end",
    "endif", "escape", "except", "exception",
    "exec", "execute", "existing", "exists",
    "externlogin", "fetch", "first", "float",
    "for", "force", "foreign", "forward",
    "from", "full", "goto", "grant",
    "group", "having", "holdlock", "identified",
    "if", "in", "index", "index_lparen",
    "inner", "inout", "insensitive", "insert",
    "inserting", "install", "instead", "int",
    "integer", "integrated", "intersect", "into",
    "iq", "is", "isolation", "join",
    "key", "lateral", "left", "like",
    "lock", "login", "long", "match",
    "membership", "message", "mode", "modify",
    "natural", "new", "no", "noholdlock",
    "not", "notify", "null", "numeric",
    "of", "off", "on", "open",
    "option", "options", "or", "order",
    "others", "out", "outer", "over",
    "passthrough", "precision", "prepare", "primary",
    "print", "privileges", "proc", "procedure",
    "publication", "raiserror", "readtext", "real",
    "reference", "references", "release", "remote",
    "remove", "rename", "reorganize", "resource",
    "restore", "restrict", "return", "revoke",
    "right", "rollback", "rollup", "save",
    "savepoint", "scroll", "select", "sensitive",
    "session", "set", "setuser", "share",
    "smallint", "some", "sqlcode", "sqlstate",
    "start", "stop", "subtrans", "subtransaction",
    "synchronize", "syntax_error", "table", "temporary",
    "then", "time", "timestamp", "tinyint",
    "to", "top", "tran", "trigger",
    "truncate", "tsequal", "unbounded", "union",
    "unique", "unknown", "unsigned", "update",
    "updating", "user", "using", "validate",
    "values", "varbinary", "varchar", "variable",
    "varying", "view", "wait", "waitfor",
    "when", "where", "while", "window",
    "with", "with_cube", "with_lparen", "with_rollup",
    "within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
    """Mixin for Sybase unicode types whose DBAPI values come back as
    buffer objects; converts results to str on the way out."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # convert the buffer to str; pass None through untouched
            return str(value) if value is not None else None  # decode("ucs-2")
        return process
# Marker types: each subclass only sets __visit_name__ so that
# SybaseTypeCompiler dispatches to the matching visit_* method.

class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNICHAR'


class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    __visit_name__ = 'UNIVARCHAR'


class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    __visit_name__ = 'UNITEXT'


class TINYINT(sqltypes.Integer):
    __visit_name__ = 'TINYINT'


class BIT(sqltypes.TypeEngine):
    __visit_name__ = 'BIT'


class MONEY(sqltypes.TypeEngine):
    __visit_name__ = "MONEY"


class SMALLMONEY(sqltypes.TypeEngine):
    __visit_name__ = "SMALLMONEY"


class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    __visit_name__ = "UNIQUEIDENTIFIER"


class IMAGE(sqltypes.LargeBinary):
    __visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    """Renders DDL type names, mapping generic types onto Sybase ones."""

    def visit_large_binary(self, type_, **kw):
        return self.visit_IMAGE(type_)

    def visit_boolean(self, type_, **kw):
        # Sybase has no BOOLEAN type; BIT is the conventional substitute.
        return self.visit_BIT(type_)

    def visit_unicode(self, type_, **kw):
        return self.visit_NVARCHAR(type_)

    def visit_UNICHAR(self, type_, **kw):
        return "UNICHAR(%d)" % type_.length

    def visit_UNIVARCHAR(self, type_, **kw):
        return "UNIVARCHAR(%d)" % type_.length

    def visit_UNITEXT(self, type_, **kw):
        return "UNITEXT"

    def visit_TINYINT(self, type_, **kw):
        return "TINYINT"

    def visit_IMAGE(self, type_, **kw):
        return "IMAGE"

    def visit_BIT(self, type_, **kw):
        return "BIT"

    def visit_MONEY(self, type_, **kw):
        return "MONEY"

    def visit_SMALLMONEY(self, type_, **kw):
        return "SMALLMONEY"

    def visit_UNIQUEIDENTIFIER(self, type_, **kw):
        return "UNIQUEIDENTIFIER"
# Maps Sybase catalog type names (as returned by reflection) onto
# SQLAlchemy type classes.
ischema_names = {
    'bigint': BIGINT,
    'int': INTEGER,
    'integer': INTEGER,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'unsigned bigint': BIGINT,  # TODO: unsigned flags
    'unsigned int': INTEGER,  # TODO: unsigned flags
    'unsigned smallint': SMALLINT,  # TODO: unsigned flags
    'numeric': NUMERIC,
    'decimal': DECIMAL,
    'dec': DECIMAL,
    'float': FLOAT,
    'double': NUMERIC,  # TODO
    'double precision': NUMERIC,  # TODO
    'real': REAL,
    'smallmoney': SMALLMONEY,
    'money': MONEY,
    'smalldatetime': DATETIME,
    'datetime': DATETIME,
    'date': DATE,
    'time': TIME,
    'char': CHAR,
    'character': CHAR,
    'varchar': VARCHAR,
    'character varying': VARCHAR,
    'char varying': VARCHAR,
    'unichar': UNICHAR,
    'unicode character': UNIVARCHAR,
    'nchar': NCHAR,
    'national char': NCHAR,
    'national character': NCHAR,
    'nvarchar': NVARCHAR,
    'nchar varying': NVARCHAR,
    'national char varying': NVARCHAR,
    'national character varying': NVARCHAR,
    'text': TEXT,
    'unitext': UNITEXT,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'image': IMAGE,
    'bit': BIT,
    # not in documentation for ASE 15.7
    'long varchar': TEXT,  # TODO
    'timestamp': TIMESTAMP,
    'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
    """Inspector adding a Sybase-specific table-id lookup."""

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_id(self, table_name, schema=None):
        """Return the table id from `table_name` and `schema`."""
        return self.dialect.get_table_id(self.bind, table_name, schema,
                                         info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
    """Execution context handling IDENTITY_INSERT toggling and the
    autocommit requirement around DDL statements."""

    # True while IDENTITY_INSERT has been switched ON for the current
    # statement's table (reset per statement via the class default).
    _enable_identity_insert = False

    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.

        "connection" is the raw unwrapped DBAPI connection.   "value"
        is True or False. when True, the connection should be configured
        such that a DDL can take place subsequently.  when False,
        a DDL has taken place and the connection should be resumed
        into non-autocommit mode.
        """
        raise NotImplementedError()

    def pre_exec(self):
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            # explicit value supplied for the identity column ->
            # IDENTITY_INSERT must be switched ON for this table
            if insert_has_sequence:
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False

            if self._enable_identity_insert:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl))

        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings.  this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time.")

            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")

            self.set_ddl_autocommit(
                self.root_connection.connection.connection,
                True)

    def post_exec(self):
        # undo whatever pre_exec() switched on
        if self.isddl:
            self.set_ddl_autocommit(self.root_connection, False)

        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                format_table(self.compiled.statement.table)
            )

    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
    """SQL statement compiler with Sybase TOP / DATEPART peculiarities."""

    ansi_bind_rules = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond'
        })

    def get_select_precolumns(self, select, **kw):
        # LIMIT is rendered as "TOP n" right after SELECT [DISTINCT]
        s = select._distinct and "DISTINCT " or ""
        # TODO: don't think Sybase supports
        #       bind params for FIRST / TOP
        limit = select._limit
        if limit:
            # if select._limit == 1:
            #    s += "FIRST "
            # else:
            #    s += "TOP %s " % (select._limit,)
            s += "TOP %s " % (limit,)
        offset = select._offset
        if offset:
            raise NotImplementedError("Sybase ASE does not support OFFSET")
        return s

    def get_from_hint_text(self, table, text):
        return text

    def limit_clause(self, select, **kw):
        # Limit in sybase is after the select keyword
        return ""

    def visit_extract(self, extract, **kw):
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))

    def visit_now_func(self, fn, **kw):
        return "GETDATE()"

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        # bind params are not accepted inside ORDER BY -> render literals
        kw['literal_binds'] = True
        order_by = self.process(select._order_by_clause, **kw)

        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
    """DDL compiler rendering IDENTITY columns and Sybase DROP INDEX."""

    def get_column_specification(self, column, **kwargs):
        """Return 'name type [IDENTITY|DEFAULT ...] [NOT] NULL' for *column*."""
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(
                column.type, type_expression=column)

        if column.table is None:
            raise exc.CompileError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL")
        seq_col = column.table._autoincrement_column

        # install a IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            sequence = isinstance(column.default, sa_schema.Sequence) \
                and column.default
            if sequence:
                start, increment = sequence.start or 1, \
                    sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

            if column.nullable is not None:
                if not column.nullable or column.primary_key:
                    colspec += " NOT NULL"
                else:
                    colspec += " NULL"

        return colspec

    def visit_drop_index(self, drop):
        # Sybase wants DROP INDEX qualified by the table name
        index = drop.element
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self._prepared_index_name(drop.element,
                                      include_schema=False)
        )
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier quoting using the Sybase ASE reserved-word list."""
    # Identifiers matching these words are quoted when rendered.
    reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
    """Base SQLAlchemy dialect for Sybase ASE.

    Holds the compiler wiring plus reflection queries against the ASE
    system tables (sysobjects, syscolumns, sysindexes, sysreferences,
    sysusers).  Driver-specific subclasses supply DBAPI connectivity.
    """
    name = 'sybase'
    # ASE drivers generally cannot accept unicode SQL text or binds.
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    supports_native_boolean = False
    supports_unicode_binds = False
    # IDENTITY values are fetched after INSERT rather than returned inline.
    postfetch_lastrowid = True
    colspecs = {}
    ischema_names = ischema_names
    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer
    inspector = SybaseInspector
    construct_arguments = []

    def _get_default_schema_name(self, connection):
        """Return the server-side default schema via user_name()."""
        return connection.scalar(
            text("SELECT user_name() as user_name",
                 typemap={'user_name': Unicode})
        )

    def initialize(self, connection):
        """Apply version-dependent settings once connected: ASE < 15 caps
        identifiers at 30 characters, 15+ allows 255."""
        super(SybaseDialect, self).initialize(connection)
        if self.server_version_info is not None and\
                self.server_version_info < (15, ):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255

    def get_table_id(self, connection, table_name, schema=None, **kw):
        """Fetch the id for schema.table_name.

        Several reflection methods require the table id. The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        Raises NoSuchTableError when no user table or view matches.
        """
        table_id = None
        if schema is None:
            schema = self.default_schema_name
        TABLEID_SQL = text("""
          SELECT o.id AS id
          FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
          WHERE u.name = :schema_name
              AND o.name = :table_name
              AND o.type in ('U', 'V')
        """)
        if util.py2k:
            # Py2 DBAPIs here require byte-string parameters.
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")
            if isinstance(table_name, unicode):
                table_name = table_name.encode("ascii")
        result = connection.execute(TABLEID_SQL,
                                    schema_name=schema,
                                    table_name=table_name)
        table_id = result.scalar()
        if table_id is None:
            raise exc.NoSuchTableError(table_name)
        return table_id

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column metadata from syscolumns/systypes/syscomments."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))
        # status & 8 -> nullable; status & 128 -> identity (autoincrement);
        # the default expression text lives in syscomments.
        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)
        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
             length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                                             bool(autoincrement),
                                             default, precision, scale,
                                             length)
            columns.append(col_info)
        return columns

    def _get_column_info(self, name, type_, nullable, autoincrement, default,
                         precision, scale, length):
        """Build one reflection dict from raw syscolumns values, applying
        the type-specific constructor arguments."""
        coltype = self.ischema_names.get(type_, None)
        kwargs = {}
        if coltype in (NUMERIC, DECIMAL):
            args = (precision, scale)
        elif coltype == FLOAT:
            args = (precision,)
        elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
            args = (length,)
        else:
            args = ()
        if coltype:
            coltype = coltype(*args, **kwargs)
            # is this necessary
            # if is_array:
            #     coltype = ARRAY(coltype)
        else:
            util.warn("Did not recognize type '%s' of column '%s'" %
                      (type_, name))
            coltype = sqltypes.NULLTYPE
        if default:
            # Strip the DEFAULT keyword and any surrounding quotes from the
            # stored expression text.
            default = default.replace("DEFAULT", "").strip()
            default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
        else:
            default = None
        column_info = dict(name=name, type=coltype, nullable=nullable,
                           default=default, autoincrement=autoincrement)
        return column_info

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Reflect foreign keys from sysreferences.

        sysreferences stores up to 16 constrained/referenced column ids as
        numbered fokeyN/refkeyN columns; keycnt says how many are used.
        """
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))
        table_cache = {}
        column_cache = {}
        foreign_keys = []
        table_cache[table_id] = {"name": table_name, "schema": schema}
        COLUMN_SQL = text("""
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """)
        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns
        # BUG FIX: positions 8 previously selected r.fokey1/r.refkey1
        # (aliased as fokey8/refkey8), corrupting reflection of any foreign
        # key with 8 or more columns.  Now select r.fokey8/r.refkey8.
        REFCONSTRAINT_SQL = text("""
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """)
        referential_constraints = connection.execute(
            REFCONSTRAINT_SQL, table_id=table_id).fetchall()
        REFTABLE_SQL = text("""
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """)
        for r in referential_constraints:
            reftable_id = r["reftable_id"]
            # Resolve and cache the referred table's name/schema and its
            # colid -> name mapping on first encounter.
            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (schema is not None or
                        reftable["schema"] != self.default_schema_name):
                    table_info["schema"] = reftable["schema"]
                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL,
                                             table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns
            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]
            constrained_columns = []
            referred_columns = []
            # Map the used fokeyN/refkeyN column ids back to column names.
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])
            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"]
            }
            foreign_keys.append(fk_info)
        return foreign_keys

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Reflect non-PK indexes from sysindexes via index_col().

        status & 2048 marks primary-key indexes (excluded here); indid 0 is
        the data heap and 255 is text/image, hence BETWEEN 1 AND 254.
        """
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))
        INDEX_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 (i.status & 0x2) AS 'unique',
                 index_col(object_name(i.id), i.indid, 1) AS col_1,
                 index_col(object_name(i.id), i.indid, 2) AS col_2,
                 index_col(object_name(i.id), i.indid, 3) AS col_3,
                 index_col(object_name(i.id), i.indid, 4) AS col_4,
                 index_col(object_name(i.id), i.indid, 5) AS col_5,
                 index_col(object_name(i.id), i.indid, 6) AS col_6,
                 index_col(object_name(i.id), i.indid, 7) AS col_7,
                 index_col(object_name(i.id), i.indid, 8) AS col_8,
                 index_col(object_name(i.id), i.indid, 9) AS col_9,
                 index_col(object_name(i.id), i.indid, 10) AS col_10,
                 index_col(object_name(i.id), i.indid, 11) AS col_11,
                 index_col(object_name(i.id), i.indid, 12) AS col_12,
                 index_col(object_name(i.id), i.indid, 13) AS col_13,
                 index_col(object_name(i.id), i.indid, 14) AS col_14,
                 index_col(object_name(i.id), i.indid, 15) AS col_15,
                 index_col(object_name(i.id), i.indid, 16) AS col_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 0
            AND i.indid BETWEEN 1 AND 254
        """)
        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            column_names = []
            # NOTE(review): sysindexes.keycnt is documented as keys + 1 for
            # nonclustered indexes, hence the open upper bound here (this
            # matches upstream SQLAlchemy) -- confirm for clustered indexes.
            for i in range(1, r["count"]):
                column_names.append(r["col_%i" % (i,)])
            index_info = {"name": r["name"],
                          "unique": bool(r["unique"]),
                          "column_names": column_names}
            indexes.append(index_info)
        return indexes

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary-key constraint (sysindexes rows with
        status & 2048 set)."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))
        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)
        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()
        constrained_columns = []
        if pks:
            for i in range(1, pks["count"] + 1):
                constrained_columns.append(pks["pk_%i" % (i,)])
            return {"constrained_columns": constrained_columns,
                    "name": pks["name"]}
        else:
            return {"constrained_columns": [], "name": None}

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema (user) names from sysusers."""
        SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
        schemas = connection.execute(SCHEMA_SQL)
        return [s["name"] for s in schemas]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return user-table names ('U' objects) for the given schema."""
        if schema is None:
            schema = self.default_schema_name
        TABLE_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'U'
        """)
        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")
        tables = connection.execute(TABLE_SQL, schema_name=schema)
        return [t["name"] for t in tables]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the CREATE VIEW source text stored in syscomments."""
        if schema is None:
            schema = self.default_schema_name
        VIEW_DEF_SQL = text("""
          SELECT c.text
          FROM syscomments c JOIN sysobjects o ON c.id = o.id
          WHERE o.name = :view_name
            AND o.type = 'V'
        """)
        if util.py2k:
            if isinstance(view_name, unicode):
                view_name = view_name.encode("ascii")
        view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
        return view.scalar()

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names ('V' objects) for the given schema."""
        if schema is None:
            schema = self.default_schema_name
        VIEW_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'V'
        """)
        if util.py2k:
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")
        views = connection.execute(VIEW_SQL, schema_name=schema)
        return [v["name"] for v in views]

    def has_table(self, connection, table_name, schema=None):
        """Return True when the table id lookup succeeds (EAFP on
        NoSuchTableError)."""
        try:
            self.get_table_id(connection, table_name, schema)
        except exc.NoSuchTableError:
            return False
        else:
            return True
|
pcu4dros/pandora-core
|
workspace/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/base.py
|
Python
|
mit
| 28,696
|
[
"ASE"
] |
2e2564fd5fb565d0694de6da4f51630e2bf482c1738fddb99e3e82ded72f8824
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import os
import subprocess
import shutil
from morphforge.core import FileIO, LocMgr, LogMgr
from morphforge.core import RCMgr as RCReader
from morphforge.core.mgrs.settingsmgr import SettingsMgr
class ModBuilderParams(object):
    """Build configuration for compiling NMODL .mod files to shared objects.

    All paths and flags are read from the 'Neuron' section of the
    morphforge rc file at class-definition (import) time.
    """
    # External NEURON/libtool tool locations:
    nocmodlpath = RCReader.get('Neuron', 'nocmodlpath')
    libtoolpath = RCReader.get('Neuron', 'libtoolpath')
    # Include search path: current dir, parent, plus rc-configured entries.
    compile_includes = ['.', '..'] + \
        RCReader.get('Neuron', 'compileincludes').split(':')
    compile_defs = ['HAVE_CONFIG_H']
    # Libraries every NEURON mechanism shared object links against.
    std_link_libs = [
        'nrnoc',
        'oc',
        'memacs',
        'nrnmpi',
        'scopmath',
        'sparse13',
        'readline',
        'ncurses',
        'ivoc',
        'neuron_gnu',
        'meschach',
        'sundials',
        'm',
        'dl',
        ]
    nrn_link_dirs = RCReader.get('Neuron', 'nrnlinkdirs').split(':')
    rpath = RCReader.get('Neuron', 'rpath')
    rnd_alone_link_statement = RCReader.get('Neuron', 'rndalonelinkstatement')
    modlunitpath = RCReader.get('Neuron', 'modlunitpath')
    @classmethod
    def get_compile_str(cls, c_filename, lo_filename, additional_compile_flags=''):
        """Return the libtool argument string compiling c_filename -> lo_filename."""
        incl_str = ' '.join(["""-I"%s" """ % _incl for _incl in cls.compile_includes])
        def_str = ' '.join(["""-D%s """ % _def for _def in cls.compile_defs])
        variables = {'lo': lo_filename, 'c': c_filename, 'incs': incl_str, 'defs': def_str, 'additional_flags': additional_compile_flags}
        return """--mode=compile gcc %(defs)s %(incs)s %(additional_flags)s -g -O2 -c -o %(lo)s %(c)s """ % variables
    @classmethod
    def get_link_str(cls, lo_filename, la_filename, additional_link_flags=''):
        """Return the libtool argument string linking lo_filename -> la_filename."""
        std_lib_str = ' '.join(['-l%s' % lib for lib in cls.std_link_libs])
        std_lib_dir_str = ' '.join(['-L%s' % _dir for _dir in cls.nrn_link_dirs])
        link_dict = {'la': la_filename,
                     'lo': lo_filename,
                     'std_lib_str': std_lib_str,
                     'std_lib_dir_str': std_lib_dir_str,
                     'rpath': cls.rpath,
                     'randSt': cls.rnd_alone_link_statement,
                     'additional_flags': additional_link_flags
                     }
        return """--mode=link gcc -module -g -O2 -shared -o %(la)s -rpath %(rpath)s %(lo)s %(std_lib_dir_str)s %(randSt)s %(std_lib_str)s %(additional_flags)s """ % link_dict
def _simple_exec(cmd, remaining, err_ok=False):
    """Run `cmd remaining` through a shell and return its stdout.

    Raises ValueError on a non-zero exit status unless err_ok is True
    (some tools, e.g. nocmodl, exit non-zero even on success).
    """
    print 'Executing: %s %s' % (cmd, remaining)
    args = [cmd + ' ' + remaining]
    # shell=True so the single concatenated command string is parsed by
    # the shell; stdout/stderr are captured separately.
    proc = subprocess.Popen(args,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output,err = proc.communicate()
    if not proc.returncode == 0 and err_ok!=True:
        print output, err
        raise ValueError(('Problem Building Mod-file!' + '\n %s '% args) + output)
    if SettingsMgr.simulator_is_verbose():
        print output, err
    return output
def _build_modfile_local(mod_filename_short, modfile=None):
    """Compile one .mod file (in the current working directory) to a .so.

    Pipeline: nocmodl (.mod -> .c), append a modl_reg() hook, libtool
    compile (.c -> .lo), libtool link (.lo -> .la/.so), then move the
    shared object out of .libs/ and clean up intermediates.

    Returns the short filename of the resulting .so.
    NOTE(review): assumes the caller has already chdir'd into the build
    directory containing mod_filename_short.
    """
    print os.getcwd()
    mod_file_basename = mod_filename_short.replace('.mod', '')
    c_filename = mod_file_basename + '.c'
    la_filename = mod_file_basename + '.la'
    lo_filename = mod_file_basename + '.lo'
    so_filename = mod_file_basename + '.so'
    libs_dir = '.libs/'
    c_filename = mod_file_basename + '.c'
    # nocmodl may exit non-zero even on success, so err_ok=True and the
    # real success test is the existence of the generated .c file.
    output = _simple_exec(ModBuilderParams.nocmodlpath, mod_filename_short, err_ok=True)
    if not os.path.exists(c_filename):
        print 'Failed to compile modfile. Error:'
        print output, '\n'
        assert False
    # Add the extra registration function into our mod file:
    new_register_func = """\n modl_reg(){ _%s_reg(); }""" \
        % mod_file_basename
    FileIO.append_to_file(new_register_func, c_filename)
    # Compile the .c file -> .so:
    compile_str = ModBuilderParams.get_compile_str(c_filename, lo_filename)
    link_str = ModBuilderParams.get_link_str(lo_filename, la_filename)
    # Per-modfile extra flags, when a ModFile object is supplied.
    compile_flags = modfile.additional_compile_flags if modfile else ''
    link_flags = modfile.additional_link_flags if modfile else ''
    if SettingsMgr.simulator_is_verbose():
        print 'IN:', ModBuilderParams.libtoolpath,
        print compile_str
        print link_str
    op1 = _simple_exec(ModBuilderParams.libtoolpath, ModBuilderParams.get_compile_str(c_filename, lo_filename, additional_compile_flags=compile_flags))
    op2 = _simple_exec(ModBuilderParams.libtoolpath, ModBuilderParams.get_link_str(lo_filename, la_filename, additional_link_flags=link_flags))
    # Sanity-check that every expected build artifact was produced.
    for filename in [c_filename, lo_filename, la_filename]:
        if not os.path.exists(filename):
            assert False, 'Error building mod-file!'
    if SettingsMgr.simulator_is_verbose() or True:
        print 'OP1:', op1
        print 'OP2:', op2
    # Copy the correct .so from the libDir to the build_dir:
    shutil.move(
        os.path.join(libs_dir, mod_file_basename + '.so.0.0.0'),
        so_filename)
    # Clean up:
    if True:
        os.remove(c_filename)
        os.remove(mod_filename_short)
        for ext in ['.la', '.lo']:
            os.remove(mod_file_basename + ext)
        for ext in ['.la', '.lai', '.o', '.so', '.so.0']:
            os.remove(os.path.join(libs_dir, mod_file_basename + ext))
        os.rmdir(libs_dir)
    return so_filename
def _build_mod_file(modfilename, output_dir=None, build_dir=None, modfile=None):
    """Build modfilename in build_dir and place the .so in output_dir.

    Wraps _build_modfile_local: copies the .mod into the build directory,
    chdirs there for the build, then moves the resulting shared object to
    the output location.  Returns the full path of the output .so.
    """
    build_dir = LocMgr().get_default_mod_builddir() if not build_dir else build_dir
    output_dir = LocMgr().get_default_mod_outdir() if not output_dir else output_dir
    if SettingsMgr.simulator_is_verbose():
        print ' - Building: ', modfilename
    modfilenamebase = os.path.basename(modfilename)
    sofilenamebase = modfilenamebase.replace('.mod', '.so')
    shutil.copyfile(
        modfilename,
        os.path.join(build_dir, modfilenamebase))
    so_filename_output = os.path.join(output_dir, sofilenamebase)
    # Move to new directory to build:
    # (the build tools emit artifacts into the current working directory)
    initial_cwd = os.getcwd()
    os.chdir(build_dir)
    so_filename_build_short = _build_modfile_local(mod_filename_short=modfilenamebase, modfile=modfile)
    os.chdir(initial_cwd)
    # CopyFile to output cell_location:
    so_filename_build = os.path.join(build_dir, so_filename_build_short)
    if so_filename_build != so_filename_output:
        shutil.move(so_filename_build, so_filename_output)
    return so_filename_output
class ModFileCompiler(object):
    """Facade for unit-checking and building NMODL mod-files."""
    @classmethod
    def check_modfile_units(cls, modfilename):
        """Run NEURON's modlunit checker and assert its output is clean.

        modlunit exits non-zero even when fine, so success is detected by
        comparing its output line-by-line against the expected banner.
        """
        output = _simple_exec(ModBuilderParams.modlunitpath, modfilename, err_ok=True)
        op_expected = """
model 1.1.1.1 1994/10/12 17:22:51
Checking units of %s""" % modfilename
        if SettingsMgr.simulator_is_verbose():
            print 'OP', output
        # Check line by line:
        for (line, line_expected) in zip(output.split('\n'), op_expected.split('\n')):
            if not line_expected.strip() == line.strip():
                print 'ERROR ERROR ERROR WITH UNITS!!'
                print 'Seen', line
                print 'Expt', line_expected
                assert False
    @classmethod
    def build_modfile(cls, modfile, strict_modlunit):
        """Build modfile's .so if not already present; return its path.

        When strict_modlunit is True the mod text must pass the modlunit
        unit check before compilation.
        """
        output_filename = modfile.get_built_filename_full(ensure_built=False)
        if not os.path.exists(output_filename):
            LogMgr.info('Does not exist: building: %s'
                        % output_filename)
            # Write the in-memory mod text to a temp .mod file to compile.
            mod_txt_filename = FileIO.write_to_file(modfile.modtxt, suffix='.mod')
            if strict_modlunit:
                ModFileCompiler.check_modfile_units(mod_txt_filename)
            mod_dyn_filename = _build_mod_file(mod_txt_filename, modfile=modfile)
            shutil.move(mod_dyn_filename, output_filename)
        else:
            LogMgr.info('Already Built')
        return output_filename
|
mikehulluk/morphforge
|
src/morphforge/simulation/neuron/biophysics/modfilecompiler.py
|
Python
|
bsd-2-clause
| 9,423
|
[
"NEURON"
] |
194d0b4921f9e31f7d344a1fc0eed647ad4a80b36cba99612d10130459810c39
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN RNN definition."""
# internal imports
import numpy as np
import tensorflow as tf
def orthogonal(shape):
  """Draw a random matrix and orthogonalize it via SVD, reshaped to `shape`."""
  rows = shape[0]
  cols = np.prod(shape[1:])
  gauss = np.random.normal(0.0, 1.0, (rows, cols))
  u, _, v = np.linalg.svd(gauss, full_matrices=False)
  basis = u if u.shape == (rows, cols) else v
  return basis.reshape(shape)
def orthogonal_initializer(scale=1.0):
  """Build a TF initializer emitting orthogonal matrices scaled by `scale`."""
  def _initializer(shape, dtype=tf.float32,
                   partition_info=None):  # pylint: disable=unused-argument
    weights = orthogonal(shape)
    return tf.constant(weights * scale, dtype)
  return _initializer
def lstm_ortho_initializer(scale=1.0):
  """LSTM orthogonal initializer.

  Returns an initializer for a fused LSTM kernel of shape
  [input_size, 4 * num_units]: each of the four gate sub-matrices is
  initialized to an independent orthogonal matrix scaled by `scale`.
  """
  def _initializer(shape, dtype=tf.float32,
                   partition_info=None):  # pylint: disable=unused-argument
    size_x = shape[0]
    # Floor division: the original `/` yields a float under Python 3,
    # which then fails as a slice index; `//` is identical for the
    # integer shapes used under Python 2.
    size_h = shape[1] // 4  # assumes lstm.
    t = np.zeros(shape)
    t[:, :size_h] = orthogonal([size_x, size_h]) * scale
    t[:, size_h:size_h * 2] = orthogonal([size_x, size_h]) * scale
    t[:, size_h * 2:size_h * 3] = orthogonal([size_x, size_h]) * scale
    t[:, size_h * 3:] = orthogonal([size_x, size_h]) * scale
    return tf.constant(t, dtype)
  return _initializer
class LSTMCell(tf.contrib.rnn.RNNCell):
  """Vanilla LSTM cell.

  Uses ortho initializer, and also recurrent dropout without memory loss
  (https://arxiv.org/abs/1603.05118).

  State is packed as a single tensor [c, h] concatenated on axis 1 rather
  than an (c, h) tuple.
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               use_recurrent_dropout=False,
               dropout_keep_prob=0.9):
    # num_units: size of the hidden state h (and cell state c).
    self.num_units = num_units
    # forget_bias: added to the forget gate pre-activation.
    self.forget_bias = forget_bias
    self.use_recurrent_dropout = use_recurrent_dropout
    self.dropout_keep_prob = dropout_keep_prob
  @property
  def state_size(self):
    # Packed [c, h] -> twice the hidden size.
    return 2 * self.num_units
  @property
  def output_size(self):
    return self.num_units
  def get_output(self, state):
    """Extract h from the packed [c, h] state tensor."""
    unused_c, h = tf.split(state, 2, 1)
    return h
  def __call__(self, x, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
      c, h = tf.split(state, 2, 1)
      x_size = x.get_shape().as_list()[1]
      w_init = None  # uniform
      h_init = lstm_ortho_initializer(1.0)
      # Keep W_xh and W_hh separate here as well to use different init methods.
      w_xh = tf.get_variable(
          'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
      w_hh = tf.get_variable(
          'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
      bias = tf.get_variable(
          'bias', [4 * self.num_units],
          initializer=tf.constant_initializer(0.0))
      # Single fused matmul over [x, h] for all four gates.
      concat = tf.concat([x, h], 1)
      w_full = tf.concat([w_xh, w_hh], 0)
      hidden = tf.matmul(concat, w_full) + bias
      # i = input gate, j = candidate input, f = forget gate, o = output gate
      i, j, f, o = tf.split(hidden, 4, 1)
      if self.use_recurrent_dropout:
        # Dropout on the candidate only: recurrent dropout w/o memory loss.
        g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
      else:
        g = tf.tanh(j)
      new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
      new_h = tf.tanh(new_c) * tf.sigmoid(o)
      return new_h, tf.concat([new_c, new_h], 1)  # state repacked as [c, h].
def layer_norm_all(h,
                   batch_size,
                   base,
                   num_units,
                   scope='layer_norm',
                   reuse=False,
                   gamma_start=1.0,
                   epsilon=1e-3,
                   use_bias=True):
  """Layer Norm (faster version, but not using defun).

  Normalizes each of the `base` sub-blocks of `h` (i, j, f, o for an LSTM)
  independently over its last dimension, then applies a learned gain
  (and optionally bias).
  """
  # Performs layer norm on multiple base at once (ie, i, g, j, o for lstm)
  # Reshapes h in to perform layer norm in parallel
  h_reshape = tf.reshape(h, [batch_size, base, num_units])
  mean = tf.reduce_mean(h_reshape, [2], keep_dims=True)
  var = tf.reduce_mean(tf.square(h_reshape - mean), [2], keep_dims=True)
  epsilon = tf.constant(epsilon)
  rstd = tf.rsqrt(var + epsilon)
  h_reshape = (h_reshape - mean) * rstd
  # reshape back to original
  h = tf.reshape(h_reshape, [batch_size, base * num_units])
  with tf.variable_scope(scope):
    if reuse:
      tf.get_variable_scope().reuse_variables()
    # NOTE(review): gamma/beta are sized [4 * num_units] rather than
    # [base * num_units]; all call sites in this file use base=4, but the
    # hardcoded 4 would break other base values -- confirm before reuse.
    gamma = tf.get_variable(
        'ln_gamma', [4 * num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [4 * num_units], initializer=tf.constant_initializer(0.0))
  if use_bias:
    return gamma * h + beta
  return gamma * h
def layer_norm(x,
               num_units,
               scope='layer_norm',
               reuse=False,
               gamma_start=1.0,
               epsilon=1e-3,
               use_bias=True):
  """Calculate layer norm.

  Normalizes `x` over axis 1 and applies a learned gain `ln_gamma`
  (and, when use_bias, a learned offset `ln_beta`) created under `scope`.
  """
  axes = [1]
  mean = tf.reduce_mean(x, axes, keep_dims=True)
  x_shifted = x - mean
  var = tf.reduce_mean(tf.square(x_shifted), axes, keep_dims=True)
  inv_std = tf.rsqrt(var + epsilon)
  with tf.variable_scope(scope):
    if reuse is True:
      tf.get_variable_scope().reuse_variables()
    gamma = tf.get_variable(
        'ln_gamma', [num_units],
        initializer=tf.constant_initializer(gamma_start))
    if use_bias:
      beta = tf.get_variable(
          'ln_beta', [num_units], initializer=tf.constant_initializer(0.0))
  output = gamma * (x_shifted) * inv_std
  if use_bias:
    output += beta
  return output
def raw_layer_norm(x, epsilon=1e-3):
  """Layer-normalize `x` over axis 1 without learned gain or bias."""
  mean = tf.reduce_mean(x, [1], keep_dims=True)
  centered = x - mean
  variance = tf.reduce_mean(tf.square(centered), [1], keep_dims=True)
  return centered / tf.sqrt(variance + epsilon)
def super_linear(x,
                 output_size,
                 scope=None,
                 reuse=False,
                 init_w='ortho',
                 weight_start=0.0,
                 use_bias=True,
                 bias_start=0.0,
                 input_size=None):
  """Performs linear operation. Uses ortho init defined earlier.

  Creates variables 'super_linear_w' (and 'super_linear_b' when use_bias)
  under `scope` and returns x @ w (+ b).  `init_w` selects the weight
  initializer: 'zeros', 'constant', 'gaussian' or 'ortho'.
  """
  shape = x.get_shape().as_list()
  with tf.variable_scope(scope or 'linear'):
    if reuse is True:
      tf.get_variable_scope().reuse_variables()
    w_init = None  # uniform
    # input_size overrides the statically-inferred width when given.
    if input_size is None:
      x_size = shape[1]
    else:
      x_size = input_size
    if init_w == 'zeros':
      w_init = tf.constant_initializer(0.0)
    elif init_w == 'constant':
      w_init = tf.constant_initializer(weight_start)
    elif init_w == 'gaussian':
      w_init = tf.random_normal_initializer(stddev=weight_start)
    elif init_w == 'ortho':
      w_init = lstm_ortho_initializer(1.0)
    w = tf.get_variable(
        'super_linear_w', [x_size, output_size], tf.float32, initializer=w_init)
    if use_bias:
      b = tf.get_variable(
          'super_linear_b', [output_size],
          tf.float32,
          initializer=tf.constant_initializer(bias_start))
      return tf.matmul(x, w) + b
    return tf.matmul(x, w)
class LayerNormLSTMCell(tf.contrib.rnn.RNNCell):
  """Layer-Norm, with Ortho Init. and Recurrent Dropout without Memory Loss.

  https://arxiv.org/abs/1607.06450 - Layer Norm
  https://arxiv.org/abs/1603.05118 - Recurrent Dropout without Memory Loss

  Unlike LSTMCell above, this cell packs its state as [h, c] (h first).
  """
  def __init__(self,
               num_units,
               forget_bias=1.0,
               use_recurrent_dropout=False,
               dropout_keep_prob=0.90):
    """Initialize the Layer Norm LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (default 1.0).
      use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
      dropout_keep_prob: float, dropout keep probability (default 0.90)
    """
    self.num_units = num_units
    self.forget_bias = forget_bias
    self.use_recurrent_dropout = use_recurrent_dropout
    self.dropout_keep_prob = dropout_keep_prob
  @property
  def input_size(self):
    return self.num_units
  @property
  def output_size(self):
    return self.num_units
  @property
  def state_size(self):
    # Packed [h, c] -> twice the hidden size.
    return 2 * self.num_units
  def get_output(self, state):
    """Extract h from the packed [h, c] state tensor."""
    h, unused_c = tf.split(state, 2, 1)
    return h
  def __call__(self, x, state, timestep=0, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
      h, c = tf.split(state, 2, 1)
      h_size = self.num_units
      x_size = x.get_shape().as_list()[1]
      batch_size = x.get_shape().as_list()[0]
      w_init = None  # uniform
      h_init = lstm_ortho_initializer(1.0)
      w_xh = tf.get_variable(
          'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
      w_hh = tf.get_variable(
          'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
      concat = tf.concat([x, h], 1)  # concat for speed.
      w_full = tf.concat([w_xh, w_hh], 0)
      # No additive bias: layer norm's learned beta supplies the offset.
      concat = tf.matmul(concat, w_full) #+ bias # live life without garbage.
      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      concat = layer_norm_all(concat, batch_size, 4, h_size, 'ln_all')
      i, j, f, o = tf.split(concat, 4, 1)
      if self.use_recurrent_dropout:
        # Dropout on the candidate only: recurrent dropout w/o memory loss.
        g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
      else:
        g = tf.tanh(j)
      new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
      # Layer norm is also applied to the cell state before the output tanh.
      new_h = tf.tanh(layer_norm(new_c, h_size, 'ln_c')) * tf.sigmoid(o)
      return new_h, tf.concat([new_h, new_c], 1)
class HyperLSTMCell(tf.contrib.rnn.RNNCell):
"""HyperLSTM with Ortho Init, Layer Norm, Recurrent Dropout, no Memory Loss.
https://arxiv.org/abs/1609.09106
http://blog.otoro.net/2016/09/28/hyper-networks/
"""
  def __init__(self,
               num_units,
               forget_bias=1.0,
               use_recurrent_dropout=False,
               dropout_keep_prob=0.90,
               use_layer_norm=True,
               hyper_num_units=256,
               hyper_embedding_size=32,
               hyper_use_recurrent_dropout=False):
    """Initialize the Layer Norm HyperLSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (default 1.0).
      use_recurrent_dropout: Whether to use Recurrent Dropout (default False)
      dropout_keep_prob: float, dropout keep probability (default 0.90)
      use_layer_norm: boolean. (default True)
        Controls whether we use LayerNorm layers in main LSTM & HyperLSTM cell.
      hyper_num_units: int, number of units in HyperLSTM cell.
        (default is 128, recommend experimenting with 256 for larger tasks)
      hyper_embedding_size: int, size of signals emitted from HyperLSTM cell.
        (default is 16, recommend trying larger values for large datasets)
      hyper_use_recurrent_dropout: boolean. (default False)
        Controls whether HyperLSTM cell also uses recurrent dropout.
        Recommend turning this on only if hyper_num_units becomes large (>= 512)
    """
    self.num_units = num_units
    self.forget_bias = forget_bias
    self.use_recurrent_dropout = use_recurrent_dropout
    self.dropout_keep_prob = dropout_keep_prob
    self.use_layer_norm = use_layer_norm
    self.hyper_num_units = hyper_num_units
    self.hyper_embedding_size = hyper_embedding_size
    self.hyper_use_recurrent_dropout = hyper_use_recurrent_dropout
    # Main-cell units plus hyper-cell units; the two states are packed
    # side by side into one state tensor.
    self.total_num_units = self.num_units + self.hyper_num_units
    # The inner hyper network is itself an LSTM, layer-normed when the
    # main cell is.
    if self.use_layer_norm:
      cell_fn = LayerNormLSTMCell
    else:
      cell_fn = LSTMCell
    self.hyper_cell = cell_fn(
        hyper_num_units,
        use_recurrent_dropout=hyper_use_recurrent_dropout,
        dropout_keep_prob=dropout_keep_prob)
  @property
  def input_size(self):
    # Set lazily from x's static shape inside __call__.
    return self._input_size
  @property
  def output_size(self):
    # Only the main cell's h is exposed as output.
    return self.num_units
  @property
  def state_size(self):
    # Packed [h, c] of main + hyper cells combined.
    return 2 * self.total_num_units
  def get_output(self, state):
    """Extract the main cell's h (first num_units of the packed h half)."""
    total_h, unused_total_c = tf.split(state, 2, 1)
    h = total_h[:, 0:self.num_units]
    return h
  def hyper_norm(self, layer, scope='hyper', use_bias=True):
    """Modulate `layer` with gain (and optionally bias) generated by the
    hyper network's output, per https://arxiv.org/abs/1609.09106.

    The hyper output is projected to a small embedding and then to a
    per-unit scale alpha (and offset beta when use_bias).
    """
    num_units = self.num_units
    embedding_size = self.hyper_embedding_size
    # recurrent batch norm init trick (https://arxiv.org/abs/1603.09025).
    # (Cooijmans et al.: small initial gain of 0.10.)
    init_gamma = 0.10
    with tf.variable_scope(scope):
      # zw: embedding of the hyper output; bias_start=1.0 so the initial
      # modulation is near-identity.
      zw = super_linear(
          self.hyper_output,
          embedding_size,
          init_w='constant',
          weight_start=0.00,
          use_bias=True,
          bias_start=1.0,
          scope='zw')
      # alpha: per-unit multiplicative gain derived from the embedding.
      alpha = super_linear(
          zw,
          num_units,
          init_w='constant',
          weight_start=init_gamma / embedding_size,
          use_bias=False,
          scope='alpha')
      result = tf.multiply(alpha, layer)
      if use_bias:
        # zb/beta: additive offset, likewise generated from hyper output.
        zb = super_linear(
            self.hyper_output,
            embedding_size,
            init_w='gaussian',
            weight_start=0.01,
            use_bias=False,
            bias_start=0.0,
            scope='zb')
        beta = super_linear(
            zb,
            num_units,
            init_w='constant',
            weight_start=0.00,
            use_bias=False,
            scope='beta')
        result += beta
    return result
def __call__(self, x, state, timestep=0, scope=None):
    """Run one HyperLSTM step.

    Unpacks the combined (main + hyper) state, advances the hyper cell on
    [x, h], modulates each main-LSTM gate with hyper_norm, and repacks the
    state.  Returns (new_h, new_total_state).
    # NOTE(review): assumes x has static (batch, feature) dims — confirm.
    """
    with tf.variable_scope(scope or type(self).__name__):
        # Unpack: state = [h_main | h_hyper] ++ [c_main | c_hyper].
        total_h, total_c = tf.split(state, 2, 1)
        h = total_h[:, 0:self.num_units]
        c = total_c[:, 0:self.num_units]
        self.hyper_state = tf.concat(
            [total_h[:, self.num_units:], total_c[:, self.num_units:]], 1)
        batch_size = x.get_shape().as_list()[0]
        x_size = x.get_shape().as_list()[1]
        self._input_size = x_size
        w_init = None # uniform
        h_init = lstm_ortho_initializer(1.0)
        w_xh = tf.get_variable(
            'W_xh', [x_size, 4 * self.num_units], initializer=w_init)
        w_hh = tf.get_variable(
            'W_hh', [self.num_units, 4 * self.num_units], initializer=h_init)
        bias = tf.get_variable(
            'bias', [4 * self.num_units],
            initializer=tf.constant_initializer(0.0))
        # concatenate the input and hidden states for hyperlstm input
        hyper_input = tf.concat([x, h], 1)
        hyper_output, hyper_new_state = self.hyper_cell(hyper_input,
                                                        self.hyper_state)
        self.hyper_output = hyper_output
        self.hyper_state = hyper_new_state
        xh = tf.matmul(x, w_xh)
        hh = tf.matmul(h, w_hh)
        # split Wxh contributions
        ix, jx, fx, ox = tf.split(xh, 4, 1)
        ix = self.hyper_norm(ix, 'hyper_ix', use_bias=False)
        jx = self.hyper_norm(jx, 'hyper_jx', use_bias=False)
        fx = self.hyper_norm(fx, 'hyper_fx', use_bias=False)
        ox = self.hyper_norm(ox, 'hyper_ox', use_bias=False)
        # split Whh contributions
        ih, jh, fh, oh = tf.split(hh, 4, 1)
        ih = self.hyper_norm(ih, 'hyper_ih', use_bias=True)
        jh = self.hyper_norm(jh, 'hyper_jh', use_bias=True)
        fh = self.hyper_norm(fh, 'hyper_fh', use_bias=True)
        oh = self.hyper_norm(oh, 'hyper_oh', use_bias=True)
        # split bias
        ib, jb, fb, ob = tf.split(bias, 4, 0) # bias is to be broadcasted.
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i = ix + ih + ib
        j = jx + jh + jb
        f = fx + fh + fb
        o = ox + oh + ob
        if self.use_layer_norm:
            concat = tf.concat([i, j, f, o], 1)
            concat = layer_norm_all(concat, batch_size, 4, self.num_units, 'ln_all')
            i, j, f, o = tf.split(concat, 4, 1)
        if self.use_recurrent_dropout:
            g = tf.nn.dropout(tf.tanh(j), self.dropout_keep_prob)
        else:
            g = tf.tanh(j)
        new_c = c * tf.sigmoid(f + self.forget_bias) + tf.sigmoid(i) * g
        # Note: the cell state is layer-normed here unconditionally, even
        # when use_layer_norm is False.
        new_h = tf.tanh(layer_norm(new_c, self.num_units, 'ln_c')) * tf.sigmoid(o)
        # Repack main + hyper halves into a single state tensor.
        hyper_h, hyper_c = tf.split(hyper_new_state, 2, 1)
        new_total_h = tf.concat([new_h, hyper_h], 1)
        new_total_c = tf.concat([new_c, hyper_c], 1)
        new_total_state = tf.concat([new_total_h, new_total_c], 1)
    return new_h, new_total_state
|
judithfan/sketch-rnn
|
pix_to_sketch/rnn.py
|
Python
|
mit
| 16,443
|
[
"Gaussian"
] |
cc01c395f9f1bc8391c14aeec4d1f84c2f2bea01eb4b39f9176120481c86280e
|
#!/usr/bin/env python2
#
# Copyright (c) 2013 Insollo Entertainment, LLC. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import print_function
"""
This module generates state diagrams for all state machines found in gridmq
source code.
Dependencies:
* python2.7
* python-clang
* graphviz (dot)
Invocation:
make diagrams
To keep this code simple we make some assumptions about the code handling a
state machine. We may lift some of them in the future. Important assumptions are:
* State machine code is handled by a single function
* That function name is written literally in `grid_fsm_init`/`grid_fsm_init_root`
* The init call and the function definition are in the same source file
* State machine handled by nested switch statements
* Case labels contain `define`d constants written literally
* The `state` attribute is changed by assignment in the same file
* No `state` attributes are referenced in the function except FSM state
"""
import os
import sys
import subprocess
import errno
# clang's Python bindings are an optional dependency; fail with an
# actionable install hint instead of a bare ImportError traceback.
try:
    from clang.cindex import Index
except ImportError:
    sys.excepthook(*sys.exc_info())
    print(file=sys.stderr)
    print("It seems you don't have clang for python.", file=sys.stderr)
    print("You may try one of the following:", file=sys.stderr)
    print(" pip install clang", file=sys.stderr)
    print(" easy_install clang", file=sys.stderr)
    sys.exit(1)
# Boilerplate wrapped around the generated diagram snippets; the result
# is written to doc/diagrams.html by main().
HTML_HEADER = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>gridmq</title>
<style>
body {font-family:sans-serif;}
#toplist {
padding-left: 0px;
}
#toplist li {
display: inline;
list-style-type: none;
padding-right: 15px;
}
a {color:#000000;}
</style>
</head>
<body>
<div style="width:50em">
<img src="/logo.png">
<b>
<ul id='toplist'>
<li><a href="index.html">Home</a></li>
<li><a href="download.html">Download</a></li>
<li><a href="documentation.html">Documentation</a></li>
<li><a href="development.html">Development</a></li>
<li><a href="community.html">Community</a></li>
</ul>
</b>
<h2>State diagrams</h2>
"""
HTML_FOOTER = """
</div>
</body>
</html>
"""
def mkstate(stname):
    """Shorten a state constant (e.g. GRID_USOCK_STATE_IDLE) to a label."""
    if '_STATE_' in stname:
        return stname.split('_STATE_')[1]
    if stname.startswith('GRID_'):
        # Strip the full 'GRID_' prefix.  The old `stname[3:]` slice dated
        # from a 3-character prefix and left a stray 'D_' in every label.
        return stname[len('GRID_'):]
    return stname
def mksrc(src):
    """Shorten a source constant (e.g. GRID_USOCK_SRC_SOCK) to a label.

    None (no enclosing src switch) maps to the wildcard '*'.
    """
    if src is None:
        return '*'
    if '_SRC_' in src:
        return src.split('_SRC_')[1]
    if src.startswith('GRID_'):
        # Strip the full 'GRID_' prefix; the old [3:] slice left 'D_' behind.
        return src[len('GRID_'):]
    return src
def mkaction(action):
    """Shorten an action constant (e.g. GRID_USOCK_ACTION_STOP) to a label.

    None (no enclosing action switch) maps to the wildcard '*'.
    """
    if action is None:
        return '*'
    if '_ACTION_' in action:
        return action.split('_ACTION_')[1]
    if action.startswith('GRID_'):
        # Strip the full 'GRID_' prefix; the old [3:] slice left 'D_' behind.
        return action[len('GRID_'):]
    return action
class Visitor(object):
    """Generic AST walker dispatching on clang cursor-kind names.

    Subclasses define ``enter_<KIND>`` / ``exit_<KIND>`` hooks.  An
    ``enter_`` hook may return another Visitor instance; that visitor then
    takes over traversal of the whole subtree (including this cursor).
    """

    def run(self, cursor):
        self.visit(cursor)

    def visit(self, cursor):
        try:
            kind = cursor.kind.name
        except ValueError:
            # libclang sometimes reports kinds it has no enum member for.
            kind = 'VERY_BAD_NAME'
        enter = getattr(self, 'enter_' + kind, None)
        if enter is not None:
            override = enter(cursor)
            if override is not None:
                # Hand the whole subtree over to the returned visitor.
                return override.run(cursor)
        for child in cursor.get_children():
            self.visit(child)
        leave = getattr(self, 'exit_' + kind, None)
        if leave is not None:
            leave(cursor)
class SkipVisitor(Visitor):
    """Returned from enter_xxx to skip checking subtree"""
    def run(self, cursor):
        # Deliberately do nothing: this prunes traversal of the subtree.
        pass


# Sentinel returned by enter_* hooks to prune a subtree.  It must be a
# SkipVisitor instance: the old `SKIP = Visitor()` made Visitor.visit hand
# the subtree to a plain Visitor, which re-walked all children instead of
# skipping them.
SKIP = SkipVisitor()
class FindFSM(Visitor):
    """Collects the FSM handler functions registered via grid_fsm_init*."""

    def __init__(self):
        self.fsms = []

    def enter_CALL_EXPR(self, cursor):
        # Only grid_fsm_init / grid_fsm_init_root calls are of interest;
        # prune every other call expression.
        if cursor.displayname not in ('grid_fsm_init_root', 'grid_fsm_init'):
            return SKIP
        handler = list(cursor.get_children())[2]
        if handler.displayname:
            self.fsms.append(handler)
        # else: NULL is used in core/pipe.c

    def add_fsm(self, node):
        self.fsms.append(node)
class StateFinder(Visitor):
    """Collects assignments to a `state` attribute inside a function.

    Calls to functions defined in the same translation unit are followed
    recursively, so helper functions that change the state are found too.
    Results accumulate in ``self.states`` as (constant-text, cursor) pairs.
    """
    def __init__(self):
        self.states = []

    def state_found(self, name, cursor):
        self.states.append((name, cursor))

    def visit(self, cursor):
        # No extra behavior; kept as an explicit extension point.
        super(StateFinder, self).visit(cursor)

    def enter_CALL_EXPR(self, cursor):
        # Follow the call into its definition (same source file only) and
        # propagate any state assignments found there.
        fun = list(cursor.get_children())[0].get_definition()
        if fun:
            sf = StateFinder()
            sf.run(fun)
            for name, cursor in sf.states:
                self.state_found(name, cursor)

    def enter_BINARY_OPERATOR(self, cursor):
        children = list(cursor.get_children())
        if children[0].displayname == 'state': # tiny heuristic
            # Operator is the text between two children
            # Any better way to find out an operator?
            # libclang does not expose the operator token, so read it (and
            # the RHS literal) straight from the source file by byte offset.
            sr = cursor.extent
            with open(sr.start.file.name, 'rt') as f:
                oplen = (children[1].extent.start.offset -
                         children[0].extent.end.offset)
                vallen = (children[1].extent.end.offset -
                          children[1].extent.start.offset)
                f.seek(children[0].extent.end.offset)
                op = f.read(oplen).strip()
                val = f.read(vallen).strip()
                if op == '=':
                    self.state_found(val, cursor)
class FSMScanner(StateFinder):
    """Scans one FSM handler function and records its transition edges.

    Edges are (from_state, source, action, to_state) tuples gathered from
    the nested switch(state)/switch(src)/switch(type) statements plus the
    state assignments found by StateFinder.
    """
    def __init__(self):
        self.edges = set()
        # Current case label of each enclosing switch, keyed by the
        # switched-on variable name; None when outside such a switch.
        self.running = {
            'state': None,
            'src': None,
            'type': None,
        }
        self.switch_stack = []

    def enter_SWITCH_STMT(self, cursor):
        ch = list(cursor.get_children())
        dn = ch[0].displayname
        if dn in self.running:
            # Nested switches on the same variable are not supported.
            assert self.running[dn] is None, (dn, self.running[dn])
        self.switch_stack.append(dn)

    def exit_SWITCH_STMT(self, cursor):
        ch = list(cursor.get_children())
        dn = ch[0].displayname
        top = self.switch_stack.pop()
        self.running[top] = None
        assert top == dn, (top, dn) # Checking consistency of switch visits

    def enter_CASE_STMT(self, cursor):
        typ = self.switch_stack[-1]
        if typ in self.running:
            # Case labels are #define'd constants; read the literal text
            # of the label from the source file by byte offset.
            sr = list(cursor.get_children())[0].extent
            with open(sr.start.file.name, 'rt') as f:
                f.seek(sr.start.offset)
                const = f.read(sr.end.offset - sr.start.offset)
            self.running[typ] = const

    def enter_DEFAULT_STMT(self, cursor):
        typ = self.switch_stack[-1]
        if typ in self.running:
            self.running[typ] = '*'

    def state_found(self, state, cursor):
        # Record one edge using the current switch context.
        r = self.running
        edge = (r['state'], r['src'], r['type'], state)
        if r['src'] is None or r['type'] is None:
            print(' Undefined state or action at {0.start.file}:{0.start.line}'
                  .format(cursor.extent),
                  file=sys.stderr)
        self.edges.add(edge)
# Shared libclang index; created in main() before any parse_file call.
index = None


def parse_file(fn):
    """Parse one C file, render a diagram per FSM, and print HTML snippets.

    PNGs are written under doc/diagrams/ via graphviz `dot`; the HTML goes
    to stdout, which main() redirects into doc/diagrams.html.
    """
    tu = index.parse(fn, sys.argv[1:])
    for i in tu.diagnostics:
        print(i, file=sys.stderr)
    finder = FindFSM()
    finder.run(tu.cursor)
    if finder.fsms:
        for func in finder.fsms:
            scan = FSMScanner()
            scan.run(func.get_definition())
            targetfn = os.path.join('doc/diagrams', func.displayname + '.png')
            print("Writing", targetfn, 'from', fn, file=sys.stderr)
            print("<h3>", func.displayname, "</h3>")
            print("<p>Source file:", fn, "</p>")
            print('<p><img src="diagrams/{}.png" border=0></p>'
                  .format(func.displayname))
            lines = []
            for fromstate, src, action, tostate in scan.edges:
                if fromstate is None: continue # Not implemented well
                if src == 'GRID_FSM_ACTION':
                    # Internal action: render as "[ACTION]" with no source.
                    lines.append('{} -> {} [label="[{}]"]'.format(
                        mkstate(fromstate),
                        mkstate(tostate),
                        mkaction(action)))
                else:
                    lines.append('{} -> {} [label="{}:{}"]'.format(
                        mkstate(fromstate),
                        mkstate(tostate),
                        mksrc(src),
                        mkaction(action)))
            data = 'digraph G {' + '\n'.join(lines) + '}'
            try:
                # NOTE(review): communicate() is handed a str; fine for the
                # python2 shebang, would need bytes under Python 3.
                subprocess.Popen(['dot', '-Tpng', '-o', targetfn],
                                 stdin=subprocess.PIPE).communicate(data)
            except OSError as e:
                sys.excepthook(*sys.exc_info())
                if e.errno == errno.ENOENT:
                    print(file=sys.stderr)
                    print("It seems you dont have `dot`", file=sys.stderr)
                    print("You may wish to try:", file=sys.stderr)
                    print(" apt-get install graphviz", file=sys.stderr)
                    sys.exit(1)
def main():
    """Walk the src/ tree, parse every .c file, write doc/diagrams.html."""
    global index
    index = Index.create()
    # Repository root: one level above the directory holding this script.
    script_dir = os.path.abspath(os.path.dirname(os.path.realpath(sys.argv[0])) + '/../')
    with open('doc/diagrams.html', 'wt') as f:
        # parse_file() prints its HTML to stdout; route stdout to the file.
        sys.stdout = f
        print(HTML_HEADER)
        for dirpath, dirs, files in os.walk(os.path.join(script_dir,'src' )):
            # Note: this loop variable shadows the outer file handle `f`;
            # harmless since output goes through the redirected sys.stdout.
            for f in files:
                if not f.endswith('.c'):
                    continue
                parse_file(os.path.join(dirpath, f))
        print(HTML_FOOTER)


if __name__ == '__main__':
    main()
|
gridmq/gridmq
|
man/diag.py
|
Python
|
mit
| 10,294
|
[
"VisIt"
] |
0ea76c1554d66592dd89af83eb177027354f1bdefad87edc91df4694cbeb1864
|
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
# Version-control keyword ($Revision$), kept for cclib's legacy tracking.
__revision__ = "$Revision$"
import numpy
from . import logfileparser
from . import utils
class Molpro(logfileparser.Logfile):
    """Molpro file parser"""

    def __init__(self, *args, **kwargs):
        # Call the __init__ method of the superclass
        super(Molpro, self).__init__(logname="Molpro", *args, **kwargs)
def __str__(self):
    """Return a string representation of the object."""
    return "Molpro log file {}".format(self.filename)
def __repr__(self):
    """Return a representation of the object."""
    return 'Molpro("{}")'.format(self.filename)
def normalisesym(self, label):
    """Normalise the symmetries used by Molpro.

    Molpro marks symmetry labels with backticks (A`, A``); convert them to
    the conventional primes (A', A'').
    """
    # Replacing every single backtick already turns `` into '', so the old
    # chained .replace("``", "''") could never match and was dead code.
    return label.replace("`", "'")
def before_parsing(self):
    # Buffers an "ELECTRON ORBITALS ..." header line when one orbital block
    # runs directly into the next; doubles as the continuation flag.
    self.electronorbitals = ""
    # True while inside an SCF program section (set on "PROGRAM ... -SCF").
    self.insidescf = False
def after_parsing(self):
    """Fill in default geometry-optimization targets when none were parsed.

    Molpro normally prints optimization thresholds only when they differ
    from the defaults, so a missing attribute means defaults were in use.
    """
    if hasattr(self, "geotargets"):
        return
    self.geotargets = [
        3E-4,  # default THRGRAD (required accuracy of the optimized gradient)
        1E-6,  # default THRENERG (required accuracy of the optimized energy)
        3E-4,  # default THRSTEP (geometry optimization step convergence)
    ]
def extract(self, inputfile, line):
    """Extract information from the file object inputfile.

    Called once per line of the logfile; each block below recognizes one
    Molpro print-out section and fills the corresponding cclib attribute,
    consuming further lines from `inputfile` as needed.
    """
    # Geometry: one row per atom with nuclear charge and Bohr coordinates.
    if line[1:19] == "ATOMIC COORDINATES":
        if not hasattr(self,"atomcoords"):
            self.atomcoords = []
            self.atomnos = []
        line = next(inputfile)
        line = next(inputfile)
        line = next(inputfile)
        atomcoords = []
        atomnos = []
        line = next(inputfile)
        while line.strip():
            temp = line.strip().split()
            atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[3:6]]) #bohrs to angs
            atomnos.append(int(round(float(temp[2]))))
            line = next(inputfile)
        self.atomnos = numpy.array(atomnos, "i")
        self.atomcoords.append(atomcoords)
        self.natom = len(self.atomnos)
    # Use BASIS DATA to parse input for aonames and atombasis.
    # This is always the first place this information is printed, so no attribute check is needed.
    if line[1:11] == "BASIS DATA":
        blank = next(inputfile)
        header = next(inputfile)
        blank = next(inputfile)
        self.aonames = []
        self.atombasis = []
        self.gbasis = []
        for i in range(self.natom):
            self.atombasis.append([])
            self.gbasis.append([])
        line = "dummy"
        while line.strip() != "":
            line = next(inputfile)
            # Fixed-width columns of the BASIS DATA table.
            funcnr = line[1:6]
            funcsym = line[7:9]
            funcatom_ = line[11:14]
            functype_ = line[16:22]
            funcexp = line[25:38]
            funccoeffs = line[38:]
            # If a new function type is printed or the BASIS DATA block ends,
            # then the previous functions can be added to gbasis.
            # When translating the Molpro function type name into a gbasis code,
            # note that Molpro prints all components, and we want to add
            # only one to gbasis, with the proper code (S,P,D,F,G).
            # Warning! The function types differ for cartesian/spherical functions.
            # Skip the first printed function type, however (line[3] != '1').
            if (functype_.strip() and line[1:4] != ' 1') or line.strip() == "":
                funcbasis = None
                if functype in ['1s', 's']:
                    funcbasis = 'S'
                if functype in ['x', '2px']:
                    funcbasis = 'P'
                if functype in ['xx', '3d0']:
                    funcbasis = 'D'
                if functype in ['xxx', '4f0']:
                    funcbasis = 'F'
                if functype in ['xxxx', '5g0']:
                    funcbasis = 'G'
                if funcbasis:
                    # The function is split into as many columns as there are.
                    for i in range(len(coefficients[0])):
                        func = (funcbasis, [])
                        for j in range(len(exponents)):
                            func[1].append((exponents[j], coefficients[j][i]))
                        self.gbasis[funcatom-1].append(func)
            # If it is a new type, set up the variables for the next shell(s).
            if functype_.strip():
                exponents = []
                coefficients = []
                functype = functype_.strip()
                funcatom = int(funcatom_.strip())
            # Add exponents and coefficients to lists.
            if line.strip():
                funcexp = float(funcexp)
                funccoeffs = [float(s) for s in funccoeffs.split()]
                exponents.append(funcexp)
                coefficients.append(funccoeffs)
            # If the function number is there, add to atombasis and aonames.
            if funcnr.strip():
                funcnr = int(funcnr.split('.')[0])
                self.atombasis[funcatom-1].append(funcnr-1)
                element = self.table.element[self.atomnos[funcatom-1]]
                aoname = "%s%i_%s" % (element, funcatom, functype)
                self.aonames.append(aoname)
    # Basis-set size; must agree across repeated print-outs.
    if line[1:23] == "NUMBER OF CONTRACTIONS":
        nbasis = int(line.split()[3])
        if hasattr(self, "nbasis"):
            assert nbasis == self.nbasis
        else:
            self.nbasis = nbasis
    # This is used to signalize whether we are inside an SCF calculation.
    if line[1:8] == "PROGRAM" and line[14:18] == "-SCF":
        self.insidescf = True
    # Use this information instead of 'SETTING ...', in case the defaults are standard.
    # Note that this is sometimes printed in each geometry optimization step.
    if line[1:20] == "NUMBER OF ELECTRONS":
        spinup = int(line.split()[3][:-1])
        spindown = int(line.split()[4][:-1])
        # Nuclear charges (atomnos) should be parsed by now.
        nuclear = numpy.sum(self.atomnos)
        charge = nuclear - spinup - spindown
        mult = spinup - spindown + 1
        # Copy charge, or assert for exceptions if already exists.
        if not hasattr(self, "charge"):
            self.charge = charge
        else:
            assert self.charge == charge
        # Copy multiplicity, or assert for exceptions if already exists.
        if not hasattr(self, "mult"):
            self.mult = mult
        else:
            assert self.mult == mult
    # Convergence thresholds for SCF cycle, should be contained in a line such as:
    #   CONVERGENCE THRESHOLDS:    1.00E-05 (Density)    1.40E-07 (Energy)
    if self.insidescf and line[1:24] == "CONVERGENCE THRESHOLDS:":
        if not hasattr(self, "scftargets"):
            self.scftargets = []
        scftargets = list(map(float, line.split()[2::2]))
        self.scftargets.append(scftargets)
        # Usually two criteria, but save the names just in case.
        self.scftargetnames = line.split()[3::2]
    # Read in the print out of the SCF cycle - for scfvalues. For RHF looks like:
    # ITERATION    DDIFF          GRAD             ENERGY        2-EL.EN.            DIPOLE MOMENTS         DIIS
    #     1      0.000D+00      0.000D+00      -379.71523700   1159.621171   0.000000   0.000000   0.000000    0
    #     2      0.000D+00      0.898D-02      -379.74469736   1162.389787   0.000000   0.000000   0.000000    1
    #     3      0.817D-02      0.144D-02      -379.74635529   1162.041033   0.000000   0.000000   0.000000    2
    #     4      0.213D-02      0.571D-03      -379.74658063   1162.159929   0.000000   0.000000   0.000000    3
    #     5      0.799D-03      0.166D-03      -379.74660889   1162.144256   0.000000   0.000000   0.000000    4
    if self.insidescf and line[1:10] == "ITERATION":
        if not hasattr(self, "scfvalues"):
            self.scfvalues = []
        line = next(inputfile)
        energy = 0.0
        scfvalues = []
        while line.strip() != "":
            if line.split()[0].isdigit():
                # Molpro uses Fortran D exponents.
                ddiff = float(line.split()[1].replace('D','E'))
                newenergy = float(line.split()[3])
                ediff = newenergy - energy
                energy = newenergy
                # The convergence thresholds must have been read above.
                # Presently, we recognize MAX DENSITY and MAX ENERGY thresholds.
                numtargets = len(self.scftargetnames)
                values = [numpy.nan]*numtargets
                for n, name in zip(list(range(numtargets)),self.scftargetnames):
                    if "ENERGY" in name.upper():
                        values[n] = ediff
                    elif "DENSITY" in name.upper():
                        values[n] = ddiff
                scfvalues.append(values)
            line = next(inputfile)
        self.scfvalues.append(numpy.array(scfvalues))
    # SCF result - RHF/UHF and DFT (RKS) energies.
    if line[1:5] in ["!RHF", "!UHF", "!RKS"] and line[16:22] == "ENERGY":
        if not hasattr(self, "scfenergies"):
            self.scfenergies = []
        scfenergy = float(line.split()[4])
        self.scfenergies.append(utils.convertor(scfenergy, "hartree", "eV"))
        # We are now done with SCF cycle (after a few lines).
        self.insidescf = False
    # MP2 energies.
    if line[1:5] == "!MP2":
        if not hasattr(self, 'mpenergies'):
            self.mpenergies = []
        mp2energy = float(line.split()[-1])
        mp2energy = utils.convertor(mp2energy, "hartree", "eV")
        self.mpenergies.append([mp2energy])
    # MP2 energies if MP3 or MP4 is also calculated.
    if line[1:5] == "MP2:":
        if not hasattr(self, 'mpenergies'):
            self.mpenergies = []
        mp2energy = float(line.split()[2])
        mp2energy = utils.convertor(mp2energy, "hartree", "eV")
        self.mpenergies.append([mp2energy])
    # MP3 (D) and MP4 (DQ or SDQ) energies.
    if line[1:8] == "MP3(D):":
        mp3energy = float(line.split()[2])
        # Note: the mp2energy name is reused here for the converted MP3
        # value appended to the current mpenergies row.
        mp2energy = utils.convertor(mp3energy, "hartree", "eV")
        line = next(inputfile)
        self.mpenergies[-1].append(mp2energy)
    if line[1:9] == "MP4(DQ):":
        mp4energy = float(line.split()[2])
        line = next(inputfile)
        # Prefer the SDQ value on the following line when present.
        if line[1:10] == "MP4(SDQ):":
            mp4energy = float(line.split()[2])
        mp4energy = utils.convertor(mp4energy, "hartree", "eV")
        self.mpenergies[-1].append(mp4energy)
    # The CCSD program operates all closed-shell coupled cluster runs.
    if line[1:15] == "PROGRAM * CCSD":
        if not hasattr(self, "ccenergies"):
            self.ccenergies = []
        while line[1:20] != "Program statistics:":
            # The last energy (most exact) will be read last and thus saved.
            if line[1:5] == "!CCD" or line[1:6] == "!CCSD" or line[1:9] == "!CCSD(T)":
                ccenergy = float(line.split()[-1])
                ccenergy = utils.convertor(ccenergy, "hartree", "eV")
            line = next(inputfile)
        self.ccenergies.append(ccenergy)
    # Read the occupancy (index of HOMO s).
    # For restricted calculations, there is one line here. For unrestricted, two:
    #   Final alpha occupancy: ...
    #   Final beta occupancy: ...
    if line[1:17] == "Final occupancy:":
        self.homos = [int(line.split()[-1])-1]
    if line[1:23] == "Final alpha occupancy:":
        self.homos = [int(line.split()[-1])-1]
        line = next(inputfile)
        self.homos.append(int(line.split()[-1])-1)
    # From this block atombasis, moenergies, and mocoeffs can be parsed.
    # Note that Molpro does not print this by default, you must add this in the input:
    #   GPRINT,ORBITALS
    # What's more, this prints only the occupied orbitals. To get virtuals, add also:
    #   ORBPTIN,NVIRT
    # where NVIRT is how many to print (can be some large number, like 99999, to print all).
    # The block is in general flipped when compared to other programs (GAMESS, Gaussian), and
    # MOs in the rows. Also, it does not cut the table into parts, rather each MO row has
    # as many lines as it takes to print all the coefficients, as shown below:
    #
    # ELECTRON ORBITALS
    # =================
    #
    #
    #   Orb  Occ    Energy  Couls-En    Coefficients
    #
    #                                   1 1s      1 1s      1 2px     1 2py     1 2pz     2 1s   (...)
    #                                   3 1s      3 1s      3 2px     3 2py     3 2pz     4 1s   (...)
    # (...)
    #
    #   1.1   2   -11.0351  -43.4915  0.701460  0.025696 -0.000365 -0.000006  0.000000  0.006922 (...)
    #                                -0.006450  0.004742 -0.001028 -0.002955  0.000000 -0.701460 (...)
    # (...)
    #
    # For unrestricted calculations, ELECTRON ORBITALS is followed on the same line
    # by FOR POSITIVE SPIN or FOR NEGATIVE SPIN.
    # For examples, see data/Molpro/basicMolpro2006/dvb_sp*.
    if line[1:18] == "ELECTRON ORBITALS" or self.electronorbitals:
        # Detect if we are reading beta (negative spin) orbitals.
        spin = 0
        if line[19:36] == "FOR NEGATIVE SPIN" or self.electronorbitals[19:36] == "FOR NEGATIVE SPIN":
            spin = 1
        # Skip the table preamble unless we carried over a buffered header.
        if not self.electronorbitals:
            dashes = next(inputfile)
            blank = next(inputfile)
            blank = next(inputfile)
            headers = next(inputfile)
            blank = next(inputfile)
        # Parse the list of atomic orbitals if atombasis or aonames is missing.
        line = next(inputfile)
        if not hasattr(self, "atombasis") or not hasattr(self, "aonames"):
            self.atombasis = []
            for i in range(self.natom):
                self.atombasis.append([])
            self.aonames = []
            aonum = 0
            while line.strip():
                for s in line.split():
                    if s.isdigit():
                        atomno = int(s)
                        self.atombasis[atomno-1].append(aonum)
                        aonum += 1
                    else:
                        functype = s
                        element = self.table.element[self.atomnos[atomno-1]]
                        aoname = "%s%i_%s" % (element, atomno, functype)
                        self.aonames.append(aoname)
                line = next(inputfile)
        else:
            while line.strip():
                line = next(inputfile)
        # Now there can be one or two blank lines.
        while not line.strip():
            line = next(inputfile)
        # Create empty moenergies and mocoeffs if they don't exist.
        if not hasattr(self, "moenergies"):
            self.moenergies = [[]]
            self.mocoeffs = [[]]
        # Do the same if they exist and are being read again (spin=0),
        # this means only the last print-out of these data are saved,
        # which is consistent with current cclib practices.
        elif len(self.moenergies) == 1 and spin == 0:
            self.moenergies = [[]]
            self.mocoeffs = [[]]
        else:
            self.moenergies.append([])
            self.mocoeffs.append([])
        while line.strip() and not "ORBITALS" in line:
            coeffs = []
            while line.strip() != "":
                # The first 30 columns carry orbital number/occ/energy
                # only on the first line of each MO row.
                if line[:30].strip():
                    moenergy = float(line.split()[2])
                    moenergy = utils.convertor(moenergy, "hartree", "eV")
                    self.moenergies[spin].append(moenergy)
                line = line[31:]
                # Each line has 10 coefficients in 10.6f format.
                num = len(line)//10
                for i in range(num):
                    try:
                        coeff = float(line[10*i:10*(i+1)])
                    # Molpro prints stars when coefficients are huge.
                    except ValueError as detail:
                        self.logger.warn("Set coefficient to zero: %s" %detail)
                        coeff = 0.0
                    coeffs.append(coeff)
                line = next(inputfile)
            self.mocoeffs[spin].append(coeffs)
            line = next(inputfile)
        # Check if last line begins the next ELECTRON ORBITALS section.
        if line[1:18] == "ELECTRON ORBITALS":
            self.electronorbitals = line
        else:
            self.electronorbitals = ""
    # If the MATROP program was called appropriately,
    # the atomic orbital overlap matrix S is printed.
    # The matrix is printed straight-out, ten elements in each row, both halves.
    # Note that if the entire matrix is not printed, then aooverlaps
    # will not have dimensions nbasis x nbasis.
    if line[1:9] == "MATRIX S":
        blank = next(inputfile)
        symblocklabel = next(inputfile)
        if not hasattr(self, "aooverlaps"):
            self.aooverlaps = [[]]
        line = next(inputfile)
        while line.strip() != "":
            elements = [float(s) for s in line.split()]
            # Accumulate elements into rows of width nbasis, splitting a
            # printed line across two matrix rows when necessary.
            if len(self.aooverlaps[-1]) + len(elements) <= self.nbasis:
                self.aooverlaps[-1] += elements
            else:
                n = len(self.aooverlaps[-1]) + len(elements) - self.nbasis
                self.aooverlaps[-1] += elements[:-n]
                self.aooverlaps.append([])
                self.aooverlaps[-1] += elements[-n:]
            line = next(inputfile)
    # Thresholds are printed only if the defaults are changed with GTHRESH.
    # In that case, we can fill geotargets with non-default values.
    # The block should look like this as of Molpro 2006.1:
    # THRESHOLDS:
    # ZERO    = 1.00D-12  ONEINT  = 1.00D-12  TWOINT  = 1.00D-11  PREFAC  = 1.00D-14  LOCALI  = 1.00D-09  EORDER  = 1.00D-04
    # ENERGY  = 0.00D+00  ETEST   = 0.00D+00  EDENS   = 0.00D+00  THRDEDEF= 1.00D-06  GRADIENT= 1.00D-02  STEP    = 1.00D-03
    # ORBITAL = 1.00D-05  CIVEC   = 1.00D-05  COEFF   = 1.00D-04  PRINTCI = 5.00D-02  PUNCHCI = 9.90D+01  OPTGRAD = 3.00D-04
    # OPTENERG= 1.00D-06  OPTSTEP = 3.00D-04  THRGRAD = 2.00D-04  COMPRESS= 1.00D-11  VARMIN  = 1.00D-07  VARMAX  = 1.00D-03
    # THRDOUB = 0.00D+00  THRDIV  = 1.00D-05  THRRED  = 1.00D-07  THRPSP  = 1.00D+00  THRDC   = 1.00D-10  THRCS   = 1.00D-10
    # THRNRM  = 1.00D-08  THREQ   = 0.00D+00  THRDE   = 1.00D+00  THRREF  = 1.00D-05  SPARFAC = 1.00D+00  THRDLP  = 1.00D-07
    # THRDIA  = 1.00D-10  THRDLS  = 1.00D-07  THRGPS  = 0.00D+00  THRKEX  = 0.00D+00  THRDIS  = 2.00D-01  THRVAR  = 1.00D-10
    # THRLOC  = 1.00D-06  THRGAP  = 1.00D-06  THRLOCT = -1.00D+00 THRGAPT = -1.00D+00 THRORB  = 1.00D-06  THRMLTP = 0.00D+00
    # THRCPQCI= 1.00D-10  KEXTA   = 0.00D+00  THRCOARS= 0.00D+00  SYMTOL  = 1.00D-06  GRADTOL = 1.00D-06  THROVL  = 1.00D-08
    # THRORTH = 1.00D-08  GRID    = 1.00D-06  GRIDMAX = 1.00D-03  DTMAX   = 0.00D+00
    if line [1:12] == "THRESHOLDS":
        blank = next(inputfile)
        line = next(inputfile)
        while line.strip():
            if "OPTENERG" in line:
                start = line.find("OPTENERG")
                optenerg = line[start+10:start+20]
            if "OPTGRAD" in line:
                start = line.find("OPTGRAD")
                optgrad = line[start+10:start+20]
            if "OPTSTEP" in line:
                start = line.find("OPTSTEP")
                optstep = line[start+10:start+20]
            line = next(inputfile)
        # NOTE(review): these are the raw fixed-width strings, not floats —
        # confirm downstream consumers accept that.
        self.geotargets = [optenerg, optgrad, optstep]
    # The optimization history is the source for geovalues:
    # END OF GEOMETRY OPTIMIZATION.    TOTAL CPU: 246.9 SEC
    #
    # ITER.   ENERGY(OLD)    ENERGY(NEW)      DE          GRADMAX     GRADNORM    GRADRMS     STEPMAX     STEPLEN     STEPRMS
    #  1  -382.02936898  -382.04914450    -0.01977552  0.11354875  0.20127947  0.01183997  0.12972761  0.20171740  0.01186573
    #  ...
    if line[1:30] == "END OF GEOMETRY OPTIMIZATION.":
        blank = next(inputfile)
        headers = next(inputfile)
        # Although criteria can be changed, the printed format should not change.
        # In case it does, retrieve the columns for each parameter.
        headers = headers.split()
        index_THRENERG = headers.index('DE')
        index_THRGRAD = headers.index('GRADMAX')
        index_THRSTEP = headers.index('STEPMAX')
        line = next(inputfile)
        self.geovalues = []
        while line.strip() != "":
            line = line.split()
            geovalues = []
            geovalues.append(float(line[index_THRENERG]))
            geovalues.append(float(line[index_THRGRAD]))
            geovalues.append(float(line[index_THRSTEP]))
            self.geovalues.append(geovalues)
            line = next(inputfile)
    # This block should look like this:
    #   Normal Modes
    #
    #                                1 Au        2 Bu        3 Ag        4 Bg        5 Ag
    #   Wavenumbers [cm-1]          151.81      190.88      271.17      299.59      407.86
    #   Intensities [km/mol]          0.33        0.28        0.00        0.00        0.00
    #   Intensities [relative]        0.34        0.28        0.00        0.00        0.00
    #             CX1              0.00000    -0.01009     0.02577     0.00000     0.06008
    #             CY1              0.00000    -0.05723    -0.06696     0.00000     0.06349
    #             CZ1             -0.02021     0.00000     0.00000     0.11848     0.00000
    #             CX2              0.00000    -0.01344     0.05582     0.00000    -0.02513
    #             CY2              0.00000    -0.06288    -0.03618     0.00000     0.00349
    #             CZ2             -0.05565     0.00000     0.00000     0.07815     0.00000
    #             ...
    # Molpro prints low frequency modes in a subsequent section with the same format,
    # which also contains zero frequency modes, with the title:
    #   Normal Modes of low/zero frequencies
    if line[1:13] == "Normal Modes":
        if line[1:37] == "Normal Modes of low/zero frequencies":
            islow = True
        else:
            islow = False
        blank = next(inputfile)
        # Each portion of five modes is followed by a single blank line.
        # The whole block is followed by an additional blank line.
        line = next(inputfile)
        while line.strip():
            # Mode numbers/symmetries header of a five-mode portion.
            if line[1:25].isspace():
                numbers = list(map(int, line.split()[::2]))
                vibsyms = line.split()[1::2]
            if line[1:12] == "Wavenumbers":
                vibfreqs = list(map(float, line.strip().split()[2:]))
            if line[1:21] == "Intensities [km/mol]":
                vibirs = list(map(float, line.strip().split()[2:]))
            # There should always be 3 x natom displacement rows.
            if line[1:11].isspace() and line[13:25].strip().isdigit():
                # There are a maximum of 5 modes per line.
                nmodes = len(line.split())-1
                vibdisps = []
                for i in range(nmodes):
                    vibdisps.append([])
                    for n in range(self.natom):
                        vibdisps[i].append([])
                for i in range(nmodes):
                    disp = float(line.split()[i+1])
                    vibdisps[i][0].append(disp)
                for i in range(self.natom*3 - 1):
                    line = next(inputfile)
                    # iatom is taken from the outer i before the inner loop
                    # below rebinds it (safe: for re-binds i each iteration).
                    iatom = (i+1)//3
                    for i in range(nmodes):
                        disp = float(line.split()[i+1])
                        vibdisps[i][iatom].append(disp)
            line = next(inputfile)
            # A blank line ends one five-mode portion: flush accumulators.
            if not line.strip():
                if not hasattr(self, "vibfreqs"):
                    self.vibfreqs = []
                if not hasattr(self, "vibsyms"):
                    self.vibsyms = []
                if not hasattr(self, "vibirs") and "vibirs" in dir():
                    self.vibirs = []
                if not hasattr(self, "vibdisps") and "vibdisps" in dir():
                    self.vibdisps = []
                if not islow:
                    self.vibfreqs.extend(vibfreqs)
                    self.vibsyms.extend(vibsyms)
                    if "vibirs" in dir():
                        self.vibirs.extend(vibirs)
                    if "vibdisps" in dir():
                        self.vibdisps.extend(vibdisps)
                else:
                    # Low/zero-frequency section: drop zero modes and
                    # prepend the rest before the regular modes.
                    nonzero = [f > 0 for f in vibfreqs]
                    vibfreqs = [f for f in vibfreqs if f > 0]
                    self.vibfreqs = vibfreqs + self.vibfreqs
                    vibsyms = [vibsyms[i] for i in range(len(vibsyms)) if nonzero[i]]
                    self.vibsyms = vibsyms + self.vibsyms
                    if "vibirs" in dir():
                        vibirs = [vibirs[i] for i in range(len(vibirs)) if nonzero[i]]
                        self.vibirs = vibirs + self.vibirs
                    if "vibdisps" in dir():
                        vibdisps = [vibdisps[i] for i in range(len(vibdisps)) if nonzero[i]]
                        self.vibdisps = vibdisps + self.vibdisps
                line = next(inputfile)
    # Lower-triangle force-constant (hessian) matrix, printed in column
    # chunks; reassembled into a flat list below.
    if line[1:16] == "Force Constants":
        self.logger.info("Creating attribute hessian")
        self.hessian = []
        line = next(inputfile)
        hess = []
        tmp = []
        while line.strip():
            # Header lines (non-numeric payload) are skipped.
            try: list(map(float, line.strip().split()[2:]))
            except:
                line = next(inputfile)
            # NOTE(review): the next expression is evaluated and discarded —
            # looks like leftover debugging; kept to preserve behavior.
            line.strip().split()[1:]
            hess.extend([list(map(float, line.strip().split()[1:]))])
            line = next(inputfile)
        lig = 0
        # Collect the first column chunk; subsequent chunks are appended
        # row-wise to rebuild complete triangle rows.
        while (lig==0) or (len(hess[0]) > 1):
            tmp.append(hess.pop(0))
            lig += 1
        k = 5
        while len(hess) != 0:
            tmp[k] += hess.pop(0)
            k += 1
            if (len(tmp[k-1]) == lig): break
            if k >= lig: k = len(tmp[-1])
        for l in tmp: self.hessian += l
    # Atomic masses follow the hessian print-out; one continued list.
    if line[1:14] == "Atomic Masses" and hasattr(self,"hessian"):
        line = next(inputfile)
        self.amass = list(map(float, line.strip().split()[2:]))
        while line.strip():
            line = next(inputfile)
            self.amass += list(map(float, line.strip().split()[2:]))
# Manual test hook: run this module's doctests directly.
if __name__ == "__main__":
    import doctest, molproparser
    doctest.testmod(molproparser, verbose=False)
|
Clyde-fare/cclib_bak
|
src/cclib/parser/molproparser.py
|
Python
|
lgpl-2.1
| 30,274
|
[
"GAMESS",
"Gaussian",
"Molpro",
"cclib"
] |
7d181f978d35a66aa754db6886ed72c40b017415b59f742aa841b85bf47fa79c
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Initializing espresso modules and the numpy package
import sys
import numpy as np
import espressomd
espressomd.assert_features(["ELECTROKINETICS"])
from espressomd import electrokinetics, shapes
# Slit-pore geometry: `width` is the non-periodic extent of the channel,
# while the padding on either side ensures there is no field outside the slit.
box_x = 6
box_y = 6
width = 50
padding = 1
box_z = width + 2 * padding
system = espressomd.System(box_l=[box_x, box_y, box_z])
# Electrokinetic parameters (simulation units)
agrid = 1.0               # lattice spacing of the LB/EK grid
dt = 0.2                  # time step
kT = 1.0                  # thermal energy (passed to Electrokinetics as T)
bjerrum_length = 0.7095
D = 0.006075              # diffusion coefficient of the ion species (Species D=...)
valency = 1.0
viscosity_dynamic = 79.53
density_water = 26.15
sigma = -0.05             # wall surface charge density (used by the EK boundaries)
ext_force_density = 0.1   # external driving force density applied along x
# Set the simulation parameters
system.time_step = dt
system.cell_system.skin = 0.2
system.thermostat.turn_off()
integration_length = 2000
# Set up the (LB) electrokinetics fluid; the EK actor takes the kinematic
# viscosity, so convert from the dynamic viscosity first.
viscosity_kinematic = viscosity_dynamic / density_water
ek = electrokinetics.Electrokinetics(agrid=agrid, lb_density=density_water,
                                     viscosity=viscosity_kinematic, friction=1.0,
                                     T=kT, prefactor=bjerrum_length)
# Counterion species: its bulk density is chosen so the total ionic charge
# neutralizes the two charged walls (-2*sigma spread across the channel width).
density_counterions = -2.0 * sigma / width
counterions = electrokinetics.Species(density=density_counterions,
                                      D=D, valency=valency,
                                      ext_force_density=[ext_force_density, 0, 0])
ek.add_species(counterions)
# Charged walls confining the fluid at the bottom and top of the slit
# (normals point into the channel; dist follows ESPResSo's Wall convention).
ek_wall_left = espressomd.ekboundaries.EKBoundary(charge_density=sigma / agrid,
                                                  shape=shapes.Wall(
                                                      normal=[0, 0, 1],
                                                      dist=padding))
ek_wall_right = espressomd.ekboundaries.EKBoundary(
    charge_density=sigma / agrid,
    shape=shapes.Wall(normal=[0, 0, -1],
                      dist=-(padding + width)))
system.ekboundaries.add(ek_wall_left)
system.ekboundaries.add(ek_wall_right)
system.actors.add(ek)
# Integrate the system, printing a simple in-place progress percentage.
for i in range(100):
    system.integrator.run(integration_length)
    sys.stdout.write("\rintegration: %i%%" % (i + 1))
    sys.stdout.flush()
# Sample profiles along z (the non-periodic direction), skipping the padding
# cells, and measure at the lateral center of the box.
position_list = []
density_list = []
velocity_list = []
pressure_xz_list = []
for i in range(int(box_z / agrid)):
    if (i * agrid >= padding) and (i * agrid < box_z - padding):
        # position of the cell center relative to the channel midplane
        position = i * agrid - padding - width / 2.0 + agrid / 2.0
        position_list.append(position)
        # NOTE(review): box_x / (2 * agrid) is a float under Python 3 division;
        # confirm the node index accepts floats, otherwise cast with int().
        # density
        density_list.append(
            counterions[box_x / (2 * agrid), box_y / (2 * agrid), i].density)
        # velocity
        velocity_list.append(
            ek[box_x / (2 * agrid), box_y / (2 * agrid), i].velocity[0])
        # xz component pressure tensor
        pressure_xz_list.append(
            ek[box_x / (2 * agrid), box_y / (2 * agrid), i].pressure[0, 2])
# Write the sampled profiles as four columns for later comparison/plotting.
np.savetxt("eof_electrokinetics.dat", np.column_stack((position_list,
                                                       density_list,
                                                       velocity_list,
                                                       pressure_xz_list)),
           header="#position calculated_density calculated_velocity calculated_pressure_xz")
|
mkuron/espresso
|
doc/tutorials/07-electrokinetics/scripts/eof_electrokinetics.py
|
Python
|
gpl-3.0
| 4,100
|
[
"ESPResSo"
] |
785a82bcc288b1217a1c9f0ad7f749812e107b4bf20ee30e56627419f3cbd032
|
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [Adam Breindel](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [Christian von Koch](https://www.linkedin.com/in/christianvonkoch/) and [William Anzén](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC # Artificial Neural Network - Perceptron
# MAGIC
# MAGIC The field of artificial neural networks started out with an electromechanical binary unit called a perceptron.
# MAGIC
# MAGIC The perceptron took a weighted set of input signals and chose an output state (on/off or high/low) based on a threshold.
# MAGIC
# MAGIC <img src="http://i.imgur.com/c4pBaaU.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC (raaz) Thus, the perceptron is defined by:
# MAGIC
# MAGIC $$
# MAGIC f(1, x\_1,x\_2,\ldots , x\_n \, ; \, w\_0,w\_1,w\_2,\ldots , w\_n) =
# MAGIC \begin{cases}
# MAGIC 1 & \text{if} \quad \sum\_{i=0}^n w\_i x\_i > 0 \\\\
# MAGIC 0 & \text{otherwise}
# MAGIC \end{cases}
# MAGIC $$
# MAGIC and implementable with the following arithmetical and logical unit (ALU) operations in a machine:
# MAGIC
# MAGIC * n inputs from one \\(n\\)-dimensional data point: \\( x_1,x_2,\ldots x_n \, \in \, \mathbb{R}^n\\)
# MAGIC * arithmetic operations
# MAGIC * n+1 multiplications
# MAGIC * n additions
# MAGIC * boolean operations
# MAGIC * one if-then on an inequality
# MAGIC * one output \\(o \in \\{0,1\\}\\), i.e., \\(o\\) belongs to the set containing \\(0\\) and \\(1\\)
# MAGIC * n+1 parameters of interest
# MAGIC
# MAGIC This is just a hyperplane given by a dot product of \\(n+1\\) known inputs and \\(n+1\\) unknown parameters that can be estimated. This hyperplane can be used to define a hyperplane that partitions \\(\mathbb{R}^{n+1}\\), the real Euclidean space, into two parts labelled by the outputs \\(0\\) and \\(1\\).
# MAGIC
# MAGIC The problem of finding estimates of the parameters, \\( (\hat{w}\_0,\hat{w}\_1,\hat{w}\_2,\ldots \hat{w}\_n) \in \mathbb{R}^{(n+1)} \\), in some statistically meaningful manner for a predicting task by using the training data given by, say \\(k\\) *labelled points*, where you know both the input and output:
# MAGIC $$
# MAGIC \left( ( \, 1, x\_1^{(1)},x\_2^{(1)}, \ldots x\_n^{(1)}), (o^{(1)}) \, ), \, ( \, 1, x\_1^{(2)},x\_2^{(2)}, \ldots x\_n^{(2)}), (o^{(2)}) \, ), \, \ldots \, , ( \, 1, x\_1^{(k)},x\_2^{(k)}, \ldots x\_n^{(k)}), (o^{(k)}) \, ) \right) \, \in \, (\mathbb{R}^{n+1} \times \\{ 0,1 \\} )^k
# MAGIC $$
# MAGIC is the machine learning problem here.
# MAGIC
# MAGIC Succinctly, we are after a random mapping, denoted below by \\( \mapsto\_{\rightsquigarrow} \\), called the *estimator*:
# MAGIC $$
# MAGIC (\mathbb{R}^{n+1} \times \\{0,1\\})^k \mapsto_{\rightsquigarrow} \, \left( \, \mathtt{model}( (1,x\_1,x\_2,\ldots,x\_n) \,;\, (\hat{w}\_0,\hat{w}\_1,\hat{w}\_2,\ldots \hat{w}\_n)) : \mathbb{R}^{n+1} \to \\{0,1\\} \, \right)
# MAGIC $$
# MAGIC which takes *random* labelled dataset (to understand random here think of two scientists doing independent experiments to get their own training datasets) of size \\(k\\) and returns a *model*. These mathematical notions correspond exactly to the `estimator` and `model` (which is a `transformer`) in the language of Apache Spark's Machine Learning Pipelines we have seen before.
# MAGIC
# MAGIC We can use this `transformer` for *prediction* of *unlabelled data* where we only observe the input and want to know the output under some reasonable assumptions.
# MAGIC
# MAGIC Of course we want to be able to generalize so we don't overfit to the training data using some *empirical risk minimisation rule* such as cross-validation. Again, we have seen these in Apache Spark for other ML methods like linear regression and decision trees.
# COMMAND ----------
# MAGIC %md
# MAGIC If the output isn't right, we can adjust the weights, threshold, or bias (\\(x_0\\) above)
# MAGIC
# MAGIC The model was inspired by discoveries about the neurons of animals, so hopes were quite high that it could lead to a sophisticated machine. This model can be extended by adding multiple neurons in parallel. And we can use linear output instead of a threshold if we like for the output.
# MAGIC
# MAGIC If we were to do so, the output would look like \\({x \cdot w} + w_0\\) (this is where the vector multiplication and, eventually, matrix multiplication, comes in)
# MAGIC
# MAGIC When we look at the math this way, we see that despite this being an interesting model, it's really just a fancy linear calculation.
# MAGIC
# MAGIC And, in fact, the proof that this model -- being linear -- could not solve any problems whose solution was nonlinear ... led to the first of several "AI / neural net winters" when the excitement was quickly replaced by disappointment, and most research was abandoned.
# COMMAND ----------
# MAGIC %md
# MAGIC ### Linear Perceptron
# MAGIC
# MAGIC We'll get to the non-linear part, but the linear perceptron model is a great way to warm up and bridge the gap from traditional linear regression to the neural-net flavor.
# MAGIC
# MAGIC Let's look at a problem -- the diamonds dataset from R -- and analyze it using two traditional methods in Scikit-Learn, and then we'll start attacking it with neural networks!
# COMMAND ----------
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error
# Load the classic R 'diamonds' dataset shipped with databricks-datasets.
input_file = "/dbfs/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
df = pd.read_csv(input_file, header = 0)
# COMMAND ----------
import IPython.display as disp
# widen pandas' display so the rows fit on one line, then peek at ten rows
pd.set_option('display.width', 200)
disp.display(df[:10])
# COMMAND ----------
# drop the first column (the CSV's row index) and peek again
df2 = df.drop(df.columns[0], axis=1)
disp.display(df2[:3])
# COMMAND ----------
df3 = pd.get_dummies(df2) # this gives a one-hot encoding of categorical variables
disp.display(df3.iloc[:3, 7:18])
# COMMAND ----------
# Response vector y: column 3 of the encoded frame (the price in the diamonds
# data), flattened to 1-D. The original code had a second, no-op `y.flatten()`
# whose result was discarded; it is removed here.
y = df3.iloc[:,3:4].values.flatten()
# Feature matrix X: every column except the response.
# (The original also had a bare `np.shape(X)` whose value was discarded.)
X = df3.drop(df3.columns[3], axis=1).values
# break the dataset into training and test set with a 75% and 25% split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
# Define a decision tree model with max depth 10
dt = DecisionTreeRegressor(random_state=0, max_depth=10)
# fit the decision tree to the training data to get a fitted model
model = dt.fit(X_train, y_train)
# predict the response y for the held-out test features
y_pred = model.predict(X_test)
# report RMSE by comparing the predicted versus the observed values of y
print("RMSE %f" % np.sqrt(mean_squared_error(y_test, y_pred)) )
# COMMAND ----------
from sklearn import linear_model
# Fit a plain linear regression as a second baseline and note the (not worse) RMSE.
lr = linear_model.LinearRegression()
# BUGFIX: the original rebound the name `linear_model` to the fitted estimator,
# shadowing the sklearn module imported just above. Use a distinct name.
lr_model = lr.fit(X_train, y_train)
y_pred = lr_model.predict(X_test)
print("RMSE %f" % np.sqrt(mean_squared_error(y_test, y_pred)) )
# COMMAND ----------
# MAGIC %md
# MAGIC Now that we have a baseline, let's build a neural network -- linear at first -- and go further.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Neural Network with Keras
# MAGIC
# MAGIC ### [Keras](https://keras.io/) is a High-Level API for Neural Networks and Deep Learning
# MAGIC
# MAGIC <img src="https://s3.amazonaws.com/keras.io/img/keras-logo-2018-large-1200.png" width=600>
# MAGIC
# MAGIC #### "*Being able to go from idea to result with the least possible delay is key to doing good research.*"
# MAGIC Maintained by Francois Chollet at Google, it provides
# MAGIC
# MAGIC * High level APIs
# MAGIC * Pluggable backends for Theano, TensorFlow, CNTK, MXNet
# MAGIC * CPU/GPU support
# MAGIC * The now-officially-endorsed high-level wrapper for TensorFlow; a version ships in TF
# MAGIC * Model persistence and other niceties
# MAGIC * JavaScript, iOS, etc. deployment
# MAGIC * Interop with further frameworks, like DeepLearning4J, Spark DL Pipelines ...
# MAGIC
# MAGIC Well, with all this, why would you ever *not* use Keras?
# MAGIC
# MAGIC As an API/Facade, Keras doesn't directly expose all of the internals you might need for something custom and low-level ... so you might need to implement at a lower level first, and then perhaps wrap it to make it easily usable in Keras.
# MAGIC
# MAGIC Mr. Chollet compiles stats (roughly quarterly) on "[t]he state of the deep learning landscape: GitHub activity of major libraries over the past quarter (tickets, forks, and contributors)."
# MAGIC
# MAGIC (October 2017: https://twitter.com/fchollet/status/915366704401719296; https://twitter.com/fchollet/status/915626952408436736)
# MAGIC <table><tr><td>__GitHub__<br>
# MAGIC <img src="https://i.imgur.com/Dru8N9K.jpg" width=600>
# MAGIC </td><td>__Research__<br>
# MAGIC <img src="https://i.imgur.com/i23TAwf.png" width=600></td></tr></table>
# MAGIC
# MAGIC ## Keras has wide adoption in industry
# MAGIC
# MAGIC <img src="https://s3.amazonaws.com/keras.io/img/dl_frameworks_power_scores.png" width=600>
# COMMAND ----------
# MAGIC %md
# MAGIC ### We'll build a "Dense Feed-Forward Shallow" Network:
# MAGIC (the number of units in the following diagram does not exactly match ours)
# MAGIC <img src="https://i.imgur.com/84fxFKa.png">
# MAGIC
# MAGIC Grab a Keras API cheat sheet from https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Keras_Cheat_Sheet_Python.pdf
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
# we are going to add layers sequentially one after the other (feed-forward) to our neural network model
model = Sequential()
# the first layer has 30 nodes (or neurons) with input dimension 26 for our diamonds data
# we will use a Normal (Gaussian) kernel initializer for the weights we want to estimate
# our activation function is linear (to mimic linear regression)
model.add(Dense(30, input_dim=26, kernel_initializer='normal', activation='linear'))
# the next layer is for the response y and has only one node
model.add(Dense(1, kernel_initializer='normal', activation='linear'))
# compile the model with other specifications for loss and type of gradient descent optimisation routine
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
# fit the model to the training data using stochastic gradient descent with a batch-size of 200 and 10% of data held out for validation
history = model.fit(X_train, y_train, epochs=10, batch_size=200, validation_split=0.1)
# evaluate on the held-out test set; scores[1] is the mean_squared_error metric
scores = model.evaluate(X_test, y_test)
print()
print("test set RMSE: %f" % np.sqrt(scores[1]))
# COMMAND ----------
model.summary() # do you understand why the number of parameters in layer 1 is 810? 26*30+30=810
# COMMAND ----------
# MAGIC %md
# MAGIC Notes:
# MAGIC
# MAGIC * We didn't have to explicitly write the "input" layer, courtesy of the Keras API. We just said `input_dim=26` on the first (and only) hidden layer.
# MAGIC * `kernel_initializer='normal'` is a simple (though not always optimal) *weight initialization*
# MAGIC * Epoch: 1 pass over all of the training data
# MAGIC * Batch: Records processed together in a single training pass
# MAGIC
# MAGIC How is our RMSE vs. the std dev of the response?
# COMMAND ----------
# Standard deviation of the response — a scale for judging the RMSE above.
y.std()
# COMMAND ----------
# MAGIC %md
# MAGIC Let's look at the error ...
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# training and validation loss per epoch, as recorded by model.fit's History
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
# Databricks' display() renders the matplotlib figure inline in the notebook
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's set up a "long-running" training. This will take a few minutes to converge to the same performance we got more or less instantly with our sklearn linear regression :)
# MAGIC
# MAGIC While it's running, we can talk about the training.
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import pandas as pd
# Rebuild the whole pipeline in one self-contained cell: load and one-hot
# encode the diamonds data, split it, and train the linear-activation network
# for many more epochs.
input_file = "/dbfs/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
df = pd.read_csv(input_file, header = 0)
# drop the unnamed row-index column that comes with the CSV
df.drop(df.columns[0], axis=1, inplace=True)
# one-hot encode the categorical columns
df = pd.get_dummies(df, prefix=['cut_', 'color_', 'clarity_'])
# response: column 3 (price), flattened to 1-D. The original followed this with
# a discarded `y.flatten()` and a bare `np.shape(X)`; both no-ops are removed.
y = df.iloc[:,3:4].values.flatten()
# features: everything except the response column
X = df.drop(df.columns[3], axis=1).values
from sklearn.model_selection import train_test_split
# 75% / 25% train/test split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
model = Sequential()
model.add(Dense(30, input_dim=26, kernel_initializer='normal', activation='linear'))
model.add(Dense(1, kernel_initializer='normal', activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
# 250 epochs of minibatch training (Adam), 10% of the training data held out for validation
history = model.fit(X_train, y_train, epochs=250, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, y_test)
print("\nroot %s: %f" % (model.metrics_names[1], np.sqrt(scores[1])))
# COMMAND ----------
# MAGIC %md
# MAGIC After all this hard work we are closer to the MSE we got from linear regression, but purely using a shallow feed-forward neural network.
# COMMAND ----------
# MAGIC %md
# MAGIC ### Training: Gradient Descent
# MAGIC
# MAGIC A family of numeric optimization techniques, where we solve a problem with the following pattern:
# MAGIC
# MAGIC 1. Describe the error in the model output: this is usually some difference between the the true values and the model's predicted values, as a function of the model parameters (weights)
# MAGIC
# MAGIC 2. Compute the gradient, or directional derivative, of the error -- the "slope toward lower error"
# MAGIC
# MAGIC 3. Adjust the parameters of the model variables in the indicated direction
# MAGIC
# MAGIC 4. Repeat
# MAGIC
# MAGIC <img src="https://i.imgur.com/HOYViqN.png" width=500>
# MAGIC
# MAGIC #### Some ideas to help build your intuition
# MAGIC
# MAGIC * What happens if the variables (imagine just 2, to keep the mental picture simple) are on wildly different scales ... like one ranges from -1 to 1 while another from -1e6 to +1e6?
# MAGIC
# MAGIC * What if some of the variables are correlated? I.e., a change in one corresponds to, say, a linear change in another?
# MAGIC
# MAGIC * Other things being equal, an approximate solution with fewer variables is easier to work with than one with more -- how could we get rid of some less valuable parameters? (e.g., L1 penalty)
# MAGIC
# MAGIC * How do we know how far to "adjust" our parameters with each step?
# MAGIC
# MAGIC <img src="http://i.imgur.com/AvM2TN6.png" width=600>
# MAGIC
# MAGIC What if we have billions of data points? Does it makes sense to use all of them for each update? Is there a shortcut?
# MAGIC
# MAGIC Yes: *Stochastic Gradient Descent*
# MAGIC
# MAGIC Stochastic gradient descent is an iterative learning algorithm that uses a training dataset to update a model.
# MAGIC - The batch size is a hyperparameter of gradient descent that controls the number of training samples to work through before the model's internal parameters are updated.
# MAGIC - The number of epochs is a hyperparameter of gradient descent that controls the number of complete passes through the training dataset.
# MAGIC
# MAGIC See [https://towardsdatascience.com/epoch-vs-iterations-vs-batch-size-4dfb9c7ce9c9](https://towardsdatascience.com/epoch-vs-iterations-vs-batch-size-4dfb9c7ce9c9).
# MAGIC
# MAGIC But SGD has some shortcomings, so we typically use a "smarter" version of SGD, which has rules for adjusting the learning rate and even direction in order to avoid common problems.
# MAGIC
# MAGIC What about that "Adam" optimizer? Adam is short for "adaptive moment" and is a variant of SGD that includes momentum calculations that change over time. For more detail on optimizers, see the chapter "Training Deep Neural Nets" in Aurélien Géron's book: *Hands-On Machine Learning with Scikit-Learn and TensorFlow* (http://shop.oreilly.com/product/0636920052289.do)
# MAGIC
# MAGIC See [https://keras.io/optimizers/](https://keras.io/optimizers/) and references therein.
# COMMAND ----------
# MAGIC %md
# MAGIC ### Training: Backpropagation
# MAGIC
# MAGIC With a simple, flat model, we could use SGD or a related algorithm to derive the weights, since the error depends directly on those weights.
# MAGIC
# MAGIC With a deeper network, we have a couple of challenges:
# MAGIC
# MAGIC * The error is computed from the final layer, so the gradient of the error doesn't tell us immediately about problems in other-layer weights
# MAGIC * Our tiny diamonds model has almost a thousand weights. Bigger models can easily have millions of weights. Each of those weights may need to move a little at a time, and we have to watch out for underflow or undersignificance situations.
# MAGIC
# MAGIC __The insight is to iteratively calculate errors, one layer at a time, starting at the output. This is called backpropagation. It is neither magical nor surprising. The challenge is just doing it fast and not losing information.__
# MAGIC
# MAGIC <img src="http://i.imgur.com/bjlYwjM.jpg" width=800>
# COMMAND ----------
# MAGIC %md
# MAGIC ## Ok so we've come up with a very slow way to perform a linear regression.
# MAGIC
# MAGIC ### *Welcome to Neural Networks in the 1960s!*
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ### Watch closely now because this is where the magic happens...
# MAGIC
# MAGIC <img src="https://media.giphy.com/media/Hw5LkPYy9yfVS/giphy.gif">
# COMMAND ----------
# MAGIC %md
# MAGIC # Non-Linearity + Perceptron = Universal Approximation
# COMMAND ----------
# MAGIC %md
# MAGIC ### Where does the non-linearity fit in?
# MAGIC
# MAGIC * We start with the inputs to a perceptron -- these could be from source data, for example.
# MAGIC * We multiply each input by its respective weight, which gets us the \\(x \cdot w\\)
# MAGIC * Then add the "bias" -- an extra learnable parameter, to get \\({x \cdot w} + b\\)
# MAGIC * This value (so far) is sometimes called the "pre-activation"
# MAGIC * Now, apply a non-linear "activation function" to this value, such as the logistic sigmoid
# MAGIC
# MAGIC <img src="https://i.imgur.com/MhokAmo.gif">
# MAGIC
# MAGIC ### Now the network can "learn" non-linear functions
# MAGIC
# MAGIC To gain some intuition, consider that where the sigmoid is close to 1, we can think of that neuron as being "on" or activated, giving a specific output. When close to zero, it is "off."
# MAGIC
# MAGIC So each neuron is a bit like a switch. If we have enough of them, we can theoretically express arbitrarily many different signals.
# MAGIC
# MAGIC In some ways this is like the original artificial neuron, with the thresholding output -- the main difference is that the sigmoid gives us a smooth (arbitrarily differentiable) output that we can optimize over using gradient descent to learn the weights.
# MAGIC
# MAGIC ### Where does the signal "go" from these neurons?
# MAGIC
# MAGIC * In a regression problem, like the diamonds dataset, the activations from the hidden layer can feed into a single output neuron, with a simple linear activation representing the final output of the calculation.
# MAGIC
# MAGIC * Frequently we want a classification output instead -- e.g., with MNIST digits, where we need to choose from 10 classes. In that case, we can feed the outputs from these hidden neurons forward into a final layer of 10 neurons, and compare those final neurons' activation levels.
# MAGIC
# MAGIC Ok, before we talk any more theory, let's run it and see if we can do better on our diamonds dataset adding this "sigmoid activation."
# MAGIC
# MAGIC While that's running, let's look at the code:
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import pandas as pd
# Same pipeline as before, but with a sigmoid activation in the hidden layer.
input_file = "/dbfs/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
df = pd.read_csv(input_file, header = 0)
df.drop(df.columns[0], axis=1, inplace=True)
df = pd.get_dummies(df, prefix=['cut_', 'color_', 'clarity_'])
y = df.iloc[:,3:4].values.flatten()
# NOTE(review): the next line is a no-op — the flattened copy is discarded
y.flatten()
X = df.drop(df.columns[3], axis=1).values
# NOTE(review): bare expression mid-cell; its value is discarded
np.shape(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
model = Sequential()
model.add(Dense(30, input_dim=26, kernel_initializer='normal', activation='sigmoid')) # <- change to nonlinear activation
model.add(Dense(1, kernel_initializer='normal', activation='linear')) # <- activation is linear in output layer for this regression
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
history = model.fit(X_train, y_train, epochs=2000, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, y_test)
print("\nroot %s: %f" % (model.metrics_names[1], np.sqrt(scores[1])))
# COMMAND ----------
# MAGIC %md
# MAGIC ##### What is different here?
# MAGIC
# MAGIC * We've changed the activation in the hidden layer to "sigmoid" per our discussion.
# MAGIC * Next, notice that we're running 2000 training epochs!
# MAGIC
# MAGIC Even so, it takes a looooong time to converge. If you experiment a lot, you'll find that ... it still takes a long time to converge. Around the early part of the most recent deep learning renaissance, researchers started experimenting with other non-linearities.
# MAGIC
# MAGIC (Remember, we're talking about non-linear activations in the hidden layer. The output here is still using "linear" rather than "softmax" because we're performing regression, not classification.)
# MAGIC
# MAGIC In theory, any non-linearity should allow learning, and maybe we can use one that "works better"
# MAGIC
# MAGIC By "works better" we mean
# MAGIC
# MAGIC * Simpler gradient - faster to compute
# MAGIC * Less prone to "saturation" -- where the neuron ends up way off in the 0 or 1 territory of the sigmoid and can't easily learn anything
# MAGIC * Keeps gradients "big" -- avoiding the large, flat, near-zero gradient areas of the sigmoid
# MAGIC
# MAGIC Turns out that a big breakthrough and popular solution is a very simple hack:
# MAGIC
# MAGIC ### Rectified Linear Unit (ReLU)
# MAGIC
# MAGIC <img src="http://i.imgur.com/oAYh9DN.png" width=1000>
# COMMAND ----------
# MAGIC %md
# MAGIC ### Go change your hidden-layer activation from 'sigmoid' to 'relu'
# MAGIC
# MAGIC Start your script and watch the error for a bit!
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import pandas as pd
# Same pipeline once more; only the hidden-layer activation changes to ReLU.
input_file = "/dbfs/databricks-datasets/Rdatasets/data-001/csv/ggplot2/diamonds.csv"
df = pd.read_csv(input_file, header = 0)
df.drop(df.columns[0], axis=1, inplace=True)
df = pd.get_dummies(df, prefix=['cut_', 'color_', 'clarity_'])
y = df.iloc[:,3:4].values.flatten()
# NOTE(review): no-op — the flattened copy is discarded
y.flatten()
X = df.drop(df.columns[3], axis=1).values
# NOTE(review): bare expression mid-cell; its value is discarded
np.shape(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
model = Sequential()
model.add(Dense(30, input_dim=26, kernel_initializer='normal', activation='relu')) # <--- CHANGE IS HERE
model.add(Dense(1, kernel_initializer='normal', activation='linear'))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_squared_error'])
history = model.fit(X_train, y_train, epochs=2000, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, y_test)
print("\nroot %s: %f" % (model.metrics_names[1], np.sqrt(scores[1])))
# COMMAND ----------
# MAGIC %md
# MAGIC Would you look at that?!
# MAGIC
# MAGIC * We break $1000 RMSE around epoch 112
# MAGIC * $900 around epoch 220
# MAGIC * $800 around epoch 450
# MAGIC * By around epoch 2000, my RMSE is < $600
# MAGIC
# MAGIC ...
# MAGIC
# MAGIC
# MAGIC __Same theory; different activation function. Huge difference__
# COMMAND ----------
# MAGIC %md
# MAGIC # Multilayer Networks
# MAGIC
# MAGIC If a single-layer perceptron network learns the importance of different combinations of features in the data...
# MAGIC
# MAGIC What would another network learn if it had a second (hidden) layer of neurons?
# MAGIC
# MAGIC It depends on how we train the network. We'll talk in the next section about how this training works, but the general idea is that we still work backward from the error gradient.
# MAGIC
# MAGIC That is, the last layer learns from error in the output; the second-to-last layer learns from error transmitted through that last layer, etc. It's a touch hand-wavy for now, but we'll make it more concrete later.
# MAGIC
# MAGIC Given this approach, we can say that:
# MAGIC
# MAGIC 1. The second (hidden) layer is learning features composed of activations in the first (hidden) layer
# MAGIC 2. The first (hidden) layer is learning feature weights that enable the second layer to perform best
# MAGIC * Why? Earlier, the first hidden layer just learned feature weights because that's how it was judged
# MAGIC * Now, the first hidden layer is judged on the error in the second layer, so it learns to contribute to that second layer
# MAGIC 3. The second layer is learning new features that aren't explicit in the data, and is teaching the first layer to supply it with the necessary information to compose these new features
# MAGIC
# MAGIC ### So instead of just feature weighting and combining, we have new feature learning!
# MAGIC
# MAGIC This concept is the foundation of the "Deep Feed-Forward Network"
# MAGIC
# MAGIC <img src="http://i.imgur.com/fHGrs4X.png">
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ### Let's try it!
# MAGIC
# MAGIC __Add a layer to your Keras network, perhaps another 20 neurons, and see how the training goes.__
# MAGIC
# MAGIC if you get stuck, there is a solution in the Keras-DFFN notebook
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC I'm getting RMSE < $1000 by epoch 35 or so
# MAGIC
# MAGIC < $800 by epoch 90
# MAGIC
# MAGIC In this configuration, mine makes progress to around 700 epochs or so and then stalls with RMSE around $560
# COMMAND ----------
# MAGIC %md
# MAGIC ### Our network has "gone meta"
# MAGIC
# MAGIC It's now able to exceed where a simple decision tree can go, because it can create new features and then split on those
# MAGIC
# MAGIC ## Congrats! You have built your first deep-learning model!
# MAGIC
# MAGIC So does that mean we can just keep adding more layers and solve anything?
# MAGIC
# MAGIC Well, theoretically maybe ... try reconfiguring your network, watch the training, and see what happens.
# MAGIC
# MAGIC <img src="http://i.imgur.com/BumsXgL.jpg" width=500>
|
lamastex/scalable-data-science
|
dbcArchives/2021/000_6-sds-3-x-dl/051_DLbyABr_02-Neural-Networks.py
|
Python
|
unlicense
| 27,220
|
[
"NEURON"
] |
8981781cb2b591ba783b682e08fb3a72aa72bd83fce90343bb2c879435eb3165
|
# coding: utf-8
import logging
import math
import os
import subprocess
import tempfile
import time
import numpy as np
from monty.dev import requires
from monty.json import jsanitize, MSONable
from monty.os import cd
from monty.os.path import which
from scipy import constants
from scipy.spatial import distance
from pymatgen.core.lattice import Lattice
from pymatgen.core.units import Energy, Length
from pymatgen.electronic_structure.bandstructure import \
BandStructureSymmLine, Kpoint
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/research/software-development/boltztrap/
You need version 1.2.3 or higher
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
__author__ = "Geoffroy Hautier, Zachary Gibbs, Francesco Ricci, Anubhav Jain"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
class BoltztrapRunner(MSONable):
"""
This class is used to run Boltztrap on a band structure object.
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedon method. TETRA
typically gives better results (especially for DOSes)
but takes more time
energy_grid:
the energy steps used for the integration (eV)
lpfac:
the number of interpolation points in the real space. By
default 10 gives 10 time more points in the real space than
the number of kpoints given in reciprocal space
run_type:
type of boltztrap usage. by default
- BOLTZ: (default) compute transport coefficients
- BANDS: interpolate all bands contained in the energy range
specified in energy_span_around_fermi variable, along specified
k-points
- DOS: compute total and partial dos (custom BoltzTraP code
needed!)
- FERMI: compute fermi surface or more correctly to
get certain bands interpolated
band_nb:
indicates a band number. Used for Fermi Surface interpolation
(run_type="FERMI")
spin:
specific spin component (1: up, -1: down) of the band selected
in FERMI mode (mandatory).
cond_band:
if a conduction band is specified in FERMI mode,
set this variable as True
tauref:
reference relaxation time. Only set to a value different than
zero if we want to model beyond the constant relaxation time.
tauexp:
exponent for the energy in the non-constant relaxation time
approach
tauen:
reference energy for the non-constant relaxation time approach
soc:
results from spin-orbit coupling (soc) computations give
typically non-polarized (no spin up or down) results but single
electron occupations. If the band structure comes from a soc
computation, you should set soc to True (default False)
doping:
the fixed doping levels you want to compute. Boltztrap provides
both transport values depending on electron chemical potential
(fermi energy) and for a series of fixed carrier
concentrations. By default, this is set to 1e16 to 1e22 in
increments of factors of 10.
energy_span_around_fermi:
usually the interpolation is not needed on the entire energy
range but on a specific range around the fermi level.
This energy gives this range in eV. by default it is 1.5 eV.
If DOS or BANDS type are selected, this range is automatically
set to cover the entire energy range.
scissor:
scissor to apply to the band gap (eV). This applies a scissor
operation moving the band edges without changing the band
shape. This is useful to correct the often underestimated band
gap in DFT. Default is 0.0 (no scissor)
kpt_line:
list of fractional coordinates of kpoints as arrays or list of
Kpoint objects for BANDS mode calculation (standard path of
high symmetry k-points is automatically set as default)
tmax:
Maximum temperature (K) for calculation (default=1300)
tgrid:
Temperature interval for calculation (default=50)
symprec: 1e-3 is the default in pymatgen. If the kmesh has been
generated using a different symprec, it has to be specified
to avoid a "factorization error" in BoltzTraP calculation.
If a kmesh that spans the whole Brillouin zone has been used,
or to disable all the symmetries, set symprec to None.
cb_cut: by default 10% of the highest conduction bands are
removed because they are often not accurate.
Tune cb_cut to change the percentage (0-100) of bands
that are removed.
timeout: overall time limit (in seconds): mainly to avoid infinite
loop when trying to find Fermi levels.
"""
@requires(which('x_trans'),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at http://"
"www.icams.de/content/research/software-development/boltztrap/ "
"and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path")
def __init__(self, bs, nelec, dos_type="HISTO", energy_grid=0.005,
lpfac=10, run_type="BOLTZ", band_nb=None, tauref=0, tauexp=0,
tauen=0, soc=False, doping=None, energy_span_around_fermi=1.5,
scissor=0.0, kpt_line=None, spin=None, cond_band=False,
tmax=1300, tgrid=50, symprec=1e-3, cb_cut=10, timeout=7200):
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.run_type = run_type
self.band_nb = band_nb
self.spin = spin
self.cond_band = cond_band
self.tauref = tauref
self.tauexp = tauexp
self.tauen = tauen
self.soc = soc
self.kpt_line = kpt_line
self.cb_cut = cb_cut/100.
if isinstance(doping, list) and len(doping) > 0:
self.doping = doping
else:
self.doping = []
for d in [1e16, 1e17, 1e18, 1e19, 1e20, 1e21]:
self.doping.extend([1 * d, 2.5 * d, 5 * d, 7.5 * d])
self.doping.append(1e22)
self.energy_span_around_fermi = energy_span_around_fermi
self.scissor = scissor
self.tmax = tmax
self.tgrid = tgrid
self._symprec = symprec
if self.run_type in ("DOS", "BANDS"):
self._auto_set_energy_range()
self.timeout = timeout
self.start_time = time.time()
def _auto_set_energy_range(self):
"""
automatically determine the energy range as min/max eigenvalue
minus/plus the buffer_in_ev
"""
emins = [min([e_k[0] for e_k in self._bs.bands[Spin.up]])]
emaxs = [max([e_k[0] for e_k in self._bs.bands[Spin.up]])]
if self._bs.is_spin_polarized:
emins.append(min([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
emaxs.append(max([e_k[0] for e_k in
self._bs.bands[Spin.down]]))
min_eigenval = Energy(min(emins) - self._bs.efermi, "eV"). \
to("Ry")
max_eigenval = Energy(max(emaxs) - self._bs.efermi, "eV"). \
to("Ry")
# set energy range to buffer around min/max EV
# buffer does not increase CPU time but will help get equal
# energies for spin up/down for band structure
const = Energy(2, "eV").to("Ry")
self._ll = min_eigenval - const
self._hl = max_eigenval + const
en_range = Energy(max((abs(self._ll), abs(self._hl))),
"Ry").to("eV")
self.energy_span_around_fermi = en_range * 1.01
print("energy_span_around_fermi = ",
self.energy_span_around_fermi)
@property
def bs(self):
return self._bs
@property
def nelec(self):
return self._nelec
    def write_energy(self, output_file):
        """
        Write the boltztrap.energy(so) file: a title line, the k-point count,
        then for each k-point its fractional coordinates and the eigenvalues
        (in Ry, relative to the Fermi level) to be interpolated.

        Args:
            output_file: path of the energy file to write.
        """
        with open(output_file, 'w') as f:
            f.write("test\n")
            f.write("{}\n".format(len(self._bs.kpoints)))
            if self.run_type == "FERMI":
                # FERMI mode writes a single selected band; conduction bands
                # are written with inverted sign (sign = -1.0)
                sign = -1.0 if self.cond_band else 1.0
                for i in range(len(self._bs.kpoints)):
                    eigs = []
                    eigs.append(Energy(
                        self._bs.bands[Spin(self.spin)][self.band_nb][i] -
                        self._bs.efermi, "eV").to("Ry"))
                    f.write("%12.8f %12.8f %12.8f %d\n"
                            % (self._bs.kpoints[i].frac_coords[0],
                               self._bs.kpoints[i].frac_coords[1],
                               self._bs.kpoints[i].frac_coords[2],
                               len(eigs)))
                    for j in range(len(eigs)):
                        f.write("%18.8f\n" % (sign * float(eigs[j])))
            else:
                for i, kpt in enumerate(self._bs.kpoints):
                    eigs = []
                    # DOS mode writes a single spin channel; otherwise all
                    # spin channels present in the band structure are written
                    if self.run_type == "DOS":
                        spin_lst = [self.spin]
                    else:
                        spin_lst = self._bs.bands
                    for spin in spin_lst:
                        # use 90% of bottom bands since highest eigenvalues
                        # are usually incorrect
                        # ask Geoffroy Hautier for more details
                        nb_bands = int(math.floor(self._bs.nb_bands*(1-self.cb_cut)))
                        for j in range(nb_bands):
                            eigs.append(
                                Energy(self._bs.bands[Spin(spin)][j][i] -
                                       self._bs.efermi, "eV").to("Ry"))
                    eigs.sort()
                    # pad spin-polarized DOS runs with the window bounds set
                    # by _auto_set_energy_range
                    if self.run_type == "DOS" and self._bs.is_spin_polarized:
                        eigs.insert(0, self._ll)
                        eigs.append(self._hl)
                    f.write("%12.8f %12.8f %12.8f %d\n"
                            % (kpt.frac_coords[0],
                               kpt.frac_coords[1],
                               kpt.frac_coords[2],
                               len(eigs)))
                    for j in range(len(eigs)):
                        f.write("%18.8f\n" % (float(eigs[j])))
def write_struct(self, output_file):
if self._symprec != None:
sym = SpacegroupAnalyzer(self._bs.structure, symprec=self._symprec)
elif self._symprec == None:
pass
with open(output_file, 'w') as f:
if self._symprec != None:
f.write("{} {}\n".format(self._bs.structure.composition.formula,
sym.get_space_group_symbol()))
elif self._symprec == None:
f.write("{} {}\n".format(self._bs.structure.composition.formula,
"symmetries disabled"))
f.write("{}\n".format("\n".join(
[" ".join(["%.5f" % Length(i, "ang").to("bohr") for i in row])
for row in self._bs.structure.lattice.matrix])))
if self._symprec != None:
ops = sym.get_symmetry_dataset()['rotations']
elif self._symprec == None:
ops = [[[1,0,0],[0,1,0],[0,0,1]]]
f.write("{}\n".format(len(ops)))
for c in ops:
for row in c:
f.write("{}\n".format(" ".join(str(i) for i in row)))
def write_def(self, output_file):
# This function is useless in std version of BoltzTraP code
# because x_trans script overwrite BoltzTraP.def
with open(output_file, 'w') as f:
so = ""
if self._bs.is_spin_polarized or self.soc:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
"6,'boltztrap.outputtrans', 'unknown', "
"'formatted',0\n" +
"20,'boltztrap.struct', 'old', 'formatted',0\n"
+ "10,'boltztrap.energy" + so + "', 'old', "
"'formatted',0\n" +
"48,'boltztrap.engre', 'unknown', "
"'unformatted',0\n" +
"49,'boltztrap.transdos', 'unknown', "
"'formatted',0\n" +
"50,'boltztrap.sigxx', 'unknown', 'formatted',"
"0\n" +
"51,'boltztrap.sigxxx', 'unknown', 'formatted',"
"0\n" +
"21,'boltztrap.trace', 'unknown', "
"'formatted',0\n" +
"22,'boltztrap.condtens', 'unknown', "
"'formatted',0\n" +
"24,'boltztrap.halltens', 'unknown', "
"'formatted',0\n" +
"30,'boltztrap_BZ.cube', 'unknown', "
"'formatted',0\n")
    def write_proj(self, output_file_proj, output_file_def):
        """
        Write one projection file per (site, orbital) pair — named
        "<output_file_proj>_<site>_<orbital>" — plus a .def file that
        registers them with BoltzTraP (unit numbers starting at 1000).

        Args:
            output_file_proj: base path for the projection files.
            output_file_def: path of the .def file to write.
        """
        # This function is useless in std version of BoltzTraP code
        # because x_trans script overwrite BoltzTraP.def
        for oi, o in enumerate(Orbital):
            for site_nb in range(0, len(self._bs.structure.sites)):
                # only orbitals actually present in the projections
                if oi < len(self._bs.projections[Spin.up][0][0]):
                    with open(output_file_proj + "_" + str(site_nb) + "_" + str(
                            o),
                              'w') as f:
                        f.write(self._bs.structure.composition.formula + "\n")
                        f.write(str(len(self._bs.kpoints)) + "\n")
                        for i in range(len(self._bs.kpoints)):
                            tmp_proj = []
                            # same conduction-band cut as write_energy
                            for j in range(
                                    int(math.floor(self._bs.nb_bands*(1-self.cb_cut)))):
                                tmp_proj.append(
                                    self._bs.projections[Spin(self.spin)][j][
                                        i][oi][site_nb])
                            # TODO deal with the sorting going on at
                            # the energy level!!!
                            # tmp_proj.sort()
                            if self.run_type == "DOS" and \
                                    self._bs.is_spin_polarized:
                                tmp_proj.insert(0, self._ll)
                                tmp_proj.append(self._hl)
                            f.write("%12.8f %12.8f %12.8f %d\n"
                                    % (self._bs.kpoints[i].frac_coords[0],
                                       self._bs.kpoints[i].frac_coords[1],
                                       self._bs.kpoints[i].frac_coords[2],
                                       len(tmp_proj)))
                            for j in range(len(tmp_proj)):
                                f.write("%18.8f\n" % float(tmp_proj[j]))
        with open(output_file_def, 'w') as f:
            so = ""
            if self._bs.is_spin_polarized:
                so = "so"
            f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n" +
                    "6,'boltztrap.outputtrans', 'unknown', "
                    "'formatted',0\n" +
                    "20,'boltztrap.struct', 'old', 'formatted',0\n"
                    + "10,'boltztrap.energy" + so + "', 'old', "
                    "'formatted',0\n" +
                    "48,'boltztrap.engre', 'unknown', "
                    "'unformatted',0\n" +
                    "49,'boltztrap.transdos', 'unknown', "
                    "'formatted',0\n" +
                    "50,'boltztrap.sigxx', 'unknown', 'formatted',"
                    "0\n" +
                    "51,'boltztrap.sigxxx', 'unknown', 'formatted',"
                    "0\n" +
                    "21,'boltztrap.trace', 'unknown', "
                    "'formatted',0\n" +
                    "22,'boltztrap.condtens', 'unknown', "
                    "'formatted',0\n" +
                    "24,'boltztrap.halltens', 'unknown', "
                    "'formatted',0\n" +
                    "30,'boltztrap_BZ.cube', 'unknown', "
                    "'formatted',0\n")
            # projection files get unit numbers 1000, 1001, ...
            i = 1000
            for oi, o in enumerate(Orbital):
                for site_nb in range(0, len(self._bs.structure.sites)):
                    if oi < len(self._bs.projections[Spin.up][0][0]):
                        f.write(str(i) + ",\'" + "boltztrap.proj_" + str(
                            site_nb) + "_" + str(o.name) +
                                "\' \'old\', \'formatted\',0\n")
                        i += 1
    def write_intrans(self, output_file):
        """
        Write the boltztrap.intrans control file. The content depends on
        self.run_type: BOLTZ/DOS (transport/DOS run with the doping list),
        FERMI (single-band Fermi surface), or BANDS (interpolation along
        self.kpt_line, defaulting to the HighSymmKpath of the structure).

        Args:
            output_file: path of the .intrans file to write.
        """
        # a scissor below 1e-4 eV is treated as "no gap shift"
        setgap = 1 if self.scissor > 0.0001 else 0
        if self.run_type == "BOLTZ" or self.run_type == "DOS":
            with open(output_file, 'w') as fout:
                fout.write("GENE # use generic interface\n")
                fout.write(
                    "1 0 %d %f # iskip (not presently used) idebug "
                    "setgap shiftgap \n"
                    % (setgap, Energy(self.scissor, "eV").to("Ry")))
                fout.write(
                    "0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
                    "span around Fermilevel, number of electrons\n"
                    % (Energy(self.energy_grid, "eV").to("Ry"),
                       Energy(self.energy_span_around_fermi, "eV").to("Ry"),
                       self._nelec))
                fout.write(
                    "CALC # CALC (calculate expansion "
                    "coeff), NOCALC read from file\n")
                fout.write(
                    "%d # lpfac, number of latt-points "
                    "per k-point\n" % self.lpfac)
                fout.write(
                    "%s # run mode (only BOLTZ is "
                    "supported)\n" % self.run_type)
                fout.write(
                    ".15 # (efcut) energy range of "
                    "chemical potential\n")
                fout.write(
                    "{} {} # Tmax, temperature grid\n". \
                        format(self.tmax, self.tgrid))
                fout.write(
                    "-1. # energyrange of bands given DOS output sig_xxx and "
                    "dos_xxx (xxx is band number)\n")
                fout.write(self.dos_type + "\n")  # e.g., HISTO or TETRA
                # relaxation-time model parameters (all zero = constant tau)
                fout.write("{} {} {} 0 0 0\n".format(
                    self.tauref, self.tauexp, self.tauen))
                # each doping level is requested for both n- and p-type
                fout.write("{}\n".format(2 * len(self.doping)))
                for d in self.doping:
                    fout.write(str(d) + "\n")
                for d in self.doping:
                    fout.write(str(-d) + "\n")
        elif self.run_type == "FERMI":
            with open(output_file, 'w') as fout:
                fout.write("GENE # use generic interface\n")
                fout.write(
                    "1 0 0 0.0 # iskip (not presently used) idebug "
                    "setgap shiftgap \n")
                fout.write(
                    "0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,"
                    "energy span around Fermilevel, "
                    "number of electrons\n"
                    % (Energy(self.energy_grid, "eV").to("Ry"), self._nelec))
                fout.write(
                    "CALC # CALC (calculate expansion "
                    "coeff), NOCALC read from file\n")
                fout.write(
                    "%d # lpfac, number of latt-points "
                    "per k-point\n" % self.lpfac)
                fout.write(
                    "FERMI # run mode (only BOLTZ is "
                    "supported)\n")
                fout.write(str(1) +
                           " # actual band selected: " +
                           str(self.band_nb + 1) + " spin: " + str(self.spin))
        elif self.run_type == "BANDS":
            # default k-point line: the standard high-symmetry path
            if self.kpt_line is None:
                kpath = HighSymmKpath(self._bs.structure)
                self.kpt_line = [Kpoint(k, self._bs.structure.lattice) for k
                                 in
                                 kpath.get_kpoints(coords_are_cartesian=False)[
                                     0]]
                self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
            elif type(self.kpt_line[0]) == Kpoint:
                self.kpt_line = [kp.frac_coords for kp in self.kpt_line]
            with open(output_file, 'w') as fout:
                fout.write("GENE # use generic interface\n")
                fout.write(
                    "1 0 %d %f # iskip (not presently used) idebug "
                    "setgap shiftgap \n"
                    % (setgap, Energy(self.scissor, "eV").to("Ry")))
                fout.write(
                    "0.0 %f %f %6.1f # Fermilevel (Ry),energygrid,energy "
                    "span around Fermilevel, "
                    "number of electrons\n"
                    % (Energy(self.energy_grid, "eV").to("Ry"),
                       Energy(self.energy_span_around_fermi, "eV").to("Ry"),
                       self._nelec))
                fout.write(
                    "CALC # CALC (calculate expansion "
                    "coeff), NOCALC read from file\n")
                fout.write(
                    "%d # lpfac, number of latt-points "
                    "per k-point\n" % self.lpfac)
                fout.write(
                    "BANDS # run mode (only BOLTZ is "
                    "supported)\n")
                fout.write("P " + str(len(self.kpt_line)) + "\n")
                for kp in self.kpt_line:
                    fout.writelines([str(k) + " " for k in kp])
                    fout.write('\n')
def write_input(self, output_dir):
if self._bs.is_spin_polarized or self.soc:
self.write_energy(os.path.join(output_dir, "boltztrap.energyso"))
else:
self.write_energy(os.path.join(output_dir, "boltztrap.energy"))
self.write_struct(os.path.join(output_dir, "boltztrap.struct"))
self.write_intrans(os.path.join(output_dir, "boltztrap.intrans"))
self.write_def(os.path.join(output_dir, "BoltzTraP.def"))
if len(self.bs.projections) != 0 and self.run_type == "DOS":
self.write_proj(os.path.join(output_dir, "boltztrap.proj"),
os.path.join(output_dir, "BoltzTraP.def"))
def run(self, path_dir=None, convergence=True, write_input=True,
clear_dir=False, max_lpfac=150, min_egrid=0.00005):
"""
Write inputs (optional), run BoltzTraP, and ensure
convergence (optional)
Args:
path_dir (str): directory in which to run BoltzTraP
convergence (bool): whether to check convergence and make
corrections if needed
write_input: (bool) whether to write input files before the run
(required for convergence mode)
clear_dir: (bool) whether to remove all files in the path_dir
before starting
max_lpfac: (float) maximum lpfac value to try before reducing egrid
in convergence mode
min_egrid: (float) minimum egrid value to try before giving up in
convergence mode
Returns:
"""
# TODO: consider making this a part of custodian rather than pymatgen
# A lot of this functionality (scratch dirs, handlers, monitors)
# is built into custodian framework
if convergence and not write_input:
raise ValueError("Convergence mode requires write_input to be "
"true")
if self.run_type in ("BANDS", "DOS", "FERMI"):
convergence = False
if self.lpfac > max_lpfac:
max_lpfac = self.lpfac
if self.run_type == "BANDS" and self.bs.is_spin_polarized:
print("Reminder: for run_type " + str(
self.run_type) + ", spin component are not separated! "
"(you have a spin polarized band structure)")
if self.run_type in ("FERMI", "DOS") and self.spin is None:
if self.bs.is_spin_polarized:
raise BoltztrapError(
"Spin parameter must be specified for spin polarized "
"band structures!")
else:
self.spin = 1
dir_bz_name = "boltztrap"
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.abspath(
os.path.join(path_dir, dir_bz_name))
if not os.path.exists(path_dir):
os.mkdir(path_dir)
elif clear_dir:
for c in os.listdir(path_dir):
os.remove(os.path.join(path_dir, c))
FORMAT = "%(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT,
filename=os.path.join(path_dir, "../boltztrap.out"))
with cd(path_dir):
lpfac_start = self.lpfac
converged = False
while self.energy_grid >= min_egrid and not converged:
self.lpfac = lpfac_start
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after timeout "
"of {} s".format(self.timeout))
logging.info("lpfac, energy_grid: {} {}".format(self.lpfac, self.energy_grid))
while self.lpfac <= max_lpfac and not converged:
if time.time() - self.start_time > self.timeout:
raise BoltztrapError("no doping convergence after "
"timeout of {} s".format(self.timeout))
if write_input:
self.write_input(path_dir)
bt_exe = ["x_trans", "BoltzTraP"]
if self._bs.is_spin_polarized or self.soc:
bt_exe.append("-so")
p = subprocess.Popen(bt_exe, stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
for c in p.communicate():
logging.info(c.decode())
if "error in factorization" in c.decode():
raise BoltztrapError("error in factorization")
warning = ""
with open(os.path.join(path_dir,
dir_bz_name + ".outputtrans")) as f:
for l in f:
if "Option unknown" in l:
raise BoltztrapError(
"DOS mode needs a custom version of "
"BoltzTraP code is needed")
if "WARNING" in l:
warning = l
break
if "Error - Fermi level was not found" in l:
warning = l
break
if not warning and convergence:
# check convergence for warning
analyzer = BoltztrapAnalyzer.from_files(path_dir)
for doping in ['n', 'p']:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(
analyzer.doping[doping]):
warning = "length of mu_doping array is " \
"incorrect"
break
if doping == 'p' and \
sorted(
analyzer.mu_doping[doping][
c], reverse=True) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for p-type"
break
# ensure n-type doping sorted correctly
if doping == 'n' and sorted(
analyzer.mu_doping[doping][c]) != \
analyzer.mu_doping[doping][c]:
warning = "sorting of mu_doping array " \
"incorrect for n-type"
break
if warning:
self.lpfac += 10
logging.warn("Warning detected: {}! Increase lpfac to "
"{}".format(warning, self.lpfac))
else:
converged = True
if not converged:
self.energy_grid /= 10
logging.info("Could not converge with max lpfac; "
"Decrease egrid to {}".format(self.energy_grid))
if not converged:
raise BoltztrapError(
"Doping convergence not reached with lpfac=" + str(
self.lpfac) + ", energy_grid=" + str(self.energy_grid))
return path_dir
def as_dict(self):
results = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lpfac": self.lpfac,
"bs": self.bs.as_dict(),
"nelec": self._nelec,
"dos_type": self.dos_type,
"run_type": self.run_type,
"band_nb": self.band_nb,
"spin": self.spin,
"cond_band": self.cond_band,
"tauref": self.tauref,
"tauexp": self.tauexp,
"tauen": self.tauen,
"soc": self.soc,
"kpt_line": self.kpt_line,
"doping": self.doping,
"energy_span_around_fermi": self.energy_span_around_fermi,
"scissor": self.scissor,
"tmax": self.tmax,
"tgrid": self.tgrid,
"symprec": self._symprec
}
return jsanitize(results)
class BoltztrapError(Exception):
    """
    Exception class for boltztrap.
    Raised when the boltztrap gives an error
    """

    def __init__(self, msg):
        # Chain through Exception so e.args is populated and the exception
        # pickles/reprs normally (the original skipped super().__init__).
        super().__init__(msg)
        self.msg = msg
        logging.error(self.msg)

    def __str__(self):
        return "BoltztrapError : " + self.msg
class BoltztrapAnalyzer:
    """
    Class used to store all the data from a boltztrap run
    (transport tensors, DOS, interpolated bands, Fermi-surface data).
    """
    def __init__(self, gap=None, mu_steps=None, cond=None, seebeck=None,
                 kappa=None, hall=None, doping=None,
                 mu_doping=None, seebeck_doping=None, cond_doping=None,
                 kappa_doping=None,
                 hall_doping=None, intrans=None, dos=None, dos_partial=None,
                 carrier_conc=None, vol=None, warning=None,
                 bz_bands=None, bz_kpoints=None, fermi_surface_data=None):
        """
        Constructor taking directly all the data generated by Boltztrap. You
        won't probably use it directly but instead use the from_files and
        from_dict methods.
        Args:
            gap: The gap after interpolation in eV
            mu_steps: The steps of electron chemical potential (or Fermi
                level) in eV.
            cond: The electronic conductivity tensor divided by a constant
                relaxation time (sigma/tau) at different temperature and
                fermi levels.
                The format is {temperature: [array of 3x3 tensors at each
                fermi level in mu_steps]}. The units are 1/(Ohm*m*s).
            seebeck: The Seebeck tensor at different temperatures and fermi
                levels. The format is {temperature: [array of 3x3 tensors at
                each fermi level in mu_steps]}. The units are V/K
            kappa: The electronic thermal conductivity tensor divided by a
                constant relaxation time (kappa/tau) at different temperature
                and fermi levels. The format is {temperature: [array of 3x3
                tensors at each fermi level in mu_steps]}
                The units are W/(m*K*s)
            hall: The hall tensor at different temperature and fermi levels
                The format is {temperature: [array of 27 coefficients list at
                each fermi level in mu_steps]}
                The units are m^3/C
            doping: The different doping levels that have been given to
                Boltztrap. The format is {'p':[],'n':[]} with an array of
                doping levels. The units are cm^-3
            mu_doping: Gives the electron chemical potential (or Fermi level)
                for a given set of doping.
                Format is {'p':{temperature: [fermi levels],'n':{temperature:
                [fermi levels]}}
                the fermi level array is ordered according to the doping
                levels in doping units for doping are in cm^-3 and for Fermi
                level in eV
            seebeck_doping: The Seebeck tensor at different temperatures and
                doping levels. The format is {'p': {temperature: [Seebeck
                tensors]}, 'n':{temperature: [Seebeck tensors]}}
                The [Seebeck tensors] array is ordered according to the
                doping levels in doping units for doping are in cm^-3 and for
                Seebeck in V/K
            cond_doping: The electronic conductivity tensor divided by a
                constant relaxation time (sigma/tau) at different
                temperatures and doping levels
                The format is {'p':{temperature: [conductivity tensors]},
                'n':{temperature: [conductivity tensors]}}
                The [conductivity tensors] array is ordered according to the
                doping levels in doping units for doping are in cm^-3 and for
                conductivity in 1/(Ohm*m*s)
            kappa_doping: The thermal conductivity tensor divided by a constant
                relaxation time (kappa/tau) at different temperatures and
                doping levels.
                The format is {'p':{temperature: [thermal conductivity
                tensors]},'n':{temperature: [thermal conductivity tensors]}}
                The [thermal conductivity tensors] array is ordered according
                to the doping levels in doping units for doping are in cm^-3
                and for thermal conductivity in W/(m*K*s)
            hall_doping: The Hall tensor at different temperatures and doping
                levels.
                The format is {'p':{temperature: [Hall tensors]},
                'n':{temperature: [Hall tensors]}}
                The [Hall tensors] array is ordered according to the doping
                levels in doping and each Hall tensor is represented by a 27
                coefficients list.
                The units are m^3/C
            intrans: a dictionary of inputs e.g. {"scissor": 0.0}
            carrier_conc: The concentration of carriers in electron (or hole)
                per unit cell
            dos: The dos computed by Boltztrap given as a pymatgen Dos object
            dos_partial: Data for the partial DOS projected on sites and
                orbitals
            vol: Volume of the unit cell in angstrom cube (A^3)
            warning: string if BoltzTraP outputted a warning, else None
            bz_bands: Data for interpolated bands on a k-point line
                (run_type=BANDS)
            bz_kpoints: k-point in reciprocal coordinates for interpolated
                bands (run_type=BANDS)
            fermi_surface_data: energy values in a 3D grid imported from the
                output .cube file.
        """
        self.gap = gap
        self.mu_steps = mu_steps
        # transport tensors vs chemical potential (see docstring for formats)
        self._cond = cond
        self._seebeck = seebeck
        self._kappa = kappa
        self._hall = hall
        self.warning = warning
        self.doping = doping
        self.mu_doping = mu_doping
        # transport tensors at the fixed doping levels
        self._seebeck_doping = seebeck_doping
        self._cond_doping = cond_doping
        self._kappa_doping = kappa_doping
        self._hall_doping = hall_doping
        self.intrans = intrans
        self._carrier_conc = carrier_conc
        self.dos = dos
        self.vol = vol
        self._dos_partial = dos_partial
        # interpolated band data (run_type=BANDS)
        self._bz_bands = bz_bands
        self._bz_kpoints = bz_kpoints
        self.fermi_surface_data = fermi_surface_data
def get_symm_bands(self, structure, efermi, kpt_line=None,
labels_dict=None):
"""
Function useful to read bands from Boltztrap output and get a
BandStructureSymmLine object comparable with that one from a DFT
calculation (if the same kpt_line is provided). Default kpt_line
and labels_dict is the standard path of high symmetry k-point for
the specified structure. They could be extracted from the
BandStructureSymmLine object that you want to compare with. efermi
variable must be specified to create the BandStructureSymmLine
object (usually it comes from DFT or Boltztrap calc)
"""
try:
if kpt_line is None:
kpath = HighSymmKpath(structure)
kpt_line = [Kpoint(k, structure.lattice.reciprocal_lattice) for
k in
kpath.get_kpoints(coords_are_cartesian=False)[0]]
labels_dict = {l: k for k, l in zip(
*kpath.get_kpoints(coords_are_cartesian=False)) if l}
kpt_line = [kp.frac_coords for kp in kpt_line]
elif type(kpt_line[0]) == Kpoint:
kpt_line = [kp.frac_coords for kp in kpt_line]
labels_dict = {k: labels_dict[k].frac_coords for k in
labels_dict}
idx_list = []
# kpt_dense=np.array([kp for kp in self._bz_kpoints])
for i, kp in enumerate(kpt_line):
w = []
prec = 1e-05
while len(w) == 0:
w = np.where(np.all(
np.abs(kp - self._bz_kpoints) < [prec] * 3,
axis=1))[0]
prec *= 10
# print( prec )
idx_list.append([i, w[0]])
# if len(w)>0:
# idx_list.append([i,w[0]])
# else:
# w=np.where(np.all(np.abs(kp.frac_coords-self._bz_kpoints)
# <[1e-04,1e-04,1e-04],axis=1))[0]
# idx_list.append([i,w[0]])
idx_list = np.array(idx_list)
# print( idx_list.shape )
bands_dict = {Spin.up: (self._bz_bands * Energy(1, "Ry").to(
"eV") + efermi).T[:, idx_list[:, 1]].tolist()}
# bz_kpoints = bz_kpoints[idx_list[:,1]].tolist()
sbs = BandStructureSymmLine(kpt_line, bands_dict,
structure.lattice.reciprocal_lattice,
efermi,
labels_dict=labels_dict)
return sbs
except:
raise BoltztrapError(
"Bands are not in output of BoltzTraP.\nBolztrapRunner must "
"be run with run_type=BANDS")
@staticmethod
def check_acc_bzt_bands(sbs_bz, sbs_ref, warn_thr=(0.03, 0.03)):
"""
Compare sbs_bz BandStructureSymmLine calculated with boltztrap with
the sbs_ref BandStructureSymmLine as reference (from MP for
instance), computing correlation and energy difference for eight bands
around the gap (semiconductors) or fermi level (metals).
warn_thr is a threshold to get a warning in the accuracy of Boltztap
interpolated bands.
Return a dictionary with these keys:
- "N": the index of the band compared; inside each there are:
- "Corr": correlation coefficient for the 8 compared bands
- "Dist": energy distance for the 8 compared bands
- "branch_name": energy distance for that branch
- "avg_corr": average of correlation coefficient over the 8 bands
- "avg_dist": average of energy distance over the 8 bands
- "nb_list": list of indexes of the 8 compared bands
- "acc_thr": list of two float corresponing to the two warning
thresholds in input
- "acc_err": list of two bools:
True if the avg_corr > warn_thr[0], and
True if the avg_dist > warn_thr[1]
See also compare_sym_bands function doc
"""
if not sbs_ref.is_metal() and not sbs_bz.is_metal():
vbm_idx = sbs_bz.get_vbm()['band_index'][Spin.up][-1]
cbm_idx = sbs_bz.get_cbm()['band_index'][Spin.up][0]
nb_list = range(vbm_idx - 3, cbm_idx + 4)
else:
bnd_around_efermi = []
delta = 0
spin = list(sbs_bz.bands.keys())[0]
while len(bnd_around_efermi) < 8 and delta < 100:
delta += 0.1
bnd_around_efermi = []
for nb in range(len(sbs_bz.bands[spin])):
for kp in range(len(sbs_bz.bands[spin][nb])):
if abs(sbs_bz.bands[spin][nb][
kp] - sbs_bz.efermi) < delta:
bnd_around_efermi.append(nb)
break
if len(bnd_around_efermi) < 8:
print("Warning! check performed on " + str(
len(bnd_around_efermi)))
nb_list = bnd_around_efermi
else:
nb_list = bnd_around_efermi[:8]
# print(nb_list)
bcheck = compare_sym_bands(sbs_bz, sbs_ref, nb_list)
# print(bcheck)
acc_err = [False, False]
avg_corr = sum([item[1]['Corr'] for item in bcheck.iteritems()]) / 8
avg_distance = sum([item[1]['Dist'] for item in bcheck.iteritems()]) / 8
if avg_corr > warn_thr[0]: acc_err[0] = True
if avg_distance > warn_thr[0]: acc_err[1] = True
bcheck['avg_corr'] = avg_corr
bcheck['avg_distance'] = avg_distance
bcheck['acc_err'] = acc_err
bcheck['acc_thr'] = warn_thr
bcheck['nb_list'] = nb_list
if True in acc_err:
print("Warning! some bands around gap are not accurate")
return bcheck
def get_seebeck(self, output='eigs', doping_levels=True):
"""
Gives the seebeck coefficient (microV/K) in either a
full 3x3 tensor form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to Seebeck at p-type doping
and 'n' to the Seebeck at n-type doping. Otherwise, returns a
{temp:[]} dictionary
The result contains either the sorted three eigenvalues of
the symmetric
Seebeck tensor (output='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
units are microV/K
"""
return BoltztrapAnalyzer._format_to_output(self._seebeck,
self._seebeck_doping,
output,
doping_levels, 1e6)
def get_conductivity(self, output='eigs', doping_levels=True,
relaxation_time=1e-14):
"""
Gives the conductivity (1/Ohm*m) in either a full 3x3 tensor
form, as 3 eigenvalues, or as the average value
(trace/3.0) If doping_levels=True, the results are given at
different p and n doping
levels (given by self.doping), otherwise it is given as a series
of electron chemical potential values
Args:
output (string): the type of output. 'tensor' give the full
3x3 tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}.
The 'p' links to conductivity
at p-type doping and 'n' to the conductivity at n-type
doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either
the sorted three eigenvalues of the symmetric
conductivity tensor (format='eigs') or a full tensor (3x3
array) (output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time
units are 1/Ohm*m
"""
return BoltztrapAnalyzer._format_to_output(self._cond,
self._cond_doping, output,
doping_levels,
relaxation_time)
def get_power_factor(self, output='eigs', doping_levels=True,
                     relaxation_time=1e-14):
    """Return the power factor (Seebeck^2 * conductivity) in
    microW/(m*K^2).

    Args:
        output (str): 'tensor' for the full 3x3 tensor, 'eigs' for its
            three sorted eigenvalues, 'average' for the trace/3.
        doping_levels (bool): True to report at the fixed p/n doping
            levels in ``self.doping``; False to report along the
            electron chemical potential grid.
        relaxation_time (float): constant relaxation time in seconds.

    Returns:
        If doping_levels=True, a dictionary {temp: {'p': [...],
        'n': [...]}}; otherwise {temp: [...]}. Entries are formatted
        according to ``output``. The constant relaxation time and the
        microW unit conversion are folded in as a single multiplier.
    """
    pf_mu = None
    pf_doping = None
    if doping_levels:
        pf_doping = {dop: {t: [] for t in self._seebeck_doping[dop]}
                     for dop in self._seebeck_doping}
        for dop, by_temp in pf_doping.items():
            for t, values in by_temp.items():
                for i in range(len(self.doping[dop])):
                    sbk = self._seebeck_doping[dop][t][i]
                    # sigma . (S . S) for one doping point
                    values.append(np.dot(self._cond_doping[dop][t][i],
                                         np.dot(sbk, sbk)))
    else:
        pf_mu = {t: [] for t in self._seebeck}
        for t, values in pf_mu.items():
            for i in range(len(self.mu_steps)):
                sbk = self._seebeck[t][i]
                values.append(np.dot(self._cond[t][i],
                                     np.dot(sbk, sbk)))
    return BoltztrapAnalyzer._format_to_output(
        pf_mu, pf_doping, output, doping_levels,
        multi=1e6 * relaxation_time)
def get_thermal_conductivity(self, output='eigs', doping_levels=True,
                             k_el=True, relaxation_time=1e-14):
    """Return the electronic part of the thermal conductivity in W/(m*K).

    Args:
        output (str): 'tensor' for the full 3x3 tensor, 'eigs' for its
            three sorted eigenvalues, 'average' for the trace/3.
        doping_levels (bool): True to report at the fixed p/n doping
            levels in ``self.doping``; False to report along the
            electron chemical potential grid.
        k_el (bool): True for k_0 - PF*T (Peltier-corrected), False for
            the raw k_0 tensor.
        relaxation_time (float): constant relaxation time in seconds.

    Returns:
        If doping_levels=True, {temp: {'p': [...], 'n': [...]}};
        otherwise {temp: [...]}. Entries are formatted according to
        ``output`` and include the constant relaxation time.
    """
    kappa_mu = None
    kappa_doping = None
    if doping_levels:
        kappa_doping = {dop: {t: [] for t in self._seebeck_doping[dop]}
                        for dop in self._seebeck_doping}
        for dop, by_temp in kappa_doping.items():
            for t, values in by_temp.items():
                for i in range(len(self.doping[dop])):
                    if k_el:
                        sbk = self._seebeck_doping[dop][t][i]
                        # power-factor tensor sigma . (S . S)
                        pf = np.dot(self._cond_doping[dop][t][i],
                                    np.dot(sbk, sbk))
                        values.append(
                            self._kappa_doping[dop][t][i] - pf * t)
                    else:
                        values.append(self._kappa_doping[dop][t][i])
    else:
        kappa_mu = {t: [] for t in self._seebeck}
        for t, values in kappa_mu.items():
            for i in range(len(self.mu_steps)):
                if k_el:
                    sbk = self._seebeck[t][i]
                    pf = np.dot(self._cond[t][i], np.dot(sbk, sbk))
                    values.append(self._kappa[t][i] - pf * t)
                else:
                    values.append(self._kappa[t][i])
    return BoltztrapAnalyzer._format_to_output(
        kappa_mu, kappa_doping, output, doping_levels,
        multi=relaxation_time)
def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
           kl=1.0):
    """Return the ZT figure of merit (S^2 * sigma * T / kappa).

    A constant relaxation time and a constant (isotropic) lattice
    thermal conductivity are assumed.

    Args:
        output (str): 'tensor' for the full 3x3 tensor, 'eigs' for its
            three sorted eigenvalues, 'average' for the trace/3.
        doping_levels (bool): True to report at the fixed p/n doping
            levels in ``self.doping``; False to report along the
            electron chemical potential grid.
        relaxation_time (float): constant relaxation time in seconds.
        kl (float): lattice thermal conductivity in W/(m*K).

    Returns:
        If doping_levels=True, {temp: {'p': [...], 'n': [...]}};
        otherwise {temp: [...]}. Entries are formatted according to
        ``output``.
    """
    zt_mu = None
    zt_doping = None
    # constant lattice contribution, added to the electronic part
    lattice_k = kl * np.eye(3, 3)
    if doping_levels:
        zt_doping = {dop: {t: [] for t in self._seebeck_doping[dop]}
                     for dop in self._seebeck_doping}
        for dop, by_temp in zt_doping.items():
            for t, values in by_temp.items():
                for i in range(len(self.doping[dop])):
                    sbk = self._seebeck_doping[dop][t][i]
                    pf = np.dot(self._cond_doping[dop][t][i],
                                np.dot(sbk, sbk))
                    k_el = (self._kappa_doping[dop][t][i]
                            - pf * t) * relaxation_time
                    values.append(
                        np.dot(pf * relaxation_time * t,
                               np.linalg.inv(k_el + lattice_k)))
    else:
        zt_mu = {t: [] for t in self._seebeck}
        for t, values in zt_mu.items():
            for i in range(len(self.mu_steps)):
                sbk = self._seebeck[t][i]
                pf = np.dot(self._cond[t][i], np.dot(sbk, sbk))
                k_el = (self._kappa[t][i] - pf * t) * relaxation_time
                values.append(
                    np.dot(pf * relaxation_time * t,
                           np.linalg.inv(k_el + lattice_k)))
    return BoltztrapAnalyzer._format_to_output(zt_mu, zt_doping,
                                               output, doping_levels)
def get_average_eff_mass(self, output='eigs', doping_levels=True):
    """Return the average effective mass tensor.

    "Average" because it integrates over all bands and all regions of
    the Brillouin zone (second derivative of E(k)), unlike the textbook
    single-parabolic-band effective mass. It therefore accounts for
    non-parabolicity, multiple extrema and multiple bands. See:

    Hautier, G., Miglio, A., Waroquiers, D., Rignanese, G., & Gonze, X.
    (2014). How Does Chemistry Influence Electron Effective Mass in
    Oxides? A High-Throughput Computational Analysis. Chemistry of
    Materials, 26(19), 5447-5458. doi:10.1021/cm404079a

    Hautier, G., Miglio, A., Ceder, G., Rignanese, G.-M., & Gonze, X.
    (2013). Identification and design principles of low hole effective
    mass p-type transparent conducting oxides. Nature Communications,
    4, 2292. doi:10.1038/ncomms3292

    Args:
        output (str): 'eigs' for eigenvalues, 'tensor' for the full 3x3
            tensor and 'average' for an average (trace/3).
        doping_levels (bool): True to report at the fixed p/n doping
            levels, False along the electron chemical potential grid.

    Returns:
        If doping_levels=True, {'p': {temp: [...]}, 'n': {temp: [...]}}
        with the effective mass (formatted per ``output``) at each
        temperature and doping level; 'p' is the hole and 'n' the
        electron effective mass. Otherwise {temp: [...]} along the
        chemical potential grid. Points where the conductivity tensor
        is singular are skipped.
    """
    result = None
    result_doping = None
    if doping_levels:
        result_doping = {doping: {t: [] for t in
                                  self._cond_doping[doping]}
                         for doping in self.doping}
        for doping in result_doping:
            for temp in result_doping[doping]:
                for i in range(len(self.doping[doping])):
                    try:
                        cond_inv = np.linalg.inv(
                            np.array(self._cond_doping[doping][temp][i]))
                    except np.linalg.LinAlgError:
                        # singular conductivity tensor: skip this point
                        continue
                    result_doping[doping][temp].append(
                        cond_inv * self.doping[doping][i] * 10 ** 6 *
                        constants.e ** 2 / constants.m_e)
    else:
        conc = self.get_carrier_concentration()
        result = {t: [] for t in self._seebeck}
        for temp in result:
            for i in range(len(self.mu_steps)):
                try:
                    cond_inv = np.linalg.inv(
                        np.array(self._cond[temp][i]))
                except np.linalg.LinAlgError:
                    # BUG FIX: the previous code swallowed the error and
                    # then used ``cond_inv`` anyway, raising NameError on
                    # the first iteration or silently reusing the stale
                    # inverse from the previous step. Now the singular
                    # point is skipped, mirroring the doping branch.
                    continue
                result[temp].append(
                    cond_inv * conc[temp][i] * 10 ** 6 *
                    constants.e ** 2 / constants.m_e)
    return BoltztrapAnalyzer._format_to_output(result, result_doping,
                                               output, doping_levels)
def get_seebeck_eff_mass(self, output='average', temp=300,
                         doping_levels=False, Lambda=0.5):
    """Seebeck effective mass, calculated as explained in:

    Gibbs, Z. M. et al., Effective mass and fermi surface complexity
    factor from ab initio band structure calculations.
    npj Computational Materials 3, 8 (2017).

    Args:
        output: 'average' uses the average of the three diagonal
            Seebeck components; 'tensor' treats each diagonal component
            separately.
        temp: temperature (K) of the Seebeck values used.
        doping_levels: False evaluates at every chemical potential;
            True evaluates at every fixed doping level for both n and
            p types.
        Lambda: scattering fitting parameter (0.5 means constant
            relaxation time).

    Returns:
        A list of masses vs chemical potential if doping_levels=False;
        a dict with 'n' and 'p' keys, each a list vs doping level, if
        doping_levels=True. With output='tensor' every entry is itself
        a 3-component list.
    """
    def _mass(sbk, carrier):
        # one scalar Seebeck value + carrier concentration -> eff. mass
        return seebeck_eff_mass_from_seebeck_carr(abs(sbk), carrier,
                                                  temp, Lambda)

    def _masses(seebeck_vals, conc_vals):
        out = []
        for i in range(len(conc_vals)):
            if output == 'average':
                out.append(_mass(seebeck_vals[i], conc_vals[i]))
            elif output == 'tensor':
                out.append([_mass(seebeck_vals[i][j][j], conc_vals[i])
                            for j in range(3)])
        return out

    if doping_levels:
        sbk_mass = {}
        for dt in ('n', 'p'):
            sbk = self.get_seebeck(output=output,
                                   doping_levels=True)[dt][temp]
            sbk_mass[dt] = _masses(sbk, self.doping[dt])
        return sbk_mass

    sbk = self.get_seebeck(output=output, doping_levels=False)[temp]
    conc = self.get_carrier_concentration()[temp]
    return _masses(sbk, conc)
def get_complexity_factor(self, output='average', temp=300,
                          doping_levels=False, Lambda=0.5):
    """Fermi surface complexity factor, calculated as explained in:

    Gibbs, Z. M. et al., Effective mass and fermi surface complexity
    factor from ab initio band structure calculations.
    npj Computational Materials 3, 8 (2017).

    Computed as (m_seebeck / |m_conductivity|)^1.5.

    Args:
        output: 'average' uses the averaged Seebeck/conductivity
            components; 'tensor' treats the three diagonal components
            separately.
        temp: temperature (K) of the underlying Seebeck/conductivity.
        doping_levels: False evaluates at every chemical potential;
            True at every fixed doping level for both n and p types.
        Lambda: scattering fitting parameter (0.5 means constant
            relaxation time).

    Returns:
        A list vs chemical potential if doping_levels=False; a dict
        with 'n' and 'p' keys, each a list vs doping level, if
        doping_levels=True. With output='tensor' every entry is a
        3-component list.
    """
    if doping_levels:
        cmplx_fact = {}
        for dt in ('n', 'p'):
            sbk_mass = self.get_seebeck_eff_mass(output, temp, True,
                                                 Lambda)[dt]
            cond_mass = self.get_average_eff_mass(
                output=output, doping_levels=True)[dt][temp]
            if output == 'average':
                cmplx_fact[dt] = [
                    (m_s / abs(m_c)) ** 1.5
                    for m_s, m_c in zip(sbk_mass, cond_mass)]
            elif output == 'tensor':
                cmplx_fact[dt] = [
                    [(sbk_mass[i][j] / abs(cond_mass[i][j][j])) ** 1.5
                     for j in range(3)]
                    for i in range(len(sbk_mass))]
    else:
        sbk_mass = self.get_seebeck_eff_mass(output, temp, False, Lambda)
        cond_mass = self.get_average_eff_mass(
            output=output, doping_levels=False)[temp]
        if output == 'average':
            cmplx_fact = [(m_s / abs(m_c)) ** 1.5
                          for m_s, m_c in zip(sbk_mass, cond_mass)]
        elif output == 'tensor':
            cmplx_fact = [
                [(sbk_mass[i][j] / abs(cond_mass[i][j][j])) ** 1.5
                 for j in range(3)]
                for i in range(len(sbk_mass))]
    return cmplx_fact
def get_extreme(self, target_prop, maximize=True, min_temp=None,
                max_temp=None, min_doping=None, max_doping=None,
                isotropy_tolerance=0.05, use_average=True):
    """
    This method takes in eigenvalues over a range of carriers,
    temperatures, and doping levels, and tells you what is the "best"
    value that can be achieved for the given target_property. Note that
    this method searches the doping dict only, not the full mu dict.

    Args:
        target_prop: target property, i.e. "seebeck", "power factor",
            "conductivity", "kappa", or "zt"
        maximize: True to maximize, False to minimize (e.g. kappa)
        min_temp: minimum temperature allowed
        max_temp: maximum temperature allowed
        min_doping: minimum doping allowed (e.g., 1E18)
        max_doping: maximum doping allowed (e.g., 1E20)
        isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
        use_average: True for avg of eigenval, False for max eigenval

    Returns:
        A dictionary with keys {"p", "n", "best"} with sub-keys:
        {"value", "temperature", "doping", "isotropic"}
    """

    def is_isotropic(x, isotropy_tolerance):
        """
        Internal method to tell you if 3-vector "x" is isotropic

        Args:
            x: the vector to determine isotropy for
            isotropy_tolerance: tolerance, e.g. 0.05 is 5%
        """
        if len(x) != 3:
            raise ValueError("Invalid input to is_isotropic!")

        st = sorted(x)
        # all([...]) rejects any zero component (guards the divisions).
        # NOTE(review): the second comparison applies abs() *before* the
        # division, unlike the other two — this is asymmetric for
        # negative eigenvalues and looks unintended; confirm before
        # changing, as it alters isotropy classification.
        return bool(all([st[0], st[1], st[2]]) and \
                    (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and \
                    (abs((st[2] - st[0])) / st[2] <= isotropy_tolerance) and \
                    (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))

    # Dispatch to the matching eigenvalue getter; d is
    # {'p'/'n': {temp: [eigenvalue triplets per doping level]}}.
    if target_prop.lower() == "seebeck":
        d = self.get_seebeck(output="eigs", doping_levels=True)
    elif target_prop.lower() == "power factor":
        d = self.get_power_factor(output="eigs", doping_levels=True)
    elif target_prop.lower() == "conductivity":
        d = self.get_conductivity(output="eigs", doping_levels=True)
    elif target_prop.lower() == "kappa":
        d = self.get_thermal_conductivity(output="eigs",
                                          doping_levels=True)
    elif target_prop.lower() == "zt":
        d = self.get_zt(output="eigs", doping_levels=True)
    else:
        raise ValueError("Target property: {} not recognized!".
                         format(target_prop))

    absval = True  # take the absolute value of properties

    # running extremum and the conditions at which it was found
    x_val = None
    x_temp = None
    x_doping = None
    x_isotropic = None
    output = {}

    # missing bounds default to the widest possible range
    min_temp = min_temp or 0
    max_temp = max_temp or float('inf')
    min_doping = min_doping or 0
    max_doping = max_doping or float('inf')

    for pn in ('p', 'n'):
        for t in d[pn]:  # temperatures
            if min_temp <= float(t) <= max_temp:
                for didx, evs in enumerate(d[pn][t]):
                    doping_lvl = self.doping[pn][didx]
                    if min_doping <= doping_lvl <= max_doping:
                        # isotropy is judged on the raw (signed) eigs
                        isotropic = is_isotropic(evs, isotropy_tolerance)
                        if absval:
                            evs = [abs(x) for x in evs]
                        if use_average:
                            val = float(sum(evs)) / len(evs)
                        else:
                            val = max(evs)
                        if x_val is None or (val > x_val and maximize) \
                                or (val < x_val and not maximize):
                            x_val = val
                            x_temp = t
                            x_doping = doping_lvl
                            x_isotropic = isotropic
        output[pn] = {'value': x_val, 'temperature': x_temp,
                      'doping': x_doping, 'isotropic': x_isotropic}
        # reset the running extremum before scanning the other carrier
        # type. NOTE(review): x_temp/x_doping/x_isotropic are not reset,
        # so if no point passes the filters for the second carrier type
        # its entry carries stale values from the first — confirm.
        x_val = None

    # pick the better of the two carrier types as "best"
    if maximize:
        max_type = 'p' if output['p']['value'] >= \
                          output['n']['value'] else 'n'
    else:
        max_type = 'p' if output['p']['value'] <= \
                          output['n']['value'] else 'n'
    output['best'] = output[max_type]
    output['best']['carrier_type'] = max_type

    return output
@staticmethod
def _format_to_output(tensor, tensor_doping, output, doping_levels,
multi=1.0):
if doping_levels:
full_tensor = tensor_doping
result = {doping: {t: [] for t in tensor_doping[doping]} for doping
in tensor_doping}
for doping in full_tensor:
for temp in full_tensor[doping]:
for i in range(len(full_tensor[doping][temp])):
if output in ['eig', 'eigs']:
result[doping][temp].append(sorted(
np.linalg.eigh(full_tensor[doping][temp][i])[
0] * multi))
elif output == 'tensor':
result[doping][temp].append(
np.array(full_tensor[doping][temp][i]) * multi)
elif output == 'average':
result[doping][temp].append(
(full_tensor[doping][temp][i][0][0] \
+ full_tensor[doping][temp][i][1][1] \
+ full_tensor[doping][temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: "
"{}".format(output))
else:
full_tensor = tensor
result = {t: [] for t in tensor}
for temp in full_tensor:
for i in range(len(tensor[temp])):
if output in ['eig', 'eigs']:
result[temp].append(sorted(
np.linalg.eigh(full_tensor[temp][i])[0] * multi))
elif output == 'tensor':
result[temp].append(
np.array(full_tensor[temp][i]) * multi)
elif output == 'average':
result[temp].append((full_tensor[temp][i][0][0]
+ full_tensor[temp][i][1][1]
+ full_tensor[temp][i][2][
2]) * multi / 3.0)
else:
raise ValueError("Unknown output format: {}".
format(output))
return result
def get_complete_dos(self, structure, analyzer_for_second_spin=None):
    """
    Gives a CompleteDos object with the DOS from the interpolated
    projected band structure

    Args:
        the structure (necessary to identify sites for projection)
        analyzer_for_second_spin must be specified to have a
        CompleteDos with both Spin components

    Returns:
        a CompleteDos object

    Example of use in case of spin polarized case:

        BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=1).run(path_dir='dos_up/')
        an_up=BoltztrapAnalyzer.from_files("dos_up/boltztrap/",dos_spin=1)

        BoltztrapRunner(bs=bs,nelec=10,run_type="DOS",spin=-1).run(path_dir='dos_dw/')
        an_dw=BoltztrapAnalyzer.from_files("dos_dw/boltztrap/",dos_spin=-1)

        cdos=an_up.get_complete_dos(bs.structure,an_dw)
    """
    pdoss = {}
    # this analyzer holds exactly one spin channel; grab its key
    spin_1 = list(self.dos.densities.keys())[0]

    if analyzer_for_second_spin:
        # the two analyzers must share the same energy grid and carry
        # opposite spin channels, otherwise merging is meaningless
        if not np.all(self.dos.energies ==
                      analyzer_for_second_spin.dos.energies):
            raise BoltztrapError(
                "Dos merging error: energies of the two dos are different")

        spin_2 = list(analyzer_for_second_spin.dos.densities.keys())[0]
        if spin_1 == spin_2:
            raise BoltztrapError(
                "Dos merging error: spin component are the same")

    # build per-site, per-orbital projected DOS; keys of
    # self._dos_partial are site indices (as strings) into structure
    for s in self._dos_partial:
        if structure.sites[int(s)] not in pdoss:
            pdoss[structure.sites[int(s)]] = {}
        for o in self._dos_partial[s]:
            if Orbital[o] not in pdoss[structure.sites[int(s)]]:
                pdoss[structure.sites[int(s)]][Orbital[o]] = {}
            pdoss[structure.sites[int(s)]][Orbital[o]][
                spin_1] = self._dos_partial[s][o]
            if analyzer_for_second_spin:
                # merge the second spin channel into the same orbital
                pdoss[structure.sites[int(s)]][Orbital[o]][
                    spin_2] = analyzer_for_second_spin._dos_partial[s][o]
    if analyzer_for_second_spin:
        # total DOS combining both spin channels
        tdos = Dos(self.dos.efermi, self.dos.energies,
                   {spin_1: self.dos.densities[spin_1],
                    spin_2: analyzer_for_second_spin.dos.densities[
                        spin_2]})
    else:
        tdos = self.dos
    return CompleteDos(structure, total_dos=tdos, pdoss=pdoss)
def get_mu_bounds(self, temp=300):
return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]
for temp in self._carrier_conc}
def get_hall_carrier_concentration(self):
"""
gives the Hall carrier concentration (in cm^-3). This is the trace of
the Hall tensor (see Boltztrap source code) Hall carrier concentration
are not always exactly the same than carrier concentration.
Returns
a dictionary {temp:[]} with an array of Hall carrier concentration
(in cm^-3) at each temperature The array relates to each step of
electron chemical potential
"""
result = {temp: [] for temp in self._hall}
for temp in self._hall:
for i in self._hall[temp]:
trace = (i[1][2][0] + i[2][0][1] + i[0][1][2]) / 3.0
if trace != 0.0:
result[temp].append(1e-6 / (trace * constants.e))
else:
result[temp].append(0.0)
return result
@staticmethod
def parse_outputtrans(path_dir):
    """Parse the boltztrap.outputtrans file.

    Args:
        path_dir: directory containing boltztrap.outputtrans.

    Returns:
        tuple: (run_type, warning, efermi, gap, doping_levels), where
        efermi and gap are converted from Ry to eV and doping_levels
        is the list of doping values requested in the run. Fields not
        found in the file stay None (or [] for doping_levels).
    """
    run_type = None
    warning = None
    efermi = None
    gap = None
    doping_levels = []

    fpath = os.path.join(path_dir, "boltztrap.outputtrans")
    with open(fpath, 'r') as f:
        for line in f:
            tokens = line.split()
            if "WARNING" in line:
                warning = line
            elif "Calc type:" in line:
                run_type = tokens[-1]
            elif line.startswith("VBM"):
                efermi = Energy(tokens[1], "Ry").to("eV")
            elif line.startswith("Egap:"):
                gap = Energy(float(tokens[1]), "Ry").to("eV")
            elif line.startswith("Doping level number"):
                doping_levels.append(float(tokens[6]))

    return run_type, warning, efermi, gap, doping_levels
@staticmethod
def parse_transdos(path_dir, efermi, dos_spin=1, trim_dos=False):
    """
    Parses .transdos (total DOS) and .transdos_x_y (partial DOS) files

    Args:
        path_dir: (str) dir containing DOS files
        efermi: (float) Fermi energy
        dos_spin: (int) -1 for spin down, +1 for spin up
        trim_dos: (bool) whether to post-process / trim DOS

    Returns:
        tuple - (DOS, dict of partial DOS)
    """

    data_dos = {'total': [], 'partial': {}}
    # parse the total DOS data
    ## format is energy, DOS, integrated DOS
    with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f:
        count_series = 0  # TODO: why is count_series needed?
        for line in f:
            if line.lstrip().startswith("#"):
                # stop at the second commented header, i.e. only the
                # first data series in the file is read
                count_series += 1
                if count_series > 1:
                    break
            else:
                data_dos['total'].append(
                    [Energy(float(line.split()[0]), "Ry").to("eV"),
                     float(line.split()[1])])
                # NOTE(review): total_elec is overwritten on every line
                # and never used afterwards — looks like dead code.
                total_elec = float(line.split()[2])

    # defaults chosen so that the partial-DOS slice [lw_l:-hg_l] below
    # becomes [0:len], i.e. a no-op when trim_dos is False
    lw_l = 0
    hg_l = -len(data_dos['total'])
    if trim_dos:
        # Francesco knows what this does
        # It has something to do with a trick of adding fake energies
        # at the endpoints of the DOS, and then re-trimming it. This is
        # to get the same energy scale for up and down spin DOS.
        tmp_data = np.array(data_dos['total'])
        # strip leading zeros (plus one extra point) from the density
        tmp_den = np.trim_zeros(tmp_data[:, 1], 'f')[1:]
        lw_l = len(tmp_data[:, 1]) - len(tmp_den)
        tmp_ene = tmp_data[lw_l:, 0]
        # strip trailing zeros (plus one extra point)
        tmp_den = np.trim_zeros(tmp_den, 'b')[:-1]
        hg_l = len(tmp_ene) - len(tmp_den)
        tmp_ene = tmp_ene[:-hg_l]
        tmp_data = np.vstack((tmp_ene, tmp_den)).T
        data_dos['total'] = tmp_data.tolist()

    # parse partial DOS data from boltztrap.transdos_<site>_<orbital>
    for file_name in os.listdir(path_dir):
        if file_name.endswith(
                "transdos") and file_name != 'boltztrap.transdos':
            tokens = file_name.split(".")[1].split("_")
            site = tokens[1]
            orb = '_'.join(tokens[2:])
            with open(os.path.join(path_dir, file_name), 'r') as f:
                for line in f:
                    # NOTE(review): the filter is startswith(" #") —
                    # with a leading space — after lstrip(), which can
                    # never match; presumably intended as "#". Confirm
                    # against the actual file format before changing.
                    if not line.lstrip().startswith(" #"):
                        if site not in data_dos['partial']:
                            data_dos['partial'][site] = {}
                        if orb not in data_dos['partial'][site]:
                            data_dos['partial'][site][orb] = []
                        data_dos['partial'][site][orb].append(
                            float(line.split()[1]))
            # trim the partial DOS to the same window as the total DOS
            data_dos['partial'][site][orb] = data_dos['partial'][site][
                orb][lw_l:-hg_l]

    dos_full = {'energy': [], 'density': []}
    for t in data_dos['total']:
        dos_full['energy'].append(t[0])
        dos_full['density'].append(t[1])

    dos = Dos(efermi, dos_full['energy'],
              {Spin(dos_spin): dos_full['density']})
    dos_partial = data_dos['partial']  # TODO: make this real DOS object?

    return dos, dos_partial
@staticmethod
def parse_intrans(path_dir):
    """Parse boltztrap.intrans, mainly to extract the scissor value
    applied to the bands and the DOS type.

    Args:
        path_dir: (str) directory containing the boltztrap.intrans file.

    Returns:
        dict: with optional keys "scissor" (Energy in eV, from the
        "iskip" line) and "dos_type" (the line containing "HISTO" or
        "TETRA", newline stripped).
    """
    intrans = {}
    fpath = os.path.join(path_dir, "boltztrap.intrans")
    with open(fpath, 'r') as f:
        for line in f:
            if "iskip" in line:
                # NOTE(review): split(" ") depends on the exact spacing
                # the writer produced; kept as-is on purpose.
                intrans["scissor"] = Energy(float(line.split(" ")[3]),
                                            "Ry").to("eV")
            if "HISTO" in line or "TETRA" in line:
                intrans["dos_type"] = line[:-1]
    return intrans
@staticmethod
def parse_struct(path_dir):
    """Parse the boltztrap.struct file (only the volume).

    Args:
        path_dir: (str) directory containing the boltztrap.struct file.

    Returns:
        (float) the cell volume in A^3.
    """
    with open(os.path.join(path_dir, "boltztrap.struct"), 'r') as f:
        tokens = f.readlines()
    # lines 1-3 hold the lattice vectors in bohr; convert to angstrom
    matrix = [[Length(float(tokens[i].split()[j]), "bohr").to("ang")
               for j in range(3)] for i in range(1, 4)]
    return Lattice(matrix).volume
@staticmethod
def parse_cond_and_hall(path_dir, doping_levels=None):
    """
    Parses the conductivity and Hall tensors
    Args:
        path_dir: Path containing .condtens / .halltens files
        doping_levels: ([float]) - doping lvls, parse outtrans to get this

    Returns:
        mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
        mu_doping, seebeck_doping, cond_doping, kappa_doping,
        hall_doping, carrier_conc
    """

    # Step 1: parse raw data but do not convert to final format
    t_steps = set()
    mu_steps = set()
    data_full = []
    data_hall = []
    data_doping_full = []
    data_doping_hall = []
    doping_levels = doping_levels or []

    # parse the full conductivity/Seebeck/kappa0/etc data
    ## also initialize t_steps and mu_steps
    # condtens columns: 0 = mu (Ry), 1 = T, 2 = carrier count,
    # 3-11 = conductivity, 12-20 = Seebeck, 21-29 = kappa0 (each a
    # row-major 3x3 tensor; see the reshape slices below)
    with open(os.path.join(path_dir, "boltztrap.condtens"), 'r') as f:
        for line in f:
            if not line.startswith("#"):
                mu_steps.add(float(line.split()[0]))
                t_steps.add(int(float(line.split()[1])))
                data_full.append([float(c) for c in line.split()])

    # parse the full Hall tensor (27 components per row)
    with open(os.path.join(path_dir, "boltztrap.halltens"), 'r') as f:
        for line in f:
            if not line.startswith("#"):
                data_hall.append([float(c) for c in line.split()])

    if len(doping_levels) != 0:
        # parse doping levels version of full cond. tensor, etc.
        # len(line) > 2 skips blank / near-blank lines
        with open(
                os.path.join(path_dir, "boltztrap.condtens_fixdoping"),
                'r') as f:
            for line in f:
                if not line.startswith("#") and len(line) > 2:
                    data_doping_full.append([float(c)
                                             for c in line.split()])

        # parse doping levels version of full hall tensor
        with open(
                os.path.join(path_dir, "boltztrap.halltens_fixdoping"),
                'r') as f:
            for line in f:
                if not line.startswith("#") and len(line) > 2:
                    data_doping_hall.append(
                        [float(c) for c in line.split()])

    # Step 2: convert raw data to final format

    # sort t and mu_steps (b/c they are sets not lists)
    # and convert to correct energy
    t_steps = sorted([t for t in t_steps])
    mu_steps = sorted([Energy(m, "Ry").to("eV") for m in mu_steps])

    # initialize output variables - could use defaultdict instead
    # I am leaving things like this for clarity
    cond = {t: [] for t in t_steps}
    seebeck = {t: [] for t in t_steps}
    kappa = {t: [] for t in t_steps}
    hall = {t: [] for t in t_steps}
    carrier_conc = {t: [] for t in t_steps}
    # NOTE(review): dos_full is initialized here but never used in this
    # method — looks like leftover from a refactor.
    dos_full = {'energy': [], 'density': []}

    mu_doping = {'p': {t: [] for t in t_steps},
                 'n': {t: [] for t in t_steps}}
    seebeck_doping = {'p': {t: [] for t in t_steps},
                      'n': {t: [] for t in t_steps}}
    cond_doping = {'p': {t: [] for t in t_steps},
                   'n': {t: [] for t in t_steps}}
    kappa_doping = {'p': {t: [] for t in t_steps},
                    'n': {t: [] for t in t_steps}}
    hall_doping = {'p': {t: [] for t in t_steps},
                   'n': {t: [] for t in t_steps}}

    # process doping levels: positive values are p-type, negative are
    # n-type (stored as their magnitude under the 'n' key)
    pn_doping_levels = {'p': [], 'n': []}
    for d in doping_levels:
        if d > 0:
            pn_doping_levels['p'].append(d)
        else:
            pn_doping_levels['n'].append(-d)

    # process raw conductivity data, etc.
    # temp is parsed as float but hashes equal to the int dict keys
    for d in data_full:
        temp, doping = d[1], d[2]
        carrier_conc[temp].append(doping)

        cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
        seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
        kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())

    # process raw Hall data: 27 components as three 3x3 blocks
    for d in data_hall:
        temp, doping = d[1], d[2]
        hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),
                     np.reshape(d[12:21], (3, 3)).tolist(),
                     np.reshape(d[21:30], (3, 3)).tolist()]
        hall[temp].append(hall_tens)

    # process doping conductivity data, etc.
    # fixdoping columns are shifted: 0 = T, 1 = doping, last = mu (Ry)
    for d in data_doping_full:
        temp, doping, mu = d[0], d[1], d[-1]
        pn = 'p' if doping > 0 else 'n'
        mu_doping[pn][temp].append(Energy(mu, "Ry").to("eV"))

        cond_doping[pn][temp].append(
            np.reshape(d[2:11], (3, 3)).tolist())
        seebeck_doping[pn][temp].append(
            np.reshape(d[11:20], (3, 3)).tolist())
        kappa_doping[pn][temp].append(
            np.reshape(d[20:29], (3, 3)).tolist())

    # process doping Hall data
    for d in data_doping_hall:
        temp, doping, mu = d[0], d[1], d[-1]
        pn = 'p' if doping > 0 else 'n'
        hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),
                     np.reshape(d[11:20], (3, 3)).tolist(),
                     np.reshape(d[20:29], (3, 3)).tolist()]
        hall_doping[pn][temp].append(hall_tens)

    return mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, \
        mu_doping, seebeck_doping, cond_doping, kappa_doping, \
        hall_doping, carrier_conc
@staticmethod
def from_files(path_dir, dos_spin=1):
    """Build a BoltztrapAnalyzer from a directory of BoltzTraP files.

    Args:
        path_dir: directory where the boltztrap files are.
        dos_spin: in DOS mode, set to 1 for spin up and -1 for spin
            down.

    Returns:
        a BoltztrapAnalyzer object.

    Raises:
        ValueError: if the run type found in boltztrap.outputtrans is
            not recognized.
        BoltztrapError: if a FERMI run has no fermi-surface data file.
    """
    run_type, warning, efermi, gap, doping_levels = \
        BoltztrapAnalyzer.parse_outputtrans(path_dir)
    vol = BoltztrapAnalyzer.parse_struct(path_dir)
    intrans = BoltztrapAnalyzer.parse_intrans(path_dir)

    if run_type == "BOLTZ":
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=False)
        (mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
         mu_doping, seebeck_doping, cond_doping, kappa_doping,
         hall_doping, carrier_conc) = \
            BoltztrapAnalyzer.parse_cond_and_hall(path_dir,
                                                  doping_levels)
        return BoltztrapAnalyzer(
            gap, mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
            mu_doping, seebeck_doping, cond_doping, kappa_doping,
            hall_doping, intrans, dos, pdos, carrier_conc, vol, warning)

    if run_type == "DOS":
        # HISTO-type DOS needs the endpoint-trimming post-processing
        trim = intrans["dos_type"] == "HISTO"
        dos, pdos = BoltztrapAnalyzer.parse_transdos(
            path_dir, efermi, dos_spin=dos_spin, trim_dos=trim)
        return BoltztrapAnalyzer(gap=gap, dos=dos, dos_partial=pdos,
                                 warning=warning, vol=vol)

    if run_type == "BANDS":
        # load once; columns -3: are the k-points, 1:-6 the bands
        band_data = np.loadtxt(
            os.path.join(path_dir, "boltztrap_band.dat"))
        return BoltztrapAnalyzer(bz_bands=band_data[:, 1:-6],
                                 bz_kpoints=band_data[:, -3:],
                                 warning=warning, vol=vol)

    if run_type == "FERMI":
        # the fermi-surface cube may be under either of two file names
        for fname in ('boltztrap_BZ.cube', 'fort.30'):
            fpath = os.path.join(path_dir, fname)
            if os.path.exists(fpath):
                return BoltztrapAnalyzer(
                    fermi_surface_data=read_cube_file(fpath))
        raise BoltztrapError("No data file found for fermi surface")

    raise ValueError("Run type: {} not recognized!".format(run_type))
def as_dict(self):
    """Return a JSON-serializable dict representation of this analyzer.

    All raw tensors, doping-level data and the DOS (via its own
    ``as_dict``) are included; the result is passed through jsanitize
    for JSON safety.
    """
    results = {}
    # plain public attributes
    for key in ('gap', 'mu_steps', 'intrans'):
        results[key] = getattr(self, key)
    # underscore-prefixed raw tensors along the mu grid
    for key in ('cond', 'seebeck', 'kappa', 'hall'):
        results[key] = getattr(self, '_' + key)
    results['doping'] = self.doping
    results['mu_doping'] = self.mu_doping
    # underscore-prefixed tensors at fixed doping
    for key in ('seebeck_doping', 'cond_doping', 'kappa_doping',
                'hall_doping'):
        results[key] = getattr(self, '_' + key)
    results['dos'] = self.dos.as_dict()
    results['dos_partial'] = self._dos_partial
    results['carrier_conc'] = self._carrier_conc
    results['vol'] = self.vol
    results['warning'] = self.warning
    return jsanitize(results)
@staticmethod
def from_dict(data):
    """Reconstruct a BoltztrapAnalyzer from a dict produced by as_dict().

    JSON round-tripping turns temperature keys into strings and numbers
    into generic types, so every nested dict is rebuilt with int keys and
    float values. Any key missing from *data* yields None for the
    corresponding constructor argument.
    """
    def _make_float_array(a):
        # Rebuild a 3x3 tensor of floats from (possibly stringified) entries.
        res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
        for i in range(3):
            for j in range(3):
                res[i][j] = float(a[i][j])
        return res

    def _make_float_hall(a):
        # NOTE(review): despite the name, the first 27 Hall components are
        # copied as-is, NOT cast to float -- confirm whether float() was
        # intended here.
        return [i for i in a[:27]]

    intrans = data.get('intrans')
    gap = data.get('gap')
    # Chemical-potential grid.
    mu_steps = [float(d) for d in data['mu_steps']] if \
        'mu_steps' in data else None
    # Temperature-indexed transport tensors: {T: [3x3 tensor per mu step]}.
    cond = {int(d): [_make_float_array(v) for v in data['cond'][d]]
            for d in data['cond']} if 'cond' in data else None
    seebeck = {int(d): [_make_float_array(v) for v in data['seebeck'][d]]
               for d in data['seebeck']} if 'seebeck' in data else None
    kappa = {int(d): [_make_float_array(v) for v in data['kappa'][d]]
             for d in data['kappa']} if 'kappa' in data else None
    hall = {int(d): [_make_float_hall(v) for v in data['hall'][d]]
            for d in data['hall']} if 'hall' in data else None
    # Doping levels for p- and n-type carriers.
    doping = {'p': [float(d) for d in data['doping']['p']],
              'n': [float(d) for d in data['doping']['n']]} if \
        'doping' in data else None
    # Doping-resolved quantities: {'p'/'n': {T: values per doping level}}.
    mu_doping = {'p': {int(d): [
        float(v) for v in data['mu_doping']['p'][d]] for d in
        data['mu_doping']['p']}, 'n':
        {int(d): [float(v) for v in data['mu_doping']['n'][d]]
         for d in data['mu_doping'][
             'n']}} if 'mu_doping' in data else None
    seebeck_doping = {'p': {int(d): [
        _make_float_array(v) for v in data['seebeck_doping']['p'][d]]
        for d in data['seebeck_doping']['p']}, 'n':
        {int(d): [_make_float_array(v) for v in
                  data['seebeck_doping']['n'][d]] for d in
         data['seebeck_doping'][
             'n']}} if 'seebeck_doping' in data \
        else None
    cond_doping = {'p': {int(d): [_make_float_array(v)
                                  for v in data['cond_doping']['p'][d]]
                         for d in data['cond_doping']['p']}, 'n':
                   {int(d): [_make_float_array(v) for v in
                             data['cond_doping']['n'][d]] for
                    d in data['cond_doping'][
                        'n']}} if 'cond_doping' in data else None
    kappa_doping = {'p': {int(d): [_make_float_array(v)
                                   for v in data['kappa_doping']['p'][d]]
                          for d in data['kappa_doping']['p']},
                    'n': {int(d): [_make_float_array(v) for v in
                                   data['kappa_doping']['n'][d]]
                          for d in data['kappa_doping']['n']}} \
        if 'kappa_doping' in data else None
    hall_doping = {'p': {int(d): [_make_float_hall(v) for v in
                                  data['hall_doping']['p'][d]] for d in
                         data['hall_doping']['p']}, 'n':
                   {int(d): [_make_float_hall(v) for v in
                             data['hall_doping']['n'][d]] for d in
                    data['hall_doping'][
                        'n']}} if "hall_doping" in data else None

    dos = Dos.from_dict(data['dos']) if 'dos' in data else None
    dos_partial = data.get('dos_partial')
    carrier_conc = data.get('carrier_conc')
    vol = data.get('vol')
    warning = data.get('warning')

    return BoltztrapAnalyzer(gap=gap,
                             mu_steps=mu_steps,
                             cond=cond,
                             seebeck=seebeck,
                             kappa=kappa,
                             hall=hall,
                             doping=doping,
                             mu_doping=mu_doping,
                             seebeck_doping=seebeck_doping,
                             cond_doping=cond_doping,
                             kappa_doping=kappa_doping,
                             hall_doping=hall_doping,
                             dos=dos,
                             dos_partial=dos_partial,
                             carrier_conc=carrier_conc,
                             vol=vol,
                             warning=warning)
def read_cube_file(filename):
    """Read a Gaussian-cube-style volumetric energy grid written by BoltzTraP.

    Parses the header for the atom count and the three grid dimensions
    (n1, n2, n3), then loads the data block that follows the header and
    the atom lines.

    Args:
        filename: path to a 'boltztrap_BZ.cube' or 'fort.30' file.

    Returns:
        numpy array of shape (n1, n2, n3). The final division converts
        the values out of Ry (see last line).
        NOTE(review): if *filename* matches neither recognized name,
        energy_data is never assigned and a NameError is raised.
    """
    with open(filename, 'rt') as f:
        natoms = 0
        count_line = 0
        for line in f:
            line = line.rstrip("\n")
            # The first line of the header must identify the CUBE format.
            if count_line == 0 and "CUBE" not in line:
                raise ValueError("CUBE file format not recognized")

            # Header line 2: atom count + grid origin.
            if count_line == 2:
                tokens = line.split()
                origin = [float(tokens[i]) for i in range(1,4)]
                natoms = int(tokens[0])
            # Header lines 3-5: grid size and axis vector per dimension.
            if count_line == 3:
                tokens = line.split()
                a1 = [float(tokens[i]) for i in range(1,4)]
                n1 = int(tokens[0])
            elif count_line == 4:
                tokens = line.split()
                a2 = [float(tokens[i]) for i in range(1,4)]
                n2 = int(tokens[0])
            elif count_line == 5:
                tokens = line.split()
                a3 = [float(tokens[i]) for i in range(1,4)]
                n3 = int(tokens[0])
            #kpoints=[[[0 for i in range(0,n1)] for j in range(0,n2)] for l in range(0,n3)]
            elif count_line > 5:
                # Header fully parsed; the data block is read below with numpy.
                break
            count_line += 1

    if 'fort.30' in filename:
        # fort.30 keeps a trailing partial line; read it separately and
        # append before reshaping.
        energy_data = np.genfromtxt(filename,skip_header=natoms+6,skip_footer=1)
        nlines_data = len(energy_data)
        last_line = np.genfromtxt(filename,skip_header=nlines_data+natoms+6)
        energy_data = np.append(energy_data.flatten(),last_line).reshape(n1,n2,n3)
    elif 'boltztrap_BZ.cube' in filename:
        energy_data = np.loadtxt(filename,skiprows=natoms+6).reshape(n1,n2,n3)

    # Unit conversion; divides by the Ry-per-eV factor.
    energy_data /= Energy(1, "eV").to("Ry")

    return energy_data
def compare_sym_bands(bands_obj, bands_ref_obj, nb=None):
    """
    Compute the mean of correlation between bzt and vasp bandstructure on
    sym line, for all bands and locally (for each branches) the difference
    squared (%) if nb is specified.

    Args:
        bands_obj: interpolated band structure (e.g. from BoltzTraP).
        bands_ref_obj: reference band structure (e.g. from VASP).
        nb: band index or list of band indices for the local analysis;
            when None, no per-band analysis is performed.

    Returns:
        dict {band_index: {'Dist': mean |ΔE|, 'Corr': correlation distance,
        branch_name: mean |ΔE| on that branch}} when nb is given and in
        range, otherwise the string "No nb given".
    """
    if bands_ref_obj.is_spin_polarized:
        nbands = min(bands_obj.nb_bands, 2 * bands_ref_obj.nb_bands)
    else:
        # TODO: why is this needed? Shouldn't pmg take care of nb_bands?
        nbands = min(len(bands_obj.bands[Spin.up]),
                     len(bands_ref_obj.bands[Spin.up]))
    arr_bands = np.array(bands_obj.bands[Spin.up][:nbands])

    if bands_ref_obj.is_spin_polarized:
        # Stack up/down reference bands and keep the lowest nbands overall.
        arr_bands_ref_up = np.array(bands_ref_obj.bands[Spin.up])
        arr_bands_ref_dw = np.array(bands_ref_obj.bands[Spin.down])
        arr_bands_ref = np.vstack((arr_bands_ref_up, arr_bands_ref_dw))
        arr_bands_ref = np.sort(arr_bands_ref, axis=0)[:nbands]
    else:
        arr_bands_ref = np.array(bands_ref_obj.bands[Spin.up][:nbands])

    # Correlation distance per band over the symmetry line.
    corr = np.array(
        [distance.correlation(arr_bands[idx], arr_bands_ref[idx]) for idx in
         range(nbands)])

    if isinstance(nb, int):
        nb = [nb]

    bcheck = {}
    # Bug fix: previously max(nb) was evaluated unconditionally, so the
    # documented default nb=None raised TypeError instead of falling
    # through to the "No nb given" result.
    if nb is not None and max(nb) < nbands:
        branches = [[s['start_index'], s['end_index'], s['name']] for s in
                    bands_ref_obj.branches]

        if not bands_obj.is_metal() and not bands_ref_obj.is_metal():
            # Align both band structures at their valence-band maxima.
            zero_ref = bands_ref_obj.get_vbm()['energy']
            zero = bands_obj.get_vbm()['energy']
            if not zero:
                # Fall back to the reference VBM band index when the
                # interpolated structure reports no VBM energy.
                vbm = bands_ref_obj.get_vbm()['band_index'][Spin.up][-1]
                zero = max(arr_bands[vbm])
        else:
            zero_ref = 0  # bands_ref_obj.efermi
            zero = 0  # bands_obj.efermi
        print(zero, zero_ref)  # kept: backward-compatible debug output

        for nbi in nb:
            bcheck[nbi] = {}
            bcheck[nbi]['Dist'] = np.mean(abs(arr_bands[nbi] - zero
                                              - arr_bands_ref[nbi] + zero_ref))
            bcheck[nbi]['Corr'] = corr[nbi]
            for start, end, name in branches:
                bcheck[nbi][name] = np.mean(
                    abs(arr_bands[nbi][start:end + 1] - zero
                        - arr_bands_ref[nbi][start:end + 1] + zero_ref))
    else:
        bcheck = "No nb given"

    return bcheck
def seebeck_spb(eta,Lambda=0.5):
    """Analytic Seebeck coefficient (microV/K) in the single parabolic
    band model, as a function of the reduced chemical potential *eta*
    and the scattering parameter *Lambda*."""
    from fdint import fdk
    numerator = (2. + Lambda) * fdk(1. + Lambda, eta)
    denominator = (1. + Lambda) * fdk(Lambda, eta)
    prefactor = constants.k / constants.e
    return prefactor * (numerator / denominator - eta) * 1e+6
def eta_from_seebeck(seeb,Lambda):
    """
    It takes a value of seebeck and adjusts the analytic seebeck until it's equal
    Returns: eta where the two seebeck coefficients are equal
    (reduced chemical potential)
    """
    from scipy.optimize import fsolve
    # Minimize the squared mismatch between the analytic model and |seeb|.
    residual = lambda x: (seebeck_spb(x,Lambda) - abs(seeb)) ** 2
    solution = fsolve(residual, 1.,full_output=True)
    return solution[0][0]
def seebeck_eff_mass_from_carr(eta, n, T, Lambda):
    """
    Calculate seebeck effective mass at a certain carrier concentration
    eta in kB*T units, n in cm-3, T in K, returns mass in m0 units
    """
    from fdint import fdk
    hbar = constants.h / 2 / np.pi
    # n is given in cm^-3; the 10**6 factor converts it to m^-3.
    carrier_term = (2 * np.pi**2 * abs(n) * 10 ** 6 / (fdk(0.5,eta))) ** (2. / 3)
    thermal_term = 2 * constants.m_e * constants.k * T / hbar ** 2
    return carrier_term / thermal_term
def seebeck_eff_mass_from_seebeck_carr(seeb, n, T, Lambda):
    """
    Find the chemical potential where analytic and calculated seebeck are identical
    and then calculate the seebeck effective mass at that chemical potential and
    a certain carrier concentration n
    """
    # Fail early with a clear message if the fast Fermi-integral package
    # is missing -- the helpers below need it at call time.
    try:
        from fdint import fdk
    except ImportError:
        raise BoltztrapError("fdint module not found. Please, install it.\n"+
                             "It is needed to calculate Fermi integral quickly.")

    eta = eta_from_seebeck(seeb,Lambda)
    return seebeck_eff_mass_from_carr(eta, n, T, Lambda)
|
dongsenfo/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 107,508
|
[
"BoltzTrap",
"VASP",
"pymatgen"
] |
57ca1189b76591f28e6320f8f26ef092630be15f00fc52cfa15cdd9b88c47ad2
|
__author__ = "Sunil Kumar (kumar.sunil.p@gmail.com)"
__copyright__ = "Copyright 2014, Washington University in St. Louis"
__credits__ = ["Sunil Kumar", "Steve Pieper", "Dan Marcus"]
__license__ = "XNAT Software License Agreement " + \
"(see: http://xnat.org/about/license.php)"
__version__ = "2.1.1"
__maintainer__ = "Rick Herrick"
__email__ = "herrickr@mir.wustl.edu"
__status__ = "Production"
from XnatSlicerGlobals import *
from XnatSlicerUtils import *
from __main__ import vtk, ctk, qt, slicer
import datetime, time
import os
import sys
comment = """
There are two classes in this file:
XnatSessionArgs and SessionManager.s
XnatSessionArgs inherits the 'dict' type provided by python.
The user cannot insert keys into this object. It is used for
tracking XNAT-specifc data on a per-scene basis. Data being tracked
includes: host, username, saveLevel, fileName, session start, etc.
TODO :
"""
class XnatSessionArgs(dict):
    """A dict subclass tailored for tracking XNAT session state.

    The key set is fixed at construction: once __init__ finishes, any
    attempt to store an unknown key raises a KeyError, so only the
    predefined session fields can ever be tracked.
    """

    def __init__(self, MODULE, srcPath = None, useDefaultXnatSaveLevel = True):
        """Create the fixed set of session keys, optionally pre-filling
        them from *srcPath*."""
        self.MODULE = MODULE
        # While True, __setitem__ accepts brand-new keys.
        self.inserting = True
        for key in ('host', 'username', 'saveLevel',
                    'saveUri', 'otherDirs', 'fileName'):
            self[key] = None
        self['sessionStart'] = str(datetime.datetime.now())
        for key in ("sessionType", "XnatIo", "metadata"):
            self[key] = None
        self.inserting = False

        if srcPath:
            self.makeSessionArgs_byPath(srcPath)

        dict.__init__(self)

    def __setitem__(self, key, value):
        """Assign *value* to *key*; unknown keys are rejected unless the
        object is still being initialized."""
        if (key not in self) and (not self.inserting):
            raise KeyError("XnatSessionArgs is immutable -- you can't insert keys.")
        dict.__setitem__(self, key, value)

    def makeSessionArgs_byPath(self, filePath):
        """Populate host/username/save fields from a single file path."""
        saveLevelDir, slicerDir = XnatSlicerUtils.getSaveTuple(filePath)
        self['host'] = self.MODULE.LoginMenu.hostDropdown.currentText
        self['username'] = self.MODULE.LoginMenu.usernameLine.text
        self['saveLevel'] = saveLevelDir
        self['saveUri'] = slicerDir
        # Files stored under a '.../files/' directory keep their own name;
        # otherwise fall back to the save-level directory name.
        inFilesDir = os.path.basename(os.path.dirname(filePath)) == 'files'
        if inFilesDir:
            self["fileName"] = os.path.basename(filePath)
        else:
            self["fileName"] = os.path.basename(saveLevelDir)

    def printAll(self, prefStr=None):
        """Debug helper: log every key/value pair."""
        if prefStr:
            MokaUtils.debug.lf(('%s')%(prefStr))
        for k,v in self.iteritems():
            MokaUtils.debug.lf( "[\'%s\']=\t%s"%(k,v))
class SessionManager(object):
    """ Creates and maintains a 'SessionLog.txt' file
        for writing XnatSessionArgs to disk.
    """

    def __init__(self, MODULE):
        """Store the module handle and derive the session-log path.

        NOTE(review): assumes XnatSlicerGlobals.LOCAL_URIS['settings']
        points at an existing directory.
        """
        self.MODULE = MODULE
        self.sessionFileName = os.path.join(
            XnatSlicerGlobals.LOCAL_URIS['settings'], 'SessionLog.txt')
        self.sessionArgs = None
        self.saveItem = None

    def startNewSession(self, sessionArgs):
        """Begin a new session from *sessionArgs* and append it to the log.

        Raises:
            NameError: if *sessionArgs* is not an XnatSessionArgs.
        """
        if not sessionArgs.__class__.__name__ == "XnatSessionArgs":
            raise NameError("You can only use XnatSessionArgs to start a new session.")
        self.sessionArgs = sessionArgs
        self.writeSession()

    def clearCurrentSession(self):
        """Drop the current session args."""
        self.sessionArgs = None

    def writeSession(self):
        """Append the current session args (one 'key:\\t\\tvalue' line per
        entry, followed by a blank separator) to the session log file."""
        fileLines = []
        for item in self.sessionArgs:
            fileLines.append("%s:\t\t%s\n"%(item, self.sessionArgs[item]))
        fileLines.append("\n\n")
        # Bug fix: use a context manager so the handle is closed even if
        # the write raises (the original open/close pair leaked it).
        with open(self.sessionFileName, 'a') as f:
            f.writelines(fileLines)
|
MokaCreativeLLC/XNATSlicer
|
XNATSlicer/XnatSlicerLib/utils/SessionManager.py
|
Python
|
bsd-3-clause
| 4,595
|
[
"VTK"
] |
f54b9705dc4282f83d1e0318973a62af636602bd1c1ca265a59e32e7a1d10923
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.search.categorysearch import SellableCategorySearch
from stoqlib.gui.test.uitestutils import GUITest
class TestSellableCategorySearch(GUITest):
    """GUI test for SellableCategorySearch: builds a small category tree
    and checks the search dialog against the stored 'sellable-category'
    UI snapshot."""

    def test_search(self):
        # Create some more categories to populate the search:
        # a two-level tree under "Category 1" plus a standalone one.
        category1 = self.create_sellable_category(u"Category 1")
        self.create_sellable_category(u"Category 2",
                                      parent=category1)
        category3 = self.create_sellable_category(u"Category 3",
                                                  parent=category1)
        self.create_sellable_category(u"Category 4",
                                      parent=category3)
        self.create_sellable_category(u"Category 5")
        # Make the new rows visible to the search query.
        self.store.flush()

        search = SellableCategorySearch(self.store)
        search.search.refresh()
        # Compares the populated dialog against the recorded snapshot.
        self.check_search(search, 'sellable-category')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_categorysearch.py
|
Python
|
gpl-2.0
| 1,779
|
[
"VisIt"
] |
fd8bbdcc574b596ce05031944bbccbac1ef68ae426e2b22196a513132a145ec9
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following (as JSON) is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
status: indicates if any of the supplied files matched at least one target.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def __ExtractBasePath(target):
  """Extracts the path components of the specified gyp target path."""
  idx = target.rfind('/')
  # No directory separator means the target lives at the root.
  return '' if idx == -1 else target[:idx + 1]
def __ResolveParent(path, base_path_components):
  """Resolves |path|, which starts with at least one '../'. Returns an empty
  string if the path shouldn't be considered. See __AddSources() for a
  description of |base_path_components|."""
  depth = 0
  while path.startswith('../'):
    depth += 1
    path = path[3:]
  # Relative includes may go outside the source tree. For example, an action
  # may have inputs in /usr/include, which are not in the source tree.
  if depth > len(base_path_components):
    return ''
  if depth == len(base_path_components):
    return path
  kept = base_path_components[:len(base_path_components) - depth]
  return '/'.join(kept) + '/' + path
def __AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # Skip empty entries, placeholder dirs ('!!!') and variable references.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      # Resolve leading '..' against the base path; an empty result means
      # the path escapes the source tree and is dropped.
      source = __ResolveParent(source, base_path_components)
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def __ExtractSourcesFromAction(action, base_path, base_path_components,
                               results):
  """Appends the 'inputs' of |action| (if present) to |results|."""
  has_inputs = 'inputs' in action
  if has_inputs:
    __AddSources(action['inputs'], base_path, base_path_components, results)
def __ExtractSources(target, target_dict, toplevel_dir):
  """Returns the posix-style source paths of |target|, relative to
  |toplevel_dir|, including action and rule inputs."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  if os.sep == '\\' and os.altsep == '/':
    base_path = target.replace('\\', '/')
  else:
    base_path = target
  if base_path == toplevel_dir:
    base_path = ''
  elif base_path.startswith(toplevel_dir + '/'):
    base_path = base_path[len(toplevel_dir) + len('/'):]
  base_path = posixpath.dirname(base_path)
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that __AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'

  if debug:
    print 'ExtractSources', target, base_path

  results = []
  if 'sources' in target_dict:
    __AddSources(target_dict['sources'], base_path, base_path_components,
                 results)
  # Include the inputs from any actions. Any changes to these effect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      __ExtractSourcesFromAction(action, base_path, base_path_components,
                                 results)
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      __ExtractSourcesFromAction(rule, base_path, base_path_components, results)

  return results
class Target(object):
  """Holds information about a particular target:
  deps: set of the names of direct dependent targets.
  match_status: one of the MatchStatus values."""

  def __init__(self):
    # Start unresolved; dependency analysis fills this in later.
    self.match_status = MATCH_STATUS_TBD
    self.deps = set()
class Config(object):
  """Details what we're looking for
  look_for_dependency_only: if true only search for a target listing any of
  the files in files.
  files: set of files to search for
  targets: see file description for details"""

  def __init__(self):
    self.look_for_dependency_only = True
    self.files = []
    self.targets = []

  def Init(self, params):
    """Initializes Config. This is a separate method as it may raise an
    exception if there is a parse error."""
    generator_flags = params.get('generator_flags', {})
    # TODO(sky): nuke file_path and look_for_dependency_only once migrate
    # recipes.
    file_path = generator_flags.get('file_path', None)
    if file_path:
      self._InitFromFilePath(file_path)
      return
    # If |file_path| wasn't specified then we look for config_path.
    # TODO(sky): always look for config_path once migrated recipes.
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    self.look_for_dependency_only = False
    try:
      # Bug fix: 'with' guarantees the handle is closed even when
      # json.load raises (the original leaked it on a parse error).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    # Coalesce duplicates
    self.targets = list(set(config.get('targets', [])))

  def _InitFromFilePath(self, file_path):
    """Reads the newline-separated file list at |file_path| into
    self.files, skipping empty lines."""
    try:
      # Bug fix: close the handle even if a read fails mid-iteration.
      with open(file_path, 'r') as f:
        for file_name in f:
          if file_name.endswith('\n'):
            file_name = file_name[0:len(file_name) - 1]
          if len(file_name):
            self.files.append(file_name)
    except IOError:
      raise Exception('Unable to open file', file_path)
def __GenerateTargets(target_list, target_dicts, toplevel_dir, files):
  """Generates a dictionary with the key the name of a target and the value a
  Target. |toplevel_dir| is the root of the source tree. If the sources of
  a target match that of |files|, the target's match_status is set to
  MATCH_STATUS_MATCHES.
  This returns a tuple of the dictionary and whether at least one target's
  sources listed one of the paths in |files|."""
  targets = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  matched = False

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    if target_name in targets:
      continue
    target = Target()
    targets[target_name] = target
    sources = __ExtractSources(target_name, target_dicts[target_name],
                               toplevel_dir)
    # A single matching source is enough to mark the target.
    for source in sources:
      if source in files:
        target.match_status = MATCH_STATUS_MATCHES
        matched = True
        break
    # Queue direct dependencies so the whole transitive closure is built.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets[target_name].deps.add(dep)
      targets_to_visit.append(dep)
  return targets, matched
def _GetUnqualifiedToQualifiedMapping(all_targets, to_find):
  """Returns a mapping (dictionary) from unqualified name to qualified name
  for all the targets in |to_find|."""
  result = {}
  if not to_find:
    return result
  remaining = set(to_find)
  for qualified_name in all_targets.keys():
    parts = gyp.common.ParseQualifiedTarget(qualified_name)
    if len(parts) > 1 and parts[1] in remaining:
      remaining.remove(parts[1])
      result[parts[1]] = qualified_name
      # Stop early once every requested target has been mapped.
      if not remaining:
        return result
  return result
def _DoesTargetDependOn(target, all_targets):
  """Returns true if |target| or any of its dependencies matches the supplied
  set of paths. This updates the match_status of the Targets as it recurses.
  target: the Target to look for.
  all_targets: mapping from target name to Target."""
  if target.match_status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if target.match_status == MATCH_STATUS_MATCHES or \
      target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
    return True
  # Still TBD: recurse into dependencies, memoizing each result so every
  # target is resolved at most once across the whole traversal.
  for dep_name in target.deps:
    dep_target = all_targets[dep_name]
    if _DoesTargetDependOn(dep_target, all_targets):
      dep_target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      return True
    dep_target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOn(all_targets, possible_targets):
  """Returns the list of targets in |possible_targets| that depend (either
  directly or indirectly) on the matched files.
  all_targets: mapping from target name to Target.
  possible_targets: targets to search from."""
  # possible_targets was initially unqualified, keep it unqualified.
  return [gyp.common.ParseQualifiedTarget(target)[1]
          for target in possible_targets
          if _DoesTargetDependOn(all_targets[target], all_targets)]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    # NOTE(review): these two assignments are locals here and appear
    # unused within this function -- confirm whether they were meant to be
    # module-level globals.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      if config.look_for_dependency_only:
        print 'Must specify files to analyze via file_path generator flag'
        return
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')

    # Gyp source paths are posix; normalize the toplevel dir on Windows.
    toplevel_dir = os.path.abspath(params['options'].toplevel_dir)
    if os.sep == '\\' and os.altsep == '/':
      toplevel_dir = toplevel_dir.replace('\\', '/')
    if debug:
      print 'toplevel_dir', toplevel_dir

    all_targets, matched = __GenerateTargets(target_list, target_dicts,
                                             toplevel_dir,
                                             frozenset(config.files))

    # Legacy mode: only report whether any target referenced one of the files.
    if config.look_for_dependency_only:
      print found_dependency_string if matched else no_dependency_string
      return

    if matched:
      # Map the requested unqualified target names back to qualified ones;
      # every requested target must be resolvable.
      unqualified_mapping = _GetUnqualifiedToQualifiedMapping(
          all_targets, config.targets)
      if len(unqualified_mapping) != len(config.targets):
        not_found = []
        for target in config.targets:
          if not target in unqualified_mapping:
            not_found.append(target)
        raise Exception('Unable to find all targets: ' + str(not_found))

      qualified_targets = [unqualified_mapping[x] for x in config.targets]
      output_targets = _GetTargetsDependingOn(all_targets, qualified_targets)
    else:
      output_targets = []

    print json.dumps(
        {'targets': output_targets,
         'status': found_dependency_string if matched else no_dependency_string })
  except Exception as e:
    # Errors are reported as JSON too, so callers can parse stdout uniformly.
    print json.dumps({'error': str(e)})
|
FelixZYY/gyp
|
pylib/gyp/generator/analyzer.py
|
Python
|
bsd-3-clause
| 13,933
|
[
"VisIt"
] |
8cf2b8ff7bc4a6cadb14518249ab0ab391618fbea47db6d87a13a76143b9b64a
|
import pysam
import sys
from collections import defaultdict
# Tally read-pair orientations (e.g. "F1R2", "R2F1") over the first
# ~100k records of the BAM file given as argv[1], then print the counts.
orientations = defaultdict(int)

with pysam.AlignmentFile(sys.argv[1]) as bam:
    for i, rec in enumerate(bam):
        # Only count properly paired reads whose mate maps to the same
        # reference sequence.
        if rec.is_paired and rec.is_proper_pair and rec.reference_id == rec.next_reference_id:
            strand = "R" if rec.is_reverse else "F"
            mate_strand = "R" if rec.mate_is_reverse else "F"
            read = "1"
            mate_read = "2"
            # Swap labels so "1" always refers to read1 of the pair.
            if not rec.is_read1:
                read, mate_read = mate_read, read
            orientation = [strand + read, mate_strand + mate_read]
            # Normalize: the leftmost-mapped mate comes first.
            if rec.reference_start > rec.next_reference_start:
                orientation = orientation[::-1]
            orientations["".join(orientation)] += 1
        # Stop after a fixed sample of records.
        if i == 100000:
            break

print(orientations)
|
PROSIC/libprosic
|
playground/check-read-orientation.py
|
Python
|
gpl-3.0
| 814
|
[
"pysam"
] |
e3d5a9271c644c163994d2046e0420a24d254e56cb29baa979199d0203fd83e6
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(x):
    """Analytic (u_x, u_z) displacement of the clamped solid Cosserat
    beam at position *x* along its axis, evaluated at height z = 0.5."""
    youngs = 1.2       # Young's modulus
    poisson = 0.3      # Poisson's ratio
    traction = 0.0002  # applied surface traction
    length = 10        # beam length
    z = 0.5            # evaluation height
    half_c = 0.5       # beam half-thickness
    shear_mod = youngs / 2 / (1 + poisson)
    beta = 3 * traction * (1 - poisson * poisson) / 4 / half_c / half_c / half_c / youngs
    dd = - beta * poisson / (1 - poisson)
    delta = beta / 3
    gamma = -3 * traction / 4 / half_c / shear_mod
    alpha = dd / 3 + traction / 4 / half_c / half_c / half_c / shear_mod
    disp_x = beta * x * z * (2 * length - x) + alpha * pow(z, 3)
    disp_z = delta * x * x * (x - 3 * length) + gamma * x + dd * z * z * (length - x)
    return (disp_x, disp_z)
def expected2(x):
    """Analytic (u_x, u_y, u_z) displacement for the stress-driven
    Cosserat bar, evaluated on the line y = 0, z = 0.5."""
    youngs = 1.2
    poisson = 0.3
    strain_coeff = 1.11E-2
    length = 10
    z = 0.5
    half_c = 0.5
    y = 0
    shear_mod = youngs / 2 / (1 + poisson)
    lateral = -poisson * strain_coeff
    disp_x = strain_coeff * x * z
    disp_y = lateral * z * y
    disp_z = -0.5 * strain_coeff * x * x + 0.5 * lateral * (z * z - y * y)
    return (disp_x, disp_y, disp_z)
def expected_slippery(x):
    """Analytic rotation th_y and deflection u_z of the slippery
    Cosserat beam at position *x*."""
    youngs = 1.2
    poisson = 0.3
    length = 10
    height = 0.5
    load = -2E-4
    shear_mod = youngs / 2 / (1 + poisson)
    bending = youngs * height * height / 12 / (1 - poisson * poisson)
    rot_y = 0.5 * load * x * (x - 2 * length) / bending
    defl_z = 2 * load * x / shear_mod + 2 * load * (1 - poisson * poisson) * x * x * (3 * length - x) / youngs / height / height
    return (rot_y, defl_z)
def expected_slippery_h(h):
    """Analytic rotation th_y and end-point deflection u_z of the
    slippery Cosserat beam as a function of layer thickness *h*
    (evaluated at the beam end, x = 10)."""
    youngs = 1.2
    poisson = 0.3
    length = 10
    x = 10
    load = -2E-4
    shear_mod = youngs / 2 / (1 + poisson)
    bending = youngs * h * h / 12 / (1 - poisson * poisson)
    rot_y = 0.5 * load * x * (x - 2 * length) / bending
    defl_z = 2 * load * x / shear_mod + 2 * load * (1 - poisson * poisson) * x * x * (3 * length - x) / youngs / h / h
    return (rot_y, defl_z)
def solid_bar():
    """Read rows 1..11 of the solid-bar gold CSV as lists of floats.

    Bug fix: the original wrapped each row in a bare map(), which on
    Python 3 is a lazy single-use iterator and breaks the d[i] indexing
    done by the plotting code below; rows are now materialized as lists.
    Also closes the file via a context manager.
    """
    path = "../../tests/static_deformations/gold/beam_cosserat_01_soln_0001.csv"
    with open(path) as f:
        lines = f.readlines()[1:12]
    return [[float(v) for v in line.strip().split(",")] for line in lines]
def solid_bar2():
    """Read rows 1..11 of the applied-stress gold CSV as lists of floats.

    Bug fix: materialize each row as a list instead of a lazy map()
    iterator (unsubscriptable on Python 3), and close the file via a
    context manager.
    """
    path = ("../../tests/static_deformations/gold/"
            "beam_cosserat_02_apply_stress_soln_0001.csv")
    with open(path) as f:
        lines = f.readlines()[1:12]
    return [[float(v) for v in line.strip().split(",")] for line in lines]
def solid_bar_slippery():
    """Read rows 1..11 of the slippery-beam gold CSV as lists of floats.

    Bug fix: materialize each row as a list instead of a lazy map()
    iterator (unsubscriptable on Python 3), and close the file via a
    context manager.
    """
    path = ("../../tests/static_deformations/gold/"
            "beam_cosserat_01_slippery_soln_0001.csv")
    with open(path) as f:
        lines = f.readlines()[1:12]
    return [[float(v) for v in line.strip().split(",")] for line in lines]
def solid_bar_slippery_h():
    """Hand-recorded (layer_thickness, end deflection) pairs.

    These data were generated by hand using beam_cosserat_01_slippery.i
    with different values of layer_thickness (and nx=800).
    """
    thickness_vs_deflection = [
        (0.1, -60.3),
        (0.2, -15.1),
        (0.3, -6.74),
        (0.4, -3.8),
        (0.5, -2.4),
        (0.9, -0.76),
    ]
    return thickness_vs_deflection
# Sample points for the analytic curves and the MOOSE result columns.
xpoints = np.arange(0, 10.05, 0.1)
hpoints = np.arange(0.09, 1, 0.01)
moosex = [i for i in range(11)]
moose = solid_bar()
moose2 = solid_bar2()
moose_slippery = solid_bar_slippery()
mooseh = [0.1, 0.2, 0.3, 0.4, 0.5, 0.9]
moose_slippery_h = solid_bar_slippery_h()

# Figure 1: clamped solid bar -- analytic vs MOOSE displacements.
# NOTE(review): column indices into the CSV rows (d[4], d[5], ...) are
# assumed to be disp_x/disp_z etc. -- confirm against the gold file header.
plt.figure()
plt.plot(xpoints, expected(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected u_x')
plt.plot(xpoints, expected(xpoints)[1], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[4] for d in moose], 'ks', markersize = 10.0, label = 'MOOSE disp_x')
plt.plot(moosex, [d[5] for d in moose], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("displacement (m)")
plt.title("Beam deformation")
#plt.savefig("cosserat_beam_disp.pdf")

# Figure 2: applied-stress bar -- analytic vs MOOSE displacements.
plt.figure()
plt.plot(xpoints, expected2(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected u_x')
plt.plot(xpoints, expected2(xpoints)[2], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[9] for d in moose2], 'ks', markersize = 10.0, label = 'MOOSE disp_x')
plt.plot(moosex, [d[11] for d in moose2], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("displacement (m)")
plt.title("Beam deformation")
#plt.savefig("cosserat_beam_disp_2.pdf")

# Figure 3: slippery beam -- rotation and deflection along the beam.
plt.figure()
plt.plot(xpoints, expected_slippery(xpoints)[0], 'k-', linewidth = 1.0, label = 'expected th_y')
plt.plot(xpoints, expected_slippery(xpoints)[1], 'r-', linewidth = 1.0, label = 'expected u_z')
plt.plot(moosex, [d[11] for d in moose_slippery], 'ks', markersize = 10.0, label = 'MOOSE wc_y')
plt.plot(moosex, [d[5] for d in moose_slippery], 'r^', markersize = 10.0, label = 'MOOSE disp_z')
plt.legend(loc = 'lower left')
plt.xlabel("x (m)")
plt.ylabel("disp (m) and rot")
plt.title("Slippery beam deformation")
#plt.savefig("cosserat_beam_disp_slippery.pdf")

# Figure 4: end-point deflection vs layer thickness; this one is saved.
plt.figure()
plt.plot(hpoints, expected_slippery_h(hpoints)[1], 'k-', linewidth = 1.0, label = 'expected')
plt.plot(mooseh, [d[1] for d in moose_slippery_h], 'ks', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("h (m)")
plt.ylabel("deflection (m)")
plt.title("End-point deflection in slippery Cosserat bar")
plt.savefig("cosserat_beam_disp_slippery_h.pdf")

sys.exit(0)
|
nuclear-wizard/moose
|
modules/tensor_mechanics/doc/tests/beam_cosserat.py
|
Python
|
lgpl-2.1
| 4,962
|
[
"MOOSE"
] |
9a4440b18377b10ecfddc7e3855d06edf15ab8353efc0ebc8c0904acfc48e5ab
|
# 10/24/2013 Sebastian Raschka
# HydroBond
# PyMOL Plugin for visualization of hydrogen bonds between protein and ligand.
import tkSimpleDialog,tkFileDialog
import tkMessageBox
from pymol import cmd
import sys
def __init__(self):
    """PyMOL plugin entry point.

    PyMOL calls this with the plugin-manager app as ``self``; it registers
    a "HydroBond" item in the Plugin menu that opens the input dialogs.
    """
    # ``s=self`` binds the app instance at definition time so the callback
    # does not depend on a late-bound closure variable.
    self.menuBar.addmenuitem('Plugin', 'command',
                             'HydroBond',
                             label = 'HydroBond',
                             command = lambda s=self : fetch_bonds_dialog(s))
def draw_bonds(protein_obj, ligand_obj, distance):
    """Draw hydrogen-bond distance objects between a protein and a ligand.

    Builds two temporary named selections ("donor": N/O atoms bound to a
    hydrogen; "acceptor": O atoms, or N atoms with no attached hydrogen),
    draws distances within *distance* Angstrom in both ligand->protein
    directions, then removes the temporary selections and distance labels.
    """
    cmd.select("donor", "(elem n,o and (neighbor hydro))")
    cmd.select("acceptor", "(elem o or (elem n and not (neighbor hydro)))")
    # Two distance objects: ligand acceptors to protein donors, and
    # ligand donors to protein acceptors.
    pairs = (
        ("HBAcc", "acceptor", "donor"),
        ("HBDon", "donor", "acceptor"),
    )
    for dist_name, ligand_role, protein_role in pairs:
        cmd.distance(dist_name,
                     "({} and {})".format(ligand_obj, ligand_role),
                     "({} and {})".format(protein_obj, protein_role),
                     distance)
    for temp_selection in ("donor", "acceptor"):
        cmd.delete(temp_selection)
    cmd.hide("labels")
def fetch_bonds_dialog(app):
    """Prompt for protein/ligand object names and a cutoff distance, then
    draw hydrogen-bond distance objects between the two objects.

    Parameters
    ----------
    app : the PyMOL plugin app instance; ``app.root`` is the Tk root used
        as the dialogs' parent window.

    Fix: ``askstring`` returns None when the user presses Cancel.  The
    original code still called ``draw_bonds`` in that case, producing
    nonsense selections such as "(None and acceptor)".  Abort instead.
    """
    protein_obj = tkSimpleDialog.askstring('Object selection',
             'Please enter the name of a protein object loaded in PyMOL: ',
             parent=app.root)
    if protein_obj is None:  # dialog cancelled
        return
    ligand_obj = tkSimpleDialog.askstring('Object selection',
             'Please enter the name of a ligand object loaded in PyMOL: ',
             parent=app.root)
    if ligand_obj is None:  # dialog cancelled
        return
    distance = tkSimpleDialog.askstring('Distance selection',
             'Please enter a distance in Angstrom (e.g., 3.2): ',
             parent=app.root)
    if distance is None:  # dialog cancelled
        return
    draw_bonds(protein_obj, ligand_obj, distance)
|
rasbt/BondPack
|
src/HydroBond.py
|
Python
|
gpl-3.0
| 1,588
|
[
"PyMOL"
] |
26d7cd34439a989705dd785351007f536b67ff0ac4bdbc3162c24f220b2ac23f
|
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Grant Welch <gwelch925+github@gmail.com>
# Copyright (c) 2017-2018, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017-2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2017 Dan Garrette <dhgarrette@gmail.com>
# Copyright (c) 2018-2019 Jim Robertson <jrobertson98atx@gmail.com>
# Copyright (c) 2018 Mike Miller <mtmiller@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 Drew <drewrisinger@users.noreply.github.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Marianna Polatoglou <mpolatoglou@bloomberg.net>
# Copyright (c) 2018 mar-chi-pan <mar.polatoglou@gmail.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2019 Djailla <bastien.vallet@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2020 Andrew Simmons <anjsimmo@gmail.com>
# Copyright (c) 2020 Andrew Simmons <a.simmons@deakin.edu.au>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 Ashley Whetter <ashleyw@activestate.com>
# Copyright (c) 2021 haasea <44787650+haasea@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Alexander Kapshuna <kapsh@kap.sh>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""variables checkers for Python code
"""
import collections
import copy
import itertools
import os
import re
from functools import lru_cache
import astroid
from pylint.checkers import BaseChecker, utils
from pylint.checkers.utils import is_postponed_evaluation_enabled
from pylint.constants import PY39_PLUS
from pylint.interfaces import HIGH, INFERENCE, INFERENCE_FAILURE, IAstroidChecker
from pylint.utils import get_global_option
# Matches dunder names such as ``__init__`` or ``__all__``.
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = "__future__"
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile("_.*|^ignored_|^unused_")
# In Python 3.7 abc has a Python implementation which is preferred
# by astroid. Unfortunately this also messes up our explicit checks
# for `abc`
METACLASS_NAME_TRANSFORMS = {"_py_abc": "abc"}
# ``if`` tests that guard imports used only for static type checking.
TYPING_TYPE_CHECKS_GUARDS = frozenset({"typing.TYPE_CHECKING", "TYPE_CHECKING"})
BUILTIN_RANGE = "builtins.range"
TYPING_MODULE = "typing"
# Public names exported by the ``typing`` module, used to recognise
# typing-only usages of otherwise "unused" names.
TYPING_NAMES = frozenset(
    {
        "Any",
        "Callable",
        "ClassVar",
        "Generic",
        "Optional",
        "Tuple",
        "Type",
        "TypeVar",
        "Union",
        "AbstractSet",
        "ByteString",
        "Container",
        "ContextManager",
        "Hashable",
        "ItemsView",
        "Iterable",
        "Iterator",
        "KeysView",
        "Mapping",
        "MappingView",
        "MutableMapping",
        "MutableSequence",
        "MutableSet",
        "Sequence",
        "Sized",
        "ValuesView",
        "Awaitable",
        "AsyncIterator",
        "AsyncIterable",
        "Coroutine",
        "Collection",
        "AsyncGenerator",
        "AsyncContextManager",
        "Reversible",
        "SupportsAbs",
        "SupportsBytes",
        "SupportsComplex",
        "SupportsFloat",
        "SupportsInt",
        "SupportsRound",
        "Counter",
        "Deque",
        "Dict",
        "DefaultDict",
        "List",
        "Set",
        "FrozenSet",
        "NamedTuple",
        "Generator",
        "AnyStr",
        "Text",
        "Pattern",
        "BinaryIO",
    }
)
def _is_from_future_import(stmt, name):
    """Check if the name is a future import from another module."""
    try:
        imported_module = stmt.do_import_module(stmt.modname)
    except astroid.AstroidBuildingException:
        # Module could not be built/resolved; nothing to say about the name.
        return None
    definitions = imported_module.locals.get(name, [])
    if any(
        isinstance(definition, astroid.ImportFrom) and definition.modname == FUTURE
        for definition in definitions
    ):
        return True
    return None
def in_for_else_branch(parent, stmt):
    """Returns True if stmt in inside the else branch for a parent For stmt."""
    if not isinstance(parent, astroid.For):
        return False
    # The statement is in the else branch when it *is* one of the orelse
    # statements or is nested inside one of them.
    return any(
        else_stmt == stmt or else_stmt.parent_of(stmt)
        for else_stmt in parent.orelse
    )
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
    """get overridden method if any"""
    try:
        ancestor = next(klass.local_attr_ancestors(name))
        candidate = ancestor[name]
    except StopIteration:
        # No ancestor defines <name>.
        return None
    except KeyError:
        # We have found an ancestor defining <name> but it's not in the local
        # dictionary. This may happen with astroid built from living objects.
        return None
    if not isinstance(candidate, astroid.FunctionDef):
        return None
    return candidate
def _get_unpacking_extra_info(node, inferred):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ""
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = " %s" % inferred.as_string()
elif inferred.lineno:
more = " defined at line %s" % inferred.lineno
elif inferred.lineno:
more = f" defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(node, frame, defframe):
    """Detect that the given frames shares a global
    scope.
    Two frames shares a global scope when neither
    of them are hidden under a function scope, as well
    as any of parent scope of them, until the root scope.
    In this case, depending from something defined later on
    will not work, because it is still undefined.
    Example:
        class A:
            # B has the same global scope as `C`, leading to a NameError.
            class B(C): ...
        class C: ...
    """
    def_scope = scope = None
    if frame and frame.parent:
        scope = frame.parent.scope()
    if defframe and defframe.parent:
        def_scope = defframe.parent.scope()
    if isinstance(frame, astroid.FunctionDef):
        # If the parent of the current node is a
        # function, then it can be under its scope
        # (defined in, which doesn't concern us) or
        # the `->` part of annotations. The same goes
        # for annotations of function arguments, they'll have
        # their parent the Arguments node.
        if not isinstance(node.parent, (astroid.FunctionDef, astroid.Arguments)):
            return False
    elif any(
        not isinstance(f, (astroid.ClassDef, astroid.Module)) for f in (frame, defframe)
    ):
        # Not interested in other frames, since they are already
        # not in a global scope.
        return False
    break_scopes = []
    for current_scope in (scope, def_scope):
        # Look for parent scopes. If there is anything different
        # than a module or a class scope, then they frames don't
        # share a global scope.
        parent_scope = current_scope
        while parent_scope:
            if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
                break_scopes.append(parent_scope)
                break
            if parent_scope.parent:
                parent_scope = parent_scope.parent.scope()
            else:
                break
    if break_scopes and len(set(break_scopes)) != 1:
        # Store different scopes than expected.
        # If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) shares the same scope,
        # and we could apply our lineno analysis over them.
        # For instance, this works when they are inside a function, the node
        # that uses a definition and the definition itself.
        return False
    # At this point, we are certain that frame and defframe shares a scope
    # and the definition of the first depends on the second.
    return frame.lineno < defframe.lineno
def _infer_name_module(node, name):
    """Infer what *name* (a top-level module part of an Import node)
    resolves to, yielding the inferred module object(s)."""
    context = astroid.context.InferenceContext()
    # Restrict inference to the single imported name.
    context.lookupname = name
    return node.infer(context, asname=False)
def _fix_dot_imports(not_consumed):
    """Try to fix imports with multiple dots, by returning a dictionary
    with the import names expanded. The function unflattens root imports,
    like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
    and 'xml.sax' respectively.

    *not_consumed* maps a local name to the list of statements that bind it;
    the result is a list of (expanded_name, import_stmt) pairs sorted by the
    statement's line number.
    """
    names = {}
    for name, stmts in not_consumed.items():
        # Skip names that are also the target of an augmented assignment
        # (e.g. `x += ...`): they are used, not just imported.
        if any(
            isinstance(stmt, astroid.AssignName)
            and isinstance(stmt.assign_type(), astroid.AugAssign)
            for stmt in stmts
        ):
            continue
        for stmt in stmts:
            if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
                continue
            for imports in stmt.names:
                second_name = None
                import_module_name = imports[0]
                if import_module_name == "*":
                    # In case of wildcard imports,
                    # pick the name from inside the imported module.
                    second_name = name
                else:
                    name_matches_dotted_import = False
                    if (
                        import_module_name.startswith(name)
                        and import_module_name.find(".") > -1
                    ):
                        name_matches_dotted_import = True
                    if name_matches_dotted_import or name in imports:
                        # Most likely something like 'xml.etree',
                        # which will appear in the .locals as 'xml'.
                        # Only pick the name if it wasn't consumed.
                        second_name = import_module_name
                if second_name and second_name not in names:
                    names[second_name] = stmt
    return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
    """
    Detect imports in the frame, with the required
    *name*. Such imports can be considered assignments.
    Returns True if an import for the given name was found.
    """
    for import_node in frame.nodes_of_class((astroid.Import, astroid.ImportFrom)):
        for import_name, import_alias in import_node.names:
            # An aliased import binds only the alias; a plain import binds
            # the imported name itself.
            bound_name = import_alias if import_alias else import_name
            if bound_name and bound_name == name:
                return True
    return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
    """
    Checks if name_node has corresponding assign statement in same scope
    """
    return any(
        assign.name == name_node.name
        for assign in name_node.scope().nodes_of_class(astroid.AssignName)
    )
def _is_type_checking_import(node):
    """Return True when *node* is guarded by an ``if TYPE_CHECKING:`` test."""
    guard = node.parent
    return (
        isinstance(guard, astroid.If)
        and guard.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
    )
def _has_locals_call_after_node(stmt, scope):
    """Return True when ``locals()`` is called in *scope* on a line after
    *stmt*.

    A later ``locals()`` call may consume any name bound by *stmt*, so the
    caller should only report "possibly unused" rather than "unused".
    """
    # Do not descend into nested definitions: a locals() call inside a
    # nested function/class refers to that namespace, not this scope's.
    skip_nodes = (
        astroid.FunctionDef,
        astroid.ClassDef,
        astroid.Import,
        astroid.ImportFrom,
    )
    for call in scope.nodes_of_class(astroid.Call, skip_klass=skip_nodes):
        inferred = utils.safe_infer(call.func)
        if (
            utils.is_builtin_object(inferred)
            and getattr(inferred, "name", None) == "locals"
        ):
            if stmt.lineno < call.lineno:
                return True
    return False
# Message catalogue for this checker:
#   msgid -> (message template, symbolic name, description[, extra options]).
# E-prefixed ids are errors, W-prefixed ids are warnings.
MSGS = {
    "E0601": (
        "Using variable %r before assignment",
        "used-before-assignment",
        "Used when a local variable is accessed before its assignment.",
    ),
    "E0602": (
        "Undefined variable %r",
        "undefined-variable",
        "Used when an undefined variable is accessed.",
    ),
    "E0603": (
        "Undefined variable name %r in __all__",
        "undefined-all-variable",
        "Used when an undefined variable name is referenced in __all__.",
    ),
    "E0604": (
        "Invalid object %r in __all__, must contain only strings",
        "invalid-all-object",
        "Used when an invalid (non-string) object occurs in __all__.",
    ),
    "E0611": (
        "No name %r in module %r",
        "no-name-in-module",
        "Used when a name cannot be found in a module.",
    ),
    "W0601": (
        "Global variable %r undefined at the module level",
        "global-variable-undefined",
        'Used when a variable is defined through the "global" statement '
        "but the variable is not defined in the module scope.",
    ),
    "W0602": (
        "Using global for %r but no assignment is done",
        "global-variable-not-assigned",
        'Used when a variable is defined through the "global" statement '
        "but no assignment to this variable is done.",
    ),
    "W0603": (
        "Using the global statement",  # W0121
        "global-statement",
        'Used when you use the "global" statement to update a global '
        "variable. Pylint just try to discourage this "
        "usage. That doesn't mean you cannot use it !",
    ),
    "W0604": (
        "Using the global statement at the module level",  # W0103
        "global-at-module-level",
        'Used when you use the "global" statement at the module level '
        "since it has no effect",
    ),
    "W0611": (
        "Unused %s",
        "unused-import",
        "Used when an imported module or variable is not used.",
    ),
    "W0612": (
        "Unused variable %r",
        "unused-variable",
        "Used when a variable is defined but not used.",
    ),
    "W0613": (
        "Unused argument %r",
        "unused-argument",
        "Used when a function or method argument is not used.",
    ),
    "W0614": (
        "Unused import %s from wildcard import",
        "unused-wildcard-import",
        "Used when an imported module or variable is not used from a "
        "`'from X import *'` style import.",
    ),
    "W0621": (
        "Redefining name %r from outer scope (line %s)",
        "redefined-outer-name",
        "Used when a variable's name hides a name defined in the outer scope.",
    ),
    "W0622": (
        "Redefining built-in %r",
        "redefined-builtin",
        "Used when a variable or function override a built-in.",
    ),
    "W0623": (
        "Redefining name %r from %s in exception handler",
        "redefine-in-handler",
        "Used when an exception handler assigns the exception to an existing name",
    ),
    "W0631": (
        "Using possibly undefined loop variable %r",
        "undefined-loop-variable",
        "Used when a loop variable (i.e. defined by a for loop or "
        "a list comprehension or a generator expression) is used outside "
        "the loop.",
    ),
    "W0632": (
        "Possible unbalanced tuple unpacking with "
        "sequence%s: "
        "left side has %d label(s), right side has %d value(s)",
        "unbalanced-tuple-unpacking",
        "Used when there is an unbalanced tuple unpacking in assignment",
        {"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
    ),
    "E0633": (
        "Attempting to unpack a non-sequence%s",
        "unpacking-non-sequence",
        "Used when something which is not "
        "a sequence is used in an unpack assignment",
        {"old_names": [("W0633", "old-unpacking-non-sequence")]},
    ),
    "W0640": (
        "Cell variable %s defined in loop",
        "cell-var-from-loop",
        "A variable used in a closure is defined in a loop. "
        "This will result in all closures using the same value for "
        "the closed-over variable.",
    ),
    "W0641": (
        "Possibly unused variable %r",
        "possibly-unused-variable",
        "Used when a variable is defined but might not be used. "
        "The possibility comes from the fact that locals() might be used, "
        "which could consume or not the said variable",
    ),
    "W0642": (
        "Invalid assignment to %s in method",
        "self-cls-assignment",
        "Invalid assignment to self or cls in instance or class method "
        "respectively.",
    ),
}
# Per-scope bookkeeping triple used by NamesConsumer:
#   to_consume -- names defined in the scope that have not been used yet
#   consumed   -- names that have already been looked up
#   scope_type -- one of "module"/"class"/"function"/"lambda"/"comprehension"
ScopeConsumer = collections.namedtuple(
    "ScopeConsumer", "to_consume consumed scope_type"
)
class NamesConsumer:
    """
    A simple class to handle consumed, to consume and scope type info of node locals
    """

    def __init__(self, node, scope_type):
        # Snapshot the node's locals: every name starts out "to consume".
        self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
        self.node = node

    def __repr__(self):
        pending = ", ".join(f"{k}->{v}" for k, v in self._atomic.to_consume.items())
        done = ", ".join(f"{k}->{v}" for k, v in self._atomic.consumed.items())
        return f"""
to_consume : {pending}
consumed : {done}
scope_type : {self._atomic.scope_type}
"""

    def __iter__(self):
        return iter(self._atomic)

    @property
    def to_consume(self):
        return self._atomic.to_consume

    @property
    def consumed(self):
        return self._atomic.consumed

    @property
    def scope_type(self):
        return self._atomic.scope_type

    def mark_as_consumed(self, name, new_node):
        """
        Mark the name as consumed and delete it from
        the to_consume dictionary
        """
        self.consumed[name] = new_node
        del self.to_consume[name]

    def get_next_to_consume(self, node):
        """Return the pending definition(s) of *node*'s name, or None when the
        only definition is part of the very statement using the name."""
        name = node.name
        parent_node = node.parent
        definition = self.to_consume.get(name)
        # `x = x`: the right-hand-side name is being bound by this very
        # assignment, so it cannot serve as a prior definition.
        if (
            definition
            and isinstance(parent_node, astroid.Assign)
            and parent_node == definition[0].parent
        ):
            lhs = definition[0].parent.targets[0]
            if lhs.name == name:
                definition = None
        # `for x in x:`: the iterable refers to the loop target defined by
        # the same For statement.
        if (
            definition
            and isinstance(parent_node, astroid.For)
            and parent_node.iter == node
            and parent_node.target in definition
        ):
            definition = None
        return definition
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
    """checks for
    * unused variables / imports
    * undefined variables
    * redefinition of variable from builtins or from an outer scope
    * use of variable before assignment
    * __all__ consistency
    * self/cls assignment
    """

    __implements__ = IAstroidChecker

    # Checker registration metadata.
    name = "variables"
    msgs = MSGS
    priority = -1
    # User-configurable options (surface as --init-import etc. on the CLI).
    options = (
        (
            "init-import",
            {
                "default": 0,
                "type": "yn",
                "metavar": "<y_or_n>",
                "help": "Tells whether we should check for unused import in "
                "__init__ files.",
            },
        ),
        (
            "dummy-variables-rgx",
            {
                "default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "A regular expression matching the name of dummy "
                "variables (i.e. expected to not be used).",
            },
        ),
        (
            "additional-builtins",
            {
                "default": (),
                "type": "csv",
                "metavar": "<comma separated list>",
                "help": "List of additional names supposed to be defined in "
                "builtins. Remember that you should avoid defining new builtins "
                "when possible.",
            },
        ),
        (
            "callbacks",
            {
                "default": ("cb_", "_cb"),
                "type": "csv",
                "metavar": "<callbacks>",
                "help": "List of strings which can identify a callback "
                "function by name. A callback name must start or "
                "end with one of those strings.",
            },
        ),
        (
            "redefining-builtins-modules",
            {
                "default": (
                    "six.moves",
                    "past.builtins",
                    "future.builtins",
                    "builtins",
                    "io",
                ),
                "type": "csv",
                "metavar": "<comma separated list>",
                "help": "List of qualified module names which can have objects "
                "that can redefine builtins.",
            },
        ),
        (
            "ignored-argument-names",
            {
                "default": IGNORED_ARGUMENT_NAMES,
                "type": "regexp",
                "metavar": "<regexp>",
                "help": "Argument names that match this expression will be "
                "ignored. Default to name with leading underscore.",
            },
        ),
        (
            "allow-global-unused-variables",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y_or_n>",
                "help": "Tells whether unused global variables should be treated as a violation.",
            },
        ),
        (
            "allowed-redefined-builtins",
            {
                "default": (),
                "type": "csv",
                "metavar": "<comma separated list>",
                "help": "List of names allowed to shadow builtins",
            },
        ),
    )
    def __init__(self, linter=None):
        BaseChecker.__init__(self, linter)
        # Stack of NamesConsumer instances, one per currently-open scope.
        self._to_consume = (
            None  # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
        )
        self._checking_mod_attr = None
        # Stack of (For node, [assigned names]) for redefined-outer-name checks.
        self._loop_variables = []
        # Names seen only in type annotations / type comments.
        self._type_annotation_names = []
        # True when `from __future__ import annotations` is active.
        self._postponed_evaluation_enabled = False
    @utils.check_messages("redefined-outer-name")
    def visit_for(self, node):
        """Warn when a for-loop target shadows the target of an outer loop."""
        assigned_to = [
            var.name for var in node.target.nodes_of_class(astroid.AssignName)
        ]
        # Only check variables that are used
        dummy_rgx = self.config.dummy_variables_rgx
        assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
        for variable in assigned_to:
            for outer_for, outer_variables in self._loop_variables:
                # A loop in the outer loop's `else:` branch does not overlap
                # with the outer loop's iteration, so no shadowing there.
                if variable in outer_variables and not in_for_else_branch(
                    outer_for, node
                ):
                    self.add_message(
                        "redefined-outer-name",
                        args=(variable, outer_for.fromlineno),
                        node=node,
                    )
                    break
        self._loop_variables.append((node, assigned_to))

    @utils.check_messages("redefined-outer-name")
    def leave_for(self, node):
        """Pop this loop's variables and record any type-comment names."""
        self._loop_variables.pop()
        self._store_type_annotation_names(node)
    def visit_module(self, node):
        """visit module : update consumption analysis variable
        checks globals doesn't overrides builtins
        """
        # Reset the scope stack: the module is the outermost scope.
        self._to_consume = [NamesConsumer(node, "module")]
        self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
        for name, stmts in node.locals.items():
            if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
                if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
                    continue
                self.add_message("redefined-builtin", args=name, node=stmts[0])
    @utils.check_messages(
        "unused-import",
        "unused-wildcard-import",
        "redefined-builtin",
        "undefined-all-variable",
        "invalid-all-object",
        "unused-variable",
    )
    def leave_module(self, node):
        """leave module: check globals"""
        # Only the module scope should remain on the stack at this point.
        assert len(self._to_consume) == 1
        self._check_metaclasses(node)
        # Names never consumed at module level are unused globals/imports.
        not_consumed = self._to_consume.pop().to_consume
        # attempt to check for __all__ if defined
        if "__all__" in node.locals:
            self._check_all(node, not_consumed)
        # check for unused globals
        self._check_globals(not_consumed)
        # don't check unused imports in __init__ files
        if not self.config.init_import and node.package:
            return
        self._check_imports(not_consumed)
    # Each of the following visit/leave pairs pushes/pops a NamesConsumer for
    # the corresponding scope kind.  Unused-local checks are deliberately not
    # performed when leaving these scopes.

    def visit_classdef(self, node):
        """visit class: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "class"))

    def leave_classdef(self, _):
        """leave class: update consumption analysis variable"""
        # do not check for not used locals here (no sense)
        self._to_consume.pop()

    def visit_lambda(self, node):
        """visit lambda: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "lambda"))

    def leave_lambda(self, _):
        """leave lambda: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()

    def visit_generatorexp(self, node):
        """visit genexpr: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))

    def leave_generatorexp(self, _):
        """leave genexpr: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()

    def visit_dictcomp(self, node):
        """visit dictcomp: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))

    def leave_dictcomp(self, _):
        """leave dictcomp: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()

    def visit_setcomp(self, node):
        """visit setcomp: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))

    def leave_setcomp(self, _):
        """leave setcomp: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def visit_functiondef(self, node):
        """visit function: update consumption analysis variable and check locals"""
        self._to_consume.append(NamesConsumer(node, "function"))
        if not (
            self.linter.is_message_enabled("redefined-outer-name")
            or self.linter.is_message_enabled("redefined-builtin")
        ):
            return
        globs = node.root().globals
        for name, stmt in node.items():
            if utils.is_inside_except(stmt):
                continue
            if name in globs and not isinstance(stmt, astroid.Global):
                definition = globs[name][0]
                if (
                    isinstance(definition, astroid.ImportFrom)
                    and definition.modname == FUTURE
                ):
                    # It is a __future__ directive, not a symbol.
                    continue
                # Do not take in account redefined names for the purpose
                # of type checking.:
                if any(
                    isinstance(definition.parent, astroid.If)
                    and definition.parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
                    for definition in globs[name]
                ):
                    continue
                line = definition.fromlineno
                if not self._is_name_ignored(stmt, name):
                    self.add_message(
                        "redefined-outer-name", args=(name, line), node=stmt
                    )
            elif (
                utils.is_builtin(name)
                and not self._allowed_redefined_builtin(name)
                and not self._should_ignore_redefined_builtin(stmt)
            ):
                # do not print Redefining builtin for additional builtins
                self.add_message("redefined-builtin", args=name, node=stmt)
    def leave_functiondef(self, node):
        """leave function: check function's locals are consumed"""
        self._check_metaclasses(node)
        # Record names used only in `# type:` comments so they are not
        # reported as unused imports later.
        if node.type_comment_returns:
            self._store_type_annotation_node(node.type_comment_returns)
        if node.type_comment_args:
            for argument_annotation in node.type_comment_args:
                self._store_type_annotation_node(argument_annotation)
        not_consumed = self._to_consume.pop().to_consume
        if not (
            self.linter.is_message_enabled("unused-variable")
            or self.linter.is_message_enabled("possibly-unused-variable")
            or self.linter.is_message_enabled("unused-argument")
        ):
            return
        # Don't check arguments of function which are only raising an exception.
        if utils.is_error(node):
            return
        # Don't check arguments of abstract methods or within an interface.
        is_method = node.is_method()
        if is_method and node.is_abstract():
            return
        global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
        nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
        for name, stmts in not_consumed.items():
            self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)

    # Async functions get the exact same scope/unused-name handling.
    visit_asyncfunctiondef = visit_functiondef
    leave_asyncfunctiondef = leave_functiondef
    @utils.check_messages(
        "global-variable-undefined",
        "global-variable-not-assigned",
        "global-statement",
        "global-at-module-level",
        "redefined-builtin",
    )
    def visit_global(self, node):
        """check names imported exists in the global scope"""
        frame = node.frame()
        if isinstance(frame, astroid.Module):
            # `global` at module level is a no-op.
            self.add_message("global-at-module-level", node=node)
            return
        module = frame.root()
        default_message = True
        locals_ = node.scope().locals
        for name in node.names:
            try:
                assign_nodes = module.getattr(name)
            except astroid.NotFoundError:
                # unassigned global, skip
                assign_nodes = []
            not_defined_locally_by_import = not any(
                isinstance(local, astroid.node_classes.Import)
                for local in locals_.get(name, ())
            )
            if not assign_nodes and not_defined_locally_by_import:
                self.add_message("global-variable-not-assigned", args=name, node=node)
                default_message = False
                continue
            for anode in assign_nodes:
                if (
                    isinstance(anode, astroid.AssignName)
                    and anode.name in module.special_attributes
                ):
                    self.add_message("redefined-builtin", args=name, node=node)
                    break
                if anode.frame() is module:
                    # module level assignment
                    break
            else:
                # For-else: no module-level assignment was found.
                if not_defined_locally_by_import:
                    # global undefined at the module scope
                    self.add_message("global-variable-undefined", args=name, node=node)
                    default_message = False
        if default_message:
            self.add_message("global-statement", node=node)
    def visit_assignname(self, node):
        # `x += 1` both reads and writes x, so the read side must go through
        # the same definition lookup as a plain Name.
        if isinstance(node.assign_type(), astroid.AugAssign):
            self.visit_name(node)

    def visit_delname(self, node):
        # `del x` reads the binding before removing it.
        self.visit_name(node)
    def visit_name(self, node):
        """Check that a name is defined in the current scope.

        Walks the stack of open scopes from the innermost outwards, looking
        for a definition of ``node.name``; emits undefined-variable /
        used-before-assignment when the lookup fails or the definition
        appears after the use.
        """
        stmt = node.statement()
        if stmt.fromlineno is None:
            # name node from an astroid built from live code, skip
            assert not stmt.root().file.endswith(".py")
            return
        name = node.name
        frame = stmt.scope()
        start_index = len(self._to_consume) - 1
        undefined_variable_is_enabled = self.linter.is_message_enabled(
            "undefined-variable"
        )
        used_before_assignment_is_enabled = self.linter.is_message_enabled(
            "used-before-assignment"
        )
        # iterates through parent scopes, from the inner to the outer
        base_scope_type = self._to_consume[start_index].scope_type
        # pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
        for i in range(start_index, -1, -1):
            current_consumer = self._to_consume[i]
            # The list of base classes in the class definition is not part
            # of the class body.
            # If the current scope is a class scope but it's not the inner
            # scope, ignore it. This prevents to access this scope instead of
            # the globals one in function members when there are some common
            # names.
            if current_consumer.scope_type == "class" and (
                utils.is_ancestor_name(current_consumer.node, node)
                or (i != start_index and self._ignore_class_scope(node))
            ):
                continue
            # Ignore inner class scope for keywords in class definition
            if (
                current_consumer.scope_type == "class"
                and isinstance(node.parent, astroid.Keyword)
                and isinstance(node.parent.parent, astroid.ClassDef)
            ):
                continue
            # if the name node is used as a function default argument's value or as
            # a decorator, then start from the parent frame of the function instead
            # of the function frame - and thus open an inner class scope
            if (
                current_consumer.scope_type == "function"
                and self._defined_in_function_definition(node, current_consumer.node)
            ):
                # ignore function scope if is an annotation/default/decorator, as not in the body
                continue
            if current_consumer.scope_type == "lambda" and utils.is_default_argument(
                node, current_consumer.node
            ):
                continue
            # the name has already been consumed, only check it's not a loop
            # variable used outside the loop
            # avoid the case where there are homonyms inside function scope and
            # comprehension current scope (avoid bug #1731)
            if name in current_consumer.consumed and not (
                current_consumer.scope_type == "comprehension"
                and self._has_homonym_in_upper_function_scope(node, i)
            ):
                defnode = utils.assign_parent(current_consumer.consumed[name][0])
                self._check_late_binding_closure(node, defnode)
                self._loopvar_name(node, name)
                break
            found_node = current_consumer.get_next_to_consume(node)
            if found_node is None:
                continue
            # checks for use before assignment
            defnode = utils.assign_parent(current_consumer.to_consume[name][0])
            if (
                undefined_variable_is_enabled or used_before_assignment_is_enabled
            ) and defnode is not None:
                self._check_late_binding_closure(node, defnode)
                defstmt = defnode.statement()
                defframe = defstmt.frame()
                # The class reuses itself in the class scope.
                recursive_klass = (
                    frame is defframe
                    and defframe.parent_of(node)
                    and isinstance(defframe, astroid.ClassDef)
                    and node.name == defframe.name
                )
                if (
                    recursive_klass
                    and utils.is_inside_lambda(node)
                    and (
                        not utils.is_default_argument(node)
                        or node.scope().parent.scope() is not defframe
                    )
                ):
                    # Self-referential class references are fine in lambda's --
                    # As long as they are not part of the default argument directly
                    # under the scope of the parent self-referring class.
                    # Example of valid default argument:
                    # class MyName3:
                    #     myattr = 1
                    #     mylambda3 = lambda: lambda a=MyName3: a
                    # Example of invalid default argument:
                    # class MyName4:
                    #     myattr = 1
                    #     mylambda4 = lambda a=MyName4: lambda: a
                    # If the above conditional is True,
                    # there is no possibility of undefined-variable
                    # Also do not consume class name
                    # (since consuming blocks subsequent checks)
                    # -- quit
                    break
                (
                    maybee0601,
                    annotation_return,
                    use_outer_definition,
                ) = self._is_variable_violation(
                    node,
                    name,
                    defnode,
                    stmt,
                    defstmt,
                    frame,
                    defframe,
                    base_scope_type,
                    recursive_klass,
                )
                if use_outer_definition:
                    continue
                if (
                    maybee0601
                    and not utils.is_defined_before(node)
                    and not astroid.are_exclusive(stmt, defstmt, ("NameError",))
                ):
                    # Used and defined in the same place, e.g `x += 1` and `del x`
                    defined_by_stmt = defstmt is stmt and isinstance(
                        node, (astroid.DelName, astroid.AssignName)
                    )
                    if (
                        recursive_klass
                        or defined_by_stmt
                        or annotation_return
                        or isinstance(defstmt, astroid.Delete)
                    ):
                        if not utils.node_ignores_exception(node, NameError):
                            # Handle postponed evaluation of annotations
                            if not (
                                self._postponed_evaluation_enabled
                                and isinstance(
                                    stmt,
                                    (
                                        astroid.AnnAssign,
                                        astroid.FunctionDef,
                                        astroid.Arguments,
                                    ),
                                )
                                and name in node.root().locals
                            ):
                                self.add_message(
                                    "undefined-variable", args=name, node=node
                                )
                    elif base_scope_type != "lambda":
                        # E0601 may *not* occurs in lambda scope.
                        # Handle postponed evaluation of annotations
                        if not (
                            self._postponed_evaluation_enabled
                            and isinstance(
                                stmt, (astroid.AnnAssign, astroid.FunctionDef)
                            )
                        ):
                            self.add_message(
                                "used-before-assignment", args=name, node=node
                            )
                    elif base_scope_type == "lambda":
                        # E0601 can occur in class-level scope in lambdas, as in
                        # the following example:
                        #   class A:
                        #      x = lambda attr: f + attr
                        #      f = 42
                        if isinstance(frame, astroid.ClassDef) and name in frame.locals:
                            if isinstance(node.parent, astroid.Arguments):
                                if stmt.fromlineno <= defstmt.fromlineno:
                                    # Doing the following is fine:
                                    #   class A:
                                    #      x = 42
                                    #      y = lambda attr=x: attr
                                    self.add_message(
                                        "used-before-assignment", args=name, node=node
                                    )
                            else:
                                self.add_message(
                                    "undefined-variable", args=name, node=node
                                )
                        elif current_consumer.scope_type == "lambda":
                            self.add_message("undefined-variable", node=node, args=name)
            current_consumer.mark_as_consumed(name, found_node)
            # check it's not a loop variable used outside the loop
            self._loopvar_name(node, name)
            break
        else:
            # we have not found the name, if it isn't a builtin, that's an
            # undefined name !
            if undefined_variable_is_enabled and not (
                name in astroid.Module.scope_attrs
                or utils.is_builtin(name)
                or name in self.config.additional_builtins
                or (
                    name == "__class__"
                    and isinstance(frame, astroid.FunctionDef)
                    and frame.is_method()
                )
            ):
                if not utils.node_ignores_exception(node, NameError):
                    self.add_message("undefined-variable", args=name, node=node)
    @utils.check_messages("no-name-in-module")
    def visit_import(self, node):
        """check modules attribute accesses

        Emits no-name-in-module when a dotted import path references an
        attribute missing from the resolved module.
        """
        if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
            # No need to verify this, since ImportError is already
            # handled by the client code.
            return
        for name, _ in node.names:
            parts = name.split(".")
            try:
                module = next(_infer_name_module(node, parts[0]))
            except astroid.ResolveError:
                # Unresolvable root module: nothing to verify.
                continue
            if not isinstance(module, astroid.Module):
                continue
            self._check_module_attrs(node, module, parts[1:])
    @utils.check_messages("no-name-in-module")
    def visit_importfrom(self, node):
        """check modules attribute accesses

        Verifies both the dotted module path of the ``from`` clause and
        every imported name (except ``*``).
        """
        if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
            # No need to verify this, since ImportError is already
            # handled by the client code.
            return
        name_parts = node.modname.split(".")
        try:
            module = node.do_import_module(name_parts[0])
        except astroid.AstroidBuildingException:
            return
        # Walks the remaining dotted components; returns the resolved module
        # or None (in which case a message was already emitted).
        module = self._check_module_attrs(node, module, name_parts[1:])
        if not module:
            return
        for name, _ in node.names:
            if name == "*":
                continue
            self._check_module_attrs(node, module, name.split("."))
    @utils.check_messages(
        "unbalanced-tuple-unpacking", "unpacking-non-sequence", "self-cls-assignment"
    )
    def visit_assign(self, node):
        """Check unbalanced tuple unpacking for assignments
        and unpacking non-sequences as well as in case self/cls
        get assigned.
        """
        self._check_self_cls_assign(node)
        # Unpacking checks only apply to tuple/list targets.
        if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
            return
        targets = node.targets[0].itered()
        try:
            inferred = utils.safe_infer(node.value)
            if inferred is not None:
                self._check_unpacking(inferred, node, targets)
        except astroid.InferenceError:
            return
    # list comprehensions have their own scope
    def visit_listcomp(self, node):
        """visit listcomp: update consumption analysis variable"""
        self._to_consume.append(NamesConsumer(node, "comprehension"))
    def leave_listcomp(self, _):
        """leave listcomp: update consumption analysis variable"""
        # do not check for not used locals here
        self._to_consume.pop()
    def leave_assign(self, node):
        """Record names referenced by the assignment's type comment, if any."""
        self._store_type_annotation_names(node)
    def leave_with(self, node):
        """Record names referenced by the with statement's type comment, if any."""
        self._store_type_annotation_names(node)
    def visit_arguments(self, node):
        """Record names referenced by per-argument type comments."""
        for annotation in node.type_comment_args:
            self._store_type_annotation_node(annotation)
    # Relying on other checker's options, which might not have been initialized yet.
    @astroid.decorators.cachedproperty
    def _analyse_fallback_blocks(self):
        """Cached analyse-fallback-blocks option (resolved lazily)."""
        return get_global_option(self, "analyse-fallback-blocks", default=False)
    @astroid.decorators.cachedproperty
    def _ignored_modules(self):
        """Cached ignored-modules option (resolved lazily)."""
        return get_global_option(self, "ignored-modules", default=[])
    @astroid.decorators.cachedproperty
    def _allow_global_unused_variables(self):
        """Cached allow-global-unused-variables option (resolved lazily)."""
        return get_global_option(self, "allow-global-unused-variables", default=True)
    @staticmethod
    def _defined_in_function_definition(node, frame):
        """Return True when *node* occurs in *frame*'s own definition line:
        argument annotations, defaults, decorators or the return annotation."""
        in_annotation_or_default_or_decorator = False
        if isinstance(frame, astroid.FunctionDef) and node.statement() is frame:
            in_annotation_or_default_or_decorator = (
                (
                    node in frame.args.annotations
                    or node in frame.args.posonlyargs_annotations
                    or node in frame.args.kwonlyargs_annotations
                    or node is frame.args.varargannotation
                    or node is frame.args.kwargannotation
                )
                or frame.args.parent_of(node)
                or (frame.decorators and frame.decorators.parent_of(node))
                or (
                    frame.returns
                    and (node is frame.returns or frame.returns.parent_of(node))
                )
            )
        return in_annotation_or_default_or_decorator
    @staticmethod
    def _in_lambda_or_comprehension_body(
        node: astroid.node_classes.NodeNG, frame: astroid.node_classes.NodeNG
    ) -> bool:
        """return True if node within a lambda/comprehension body (or similar) and thus should not have access to class attributes in frame"""
        # Walk upward from *node* until we reach *frame* or cross a
        # lambda/comprehension boundary.
        child = node
        parent = node.parent
        while parent is not None:
            if parent is frame:
                return False
            if isinstance(parent, astroid.Lambda) and child is not parent.args:
                # Body of lambda should not have access to class attributes.
                return True
            if (
                isinstance(parent, astroid.node_classes.Comprehension)
                and child is not parent.iter
            ):
                # Only iter of list/set/dict/generator comprehension should have access.
                return True
            if isinstance(parent, astroid.scoped_nodes.ComprehensionScope) and not (
                parent.generators and child is parent.generators[0]
            ):
                # Body of list/set/dict/generator comprehension should not have access to class attributes.
                # Furthermore, only the first generator (if multiple) in comprehension should have access.
                return True
            child = parent
            parent = parent.parent
        return False
    @staticmethod
    def _is_variable_violation(
        node,
        name,
        defnode,
        stmt,
        defstmt,
        frame,
        defframe,
        base_scope_type,
        recursive_klass,
    ):
        """Decide whether the use of *name* at *node* is a potential
        used-before-assignment / undefined-variable situation.

        Returns a tuple ``(maybee0601, annotation_return, use_outer_definition)``
        where *maybee0601* is the main verdict, *annotation_return* marks the
        function-return-annotation special case, and *use_outer_definition*
        tells the caller to fall back to an outer-scope definition.
        """
        # pylint: disable=too-many-nested-blocks
        # node: Node to check for violation
        # name: name of node to check violation for
        # frame: Scope of statement of node
        # base_scope_type: local scope type
        maybee0601 = True
        annotation_return = False
        use_outer_definition = False
        if frame is not defframe:
            maybee0601 = _detect_global_scope(node, frame, defframe)
        elif defframe.parent is None:
            # we are at the module level, check the name is not
            # defined in builtins
            if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
                maybee0601 = False
        else:
            # we are in a local scope, check the name is not
            # defined in global or builtin scope
            # skip this lookup if name is assigned later in function scope/lambda
            # Note: the node.frame() is not the same as the `frame` argument which is
            # equivalent to frame.statement().scope()
            forbid_lookup = (
                isinstance(frame, astroid.FunctionDef)
                or isinstance(node.frame(), astroid.Lambda)
            ) and _assigned_locally(node)
            if not forbid_lookup and defframe.root().lookup(name)[1]:
                maybee0601 = False
                use_outer_definition = stmt == defstmt and not isinstance(
                    defnode, astroid.node_classes.Comprehension
                )
            # check if we have a nonlocal
            elif name in defframe.locals:
                maybee0601 = not any(
                    isinstance(child, astroid.Nonlocal) and name in child.names
                    for child in defframe.get_children()
                )
        if (
            base_scope_type == "lambda"
            and isinstance(frame, astroid.ClassDef)
            and name in frame.locals
        ):
            # This rule verifies that if the definition node of the
            # checked name is an Arguments node and if the name
            # is used as a default value in the arguments defaults
            # and the actual definition of the variable label
            # is happening before the Arguments definition.
            #
            # bar = None
            # foo = lambda bar=bar: bar
            #
            # In this case, maybee0601 should be False, otherwise
            # it should be True.
            maybee0601 = not (
                isinstance(defnode, astroid.Arguments)
                and node in defnode.defaults
                and frame.locals[name][0].fromlineno < defstmt.fromlineno
            )
        elif isinstance(defframe, astroid.ClassDef) and isinstance(
            frame, astroid.FunctionDef
        ):
            # Special rule for function return annotations,
            # which uses the same name as the class where
            # the function lives.
            if node is frame.returns and defframe.parent_of(frame.returns):
                maybee0601 = annotation_return = True
            if (
                maybee0601
                and defframe.name in defframe.locals
                and defframe.locals[name][0].lineno < frame.lineno
            ):
                # Detect class assignments with the same
                # name as the class. In this case, no warning
                # should be raised.
                maybee0601 = False
            if isinstance(node.parent, astroid.Arguments):
                maybee0601 = stmt.fromlineno <= defstmt.fromlineno
        elif recursive_klass:
            maybee0601 = True
        else:
            maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
            if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
                if (
                    isinstance(defframe, astroid.FunctionDef)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is not defstmt
                ):
                    # Single statement function, with the statement on the
                    # same line as the function definition
                    maybee0601 = False
                elif (
                    isinstance(
                        defstmt,
                        (
                            astroid.Assign,
                            astroid.AnnAssign,
                            astroid.AugAssign,
                            astroid.Expr,
                        ),
                    )
                    and isinstance(defstmt.value, astroid.IfExp)
                    and frame is defframe
                    and defframe.parent_of(node)
                    and stmt is defstmt
                ):
                    # Single statement if, with assignment expression on same
                    # line as assignment
                    # x = b if (b := True) else False
                    maybee0601 = False
                elif (
                    isinstance(  # pylint: disable=too-many-boolean-expressions
                        defnode, astroid.NamedExpr
                    )
                    and frame is defframe
                    and defframe.parent_of(stmt)
                    and stmt is defstmt
                    and (
                        (
                            defnode.lineno == node.lineno
                            and defnode.col_offset < node.col_offset
                        )
                        or (defnode.lineno < node.lineno)
                        or (
                            # Issue in the `ast` module until py39
                            # Nodes in a multiline string have the same lineno
                            # Could be false-positive without check
                            not PY39_PLUS
                            and defnode.lineno == node.lineno
                            and isinstance(
                                defstmt,
                                (
                                    astroid.Assign,
                                    astroid.AnnAssign,
                                    astroid.AugAssign,
                                    astroid.Return,
                                ),
                            )
                            and isinstance(defstmt.value, astroid.JoinedStr)
                        )
                    )
                ):
                    # Expressions, with assignment expressions
                    # Use only after assignment
                    # b = (c := 2) and c
                    maybee0601 = False
        # Look for type checking definitions inside a type checking guard.
        if isinstance(defstmt, (astroid.Import, astroid.ImportFrom)):
            defstmt_parent = defstmt.parent
            if (
                isinstance(defstmt_parent, astroid.If)
                and defstmt_parent.test.as_string() in TYPING_TYPE_CHECKS_GUARDS
            ):
                # Exempt those definitions that are used inside the type checking
                # guard or that are defined in both type checking guard branches.
                used_in_branch = defstmt_parent.parent_of(node)
                defined_in_or_else = False
                for definition in defstmt_parent.orelse:
                    if isinstance(definition, astroid.Assign):
                        defined_in_or_else = any(
                            target.name == name for target in definition.targets
                        )
                        if defined_in_or_else:
                            break
                if not used_in_branch and not defined_in_or_else:
                    maybee0601 = True
        return maybee0601, annotation_return, use_outer_definition
    def _ignore_class_scope(self, node):
        """
        Return True if the node is in a local class scope, as an assignment.
        :param node: Node considered
        :type node: astroid.Node
        :return: True if the node is in a local class scope, as an assignment. False otherwise.
        :rtype: bool
        """
        # Detect if we are in a local class scope, as an assignment.
        # For example, the following is fair game.
        #
        # class A:
        #    b = 1
        #    c = lambda b=b: b * b
        #
        # class B:
        #    tp = 1
        #    def func(self, arg: tp):
        #        ...
        # class C:
        #    tp = 2
        #    def func(self, arg=tp):
        #        ...
        # class C:
        #    class Tp:
        #        pass
        #    class D(Tp):
        #        ...
        name = node.name
        frame = node.statement().scope()
        in_annotation_or_default_or_decorator = self._defined_in_function_definition(
            node, frame
        )
        in_ancestor_list = utils.is_ancestor_name(frame, node)
        # Names used in annotations/defaults/decorators (or base-class lists)
        # resolve in the enclosing scope, not in the class body itself.
        if in_annotation_or_default_or_decorator or in_ancestor_list:
            frame_locals = frame.parent.scope().locals
        else:
            frame_locals = frame.locals
        return not (
            (
                isinstance(frame, astroid.ClassDef)
                or in_annotation_or_default_or_decorator
            )
            and not self._in_lambda_or_comprehension_body(node, frame)
            and name in frame_locals
        )
    def _loopvar_name(self, node, name):
        """Emit undefined-loop-variable when *name* used at *node* is only
        bound as a loop/comprehension variable outside the current statement."""
        # filter variables according to node's scope
        if not self.linter.is_message_enabled("undefined-loop-variable"):
            return
        astmts = [stmt for stmt in node.lookup(name)[1] if hasattr(stmt, "assign_type")]
        # If this variable usage exists inside a function definition
        # that exists in the same loop,
        # the usage is safe because the function will not be defined either if
        # the variable is not defined.
        scope = node.scope()
        if isinstance(scope, astroid.FunctionDef) and any(
            asmt.statement().parent_of(scope) for asmt in astmts
        ):
            return
        # filter variables according their respective scope test is_statement
        # and parent to avoid #74747. This is not a total fix, which would
        # introduce a mechanism similar to special attribute lookup in
        # modules. Also, in order to get correct inference in this case, the
        # scope lookup rules would need to be changed to return the initial
        # assignment (which does not exist in code per se) as well as any later
        # modifications.
        if (
            not astmts
            or (astmts[0].is_statement or astmts[0].parent)
            and astmts[0].statement().parent_of(node)
        ):
            _astmts = []
        else:
            _astmts = astmts[:1]
        # Drop assignments nested under an earlier one, except in a for/else.
        for i, stmt in enumerate(astmts[1:]):
            if astmts[i].statement().parent_of(stmt) and not in_for_else_branch(
                astmts[i].statement(), stmt
            ):
                continue
            _astmts.append(stmt)
        astmts = _astmts
        if len(astmts) != 1:
            return
        assign = astmts[0].assign_type()
        if not (
            isinstance(
                assign, (astroid.For, astroid.Comprehension, astroid.GeneratorExp)
            )
            and assign.statement() is not node.statement()
        ):
            return
        # For functions we can do more by inferring the length of the itered object
        if not isinstance(assign, astroid.For):
            self.add_message("undefined-loop-variable", args=name, node=node)
            return
        try:
            inferred = next(assign.iter.infer())
        except astroid.InferenceError:
            self.add_message("undefined-loop-variable", args=name, node=node)
        else:
            if (
                isinstance(inferred, astroid.Instance)
                and inferred.qname() == BUILTIN_RANGE
            ):
                # Consider range() objects safe, even if they might not yield any results.
                return
            # Consider sequences.
            sequences = (
                astroid.List,
                astroid.Tuple,
                astroid.Dict,
                astroid.Set,
                astroid.objects.FrozenSet,
            )
            if not isinstance(inferred, sequences):
                self.add_message("undefined-loop-variable", args=name, node=node)
                return
            elements = getattr(inferred, "elts", getattr(inferred, "items", []))
            if not elements:
                # Empty literal: the loop body never runs, so the name is unbound.
                self.add_message("undefined-loop-variable", args=name, node=node)
    def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
        """Emit the appropriate unused-* message for *name* defined at *stmt*
        inside the function scope *node*, unless an exemption applies."""
        # pylint: disable=too-many-branches
        # Ignore some special names specified by user configuration.
        if self._is_name_ignored(stmt, name):
            return
        # Ignore names that were added dynamically to the Function scope
        if (
            isinstance(node, astroid.FunctionDef)
            and name == "__class__"
            and len(node.locals["__class__"]) == 1
            and isinstance(node.locals["__class__"][0], astroid.ClassDef)
        ):
            return
        # Ignore names imported by the global statement.
        if isinstance(stmt, (astroid.Global, astroid.Import, astroid.ImportFrom)):
            # Detect imports, assigned to global statements.
            if global_names and _import_name_is_global(stmt, global_names):
                return
        argnames = list(
            itertools.chain(node.argnames(), [arg.name for arg in node.args.kwonlyargs])
        )
        # Care about functions with unknown argument (builtins)
        if name in argnames:
            self._check_unused_arguments(name, node, stmt, argnames)
        else:
            if stmt.parent and isinstance(
                stmt.parent, (astroid.Assign, astroid.AnnAssign)
            ):
                if name in nonlocal_names:
                    return
            qname = asname = None
            if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
                # Need the complete name, which we don't have in .locals.
                if len(stmt.names) > 1:
                    import_names = next(
                        (names for names in stmt.names if name in names), None
                    )
                else:
                    import_names = stmt.names[0]
                if import_names:
                    qname, asname = import_names
                name = asname or qname
            if _has_locals_call_after_node(stmt, node.scope()):
                # locals() may expose the name, so downgrade the certainty.
                message_name = "possibly-unused-variable"
            else:
                if isinstance(stmt, astroid.Import):
                    if asname is not None:
                        msg = f"{qname} imported as {asname}"
                    else:
                        msg = "import %s" % name
                    self.add_message("unused-import", args=msg, node=stmt)
                    return
                if isinstance(stmt, astroid.ImportFrom):
                    if asname is not None:
                        msg = f"{qname} imported from {stmt.modname} as {asname}"
                    else:
                        msg = f"{name} imported from {stmt.modname}"
                    self.add_message("unused-import", args=msg, node=stmt)
                    return
                message_name = "unused-variable"
            if isinstance(stmt, astroid.FunctionDef) and stmt.decorators:
                # Decorated functions may be registered as side effects.
                return
            # Don't check function stubs created only for type information
            if utils.is_overload_stub(node):
                return
            self.add_message(message_name, args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (
isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)
or isinstance(stmt, astroid.Arguments)
):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
    def _check_unused_arguments(self, name, node, stmt, argnames):
        """Emit unused-argument for *name* unless the function is exempt
        (self/cls, overrides, special methods, callbacks, overloads,
        singledispatch registrations, protocol classes)."""
        is_method = node.is_method()
        klass = node.parent.frame()
        if is_method and isinstance(klass, astroid.ClassDef):
            # Unknown bases make the override analysis unreliable.
            confidence = (
                INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
            )
        else:
            confidence = HIGH
        if is_method:
            # Don't warn for the first argument of a (non static) method
            if node.type != "staticmethod" and name == argnames[0]:
                return
            # Don't warn for argument of an overridden method
            overridden = overridden_method(klass, node.name)
            if overridden is not None and name in overridden.argnames():
                return
            if node.name in utils.PYMETHODS and node.name not in (
                "__init__",
                "__new__",
            ):
                return
        # Don't check callback arguments
        if any(
            node.name.startswith(cb) or node.name.endswith(cb)
            for cb in self.config.callbacks
        ):
            return
        # Don't check arguments of singledispatch.register function.
        if utils.is_registered_in_singledispatch_function(node):
            return
        # Don't check function stubs created only for type information
        if utils.is_overload_stub(node):
            return
        # Don't check protocol classes
        if utils.is_protocol_class(klass):
            return
        self.add_message("unused-argument", args=name, node=stmt, confidence=confidence)
    def _check_late_binding_closure(self, node, assignment_node):
        """Emit cell-var-from-loop when a closure captures a loop variable
        (which is bound late, at call time, not at definition time)."""
        if not self.linter.is_message_enabled("cell-var-from-loop"):
            return
        def _is_direct_lambda_call():
            # lambda defined and called in the same expression: binding is immediate.
            return (
                isinstance(node_scope.parent, astroid.Call)
                and node_scope.parent.func is node_scope
            )
        node_scope = node.scope()
        if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
            return
        if isinstance(node.parent, astroid.Arguments):
            return
        if isinstance(assignment_node, astroid.Comprehension):
            if assignment_node.parent.parent_of(node.scope()):
                self.add_message("cell-var-from-loop", node=node, args=node.name)
        else:
            assign_scope = assignment_node.scope()
            maybe_for = assignment_node
            while maybe_for and not isinstance(maybe_for, astroid.For):
                if maybe_for is assign_scope:
                    break
                maybe_for = maybe_for.parent
            else:
                # while/else: only reached when no scope boundary was hit.
                if (
                    maybe_for
                    and maybe_for.parent_of(node_scope)
                    and not _is_direct_lambda_call()
                    and not isinstance(node_scope.statement(), astroid.Return)
                ):
                    self.add_message("cell-var-from-loop", node=node, args=node.name)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
def _allowed_redefined_builtin(self, name):
return name in self.config.allowed_redefined_builtins
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
:return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index - 1 :: -1]:
if _consumer.scope_type == "function" and node.name in _consumer.to_consume:
return True
return False
    def _store_type_annotation_node(self, type_annotation):
        """Given a type annotation, store all the name nodes it refers to"""
        if isinstance(type_annotation, astroid.Name):
            self._type_annotation_names.append(type_annotation.name)
            return
        # Beyond plain names, only subscripted annotations (e.g. List[int])
        # are examined.
        if not isinstance(type_annotation, astroid.Subscript):
            return
        if (
            isinstance(type_annotation.value, astroid.Attribute)
            and isinstance(type_annotation.value.expr, astroid.Name)
            and type_annotation.value.expr.name == TYPING_MODULE
        ):
            # typing.X[...]: record the module name itself as used.
            self._type_annotation_names.append(TYPING_MODULE)
            return
        self._type_annotation_names.extend(
            annotation.name
            for annotation in type_annotation.nodes_of_class(astroid.Name)
        )
def _store_type_annotation_names(self, node):
type_annotation = node.type_annotation
if not type_annotation:
return
self._store_type_annotation_node(node.type_annotation)
    def _check_self_cls_assign(self, node):
        """Check that self/cls don't get assigned"""
        assign_names = {
            target.name
            for target in node.targets
            if isinstance(target, astroid.AssignName)
        }
        scope = node.scope()
        nonlocals_with_same_name = any(
            child
            for child in scope.body
            if isinstance(child, astroid.Nonlocal) and assign_names & set(child.names)
        )
        if nonlocals_with_same_name:
            # The name is rebound via ``nonlocal``: check against the outer scope.
            scope = node.scope().parent.scope()
        # Only non-static methods have a self/cls first argument.
        if not (
            isinstance(scope, astroid.scoped_nodes.FunctionDef)
            and scope.is_method()
            and "builtins.staticmethod" not in scope.decoratornames()
        ):
            return
        argument_names = scope.argnames()
        if not argument_names:
            return
        self_cls_name = argument_names[0]
        target_assign_names = (
            target.name
            for target in node.targets
            if isinstance(target, astroid.node_classes.AssignName)
        )
        if self_cls_name in target_assign_names:
            self.add_message("self-cls-assignment", node=node, args=(self_cls_name,))
    def _check_unpacking(self, inferred, node, targets):
        """Check for unbalanced tuple unpacking
        and unpacking non sequences.
        """
        if utils.is_inside_abstract_class(node):
            return
        if utils.is_comprehension(node):
            return
        if inferred is astroid.Uninferable:
            return
        if (
            isinstance(inferred.parent, astroid.Arguments)
            and isinstance(node.value, astroid.Name)
            and node.value.name == inferred.parent.vararg
        ):
            # Variable-length argument, we can't determine the length.
            return
        if isinstance(inferred, (astroid.Tuple, astroid.List)):
            # attempt to check unpacking is properly balanced
            values = inferred.itered()
            if len(targets) != len(values):
                # Check if we have starred nodes.
                if any(isinstance(target, astroid.Starred) for target in targets):
                    return
                self.add_message(
                    "unbalanced-tuple-unpacking",
                    node=node,
                    args=(
                        _get_unpacking_extra_info(node, inferred),
                        len(targets),
                        len(values),
                    ),
                )
        # attempt to check unpacking may be possible (ie RHS is iterable)
        elif not utils.is_iterable(inferred):
            self.add_message(
                "unpacking-non-sequence",
                node=node,
                args=(_get_unpacking_extra_info(node, inferred),),
            )
    def _check_module_attrs(self, node, module, module_names):
        """check that module_names (list of string) are accessible through the
        given module
        if the latest access name corresponds to a module, return it
        """
        while module_names:
            name = module_names.pop(0)
            if name == "__dict__":
                # Anything can live in __dict__; stop resolving here.
                module = None
                break
            try:
                module = next(module.getattr(name)[0].infer())
                if module is astroid.Uninferable:
                    return None
            except astroid.NotFoundError:
                if module.name in self._ignored_modules:
                    return None
                self.add_message(
                    "no-name-in-module", args=(name, module.name), node=node
                )
                return None
            except astroid.InferenceError:
                return None
        if module_names:
            # Leftover components could not be resolved against a module.
            modname = module.name if module else "__dict__"
            self.add_message(
                "no-name-in-module", node=node, args=(".".join(module_names), modname)
            )
            return None
        if isinstance(module, astroid.Module):
            return module
        return None
    def _check_all(self, node, not_consumed):
        """Verify each entry of the module's __all__ is a string resolving to
        a local name (or an importable submodule); consume matching entries."""
        assigned = next(node.igetattr("__all__"))
        if assigned is astroid.Uninferable:
            return
        for elt in getattr(assigned, "elts", ()):
            try:
                elt_name = next(elt.infer())
            except astroid.InferenceError:
                continue
            if elt_name is astroid.Uninferable:
                continue
            if not elt_name.parent:
                continue
            if not isinstance(elt_name, astroid.Const) or not isinstance(
                elt_name.value, str
            ):
                self.add_message("invalid-all-object", args=elt.as_string(), node=elt)
                continue
            elt_name = elt_name.value
            # If elt is in not_consumed, remove it from not_consumed
            if elt_name in not_consumed:
                del not_consumed[elt_name]
                continue
            if elt_name not in node.locals:
                if not node.package:
                    self.add_message(
                        "undefined-all-variable", args=(elt_name,), node=elt
                    )
                else:
                    # Packages may legitimately re-export submodules in __all__.
                    basename = os.path.splitext(node.file)[0]
                    if os.path.basename(basename) == "__init__":
                        name = node.name + "." + elt_name
                        try:
                            astroid.modutils.file_from_modpath(name.split("."))
                        except ImportError:
                            self.add_message(
                                "undefined-all-variable", args=(elt_name,), node=elt
                            )
                        except SyntaxError:
                            # don't yield a syntax-error warning,
                            # because it will be later yielded
                            # when the file will be checked
                            pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in not_consumed.items():
for node in nodes:
self.add_message("unused-variable", args=(name,), node=node)
    def _check_imports(self, not_consumed):
        """Emit unused-import / unused-wildcard-import for imported names left
        in *not_consumed* at module level, then drop the consumption state."""
        local_names = _fix_dot_imports(not_consumed)
        checked = set()
        for name, stmt in local_names:
            for imports in stmt.names:
                real_name = imported_name = imports[0]
                if imported_name == "*":
                    real_name = name
                as_name = imports[1]
                if real_name in checked:
                    continue
                if name not in (real_name, as_name):
                    continue
                checked.add(real_name)
                is_type_annotation_import = (
                    imported_name in self._type_annotation_names
                    or as_name in self._type_annotation_names
                )
                if isinstance(stmt, astroid.Import) or (
                    isinstance(stmt, astroid.ImportFrom) and not stmt.modname
                ):
                    if isinstance(stmt, astroid.ImportFrom) and SPECIAL_OBJ.search(
                        imported_name
                    ):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue
                    if is_type_annotation_import:
                        # Most likely a typing import if it wasn't used so far.
                        continue
                    if as_name == "_":
                        # Conventional "don't care" alias (e.g. gettext).
                        continue
                    if as_name is None:
                        msg = "import %s" % imported_name
                    else:
                        msg = f"{imported_name} imported as {as_name}"
                    if not _is_type_checking_import(stmt):
                        self.add_message("unused-import", args=msg, node=stmt)
                elif isinstance(stmt, astroid.ImportFrom) and stmt.modname != FUTURE:
                    if SPECIAL_OBJ.search(imported_name):
                        # Filter special objects (__doc__, __all__) etc.,
                        # because they can be imported for exporting.
                        continue
                    if _is_from_future_import(stmt, name):
                        # Check if the name is in fact loaded from a
                        # __future__ import in another module.
                        continue
                    if is_type_annotation_import:
                        # Most likely a typing import if it wasn't used so far.
                        continue
                    if imported_name == "*":
                        self.add_message("unused-wildcard-import", args=name, node=stmt)
                    else:
                        if as_name is None:
                            msg = f"{imported_name} imported from {stmt.modname}"
                        else:
                            fields = (imported_name, stmt.modname, as_name)
                            msg = "%s imported from %s as %s" % fields
                        if not _is_type_checking_import(stmt):
                            self.add_message("unused-import", args=msg, node=stmt)
        # All leftover imports are reported; the consumption state is done with.
        del self._to_consume
    def _check_metaclasses(self, node):
        """ Update consumption analysis for metaclasses. """
        consumed = []  # [(scope_locals, consumed_key)]
        for child_node in node.get_children():
            if isinstance(child_node, astroid.ClassDef):
                consumed.extend(self._check_classdef_metaclasses(child_node, node))
        # Pop the consumed items, in order to avoid having
        # unused-import and unused-variable false positives
        for scope_locals, name in consumed:
            scope_locals.pop(name, None)
    def _check_classdef_metaclasses(self, klass, parent_node):
        """Mark the metaclass name used by *klass* as consumed; emit
        undefined-variable when it cannot be resolved anywhere.

        Returns the list of (scope_locals, name) pairs that were consumed.
        """
        if not klass._metaclass:
            # Skip if this class doesn't use explicitly a metaclass, but inherits it from ancestors
            return []
        consumed = []  # [(scope_locals, consumed_key)]
        metaclass = klass.metaclass()
        name = None
        if isinstance(klass._metaclass, astroid.Name):
            name = klass._metaclass.name
        elif isinstance(klass._metaclass, astroid.Attribute) and klass._metaclass.expr:
            # Dotted reference (a.b.Meta): walk down to the root name.
            attr = klass._metaclass.expr
            while not isinstance(attr, astroid.Name):
                attr = attr.expr
            name = attr.name
        elif metaclass:
            # if it uses a `metaclass=module.Class` (resolved, but not by name)
            name = metaclass.root().name
        found = None
        name = METACLASS_NAME_TRANSFORMS.get(name, name)
        if name:
            # check enclosing scopes starting from most local
            for scope_locals, _, _ in self._to_consume[::-1]:
                found = scope_locals.get(name)
                if found:
                    consumed.append((scope_locals, name))
                    break
        if found is None and not metaclass:
            name = None
            if isinstance(klass._metaclass, astroid.Name):
                name = klass._metaclass.name
            elif (
                isinstance(klass._metaclass, astroid.Attribute)
                and klass._metaclass.expr
            ):
                name = klass._metaclass.expr.name
            if name is not None:
                if not (
                    name in astroid.Module.scope_attrs
                    or utils.is_builtin(name)
                    or name in self.config.additional_builtins
                    or name in parent_node.locals
                ):
                    self.add_message("undefined-variable", node=klass, args=(name,))
        return consumed
def register(linter):
    """Required method to auto-register this checker with *linter*."""
    linter.register_checker(VariablesChecker(linter))
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/variables.py
|
Python
|
mit
| 83,499
|
[
"VisIt"
] |
6765080c63ed5be9e2871abfeac9b891f885f938696b420c1b2fc93fad54d286
|
#
# Copyright (c) 2008--2018 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import time
import string
import rpm
import sys
try:
# python 2
import xmlrpclib
except ImportError:
# python3
import xmlrpc.client as xmlrpclib
from spacewalk.common.usix import IntType
# common module
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhnCache, rhnFlags
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnTranslate import _
# local module
import rhnUser
import rhnSQL
import rhnLib
class NoBaseChannelError(Exception):
    """Raised when no base channel can be determined (raise sites live elsewhere)."""
    pass
class InvalidServerArchError(Exception):
    """Raised for an invalid or unknown server architecture."""
    pass
class BaseChannelDeniedError(Exception):
    """Raised when access to a base channel is denied."""
    pass
class ChannelException(Exception):
    """Base class for channel errors that carry the offending channel's id."""
    def __init__(self, channel_id=None, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        # Id of the channel this error refers to; ``channel`` is a slot that
        # callers may fill in later (always initialized to None here).
        self.channel_id = channel_id
        self.channel = None
class ModifiedError(ChannelException):
    """Raised when the backing row was concurrently modified (see BaseDatabaseObject._save)."""
    pass
class IncompatibilityError(Exception):
    """Raised on an incompatibility (name-derived; raise sites live elsewhere)."""
    pass
class InvalidDataError(Exception):
    """Raised for invalid field data (e.g. unknown org login in set_org_id)."""
    pass
class ChannelNotFoundError(Exception):
    """Raised when a channel cannot be found."""
    pass
class NoToolsChannel(Exception):
    """Raised when a suitable tools channel is missing (name-derived)."""
    pass
class NoChildChannels(Exception):
    """Raised when a channel has no child channels (name-derived)."""
    pass
class InvalidChannel(Exception):
    """Raised for an invalid channel (name-derived)."""
    pass
class BaseDatabaseObject:
    """Thin wrapper around an rhnSQL row with generated accessors.

    Unknown attributes named ``get_<field>`` / ``set_<field>`` resolve to
    callables that read / write ``self._row[<field>]``.
    """
    def __init__(self):
        # Backing rhnSQL.Row; lazily created by _new_row() in subclasses.
        self._row = None
    def __getattr__(self, name):
        # Map get_<field>/set_<field> onto the generic accessors.
        for prefix, handler in (("get_", self._get), ("set_", self._set)):
            if name.startswith(prefix):
                return rhnLib.CallableObj(name[len(prefix):], handler)
        raise AttributeError(name)
    def _set(self, name, val):
        self._new_row()
        self._row[name] = val
    def _get(self, name):
        return self._row[name]
    def _new_row(self):
        # Subclasses decide how a fresh row is created.
        raise NotImplementedError()
    def save(self, with_updates=1):
        # Roll back the transaction on any failure, then re-raise.
        try:
            return self._save(with_updates=with_updates)
        except:
            rhnSQL.rollback()
            raise
    def _save(self, with_updates=1):
        try:
            self._row.save(with_updates=with_updates)
        except rhnSQL.ModifiedRowError:
            # Preserve the original traceback while signalling our own type.
            raise_with_tb(ModifiedError(self._row['id']), sys.exc_info()[2])
class BaseChannelObject(BaseDatabaseObject):
    """Base for channel-like rows; loadable by label, by id or from a dict
    of generic fields (subclasses define the table and field list)."""
    # Subclasses fill these in (table name, id sequence, exposed fields).
    _table_name = None
    _sequence_name = None
    _generic_fields = []
    def load_by_label(self, label):
        """Re-initialize and load the row keyed by its label."""
        self.__init__()
        self._row = rhnSQL.Row(self._table_name, 'label')
        self._row.load(label)
        return self
    def load_by_id(self, obj_id):
        """Re-initialize and load the row keyed by its numeric id."""
        self.__init__()
        self._row = rhnSQL.Row(self._table_name, 'id')
        self._row.load(obj_id)
        return self
    def load_from_dict(self, dict):
        """Re-initialize; populate generic fields from *dict* via the
        generated set_* accessors, then hand leftovers to _load_rest()."""
        # Re-init
        self.__init__()
        for f in self._generic_fields:
            method = getattr(self, 'set_' + f)
            method(dict.get(f))
        self._load_rest(dict)
        return self
    def _load_rest(self, dict):
        # Hook for subclasses to consume non-generic keys of *dict*.
        pass
    def exists(self):
        """Return truthy when a real backing row was loaded."""
        if not self._row:
            return 0
        return self._row.real
    def get_org_id(self):
        """Return the org's login when resolvable, else the raw org_id (or None)."""
        org_id = self._row['org_id']
        if org_id is None:
            return None
        row = self._lookup_org_id(org_id)
        if row.real:
            return row['login']
        return org_id
    def set_org_id(self, val):
        """Set org_id from a numeric id, None, or an org login name.

        Raises InvalidDataError when the login does not exist.
        """
        self._new_row()
        if val is None or isinstance(val, IntType):
            self._row['org_id'] = val
            return
        row = self._lookup_org_by_login(val)
        if not row.real:
            raise InvalidDataError("No such org", val)
        self._row['org_id'] = row['org_id']
    def _lookup_org_id(self, org_id):
        # web_contact row keyed by org_id.
        row = rhnSQL.Row('web_contact', 'org_id')
        row.load(org_id)
        return row
    def _lookup_org_by_login(self, login):
        # web_contact row keyed by login.
        row = rhnSQL.Row('web_contact', 'login')
        row.load(login)
        return row
    def _lookup_channel_family_by_id(self, channel_family_id):
        row = rhnSQL.Row('rhnChannelFamily', 'id')
        row.load(channel_family_id)
        return row
    def _lookup_channel_family_by_label(self, channel_family):
        row = rhnSQL.Row('rhnChannelFamily', 'label')
        row.load(channel_family)
        return row
    def _new_row(self):
        """Lazily create a fresh row keyed by the next id from the sequence."""
        if self._row is None:
            self._row = rhnSQL.Row(self._table_name, 'id')
            channel_id = rhnSQL.Sequence(self._sequence_name).next()
            self._row.create(channel_id)
    def as_dict(self):
        """Return all generic fields as a dict via the generated get_* accessors."""
        ret = {}
        for f in self._generic_fields:
            method = getattr(self, 'get_' + f)
            val = method()
            ret[f] = val
        return ret
# Channel creation
class Channel(BaseChannelObject):
    """A software channel: an rhnChannel row plus its channel family
    memberships (rhnChannelFamilyMembers) and its dist-channel mappings
    (rhnDistChannelMap).
    """
    _table_name = 'rhnChannel'
    _sequence_name = 'rhn_channel_id_seq'
    _generic_fields = ['label', 'name', 'summary', 'description', 'basedir',
                       'org_id', 'gpg_key_url', 'gpg_key_id', 'gpg_key_fp', 'end_of_life',
                       'channel_families', 'channel_arch', ]

    def __init__(self):
        BaseChannelObject.__init__(self)
        # Channel family ids this channel is a member of.
        self._channel_families = []
        # release -> os mapping backing rhnDistChannelMap entries.
        self._dists = {}
        # Parent channel's arch id; checked for compatibility at save time.
        self._parent_channel_arch = None

    def load_by_label(self, label):
        # Load the base row, then the related families and dist maps.
        BaseChannelObject.load_by_label(self, label)
        self._load_channel_families()
        self._load_dists()
        return self

    def load_by_id(self, label):
        # NOTE: the parameter is really a channel id despite its name.
        BaseChannelObject.load_by_id(self, label)
        self._load_channel_families()
        self._load_dists()
        return self

    def _load_rest(self, dict):
        # Consume the non-generic 'dists' key: a list of
        # {'release': ..., 'os': ...} hashes.
        dists = dict.get('dists')
        if not dists:
            return
        for dist in dists:
            release = dist.get('release')
            os = dist.get('os')
            self._dists[release] = os

    _query_get_db_channel_families = rhnSQL.Statement("""
        select channel_family_id
        from rhnChannelFamilyMembers
        where channel_id = :channel_id
    """)

    def _get_db_channel_families(self, channel_id):
        # Channel family ids currently recorded in the DB for this channel.
        if channel_id is None:
            return []
        h = rhnSQL.prepare(self._query_get_db_channel_families)
        h.execute(channel_id=channel_id)
        return [x['channel_family_id'] for x in h.fetchall_dict() or []]

    def _load_channel_families(self):
        channel_id = self._row.get('id')
        self._channel_families = self._get_db_channel_families(channel_id)
        return 1

    def _load_dists(self):
        channel_id = self._row.get('id')
        dists = self._get_db_dists(channel_id)
        self.set_dists(dists)

    _query_get_db_dists = rhnSQL.Statement("""
        select os, release
        from rhnDistChannelMap
        where channel_id = :channel_id
        and org_id is null
    """)

    def _get_db_dists(self, channel_id):
        # Vendor (org-less) dist maps currently recorded for this channel.
        if channel_id is None:
            return []
        h = rhnSQL.prepare(self._query_get_db_dists)
        h.execute(channel_id=channel_id)
        return h.fetchall_dict() or []

    # Setters
    def set_channel_arch(self, val):
        # Accepts 'i386'-style shorthand; raises InvalidDataError when the
        # (sanitized) arch label is unknown.
        self._new_row()
        arch = self._sanitize_arch(val)
        row = self._lookup_channel_arch(arch)
        if not row.real:
            raise InvalidDataError("No such architecture", arch)
        self._row['channel_arch_id'] = row['id']

    def _sanitize_arch(self, arch):
        # Normalize to the 'channel-<arch>' label convention.
        if arch == 'i386':
            return 'channel-ia32'
        p = 'channel-'
        if arch[:len(p)] != p:
            return p + arch
        return arch

    def set_parent_channel(self, val):
        # val is a channel label (or None); remembers the parent's arch id
        # so _save can verify arch compatibility.
        self._new_row()
        if val is None:
            self._row['parent_channel'] = None
            return
        row = self._lookup_channel_by_label(val)
        if not row.real:
            raise InvalidDataError("Invalid parent channel", val)
        self._row['parent_channel'] = row['id']
        self._parent_channel_arch = row['channel_arch_id']

    def set_channel_families(self, val):
        # Replace the family membership list with the given labels.
        self._new_row()
        self._channel_families = []
        for cf_label in val:
            self.add_channel_family(cf_label)

    def set_end_of_life(self, val):
        # val is a 'YYYY-MM-DD' string or None.
        self._new_row()
        if val is None:
            self._row['end_of_life'] = None
            return
        t = time.strptime(val, "%Y-%m-%d")
        seconds = time.mktime(t)
        t = rhnSQL.TimestampFromTicks(seconds)
        self._row['end_of_life'] = t

    def add_channel_family(self, name):
        # Append one family (by label); raises InvalidDataError if unknown.
        self._new_row()
        cf = self._lookup_channel_family_by_label(name)
        if not cf.real:
            raise InvalidDataError("Invalid channel family", name)
        self._channel_families.append(cf['id'])

    def add_dist(self, release, os=None):
        # Default OS name when none supplied.
        if os is None:
            os = 'Red Hat Linux'
        self._dists[release] = os

    def set_dists(self, val):
        # Replace all dist maps with the given list of
        # {'release': ..., 'os': ...} hashes.
        self._dists.clear()
        for h in val:
            release = h['release']
            os = h['os']
            self.add_dist(release, os)

    # Getters
    def get_parent_channel(self):
        # Parent channel's label, or None for a base channel.
        pc_id = self._row['parent_channel']
        if pc_id is None:
            return None
        return self._lookup_channel_by_id(pc_id)['label']

    def get_channel_families(self):
        # Family labels; ids that no longer resolve are silently skipped.
        cf_labels = []
        for cf_id in self._channel_families:
            row = self._lookup_channel_family_by_id(cf_id)
            if row.real:
                cf_labels.append(row['label'])
        return cf_labels

    def get_channel_arch(self):
        channel_arch_id = self._row['channel_arch_id']
        row = self._lookup_channel_arch_by_id(channel_arch_id)
        assert row.real
        return row['label']

    def get_end_of_life(self):
        # Format the DB date object as 'YYYY-MM-DD HH:MM:SS', or None.
        date_obj = self._row['end_of_life']
        if date_obj is None:
            return None
        return "%s-%02d-%02d %02d:%02d:%02d" % (
            date_obj.year, date_obj.month, date_obj.day,
            date_obj.hour, date_obj.minute, date_obj.second)

    def get_dists(self):
        # Dist maps as a list of {'release': ..., 'os': ...} hashes.
        ret = []
        for release, os in self._dists.items():
            ret.append({'release': release, 'os': os})
        return ret

    def _lookup_channel_by_id(self, channel_id):
        row = rhnSQL.Row('rhnChannel', 'id')
        row.load(channel_id)
        return row

    def _lookup_channel_by_label(self, channel):
        row = rhnSQL.Row('rhnChannel', 'label')
        row.load(channel)
        return row

    def _lookup_channel_arch(self, channel_arch):
        row = rhnSQL.Row('rhnChannelArch', 'label')
        row.load(channel_arch)
        return row

    def _lookup_channel_arch_by_id(self, channel_arch_id):
        row = rhnSQL.Row('rhnChannelArch', 'id')
        row.load(channel_arch_id)
        return row

    def _save(self, with_updates=1):
        """Persist the row plus family memberships and dist maps.

        Raises IncompatibilityError when the channel's arch does not match
        its parent channel's arch.
        """
        if self._parent_channel_arch:
            if not self._compatible_channel_arches(self._parent_channel_arch,
                                                   self._row['channel_arch_id']):
                raise IncompatibilityError("Incompatible channel arches")
        BaseChannelObject._save(self, with_updates=with_updates)
        # Save channel families now
        self._save_channel_families()
        self._save_dists()

    _query_remove_channel_families = rhnSQL.Statement("""
        delete from rhnChannelFamilyMembers
        where channel_id = :channel_id
        and channel_family_id = :channel_family_id
    """)
    _query_add_channel_families = rhnSQL.Statement("""
        insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
        values (:channel_id, :channel_family_id)
    """)

    def _save_channel_families(self):
        # Diff in-memory membership against the DB: families present only
        # in the DB are removed, ones present only in memory are added.
        channel_id = self._row['id']
        db_cfids = self._get_db_channel_families(channel_id)
        h = {}
        for db_cfid in db_cfids:
            h[db_cfid] = None
        to_add = []
        for cfid in self._channel_families:
            if cfid in h:
                del h[cfid]
                continue
            to_add.append(cfid)
        to_delete = list(h.keys())
        if to_delete:
            h = rhnSQL.prepare(self._query_remove_channel_families)
            cids = [channel_id] * len(to_delete)
            h.executemany(channel_id=cids, channel_family_id=to_delete)
        if to_add:
            h = rhnSQL.prepare(self._query_add_channel_families)
            cids = [channel_id] * len(to_add)
            h.executemany(channel_id=cids, channel_family_id=to_add)

    def _save_dists(self):
        # Three-way diff of in-memory dist maps vs the DB: remove stale
        # releases, update changed OS values, add new releases.
        channel_id = self._row['id']
        db_dists = self._get_db_dists(channel_id)
        d = self._dists.copy()
        to_add = [[], []]
        to_remove = []
        to_update = [[], []]
        for h in db_dists:
            release = h['release']
            os = h['os']
            if release not in d:
                to_remove.append(release)
                continue
            # Need to update?
            m_os = d[release]
            if m_os == os:
                # Nothing to do
                del d[release]
                continue
            to_update[0].append(release)
            to_update[1].append(os)
        # Everything else should be added
        for release, os in list(d.items()):
            to_add[0].append(release)
            to_add[1].append(os)
        self._remove_dists(to_remove)
        self._update_dists(to_update[0], to_update[1])
        self._add_dists(to_add[0], to_add[1])

    _query_add_dists = rhnSQL.Statement("""
        insert into rhnDistChannelMap
        (channel_id, channel_arch_id, release, os, org_id)
        values (:channel_id, :channel_arch_id, :release, :os, null)
    """)

    def _add_dists(self, releases, oses):
        self._modify_dists(self._query_add_dists, releases, oses)

    def _modify_dists(self, query, releases, oses):
        # Shared executemany driver for add/update/remove; oses is None for
        # removals, whose query takes no arch/os bind variables.
        if not releases:
            return
        count = len(releases)
        channel_ids = [self._row['id']] * count
        query_args = {'channel_id': channel_ids, 'release': releases}
        if oses:
            channel_arch_ids = [self._row['channel_arch_id']] * count
            query_args.update({'channel_arch_id': channel_arch_ids,
                               'os': oses})
        h = rhnSQL.prepare(query)
        h.executemany(**query_args)

    _query_update_dists = rhnSQL.Statement("""
        update rhnDistChannelMap
        set channel_arch_id = :channel_arch_id,
        os = :os
        where channel_id = :channel_id
        and release = :release
        and org_id is null
    """)

    def _update_dists(self, releases, oses):
        self._modify_dists(self._query_update_dists, releases, oses)

    _query_remove_dists = rhnSQL.Statement("""
        delete from rhnDistChannelMap
        where channel_id = :channel_id
        and release = :release
        and org_id is null
    """)

    def _remove_dists(self, releases):
        self._modify_dists(self._query_remove_dists, releases, None)

    def _compatible_channel_arches(self, parent_channel_arch, channel_arch):
        # This could get more complicated later
        return (parent_channel_arch == channel_arch)

    def as_dict(self):
        # Generic fields plus the dist maps.
        ret = BaseChannelObject.as_dict(self)
        ret['dists'] = self.get_dists()
        return ret
class ChannelFamily(BaseChannelObject):
    """A channel family (rhnChannelFamily row); all load/save behavior is
    inherited from BaseChannelObject."""
    _table_name = 'rhnChannelFamily'
    _sequence_name = 'rhn_channel_family_id_seq'
    _generic_fields = ['label', 'name', 'product_url']
def _load_by_id(query, item_object, pattern=None):
    """Run an id-listing query and return the matching objects as dicts.

    query       -- SQL returning an ``id`` column, ending in a WHERE clause
                   so an optional filter can be appended.
    item_object -- BaseChannelObject instance reused to load each row.
    pattern     -- optional SQL LIKE pattern matched against ``label``.
    """
    qargs = {}
    if pattern:
        # Leading space guards against queries that do not end with
        # trailing whitespace (the old code relied on the callers'
        # triple-quoted strings ending in a newline).
        query += " and label like :pattern"
        qargs['pattern'] = pattern
    h = rhnSQL.prepare(query)
    h.execute(**qargs)
    ret = []
    while True:
        row = h.fetchone_dict()
        if not row:
            break
        c = item_object.load_by_id(row['id'])
        ret.append(c.as_dict())
    return ret
def list_channel_families(pattern=None):
    """Return every vendor (org-less) channel family as a dict, optionally
    filtered with a SQL LIKE pattern on the label."""
    families_query = """
    select id
    from rhnChannelFamily
    where org_id is null
    """
    return _load_by_id(families_query, ChannelFamily(), pattern)
def list_channels(pattern=None):
    """Return every channel as a dict, optionally filtered with a SQL LIKE
    pattern on the label."""
    channels_query = """
    select id
    from rhnChannel
    where 1=1
    """
    return _load_by_id(channels_query, Channel(), pattern)
# makes sure there are no None values in dictionaries, etc.
def __stringify(object):
    """Recursively stringify a value for XMLRPC transport.

    None becomes '', lists/tuples/dicts are rebuilt with stringified
    members (tuples stay tuples so immutability -- and therefore
    hashability -- is preserved), and anything else is str()'ed.
    """
    if object is None:
        return ''
    # isinstance replaces the old `type(x) == type([])` checks -- same
    # behavior for the plain builtin containers this module passes in,
    # and dict/list/tuple subclasses are now recursed instead of str()'ed.
    if isinstance(object, list):
        return [__stringify(item) for item in object]
    # We need to know __stringify converts immutable types into immutable
    # types
    if isinstance(object, tuple):
        return tuple(__stringify(item) for item in object)
    if isinstance(object, dict):
        return dict((__stringify(k), __stringify(v))
                    for k, v in object.items())
    # by default, we just str() it
    return str(object)
# return the channel information
def channel_info(channel):
    """Return a stringified dict of channel attributes (arch, id,
    parent_channel, org_id, label, name, summary, description,
    last_modified) for the given channel label.

    When the label is unknown, fetchone_dict returns None and the
    stringified result is '' (falsy) -- callers test for that.
    """
    log_debug(3, channel)
    # get the channel information
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from
        rhnChannel c,
        rhnChannelArch ca
    where
        c.channel_arch_id = ca.id
    and c.label = :channel
    """)
    h.execute(channel=str(channel))
    ret = h.fetchone_dict()
    return __stringify(ret)
# return information about a base channel for a server_id
def get_base_channel(server_id, none_ok=0):
    """Return the stringified info dict of the base channel (the one with
    no parent) the server is subscribed to.

    Returns None when the server has no base channel; unless none_ok is
    set, that case is also logged as an error.
    """
    log_debug(3, server_id)
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c, rhnChannelArch ca, rhnServerChannel sc
    where sc.server_id = :server_id
    and sc.channel_id = c.id
    and c.channel_arch_id = ca.id
    and c.parent_channel is NULL
    """)
    h.execute(server_id=str(server_id))
    ret = h.fetchone_dict()
    if not ret:
        if not none_ok:
            log_error("Server not subscribed to a base channel!", server_id)
        return None
    return __stringify(ret)
def channels_for_server(server_id):
    """channel info list for all channels accessible by this server.

    list channels a server_id is subscribed to
    We DO NOT want to cache this one because we depend on getting
    accurate information and the caching would only introduce more
    overhead on an otherwise very fast query

    Raises rhnFault(8) when server_id is not convertible to an int.
    """
    log_debug(3, server_id)
    try:
        server_id = int(server_id)
    # Narrowed from a bare `except:` -- only conversion failures should
    # map to fault 8; anything else (e.g. KeyboardInterrupt) propagates.
    except (ValueError, TypeError):
        raise_with_tb(rhnFault(8, server_id), sys.exc_info()[2])  # Invalid rhnServer.id
    # XXX: need to return unsubscribed channels and a way to indicate
    # they aren't already subscribed
    # list all the channels this server is subscribed to. We also want
    # to know if any of those channels has local packages in it... A
    # local package has a org_id set.
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        c.gpg_key_url,
        case s.org_id when c.org_id then 1 else 0 end local_channel,
        TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from
        rhnChannelArch ca,
        rhnChannel c,
        rhnServerChannel sc,
        rhnServer s
    where
        c.id = sc.channel_id
    and sc.server_id = :server_id
    and s.id = :server_id
    and ca.id = c.channel_arch_id
    order by c.parent_channel nulls first
    """)
    h.execute(server_id=str(server_id))
    channels = h.fetchall_dict()
    if not channels:
        log_error("Server not subscribed to any channels", server_id)
        channels = []
    return __stringify(channels)
def getSubscribedChannels(server_id):
    """
    Format the response from channels_for_server in the way that the
    handlers expect.
    """
    formatted = []
    for info in channels_for_server(server_id):
        if 'last_modified' not in info:
            # No last_modified attribute
            # Probably an empty channel, so ignore
            continue
        entry = [info['label'], info['last_modified']]
        # isBaseChannel: "1" when there is no parent channel
        entry.append("0" if info['parent_channel'] else "1")
        # isLocalChannel
        entry.append("1" if info['local_channel'] else "0")
        formatted.append(entry)
    return formatted
def isCustomChannel(channel_id):
    """
    Input: channel_id (from DB Table rhnChannel.id)
    Returns: True if this is a custom channel
             False if this is not a custom channel

    A channel is considered custom when it belongs to an org-owned
    channel family whose label starts with "private-channel-family".
    """
    log_debug(3, channel_id)
    h = rhnSQL.prepare("""
    select
        rcf.label
    from
        rhnChannelFamily rcf,
        rhnChannelFamilyMembers rcfm
    where
        rcfm.channel_id = :channel_id
    and rcfm.channel_family_id = rcf.id
    and rcf.org_id is not null
    """)
    h.execute(channel_id=str(channel_id))
    label = h.fetchone()
    if label:
        if label[0].startswith("private-channel-family"):
            log_debug(3, channel_id, "is a custom channel")
            return True
    return False
# Fetch base channel for a given release and arch
def base_channel_for_rel_arch(release, server_arch, org_id=-1,
                              user_id=None):
    """Return the base channel info dict for a release/server-arch combo,
    as computed by the rhn_channel.base_channel_for_release_arch stored
    procedure, or None when there is none.

    Raises BaseChannelDeniedError (schema error 20263) or
    InvalidServerArchError (schema error 20244); other SQLSchemaErrors
    are re-raised after rolling back to the savepoint.
    """
    log_debug(4, release, server_arch, org_id, user_id)
    query = """
    select ca.label arch,
           c.id,
           c.parent_channel,
           c.org_id,
           c.label,
           c.name,
           c.summary,
           c.description,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c,
         rhnChannelArch ca
    where c.channel_arch_id = ca.id
    and c.id = rhn_channel.base_channel_for_release_arch(
        :release, :server_arch, :org_id, :user_id)
    """
    rhnSQL.transaction("base_channel_for_rel_arch")
    h = rhnSQL.prepare(query)
    try:
        h.execute(release=str(release), server_arch=str(server_arch),
                  org_id=org_id, user_id=user_id)
    except rhnSQL.SQLSchemaError:
        e = sys.exc_info()[1]
        rhnSQL.rollback("base_channel_for_rel_arch")
        if e.errno == 20263:
            # Insufficient permissions for subscription
            log_debug(4, 'BaseChannelDeniedError')
            raise_with_tb(BaseChannelDeniedError(), sys.exc_info()[2])
        if e.errno == 20244:
            # Server architecture could not be found
            log_debug(4, 'InvalidServerArchError')
            raise_with_tb(InvalidServerArchError(str(server_arch)), sys.exc_info()[2])
        # Re-raise unknown exceptions
        # (typo fix: the debug message used to read 'unkown exception')
        log_debug(4, 'unknown exception')
        raise
    log_debug(4, 'got past exceptions')
    return h.fetchone_dict()
def base_eus_channel_for_ver_rel_arch(version, release, server_arch,
                                      org_id=-1, user_id=None):
    """
    given a redhat-release version, release, and server arch, return a list
    of dicts containing the details of the channel z streams either match the
    version/release pair, or are greater.

    Channels whose release equals the server's get is_default='Y';
    channels with a greater release get is_default='N'; older channels
    are dropped.
    """
    log_debug(4, version, release, server_arch, org_id, user_id)
    eus_channels_query = """
        select c.id,
               c.label,
               c.name,
               rcm.release,
               c.receiving_updates
        from
               rhnChannelPermissions cp,
               rhnChannel c,
               rhnServerArch sa,
               rhnServerChannelArchCompat scac,
               rhnReleaseChannelMap rcm
        where
               rcm.version = :version
           and scac.server_arch_id = sa.id
           and sa.label = :server_arch
           and scac.channel_arch_id = rcm.channel_arch_id
           and rcm.channel_id = c.id
           and cp.channel_id = c.id
           and cp.org_id = :org_id
           and rhn_channel.loose_user_role_check(c.id, :user_id,
                                                 'subscribe') = 1
    """
    eus_channels_prepared = rhnSQL.prepare(eus_channels_query)
    eus_channels_prepared.execute(version=version,
                                  server_arch=server_arch,
                                  user_id=user_id,
                                  org_id=org_id)
    channels = []
    while True:
        channel = eus_channels_prepared.fetchone_dict()
        if channel is None:
            break
        # the release part of redhat-release for rhel 4 is like
        # 6.1 or 7; we just look at the first digit.
        # for rhel 5 and up it's the full release number of rhel, followed by
        # the true release number of the rpm, like 5.0.0.9 (for the 9th
        # version of the redhat-release rpm, for RHEL GA)
        db_release = channel['release']
        if version in ['4AS', '4ES']:
            parts = 1
        else:
            parts = 2
        # Compare only the leading `parts` dotted components, ignoring
        # anything after a dash.
        server_rel = '.'.join(release.split('-')[0].split('.')[:parts])
        channel_rel = '.'.join(db_release.split('-')[0].split('.')[:parts])
        # XXX we're no longer using the is_default column from the db
        if rpm.labelCompare(('0', server_rel, '0'),
                            ('0', channel_rel, '0')) == 0:
            channel['is_default'] = 'Y'
            channels.append(channel)
        if rpm.labelCompare(('0', server_rel, '0'),
                            ('0', channel_rel, '0')) < 0:
            channel['is_default'] = 'N'
            channels.append(channel)
    return channels
def get_channel_for_release_arch(release, server_arch, org_id=None):
    """Return the info dict of the base channel mapped to the given dist
    release and server arch, or None when no mapping exists.

    With org_id None, the vendor mapping (rhnDistChannelMap) is used;
    otherwise the org-specific mapping (rhnOrgDistChannelMap).
    """
    log_debug(3, release, server_arch)
    server_arch = rhnLib.normalize_server_arch(str(server_arch))
    log_debug(3, 'normalized arch as %s' % server_arch)

    if org_id is None:
        query = """
        select distinct
               ca.label arch,
               c.id,
               c.parent_channel,
               c.org_id,
               c.label,
               c.name,
               c.summary,
               c.description,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnDistChannelMap dcm,
             rhnChannel c,
             rhnChannelArch ca,
             rhnServerChannelArchCompat scac,
             rhnServerArch sa
        where scac.server_arch_id = sa.id
        and sa.label = :server_arch
        and scac.channel_arch_id = dcm.channel_arch_id
        and dcm.release = :release
        and dcm.channel_id = c.id
        and dcm.channel_arch_id = c.channel_arch_id
        and dcm.org_id is null
        and c.parent_channel is null
        and c.org_id is null
        and c.channel_arch_id = ca.id
        """
    else:
        query = """
        select distinct
               ca.label arch,
               c.id,
               c.parent_channel,
               c.org_id,
               c.label,
               c.name,
               c.summary,
               c.description,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnOrgDistChannelMap odcm,
             rhnChannel c,
             rhnChannelArch ca,
             rhnServerChannelArchCompat scac,
             rhnServerArch sa
        where scac.server_arch_id = sa.id
        and sa.label = :server_arch
        and scac.channel_arch_id = odcm.channel_arch_id
        and odcm.release = :release
        and odcm.channel_id = c.id
        and odcm.channel_arch_id = c.channel_arch_id
        and odcm.org_id = :org_id
        and c.parent_channel is null
        and c.org_id is null
        and c.channel_arch_id = ca.id
        """
    h = rhnSQL.prepare(query)
    h.execute(release=str(release), server_arch=server_arch, org_id=org_id)
    row = h.fetchone_dict()
    if not row:
        # No channels for this guy (typo fix: was 'No channles')
        log_debug(3, 'No channels for this guy')
        return None
    log_debug(3, 'row is %s' % str(row))
    return row
def applet_channels_for_uuid(uuid):
    """Return the channel info dicts (plus the server's channels_changed
    timestamp) for every channel subscribed by the server registered
    under the given applet uuid; [] when nothing matches."""
    log_debug(3, uuid)
    lookup = rhnSQL.prepare("""
    select distinct
           ca.label arch,
           c.id,
           c.parent_channel,
           c.org_id,
           c.label,
           c.name,
           c.summary,
           c.description,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
           to_char(s.channels_changed, 'YYYYMMDDHH24MISS') server_channels_changed
    from rhnChannelArch ca,
         rhnChannel c,
         rhnServerChannel sc,
         rhnServer s,
         rhnServerUuid su
    where su.uuid = :uuid
    and su.server_id = s.id
    and su.server_id = sc.server_id
    and sc.channel_id = c.id
    and c.channel_arch_id = ca.id
    """)
    lookup.execute(uuid=uuid)
    return lookup.fetchall_dict() or []
# retrieve a list of public channels for a given release and architecture
# we cannot cache this if it involves an org_id
# If a user_id is passed to this function, and all the available base channels
# for this server_arch/release combination are denied by the org admin, this
# function raises BaseChannelDeniedError
def channels_for_release_arch(release, server_arch, org_id=-1, user_id=None):
    """Return the stringified base channel plus the subscribable child
    channels for a release/server-arch combination.

    Raises NoBaseChannelError when no base channel maps, and propagates
    BaseChannelDeniedError / InvalidServerArchError from
    base_channel_for_rel_arch.
    """
    if not org_id:
        org_id = -1
    # str.strip() replaces the legacy string.strip() function, which no
    # longer exists under Python 3.
    org_id = str(org_id).strip()
    log_debug(3, release, server_arch, org_id)

    # Can raise BaseChannelDeniedError or InvalidServerArchError
    base_channel = base_channel_for_rel_arch(release, server_arch,
                                             org_id=org_id, user_id=user_id)
    if not base_channel:
        raise NoBaseChannelError()
    # At this point, base_channel is not null

    # We assume here that subchannels are compatible with the base channels,
    # so there would be no need to check for arch compatibility from this
    # point
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
        -- If user_id is null, then the channel is subscribable
        rhn_channel.loose_user_role_check(c.id, :user_id, 'subscribe')
            subscribable
    from
        rhnChannelPermissions cp,
        rhnOrgDistChannelMap odcm,
        rhnChannel c,
        rhnChannelArch ca
    where
        c.id = odcm.channel_id
    and odcm.os in (
            'Powertools'
        )
    and odcm.for_org_id = :org_id
    and c.channel_arch_id = ca.id
    and cp.channel_id = c.id
    and cp.org_id = :org_id
    and c.parent_channel = :parent_channel
    """)
    h.execute(org_id=org_id,
              parent_channel=base_channel['id'], user_id=user_id)

    channels = [base_channel]
    while True:
        row = h.fetchone_dict()
        if not row:
            break
        subscribable = row['subscribable']
        del row['subscribable']
        if not subscribable:
            # Not allowed to subscribe to this channel
            continue
        channels.append(row)

    return __stringify(channels)
_query_get_source_packages_from_ids = rhnSQL.Statement("""
    select srpm.name
    from rhnChannelPackage cp,
         rhnPackage p,
         rhnSourceRPM srpm
    where cp.channel_id = :channel_id
    and cp.package_id = p.id
    and p.source_rpm_id = srpm.id
""")


def list_packages_source(channel_id):
    """Return [name, version, release, epoch] lists for every source RPM
    referenced by packages in the given channel."""
    ret = []
    h = rhnSQL.prepare(_query_get_source_packages_from_ids)
    h.execute(channel_id=channel_id)
    results = h.fetchall_dict()
    if results:
        for r in results:
            r = r['name']
            # str methods replace the legacy string-module functions
            # (string.find/string.replace), which are gone in Python 3.
            if ".rpm" in r:
                r = r.replace(".rpm", "")
            new_evr = rhnLib.make_evr(r, source=1)
            new_evr_list = [new_evr['name'], new_evr['version'], new_evr['release'], new_evr['epoch']]
            ret.append(new_evr_list)
    return ret
# the latest packages from the specified channel
_query_all_packages_from_channel_checksum = """
    select
        p.id,
        pn.name,
        pevr.version,
        pevr.release,
        pevr.epoch,
        pa.label arch,
        p.package_size,
        ct.label as checksum_type,
        c.checksum
    from
        rhnChannelPackage cp,
        rhnPackage p,
        rhnPackageName pn,
        rhnPackageEVR pevr,
        rhnPackageArch pa,
        rhnChecksumType ct,
        rhnChecksum c
    where
        cp.channel_id = :channel_id
    and cp.package_id = p.id
    and p.name_id = pn.id
    and p.evr_id = pevr.id
    and p.package_arch_id = pa.id
    and p.checksum_id = c.id
    and c.checksum_type_id = ct.id
    order by pn.name, pevr.evr desc, pa.label
"""


# This function executes the SQL call for listing packages with checksum info
def list_all_packages_checksum_sql(channel_id):
    """Return every package in the channel as stringified tuples of
    (name, version, release, epoch, arch, package_size, checksum_type,
    checksum)."""
    log_debug(3, channel_id)
    h = rhnSQL.prepare(_query_all_packages_from_channel_checksum)
    h.execute(channel_id=str(channel_id))
    ret = h.fetchall_dict()
    if not ret:
        return []
    # process the results
    ret = [(a["name"], a["version"], a["release"], a["epoch"],
            a["arch"], a["package_size"], a['checksum_type'],
            a['checksum']) for a in __stringify(ret)]
    return ret
# This function executes the SQL call for listing latest packages with
# checksum info
def list_packages_checksum_sql(channel_id):
    """Return only the LATEST version of each package name in the channel,
    as stringified tuples of (name, version, release, epoch, arch,
    package_size, checksum_type, checksum).

    The query joins a per-name max(evr) subquery back to the full channel
    contents; arches are ordered by how many server arches they are
    compatible with (arch_rank) so preferred arches come first.
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    query = """
    select
        pn.name,
        pevr.version,
        pevr.release,
        pevr.epoch,
        pa.label arch,
        full_channel.package_size,
        full_channel.checksum_type,
        full_channel.checksum
    from
        rhnPackageArch pa,
        ( select
            p.name_id,
            max(pe.evr) evr
          from
            rhnChannelPackage cp,
            rhnPackage p,
            rhnPackageEVR pe
          where
            cp.channel_id = :channel_id
          and cp.package_id = p.id
          and p.evr_id = pe.id
          group by p.name_id
        ) listall,
        ( select distinct
            p.package_size,
            p.name_id,
            p.evr_id,
            p.package_arch_id,
            ct.label as checksum_type,
            c.checksum
          from
            rhnChannelPackage cp,
            rhnPackage p,
            rhnChecksumType ct,
            rhnChecksum c
          where
            cp.channel_id = :channel_id
          and cp.package_id = p.id
          and p.checksum_id = c.id
          and c.checksum_type_id = ct.id
        ) full_channel,
        -- Rank the package's arch
        ( select
            package_arch_id,
            count(*) rank
          from
            rhnServerPackageArchCompat
          group by package_arch_id
        ) arch_rank,
        rhnPackageName pn,
        rhnPackageEVR pevr
    where
        pn.id = listall.name_id
        -- link back to the specific package
    and full_channel.name_id = listall.name_id
    and full_channel.evr_id = pevr.id
    and pevr.evr = listall.evr
    and pa.id = full_channel.package_arch_id
    and pa.id = arch_rank.package_arch_id
    order by pn.name, arch_rank.rank desc
    """
    h = rhnSQL.prepare(query)
    h.execute(channel_id=str(channel_id))
    ret = h.fetchall_dict()
    if not ret:
        return []
    # process the results
    ret = [(a["name"], a["version"], a["release"], a["epoch"],
            a["arch"], a["package_size"], a['checksum_type'],
            a['checksum']) for a in __stringify(ret)]
    return ret
# This function executes the SQL call for listing packages
def _list_packages_sql(query, channel_id):
    """Run a package-listing query bound to :channel_id and reduce each
    row to the (name, version, release, epoch, arch, package_size) tuple
    layout the callers expect; [] when nothing matches."""
    cursor = rhnSQL.prepare(query)
    cursor.execute(channel_id=str(channel_id))
    rows = cursor.fetchall_dict()
    if not rows:
        return []
    return [(row["name"], row["version"], row["release"], row["epoch"],
             row["arch"], row["package_size"]) for row in __stringify(rows)]
def list_packages_sql(channel_id):
    """Return only the LATEST version of each package name in the channel
    as stringified (name, version, release, epoch, arch, package_size)
    tuples; same shape as list_packages_checksum_sql minus the checksum
    columns."""
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    query = """
    select
        pn.name,
        pevr.version,
        pevr.release,
        pevr.epoch,
        pa.label arch,
        full_channel.package_size
    from
        rhnPackageArch pa,
        ( select
            p.name_id,
            max(pe.evr) evr
          from
            rhnChannelPackage cp,
            rhnPackage p,
            rhnPackageEVR pe
          where
            cp.channel_id = :channel_id
          and cp.package_id = p.id
          and p.evr_id = pe.id
          group by p.name_id
        ) listall,
        ( select distinct
            p.package_size,
            p.name_id,
            p.evr_id,
            p.package_arch_id
          from
            rhnChannelPackage cp,
            rhnPackage p
          where
            cp.channel_id = :channel_id
          and cp.package_id = p.id
        ) full_channel,
        -- Rank the package's arch
        ( select
            package_arch_id,
            count(*) rank
          from
            rhnServerPackageArchCompat
          group by package_arch_id
        ) arch_rank,
        rhnPackageName pn,
        rhnPackageEVR pevr
    where
        pn.id = listall.name_id
        -- link back to the specific package
    and full_channel.name_id = listall.name_id
    and full_channel.evr_id = pevr.id
    and pevr.evr = listall.evr
    and pa.id = full_channel.package_arch_id
    and pa.id = arch_rank.package_arch_id
    order by pn.name, arch_rank.rank desc
    """
    return _list_packages_sql(query, channel_id)
# the latest packages from the specified channel
_query_latest_packages_from_channel = """
    select
        p.id,
        pn.name,
        pevr.version,
        pevr.release,
        pevr.epoch,
        pa.label arch,
        p.package_size
    from
        rhnChannelPackage cp,
        rhnPackage p,
        rhnPackageName pn,
        rhnPackageEVR pevr,
        rhnPackageArch pa
    where
        cp.channel_id = :channel_id
    and cp.package_id = p.id
    and p.name_id = pn.id
    and p.evr_id = pevr.id
    and p.package_arch_id = pa.id
    order by pn.name, pevr.evr desc, pa.label
"""


# This function executes the SQL call for listing packages
def list_all_packages_sql(channel_id):
    """Return EVERY package in the channel (all versions) as stringified
    (name, version, release, epoch, arch, package_size) tuples."""
    log_debug(3, channel_id)
    return _list_packages_sql(_query_latest_packages_from_channel, channel_id)
# This function executes the SQL call for listing packages with all the
# dep information for each package also
def list_all_packages_complete_sql(channel_id):
    """Return every package in the channel together with its dependency
    lists (provides/requires/recommends/suggests/supplements/enhances/
    conflicts/obsoletes/breaks/predepends).

    Runs one query for the package rows, then a second per-package
    UNION-ALL query over all rhnPackage* dependency tables; each
    dependency is rendered as "name [<|>|=] [version]" from its sense
    bitmask (2 = less-than, 4 = greater-than, 8 = equal).
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    h = rhnSQL.prepare(_query_latest_packages_from_channel)

    # This gathers the provides, requires, conflicts, obsoletes info
    g = rhnSQL.prepare("""
    select
        pp.package_id,
        'provides' as capability_type,
        pp.capability_id,
        pp.sense,
        pc.name,
        pc.version
    from
        rhnPackageProvides pp,
        rhnPackageCapability pc
    where
        pp.package_id = :package_id
    and pp.capability_id = pc.id
    union all
    select
        pr.package_id,
        'requires' as capability_type,
        pr.capability_id,
        pr.sense,
        pc.name,
        pc.version
    from
        rhnPackageRequires pr,
        rhnPackageCapability pc
    where
        pr.package_id = :package_id
    and pr.capability_id = pc.id
    union all
    select
        prec.package_id,
        'recommends' as capability_type,
        prec.capability_id,
        prec.sense,
        pc.name,
        pc.version
    from
        rhnPackageRecommends prec,
        rhnPackageCapability pc
    where
        prec.package_id = :package_id
    and prec.capability_id = pc.id
    union all
    select
        sugg.package_id,
        'suggests' as capability_type,
        sugg.capability_id,
        sugg.sense,
        pc.name,
        pc.version
    from
        rhnPackageSuggests sugg,
        rhnPackageCapability pc
    where
        sugg.package_id = :package_id
    and sugg.capability_id = pc.id
    union all
    select
        supp.package_id,
        'supplements' as capability_type,
        supp.capability_id,
        supp.sense,
        pc.name,
        pc.version
    from
        rhnPackageSupplements supp,
        rhnPackageCapability pc
    where
        supp.package_id = :package_id
    and supp.capability_id = pc.id
    union all
    select
        enh.package_id,
        'enhances' as capability_type,
        enh.capability_id,
        enh.sense,
        pc.name,
        pc.version
    from
        rhnPackageEnhances enh,
        rhnPackageCapability pc
    where
        enh.package_id = :package_id
    and enh.capability_id = pc.id
    union all
    select
        pcon.package_id,
        'conflicts' as capability_type,
        pcon.capability_id,
        pcon.sense,
        pc.name,
        pc.version
    from
        rhnPackageConflicts pcon,
        rhnPackageCapability pc
    where
        pcon.package_id = :package_id
    and pcon.capability_id = pc.id
    union all
    select
        po.package_id,
        'obsoletes' as capability_type,
        po.capability_id,
        po.sense,
        pc.name,
        pc.version
    from
        rhnPackageObsoletes po,
        rhnPackageCapability pc
    where
        po.package_id = :package_id
    and po.capability_id = pc.id
    union all
    select
        brks.package_id,
        'breaks' as capability_type,
        brks.capability_id,
        brks.sense,
        pc.name,
        pc.version
    from
        rhnPackageBreaks brks,
        rhnPackageCapability pc
    where
        brks.package_id = :package_id
    and brks.capability_id = pc.id
    union all
    select
        pdep.package_id,
        'predepends' as capability_type,
        pdep.capability_id,
        pdep.sense,
        pc.name,
        pc.version
    from
        rhnPackagePredepends pdep,
        rhnPackageCapability pc
    where
        pdep.package_id = :package_id
    and pdep.capability_id = pc.id
    """)
    h.execute(channel_id=str(channel_id))
    # XXX This query has to order the architectures somehow; the 7.2 up2date
    # client was broken and was selecting the wrong architecture if athlons
    # are passed first. The rank ordering here should make sure that i386
    # kernels appear before athlons.
    ret = h.fetchall_dict()
    if not ret:
        return []
    for pkgi in ret:
        # One list per dependency kind; filled from the UNION-ALL query.
        pkgi['provides'] = []
        pkgi['requires'] = []
        pkgi['conflicts'] = []
        pkgi['obsoletes'] = []
        pkgi['recommends'] = []
        pkgi['suggests'] = []
        pkgi['supplements'] = []
        pkgi['enhances'] = []
        pkgi['breaks'] = []
        pkgi['predepends'] = []
        g.execute(package_id=pkgi["id"])
        deps = g.fetchall_dict() or []
        for item in deps:
            version = item['version'] or ""
            relation = ""
            if version:
                # Decode the RPM sense bitmask into a relation operator.
                sense = item['sense'] or 0
                if sense & 2:
                    relation = relation + "<"
                if sense & 4:
                    relation = relation + ">"
                if sense & 8:
                    relation = relation + "="
                if relation:
                    relation = " " + relation
                if version:
                    version = " " + version
            dep = item['name'] + relation + version
            pkgi[item['capability_type']].append(dep)
    # process the results
    ret = [(a["name"], a["version"], a["release"], a["epoch"],
            a["arch"], a["package_size"], a['provides'],
            a['requires'], a['conflicts'], a['obsoletes'], a['recommends'], a['suggests'], a['supplements'], a['enhances'], a['breaks'], a['predepends']) for a in __stringify(ret)]
    return ret
def list_packages_path(channel_id):
    """Return the filesystem path of every package in the channel, as raw
    single-column result tuples from fetchall() (not stringified)."""
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    h = rhnSQL.prepare("""
    select
        p.path
    from
        rhnPackage p,
        rhnChannelPackage cp
    where
        cp.channel_id = :channel_id
    and cp.package_id = p.id
    """)
    h.execute(channel_id=str(channel_id))
    ret = h.fetchall()
    if not ret:
        return []
    # NOTE: the rows are deliberately returned as-is; an older revision
    # mapped/stringified them here but that step was disabled.
    return ret
# list the latest packages for a channel
def list_packages(channel):
    # Cached wrapper: latest version of each package name only.
    return _list_packages(channel, cache_prefix="list_packages",
                          function=list_packages_sql)
# list _all_ the packages for a channel
def list_all_packages(channel):
    # Cached wrapper: every package version in the channel.
    return _list_packages(channel, cache_prefix="list_all_packages",
                          function=list_all_packages_sql)
# list _all_ the packages for a channel, including checksum info
def list_all_packages_checksum(channel):
    # Cached wrapper: every package version plus checksum columns.
    return _list_packages(channel, cache_prefix="list_all_packages_checksum",
                          function=list_all_packages_checksum_sql)
# list _all_ the packages for a channel
def list_all_packages_complete(channel):
    # Cached wrapper: every package version plus dependency info.
    return _list_packages(channel, cache_prefix="list_all_packages_complete",
                          function=list_all_packages_complete_sql)
# Common part of list_packages and list_all_packages*
# cache_prefix is the prefix for the file name we're caching this request as
# function is the generator function
def _list_packages(channel, cache_prefix, function):
    """Fetch a channel's package list, caching the XMLRPC-encoded result.

    channel      -- channel label
    cache_prefix -- prefix of the cache entry name
    function     -- called with the channel id on a cache miss

    Returns the XMLRPC-encoded payload (and sets the
    XMLRPC-Encoded-Response flag), or a plain [] for empty channels.
    Raises rhnFault(40) for an unknown channel.
    """
    log_debug(3, channel, cache_prefix)
    # try the caching thing first
    c_info = channel_info(channel)
    if not c_info:  # unknown channel
        raise rhnFault(40, "could not find any data on channel '%s'" % channel)
    cache_entry = "%s-%s" % (cache_prefix, channel)
    ret = rhnCache.get(cache_entry, c_info["last_modified"])
    if ret:  # we scored a cache hit
        log_debug(4, "Scored cache hit", channel)
        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)
        return ret
    ret = function(c_info["id"])
    if not ret:
        # we assume that channels with no packages are very fast to list,
        # so we don't bother caching...
        log_error("No packages found in channel",
                  c_info["id"], c_info["label"])
        return []
    # append the channel label to each package tuple (comprehension
    # replaces the old map/lambda construct)
    ret = [pkg + (channel,) for pkg in ret]
    ret = xmlrpclib.dumps((ret, ), methodresponse=1)
    # Mark the response as being already XMLRPC-encoded
    rhnFlags.set("XMLRPC-Encoded-Response", 1)
    # set the cache
    rhnCache.set(cache_entry, ret, c_info["last_modified"])
    return ret
def getChannelInfoForKickstart(kickstart):
    """Channel label and last-modified stamp for kickstart tree *kickstart*.

    Returns a dict (label, last_modified) or None when no tree matches.
    """
    statement = rhnSQL.prepare("""
        select c.label,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnChannel c,
               rhnKickstartableTree kt
        where c.id = kt.channel_id
          and kt.label = :kickstart_label
    """)
    statement.execute(kickstart_label=str(kickstart))
    return statement.fetchone_dict()
def getChannelInfoForKickstartOrg(kickstart, org_id):
    """Like getChannelInfoForKickstart, but restricted to trees owned by *org_id*."""
    statement = rhnSQL.prepare("""
        select c.label,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnChannel c,
               rhnKickstartableTree kt
        where c.id = kt.channel_id
          and kt.label = :kickstart_label
          and kt.org_id = :org_id
    """)
    statement.execute(kickstart_label=str(kickstart), org_id=int(org_id))
    return statement.fetchone_dict()
def getChannelInfoForKickstartSession(session):
    """Channel label/last_modified for the tree behind kickstart session *session*.

    Returns (None, None) when the session id cannot be parsed from the string.
    """
    # The session string starts with an integer id, delimited by 'x' or ':'.
    try:
        session_id = int(session.split('x')[0].split(':')[0])
    except Exception:
        return None, None
    statement = rhnSQL.prepare("""
        select c.label,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnChannel c,
               rhnKickstartableTree kt,
               rhnKickstartSession ks
        where c.id = kt.channel_id
          and kt.id = ks.kstree_id
          and ks.id = :session_id
    """)
    statement.execute(session_id=session_id)
    return statement.fetchone_dict()
def getChildChannelInfoForKickstart(kickstart, child):
    """Label/last_modified of child channel *child* whose parent channel owns
    the kickstart tree *kickstart*.

    Returns a dict (label, last_modified) or None when nothing matches.

    Fix: the previous query also listed rhnKickstartSession in the FROM
    clause without any join condition on it, creating an unconstrained
    cross join.  The table was never referenced, so it is removed; the
    single row fetched is unchanged.
    """
    query = """
        select c.label,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
        from rhnChannel c,
             rhnKickstartableTree kt,
             rhnChannel c2
        where c2.id = kt.channel_id
          and kt.label = :kickstart_label
          and c.label = :child_label
          and c.parent_channel = c2.id
    """
    h = rhnSQL.prepare(query)
    h.execute(kickstart_label=str(kickstart), child_label=str(child))
    return h.fetchone_dict()
def getChannelInfoForTinyUrl(tinyurl):
    """Resolve an enabled tiny-url token to its target URL row (or None)."""
    statement = rhnSQL.prepare("""
        select tu.url
          from rhnTinyUrl tu
         where tu.enabled = 'Y'
           and tu.token = :tinyurl
    """)
    statement.execute(tinyurl=str(tinyurl))
    return statement.fetchone_dict()
# list the obsoletes for a channel
def list_obsoletes(channel):
    """Return the obsolete-capability tuples for every package in *channel*.

    Each result entry is (name, version, release, epoch, arch,
    obsolete_name, obsolete_version, sense), restricted to the packages
    actually returned by list_packages_sql.  Results are cached per
    channel, keyed on the channel's last_modified stamp.

    Raises rhnFault(40) for an unknown channel.

    Fixes: the index dict was named ``hash`` (shadowing the builtin), used
    a manual membership test instead of setdefault, and looped with
    ``while 1``.
    """
    log_debug(3, channel)
    # try the caching thing first
    c_info = channel_info(channel)
    if not c_info: # unknown channel
        raise rhnFault(40, "could not find any data on channel '%s'" % channel)
    cache_entry = "list_obsoletes-%s" % channel
    ret = rhnCache.get(cache_entry, c_info["last_modified"])
    if ret: # we scored a cache hit
        log_debug(4, "Scored cache hit", channel)
        return ret
    # Get the obsoleted packages
    h = rhnSQL.prepare("""
        select distinct
            pn.name,
            pe.version, pe.release, pe.epoch,
            pa.label arch,
            pc.name obsolete_name,
            pc.version obsolete_version,
            p_info.sense
        from rhnPackageCapability pc,
            rhnPackageArch pa,
            rhnPackageEVR pe,
            rhnPackageName pn,
            rhnPackage p,
            ( select cp.channel_id,
                po.package_id, po.capability_id, po.sense
              from rhnPackageObsoletes po,
                rhnChannelPackage cp,
                rhnChannel c
              where 1=1
                and c.label = :channel
                and c.id = cp.channel_id
                and cp.package_id = po.package_id
            ) p_info
        where 1=1
            and p_info.package_id = p.id
            and p.name_id = pn.id
            and p.evr_id = pe.id
            and p.package_arch_id = pa.id
            and p_info.capability_id = pc.id
    """)
    h.execute(channel=str(channel))
    # Index the obsolete info by package NEVRA so the listall match below
    # is a dict lookup (avoid shadowing the hash() builtin).
    obsoletes_by_nevra = {}
    while True:
        row = h.fetchone_dict()
        if not row:
            break
        row = __stringify(row)
        key = (row['name'], row['version'], row['release'],
               row["epoch"], row['arch'])
        value = key + (row['obsolete_name'], row['obsolete_version'],
                       row['sense'])
        obsoletes_by_nevra.setdefault(key, []).append(value)
    # Now grab a listall and match it against what we got
    result = []
    for pkg in list_packages_sql(c_info["id"]):
        result.extend(obsoletes_by_nevra.get(tuple(pkg[:5]), []))
    # we can cache this now
    rhnCache.set(cache_entry, result, c_info["last_modified"])
    return result
def __auth_user(server_id, username, password):
    """ Auth if user can add/remove channel from given server """
    log_debug(3, server_id, username)
    # Raises if the username/password pair does not check out.
    user = rhnUser.auth_username_password(username, password)
    # The password is good; now verify the user has permissions on this
    # particular server.
    cursor = rhnSQL.prepare("""
    select 1
      from rhnUserServerPerms usp
     where usp.user_id = :user_id
       and usp.server_id = :server_id
    """)
    cursor.execute(user_id=str(user.getid()), server_id=str(server_id))
    if not cursor.fetchone_dict():
        # Not allowed to perform administrative tasks on this server
        raise rhnFault(37)
    return 1
# small wrapper around a PL/SQL function
def subscribe_sql(server_id, channel_id, commit=1):
    """Subscribe *server_id* to *channel_id* via rhn_channel.subscribe_server.

    Returns 1 on success.  Raises rhnFault(38) when the server already has
    a base channel (schema errno 20102); any other SQL error is wrapped in
    rhnException with the original traceback preserved.
    """
    log_debug(3, server_id, channel_id, commit)
    subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
    try:
        # don't run the EC yet (third argument 0)
        subscribe_channel(server_id, channel_id, 0)
    except rhnSQL.SQLSchemaError:
        e = sys.exc_info()[1]
        if e.errno == 20102: # channel_server_one_base
            log_error("Channel subscribe failed, "
                      "%s already subscribed to %s (?)" % (server_id, channel_id))
            raise_with_tb(rhnFault(38, "Server already subscribed to %s" % channel_id), sys.exc_info()[2])
        # If we got here, it's an unknown error; ISE (for now)
        log_error("SQLSchemaError", e)
        raise_with_tb(rhnException(e), sys.exc_info()[2])
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        # If we got here, it's an unknown error; ISE (for now)
        log_error("SQLError", e)
        raise_with_tb(rhnException(e), sys.exc_info()[2])
    if commit:
        rhnSQL.commit()
    return 1
_query_channel_details = rhnSQL.Statement("""
select c.id, c.label, c.parent_channel
from rhnChannel c
where c.label = :channel
""")
_query_server_parent_channel = rhnSQL.Statement("""
select pc.id, pc.label
from rhnChannel c
join rhnServerChannel sc on c.parent_channel = sc.channel_id
join rhnChannel pc on c.parent_channel = pc.id
where sc.server_id = :sid
group by pc.id, pc.label
""")
_query_can_subscribe = rhnSQL.Statement("""
select rhn_channel.user_role_check(:cid, wc.id, 'subscribe') as can_subscribe
from web_contact wc
where wc.login_uc = upper(:username)
""")
# subscribe a server to a channel with authentication
def subscribe_channel(server_id, channel, username, password):
    """Subscribe *server_id* to *channel* after authenticating *username*.

    Raises rhnFault 40 (unknown channel), 32 (base-channel conflicts),
    or 71 (no subscribe permission).  Returns 1 on success.
    """
    log_debug(3, server_id, channel, username)
    # Authentication raises on failure, so reaching this point means OK.
    __auth_user(server_id, username, password)
    # Resolve the target channel.
    h = rhnSQL.prepare(_query_channel_details)
    h.execute(channel=str(channel))
    channel_details = h.fetchone_dict()
    if not channel_details:
        log_error("Channel %s does not exist?" % channel)
        raise rhnFault(40, "Channel %s does not exist?" % channel)
    # Find the parent channel the server currently sits under (if any).
    h = rhnSQL.prepare(_query_server_parent_channel)
    h.execute(sid=server_id)
    server_parent_channel = h.fetchone_dict()
    # A server gets at most one base channel; a child channel must belong
    # to the parent the server is already subscribed to.
    if not channel_details['parent_channel'] and server_parent_channel:
        log_error("Cannot add parent channel %s. Server already subscribed to parent channel %s." %
                  (channel, server_parent_channel['label']))
        raise rhnFault(32, "Cannot add parent channel %s. Server already subscribed to parent channel %s." %
                       (channel, server_parent_channel['label']))
    if (server_parent_channel and
            server_parent_channel['id'] != channel_details['parent_channel']):
        log_error("Server is not subscribed to parent of channel %s." % channel)
        raise rhnFault(32, "Server is not subscribed to parent of channel %s." % channel)
    # check specific channel subscription permissions
    h = rhnSQL.prepare(_query_can_subscribe)
    h.execute(cid=channel_details['id'], username=username)
    perm = h.fetchone_dict()
    if perm and perm['can_subscribe']:
        subscribe_sql(server_id, channel_details['id'])
        return 1
    raise rhnFault(71)
# This class is only a convenient encapsulation of a server's attributes:
# server_id, org_id, release, arch, user_id. Sometimes we only pass the
# server_id, and later down the road we have to message "no channel for
# release foo, arch bar", but we don't know the release and arch anymore
class LiteServer:
    """Lightweight bag of server attributes (id, org_id, release, arch)."""

    _attributes = ['id', 'org_id', 'release', 'arch']

    def __init__(self, **kwargs):
        # Any attribute not supplied defaults to None.
        for name in self._attributes:
            setattr(self, name, kwargs.get(name))

    def init_from_server(self, server):
        """Populate this instance from a full server object; returns self."""
        self.id = server.getid()
        self.org_id = server.server['org_id']
        self.release = server.server['release']
        self.arch = server.archname
        return self

    def __repr__(self):
        attrs = {}
        for name in self._attributes:
            attrs[name] = getattr(self, name)
        return "<%s instance at %s: attributes=%s>" % (
            self.__class__.__name__, id(self), attrs)
# If raise_exceptions is set, BaseChannelDeniedError, NoBaseChannelError are
# raised
def guess_channels_for_server(server, user_id=None, none_ok=0,
                              raise_exceptions=0):
    """Determine the channels *server* should be subscribed to from its
    release / arch / org, filling in any of those attributes that are
    missing by querying the database.

    :param server: a LiteServer (anything else raises rhnException)
    :param none_ok: when true, "no channels" yields [] instead of a fault
    :param raise_exceptions: when true (and none_ok false), propagate
        NoBaseChannelError / BaseChannelDeniedError instead of converting
        them to rhnFault 19 / 71

    Fixes: removed a spurious ``raise`` in front of ``raise_with_tb`` (the
    helper itself raises, so the outer ``raise`` was dead code), and
    balanced the parenthesis in the fault-71 message.
    """
    log_debug(3, server)
    if not isinstance(server, LiteServer):
        raise rhnException("Server object is not a LiteServer")
    if None in (server.org_id, server.release, server.arch):
        # need to obtain the release and/or arch and/or org_id
        h = rhnSQL.prepare("""
            select s.org_id, s.release, sa.label arch
            from rhnServer s, rhnServerArch sa
            where s.id = :server_id and s.server_arch_id = sa.id
        """)
        h.execute(server_id=server.id)
        ret = h.fetchone_dict()
        if not ret:
            log_error("Could not get the release/arch "
                      "for server %s" % server.id)
            raise rhnFault(8, "Could not find the release/arch "
                              "for server %s" % server.id)
        if server.org_id is None:
            server.org_id = ret["org_id"]
        if server.release is None:
            server.release = ret["release"]
        if server.arch is None:
            server.arch = ret["arch"]
    if raise_exceptions and not none_ok:
        # Let exceptions pass through
        return channels_for_release_arch(server.release, server.arch,
                                         server.org_id, user_id=user_id)
    try:
        return channels_for_release_arch(server.release, server.arch,
                                         server.org_id, user_id=user_id)
    except NoBaseChannelError:
        if none_ok:
            return []
        log_error("No available channels for (server, org)",
                  (server.id, server.org_id), server.release, server.arch)
        msg = _("Your account does not have access to any channels matching "
                "(release='%(release)s', arch='%(arch)s')%(www_activation)s")
        error_strings = {
            'release': server.release,
            'arch': server.arch,
            'www_activation': ''
        }
        if CFG.REFER_TO_WWW:
            error_strings['www_activation'] = _("\nIf you have a "
                "registration number, please register with it first at "
                "http://www.redhat.com/apps/activate/ and then try again.\n\n")
        raise_with_tb(rhnFault(19, msg % error_strings), sys.exc_info()[2])
    except BaseChannelDeniedError:
        if none_ok:
            return []
        raise_with_tb(rhnFault(71,
            _("Insufficient subscription permissions for release (%s, %s)")
            % (server.release, server.arch)), sys.exc_info()[2])
# Subscribes the server to channels
# can raise BaseChannelDeniedError, NoBaseChannelError
# Only used for new server registrations
def subscribe_server_channels(server, user_id=None, none_ok=0):
    """Guess and subscribe the appropriate channels for a newly registered
    server; returns the list of channels subscribed."""
    lite = LiteServer().init_from_server(server)
    # bretm 02/19/2007 -- none_ok stays for the registration-token code
    # path, which may legitimately register a server without a base
    # channel; cleaning that path up would let us drop it.
    channels = guess_channels_for_server(lite, user_id=user_id,
                                         none_ok=none_ok, raise_exceptions=1)
    rhnSQL.transaction('subscribe_server_channels')
    for channel in channels:
        subscribe_sql(lite.id, channel["id"], 0)
    return channels
# small wrapper around a PL/SQL function
def unsubscribe_sql(server_id, channel_id, commit=1):
    """Call rhn_channel.unsubscribe_server; return 1 on success, 0 on SQL error."""
    log_debug(3, server_id, channel_id, commit)
    procedure = rhnSQL.Procedure("rhn_channel.unsubscribe_server")
    try:
        # don't run the EC yet (third argument 0)
        procedure(server_id, channel_id, 0)
    except rhnSQL.SQLError:
        log_error("Channel unsubscribe from %s failed for %s" % (
            channel_id, server_id))
        return 0
    if commit:
        rhnSQL.commit()
    return 1
# unsubscribe a server from a channel
def unsubscribe_channel(server_id, channel, username, password):
    """Unsubscribe *server_id* from *channel* after authenticating *username*.

    Raises rhnFault 40 (unknown channel), 72 (base channel), 71 (no
    permission).  Returns the result of unsubscribe_sql on success.
    """
    log_debug(3, server_id, channel, username)
    # Authentication raises on failure.
    __auth_user(server_id, username, password)
    # Resolve the channel label.
    h = rhnSQL.prepare("""
    select id, parent_channel from rhnChannel where label = :channel
    """)
    h.execute(channel=channel)
    row = h.fetchone_dict()
    if not row:
        log_error("Asked to unsubscribe server %s from non-existent channel %s" % (
            server_id, channel))
        raise rhnFault(40, "The specified channel '%s' does not exist." % channel)
    if not row["parent_channel"]:
        # Base channels cannot be removed through this call.
        log_error("Cannot unsubscribe %s from base channel %s" % (
            server_id, channel))
        raise rhnFault(72, "You can not unsubscribe %s from base channel %s." % (
            server_id, channel))
    # check specific channel subscription permissions
    channel_id = row['id']
    h = rhnSQL.prepare(_query_can_subscribe)
    h.execute(cid=channel_id, username=username)
    perm = h.fetchone_dict()
    if perm and perm['can_subscribe']:
        return unsubscribe_sql(server_id, channel_id)
    raise rhnFault(71)
# unsubscribe from all channels
def unsubscribe_all_channels(server_id):
    """Remove *server_id* from every channel it is subscribed to.

    Children are removed before their base channel ("parent_channel nulls
    last"); any failure rolls the transaction back and raises rhnFault(36).
    """
    log_debug(3, server_id)
    # We need to unsubscribe the children channels before the base ones.
    rhnSQL.transaction("unsub_all_channels")
    cursor = rhnSQL.prepare("""
        select
            sc.channel_id id
        from
            rhnChannel c,
            rhnServerChannel sc
        where
            sc.server_id = :server_id
            and sc.channel_id = c.id
        order by c.parent_channel nulls last
    """)
    cursor.execute(server_id=str(server_id))
    while True:
        row = cursor.fetchone_dict()
        if not row:
            break
        if not unsubscribe_sql(server_id, row["id"], 0):
            rhnSQL.rollback("unsub_all_channels")
            raise rhnFault(36, "Could not unsubscribe server %s "
                           "from existing channels" % (server_id,))
    # finished unsubscribing
    return 1
# Unsubscribe the server from the channels in the list
# A channel is a hash containing at least the keys:
#  [id, label, parent_channel]
def unsubscribe_channels(server_id, channels):
    """Unsubscribe *server_id* from each channel dict in *channels*.

    Children are processed before base channels; failure rolls back the
    transaction and raises rhnFault(36).
    """
    log_debug(4, server_id, channels)
    if not channels:
        # Nothing to do
        return 1
    # We need to unsubscribe the children channels before the base ones.
    rhnSQL.transaction("unsub_channels")
    children = [c for c in channels if c['parent_channel']]
    bases = [c for c in channels if not c['parent_channel']]
    for channel in children + bases:
        if not unsubscribe_sql(server_id, channel["id"], 0):
            rhnSQL.rollback("unsub_channels")
            raise rhnFault(36, "Could not unsubscribe server %s "
                           "from channel %s" % (server_id, channel["label"]))
    # finished unsubscribing
    return 1
# Subscribe the server to the channels in the list
# A channel is a hash containing at least the keys:
#  [id, label, parent_channel]
def subscribe_channels(server_id, channels):
    """Subscribe *server_id* to each channel dict in *channels*.

    Base channels are attached before their children.
    """
    log_debug(4, server_id, channels)
    if not channels:
        # Nothing to do
        return 1
    # We need to subscribe the base channel before the child ones.
    bases = [c for c in channels if not c['parent_channel']]
    children = [c for c in channels if c['parent_channel']]
    for channel in bases + children:
        subscribe_sql(server_id, channel["id"], 0)
    # finished subscribing
    return 1
# check if a server is subscribed to a channel
def is_subscribed(server_id, channel):
    """Return 1 if *server_id* is subscribed to channel label *channel*, else 0."""
    log_debug(3, server_id, channel)
    cursor = rhnSQL.prepare("""
    select 1 subscribed
      from rhnServerChannel sc, rhnChannel c
     where
           sc.channel_id = c.id
       and c.label = :channel
       and sc.server_id = :server_id
    """)
    cursor.execute(server_id=str(server_id), channel=str(channel))
    if cursor.fetchone_dict():
        return 1
    # System not subscribed to channel
    return 0
# Returns 0, "", "" if system does not need any message, or
# (error_code, message_title, message) otherwise
def system_reg_message(server):
    """Return a (code, title, message) triple describing post-registration
    problems for *server*: no base channel, or no service entitlement.
    Returns (0, "", "") when everything is fine.
    """
    server_id = server.server['id']
    # Is this system subscribed to a channel?
    h = rhnSQL.prepare("""
        select sc.channel_id
        from rhnServerChannel sc
        where sc.server_id = :server_id
    """)
    h.execute(server_id=server_id)
    ret = h.fetchone_dict()
    if not ret:
        # System not subscribed to any channel
        #
        return (-1, s_invalid_channel_title,
                s_invalid_channel_message %
                (server.server["release"], server.archname))
    # System does have a base channel; check entitlements
    from rhnServer import server_lib # having this on top, cause TB due circular imports
    entitlements = server_lib.check_entitlement(server_id)
    if not entitlements:
        # No entitlement
        # We don't have an autoentitle preference for now, so display just one
        # message
        templates = rhnFlags.get('templateOverrides')
        if templates and 'hostname' in templates:
            hostname = templates['hostname']
        else:
            # Default to www
            hostname = "rhn.redhat.com"
        params = {
            'entitlement_url': "https://%s"
            "/rhn/systems/details/Edit.do?sid=%s" %
            (hostname, server_id)
        }
        return -1, no_entitlement_title, no_entitlement_message % params
    return 0, "", ""
def subscribe_to_tools_channel(server_id):
    """
    Subscribes server_id to the RHN Tools channel associated with its base channel, if one exists.
    """
    base_channel_dict = get_base_channel(server_id, none_ok=1)
    if base_channel_dict is None:
        raise NoBaseChannelError("Server %s has no base channel." %
                                 str(server_id))
    lookup_child_channels = rhnSQL.Statement("""
        select id, label, parent_channel
        from rhnChannel
        where parent_channel = :id
    """)
    cursor = rhnSQL.prepare(lookup_child_channels)
    cursor.execute(id=base_channel_dict['id'])
    child_channels = cursor.fetchall_dict()
    if child_channels is None:
        raise NoChildChannels("Base channel id %s has no child channels associated with it." %
                              base_channel_dict['id'])
    # Scan all children; as before, the last matching child wins.
    tools_channel = None
    for child in child_channels:
        if 'label' in child and 'rhn-tools' in child['label']:
            tools_channel = child
    if tools_channel is None:
        raise NoToolsChannel("Base channel id %s does not have a RHN Tools channel as a child channel." %
                             base_channel_dict['id'])
    # Sanity-check the row before subscribing.
    if 'id' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no id.")
    if 'label' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no label.")
    if 'parent_channel' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no parent_channel.")
    subscribe_channels(server_id, [tools_channel])
# Various messages that can be reused
#
# bretm 02/07/2007 -- when we have better old-client documentation, probably
# will be safe to get rid of all this crap
# Shown when the release/arch combination maps to no channel at all.
h_invalid_channel_title = _("System Registered but Inactive")
h_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it is not subscribed to a channel. If you have not yet
activated your product for service, please visit our website at:
http://www.redhat.com/apps/activate/
...to activate your product.""")
# Shown when the system could not be subscribed to a base channel.
s_invalid_channel_title = _("System Registered but Inactive")
s_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it could not be subscribed to a base channel.
Please contact your organization administrator for assistance.
""")
# Shown when the system registered but auto-entitlement did not apply.
no_autoentitlement_message = _("""
This system has been successfully registered, but is not yet entitled
to service. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
# Shown when no service entitlements were available at registration time.
no_entitlement_title = _("System Registered but Inactive")
no_entitlement_message = _("""
This system has been successfully registered, but no service entitlements
were available. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
|
shastah/spacewalk
|
backend/server/rhnChannel.py
|
Python
|
gpl-2.0
| 70,972
|
[
"VisIt"
] |
5118594c40b1a1b5d9d7365ccdf63b56847fb63418c7771a4416eb7c08be9416
|
from __future__ import unicode_literals
import datetime
import requests
from six.moves.urllib.parse import parse_qs, urlencode
from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER
from requests_oauthlib import OAuth1, OAuth2, OAuth2Session
from .constants import (
ACCESS_TOKEN_URL,
AUTHORIZE_URL,
REQUEST_TOKEN_URL,
XERO_BASE_URL,
XERO_OAUTH2_AUTHORIZE_URL,
XERO_OAUTH2_CONNECTIONS_URL,
XERO_OAUTH2_TOKEN_URL,
XeroScopes,
)
from .exceptions import (
XeroAccessDenied,
XeroBadRequest,
XeroException,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroNotVerified,
XeroRateLimitExceeded,
XeroUnauthorized,
)
from .utils import resolve_user_agent
OAUTH_EXPIRY_SECONDS = 3600 # Default unless a response reports differently
# Scopes used when the caller supplies none: offline_access enables
# refresh tokens; the two read scopes cover basic accounting queries.
DEFAULT_SCOPE = [
    XeroScopes.OFFLINE_ACCESS,
    XeroScopes.ACCOUNTING_TRANSACTIONS_READ,
    XeroScopes.ACCOUNTING_CONTACTS_READ,
]
class PrivateCredentials:
    """An object wrapping the 2-step OAuth process for Private Xero API access.

    Usage:

    1) Construct a PrivateCredentials() instance:

        >>> from xero.auth import PrivateCredentials
        >>> credentials = PrivateCredentials(<consumer_key>, <rsa_key>)

       rsa_key should be a multi-line string, starting with:

           -----BEGIN RSA PRIVATE KEY-----\n

    2) Use the credentials:

        >>> from xero import Xero
        >>> xero = Xero(credentials)
        >>> xero.contacts.all()
        ...
    """

    def __init__(self, consumer_key, rsa_key, api_url=XERO_BASE_URL):
        self.consumer_key = consumer_key
        self.rsa_key = rsa_key
        self.base_url = api_url
        # The private API reuses the consumer key as the OAuth
        # resource-owner token.
        self.oauth_token = consumer_key
        self.oauth = OAuth1(
            consumer_key,
            resource_owner_key=consumer_key,
            rsa_key=rsa_key,
            signature_method=SIGNATURE_RSA,
            signature_type=SIGNATURE_TYPE_AUTH_HEADER,
        )
class PublicCredentials:
    """An object wrapping the 3-step OAuth process for Public Xero API access.

    Usage:

    1) Construct a PublicCredentials() instance:

        >>> from xero import PublicCredentials
        >>> credentials = PublicCredentials(<consumer_key>, <consumer_secret>)

    2) Visit the authentication URL:

        >>> credentials.url

       If a callback URI was provided (e.g., https://example.com/oauth),
       the user will be redirected to a URL of the form:

       https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>

       from which the verifier can be extracted. If no callback URI is
       provided, the verifier will be shown on the screen, and must be
       manually entered by the user.

    3) Verify the instance:

        >>> credentials.verify(<verifier string>)

    4) Use the credentials:

        >>> from xero import Xero
        >>> xero = Xero(credentials)
        >>> xero.contacts.all()
        ...
    """
    def __init__(
        self,
        consumer_key,
        consumer_secret,
        callback_uri=None,
        verified=False,
        oauth_token=None,
        oauth_token_secret=None,
        oauth_expires_at=None,
        oauth_authorization_expires_at=None,
        scope=None,
        user_agent=None,
        api_url=XERO_BASE_URL,
    ):
        """Construct the auth instance.

        Must provide the consumer key and secret.

        A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when the
        user authorizes access.
        """
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.callback_uri = callback_uri
        self.verified = verified
        self._oauth = None
        self.oauth_expires_at = oauth_expires_at
        self.oauth_authorization_expires_at = oauth_authorization_expires_at
        self.scope = scope
        self.user_agent = resolve_user_agent(user_agent)
        self.base_url = api_url
        self._signature_method = SIGNATURE_HMAC
        # These are not strictly used by Public Credentials, but
        # are reserved for use by other credentials (i.e. Partner)
        self.rsa_key = None
        self.oauth_session_handle = None
        self._init_credentials(oauth_token, oauth_token_secret)
    def _init_credentials(self, oauth_token, oauth_token_secret):
        "Depending on the state passed in, get self._oauth up and running"
        if oauth_token and oauth_token_secret:
            if self.verified:
                # If provided, this is a fully verified set of
                # credentials. Store the oauth_token and secret
                # and initialize OAuth around those
                self._init_oauth(oauth_token, oauth_token_secret)
            else:
                # If provided, we are reconstructing an initialized
                # (but non-verified) set of public credentials.
                self.oauth_token = oauth_token
                self.oauth_token_secret = oauth_token_secret
        else:
            # This is a brand new set of credentials - we need to generate
            # an oauth token so it's available for the url property.
            oauth = OAuth1(
                self.consumer_key,
                client_secret=self.consumer_secret,
                callback_uri=self.callback_uri,
                rsa_key=self.rsa_key,
                signature_method=self._signature_method,
            )
            url = self.base_url + REQUEST_TOKEN_URL
            headers = {"User-Agent": self.user_agent}
            response = requests.post(url=url, headers=headers, auth=oauth)
            self._process_oauth_response(response)
    def _init_oauth(self, oauth_token, oauth_token_secret):
        "Store and initialize a verified set of OAuth credentials"
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self._oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method,
        )
    def _process_oauth_response(self, response):
        "Extracts the fields from an oauth response"
        if response.status_code == 200:
            credentials = parse_qs(response.text)
            # Initialize the oauth credentials
            self._init_oauth(
                credentials.get("oauth_token")[0],
                credentials.get("oauth_token_secret")[0],
            )
            # If tokens are refreshable, we'll get a session handle
            self.oauth_session_handle = credentials.get("oauth_session_handle", [None])[
                0
            ]
            # Calculate token/auth expiry; fall back to the default window
            # when the server does not report one.
            oauth_expires_in = credentials.get(
                "oauth_expires_in", [OAUTH_EXPIRY_SECONDS]
            )[0]
            oauth_authorisation_expires_in = credentials.get(
                "oauth_authorization_expires_in", [OAUTH_EXPIRY_SECONDS]
            )[0]
            self.oauth_expires_at = datetime.datetime.now() + datetime.timedelta(
                seconds=int(oauth_expires_in)
            )
            self.oauth_authorization_expires_at = datetime.datetime.now() + datetime.timedelta(
                seconds=int(oauth_authorisation_expires_in)
            )
        else:
            self._handle_error_response(response)
    def _handle_error_response(self, response):
        # Map HTTP status codes onto the xero exception hierarchy.
        if response.status_code == 400:
            raise XeroBadRequest(response)
        elif response.status_code == 401:
            raise XeroUnauthorized(response)
        elif response.status_code == 403:
            raise XeroForbidden(response)
        elif response.status_code == 404:
            raise XeroNotFound(response)
        elif response.status_code == 500:
            raise XeroInternalError(response)
        elif response.status_code == 501:
            raise XeroNotImplemented(response)
        elif response.status_code == 503:
            # Two 503 responses are possible. Rate limit errors
            # return encoded content; offline errors don't.
            # If you parse the response text and there's nothing
            # encoded, it must be a not-available error.
            payload = parse_qs(response.text)
            if payload:
                raise XeroRateLimitExceeded(response, payload)
            else:
                raise XeroNotAvailable(response)
        else:
            raise XeroExceptionUnknown(response)
    @property
    def state(self):
        """Obtain the useful state of this credentials object so that
        we can reconstruct it independently.
        """
        # Only include attributes that are actually set.
        return dict(
            (attr, getattr(self, attr))
            for attr in (
                "consumer_key",
                "consumer_secret",
                "callback_uri",
                "verified",
                "oauth_token",
                "oauth_token_secret",
                "oauth_session_handle",
                "oauth_expires_at",
                "oauth_authorization_expires_at",
                "scope",
            )
            if getattr(self, attr) is not None
        )
    def verify(self, verifier):
        "Verify an OAuth token"
        # Construct the credentials for the verification request
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            verifier=verifier,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method,
        )
        # Make the verification request, getting back an access token
        url = self.base_url + ACCESS_TOKEN_URL
        headers = {"User-Agent": self.user_agent}
        response = requests.post(url=url, headers=headers, auth=oauth)
        self._process_oauth_response(response)
        self.verified = True
    @property
    def url(self):
        "Returns the URL that can be visited to obtain a verifier code"
        # The authorize url is always api.xero.com
        query_string = {"oauth_token": self.oauth_token}
        if self.scope:
            query_string["scope"] = self.scope
        url = self.base_url + AUTHORIZE_URL + "?" + urlencode(query_string)
        return url
    @property
    def oauth(self):
        "Returns the requests-compatible OAuth object"
        if self._oauth is None:
            raise XeroNotVerified("OAuth credentials haven't been verified")
        return self._oauth
    def expired(self, now=None):
        # Report whether the access token is (about to be) expired.
        if now is None:
            now = datetime.datetime.now()
        # Credentials states from older versions might not have
        # oauth_expires_at available
        if self.oauth_expires_at is None:
            raise XeroException(None, "Expiry time is not available")
        # Allow a bit of time for clock differences and round trip times
        # to prevent false negatives. If users want the precise expiry,
        # they can use self.oauth_expires_at
        CONSERVATIVE_SECONDS = 30
        return self.oauth_expires_at <= (
            now + datetime.timedelta(seconds=CONSERVATIVE_SECONDS)
        )
class PartnerCredentials(PublicCredentials):
    """An object wrapping the 3-step OAuth process for Partner Xero API access.

    Usage is very similar to Public Credentials with the following changes:

    1) You'll need to pass the private key for your RSA certificate.

        >>> rsa_key = "-----BEGIN RSA PRIVATE KEY----- ..."

    2) Once a token has expired, you can refresh it to get another 30 mins:

        >>> credentials = PartnerCredentials(**state)
        >>> if credentials.expired():
        ...     credentials.refresh()

    3) Authorization expiry and token expiry become different things:
       oauth_expires_at tells when the current token expires (~30 min window);
       oauth_authorization_expires_at tells when the overall access
       permissions expire (~10 year window).
    """
    def __init__(
        self,
        consumer_key,
        consumer_secret,
        rsa_key,
        callback_uri=None,
        verified=False,
        oauth_token=None,
        oauth_token_secret=None,
        oauth_expires_at=None,
        oauth_authorization_expires_at=None,
        oauth_session_handle=None,
        scope=None,
        user_agent=None,
        api_url=XERO_BASE_URL,
        **kwargs
    ):
        """Construct the auth instance.

        Must provide the consumer key and secret.

        A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when the
        user authorizes access.
        """
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.callback_uri = callback_uri
        self.verified = verified
        self._oauth = None
        self.oauth_expires_at = oauth_expires_at
        self.oauth_authorization_expires_at = oauth_authorization_expires_at
        self.scope = scope
        self.user_agent = resolve_user_agent(user_agent)
        # Partner access signs with RSA (vs. HMAC for Public).
        self._signature_method = SIGNATURE_RSA
        self.base_url = api_url
        self.rsa_key = rsa_key
        # The session handle allows token refresh after expiry.
        self.oauth_session_handle = oauth_session_handle
        self._init_credentials(oauth_token, oauth_token_secret)
    def refresh(self):
        "Refresh an expired token"
        # Construct the credentials for the verification request
        oauth = OAuth1(
            self.consumer_key,
            client_secret=self.consumer_secret,
            resource_owner_key=self.oauth_token,
            resource_owner_secret=self.oauth_token_secret,
            rsa_key=self.rsa_key,
            signature_method=self._signature_method,
        )
        # Make the verification request, getting back an access token
        headers = {"User-Agent": self.user_agent}
        params = {"oauth_session_handle": self.oauth_session_handle}
        response = requests.post(
            url=self.base_url + ACCESS_TOKEN_URL,
            params=params,
            headers=headers,
            auth=oauth,
        )
        self._process_oauth_response(response)
class OAuth2Credentials(object):
    """An object wrapping the 3-step OAuth2.0 process for Xero API access.

    For detailed documentation see README.md.

    Usage:
     1) Construct an `OAuth2Credentials` instance:
        >>> credentials = OAuth2Credentials(client_id, client_secret,
        >>>                                 callback_uri=callback_uri, scope=scope)
     2) Generate a unique authentication URL and visit it:
        >>> credentials.generate_url()
        The user will be redirected to a URL in the form:
        https://example.com/oauth/xero/callback/?code=0123456789&scope=openid%20profile
        &state=87784234sdf5ds8ad546a8sd545ss6
     3) Verify the credentials using the full URL redirected to, including querystring:
        >>> credentials.verify(full_url_with_querystring)
     4) Use the credentials. It is usually necessary to set the tenant_id (Xero
        organisation id) to specify the organisation against which the queries should
        run:
        >>> from xero import Xero
        >>> credentials.set_default_tenant()
        >>> xero = Xero(credentials)
        >>> xero.contacts.all()
        ...
        To use a different organisation, set credentials.tenant_id:
        >>> tenants = credentials.get_tenants()
        >>> credentials.tenant_id = tenants[1]['tenantId']
     5) If a refresh token is available, it can be used to generate a new token:
        >>> if credentials.expired():
        >>>     credentials.refresh()
        Note that in order for tokens to be refreshable, Xero API requires
        `offline_access` to be included in the scope.
    """

    def __init__(
        self,
        client_id,
        client_secret,
        callback_uri=None,
        auth_state=None,
        auth_secret=None,
        token=None,
        scope=None,
        tenant_id=None,
        user_agent=None,
    ):
        """Construct an OAuth2 credentials instance.

        :param client_id: the Xero app's client id.
        :param client_secret: the Xero app's client secret.
        :param callback_uri: the redirect URI registered with Xero.
        :param auth_state: anti-forgery state string; generated by
            generate_url() if not supplied.
        :param auth_secret: the full redirected-to callback URL (with
            querystring); if supplied together with auth_state, the
            credentials are verified immediately.
        :param token: a previously obtained token dict, when reconstructing
            the credentials from saved state.
        :param scope: list of requested scopes; defaults to a copy of
            DEFAULT_SCOPE.
        :param tenant_id: Xero organisation id to run queries against.
        :param user_agent: User-Agent header value; defaults to a pyxero tag
            plus the default requests user agent.
        """
        from xero import __version__ as VERSION

        self.client_id = client_id
        self.client_secret = client_secret
        self.callback_uri = callback_uri
        self.auth_state = auth_state
        self.token = None
        self.tenant_id = tenant_id  # Used by BaseManager
        self._oauth = None
        # Copy the default list so callers mutating `self.scope` do not
        # alter the module-level default.
        self.scope = scope or DEFAULT_SCOPE[:]
        if user_agent is None:
            self.user_agent = (
                "pyxero/%s " % VERSION + requests.utils.default_user_agent()
            )
        else:
            self.user_agent = user_agent
        self.base_url = XERO_BASE_URL  # Used by BaseManager
        self._init_credentials(token, auth_secret)

    def _init_credentials(self, token, auth_secret):
        """
        Depending on the state passed in, get self._oauth up and running.
        """
        if token:
            self._init_oauth(token)
        elif auth_secret and self.auth_state:
            self.verify(auth_secret)

    def _init_oauth(self, token):
        """Set self._oauth for use by the xero client."""
        self.token = token
        if token:
            self._oauth = OAuth2(client_id=self.client_id, token=self.token)

    @property
    def state(self):
        """Obtain the useful state of this credentials object so that
        we can reconstruct it independently.
        Only attributes that are not None are included.
        """
        return dict(
            (attr, getattr(self, attr))
            for attr in (
                "client_id",
                "client_secret",
                "callback_uri",
                "auth_state",
                "token",
                "scope",
                "tenant_id",
                "user_agent",
            )
            if getattr(self, attr) is not None
        )

    def verify(self, auth_secret):
        """Verify and return OAuth2 token.

        :param auth_secret: the full redirected-to URL, querystring included.
        :raises XeroAccessDenied: if the token exchange fails for any reason.
        """
        session = OAuth2Session(
            self.client_id,
            state=self.auth_state,
            scope=self.scope,
            redirect_uri=self.callback_uri,
        )
        try:
            token = session.fetch_token(
                XERO_OAUTH2_TOKEN_URL,
                client_secret=self.client_secret,
                authorization_response=auth_secret,
                headers=self.headers,
            )
        # Various different exceptions may be raised, so pass the exception
        # through as XeroAccessDenied
        except Exception as e:
            raise XeroAccessDenied(e)
        self._init_oauth(token)

    def generate_url(self):
        """Get the authorization url. This will also set `self.auth_state` to a
        random string if it has not already been set.
        """
        session = OAuth2Session(
            self.client_id, scope=self.scope, redirect_uri=self.callback_uri
        )
        url, self.auth_state = session.authorization_url(
            XERO_OAUTH2_AUTHORIZE_URL, state=self.auth_state
        )
        return url

    @property
    def oauth(self):
        """Return the requests-compatible OAuth object"""
        if self._oauth is None:
            raise XeroNotVerified("OAuth credentials haven't been verified")
        return self._oauth

    @property
    def headers(self):
        """Default headers sent with every OAuth2 API request."""
        return {
            "Accept": "application/json",
            "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
            "User-Agent": self.user_agent,
        }

    @property
    def expires_at(self):
        """Return the expires_at value from the token as a UTC datetime."""
        return datetime.datetime.utcfromtimestamp(self.token["expires_at"])

    def expired(self, seconds=30, now=None):
        """Check if the token has expired yet.

        :param seconds: the minimum number of seconds allowed before expiry.
        :param now: reference UTC datetime; defaults to utcnow().
        """
        if now is None:
            now = datetime.datetime.utcnow()
        # Allow a bit of time for clock differences and round trip times
        # to prevent false negatives. If users want the precise expiry,
        # they can use self.expires_at.
        return (self.expires_at - now) < datetime.timedelta(seconds=seconds)

    def refresh(self):
        """Obtain a refreshed token. Note that `offline_access` must be
        included in scope in order for a token to be refreshable.

        :raises XeroException: if no token/refresh_token/client_secret is
            available.
        """
        if not self.token:
            raise XeroException(None, "Cannot refresh token, no token is present.")
        elif not self.client_secret:
            raise XeroException(
                None, "Cannot refresh token, " "client_secret must be supplied."
            )
        elif not self.token.get("refresh_token"):
            raise XeroException(
                None,
                "Token cannot be refreshed, was " "`offline_access` included in scope?",
            )
        session = OAuth2Session(
            client_id=self.client_id, scope=self.scope, token=self.token
        )
        auth = requests.auth.HTTPBasicAuth(self.client_id, self.client_secret)
        token = session.refresh_token(
            XERO_OAUTH2_TOKEN_URL, auth=auth, headers=self.headers
        )
        self._init_oauth(token)
        return token

    def get_tenants(self, auth_event_id=None):
        """
        Get the list of tenants (Xero Organisations) to which this token grants access.
        Optionally, you may pass a UUID as auth_event_id that will be used to limit to
        only those tenants that were authorised in that authorisation event.
        """
        # Pass the id through `params` so requests URL-encodes it, rather
        # than concatenating it into the URL unescaped.
        params = {}
        if auth_event_id:
            params["authEventId"] = auth_event_id
        response = requests.get(
            self.base_url + XERO_OAUTH2_CONNECTIONS_URL,
            params=params,
            auth=self.oauth,
            headers=self.headers,
        )
        if response.status_code == 200:
            return response.json()
        else:
            self._handle_error_response(response)

    def set_default_tenant(self):
        """A quick way to set the tenant to the first in the list of available
        connections.
        """
        try:
            self.tenant_id = self.get_tenants()[0]["tenantId"]
        except IndexError:
            raise XeroException(
                None,
                "This app is not authorised to access any Xero Organisations. Did the "
                "scopes requested include access to organisation data, or has access "
                "to the organisation(s) been removed?",
            )

    @staticmethod
    def _handle_error_response(response):
        """Translate an HTTP error response into the matching Xero exception."""
        if response.status_code == 400:
            raise XeroBadRequest(response)
        elif response.status_code == 401:
            raise XeroUnauthorized(response)
        elif response.status_code == 403:
            raise XeroForbidden(response)
        elif response.status_code == 404:
            raise XeroNotFound(response)
        elif response.status_code == 500:
            raise XeroInternalError(response)
        elif response.status_code == 501:
            raise XeroNotImplemented(response)
        elif response.status_code == 503:
            # Two 503 responses are possible. Rate limit errors
            # return encoded content; offline errors don't.
            # If you parse the response text and there's nothing
            # encoded, it must be a not-available error.
            payload = parse_qs(response.text)
            if payload:
                raise XeroRateLimitExceeded(response, payload)
            else:
                raise XeroNotAvailable(response)
        else:
            raise XeroExceptionUnknown(response)
|
freakboy3742/pyxero
|
xero/auth.py
|
Python
|
bsd-3-clause
| 23,846
|
[
"VisIt"
] |
732d0d8b9d8d37de07270d340cb66eec5a5512b79fcdaaf8043ae2d020ff6f02
|
"""
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
rolex = watch()
rolex.set()
#from Numeric import *
from numpy import *
from bpz_tools import *
from string import *
import os, glob, sys
import time
import pickle
import shelve
from coetools import pause, params_cl
class Printer():
    """Overwrite the current terminal line with a progress message."""

    def __init__(self, data):
        # "\r" returns to the start of the line and "\x1b[K" erases to the
        # end of it, so successive messages replace one another in place.
        text = "\r\x1b[K" + data.__str__()
        sys.stdout.write(text)
        sys.stdout.flush()
def seglist(vals, mask=None):
    """Split vals into lists based on mask > 0.

    Consecutive entries of `vals` whose mask value is truthy are grouped
    together; a falsy mask entry closes the current group.

    :param vals: sequence of values to segment.
    :param mask: optional boolean/int sequence, same length as vals;
        defaults to vals > 0.
    :return: list of lists of consecutive "good" values.
    """
    # `is None` rather than `== None`: comparing a numpy array to None
    # with == is elementwise and cannot be used in a boolean context.
    if mask is None:
        mask = greater(vals, 0)
    segments = []
    current = []
    for val, good in zip(vals, mask):
        if good:
            current.append(val)
        elif current:
            segments.append(current)
            current = []
    if current:
        segments.append(current)
    return segments
# Initialization and definitions#
#Current directory
homedir = os.getcwd()
#Parameter definition
#Default BPZ run-time parameters; any of these may be overridden by a -P
#parameter file or by command-line flags (see params_cl below).
pars = params()
pars.d = {
    'SPECTRA': 'CWWSB4.list', # template list
    #'PRIOR': 'hdfn_SB', # prior name
    'PRIOR': 'hdfn_gen', # prior name
    'NTYPES':
    None, # Number of Elliptical, Spiral, and Starburst/Irregular templates Default: 1,2,n-3
    'DZ': 0.01, # redshift resolution
    'ZMIN': 0.01, # minimum redshift
    'ZMAX': 10., # maximum redshift
    'MAG': 'yes', # Data in magnitudes?
    'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
    'ODDS': 0.95, # Odds threshold: affects confidence limits definition
    'INTERP':
    0, # Number of interpolated templates between each of the original ones
    'EXCLUDE': 'none', # Filters to be excluded from the estimation
    'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
    'CHECK':
    'yes', # Perform some checks, compare observed colors with templates, etc.
    'VERBOSE': 'yes', # Print estimated redshifts to the standard output
    'PROBS':
    'no', # Save all the galaxy probability distributions (it will create a very large file)
    'PROBS2':
    'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
    'PROBS_LITE': 'yes', # Save only the final probability distribution
    'GET_Z': 'yes', # Actually obtain photo-z
    'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
    'MADAU': 'yes', #Apply Madau correction to spectra
    'Z_THR': 0, #Integrate probability for z>z_thr
    'COLOR': 'no', #Use colors instead of fluxes
    'PLOTS': 'no', #Don't produce plots
    'INTERACTIVE': 'yes', #Don't query the user
    'PHOTO_ERRORS':
    'no', #Define the confidence interval using only the photometric errors
    'MIN_RMS':
    0.05, #"Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
    'N_PEAKS': 1,
    'MERGE_PEAKS': 'no',
    'CONVOLVE_P': 'yes',
    'P_MIN': 1e-2,
    'SED_DIR': sed_dir,
    'AB_DIR': ab_dir,
    'FILTER_DIR': fil_dir,
    'DELTA_M_0': 0.,
    'ZP_OFFSETS': 0.,
    'ZC': None,
    'FC': None,
    "ADD_SPEC_PROB": None,
    "ADD_CONTINUOUS_PROB": None,
    "NMAX": None # Useful for testing
}
# Decide whether to produce plots. `plots` ends up as 'pylab', 'biggles',
# or 0 (plotting disabled / no plotting backend available).
# NOTE: the original code only assigned `plots` when PLOTS == 'no', so any
# other setting raised a NameError on the `if plots:` test below.
plots = pars.d['PLOTS'] != 'no'
if plots:
    # If pylab installed show plots
    plots = 'pylab'
    try:
        import matplotlib
        matplotlib.use('TkAgg')
        from pylab import *
        # from coeplot2a import *
        # Pop up a dummy window so the Tk backend initializes; the user is
        # expected to close it before the run continues.
        plot([1])
        title('KILL THIS WINDOW!')
        show()
        ioff()
    except:
        # Best effort: fall back to biggles, then to no plotting at all.
        try:
            from biggles import *
            plots = 'biggles'
        except:
            plots = 0
#Define the default values of the parameters
#The catalog name is the first positional argument; derived file names
#(.columns, .bpz) share its root.
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
    'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: #Check for parameter file and update parameters
    if sys.argv[2] == '-P':
        pars.fromfile(sys.argv[3])
        ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
#    print key, pars.d[key]
#pause()
pars.d.update(
    params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
    """If parameter `var` is unset or 'yes', point it at the file root.ext."""
    global pars
    if pars.d[var] in (None, 'yes'):
        pars.d[var] = '{}.{}'.format(root, ext)
#Derive default output file names from the catalog root for any of the
#optional products that were left unset or simply switched on with 'yes'.
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
#    pars.d['CHECK'] = root+'.flux_comparison'
#This allows to change the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
    print("Changing sed_dir to ", pars.d['SED_DIR'])
    sed_dir = pars.d['SED_DIR']
    if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
    print("Changing ab_dir to ", pars.d['AB_DIR'])
    ab_dir = pars.d['AB_DIR']
    if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
    print("Changing fil_dir to ", pars.d['FILTER_DIR'])
    fil_dir = pars.d['FILTER_DIR']
    if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry: refuse to overwrite the input catalog or the
#.columns description file with any of the output products.
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
        'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
    print("This would delete the input file!")
    sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
        'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
    print("This would delete the .columns file!")
    sys.exit()
#Assign the intrinsic rms of the template set
if pars.d['SPECTRA'] == 'CWWSB.list':
    print('Setting the intrinsic rms to 0.067(1+z)')
    pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
if pars.d['VERBOSE'] == 'yes':
    print("Current parameters")
    view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
if pars.d["ADD_SPEC_PROB"] is not None:
    # Externally supplied spectroscopic redshift probabilities: the file
    # holds ns/2 redshift columns followed by ns/2 probability columns.
    specprob = 1
    specfile = pars.d["ADD_SPEC_PROB"]
    spec = get_2Darray(specfile)
    ns = spec.shape[1]
    if old_div(ns, 2) != (old_div(ns, 2.)):
        print("Number of columns in SPEC_PROB is odd")
        sys.exit()
    # Use the integer half; plain ns / 2 is a float under true division
    # and "float * str" would raise a TypeError when building the header.
    nhalf = old_div(ns, 2)
    z_spec = spec[:, :nhalf]
    p_spec = spec[:, nhalf:]
    # Write output file header
    header = "#ID "
    header += nhalf * " z_spec%i"
    header += nhalf * " p_spec%i"
    header += "\n"
    header = header % tuple(list(range(nhalf)) + list(range(nhalf)))
    specout = open(specfile.split()[0] + ".p_spec", "w")
    specout.write(header)
else:
    specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99. #Objects not observed
undet = 99. #Objects not detected
#Define the z-grid
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
# String exceptions are illegal in Python 3; raise a real exception (and
# state the actual requirement: ZMIN must not exceed ZMAX).
if zmin > zmax:
    raise ValueError('ZMIN must be <= ZMAX')
dz = float(pars.d['DZ'])
linear = 1
if linear:
    z = arange(zmin, zmax + dz, dz)
else:
    # Logarithmic-style grid with step dz*(1+z); kept for reference even
    # though `linear` is currently hard-wired to 1.
    if zmax != 0.:
        zi = zmin
        z = []
        while zi <= zmax:
            z.append(zi)
            zi = zi + dz * (1. + zi)
        z = array(z)
    else:
        z = array([0.])
#Now check the contents of the FILTERS, SED and AB directories
#Get the filters in stock (strip directory and '.res' extension)
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
    filters_db[i] = os.path.basename(filters_db[i])
    filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock (strip directory and '.sed' extension)
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
    sed_db[i] = os.path.basename(sed_db[i])
    sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock (strip directory and '.AB' extension)
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
    ab_db[i] = os.path.basename(ab_db[i])
    ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
for cosa in nofilters:
    if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
    if type(pars.d['EXCLUDE']) == type(' '):
        pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
    for cosa in pars.d['EXCLUDE']:
        if filters.count(cosa): filters.remove(cosa)
for filter in filters:
    if filter[-4:] == '.res': filter = filter[:-4]
    if filter not in filters_db:
        print('filter ', filter, 'not in database at', fil_dir, ':')
        if ask('Print filters in database?'):
            for line in filters_db:
                print(line)
        sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
    spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
    if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes: f_mod[redshift, template, filter]
f_mod = zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
    for jf in range(nf):
        if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
        else: filtro = filters[jf]
        #model = join([spectra[it], filtro, 'AB'], '.')
        model = '.'.join([spectra[it], filtro, 'AB'])
        model_path = os.path.join(ab_dir, model)
        abfiles.append(model)
        #Generate new ABflux files if not present
        # or if new_ab flag on
        if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
            if spectra[it] not in sed_db:
                print('SED ', spectra[it], 'not in database at', sed_dir)
                # for line in sed_db:
                #     print line
                sys.exit()
            #print spectra[it],filters[jf]
            print(' Generating ', model, '....')
            ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
            #z_ab=arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
            # abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
            # abflux=clip(abflux,0.,1e400)
            # buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
            #for i in range(len(z_ab)):
            #    buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
            #open(model_path,'w').write(buffer)
            #zo=z_ab
            #f_mod_0=abflux
        #else:
        #Read the data
        zo, f_mod_0 = get_data(model_path, (0, 1))
        #Rebin the data to the required redshift resolution
        f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
        #if sometrue(less(f_mod[:,it,jf],0.)):
        if less(f_mod[:, it, jf], 0.).any():
            print('Warning: some values of the model AB fluxes are <0')
            print('due to the interpolation ')
            print('Clipping them to f>=0 values')
            #To avoid rounding errors in the calculation of the likelihood
            f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=where(less(f_mod[:,it,jf],1e-100)*greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolation between the colors
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
# `is None` rather than `== None` (NTYPES may be a list).
if ntypes is None:
    nt0 = nt
else:
    nt0 = list(ntypes)
    for i, nt1 in enumerate(nt0):
        print(i, nt1)
        nt0[i] = int(nt1)
    if (len(nt0) != 3) or (sum(nt0) != nt):
        print()
        # The third count is the starburst templates (the original message
        # repeated "ellipticals" by mistake).
        print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
        print('does not add up to %d templates' % nt)
        print('USAGE: -NTYPES nell,nsp,nsb')
        print('nell = # of elliptical templates')
        print('nsp = # of spiral templates')
        print('nsb = # of starburst templates')
        print(
            'These must add up to the number of templates in the SPECTRA list')
        print('Quitting BPZ.')
        sys.exit()
if ninterp:
    # Insert `ninterp` interpolated templates between each adjacent pair,
    # expanding f_mod from nt to nt + (nt-1)*ninterp template columns.
    nti = nt + (nt - 1) * ninterp
    buffer = zeros((nz, nti, nf)) * 1.
    tipos = arange(0., float(nti), float(ninterp) + 1.)
    xtipos = arange(float(nti))
    for iz in arange(nz):
        for jf in range(nf):
            buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
    nt = nti
    f_mod = buffer
#for j in range(nf):
#    plot=FramedPlot()
#    for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
#    plot.show()
#    ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns (columns in the .columns file are
# 1-based; convert to 0-based indices here)
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
    datos = col_pars.d[filter]
    flux_cols.append(int(datos[0]) - 1)
    eflux_cols.append(int(datos[1]) - 1)
    cals.append(datos[2])
    zp_errors.append(datos[3])
    zp_offsets.append(datos[4])
zp_offsets = array(list(map(float, zp_offsets)))
if pars.d['ZP_OFFSETS']:
    zp_offsets += array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
    # Classify each measurement: detected, undetected (m=99), unobserved (m=-99)
    seen = greater(f_obs, 0.) * less(f_obs, undet)
    no_seen = equal(f_obs, undet)
    no_observed = equal(f_obs, unobs)
    todo = seen + no_seen + no_observed
    #The minimum photometric error is 0.01
    #ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
    ef_obs = where(
        greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
        ef_obs)
    if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
        print('Objects with unexpected magnitudes!')
        print("""Allowed values for magnitudes are
        0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
            unobs) + "(not observed)")
        for i in range(len(todo)):
            if not alltrue(todo[i, :]):
                print(i + 1, f_obs[i, :], ef_obs[i, :])
        sys.exit()
    #Detected objects: convert magnitudes to (arbitrary) fluxes
    try:
        f_obs = where(seen, 10.**(-.4 * f_obs), f_obs)
    except OverflowError:
        print(
            'Some of the input magnitudes have values which are >700 or <-700')
        print('Purge the input photometric catalog')
        print('Minimum value', min(f_obs))
        print('Maximum value', max(f_obs))
        print('Indexes for minimum values', argmin(f_obs, 0.))
        print('Indexes for maximum values', argmax(f_obs, 0.))
        print('Bye.')
        sys.exit()
    try:
        ef_obs = where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
    except OverflowError:
        print(
            'Some of the input magnitude errors have values which are >700 or <-700')
        print('Purge the input photometric catalog')
        print('Minimum value', min(ef_obs))
        print('Maximum value', max(ef_obs))
        print('Indexes for minimum values', argmin(ef_obs, 0.))
        print('Indexes for maximum values', argmax(ef_obs, 0.))
        print('Bye.')
        sys.exit()
    #print 'ef', ef_obs[0,:nf]
    #print 'f', f_obs[1,:nf]
    #print 'ef', ef_obs[1,:nf]
    #Looked at, but not detected objects (mag=99.)
    #We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
    #If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma; to avoid errors
    #with the sign we take the absolute value of dm
    f_obs = where(no_seen, 0., f_obs)
    ef_obs = where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
    #Objects not looked at (mag=-99.)
    f_obs = where(no_observed, 0., f_obs)
    ef_obs = where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(less(ef_obs,0.)): raise 'Negative input flux errors'
if less(ef_obs, 0.).any():
    raise ValueError('Negative input flux errors')
f_obs = where(less(f_obs, 0.), 0., f_obs) #Put non-detections to 0
# NOTE(review): the mask below is evaluated AFTER negative fluxes were just
# clipped to 0 on the previous line, so less(f_obs, 0.) is always False and
# this upper-limit adjustment never fires; the mask was presumably meant to
# be computed before the clipping — confirm against the reference BPZ.
ef_obs = where(
    less(f_obs, 0.), maximum(1e-100, f_obs + ef_obs),
    ef_obs) # Error equivalent to 1 sigma upper limit
#if sometrue(less(f_obs,0.)) : raise 'Negative input fluxes'
seen = greater(f_obs, 0.) * greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
    print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error(non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
ef_obs = where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = where(no_seen,
               sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
               ef_obs)
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = array(list(map(float, zp_offsets)))
zp_offsets = where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
    if cals[i] == 'Vega':
        const = mag2flux(VegatoAB(0., filters[i]))
        f_obs[:, i] = f_obs[:, i] * const
        ef_obs[:, i] = ef_obs[:, i] * const
    elif cals[i] == 'AB':
        continue
    else:
        print('AB or Vega?. Check ' + col_file + ' file')
        sys.exit()
#Get m_0 (if present), the reference magnitude used by the prior
if 'M_0' in col_pars.d:
    m_0_col = int(col_pars.d['M_0']) - 1
    m_0 = get_data(obs_file, m_0_col)
    m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string); fall back to 1-based row numbers
if 'ID' in col_pars.d:
    # print col_pars.d['ID']
    id_col = int(col_pars.d['ID']) - 1
    id = get_str(obs_file, id_col)
else:
    id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
    z_s_col = int(col_pars.d['Z_S']) - 1
    z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates
if 'X' in col_pars.d:
    datos = col_pars.d['X']
    if len(datos) == 1: # OTHERWISE IT'S A FILTER!
        x_col = int(col_pars.d['X']) - 1
        x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
    datos = col_pars.d['Y']
    if len(datos) == 1: # OTHERWISE IT'S A FILTER!
        y_col = int(datos) - 1
        y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
    # PHOTOMETRIC CALIBRATION CHECK
    #r=zeros((ng,nf),float)+1.
    #dm=zeros((ng,nf),float)+1.
    #w=r*0.
    # Defaults: r=1, dm=1, w=0 (flux ratio, magnitude offset, weight)
    frat = ones((ng, nf), float)
    dmag = ones((ng, nf), float)
    fw = zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
        'Plot colors vs spectroscopic redshifts?'):
    color_m = zeros((nz, nt, nf - 1)) * 1.
    if plots == 'pylab':
        figure(1)
    nrows = 2
    ncols = old_div((nf - 1), nrows)
    if (nf - 1) % nrows: ncols += 1
    # One panel per adjacent-filter color, data points plus template tracks
    for i in range(nf - 1):
        ##plot=FramedPlot()
        # Check for overflows
        fmu = f_obs[:, i + 1]
        fml = f_obs[:, i]
        good = greater(fml, 1e-100) * greater(fmu, 1e-100)
        zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
        colour = old_div(fmu, fml)
        colour = clip(colour, 1e-5, 1e5)
        colour = 2.5 * log10(colour)
        if plots == 'pylab':
            subplot(nrows, ncols, i + 1)
            plot(zz, colour, "bo")
        elif plots == 'biggles':
            d = Points(zz, colour, color='blue')
            plot.add(d)
        for it in range(nt):
            #Prevent overflows
            fmu = f_mod[:, it, i + 1]
            fml = f_mod[:, it, i]
            good = greater(fml, 1e-100)
            zz, fmu, fml = multicompress(good, (z, fmu, fml))
            colour = old_div(fmu, fml)
            colour = clip(colour, 1e-5, 1e5)
            colour = 2.5 * log10(colour)
            if plots == 'pylab':
                plot(zz, colour, "r")
            elif plots == 'biggles':
                d = Curve(zz, colour, color='red')
                plot.add(d)
        if plots == 'pylab':
            xlabel(r'$z$')
            ylabel('%s - %s' % (filters[i], filters[i + 1]))
        elif plots == 'biggles':
            plot.xlabel = r'$z$'
            plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
            plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
            plot.show()
    if plots == 'pylab':
        show()
        # Just pause for the user. The original eval(input(...)) would raise
        # a SyntaxError on an empty reply and would execute arbitrary input.
        input('Hit Enter to continue.')
#Get other information which will go in the output file (as strings)
if 'OTHER' in col_pars.d:
    if col_pars.d['OTHER'] != 'all':
        other_cols = col_pars.d['OTHER']
        if type(other_cols) == type((2, )):
            other_cols = tuple(map(int, other_cols))
        else:
            other_cols = (int(other_cols), )
        # Convert 1-based column numbers to 0-based indices
        other_cols = [x - 1 for x in other_cols]
        n_other = len(other_cols)
    else:
        n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
        other_cols = list(range(n_other))
    others = get_str(obs_file, other_cols)
    if len(other_cols) > 1:
        # Transpose the per-column string lists into one space-joined
        # string per object. The Python 2 string.join() (default separator
        # a single space) no longer exists; use the str method instead.
        other = []
        for j in range(len(others[0])):
            lista = []
            for i in range(len(others)):
                lista.append(others[i][j])
            other.append(' '.join(lista))
    else:
        other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file, backing up any pre-existing one first
out_name = pars.d['OUTPUT']
if get_z:
    if os.path.exists(out_name):
        os.system('cp %s %s.bak' % (out_name, out_name))
        print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
    output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
claves = list(pars.d.keys())
claves.sort()
for key in claves:
    if type(pars.d[key]) == type((1, )):
        # Tuple-valued parameters (from the command line) are recorded
        # comma-separated. The Python 2 string.join(seq, ',') is gone;
        # use the str method.
        cosa = ','.join(list(pars.d[key]))
    else:
        cosa = str(pars.d[key])
    if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
    #Shelve some info on the run
    full_probs = shelve.open(pars.d['PROBS'])
    full_probs['TIME'] = time_stamp
    full_probs['PARS'] = pars.d
if save_probs:
    probs = open(pars.d['PROBS_LITE'], 'w')
    probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
                (zmin, zmax + dz, dz))
if save_probs2:
    probs2 = open(pars.d['PROBS2'], 'w')
    probs2.write(
        '# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
    #probs2.write('# ID\n')
    #probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use an empirical prior?
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
    has_mags = 1
else:
    has_mags = 0
# The magnitude-based priors need m_0; 'none' and 'flat' need no data
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
    useprior = 1
#Add cluster 'spikes' to the prior?
cluster_prior = 0.
if pars.d['ZC']:
    cluster_prior = 1
    if type(pars.d['ZC']) == type(""): zc = array([float(pars.d['ZC'])])
    else: zc = array(list(map(float, pars.d['ZC'])))
    if type(pars.d['FC']) == type(""): fc = array([float(pars.d['FC'])])
    else: fc = array(list(map(float, pars.d['FC'])))
    fcc = add.reduce(fc)
    if fcc > 1.:
        # Print the offending total (the original printed the undefined
        # name `ftc`) and raise a real exception: string exceptions are
        # illegal in Python 3.
        print(fcc)
        raise ValueError('Too many galaxies in clusters!')
    pi_c = zeros((nz, nt)) * 1.
    #Go over the different cluster spikes
    for i in range(len(zc)):
        #We define the cluster within dz=0.01 limits
        cluster_range = less_equal(abs(z - zc[i]), .01) * 1.
        #Clip values to avoid overflow
        exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
        #Outside the cluster range g is 0
        g = exp(exponente) * cluster_range
        norm = add.reduce(g)
        pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
    #Go over the different types
    print('We only apply the cluster prior to the early type galaxies')
    for i in range(1, 3 + 2 * ninterp):
        pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format: one row per object, ID field sized to the first ID
format = '%' + repr(maximum(5, len(id[0]))) + 's' #ID format
format = format + pars.d[
    'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
if pars.d['N_PEAKS'] > 1:
    # One (Z_B, Z_B_MIN, Z_B_MAX, T_B, ODDS) group per requested peak
    for j in range(pars.d['N_PEAKS']):
        sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
                   k + 5, j + 1)
        k += 5
else:
    sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
    k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
if 'Z_S' in col_pars.d:
    sxhdr = sxhdr + '# %i Z_S\n' % nh
    format = format + ' %.3f'
    nh += 1
if has_mags:
    format = format + ' %.3f'
    sxhdr = sxhdr + '# %i M_0\n' % nh
    nh += 1
if 'OTHER' in col_pars.d:
    sxhdr = sxhdr + '# %i OTHER\n' % nh
    format = format + ' %s'
    nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
oi = inv_gauss_int(odds_i)
print(odds_i, oi)
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
    # Will Convolve with a dz=0.03 gaussian to make probabilities smoother
    # This is necessary; if not there are too many close peaks
    sigma_g = 0.03
    x = arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
               dz) # made symmetric --DC
    gaus = exp(-(old_div(x, sigma_g))**2)
# NMAX limits the number of objects processed (useful for testing)
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
for ig in range(ng):
currentPercent = ig / ng * 100
status = "{:.3f}% of {} completed.".format(currentPercent, ng)
Printer(status)
#Don't run BPZ on galaxies with have z_s > z_max
#if col_pars.d.has_key('Z_S'):
# if z_s[ig]<9.9 and z_s[ig]>zmax : continue
if not get_z: continue
if pars.d['COLOR'] == 'yes':
likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
else:
likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
if 0:
print(f_obs[ig, :nf])
print(ef_obs[ig, :nf])
iz_ml = likelihood.i_z_ml
t_ml = likelihood.i_t_ml
red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
#p=likelihood.Bayes_likelihood
#likelihood.various_plots()
#print 'FULL BAYESAIN LIKELIHOOD'
p = likelihood.likelihood
if not ig:
print('ML * prior -- NOT QUITE BAYESIAN')
if pars.d[
'ONLY_TYPE'] == 'yes': #Use only the redshift information, no priors
p_i = zeros((nz, nt)) * 1.
j = searchsorted(z, z_s[ig])
#print j,nt,z_s[ig]
try:
p_i[j, :] = old_div(1., float(nt))
except IndexError:
pass
else:
if useprior:
if pars.d['PRIOR'] == 'lensing':
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
else:
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
else:
p_i = old_div(ones((nz, nt), float), float(nz * nt))
if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
if save_full_probs:
full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
#Multiply the prior by the likelihood to find the final probability
pb = p_i[:nz, :nt] * p[:nz, :nt]
#plo=FramedPlot()
#for i in range(p.shape[1]):
# plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
#for i in range(p.shape[1]):
# plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
#plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
#plo.show()
#ask('More?')
#Convolve with a gaussian of width \sigma(1+z) to take into
#accout the intrinsic scatter in the redshift estimation 0.06*(1+z)
#(to be done)
#Estimate the bayesian quantities
p_bayes = add.reduce(pb[:nz, :nt], -1)
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
#Convolve with a gaussian
if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
#print 'GAUSS CONV'
p_bayes = convolve(p_bayes, gaus, 1)
#print 'gaus', gaus
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
# Eliminate all low level features in the prob. distribution
pmax = max(p_bayes)
p_bayes = where(
greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
norm = add.reduce(p_bayes)
p_bayes = old_div(p_bayes, norm)
if specprob:
p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
norma = add.reduce(p_spec[ig, :])
if norma == 0.: norma = 1.
p_spec[ig, :] /= norma
#vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
# int(float(other[ig]))])
vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
formato = "%s " + 5 * " %.4f"
formato += 5 * " %.3f"
#formato+=" %4f %i"
formato += "\n"
print(formato % vyjod)
specout.write(formato % vyjod)
if pars.d['N_PEAKS'] > 1:
# Identify maxima and minima in the final probability
g_max = less(p_bayes[2:], p_bayes[1:-1]) * less(p_bayes[:-2],
p_bayes[1:-1])
g_min = greater(p_bayes[2:], p_bayes[1:-1]) * greater(p_bayes[:-2],
p_bayes[1:-1])
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[2:], 0.)
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[:-2], 0.)
i_max = compress(g_max, arange(nz - 2)) + 1
i_min = compress(g_min, arange(nz - 2)) + 1
# Check that the first point and the last one are not minima or maxima,
# if they are, add them to the index arrays
if p_bayes[0] > p_bayes[1]:
i_max = concatenate([[0], i_max])
i_min = concatenate([[0], i_min])
if p_bayes[-1] > p_bayes[-2]:
i_max = concatenate([i_max, [nz - 1]])
i_min = concatenate([i_min, [nz - 1]])
if p_bayes[0] < p_bayes[1]:
i_min = concatenate([[0], i_min])
if p_bayes[-1] < p_bayes[-2]:
i_min = concatenate([i_min, [nz - 1]])
p_max = take(p_bayes, i_max)
#p_min=take(p_bayes,i_min)
p_tot = []
z_peaks = []
t_peaks = []
# Sort them by probability values
p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
# For each maximum, define the minima which sandwich it
# Assign minima to each maximum
jm = searchsorted(i_min, i_max)
p_max = list(p_max)
for i in range(len(i_max)):
z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
t_peaks.append(argmax(pb[i_max[i], :nt]))
p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
# print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
if ninterp:
t_peaks = list(old_div(array(t_peaks), (1. + ninterp)))
if pars.d['MERGE_PEAKS'] == 'yes':
# Merge peaks which are very close 0.03(1+z)
merged = []
for k in range(len(z_peaks)):
for j in range(len(z_peaks)):
if j > k and k not in merged and j not in merged:
if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
1. + z_peaks[j][0]):
# Modify the element which receives the accretion
z_peaks[k][1] = minimum(z_peaks[k][1],
z_peaks[j][1])
z_peaks[k][2] = maximum(z_peaks[k][2],
z_peaks[j][2])
p_tot[k] += p_tot[j]
# Put the merged element in the list
merged.append(j)
#print merged
# Clean up
copia = p_tot[:]
for j in merged:
p_tot.remove(copia[j])
copia = z_peaks[:]
for j in merged:
z_peaks.remove(copia[j])
copia = t_peaks[:]
for j in merged:
t_peaks.remove(copia[j])
copia = p_max[:]
for j in merged:
p_max.remove(copia[j])
if sum(array(p_tot)) != 1.:
p_tot = old_div(array(p_tot), sum(array(p_tot)))
# Define the peak
iz_b = argmax(p_bayes)
zb = z[iz_b]
# OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
# if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
# else: zb=zb-dz #This corrects another small bias --DC
#Integrate within a ~ oi*sigma interval to estimate
# the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
#Look for the number of sigma corresponding
#to the odds_i confidence limit
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if pars.d['Z_THR'] > 0:
zo1 = float(pars.d['Z_THR'])
zo2 = float(pars.d['ZMAX'])
o = odds(p_bayes[:nz], z, zo1, zo2)
# Integrate within the same odds interval to find the type
# izo1=maximum(0,searchsorted(z,zo1)-1)
# izo2=minimum(nz,searchsorted(z,zo2))
# t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
it_b = argmax(pb[iz_b, :nt])
t_b = it_b + 1
if ninterp:
tt_b = old_div(float(it_b), (1. + ninterp))
tt_ml = old_div(float(t_ml), (1. + ninterp))
else:
tt_b = it_b
tt_ml = t_ml
if max(pb[iz_b, :]) < 1e-300:
print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
t_b = -1.
tt_b = -1.
#print it_b, t_b, tt_b, pb.shape
if 0:
print(f_mod[iz_b, it_b, :nf])
print(min(ravel(p_i)), max(ravel(p_i)))
print(min(ravel(p)), max(ravel(p)))
print(p_i[iz_b, :])
print(p[iz_b, :])
print(p_i[iz_b, it_b]) # prior
print(p[iz_b, it_b]) # chisq
print(likelihood.likelihood[iz_b, it_b])
print(likelihood.chi2[iz_b, it_b])
print(likelihood.ftt[iz_b, it_b])
print(likelihood.foo)
print()
print('t_b', t_b)
print('iz_b', iz_b)
print('nt', nt)
print(max(ravel(pb)))
impb = argmax(ravel(pb))
impbz = old_div(impb, nt)
impbt = impb % nt
print(impb, impbz, impbt)
print(ravel(pb)[impb])
print(pb.shape, (nz, nt))
print(pb[impbz, impbt])
print(pb[iz_b, it_b])
print('z, t', z[impbz], t_b)
print(t_b)
# Redshift confidence limits
z1, z2 = interval(p_bayes[:nz], z, odds_i)
if pars.d['PHOTO_ERRORS'] == 'no':
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if zo1 < z1: z1 = maximum(0., zo1)
if zo2 > z2: z2 = zo2
# Print output
if pars.d['N_PEAKS'] == 1:
salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
red_chi2]
else:
salida = [id[ig]]
for k in range(pars.d['N_PEAKS']):
if k <= len(p_tot) - 1:
salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
else:
salida += [-1., -1., -1., -1., -1.]
salida += [z[iz_ml], tt_ml + 1, red_chi2]
if 'Z_S' in col_pars.d: salida.append(z_s[ig])
if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
if 'OTHER' in col_pars.d: salida.append(other[ig])
if get_z: output.write(format % tuple(salida) + '\n')
if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
#try:
# if sometrue(greater(z_peaks,7.5)):
# connect(z,p_bayes)
# ask('More?')
#except:
# pass
odd_check = odds_i
if checkSED:
ft = f_mod[iz_b, it_b, :]
fo = f_obs[ig, :]
efo = ef_obs[ig, :]
dfosq = (old_div((ft - fo), efo))**2
if 0:
print(ft)
print(fo)
print(efo)
print(dfosq)
pause()
factor = ft / efo / efo
ftt = add.reduce(ft * factor)
fot = add.reduce(fo * factor)
am = old_div(fot, ftt)
ft = ft * am
if 0:
print(factor)
print(ftt)
print(fot)
print(am)
print(ft)
print()
pause()
flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
concatenate([ft, fo, efo]))
nfc = len(flux_comparison)
format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
flux_comparison)
if o >= odd_check:
# PHOTOMETRIC CALIBRATION CHECK
# Calculate flux ratios, but only for objects with ODDS >= odd_check
# (odd_check = 0.95 by default)
# otherwise, leave weight w = 0 by default
eps = 1e-10
frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
#fw[ig,:] = greater(fo, 0)
fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
fw[ig, :] = clip(fw[ig, :], 0, 100)
#print fw[ig,:]
#print
if 0:
bad = less_equal(ft, 0.)
#Avoid overflow by setting r to 0.
fo = where(bad, 0., fo)
ft = where(bad, 1., ft)
r[ig, :] = old_div(fo, ft)
try:
dm[ig, :] = -flux2mag(old_div(fo, ft))
except:
dm[ig, :] = -100
# Clip ratio between 0.01 & 100
r[ig, :] = where(greater(r[ig, :], 100.), 100., r[ig, :])
r[ig, :] = where(less_equal(r[ig, :], 0.), 0.01, r[ig, :])
#Weight by flux
w[ig, :] = where(greater(fo, 0.), 1, 0.)
#w[ig,:]=where(greater(fo,0.),fo,0.)
#print fo
#print r[ig,:]
#print
# This is no good becasue r is always > 0 (has been clipped that way)
#w[ig,:]=where(greater(r[ig,:],0.),fo,0.)
# The is bad because it would include non-detections:
#w[ig,:]=where(greater(r[ig,:],0.),1.,0.)
if save_probs:
texto = '%s ' % str(id[ig])
texto += len(p_bayes) * '%.3e ' + '\n'
probs.write(texto % tuple(p_bayes))
# pb[z,t] -> p_bayes[z]
# 1. tb are summed over
# 2. convolved with Gaussian if CONVOLVE_P
# 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
# 4. normalized such that sum(P(z)) = 1
if save_probs2: # P = exp(-chisq / 2)
#probs2.write('%s\n' % id[ig])
pmin = pmax * float(pars.d['P_MIN'])
#pb = where(less(pb,pmin), 0, pb)
chisq = -2 * log(pb)
for itb in range(nt):
chisqtb = chisq[:, itb]
pqual = greater(pb[:, itb], pmin)
chisqlists = seglist(chisqtb, pqual)
if len(chisqlists) == 0:
continue
#print pb[:,itb]
#print chisqlists
zz = arange(zmin, zmax + dz, dz)
zlists = seglist(zz, pqual)
for i in range(len(zlists)):
probs2.write('%s %2d %.3f ' %
(id[ig], itb + 1, zlists[i][0]))
fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
probs2.write(fmt % tuple(chisqlists[i]))
#fmt = len(chisqtb) * '%4.2f '+'\n'
#probs2.write('%d ' % itb)
#probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
if checkSED:
#try:
if 1:
if interactive:
print("")
print("")
print("PHOTOMETRIC CALIBRATION TESTS")
# See PHOTOMETRIC CALIBRATION CHECK above
#ratios=add.reduce(w*r,0)/add.reduce(w,0)
#print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
#print len(filters)*' %s' % tuple(filters)
#print nf*' % 7.3f ' % tuple(ratios)
#print "Corresponding zero point shifts"
#print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
#print
fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
dmavg = -flux2mag(fratavg)
fnobj = sum(greater(fw, 0), axis=0)
#print 'fratavg', fratavg
#print 'dmavg', dmavg
#print 'fnobj', fnobj
#fnobj = sum(greater(w[:,i],0))
print(
"If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
print(
"(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
print()
print(' fo/ft dmag nobj filter')
#print nf
for i in range(nf):
print('% 7.3f % 7.3f %5d %s'\
% (fratavg[i], dmavg[i], fnobj[i], filters[i]))
#% (ratios[i], -flux2mag(ratios)[i], sum(greater(w[:,i],0)), filters[i])
#print ' fo/ft dmag filter'
#for i in range(nf):
# print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
print(
"fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
% odd_check)
print(
"dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
print(
"nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
% odd_check)
# print r
# print w
#print
#print "Number of galaxies considered (with ODDS >= %g):" % odd_check
#print ' ', sum(greater(w,0)) / float(nf)
#print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
#print sum(greater(w,0))
#This part is experimental and may not work in the general case
#print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
#print len(filters)*' %s' % tuple(filters)
#r=flux2mag(r)
#print nf*' %.3f ' % tuple(-median(r))
#print nf*' %.3f ' % tuple(median(dm))
#rms=[]
#efobs=[]
#for j in range(nf):
# ee=where(greater(f_obs[:,j],0.),f_obs[:,j],2.)
# zz=e_frac2mag(ef_obs[:,j]/ee)
#
# xer=arange(0.,1.,.02)
# hr=hist(abs(r[:,j]),xer)
# hee=hist(zz,xer)
# rms.append(std_log(compress(less_equal(r[:,j],1.),r[:,j])))
# zz=compress(less_equal(zz,1.),zz)
# efobs.append(sqrt(mean(zz*zz)))
#print nf*' %.3f ' % tuple(rms)
#print nf*' %.3f ' % tuple(efobs)
#print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
#except: pass
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= less_equal(tb, t_max) * greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
arange(
min(zso), max(zso) + 0.01, 0.01), arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
file = eval(input('File name?\n'))
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
|
boada/planckClusters
|
MOSAICpipe/bpz-1.99.3/bpz.py
|
Python
|
mit
| 52,171
|
[
"Galaxy",
"Gaussian"
] |
d945d9addff2a74caf50413042a561f855e57b39b69c8395c6352fabe32a23b0
|
__author__ = 'chris'
import pkg_resources # part of setuptools
import argparse
from pythomics.proteomics import config
# Version string comes from the installed package metadata so --help and logs
# always reflect the deployed build (raises DistributionNotFound if pyquant-ms
# is not installed).
version = pkg_resources.require('pyquant-ms')[0].version
description = """
This will quantify labeled peaks (such as SILAC) in ms1 spectra. It relies solely on the distance between peaks,
which can correct for errors due to amino acid conversions.
"""
# Symbolic names for CLI choice values used below; kept as module constants so
# other modules can compare parsed options against them instead of repeating
# string literals.
PEAK_RESOLUTION_RT_MODE = 'rt'
PEAK_RESOLUTION_COMMON_MODE = 'common-peak'
PEAK_FINDING_REL_MAX = 'relative-max'
PEAK_FINDING_DERIVATIVE = 'derivative'
PEAK_FIT_MODE_FAST = 'fast'
PEAK_FIT_MODE_AVERAGE = 'average'
PEAK_FIT_MODE_SLOW = 'slow'
# Top-level CLI parser. ArgumentDefaultsHelpFormatter appends each option's
# default value to its help text automatically.
pyquant_parser = argparse.ArgumentParser(prog='PyQuant v{}'.format(version), description=description, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
pyquant_parser.add_argument('-p', help="Threads to run", type=int, default=1)
# Hidden option (help suppressed from --help output).
pyquant_parser.add_argument('--theo-xic', help=argparse.SUPPRESS, action='store_true')
# --- Raw data inputs --------------------------------------------------------
raw_group = pyquant_parser.add_argument_group("Raw Data Parameters")
raw_group.add_argument('--scan-file', help="The scan file(s) for the raw data. If not provided, assumed to be in the directory of the processed/tabbed/peaklist file.", type=argparse.FileType('r'), nargs='*')
raw_group.add_argument('--scan-file-dir', help="The directory containing raw data.", type=str)
raw_group.add_argument('--precision', help="The precision for storing m/z values. Defaults to 6 decimal places.", type=int, default=6)
raw_group.add_argument('--precursor-ppm', help="The mass accuracy for the first monoisotopic peak in ppm.", type=float, default=5)
raw_group.add_argument('--isotope-ppm', help="The mass accuracy for the isotopic cluster.", type=float, default=2.5)
raw_group.add_argument('--spread', help="Assume there is spread of the isotopic label.", action='store_true')
# --- Search / identification inputs -----------------------------------------
search_group = pyquant_parser.add_argument_group("Search Information")
# 'rb' because .msf files are SQLite databases (binary).
search_group.add_argument('--search-file', help='A search output or Proteome Discoverer msf file', type=argparse.FileType('rb'), required=False)
search_group.add_argument('--skip', help="If true, skip scans with missing files in the mapping.", action='store_true')
search_group.add_argument('--peptide', help="The peptide(s) to limit quantification to.", type=str, nargs='*')
search_group.add_argument('--peptide-file', help="A file of peptide(s) to limit quantification to.", type=argparse.FileType('r'))
search_group.add_argument('--scan', help="The scan(s) to limit quantification to.", type=str, nargs='*')
# --- Missing value analysis -------------------------------------------------
replicate_group = pyquant_parser.add_argument_group("Missing Value Analysis")
replicate_group.add_argument('--mva', help="Analyze files in 'missing value' mode.", action='store_true')
replicate_group.add_argument('--rt-window', help="The maximal deviation of a scan's retention time to be considered for analysis.", default=0.25, type=float)
# --- Labeling configuration -------------------------------------------------
label_group = pyquant_parser.add_argument_group("Labeling Information")
# A custom scheme file and a predefined scheme are mutually exclusive.
label_subgroup = label_group.add_mutually_exclusive_group()
label_subgroup.add_argument('--label-scheme', help='The file corresponding to the labeling scheme utilized.', type=argparse.FileType('r'))
# sorted() keeps the rendered choice list deterministic across runs.
label_subgroup.add_argument('--label-method', help='Predefined labeling schemes to use.', type=str, choices=sorted(config.LABEL_SCHEMES.keys()))
label_group.add_argument('--reference-label', help='The label to use as a reference (by default all comparisons are taken).', type=str)
# --- Tab-delimited input: column-name mapping -------------------------------
# Defaults match MaxQuant-style column headers.
tsv_group = pyquant_parser.add_argument_group('Tabbed File Input')
tsv_group.add_argument('--tsv', help='A delimited file containing scan information.', type=argparse.FileType('r'))
tsv_group.add_argument('--label', help='The column indicating the label state of the peptide. If not found, entry assumed to be light variant.', default='Labeling State')
tsv_group.add_argument('--peptide-col', help='The column indicating the peptide.', default='Peptide')
tsv_group.add_argument('--rt', help='The column indicating the retention time.', default='Retention time')
tsv_group.add_argument('--mz', help='The column indicating the MZ value of the precursor ion. This is not the MH+.', default='Light Precursor')
tsv_group.add_argument('--scan-col', help='The column indicating the scan corresponding to the ion.', default='MS2 Spectrum ID')
tsv_group.add_argument('--charge', help='The column indicating the charge state of the ion.', default='Charge')
tsv_group.add_argument('--source', help='The column indicating the raw file the scan is contained in.', default='Raw file')
# --- Targeted ion search ----------------------------------------------------
ion_search_group = pyquant_parser.add_argument_group('Targetted Ion Search Parameters')
ion_search_group.add_argument('--msn-id', help='The ms level to search for the ion in. Default: 2 (ms2)', type=int, default=2)
ion_search_group.add_argument('--msn-quant-from', help='The ms level to quantify values from. i.e. if we are identifying an ion in ms2, we can quantify it in ms1 (or ms2). Default: msn value-1', type=int, default=None)
ion_search_group.add_argument('--msn-ion', help='M/Z values to search for in the scans. To search for multiple m/z values for a given ion, separate m/z values with a comma.', nargs='+', type=str)
ion_search_group.add_argument('--msn-ion-rt', help='RT values each ion is expected at.', nargs='+', type=float)
ion_search_group.add_argument('--msn-peaklist', help='A file containing peaks to search for in the scans.', type=argparse.FileType('rb'))
ion_search_group.add_argument('--msn-ppm', help='The error tolerance for identifying the ion(s).', type=float, default=200)
ion_search_group.add_argument('--msn-rt-window', help='The range of retention times for identifying the ion(s). (ex: 7.54-9.43)', type=str, nargs='+')
ion_search_group.add_argument('--msn-all-scans', help='Search for the ion across all scans (ie if you have 3 ions, you will have 3 results with one long XIC)', action='store_true')
ion_search_group.add_argument('--require-all-ions', help='If multiple ions are set (in the style of 93.15,105.15), all ions must be found in a scan.', action='store_true')
# --- Quantification behaviour -----------------------------------------------
quant_parameters = pyquant_parser.add_argument_group('Quantification Parameters')
# default=None lets downstream code pick integrate-for-ms1 / sum-for-msn.
quant_parameters.add_argument('--quant-method', help='The process to use for quantification. Default: Integrate for ms1, sum for ms2+.', choices=['integrate', 'sum'], default=None)
quant_parameters.add_argument('--reporter-ion', help='Indicates that reporter ions are being used. As such, we only analyze a single scan.', action='store_true')
# -1 sentinels below mean "unlimited" / "disabled".
quant_parameters.add_argument('--isotopologue-limit', help='How many isotopologues to quantify', type=int, default=-1)
quant_parameters.add_argument('--overlapping-labels', help='This declares the mz values of labels will overlap. It is useful for data such as neucode, but not needed for only SILAC labeling.', action='store_true')
quant_parameters.add_argument('--labels-needed', help='How many labels need to be detected to quantify a scan (ie if you have a 2 state experiment and set this to 2, it will only quantify scans where both occur.', default=1, type=int)
quant_parameters.add_argument('--merge-labels', help='Merge labels together to a single XIC.', action='store_true')
quant_parameters.add_argument('--min-scans', help='How many quantification scans are needed to quantify a scan.', default=1, type=int)
quant_parameters.add_argument('--min-resolution', help='The minimal resolving power of a scan to consider for quantification. Useful for skipping low-res scans', default=0, type=float)
quant_parameters.add_argument('--no-mass-accuracy-correction', help='Disables the mass accuracy correction.', action='store_true')
quant_parameters.add_argument('--no-contaminant-detection', help='Disables routine to check if an ion is a contaminant of a nearby peptide (checks if its a likely isotopologue).', action='store_true')
# --- Peak fitting -----------------------------------------------------------
peak_parameters = pyquant_parser.add_argument_group('Peak Fitting Parameters')
peak_parameters.add_argument('--peak-find-method', help='The method to use to identify peaks within data. For LC-MS, relative-max is usually best. For smooth data, derivative is better.', type=str, choices=(PEAK_FINDING_REL_MAX, PEAK_FINDING_DERIVATIVE), default=PEAK_FINDING_REL_MAX)
peak_parameters.add_argument(
    '--peak-find-mode',
    help='This picks some predefined parameters for various use cases. Fast is good for robust data with few peaks, slow is good for complex data with overlapping peaks of very different size.',
    type=str,
    choices=(PEAK_FIT_MODE_SLOW, PEAK_FIT_MODE_AVERAGE, PEAK_FIT_MODE_FAST),
    default=PEAK_FIT_MODE_AVERAGE
)
peak_parameters.add_argument('--gap-interpolation', help='This interpolates missing data in scans. The parameter should be a number that is the maximal gap size to fill (ie 2 means a gap of 2 scans). Can be useful for low intensity LC-MS data.', type=int, default=0)
peak_parameters.add_argument('--remove-baseline', help='Fit a separate line for the baseline of each peak.', action='store_true')
peak_parameters.add_argument('--peak-cutoff', help='The threshold from the initial retention time a peak can fall by before being discarded', type=float, default=0.05)
peak_parameters.add_argument('--max-peaks', help='The maximal number of peaks to detect per scan. A lower value can help with very noisy data.', type=int, default=-1)
peak_parameters.add_argument('--peaks-n', help='The number of peaks to report per scan. Useful for ions with multiple elution times.', type=int, default=1)
peak_parameters.add_argument('--no-rt-guide', help='Do not use the retention time to bias for peaks containing the MS trigger time.', action='store_true')
# For the filters below, 0 means "filter disabled".
peak_parameters.add_argument('--snr-filter', help='Filter peaks below a given SNR.', type=float, default=0)
peak_parameters.add_argument('--zscore-filter', help='Peaks below a given z-score are excluded.', type=float, default=0)
peak_parameters.add_argument('--filter-width', help='The window size for snr/zscore filtering. Default: entire scan', type=float, default=0)
peak_parameters.add_argument('--r2-cutoff', help='The minimal R^2 for a peak to be kept. Should be a value between 0 and 1', type=float, default=None)
peak_parameters.add_argument('--intensity-filter', help='Filter peaks whose peak are below a given intensity.', type=float, default=0)
peak_parameters.add_argument('--percentile-filter', help='Filter peaks whose peak are below a given percentile of the data.', type=float, default=0)
peak_parameters.add_argument('--min-peak-separation', help='Peaks separated by less than this distance will be combined. For very crisp data, set this to a lower number. (minimal value is 1)', type=int, default=5)
peak_parameters.add_argument('--disable-peak-filtering', help='This will disable smoothing of data prior to peak finding. If you have very good LC, this may be used to identify small peaks.', action='store_true')
peak_parameters.add_argument('--merge-isotopes', help='Merge Isotopologues together prior to fitting.', action='store_true')
# Use the module constant for the default (identical value 'common-peak') so it
# cannot drift out of sync with the choices tuple.
peak_parameters.add_argument('--peak-resolution-mode', help='The method to use to resolve peaks across multiple XICs', choices=(PEAK_RESOLUTION_RT_MODE, PEAK_RESOLUTION_COMMON_MODE), type=str, default=PEAK_RESOLUTION_COMMON_MODE)
# --- XIC (extracted ion chromatogram) construction --------------------------
xic_parameters = pyquant_parser.add_argument_group('XIC Options')
xic_parameters.add_argument('--xic-snr', help='When the SNR of the XIC falls below this, stop searching for more data. Useful for escaping from noisy shoulders and contaminants.', type=float, default=1.0)
xic_parameters.add_argument('--xic-missing-ion-count', help='This specifies how many consequtive scans an ion can be missing for until it is no longer considered.', type=int, default=1)
xic_parameters.add_argument('--xic-window-size', help='When the number of scans in a given direction from the initial datapoint of an XIC passes this, stop. Default is -1 (disabled). Useful for removing contaminants', type=int, default=-1)
xic_parameters.add_argument('--xic-smooth', help='Prior to fitting, smooth data with a Gaussian filter.', action='store_true')
# NOTE(review): action='store_false' means the default is True and passing
# --export-msn *disables* the export, which contradicts the help text --
# confirm the intended polarity before relying on this flag.
xic_parameters.add_argument('--export-msn', help='This will export spectra of a given MSN that were used to provide the quantification.', action='store_false')
# --- SRM/MRM (hidden) -------------------------------------------------------
mrm_parameters = pyquant_parser.add_argument_group('SRM/MRM Parameters')
#'A file indicating light and heavy peptide pairs, and optionally the known elution time.'
mrm_parameters.add_argument('--mrm-map', help=argparse.SUPPRESS, type=argparse.FileType('r'))
# --- Result output ----------------------------------------------------------
output_group = pyquant_parser.add_argument_group("Output Options")
output_group.add_argument('--debug', help="This will output debug information.", action='store_true')
output_group.add_argument('--html', help="Output a HTML table summary.", action='store_true')
output_group.add_argument('--resume', help="Will resume from the last run. Only works if not directing output to stdout.", action='store_true')
output_group.add_argument('--sample', help="How much of the data to sample. Enter as a decimal (ie 1.0 for everything, 0.1 for 10%%)", type=float, default=1.0)
output_group.add_argument('--disable-stats', help="Disable confidence statistics on data.", action='store_true')
output_group.add_argument('--no-ratios', help="Disable reporting of ratios in output.", action='store_true')
output_group.add_argument('-o', '--out', nargs='?', help='The prefix for the file output', type=str)
# Valid values for --export-mode below.
PER_PEAK = 'per-peak'
PER_FILE = 'per-file'
PER_ID = 'per-id'
# --- Spectra export ---------------------------------------------------------
spectra_output = pyquant_parser.add_argument_group("Spectra Output Options")
spectra_output.add_argument('--export-mzml', help='Create an mzml file of spectra contained within each peak.', action='store_true')
# choices as an ordered tuple (a set renders in nondeterministic order in
# --help) and the default expressed via the PER_PEAK constant (same value).
spectra_output.add_argument('--export-mode', help='How to export the scans. per-peak: A mzML per peak identified. per-id: A mzML per ion identified (each row of the output gets an mzML). per-file: All scans matched per raw file.', type=str, default=PER_PEAK, choices=(PER_PEAK, PER_ID, PER_FILE))
# --- Convenience presets (each toggles a bundle of the above parameters) ----
convenience_group = pyquant_parser.add_argument_group('Convenience Parameters')
convenience_group.add_argument('--neucode', help='This will select parameters specific for neucode. Note: You still must define a labeling scheme.', action='store_true')
convenience_group.add_argument('--isobaric-tags', help='This will select parameters specific for isobaric tag based labeling (TMT/iTRAQ).', action='store_true')
convenience_group.add_argument('--ms3', help='This will select parameters specific for ms3 based quantification.', action='store_true')
convenience_group.add_argument('--maxquant', help='This will select parameters specific for a MaxQuant evidence file.', action='store_true')
convenience_group.add_argument('--gcms', help='This will select parameters specific for ion identification and quantification in GCMS experiments.', action='store_true')
#'This will select parameters specific for Selective/Multiple Reaction Monitoring (SRM/MRM).'
convenience_group.add_argument('--mrm', help=argparse.SUPPRESS, action='store_true')
|
pandeylab/pyquant
|
pyquant/__init__.py
|
Python
|
mit
| 14,839
|
[
"Gaussian"
] |
2ee38c173c9aad2cb41326e12423029f3e2356a8ff58fad2599a2914d82bad33
|
"""
Physical units and dimensions.
The base class is Unit, where all here defined units (~200) inherit from.
"""
from sympy import Rational, pi
from sympy.core.basic import Basic, Atom
class Unit(Atom):
    """
    Base class for all physical units.

    Create own units like:
    m = Unit("meter", "m")
    """
    is_positive = True    # make (m**2)**Rational(1,2) --> m
    is_commutative = True

    __slots__ = ["name", "abbrev"]

    def __new__(cls, name, abbrev, **assumptions):
        obj = Basic.__new__(cls, **assumptions)
        # repr(...) replaces the Python 2-only backtick syntax (`x`), which
        # is a SyntaxError on Python 3.
        assert isinstance(name, str), repr(type(name))
        assert isinstance(abbrev, str), repr(type(abbrev))
        obj.name = name
        obj.abbrev = abbrev
        return obj

    def __getnewargs__(self):
        # Support pickling: rebuild the instance from (name, abbrev).
        return (self.name, self.abbrev)

    def __eq__(self, other):
        # Units compare equal by name only; abbreviation is ignored.
        return isinstance(other, Unit) and self.name == other.name

    def _hashable_content(self):
        return (self.name, self.abbrev)
# Delete this so it doesn't pollute the namespace: only Unit and the unit
# instances defined below are meant to be importable from this module.
del Atom
def defunit(value, *names):
    """Register *value* in this module's namespace under every alias in *names*.

    Used below so each unit can be referred to by its symbol, singular and
    plural name (e.g. ``m``, ``meter``, ``meters``).
    """
    namespace = globals()
    for alias in names:
        namespace[alias] = value
# Dimensionless
percent = percents = Rational(1, 100)
# NOTE: was "permille = permille = ..." — a duplicate-name typo; the second
# binding was clearly meant to be the plural alias, mirroring percent/percents.
permille = permilles = Rational(1, 1000)
# Exact base-10 magnitude used for all SI prefixes below.
ten = Rational(10)
yotta = ten**24
zetta = ten**21
exa = ten**18
peta = ten**15
tera = ten**12
giga = ten**9
mega = ten**6
kilo = ten**3
deca = ten**1
deci = ten**-1
centi = ten**-2
milli = ten**-3
micro = ten**-6
nano = ten**-9
pico = ten**-12
femto = ten**-15
atto = ten**-18
zepto = ten**-21
yocto = ten**-24
# Angles are dimensionless: radian is the unit 1, degree an exact pi/180.
rad = radian = radians = 1
deg = degree = degrees = pi/180
# Base units
# defunit() binds each Unit instance to every alias listed, so e.g. `m`,
# `meter` and `meters` all name the same object.
defunit(Unit('meter', 'm'), 'm', 'meter', 'meters')
defunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')
defunit(Unit('second', 's'), 's', 'second', 'seconds')
defunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')
defunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')
defunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')
defunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')
# Derived units
# These expressions rely on the base-unit names registered just above
# (m, kg, s, A, ...) and on the earlier defunit results (N, J, V, ...).
defunit(1/s, 'Hz', 'hz', 'hertz')
defunit(m*kg/s**2, 'N', 'newton', 'newtons')
defunit(N*m, 'J', 'joule', 'joules')
defunit(J/s, 'W', 'watt', 'watts')
defunit(N/m**2, 'Pa', 'pa', 'pascal', 'pascals')
defunit(s*A, 'C', 'coulomb', 'coulombs')
# NOTE(review): the lowercase alias 'v' is also bound for volt here —
# unusual next to the uppercase 'V'; confirm it is intended.
defunit(W/A, 'v', 'V', 'volt', 'volts')
defunit(V/A, 'ohm', 'ohms')
defunit(A/V, 'S', 'siemens', 'mho', 'mhos')
defunit(C/V, 'F', 'farad', 'farads')
defunit(J/A, 'Wb', 'wb', 'weber', 'webers')
defunit(V*s/m**2, 'T', 'tesla', 'teslas')
defunit(V*s/A, 'H', 'henry', 'henrys')
# Common length units
defunit(kilo*m, 'km', 'kilometer', 'kilometers')
defunit(deci*m, 'dm', 'decimeter', 'decimeters')
defunit(centi*m, 'cm', 'centimeter', 'centimeters')
defunit(milli*m, 'mm', 'millimeter', 'millimeters')
defunit(micro*m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')
defunit(nano*m, 'nm', 'nanometer', 'nanometers')
defunit(pico*m, 'pm', 'picometer', 'picometers')
# Imperial lengths, defined exactly in terms of the meter.
defunit(Rational('0.3048')*m, 'ft', 'foot', 'feet')
defunit(Rational('25.4')*mm, 'inch', 'inches')
defunit(3*ft, 'yd', 'yard', 'yards')
defunit(5280*ft, 'mi', 'mile', 'miles')
# Common volume and area units
defunit(m**3 / 1000, 'l', 'liter', 'liters')
defunit(deci*l, 'dl', 'deciliter', 'deciliters')
defunit(centi*l, 'cl', 'centiliter', 'centiliters')
defunit(milli*l, 'ml', 'milliliter', 'milliliters')
# Common time units
defunit(milli*s, 'ms', 'millisecond', 'milliseconds')
defunit(micro*s, 'us', 'microsecond', 'microseconds')
defunit(nano*s, 'ns', 'nanosecond', 'nanoseconds')
defunit(pico*s, 'ps', 'picosecond', 'picoseconds')
defunit(60*s, 'minute', 'minutes')
defunit(60*minute, 'h', 'hour', 'hours')
defunit(24*hour, 'day', 'days')
defunit(Rational('31558149.540')*s, 'sidereal_year', 'sidereal_years')
defunit(Rational('365.24219')*day, 'tropical_year', 'tropical_years')
defunit(Rational('365')*day, 'common_year', 'common_years')
defunit(Rational('365.25')*day, 'julian_year', 'julian_years')
# The unqualified "year" is the tropical year.
year = years = tropical_year
# Common mass units
defunit(kilogram / kilo, 'g', 'gram', 'grams')
defunit(milli * g, 'mg', 'milligram', 'milligrams')
defunit(micro * g, 'ug', 'microgram', 'micrograms')
#----------------------------------------------------------------------------
# Physical constants
#
# NOTE(review): numeric values appear to be CODATA-era recommended values;
# verify precision before relying on them for exact computation.
c = speed_of_light = 299792458 * m/s
G = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2
u0 = magnetic_constant = 4*pi * ten**-7 * N/A**2
e0 = electric_constant = 1/(u0 * c**2)
Z0 = vacuum_impedance = u0 * c
planck = Rational('6.62606896') * ten**-34 * J*s
hbar = planck / (2*pi)
avogadro = (Rational('6.02214179') * 10**23) / mol
boltzmann = Rational('1.3806505') * ten**-23 * J / K
gee = gees = Rational('9.80665') * m/s**2
atmosphere = atmospheres = atm = 101325 * pascal
# Other convenient units and magnitudes
defunit(c*julian_year, 'ly', 'lightyear', 'lightyears')
defunit(149597870691*m, 'au', 'astronomical_unit', 'astronomical_units')
# Delete this so it doesn't pollute the namespace
del Rational, pi
|
hazelnusse/sympy-old
|
sympy/physics/units.py
|
Python
|
bsd-3-clause
| 5,011
|
[
"Avogadro"
] |
881ed2267d51a74eca85ca354c2690808928db74b185187581e266daac765d87
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, The WGS-HGT Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
# Implement Distance Method for HGT detection based on algorithm described
# in:
# Wei. X et al., "A Distance-Based Method for Detecting HGT in Whole
# Genomes", International Symposium on Bioinformatics Research and
# Applications (ISBRA), 2008, pages 26-37
#
# The workflow follows the algorithm:
# 1. For each gene in target genome,
# i. BLAST sequence against all other genes in the reference
# genomes;
# ii. Go to step 3 if gene has more than threshold number of homologs
# (min-num-homologs), otherwise go to next gene in target genome;
# iii. Compute multiple sequence alignment on homolog genes using
# CLUSTAL;
# iv. Compute pairwise distance matrix using PHYLIP's protdist
# function and Z-score normalize the set of pairwise distances
# for each gene family and species;
# v. Add distance matrix for all pairwise distances into global
# distance matrix storing results for all genes
#
# 2. Cluster gene families by species,
# vi. Compute all species sets (sets of genes whose orthologs are
# detectable in exactly the same subset of the considered
# species);
# vii. Cluster genes to each core species set cluster using the
# Hamming distance clustering algorithm;
# viii. Run outlier detection algorithm on each cluster (paragraph 2
# of section 'Detecting Outlier Genes' in original paper)
#
# Requires protdist version 3.696
#
import sys
import click
import numpy
import operator
import threading
import subprocess
import traceback
import shlex
from os.path import join, basename, isdir, exists, getsize
from os import mkdir
from glob import glob
import skbio.io
class Command(object):
    """Run subprocess commands in a different thread with TIMEOUT option.

    Based on jcollado's solution:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    https://gist.github.com/kirpit/1306188
    """
    # Class-level defaults; run() assigns instance attributes over them.
    process = None
    status = None
    output, error = '', ''

    def __init__(self, command):
        # Accept either a shell-style string (split via shlex) or an argv list.
        if isinstance(command, str):
            command = shlex.split(command)
        self.command = command

    def run(self, timeout=None, **kwargs):
        """ Run a command then return: (status, output, error). """
        def target(**kwargs):
            try:
                self.process = subprocess.Popen(self.command, **kwargs)
                self.output, self.error = self.process.communicate()
                self.status = self.process.returncode
            # was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt raised inside the worker thread
            except Exception:
                self.error = traceback.format_exc()
                self.status = -1
        # default stdout and stderr
        if 'stdout' not in kwargs:
            kwargs['stdout'] = subprocess.PIPE
        if 'stderr' not in kwargs:
            kwargs['stderr'] = subprocess.PIPE
        # thread
        thread = threading.Thread(target=target, kwargs=kwargs)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child, then let the thread finish cleanly.
            self.process.terminate()
            thread.join()
        return self.status, self.output, self.error
def hamming(str1, str2):
    """Return the Hamming distance between two equal-length strings.

    Parameters
    ----------
    str1: string
        string
    str2: string
        string
    """
    assert len(str1) == len(str2)
    # Count positions where the two strings differ.
    return sum(1 for a, b in zip(str1, str2) if a != b)
def preprocess_data(working_dir,
                    target_proteomes_dir,
                    extensions,
                    verbose=False):
    """ Map each gene to a pseudo name (ex. 1_1 for species 1, gene 1).

    Parameters
    ----------
    working_dir: string
        path to working directory (currently unused; kept for interface
        compatibility)
    target_proteomes_dir: string
        path to directory holding proteomes for all target organisms
    extensions: list
        list of extensions for reference proteomes
    verbose: boolean, optional
        output details about the running processes of this function

    Returns
    -------
    gene_map: dictionary
        "two-way" dictionary storing gene names as keys and their pseudo
        names as values, and vice versa
    ref_db: dictionary
        dictionary storing FASTA label as key and sequence as value for the
        reference databases
    species: integer
        the number of species in the reference databases

    Raises
    ------
    ValueError
        if no proteome files are found, or a FASTA label is duplicated

    Notes
    -----
    This will facilitate easier output comparison and the 10 character
    name limitation in PHYLIP output. This format is limited up to 9999
    species and 99999 genes per species.
    """
    gene_map = {}
    ref_db = {}
    if verbose:
        sys.stdout.write("Target organism\tNumber of genes\n")
    # each file contains the genes of one species
    files = [f
             for ext in extensions
             for f in glob("%s/*%s" % (target_proteomes_dir, ext))]
    if not files:
        # previously this fell through to a NameError on the unbound loop
        # variable `species`; fail with an explicit message instead
        raise ValueError(
            "No proteome files with extensions %s found in %s" % (
                sorted(extensions), target_proteomes_dir))
    for species, _file in enumerate(files):
        if verbose:
            sys.stdout.write("%s. %s\t" % (
                species+1, basename(_file)))
        num_genes = 0
        for gene, seq in enumerate(skbio.io.read(_file, format='fasta')):
            label = seq.metadata['id']
            ref_db[label] = seq
            pseudo_label = "%s_%s" % (species, gene)
            if label in gene_map:
                raise ValueError("Duplicate sequence labels are "
                                 "not allowed: %s" % label)
            gene_map[label] = pseudo_label
            gene_map[pseudo_label] = label
            num_genes = gene + 1
        if verbose:
            # was "%s" % gene (the last 0-based index) — off by one versus
            # the "Number of genes" column header
            sys.stdout.write("%s\n" % num_genes)
    return gene_map, ref_db, len(files)
def launch_diamond(query_proteome_fp,
                   ref_fp,
                   working_dir,
                   tmp_dir,
                   e_value=10e-20,
                   threads=1,
                   debug=False):
    """ Launch DIAMOND for a query and a reference database of proteomes.

    Parameters
    ----------
    query_proteome_fp: string
        filepath to query proteome
    ref_fp: string
        filepath to reference proteome
    working_dir: string
        working directory path
    tmp_dir:
        temporary working directory for DIAMOND
    e_value: float, optional
        the cutoff E-value for BLASTP results
    threads: integer
        number of threads to use for running DIAMOND BLASTP
    debug: boolean
        if True, run function in debug mode

    Returns
    -------
    out_file_fp: string
        filepath to tabular alignment file output by DIAMOND

    Notes
    -----
    proc.communicate() waits for each child process; the previous code
    called proc.wait() before communicate() with PIPEd streams, which can
    deadlock once a pipe buffer fills up.
    """
    db_file_fp = join(working_dir, "%s" % basename(ref_fp))
    # build DIAMOND database
    makediamonddb_command = ["diamond",
                             "makedb",
                             "--in", ref_fp,
                             "-d", db_file_fp,
                             "--threads", str(threads)]
    proc = subprocess.Popen(makediamonddb_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    stdout, stderr = proc.communicate()
    if (stderr and debug):
        print("[DEBUG] %s\n" % stderr)
    # launch DIAMOND
    out_file_fp = join(
        working_dir, "%s.daa" % basename(query_proteome_fp))
    diamond_command = ["diamond",
                       "blastp",
                       "-t", tmp_dir,
                       "--db", "%s.dmnd" % db_file_fp,
                       "--query", query_proteome_fp,
                       "--evalue", str(e_value),
                       "--threads", str(threads),
                       "--daa", out_file_fp,
                       "--sensitive"]
    proc = subprocess.Popen(diamond_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    stdout, stderr = proc.communicate()
    if (stderr and debug):
        print("[DEBUG] %s\n" % stderr)
    # convert output to tab delimited file
    out_file_conv_fp = join(
        working_dir, "%s.m8" % basename(query_proteome_fp))
    diamond_convert_command = ["diamond",
                               "view",
                               "--daa", out_file_fp,
                               "-f", "tab",
                               "-o", out_file_conv_fp]
    proc = subprocess.Popen(diamond_convert_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    stdout, stderr = proc.communicate()
    if (stderr and debug):
        print("[DEBUG] %s\n" % stderr)
    return out_file_conv_fp
def launch_blast(query_proteome_fp,
                 ref_fp,
                 working_dir,
                 e_value=10e-20,
                 threads=1,
                 debug=False):
    """ Launch BLASTp for a query and a reference database of proteomes.

    Parameters
    ----------
    query_proteome_fp: string
        filepath to query proteome
    ref_fp: string
        filepath to reference proteome
    working_dir: string
        working directory path
    e_value: float, optional
        the cutoff E-value for BLASTP results
    threads: integer
        number of threads to use for running BLASTP
    debug: boolean
        if True, run function in debug mode

    Returns
    -------
    out_file_fp: string
        filepath to tabular alignment file output by
        BLASTP

    Notes
    -----
    proc.communicate() waits for each child process; the previous code
    called proc.wait() before communicate() with PIPEd streams, which can
    deadlock once a pipe buffer fills up.
    """
    db_file_fp = join(working_dir, "%s" % basename(ref_fp))
    # build blast database
    makeblastdb_command = ["makeblastdb",
                           "-in", ref_fp,
                           "-out", db_file_fp,
                           "-dbtype", "prot"]
    proc = subprocess.Popen(makeblastdb_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    stdout, stderr = proc.communicate()
    if (stderr and debug):
        print("[DEBUG] %s\n" % stderr)
    # launch blast
    out_file_fp = join(
        working_dir, "%s.blast" % basename(query_proteome_fp))
    blastp_command = ["blastp",
                      "-db", db_file_fp,
                      "-query", query_proteome_fp,
                      "-evalue", str(e_value),
                      "-num_threads", str(threads),
                      "-outfmt", "6 std qcovs",
                      "-task", "blastp",
                      "-out", out_file_fp]
    proc = subprocess.Popen(blastp_command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True)
    stdout, stderr = proc.communicate()
    if (stderr and debug):
        print("[DEBUG] %s\n" % stderr)
    return out_file_fp
def parse_blast(alignments_fp,
                hits,
                gene_map,
                debug=False):
    """ Parse a BLASTp tabular alignment file into the *hits* dictionary.

    Parameters
    ----------
    alignments_fp: string
        filepath to tabular alignment file output by BLASTP
    hits: dictionary
        dictionary storing query (gene) names as keys and the best aligning
        reference sequences as values (one alignment per reference sequence)
    gene_map: dictionary
        "two-way" dictionary storing gene names as keys and their pseudo
        names as values, and vice versa
    debug: boolean
        if True, run function in debug mode

    Notes
    -----
    The keys are the queries and the values are all the reference
    sequences to which the query mapped with E-value cutoff score.
    """
    # read blastp results
    with open(alignments_fp, 'r') as alignments_f:
        for record in alignments_f:
            if debug:
                sys.stdout.write("[DEBUG] %s" % record)
            query, ref = record.split()[:2]
            if query not in hits:
                hits[query] = [ref]
                continue
            # keep only the best homolog per species: skip this reference
            # if one from the same species was already recorded
            recorded_species = [gene_map[gene].split('_')[0]
                                for gene in hits[query]]
            if gene_map[ref].split('_')[0] not in recorded_species:
                hits[query].append(ref)
def launch_msa(fasta_in_fp,
               clustal_command_fp,
               gene_map,
               ref_db,
               hits,
               query,
               timeout):
    """ Create MSA for all gene orthologs using Clustalw.

    Parameters
    ----------
    fasta_in_fp: string
        filepath to FASTA file of protein sequences to use as input to
        Clustalw
    clustal_command_fp: string
        filepath to Clustalw command (interactive)
    gene_map: dictionary
        "two-way" dictionary storing gene names as keys and their pseudo
        names as values, and vice versa
    ref_db: dictionary
        dictionary storing FASTA label as key and sequence as value for the
        reference databases
    hits: dictionary
        dictionary storing query (gene) names as keys and the best aligning
        reference sequences as values (one alignment per reference sequence)
    query: string
        query gene name
    timeout: integer
        number of seconds to allow Clustalw to run before terminating the
        process
    """
    # write the homologs of this query (pseudo-labeled) as Clustalw input;
    # the file is rewritten on every call
    with open(fasta_in_fp, 'w') as in_f:
        for ref in hits[query]:
            in_f.write(">%s\n%s\n" % (gene_map[ref], ref_db[ref]))
    # drive the interactive clustalw prompt from the pre-written command file
    with open(clustal_command_fp, 'r') as clustal_command_f:
        clustalw_command = Command("clustalw")
        status, output, error = clustalw_command.run(
            timeout=timeout,
            stdin=clustal_command_f,
            close_fds=True)
        # status is -1 when Command caught an exception while running clustalw
        if status < 0:
            sys.stdout.write(
                "status: %s\noutput: %s\terror: %s\t" % (
                    status, output, error))
def compute_distances(phylip_command_fp,
                      warnings=False):
    """ Compute distances between each pair of sequences in the MSA.

    Parameters
    ----------
    phylip_command_fp: string
        filepath to the PHYLIP command (interactive)
    warnings: boolean, optional
        print warnings output by PHYLIP

    Notes
    -----
    Use PHYLIP's protdist function. communicate() waits for the child to
    exit; the previous proc.wait() before communicate() with PIPEd streams
    could deadlock once a pipe buffer filled up.
    """
    with open(phylip_command_fp, 'r') as phylip_command_f:
        proc = subprocess.Popen("protdist",
                                stdin=phylip_command_f,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                close_fds=True)
        stdout, stderr = proc.communicate()
    if stderr and warnings:
        print(stderr)
def normalize_distances(phylip_fp,
                        full_distance_matrix,
                        num_species,
                        full_distance_matrix_offset,
                        species_set_dict,
                        gene_bitvector_map,
                        debug=False):
    """ Parse and normalize the output file of PHYLIP's protdist function.

    Parameters
    ----------
    phylip_fp: string
        filepath to distance matrix output by PHYLIP's protdist function
    full_distance_matrix: dictionary
        complete distance matrix for pairwise alignments between all species
        for every gene
    num_species: integer
        number of species in the reference database
    full_distance_matrix_offset: integer
        the index offset for elements in full_distance_matrix where to write
        the next array
    species_set_dict: dictionary
        dictionary containing the binary indicator vectors as keys and the
        number of genes with identical species set represented by the binary
        vectors as values
    gene_bitvector_map: list
        list containing the binary indicator vector for each query gene
    debug: boolean
        if True, run function in debug mode

    Notes
    -----
    Parse PHYLIP's protdist output containing the distance matrix, Z-score
    normalize the set of pairwise distances between the gene in a species
    and all other species and stores the results in a separate array.
    Each normalized distance matrix is then sorted by species name and
    added to the complete array storing distance matrices for all genes.
    In addition, a list of missing species (species which did not include
    a certain gene) is also maintained and used for setting nan's in array
    cells which represent those species.
    Below is an example of a parsed distance matrix
    for 3 genes and 3 species:
             0          1          2         (genes)
    0_0      nan        nan        nan
    0_1      0.53099    0.878855   0.83673
    0_2      0.642856   1.083039   1.083039
    1_0      0.300297   0.300297   0.702003
    1_1      nan        nan        nan
    1_2      0.399722   0.379156   0.356543
    2_0      0.53099    0.53099    0.83673
    2_1      0.399722   0.399722   0.356543
    2_2      nan        nan        nan
    (species pairs)
    Example of Z-score normalized distance matrix from
    above:
            0            1            2       (genes)
    0_0     nan          nan          nan
    0_1     -1.40548346  0.83861735   0.56686611
    0_2     -1.41421356  0.70710678   0.70710678
    1_0     -0.70710678  -0.70710678  1.41421356
    1_1     nan          nan          nan
    1_2     1.20493966   0.03869341   -1.24363308
    2_0     -0.70710678  -0.70710678  1.41421356
    2_1     0.70710678   0.70710678   -1.41421356
    2_2     nan          nan          nan
    (species pairs)
    """
    # assume a pairwise alignment exists for all species
    missing_species = [str(x) for x in range(0, num_species)]
    # scan through file and remove species that exist
    # from missing_species list
    # (protdist rows start at column 0 with "<species>_<gene>"; continuation
    # lines are indented with spaces)
    if exists(phylip_fp) and getsize(phylip_fp) > 0:
        with open(phylip_fp, 'r') as phylip_f:
            next(phylip_f)
            for line in phylip_f:
                if not line.startswith(' '):
                    species = line.split()[0].split('_')[0]
                    missing_species.remove(species)
    else:
        raise ValueError('%s does not exist or is empty' % phylip_fp)
    # scan through file again, collecting alignment
    # distances
    orig_order_labels = []
    # p holds one row of z-scored distances per species, NaN-initialized so
    # missing species stay NaN
    p = numpy.empty(shape=(num_species, num_species))
    p.fill(numpy.nan)
    idx = 0
    with open(phylip_fp, 'r') as phylip_f:
        alignment_list = []
        # skip first line containing number of lines in
        # the file
        next(phylip_f)
        for line in phylip_f:
            if debug:
                sys.stdout.write("[DEBUG] %s" % line)
            alignment_dist = line.strip().split()
            if line.startswith(' '):
                alignment_list.extend(alignment_dist)
            else:
                # new species alignment pairs
                if alignment_list:
                    # pad with None for species that had no ortholog, then
                    # z-score normalize; alignment_list[0] is the row label
                    for i in range(0, len(missing_species)):
                        alignment_list.append(None)
                    a = numpy.asarray(alignment_list[1:], dtype=float)
                    # mask the self-distance before computing mean/std
                    a[idx] = numpy.nan
                    p[idx] = (a - numpy.nanmean(a)) / numpy.nanstd(a)
                    idx += 1
                    orig_order_labels.append(alignment_list[0])
                alignment_list = alignment_dist
        # add distance on final line
        for i in range(0, len(missing_species)):
            alignment_list.append(None)
        a = numpy.asarray(alignment_list[1:], dtype=float)
        a[idx] = numpy.nan
        p[idx] = (a - numpy.nanmean(a)) / numpy.nanstd(a)
        orig_order_labels.append(alignment_list[0])
    # add the missing species names to the labels array
    # bitvector_gene: 'I' = species has this gene, 'O' = species missing it
    bitvector_gene = 'I' * num_species
    for species in missing_species:
        orig_order_labels.append("%s_X" % species)
        # indicate missing gene for current species
        l = list(bitvector_gene)
        l[int(species)] = 'O'
        bitvector_gene = ''.join(l)
    # update species set counts
    if bitvector_gene not in species_set_dict:
        species_set_dict[bitvector_gene] = 1
    else:
        species_set_dict[bitvector_gene] += 1
    gene_bitvector_map[full_distance_matrix_offset] = bitvector_gene
    # sort the distance matrix based on species names (S1, S2, S3 ..)
    # in order to be consistent across all gene families
    x = numpy.argsort(numpy.array(orig_order_labels))
    # re-order rows and columns by ordered species name (0,1,2 ..)
    # NOTE(review): argsort on string labels assumes lexicographic order
    # matches species order — only true for < 10 species; confirm upstream
    # limits.
    p2 = numpy.zeros(shape=(num_species, num_species))
    for idx_a, arr in enumerate(p):
        t = numpy.zeros(shape=num_species)
        for idx_b, el in enumerate(arr):
            t[x[idx_b]] = el
        p2[x[idx_a]] = t
    del p
    # add normalized distance matrix for current gene
    # to full distance matrix
    full_distance_matrix[full_distance_matrix_offset] = p2
def cluster_distances(species_set_dict,
                      species_set_size,
                      hamming_distance):
    """ Hamming distance clustering algorithm

    Parameters
    ----------
    species_set_dict: dictionary
        dictionary containing the binary indicator vectors as
        keys and the number of genes with identical species
        set represented by the binary vectors as values
    species_set_size: integer
        threshold number of genes in a species set to
        allow it to form a core cluster
    hamming_distance: integer
        maximum number of mismatches between two binary
        indicator vectors (ex. IIII and I0II) for the
        genes in a candidate vector to be merged into the
        core cluster

    Returns
    -------
    gene_clusters_list: list of tuples
        list of tuples containing core species sets and all belonging species
        sets (determined by the Hamming distance clustering algorithm)

    Notes
    -----
    Cluster gene families by species with detectable orthologs in exactly
    the same subset of the considered species. If no species set reaches
    species_set_size, the single largest species set becomes the only core
    cluster.
    """
    sorted_species_set = sorted(list(species_set_dict.items()),
                                key=operator.itemgetter(1), reverse=True)
    # determine core clusters (initial species sets with more than
    # species_set_size genes)
    gene_clusters_list = []
    if sorted_species_set[0][1] < species_set_size:
        # no species set reaches the threshold: fall back to one core
        # cluster built from the largest species set. (Previously the
        # tuple was created but never appended, so the function
        # returned [] in this case.)
        gene_clusters_list.append((sorted_species_set[0][0], []))
    else:
        for bitvector in sorted_species_set:
            if bitvector[1] >= species_set_size:
                gene_clusters_list.append((bitvector[0], []))
    # assign species sets with fewer than species_set_size species to core
    # clusters if the Hamming distance between the two bitvectors is less than
    # hamming_distance
    species_set_assigned = []
    for idx, cluster_core in enumerate(gene_clusters_list):
        for bitvector in sorted_species_set:
            bv = bitvector[0]
            if (bv not in species_set_assigned and
                    hamming(cluster_core[0], bv) <= hamming_distance):
                gene_clusters_list[idx][1].append(bv)
                species_set_assigned.append(bv)
    # assign the remaining species sets to the cluster with the closest core
    # Hamming distance
    for bitvector in sorted_species_set:
        bv = bitvector[0]
        if bv not in species_set_assigned:
            min_hamming_cluster = -1
            min_hamming_distance = sys.maxsize
            # find cluster core with smallest Hamming distance to species set
            for idx, cluster_core in enumerate(gene_clusters_list):
                dist = hamming(cluster_core[0], bv)
                if dist < min_hamming_distance:
                    min_hamming_distance = dist
                    min_hamming_cluster = idx
            if min_hamming_cluster >= 0:
                gene_clusters_list[min_hamming_cluster][1].append(bv)
    return gene_clusters_list
def detect_outlier_genes(species_set,
                         gene_bitvector_map,
                         full_distance_matrix,
                         stdev_offset,
                         outlier_hgt,
                         num_species,
                         total_genes,
                         debug=False):
    """ Detect outlier genes.

    Parameters
    ----------
    species_set: list
        list of bitvectors representing species clusters to use in detecting
        outlier genes (currently unused; kept for interface compatibility)
    gene_bitvector_map: list
        list containing the binary indicator vector for each query gene
        (currently unused; kept for interface compatibility)
    full_distance_matrix: dictionary
        complete distance matrix for pairwise alignments between all species
        for every gene
    stdev_offset: integer
        the number of standard deviations a gene's normalized distance is from
        the mean to identify it as an outlier for a species pair
    outlier_hgt: float
        the fraction (value between (0,1]) of normalized pairwise distances
        over all species-pair vectors belonging to the same gene that are
        z-score standard deviations from the mean
    num_species: integer
        number of species in the reference database
    total_genes: integer
        total number of genes in the query genome with at least
        min_num_homologs (determined by BLAST search)
    debug: boolean
        if True, run function in debug mode

    Returns
    -------
    outlier_genes: set
        set of atypical genes

    Notes
    -----
    Algorithm described in section "Detecting `Outlier' Genes" of the Wei.
    X et al. paper. The mean and standard deviation are computed for each
    species pair including all genes; a gene's distance is flagged when it
    falls outside [mean - stdev_offset*stdev, mean + stdev_offset*stdev].
    """
    # rounding happens in place
    numpy.around(full_distance_matrix, decimals=5, out=full_distance_matrix)
    outlier_flag_matrix = numpy.zeros(
        shape=(total_genes, num_species, num_species), dtype=bool)
    distance_vector = numpy.zeros(total_genes)
    if debug:
        sys.stdout.write("[DEBUG] species_species\t")
        for k in range(total_genes):
            sys.stdout.write("gene # %s".ljust(12) % k)
        sys.stdout.write("[low_bound, up_bound]\n")
    for i in range(num_species):
        for j in range(num_species):
            if i != j:
                for k in range(total_genes):
                    distance_vector[k] = full_distance_matrix[k][i][j]
                mean = numpy.nanmean(distance_vector)
                stdev = numpy.nanstd(distance_vector)
                low_bound = round(mean - stdev_offset*stdev, 5)
                up_bound = round(mean + stdev_offset*stdev, 5)
                if debug:
                    sys.stdout.write("[DEBUG] %s_%s\t".ljust(20) % (i, j))
                for k, distance in enumerate(distance_vector):
                    spaces = "".ljust(2)
                    if distance < 0:
                        spaces = "".ljust(1)
                    # was `distance != numpy.nan`, which is always True
                    # (NaN compares unequal to everything, itself included);
                    # isnan expresses the intended "skip missing species"
                    if (not numpy.isnan(distance) and
                            ((distance < low_bound) or (distance > up_bound))):
                        outlier_flag_matrix[k][i][j] = 1
                        if debug:
                            sys.stdout.write(
                                "%s\033[92m%s\033[0m" % (spaces, distance))
                    elif debug:
                        sys.stdout.write("%s%s" % (spaces, distance))
                if debug:
                    sys.stdout.write("\t[%s, %s]\n" % (low_bound, up_bound))
    # traverse outlier_matrix by gene and count the number of outlier
    # distances by species
    outlier_count_matrix = numpy.zeros(
        shape=(total_genes, num_species), dtype=int)
    for i in range(total_genes):
        for j in range(num_species):
            for k in range(num_species):
                if outlier_flag_matrix[i][j][k]:
                    outlier_count_matrix[i][k] += 1
    # if number of outlier distances exceeds threshold, label gene as outlier
    outlier_genes = set()
    for i in range(total_genes):
        for j in range(num_species):
            if outlier_count_matrix[i][j] > num_species*outlier_hgt:
                outlier_genes.add(i)
    return outlier_genes
def output_full_matrix(matrix, num_species):
    """ Write the full distance matrix to stdout.

    One line per (species, species) pair, with one tab-terminated column
    per gene.
    """
    num_genes = len(matrix)
    for row in range(num_species):
        for col in range(num_species):
            for gene in range(num_genes):
                sys.stdout.write("%s\t" % matrix[gene][row][col])
            sys.stdout.write("\n")
def distance_method(query_proteome_fp,
                    target_proteomes_dir,
                    working_dir,
                    output_hgt_fp,
                    align_software,
                    tabular_alignments_fp=None,
                    ext=['fa', 'fasta', 'faa'],
                    min_num_homologs=3,
                    e_value=10e-20,
                    threads=1,
                    stdev_offset=2.326,
                    outlier_hgt=0.5,
                    species_set_size=30,
                    hamming_distance=2,
                    verbose=False,
                    debug=False,
                    warnings=False,
                    timeout=120):
    """ Run Distance Method algorithm

    Parameters
    ----------
    query_proteome_fp: string
        filepath to query proteome
    target_proteomes_dir: string
        dirpath to target proteomes
    working_dir: string
        dirpath to working directory
    output_hgt_fp: string
        filepath to output file for storing detected HGTs
    align_software: string
        software to use for sequence alignment (BLAST or DIAMOND)
    tabular_alignments_fp: string, optional
        filepath to tabular sequence alignments
    ext: list, optional
        list of file extensions to open in the target proteomes directory
    min_num_homologs: integer, optional
        the mininum number of homologs (determined by BLAST search)
        for each gene to test
    e_value: float, optional
        the E-value cutoff to identify orthologous genes using BLASTP
    threads: integer, optional
        number of threads to use for sequence alignment
    stdev_offset: float, optional
        the number of standard deviations a gene's normalized distance
        is from the mean to identify it as an outlier for a species pair
    outlier_hgt: float, optional
        the fraction (value between (0,1]) of normalized pairwise distances
        over all species-pair vectors belonging to the same gene that are
        z-score standard deviations from the mean
    species_set_size: integer, optional
        threshold number of genes to consider a species set large (a species
        set is a set of genes whose orthologs are detectable in exactly the
        same subset of the considered species)
    hamming_distance: integer, optional
        distance between two binary vectors indicating the species in which
        the corresponding ortholog gene appears
    verbose: boolean, optional
        if True, run in verbose mode
    debug: boolean, optional
        if True, run in debug mode
    warnings: boolean, optional
        if True, output warnings
    timeout: integer, optional
        number of seconds to allow Clustalw to run per call
    """
    if verbose:
        sys.stdout.write(
            "Begin whole-genome HGT detection using the Distance method.\n\n")
        sys.stdout.write("Query genome: %s\n" % query_proteome_fp)
    extensions = set(['fa', 'fasta', 'faa'])
    extensions.update(ext)
    # create working directory if doesn't exist
    if not isdir(working_dir):
        mkdir(working_dir)
    gene_map, ref_db, num_species = preprocess_data(
        working_dir=working_dir,
        target_proteomes_dir=target_proteomes_dir,
        extensions=extensions,
        verbose=verbose)
    if debug:
        sys.stdout.write("\n[DEBUG] gene map:\n")
        for gene in gene_map:
            sys.stdout.write("[DEBUG] %s: %s\n" % (gene, gene_map[gene]))
    if verbose:
        sys.stdout.write("\nRunning BLASTp ..\n")
    hits = {}
    # tabular alignments provided
    if tabular_alignments_fp is not None:
        # generate a dictionary of orthologous genes
        parse_blast(alignments_fp=tabular_alignments_fp,
                    hits=hits,
                    gene_map=gene_map,
                    debug=debug)
    # tabular alignments to be created
    else:
        files = [f
                 for e in extensions
                 for f in glob("%s/*%s" % (target_proteomes_dir, e))]
        for _file in files:
            # launch BLASTp
            if align_software == "blast":
                alignments_fp = launch_blast(
                    query_proteome_fp=query_proteome_fp,
                    ref_fp=_file,
                    working_dir=working_dir,
                    e_value=e_value,
                    threads=threads,
                    debug=debug)
            elif align_software == "diamond":
                alignments_fp = launch_diamond(
                    query_proteome_fp=query_proteome_fp,
                    ref_fp=_file,
                    working_dir=working_dir,
                    tmp_dir=working_dir,
                    e_value=e_value,
                    threads=threads,
                    debug=debug)
            else:
                raise ValueError(
                    "Software not supported: %s" % align_software)
            # generate a dictionary of orthologous genes
            parse_blast(alignments_fp=alignments_fp,
                        hits=hits,
                        gene_map=gene_map,
                        debug=debug)
    # keep only genes with >= min_num_homologs
    hits_min_num_homologs = {}
    max_homologs = 0
    for query in hits:
        len_hits = len(hits[query])
        # do not count the query itself as a homolog
        if query in hits[query]:
            len_hits -= 1
        if len_hits >= min_num_homologs:
            if query in hits_min_num_homologs:
                raise ValueError("Duplicate gene names found: %s" % query)
            hits_min_num_homologs[query] = hits[query]
            if len_hits > max_homologs:
                max_homologs = len_hits
    hits.clear()
    if verbose:
        sys.stdout.write(
            "Total number of orthologous gene families with at "
            "least %s genes: %s\n" % (
                min_num_homologs, len(hits_min_num_homologs)))
    if debug:
        sys.stdout.write("[DEBUG] Blast matches:\n")
        for query in hits_min_num_homologs:
            sys.stdout.write(
                "[DEBUG] %s: %s\n" % (query, hits_min_num_homologs[query]))
    # generate command for CLUSTALW (touch its in/out files so the
    # interactive prompts accept the paths)
    phy_msa_fp = join(working_dir, "msa.phy")
    open(phy_msa_fp, 'a').close()
    dnd_msa_fp = join(working_dir, "msa.dnd")
    open(dnd_msa_fp, 'a').close()
    phylip_fp = join(working_dir, "msa.dis")
    open(phylip_fp, 'a').close()
    # create fasta file for each gene family and run CLUSTALW
    fasta_in_fp = join(working_dir, "input.faa")
    clustal_command_fp = join(working_dir, "clustal_command.txt")
    with open(clustal_command_fp, 'w') as clustal_command_f:
        clustal_command_f.write(
            '1\n%s\n2\n9\n1\n4\n\n1\n%s\n%s\nX\n\nX\n' % (
                fasta_in_fp, phy_msa_fp, dnd_msa_fp))
    phylip_command_fp = join(working_dir, "phylip_command.txt")
    with open(phylip_command_fp, 'w') as phylip_command_f:
        phylip_command_f.write('%s\nF\n%s\nR\nY\n' % (phy_msa_fp, phylip_fp))
    total_genes = len(hits_min_num_homologs)
    if verbose:
        sys.stdout.write("\nRunning CLUSTALW and PROTDIST ..\n")
    if max_homologs > num_species:
        raise ValueError(
            "max_homologs > num_species: %s > %s " % (
                max_homologs, num_species))
    # distance matrix containing distances between all ortholog genes
    full_distance_matrix = numpy.zeros(
        shape=(total_genes, num_species, num_species), dtype=float)
    # dictionary to store all subsets of orthologs (keys) and
    # their number of occurrences (values) (maximum occurrences
    # is equal to the number of genes)
    species_set_dict = {}
    gene_bitvector_map = {}
    gene_id = {}
    for i, query in enumerate(hits_min_num_homologs):
        if verbose:
            print("Computing MSA and distances for gene %s .. (%s/%s)" % (
                query, i+1, total_genes))
        gene_id[i] = query
        # generate a multiple sequence alignment
        # for each orthologous gene family
        launch_msa(fasta_in_fp=fasta_in_fp,
                   clustal_command_fp=clustal_command_fp,
                   ref_db=ref_db,
                   gene_map=gene_map,
                   hits=hits_min_num_homologs,
                   query=query,
                   timeout=timeout)
        # compute distances between each pair of sequences in MSA
        compute_distances(phylip_command_fp=phylip_command_fp,
                          warnings=warnings)
        # Z-score normalize distance matrix and add results
        # to full distance matrix (for all genes)
        normalize_distances(phylip_fp=phylip_fp,
                            full_distance_matrix=full_distance_matrix,
                            num_species=num_species,
                            full_distance_matrix_offset=i,
                            species_set_dict=species_set_dict,
                            gene_bitvector_map=gene_bitvector_map,
                            debug=debug)
    # output_full_matrix(full_distance_matrix, num_species)
    # cluster gene families by species; the result is a list of
    # (core bitvector, [member bitvectors]) tuples
    gene_clusters_list = cluster_distances(
        species_set_dict=species_set_dict,
        species_set_size=species_set_size,
        hamming_distance=hamming_distance)
    # detect outlier genes per core cluster of genes
    with open(output_hgt_fp, 'w') as output_hgt_f:
        output_hgt_f.write("\n# Candidate HGT genes: \n")
        for core_cluster in gene_clusters_list:
            outlier_genes = detect_outlier_genes(
                # was gene_clusters_dict[core_cluster]: indexing a list
                # with a tuple raised TypeError; pass the cluster's member
                # bitvectors instead
                species_set=core_cluster[1],
                gene_bitvector_map=gene_bitvector_map,
                full_distance_matrix=full_distance_matrix,
                stdev_offset=stdev_offset,
                outlier_hgt=outlier_hgt,
                num_species=num_species,
                total_genes=total_genes,
                debug=debug)
            if outlier_genes:
                for gene in outlier_genes:
                    # was output_hgt_f(...): calling the file object raised
                    # TypeError; .write() is what was intended
                    output_hgt_f.write("%s\n" % gene_id[gene])
    # output_full_matrix(outlier_genes, num_species)
@click.command()
@click.argument('query-proteome-fp', required=True,
                type=click.Path(resolve_path=True, readable=True, exists=True,
                                file_okay=True))
@click.argument('target-proteomes-dir', required=True,
                type=click.Path(resolve_path=True, readable=True, exists=True,
                                file_okay=True))
@click.argument('working-dir', required=True,
                type=click.Path(resolve_path=True, readable=True, exists=False,
                                file_okay=True))
@click.argument('output-hgt-fp', required=True,
                type=click.Path(resolve_path=True, readable=True, exists=False,
                                file_okay=True))
# BUG FIX: a non-``multiple`` Choice option must default to a plain string,
# not a one-element list; a list default fails Choice validation and would
# be forwarded downstream as a list instead of 'diamond'.
@click.option('--align-software', type=click.Choice(['diamond', 'blast']),
              required=False, default='diamond', show_default=True,
              help="Software to use for blasting sequences")
@click.option('--tabular-alignments-fp', required=False,
              type=click.Path(resolve_path=True, readable=True, exists=False,
                              file_okay=True),
              help="Tabular alignments in m6 format (output from BLAST or "
                   "DIAMOND)")
@click.option('--ext', multiple=True, type=str, required=False,
              default=['fa', 'fasta', 'faa'], show_default=True,
              help="File extensions of target proteomes (multiple extensions "
                   "can be given by calling --ext ext1 --ext ext2)")
@click.option('--min-num-homologs', type=int, required=False, default=3,
              show_default=True, help="The minimum number of homologs "
                                      "(determined by BLAST search) for each "
                                      "gene to test")
@click.option('--e-value', type=float, required=False, default=10e-20,
              show_default=True, help="The E-value cutoff to identify "
                                      "orthologous genes using BLASTP")
@click.option('--threads', type=int, required=False, default=1,
              show_default=True, help="Number of threads to use")
@click.option('--stdev-offset', type=float, required=False, default=2.326,
              show_default=True, help="The number of standard deviations a "
                                      "gene's normalized distance is from "
                                      "the mean to identify it as an outlier "
                                      "for a species pair")
@click.option('--outlier-hgt', type=float, default=0.5, show_default=True,
              required=False, help="The fraction (value between (0,1]) of "
                                   "normalized pairwise distances over all "
                                   "species-pair vectors belonging to the "
                                   "same gene that are z-score standard "
                                   "deviations from the mean")
@click.option('--species-set-size', type=int, required=False, default=30,
              show_default=True, help="Threshold number of genes to consider "
                                      "a species set large (a species set is "
                                      "a set of genes whose orthologs are "
                                      "detectable in exactly the same subset "
                                      "of the considered species)")
@click.option('--hamming-distance', type=int, required=False, default=2,
              show_default=True, help="Distance between two binary vectors "
                                      "indicating the species in which the "
                                      "corresponding ortholog gene appears")
@click.option('--verbose', type=bool, required=False, default=False,
              show_default=True, help="Run in verbose mode")
@click.option('--debug', type=bool, required=False, default=False,
              show_default=True, help="Run in debug mode")
@click.option('--warnings', type=bool, required=False, default=False,
              show_default=True, help="Print program warnings")
@click.option('--timeout', type=int, required=False, default=120,
              show_default=True, help="Number of seconds to allow Clustalw "
                                      "to run per call")
def distance_method_main(query_proteome_fp,
                         target_proteomes_dir,
                         working_dir,
                         output_hgt_fp,
                         align_software,
                         tabular_alignments_fp,
                         ext,
                         min_num_homologs,
                         e_value,
                         threads,
                         stdev_offset,
                         outlier_hgt,
                         species_set_size,
                         hamming_distance,
                         verbose,
                         debug,
                         warnings,
                         timeout):
    """Run the Distance-Method HGT detection algorithm.

    Thin click-generated CLI wrapper around :func:`distance_method`; every
    command-line argument and option is forwarded unchanged.
    """
    distance_method(query_proteome_fp=query_proteome_fp,
                    target_proteomes_dir=target_proteomes_dir,
                    working_dir=working_dir,
                    output_hgt_fp=output_hgt_fp,
                    align_software=align_software,
                    tabular_alignments_fp=tabular_alignments_fp,
                    ext=ext,
                    min_num_homologs=min_num_homologs,
                    e_value=e_value,
                    threads=threads,
                    stdev_offset=stdev_offset,
                    outlier_hgt=outlier_hgt,
                    species_set_size=species_set_size,
                    hamming_distance=hamming_distance,
                    verbose=verbose,
                    debug=debug,
                    warnings=warnings,
                    timeout=timeout)
# Script entry point: delegate to the click-generated CLI command.
if __name__ == "__main__":
    distance_method_main()
|
qiyunzhu/horizomer
|
horizomer/misc/distance-method/distance_method.py
|
Python
|
bsd-3-clause
| 46,229
|
[
"BLAST"
] |
ba9045193e76f6c8d3727bef1007ca2efa7657289dbe4402f6f73fa0b035118a
|
# -*- coding: utf-8 -*-
"""
Microsoft translator API
The Microsoft Translator services can be used in web or client
applications to perform language translation operations. The services
support users who are not familiar with the default language of a page or
application, or those desiring to communicate with people of a different
language group.
This module implements the AJAX API for the Microsoft Translator service.
An example::
>>> from microsofttranslator import Translator
>>> translator = Translator('<Your API Key>')
>>> print translator.translate("Hello", "pt")
"Olá"
The documentation for the service can be obtained here:
http://msdn.microsoft.com/en-us/library/ff512423.aspx
The project is hosted on GitHub, where you can fork the project or report
issues. Visit https://github.com/openlabs/Microsoft-Translator-Python-API
:copyright: © 2011 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import os
from setuptools import setup
import sys
# Interpreter version as a (major, minor) tuple, used below to pick the
# correct ``open`` signature (Python 2's ``open`` has no ``encoding`` arg).
PY_VERSION = sys.version_info[0], sys.version_info[1]


def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    On Python 3 the file is decoded as UTF-8; on Python 2 the platform
    default encoding is used.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # Use context managers so the handle is closed promptly; the original
    # left the file object open until garbage collection.
    if PY_VERSION < (3, 0):
        with open(path) as handle:
            return handle.read()
    with open(path, encoding='utf-8') as handle:
        return handle.read()
# Distribution metadata for the ``microsofttranslator`` package, consumed
# by setuptools when building or installing.
setup(
    name="microsofttranslator",
    version="0.5",
    packages=[
        'microsofttranslator',
    ],
    # The package's sources live at the repository root, hence the '.' map.
    package_dir={
        'microsofttranslator': '.'
    },
    author="Openlabs Technologies & Consulting (P) Limited",
    author_email="info@openlabs.co.in",
    description="Microsoft Translator V2 - Python API",
    # Reuse the README as the long description shown on PyPI.
    long_description=read('README.rst'),
    license="BSD",
    keywords="translation microsoft",
    url="http://openlabs.co.in/",
    include_package_data=True,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Internationalization",
        "Topic :: Utilities",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 2",
    ],
    test_suite="microsofttranslator.test.test_all",
    install_requires=[
        'requests >= 1.2.3',
    ]
)
|
Akoten/Microsoft-Translator-Python-API
|
setup.py
|
Python
|
bsd-3-clause
| 2,436
|
[
"VisIt"
] |
79b3b562da73892f894a0308a0a9c3e1f264f6756441b1c926f046921d1afb4b
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Water dynamics analysis --- :mod:`MDAnalysis.analysis.waterdynamics`
=======================================================================
:Author: Alejandro Bernardin
:Year: 2014-2015
:Copyright: GNU Public License v3
.. versionadded:: 0.11.0
This module provides functions to analyze water dynamics trajectories and water
interactions with other molecules. The functions in this module are: water
orientational relaxation (WOR) [Yeh1999]_, hydrogen bond lifetimes (HBL)
[Rapaport1983]_, angular distribution (AD) [Grigera1995]_, mean square
displacement (MSD) [Brodka1994]_ and survival probability (SP) [Liu2004]_.
For more information about this type of analysis please refer to
[Araya-Secchi2014]_ (water in a protein cavity) and [Milischuk2011]_ (water in
a nanopore).
.. rubric:: References
.. [Rapaport1983] D.C. Rapaport (1983): Hydrogen bonds in water, Molecular
Physics: An International Journal at the Interface Between
Chemistry and Physics, 50:5, 1151-1162.
.. [Yeh1999] Yu-ling Yeh and Chung-Yuan Mou (1999). Orientational Relaxation
Dynamics of Liquid Water Studied by Molecular Dynamics Simulation,
J. Phys. Chem. B 1999, 103, 3699-3705.
.. [Grigera1995] Raul Grigera, Susana G. Kalko and Jorge Fischbarg
(1995). Wall-Water Interface. A Molecular Dynamics Study,
Langmuir 1996,12,154-158
.. [Liu2004] Pu Liu, Edward Harder, and B. J. Berne (2004).On the Calculation
of Diffusion Coefficients in Confined Fluids and Interfaces with
an Application to the Liquid-Vapor Interface of Water,
J. Phys. Chem. B 2004, 108, 6595-6602.
.. [Brodka1994] Aleksander Brodka (1994). Diffusion in restricted volume,
Molecular Physics, 1994, Vol. 82, No. 5, 1075-1078.
.. [Araya-Secchi2014] Araya-Secchi, R., Tomas Perez-Acle, Seung-gu Kang, Tien
Huynh, Alejandro Bernardin, Yerko Escalona, Jose-Antonio
Garate, Agustin D. Martinez, Isaac E. Garcia, Juan
C. Saez, Ruhong Zhou (2014). Characterization of a novel
water pocket inside the human Cx26 hemichannel
structure. Biophysical journal, 107(3), 599-612.
.. [Milischuk2011] Anatoli A. Milischuk and Branka M. Ladanyi. Structure and
dynamics of water confined in silica
nanopores. J. Chem. Phys. 135, 174709 (2011); doi:
10.1063/1.3657408
.. _examples:
Example use of the analysis classes
-----------------------------------
HydrogenBondLifetimes
~~~~~~~~~~~~~~~~~~~~~
Analyzing hydrogen bond lifetimes (HBL) :class:`HydrogenBondLifetimes`, both
continuous and intermittent. In this case we are analyzing how residue 38
interact with a water sphere of radius 6.0 centered on the geometric center of
protein and residue 42. If the hydrogen bond lifetimes are very stable, we can
assume that residue 38 is hydrophilic, on the other hand, if the are very
unstable, we can assume that residue 38 is hydrophobic::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import HydrogenBondLifetimes as HBL
u = MDAnalysis.Universe(pdb, trajectory)
selection1 = "byres name OH2 and sphzone 6.0 protein and resid 42"
selection2 = "resid 38"
HBL_analysis = HBL(u, selection1, selection2, 0, 2000, 30)
HBL_analysis.run()
time = 0
#now we print the data ready to graph. The first two columns are the HBLc vs t graph and
#the second two columns are the HBLi vs t graph
for HBLc, HBLi in HBL_analysis.timeseries:
print("{time} {HBLc} {time} {HBLi}".format(time=time, HBLc=HBLc, HBLi=HBLi))
time += 1
#we can also plot our data
plt.figure(1,figsize=(18, 6))
#HBL continuos
plt.subplot(121)
plt.xlabel('time')
plt.ylabel('HBLc')
plt.title('HBL Continuos')
plt.plot(range(0,time),[column[0] for column in HBL_analysis.timeseries])
#HBL intermitent
plt.subplot(122)
plt.xlabel('time')
plt.ylabel('HBLi')
plt.title('HBL Intermitent')
plt.plot(range(0,time),[column[1] for column in HBL_analysis.timeseries])
plt.show()
where HBLc is the value for the continuos hydrogen bond lifetimes and HBLi is
the value for the intermittent hydrogen bond lifetime, t0 = 0, tf = 2000 and
dtmax = 30. In this way we create 30 windows timestep (30 values in x
axis). The continuos hydrogen bond lifetimes should decay faster than
intermittent.
WaterOrientationalRelaxation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Analyzing water orientational relaxation (WOR)
:class:`WaterOrientationalRelaxation`. In this case we are analyzing "how fast"
water molecules are rotating/changing direction. If WOR is very stable we can
assume that water molecules are rotating/changing direction very slow, on the
other hand, if WOR decay very fast, we can assume that water molecules are
rotating/changing direction very fast::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import WaterOrientationalRelaxation as WOR
u = MDAnalysis.Universe(pdb, trajectory)
selection = "byres name OH2 and sphzone 6.0 protein and resid 42"
WOR_analysis = WOR(u, selection, 0, 1000, 20)
WOR_analysis.run()
time = 0
#now we print the data ready to graph. The first two columns are WOR_OH vs t graph,
#the second two columns are WOR_HH vs t graph and the third two columns are WOR_dip vs t graph
for WOR_OH, WOR_HH, WOR_dip in WOR_analysis.timeseries:
print("{time} {WOR_OH} {time} {WOR_HH} {time} {WOR_dip}".format(time=time, WOR_OH=WOR_OH, WOR_HH=WOR_HH,WOR_dip=WOR_dip))
time += 1
#now, if we want, we can plot our data
plt.figure(1,figsize=(18, 6))
#WOR OH
plt.subplot(131)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR OH')
plt.plot(range(0,time),[column[0] for column in WOR_analysis.timeseries])
#WOR HH
plt.subplot(132)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR HH')
plt.plot(range(0,time),[column[1] for column in WOR_analysis.timeseries])
#WOR dip
plt.subplot(133)
plt.xlabel('time')
plt.ylabel('WOR')
plt.title('WOR dip')
plt.plot(range(0,time),[column[2] for column in WOR_analysis.timeseries])
plt.show()
where t0 = 0, tf = 1000 and dtmax = 20. In this way we create 20 windows
timesteps (20 values in the x axis), the first window is created with 1000
timestep average (1000/1), the second window is created with 500 timestep
average(1000/2), the third window is created with 333 timestep average (1000/3)
and so on.
AngularDistribution
~~~~~~~~~~~~~~~~~~~
Analyzing angular distribution (AD) :class:`AngularDistribution` for OH vector,
HH vector and dipole vector. It returns a line histogram with vector
orientation preference. A straight line in the output graphic means no
preferential orientation in water molecules. In this case we are analyzing if
water molecules have some orientational preference, in this way we can see if
water molecules are under an electric field or if they are interacting with
something (residue, protein, etc)::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import AngularDistribution as AD
u = MDAnalysis.Universe(pdb, trajectory)
selection = "byres name OH2 and sphzone 6.0 (protein and (resid 42 or resid 26) )"
bins = 30
AD_analysis = AD(u,selection,bins)
AD_analysis.run()
#now we print data ready to graph. The first two columns are P(cos(theta)) vs cos(theta) for OH vector ,
#the seconds two columns are P(cos(theta)) vs cos(theta) for HH vector and thirds two columns
#are P(cos(theta)) vs cos(theta) for dipole vector
for bin in range(bins):
print("{AD_analysisOH} {AD_analysisHH} {AD_analysisDip}".format(AD_analysisOH=AD_analysis.graph[0][bin], AD_analysisHH=AD_analysis.graph[1][bin], AD_analysisDip=AD_analysis.graph[2][bin]))
#and if we want to graph our results
plt.figure(1,figsize=(18, 6))
#AD OH
plt.subplot(131)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for OH')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[0][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[0][:-1]])
#AD HH
plt.subplot(132)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for HH')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[1][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[1][:-1]])
#AD dip
plt.subplot(133)
plt.xlabel('cos theta')
plt.ylabel('P(cos theta)')
plt.title('PDF cos theta for dipole')
plt.plot([float(column.split()[0]) for column in AD_analysis.graph[2][:-1]],[float(column.split()[1]) for column in AD_analysis.graph[2][:-1]])
plt.show()
where `P(cos(theta))` is the angular distribution or angular probabilities.
MeanSquareDisplacement
~~~~~~~~~~~~~~~~~~~~~~
Analyzing mean square displacement (MSD) :class:`MeanSquareDisplacement` for
water molecules. In this case we are analyzing the average distance that water
molecules travels inside protein in XYZ direction (cylindric zone of radius
11[nm], Zmax 4.0[nm] and Zmin -8.0[nm]). A strong rise mean a fast movement of
water molecules, a weak rise mean slow movement of particles::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import MeanSquareDisplacement as MSD
u = MDAnalysis.Universe(pdb, trajectory)
selection = "byres name OH2 and cyzone 11.0 4.0 -8.0 protein"
MSD_analysis = MSD(u, selection, 0, 1000, 20)
MSD_analysis.run()
#now we print data ready to graph. The graph
#represents MSD vs t
time = 0
for msd in MSD_analysis.timeseries:
print("{time} {msd}".format(time=time, msd=msd))
time += 1
#Plot
plt.xlabel('time')
plt.ylabel('MSD')
plt.title('MSD')
plt.plot(range(0,time),MSD_analysis.timeseries)
plt.show()
.. _SP-examples:
SurvivalProbability
~~~~~~~~~~~~~~~~~~~
Analyzing survival probability (SP) :class:`SurvivalProbability` for water
molecules. In this case we are analyzing how long water molecules remain in a
sphere of radius 12.3 centered in the geometrical center of resid 42, 26, 34
and 80. A slow decay of SP means a long permanence time of water molecules in
the zone, on the other hand, a fast decay means a short permanence time::
import MDAnalysis
from MDAnalysis.analysis.waterdynamics import SurvivalProbability as SP
u = MDAnalysis.Universe(pdb, trajectory)
selection = "byres name OH2 and sphzone 12.3 (resid 42 or resid 26 or resid 34 or resid 80) "
SP_analysis = SP(u, selection, 0, 100, 20)
SP_analysis.run()
#now we print data ready to graph. The graph
#represents SP vs t
time = 0
for sp in SP_analysis.timeseries:
print("{time} {sp}".format(time=time, sp=sp))
time += 1
#Plot
plt.xlabel('time')
plt.ylabel('SP')
plt.title('Survival Probability')
plt.plot(range(0,time),SP_analysis.timeseries)
plt.show()
.. _Output:
Output
------
HydrogenBondLifetimes
~~~~~~~~~~~~~~~~~~~~~
Hydrogen bond lifetimes (HBL) data is returned per window timestep, which is
stored in :attr:`HydrogenBondLifetimes.timeseries` (in all the following
descriptions, # indicates comments that are not part of the output)::
results = [
[ # time t0
<HBL_c>, <HBL_i>
],
[ # time t1
<HBL_c>, <HBL_i>
],
...
]
WaterOrientationalRelaxation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Water orientational relaxation (WOR) data is returned per window timestep,
which is stored in :attr:`WaterOrientationalRelaxation.timeseries`::
results = [
[ # time t0
<WOR_OH>, <WOR_HH>, <WOR_dip>
],
[ # time t1
<WOR_OH>, <WOR_HH>, <WOR_dip>
],
...
]
AngularDistribution
~~~~~~~~~~~~~~~~~~~
Angular distribution (AD) data is returned per vector, which is stored in
:attr:`AngularDistribution.graph`. In fact, AngularDistribution returns a
histogram::
results = [
[ # OH vector values
# the values are order in this way: <x_axis y_axis>
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
[ # HH vector values
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
[ # dip vector values
<cos_theta0 ang_distr0>, <cos_theta1 ang_distr1>, ...
],
]
MeanSquareDisplacement
~~~~~~~~~~~~~~~~~~~~~~
Mean Square Displacement (MSD) data is returned in a list, which each element
represents a MSD value in its respective window timestep. Data is stored in
:attr:`MeanSquareDisplacement.timeseries`::
results = [
#MSD values orders by window timestep
<MSD_t0>, <MSD_t1>, ...
]
SurvivalProbability
~~~~~~~~~~~~~~~~~~~
Survival Probability (SP) data is returned in a list, which each element
represents a SP value in its respective window timestep. Data is stored in
:attr:`SurvivalProbability.timeseries`::
results = [
# SP values order by window timestep
<SP_t0>, <SP_t1>, ...
]
Classes
--------
.. autoclass:: HydrogenBondLifetimes
:members:
:inherited-members:
.. autoclass:: WaterOrientationalRelaxation
:members:
:inherited-members:
.. autoclass:: AngularDistribution
:members:
:inherited-members:
.. autoclass:: MeanSquareDisplacement
:members:
:inherited-members:
.. autoclass:: SurvivalProbability
:members:
:inherited-members:
"""
from __future__ import print_function
from six.moves import range, zip_longest
import numpy as np
import multiprocessing
import MDAnalysis.analysis.hbonds
from MDAnalysis.lib.log import _set_verbose
class HydrogenBondLifetimes(object):
    r"""Hydrogen bond lifetime analysis

    This is an autocorrelation function that gives the "Hydrogen Bond
    Lifetimes" (HBL) proposed by D.C. Rapaport [Rapaport1983]_. From this
    function we can obtain the continuous and intermittent behavior of
    hydrogen bonds in time. A fast decay in these parameters indicates a
    fast change in HBs connectivity. A slow decay indicates very stable
    hydrogen bonds, like in ice. The HBL is also known as "Hydrogen Bond
    Population Relaxation" (HBPR). In the continuous case we have:

    .. math::
        C_{HB}^c(\tau) = \frac{\sum_{ij}h_{ij}(t_0)h'_{ij}(t_0+\tau)}{\sum_{ij}h_{ij}(t_0)}

    where :math:`h'_{ij}(t_0+\tau)=1` if there is a H-bond between a pair
    :math:`ij` during time interval :math:`t_0+\tau` (continuous) and
    :math:`h'_{ij}(t_0+\tau)=0` otherwise. In the intermittent case we have:

    .. math::
        C_{HB}^i(\tau) = \frac{\sum_{ij}h_{ij}(t_0)h_{ij}(t_0+\tau)}{\sum_{ij}h_{ij}(t_0)}

    where :math:`h_{ij}(t_0+\tau)=1` if there is a H-bond between a pair
    :math:`ij` at time :math:`t_0+\tau` (intermittent) and
    :math:`h_{ij}(t_0+\tau)=0` otherwise.

    Parameters
    ----------
    universe : Universe
        Universe object
    selection1 : str
        Selection string for first selection ['byres name OH2'].
        It could be any selection available in MDAnalysis, not just water.
    selection2 : str
        Selection string to analyze its HBL against selection1
    t0 : int
        frame where analysis begins
    tf : int
        frame where analysis ends
    dtmax : int
        Maximum dt size, `dtmax` < `tf` or it will crash.
    nproc : int
        Number of processors to use, by default is 1.

    .. versionadded:: 0.11.0
    """

    def __init__(self, universe, selection1, selection2, t0, tf, dtmax, nproc=1):
        self.universe = universe
        self.selection1 = selection1
        self.selection2 = selection2
        self.t0 = t0
        # Note: the last frame is stored as tf - 1 (inclusive upper bound).
        self.tf = tf - 1
        self.dtmax = dtmax
        self.nproc = nproc
        # Filled by run(): list of [HBL_c, HBL_i] pairs, one per dt window.
        self.timeseries = None

    def _getC_i(self, HBP, t0, t):
        """
        This function gives the intermittent Hydrogen Bond Lifetime
        C_i = <h(t0)h(t)>/<h(t0)> between t0 and t.

        ``HBP`` is a per-frame list of H-bond records; a bond is counted if
        the (donor, acceptor) pair present at t0 is found again at t.
        """
        C_i = 0
        # O(n*m) pair matching between the bond lists of frames t0 and t;
        # records are matched on their first two fields only.
        for i in range(len(HBP[t0])):
            for j in range(len(HBP[t])):
                if (HBP[t0][i][0] == HBP[t][j][0] and HBP[t0][i][1] == HBP[t][j][1]):
                    C_i += 1
                    break
        if len(HBP[t0]) == 0:
            # No bonds at t0: correlation is defined as 0.
            return 0.0
        else:
            return float(C_i) / len(HBP[t0])

    def _getC_c(self, HBP, t0, t):
        """
        This function gives the continuous Hydrogen Bond Lifetime
        C_c = <h(t0)h'(t)>/<h(t0)> between t0 and t.

        A bond only survives if it is present in *every* intermediate
        frame between t0 and t (the surviving set shrinks monotonically).
        """
        C_c = 0
        dt = 1
        begt0 = t0
        HBP_cp = HBP
        HBP_t0 = HBP[t0]
        newHBP = []
        if t0 == t:
            # Zero lag: perfect correlation by definition.
            return 1.0
        while t0 + dt <= t:
            # Keep only the bonds of the surviving set that are also
            # present in the next frame.
            for i in range(len(HBP_t0)):
                for j in range(len(HBP_cp[t0 + dt])):
                    if (HBP_t0[i][0] == HBP_cp[t0 + dt][j][0] and HBP_t0[i][1] == HBP_cp[t0 + dt][j][1]):
                        newHBP.append(HBP_t0[i])
                        break
            C_c = len(newHBP)
            t0 += dt
            HBP_t0 = newHBP
            newHBP = []
        if len(HBP[begt0]) == 0:
            return 0
        else:
            return C_c / float(len(HBP[begt0]))

    def _intervC_c(self, HBP, t0, tf, dt):
        """
        This function gets all the data for the h(t0)h(t0+dt)', where
        t0 = 1,2,3,...,tf. This function gives us one point of the final
        graphic HBL vs t (average of C_c over all valid window starts).
        """
        a = 0
        count = 0
        for i in range(len(HBP)):
            if (t0 + dt <= tf):
                if t0 == t0 + dt:
                    # dt == 0: computed value is discarded (count stays 0)
                    # and 1.0 is returned below.
                    b = self._getC_c(HBP, t0, t0)
                    break
                b = self._getC_c(HBP, t0, t0 + dt)
                t0 += dt
                a += b
                count += 1
        if count == 0:
            return 1.0
        return a / count

    def _intervC_i(self, HBP, t0, tf, dt):
        """
        This function gets all the data for the h(t0)h(t0+dt), where
        t0 = 1,2,3,...,tf. This function gives us a point of the final
        graphic HBL vs t (average of C_i over all valid window starts).
        """
        a = 0
        count = 0
        for i in range(len(HBP)):
            if (t0 + dt <= tf):
                b = self._getC_i(HBP, t0, t0 + dt)
                t0 += dt
                a += b
                count += 1
        # NOTE(review): unlike _intervC_c this divides without a count==0
        # guard -- a ZeroDivisionError is possible for dt > tf; confirm.
        return a / count

    def _finalGraphGetC_i(self, HBP, t0, tf, maxdt):
        """
        This function gets the final data of the C_i graph,
        one averaged value per dt in [0, maxdt).
        """
        output = []
        for dt in range(maxdt):
            a = self._intervC_i(HBP, t0, tf, dt)
            output.append(a)
        return output

    def _finalGraphGetC_c(self, HBP, t0, tf, maxdt):
        """
        This function gets the final data of the C_c graph,
        one averaged value per dt in [0, maxdt).
        """
        output = []
        for dt in range(maxdt):
            a = self._intervC_c(HBP, t0, tf, dt)
            output.append(a)
        return output

    def _getGraphics(self, HBP, t0, tf, maxdt):
        """
        Function that joins all the results into one series:
        a list of [C_c, C_i] pairs, one per dt.
        """
        a = []
        cont = self._finalGraphGetC_c(HBP, t0, tf, maxdt)
        inte = self._finalGraphGetC_i(HBP, t0, tf, maxdt)
        for i in range(len(cont)):
            fix = [cont[i], inte[i]]
            a.append(fix)
        return a

    def _HBA(self, ts, conn, universe, selAtom1, selAtom2,
             verbose=None, quiet=None):
        """
        Main function for calculating C_i and C_c in parallel.

        Runs a single-frame HydrogenBondAnalysis in a worker process and
        sends the resulting bond list back over the pipe connection.
        """
        verbose = _set_verbose(verbose, quiet, default=False)
        finalGetResidue1 = selAtom1
        finalGetResidue2 = selAtom2
        frame = ts.frame
        h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(universe, finalGetResidue1,
                                                            finalGetResidue2, distance=3.5, angle=120.0,
                                                            start=frame - 1, stop=frame)
        # HACK: bare except retries forever on any error.
        while True:
            try:
                h.run(verbose=verbose)
                break
            except:
                print("error")
                print("trying again")
                # NOTE(review): ``sys`` is not imported in this module's
                # visible import block -- this line would raise NameError
                # if the retry path ever runs; confirm against full file.
                sys.stdout.flush()
        sys.stdout.flush()
        # Ship the single analyzed frame's bond list back to the parent.
        conn.send(h.timeseries[0])
        conn.close()

    def run(self, **kwargs):
        """
        Analyze trajectory and produce timeseries
        (list of [HBL_c, HBL_i] pairs stored in ``self.timeseries``).
        """
        h_list = []
        i = 0
        if (self.nproc > 1):
            # Parallel path: spawn up to nproc worker processes per batch,
            # one frame each, and collect their bond lists over pipes.
            while i < len(self.universe.trajectory):
                jobs = []
                k = i
                for j in range(self.nproc):
                    # start
                    print("ts=", i + 1)
                    if i >= len(self.universe.trajectory):
                        break
                    # duplex=False: conn_parent receives, conn_child sends.
                    conn_parent, conn_child = multiprocessing.Pipe(False)
                    while True:
                        try:
                            # new thread
                            jobs.append(
                                (multiprocessing.Process(
                                    target=self._HBA,
                                    args=(self.universe.trajectory[i], conn_child, self.universe,
                                          self.selection1, self.selection2,)),
                                 conn_parent))
                            break
                        except:
                            print("error in jobs.append")
                    jobs[j][0].start()
                    i = i + 1
                # Drain this batch's results in submission order.
                for j in range(self.nproc):
                    if k >= len(self.universe.trajectory):
                        break
                    rec01 = jobs[j][1]
                    received = rec01.recv()
                    h_list.append(received)
                    jobs[j][0].join()
                    k += 1
            self.timeseries = self._getGraphics(h_list, 0, self.tf - 1, self.dtmax)
        else:
            # Serial path: one HydrogenBondAnalysis over the whole trajectory.
            h_list = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(self.universe, self.selection1,
                                                                     self.selection2, distance=3.5, angle=120.0)
            h_list.run(**kwargs)
            self.timeseries = self._getGraphics(h_list.timeseries, self.t0, self.tf, self.dtmax)
class WaterOrientationalRelaxation(object):
    r"""Water orientation relaxation analysis

    Function to evaluate the Water Orientational Relaxation proposed by
    Yu-ling Yeh and Chung-Yuan Mou [Yeh1999]_. WaterOrientationalRelaxation
    indicates "how fast" water molecules are rotating or changing direction.
    This is a time correlation function given by:

    .. math::
        C_{\hat u}(\tau)=\langle \mathit{P}_2[\mathbf{\hat{u}}(t_0)\cdot\mathbf{\hat{u}}(t_0+\tau)]\rangle

    where :math:`P_2=(3x^2-1)/2` is the second-order Legendre polynomial and
    :math:`\hat{u}` is a unit vector along HH, OH or dipole vector.

    Parameters
    ----------
    universe : Universe
        Universe object
    selection : str
        Selection string for water ['byres name OH2'].
    t0 : int
        frame where analysis begins
    tf : int
        frame where analysis ends
    dtmax : int
        Maximum dt size, `dtmax` < `tf` or it will crash.
    nproc : int
        Number of processes; only the serial selection path is implemented,
        any other value falls back to serial.

    .. versionadded:: 0.11.0
    """

    def __init__(self, universe, selection, t0, tf, dtmax, nproc=1):
        self.universe = universe
        self.selection = selection
        self.t0 = t0
        self.tf = tf
        self.dtmax = dtmax
        self.nproc = nproc
        # Filled by run(): list of (WOR_OH, WOR_HH, WOR_dip) tuples, one
        # per dt window.
        self.timeseries = None

    def _repeatedIndex(self, selection, dt, totalFrames):
        """
        Indicate the comparison between all the t+dt.
        The result is a list of lists with the repeated indices per frame.
        Ex: dt=1, so compare frames (1,2),(2,3),(3,4)...
        Ex: dt=2, so compare frames (1,3),(3,5),(5,7)...
        Ex: dt=3, so compare frames (1,4),(4,7),(7,10)...
        """
        rep = []
        for i in range(int(round((totalFrames - 1) / float(dt)))):
            if (dt * i + dt < totalFrames):
                rep.append(self._sameMolecTandDT(selection, dt * i, (dt * i) + dt))
        return rep

    def _getOneDeltaPoint(self, universe, repInd, i, t0, dt):
        """
        Give one point to average and get one point of the C_vect vs t graph.
        Ex: t0=1 and tau=1 so calculate the t0-tau=1-2 interval.
        Ex: t0=5 and tau=3 so calculate the t0-tau=5-8 interval.
        i = comes from _getMeanOnePoint (named j there) (int)

        Raises ZeroDivisionError when the window holds no molecules
        (caught by the caller).
        """
        valOH = 0
        valHH = 0
        valdip = 0
        n = 0
        # BUG FIX: floor division -- on Python 3 ``len(...) / 3`` yields a
        # float and range() raises TypeError.  Atoms are stored as
        # consecutive (O, H1, H2) triplets.
        for j in range(len(repInd[i]) // 3):
            begj = 3 * j
            # Seek the trajectory to frame t0 (side effect: updates the
            # atoms' .position attributes).
            universe.trajectory[t0]
            Ot0 = repInd[i][begj]
            H1t0 = repInd[i][begj + 1]
            H2t0 = repInd[i][begj + 2]
            OHVector0 = H1t0.position - Ot0.position
            HHVector0 = H1t0.position - H2t0.position
            dipVector0 = ((H1t0.position + H2t0.position) * 0.5) - Ot0.position

            # Seek to frame t0+dt and build the same three vectors again.
            universe.trajectory[t0 + dt]
            Otp = repInd[i][begj]
            H1tp = repInd[i][begj + 1]
            H2tp = repInd[i][begj + 2]
            OHVectorp = H1tp.position - Otp.position
            HHVectorp = H1tp.position - H2tp.position
            dipVectorp = ((H1tp.position + H2tp.position) * 0.5) - Otp.position

            normOHVector0 = np.linalg.norm(OHVector0)
            normOHVectorp = np.linalg.norm(OHVectorp)
            normHHVector0 = np.linalg.norm(HHVector0)
            normHHVectorp = np.linalg.norm(HHVectorp)
            normdipVector0 = np.linalg.norm(dipVector0)
            normdipVectorp = np.linalg.norm(dipVectorp)

            unitOHVector0 = [OHVector0[0] / normOHVector0,
                             OHVector0[1] / normOHVector0,
                             OHVector0[2] / normOHVector0]
            unitOHVectorp = [OHVectorp[0] / normOHVectorp,
                             OHVectorp[1] / normOHVectorp,
                             OHVectorp[2] / normOHVectorp]
            unitHHVector0 = [HHVector0[0] / normHHVector0,
                             HHVector0[1] / normHHVector0,
                             HHVector0[2] / normHHVector0]
            unitHHVectorp = [HHVectorp[0] / normHHVectorp,
                             HHVectorp[1] / normHHVectorp,
                             HHVectorp[2] / normHHVectorp]
            unitdipVector0 = [dipVector0[0] / normdipVector0,
                              dipVector0[1] / normdipVector0,
                              dipVector0[2] / normdipVector0]
            unitdipVectorp = [dipVectorp[0] / normdipVectorp,
                              dipVectorp[1] / normdipVectorp,
                              dipVectorp[2] / normdipVectorp]

            valOH += self.lg2(np.dot(unitOHVector0, unitOHVectorp))
            valHH += self.lg2(np.dot(unitHHVector0, unitHHVectorp))
            valdip += self.lg2(np.dot(unitdipVector0, unitdipVectorp))
            n += 1
        # Raises ZeroDivisionError when n == 0; handled in _getMeanOnePoint.
        valOH = valOH / n
        valHH = valHH / n
        valdip = valdip / n
        return (valOH, valHH, valdip)

    def _getMeanOnePoint(self, universe, selection1, selection_str, dt, totalFrames):
        """
        This function gets one point of the C_OH vs t graph. It uses the
        _getOneDeltaPoint() function to calculate the average.

        ``selection_str`` is unused but kept for interface compatibility.
        """
        repInd = self._repeatedIndex(selection1, dt, totalFrames)
        sumsdt = 0
        n = 0.0
        sumDeltaOH = 0.0
        sumDeltaHH = 0.0
        sumDeltadip = 0.0
        valOHList = []
        valHHList = []
        valdipList = []
        # BUG FIX: floor division -- on Python 3 ``totalFrames / dt`` is a
        # float and range() raises TypeError.
        for j in range(totalFrames // dt - 1):
            # If the selection of atoms is too small, there will be a
            # division by zero in _getOneDeltaPoint; skip that window so it
            # does not contribute to the mean.
            try:
                a = self._getOneDeltaPoint(universe, repInd, j, sumsdt, dt)
            except ZeroDivisionError:
                continue
            sumDeltaOH += a[0]
            sumDeltaHH += a[1]
            sumDeltadip += a[2]
            valOHList.append(a[0])
            valHHList.append(a[1])
            valdipList.append(a[2])
            sumsdt += dt
            n += 1
        return (sumDeltaOH / n, sumDeltaHH / n, sumDeltadip / n)

    def _sameMolecTandDT(self, selection, t0d, tf):
        """
        Compare the molecules in the t0d selection and the t0d+dt selection
        and select only the particles that are repeated in both frames.
        This is to consider only the molecules that remain in the selection
        after the dt time has elapsed.
        The result is a sorted list with the indices of the atoms.
        """
        a = set(selection[t0d])
        b = set(selection[tf])
        sort = sorted(list(a.intersection(b)))
        return sort

    def _selection_serial(self, universe, selection_str):
        # Evaluate the selection once per frame up front; much faster than
        # re-selecting inside the correlation loops.
        selection = []
        for ts in universe.trajectory:
            selection.append(universe.select_atoms(selection_str))
            print(ts.frame)
        return selection

    # Second-order Legendre polynomial P2(x) = (3x^2 - 1) / 2.
    lg2 = lambda self, x: (3 * x * x - 1) / 2

    def run(self, **kwargs):
        """
        Analyze trajectory and produce timeseries
        """
        # All the selections to an array; this way is faster than
        # selecting later.
        if self.nproc == 1:
            selection_out = self._selection_serial(self.universe, self.selection)
        else:
            # Parallel selection to be implemented; fall back to serial.
            selection_out = self._selection_serial(self.universe, self.selection)
        self.timeseries = []
        for dt in list(range(1, self.dtmax + 1)):
            output = self._getMeanOnePoint(self.universe, selection_out,
                                           self.selection, dt, self.tf)
            self.timeseries.append(output)
class AngularDistribution(object):
    r"""Angular distribution function analysis

    The angular distribution function (AD) is defined as the probability
    distribution of the cosine of the :math:`\theta` angle formed by the OH
    vector, HH vector or dipole vector of water molecules and a vector
    :math:`\hat n` parallel to a chosen axis ('z' is the default). The
    cosine is defined as :math:`\cos \theta = \hat u \cdot \hat n`, where
    :math:`\hat u` is OH, HH or the dipole vector.  It creates a histogram
    and returns a list of lists.  The AD is also known as Angular
    Probability (AP).

    Parameters
    ----------
    universe : Universe
        Universe object
    selection_str : str
        Selection string to evaluate its angular distribution
        ['byres name OH2']
    bins : int (optional)
        Number of bins used by :func:`numpy.histogram` [40]
    nproc : int (optional)
        Number of processes; only the serial path (1) is implemented [1]
    axis : {'x', 'y', 'z'} (optional)
        Axis against which cos(theta) of the HH, OH and dipole vectors is
        computed ['z'].

    .. versionadded:: 0.11.0
    """

    def __init__(self, universe, selection_str, bins=40, nproc=1, axis="z"):
        self.universe = universe
        self.selection_str = selection_str
        self.bins = bins
        self.nproc = nproc
        self.axis = axis
        # Filled by run(): three column-formatted histograms (OH, HH, dipole).
        self.graph = None

    def _getCosTheta(self, universe, selection, axis):
        """
        Collect cos(theta) between ``axis`` and the OH, HH and dipole
        vectors for every water molecule in every frame.

        Assumes each per-frame selection stores atoms grouped per water in
        the order O, H1, H2 (positions at indices 3k, 3k+1, 3k+2).
        Raises KeyError if ``axis`` is not one of 'x', 'y', 'z'.
        """
        valOH = []
        valHH = []
        valdip = []
        # Component index of the chosen axis in a position vector.
        axis_index = {"x": 0, "y": 1, "z": 2}[axis]
        for frame, frame_selection in enumerate(selection):
            universe.trajectory[frame]
            positions = frame_selection.positions
            Ot0 = positions[::3]
            H1t0 = positions[1::3]
            H2t0 = positions[2::3]
            OHVector0 = H1t0 - Ot0
            HHVector0 = H1t0 - H2t0
            dipVector0 = (H1t0 + H2t0) * 0.5 - Ot0
            # Normalize each row so the axis component IS the cosine.
            unitOHVector0 = OHVector0 / np.linalg.norm(OHVector0, axis=1)[:, None]
            unitHHVector0 = HHVector0 / np.linalg.norm(HHVector0, axis=1)[:, None]
            unitdipVector0 = dipVector0 / np.linalg.norm(dipVector0, axis=1)[:, None]
            for j in range(len(positions) // 3):
                valOH.append(unitOHVector0[j][axis_index])
                valHH.append(unitHHVector0[j][axis_index])
                valdip.append(unitdipVector0[j][axis_index])
        return (valOH, valHH, valdip)

    def _getHistogram(self, universe, selection, bins, axis):
        """
        Build a normalized (density) histogram of the cos(theta) values.

        Returns a tuple of three ``(hist, bin_edges)`` pairs, one each for
        the OH, HH and dipole vectors.
        """
        cosThetaOH, cosThetaHH, cosThetadip = self._getCosTheta(
            universe, selection, axis)
        # Bug fix: `normed=True` was removed from numpy.histogram
        # (NumPy >= 1.24) and raises TypeError; `density=True` is the
        # equivalent normalization for uniform-width bins.
        histcosThetaOH = np.histogram(cosThetaOH, bins, density=True)
        histcosThetaHH = np.histogram(cosThetaHH, bins, density=True)
        histcosThetadip = np.histogram(cosThetadip, bins, density=True)
        return (histcosThetaOH, histcosThetaHH, histcosThetadip)

    def _hist2column(self, aList):
        """
        Transpose histogram rows into space-separated text columns.

        Rows of unequal length are padded with '.'.
        """
        return [" ".join(str(item) for item in row)
                for row in zip_longest(*aList, fillvalue=".")]

    def run(self, **kwargs):
        """
        Evaluate the angular distribution of cos(theta) and store it.

        Fills ``self.graph`` with three column-formatted histograms
        (OH, HH, dipole); each entry lists bin edges then densities.
        """
        if self.nproc == 1:
            selection = self._selection_serial(self.universe, self.selection_str)
        else:
            # Parallel selection is not implemented yet; fall back to serial.
            selection = self._selection_serial(self.universe, self.selection_str)
        self.graph = []
        output = self._getHistogram(self.universe, selection, self.bins, self.axis)
        # Pair each histogram's bin edges (col 1) with its densities (col 2).
        # This output format could probably be improved.
        listOH = [list(output[0][1]), list(output[0][0])]
        listHH = [list(output[1][1]), list(output[1][0])]
        listdip = [list(output[2][1]), list(output[2][0])]
        self.graph.append(self._hist2column(listOH))
        self.graph.append(self._hist2column(listHH))
        self.graph.append(self._hist2column(listdip))

    def _selection_serial(self, universe, selection_str):
        """Evaluate the selection on every frame, printing progress."""
        selection = []
        for ts in universe.trajectory:
            selection.append(universe.select_atoms(selection_str))
            print(ts.frame)
        return selection
class MeanSquareDisplacement(object):
    r"""Mean square displacement analysis

    Function to evaluate the Mean Square Displacement (MSD_). The MSD gives
    the average distance that particles travel. The MSD is given by:

    .. math::
        \langle\Delta r(t)^2\rangle = 2nDt

    where :math:`r(t)` is the position of a particle at time :math:`t`,
    :math:`\Delta r(t)` is the displacement after time lag :math:`t`,
    :math:`n` is the dimensionality (here :math:`n=3`), :math:`D` is the
    diffusion coefficient and :math:`t` is the time.

    .. _MSD: http://en.wikipedia.org/wiki/Mean_squared_displacement

    Parameters
    ----------
    universe : Universe
        Universe object
    selection : str
        Selection string for water ['byres name OH2'].
    t0 : int
        frame where analysis begins
    tf : int
        frame where analysis ends
    dtmax : int
        Maximum dt size, `dtmax` < `tf` or it will crash.

    .. versionadded:: 0.11.0
    """

    def __init__(self, universe, selection, t0, tf, dtmax, nproc=1):
        self.universe = universe
        self.selection = selection
        self.t0 = t0
        self.tf = tf
        self.dtmax = dtmax
        self.nproc = nproc
        # Filled by run(): one MSD value per dt in [1, dtmax].
        self.timeseries = None

    def _repeatedIndex(self, selection, dt, totalFrames):
        """
        Indicate the comparison between all the t+dt.
        The result is a list of lists with the repeated indices per frame
        (or time).
        Ex: dt=1 compares frames (1,2),(2,3),(3,4)...
        Ex: dt=2 compares frames (1,3),(3,5),(5,7)...
        Ex: dt=3 compares frames (1,4),(4,7),(7,10)...
        """
        rep = []
        for i in range(int(round((totalFrames - 1) / float(dt)))):
            if (dt * i + dt < totalFrames):
                rep.append(self._sameMolecTandDT(selection, dt * i, (dt * i) + dt))
        return rep

    def _getOneDeltaPoint(self, universe, repInd, i, t0, dt):
        """
        Give one point to average for the graphic C_vect vs t.
        Ex: t0=1 and dt=1 calculates the t0-dt=1-2 interval.
        Ex: t0=5 and dt=3 calculates the t0-dt=5-8 interval.
        ``i`` comes from _getMeanOnePoint (named j there).
        """
        valO = 0
        n = 0
        # Bug fix: integer division (was `/`, a TypeError under Python 3).
        # Atoms are stored as O, H1, H2 triplets, so len//3 waters.
        for j in range(len(repInd[i]) // 3):
            begj = 3 * j
            universe.trajectory[t0]
            # "+ 0" forces a copy so Ot0 is not aliased to Otp.
            Ot0 = repInd[i][begj].position + 0
            universe.trajectory[t0 + dt]
            Otp = repInd[i][begj].position + 0
            # Oxygen displacement over dt; squaring it is the difference
            # with waterdynamics.WaterOrientationalRelaxation.
            OVector = Ot0 - Otp
            valO += np.dot(OVector, OVector)
            n += 1
        return valO / n

    def _getMeanOnePoint(self, universe, selection1, selection_str, dt, totalFrames):
        """
        Get one point of the graphic C_OH vs t by averaging
        _getOneDeltaPoint() over all starting frames.
        """
        repInd = self._repeatedIndex(selection1, dt, totalFrames)
        sumsdt = 0
        n = 0.0
        sumDeltaO = 0.0
        valOList = []
        # Bug fix: integer division keeps range() valid under Python 3
        # (matches the old Python 2 `/` floor division for ints).
        for j in range(totalFrames // dt - 1):
            a = self._getOneDeltaPoint(universe, repInd, j, sumsdt, dt)
            sumDeltaO += a
            valOList.append(a)
            sumsdt += dt
            n += 1
        return sumDeltaO / n

    def _sameMolecTandDT(self, selection, t0d, tf):
        """
        Compare the molecules in the t0d selection and the t0d+dt selection
        and keep only the particles present in both frames, so that only
        molecules that remain in the selection over dt are considered.
        Returns a sorted list of atom indices.
        """
        a = set(selection[t0d])
        b = set(selection[tf])
        sort = sorted(list(a.intersection(b)))
        return sort

    def _selection_serial(self, universe, selection_str):
        """Evaluate the selection on every frame, printing progress."""
        selection = []
        for ts in universe.trajectory:
            selection.append(universe.select_atoms(selection_str))
            print(ts.frame)
        return selection

    def run(self, **kwargs):
        """
        Analyze trajectory and produce timeseries
        """
        # Evaluate all per-frame selections up front; faster than
        # re-selecting inside the dt loop.
        if self.nproc == 1:
            selection_out = self._selection_serial(self.universe, self.selection)
        else:
            # parallel not yet implemented
            # selection = selection_parallel(universe, selection_str, nproc)
            selection_out = self._selection_serial(self.universe, self.selection)
        self.timeseries = []
        for dt in range(1, self.dtmax + 1):
            output = self._getMeanOnePoint(self.universe, selection_out,
                                           self.selection, dt, self.tf)
            self.timeseries.append(output)
class SurvivalProbability(object):
    r"""Survival probability analysis

    Function to evaluate the Survival Probability (SP). The SP gives the
    probability for a group of particles to remain in a certain region.
    The SP is given by:

    .. math::
        P(\tau) = \frac1T \sum_{t=1}^T \frac{N(t,t+\tau)}{N(t)}

    where :math:`T` is the maximum time of simulation, :math:`\tau` is the
    timestep and :math:`N` the number of particles at a certain time.

    Parameters
    ----------
    universe : Universe
        Universe object
    selection : str
        Selection string; any selection is allowed. With this selection you
        define the region/zone where to analyze, e.g.: "selection_a" and "zone"
        (see `SP-examples`_ )
    t0 : int
        frame where analysis begins
    tf : int
        frame where analysis ends
    dtmax : int
        Maximum dt size, `dtmax` < `tf` or it will crash.

    .. versionadded:: 0.11.0
    """

    def __init__(self, universe, selection, t0, tf, dtmax, nproc=1):
        self.universe = universe
        self.selection = selection
        self.t0 = t0
        self.tf = tf
        self.dtmax = dtmax
        self.nproc = nproc
        # Filled by run() with one survival probability per tau in [1, dtmax].
        self.timeseries = None

    def _getOneDeltaPoint(self, selection, totalFrames, t0, tau):
        """
        Return N(t0, t0+tau) / N(t0) for a single starting frame.

        Ex: t0=1 and tau=1 calculates the t0-tau=1-2 interval.
        Ex: t0=5 and tau=3 calculates the t0-tau=5-8 interval.

        Raises ZeroDivisionError when no particles are selected at t0;
        the caller catches this and skips the frame.
        """
        Ntau = self._NumPart_tau(selection, totalFrames, t0, tau)
        Nt = float(self._NumPart(selection, t0))
        return Ntau / Nt

    def _getMeanOnePoint(self, universe, selection1, selection_str, wint, totalFrames):
        """
        Get one point of the graphic P(t) vs t by averaging
        _getOneDeltaPoint() over all valid starting frames.
        """
        n = 0.0
        sumDeltaP = 0.0
        for frame in range(totalFrames - wint):
            # This "try" avoids a division by zero when there are no
            # particles at time t0; this happens in very small selection
            # regions.
            try:
                a = self._getOneDeltaPoint(selection1, totalFrames, frame, wint)
            except ZeroDivisionError:
                continue
            sumDeltaP += a
            n += 1
        return sumDeltaP / n

    def _NumPart_tau(self, selection, totalFrames, t0, tau):
        """
        Compare the molecules in the t0 selection and the t0+tau selection
        and keep only the particles that remain from t0 to t0+tau. Returns
        the number of remaining particles.
        """
        a = set(selection[t0])
        i = 0
        # NOTE(review): this intersects frames t0 .. t0+tau-1, i.e. the frame
        # at exactly t0+tau is never included -- confirm the window boundary
        # is intentional.
        while (t0 + i) < t0 + tau and (t0 + i) < totalFrames:
            b = set(selection[t0 + i])
            a = a.intersection(b)
            i += 1
        return len(a)

    def _NumPart(self, selection, t):
        # Number of selected particles at frame t.
        return len(selection[t])

    def _selection_serial(self, universe, selection_str):
        # Evaluate the selection once per frame; the print is a crude
        # progress indicator.
        selection = []
        for ts in universe.trajectory:
            selection.append(universe.select_atoms(selection_str))
            print(ts.frame)
        return selection

    def run(self, **kwargs):
        """
        Analyze trajectory and produce timeseries
        """
        # Evaluate all per-frame selections up front; faster than
        # re-selecting inside the tau loop.
        if self.nproc == 1:
            selection_out = self._selection_serial(self.universe, self.selection)
        else:
            # selection = selection_parallel(universe, selection_str, nproc)
            # parallel selection to be implemented
            selection_out = self._selection_serial(self.universe, self.selection)
        self.timeseries = []
        for dt in list(range(1, self.dtmax + 1)):
            output = self._getMeanOnePoint(self.universe, selection_out, self.selection, dt, self.tf)
            self.timeseries.append(output)
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/waterdynamics.py
|
Python
|
gpl-2.0
| 44,292
|
[
"MDAnalysis"
] |
4c9842b0c8355f555882e5aa890961976071ef0d28c3f88c5e3d1b94b1d717c6
|
# Orca
# Copyright (C) 2014-2015 Synthicity, LLC
# Copyright (C) 2015 Autodesk
# See full license in LICENSE.
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
    """assert_frames_equal must reject inputs that are not DataFrames."""
    df = pd.DataFrame({'a': [1]})
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(df, 1)
    assert 'Inputs must both be pandas DataFrames.' in str(excinfo.value)
def test_frames_equal_mismatched_columns():
    """A column present only in `expected` triggers an assertion."""
    actual = pd.DataFrame({'b': [2]})
    expected = pd.DataFrame({'a': [1]})
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(actual, expected)
    assert "Expected column 'a' not found." in str(excinfo.value)
def test_frames_equal_mismatched_rows():
    """A row index present only in `expected` triggers an assertion."""
    actual = pd.DataFrame({'a': [1]}, index=[1])
    expected = pd.DataFrame({'a': [1]}, index=[0])
    with pytest.raises(AssertionError) as excinfo:
        testing.assert_frames_equal(actual, expected)
    assert "Expected row 0 not found." in str(excinfo.value)
def test_frames_equal_mismatched_items():
    """A differing cell value raises, and the message pinpoints it."""
    expected = pd.DataFrame({'a': [1]})
    actual = pd.DataFrame({'a': [2]})
    with pytest.raises(AssertionError) as info:
        testing.assert_frames_equal(actual, expected)
    # The expected block below depends on the exact formatting emitted by
    # numpy.testing's comparison helpers.
    assert ("""
Items are not equal:
ACTUAL: 2
DESIRED: 1
Column: 'a'
Row: 0""" in str(info.value))
def test_frames_equal():
    """A frame always compares equal to itself (no exception raised)."""
    df = pd.DataFrame({'a': [1]})
    testing.assert_frames_equal(df, df)
def test_frames_equal_close():
    """use_close=True tolerates tiny float differences; the default does not."""
    lhs = pd.DataFrame({'a': [1]})
    rhs = pd.DataFrame({'a': [1.00000000000002]})
    with pytest.raises(AssertionError):
        testing.assert_frames_equal(lhs, rhs)
    testing.assert_frames_equal(lhs, rhs, use_close=True)
def test_index_equal_order_agnostic():
    """Indexes with the same members in different order compare equal."""
    a_idx = pd.Index([1, 2, 3])
    b_idx = pd.Index([3, 2, 1])
    testing.assert_index_equal(a_idx, b_idx)
def test_index_equal_order_agnostic_raises_left():
    """An extra member on the left side raises AssertionError."""
    a_idx = pd.Index([1, 2, 3, 4])
    b_idx = pd.Index([3, 2, 1])
    with pytest.raises(AssertionError):
        testing.assert_index_equal(a_idx, b_idx)
def test_index_equal_order_agnostic_raises_right():
    """An extra member on the right side raises AssertionError."""
    a_idx = pd.Index([1, 2, 3])
    b_idx = pd.Index([3, 2, 1, 4])
    with pytest.raises(AssertionError):
        testing.assert_index_equal(a_idx, b_idx)
|
SANDAG/orca
|
orca/utils/tests/test_testing.py
|
Python
|
bsd-3-clause
| 2,290
|
[
"ORCA"
] |
872f50228c2cfb76bb75ceb49ff718d27b937946c2f502c0cc9239736ee653cc
|
from ase.optimize.sciopt import Converged, SciPyOptimizer
from ase.optimize import Optimizer
class SciPyFminLBFGSB(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno).

    NOTE(review): despite the class name, this drives scipy's plain BFGS
    (``fmin_bfgs``), not L-BFGS-B -- confirm intent.
    """

    def call_fmin(self, fmax, steps):
        """Run scipy's BFGS minimizer over the wrapped ASE objective.

        Parameters
        ----------
        fmax : float
            Target maximum force. Convergence is detected by
            ``self.callback`` (which raises ``Converged``), so the
            ``gtol`` handed to scipy is set below it and should never
            be reached.
        steps : int
            Maximum number of minimizer iterations.
        """
        # Bug fix: `np` and `opt` were referenced but never imported at
        # module level in this file (NameError at call time).
        import numpy as np
        from scipy import optimize as opt

        opt.fmin_bfgs(self.f,
                      self.x0(),
                      fprime=self.fprime,
                      # args=(),
                      gtol=fmax * 0.1,  # should never be reached
                      norm=np.inf,
                      # epsilon=1.4901161193847656e-08,
                      maxiter=steps,
                      # full_output=1,
                      disp=0,
                      # retall=0,
                      callback=self.callback
                      )
|
alexei-matveev/ase-local
|
ase/optimize/for_lbfgsb.py
|
Python
|
gpl-2.0
| 859
|
[
"ASE"
] |
ce2a1a4ee3079751b18f66bddeaa801a8bdde5f6e313d53be41001b809a1071b
|
"""
Support for the Amazon Polly text to speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts.amazon_polly/
"""
import logging
import voluptuous as vol
from homeassistant.components.tts import Provider, PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['boto3==1.9.16']  # AWS SDK used to call the Polly API

_LOGGER = logging.getLogger(__name__)

# Configuration keys -- named to match boto3's session/client arguments.
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
CONF_PROFILE_NAME = 'profile_name'
ATTR_CREDENTIALS = 'credentials'

DEFAULT_REGION = 'us-east-1'
# AWS regions accepted for the Polly client.
SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
                     'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2',
                     'eu-west-3', 'ap-southeast-1', 'ap-southeast-2',
                     'ap-northeast-2', 'ap-northeast-1', 'ap-south-1',
                     'sa-east-1']

CONF_VOICE = 'voice'
CONF_OUTPUT_FORMAT = 'output_format'
CONF_SAMPLE_RATE = 'sample_rate'
CONF_TEXT_TYPE = 'text_type'

# Polly voice IDs, grouped by language.
# NOTE(review): 'Aditi' appears twice (English-Indian and Hindi) -- harmless
# for the vol.In membership check, but redundant.
SUPPORTED_VOICES = [
    'Zhiyu',  # Chinese
    'Mads', 'Naja',  # Danish
    'Ruben', 'Lotte',  # Dutch
    'Russell', 'Nicole',  # English Austrailian
    'Brian', 'Amy', 'Emma',  # English
    'Aditi', 'Raveena',  # English, Indian
    'Joey', 'Justin', 'Matthew', 'Ivy', 'Joanna', 'Kendra', 'Kimberly',
    'Salli',  # English
    'Geraint',  # English Welsh
    'Mathieu', 'Celine', 'Léa',  # French
    'Chantal',  # French Canadian
    'Hans', 'Marlene', 'Vicki',  # German
    'Aditi',  # Hindi
    'Karl', 'Dora',  # Icelandic
    'Giorgio', 'Carla', 'Bianca',  # Italian
    'Takumi', 'Mizuki',  # Japanese
    'Seoyeon',  # Korean
    'Liv',  # Norwegian
    'Jacek', 'Jan', 'Ewa', 'Maja',  # Polish
    'Ricardo', 'Vitoria',  # Portuguese, Brazilian
    'Cristiano', 'Ines',  # Portuguese, European
    'Carmen',  # Romanian
    'Maxim', 'Tatyana',  # Russian
    'Enrique', 'Conchita', 'Lucia',  # Spanish European
    'Mia',  # Spanish Mexican
    'Miguel', 'Penelope',  # Spanish US
    'Astrid',  # Swedish
    'Filiz',  # Turkish
    'Gwyneth',  # Welsh
]

SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm']

SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050']

# Valid sample rates per output format (pcm caps at 16 kHz).
SUPPORTED_SAMPLE_RATES_MAP = {
    'mp3': ['8000', '16000', '22050'],
    'ogg_vorbis': ['8000', '16000', '22050'],
    'pcm': ['8000', '16000'],
}

SUPPORTED_TEXT_TYPES = ['text', 'ssml']

# Maps the Polly response ContentType to the saved audio file extension.
CONTENT_TYPE_EXTENSIONS = {
    'audio/mpeg': 'mp3',
    'audio/ogg': 'ogg',
    'audio/pcm': 'pcm',
}

DEFAULT_VOICE = 'Joanna'
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'

# Default sample rate chosen per output format when none is configured.
DEFAULT_SAMPLE_RATES = {
    'mp3': '22050',
    'ogg_vorbis': '22050',
    'pcm': '16000',
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION):
        vol.In(SUPPORTED_REGIONS),
    # Key id and secret must be given together; a profile name excludes
    # explicit credentials.
    vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
    vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT):
        vol.In(SUPPORTED_OUTPUT_FORMATS),
    vol.Optional(CONF_SAMPLE_RATE):
        vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE):
        vol.In(SUPPORTED_TEXT_TYPES),
})
def get_engine(hass, config):
    """Set up Amazon Polly speech component."""
    output_format = config.get(CONF_OUTPUT_FORMAT)
    sample_rate = config.get(CONF_SAMPLE_RATE,
                             DEFAULT_SAMPLE_RATES[output_format])
    if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
        _LOGGER.error("%s is not a valid sample rate for %s",
                      sample_rate, output_format)
        return None

    config[CONF_SAMPLE_RATE] = sample_rate

    import boto3

    profile_name = config.get(CONF_PROFILE_NAME)
    if profile_name is not None:
        boto3.setup_default_session(profile_name=profile_name)

    # Extract the AWS session arguments and strip them from the platform
    # config before handing it to the provider.
    aws_config = {
        CONF_REGION: config.get(CONF_REGION),
        CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID),
        CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY),
    }
    for aws_key in (CONF_REGION, CONF_ACCESS_KEY_ID, CONF_SECRET_ACCESS_KEY):
        del config[aws_key]

    polly_client = boto3.client('polly', **aws_config)

    # Ask Polly which voices exist and derive the language list from them.
    all_voices = {}
    supported_languages = []
    for voice in polly_client.describe_voices().get('Voices'):
        all_voices[voice.get('Id')] = voice
        if voice.get('LanguageCode') not in supported_languages:
            supported_languages.append(voice.get('LanguageCode'))

    return AmazonPollyProvider(
        polly_client, config, supported_languages, all_voices)
class AmazonPollyProvider(Provider):
    """Amazon Polly speech api provider."""

    def __init__(self, polly_client, config, supported_languages,
                 all_voices):
        """Initialize Amazon Polly provider for TTS.

        ``polly_client`` is a configured boto3 Polly client;
        ``all_voices`` maps voice Id -> voice description dict as returned
        by describe_voices().
        """
        self.client = polly_client
        self.config = config
        self.supported_langs = supported_languages
        self.all_voices = all_voices
        self.default_voice = self.config.get(CONF_VOICE)
        self.name = 'Amazon Polly'

    @property
    def supported_languages(self):
        """Return a list of supported languages."""
        return self.supported_langs

    @property
    def default_language(self):
        """Return the default language (derived from the default voice)."""
        return self.all_voices.get(self.default_voice).get('LanguageCode')

    @property
    def default_options(self):
        """Return dict include default options."""
        return {CONF_VOICE: self.default_voice}

    @property
    def supported_options(self):
        """Return a list of supported options."""
        return [CONF_VOICE]

    def get_tts_audio(self, message, language=None, options=None):
        """Request TTS file from Polly.

        Returns a (file extension, audio bytes) tuple, or (None, None)
        when the requested voice does not serve the requested language.
        """
        # NOTE(review): assumes `options` is always a dict (presumably Home
        # Assistant merges default_options before calling) -- confirm
        # against the caller.
        voice_id = options.get(CONF_VOICE, self.default_voice)
        voice_in_dict = self.all_voices.get(voice_id)
        if language != voice_in_dict.get('LanguageCode'):
            _LOGGER.error("%s does not support the %s language",
                          voice_id, language)
            return None, None

        resp = self.client.synthesize_speech(
            OutputFormat=self.config[CONF_OUTPUT_FORMAT],
            SampleRate=self.config[CONF_SAMPLE_RATE],
            Text=message,
            TextType=self.config[CONF_TEXT_TYPE],
            VoiceId=voice_id
        )

        return (CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')],
                resp.get('AudioStream').read())
|
tinloaf/home-assistant
|
homeassistant/components/tts/amazon_polly.py
|
Python
|
apache-2.0
| 6,759
|
[
"Brian"
] |
f68dc4639f74022938a41a51277bd66b6f6b9af0b782f5ff90aa040d51f24525
|
"""
This module can be used to randomize for example galaxy positions.
:depends: NumPy
:author: Sami-Matias Niemi
:date: 21 May, 2011
:version: 0.1
"""
import numpy as np
__author__ = 'Sami-Matias Niemi'
def randomUnitSphere(points=1):
    """
    Return uniformly random positions on a unit sphere.

    The number of random points returned can be controlled with the
    optional ``points`` keyword argument.

    :param points: the number of random points drawn
    :type points: int

    :return: random theta and phi angles (plus the point count)
    :rtype: dictionary
    """
    azimuth_seed = np.random.rand(points)
    polar_seed = np.random.rand(points)
    # theta and phi cannot both be drawn uniformly: the sphere's area
    # element carries a sin(phi) factor, so doing that would pack points
    # at the poles.  Drawing phi through arccos gives a uniform covering.
    return {'theta': 2. * np.pi * azimuth_seed,
            'phi': np.arccos(2. * polar_seed - 1),
            'points': points}
|
sniemi/SamPy
|
astronomy/randomizers.py
|
Python
|
bsd-2-clause
| 1,064
|
[
"Galaxy"
] |
43bc37de3fdebe6eec70f8e85ec3aea3fb6b4f260d978d21ac323947a3053a45
|
""" This is a test of the chain
ReqClient -> ReqManagerHandler -> ReqDB
It supposes that the DB is present, and that the service is running
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import print_function
import unittest
import sys
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Security.Properties import FULL_DELEGATION, LIMITED_DELEGATION
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
class ReqClientTestCase(unittest.TestCase):
    """
    .. class:: ReqClientTestCase

    Base fixture: builds a Request holding one ReplicateAndRegister
    operation over two files, owned by the identity of the current proxy.
    """

    def setUp(self):
        """ test case set up """
        gLogger.setLevel('INFO')

        self.file = File()
        self.file.LFN = "/lhcb/user/c/cibak/testFile"
        self.file.Checksum = "123456"
        self.file.ChecksumType = "ADLER32"

        self.file2 = File()
        self.file2.LFN = "/lhcb/user/f/fstagni/testFile"
        self.file2.Checksum = "654321"
        self.file2.ChecksumType = "ADLER32"

        self.operation = Operation()
        self.operation.Type = "ReplicateAndRegister"
        self.operation.TargetSE = "CERN-USER"
        self.operation.addFile(self.file)
        self.operation.addFile(self.file2)

        # Ownership is taken from the proxy used to run the test.
        proxyInfo = getProxyInfo()['Value']
        self.request = Request()
        self.request.RequestName = "RequestManagerHandlerTests"
        self.request.OwnerDN = proxyInfo['identity']
        self.request.OwnerGroup = proxyInfo['group']
        self.request.JobID = 123
        self.request.addOperation(self.operation)

        # # JSON representation of a whole request
        self.jsonStr = self.request.toJSON()['Value']
        # # request client
        self.requestClient = ReqClient()

    def tearDown(self):
        """ clean up """
        # NOTE(review): self.file2 and self.requestClient are not deleted
        # here -- probably an oversight, though harmless for unittest.
        del self.request
        del self.operation
        del self.file
        del self.jsonStr
class ReqClientMix(ReqClientTestCase):
    """Integration tests: ReqClient -> ReqManagerHandler -> ReqDB."""

    def test01fullChain(self):
        """Put, fetch, inspect and delete requests, checking that the DB
        summary evolves as expected at each step."""
        put = self.requestClient.putRequest(self.request)
        self.assertTrue(put['OK'], put)
        # Bug fix: this originally checked `type(...) == long`; `long` does
        # not exist in Python 3 (NameError), and an exact-type check is
        # needlessly strict anyway.
        self.assertTrue(isinstance(put['Value'], int), put['Value'])
        reqID = put['Value']

        # # summary
        ret = self.requestClient.getDBSummary()
        self.assertTrue(ret['OK'])
        self.assertEqual(ret['Value'],
                         {'Operation': {'ReplicateAndRegister': {'Waiting': 1}},
                          'Request': {'Waiting': 1},
                          'File': {'Waiting': 2}})

        get = self.requestClient.getRequest(reqID)
        self.assertTrue(get['OK'])
        self.assertEqual(isinstance(get['Value'], Request), True)

        # # summary - the request became "Assigned"
        res = self.requestClient.getDBSummary()
        self.assertTrue(res['OK'])
        self.assertEqual(res['Value'],
                         {'Operation': {'ReplicateAndRegister': {'Waiting': 1}},
                          'Request': {'Assigned': 1},
                          'File': {'Waiting': 2}})

        res = self.requestClient.getRequestInfo(reqID)
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')

        res = self.requestClient.getRequestFileStatus(reqID, self.file.LFN)
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')
        res = self.requestClient.getRequestFileStatus(reqID, [self.file.LFN])
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')

        res = self.requestClient.getDigest(reqID)
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')

        res = self.requestClient.readRequestsForJobs([123])
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')
        self.assertTrue(isinstance(res['Value']['Successful'][123], Request))

        proxyInfo = getProxyInfo()['Value']

        # Adding new request
        request2 = Request()
        request2.RequestName = "RequestManagerHandlerTests-2"
        # Bug fix: these two assignments originally mutated self.request
        # instead of the freshly created request2.
        request2.OwnerDN = proxyInfo['identity']
        request2.OwnerGroup = proxyInfo['group']
        request2.JobID = 456
        request2.addOperation(self.operation)

        # # update
        res = self.requestClient.putRequest(request2)
        self.assertEqual(res['OK'], True, res['Message'] if 'Message' in res else 'OK')
        reqID2 = res['Value']

        # # get summary again
        ret = self.requestClient.getDBSummary()
        self.assertTrue(ret['OK'])
        self.assertEqual(ret['Value'],
                         {'Operation': {'ReplicateAndRegister': {'Waiting': 2}},
                          'Request': {'Waiting': 1, 'Assigned': 1},
                          'File': {'Waiting': 4}})

        delete = self.requestClient.deleteRequest(reqID)
        self.assertEqual(delete['OK'], True, delete['Message'] if 'Message' in delete else 'OK')
        delete = self.requestClient.deleteRequest(reqID2)
        self.assertEqual(delete['OK'], True, delete['Message'] if 'Message' in delete else 'OK')

        # # should be empty now
        ret = self.requestClient.getDBSummary()
        self.assertTrue(ret['OK'])
        self.assertEqual(ret['Value'], {'Operation': {}, 'Request': {}, 'File': {}})

    def test02Authorization(self):
        """ Test whether requests set on behalf of others are rejected, unless done with Delegation properties
        This test is kind of stupid though, since we do the same thing than the server... not a real test !
        """
        request = Request({"RequestName": "unauthorized"})
        request.OwnerDN = 'NotMe'
        # Bug fix: OwnerDN was assigned twice; the second assignment was
        # clearly meant to set the group.
        request.OwnerGroup = 'AnotherGroup'
        op = Operation({"Type": "RemoveReplica", "TargetSE": "CERN-USER"})
        op += File({"LFN": "/lhcb/user/c/cibak/foo"})
        request += op
        res = self.requestClient.putRequest(request)

        credProperties = getProxyInfo()['Value']['groupProperties']
        # If the proxy with which we test has delegation, it should work
        if FULL_DELEGATION in credProperties or LIMITED_DELEGATION in credProperties:
            self.assertTrue(res['OK'], res)
            self.requestClient.deleteRequest(res['Value'])
        # otherwise no
        else:
            self.assertFalse(res['OK'], res)
if __name__ == '__main__':
    # Run both test cases; exit non-zero on failure so CI can detect it.
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(ReqClientTestCase)
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReqClientMix))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not testResult.wasSuccessful())
|
fstagni/DIRAC
|
tests/Integration/RequestManagementSystem/Test_Client_Req.py
|
Python
|
gpl-3.0
| 6,366
|
[
"DIRAC"
] |
d18c29b4ad3beb7f467c70153dc45e3b73524fb0c098e0c0c719773fc78bc652
|
#!/usr/bin/env python3
import sys
import os
import filecmp
import unittest
from farm_blast import blast
# Directory of the installed farm_blast package, used to locate test data.
modules_dir = os.path.dirname(os.path.abspath(blast.__file__))
data_dir = os.path.join(modules_dir, 'tests', 'data')
class TestBlast(unittest.TestCase):
def test_check_blast_type(self):
'''Check dies if blast_type not recognised'''
with self.assertRaises(blast.Error):
b = blast.Blast('query.fasta', 'ref.fasta', blast_type='oops')
def test_blast_db_exists(self):
'''Test detection or not of blast database'''
query_file = 'tmp.test_blast_db_exists.fa'
b_blastn = blast.Blast(query_file, 'ref.fasta')
b_blastp = blast.Blast(query_file, 'ref.fasta', blast_type='blastp')
nuc_suffixes = ['nin', 'nhr', 'nsq']
nuc_suffixes2 = ['00.' + x for x in nuc_suffixes]
pro_suffixes = ['pin', 'phr', 'psq']
pro_suffixes2 = ['00.' + x for x in pro_suffixes]
open(query_file, 'w').close()
self.assertFalse(b_blastn.blast_db_exists())
self.assertFalse(b_blastp.blast_db_exists())
tuples = [
(nuc_suffixes, b_blastn, b_blastp),
(nuc_suffixes2, b_blastn, b_blastp),
(pro_suffixes, b_blastp, b_blastn),
(pro_suffixes2, b_blastp, b_blastn)
]
for suffixes, blast1, blast2 in tuples:
print(suffixes, blast1.blast_type, blast2.blast_type)
for suff in suffixes:
open(query_file + '.' + suff, 'w').close()
self.assertTrue(blast1.blast_db_exists())
self.assertFalse(blast2.blast_db_exists())
for suff in suffixes:
missing_file = query_file + '.' + suff
os.unlink(missing_file)
self.assertFalse(blast1.blast_db_exists())
open(missing_file, 'w').close()
for suff in suffixes:
os.unlink(query_file + '.' + suff)
os.unlink(query_file)
def test_format_database_command(self):
'''Test command to format database made correctly for blast+ and blastall'''
b_all_nuc = blast.Blast('ref.fasta', 'qry.fasta', blastall=True)
b_all_pro = blast.Blast('ref.fasta', 'qry.fasta', blast_type='blastp', blastall=True)
b_plus_nuc = blast.Blast('ref.fasta', 'qry.fasta')
b_plus_pro = blast.Blast('ref.fasta', 'qry.fasta', blast_type='blastp')
blastall_nuc = 'formatdb -p F -i ref.fasta'
blastall_pro = 'formatdb -p T -i ref.fasta'
blastplus_nuc = 'makeblastdb -dbtype nucl -in ref.fasta'
blastplus_pro = 'makeblastdb -dbtype prot -in ref.fasta'
self.assertEqual(b_all_nuc.format_database_command(), blastall_nuc)
self.assertEqual(b_all_pro.format_database_command(), blastall_pro)
self.assertEqual(b_plus_nuc.format_database_command(), blastplus_nuc)
self.assertEqual(b_plus_pro.format_database_command(), blastplus_pro)
query_file = 'tmp.test_blast_db_exists.fa'
b_blastn = blast.Blast(query_file, 'ref.fasta')
files = [query_file + '.' + x for x in ['nin', 'nhr', 'nsq']]
for f in files:
open(f, 'w').close()
self.assertEqual(b_blastn.format_database_command(), None)
for f in files:
os.unlink(f)
def test_make_options_string(self):
'''Test options set correctly'''
test_objs = [
blast.Blast('ref', 'qry'),
blast.Blast('ref', 'qry', evalue=0.1),
blast.Blast('ref', 'qry', word_size=42),
blast.Blast('ref', 'qry', no_filter=True),
blast.Blast('ref', 'qry', extra_options='-x 1'),
blast.Blast('ref', 'qry', blast_type='blastp'),
blast.Blast('ref', 'qry', blastall=True),
blast.Blast('ref', 'qry', blastall=True, evalue=0.1),
blast.Blast('ref', 'qry', blastall=True, word_size=42),
blast.Blast('ref', 'qry', blastall=True, no_filter=True),
]
correct = [
'-outfmt 6',
'-outfmt 6 -evalue 0.1',
'-outfmt 6 -word_size 42',
'-outfmt 6 -dust no',
'-outfmt 6 -x 1',
'-outfmt 6 -seg yes',
'-m 8',
'-m 8 -e 0.1',
'-m 8 -W 42',
'-m 8 -F F'
]
for i in range(len(correct)):
self.assertEqual(correct[i], test_objs[i]._make_options_string())
def test_make_io_string(self):
'''Test input/output files string'''
test_objs = [
blast.Blast('ref', 'qry'),
blast.Blast('ref', 'qry', blastall=True),
]
correct = [
'-db ref -query qry -out blast.out',
'-d ref -i qry -o blast.out',
]
for i in range(len(correct)):
self.assertEqual(correct[i], test_objs[i]._make_io_string())
def test_make_blast_type_string(self):
'''Test blast type string'''
test_objs = [
(blast.Blast('ref', 'qry'), 'blastn -task blastn'),
(blast.Blast('ref', 'qry', blast_type='blastn'), 'blastn -task blastn'),
(blast.Blast('ref', 'qry', blast_type='blastn-short'), 'blastn -task blastn-short'),
(blast.Blast('ref', 'qry', blast_type='dc-megablast'), 'blastn -task dc-megablast'),
(blast.Blast('ref', 'qry', blast_type='megablast'), 'blastn -task megablast'),
(blast.Blast('ref', 'qry', blast_type='rmblastn'), 'blastn -task rmblastn'),
(blast.Blast('ref', 'qry', blast_type='blastx'), 'blastx'),
(blast.Blast('ref', 'qry', blast_type='blastp'), 'blastp -task blastp'),
(blast.Blast('ref', 'qry', blast_type='blastp-short'), 'blastp -task blastp-short'),
(blast.Blast('ref', 'qry', blast_type='deltablast'), 'blastp -task deltablast'),
(blast.Blast('ref', 'qry', blast_type='tblastn'), 'tblastn'),
(blast.Blast('ref', 'qry', blast_type='tblastx'), 'tblastx'),
(blast.Blast('ref', 'qry', blastall=True), 'blastall -p blastn'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='blastn'), 'blastall -p blastn'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='blastx'), 'blastall -p blastx'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='blastp'), 'blastall -p blastp'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='tblastn'), 'blastall -p tblastn'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='tblastx'), 'blastall -p tblastx'),
(blast.Blast('ref', 'qry', blastall=True, blast_type='megablast'), 'blastall -p blastn -n T')
]
for t in test_objs:
self.assertEqual(t[1], t[0]._make_blast_type_string())
with self.assertRaises(blast.Error):
b = blast.Blast('ref', 'qry', blast_type='oops')
def test_get_run_command(self):
        '''Test command to run blast made OK'''
        # The full command is the space-joined concatenation of the type,
        # input/output and options fragments, in that order.
        b = blast.Blast('qry.fasta', 'ref.fasta', evalue=0.1)
        expected = ' '.join([
            b._make_blast_type_string(),
            b._make_io_string(),
            b._make_options_string()
        ])
        self.assertEqual(expected, b.get_run_command())
# Allow running this test module directly: `python blast_test.py`.
if __name__ == '__main__':
    unittest.main()
|
sanger-pathogens/Farm_blast
|
farm_blast/tests/blast_test.py
|
Python
|
gpl-3.0
| 7,401
|
[
"BLAST"
] |
220975c7e03688df5a0a6cbb32bf9ac3a2cd5e36cb623143c2262e9be364d145
|
#!/usr/bin/python
import sys
import numpy as np
import pysam
from collections import Counter
import vcf
from vcf.parser import _Info as VcfInfo
input=sys.argv[1].split("/")[-1]
input=input.split(".")[0]
bam=sys.argv[2].split(".")[0]
bam=bam.split("/")[-1]
#print(input)
#Ensure the bam and csv file match
#if input==bam:
# print("working with vcf: "+input + " and bam: " +bam)
#else:
# print( "bam:"+bam +" does not match vcf: " +input)
# sys.exit(1)
#Put header on true false and exp csv files
in_var= vcf.Reader(open(sys.argv[1], 'r'))
## update infos ##
in_var.infos['MapQ']=VcfInfo(id='MapQ',num=1,type='Float',desc="The average MapQ of the reads containing the called variant")#,source=None, version=None)
in_var.infos['Read_pos']=VcfInfo(id='Read_pos',num=1,type='Float',desc="The average read cycle that called the given variant")#,source=None, version=None)
in_var.infos['Phred']=VcfInfo(id='Phred',num=1,type='Float',desc="The average Phred score of the called variant")#,source=None, version=None)
variants=list(in_var)
with pysam.AlignmentFile(sys.argv[2], "rb") as bamfile:
#with pysam.AlignmentFile('../data/5_5.removed.bam', "rb") as bamfile:
for record in variants:
#print(record)
mapq=[]# This will hold a list of the mapping qualtties that map to the variant
phred=[] #This will hold a list of the phred that map to the variant
Read_pos=[] # This will hold a list of position relative to the read
chr=record.CHROM
pos=int(record.POS)
py_pos=pos-1
ref=record.REF
var=record.ALT[0]
record.ID=input
for pileupcolumn in bamfile.pileup(chr,py_pos,py_pos+1,truncate=True,stepper="all",max_depth=1E6):
if pileupcolumn.pos==py_pos:
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip:
called_base=pileupread.alignment.query_sequence[pileupread.query_position]
called_phred=pileupread.alignment.query_qualities[pileupread.query_position]
if called_phred>0 and called_base==var: # change this if you change the phred cut off in deepSNV
mapq.append(pileupread.alignment.mapping_quality)
phred.append(called_phred)
Read_pos.append(pileupread.query_position)
mean_map=np.mean(mapq)
mean_phred=np.mean(phred)
mean_Read_pos=np.mean(Read_pos)
if mean_map==[]:
print( "OOPS didn't find the variant looks like you didn't fix the bug")
sys.exit(1)
record.add_info('MapQ',mean_map)
record.add_info('Read_pos',mean_Read_pos)
record.add_info('Phred',mean_phred)
# print record.INFO
# print 'done with \n'
# print record
print "done updating"
iter(variants)
vcf_writer = vcf.Writer(open(sys.argv[3], 'w'), in_var)
for record in variants:
vcf_writer.write_record(record)
vcf_writer.close()
|
lauringlab/Benchmarking_paper
|
scripts/mapq_vcf.py
|
Python
|
mit
| 2,777
|
[
"pysam"
] |
b8b2ee8e187a0279b7df5a10eaf98a79655d45708fd6f92d7618b2542e64b05b
|
"""
Migration script to update the deferred job parameters for liftover transfer jobs.
"""
import datetime
import logging, sys
from galaxy.model.custom_types import JSONType
from galaxy.util.bunch import Bunch
from sqlalchemy import *
from sqlalchemy import Integer, Table, MetaData, Column
from sqlalchemy.orm import *
from sqlalchemy.orm import scoped_session, sessionmaker
from migrate import *
# Need our custom types, but don't import anything else from model
# Factory used for SQLAlchemy column defaults: UTC timestamps.
now = datetime.datetime.utcnow
# Log to stdout so migration output is visible when run from the CLI.
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
# Table metadata and session; bound to the engine in upgrade()/downgrade().
metadata = MetaData()
context = scoped_session( sessionmaker( autoflush=False, autocommit=True ) )
class DeferredJob( object ):
    """Minimal stand-in for the DeferredJob model, mapped onto the
    deferred_job table just for the duration of this migration."""

    # Allowed lifecycle states for a deferred job.
    states = Bunch( NEW = 'new',
                    WAITING = 'waiting',
                    QUEUED = 'queued',
                    RUNNING = 'running',
                    OK = 'ok',
                    ERROR = 'error' )

    def __init__( self, state=None, plugin=None, params=None ):
        """Record the job's state, owning plugin name and params dict."""
        self.params = params
        self.plugin = plugin
        self.state = state
def upgrade(migrate_engine):
    """Collect the ids of all LiftOverTransferPlugin jobs and store them on
    their parent job's params under the 'liftover' key."""
    metadata.bind = migrate_engine
    DeferredJob.table = Table( "deferred_job", metadata,
                               Column( "id", Integer, primary_key=True ),
                               Column( "create_time", DateTime, default=now ),
                               Column( "update_time", DateTime, default=now, onupdate=now ),
                               Column( "state", String( 64 ), index=True ),
                               Column( "plugin", String( 128 ), index=True ),
                               Column( "params", JSONType ) )
    mapper( DeferredJob, DeferredJob.table, properties = {} )
    # Group the liftover job ids by the id of their parent job.
    liftoverjobs = {}
    for job in context.query( DeferredJob ).filter_by( plugin='LiftOverTransferPlugin' ).all():
        liftoverjobs.setdefault( job.params[ 'parentjob' ], [] ).append( job.id )
    # Attach each group of liftover ids to its parent job's params.
    for parent, lifts in liftoverjobs.items():
        deferred = context.query( DeferredJob ).filter_by( id=parent ).first()
        deferred.params[ 'liftover' ] = lifts
    context.flush()
def downgrade(migrate_engine):
    """Replace each GenomeTransferPlugin job's stored liftover job ids with
    the corresponding underlying transfer job ids."""
    metadata.bind = migrate_engine
    for job in context.query( DeferredJob ).filter_by( plugin='GenomeTransferPlugin' ).all():
        if not job.params[ 'liftover' ]:
            continue
        # Map each deferred liftover job id to its transfer_job_id.
        transfers = [
            context.query( DeferredJob ).filter_by( id=lift ).first().params[ 'transfer_job_id' ]
            for lift in job.params[ 'liftover' ]
        ]
        job.params[ 'liftover' ] = transfers
    context.flush()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0104_update_genome_downloader_job_parameters.py
|
Python
|
gpl-3.0
| 2,861
|
[
"Galaxy"
] |
1e1fa8493bc8ca4a77f2e151b6c98322aa7f2803f919f4ffcc9f3a81cef97196
|
"""Support for Konnected devices."""
import asyncio
import hmac
import json
import logging
import voluptuous as vol
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.discovery import SERVICE_KONNECTED
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_START,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
CONF_DEVICES,
CONF_BINARY_SENSORS,
CONF_SENSORS,
CONF_SWITCHES,
CONF_HOST,
CONF_PORT,
CONF_ID,
CONF_NAME,
CONF_TYPE,
CONF_PIN,
CONF_ZONE,
CONF_ACCESS_TOKEN,
ATTR_ENTITY_ID,
ATTR_STATE,
STATE_ON,
)
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers import discovery
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
CONF_INVERSE,
CONF_BLINK,
CONF_DISCOVERY,
CONF_DHT_SENSORS,
CONF_DS18B20_SENSORS,
DOMAIN,
STATE_LOW,
STATE_HIGH,
PIN_TO_ZONE,
ZONE_TO_PIN,
ENDPOINT_ROOT,
UPDATE_ENDPOINT,
SIGNAL_SENSOR_UPDATE,
)
from .handlers import HANDLERS
_LOGGER = logging.getLogger(__name__)

# Schema for one binary sensor. A sensor is addressed either by raw device
# pin (CONF_PIN) or by zone number (CONF_ZONE) — exactly one of the two.
_BINARY_SENSOR_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_PIN, "s_pin"): vol.Any(*PIN_TO_ZONE),
            vol.Exclusive(CONF_ZONE, "s_pin"): vol.Any(*ZONE_TO_PIN),
            vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_INVERSE, default=False): cv.boolean,
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
# Schema for one digital sensor (dht or ds18b20), addressed by pin or zone.
_SENSOR_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_PIN, "s_pin"): vol.Any(*PIN_TO_ZONE),
            vol.Exclusive(CONF_ZONE, "s_pin"): vol.Any(*ZONE_TO_PIN),
            vol.Required(CONF_TYPE): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_POLL_INTERVAL): vol.All(
                vol.Coerce(int), vol.Range(min=1)
            ),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
# Schema for one switch/actuator. momentary/pause/repeat configure pulsed
# output behaviour.
_SWITCH_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Exclusive(CONF_PIN, "a_pin"): vol.Any(*PIN_TO_ZONE),
            vol.Exclusive(CONF_ZONE, "a_pin"): vol.Any(*ZONE_TO_PIN),
            vol.Optional(CONF_NAME): cv.string,
            vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
                vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)
            ),
            vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
            vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
        }
    ),
    cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
# pylint: disable=no-value-for-parameter
# Top-level configuration: a shared access token plus one entry per device,
# keyed by the device's 12-hex-digit MAC-derived id.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_ACCESS_TOKEN): cv.string,
                vol.Optional(CONF_API_HOST): vol.Url(),
                vol.Required(CONF_DEVICES): [
                    {
                        vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
                        vol.Optional(CONF_BINARY_SENSORS): vol.All(
                            cv.ensure_list, [_BINARY_SENSOR_SCHEMA]
                        ),
                        vol.Optional(CONF_SENSORS): vol.All(
                            cv.ensure_list, [_SENSOR_SCHEMA]
                        ),
                        vol.Optional(CONF_SWITCHES): vol.All(
                            cv.ensure_list, [_SWITCH_SCHEMA]
                        ),
                        vol.Optional(CONF_HOST): cv.string,
                        vol.Optional(CONF_PORT): cv.port,
                        vol.Optional(CONF_BLINK, default=True): cv.boolean,
                        vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
                    }
                ],
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the Konnected platform."""
    import konnected

    cfg = config.get(DOMAIN)
    if cfg is None:
        cfg = {}

    access_token = cfg.get(CONF_ACCESS_TOKEN)
    if DOMAIN not in hass.data:
        # Device-independent settings shared by all devices.
        hass.data[DOMAIN] = {
            CONF_ACCESS_TOKEN: access_token,
            CONF_API_HOST: cfg.get(CONF_API_HOST),
        }

    def setup_device(host, port):
        """Set up a Konnected device at `host` listening on `port`."""
        discovered = DiscoveredDevice(hass, host, port)
        if discovered.is_configured:
            discovered.setup()
        else:
            _LOGGER.warning(
                "Konnected device %s was discovered on the network"
                " but not specified in configuration.yaml",
                discovered.device_id,
            )

    def device_discovered(service, info):
        """Call when a Konnected device has been discovered."""
        host = info.get(CONF_HOST)
        port = info.get(CONF_PORT)
        setup_device(host, port)

    async def manual_discovery(event):
        """Init devices on the network with manually assigned addresses."""
        specified = [
            dev
            for dev in cfg.get(CONF_DEVICES)
            if dev.get(CONF_HOST) and dev.get(CONF_PORT)
        ]
        while specified:
            # BUG FIX: iterate over a snapshot of the list. The original
            # removed items from `specified` while iterating it, which
            # skips the element following each successfully set up device.
            for dev in list(specified):
                _LOGGER.debug(
                    "Discovering Konnected device %s at %s:%s",
                    dev.get(CONF_ID),
                    dev.get(CONF_HOST),
                    dev.get(CONF_PORT),
                )
                try:
                    # setup_device blocks on device I/O; run it in the executor.
                    await hass.async_add_executor_job(
                        setup_device, dev.get(CONF_HOST), dev.get(CONF_PORT)
                    )
                    specified.remove(dev)
                except konnected.Client.ClientError as err:
                    _LOGGER.error(err)
            await asyncio.sleep(10)  # try again in 10 seconds

    # Initialize devices specified in the configuration on boot
    for device in cfg.get(CONF_DEVICES):
        ConfiguredDevice(hass, device, config).save_data()

    discovery.async_listen(hass, SERVICE_KONNECTED, device_discovered)
    hass.http.register_view(KonnectedView(access_token))
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, manual_discovery)
    return True
class ConfiguredDevice:
    """A representation of a configured Konnected device."""

    def __init__(self, hass, config, hass_config):
        """Initialize the Konnected device."""
        self.hass = hass
        self.config = config
        self.hass_config = hass_config

    @property
    def device_id(self):
        """Device id is the MAC address as string with punctuation removed."""
        return self.config.get(CONF_ID)

    def save_data(self):
        """Save the device configuration to `hass.data`."""
        # Binary sensors are stored as a dict keyed by pin number.
        binary_sensors = {}
        for entity in self.config.get(CONF_BINARY_SENSORS) or []:
            # Entities may be addressed by zone or by raw pin; normalise to a pin.
            if CONF_ZONE in entity:
                pin = ZONE_TO_PIN[entity[CONF_ZONE]]
            else:
                pin = entity[CONF_PIN]
            binary_sensors[pin] = {
                CONF_TYPE: entity[CONF_TYPE],
                CONF_NAME: entity.get(
                    CONF_NAME,
                    "Konnected {} Zone {}".format(self.device_id[6:], PIN_TO_ZONE[pin]),
                ),
                CONF_INVERSE: entity.get(CONF_INVERSE),
                ATTR_STATE: None,
            }
            _LOGGER.debug(
                "Set up binary_sensor %s (initial state: %s)",
                binary_sensors[pin].get("name"),
                binary_sensors[pin].get(ATTR_STATE),
            )
        # Switches/actuators are stored as a list of per-pin dicts.
        actuators = []
        for entity in self.config.get(CONF_SWITCHES) or []:
            if CONF_ZONE in entity:
                pin = ZONE_TO_PIN[entity[CONF_ZONE]]
            else:
                pin = entity[CONF_PIN]
            act = {
                CONF_PIN: pin,
                CONF_NAME: entity.get(
                    CONF_NAME,
                    "Konnected {} Actuator {}".format(
                        self.device_id[6:], PIN_TO_ZONE[pin]
                    ),
                ),
                ATTR_STATE: None,
                CONF_ACTIVATION: entity[CONF_ACTIVATION],
                CONF_MOMENTARY: entity.get(CONF_MOMENTARY),
                CONF_PAUSE: entity.get(CONF_PAUSE),
                CONF_REPEAT: entity.get(CONF_REPEAT),
            }
            actuators.append(act)
            _LOGGER.debug("Set up switch %s", act)
        # Digital sensors (dht/ds18b20) are stored as a list of per-pin dicts.
        sensors = []
        for entity in self.config.get(CONF_SENSORS) or []:
            if CONF_ZONE in entity:
                pin = ZONE_TO_PIN[entity[CONF_ZONE]]
            else:
                pin = entity[CONF_PIN]
            sensor = {
                CONF_PIN: pin,
                CONF_NAME: entity.get(
                    CONF_NAME,
                    "Konnected {} Sensor {}".format(
                        self.device_id[6:], PIN_TO_ZONE[pin]
                    ),
                ),
                CONF_TYPE: entity[CONF_TYPE],
                CONF_POLL_INTERVAL: entity.get(CONF_POLL_INTERVAL),
            }
            sensors.append(sensor)
            _LOGGER.debug(
                "Set up %s sensor %s (initial state: %s)",
                sensor.get(CONF_TYPE),
                sensor.get(CONF_NAME),
                sensor.get(ATTR_STATE),
            )
        device_data = {
            CONF_BINARY_SENSORS: binary_sensors,
            CONF_SENSORS: sensors,
            CONF_SWITCHES: actuators,
            CONF_BLINK: self.config.get(CONF_BLINK),
            CONF_DISCOVERY: self.config.get(CONF_DISCOVERY),
        }
        if CONF_DEVICES not in self.hass.data[DOMAIN]:
            self.hass.data[DOMAIN][CONF_DEVICES] = {}
        _LOGGER.debug(
            "Storing data in hass.data[%s][%s][%s]: %s",
            DOMAIN,
            CONF_DEVICES,
            self.device_id,
            device_data,
        )
        self.hass.data[DOMAIN][CONF_DEVICES][self.device_id] = device_data
        # Hand the stored configuration to each entity platform for this device.
        for platform in ["binary_sensor", "sensor", "switch"]:
            discovery.load_platform(
                self.hass,
                platform,
                DOMAIN,
                {"device_id": self.device_id},
                self.hass_config,
            )
class DiscoveredDevice:
    """A representation of a discovered Konnected device."""

    def __init__(self, hass, host, port):
        """Initialize the Konnected device."""
        self.hass = hass
        self.host = host
        self.port = port
        import konnected
        self.client = konnected.Client(host, str(port))
        # Blocking HTTP call to the device; callers run this off the event loop.
        self.status = self.client.get_status()

    def setup(self):
        """Set up a newly discovered Konnected device."""
        _LOGGER.info(
            "Discovered Konnected device %s. Open http://%s:%s in a "
            "web browser to view device status.",
            self.device_id,
            self.host,
            self.port,
        )
        self.save_data()
        self.update_initial_states()
        self.sync_device_config()

    def save_data(self):
        """Save the discovery information to `hass.data`."""
        self.stored_configuration["client"] = self.client
        self.stored_configuration["host"] = self.host
        self.stored_configuration["port"] = self.port

    @property
    def device_id(self):
        """Device id is the MAC address as string with punctuation removed."""
        return self.status["mac"].replace(":", "")

    @property
    def is_configured(self):
        """Return true if device_id is specified in the configuration."""
        return bool(self.hass.data[DOMAIN][CONF_DEVICES].get(self.device_id))

    @property
    def stored_configuration(self):
        """Return the configuration stored in `hass.data` for this device."""
        return self.hass.data[DOMAIN][CONF_DEVICES].get(self.device_id)

    def binary_sensor_configuration(self):
        """Return the configuration map for syncing binary sensors."""
        return [{"pin": p} for p in self.stored_configuration[CONF_BINARY_SENSORS]]

    def actuator_configuration(self):
        """Return the configuration map for syncing actuators."""
        return [
            {
                "pin": data.get(CONF_PIN),
                # Device expects a 0/1 trigger level derived from activation.
                "trigger": (0 if data.get(CONF_ACTIVATION) in [0, STATE_LOW] else 1),
            }
            for data in self.stored_configuration[CONF_SWITCHES]
        ]

    def dht_sensor_configuration(self):
        """Return the configuration map for syncing DHT sensors."""
        return [
            {CONF_PIN: sensor[CONF_PIN], CONF_POLL_INTERVAL: sensor[CONF_POLL_INTERVAL]}
            for sensor in self.stored_configuration[CONF_SENSORS]
            if sensor[CONF_TYPE] == "dht"
        ]

    def ds18b20_sensor_configuration(self):
        """Return the configuration map for syncing DS18B20 sensors."""
        return [
            {"pin": sensor[CONF_PIN]}
            for sensor in self.stored_configuration[CONF_SENSORS]
            if sensor[CONF_TYPE] == "ds18b20"
        ]

    def update_initial_states(self):
        """Update the initial state of each sensor from status poll."""
        for sensor_data in self.status.get("sensors"):
            sensor_config = self.stored_configuration[CONF_BINARY_SENSORS].get(
                sensor_data.get(CONF_PIN), {}
            )
            entity_id = sensor_config.get(ATTR_ENTITY_ID)
            state = bool(sensor_data.get(ATTR_STATE))
            # Flip the reported state for sensors configured as inverse.
            if sensor_config.get(CONF_INVERSE):
                state = not state
            dispatcher_send(self.hass, SIGNAL_SENSOR_UPDATE.format(entity_id), state)

    def desired_settings_payload(self):
        """Return a dict representing the desired device configuration."""
        # Prefer an explicitly configured API host over hass's own base URL.
        desired_api_host = (
            self.hass.data[DOMAIN].get(CONF_API_HOST) or self.hass.config.api.base_url
        )
        desired_api_endpoint = desired_api_host + ENDPOINT_ROOT
        return {
            "sensors": self.binary_sensor_configuration(),
            "actuators": self.actuator_configuration(),
            "dht_sensors": self.dht_sensor_configuration(),
            "ds18b20_sensors": self.ds18b20_sensor_configuration(),
            "auth_token": self.hass.data[DOMAIN].get(CONF_ACCESS_TOKEN),
            "endpoint": desired_api_endpoint,
            "blink": self.stored_configuration.get(CONF_BLINK),
            "discovery": self.stored_configuration.get(CONF_DISCOVERY),
        }

    def current_settings_payload(self):
        """Return a dict of configuration currently stored on the device."""
        settings = self.status["settings"]
        if not settings:
            settings = {}
        return {
            "sensors": [{"pin": s[CONF_PIN]} for s in self.status.get("sensors")],
            "actuators": self.status.get("actuators"),
            "dht_sensors": self.status.get(CONF_DHT_SENSORS),
            "ds18b20_sensors": self.status.get(CONF_DS18B20_SENSORS),
            "auth_token": settings.get("token"),
            "endpoint": settings.get("apiUrl"),
            "blink": settings.get(CONF_BLINK),
            "discovery": settings.get(CONF_DISCOVERY),
        }

    def sync_device_config(self):
        """Sync the new pin configuration to the Konnected device if needed."""
        _LOGGER.debug(
            "Device %s settings payload: %s",
            self.device_id,
            self.desired_settings_payload(),
        )
        # Only push when the desired payload differs from what the device reports.
        if self.desired_settings_payload() != self.current_settings_payload():
            _LOGGER.info("pushing settings to device %s", self.device_id)
            self.client.put_settings(**self.desired_settings_payload())
class KonnectedView(HomeAssistantView):
    """View creates an endpoint to receive push updates from the device."""

    url = UPDATE_ENDPOINT
    name = "api:konnected"
    requires_auth = False  # Uses access token from configuration

    def __init__(self, auth_token):
        """Initialize the view with the shared bearer token."""
        self.auth_token = auth_token

    @staticmethod
    def binary_value(state, activation):
        """Return binary value for GPIO based on state and activation."""
        if activation == STATE_HIGH:
            return 1 if state == STATE_ON else 0
        return 0 if state == STATE_ON else 1

    async def get(self, request: Request, device_id) -> Response:
        """Return the current binary state of a switch."""
        hass = request.app["hass"]
        pin_num = int(request.query.get("pin"))
        data = hass.data[DOMAIN]
        device = data[CONF_DEVICES][device_id]
        if not device:
            return self.json_message(
                "Device " + device_id + " not configured", status_code=HTTP_NOT_FOUND
            )
        try:
            pin = next(
                filter(
                    lambda switch: switch[CONF_PIN] == pin_num, device[CONF_SWITCHES]
                )
            )
        except StopIteration:
            pin = None
        if not pin:
            # BUG FIX: the original called the builtin
            # format("Switch on pin {} not configured", pin_num), which raises
            # TypeError instead of producing the message.
            return self.json_message(
                "Switch on pin {} not configured".format(pin_num),
                status_code=HTTP_NOT_FOUND,
            )
        return self.json(
            {
                "pin": pin_num,
                "state": self.binary_value(
                    hass.states.get(pin[ATTR_ENTITY_ID]).state, pin[CONF_ACTIVATION]
                ),
            }
        )

    async def put(self, request: Request, device_id) -> Response:
        """Receive a sensor update via PUT request and async set state."""
        hass = request.app["hass"]
        data = hass.data[DOMAIN]
        try:  # Konnected 2.2.0 and above supports JSON payloads
            payload = await request.json()
            pin_num = payload["pin"]
        except json.decoder.JSONDecodeError:
            _LOGGER.error(
                (
                    "Your Konnected device software may be out of "
                    "date. Visit https://help.konnected.io for "
                    "updating instructions."
                )
            )
            # BUG FIX: the original fell through with `pin_num` unbound and
            # later crashed with UnboundLocalError; reject the request instead.
            return self.json_message(
                "unsupported device software", status_code=HTTP_BAD_REQUEST
            )
        auth = request.headers.get(AUTHORIZATION, None)
        # BUG FIX: compare_digest raises TypeError when the header is absent
        # (auth is None); treat a missing header as unauthorized.
        if auth is None or not hmac.compare_digest(f"Bearer {self.auth_token}", auth):
            return self.json_message("unauthorized", status_code=HTTP_UNAUTHORIZED)
        pin_num = int(pin_num)
        device = data[CONF_DEVICES].get(device_id)
        if device is None:
            return self.json_message(
                "unregistered device", status_code=HTTP_BAD_REQUEST
            )
        # A pin is either a binary sensor (dict keyed by pin) or a digital
        # sensor (list searched by pin).
        pin_data = device[CONF_BINARY_SENSORS].get(pin_num) or next(
            (s for s in device[CONF_SENSORS] if s[CONF_PIN] == pin_num), None
        )
        if pin_data is None:
            return self.json_message(
                "unregistered sensor/actuator", status_code=HTTP_BAD_REQUEST
            )
        pin_data["device_id"] = device_id
        # Dispatch each recognised payload attribute to its handler task.
        for attr in ["state", "temp", "humi", "addr"]:
            value = payload.get(attr)
            handler = HANDLERS.get(attr)
            if value is not None and handler:
                hass.async_create_task(handler(hass, pin_data, payload))
        return self.json_message("ok")
|
Cinntax/home-assistant
|
homeassistant/components/konnected/__init__.py
|
Python
|
apache-2.0
| 19,557
|
[
"VisIt"
] |
8742458de6c495245c9364c92103ecdb3b27e60f75530d2c22515db94d6f7896
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.test import test as TestCommand
import numpy
# Get __version__ from version.py without importing package itself.
with open('cubefit/version.py') as f:
    exec(f.read())
# Prefer the Cython source; fall back to the pre-generated C file (e.g. when
# installing from an sdist on a machine without Cython).
fname = os.path.join("cubefit", "psffuncs.pyx")
USE_CYTHON = True
if not os.path.exists(fname):
    fname = fname.replace(".pyx", ".c")
    USE_CYTHON = False
# Compiled extension needs NumPy headers and links against libm.
exts = [Extension("cubefit.psffuncs", [fname],
                  include_dirs=[numpy.get_include()],
                  libraries=["m"])]
if USE_CYTHON:
    from Cython.Build import cythonize
    exts = cythonize(exts)
class PyTest(TestCommand):
    """Enables setup.py test"""

    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]

    def initialize_options(self):
        """Start with an empty list of pytest arguments."""
        TestCommand.initialize_options(self)
        self.pytest_args = []

    def run_tests(self):
        """Run the suite and exit with pytest's status code."""
        # Import here, because outside the command the eggs aren't loaded.
        import pytest
        sys.exit(pytest.main(self.pytest_args))
# Package metadata. `exts` supplies the compiled PSF routines, and `cmdclass`
# wires `setup.py test` to pytest via the PyTest command.
setup(name="cubefit",
      version=__version__,
      description=("Fit combined supernova and galaxy model on a Nearby "
                   "Supernova Factory spectral data cube."),
      license="MIT",
      classifiers=["Topic :: Scientific/Engineering :: Astronomy",
                   "Intended Audience :: Science/Research"],
      url="https://github.com/snfactory/cubefit",
      author="Kyle Barbary, Seb Bongard, Clare Saunders",
      author_email="kylebarbary@gmail.com",
      packages=['cubefit', 'cubefit.extern'],
      ext_modules=exts,
      scripts=['scripts/cubefit',
               'scripts/cubefit-subtract',
               'scripts/cubefit-plot'],
      cmdclass={'test': PyTest}
      )
|
kbarbary/cubefit
|
setup.py
|
Python
|
mit
| 1,829
|
[
"Galaxy"
] |
a34644706419700824a10142c8731a6ff6e355ca20da01fd32fdfff43452c31b
|
"""
Course Outline page in Studio.
"""
import datetime
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from ..common.utils import click_css, confirm_prompt
from .course_page import CoursePage
from .container import ContainerPage
from .utils import set_input_value_and_save, set_input_value
class CourseOutlineItem(object):
    """
    A mixin class for any :class:`PageObject` shown in a course outline.
    """
    BODY_SELECTOR = None
    EDIT_BUTTON_SELECTOR = '.xblock-field-value-edit'
    NAME_SELECTOR = '.item-title'
    NAME_INPUT_SELECTOR = '.xblock-field-input'
    NAME_FIELD_WRAPPER_SELECTOR = '.xblock-title .wrapper-xblock-field'
    STATUS_MESSAGE_SELECTOR = '> div[class$="status"] .status-message'
    CONFIGURATION_BUTTON_SELECTOR = '.action-item .configure-button'

    def __repr__(self):
        # CourseOutlineItem is also used as a mixin for CourseOutlinePage, which doesn't have a locator
        # Check for the existence of a locator so that errors when navigating to the course outline page don't show up
        # as errors in the repr method instead.
        try:
            return "{}(<browser>, {!r})".format(self.__class__.__name__, self.locator)
        except AttributeError:
            return "{}(<browser>)".format(self.__class__.__name__)

    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineItem` context
        """
        # If the item doesn't have a body selector or locator, then it can't be bounded
        # This happens in the context of the CourseOutlinePage
        if self.BODY_SELECTOR and hasattr(self, 'locator'):
            return '{}[data-locator="{}"] {}'.format(
                self.BODY_SELECTOR,
                self.locator,
                selector
            )
        else:
            return selector

    @property
    def name(self):
        """
        Returns the display name of this object, or None if it has no title element.
        """
        name_element = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).first
        if name_element:
            return name_element.text[0]
        else:
            return None

    @property
    def has_status_message(self):
        """
        Returns True if the item has a status message, False otherwise.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).first.visible

    @property
    def status_message(self):
        """
        Returns the status message of this item.
        """
        return self.q(css=self._bounded_selector(self.STATUS_MESSAGE_SELECTOR)).text[0]

    @property
    def has_staff_lock_warning(self):
        """ Returns True if the 'Contains staff only content' message is visible """
        return self.status_message == 'Contains staff only content' if self.has_status_message else False

    @property
    def is_staff_only(self):
        """ Returns True if the visiblity state of this item is staff only (has a black sidebar) """
        return "is-staff-only" in self.q(css=self._bounded_selector(''))[0].get_attribute("class")

    def edit_name(self):
        """
        Puts the item's name into editable form.
        """
        self.q(css=self._bounded_selector(self.EDIT_BUTTON_SELECTOR)).first.click()

    def enter_name(self, new_name):
        """
        Enters new_name as the item's display name.
        """
        set_input_value(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)

    def change_name(self, new_name):
        """
        Changes the container's name.
        """
        self.edit_name()
        set_input_value_and_save(self, self._bounded_selector(self.NAME_INPUT_SELECTOR), new_name)
        self.wait_for_ajax()

    def finalize_name(self):
        """
        Presses ENTER, saving the value of the display name for this item.
        """
        self.q(css=self._bounded_selector(self.NAME_INPUT_SELECTOR)).results[0].send_keys(Keys.ENTER)
        self.wait_for_ajax()

    def set_staff_lock(self, is_locked):
        """
        Sets the explicit staff lock of item on the container page to is_locked.
        """
        modal = self.edit()
        modal.is_explicitly_locked = is_locked
        modal.save()

    def in_editable_form(self):
        """
        Return whether this outline item's display name is in its editable form.
        """
        return "is-editing" in self.q(
            css=self._bounded_selector(self.NAME_FIELD_WRAPPER_SELECTOR)
        )[0].get_attribute("class")

    def edit(self):
        """
        Open the edit (configuration) modal for this item and return it.
        """
        self.q(css=self._bounded_selector(self.CONFIGURATION_BUTTON_SELECTOR)).first.click()
        modal = CourseOutlineModal(self)
        # BUG FIX: the original constructed the EmptyPromise without calling
        # fulfill(), which made it a no-op — it never waited for the modal.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        return modal

    @property
    def release_date(self):
        """Text of the release date status field, or None if absent."""
        element = self.q(css=self._bounded_selector(".status-release-value"))
        return element.first.text[0] if element.present else None

    @property
    def due_date(self):
        """Text of the due date status field, or None if absent."""
        element = self.q(css=self._bounded_selector(".status-grading-date"))
        return element.first.text[0] if element.present else None

    @property
    def policy(self):
        """Text of the grading policy status field, or None if absent."""
        element = self.q(css=self._bounded_selector(".status-grading-value"))
        return element.first.text[0] if element.present else None

    def publish(self):
        """
        Publish the unit.
        """
        click_css(self, self._bounded_selector('.action-publish'), require_notification=False)
        modal = CourseOutlineModal(self)
        # BUG FIX: fulfill() is required for the promise to actually block
        # until the modal is visible; without it this line did nothing.
        EmptyPromise(lambda: modal.is_shown(), 'Modal is shown.').fulfill()
        modal.publish()

    @property
    def publish_action(self):
        """
        Returns the link for publishing a unit.
        """
        return self.q(css=self._bounded_selector('.action-publish')).first
class CourseOutlineContainer(CourseOutlineItem):
    """
    A mixin to a CourseOutline page object that adds the ability to load
    a child page object by title or by index.

    CHILD_CLASS must be a :class:`CourseOutlineChild` subclass.
    """
    CHILD_CLASS = None
    ADD_BUTTON_SELECTOR = '> .outline-content > .add-item a.button-new'

    def child(self, title, child_class=None):
        """
        Return the child page object whose display name matches `title`.

        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        # Find the child element whose name node contains `title` and build a
        # page object from its data-locator attribute.
        return child_class(
            self.browser,
            self.q(css=child_class.BODY_SELECTOR).filter(
                lambda el: title in [inner.text for inner in
                                     el.find_elements_by_css_selector(child_class.NAME_SELECTOR)]
            ).attrs('data-locator')[0]
        )

    def children(self, child_class=None):
        """
        Returns all the children page objects of class child_class.
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.q(css=self._bounded_selector(child_class.BODY_SELECTOR)).map(
            lambda el: child_class(self.browser, el.get_attribute('data-locator'))).results

    def child_at(self, index, child_class=None):
        """
        Returns the child at the specified index.

        :type self: object
        """
        if not child_class:
            child_class = self.CHILD_CLASS
        return self.children(child_class)[index]

    def add_child(self, require_notification=True):
        """
        Adds a child to this xblock, waiting for notifications.
        """
        click_css(
            self,
            self._bounded_selector(self.ADD_BUTTON_SELECTOR),
            require_notification=require_notification,
        )

    def expand_subsection(self):
        """
        Toggle the expansion of this subsection.
        """
        # Disable jQuery animations so the expansion state changes immediately.
        self.browser.execute_script("jQuery.fx.off = true;")

        def subsection_expanded():
            # The "add" button is only visible while the container is expanded.
            add_button = self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).first.results
            return add_button and add_button[0].is_displayed()

        currently_expanded = subsection_expanded()
        self.q(css=self._bounded_selector('.ui-toggle-expansion i')).first.click()
        self.wait_for_element_presence(self._bounded_selector(self.ADD_BUTTON_SELECTOR), 'Subsection is expanded')
        EmptyPromise(
            lambda: subsection_expanded() != currently_expanded,
            "Check that the container {} has been toggled".format(self.locator)
        ).fulfill()
        # Restore animations once the toggle has completed.
        self.browser.execute_script("jQuery.fx.off = false;")
        return self

    @property
    def is_collapsed(self):
        """
        Return whether this outline item is currently collapsed.
        """
        return "is-collapsed" in self.q(css=self._bounded_selector('')).first.attrs("class")[0]
class CourseOutlineChild(PageObject, CourseOutlineItem):
    """
    A page object that will be used as a child of :class:`CourseOutlineContainer`.
    """
    url = None
    BODY_SELECTOR = '.outline-item'

    def __init__(self, browser, locator):
        """Bind this page object to the outline element with `locator`."""
        super(CourseOutlineChild, self).__init__(browser)
        self.locator = locator

    def is_browser_on_page(self):
        # This child is "on the page" when its locator-scoped element exists.
        return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present

    def delete(self, cancel=False):
        """
        Clicks the delete button, then cancels at the confirmation prompt if cancel is True.
        """
        click_css(self, self._bounded_selector('.delete-button'), require_notification=False)
        confirm_prompt(self, cancel)

    def _bounded_selector(self, selector):
        """
        Return `selector`, but limited to this particular `CourseOutlineChild` context
        """
        # Unlike the mixin version, a child always has a locator, so no fallback.
        return '{}[data-locator="{}"] {}'.format(
            self.BODY_SELECTOR,
            self.locator,
            selector
        )

    @property
    def name(self):
        """Return the display name of this child, or None when it has no title element."""
        titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
        if titles:
            return titles[0]
        else:
            return None

    @property
    def children(self):
        """
        Will return any first-generation descendant items of this item.
        """
        descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).map(
            lambda el: CourseOutlineChild(self.browser, el.get_attribute('data-locator'))).results
        # Now remove any non-direct descendants.
        grandkids = []
        for descendant in descendants:
            grandkids.extend(descendant.children)
        grand_locators = [grandkid.locator for grandkid in grandkids]
        return [descendant for descendant in descendants if descendant.locator not in grand_locators]
class CourseOutlineUnit(CourseOutlineChild):
    """
    PageObject wrapping a single unit link on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-unit'
    NAME_SELECTOR = '.unit-title a'

    def go_to(self):
        """
        Open the container page this unit links to and return an
        initialized :class:`.ContainerPage` for that unit.
        """
        container = ContainerPage(self.browser, self.locator)
        return container.visit()

    def is_browser_on_page(self):
        # The unit link is on the page when its outline element is present.
        return self.q(css=self.BODY_SELECTOR).present

    def children(self):
        """Return page objects for any nested units inside this one."""
        query = self.q(css=self._bounded_selector(self.BODY_SELECTOR))
        build = lambda el: CourseOutlineUnit(self.browser, el.get_attribute('data-locator'))
        return query.map(build).results
class CourseOutlineSubsection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` wrapping a subsection block on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-subsection'
    NAME_SELECTOR = '.subsection-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.subsection-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineUnit

    def unit(self, title):
        """Return the :class:`.CourseOutlineUnit` with the title `title`."""
        return self.child(title)

    def units(self):
        """Return all units in this subsection."""
        return self.children()

    def unit_at(self, index):
        """Return the unit at position `index` within this subsection."""
        return self.child_at(index)

    def add_unit(self):
        """Click the add button to create a new unit in this subsection."""
        add_button_css = self._bounded_selector(self.ADD_BUTTON_SELECTOR)
        self.q(css=add_button_css).click()
class CourseOutlineSection(CourseOutlineContainer, CourseOutlineChild):
    """
    :class`.PageObject` wrapping a section block on the Studio Course Outline page.
    """
    url = None
    BODY_SELECTOR = '.outline-section'
    NAME_SELECTOR = '.section-title'
    NAME_FIELD_WRAPPER_SELECTOR = '.section-header .wrapper-xblock-field'
    CHILD_CLASS = CourseOutlineSubsection

    def subsection(self, title):
        """Return the :class:`.CourseOutlineSubsection` with the title `title`."""
        return self.child(title)

    def subsections(self):
        """Return a list of this section's subsections."""
        return self.children()

    def subsection_at(self, index):
        """Return the subsection at position `index` within this section."""
        return self.child_at(index)

    def add_subsection(self):
        """Create a new subsection within this section."""
        self.add_child()
class ExpandCollapseLinkState(object):
    """
    Represents the three states that the expand/collapse link can be in
    """
    MISSING = 0   # link is not displayed at all
    COLLAPSE = 1  # link currently offers to collapse all sections
    EXPAND = 2    # link currently offers to expand all sections
class CourseOutlinePage(CoursePage, CourseOutlineContainer):
    """
    Course Outline page in Studio.
    """
    url_path = "course"
    CHILD_CLASS = CourseOutlineSection
    EXPAND_COLLAPSE_CSS = '.button-toggle-expand-collapse'
    BOTTOM_ADD_SECTION_BUTTON = '.outline > .add-section .button-new'
    def is_browser_on_page(self):
        """Page is ready once the outline body exists and the loading spinner is hidden."""
        return self.q(css='body.view-outline').present and self.q(css='div.ui-loading.is-hidden').present
    def view_live(self):
        """
        Clicks the "View Live" link and switches to the new tab
        """
        click_css(self, '.view-live-button', require_notification=False)
        # "View Live" opens the LMS in a new browser window; point the driver at it.
        self.browser.switch_to_window(self.browser.window_handles[-1])
    def section(self, title):
        """
        Return the :class:`.CourseOutlineSection` with the title `title`.
        """
        return self.child(title)
    def section_at(self, index):
        """
        Returns the :class:`.CourseOutlineSection` at the specified index.
        """
        return self.child_at(index)
    def click_section_name(self, parent_css=''):
        """
        Find and click on first section name in course outline
        """
        self.q(css='{} .section-name'.format(parent_css)).first.click()
    def get_section_name(self, parent_css='', page_refresh=False):
        """
        Get the list of names of all sections present
        """
        if page_refresh:
            self.browser.refresh()
        return self.q(css='{} .section-name'.format(parent_css)).text
    def section_name_edit_form_present(self, parent_css=''):
        """
        Check that section name edit form present
        """
        return self.q(css='{} .section-name input'.format(parent_css)).present
    def change_section_name(self, new_name, parent_css=''):
        """
        Change section name of first section present in course outline
        """
        # Clicking the name turns it into an editable input field.
        self.click_section_name(parent_css)
        self.q(css='{} .section-name input'.format(parent_css)).first.fill(new_name)
        self.q(css='{} .section-name .save-button'.format(parent_css)).first.click()
        self.wait_for_ajax()
    def click_release_date(self):
        """
        Open release date edit modal of first section in course outline
        """
        self.q(css='div.section-published-date a.edit-release-date').first.click()
    def sections(self):
        """
        Returns the sections of this course outline page.
        """
        return self.children()
    def add_section_from_top_button(self):
        """
        Clicks the button for adding a section which resides at the top of the screen.
        """
        click_css(self, '.wrapper-mast nav.nav-actions .button-new')
    def add_section_from_bottom_button(self, click_child_icon=False):
        """
        Clicks the button for adding a section which resides at the bottom of the screen.
        """
        element_css = self.BOTTOM_ADD_SECTION_BUTTON
        if click_child_icon:
            # Target the plus icon inside the button rather than the button itself.
            element_css += " .fa-plus"
        click_css(self, element_css)
    def toggle_expand_collapse(self):
        """
        Toggles whether all sections are expanded or collapsed
        """
        self.q(css=self.EXPAND_COLLAPSE_CSS).click()
    def start_reindex(self):
        """
        Starts course reindex by clicking reindex button
        """
        self.reindex_button.click()
    def open_exam_settings_dialog(self):
        """
        clicks on the settings button of subsection.
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
    def change_problem_release_date_in_studio(self):
        """
        Sets a new start date
        """
        self.q(css=".subsection-header-actions .configure-button").first.click()
        self.q(css="#start_date").fill("01/01/2030")
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()
    def make_exam_proctored(self):
        """
        Makes a Proctored exam.
        """
        # A proctored exam must also be a timed exam, so both boxes are checked.
        self.q(css="#id_timed_examination").first.click()
        self.q(css="#id_exam_proctoring").first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()
    def make_exam_timed(self):
        """
        Makes a timed exam.
        """
        self.q(css="#id_timed_examination").first.click()
        self.q(css=".action-save").first.click()
        self.wait_for_ajax()
    def proctoring_items_are_displayed(self):
        """
        Returns True if all the items are found.
        """
        # The Timed exam checkbox
        if not self.q(css="#id_timed_examination").present:
            return False
        # The time limit field
        if not self.q(css="#id_time_limit").present:
            return False
        # The Practice exam checkbox
        if not self.q(css="#id_practice_exam").present:
            return False
        # The Proctored exam checkbox
        if not self.q(css="#id_exam_proctoring").present:
            return False
        return True
    @property
    def bottom_add_section_button(self):
        """
        Returns the query representing the bottom add section button.
        """
        return self.q(css=self.BOTTOM_ADD_SECTION_BUTTON).first
    @property
    def has_no_content_message(self):
        """
        Returns true if a message informing the user that the course has no content is visible
        """
        return self.q(css='.outline .no-content').is_present()
    @property
    def has_rerun_notification(self):
        """
        Returns true iff the rerun notification is present on the page.
        """
        return self.q(css='.wrapper-alert.is-shown').is_present()
    def dismiss_rerun_notification(self):
        """
        Clicks the dismiss button in the rerun notification.
        """
        self.q(css='.dismiss-button').click()
    @property
    def expand_collapse_link_state(self):
        """
        Returns the current state of the expand/collapse link
        """
        link = self.q(css=self.EXPAND_COLLAPSE_CSS)[0]
        if not link.is_displayed():
            return ExpandCollapseLinkState.MISSING
        elif "collapse-all" in link.get_attribute("class"):
            return ExpandCollapseLinkState.COLLAPSE
        else:
            return ExpandCollapseLinkState.EXPAND
    @property
    def reindex_button(self):
        """
        Returns reindex button.
        """
        return self.q(css=".button.button-reindex")[0]
    def expand_all_subsections(self):
        """
        Expands all the subsections in this course.
        """
        for section in self.sections():
            if section.is_collapsed:
                section.expand_subsection()
            for subsection in section.subsections():
                if subsection.is_collapsed:
                    subsection.expand_subsection()
    @property
    def xblocks(self):
        """
        Return a list of xblocks loaded on the outline page.
        """
        return self.children(CourseOutlineChild)
    @property
    def license(self):
        """
        Returns the course license text, if present. Else returns None.
        """
        # NOTE(review): `.first.text[0]` raises IndexError when no license
        # element is present, despite the docstring promising None -- confirm
        # whether callers rely on the exception or on a None return.
        return self.q(css=".license-value").first.text[0]
    @property
    def deprecated_warning_visible(self):
        """
        Returns true if the deprecated warning is visible.
        """
        return self.q(css='.wrapper-alert-error.is-shown').is_present()
    @property
    def warning_heading_text(self):
        """
        Returns deprecated warning heading text.
        """
        return self.q(css='.warning-heading-text').text[0]
    @property
    def components_list_heading(self):
        """
        Returns deprecated warning component list heading text.
        """
        return self.q(css='.components-list-heading-text').text[0]
    @property
    def modules_remove_text_shown(self):
        """
        Returns True if deprecated warning advance modules remove text is visible.
        """
        return self.q(css='.advance-modules-remove-text').visible
    @property
    def modules_remove_text(self):
        """
        Returns deprecated warning advance modules remove text.
        """
        return self.q(css='.advance-modules-remove-text').text[0]
    @property
    def components_visible(self):
        """
        Returns True if components list visible.
        """
        return self.q(css='.components-list').visible
    @property
    def components_display_names(self):
        """
        Returns deprecated warning components display name list.
        """
        return self.q(css='.components-list li>a').text
    @property
    def deprecated_advance_modules(self):
        """
        Returns deprecated advance modules list.
        """
        return self.q(css='.advance-modules-list li').text
class CourseOutlineModal(object):
    """
    Wrapper around the settings modal opened from the course outline
    (release date, due date, grading policy, explicit staff lock).
    """
    MODAL_SELECTOR = ".wrapper-modal-window"
    def __init__(self, page):
        # The page object hosting this modal; all element queries go through it.
        self.page = page
    def _bounded_selector(self, selector):
        """
        Returns `selector`, but limited to this particular `CourseOutlineModal` context.
        """
        return " ".join([self.MODAL_SELECTOR, selector])
    def is_shown(self):
        """Return True if the modal window is present in the DOM."""
        return self.page.q(css=self.MODAL_SELECTOR).present
    def find_css(self, selector):
        """Return a query for `selector` scoped to this modal."""
        return self.page.q(css=self._bounded_selector(selector))
    def click(self, selector, index=0):
        """Click the `index`-th element matching `selector` inside the modal."""
        self.find_css(selector).nth(index).click()
    def save(self):
        """Click the save button and wait for the resulting AJAX call."""
        self.click(".action-save")
        self.page.wait_for_ajax()
    def publish(self):
        """Click the publish button and wait for the resulting AJAX call."""
        self.click(".action-publish")
        self.page.wait_for_ajax()
    def cancel(self):
        """Dismiss the modal without saving."""
        self.click(".action-cancel")
    def has_release_date(self):
        """Return True if the release-date input is present in the modal."""
        return self.find_css("#start_date").present
    def has_due_date(self):
        """Return True if the due-date input is present in the modal."""
        return self.find_css("#due_date").present
    def has_policy(self):
        """Return True if the grading-policy dropdown is present in the modal."""
        return self.find_css("#grading_type").present
    def set_date(self, property_name, input_selector, date):
        """
        Set `date` value to input pointed by `selector` and `property_name`.
        """
        month, day, year = map(int, date.split('/'))
        self.click(input_selector)
        if getattr(self, property_name):
            # NOTE(review): split('/')[1:] on an "m/d/yyyy" value yields the
            # (day, year) pair, not (month, year) as the variable names claim
            # -- verify against the actual input format before relying on this.
            current_month, current_year = map(int, getattr(self, property_name).split('/')[1:])
        else:  # Use default timepicker values, which are current month and year.
            current_month, current_year = datetime.datetime.today().month, datetime.datetime.today().year
        # Number of datepicker pages between the shown month and the target month.
        date_diff = 12 * (year - current_year) + month - current_month
        selector = "a.ui-datepicker-{}".format('next' if date_diff > 0 else 'prev')
        # `xrange` implies this file targets Python 2.
        for i in xrange(abs(date_diff)):
            self.page.q(css=selector).click()
        self.page.q(css="a.ui-state-default").nth(day - 1).click()  # set day
        self.page.wait_for_element_invisibility("#ui-datepicker-div", "datepicker should be closed")
        EmptyPromise(
            lambda: getattr(self, property_name) == u'{m}/{d}/{y}'.format(m=month, d=day, y=year),
            "{} is updated in modal.".format(property_name)
        ).fulfill()
    @property
    def release_date(self):
        # Current value of the release-date input, as displayed.
        return self.find_css("#start_date").first.attrs('value')[0]
    @release_date.setter
    def release_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('release_date', "#start_date", date)
    @property
    def due_date(self):
        # Current value of the due-date input, as displayed.
        return self.find_css("#due_date").first.attrs('value')[0]
    @due_date.setter
    def due_date(self, date):
        """
        Date is "mm/dd/yyyy" string.
        """
        self.set_date('due_date', "#due_date", date)
    @property
    def policy(self):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        return self.get_selected_option_text(element)
    @policy.setter
    def policy(self, grading_label):
        """
        Select the grading format with `value` in the drop-down list.
        """
        element = self.find_css('#grading_type')[0]
        select = Select(element)
        select.select_by_visible_text(grading_label)
        EmptyPromise(
            lambda: self.policy == grading_label,
            "Grading label is updated.",
        ).fulfill()
    @property
    def is_explicitly_locked(self):
        """
        Returns true if the explict staff lock checkbox is checked, false otherwise.
        """
        return self.find_css('#staff_lock')[0].is_selected()
    @is_explicitly_locked.setter
    def is_explicitly_locked(self, value):
        """
        Checks the explicit staff lock box if value is true, otherwise unchecks the box.
        """
        # Click the label (not the hidden checkbox) so the styled control toggles.
        if value != self.is_explicitly_locked:
            self.find_css('label[for="staff_lock"]').click()
        EmptyPromise(lambda: value == self.is_explicitly_locked, "Explicit staff lock is updated").fulfill()
    def shows_staff_lock_warning(self):
        """
        Returns true iff the staff lock warning is visible.
        """
        return self.find_css('.staff-lock .tip-warning').visible
    def get_selected_option_text(self, element):
        """
        Returns the text of the first selected option for the element.
        """
        if element:
            select = Select(element)
            return select.first_selected_option.text
        else:
            return None
|
doismellburning/edx-platform
|
common/test/acceptance/pages/studio/overview.py
|
Python
|
agpl-3.0
| 27,128
|
[
"VisIt"
] |
026f87d5ab596b77bea4067a7d8c5c6c5bef39796b6b26d0d1d9670778856c58
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008 Stephane Charette
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Find unused objects and remove with the user's permission."
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gen.errors import WindowActiveError
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class RemoveUnused(tool.Tool, ManagedWindow, UpdateCallback):
    """
    Tool window that scans the database for unreferenced objects (events,
    sources, citations, places, media, repositories, notes), lists them,
    and removes the entries the user marks for deletion.
    """
    # Column indices of the GtkListStore backing the results tree.
    MARK_COL = 0
    OBJ_ID_COL = 1
    OBJ_NAME_COL = 2
    OBJ_TYPE_COL = 3
    OBJ_HANDLE_COL = 4
    BUSY_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
                                             Gdk.CursorType.WATCH)
    def __init__(self, dbstate, user, options_class, name, callback=None):
        """Set up tool state, the per-type dispatch table, and build the GUI."""
        uistate = user.uistate
        self.title = _('Unused Objects')
        tool.Tool.__init__(self, dbstate, options_class, name)
        if self.db.readonly:
            # Nothing can be removed from a read-only database; bail out early.
            return
        ManagedWindow.__init__(self, uistate, [], self.__class__)
        UpdateCallback.__init__(self, self.uistate.pulse_progressbar)
        self.dbstate = dbstate
        self.uistate = uistate
        # Per-object-type dispatch: fetch/remove functions, optional text
        # summarizer, editor class name, icon, and the fallback index of the
        # name field inside the raw data tuple ('name_ix').
        self.tables = {
            'events': {'get_func': self.db.get_event_from_handle,
                       'remove': self.db.remove_event,
                       'get_text': self.get_event_text,
                       'editor': 'EditEvent',
                       'icon': 'gramps-event',
                       'name_ix': 4},
            'sources': {'get_func': self.db.get_source_from_handle,
                        'remove': self.db.remove_source,
                        'get_text': None,
                        'editor': 'EditSource',
                        'icon': 'gramps-source',
                        'name_ix': 2},
            'citations': {'get_func': self.db.get_citation_from_handle,
                          'remove': self.db.remove_citation,
                          'get_text': None,
                          'editor': 'EditCitation',
                          'icon': 'gramps-citation',
                          'name_ix': 3},
            'places': {'get_func': self.db.get_place_from_handle,
                       'remove': self.db.remove_place,
                       'get_text': self.get_place_text,
                       'editor': 'EditPlace',
                       'icon': 'gramps-place',
                       'name_ix': 2},
            'media': {'get_func': self.db.get_media_from_handle,
                      'remove': self.db.remove_media,
                      'get_text': None,
                      'editor': 'EditMedia',
                      'icon': 'gramps-media',
                      'name_ix': 4},
            'repos': {'get_func': self.db.get_repository_from_handle,
                      'remove': self.db.remove_repository,
                      'get_text': None,
                      'editor': 'EditRepository',
                      'icon': 'gramps-repository',
                      'name_ix': 3},
            'notes': {'get_func': self.db.get_note_from_handle,
                      'remove': self.db.remove_note,
                      'get_text': self.get_note_text,
                      'editor': 'EditNote',
                      'icon': 'gramps-notes',
                      'name_ix': 2},
            }
        self.init_gui()
    def init_gui(self):
        """Load the Glade UI, wire up signals, and build the results tree view."""
        self.top = Glade()
        window = self.top.toplevel
        self.set_window(window, self.top.get_object('title'), self.title)
        self.setup_configs('interface.removeunused', 400, 520)
        # Per-type checkboxes selecting which tables to scan.
        self.events_box = self.top.get_object('events_box')
        self.sources_box = self.top.get_object('sources_box')
        self.citations_box = self.top.get_object('citations_box')
        self.places_box = self.top.get_object('places_box')
        self.media_box = self.top.get_object('media_box')
        self.repos_box = self.top.get_object('repos_box')
        self.notes_box = self.top.get_object('notes_box')
        self.find_button = self.top.get_object('find_button')
        self.remove_button = self.top.get_object('remove_button')
        # Restore each checkbox from the saved options.
        self.events_box.set_active(self.options.handler.options_dict['events'])
        self.sources_box.set_active(
            self.options.handler.options_dict['sources'])
        self.citations_box.set_active(
            self.options.handler.options_dict['citations'])
        self.places_box.set_active(
            self.options.handler.options_dict['places'])
        self.media_box.set_active(self.options.handler.options_dict['media'])
        self.repos_box.set_active(self.options.handler.options_dict['repos'])
        self.notes_box.set_active(self.options.handler.options_dict['notes'])
        self.warn_tree = self.top.get_object('warn_tree')
        self.warn_tree.connect('button_press_event', self.double_click)
        self.selection = self.warn_tree.get_selection()
        self.mark_button = self.top.get_object('mark_button')
        self.mark_button.connect('clicked', self.mark_clicked)
        self.unmark_button = self.top.get_object('unmark_button')
        self.unmark_button.connect('clicked', self.unmark_clicked)
        self.invert_button = self.top.get_object('invert_button')
        self.invert_button.connect('clicked', self.invert_clicked)
        # Backing store: mark flag, gramps_id, display text, type key, handle.
        self.real_model = Gtk.ListStore(GObject.TYPE_BOOLEAN,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING,
                                        GObject.TYPE_STRING)
        # a short term Gtk introspection means we need to try both ways:
        if hasattr(self.real_model, "sort_new_with_model"):
            self.sort_model = self.real_model.sort_new_with_model()
        else:
            self.sort_model = Gtk.TreeModelSort.new_with_model(self.real_model)
        self.warn_tree.set_model(self.sort_model)
        self.renderer = Gtk.CellRendererText()
        self.img_renderer = Gtk.CellRendererPixbuf()
        self.bool_renderer = Gtk.CellRendererToggle()
        self.bool_renderer.connect('toggled', self.selection_toggled)
        # Add mark column
        mark_column = Gtk.TreeViewColumn(_('Mark'), self.bool_renderer,
                                         active=RemoveUnused.MARK_COL)
        mark_column.set_sort_column_id(RemoveUnused.MARK_COL)
        self.warn_tree.append_column(mark_column)
        # Add image column
        img_column = Gtk.TreeViewColumn(None, self.img_renderer)
        img_column.set_cell_data_func(self.img_renderer, self.get_image)
        self.warn_tree.append_column(img_column)
        # Add column with object gramps_id
        id_column = Gtk.TreeViewColumn(_('ID'), self.renderer,
                                       text=RemoveUnused.OBJ_ID_COL)
        id_column.set_sort_column_id(RemoveUnused.OBJ_ID_COL)
        self.warn_tree.append_column(id_column)
        # Add column with object name
        name_column = Gtk.TreeViewColumn(_('Name'), self.renderer,
                                         text=RemoveUnused.OBJ_NAME_COL)
        name_column.set_sort_column_id(RemoveUnused.OBJ_NAME_COL)
        self.warn_tree.append_column(name_column)
        self.top.connect_signals({
            "destroy_passed_object" : self.close,
            "on_remove_button_clicked": self.do_remove,
            "on_find_button_clicked" : self.find,
            "on_delete_event" : self.close,
            })
        self.dc_label = self.top.get_object('dc_label')
        # Widgets kept insensitive until the first scan has produced results.
        self.sensitive_list = [self.warn_tree, self.mark_button,
                               self.unmark_button, self.invert_button,
                               self.dc_label, self.remove_button]
        for item in self.sensitive_list:
            item.set_sensitive(False)
        self.show()
    def build_menu_names(self, obj):
        """Return the (window, menu) titles used by ManagedWindow."""
        return (self.title, None)
    def find(self, obj):
        """Handler for the Find button: re-scan the database for unused objects."""
        # Persist the current checkbox states into the options.
        self.options.handler.options_dict.update(
            events=self.events_box.get_active(),
            sources=self.sources_box.get_active(),
            citations=self.citations_box.get_active(),
            places=self.places_box.get_active(),
            media=self.media_box.get_active(),
            repos=self.repos_box.get_active(),
            notes=self.notes_box.get_active(),
            )
        for item in self.sensitive_list:
            item.set_sensitive(True)
        # Show busy feedback while scanning, then restore the normal cursor.
        self.uistate.set_busy_cursor(True)
        self.uistate.progress.show()
        self.window.get_window().set_cursor(self.BUSY_CURSOR)
        self.real_model.clear()
        self.collect_unused()
        self.uistate.progress.hide()
        self.uistate.set_busy_cursor(False)
        self.window.get_window().set_cursor(None)
        self.reset()
        # Save options
        self.options.handler.save_options()
    def collect_unused(self):
        """Scan each requested table and add_results for unreferenced objects."""
        # Run through all requested tables and check all objects
        # for being referenced some place. If not, add_results on them.
        db = self.db
        tables = (
            ('events', db.get_event_cursor, db.get_number_of_events),
            ('sources', db.get_source_cursor, db.get_number_of_sources),
            ('citations', db.get_citation_cursor, db.get_number_of_citations),
            ('places', db.get_place_cursor, db.get_number_of_places),
            ('media', db.get_media_cursor, db.get_number_of_media),
            ('repos', db.get_repository_cursor, db.get_number_of_repositories),
            ('notes', db.get_note_cursor, db.get_number_of_notes),
            )
        # bug 7619 : don't select notes from to do list.
        # notes associated to the todo list doesn't have references.
        # get the todo list (from get_note_list method of the todo gramplet )
        all_notes = self.dbstate.db.get_note_handles()
        FilterClass = GenericFilterFactory('Note')
        filter1 = FilterClass()
        filter1.add_rule(rules.note.HasType(["To Do"]))
        todo_list = filter1.apply(self.dbstate.db, all_notes)
        filter2 = FilterClass()
        filter2.add_rule(rules.note.HasType(["Link"]))
        link_list = filter2.apply(self.dbstate.db, all_notes)
        for (the_type, cursor_func, total_func) in tables:
            if not self.options.handler.options_dict[the_type]:
                # This table was not requested. Skip it.
                continue
            with cursor_func() as cursor:
                self.set_total(total_func())
                fbh = db.find_backlink_handles
                for handle, data in cursor:
                    # An object with no backlinks is unused ...
                    if not any(h for h in fbh(handle)):
                        # ... unless it belongs to the To Do or Link note lists.
                        if handle not in todo_list and handle not in link_list:
                            self.add_results((the_type, handle, data))
                    self.update()
            self.reset()
    def do_remove(self, obj):
        """Handler for the Remove button: delete all marked rows in one transaction."""
        with DbTxn(_("Remove unused objects"), self.db, batch=False) as trans:
            self.db.disable_signals()
            # Iterate backwards so removing rows doesn't shift pending indices.
            for row_num in range(len(self.real_model)-1, -1, -1):
                path = (row_num,)
                row = self.real_model[path]
                if not row[RemoveUnused.MARK_COL]:
                    continue
                the_type = row[RemoveUnused.OBJ_TYPE_COL]
                handle = row[RemoveUnused.OBJ_HANDLE_COL]
                remove_func = self.tables[the_type]['remove']
                remove_func(handle, trans)
                self.real_model.remove(row.iter)
        self.db.enable_signals()
        self.db.request_rebuild()
    def selection_toggled(self, cell, path_string):
        """Flip the mark flag of the toggled row (path is in sort-model terms)."""
        sort_path = tuple(map(int, path_string.split(':')))
        # Translate from the sorted view's path to the underlying store's path.
        real_path = self.sort_model.convert_path_to_child_path(Gtk.TreePath(sort_path))
        row = self.real_model[real_path]
        row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]
        self.real_model.row_changed(real_path, row.iter)
    def mark_clicked(self, mark_button):
        """Mark every row for removal."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = True
    def unmark_clicked(self, unmark_button):
        """Clear the mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = False
    def invert_clicked(self, invert_button):
        """Invert the mark flag on every row."""
        for row_num in range(len(self.real_model)):
            path = (row_num,)
            row = self.real_model[path]
            row[RemoveUnused.MARK_COL] = not row[RemoveUnused.MARK_COL]
    def double_click(self, obj, event):
        """On double left click, open the selected object in its editor."""
        if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS
            and event.button == 1):
            (model, node) = self.selection.get_selected()
            if not node:
                return
            sort_path = self.sort_model.get_path(node)
            real_path = self.sort_model.convert_path_to_child_path(sort_path)
            row = self.real_model[real_path]
            the_type = row[RemoveUnused.OBJ_TYPE_COL]
            handle = row[RemoveUnused.OBJ_HANDLE_COL]
            self.call_editor(the_type, handle)
    def call_editor(self, the_type, handle):
        """Open the appropriate Gramps editor for the given object."""
        try:
            obj = self.tables[the_type]['get_func'](handle)
            # The editor class is imported dynamically by name via exec;
            # the class names come from the static self.tables mapping above,
            # not from user input.
            editor_str = 'from gramps.gui.editors import %s as editor' % (
                self.tables[the_type]['editor'])
            exec(editor_str, globals())
            editor(self.dbstate, self.uistate, [], obj)
        except WindowActiveError:
            pass
    def get_image(self, column, cell, model, iter, user_data=None):
        """Cell data func: show the icon matching the row's object type."""
        the_type = model.get_value(iter, RemoveUnused.OBJ_TYPE_COL)
        the_icon = self.tables[the_type]['icon']
        cell.set_property('icon-name', the_icon)
    def add_results(self, results):
        """Append one (type, handle, data) result to the list store."""
        (the_type, handle, data) = results
        gramps_id = data[1]
        # if we have a function that will return to us some type
        # of text summary, then we should use it; otherwise we'll
        # use the generic field index provided in the tables above
        if self.tables[the_type]['get_text']:
            text = self.tables[the_type]['get_text'](the_type, handle, data)
        else:
            # grab the text field index we know about, and hope
            # it represents something useful to the user
            name_ix = self.tables[the_type]['name_ix']
            text = data[name_ix]
        # insert a new row into the table
        self.real_model.append(row=[False, gramps_id, text, the_type, handle])
    def get_event_text(self, the_type, handle, data):
        """
        Come up with a short line of text that we can use as
        a summary to represent this event.
        """
        # get the event:
        event = self.tables[the_type]['get_func'](handle)
        # first check to see if the event has a descriptive name
        text = event.get_description()  # (this is rarely set for events)
        # if we don't have a description...
        if text == '':
            # ... then we merge together several fields
            # get the event type (marriage, birth, death, etc.)
            text = str(event.get_type())
            # see if there is a date
            date = _dd.display(event.get_date_object())
            if date != '':
                text += '; %s' % date
            # see if there is a place
            if event.get_place_handle():
                text += '; %s' % _pd.display_event(self.db, event)
        return text
    def get_note_text(self, the_type, handle, data):
        """
        We need just the first few words of a note as a summary.
        """
        # get the note object
        note = self.tables[the_type]['get_func'](handle)
        # get the note text; this ignores (discards) formatting
        text = note.get()
        # convert whitespace to a single space
        text = " ".join(text.split())
        # if the note is too long, truncate it
        if len(text) > 80:
            text = text[:80] + "..."
        return text
    def get_place_text(self, the_type, handle, data):
        """
        We need just the place name.
        """
        # get the place object
        place = self.tables[the_type]['get_func'](handle)
        # get the name
        text = place.get_name().get_value()
        return text
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class CheckOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.

    One 0/1 toggle per object type controls whether that table is scanned
    for unused objects; all toggles default to enabled.
    """
    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)
        # Options specific for this report
        self.options_dict = {
            'events': 1,
            'sources': 1,
            'citations': 1,
            'places': 1,
            'media': 1,
            'repos': 1,
            'notes': 1,
        }
        # Help entries: (value syntax, description, per-value labels, bool flag).
        # Fixed: descriptions previously read "Whether to use check for ...".
        self.options_help = {
            'events': ("=0/1", "Whether to check for unused events",
                       ["Do not check events", "Check events"],
                       True),
            'sources': ("=0/1", "Whether to check for unused sources",
                        ["Do not check sources", "Check sources"],
                        True),
            'citations': ("=0/1", "Whether to check for unused citations",
                          ["Do not check citations", "Check citations"],
                          True),
            'places': ("=0/1", "Whether to check for unused places",
                       ["Do not check places", "Check places"],
                       True),
            'media': ("=0/1", "Whether to check for unused media",
                      ["Do not check media", "Check media"],
                      True),
            'repos': ("=0/1", "Whether to check for unused repositories",
                      ["Do not check repositories", "Check repositories"],
                      True),
            'notes': ("=0/1", "Whether to check for unused notes",
                      ["Do not check notes", "Check notes"],
                      True),
        }
|
SNoiraud/gramps
|
gramps/plugins/tool/removeunused.py
|
Python
|
gpl-2.0
| 19,993
|
[
"Brian"
] |
41b7e01986440e779c37c953c3a48262985842c48cefcd10e142620f420baddf
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2017-2021 Dominik Kriegner <dominik.kriegner@gmail.com>
import os
import unittest
from multiprocessing import freeze_support
import numpy
import xrayutilities as xu
try:
import lmfit
except ImportError:
lmfit = None
# Powder-diffraction test data file; it is optional and lives in the test
# 'data' directory (see the skipIf guard on the test class below).
testfile = 'LaB6_d500_si_psd.xye.bz2'
datadir = os.path.join(os.path.dirname(__file__), 'data')
fullfilename = os.path.join(datadir, testfile)
@unittest.skipIf(not os.path.isfile(fullfilename) or lmfit is None,
"additional test data (see http://xrayutilities.sf.io) and "
"the lmfit Python package are needed")
class Test_PowderModel(unittest.TestCase):
chi2max = 1.5
# define powder material
La = xu.materials.elements.La
B = xu.materials.elements.B
LaB6 = xu.materials.Crystal(
"LaB6", xu.materials.SGLattice(221, 4.15692, atoms=[La, B],
pos=['1a', ('6f', 0.19750)],
b=[0.05, 0.15]))
LaB6_powder = xu.simpack.Powder(LaB6, 1,
crystallite_size_gauss=1e6,
crystallite_size_lor=0.5e-6,
strain_gauss=0,
strain_lor=0)
# machine settings
settings = {'classoptions': {'oversampling': 10},
'global': {'diffractometer_radius': 0.337,
'equatorial_divergence_deg': 0.40},
'tube_tails': {'tail_left': -0.001,
'main_width': 0.00015,
'tail_right': 0.001,
'tail_intens': 0.0015},
'axial': {'angI_deg': 2.0, 'angD_deg': 2.0,
'slit_length_target': 0.008,
'n_integral_points': 21,
'length_sample': 0.015,
'slit_length_source': 0.008001},
'si_psd': {'si_psd_window_bounds': (0, 32e-3)},
'absorption': {'sample_thickness': 500e-6,
'absorption_coefficient': 3e4},
'displacement': {'specimen_displacement': -3.8e-5,
'zero_error_deg': 0.0},
'emission': {'emiss_intensities': (1.0, 0.45)}}
# define background
btt, bint = numpy.asarray([(15.158, 1136.452),
(17.886, 841.925),
(22.906, 645.784),
(26.556, 551.663),
(34.554, 401.219),
(45.764, 260.595),
(58.365, 171.993),
(81.950, 112.838),
(92.370, 101.276),
(106.441, 102.486),
(126.624, 112.838),
(139.096, 132.063),
(146.240, 136.500),
(152.022, 157.204)]).T
def setUp(self):
with xu.io.xu_open(fullfilename) as fid:
self.tt, self.det, self.sig = numpy.loadtxt(fid, unpack=True)
self.mask = numpy.logical_and(self.tt > 18, self.tt < 148)
self.pm = xu.simpack.PowderModel(self.LaB6_powder, I0=1.10e6,
fpsettings=self.settings)
self.pm.set_background('spline', x=self.btt, y=self.bint)
def test_Calculation(self):
x = numpy.arange(10, 140+numpy.random.rand()*10,
0.0075+numpy.random.rand()*0.005)
sim = self.pm.simulate(x)
self.pm.close()
self.assertEqual(len(sim), len(x))
self.assertTrue(numpy.all(sim >= 0))
def test_multiprocessing(self):
x = numpy.arange(10, 140+numpy.random.rand()*10,
0.0075+numpy.random.rand()*0.005)
st_sim = self.pm.simulate(x, mode='local')
mt_sim = self.pm.simulate(x, mode='multi')
self.pm.close()
self.assertAlmostEqual(numpy.sum(numpy.abs(st_sim - mt_sim)), 0.0)
    def test_fitting(self):
        """Two-pass refinement of the LaB6 pattern; final chi2 must be small.

        Pass 1 refines intensity + displacement; pass 2 continues from the
        pass-1 parameters and additionally refines the absorption
        coefficient. chi2max is a class-level threshold defined elsewhere
        in this test class.
        """
        # first fit run
        p = self.pm.create_fitparameters()
        for pn, limit in (
                ('primary_beam_intensity', (None, None)),
                ('displacement_specimen_displacement', (-1e-4, 1e-4)),
                ('displacement_zero_error_deg', (-0.01, 0.01))):
            p[pn].set(vary=True, min=limit[0], max=limit[1])
        # maxfev keeps the coarse first pass cheap
        fitres1 = self.pm.fit(p, self.tt[self.mask], self.det[self.mask],
                              std=self.sig[self.mask], maxfev=50)
        # second fit run to optimize absorption
        p = fitres1.params
        for pn, limit in (
                ('primary_beam_intensity', (None, None)),
                ('displacement_specimen_displacement', (-1e-4, 1e-4)),
                ('absorption_absorption_coefficient', (1e4, 10e4))):
            p[pn].set(vary=True, min=limit[0], max=limit[1])
        fitres2 = self.pm.fit(p, self.tt[self.mask], self.det[self.mask],
                              std=self.sig[self.mask])
        fitsim = self.pm.simulate(self.tt[self.mask])
        M, Rp, Rwp, Rwpexp, chi2 = xu.simpack.Rietveld_error_metrics(
            self.det[self.mask], fitsim, std=self.sig[self.mask],
            Nvar=fitres2.nvarys, disp=False)
        self.pm.close()
        self.assertTrue(chi2 < self.chi2max)
if __name__ == '__main__':
    # freeze_support() is a no-op outside Windows; needed because the
    # multiprocessing test spawns worker processes
    freeze_support()  # required for MS Windows
    unittest.main()
|
dkriegner/xrayutilities
|
tests/test_simpack_powdermodel.py
|
Python
|
gpl-2.0
| 6,195
|
[
"CRYSTAL"
] |
be88d893adff66c6fed161339852ea838ee451c59bdc31ce07157a71d5a61a51
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse, subprocess
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# manifest keys that must be present (and non-default) for packaging.
# NOTE: the original listed 'copyright' twice; the list only feeds
# membership checks in validate_manifest(), so it is deduplicated here.
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
# placeholder values emitted by the module template; validate_manifest()
# warns when a required key still carries one of these defaults
module_defaults = {
	'description':'My module',
	'author': 'Your Name',
	'license' : 'Specify your license',
	'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# placeholder text in the template LICENSE file; validate_license() warns if found
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
	"""Return the Titanium SDK path from *config* with '~' and $VARS expanded."""
	raw_path = config['TITANIUM_SDK']
	home_expanded = os.path.expanduser(raw_path)
	return os.path.expandvars(home_expanded)
def replace_vars(config,token):
	"""Expand $(KEY) placeholders in *token* using values from *config*.

	Expansion stops at the first placeholder without a closing ')' or
	whose key is missing from *config*; the remaining text is returned
	unchanged.
	"""
	idx = token.find('$(')
	while idx != -1:
		idx2 = token.find(')',idx+2)
		if idx2 == -1: break
		key = token[idx+2:idx2]
		# 'key in config' works on Python 2 and 3; dict.has_key() was
		# removed in Python 3
		if key not in config: break
		token = token.replace('$(%s)' % key, config[key])
		idx = token.find('$(')
	return token
def read_ti_xcconfig():
	"""Parse cwd/titanium.xcconfig into a dict, expanding $(VAR) references.

	Lines starting with '//' are comments; everything else of the form
	KEY = VALUE is collected, with VALUE run through replace_vars() so it
	may reference previously parsed keys.
	"""
	# context manager closes the handle deterministically (the original
	# left the file open until garbage collection)
	with open(os.path.join(cwd,'titanium.xcconfig')) as fh:
		contents = fh.read()
	config = {}
	for line in contents.splitlines(False):
		line = line.strip()
		if line[0:2]=='//': continue
		idx = line.find('=')
		if idx > 0:
			key = line[0:idx].strip()
			value = line[idx+1:].strip()
			config[key] = replace_vars(config,value)
	return config
def generate_doc(config):
	"""Render every markdown file in cwd/documentation to HTML.

	Returns a list of {filename: html} dicts, or None when the
	documentation directory is missing. The *config* argument is accepted
	but unused here.
	"""
	docdir = os.path.join(cwd,'documentation')
	if not os.path.exists(docdir):
		warn("Couldn't find documentation file at: %s" % docdir)
		return None
	try:
		# prefer the faster markdown2 when installed, else plain markdown
		import markdown2 as markdown
	except ImportError:
		import markdown
	documentation = []
	for file in os.listdir(docdir):
		if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
			continue
		md = open(os.path.join(docdir,file)).read()
		html = markdown.markdown(md)
		documentation.append({file:html});
	return documentation
def compile_js(manifest,config):
	"""Compile the module's commonjs JS assets into the ObjC asset router.

	No-op when the module ships no JS. Splices generated lookup code into
	ComArihiroTitestfairyModuleAssets.m and writes metadata.json listing
	the exports found by the compiler.
	"""
	js_file = os.path.join(cwd,'assets','com.arihiro.titestfairy.js')
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		# simplejson fallback for ancient Pythons lacking the json module
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	# Objective-C snippet substituted into the 'asset' splice point
	root_asset_content = """
%s
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	# Objective-C snippet substituted into the 'resolve_asset' splice point
	module_asset_content = """
%s
 NSNumber *index = [map objectForKey:path];
 if (index == nil) {
 return nil;
 }
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','ComArihiroTitestfairyModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
	"""Print *msg* and terminate the build with a non-zero exit status."""
	# parenthesized print is valid on both Python 2 (single argument)
	# and Python 3, unlike the original print statement
	print(msg)
	sys.exit(1)

def info(msg):
	"""Print an informational message."""
	print("[INFO] %s" % msg)

def warn(msg):
	"""Print a warning message."""
	print("[WARN] %s" % msg)
def validate_license():
	"""Warn when cwd/LICENSE still contains the template placeholder text."""
	c = open(os.path.join(cwd,'LICENSE')).read()
	if c.find(module_license_default)!=-1:
		warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Parse and sanity-check the module manifest.

	Returns (manifest_dict, manifest_path). Dies when the file is missing
	or a required key is absent; warns when a required key still holds its
	template default.
	"""
	path = os.path.join(cwd,'manifest')
	# check existence BEFORE opening: the original called open() first,
	# so its die('missing ...') branch could never run -- open() raised
	if not os.path.exists(path): die("missing %s" % path)
	manifest = {}
	# context manager closes the handle (the original leaked it)
	with open(path) as f:
		for line in f.readlines():
			line = line.strip()
			if line[0:1]=='#': continue
			if line.find(':') < 0: continue
			# split only on the FIRST ':' so values may contain colons
			# (e.g. URLs); splitting on every ':' raised ValueError here
			key,value = line.split(':',1)
			manifest[key.strip()]=value.strip()
	for key in required_module_keys:
		# 'in' replaces dict.has_key(), which does not exist on Python 3
		if key not in manifest: die("missing required manifest key '%s'" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest,path
# file and directory names excluded from doc generation and packaging
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=None):
	"""Recursively add *dir* to zipfile *zf* under '<basepath>/<dir>'.

	Skips names in the module-level ignoreFiles/ignoreDirs lists and any
	file whose extension appears in *ignoreExt*.
	"""
	# None instead of the original mutable default list ([]); behavior for
	# callers is unchanged
	if ignoreExt is None:
		ignoreExt = []
	if not os.path.exists(dir): return
	for root, dirs, files in os.walk(dir):
		for name in ignoreDirs:
			if name in dirs:
				dirs.remove(name)	# don't visit ignored directories
		for file in files:
			if file in ignoreFiles: continue
			e = os.path.splitext(file)
			if len(e) == 2 and e[1] in ignoreExt: continue
			from_ = os.path.join(root, file)
			# re-root the archive member path under basepath/dir
			to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
			zf.write(from_, to_)
def glob_libfiles():
	"""Return all Release-configuration static libraries found under build/."""
	return [libfile for libfile in glob.glob('build/**/*.a')
			if 'Release-' in libfile]
def build_module(manifest,config):
	"""Build device and simulator static libraries, then lipo them into
	one fat build/lib<moduleid>.a. Dies if either xcodebuild run fails.
	The *config* argument is accepted but unused here.
	"""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged library using lipo
	moduleid = manifest['moduleid']
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	# NOTE(review): the lipo result is not checked for failure
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
	"""Run titanium_mobile's docgen.py over the module's apidoc folder.

	Returns True when documentation was generated into
	*apidoc_build_path*, False when generation was skipped (option,
	missing folder, missing TI_ROOT, or missing docgen.py).
	"""
	global options
	if options.skip_docs:
		info("Skipping documentation generation.")
		return False
	else:
		info("Module apidoc generation can be skipped using --skip-docs")
	apidoc_path = os.path.join(cwd, "apidoc")
	if not os.path.exists(apidoc_path):
		warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
		return False
	if not os.path.exists(apidoc_build_path):
		os.makedirs(apidoc_build_path)
	# resolve the titanium_mobile checkout via the TI_ROOT env variable
	ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
	if not len(ti_root) > 0:
		warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
		warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
		return False
	docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
	if not os.path.exists(docgen):
		warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
		return False
	info("Generating documentation from the apidoc folder.")
	rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
	if rc != 0:
		die("docgen failed")
	return True
def package_module(manifest,mf,config):
	"""Assemble the distributable <moduleid>-iphone-<version>.zip.

	Includes the manifest file *mf*, the fat static library, rendered
	markdown docs, generated apidoc, the assets/example/platform dirs,
	LICENSE, module.xcconfig and metadata.json (when present).
	"""
	name = manifest['name'].lower()
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# always rebuild the archive from scratch
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			for file, html in doc.iteritems():
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	apidoc_build_path = os.path.join(cwd, "build", "apidoc")
	if generate_apidoc(apidoc_build_path):
		for file in os.listdir(apidoc_build_path):
			if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
				continue
			zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
	# .js is excluded from assets/platform -- presumably because
	# compile_js() embeds them into the binary; TODO confirm
	zip_dir(zf,'assets',modulepath,['.pyc','.js'])
	zip_dir(zf,'example',modulepath,['.pyc'])
	zip_dir(zf,'platform',modulepath,['.pyc','.js'])
	zf.write('LICENSE','%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# 'options' is read as a module global by generate_apidoc()
	global options
	parser = optparse.OptionParser()
	parser.add_option("-s", "--skip-docs",
			dest="skip_docs",
			action="store_true",
			help="Will skip building documentation in apidoc folder",
			default=False)
	(options, args) = parser.parse_args()
	# validate inputs, read build configuration, then build and package
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	sdk = find_sdk(config)
	# make the Titanium SDK's python helpers (compiler, tools) importable
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	package_module(manifest,mf,config)
	sys.exit(0)
|
hiroara/TiTestFairy
|
build.py
|
Python
|
mit
| 8,785
|
[
"VisIt"
] |
05adce732cc60e266d994245967437efc2da87f5367e7b2b84dcd0815a4189db
|
#!/usr/bin/env python
import os, time, re
from functools import partial
from flask import Flask, Module, url_for, render_template, request, session, redirect, g, make_response, current_app
from decorators import login_required, guest_or_login_required, with_lock
from decorators import global_lock
# Make flask use the old session foo from <=flask-0.9
from flask_oldsessions import OldSecureCookieSessionInterface
from flask.ext.autoindex import AutoIndex
try:
from sage.env import SAGE_SRC
except ImportError:
SAGE_SRC = os.environ.get('SAGE_SRC', os.path.join(os.environ['SAGE_ROOT'], 'devel', 'sage'))
SRC = os.path.join(SAGE_SRC, 'sage')
from flask.ext.openid import OpenID
from flask.ext.babel import Babel, gettext, ngettext, lazy_gettext, get_locale
from sagenb.misc.misc import SAGENB_ROOT, DATA, SAGE_DOC, translations_path, N_, nN_
oid = OpenID()
class SageNBFlask(Flask):
    """Flask application serving the Sage notebook.

    Registers all of the notebook's static-file routes and restores the
    pre-flask-0.9 secure-cookie session behaviour.
    """
    # static files are served through add_static_path() rather than
    # Flask's built-in static machinery
    static_path = ''

    def __init__(self, *args, **kwds):
        # one-shot token that allows automatic admin login (see index())
        self.startup_token = kwds.pop('startup_token', None)
        Flask.__init__(self, *args, **kwds)
        self.session_interface = OldSecureCookieSessionInterface()
        # the notebook client reads cookies from javascript
        self.config['SESSION_COOKIE_HTTPONLY'] = False
        self.root_path = SAGENB_ROOT
        self.add_static_path('/css', os.path.join(DATA, "sage", "css"))
        self.add_static_path('/images', os.path.join(DATA, "sage", "images"))
        self.add_static_path('/javascript', DATA)
        self.add_static_path('/static', DATA)
        self.add_static_path('/java', DATA)
        self.add_static_path('/java/jmol', os.path.join(os.environ["SAGE_ROOT"],"local","share","jmol"))
        self.add_static_path('/jsmol', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol"))
        self.add_static_path('/jsmol/js', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","js"))
        self.add_static_path('/j2s', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s"))
        self.add_static_path('/jsmol/j2s', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s"))
        self.add_static_path('/j2s/core', os.path.join(os.environ["SAGE_ROOT"],"local","share","jsmol","j2s","core"))
        import mimetypes
        mimetypes.add_type('text/plain','.jmol')
        #######
        # Doc #
        #######
        #These "should" be in doc.py
        DOC = os.path.join(SAGE_DOC, 'output', 'html', 'en')
        self.add_static_path('/pdf', os.path.join(SAGE_DOC, 'output', 'pdf'))
        self.add_static_path('/doc/static', DOC)
        #self.add_static_path('/doc/static/reference', os.path.join(SAGE_DOC, 'reference'))

    def create_jinja_environment(self):
        """Use the notebook's own jinja environment instead of Flask's."""
        from sagenb.notebook.template import env
        env.globals.update(url_for=url_for)
        return env

    def static_view_func(self, root_path, filename):
        # shared view for all static routes; bound per-root via partial()
        # in add_static_path()
        from flask.helpers import send_from_directory
        return send_from_directory(root_path, filename)

    def add_static_path(self, base_url, root_path):
        """Serve files from *root_path* at URLs below *base_url*."""
        self.add_url_rule(base_url + '/<path:filename>',
                          endpoint='/static'+base_url,
                          view_func=partial(self.static_view_func, root_path))

    def message(self, msg, cont='/', username=None, **kwds):
        """Returns an error message to the user."""
        template_dict = {'msg': msg, 'cont': cont, 'username': username}
        template_dict.update(kwds)
        return render_template(os.path.join('html', 'error_message.html'),
                               **template_dict)
base = Module('sagenb.flask_version.base')

#############
# Main Page #
#############
@base.route('/')
def index():
    """Main page: redirect a logged-in user to their worksheet list,
    otherwise show the login page (or auto-login admin via the one-shot
    startup token)."""
    if 'username' in session:
        # If there is a next request use that. See issue #76
        if 'next' in request.args:
            response = redirect(request.values.get('next', ''))
            return response
        response = redirect(url_for('worksheet_listing.home', username=session['username']))
        if 'remember' in request.args:
            # persistent session cookie valid for two weeks
            response.set_cookie('nb_session_%s'%g.notebook.port,
                                expires=(time.time() + 60 * 60 * 24 * 14))
        else:
            response.set_cookie('nb_session_%s'%g.notebook.port)
        # expire the cookie-support probe cookie
        response.set_cookie('cookie_test_%s'%g.notebook.port, expires=1)
        return response
    from authentication import login
    if current_app.startup_token is not None and 'startup_token' in request.args:
        if request.args['startup_token'] == current_app.startup_token:
            # one-shot token: log in as admin, then invalidate the token
            g.username = session['username'] = 'admin'
            session.modified = True
            current_app.startup_token = None
            return index()
    return login()
######################
# Dynamic Javascript #
######################
from hashlib import sha1

def _etag_response(data, datahash, content_type='text/javascript; charset=utf-8'):
    """Return *data* with an ETag header, or an empty 304 response when the
    client's If-None-Match header already matches *datahash*.

    Shared by all dynamically generated javascript/css endpoints below;
    the original repeated this block verbatim in five view functions.
    """
    if request.environ.get('HTTP_IF_NONE_MATCH', None) == datahash:
        response = make_response('',304)
    else:
        response = make_response(data)
    response.headers['Content-Type'] = content_type
    response.headers['Etag']=datahash
    return response

@base.route('/javascript/dynamic/notebook_dynamic.js')
def dynamic_js():
    """Serve the dynamically generated notebook javascript."""
    from sagenb.notebook.js import javascript
    # the javascript() function is cached, so there shouldn't be a big slowdown calling it
    data,datahash = javascript()
    return _etag_response(data, datahash)

# per-locale cache of (rendered js, sha1 hash)
_localization_cache = {}

@base.route('/javascript/dynamic/localization.js')
def localization_js():
    """Serve the locale-specific javascript, rendered once per locale."""
    global _localization_cache
    locale=repr(get_locale())
    if _localization_cache.get(locale,None) is None:
        data = render_template(os.path.join('js/localization.js'), N_=N_, nN_=nN_)
        _localization_cache[locale] = (data, sha1(repr(data)).hexdigest())
    data,datahash = _localization_cache[locale]
    return _etag_response(data, datahash)

# cached (rendered js, sha1 hash); the MathJax config never changes per-process
_mathjax_js_cache = None

@base.route('/javascript/dynamic/mathjax_sage.js')
def mathjax_js():
    """Serve the MathJax configuration javascript (rendered once)."""
    global _mathjax_js_cache
    if _mathjax_js_cache is None:
        from sagenb.misc.misc import mathjax_macros
        data = render_template('js/mathjax_sage.js', theme_mathjax_macros=mathjax_macros)
        _mathjax_js_cache = (data, sha1(repr(data)).hexdigest())
    data,datahash = _mathjax_js_cache
    return _etag_response(data, datahash)

@base.route('/javascript/dynamic/keyboard/<browser_os>')
def keyboard_js(browser_os):
    """Serve the browser/OS specific keyboard-handling javascript."""
    from sagenb.notebook.keyboards import get_keyboard
    data = get_keyboard(browser_os)
    datahash=sha1(data).hexdigest()
    return _etag_response(data, datahash)

###############
# Dynamic CSS #
###############
@base.route('/css/main.css')
def main_css():
    """Serve the dynamically generated main stylesheet."""
    from sagenb.notebook.css import css
    data,datahash = css()
    return _etag_response(data, datahash, content_type='text/css; charset=utf-8')
########
# Help #
########
@base.route('/help')
@login_required
def help():
    """Render the notebook help/documentation page."""
    from sagenb.notebook.tutorial import notebook_help
    from sagenb.misc.misc import SAGE_VERSION
    return render_template(os.path.join('html', 'docs.html'), username = g.username, notebook_help = notebook_help, sage_version=SAGE_VERSION)
###########
# History #
###########
@base.route('/history')
@login_required
def history():
    """Render the user's input-history as a read-only page."""
    return render_template(os.path.join('html', 'history.html'), username = g.username,
                           text = g.notebook.user_history_text(g.username), actions = False)

@base.route('/live_history')
@login_required
def live_history():
    """Turn the last 100 history entries into a live worksheet and open it."""
    W = g.notebook.create_new_worksheet_from_history(gettext('Log'), g.username, 100)
    from worksheet import url_for_worksheet
    return redirect(url_for_worksheet(W))
###########
# Favicon #
###########
@base.route('/favicon.ico')
def favicon():
    """Serve the notebook favicon from the data directory."""
    from flask.helpers import send_file
    return send_file(os.path.join(DATA, 'sage', 'images', 'favicon.ico'))
@base.route('/loginoid', methods=['POST', 'GET'])
@guest_or_login_required
@oid.loginhandler
def loginoid():
    """Begin an OpenID login by handing the submitted identity URL to
    flask-openid; redirects away when OpenID is disabled or the user is
    already logged in."""
    if not g.notebook.conf()['openid']:
        return redirect(url_for('base.index'))
    if g.username != 'guest':
        return redirect(request.values.get('next', url_for('base.index')))
    if request.method == 'POST':
        openid = request.form.get('url')
        if openid:
            return oid.try_login(openid, ask_for=['email', 'fullname', 'nickname'])
    return redirect(url_for('authentication.login'))
    #render_template('html/login.html', next=oid.get_next_url(), error=oid.fetch_error())
@oid.after_login
@with_lock
def create_or_login(resp):
    """flask-openid callback: log in a known OpenID identity, or stash the
    response in the session and redirect to profile setup for a new one."""
    if not g.notebook.conf()['openid']:
        return redirect(url_for('base.index'))
    try:
        username = g.notebook.user_manager().get_username_from_openid(resp.identity_url)
        session['username'] = g.username = username
        session.modified = True
    except (KeyError, LookupError):
        # unknown identity: remember the OpenID response and collect a
        # username/email in set_profiles()
        session['openid_response'] = resp
        session.modified = True
        return redirect(url_for('set_profiles'))
    return redirect(request.values.get('next', url_for('base.index')))
@base.route('/openid_profiles', methods=['POST','GET'])
def set_profiles():
    """Collect username/email for a first-time OpenID login.

    GET renders the profile form (sanitizing the suggested full name);
    POST validates the challenge, username and email, then creates the
    account and logs the user in. Validation failures re-render the form
    with flags accumulated in parse_dict.
    """
    if not g.notebook.conf()['openid']:
        return redirect(url_for('base.index'))
    from sagenb.notebook.challenge import challenge
    show_challenge=g.notebook.conf()['challenge']
    if show_challenge:
        chal = challenge(g.notebook.conf(),
                         is_secure = g.notebook.secure,
                         remote_ip = request.environ['REMOTE_ADDR'])
    if request.method == 'GET':
        if 'openid_response' in session:
            from sagenb.notebook.misc import valid_username_chars
            # strip characters not allowed in usernames from the full name
            re_invalid_username_chars = re.compile('[^(%s)]' % valid_username_chars)
            openid_resp = session['openid_response']
            if openid_resp.fullname is not None:
                openid_resp.fullname = re.sub(re_invalid_username_chars, '_', openid_resp.fullname)
            template_dict={}
            if show_challenge:
                template_dict['challenge_html'] = chal.html()
            return render_template('html/accounts/openid_profile.html', resp=openid_resp,
                                   challenge=show_challenge, **template_dict)
        else:
            return redirect(url_for('base.index'))
    if request.method == 'POST':
        if 'openid_response' in session:
            parse_dict = {'resp':session['openid_response']}
        else:
            return redirect(url_for('base.index'))
        try:
            resp = session['openid_response']
            username = request.form.get('username')
            from sagenb.notebook.user import User
            from sagenb.notebook.misc import is_valid_username, is_valid_email
            if show_challenge:
                parse_dict['challenge'] = True
                status = chal.is_valid_response(req_args = request.values)
                if status.is_valid is True:
                    pass
                elif status.is_valid is False:
                    err_code = status.error_code
                    if err_code:
                        parse_dict['challenge_html'] = chal.html(error_code = err_code)
                    else:
                        parse_dict['challenge_invalid'] = True
                    # ValueError is used as local control flow: jump to the
                    # re-render branch below
                    raise ValueError("Invalid challenge")
                else:
                    parse_dict['challenge_missing'] = True
                    raise ValueError("Missing challenge")
            if not is_valid_username(username):
                parse_dict['username_invalid'] = True
                raise ValueError("Invalid username")
            if g.notebook.user_manager().user_exists(username):
                parse_dict['username_taken'] = True
                raise ValueError("Pre-existing username")
            if not is_valid_email(request.form.get('email')):
                parse_dict['email_invalid'] = True
                raise ValueError("Invalid email")
            try:
                new_user = User(username, '', email = resp.email, account_type='user')
                g.notebook.user_manager().add_user_object(new_user)
            except ValueError as msg:
                parse_dict['creation_error'] = True
                raise ValueError("Error in creating user\n%s"%msg)
            g.notebook.user_manager().create_new_openid(resp.identity_url, username)
            session['username'] = g.username = username
            session.modified = True
        except ValueError:
            # some validation step failed: re-render with the error flags
            return render_template('html/accounts/openid_profile.html', **parse_dict)
        return redirect(url_for('base.index'))
#############
# OLD STUFF #
#############

############################
# Notebook autosave.
############################
# save if make a change to notebook and at least some seconds have elapsed since last save.
def init_updates():
    """Initialise the autosave/idle timers from the notebook configuration."""
    global save_interval, idle_interval, last_save_time, last_idle_time
    from sagenb.misc.misc import walltime
    save_interval = notebook.conf()['save_interval']
    idle_interval = notebook.conf()['idle_check_interval']
    last_save_time = walltime()
    last_idle_time = walltime()

def notebook_save_check():
    """Save the notebook when at least save_interval seconds have elapsed."""
    global last_save_time
    from sagenb.misc.misc import walltime
    t = walltime()
    if t > last_save_time + save_interval:
        with global_lock:
            # if someone got the lock before we did, they might have saved,
            # so we check against the last_save_time again
            # we don't put the global_lock around the outer loop since we don't need
            # it unless we are actually thinking about saving.
            if t > last_save_time + save_interval:
                notebook.save()
                last_save_time = t

def notebook_idle_check():
    """Quit idle worksheet processes every idle_interval seconds."""
    global last_idle_time
    from sagenb.misc.misc import walltime
    t = walltime()
    if t > last_idle_time + idle_interval:
        with global_lock:
            # if someone got the lock before we did, they might have already idled,
            # so we check against the last_idle_time again
            # we don't put the global_lock around the outer loop since we don't need
            # it unless we are actually thinking about quitting worksheets
            if t > last_idle_time + idle_interval:
                notebook.update_worksheet_processes()
                notebook.quit_idle_worksheet_processes()
                last_idle_time = t

def notebook_updates():
    """Run both periodic maintenance checks."""
    notebook_save_check()
    notebook_idle_check()

# module-level notebook instance; assigned in create_app() below
notebook = None
#CLEAN THIS UP!
def create_app(path_to_notebook, *args, **kwds):
    """
    This is the main method to create a running notebook. This is
    called from the process spawned in run_notebook.py
    """
    global notebook
    startup_token = kwds.pop('startup_token', None)
    #############
    # OLD STUFF #
    #############
    import sagenb.notebook.notebook as notebook
    notebook.MATHJAX = True
    # NOTE: the module bound to 'notebook' above is immediately shadowed;
    # after this line the global 'notebook' is the loaded instance
    notebook = notebook.load_notebook(path_to_notebook, *args, **kwds)
    init_updates()
    ##############
    # Create app #
    ##############
    app = SageNBFlask('flask_version', startup_token=startup_token)
    # NOTE(review): a random secret key invalidates all sessions on restart
    app.secret_key = os.urandom(24)
    oid.init_app(app)
    app.debug = True

    @app.before_request
    def set_notebook_object():
        # expose the notebook instance to request handlers via flask.g
        g.notebook = notebook

    ####################################
    # create Babel translation manager #
    ####################################
    babel = Babel(app, default_locale='en_US')

    #Check if saved default language exists. If not fallback to default
    @app.before_first_request
    def check_default_lang():
        def_lang = notebook.conf()['default_language']
        trans_ids = [str(trans) for trans in babel.list_translations()]
        if def_lang not in trans_ids:
            notebook.conf()['default_language'] = None

    #register callback function for locale selection
    #this function must be modified to add per user language support
    @babel.localeselector
    def get_locale():
        return g.notebook.conf()['default_language']

    ########################
    # Register the modules #
    ########################
    app.register_blueprint(base)
    from worksheet_listing import worksheet_listing
    app.register_blueprint(worksheet_listing)
    from admin import admin
    app.register_blueprint(admin)
    from authentication import authentication
    app.register_blueprint(authentication)
    from doc import doc
    app.register_blueprint(doc)
    from worksheet import ws as worksheet
    app.register_blueprint(worksheet)
    from settings import settings
    app.register_blueprint(settings)

    # Handles all uncaught exceptions by sending an e-mail to the
    # administrator(s) and displaying an error page.
    @app.errorhandler(Exception)
    def log_exception(error):
        from sagenb.notebook.notification import logger
        logger.exception(error)
        return app.message(
            gettext('''500: Internal server error.'''),
            username=getattr(g, 'username', 'guest')), 500

    #autoindex v0.3 doesnt seem to work with modules
    #routing with app directly does the trick
    #TODO: Check to see if autoindex 0.4 works with modules
    idx = AutoIndex(app, browse_root=SRC, add_url_rules=False)

    @app.route('/src/')
    @app.route('/src/<path:path>')
    @guest_or_login_required
    def autoindex(path='.'):
        # browse Sage library source files; known source extensions are
        # rendered through the source_code template, others served raw
        filename = os.path.join(SRC, path)
        if os.path.isfile(filename):
            from cgi import escape
            src = escape(open(filename).read().decode('utf-8','ignore'))
            if (os.path.splitext(filename)[1] in
                    ['.py','.c','.cc','.h','.hh','.pyx','.pxd']):
                return render_template(os.path.join('html', 'source_code.html'),
                                       src_filename=path,
                                       src=src, username = g.username)
            return src
        return idx.render_autoindex(path)

    return app
|
bollu/sagenb
|
sagenb/flask_version/base.py
|
Python
|
gpl-3.0
| 18,811
|
[
"Jmol"
] |
971c455568624e822c10439ec00ab3d5558a1c82500e91f39666c83c38eb875c
|
# -*- coding: utf-8 -*-
from math import sqrt
import numpy as np
from ase.neb import fit0
def NudgedElasticBand(images):
    """Plot the energy along a NEB path together with the force-based fit.

    Parameters
    ----------
    images : object
        GUI images container; assumed to provide .repeat, .natoms, .P,
        .E and .F as consumed below -- TODO confirm against the ase.gui
        caller.
    """
    N = images.repeat.prod()
    natoms = images.natoms // N
    R = images.P[:, :natoms]
    E = images.E
    F = images.F[:, :natoms]
    # fit0 returns path length s, energies, the fitted curve and the
    # per-image tangent line segments
    s, E, Sfit, Efit, lines = fit0(E, F, R)
    import pylab
    # the original also imported matplotlib (with a commented-out
    # matplotlib.use('GTK')) but never used it; dropped
    pylab.ion()
    width = 2.95
    pylab.figure(figsize=(width * 2.5**0.5, width))
    pylab.plot(s, E, 'o')
    # distinct loop-variable names: the original reused 'x', clobbering
    # the figure-width variable
    for seg_x, seg_y in lines:
        pylab.plot(seg_x, seg_y, '-g')
    pylab.plot(Sfit, Efit, 'k-')
    pylab.xlabel(u'path [Å]')
    pylab.ylabel(u'energy [eV]')
    pylab.title('Maximum: %.3f eV' % max(Efit))
    pylab.show()
|
grhawk/ASE
|
tools/ase/gui/neb.py
|
Python
|
gpl-2.0
| 687
|
[
"ASE"
] |
b7e25efc2ab7bc0d10ead311bc4f68291381c2e4d0fffc04fd83339843d743fd
|
# proxy module
from __future__ import absolute_import
from mayavi.modules.outline import *
|
enthought/etsproxy
|
enthought/mayavi/modules/outline.py
|
Python
|
bsd-3-clause
| 91
|
[
"Mayavi"
] |
c32f2dc88fc9768e931c2e0d7e510ecadaac7f34b185804ef782f3a6e95b6ffe
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: netcdf.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2016, Even Rouault <even.rouault at spatialys.com>
# Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
sys.path.append( '../pymod' )
import gdaltest
import test_cli_utilities
###############################################################################
# Netcdf Functions
###############################################################################
###############################################################################
# Get netcdf version and test for supported files
def netcdf_setup():
    """Detect the NETCDF driver and record its capabilities in gdaltest
    globals (version string plus nc2/nc4/hdf4 flags).

    Returns 'skip' when the driver or its metadata is unavailable,
    'success' otherwise.
    """
    gdaltest.netcdf_drv_version = 'unknown'
    gdaltest.netcdf_drv_has_nc2 = False
    gdaltest.netcdf_drv_has_nc4 = False
    gdaltest.netcdf_drv_has_hdf4 = False
    gdaltest.netcdf_drv_silent = False;
    gdaltest.netcdf_drv = gdal.GetDriverByName( 'NETCDF' )
    if gdaltest.netcdf_drv is None:
        print('NOTICE: netcdf not supported, skipping checks')
        return 'skip'
    #get capabilities from driver
    metadata = gdaltest.netcdf_drv.GetMetadata()
    if metadata is None:
        print('NOTICE: netcdf metadata not found, skipping checks')
        return 'skip'
    #netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $
    #netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $
    if 'NETCDF_VERSION' in metadata:
        v = metadata['NETCDF_VERSION']
        # keep only the bare version number, dropping quotes and build info
        v = v[ 0 : v.find(' ') ].strip('"');
        gdaltest.netcdf_drv_version = v
    if 'NETCDF_HAS_NC2' in metadata \
       and metadata['NETCDF_HAS_NC2'] == 'YES':
        gdaltest.netcdf_drv_has_nc2 = True
    if 'NETCDF_HAS_NC4' in metadata \
       and metadata['NETCDF_HAS_NC4'] == 'YES':
        gdaltest.netcdf_drv_has_nc4 = True
    if 'NETCDF_HAS_HDF4' in metadata \
       and metadata['NETCDF_HAS_HDF4'] == 'YES':
        gdaltest.netcdf_drv_has_hdf4 = True
    print( 'NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version + \
           ' has_nc2: '+str(gdaltest.netcdf_drv_has_nc2)+' has_nc4: ' + \
           str(gdaltest.netcdf_drv_has_nc4) )
    return 'success'
###############################################################################
# test file copy
# helper function needed so we can call Process() on it from netcdf_test_copy_timeout()
def netcdf_test_copy( ifile, band, checksum, ofile, opts=None, driver='NETCDF' ):
    """Copy *ifile* via CreateCopy with the NETCDF driver and verify the
    checksum of *band*.

    *driver* is accepted for signature compatibility with
    netcdf_test_copy_timeout() but is unused here: the test always runs
    against 'NETCDF'.
    """
    # None instead of the original mutable default list ([]); callers see
    # identical behavior
    if opts is None:
        opts = []
    test = gdaltest.GDALTest( 'NETCDF', '../'+ifile, band, checksum, options=opts )
    return test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile, delete_copy = 0, check_minmax = 0)
###############################################################################
#test file copy, optional timeout arg
def netcdf_test_copy_timeout( ifile, band, checksum, ofile, opts=[], driver='NETCDF', timeout=None ):
    """Run netcdf_test_copy(), optionally in a subprocess with a timeout.

    Without *timeout* this is a plain call; with it the copy runs in a
    multiprocessing.Process that is terminated (and the output deleted)
    when the limit is exceeded, returning 'fail'.
    """
    from multiprocessing import Process
    result = 'success'
    drv = gdal.GetDriverByName( driver )
    # start from a clean slate: remove any previous output file
    if os.path.exists( ofile ):
        drv.Delete( ofile )
    if timeout is None:
        result = netcdf_test_copy( ifile, band, checksum, ofile, opts, driver )
    else:
        sys.stdout.write('.')
        sys.stdout.flush()
        proc = Process( target=netcdf_test_copy, args=(ifile, band, checksum, ofile, opts ) )
        proc.start()
        proc.join( timeout )
        # if proc is alive after timeout we must terminate it, and return fail
        # valgrind detects memory leaks when this occurs (although it should never happen)
        if proc.is_alive():
            proc.terminate()
            if os.path.exists( ofile ):
                drv.Delete( ofile )
            print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout) )
            result = 'fail'
    return result
###############################################################################
#check support for DEFLATE compression, requires HDF5 and zlib
def netcdf_test_deflate( ifile, checksum, zlevel=1, timeout=None ):
    """Check DEFLATE compression support (requires netcdf-4/HDF5 + zlib).

    Copies *ifile* once uncompressed and once DEFLATE-compressed and
    verifies the compressed output is strictly smaller. Returns
    'skip'/'fail'/'success'.
    """
    try:
        from multiprocessing import Process
        Process.is_alive
    except (ImportError, AttributeError):
        # narrowed from a bare 'except': only a missing or too-old
        # multiprocessing should trigger a skip, not arbitrary errors
        print('from multiprocessing import Process failed')
        return 'skip'
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc'
    ofile1_opts = [ 'FORMAT=NC4C', 'COMPRESS=NONE']
    ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc'
    ofile2_opts = [ 'FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL='+str(zlevel) ]
    if not os.path.exists( ifile ):
        gdaltest.post_reason( 'ifile %s does not exist' % ifile )
        return 'fail'
    result1 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile1, ofile1_opts, 'NETCDF', timeout )
    result2 = netcdf_test_copy_timeout( ifile, 1, checksum, ofile2, ofile2_opts, 'NETCDF', timeout )
    if result1 == 'fail' or result2 == 'fail':
        return 'fail'
    # make sure compressed file is smaller than uncompressed files
    try:
        size1 = os.path.getsize( ofile1 )
        size2 = os.path.getsize( ofile2 )
    except OSError:
        # narrowed from a bare 'except': only filesystem errors are expected
        gdaltest.post_reason( 'Error getting file sizes.' )
        return 'fail'
    if size2 >= size1:
        gdaltest.post_reason( 'Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation' )
        return 'fail'
    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def _netcdf_check_md_vals( metadata, vals ):
    """Verify each expected key/value pair in vals is present in metadata."""
    if vals is None:
        vals = dict()
    for k, v in vals.items():
        if k not in metadata:
            gdaltest.post_reason("missing metadata [%s]" % (str(k)))
            return 'fail'
        # strip { and } as new driver uses these for array values
        mk = metadata[k].lstrip('{ ').rstrip('} ')
        if mk != v:
            gdaltest.post_reason("invalid value [%s] for metadata [%s]=[%s]" \
                                 % (str(mk),str(k),str(v)))
            return 'fail'
    return 'success'

def netcdf_check_vars( ifile, vals_global=None, vals_band=None ):
    """Check global and band attributes of ifile against expected values.

    Also verifies that band 1 has nodata value 1.  The duplicated
    verification loops for global and band metadata were factored into
    the _netcdf_check_md_vals() helper.
    """
    src_ds = gdal.Open( ifile )
    if src_ds is None:
        gdaltest.post_reason( 'could not open dataset ' + ifile )
        return 'fail'
    metadata_global = src_ds.GetMetadata()
    if metadata_global is None:
        gdaltest.post_reason( 'could not get global metadata from ' + ifile )
        return 'fail'
    missval = src_ds.GetRasterBand(1).GetNoDataValue()
    if missval != 1:
        gdaltest.post_reason( 'got invalid nodata value %s for Band' % str(missval) )
        return 'fail'
    metadata_band = src_ds.GetRasterBand(1).GetMetadata()
    if metadata_band is None:
        gdaltest.post_reason( 'could not get Band metadata' )
        return 'fail'
    result = _netcdf_check_md_vals( metadata_global, vals_global )
    if result != 'success':
        return result
    return _netcdf_check_md_vals( metadata_band, vals_band )
###############################################################################
# Netcdf Tests
###############################################################################
###############################################################################
# Perform simple read test.
def netcdf_1():
    """Perform a simple read test of a NETCDF subdataset."""
    # initialise the netcdf test environment
    netcdf_setup()
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test = gdaltest.GDALTest( 'NetCDF', 'NETCDF:"data/bug636.nc":tas', 1, 31621,
                              filename_absolute = 1 )
    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning so it
    # does not gum up the test stream output.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    result = test.testOpen()
    gdal.PopErrorHandler()
    return result
###############################################################################
# Verify a simple createcopy operation. We can't do the trivial gdaltest
# operation because the new file will only be accessible via subdatasets.
def netcdf_2():
    """Verify a simple CreateCopy() to netCDF.

    The trivial gdaltest operation cannot be used because the new file is
    only accessible via subdatasets.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    source = gdal.Open( 'data/byte.tif' )
    gdaltest.netcdf_drv.CreateCopy( 'tmp/netcdf2.nc', source)
    test = gdaltest.GDALTest( 'NetCDF', 'tmp/netcdf2.nc',
                              1, 4672,
                              filename_absolute = 1 )
    wkt = """PROJCS["NAD27 / UTM zone 11N",
    GEOGCS["NAD27",
        DATUM["North_American_Datum_1927",
            SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
                AUTHORITY["EPSG","7008"]],
            AUTHORITY["EPSG","6267"]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433],
        AUTHORITY["EPSG","4267"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-117],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","26711"]]"""
    result = test.testOpen( check_prj = wkt )
    if result != 'success':
        return result
    # In raster-only mode, opening in update mode must fail
    # (not sure what would be missing for that...)
    with gdaltest.error_handler():
        ds = gdal.Open( 'tmp/netcdf2.nc', gdal.GA_Update )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    gdaltest.clean_tmp()
    return 'success'
###############################################################################
def netcdf_3():
    """Check min/max computation on a GMT-style grid file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/sombrero.grd' )
    band = dataset.GetRasterBand(1)
    lo, hi = band.ComputeRasterMinMax()
    # compare floats with a small tolerance
    if abs(lo - (-0.675758)) > 0.000001 or abs(hi - 1.0) > 0.000001:
        gdaltest.post_reason( 'Wrong min or max.' )
        return 'fail'
    band = None
    dataset = None
    return 'success'
###############################################################################
# In #2582 5dimensional files were causing problems. Verify use ok.
def netcdf_4():
    """Read band 3 of a 5-dimensional file (#2582 reported problems)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test = gdaltest.GDALTest( 'NetCDF',
                              'NETCDF:data/foo_5dimensional.nc:temperature',
                              3, 1218, filename_absolute = 1 )
    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # checksum is not tested (see bug #4284)
    result = test.testOpen(skip_checksum = True)
    gdal.PopErrorHandler()
    return result
###############################################################################
# In #2583 5dimensional files were having problems unrolling the highest
# dimension - check handling now on band 7.
def netcdf_5():
    """Read band 7 of a 5-dimensional file.

    #2583: the highest dimension had problems unrolling — check handling.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test = gdaltest.GDALTest( 'NetCDF',
                              'NETCDF:data/foo_5dimensional.nc:temperature',
                              7, 1227, filename_absolute = 1 )
    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    # checksum is not tested (see bug #4284)
    result = test.testOpen(skip_checksum = True)
    gdal.PopErrorHandler()
    return result
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#1 standard parallel.
def netcdf_6():
    """Spatial reference reading for CF-1.4 Lambert Conformal, 1 standard
    parallel (ticket #3324)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_lcc1sp.nc' )
    srs = osr.SpatialReference()
    srs.ImportFromWkt( dataset.GetProjection() )
    dataset = None
    lat_origin = srs.GetProjParm( 'latitude_of_origin' )
    if lat_origin != 25:
        gdaltest.post_reason( 'Latitude of origin does not match expected:\n%f'
                              % lat_origin )
        return 'fail'
    return 'success'
###############################################################################
#ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
#2 standard parallels.
def netcdf_7():
    """Spatial reference reading for CF-1.4 Lambert Conformal, 2 standard
    parallels (ticket #3324)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_lcc2sp.nc' )
    srs = osr.SpatialReference()
    srs.ImportFromWkt( dataset.GetProjection() )
    dataset = None
    std_p1 = srs.GetProjParm( 'standard_parallel_1' )
    std_p2 = srs.GetProjParm( 'standard_parallel_2' )
    srs = None
    if std_p1 != 33.0 or std_p2 != 45.0:
        gdaltest.post_reason( 'Standard Parallels do not match expected:\n%f,%f'
                              % ( std_p1, std_p2 ) )
        return 'fail'
    return 'success'
###############################################################################
#check for cf convention read of albers equal area
# Previous version compared entire wkt, which varies slightly among driver versions
# now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters
def netcdf_8():
    """CF-convention read of Albers Equal Area.

    Earlier versions compared the whole WKT, which varies slightly between
    driver versions; now only the PROJECTION name and two parameters are
    checked.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_aea2sp_invf.nc' )
    srs = osr.SpatialReference()
    srs.ImportFromWkt( dataset.GetProjection() )
    dataset = None
    proj = srs.GetAttrValue( 'PROJECTION' )
    if proj != 'Albers_Conic_Equal_Area':
        gdaltest.post_reason( 'Projection does not match expected : ' + proj )
        return 'fail'
    for param_name, expected in ( ('latitude_of_center', 37.5),
                                  ('longitude_of_center', -96) ):
        value = srs.GetProjParm(param_name)
        if value != expected:
            gdaltest.post_reason( 'Got wrong parameter value (%g)' % value )
            return 'fail'
    return 'success'
###############################################################################
#check to see if projected systems default to wgs84 if no spheroid def
def netcdf_9():
    """Projected systems must default to WGS 84 when no spheroid is defined."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_no_sphere.nc' )
    srs = osr.SpatialReference()
    srs.ImportFromWkt( dataset.GetProjection() )
    dataset = None
    spheroid = srs.GetAttrValue( 'SPHEROID' )
    srs = None
    if spheroid != 'WGS 84':
        gdaltest.post_reason( 'Incorrect spheroid read from file\n%s'
                              % ( spheroid ) )
        return 'fail'
    return 'success'
###############################################################################
#check if km pixel size makes it through to gt
def netcdf_10():
    """Check that a km pixel size makes it through to the geotransform."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_no_sphere.nc' )
    projection = dataset.GetProjection()
    geotransform = dataset.GetGeoTransform()
    dataset = None
    # geotransform as produced by the old driver (values scaled to metres)
    gt_metres = ( -1897186.0290038721, 5079.3608398440065,
                  0.0,2674684.0244560046,
                  0.0,-5079.4721679684635 )
    # geotransform as produced by the new driver (km values, UNIT attribute)
    gt_km = ( -1897.186029003872, 5.079360839844003,
              0.0, 2674.6840244560044,
              0.0,-5.079472167968456 )
    if geotransform != gt_metres:
        srs = osr.SpatialReference()
        srs.ImportFromWkt( projection )
        # new driver uses a UNIT attribute instead of scaling the values
        if not (srs.GetAttrValue("PROJCS|UNIT",1)=="1000" and geotransform == gt_km) :
            gdaltest.post_reason( 'Incorrect geotransform, got '+str(geotransform) )
            return 'fail'
    return 'success'
###############################################################################
#check if ll gets caught in km pixel size check
def netcdf_11():
    """A lat/lon file must not be caught by the km pixel-size scaling."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/cf_geog.nc' )
    geotransform = dataset.GetGeoTransform()
    dataset = None
    if geotransform != (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0):
        gdaltest.post_reason( 'Incorrect geotransform' )
        return 'fail'
    return 'success'
###############################################################################
#check for scale/offset set/get.
def netcdf_12():
    """Check scale/offset get on a file that declares them."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/scale_offset.nc' )
    band = dataset.GetRasterBand( 1 )
    scale = band.GetScale()
    offset = band.GetOffset()
    dataset = None
    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    return 'success'
###############################################################################
#check for scale/offset = None if no scale or offset is available
def netcdf_13():
    """Scale/offset must be reported as None when absent from the file.

    Fixes: comparisons to None use identity ('is not None') per PEP 8, and
    a stray trailing semicolon was removed.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.Open( 'data/no_scale_offset.nc' )
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale is not None or offset is not None:
        gdaltest.post_reason( 'Incorrect scale or offset' )
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check for scale/offset for two variables
def netcdf_14():
    """Check scale/offset reading for two variables in the same file.

    Fixes: removed the duplicated GetScale()/GetOffset() calls on the second
    subdataset, removed stray semicolons, and release the second dataset
    handle before returning.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # first subdataset: variable z
    ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:z' )
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale != 0.01 or offset != 1.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    ds = None
    # second subdataset: variable q
    ds = gdal.Open( 'NETCDF:data/two_vars_scale_offset.nc:q' )
    scale = ds.GetRasterBand( 1 ).GetScale()
    offset = ds.GetRasterBand( 1 ).GetOffset()
    if scale != 0.1 or offset != 2.5:
        gdaltest.post_reason( 'Incorrect scale(%f) or offset(%f)' % ( scale, offset ) )
        return 'fail'
    ds = None
    return 'success'
###############################################################################
#check support for netcdf-2 (64 bit)
# This test fails in 1.8.1, because the driver does not support NC2 (bug #3890)
def netcdf_15():
    """Check support for netcdf-2 (64-bit offset) files.

    This test fails in 1.8.1, because the driver does not support NC2
    (bug #3890).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc2:
        return 'skip'
    dataset = gdal.Open( 'data/trmm-nc2.nc' )
    if dataset is None:
        return 'fail'
    dataset = None
    return 'success'
###############################################################################
#check support for netcdf-4
def netcdf_16():
    """Check that a netcdf-4 file is opened and identified by the netCDF driver."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ifile = 'data/trmm-nc4.nc'
    # Open() must succeed ...
    dataset = gdal.Open( ifile )
    if dataset is None:
        gdaltest.post_reason('GDAL did not open file')
        return 'fail'
    driver_name = dataset.GetDriver().GetDescription()
    dataset = None
    # ... and with the netCDF driver, not e.g. HDF5Image
    if driver_name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not open file')
        return 'fail'
    # Identify() must also pick the netCDF driver
    driver_name = gdal.IdentifyDriver( ifile ).GetDescription()
    if driver_name != 'netCDF':
        gdaltest.post_reason('netcdf driver did not identify file')
        return 'fail'
    return 'success'
###############################################################################
#check support for netcdf-4 - make sure hdf5 is not read by netcdf driver
def netcdf_17():
    """Ensure an HDF5 file is NOT opened or identified by the netCDF driver."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ifile = 'data/groups.h5'
    # skip if the HDF5 drivers are not enabled
    if gdal.GetDriverByName( 'HDF5' ) is None and \
       gdal.GetDriverByName( 'HDF5Image' ) is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # Open() must succeed, but with some other driver
    dataset = gdal.Open( ifile )
    if dataset is None:
        gdaltest.post_reason('GDAL did not open hdf5 file')
        return 'fail'
    driver_name = dataset.GetDriver().GetDescription()
    dataset = None
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf5 file')
        return 'fail'
    # Identify() must not pick the netCDF driver either
    driver_name = gdal.IdentifyDriver( ifile ).GetDescription()
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf5 file')
        return 'fail'
    return 'success'
###############################################################################
#check support for netcdf-4 classic (NC4C)
def netcdf_18():
    """Check support for netcdf-4 classic (NC4C) files."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ifile = 'data/trmm-nc4c.nc'
    # test with Open()
    dataset = gdal.Open( ifile )
    if dataset is None:
        return 'fail'
    driver_name = dataset.GetDriver().GetDescription()
    dataset = None
    # fail if it was opened by another driver (i.e. HDF5Image)
    if driver_name != 'netCDF':
        return 'fail'
    # test with Identify()
    if gdal.IdentifyDriver( ifile ).GetDescription() != 'netCDF':
        return 'fail'
    return 'success'
###############################################################################
#check support for reading with DEFLATE compression, requires NC4
def netcdf_19():
    """Read a DEFLATE-compressed file (requires netcdf-4)."""
    if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    test = gdaltest.GDALTest( 'NetCDF', 'data/trmm-nc4z.nc', 1, 50235,
                              filename_absolute = 1 )
    return test.testOpen(skip_checksum = True)
###############################################################################
#check support for writing with DEFLATE compression, requires NC4
def netcdf_20():
    """Write a tiny file with DEFLATE compression (requires netcdf-4)."""
    if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    return netcdf_test_deflate( 'data/utm.tif', 50235 )
###############################################################################
#check support for writing large file with DEFLATE compression
#if chunking is not defined properly within the netcdf driver, this test can take 1h
def netcdf_21():
    """Write a large file with DEFLATE compression.

    If chunking is not defined properly within the netcdf driver, this test
    can take about an hour, so a 60-second timeout guards the copy.  The
    large input GeoTIFF is generated once with gdalwarp and cached under
    tmp/cache.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # only run when slow tests are enabled
    if not gdaltest.run_slow_tests():
        return 'skip'
    bigfile = 'tmp/cache/utm-big.tif'
    # progress marker, since this test is slow
    sys.stdout.write('.')
    sys.stdout.flush()
    #create cache dir if absent
    if not os.path.exists( 'tmp/cache' ):
        os.mkdir( 'tmp/cache' )
    #look for large gtiff in cache
    if not os.path.exists( bigfile ):
        #create large gtiff by upsampling data/utm.tif to 7680x7680
        if test_cli_utilities.get_gdalwarp_path() is None:
            gdaltest.post_reason('gdalwarp not found')
            return 'skip'
        warp_cmd = test_cli_utilities.get_gdalwarp_path() +\
            ' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\
            'data/utm.tif ' + bigfile
        try:
            (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
        except:
            gdaltest.post_reason('gdalwarp execution failed')
            return 'fail'
        # gdalwarp -q should produce no output on either stream
        if ( err != '' or ret != '' ):
            gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
            return 'fail'
    # test compression of the file, with a conservative timeout of 60 seconds
    return netcdf_test_deflate( bigfile, 26695, 6, 60 )
###############################################################################
#check support for hdf4
def netcdf_22():
    """Open an hdf4 file through the netCDF driver (requires HDF4 support)."""
    if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_hdf4:
        return 'skip'
    ifile = 'data/hdifftst2.hdf'
    # suppress the expected warning
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    dataset = gdal.Open( 'NETCDF:' + ifile )
    gdal.PopErrorHandler()
    if dataset is None:
        gdaltest.post_reason('netcdf driver did not open hdf4 file')
        return 'fail'
    dataset = None
    return 'success'
###############################################################################
#check support for hdf4 - make sure hdf4 file is not read by netcdf driver
def netcdf_23():
    """Ensure an hdf4 file is NOT opened or identified by the netCDF driver.

    Deliberately not skipped when netcdf support is disabled in GDAL.
    """
    # skip only if the HDF4 drivers are not enabled in GDAL
    if gdal.GetDriverByName( 'HDF4' ) is None and \
       gdal.GetDriverByName( 'HDF4Image' ) is None:
        return 'skip'
    ifile = 'data/hdifftst2.hdf'
    # Open() must succeed, but with some other driver
    dataset = gdal.Open( ifile )
    if dataset is None:
        gdaltest.post_reason('GDAL did not open hdf4 file')
        return 'fail'
    driver_name = dataset.GetDriver().GetDescription()
    dataset = None
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver opened hdf4 file')
        return 'fail'
    # Identify() must not pick the netCDF driver either
    driver_name = gdal.IdentifyDriver( ifile ).GetDescription()
    if driver_name == 'netCDF':
        gdaltest.post_reason('netcdf driver was identified for hdf4 file')
        return 'fail'
    return 'success'
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_24():
    """Read attributes (single values and array values) from a classic file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    expected_global = {
        'NC_GLOBAL#test': 'testval',
        'NC_GLOBAL#valid_range_i': '0,255',
        'NC_GLOBAL#valid_min': '10.1',
        'NC_GLOBAL#test_b': '1',
    }
    expected_band = {
        '_Unsigned': 'true',
        'valid_min': '10.1',
        'valid_range_b': '1,10',
        'valid_range_d': '0.1111112222222,255.555555555556',
        'valid_range_f': '0.1111111,255.5556',
        'valid_range_s': '0,255',
    }
    return netcdf_check_vars( 'data/nc_vars.nc', expected_global, expected_band )
###############################################################################
# check support for NC4 reading attributes (single values and array values)
def netcdf_24_nc4():
    """Read NC4 attributes, including the extended NC4 types.

    Fix: the band dict literal contained a duplicate 'valid_range_s' key
    (with the same value), so the later entry silently overwrote the
    earlier one; the duplicate was removed.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    vals_global = {'NC_GLOBAL#test': 'testval',
                   'NC_GLOBAL#test_string': 'testval_string',
                   'NC_GLOBAL#valid_range_i': '0,255',
                   'NC_GLOBAL#valid_min': '10.1',
                   'NC_GLOBAL#test_b': '-100',
                   'NC_GLOBAL#test_ub': '200',
                   'NC_GLOBAL#test_s': '-16000',
                   'NC_GLOBAL#test_us': '32000',
                   'NC_GLOBAL#test_l': '-2000000000',
                   'NC_GLOBAL#test_ul': '4000000000'}
    vals_band = {'test_string_arr': 'test,string,arr',
                 'valid_min': '10.1',
                 'valid_range_b': '1,10',
                 'valid_range_ub': '1,200',
                 'valid_range_s': '0,255',
                 'valid_range_us': '0,32000',
                 'valid_range_l': '0,255',
                 'valid_range_ul': '0,4000000000',
                 'valid_range_d': '0.1111112222222,255.555555555556',
                 'valid_range_f': '0.1111111,255.5556'}
    return netcdf_check_vars( 'data/nc4_vars.nc', vals_global, vals_band )
###############################################################################
# check support for writing attributes (single values and array values)
def netcdf_25():
    """Write attributes via CreateCopy and verify them in the output file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ofile = 'tmp/netcdf_25.nc'
    result = netcdf_test_copy( 'data/nc_vars.nc', 1, None, ofile )
    if result != 'success':
        return result
    expected_global = {
        'NC_GLOBAL#test': 'testval',
        'NC_GLOBAL#valid_range_i': '0,255',
        'NC_GLOBAL#valid_min': '10.1',
        'NC_GLOBAL#test_b': '1',
    }
    expected_band = {
        '_Unsigned': 'true',
        'valid_min': '10.1',
        'valid_range_b': '1,10',
        'valid_range_d': '0.1111112222222,255.555555555556',
        'valid_range_f': '0.1111111,255.5556',
        'valid_range_s': '0,255',
    }
    return netcdf_check_vars( 'tmp/netcdf_25.nc', expected_global, expected_band )
###############################################################################
# check support for NC4 writing attributes (single values and array values)
def netcdf_25_nc4():
    """Write NC4 attributes via CreateCopy and verify them in the output.

    Fix: the band dict literal contained a duplicate 'valid_range_s' key
    (with the same value), so the later entry silently overwrote the
    earlier one; the duplicate was removed.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    result = netcdf_test_copy( 'data/nc4_vars.nc', 1, None, 'tmp/netcdf_25_nc4.nc', [ 'FORMAT=NC4' ] )
    if result != 'success':
        return result
    vals_global = {'NC_GLOBAL#test': 'testval',
                   'NC_GLOBAL#test_string': 'testval_string',
                   'NC_GLOBAL#valid_range_i': '0,255',
                   'NC_GLOBAL#valid_min': '10.1',
                   'NC_GLOBAL#test_b': '-100',
                   'NC_GLOBAL#test_ub': '200',
                   'NC_GLOBAL#test_s': '-16000',
                   'NC_GLOBAL#test_us': '32000',
                   'NC_GLOBAL#test_l': '-2000000000',
                   'NC_GLOBAL#test_ul': '4000000000'}
    vals_band = {'test_string_arr': 'test,string,arr',
                 'valid_min': '10.1',
                 'valid_range_b': '1,10',
                 'valid_range_ub': '1,200',
                 'valid_range_s': '0,255',
                 'valid_range_us': '0,32000',
                 'valid_range_l': '0,255',
                 'valid_range_ul': '0,4000000000',
                 'valid_range_d': '0.1111112222222,255.555555555556',
                 'valid_range_f': '0.1111111,255.5556'}
    return netcdf_check_vars( 'tmp/netcdf_25_nc4.nc', vals_global, vals_band )
###############################################################################
# check support for WRITE_BOTTOMUP file creation option
# use a dummy file with no lon/lat info to force a different checksum
# depending on y-axis order
def netcdf_26():
    """Check the WRITE_BOTTOMUP creation option.

    Uses a dummy file with no lon/lat info so the checksum differs
    depending on the y-axis order.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # default configuration (bottom-up on)
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    gdal.PopErrorHandler()
    if result != 'success':
        print('failed create copy without WRITE_BOTTOMUP')
        return result
    # WRITE_BOTTOMUP=NO must yield a different checksum
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855,
                              options=['WRITE_BOTTOMUP=NO'] )
    result = test.testCreateCopy(check_gt=0, check_srs=0, check_minmax = 0)
    if result != 'success':
        print('failed create copy with WRITE_BOTTOMUP=NO')
    return result
###############################################################################
# check support for GDAL_NETCDF_BOTTOMUP configuration option
def netcdf_27():
    """Check the GDAL_NETCDF_BOTTOMUP configuration option."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # default configuration (option unset)
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4672 )
    saved_config = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
    result = test.testOpen()
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', saved_config )
    if result != 'success':
        print('failed open without GDAL_NETCDF_BOTTOMUP')
        return result
    # GDAL_NETCDF_BOTTOMUP=NO must yield a different checksum
    test = gdaltest.GDALTest( 'NETCDF', '../data/int16-nogeo.nc', 1, 4855 )
    saved_config = gdal.GetConfigOption( 'GDAL_NETCDF_BOTTOMUP' )
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
    result = test.testOpen()
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', saved_config )
    if result != 'success':
        print('failed open with GDAL_NETCDF_BOTTOMUP')
    return result
###############################################################################
# check support for writing multi-dimensional files (helper function)
def netcdf_test_4dfile( ofile ):
    """Validate a multi-dimensional output file.

    The result must have 8 bands and 0 subdatasets (instead of 0 bands and
    8 subdatasets).  If the ncdump utility is available, its header output
    is also checked for the expected dimensions.

    Fixes: the no-op statement 'ds is None' replaced with 'ds = None';
    integer (floor) division for the subdataset count (len/2 yields a float
    under Python 3); identity comparison with None; bare except narrowed.
    """
    ds = gdal.Open( ofile )
    if ds is None:
        gdaltest.post_reason( 'open of copy failed' )
        return 'fail'
    md = ds.GetMetadata( 'SUBDATASETS' )
    subds_count = 0
    if md is not None:
        # the SUBDATASETS domain holds a NAME/DESC pair per subdataset
        subds_count = len(md) // 2
    if ds.RasterCount != 8 or subds_count != 0:
        gdaltest.post_reason( 'copy has %d bands (expected 8) and has %d subdatasets'\
                              ' (expected 0)' % (ds.RasterCount, subds_count ) )
        return 'fail'
    ds = None
    # get file header with ncdump (if available)
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except Exception:
        print('NOTICE: ncdump not found')
        return 'success'
    if err is None or 'netcdf library version' not in err:
        print('NOTICE: ncdump not found')
        return 'success'
    (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h '+ ofile )
    if ret == '' or err != '':
        gdaltest.post_reason( 'ncdump failed' )
        return 'fail'
    # simple dimension tests using ncdump output
    err = ""
    if not 'int t(time, levelist, lat, lon) ;' in ret:
        err = err + 'variable (t) has wrong dimensions or is missing\n'
    if not 'levelist = 2 ;' in ret:
        err = err + 'levelist dimension is missing or incorrect\n'
    if not 'int levelist(levelist) ;' in ret:
        err = err + 'levelist variable is missing or incorrect\n'
    if not 'time = 4 ;' in ret:
        err = err + 'time dimension is missing or incorrect\n'
    if not 'double time(time) ;' in ret:
        err = err + 'time variable is missing or incorrect\n'
    # uncomment this to get full header in output
    #if err != '':
    #    err = err + ret
    if err != '':
        gdaltest.post_reason( err )
        return 'fail'
    return 'success'
###############################################################################
# check support for writing multi-dimensional files using CreateCopy()
def netcdf_28():
    """CreateCopy() of a multi-dimensional (4D) file, then validate the result."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ofile = 'tmp/netcdf_28.nc'
    # copy, then test the resulting file
    if netcdf_test_copy( 'data/netcdf-4d.nc', 0, None, ofile ) != 'success':
        return 'fail'
    return netcdf_test_4dfile( ofile )
###############################################################################
# Check support for writing multi-dimensional files using gdalwarp.
# Requires metadata copy support in gdalwarp (see bug #3898).
# First create a vrt file using gdalwarp, then copy file to netcdf.
# The workaround is (currently ??) necessary because dimension rolling code is
# in netCDFDataset::CreateCopy() and necessary dimension metadata
# is not saved to netcdf when using gdalwarp (as the driver does not write
# metadata to netcdf file with SetMetadata() and SetMetadataItem()).
def netcdf_29():
    """Check writing multi-dimensional files using gdalwarp (bug #3898).

    First creates a VRT with gdalwarp, then copies that VRT to netcdf so
    the driver's dimension rolling applies, and finally validates the 4D
    output.  The VRT workaround is (currently ??) necessary because the
    dimension rolling code lives in netCDFDataset::CreateCopy() and the
    necessary dimension metadata is not saved when using gdalwarp directly
    (the driver does not write metadata via SetMetadata()/SetMetadataItem()).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # create tif file using gdalwarp
    if test_cli_utilities.get_gdalwarp_path() is None:
        gdaltest.post_reason('gdalwarp not found')
        return 'skip'
    ifile = 'data/netcdf-4d.nc'
    ofile1 = 'tmp/netcdf_29.vrt'
    ofile = 'tmp/netcdf_29.nc'
    warp_cmd = '%s -q -overwrite -of vrt %s %s' %\
        ( test_cli_utilities.get_gdalwarp_path(), ifile, ofile1 )
    try:
        (ret, err) = gdaltest.runexternal_out_and_err( warp_cmd )
    except:
        gdaltest.post_reason('gdalwarp execution failed')
        return 'fail'
    # gdalwarp -q should produce no output on either stream
    if ( err != '' or ret != '' ):
        gdaltest.post_reason('gdalwarp returned error\n'+str(ret)+' '+str(err))
        return 'fail'
    # copy vrt to netcdf, with proper dimension rolling
    result = netcdf_test_copy( ofile1, 0, None, ofile )
    if result != 'success':
        return 'fail'
    # test file
    result = netcdf_test_4dfile( ofile )
    if result == 'fail':
        print('test failed - does gdalwarp support metadata copying?')
    return result
###############################################################################
# check support for file with nan values (bug #4705)
def netcdf_30():
    """Read a file containing NaN values (bug #4705)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    test = gdaltest.GDALTest( 'NetCDF', 'trmm-nan.nc', 1, 62519 )
    # Silence the 'No UNIDATA NC_GLOBAL:Conventions attribute' warning.
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    result = test.testOpen()
    gdal.PopErrorHandler()
    return result
###############################################################################
#check if 2x2 file has proper geotransform
#1 pixel (in width or height) still unsupported because we can't get the pixel dimensions
def netcdf_31():
    """Check that a 2x2 file has the proper geotransform.

    1-pixel (in width or height) files remain unsupported because the pixel
    dimensions cannot be determined.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    dataset = gdal.Open( 'data/trmm-2x2.nc' )
    dataset.GetProjection( )
    geotransform = dataset.GetGeoTransform( )
    dataset = None
    expected = ( -80.0, 0.25, 0.0, -19.5, 0.0, -0.25 )
    if geotransform != expected:
        gdaltest.post_reason( 'Incorrect geotransform, got '+str(geotransform) )
        return 'fail'
    return 'success'
###############################################################################
# Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_32():
    """NC_UBYTE write/read for NC4 and NC4C formats (#5053)."""
    if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    ifile = 'data/byte.tif'
    ofile = 'tmp/netcdf_32.nc'
    # basic read/write round-trip in both netcdf-4 flavours
    for output_format in ( 'NC4', 'NC4C' ):
        result = netcdf_test_copy( ifile, 1, 4672, ofile, [ 'FORMAT=' + output_format ] )
        if result != 'success':
            return 'fail'
    return 'success'
###############################################################################
# TEST NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053)
def netcdf_33():
    """NC_UBYTE metadata read after a copy to NC4 (#5053)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ofile = 'tmp/netcdf_33.nc'
    result = netcdf_test_copy( 'data/nc_vars.nc', 1, None, ofile, [ 'FORMAT=NC4' ] )
    if result != 'success':
        return result
    return netcdf_check_vars( 'tmp/netcdf_33.nc' )
###############################################################################
# check support for reading large file with chunking and DEFLATE compression
# if chunking is not supported within the netcdf driver, this test can take very long
def netcdf_34():
    """Read a large file with chunking and DEFLATE compression.

    If chunking is not supported within the netcdf driver, this test can
    take very long, so the open runs in a child process guarded by a
    timeout.  The test file is downloaded from download.osgeo.org and
    cached.
    """
    filename = 'utm-big-chunks.nc'
    # this timeout is more than enough - on my system takes <1s with fix, about 25 seconds without
    timeout = 5
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    if not gdaltest.run_slow_tests():
        return 'skip'
    # multiprocessing is required for the timeout guard
    try:
        from multiprocessing import Process
    except:
        print('from multiprocessing import Process failed')
        return 'skip'
    if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/'+filename,filename):
        return 'skip'
    # progress marker, since this test is slow
    sys.stdout.write('.')
    sys.stdout.flush()
    tst = gdaltest.GDALTest( 'NetCDF', '../tmp/cache/'+filename, 1, 31621 )
    #tst.testOpen()
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    proc = Process( target=tst.testOpen )
    proc.start()
    proc.join( timeout )
    gdal.PopErrorHandler()
    # if proc is alive after timeout we must terminate it, and return fail
    # valgrind detects memory leaks when this occurs (although it should never happen)
    if proc.is_alive():
        proc.terminate()
        print('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout) )
        return 'fail'
    return 'success'
###############################################################################
# test writing a long metadata > 8196 chars (bug #5113)
def netcdf_35():
    """Write metadata longer than 8196 chars (bug #5113)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ofile = 'tmp/netcdf_35.nc'
    # copy the file first
    if netcdf_test_copy( 'data/netcdf_fixes.nc', 0, None, ofile ) != 'success':
        return 'fail'
    # then check that the long metadata item survived the copy intact
    ds = gdal.Open( ofile )
    if ds is None:
        gdaltest.post_reason( 'open of copy failed' )
        return 'fail'
    md = ds.GetMetadata( '' )
    if 'U#bla' not in md:
        gdaltest.post_reason( 'U#bla metadata absent' )
        return 'fail'
    bla = md['U#bla']
    if len(bla) != 9591:
        gdaltest.post_reason( 'U#bla metadata is of length %d, expecting %d' % (len(bla),9591) )
        return 'fail'
    if bla[-4:] != '_bla':
        gdaltest.post_reason( 'U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla') )
        return 'fail'
    return 'success'
###############################################################################
# test for correct geotransform (bug #5114)
def netcdf_36():
    """Check the geotransform read from data/netcdf_fixes.nc (bug #5114)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ifile = 'data/netcdf_fixes.nc'
    ds = gdal.Open(ifile)
    if ds is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    got_gt = ds.GetGeoTransform()
    if got_gt is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'
    expected_gt = (-3.498749944898817, 0.0025000042385525173, 0.0,
                   46.61749818589952, 0.0, -0.001666598849826389)
    if got_gt != expected_gt:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(got_gt), str(expected_gt)))
        return 'fail'
    return 'success'
###############################################################################
# test for reading gaussian grid (bugs #4513 and #5118)
def netcdf_37():
    """Test reading a gaussian grid (bugs #4513 and #5118)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ifile = 'data/reduce-cgcms.nc'

    # Open quietly: the file is expected to emit warnings.
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = gdal.Open(ifile)
    gdal.PopErrorHandler()
    if ds is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    gt = ds.GetGeoTransform()
    if gt is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'
    gt_expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063)
    if gt != gt_expected:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
        return 'fail'

    # The gaussian latitudes must be exposed as a 1D geolocation array.
    md = ds.GetMetadata('GEOLOCATION2')
    if not md or 'Y_VALUES' not in md:
        gdaltest.post_reason('did not get 1D geolocation')
        return 'fail'
    y_vals = md['Y_VALUES']
    if not y_vals.startswith('{-87.15909455586265,-83.47893666931698,') \
            or not y_vals.endswith(',83.47893666931698,87.15909455586265}'):
        gdaltest.post_reason('got incorrect values in 1D geolocation')
        return 'fail'
    return 'success'
###############################################################################
# test for correct geotransform of projected data in km units (bug #5118)
def netcdf_38():
    """Check geotransform of projected data in km units (bug #5118)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    gdal.PushErrorHandler('CPLQuietErrorHandler')
    dataset = gdal.Open('data/bug5118.nc')
    gdal.PopErrorHandler()
    if dataset is None:
        gdaltest.post_reason('open failed')
        return 'fail'

    transform = dataset.GetGeoTransform()
    if transform is None:
        gdaltest.post_reason('got no GeoTransform')
        return 'fail'
    expected = (-1659.3478178136488, 13.545000861672793, 0.0,
                2330.054725283668, 0.0, -13.54499744233631)
    if transform != expected:
        gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(transform), str(expected)))
        return 'fail'
    return 'success'
###############################################################################
# Test VRT and NETCDF:
def netcdf_39():
    """Test VRT CreateCopy of NETCDF: subdatasets with various path spellings.

    The same checksum check is run for four spellings of the subdataset name
    (unquoted relative, quoted relative, quoted absolute with absolute VRT
    path, quoted absolute with relative VRT path).  The original code repeated
    the whole stanza four times and mixed ``del out_ds`` with ``out_ds = None``;
    it is factored into a helper here.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    def _vrt_checksum(subdataset_name, vrt_create_name):
        # Copy the subdataset to a VRT, release the source handles, then
        # reopen the VRT and return band 1's checksum.
        src_ds = gdal.Open(subdataset_name)
        out_ds = gdal.GetDriverByName('VRT').CreateCopy(vrt_create_name, src_ds)
        out_ds = None
        src_ds = None
        ds = gdal.Open('tmp/netcdf_39.vrt')
        cs = ds.GetRasterBand(1).Checksum()
        ds = None
        return cs

    # unquoted relative path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = _vrt_checksum('NETCDF:tmp/two_vars_scale_offset.nc:z', 'tmp/netcdf_39.vrt')
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted relative path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = _vrt_checksum('NETCDF:"tmp/two_vars_scale_offset.nc":z', 'tmp/netcdf_39.vrt')
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted absolute source path, absolute VRT path
    shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
    cs = _vrt_checksum('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd(),
                       '%s/tmp/netcdf_39.vrt' % os.getcwd())
    gdal.Unlink('tmp/two_vars_scale_offset.nc')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    # quoted absolute source path (original data dir), relative VRT path
    cs = _vrt_checksum('NETCDF:"%s/data/two_vars_scale_offset.nc":z' % os.getcwd(),
                       'tmp/netcdf_39.vrt')
    gdal.Unlink('tmp/netcdf_39.vrt')
    if cs != 65463:
        gdaltest.post_reason('failure')
        print(cs)
        return 'fail'

    return 'success'
###############################################################################
# Check support of reading of chunked bottom-up files.
def netcdf_40():
    """Check support of reading chunked bottom-up files (bug #5291)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    return netcdf_test_copy('data/bug5291.nc', 0, None, 'tmp/netcdf_40.nc')
###############################################################################
# Test support for georeferenced file without CF convention
def netcdf_41():
    """Test support for a georeferenced file without CF convention."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    with gdaltest.error_handler():
        ds = gdal.Open('data/byte_no_cf.nc')
    if ds.GetGeoTransform() != (440720, 60, 0, 3751320, 0, -60):
        gdaltest.post_reason('failure')
        print(ds.GetGeoTransform())
        return 'fail'
    if ds.GetProjectionRef().find('26711') < 0:
        gdaltest.post_reason('failure')
        # Bug fix: print the projection that was actually tested, not the
        # geotransform (the original copy-pasted the previous print).
        print(ds.GetProjectionRef())
        return 'fail'
    return 'success'
###############################################################################
# Test writing & reading GEOLOCATION array
def netcdf_42():
    # Test writing & reading a GEOLOCATION array: build an in-memory dataset
    # carrying GEOLOCATION metadata that points at an external geolocation
    # raster, copy it to netCDF, then verify the copy rewrites
    # X_DATASET/Y_DATASET to its own lon/lat variables.
    if gdaltest.netcdf_drv is None:
        return 'skip'

    src_ds = gdal.GetDriverByName('MEM').Create('', 60, 39, 1)
    src_ds.SetMetadata( [
        'LINE_OFFSET=0',
        'LINE_STEP=1',
        'PIXEL_OFFSET=0',
        'PIXEL_STEP=1',
        'SRS=GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]',
        'X_BAND=1',
        'X_DATASET=../gcore/data/sstgeo.tif',
        'Y_BAND=2',
        'Y_DATASET=../gcore/data/sstgeo.tif'], 'GEOLOCATION' )
    # Give the dataset itself a projected SRS (EPSG:32631); the GEOLOCATION
    # domain carries its own geographic SRS independently.
    sr = osr.SpatialReference()
    sr.ImportFromEPSG(32631)
    src_ds.SetProjection(sr.ExportToWkt())

    gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_42.nc', src_ds)

    ds = gdal.Open('tmp/netcdf_42.nc')
    # The copy must reference its own lon/lat subdatasets (not the original
    # sstgeo.tif) and keep the geographic SRS in the GEOLOCATION domain.
    if ds.GetMetadata('GEOLOCATION') != {
        'LINE_OFFSET': '0',
        'X_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lon',
        'PIXEL_STEP': '1',
        'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
        'PIXEL_OFFSET': '0',
        'X_BAND': '1',
        'LINE_STEP': '1',
        'Y_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lat',
        'Y_BAND': '1'}:
        gdaltest.post_reason('failure')
        print(ds.GetMetadata('GEOLOCATION'))
        return 'fail'

    # The written lon/lat variables must match the expected checksums.
    ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lon')
    if ds.GetRasterBand(1).Checksum() != 36043:
        gdaltest.post_reason('failure')
        print(ds.GetRasterBand(1).Checksum())
        return 'fail'
    ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lat')
    if ds.GetRasterBand(1).Checksum() != 33501:
        gdaltest.post_reason('failure')
        print(ds.GetRasterBand(1).Checksum())
        return 'fail'
    return 'success'
###############################################################################
# Test reading GEOLOCATION array from geotransform (non default)
def netcdf_43():
    # Test that WRITE_LONLAT=YES on a projected source produces a GEOLOCATION
    # array derived from the geotransform (non-default case).
    if gdaltest.netcdf_drv is None:
        return 'skip'

    src_ds = gdal.Open('data/byte.tif')
    gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_43.nc', src_ds, options = ['WRITE_LONLAT=YES'] )

    ds = gdal.Open('tmp/netcdf_43.nc')
    # The copy must expose lon/lat subdatasets of its own as the geolocation
    # arrays, with a WGS 84 geographic SRS in the GEOLOCATION domain.
    if ds.GetMetadata('GEOLOCATION') != {
        'LINE_OFFSET': '0',
        'X_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lon',
        'PIXEL_STEP': '1',
        'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
        'PIXEL_OFFSET': '0',
        'X_BAND': '1',
        'LINE_STEP': '1',
        'Y_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lat',
        'Y_BAND': '1'}:
        gdaltest.post_reason('failure')
        print(ds.GetMetadata('GEOLOCATION'))
        return 'fail'
    return 'success'
###############################################################################
# Test NC_USHORT/UINT read/write - netcdf-4 only (#6337)
def netcdf_44():
    """Test NC_USHORT/NC_UINT read/write, netcdf-4 only (bug #6337)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    for filename, checksum in [('data/ushort.nc', 18), ('data/uint.nc', 10)]:
        result = netcdf_test_copy(filename, 1, checksum, 'tmp/netcdf_44.nc', ['FORMAT=NC4'])
        if result != 'success':
            return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file
def netcdf_45():
    # Test reading a vector NetCDF 3 file: raster/vector open-mode exclusivity
    # plus a byte-for-byte CSV/CSVT dump of all supported NC3 field types.
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # Test that a vector cannot be opened in raster-only mode
    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    # Test that a raster cannot be opened in vector-only mode
    ds = gdal.OpenEx( 'data/cf-bug636.nc', gdal.OF_VECTOR )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    # Serialize the layer into /vsimem as CSV + CSVT for comparison.
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_45.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )

    # NOTE(review): if the CSV failed to open, 'content' would be unbound in
    # the comparison below — presumably VectorTranslate always succeeds here.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Check the declared field types too.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_45.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_45.csv')
    gdal.Unlink('/vsimem/netcdf_45.csvt')
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file
def netcdf_46():
    """Run test_ogrsf against the NetCDF 3 vector test file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if test_cli_utilities.get_test_ogrsf_path() is None:
        return 'skip'

    output = gdaltest.runexternal(
        test_cli_utilities.get_test_ogrsf_path() + ' -ro data/test_ogr_nc3.nc')
    if 'INFO' not in output or 'ERROR' in output:
        print(output)
        return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 4 file
def netcdf_47():
    # Test reading a vector NetCDF 4 file: vector files must refuse
    # raster-only open, and the CSV/CSVT dump must cover the NC4-only
    # unsigned types (ubyte, ushort, uint, uint64).
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    # Test that a vector cannot be opened in raster-only mode
    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'

    ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
    # Serialize the layer into /vsimem as CSV + CSVT for comparison.
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_47.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )

    # NOTE(review): 'content' would be unbound if the open failed —
    # presumably VectorTranslate always produces the file here.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Check the declared field types too.
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_47.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_47.csv')
    gdal.Unlink('/vsimem/netcdf_47.csvt')
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file without any geometry
def netcdf_48():
    """Read a vector NetCDF 3 file that has no X/Y/Z geometry variables."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    with gdaltest.error_handler():
        ds = gdal.OpenEx('data/test_ogr_no_xyz_var.nc', gdal.OF_VECTOR)
    layer = ds.GetLayer(0)
    # Without coordinate variables the layer must be geometry-less.
    if layer.GetGeomType() != ogr.wkbNone:
        gdaltest.post_reason('failure')
        return 'fail'
    feat = layer.GetNextFeature()
    if feat['int32'] != 1:
        gdaltest.post_reason('failure')
        return 'fail'
    return 'success'
###############################################################################
# Test reading a vector NetCDF 3 file with X,Y,Z vars as float
def netcdf_49():
    """Read a vector NetCDF 3 file whose X,Y,Z variables are float typed."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    with gdaltest.error_handler():
        ds = gdal.OpenEx('data/test_ogr_xyz_float.nc', gdal.OF_VECTOR)
        gdal.VectorTranslate('/vsimem/netcdf_49.csv', ds, format='CSV',
                             layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'])

    handle = gdal.VSIFOpenL('/vsimem/netcdf_49.csv', 'rb')
    if handle is not None:
        content = gdal.VSIFReadL(1, 10000, handle).decode('ascii')
        gdal.VSIFCloseL(handle)
    expected_content = """WKT,int32
"POINT Z (1 2 3)",1
"POINT (1 2)",
,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('/vsimem/netcdf_49.csv')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with WKT geometry field
def netcdf_50():
    """Create a vector NetCDF 3 file with a WKT geometry field and read it back."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    src_ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
    # WKT_DEFAULT_WIDTH=1 forces the WKT variable to auto-grow.
    out_ds = gdal.VectorTranslate('tmp/netcdf_50.nc', src_ds, format='netCDF',
                                  layerCreationOptions=['WKT_DEFAULT_WIDTH=1'])

    # The first feature of the freshly written datasource must round-trip.
    src_lyr = src_ds.GetLayer(0)
    src_lyr.ResetReading()
    out_lyr = out_ds.GetLayer(0)
    out_lyr.ResetReading()
    src_feat = src_lyr.GetNextFeature()
    out_feat = out_lyr.GetNextFeature()
    src_feat.SetFID(-1)
    out_feat.SetFID(-1)
    src_json = src_feat.ExportToJson()
    out_json = out_feat.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    # Reopen from disk: SRS and feature content must be preserved.
    out_ds = gdal.OpenEx('tmp/netcdf_50.nc', gdal.OF_VECTOR)
    out_lyr = out_ds.GetLayer(0)
    srs_wkt = out_lyr.GetSpatialRef().ExportToWkt()
    if srs_wkt.find('PROJCS["OSGB 1936') < 0:
        gdaltest.post_reason('failure')
        print(srs_wkt)
        return 'fail'
    out_feat = out_lyr.GetNextFeature()
    out_feat.SetFID(-1)
    out_json = out_feat.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    gdal.Unlink('tmp/netcdf_50.nc')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields
def netcdf_51():
    """Test creating a vector NetCDF 3 file with X,Y,Z fields.

    Round-trips data/test_ogr_nc3.nc through the netCDF vector writer with a
    tiny STRING_DEFAULT_WIDTH (so string auto-grow is exercised), checks the
    CSV/CSVT serialization, appends a feature with extra fields, and runs the
    CF compliance checker when available.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    # Test autogrow of string fields
    gdal.VectorTranslate( 'tmp/netcdf_51.nc', ds, format = 'netCDF', layerCreationOptions = [ 'STRING_DEFAULT_WIDTH=1'] )

    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_51.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
    ds = None

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Append a feature carrying two new fields to the existing file.
    ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
    lyr.CreateField( ogr.FieldDefn('extra_str', ogr.OFTString) )
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    f['extra'] = 5
    f['extra_str'] = 'foobar'
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # Re-read the appended feature (assumes 1-based FIDs, so GetFeatureCount()
    # addresses the last feature — TODO confirm).
    ds = gdal.OpenEx( 'tmp/netcdf_51.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    if f['int32'] != 1 or f['extra'] != 5 or f['extra_str'] != 'foobar':
        gdaltest.post_reason('failure')
        return 'fail'
    f = None
    ds = None

    import netcdf_cf
    if netcdf_cf.netcdf_cf_setup() == 'success' and \
       gdaltest.netcdf_cf_method is not None:
        result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_51.nc','auto',False )
        if result_cf != 'success':
            gdaltest.post_reason('failure')
            return 'fail'

    gdal.Unlink('tmp/netcdf_51.nc')
    # Bug fix: the CSV/CSVT files were written to /vsimem, not tmp/, so the
    # original unlink calls left them behind.
    gdal.Unlink('/vsimem/netcdf_51.csv')
    gdal.Unlink('/vsimem/netcdf_51.csvt')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields with WRITE_GDAL_TAGS=NO
def netcdf_51_no_gdal_tags():
    """Same round-trip as netcdf_51 but with WRITE_GDAL_TAGS=NO.

    Without GDAL tags the reader must infer field types from the netCDF
    variables alone (note e.g. Integer64 falling back to Real in the CSVT,
    and the 'x1' field name).
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc3.nc', gdal.OF_VECTOR )
    gdal.VectorTranslate( 'tmp/netcdf_51_no_gdal_tags.nc', ds, format = 'netCDF', datasetCreationOptions = [ 'WRITE_GDAL_TAGS=NO'] )

    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_51_no_gdal_tags.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_51_no_gdal_tags.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
    ds = None

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x1,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_51_no_gdal_tags.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String(10),Date,DateTime,DateTime,Real,Real,Integer,Integer,Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    gdal.Unlink('tmp/netcdf_51_no_gdal_tags.nc')
    # Bug fix: the CSV/CSVT files were written to /vsimem, not tmp/, so the
    # original unlink calls left them behind.
    gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csv')
    gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csvt')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with X,Y,Z fields
def netcdf_52():
    """Test creating a vector NetCDF 4 file with X,Y,Z fields.

    Same structure as netcdf_51 but with FORMAT=NC4, which adds the unsigned
    types (ubyte, ushort, uint, uint64) to the serialization check.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ds = gdal.OpenEx( 'data/test_ogr_nc4.nc', gdal.OF_VECTOR )
    gdal.VectorTranslate( 'tmp/netcdf_52.nc', ds, format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'] )

    with gdaltest.error_handler():
        ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
        gdal.VectorTranslate( '/vsimem/netcdf_52.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT'] )
    ds = None

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    fp = gdal.VSIFOpenL( '/vsimem/netcdf_52.csvt', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'

    # Append a feature carrying a new field to the existing file.
    ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR | gdal.OF_UPDATE )
    lyr = ds.GetLayer(0)
    lyr.CreateField( ogr.FieldDefn('extra', ogr.OFTInteger) )
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    f['extra'] = 5
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # Re-read the appended feature (assumes 1-based FIDs, so GetFeatureCount()
    # addresses the last feature — TODO confirm).
    ds = gdal.OpenEx( 'tmp/netcdf_52.nc', gdal.OF_VECTOR )
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    if f['int32'] != 1 or f['extra'] != 5:
        gdaltest.post_reason('failure')
        return 'fail'
    f = None
    ds = None

    import netcdf_cf
    if netcdf_cf.netcdf_cf_setup() == 'success' and \
       gdaltest.netcdf_cf_method is not None:
        result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_52.nc','auto',False )
        if result_cf != 'success':
            gdaltest.post_reason('failure')
            return 'fail'

    gdal.Unlink('tmp/netcdf_52.nc')
    # Bug fix: the CSV/CSVT files were written to /vsimem, not tmp/, so the
    # original unlink calls left them behind.
    gdal.Unlink('/vsimem/netcdf_52.csv')
    gdal.Unlink('/vsimem/netcdf_52.csvt')
    return 'success'
###############################################################################
# Test creating a vector NetCDF 4 file with WKT geometry field
def netcdf_53():
    """Create a vector NetCDF 4 file with a WKT geometry field and read it back."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    src_ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
    out_ds = gdal.VectorTranslate('tmp/netcdf_53.nc', src_ds, format='netCDF',
                                  datasetCreationOptions=['FORMAT=NC4'])

    # The first feature of the freshly written datasource must round-trip.
    src_lyr = src_ds.GetLayer(0)
    src_lyr.ResetReading()
    out_lyr = out_ds.GetLayer(0)
    out_lyr.ResetReading()
    src_feat = src_lyr.GetNextFeature()
    out_feat = out_lyr.GetNextFeature()
    src_feat.SetFID(-1)
    out_feat.SetFID(-1)
    src_json = src_feat.ExportToJson()
    out_json = out_feat.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    # Reopen from disk: SRS and feature content must be preserved.
    out_ds = gdal.OpenEx('tmp/netcdf_53.nc', gdal.OF_VECTOR)
    out_lyr = out_ds.GetLayer(0)
    srs_wkt = out_lyr.GetSpatialRef().ExportToWkt()
    if srs_wkt.find('PROJCS["OSGB 1936') < 0:
        gdaltest.post_reason('failure')
        print(srs_wkt)
        return 'fail'
    out_feat = out_lyr.GetNextFeature()
    out_feat.SetFID(-1)
    out_json = out_feat.ExportToJson()
    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    out_ds = None

    gdal.Unlink('tmp/netcdf_53.nc')
    return 'success'
###############################################################################
# Test appending to a vector NetCDF 4 file with unusual types (ubyte, ushort...)
def netcdf_54():
    """Append to a vector NetCDF 4 file with unusual types (ubyte, ushort, ...)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    shutil.copy('data/test_ogr_nc4.nc', 'tmp/netcdf_54.nc')

    ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    f['int32'] += 1
    f.SetFID(-1)
    # Snapshot the modified feature before appending it.
    # (Fix: the original called ExportToJson() twice; the first result was
    # discarded.)
    src_json = f.ExportToJson()
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # Re-read the appended feature and compare with the snapshot.
    ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR)
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    f.SetFID(-1)
    out_json = f.ExportToJson()
    f = None
    ds = None
    gdal.Unlink('tmp/netcdf_54.nc')

    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    return 'success'
###############################################################################
# Test auto-grow of bidimensional char variables in a vector NetCDF 4 file
def netcdf_55():
    """Test auto-grow of bidimensional char variables in a vector NetCDF 4 file."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    shutil.copy('data/test_ogr_nc4.nc', 'tmp/netcdf_55.nc')

    ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
    lyr = ds.GetLayer(0)
    f = lyr.GetNextFeature()
    if f is None:
        gdaltest.post_reason('failure')
        return 'fail'
    # Longer than the existing char dimension, forcing the variable to grow.
    f['twodimstringchar'] = 'abcd'
    f.SetFID(-1)
    # Snapshot the modified feature before appending it.
    # (Fix: the original called ExportToJson() twice; the first result was
    # discarded.)
    src_json = f.ExportToJson()
    if lyr.CreateFeature(f) != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    # Re-read the appended feature and compare with the snapshot.
    ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR)
    lyr = ds.GetLayer(0)
    f = lyr.GetFeature(lyr.GetFeatureCount())
    f.SetFID(-1)
    out_json = f.ExportToJson()
    f = None
    ds = None
    gdal.Unlink('tmp/netcdf_55.nc')

    if src_json != out_json:
        gdaltest.post_reason('failure')
        print(src_json)
        print(out_json)
        return 'fail'
    return 'success'
###############################################################################
# Test truncation of bidimensional char variables and WKT in a vector NetCDF 3 file
def netcdf_56():
    """Truncation of 2D char variables and WKT when AUTOGROW_STRINGS=NO."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_56.nc')
    # Width-limited layer with auto-grow disabled: oversized strings and WKT
    # must be truncated rather than grown.
    lyr = ds.CreateLayer('netcdf_56',
                         options=['AUTOGROW_STRINGS=NO',
                                  'STRING_DEFAULT_WIDTH=5',
                                  'WKT_DEFAULT_WIDTH=5'])
    lyr.CreateField(ogr.FieldDefn('txt'))
    feat = ogr.Feature(lyr.GetLayerDefn())
    feat['txt'] = '0123456789'
    feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
    with gdaltest.error_handler():
        ret = lyr.CreateFeature(feat)
    if ret != 0:
        gdaltest.post_reason('failure')
        return 'fail'
    ds = None

    ds = gdal.OpenEx('tmp/netcdf_56.nc', gdal.OF_VECTOR)
    lyr = ds.GetLayer(0)
    feat = lyr.GetFeature(lyr.GetFeatureCount())
    if feat['txt'] != '01234' or feat.GetGeometryRef() is not None:
        gdaltest.post_reason('failure')
        feat.DumpReadable()
        return 'fail'
    ds = None

    gdal.Unlink('tmp/netcdf_56.nc')
    return 'success'
###############################################################################
# Test one layer per file creation
def netcdf_57():
    """Test one-layer-per-file creation (MULTIPLE_LAYERS=SEPARATE_FILES)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    try:
        shutil.rmtree('tmp/netcdf_57')
    except OSError:
        pass

    # Creating the datasource in a non-existing directory must fail.
    with gdaltest.error_handler():
        ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    if ds is not None:
        gdaltest.post_reason('failure')
        return 'fail'

    # Creating the datasource over an existing regular file must fail too.
    # (Bug fix: the original repeated the non-existing-dir path here, which
    # left the 'tmp/netcdf_57' placeholder file untested.)
    open('tmp/netcdf_57', 'wb').close()
    with gdaltest.error_handler():
        ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    if ds is not None:
        gdaltest.post_reason('failure')
        return 'fail'
    os.unlink('tmp/netcdf_57')

    # Normal case: each layer must land in its own .nc file inside the dir.
    ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options = ['MULTIPLE_LAYERS=SEPARATE_FILES'])
    for ilayer in range(2):
        lyr = ds.CreateLayer('lyr%d' % ilayer)
        lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTInteger))
        f = ogr.Feature(lyr.GetLayerDefn())
        f['lyr_id'] = ilayer
        lyr.CreateFeature(f)
    ds = None

    for ilayer in range(2):
        ds = ogr.Open('tmp/netcdf_57/lyr%d.nc' % ilayer)
        lyr = ds.GetLayer(0)
        f = lyr.GetNextFeature()
        if f['lyr_id'] != ilayer:
            gdaltest.post_reason('failure')
            return 'fail'
        ds = None

    shutil.rmtree('tmp/netcdf_57')
    return 'success'
###############################################################################
# Test one layer per group (NC4)
def netcdf_58():
    """Test one layer per group (NC4, MULTIPLE_LAYERS=SEPARATE_GROUPS)."""
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'

    ds = ogr.GetDriverByName('netCDF').CreateDataSource(
        'tmp/netcdf_58.nc', options=['FORMAT=NC4', 'MULTIPLE_LAYERS=SEPARATE_GROUPS'])
    for idx in range(2):
        # Width of 1 guarantees auto-grow happens, checking it works per group.
        layer = ds.CreateLayer('lyr%d' % idx, geom_type=ogr.wkbNone,
                               options=['USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1'])
        layer.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTString))
        feat = ogr.Feature(layer.GetLayerDefn())
        feat['lyr_id'] = 'lyr_%d' % idx
        layer.CreateFeature(feat)
    ds = None

    ds = ogr.Open('tmp/netcdf_58.nc')
    for idx in range(2):
        layer = ds.GetLayer(idx)
        feat = layer.GetNextFeature()
        if feat['lyr_id'] != 'lyr_%d' % idx:
            gdaltest.post_reason('failure')
            return 'fail'
    ds = None

    gdal.Unlink('tmp/netcdf_58.nc')
    return 'success'
###############################################################################
#check for UnitType set/get.
def netcdf_59():
    """Check UnitType get/set on a netCDF raster band."""
    if gdaltest.netcdf_drv is None:
        return 'skip'

    # get
    dataset = gdal.Open('data/unittype.nc')
    unit = dataset.GetRasterBand(1).GetUnitType()
    dataset = None
    if unit != 'm/s':
        gdaltest.post_reason('Incorrect unit(%s)' % unit)
        return 'fail'

    # set
    test_obj = gdaltest.GDALTest('NetCDF', 'unittype.nc', 1, 4672)
    return test_obj.testSetUnitType()
###############################################################################
# Test reading a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles
def netcdf_60():
    # Read an "Indexed ragged array representation of profiles" (CF-1.6 H3.5)
    # file and compare the CSV-translated content against the expected rows.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # Test that a vector cannot be opened in raster-only mode
    ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_RASTER )
    if ds is not None:
        gdaltest.post_reason('fail')
        return 'fail'
    ds = gdal.OpenEx( 'data/profile.nc', gdal.OF_VECTOR)
    if ds is None:
        gdaltest.post_reason('fail')
        return 'fail'
    # error_handler(): the CSV translation may emit warnings we want silenced.
    with gdaltest.error_handler():
        gdal.VectorTranslate( '/vsimem/netcdf_60.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_60.csv', 'rb' )
    # NOTE(review): if fp is None, 'content' stays unbound and the comparison
    # below raises NameError instead of reporting a clean failure.
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_60.csv')
    return 'success'
###############################################################################
# Test appending to a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_61():
    # Append to an "Indexed ragged array representation of profiles"
    # (CF-1.6 H3.5) file: copying the same source in append mode must
    # double the feature set.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    shutil.copy('data/profile.nc', 'tmp/netcdf_61.nc')
    ds = gdal.VectorTranslate( 'tmp/netcdf_61.nc', 'data/profile.nc', accessMode = 'append' )
    gdal.VectorTranslate( '/vsimem/netcdf_61.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    # Close the dataset before removing the underlying file below.
    ds = None
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_61.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    # Original rows followed by the appended copy of the same rows.
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_61.csv')
    # BUGFIX: the original unlinked '/vsimem/netcdf_61.nc', a path that was
    # never created; the real artifact is 'tmp/netcdf_61.nc'.
    gdal.Unlink('tmp/netcdf_61.nc')
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_62():
    # Create an "Indexed ragged array representation of profiles" file from
    # scratch (FEATURE_TYPE=PROFILE) and verify the round-tripped CSV content.
    # PROFILE_DIM_INIT_SIZE=1 forces the profile dimension to auto-grow.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    ds = gdal.VectorTranslate( 'tmp/netcdf_62.nc', 'data/profile.nc', format = 'netCDF', layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_INIT_SIZE=1', 'PROFILE_VARIABLES=station'] )
    gdal.VectorTranslate( '/vsimem/netcdf_62.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_62.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_62.csv')
    # 'tmp/netcdf_62.nc' is deliberately left on disk: it is consumed by
    # netcdf_62_ncdump_check() and netcdf_62_cf_check() further down.
    return 'success'
def netcdf_62_ncdump_check():
    # Inspect the header of the file written by netcdf_62() with the external
    # 'ncdump' tool; skipped when ncdump is not installed.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # get file header with ncdump (if available)
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except:
        err = None
    # 'ncdump -h' with no file prints a usage/version banner on stderr; its
    # presence tells us the tool exists.
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_62.nc' )
        # Expected CF-1.6 profile layout: fixed 'profile' instance dimension
        # of size 2, unlimited record dimension, and the ragged-array
        # linking attributes (cf_role / instance_dimension / featureType).
        if ret.find('profile = 2') < 0 or \
           ret.find('record = UNLIMITED') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0 or \
           ret.find('char station(profile') < 0 or \
           ret.find('char foo(record') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        return 'skip'
    return 'success'
def netcdf_62_cf_check():
    # Validate netcdf_62()'s output with the CF compliance checker, when one
    # has been configured via netcdf_cf_setup().
    if gdaltest.netcdf_drv is None:
        return 'skip'
    import netcdf_cf
    if netcdf_cf.netcdf_cf_setup() == 'success' and \
       gdaltest.netcdf_cf_method is not None:
        result_cf = netcdf_cf.netcdf_cf_check_file( 'tmp/netcdf_62.nc','auto',False )
        if result_cf != 'success':
            gdaltest.post_reason('failure')
            return 'fail'
    # NOTE(review): this unlinks a '/vsimem/' path that was never created;
    # the real artifact 'tmp/netcdf_62.nc' is removed later by clean_tmp().
    gdal.Unlink('/vsimem/netcdf_62.nc')
    return 'success'
###############################################################################
# Test creating a NC4 "Indexed ragged array representation of profiles" v1.6.0 H3.5
def netcdf_63():
    # Create a NC4 "Indexed ragged array representation of profiles" file
    # (CF-1.6 H3.5) and verify the round-tripped CSV content.
    # USE_STRING_IN_NC4=NO + STRING_DEFAULT_WIDTH=1 force char arrays that
    # must auto-grow when the longer station names are written.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    shutil.copy('data/profile.nc', 'tmp/netcdf_63.nc')
    ds = gdal.VectorTranslate( 'tmp/netcdf_63.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['FORMAT=NC4'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1' ] )
    gdal.VectorTranslate( '/vsimem/netcdf_63.csv', ds, format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_63.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_63.csv')
    # 'tmp/netcdf_63.nc' is left on disk for netcdf_63_ncdump_check() below.
    return 'success'
def netcdf_63_ncdump_check():
    # Inspect netcdf_63()'s NC4 output with the external 'ncdump' tool;
    # skipped when ncdump is not installed.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # get file header with ncdump (if available)
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except:
        err = None
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_63.nc' )
        # In the NC4 variant both the profile and record dimensions are
        # unlimited, and 'station' is indexed by the record dimension.
        if ret.find('profile = UNLIMITED') < 0 or \
           ret.find('record = UNLIMITED') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0 or \
           ret.find('char station(record') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        # NOTE(review): both Unlink calls below target a '/vsimem/' path
        # that was never created; the real artifact is 'tmp/netcdf_63.nc',
        # removed later by clean_tmp().
        gdal.Unlink('/vsimem/netcdf_63.nc')
        return 'skip'
    gdal.Unlink('/vsimem/netcdf_63.nc')
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# but without a profile field.
def netcdf_64():
    # Create an "Indexed ragged array representation of profiles" file while
    # dropping the profile field from the selected columns: the driver must
    # synthesize the profile index (named via PROFILE_DIM_NAME) itself.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    gdal.VectorTranslate( 'tmp/netcdf_64.nc', 'data/profile.nc', format = 'netCDF', selectFields = ['id,station,foo'], layerCreationOptions = ['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_NAME=profile_dim', 'PROFILE_DIM_INIT_SIZE=1'] )
    gdal.VectorTranslate( '/vsimem/netcdf_64.csv', 'tmp/netcdf_64.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_64.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    # The synthesized profile ids are 0-based, unlike the source's 1-based
    # 'profile' column.
    expected_content = """WKT,profile_dim,id,station,foo
"POINT Z (2 49 100)",0,1,Palo Alto,bar
"POINT Z (3 50 50)",1,2,Santa Fe,baz
"POINT Z (2 49 200)",0,3,Palo Alto,baw
"POINT Z (3 50 100)",1,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_64.csv')
    gdal.Unlink('/vsimem/netcdf_64.nc')
    return 'success'
###############################################################################
# Test creating a NC4 file with empty string fields / WKT fields
# (they must be filled as empty strings to avoid crashes in netcdf lib)
def netcdf_65():
    """NC4 creation with a fully unset string field.

    Unset string fields must be materialized as empty strings when written
    (leaving them unset can crash the netcdf library), and must read back
    as '' rather than NULL.
    """
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    # Write a single feature whose 'str' field is never assigned.
    out_ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_65.nc', options = ['FORMAT=NC4'])
    out_lyr = out_ds.CreateLayer('test')
    out_lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
    feat = ogr.Feature(out_lyr.GetLayerDefn())
    out_lyr.CreateFeature(feat)
    out_ds = None
    # Read it back: the field must come out as an empty string.
    in_ds = ogr.Open('tmp/netcdf_65.nc')
    feat = in_ds.GetLayer(0).GetNextFeature()
    if feat['str'] != '':
        gdaltest.post_reason('failure')
        feat.DumpReadable()
        return 'fail'
    in_ds = None
    gdal.Unlink('tmp/netcdf_65.nc')
    return 'success'
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# from a config file
def netcdf_66():
    # Create an "Indexed ragged array representation of profiles" file driven
    # by a CONFIG_FILE XML document (inline or on disk). First feed the driver
    # a series of invalid/degenerate configurations (which must only warn),
    # then a valid one whose renamings must show up in the CSV round-trip.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # First trying with no so good configs
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=not_existing'] )
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=<Configuration>'] )
    # Exercise every malformed element the parser must tolerate: missing
    # name/value attributes, unknown elements, unsupported attribute types,
    # and references to non-existing dimensions.
    myconfig = \
"""<Configuration>
    <!-- comment -->
    <unrecognized_elt/>
    <DatasetCreationOption/>
    <DatasetCreationOption name="x"/>
    <DatasetCreationOption value="x"/>
    <LayerCreationOption/>
    <LayerCreationOption name="x"/>
    <LayerCreationOption value="x"/>
    <Attribute/>
    <Attribute name="foo"/>
    <Attribute value="foo"/>
    <Attribute name="foo" value="bar" type="unsupported"/>
    <Field/>
    <Field name="x">
        <!-- comment -->
        <unrecognized_elt/>
    </Field>
    <Field name="station" main_dim="non_existing"/>
    <Layer/>
    <Layer name="x">
        <!-- comment -->
        <unrecognized_elt/>
        <LayerCreationOption/>
        <LayerCreationOption name="x"/>
        <LayerCreationOption value="x"/>
        <Attribute/>
        <Attribute name="foo"/>
        <Attribute value="foo"/>
        <Attribute name="foo" value="bar" type="unsupported"/>
        <Field/>
    </Layer>
</Configuration>
"""
    with gdaltest.error_handler():
        gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
    # Now with a correct configuration
    myconfig = \
"""<Configuration>
    <DatasetCreationOption name="WRITE_GDAL_TAGS" value="NO"/>
    <LayerCreationOption name="STRING_DEFAULT_WIDTH" value="1"/>
    <Attribute name="foo" value="bar"/>
    <Attribute name="foo2" value="bar2"/>
    <Field name="id">
        <Attribute name="my_extra_attribute" value="5.23" type="double"/>
    </Field>
    <Field netcdf_name="lon"> <!-- edit predefined variable -->
        <Attribute name="my_extra_lon_attribute" value="foo"/>
    </Field>
    <Layer name="profile" netcdf_name="my_profile">
        <LayerCreationOption name="FEATURE_TYPE" value="PROFILE"/>
        <LayerCreationOption name="RECORD_DIM_NAME" value="obs"/>
        <Attribute name="foo" value="123" type="integer"/> <!-- override global one -->
        <Field name="station" netcdf_name="my_station" main_dim="obs">
            <Attribute name="long_name" value="my station attribute"/>
        </Field>
        <Field netcdf_name="lat"> <!-- edit predefined variable -->
            <Attribute name="long_name" value=""/> <!-- remove predefined attribute -->
        </Field>
    </Layer>
</Configuration>
"""
    gdal.VectorTranslate( 'tmp/netcdf_66.nc', 'data/profile.nc', format = 'netCDF', datasetCreationOptions = ['CONFIG_FILE=' + myconfig] )
    gdal.VectorTranslate( '/vsimem/netcdf_66.csv', 'tmp/netcdf_66.nc', format = 'CSV', layerCreationOptions = ['LINEFORMAT=LF', 'GEOMETRY=AS_WKT'] )
    fp = gdal.VSIFOpenL( '/vsimem/netcdf_66.csv', 'rb' )
    if fp is not None:
        content = gdal.VSIFReadL( 1, 10000, fp ).decode('ascii')
        gdal.VSIFCloseL(fp)
    # The 'station' field must come back under its configured netcdf_name.
    expected_content = """WKT,profile,id,my_station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
    if content != expected_content:
        gdaltest.post_reason('failure')
        print(content)
        return 'fail'
    gdal.Unlink('/vsimem/netcdf_66.csv')
    # 'tmp/netcdf_66.nc' is left on disk for netcdf_66_ncdump_check() below.
    return 'success'
def netcdf_66_ncdump_check():
    # Inspect netcdf_66()'s output with the external 'ncdump' tool to verify
    # the config-file renamings and attribute edits landed in the file header;
    # skipped when ncdump is not installed.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    # get file header with ncdump (if available)
    try:
        (ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
    except:
        err = None
    if err is not None and 'netcdf library version' in err:
        (ret, err) = gdaltest.runexternal_out_and_err( 'ncdump -h tmp/netcdf_66.nc' )
        # Renamed variable, custom record dimension 'obs', added attributes,
        # and the removed predefined 'lat:long_name' attribute.
        if ret.find('char my_station(obs, my_station_max_width)') < 0 or \
           ret.find('my_station:long_name = "my station attribute"') < 0 or \
           ret.find('lon:my_extra_lon_attribute = "foo"') < 0 or \
           ret.find('lat:long_name') >= 0 or \
           ret.find('id:my_extra_attribute = 5.23') < 0 or \
           ret.find('profile:cf_role = "profile_id"') < 0 or \
           ret.find('parentIndex:instance_dimension = "profile"') < 0 or \
           ret.find(':featureType = "profile"') < 0:
            gdaltest.post_reason('failure')
            print(ret)
            return 'fail'
    else:
        # NOTE(review): both Unlink calls below target a '/vsimem/' path
        # that was never created; the real artifact is 'tmp/netcdf_66.nc',
        # removed later by clean_tmp().
        gdal.Unlink('/vsimem/netcdf_66.nc')
        return 'skip'
    gdal.Unlink('/vsimem/netcdf_66.nc')
    return 'success'
###############################################################################
# ticket #5950: optimize IReadBlock() and CheckData() handling of partial
# blocks in the x axischeck for partial block reading.
def netcdf_67():
    # ticket #5950 regression: IReadBlock()/CheckData() must handle partial
    # blocks along the x axis correctly; read a 3x3 file and compare the
    # whole raster against the expected values 1..9.
    if gdaltest.netcdf_drv is None:
        return 'skip'
    if not gdaltest.netcdf_drv_has_nc4:
        return 'skip'
    try:
        import numpy
    except:
        return 'skip'
    # disable bottom-up mode to use the real file's blocks size
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', 'NO' )
    # for the moment the next test using check_stat does not work, seems like
    # the last pixel (9) of the image is not handled by stats...
    # tst = gdaltest.GDALTest( 'NetCDF', 'partial_block_ticket5950.nc', 1, 45 )
    # result = tst.testOpen( check_stat=(1, 9, 5, 2.582) )
    # so for the moment compare the full image
    ds = gdal.Open( 'data/partial_block_ticket5950.nc', gdal.GA_ReadOnly )
    ref = numpy.arange(1, 10).reshape((3, 3))
    if numpy.array_equal(ds.GetRasterBand(1).ReadAsArray(), ref):
        result = 'success'
    else:
        result = 'fail'
    ds = None
    # Restore the default bottom-up handling for subsequent tests.
    gdal.SetConfigOption( 'GDAL_NETCDF_BOTTOMUP', None )
    return result
###############################################################################
###############################################################################
# main tests list
# Order matters: netcdf_62_ncdump_check/netcdf_62_cf_check read the file
# written by netcdf_62, and netcdf_63_ncdump_check / netcdf_66_ncdump_check
# read the files written by netcdf_63 / netcdf_66 respectively.
gdaltest_list = [
    netcdf_1,
    netcdf_2,
    netcdf_3,
    netcdf_4,
    netcdf_5,
    netcdf_6,
    netcdf_7,
    netcdf_8,
    netcdf_9,
    netcdf_10,
    netcdf_11,
    netcdf_12,
    netcdf_13,
    netcdf_14,
    netcdf_15,
    netcdf_16,
    netcdf_17,
    netcdf_18,
    netcdf_19,
    netcdf_20,
    netcdf_21,
    netcdf_22,
    netcdf_23,
    netcdf_24,
    netcdf_25,
    netcdf_26,
    netcdf_27,
    netcdf_28,
    netcdf_29,
    netcdf_30,
    netcdf_31,
    netcdf_32,
    netcdf_33,
    netcdf_34,
    netcdf_35,
    netcdf_36,
    netcdf_37,
    netcdf_38,
    netcdf_39,
    netcdf_40,
    netcdf_41,
    netcdf_42,
    netcdf_43,
    netcdf_44,
    netcdf_45,
    netcdf_46,
    netcdf_47,
    netcdf_48,
    netcdf_49,
    netcdf_50,
    netcdf_51,
    netcdf_51_no_gdal_tags,
    netcdf_52,
    netcdf_53,
    netcdf_54,
    netcdf_55,
    netcdf_56,
    netcdf_57,
    netcdf_58,
    netcdf_59,
    netcdf_60,
    netcdf_61,
    netcdf_62,
    netcdf_62_ncdump_check,
    netcdf_62_cf_check,
    netcdf_63,
    netcdf_63_ncdump_check,
    netcdf_64,
    netcdf_65,
    netcdf_66,
    netcdf_66_ncdump_check,
    netcdf_67
    ]
###############################################################################
# basic file creation tests
# Each entry: (source file, band, expected checksum, nodata, creation options).
init_list = [ \
    ('byte.tif', 1, 4672, None, []),
    ('byte_signed.tif', 1, 4672, None, ['PIXELTYPE=SIGNEDBYTE']),
    ('int16.tif', 1, 4672, None, []),
    ('int32.tif', 1, 4672, None, []),
    ('float32.tif', 1, 4672, None, []),
    ('float64.tif', 1, 4672, None, [])
    ]
# Some tests we don't need to do for each type.
item = init_list[0]
ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
#test geotransform and projection
gdaltest_list.append( (ut.testSetGeoTransform, item[0]) )
gdaltest_list.append( (ut.testSetProjection, item[0]) )
#SetMetadata() not supported
#gdaltest_list.append( (ut.testSetMetadata, item[0]) )
# Others we do for each pixel type.
for item in init_list:
    ut = gdaltest.GDALTest( 'netcdf', item[0], item[1], item[2], options=item[4] )
    # NOTE(review): this guard looks dead -- the constructor call above never
    # yields None, the message refers to 'GTiff' in a netCDF test file, and
    # there is no 'continue', so the appends below run regardless.
    if ut is None:
        print( 'GTiff tests skipped' )
    gdaltest_list.append( (ut.testCreateCopy, item[0]) )
    gdaltest_list.append( (ut.testCreate, item[0]) )
    gdaltest_list.append( (ut.testSetNoDataValue, item[0]) )
###############################################################################
# other tests
if __name__ == '__main__':
    # Standard gdaltest driver: run every registered test, then remove the
    # tmp/ artifacts the tests left behind and print the summary.
    gdaltest.setup_run( 'netcdf' )
    gdaltest.run_tests( gdaltest_list )
    #make sure we cleanup
    gdaltest.clean_tmp()
    gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/gdrivers/netcdf.py
|
Python
|
gpl-2.0
| 97,818
|
[
"Gaussian",
"NetCDF"
] |
23f9942bad9b1c849bd0717f32fdb57f0e81c96dd1a32054908751c6068efd29
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import numpy as np
from ..base import BaseEstimator
from ..utils import check_random_state
from ..utils.extmath import logsumexp, pinvh
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        List of n_features-dimensional data points. Each row corresponds to a
        single data point.
    means : array_like, shape (n_components, n_features)
        List of n_features-dimensional mean vectors for n_components Gaussians.
        Each row corresponds to a single mean vector.
    covars : array_like
        List of n_components covariance parameters for each Gaussian. The shape
        depends on `covariance_type`:
            (n_components, n_features)              if 'spherical',
            (n_features, n_features)                if 'tied',
            (n_components, n_features)              if 'diag',
            (n_components, n_features, n_features)  if 'full'
    covariance_type : string
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Array containing the log probabilities of each data point in
        X under each of the n_components multivariate Gaussian distributions.
    """
    # Dispatch to the specialized helper; an unknown covariance_type raises
    # KeyError, exactly as before.
    density_funcs = {
        'diag': _log_multivariate_normal_density_diag,
        'spherical': _log_multivariate_normal_density_spherical,
        'tied': _log_multivariate_normal_density_tied,
        'full': _log_multivariate_normal_density_full,
    }
    density = density_funcs[covariance_type]
    return density(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Generate random samples from a single Gaussian distribution.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution. The shape depends on
        `covariance_type`:
            scalar                     if 'spherical',
            (n_features,)              if 'diag',
            (n_features, n_features)   if 'tied' or 'full'
    covariance_type : string, optional
        Type of the covariance parameters.  Must be one of
        'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample
    """
    rng = check_random_state(random_state)
    n_features = len(mean)
    # Start from standard normal noise and "color" it with the covariance.
    noise = rng.randn(n_features, n_samples)
    if n_samples == 1:
        # Collapse to a 1-D vector for the single-sample case.
        noise.shape = (n_features,)
    if covariance_type == 'spherical':
        noise *= np.sqrt(covar)
    elif covariance_type == 'diag':
        noise = np.dot(np.diag(np.sqrt(covar)), noise)
    else:
        # 'tied' / 'full': multiply by a symmetric matrix square root of
        # covar obtained from its SVD.
        from scipy import linalg
        U, s, V = linalg.svd(covar)
        sqrt_cov = np.dot(U, np.dot(np.diag(np.sqrt(s)), V))
        noise = np.dot(sqrt_cov, noise)
    return (noise.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model

    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.

    Initializes parameters such that every mixture component has zero
    mean and identity covariance.

    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use.  Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting.  Defaults to 1e-3.
    thresh : float, optional
        Convergence threshold.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process.  Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars.  Defaults to 'wmc'.

    Attributes
    ----------
    `weights_` : array, shape (`n_components`,)
        This attribute stores the mixing weights for each mixture component.
    `means_` : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.
    `covars_` : array
        Covariance parameters for each mixture component.  The shape
        depends on `covariance_type`::
            (n_components, n_features)             if 'spherical',
            (n_features, n_features)               if 'tied',
            (n_components, n_features)             if 'diag',
            (n_components, n_features, n_features) if 'full'
    `converged_` : bool
        True when convergence was reached in fit(), False otherwise.

    See Also
    --------
    DPGMM : Ininite gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.75,  0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [  0.06]])
    >>> np.round(g.covars_, 2) #doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
    array([1, 1, 0, 0]...)
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] +  20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=0.01)
    >>> np.round(g.weights_, 2)
    array([ 0.5,  0.5])
    """

    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=1e-2, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc'):
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        # Uniform mixing weights until fit() estimates them.
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False

    def _get_covars(self):
        """Covariance parameters for each mixture component.

        Returns full (n_features, n_features) matrices regardless of
        `covariance_type`. The stored shape depends on `cvtype`::

            (`n_states`, 'n_features')                if 'spherical',
            (`n_features`, `n_features`)              if 'tied',
            (`n_states`, `n_features`)                if 'diag',
            (`n_states`, `n_features`, `n_features`)  if 'full'
        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]

    def _set_covars(self, covars):
        """Provide values for covariance (validated against covariance_type)."""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars

    def eval(self, X):
        """Evaluate the model on data

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob: array_like, shape (n_samples,)
            Log probabilities of each data point in X
        responsibilities: array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('the shape of X  is not compatible with self')

        # Joint log p(x, component) = log N(x | mu_k, Sigma_k) + log w_k.
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type)
               + np.log(self.weights_))
        # Marginalize over components in log space for numerical stability.
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities

    def score(self, X):
        """Compute the log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.eval(X)
        return logprob

    def predict(self, X):
        """Predict label for data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = (n_samples,)
        """
        logprob, responsibilities = self.eval(X)
        # Hard assignment: component with the highest responsibility.
        return responsibilities.argmax(axis=1)

    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.eval(X)
        return responsibilities

    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        weight_cdf = np.cumsum(self.weights_)

        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    # spherical covars_ rows repeat one variance per feature;
                    # the scalar is enough for sample_gaussian.
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X

    def fit(self, X):
        """Estimate model parameters with the expectation-maximization
        algorithm.

        A initialization step is performed before entering the em
        algorithm. If you want to avoid this step, set the keyword
        argument init_params to the empty string '' when creating the
        GMM object. Likewise, if you would like just to do an
        initialization, set n_iter=0.

        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.
        """
        ## initialization step
        X = np.asarray(X, dtype=np.float)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))

        max_log_prob = -np.infty

        # n_init independent restarts; the parameters of the best-scoring
        # run are kept at the end.
        for _ in range(self.n_init):
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_

            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)

            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)

            # EM algorithms
            log_likelihood = []
            # reset self.converged_ to False
            self.converged_ = False
            for i in range(self.n_iter):
                # Expectation step
                curr_log_likelihood, responsibilities = self.eval(X)
                log_likelihood.append(curr_log_likelihood.sum())

                # Check for convergence.
                if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < \
                        self.thresh:
                    self.converged_ = True
                    break

                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)

            # if the results are better, keep it
            if self.n_iter:
                if log_likelihood[-1] > max_log_prob:
                    max_log_prob = log_likelihood[-1]
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")
        # self.n_iter == 0 occurs when using GMM within HMM
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        return self

    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class weihgts.
        """
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        # EPS terms guard against division by zero for empty components.
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)

        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights

    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        ndim = self.means_.shape[1]
        if self.covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self.covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        # Weights contribute n_components - 1 free parameters (they sum to 1).
        return int(cov_params + mean_params + self.n_components - 1)

    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        bic: float (the lower the better)
        """
        return (-2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))

    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means=0.0, covars=1.0):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means=0.0, covars=1.0):
    """Compute Gaussian log-density at X for a spherical model.

    Expands the single per-component variance to full diagonal form and
    delegates to the diagonal implementation.
    """
    cv = covars.copy()
    if covars.ndim == 1:
        # (n_components,) -> (n_components, 1)
        cv = cv[:, np.newaxis]
    if covars.shape[1] == 1:
        # One variance per component: replicate it across every feature.
        # NOTE(review): this reads shape[1] of the *original* covars; a
        # 1-D covars input would raise IndexError here -- confirm callers
        # always pass a 2-D array.
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model
    (a single covariance matrix shared by all components).
    """
    from scipy import linalg
    n_samples, n_dim = X.shape
    # Pseudo-inverse of the shared covariance (pinvh is provided elsewhere
    # in this module's imports; robust for symmetric matrices).
    icv = pinvh(covars)
    # Quadratic form expanded so all samples/components are computed with
    # matrix products.
    # NOTE(review): the '+ 0.1' inside the log looks like an ad-hoc guard
    # against det(covars) == 0; it biases the log-density -- confirm intent.
    lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.log(linalg.det(covars) + 0.1)
                  + np.sum(X * np.dot(X, icv), 1)[:, np.newaxis]
                  - 2 * np.dot(np.dot(X, icv), means.T)
                  + np.sum(means * np.dot(means, icv), 1))
    return lpr
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
from scipy import linalg
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probabily stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape"
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Expand a single (tied) covariance template into the storage layout
    used by the requested covariance type.
    """
    if covariance_type == 'spherical':
        # One scalar variance per component, replicated across features.
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        # Shared matrix: returned as-is.
        return tied_cv
    if covariance_type == 'diag':
        # Keep only the diagonal, one row per component.
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        # A full copy of the template for every component.
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Covariance M-step for the spherical model.

    Averages the diagonal-model update over features to get one variance
    per component, then broadcasts it back across all features.
    """
    diag_cv = _covar_mstep_diag(*args)
    per_component = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Perform the covariance M-step for full covariance matrices.

    Returns an array of shape (n_components, n_features, n_features)
    holding the responsibility-weighted covariance of each component,
    regularized by min_covar on the diagonal.
    """
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    # Underflow errors in post * X.T are not important; the setting is
    # loop-invariant, so apply it once instead of on every iteration
    # (the original called np.seterr inside the loop).
    np.seterr(under='ignore')
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        # 10 * EPS guards against an empty component (post.sum() ~ 0).
        avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
        mu = gmm.means_[c][np.newaxis]
        cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
n_features = X.shape[1]
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
return (avg_X2 - avg_means2 + min_covar * np.eye(n_features)) / X.shape[0]
# Dispatch table mapping covariance_type -> covariance M-step routine;
# looked up by _do_mstep when updating covariances.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
|
florian-f/sklearn
|
sklearn/mixture/gmm.py
|
Python
|
bsd-3-clause
| 26,975
|
[
"Gaussian"
] |
7df36cfe3f86fb38f5c3aabe67dd77c758a58658070a2220572c16f2b34012cf
|
import deepchem as dc
import numpy as np
import unittest
import pytest
import tempfile
from flaky import flaky
try:
    import tensorflow as tf
    from tensorflow.keras.layers import Input, Concatenate, Dense

    class ExampleGAN(dc.models.GAN):
        """Minimal conditional GAN used by the tests below:
        2-D noise input, one 1-D data stream, one 1-D conditional input.
        """

        def get_noise_input_shape(self):
            return (2,)

        def get_data_input_shapes(self):
            return [(1,)]

        def get_conditional_input_shapes(self):
            return [(1,)]

        def create_generator(self):
            # Generator: concat(noise, condition) -> single linear unit.
            noise_input = Input(self.get_noise_input_shape())
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [noise_input, conditional_input]
            gen_in = Concatenate(axis=1)(inputs)
            output = Dense(1)(gen_in)
            return tf.keras.Model(inputs=inputs, outputs=output)

        def create_discriminator(self):
            # Discriminator: concat(data, condition) -> ReLU layer -> sigmoid score.
            data_input = Input(self.get_data_input_shapes()[0])
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [data_input, conditional_input]
            discrim_in = Concatenate(axis=1)(inputs)
            dense = Dense(10, activation=tf.nn.relu)(discrim_in)
            output = Dense(1, activation=tf.sigmoid)(dense)
            return tf.keras.Model(inputs=inputs, outputs=output)

    has_tensorflow = True
except:
    # NOTE(review): bare except -- any failure importing/initializing
    # TensorFlow flags it as unavailable rather than crashing collection;
    # presumably intentional, but it also hides unrelated errors. Confirm.
    has_tensorflow = False
@pytest.mark.tensorflow
def generate_batch(batch_size):
    """Draw training data from a Gaussian distribution, where the mean is a conditional input."""
    # NOTE(review): a pytest mark on a non-test helper has no effect --
    # presumably copied from the test functions; confirm it can be dropped.
    means = 10 * np.random.random([batch_size, 1])
    values = np.random.normal(means, scale=2.0)
    return means, values
@pytest.mark.tensorflow
def generate_data(gan, batches, batch_size):
    """Yield `batches` training dicts keyed by the GAN's data/conditional
    input tensors, drawing each batch from generate_batch."""
    for i in range(batches):
        means, values = generate_batch(batch_size)
        batch = {gan.data_inputs[0]: values, gan.conditional_inputs[0]: means}
        yield batch
@flaky
@pytest.mark.tensorflow
def test_cgan():
    """Test fitting a conditional GAN."""
    gan = ExampleGAN(learning_rate=0.01)
    gan.fit_gan(
        generate_data(gan, 500, 100), generator_steps=0.5, checkpoint_interval=0)
    # See if it has done a plausible job of learning the distribution:
    # generated values should track the conditional means with spread.
    means = 10 * np.random.random([1000, 1])
    values = gan.predict_gan_generator(conditional_inputs=[means])
    deltas = values - means
    assert abs(np.mean(deltas)) < 1.0
    assert np.std(deltas) > 1.0
    assert gan.get_global_step() == 500
@flaky
@pytest.mark.tensorflow
def test_cgan_reload():
    """Test reloading a conditional GAN."""
    model_dir = tempfile.mkdtemp()
    gan = ExampleGAN(learning_rate=0.01, model_dir=model_dir)
    gan.fit_gan(generate_data(gan, 500, 100), generator_steps=0.5)
    # See if it has done a plausible job of learning the distribution.
    means = 10 * np.random.random([1000, 1])
    batch_size = len(means)
    # Fix the noise so original and reloaded models can be compared exactly.
    noise_input = gan.get_noise_batch(batch_size=batch_size)
    values = gan.predict_gan_generator(
        noise_input=noise_input, conditional_inputs=[means])
    deltas = values - means
    assert abs(np.mean(deltas)) < 1.0
    assert np.std(deltas) > 1.0
    assert gan.get_global_step() == 500
    # Restoring from the checkpoint must reproduce identical outputs.
    reloaded_gan = ExampleGAN(learning_rate=0.01, model_dir=model_dir)
    reloaded_gan.restore()
    reloaded_values = reloaded_gan.predict_gan_generator(
        noise_input=noise_input, conditional_inputs=[means])
    assert np.all(values == reloaded_values)
@flaky
@pytest.mark.tensorflow
def test_mix_gan_reload():
    """Test reloading a GAN with multiple generators and discriminators."""
    model_dir = tempfile.mkdtemp()
    gan = ExampleGAN(
        n_generators=2,
        n_discriminators=2,
        learning_rate=0.01,
        model_dir=model_dir)
    gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.5)
    reloaded_gan = ExampleGAN(
        n_generators=2,
        n_discriminators=2,
        learning_rate=0.01,
        model_dir=model_dir)
    reloaded_gan.restore()
    # See if it has done a plausible job of learning the distribution.
    means = 10 * np.random.random([1000, 1])
    batch_size = len(means)
    # Shared noise so each generator's output can be compared exactly.
    noise_input = gan.get_noise_batch(batch_size=batch_size)
    for i in range(2):
        values = gan.predict_gan_generator(
            noise_input=noise_input, conditional_inputs=[means], generator_index=i)
        reloaded_values = reloaded_gan.predict_gan_generator(
            noise_input=noise_input, conditional_inputs=[means], generator_index=i)
        assert np.all(values == reloaded_values)
    assert gan.get_global_step() == 1000
    # No training has been done after reload
    assert reloaded_gan.get_global_step() == 0
@flaky
@pytest.mark.tensorflow
def test_mix_gan():
    """Test a GAN with multiple generators and discriminators."""
    gan = ExampleGAN(n_generators=2, n_discriminators=2, learning_rate=0.01)
    gan.fit_gan(
        generate_data(gan, 1000, 100), generator_steps=0.5, checkpoint_interval=0)
    # See if it has done a plausible job of learning the distribution;
    # every generator must individually track the conditional means.
    means = 10 * np.random.random([1000, 1])
    for i in range(2):
        values = gan.predict_gan_generator(
            conditional_inputs=[means], generator_index=i)
        deltas = values - means
        assert abs(np.mean(deltas)) < 1.0
        assert np.std(deltas) > 1.0
    assert gan.get_global_step() == 1000
@flaky
@pytest.mark.tensorflow
def test_wgan():
    """Test fitting a conditional WGAN."""

    class ExampleWGAN(dc.models.WGAN):
        """Same toy architecture as ExampleGAN, but the critic output is
        linear (no sigmoid), as required by the Wasserstein loss."""

        def get_noise_input_shape(self):
            return (2,)

        def get_data_input_shapes(self):
            return [(1,)]

        def get_conditional_input_shapes(self):
            return [(1,)]

        def create_generator(self):
            noise_input = Input(self.get_noise_input_shape())
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [noise_input, conditional_input]
            gen_in = Concatenate(axis=1)(inputs)
            output = Dense(1)(gen_in)
            return tf.keras.Model(inputs=inputs, outputs=output)

        def create_discriminator(self):
            data_input = Input(self.get_data_input_shapes()[0])
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [data_input, conditional_input]
            discrim_in = Concatenate(axis=1)(inputs)
            dense = Dense(10, activation=tf.nn.relu)(discrim_in)
            output = Dense(1)(dense)
            return tf.keras.Model(inputs=inputs, outputs=output)

    # We have to set the gradient penalty very small because the generator's
    # output is only a single number, so the default penalty would constrain
    # it far too much.
    gan = ExampleWGAN(learning_rate=0.01, gradient_penalty=0.1)
    gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.1)
    # See if it has done a plausible job of learning the distribution.
    means = 10 * np.random.random([1000, 1])
    values = gan.predict_gan_generator(conditional_inputs=[means])
    deltas = values - means
    assert abs(np.mean(deltas)) < 1.0
    assert np.std(deltas) > 1.0
@flaky
@pytest.mark.tensorflow
def test_wgan_reload():
    """Test fitting a conditional WGAN, then reloading it from checkpoint."""

    class ExampleWGAN(dc.models.WGAN):
        """Same toy architecture as in test_wgan (linear critic output)."""

        def get_noise_input_shape(self):
            return (2,)

        def get_data_input_shapes(self):
            return [(1,)]

        def get_conditional_input_shapes(self):
            return [(1,)]

        def create_generator(self):
            noise_input = Input(self.get_noise_input_shape())
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [noise_input, conditional_input]
            gen_in = Concatenate(axis=1)(inputs)
            output = Dense(1)(gen_in)
            return tf.keras.Model(inputs=inputs, outputs=output)

        def create_discriminator(self):
            data_input = Input(self.get_data_input_shapes()[0])
            conditional_input = Input(self.get_conditional_input_shapes()[0])
            inputs = [data_input, conditional_input]
            discrim_in = Concatenate(axis=1)(inputs)
            dense = Dense(10, activation=tf.nn.relu)(discrim_in)
            output = Dense(1)(dense)
            return tf.keras.Model(inputs=inputs, outputs=output)

    # We have to set the gradient penalty very small because the generator's
    # output is only a single number, so the default penalty would constrain
    # it far too much.
    model_dir = tempfile.mkdtemp()
    gan = ExampleWGAN(
        learning_rate=0.01, gradient_penalty=0.1, model_dir=model_dir)
    gan.fit_gan(generate_data(gan, 1000, 100), generator_steps=0.1)
    reloaded_gan = ExampleWGAN(
        learning_rate=0.01, gradient_penalty=0.1, model_dir=model_dir)
    reloaded_gan.restore()
    # See if it has done a plausible job of learning the distribution.
    means = 10 * np.random.random([1000, 1])
    batch_size = len(means)
    # Fixed noise so original and reloaded outputs can be compared exactly.
    noise_input = gan.get_noise_batch(batch_size=batch_size)
    values = gan.predict_gan_generator(
        noise_input=noise_input, conditional_inputs=[means])
    reloaded_values = reloaded_gan.predict_gan_generator(
        noise_input=noise_input, conditional_inputs=[means])
    assert np.all(values == reloaded_values)
|
peastman/deepchem
|
deepchem/models/tests/test_gan.py
|
Python
|
mit
| 8,693
|
[
"Gaussian"
] |
774be68acee9ee03b6da80f7d768cf8c848b5720b44f6df7863e8e117f43b338
|
# By Jay Ravaliya
# Imports
from twython import Twython
from secret import consumer_key, consumer_secret, access_token, access_secret
from model import Posted, db
import requests
import json
import datetime
import random
import math
import sys
# Set up Twitter keys (credentials live in the untracked secret module).
twitter = Twython(consumer_key, consumer_secret, access_token, access_secret)
# Set up payload for analytics.
payload = {
    "userid" : "TwitterBot",
    "device" : "TwitterBot"
}
# Send post request to API to get data, convert it to JSON right away.
# NOTE(review): no timeout/error handling -- a hung or failed request
# aborts the whole run; confirm that is acceptable for a cron job.
r = requests.post("http://eventsatnjit.jayravaliya.com/api/v0.2/events", json=payload).json()
# Retrieve current time (local time of the host running the bot).
currenttime = datetime.datetime.now()
# Total number of events, counted.
total = 0
# At 8:00 AM, post morning tweet with the day's event count
# (assumes the script is run by a scheduler once per hour).
if(currenttime.hour == 8):
    # Count total elements that are taking place today. Post it.
    # Else, post that there are no events going on.
    for elem in r["response"]:
        if elem["datetime"]["is_today"] == True:
            total = total + 1
    if total > 0:
        tweet = "There are " + str(total) + " events taking place today! Be sure to stop by and check some out! via @EventsAtNJIT"
    else:
        tweet = "Ah - no events going on today! Be sure to check back tomorrow to see what's going on!"
    print(tweet)
    twitter.update_status(status=tweet)
# At 10:00 PM, post the sign-off tweet.
elif(currenttime.hour == 22):
    tweet = "That's all for today! Visit back tomorrow to learn about the awesome events taking place on campus! via @EventsAtNJIT"
    twitter.update_status(status=tweet)
# Posting every two hours:
else:
    # Starting text prepended to event tweets (one picked at random).
    starters = [
        "Awesome event coming up: ",
        "Did you know? ",
        "Check this out: ",
        "Stop by: "
    ]
    # Categories to include.
    # NOTE(review): 'categories' is not referenced anywhere in the visible
    # remainder of this script -- possibly dead data; confirm.
    categories = [
        "Intramurals & Recreation",
        "Reception, Banquet, Party",
        "Lecture, Seminar, Workshop",
        "Conference, Fair",
        "Other"
    ]
    # Count the number of events. Exit if there are no events left.
    num_events = 0
    def today_events():
        # Counts events starting today or tomorrow into the module-level
        # counter; 'global' is needed because it rebinds num_events.
        global num_events
        for elem in r["response"]:
            if (elem["datetime"]["is_today"] == True or elem["datetime"]["is_tomorrow"]):
                num_events = num_events + 1
    today_events()
    if (num_events == 0):
        print "NO EVENTS"
        sys.exit()
# Input JSON element - output validity.
def valid_event(elem):
    # An event is tweetable when it starts today or tomorrow, is not a
    # multi-day event, and either has not begun yet or is starting right now.
    dt = elem["datetime"]
    if not (dt["is_today"] == True or dt["is_tomorrow"] == True):
        return False
    if dt["multiday"] == False and (dt["currently_happening"] == False or dt["starting_now"] == True):
        return True
    return False
# Input JSON element - output tweet.
def generate_tweet(elem):
    # Builds the tweet text for one event dict, or returns None when even
    # the intro-less version exceeds Twitter's 140-character limit.
    print("Element Id: " + str(elem["id"]))
    # Random intro, unless happening now.
    if elem["datetime"]["currently_happening"] == True:
        intro = "Happening Now: "
    else:
        intro = starters[int(math.floor(random.random() * len(starters)))]
    # Add basic data: event name and hosting organization.
    tweet = "\"" + elem["name"] + "\"" + " hosted by " + elem["organization"] + " "
    # Phrase the start time relative to today where possible.
    if elem["datetime"]["is_today"] == True:
        tweet = tweet + "starts today "
    elif elem["datetime"]["is_tomorrow"] == True:
        tweet = tweet + "starts tomorrow "
    elif elem["datetime"]["currently_happening"] == True:
        tweet = tweet + "started "
    else:
        tweet = tweet + "starts on " + elem["datetime"]["start"]["common_formats"]["date"] + " "
    # Finalize tweet with time and location, return.
    tweet = tweet + "at " + elem["datetime"]["start"]["common_formats"]["time"] + " in " + elem["location"] + "."
    # Prefer the version with the intro if it fits in 140 chars.
    if len(intro + tweet) <= 140:
        return intro + tweet
    elif len(tweet) <= 140:
        return tweet
    else:
        return None
# Loop through events, tweet at most one new event per run.
for elem in r["response"]:
    if valid_event(elem) == True:
        try:
            tweet = generate_tweet(elem)
            # Skip events already posted (tracked by id in the Posted table).
            p = Posted.query.filter_by(event_id=elem["id"]).first()
            if tweet != None and p == None:
                print tweet + " / " + str(len(tweet))
                # Record the event before tweeting so reruns don't repost it.
                p = Posted(elem["id"])
                db.session.add(p)
                db.session.commit()
                twitter.update_status(status=tweet)
                # One tweet per run.
                break
        except:
            # NOTE(review): bare except silently swallows DB/Twitter errors
            # and moves on to the next event; consider logging at minimum.
            pass
|
jayrav13/njit-events-api
|
dev_bot.py
|
Python
|
mit
| 3,984
|
[
"VisIt"
] |
71461e47d03279c29d9f37546224e8488e9b27ecdc9a578661fd041422c35b60
|
#!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_api_session
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Avi API Module
description:
- This module can be used for calling any resources defined in Avi REST API. U(https://avinetworks.com/)
- This module is useful for invoking HTTP Patch methods and accessing resources that do not have an REST object associated with them.
version_added: 2.3
requirements: [ avisdk ]
options:
http_method:
description:
- Allowed HTTP methods for RESTful services and are supported by Avi Controller.
choices: ["get", "put", "post", "patch", "delete"]
required: true
data:
description:
- HTTP body in YAML or JSON format.
params:
description:
- Query parameters passed to the HTTP API.
path:
description:
- 'Path for Avi API resource. For example, C(path: virtualservice) will translate to C(api/virtualserivce).'
timeout:
description:
- Timeout (in seconds) for Avi API calls.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Get Pool Information using avi_api_session
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: pool
params:
name: "{{ pool_name }}"
api_version: 16.4
register: pool_results
- name: Patch Pool with list of servers
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: patch
path: "{{ pool_path }}"
api_version: 16.4
data:
add:
servers:
- ip:
addr: 10.10.10.10
type: V4
- ip:
addr: 20.20.20.20
type: V4
register: updated_pool
- name: Fetch Pool metrics bandwidth and connections rate
avi_api_session:
controller: "{{ controller }}"
username: "{{ username }}"
password: "{{ password }}"
http_method: get
path: analytics/metrics/pool
api_version: 16.4
params:
name: "{{ pool_name }}"
metric_id: l4_server.avg_bandwidth,l4_server.avg_complete_conns
step: 300
limit: 10
register: pool_metrics
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, ansible_return, HAS_AVI)
from avi.sdk.avi_api import ApiSession
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
    """Entry point for the avi_api_session Ansible module.

    Builds the argument spec, opens an Avi API session and performs the
    requested HTTP call, emulating idempotent behaviour for post/put/patch
    by comparing the request payload against the existing object.
    """
    argument_specs = dict(
        http_method=dict(required=True,
                         choices=['get', 'put', 'post', 'patch',
                                  'delete']),
        path=dict(type='str', required=True),
        params=dict(type='dict'),
        data=dict(type='jsonarg'),
        timeout=dict(type='int', default=60)
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=argument_specs)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    tenant_uuid = module.params.get('tenant_uuid', None)
    api = ApiSession.get_session(
        module.params['controller'], module.params['username'],
        module.params['password'], tenant=module.params['tenant'],
        tenant_uuid=tenant_uuid)
    tenant = module.params.get('tenant', '')
    timeout = int(module.params.get('timeout'))
    # path is a required argument
    path = module.params.get('path', '')
    params = module.params.get('params', None)
    data = module.params.get('data', None)
    # Get the api_version from module.
    api_version = module.params.get('api_version', '16.4')
    if data is not None:
        data = json.loads(data)
    method = module.params['http_method']
    existing_obj = None
    # Anything other than a read is assumed to change state until the
    # object comparisons below prove otherwise.
    changed = method != 'get'
    gparams = deepcopy(params) if params else {}
    gparams.update({'include_refs': '', 'include_name': ''})
    if method == 'post':
        # need to check if object already exists. In that case
        # change the method to be put.
        # NOTE(review): assumes the post payload always carries 'name';
        # a payload without it raises KeyError here.
        gparams['name'] = data['name']
        rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
                      params=gparams, api_version=api_version)
        try:
            existing_obj = rsp.json()['results'][0]
        except IndexError:
            # object is not found
            pass
        else:
            # object is present: update it in place instead of creating
            method = 'put'
            path += '/' + existing_obj['uuid']
    if method == 'put':
        # put can happen when the full path is specified or it is put + post
        if existing_obj is None:
            using_collection = False
            # A bare collection path plus a named payload means we must
            # resolve the object's uuid ourselves.
            if (len(path.split('/')) == 1) and ('name' in data):
                gparams['name'] = data['name']
                using_collection = True
            rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
                          params=gparams, api_version=api_version)
            rsp_data = rsp.json()
            if using_collection:
                if rsp_data['results']:
                    existing_obj = rsp_data['results'][0]
                    path += '/' + existing_obj['uuid']
                else:
                    # Nothing by that name yet: fall back to creation.
                    method = 'post'
            else:
                if rsp.status_code == 404:
                    method = 'post'
                else:
                    existing_obj = rsp_data
        if existing_obj:
            # Only report/perform a change when the payload differs.
            changed = not avi_obj_cmp(data, existing_obj)
            cleanup_absent_fields(data)
    if method == 'patch':
        # Snapshot the object before patching so we can detect changes.
        rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
                      params=gparams, api_version=api_version)
        existing_obj = rsp.json()
    if (method == 'put' and changed) or (method != 'put'):
        fn = getattr(api, method)
        # BUG FIX: tenant_uuid was previously passed the tenant *name*
        # (tenant_uuid=tenant); pass the actual uuid, consistent with
        # every other API call in this function.
        rsp = fn(path, tenant=tenant, tenant_uuid=tenant_uuid,
                 timeout=timeout, params=params, data=data,
                 api_version=api_version)
    else:
        rsp = None
    if method == 'delete' and rsp.status_code == 404:
        # Deleting something that is already gone is a no-op success.
        changed = False
        rsp.status_code = 200
    if method == 'patch' and existing_obj and rsp.status_code < 299:
        # Ideally the comparison should happen with the return values
        # from the patch API call. However, currently Avi API are
        # returning different hostname when GET is used vs Patch.
        # tracked as AV-12561
        if path.startswith('pool'):
            time.sleep(1)
        gparams = deepcopy(params) if params else {}
        gparams.update({'include_refs': '', 'include_name': ''})
        rsp = api.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
                      params=gparams, api_version=api_version)
        new_obj = rsp.json()
        changed = not avi_obj_cmp(new_obj, existing_obj)
    if rsp is None:
        # 'put' with no difference: report the unchanged object.
        return module.exit_json(changed=changed, obj=existing_obj)
    return ansible_return(module, rsp, changed, req=data)


if __name__ == '__main__':
    main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_api_session.py
|
Python
|
bsd-3-clause
| 8,381
|
[
"VisIt"
] |
cc9c25c1d1fd0029cba318b65588e234de4b6314071e63b17c472c5937487d6c
|
"""
Handling the download of the shifter Proxy
"""
import os
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
def getShifterProxy(shifterType, fileName=False):
    """This method returns a shifter's proxy

    :param str shifterType: ProductionManager / DataManager...
    :param str fileName: file name to download the proxy to; its parent
        directory is created if needed
    :return: S_OK(dict)/S_ERROR()
    """
    if fileName:
        mkDir(os.path.dirname(fileName))
    opsHelper = Operations()
    # Shifter user is configured under Operations/Shifter/<type>/User.
    userName = opsHelper.getValue(cfgPath("Shifter", shifterType, "User"), "")
    if not userName:
        return S_ERROR("No shifter User defined for %s" % shifterType)
    result = Registry.getDNForUsername(userName)
    if not result["OK"]:
        return result
    # Use the first DN registered for this user.
    userDN = result["Value"][0]
    result = Registry.findDefaultGroupForDN(userDN)
    if not result["OK"]:
        return result
    defaultGroup = result["Value"]
    # The group may be overridden under Operations/Shifter/<type>/Group.
    userGroup = opsHelper.getValue(cfgPath("Shifter", shifterType, "Group"), defaultGroup)
    vomsAttr = Registry.getVOMSAttributeForGroup(userGroup)
    if vomsAttr:
        # Group carries a VOMS attribute: download a VOMS-extended proxy.
        gLogger.info("Getting VOMS [%s] proxy for shifter %s@%s (%s)" % (vomsAttr, userName, userGroup, userDN))
        result = gProxyManager.downloadVOMSProxyToFile(
            userDN, userGroup, filePath=fileName, requiredTimeLeft=86400, cacheTime=86400
        )
    else:
        gLogger.info("Getting proxy for shifter %s@%s (%s)" % (userName, userGroup, userDN))
        result = gProxyManager.downloadProxyToFile(
            userDN, userGroup, filePath=fileName, requiredTimeLeft=86400, cacheTime=86400
        )
    if not result["OK"]:
        return result
    # NOTE(review): relies on downloadProxyToFile/downloadVOMSProxyToFile
    # attaching a top-level "chain" key to the S_OK result -- confirm.
    chain = result["chain"]
    fileName = result["Value"]
    return S_OK({"DN": userDN, "username": userName, "group": userGroup, "chain": chain, "proxyFile": fileName})
def setupShifterProxyInEnv(shifterType, fileName=False):
    """Download the shifter's proxy and make it the process default by
    pointing X509_USER_PROXY at the downloaded proxy file.

    :param str shifterType: ProductionManager / DataManager...
    :param str fileName: file name
    :return: S_OK(dict)/S_ERROR()
    """
    result = getShifterProxy(shifterType, fileName)
    if result["OK"]:
        os.environ["X509_USER_PROXY"] = result["Value"]["proxyFile"]
    return result
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/Shifter.py
|
Python
|
gpl-3.0
| 2,643
|
[
"DIRAC"
] |
40ea4f544229a6ee0f29ac76363faadc087d49edabc8de1b6354654de8f03b86
|
# -*- coding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
import functools
import os
import six
import xml.dom
import xml4h
class TestBuilderMethods(unittest.TestCase):
    """Tests for the xml4h.build entry point and Builder argument checking."""

    def test_create_minidom(self):
        # Building with the minidom adapter yields a minidom Document.
        xmlb = xml4h.build('DocRoot', adapter=xml4h.XmlDomImplAdapter)
        self.assertIsInstance(
            xmlb.dom_element.impl_document, xml.dom.minidom.Document)

    def test_init_class_with_illegal_object(self):
        # Constructing Builder directly with the string 'Bad' raises
        # ValueError (per this assertion).
        self.assertRaises(ValueError, xml4h.Builder, 'Bad')

    def test_builder_method_with_illegal_object(self):
        # build() rejects non-str/Element arguments; the expected message
        # differs between Python 2 and 3 type reprs.
        try:
            xml4h.build(123)
        except Exception as ex:
            self.assertEqual(
                xml4h.exceptions.IncorrectArgumentTypeException,
                ex.__class__)
            if six.PY3:
                expected = (
                    "Argument 123 is not one of the expected types: "
                    "[<class 'str'>, <class 'xml4h.nodes.Element'>]"
                )
            else:
                expected = (
                    "Argument 123 is not one of the expected types: "
                    "[<type 'str'>, <class 'xml4h.nodes.Element'>]"
                )
            self.assertEqual(expected, str(ex))
class BaseBuilderNodesTest(object):
def setUp(self):
    # Skip the whole test case when the adapter's backing XML library
    # is not installed in this environment.
    if not self.adapter.is_available():
        self.skipTest('Library for adapter %s is not available'
                      % self.adapter)
@property
def my_builder(self):
    # xml4h.build pre-bound to this test class's adapter.
    return functools.partial(xml4h.build, adapter=self.adapter)
def test_element(self):
    """element()/elem()/e() aliases and nested element building."""
    xmlb = self.my_builder('DocRoot')
    # Aliases
    self.assertEqual(xmlb.element, xmlb.elem)
    self.assertEqual(xmlb.element, xmlb.e)
    # Add elements
    xmlb = (
        self.my_builder('DocRoot')
        .e('Deeper')
        .e('AndDeeper')
        .e('DeeperStill'))
    # Check builder's current node is at deepest element
    self.assertEqual('<DeeperStill/>', xmlb.xml())
    # Check builder produces expected XML doc as string
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        '    <Deeper>\n'
        '        <AndDeeper>\n'
        '            <DeeperStill/>\n'
        '        </AndDeeper>\n'
        '    </Deeper>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
def test_root(self):
    """The root element stays reachable from a deeply-nested builder node."""
    xmlb = (
        self.my_builder('DocRoot')
        .e('Deeper')
        .e('AndDeeper')
        .e('DeeperStill'))
    # Builder node is at DeeperStill element, but we can get to the root
    self.assertEqual('DeeperStill', xmlb.dom_element.name)
    self.assertEqual('DocRoot', xmlb.dom_element.root.name)
def test_up(self):
    """up() navigation: single steps, counts, and by tag name — never past root."""
    xmlb = (
        self.my_builder('DocRoot')
        .e('Deeper')
        .e('AndDeeper')
        .e('DeeperStill'))
    self.assertEqual('DeeperStill', xmlb.dom_element.name)
    # Can navigate up XML DOM tree one step at a time...
    self.assertEqual('AndDeeper', xmlb.up().dom_element.name)
    self.assertEqual('Deeper', xmlb.up().up().dom_element.name)
    self.assertEqual('DocRoot', xmlb.up().up().up().dom_element.name)
    # ...but not past the root element
    self.assertEqual('DocRoot',
                     xmlb.up().up().up().up().dom_element.name)
    # Can navigate up by count...
    self.assertEqual('AndDeeper', xmlb.up().dom_element.name)
    self.assertEqual('Deeper', xmlb.up(2).dom_element.name)
    self.assertEqual('DocRoot', xmlb.up(3).dom_element.name)
    # ...but not past the root element
    self.assertEqual('DocRoot', xmlb.up(100).dom_element.name)
    # Can navigate up to a given element tagname
    self.assertEqual('AndDeeper',
                     xmlb.up('AndDeeper').dom_element.name)
    self.assertEqual('Deeper',
                     xmlb.up('Deeper').dom_element.name)
    self.assertEqual('DocRoot',
                     xmlb.up('DocRoot').dom_element.name)
    # ...but not past the root element if there is no such tagname
    self.assertEqual('DocRoot',
                     xmlb.up('NoSuchName').dom_element.name)
def test_attributes(self):
    """attrs() aliases and the accepted argument forms (kwargs, pairs, dict)."""
    # Aliases
    xmlb = self.my_builder('DocRoot')
    self.assertEqual(xmlb.attributes, xmlb.attrs)
    self.assertEqual(xmlb.attributes, xmlb.a)
    # Add attributes
    xmlb = (
        self.my_builder('DocRoot')
        .e('Elem1').attrs(x=1).up()  # Add a single name/value pair
        .e('Elem2').attrs(a='a', b='bee').up()  # Add multiple
        .e('Elem3').attrs([  # Add list of tuple pairs
            ('hyphenated-name', 'v2'),
            ]).up()
        .e('Elem4').attrs({  # Add a dictionary
            'twelve': 3 * 4,
            }).up()
        # Attributes given in first arg trump same name in kwargs
        .e('Elem5').attrs(
            {'test': 'value-in-first-arg'},
            test='value-in-kwargs').up()
        )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        '    <Elem1 x="1"/>\n'
        '    <Elem2 a="a" b="bee"/>\n'
        '    <Elem3 hyphenated-name="v2"/>\n'
        '    <Elem4 twelve="12"/>\n'
        '    <Elem5 test="value-in-first-arg"/>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
def test_xml(self):
    """Serialization via xml() (node subtree only) and xml_doc() (whole
    document) with the declaration, encoding, indent and newline options."""
    xmlb = (
        self.my_builder('DocRoot')
        .e('Elem1').up()
        .e('Elem2')
        .e('Elem3').up())
    # xml() method outputs current node content and descendents only
    self.assertEqual(
        '<Elem2>\n <Elem3/>\n</Elem2>',
        xmlb.dom_element.xml())
    # Default string output is utf-8, and pretty-printed
    xml = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <Elem1/>\n'
        ' <Elem2>\n'
        ' <Elem3/>\n'
        ' </Elem2>\n'
        '</DocRoot>\n'
    )
    self.assertEqual(xml, xmlb.dom_element.xml_doc())
    # Mix it up a bit
    self.assertEqual(
        '<DocRoot>\n'
        ' <Elem1/>\n'
        ' <Elem2>\n'
        ' <Elem3/>\n'
        ' </Elem2>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc(omit_declaration=True))
    self.assertEqual(
        '<?xml version="1.0" encoding="latin1"?>\n'
        '<DocRoot>\n'
        ' <Elem1/>\n'
        ' <Elem2>\n'
        ' <Elem3/>\n'
        ' </Elem2>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc(encoding='latin1', indent=2))
    # encoding=None drops the encoding attr; newline can be any string
    self.assertEqual(
        '<?xml version="1.0"?>\t'
        '<DocRoot>\t'
        ' <Elem1/>\t'
        ' <Elem2>\t'
        ' <Elem3/>\t'
        ' </Elem2>\t'
        '</DocRoot>\t',
        xmlb.dom_element.xml_doc(encoding=None, indent=8, newline='\t'))
def test_text(self):
    """text()/t() adds text nodes; adjacent text nodes concatenate, and a
    text node may sit alongside a nested element."""
    # Aliases
    xmlb = self.my_builder('DocRoot')
    self.assertEqual(xmlb.text, xmlb.t)
    # Add text values to elements
    xmlb = (
        self.my_builder('DocRoot')
        .e('Elem1').t('A text value').up()
        .e('Elem2').t('Seven equals %s' % 7).up()
    )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <Elem1>A text value</Elem1>\n'
        ' <Elem2>Seven equals 7</Elem2>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
    # Multiple text nodes, and text node next to nested element
    xmlb = (
        self.my_builder('DocRoot')
        .e('Elem1')
        .t('First text value')
        .t('Second text value').up()
        .e('Elem2')
        .t('Text')
        .e('Nested')
        .t('Text in nested').up()
    )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <Elem1>First text valueSecond text value</Elem1>\n'
        ' <Elem2>Text\n'
        ' <Nested>Text in nested</Nested>\n'
        ' </Elem2>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
def test_comment(self):
    """The comment() builder method (alias c()) adds XML comment nodes."""
    # comment() is aliased as c()
    builder = self.my_builder('DocRoot')
    self.assertEqual(builder.comment, builder.c)
    # Attach a comment node to a child element and check the serialization
    builder = self.my_builder('DocRoot')
    builder = builder.e('Elem1').c('Here is my comment').up()
    expected = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <Elem1><!--Here is my comment--></Elem1>\n'
        '</DocRoot>\n'
    )
    self.assertEqual(expected, builder.dom_element.xml_doc())
def test_instruction(self):
    """instruction() (aliases processing_instruction(), i()) adds PI nodes."""
    # Check the method aliases are the same bound method
    builder = self.my_builder('DocRoot')
    self.assertEqual(
        builder.instruction, builder.processing_instruction)
    self.assertEqual(builder.instruction, builder.i)
    # Add a processing instruction and check the serialized document
    builder = self.my_builder('DocRoot').i('inst-target', 'inst-data')
    expected = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <?inst-target inst-data?>\n'
        '</DocRoot>\n'
    )
    self.assertEqual(expected, builder.dom_element.xml_doc())
def test_namespace(self):
    """
    Namespaces can be applied to elements and attributes both at and after
    creation time, and are honoured when searching/traversing the DOM.
    """
    # Define namespaces on elements after creation
    xmlb = (
        self.my_builder('DocRoot', ns_uri='urn:default')
        .e('Elem1', ns_uri='urn:elem1').up()
        .e('Elem2').ns_prefix('myns', 'urn:elem2').up()
    )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot xmlns="urn:default">\n'
        ' <Elem1 xmlns="urn:elem1"/>\n'
        ' <Elem2 xmlns:myns="urn:elem2"/>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
    # Test namespaces work as expected when searching/traversing DOM
    self.assertEqual(1, len(xmlb.find(name='Elem1')))  # Ignore namespace
    self.assertEqual(1, len(xmlb.find(name='Elem1', ns_uri='urn:elem1')))
    self.assertEqual(1, len(xmlb.find_doc(ns_uri='urn:elem1')))
    self.assertEqual(0, len(xmlb.find(name='Elem1', ns_uri='urn:wrong')))
    self.assertEqual(['Elem1'],
        [n.name for n in xmlb.dom_element.children(ns_uri='urn:elem1')])
    self.assertEqual(['DocRoot', 'Elem2'],
        [n.name for n in xmlb.find_doc(ns_uri='urn:default')])
    self.assertEqual(['Elem2'],
        [n.name for n in xmlb.dom_element.children(ns_uri='urn:default')])
    # Set namespaces of elements and attributes on creation
    xmlb = (
        self.my_builder('DocRoot', ns_uri='urn:default')
        .ns_prefix('myns', 'urn:custom')
        # Elements in default namespace
        .e('NSDefaultImplicit').up()
        .e('NSDefaultExplicit', ns_uri='urn:default').up()
        # Elements in custom namespace
        .e('NSCustomExplicit', ns_uri='urn:custom').up()
        .e('myns:NSCustomWithPrefixImplicit').up()
        .e('myns:NSCustomWithPrefixExplicit',
            ns_uri='urn:custom').up()
        # Attributes in namespace
        .e('Attrs1')
            .attrs({'default-ns-implicit': 1})
            .attrs({'default-ns-explicit': 1},
                ns_uri='urn:default').up()
        .e('Attrs2')
            .attrs({'myns:custom-ns-prefix-implicit': 1})
            .attrs({'myns:custom-ns-prefix-explicit': 1},
                ns_uri='urn:custom')
    )
    xml = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot xmlns="urn:default" xmlns:myns="urn:custom">\n'
        ' <NSDefaultImplicit/>\n'
        ' <NSDefaultExplicit xmlns="urn:default"/>\n'
        ' <NSCustomExplicit xmlns="urn:custom"/>\n'
        ' <myns:NSCustomWithPrefixImplicit/>\n'
        ' <NSCustomWithPrefixExplicit xmlns="urn:custom"/>\n'
        ' <Attrs1 default-ns-explicit="1"'
        ' default-ns-implicit="1"/>\n'
        ' <Attrs2'
        ' myns:custom-ns-prefix-explicit="1"'
        ' myns:custom-ns-prefix-implicit="1"/>\n'
        '</DocRoot>\n'
    )
    self.assertEqual(xml, xmlb.dom_element.xml_doc())
    # Test namespaces work as expected when searching/traversing DOM
    self.assertEqual(
        ['DocRoot', 'NSDefaultImplicit', 'NSDefaultExplicit',
         'Attrs1', 'Attrs2'],
        [n.name for n in xmlb.find_doc(ns_uri='urn:default')])
    # (A byte-identical duplicate of the following assertion was removed.)
    self.assertEqual(
        ['NSCustomExplicit',
         'NSCustomWithPrefixImplicit',
         'NSCustomWithPrefixExplicit'],
        [n.local_name for n in xmlb.find_doc(ns_uri='urn:custom')])
    # Check attribute namespaces
    self.assertEqual(
        [xml4h.nodes.Node.XMLNS_URI, xml4h.nodes.Node.XMLNS_URI],
        [n.namespace_uri for n in xmlb.dom_element.root.attribute_nodes])
    attrs1_elem = xmlb.document.find_first('Attrs1')
    self.assertEqual([None, None],
        [n.namespace_uri for n in attrs1_elem.attribute_nodes])
    attrs2_elem = xmlb.document.find_first('Attrs2')
    self.assertEqual(['urn:custom', 'urn:custom'],
        [n.namespace_uri for n in attrs2_elem.attribute_nodes])
def test_element_creation_with_namespace(self):
    """Elements and attributes can be namespaced at creation time using
    either 'prefix:name' syntax or ElementTree-style '{uri}name' syntax;
    unknown URIs get auto-generated 'autoprefixN' prefixes."""
    # Define namespaces on elements using prefixes
    xmlb = (
        self.my_builder('DocRoot', ns_uri='urn:default')
        .ns_prefix('testns', 'urn:test')
        .e('testns:Elem1').up()  # Standard XML-style prefix name
        .e('{urn:test}Elem2').up()  # ElementTree-style prefix URI
        .e('Attrs').attrs({
            'testns:attrib1': 'value1',
            '{urn:test}attrib2': 'value2'})
    )
    root = xmlb.dom_element.root
    # Both syntaxes resolve to the same prefix and namespace URI
    self.assertEqual('testns', root.find_first(name='Elem1').prefix)
    self.assertEqual('testns', root.find_first(name='Elem2').prefix)
    self.assertEqual(
        'urn:test', root.find_first(name='Elem1').namespace_uri)
    self.assertEqual(
        'urn:test', root.find_first(name='Elem2').namespace_uri)
    self.assertEqual('testns:Elem1', root.find_first(name='Elem1').name)
    self.assertEqual('testns:Elem2', root.find_first(name='Elem2').name)
    attrs_elem = root.find_first(name='Attrs')
    self.assertEqual('Attrs', attrs_elem.name)
    # TODO Allow attrib lookups without namespace prefix?
    self.assertEqual(
        'testns', attrs_elem.attributes.prefix('testns:attrib1'))
    self.assertEqual(
        'testns', attrs_elem.attributes.prefix('testns:attrib2'))
    self.assertEqual(
        'urn:test', attrs_elem.attributes.namespace_uri('testns:attrib1'))
    self.assertEqual(
        'urn:test', attrs_elem.attributes.namespace_uri('testns:attrib2'))
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot xmlns="urn:default" xmlns:testns="urn:test">\n'
        ' <testns:Elem1/>\n'
        ' <testns:Elem2/>\n'
        ' <Attrs testns:attrib1="value1" testns:attrib2="value2"/>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
    # Attempts to use undefined namespace prefixes will fail
    xmlb = self.my_builder('DocRoot', ns_uri='urn:default')
    self.assertRaises(xml4h.exceptions.UnknownNamespaceException,
        xmlb.e, 'missingns:Elem1')
    self.assertRaises(xml4h.exceptions.UnknownNamespaceException,
        xmlb.attrs, {'missingns:attrib1': 'value1'})
    # Element with literal namespace defn will use the ns as its default
    xmlb = self.my_builder('DocRoot', ns_uri='urn:default')
    xmlb.e('{urn:missing}Elem1')
    self.assertEqual(
        None, xmlb.root.find_first(name='Elem1').prefix)
    self.assertEqual(
        'urn:missing',
        xmlb.root.find_first(name='Elem1').namespace_uri)
    self.assertEqual(
        'urn:missing',
        xmlb.root.find_first(name='Elem1').attributes['xmlns'])
    # Automatically define prefix for attribute with literal namespace
    xmlb.attrs({'{urn:missing2}attrib1': 'value2'})
    self.assertEqual(
        'autoprefix0',
        xmlb.root.attributes.prefix('autoprefix0:attrib1'))
    self.assertEqual(
        'urn:missing2',
        xmlb.root.attributes.namespace_uri('autoprefix0:attrib1'))
    self.assertEqual(
        'urn:missing2',
        xmlb.root.attributes['xmlns:autoprefix0'])
    # A second unknown URI gets the next auto-generated prefix
    xmlb.attrs({'{urn:missing3}attrib2': 'value3'})
    self.assertEqual(
        'autoprefix1',
        xmlb.root.attributes.prefix('autoprefix1:attrib2'))
    self.assertEqual(
        'urn:missing3',
        xmlb.root.attributes.namespace_uri('autoprefix1:attrib2'))
    self.assertEqual(
        'urn:missing3',
        xmlb.root.attributes['xmlns:autoprefix1'])
def test_cdata(self):
    """cdata()/data()/d() adds CDATA sections; lxml/(c)ElementTree adapters
    fall back to plain (escaped) text since they lack real CDATA support."""
    # Aliases
    xmlb = self.my_builder('DocRoot')
    self.assertEqual(xmlb.cdata, xmlb.data)
    self.assertEqual(xmlb.cdata, xmlb.d)
    # Add text values to elements
    xmlb = (
        self.my_builder('DocRoot')
        .e('Elem1').t('<content/> as text').up()
        .e('Elem2').d('<content/> as cdata').up()
    )
    # NOTE(review): the expected strings below contain raw '<content/>'
    # inside text output where an XML serializer would emit escaped markup
    # ('&lt;content/&gt;'); this copy of the file appears to have lost
    # entity escaping -- verify against the upstream source.
    if self.adapter in (xml4h.LXMLAdapter, xml4h.ElementTreeAdapter,
                        xml4h.cElementTreeAdapter):
        # TODO: Make lxml & ElementTree libs support real cdata
        self.assertEqual(
            '<?xml version="1.0" encoding="utf-8"?>\n'
            '<DocRoot>\n'
            ' <Elem1><content/> as text</Elem1>\n'
            ' <Elem2><content/> as cdata</Elem2>\n'
            '</DocRoot>\n',
            xmlb.dom_element.xml_doc())
    else:
        self.assertEqual(
            '<?xml version="1.0" encoding="utf-8"?>\n'
            '<DocRoot>\n'
            ' <Elem1><content/> as text</Elem1>\n'
            ' <Elem2><![CDATA[<content/> as cdata]]></Elem2>\n'
            '</DocRoot>\n',
            xmlb.dom_element.xml_doc())
def test_element_with_extra_kwargs(self):
    """e() accepts attributes=, text=, and before_this_element= kwargs so
    a child can be fully specified (or inserted before) in one call."""
    xmlb = (
        self.my_builder('DocRoot')
        # Include attributes
        .e('Elem', attributes=[('x', 1)]).up()
        .e('Elem', attributes={'my-attribute': 'value'}).up()
        # Include text
        .e('Elem', text='Text value').up()
        # Include attributes and text
        .e('Elem', attributes={'x': 1}, text='More text').up()
    )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <Elem x="1"/>\n'
        ' <Elem my-attribute="value"/>\n'
        ' <Elem>Text value</Elem>\n'
        ' <Elem x="1">More text</Elem>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
    # Insert a new element before another
    xmlb = (
        self.my_builder('DocRoot')
        .e('FinalElement')
        .e('PenultimateElement', before_this_element=True)
        .e('ThirdLastElement', before_this_element=True)
    )
    self.assertEqual(
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<DocRoot>\n'
        ' <ThirdLastElement/>\n'
        ' <PenultimateElement/>\n'
        ' <FinalElement/>\n'
        '</DocRoot>\n',
        xmlb.dom_element.xml_doc())
def test_unicode(self):
    """Non-ASCII element names, attribute names/values, prefixes and
    namespace URIs round-trip through building, output and lookup."""
    ns_default = u'urn:默认'
    ns_custom = u'urn:習俗'
    # NOTE lxml doesn't support unicode namespace URIs
    if self.adapter == xml4h.LXMLAdapter:
        ns_default = u'urn:default'
        ns_custom = u'urn:custom'
    xmlb = (
        self.my_builder(u'جذر', ns_uri=ns_default)
        .ns_prefix(u'důl', ns_custom)
        .e(u'důl:ぷㄩƦ').up()
        .e(u'yếutố1')
        .attrs({u'תכונה': '1'})
        .up()
        .e(u'yếutố2')
        .attrs({u'důl:עודתכונה': u'tvö'})
    )
    xml = (
        u'<?xml version="1.0" encoding="utf-8"?>\n'
        u'<جذر xmlns="%(ns_default)s" xmlns:důl="%(ns_custom)s">\n'
        u' <důl:ぷㄩƦ/>\n'
        u' <yếutố1 תכונה="1"/>\n'
        u' <yếutố2 důl:עודתכונה="tvö"/>\n'
        u'</جذر>\n') % {'ns_default': ns_default, 'ns_custom': ns_custom}
    self.assertEqual(xml, xmlb.dom_element.xml_doc())
    # Search and attribute lookup also work with unicode names
    doc = xmlb.document
    self.assertEqual(u'جذر', doc.root.name)
    self.assertEqual(ns_default, doc.root.attributes['xmlns'])
    self.assertEqual(ns_custom, doc.root.attributes[u'xmlns:důl'])
    self.assertEqual(3, len(doc.find(ns_uri=ns_default)))
    self.assertEqual(1, len(doc.find(ns_uri=ns_custom)))
    self.assertEqual('1', doc.find_first(u'yếutố1').attributes[u'תכונה'])
    self.assertEqual(
        u'tvö',
        doc.find_first(u'yếutố2').attributes[u'důl:עודתכונה'])
def test_transplant_and_clone_xml4h_element(self):
    """
    Test transplanting and cloning an xml4h element node from one document
    to another using methods on the xml4h Node.
    """
    # Three independent single-animal documents to move nodes between
    cat_b = (
        self.my_builder('Animal')
        .element('Cat')
        .element('Feature').text('Independent')
    )
    dog_b = (
        self.my_builder('Animal')
        .element('Dog')
        .element('Feature').text('Loyal')
    )
    horse_b = (
        self.my_builder('Animal')
        .element('Horse')
        .element('Feature').text('Transport')
    )
    # Transplant an xml4h element node from one doc into another (it is not
    # left in the original document)
    cat_b.document.Animal.transplant_node(dog_b.document.Animal.Dog)
    self.assertEqual(
        '<Animal>'
        '<Cat><Feature>Independent</Feature></Cat>'
        '<Dog><Feature>Loyal</Feature></Dog>'
        '</Animal>',
        cat_b.root.xml(indent=False))
    # Node and descendants are removed from original document
    self.assertEqual('<Animal/>', dog_b.root.xml(indent=False))
    # Clone an xml4h element node from one doc into another (it is left in
    # place in the original document)
    cat_b.document.Animal.clone_node(horse_b.document.Animal.Horse)
    self.assertEqual(
        '<Animal>'
        '<Cat><Feature>Independent</Feature></Cat>'
        '<Dog><Feature>Loyal</Feature></Dog>'
        '<Horse><Feature>Transport</Feature></Horse>'
        '</Animal>',
        cat_b.root.xml(indent=False))
    # Node and descendants remain in original document
    self.assertEqual(
        '<Animal>'
        '<Horse><Feature>Transport</Feature></Horse>'
        '</Animal>',
        horse_b.root.xml(indent=False))
def test_transplant_and_clone_impl_text_node(self):
    """
    Test transplanting and cloning an implementation Text node from one
    document to another using builder methods.
    """
    cat_b = (
        self.my_builder('Animal')
        .element('Cat')
        .element('Feature').text('Independent')
    )
    dog_b = (
        self.my_builder('Animal')
        .element('Dog')
        .element('Feature').text('Loyal')
    )
    horse_b = (
        self.my_builder('Animal')
        .element('Horse')
        .element('Feature').text('Transport')
    )
    # Transplant an implementation Text node from one doc into another
    cat_feature_b = cat_b  # builder is currently positioned on <Feature>
    self.assertEqual('Feature', cat_feature_b.dom_element.name)
    cat_feature_b.transplant(
        dog_b.document.Animal.Dog.Feature.children[0].impl_node) \
        .up().element('X')  # Check method chaining works after transplant
    self.assertEqual(
        '<Animal>'
        '<Cat><Feature>IndependentLoyal</Feature><X/></Cat>'
        '</Animal>',
        cat_b.root.xml(indent=False))
    # Check text node is no longer in original document
    self.assertEqual(
        '<Animal><Dog><Feature/></Dog></Animal>',
        dog_b.root.xml(indent=False))
    # Clone an Text node from one doc into another
    cat_feature_b.clone(
        horse_b.document.Animal.Horse.Feature.children[0].impl_node)
    self.assertEqual(
        '<Animal>'
        '<Cat><Feature>IndependentLoyalTransport</Feature><X/></Cat>'
        '</Animal>',
        cat_b.root.xml(indent=False))
    # Check text node remains in the original document (clone, not move)
    self.assertEqual(
        '<Animal><Horse><Feature>Transport</Feature></Horse></Animal>',
        horse_b.root.xml(indent=False))
def test_build_monty_python_film_example(self):
    """
    Test production of a simple example XML doc; to be reused as project
    documentation.
    """
    # Create builder with the name of the root element
    b = (self.my_builder('MontyPythonFilms')
        # Assign attributes to the new root element
        .attributes(
            {'source': 'http://en.wikipedia.org/wiki/Monty_Python'})
        # Create a child element
        .element('Film')
        # When an element is added, later method calls apply to it
        .attributes({'year': 1971})
        .element('Title')
        # Set text content of element with text()
        .text('And Now for Something Completely Different')
        # Use up() to perform later actions on parent element
        .up()
        # Builder methods element(), text() etc. have shorter aliases
        .elem('Description').t(
            "A collection of sketches from the first and second TV"
            " series of Monty Python's Flying Circus purposely"
            " re-enacted and shot for film.").up()
        .up()
    )
    # A builder object can be re-used
    (b.e('Film')
        .attrs(year=1974)
        .e('Title').t('Monty Python and the Holy Grail').up()
        .e('Description').t(
            "King Arthur and his knights embark on a low-budget search"
            " for the Holy Grail, encountering humorous obstacles along"
            " the way. Some of these turned into standalone sketches."
        ).up()
        .up()
    )
    # A builder can be created from any element
    doc_root_elem = b.root
    b = (doc_root_elem.builder
        .e('Film')
        .attrs(year=1979)
        .e('Title').t("Monty Python's Life of Brian").up()
        .e('Description').t(
            "Brian is born on the first Christmas, in the stable "
            "next to Jesus'. He spends his life being mistaken "
            "for a messiah."
        ).up()
        .up()
        .e('Film')
        .attrs(year=1982)
        .e('Title').t('Monty Python Live at the Hollywood Bowl').up()
        .e('Description').t(
            "A videotape recording directed by Ian MacNaughton of a"
            " live performance of sketches. Originally intended for"
            " a TV/video special. Transferred to 35mm and given a"
            " limited theatrical release in the US."
        ).up()
        .up()
        .e('Film')
        .attrs(year=1983)
        .e('Title').t("Monty Python's The Meaning of Life").up()
        .e('Description').t(
            "An examination of the meaning of life in a series of"
            " sketches from conception to death and beyond."
        ).up()
        .up()
        .e('Film')
        .attrs(year=2009)
        .e('Title')
        .t("Monty Python: Almost the Truth (The Lawyer's Cut)")
        .up()
        .e('Description').t(
            "This film features interviews with all the surviving"
            " Python members, along with archive representation for"
            " the late Graham Chapman."
        ).up()
        .up()
        .e('Film')
        .attrs(year=2012)
        .e('Title').t("A Liar's Autobiography: Volume IV").up()
        .e('Description').t(
            "This is an animated film which is based on the memoir"
            " of the late Monty Python member, Graham Chapman."
        ).up()
        .up()
    )
    # Compare output of builder with pre-prepared example document
    example_file_path = os.path.join(
        os.path.dirname(__file__), 'data/monty_python_films.xml')
    expected_xml = open(example_file_path).read()
    self.assertEqual(expected_xml, b.xml_doc(indent=True))
class TestXmlDomBuilder(BaseBuilderNodesTest, unittest.TestCase):
    """
    Tests building with the standard library xml.dom module, or with any
    library that augments/clobbers this module.
    """

    @property
    def adapter(self):
        # Adapter under test for all inherited BaseBuilderNodesTest cases.
        return xml4h.XmlDomImplAdapter
class TestLXMLEtreeBuilder(BaseBuilderNodesTest, unittest.TestCase):
    """
    Tests building with the lxml (lxml.etree) library.
    """

    @property
    def adapter(self):
        # Adapter under test for all inherited BaseBuilderNodesTest cases.
        return xml4h.LXMLAdapter
class TestElementTreeBuilder(BaseBuilderNodesTest, unittest.TestCase):
    """
    Test building with the pure-Python ElementTree library.
    """

    @property
    def adapter(self):
        # Adapter under test for all inherited BaseBuilderNodesTest cases.
        return xml4h.ElementTreeAdapter
class TestcElementTreeBuilder(BaseBuilderNodesTest, unittest.TestCase):
    """
    Test building with the C-accelerated cElementTree library.
    """

    @property
    def adapter(self):
        # Adapter under test for all inherited BaseBuilderNodesTest cases.
        return xml4h.cElementTreeAdapter
|
jmurty/xml4h
|
tests/test_builder.py
|
Python
|
mit
| 31,336
|
[
"Brian"
] |
ed73a0357577fa54027e00a553498b888ae9efdb2a21bfbd22492cc529e7e0d6
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import jax
import jax.numpy as jnp
import scipy.linalg
import scipy.signal
import time
from src.global_vars import *
from src.utils import run, get_polytope_at, get_hidden_at, AcceptableFailure, KnownT, matmul, cheat_get_inner_layers, which_is_zero
from src.hyperplane_normal import get_ratios_lstsq, get_ratios
from src.find_witnesses import do_better_sweep
def sign_to_int(signs):
    """
    Pack a sequence of +/-1 signs into an integer bitmask.
    [-1, 1, 1, -1] -> 0b0110 -> 6  (-1 maps to bit 0, anything else to 1)
    """
    bits = ['0' if s == -1 else '1' for s in signs]
    return int(''.join(bits), 2)
def is_on_following_layer(known_T, known_A, known_B, point):
    """
    Heuristically decide whether the critical point `point` belongs to a
    neuron on the layer AFTER the one described by known_A/known_B.

    Strategy: reject points already on a known (earlier) hyperplane, then
    walk along directions parallel to the local hyperplane and check that
    nearby sweeps find a critical point inside the current polytope but
    not just past its boundary. Returns True/False.
    """
    print("Check if the critical point is on the next layer")

    def is_on_prior_layer(query):
        # True if `query` lies (numerically) on a hyperplane of any layer
        # we can already compute: the extracted layers, or the candidate
        # layer given by known_A/known_B.
        print("Hidden think", known_T.get_hidden_layers(query))
        if CHEATING:
            print("Hidden real", cheat_get_inner_layers(query))
        if any(np.min(np.abs(layer)) < 1e-5 for layer in known_T.get_hidden_layers(query)):
            return True
        next_hidden = known_T.extend_by(known_A, known_B).forward(query)
        print(next_hidden)
        if np.min(np.abs(next_hidden)) < 1e-4:
            return True
        return False

    if is_on_prior_layer(point):
        print("It's not, because it's on an earlier layer")
        return False
    if CHEATING:
        # ls: per-layer minimum activations, used only for debug printing
        ls = ([np.min(np.abs(x)) for x in cheat_get_inner_layers(point)])
    initial_signs = get_polytope_at(known_T, known_A, known_B, point)
    # Estimate the hyperplane normal at `point` via finite differences
    normal = get_ratios([point], [range(DIM)], eps=GRAD_EPS)[0].flatten()
    normal = normal / np.sum(normal**2)**.5
    for tol in range(10):  # tol is only a repeat counter; its value is unused
        # Pick a random direction parallel to the hyperplane (remove the
        # component along the normal)
        random_dir = np.random.normal(size=DIM)
        perp_component = np.dot(random_dir,normal)/(np.dot(normal, normal)) * normal
        parallel_dir = random_dir - perp_component
        go_direction = parallel_dir/np.sum(parallel_dir**2)**.5
        # How far can we travel before leaving the current polytope?
        _, high = binary_search_towards(known_T,
                                        known_A, known_B,
                                        point,
                                        initial_signs,
                                        go_direction)
        if CHEATING:
            print(cheat_get_inner_layers(point + go_direction * high/2)[np.argmin(ls)])
        # Just inside the polytope: a sweep along the normal should still
        # find the critical point (same hyperplane, same angle)
        point_in_same_polytope = point + (high * .999 - 1e-4) * go_direction
        print("high", high)
        solutions = do_better_sweep(point_in_same_polytope,
                                    normal,
                                    -1e-4 * high, 1e-4 * high,
                                    known_T=known_T)
        if len(solutions) >= 1:
            print("Correctly found", len(solutions))
        else:
            return False
        # Just outside the polytope: the hyperplane should have bent, so
        # the same sweep should find nothing
        point_in_different_polytope = point + (high * 1.1 + 1e-1) * go_direction
        solutions = do_better_sweep(point_in_different_polytope,
                                    normal,
                                    -1e-4 * high, 1e-4 * high,
                                    known_T=known_T)
        if len(solutions) == 0:
            print("Correctly found", len(solutions))
        else:
            return False
    #print("I THINK IT'S ON THE NEXT LAYER")
    if CHEATING:
        soln = [np.min(np.abs(x)) for x in cheat_get_inner_layers(point)]
        print(soln)
        # Sanity check: the smallest activation really is on the next layer
        assert np.argmin(soln) == len(known_T.A)+1
    return True
def find_plane_angle(known_T,
                     known_A, known_B,
                     multiple_intersection_point,
                     sign_at_init,
                     init_step,
                     exponential_base=1.5):
    """
    Given an input that's at the multiple intersection point, figure out how
    to continue along the path after it bends.

                 /     X : multiple intersection point
        ......../..    ---- : layer N hyperplane
        .      /  .    | : layer N+1 hyperplane that bends
        .     /   .
     --------X-----------
        .    |    .
        .    |    .
        .....|.....
             |
             |

    We need to make sure to bend, and not turn onto the layer N hyperplane.
    To do this we will draw a box around the X and intersect with the planes
    and determine the four coordinates. Then draw another box twice as big.
    The first layer plane will be the two points at a consistent angle.
    The second layer plane will have an inconsistent angle.
    Choose the inconsistent angle plane, and make sure we move to a new
    polytope and don't just go backwards to where we've already been.

    Returns (success_point, camefrom_point, next_init_step), or
    (None, None, 0) when no usable continuation could be found.
    Raises AcceptableFailure when the caller should retry from scratch.
    """
    success = None
    camefrom = None
    prev_iter_intersections = []
    # Pick two (nearly) orthogonal random direction vectors from {-1,1}^DIM
    # to span the plane of the box.
    while True:
        x_dir_base = np.sign(np.random.normal(size=DIM))/DIM**.5
        y_dir_base = np.sign(np.random.normal(size=DIM))/DIM**.5
        # When the input dimension is odd we can't have two orthogonal
        # vectors from {-1,1}^DIM
        if np.abs(np.dot(x_dir_base, y_dir_base)) <= DIM%2 + 1e-8:
            break
    MAX = 35
    start = [10] if init_step > 10 else []
    for stepsize in start + list(range(init_step, MAX)):
        print("\tTry stepping away", stepsize)
        # Box half-widths grow exponentially with the step counter
        x_dir = x_dir_base * (exponential_base**(stepsize-10))
        y_dir = y_dir_base * (exponential_base**(stepsize-10))
        # Draw the box as shown in the diagram above, and compute where
        # the critical points are.
        top = do_better_sweep(multiple_intersection_point + x_dir,
                              y_dir, -1, 1,
                              known_T=known_T)
        bot = do_better_sweep(multiple_intersection_point - x_dir,
                              y_dir, -1, 1,
                              known_T=known_T)
        left = do_better_sweep(multiple_intersection_point + y_dir,
                               x_dir, -1, 1,
                               known_T=known_T)
        right = do_better_sweep(multiple_intersection_point - y_dir,
                                x_dir, -1, 1,
                                known_T=known_T)
        intersections = top + bot + left + right
        # If we only have two critical points, and we're taking a big step,
        # then something is seriously messed up.
        # This is not an acceptable error. Just abort out and let's try to
        # do the whole thing again.
        if len(intersections) == 2 and stepsize >= 10:
            raise AcceptableFailure()
        if CHEATING:
            print("\tHAVE BOX INTERSECT COUNT", len(intersections))
            print("\t",len(left), len(right), len(top), len(bot))
        if (len(intersections) == 0 and stepsize > 15):# or (len(intersections) == 3 and stepsize > 5):
            # Probably we're in just a constant flat 0 region
            # At this point we're basically dead in the water.
            # Just fail up and try again.
            print("\tIt looks like we're in a flat region, raise failure")
            raise AcceptableFailure()
        # If we somehow went from almost no critical points to more than 4,
        # then we've really messed up.
        # Just fail out and let's hope next time it doesn't happen.
        if len(intersections) > 4 and len(prev_iter_intersections) < 2:
            print("\tWe didn't get enough inner points")
            if exponential_base == 1.2:
                print("\tIt didn't work a second time")
                return None, None, 0
            else:
                print("\tTry with smaller step")
                return find_plane_angle(known_T,
                                        known_A, known_B,
                                        multiple_intersection_point,
                                        sign_at_init,
                                        init_step,
                                        exponential_base=1.2)
        # This is the good, expected code-path.
        # We've seen four intersections at least twice before, and now
        # we're seeing more than 4.
        if (len(intersections) > 4 or stepsize > 20) and len(prev_iter_intersections) >= 2:
            next_intersections = np.array(prev_iter_intersections[-1])
            intersections = np.array(prev_iter_intersections[-2])
            # Let's first figure out what points are responsible for the prior-layer neurons
            # being zero, and which are from the current-layer neuron being zero
            candidate = []
            for i,a in enumerate(intersections):
                for j,b in enumerate(intersections):
                    if i == j: continue
                    score = np.sum(((a+b)/2-multiple_intersection_point)**2)
                    a_to_b = b-a
                    a_to_b /= np.sum(a_to_b**2)**.5
                    # Low variance => the larger box's points lie on the
                    # same line through a and b, i.e. the plane is straight
                    variance = np.std((next_intersections-a)/a_to_b,axis=1)
                    best_variance = np.min(variance)
                    #print(i,j,score, best_variance)
                    candidate.append((best_variance, i, j))
            if sorted(candidate)[3][0] < 1e-8:
                # It looks like both lines are linear here
                # We can't distinguish what way is the next best way to go.
                print("\tFailed the box continuation finding procedure. (1)")
                print("\t",candidate)
                raise AcceptableFailure()
            # Sometimes life is just ugly, and nothing wants to work.
            # Just abort.
            err, index_0, index_1 = min(candidate)
            if err/max(candidate)[0] > 1e-5:
                return None, None, 0
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # correct dtype here.
            prior_layer_near_zero = np.zeros(4, dtype=bool)
            prior_layer_near_zero[index_0] = True
            prior_layer_near_zero[index_1] = True
            # Now let's walk through each of these points and check that everything looks sane.
            should_fail = False
            for critical_point, is_prior_layer_zero in zip(intersections,prior_layer_near_zero):
                vs = known_T.extend_by(known_A,known_B).get_hidden_layers(critical_point)
                #print("VS IS", vs)
                #print("Is prior", is_prior_layer_zero)
                #if CHEATING:
                #    print(cheat_get_inner_layers(critical_point))
                if is_prior_layer_zero:
                    # We expect some prior-layer activation to be zero here.
                    if all([np.min(np.abs(x)) > 1e-5 for x in vs]):
                        # If it looks like it's not actually zero, then brutally fail.
                        print("\tAbort 1: failed to find a valid box")
                        should_fail = True
                if any([np.min(np.abs(x)) < 1e-10 for x in vs]):
                    # A prior-layer activation IS zero here...
                    if not is_prior_layer_zero:
                        # ...but we didn't expect it to be. Brutally fail.
                        print("\tAbort 2: failed to find a valid box")
                        should_fail = True
            if should_fail:
                return None, None, 0
            # Done with error checking, life is good here.
            # Find the direction that corresponds to the next direction we can move in
            # and continue our search from that point.
            for critical_point, is_prior_layer_zero in zip(intersections,prior_layer_near_zero):
                sign_at_crit = sign_to_int(get_polytope_at(known_T,
                                                           known_A, known_B,
                                                           critical_point))
                print("\tMove to", sign_at_crit, 'versus', sign_at_init, is_prior_layer_zero)
                if not is_prior_layer_zero:
                    if sign_at_crit != sign_at_init:
                        # New polytope: this is where the path continues
                        success = critical_point
                        if CHEATING:
                            print('\tinner at success', cheat_get_inner_layers(success))
                        print("\tSucceeded")
                    else:
                        # Same polytope we arrived from
                        camefrom = critical_point
            # If we didn't get a solution, then abort out.
            # Probably what happened here is that we got more than four points
            # on the box but didn't see exactly four points on the box twice before
            # this means we should decrease the initial step size and try again.
            if success is None:
                print("\tFailed the box continuation finding procedure. (2)")
                raise AcceptableFailure()
            #assert success is not None
            break
        if len(intersections) == 4:
            prev_iter_intersections.append(intersections)
    return success, camefrom, min(stepsize, MAX-3)
def binary_search_towards_slow(known_T, known_A, known_B, start_point,
                               initial_signs, go_direction, maxstep=1e6):
    """
    Query-based reference version of binary_search_towards: walk from
    start_point along go_direction and binary-search for the farthest
    distance that stays inside the polytope identified by initial_signs.

    Returns (point_just_past_the_boundary, distance), or (None, None) if
    the entire [0, maxstep] range stays in the same polytope (no boundary
    was found).
    """
    low = 0
    high = maxstep
    # Guard: if maxstep <= 1e-8 the loop below never runs; initialize mid
    # so the "never searched" check stays well-defined (was a potential
    # NameError).
    mid = maxstep
    while high-low > 1e-8:
        mid = (high+low)/2
        query_point = start_point + mid * go_direction
        next_signs = get_polytope_at(known_T, known_A, known_B,
                                     query_point)
        if initial_signs == next_signs:
            low = mid
        else:
            high = mid
    #print('check',np.abs(mid - can_go_dist))
    next_signs = get_polytope_at(known_T, known_A, known_B,
                                 start_point + low * go_direction)
    if next_signs != initial_signs:
        # It is extremely unlikely, but possible, for us to end up
        # skipping over the region of interest.
        # If this happens then don't step as far and try again.
        # This has only ever happened once, but just in case....
        print("Well this is awkward")
        # NOTE(review): this retries via the fast (gradient-based)
        # binary_search_towards rather than recursing into this slow
        # version -- presumably intentional, but verify.
        return binary_search_towards(known_T, known_A, known_B, start_point, initial_signs, go_direction, maxstep=maxstep/10)
    # If mid ended at the top of the range, the search never saw a sign
    # change. (This was hard-coded as 1e6-1, which is wrong whenever a
    # smaller maxstep is passed in.)
    if mid > maxstep-1:
        return None, None
    else:
        a_bit_further = start_point + (high+1e-4)*go_direction
        return a_bit_further, high
# Cache for the jitted gradient function: a tuple
# (known_T, known_A, known_B, jitted_grads_fn), keyed by object identity so
# the expensive jax trace/compile is reused across calls with the same layers.
PREV_GRAD = None
def binary_search_towards(known_T, known_A, known_B, start_point, initial_signs, go_direction, maxstep=1e6):
    """
    Compute how far we can walk along the hyperplane until it is in a
    different polytope from a prior layer.
    It is okay if it's in a different polytope in a *later* layer, because
    it will still have the same angle.
    (but do it analytically by looking at the signs of the first layer)
    this requires no queries and could be done with math but instead
    of thinking I'm just going to run binary search.

    Returns (point_just_past_the_boundary, distance). Raises
    AcceptableFailure if no hidden unit decreases along go_direction.
    """
    global PREV_GRAD
    #_, slow_ans = binary_search_towards_slow(known_T, known_A, known_B, start_point, initial_signs, go_direction, maxstep)
    plus_T = known_T.extend_by(known_A, known_B)
    # this is the hidden state
    initial_hidden = np.array(plus_T.get_hidden_layers(start_point, flat=True))
    # Finite-difference estimate of the directional derivative; kept for
    # reference/debugging but superseded by the exact jax gradient below.
    delta_hidden_np = (np.array(plus_T.get_hidden_layers(start_point + 1e-6 * go_direction, flat=True)) - initial_hidden) * 1e6
    #
    #can_go_dist_all = initial_hidden / delta_hidden
    # Rebuild (and re-jit) the gradient function only when any of the layer
    # objects changed identity since the previous call.
    if PREV_GRAD is None or PREV_GRAD[0] is not known_T or PREV_GRAD[1] is not known_A or PREV_GRAD[2] is not known_B:
        def get_grad(x, i):
            # i-th flattened hidden activation; note np=jnp switches the
            # project's layer code to jax ops so it is differentiable
            initial_hidden = plus_T.get_hidden_layers(x, flat=True, np=jnp)
            return initial_hidden[i]
        g = jax.jit(jax.grad(get_grad))
        def grads(start_point, go_direction):
            # Directional derivative of every hidden unit along go_direction
            return jnp.array([jnp.dot(g(start_point, i), go_direction) for i in range(initial_hidden.shape[0])])
        PREV_GRAD = (known_T, known_A, known_B, jax.jit(grads))
    else:
        grads = PREV_GRAD[3]
    delta_hidden = grads(start_point, go_direction)
    # Distance at which each hidden unit crosses zero; only negative ratios
    # correspond to units moving towards zero in this direction
    can_go_dist_all = np.array(initial_hidden / delta_hidden)
    can_go_dist = -can_go_dist_all[can_go_dist_all<0]
    if len(can_go_dist) == 0:
        print("Can't go anywhere at all")
        raise AcceptableFailure()
    can_go_dist = np.min(can_go_dist)
    # Step just past the first crossing so the caller lands in the next polytope
    a_bit_further = start_point + (can_go_dist+1e-4)*go_direction
    return a_bit_further, can_go_dist
def follow_hyperplane(LAYER, start_point, known_T, known_A, known_B,
                      history=None, MAX_POINTS=1e3, only_need_positive=False):
    """
    This is the ugly algorithm that will let us recover sign for expansive networks.
    Assumes we have extracted up to layer K-1 correctly, and layer K up to sign.

    start_point is a neuron on layer K+1

    known_T is the transformation that computes up to layer K-1, with
    known_A and known_B being the layer K matrix up to sign.

    We're going to come up with a bunch of different inputs,
    each of which has the same critical point held constant at zero.

    Returns (points_on_plane, finished): `finished` is True once both sides
    of every layer-K neuron have been observed (or just the positive side
    when only_need_positive is set), False on abort/failure.

    Fixed: `history` previously used a mutable default argument ([]), which
    silently shared accumulated state between calls that omitted it.
    """
    # Sentinel instead of a mutable default; callers that pass history= are
    # unaffected.
    history = [] if history is None else history

    def choose_new_direction_from_minimize(previous_axis):
        """
        Given the current point which is at a critical point of the next
        layer neuron, compute which direction we should travel to continue
        with finding more points on this hyperplane.

        Our goal is going to be to pick a direction that lets us explore
        a new part of the space we haven't seen before.
        """
        print("Choose a new direction to travel in")
        if len(history) == 0:
            # First call: nothing recorded yet, stay put and target neuron 0.
            which_to_change = 0
            new_perp_dir = perp_dir
            new_start_point = start_point
            initial_signs = get_polytope_at(known_T, known_A, known_B, start_point)

            # If we're in the 1 region of the polytope then we try to make it smaller
            # otherwise make it bigger
            fn = min if initial_signs[0] == 1 else max
        else:
            neuron_values = np.array([x[1] for x in history])

            neuron_positive_count = np.sum(neuron_values>1,axis=0)
            neuron_negative_count = np.sum(neuron_values<-1,axis=0)

            mean_plus_neuron_value = neuron_positive_count/(neuron_positive_count + neuron_negative_count + 1)
            mean_minus_neuron_value = neuron_negative_count/(neuron_positive_count + neuron_negative_count + 1)

            # we want to find values that are consistently 0 or 1
            # So map 0 -> 0 and 1 -> 0 and the middle to higher values
            if only_need_positive:
                neuron_consistency = mean_plus_neuron_value
            else:
                neuron_consistency = mean_plus_neuron_value * mean_minus_neuron_value

            # Print out how much progress we've made.
            # This estimate is probably worse than Windows 95's estimated time remaining.
            # At least it's monotonic. Be thankful for that.
            print("Progress", "%.1f"%int(np.mean(neuron_consistency!=0)*100)+"%")
            print("Counts on each side of each neuron")
            print(neuron_positive_count)
            print(neuron_negative_count)

            # Choose the smallest value, which is the most consistent
            which_to_change = np.argmin(neuron_consistency)

            print("Try to explore the other side of neuron", which_to_change)

            if which_to_change != previous_axis:
                if previous_axis is not None and neuron_consistency[previous_axis] == neuron_consistency[which_to_change]:
                    # If the previous thing we were working towards has the same value as this one
                    # the don't change our mind and just keep going at that one
                    # (almost always--sometimes we can get stuck, let us get unstuck)
                    which_to_change = previous_axis
                    new_start_point = start_point
                    new_perp_dir = perp_dir
                else:
                    # Among the equally-consistent axes, restart from the most
                    # recent history point whose hidden value is closest to zero.
                    valid_axes = np.where(neuron_consistency == neuron_consistency[which_to_change])[0]

                    best = (np.inf, None, None)

                    for _, potential_hidden_vector, potential_point in history[-1:]:
                        for potential_axis in valid_axes:
                            value = potential_hidden_vector[potential_axis]
                            if np.abs(value) < best[0]:
                                best = (np.abs(value), potential_axis, potential_point)

                    _, which_to_change, new_start_point = best
                    new_perp_dir = perp_dir
            else:
                new_start_point = start_point
                new_perp_dir = perp_dir

            # If we're in the 1 region of the polytope then we try to make it smaller
            # otherwise make it bigger
            fn = min if neuron_positive_count[which_to_change] > neuron_negative_count[which_to_change] else max
            # NOTE(review): arg_fn is computed but never used below.
            arg_fn = np.argmin if neuron_positive_count[which_to_change] > neuron_negative_count[which_to_change] else np.argmax
            print("Changing", which_to_change, 'to flip sides because mean is', mean_plus_neuron_value[which_to_change])

        val = matmul(known_T.forward(new_start_point, with_relu=True), known_A, known_B)[which_to_change]

        initial_signs = get_polytope_at(known_T, known_A, known_B, new_start_point)

        # Now we're going to figure out what direction makes this biggest/smallest
        # this doesn't take any queries
        # There's probably an analytical way to do this.
        # But thinking is hard. Just try 1000 random angles.
        # There are no queries involved in this process.
        choices = []
        for _ in range(1000):
            random_dir = np.random.normal(size=DIM)
            # Project out the component along the hyperplane normal so we
            # stay on the critical hyperplane.
            perp_component = np.dot(random_dir,new_perp_dir)/(np.dot(new_perp_dir, new_perp_dir)) * new_perp_dir
            parallel_dir = random_dir - perp_component

            # This is the direction we're going to travel in.
            go_direction = parallel_dir/np.sum(parallel_dir**2)**.5

            try:
                a_bit_further, high = binary_search_towards(known_T,
                                                            known_A, known_B,
                                                            new_start_point,
                                                            initial_signs,
                                                            go_direction)
            except AcceptableFailure:
                continue
            if a_bit_further is None:
                continue

            # choose a direction that makes the Kth value go down by the most
            val = matmul(known_T.forward(a_bit_further[np.newaxis,:], with_relu=True), known_A, known_B)[0][which_to_change]

            choices.append([val,
                            new_start_point + high*go_direction])

        best_value, multiple_intersection_point = fn(choices, key=lambda x: x[0])

        print('Value', best_value)
        return new_start_point, multiple_intersection_point, which_to_change

    ###################################################
    ### Actual code to do the sign recovery starts. ###
    ###################################################

    start_box_step = 0
    points_on_plane = []

    if CHEATING:
        layer = np.abs(cheat_get_inner_layers(np.array(start_point))[LAYER+1])
        print("Layer", layer)
        which_is_zero = np.argmin(layer)

    current_change_axis = 0

    while True:
        print("\n\n")
        print("-----"*10)

        if CHEATING:
            # Debug-only invariant: the same neuron must stay at zero.
            layer = np.abs(cheat_get_inner_layers(np.array(start_point))[LAYER+1])
            which_is_zero_2 = np.argmin(np.abs(layer))
            if which_is_zero_2 != which_is_zero:
                print("STARTED WITH", which_is_zero, "NOW IS", which_is_zero_2)
                print(layer)
                raise

        # Keep track of where we've been, so we can go to new places.
        which_polytope = get_polytope_at(known_T, known_A, known_B, start_point, False) # [-1 1 -1]
        hidden_vector = get_hidden_at(known_T, known_A, known_B, LAYER, start_point, False)
        sign_at_init = sign_to_int(which_polytope) # 0b010 -> 2

        print("Number of collected points", len(points_on_plane))
        if len(points_on_plane) > MAX_POINTS:
            return points_on_plane, False

        neuron_values = np.array([x[1] for x in history])

        neuron_positive_count = np.sum(neuron_values>1,axis=0)
        neuron_negative_count = np.sum(neuron_values<-1,axis=0)

        if (np.all(neuron_positive_count > 0) and np.all(neuron_negative_count > 0)) or \
           (only_need_positive and np.all(neuron_positive_count > 0)):
            print("Have all the points we need (1)")
            print(query_count)
            print(neuron_positive_count)
            print(neuron_negative_count)

            neuron_values = np.array([get_hidden_at(known_T, known_A, known_B, LAYER, x, False) for x in points_on_plane])

            neuron_positive_count = np.sum(neuron_values>1,axis=0)
            neuron_negative_count = np.sum(neuron_values<-1,axis=0)

            print(neuron_positive_count)
            print(neuron_negative_count)

            return points_on_plane, True

        # 1. find a way to move along the hyperplane by computing the normal
        # direction using the ratios function. Then find a parallel direction.
        try:
            perp_dir = get_ratios_lstsq(0, [start_point], [range(DIM)], KnownT([], []), eps=1e-5)[0].flatten()
        except AcceptableFailure:
            print("Failed to compute ratio at start point. Something very bad happened.")
            return points_on_plane, False

        # Record these points.
        history.append((which_polytope,
                        hidden_vector,
                        np.copy(start_point)))

        # We can't just pick any parallel direction. If we did, then we would
        # not end up covering much of the input space.

        # Instead, we're going to figure out which layer-1 hyperplanes are "visible"
        # from the current point. Then we're going to try and go reach all of them.

        # This is the point at which the first and second layers intersect.
        start_point, multiple_intersection_point, new_change_axis = choose_new_direction_from_minimize(current_change_axis)

        if new_change_axis != current_change_axis:
            start_point, multiple_intersection_point, current_change_axis = choose_new_direction_from_minimize(None)

        # Refine the direction we're going to travel in---stay numerically stable.
        towards_multiple_direction = multiple_intersection_point - start_point
        step_distance = np.sum(towards_multiple_direction**2)**.5

        print("Distance we need to step:", step_distance)

        # NOTE(review): `or True` makes this branch unconditional; kept as-is.
        if step_distance > 1 or True:
            mid_point = 1e-4 * towards_multiple_direction/np.sum(towards_multiple_direction**2)**.5 + start_point

            # Kept (although unused) so the RNG stream matches the original.
            random_dir = np.random.normal(size=DIM)

            mid_points = do_better_sweep(mid_point, perp_dir/np.sum(perp_dir**2)**.5,
                                         low=-1e-3,
                                         high=1e-3,
                                         known_T=known_T)

            if len(mid_points) > 0:
                # Snap back onto the nearest detected critical point.
                mid_point = mid_points[np.argmin(np.sum((mid_point-mid_points)**2,axis=1))]

                towards_multiple_direction = mid_point - start_point
                towards_multiple_direction = towards_multiple_direction/np.sum(towards_multiple_direction**2)**.5

                initial_signs = get_polytope_at(known_T, known_A, known_B, start_point)
                _, high = binary_search_towards(known_T,
                                                known_A, known_B,
                                                start_point,
                                                initial_signs,
                                                towards_multiple_direction)

                multiple_intersection_point = towards_multiple_direction * high + start_point

        # Find the angle of the next hyperplane
        # First, take random steps away from the intersection point
        # Then run the search algorithm to find some intersections
        # what we find will either be a layer-1 or layer-2 intersection.
        print("Now try to find the continuation direction")
        success = None
        while success is None:
            if start_box_step < 0:
                # Too many failures: restart from a random historical point.
                start_box_step = 0
                print("VERY BAD FAILURE")
                print("Choose a new random point to start from")
                which_point = np.random.randint(0, len(history))
                start_point = history[which_point][2]
                print("New point is", which_point)
                current_change_axis = np.random.randint(0, sizes[LAYER+1])
                print("New axis to change", current_change_axis)
                break

            print("\tStart the box step with size", start_box_step)
            try:
                success, camefrom, stepsize = find_plane_angle(known_T,
                                                               known_A, known_B,
                                                               multiple_intersection_point,
                                                               sign_at_init,
                                                               start_box_step)
            except AcceptableFailure:
                # Go back to the top and try with a new start point
                print("\tOkay we need to try with a new start point")
                start_box_step = -10

            start_box_step -= 2

        if success is None:
            continue

        val = matmul(known_T.forward(multiple_intersection_point, with_relu=True), known_A, known_B)[new_change_axis]
        print("Value at multiple:", val)
        val = matmul(known_T.forward(success, with_relu=True), known_A, known_B)[new_change_axis]
        print("Value at success:", val)

        if stepsize < 10:
            new_move_direction = success - multiple_intersection_point

            # We don't want to be right next to the multiple intersection point.
            # So let's binary search to find how far away we can go while remaining in this polytope.
            # Then we'll go half as far as we can maximally go.

            initial_signs = get_polytope_at(known_T, known_A, known_B, success)
            print("polytope at initial", sign_to_int(initial_signs))
            low = 0
            high = 1
            while high-low > 1e-2:
                mid = (high+low)/2
                query_point = multiple_intersection_point + mid * new_move_direction
                next_signs = get_polytope_at(known_T, known_A, known_B, query_point)
                print("polytope at", mid, sign_to_int(next_signs), "%x"%(sign_to_int(next_signs)^sign_to_int(initial_signs)))
                if initial_signs == next_signs:
                    low = mid
                else:
                    high = mid
            print("GO TO", mid)

            success = multiple_intersection_point + (mid/2) * new_move_direction

            val = matmul(known_T.forward(success, with_relu=True), known_A, known_B)[new_change_axis]
            print("Value at moved success:", val)

        print("Adding the points to the set of known good points")

        points_on_plane.append(start_point)

        if camefrom is not None:
            points_on_plane.append(camefrom)

        start_point = success
        start_box_step = max(stepsize-1,0)

    return points_on_plane, False
def is_solution_map(args):
    """Evaluate is_solution for every candidate in a half-open index range.

    Helper designed for pool.map: `args` is (bounds, extra_tuple) where
    bounds = (start, stop) delimits the sign assignments to test and
    extra_tuple is forwarded unchanged to is_solution. Returns the list of
    is_solution results, one per index in [start, stop).
    """
    bounds, extra_tuple = args
    # Idiomatic comprehension instead of manual append loop.
    return [is_solution((i, extra_tuple)) for i in range(bounds[0], bounds[1])]
def is_solution(input_tuple):
    """Test one candidate sign assignment for the layer matrix.

    input_tuple is (signs, (known_A0, known_B0, LAYER, known_hidden_so_far,
    K, responses)); `signs` is an integer whose K low bits select a -1/+1
    flip per column. Returns ((residual, new_signs, solution), ok) with
    ok == 1 when the least-squares fit under this assignment is consistent.
    """
    signs, (known_A0, known_B0, LAYER, known_hidden_so_far, K, responses) = input_tuple

    # Decode bit i of `signs` to -1/+1; the (1<<K) offset fixes the binary
    # representation at exactly K characters.
    new_signs = np.array([-1 if x == '0' else 1 for x in bin((1<<K)+signs)[3:]])

    # Progress ticker only (printed more often when CHEATING).
    if CHEATING:
        if signs%1001 == 0:
            print('tick',signs)
    else:
        if signs%100001 == 0:
            # This isn't cheating, but makes things prettier
            print('tick',signs)

    guess_A0 = known_A0 * new_signs
    guess_B0 = known_B0 * new_signs

    # We're going to set up a system of equations here
    # The matrix is going to have a bunch of rows (equal to number of equations)
    # each row is of the form
    #    [h_0  h_1  h_2 h_3 h_4 ... h_n 1]
    # where h_n is the hidden vector after multiplying by the guessed matrix.
    # and 1 is the weight for the bias term
    inputs = matmul(known_hidden_so_far, guess_A0, guess_B0)
    inputs[inputs < 0] = 0  # apply the ReLU: clamp negatives to zero

    if responses is None:
        # No targets supplied: fit against the all-ones vector.
        responses = np.ones((inputs.shape[0], 1))
    else:
        # Append a constant-1 column so the fit can absorb a bias term.
        inputs = np.concatenate([inputs, np.ones((inputs.shape[0],1))], axis=1)
        pass

    solution, res, _, _ = scipy.linalg.lstsq(inputs, responses)
    bias = np.dot(inputs, solution)-responses
    # Quality score is the spread of the residuals (lstsq's own `res` is
    # deliberately overwritten here).
    res = np.std(bias)

    if res > 1e-2:
        # Residual too large: this sign assignment does not fit.
        return (res, new_signs, solution), 0

    bias = bias.mean(axis=0)

    # Normalize by the leading coefficient and drop the bias row.
    mat = (solution/solution[0][0])[:-1,:]

    if np.any(np.isnan(mat)) or np.any(np.isinf(mat)):
        print("Invalid solution")
        return (res, new_signs, solution), 0
    else:
        s = solution/solution[0][0]
        s[np.abs(s)<1e-14] = 0
        return (res, new_signs, solution), 1
def solve_contractive_sign(known_T, weight, bias, LAYER):
    """Recover per-neuron signs for a contractive network.

    For each neuron of the layer being solved, compute model inputs whose
    hidden vector has that neuron pushed to +10 and to -10, query the model,
    and compare against the baseline output: whichever perturbation moves
    the output the least indicates the sign (see inverted_if_small below).
    Returns a list of +/-1, one per neuron.
    """
    print("Solve the extraction problem for contractive networks")

    def get_preimage(hidden):
        """Invert the network from target `hidden` back to an input point.

        Solves one bounded least-squares problem per layer, in reverse
        order; hidden layers are constrained to non-negative values
        (post-ReLU), the input layer (i == 0) is unconstrained.
        """
        preimage = hidden
        for i,(my_A,my_B) in reversed(list(enumerate(zip(known_T.A+[weight], known_T.B+[bias])))):
            if i == 0:
                res = scipy.optimize.lsq_linear(my_A.T, preimage-my_B,
                                                bounds=(-np.inf, np.inf))
            else:
                res = scipy.optimize.lsq_linear(my_A.T, preimage-my_B,
                                                bounds=(0, np.inf))
            preimage = res.x
        return preimage[np.newaxis,:]

    # Baseline: preimage of the all-zero hidden vector on this layer.
    hidden = np.zeros((sizes[LAYER+1]))
    preimage = get_preimage(hidden)

    extended_T = known_T.extend_by(weight,bias)

    standard_out = run(preimage)

    signs = []
    for axis in range(len(hidden)):
        h = np.array(hidden)
        h[axis] = 10
        preimage_plus = get_preimage(h)
        h[axis] = -10
        preimage_minus = get_preimage(h)

        print("Confirm preimage")
        # Sanity check: the zero-target preimage must map (nearly) to zero.
        if np.any(extended_T.forward(preimage) > 1e-5):
            raise AcceptableFailure()

        out_plus = run(preimage_plus)
        out_minus = run(preimage_minus)

        print(standard_out, out_plus, out_minus)

        inverted_if_small = np.sum(np.abs(out_plus-standard_out))
        not_inverted_if_small = np.sum(np.abs(out_minus-standard_out))

        print("One of these should be small",
              inverted_if_small,
              not_inverted_if_small)

        if inverted_if_small < not_inverted_if_small:
            signs.append(-1)
        else:
            signs.append(1)
    return signs
def solve_layer_sign(known_T, known_A0, known_B0, critical_points, LAYER,
                     already_checked_critical_points=False,
                     only_need_positive=False, l1_mask=None):
    """
    Compute the signs for one layer of the network.

    known_T is the transformation that computes up to layer K-1, with
    known_A and known_B being the layer K matrix up to sign.

    Gathers points lying on a single neuron's critical hyperplane via
    follow_hyperplane, then brute-forces all 2^K sign assignments in
    parallel with is_solution_map, returning (best_signs, critical_points).

    NOTE(review): `l1_mask` is accepted but never read in this body —
    confirm whether it is still needed by callers.
    """
    def get_critical_points():
        # Lazily yield inputs confirmed to be critical points of this layer.
        print("Init")
        print(critical_points)
        for point in critical_points:
            print("Tick")
            if already_checked_critical_points or is_on_following_layer(known_T, known_A0, known_B0, point):
                print("Found layer N point at ", point, already_checked_critical_points)
                yield point

    get_critical_point = get_critical_points()

    print("Start looking for critical point")
    MAX_POINTS = 200
    which_point = next(get_critical_point)
    print("Done looking for critical point")

    initial_points = []
    history = []
    pts = []
    if already_checked_critical_points:
        # Pre-seed history with every known critical point.
        for point in get_critical_point:
            initial_points.append(point)
            pts.append(point)
            which_polytope = get_polytope_at(known_T, known_A0, known_B0, point, False) # [-1 1 -1]
            hidden_vector = get_hidden_at(known_T, known_A0, known_B0, LAYER, point, False)
            if CHEATING:
                layers = cheat_get_inner_layers(point)
                print('have',[(np.argmin(np.abs(x)),np.min(np.abs(x))) for x in layers])

            history.append((which_polytope,
                            hidden_vector,
                            np.copy(point)))

    while True:
        if not already_checked_critical_points:
            # Fresh search state for each new starting point.
            history = []
            pts = []
        prev_count = -10
        good = False
        # Keep re-running the walk while it is still making progress
        # (at least 3 new points per restart).
        while len(pts) > prev_count+2:
            print("======"*10)
            print("RESTART SEARCH", len(pts), prev_count)
            print(which_point)
            prev_count = len(pts)
            more_points, done = follow_hyperplane(LAYER, which_point,
                                                  known_T,
                                                  known_A0, known_B0,
                                                  history=history,
                                                  only_need_positive=only_need_positive)
            pts.extend(more_points)
            if len(pts) >= MAX_POINTS:
                print("Have enough; break")
                break
            if len(pts) == 0:
                break

            neuron_values = known_T.extend_by(known_A0, known_B0).forward(pts)

            neuron_positive_count = np.sum(neuron_values>1,axis=0)
            neuron_negative_count = np.sum(neuron_values<-1,axis=0)
            print("Counts")
            print(neuron_positive_count)
            print(neuron_negative_count)

            print("SHOULD BE DONE?", done, only_need_positive)
            if done and only_need_positive:
                good = True
                break
            if np.all(neuron_positive_count > 0) and np.all(neuron_negative_count > 0) or \
               (only_need_positive and np.all(neuron_positive_count > 0)):
                print("Have all the points we need (2)")
                good = True
                break

        if len(pts) < MAX_POINTS/2 and good == False:
            # Not enough coverage yet: restart from another critical point.
            print("======="*10)
            print("Select a new point to start from")
            print("======="*10)
            if already_checked_critical_points:
                print("CHOOSE FROM", len(initial_points), initial_points)
                which_point = initial_points[np.random.randint(0,len(initial_points)-1)]
            else:
                which_point = next(get_critical_point)
        else:
            print("Abort")
            break

    critical_points = np.array(pts)

    print("Now have critical points", len(critical_points))

    if CHEATING:
        layer = [[np.min(np.abs(x)) for x in cheat_get_inner_layers(x[np.newaxis,:])][LAYER+1] for x in critical_points]

        layer = np.abs(cheat_get_inner_layers(np.array(critical_points))[LAYER+1])

        print(layer)

        which_is_zero = np.argmin(layer,axis=1)
        print("Which neuron is zero?", which_is_zero)

        which_is_zero = which_is_zero[0]

    print("Query count", query_count)

    # Partition the 2^K sign assignments into chunks for the worker pool.
    K = neuron_count[LAYER+1]
    MAX = (1<<K)

    if already_checked_critical_points:
        bounds = [(MAX-1, MAX)]
    else:
        bounds = []
        for i in range(1024):
            bounds.append(((MAX*i)//1024, (MAX*(i+1))//1024))

    print("Created a list")

    known_hidden_so_far = known_T.forward(critical_points, with_relu=True)
    # NOTE(review): `debug` is never used below.
    debug = False

    start_time = time.time()

    extra_args_tup = (known_A0, known_B0, LAYER, known_hidden_so_far, K, None)

    all_res = pool[0].map(is_solution_map, [(bound, extra_args_tup) for bound in bounds])

    end_time = time.time()

    print("Done map, now collect results")
    print("Took", end_time-start_time, 'seconds')

    all_res = [x for y in all_res for x in y]

    scores = [r[0] for r in all_res]
    solution_attempts = sum([r[1] for r in all_res])
    total_attempts = len(all_res)

    print("Attempts at solution:", (solution_attempts), 'out of', total_attempts)

    std = np.std([x[0] for x in scores])
    print('std',std)
    print('median', np.median([x[0] for x in scores]))
    print('min', np.min([x[0] for x in scores]))

    # Best (lowest-residual) sign assignment wins.
    return min(scores,key=lambda x: x[0])[1], critical_points
|
google-research/cryptanalytic-model-extraction
|
src/sign_recovery.py
|
Python
|
apache-2.0
| 42,098
|
[
"NEURON"
] |
9a07bc78e50dc31d44d188510273e5bae8c20de2ec3282256f5f8a4ebbfe170f
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from mock import Mock, patch
from nose.tools import eq_, raises
from djfactory.manage import validate_settings
from djfactory.settings_base import (get_apps, get_middleware,
get_template_context_processors)
@patch.object(settings, 'DEBUG', True)
@patch.object(settings, 'HMAC_KEYS', {'2012-06-06': 'secret'})
@patch.object(settings, 'SECRET_KEY', 'any random value')
@patch.object(settings, 'SESSION_COOKIE_SECURE', False)
def test_insecure_session_cookie_for_dev():
    """An insecure session cookie is tolerated while DEBUG is enabled."""
    validate_settings(settings)
@raises(ImproperlyConfigured)
@patch.object(settings, 'DEBUG', False)
@patch.object(settings, 'HMAC_KEYS', {'2012-06-06': 'secret'})
@patch.object(settings, 'SECRET_KEY', '')
@patch.object(settings, 'SESSION_COOKIE_SECURE', True)
def test_empty_secret_key_for_prod():
    """An empty SECRET_KEY must be rejected when DEBUG is off."""
    validate_settings(settings)
@patch.object(settings, 'DEBUG', False)
@patch.object(settings, 'HMAC_KEYS', {'2012-06-06': 'secret'})
@patch.object(settings, 'SECRET_KEY', 'any random value')
@patch.object(settings, 'SESSION_COOKIE_SECURE', True)
def test_secret_key_ok():
    """Validate required security-related settings.

    Don't raise exceptions when required settings are set properly."""
    validate_settings(settings)
@raises(ImproperlyConfigured)
@patch.object(settings, 'DEBUG', False)
@patch.object(settings, 'HMAC_KEYS', {'2012-06-06': 'secret'})
@patch.object(settings, 'SECRET_KEY', 'any random value')
@patch.object(settings, 'SESSION_COOKIE_SECURE', None)
def test_session_cookie_ok():
    """Raise an exception if session cookies aren't secure in production."""
    # NOTE(review): despite the "_ok" suffix this test expects
    # ImproperlyConfigured (SESSION_COOKIE_SECURE=None with DEBUG=False);
    # the name looks like a copy/paste leftover — consider renaming.
    validate_settings(settings)
@patch.object(settings, 'DEBUG', True)
@patch.object(settings, 'HMAC_KEYS', {})
@patch.object(settings, 'SESSION_COOKIE_SECURE', False)
def test_empty_hmac_in_dev():
    """Empty HMAC_KEYS is acceptable in development (DEBUG on)."""
    # Should not raise an exception.
    validate_settings(settings)
@raises(ImproperlyConfigured)
@patch.object(settings, 'DEBUG', False)
@patch.object(settings, 'HMAC_KEYS', {})
@patch.object(settings, 'SESSION_COOKIE_SECURE', False)
def test_empty_hmac_in_prod():
    """Empty HMAC_KEYS must be rejected in production (DEBUG off)."""
    validate_settings(settings)
def test_get_apps():
    """get_apps() honors exclude/append against the supplied app list."""
    eq_(get_apps(exclude=('chico',),
                 current={'apps': ('groucho', 'harpo', 'chico')}),
        ('groucho', 'harpo'))
    eq_(get_apps(append=('zeppo',),
                 current={'apps': ('groucho', 'harpo', 'chico')}),
        ('groucho', 'harpo', 'chico', 'zeppo'))
    eq_(get_apps(exclude=('harpo', 'zeppo'), append=('chico',),
                 current={'apps': ('groucho', 'harpo', 'zeppo')}),
        ('groucho', 'chico'))
    # NOTE(review): ('djfactory') is a plain string, not a 1-tuple (missing
    # trailing comma) — presumably get_apps tolerates string arguments;
    # confirm against its implementation.
    eq_(get_apps(exclude=('djfactory'), append=('gummo',)), get_apps())
def test_get_middleware():
    """get_middleware() honors exclude/append (tuples or lists) against
    the supplied middleware list."""
    eq_(get_middleware(exclude=['larry', 'moe'],
                       current={'middleware': ('larry', 'curly', 'moe')}),
        ('curly',))
    eq_(get_middleware(append=('shemp', 'moe'),
                       current={'middleware': ('larry', 'curly')}),
        ('larry', 'curly', 'shemp', 'moe'))
    # NOTE(review): ('curly') is a plain string, not a 1-tuple (missing
    # trailing comma) — presumably tolerated by get_middleware; confirm.
    eq_(get_middleware(exclude=('curly'), append=['moe'],
                       current={'middleware': ('shemp', 'curly', 'larry')}),
        ('shemp', 'larry', 'moe'))
    eq_(get_middleware(append=['emil']), get_middleware())
def test_get_processors():
    """get_template_context_processors() honors exclude/append against the
    supplied processor list."""
    # NOTE(review): ('aramis') is a plain string, not a 1-tuple (missing
    # trailing comma) — presumably tolerated; confirm.
    eq_(get_template_context_processors(exclude=('aramis'),
                                        current={'processors': ('athos', 'porthos', 'aramis')}),
        ('athos', 'porthos'))
    eq_(get_template_context_processors(append=("d'artagnan",),
                                        current={'processors': ('athos', 'porthos')}),
        ('athos', 'porthos', "d'artagnan"))
    eq_(get_template_context_processors(exclude=['athos'], append=['aramis'],
                                        current={'processors': ('athos', 'porthos', "d'artagnan")}),
        ('porthos', "d'artagnan", 'aramis'))
    eq_(get_template_context_processors(append=['richelieu']),
        get_template_context_processors())
|
hfeeki/djfactory
|
tests/test_settings.py
|
Python
|
bsd-3-clause
| 3,830
|
[
"MOE"
] |
81d783cc00b0333363d71de496caacf69754c149c00fd2379466b6fc56ccd13c
|
import os
import unittest
from pymatgen.core import Molecule, Structure
from megnet.data.local_env import (AllAtomPairs, MinimumDistanceNNAll,
deserialize, get, serialize)
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def _equal(x, y):
if isinstance(x, list):
return all([_equal(i, j) for i, j in zip(x, y)])
elif isinstance(x, dict):
return all(_equal(x[i], y[i]) for i in x.keys())
else:
if x == y:
return True
else:
print(x, y)
return False
def _sort_neighbors(neighbors):
out = []
for n in neighbors:
out.append([sorted(n, key=lambda x: (x["weight"], x["site_index"]))])
return out
class TestLocalEnv(unittest.TestCase):
    """Tests for megnet's local-environment neighbor strategies."""

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a periodic BaTiO3 structure from a bundled CIF
        # and a small three-atom molecule (C with two O at +/-1 on x).
        cls.structure = Structure.from_file(os.path.join(MODULE_DIR, "cifs", "BaTiO3_mp-2998_computed.cif"))
        cls.molecule = Molecule(["C", "O", "O"], [[0, 0, 0], [-1, 0, 0], [1, 0, 0]])
        cls.mall = MinimumDistanceNNAll(4)
        cls.aapair = AllAtomPairs()

    def test_minimal_distance(self):
        # Bulk query and per-site queries must agree (order-insensitive,
        # hence the sort before comparison).
        neighbors1 = self.mall.get_all_nn_info(self.structure)
        neighbors2 = [self.mall.get_nn_info(self.structure, i) for i in range(len(self.structure))]
        self.assertTrue(_equal(_sort_neighbors(neighbors1), _sort_neighbors(neighbors2)))

    def test_all_atom_pairs(self):
        # In a 3-atom molecule each atom pairs with the other two.
        mol_pairs = self.aapair.get_all_nn_info(self.molecule)
        self.assertEqual(len(mol_pairs[0]), 2)

    def test_serialization(self):
        # serialize() must produce an MSONable-style dict, None must pass
        # through, and deserialize() must round-trip the config.
        mall = MinimumDistanceNNAll(4)
        config = serialize(mall)
        self.assertDictEqual(
            config, {"@module": "megnet.data.local_env", "@class": "MinimumDistanceNNAll", "cutoff": 4}
        )
        self.assertTrue(serialize(None) is None)
        mall2 = deserialize(config)
        self.assertTrue(isinstance(mall2, MinimumDistanceNNAll))
        self.assertTrue(mall2.cutoff == 4)

    def test_get(self):
        # get() resolves a strategy class by name.
        voronoi = get("VoronoiNN")
        self.assertTrue(voronoi.__name__ == "VoronoiNN")
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
|
materialsvirtuallab/megnet
|
megnet/data/tests/test_local_env.py
|
Python
|
bsd-3-clause
| 2,142
|
[
"pymatgen"
] |
05ec3e8b7adaa15eec66f4ce953b8376cf34c0fe238d3c103e169382358401d9
|
# -*- coding: utf-8 -*-
#
# IRCrypt: Addon for IRCrypt to enable key exchange via public key authentication
# ===============================================================================
#
# Copyright (C) 2013-2014
# Lars Kiesow <lkiesow@uos.de>
# Sven Haardiek <sven@haardiek.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# == About ======================================================================
# The weechat IRCrypt-KeyEx plug-in is an addon for the weechat IRCrypt
# plug-in to enable key exchange via public key exchange. The plug-in will
# create a RSA keypair and use this to do plublic key authentication with
# other users and to exchange symmetric keys.
#
# == Project ====================================================================
#
# This plug-in is part of the IRCrypt project. For mor information or to
# participate, please visit
#
# https://github.com/IRCrypt
#
#
# To report bugs, make suggestions, etc. for this particular plug-in, please
# have a look at:
#
# https://github.com/IRCrypt/ircrypt-weechat
#
import weechat, string, os, subprocess, base64, time, imp, sys
# Dont create .pyc file
sys.dont_write_bytecode = True
# Constants used in this script
SCRIPT_NAME = 'ircrypt-keyex'
SCRIPT_AUTHOR = 'Sven Haardiek <sven@haardiek.de>, Lars Kiesow <lkiesow@uos.de>'
SCRIPT_VERSION = 'SNAPSHOT'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'IRCrypt-KeyEx: Addon for IRCrypt to enable key exchange via public key authentication'
SCRIPT_HELP_TEXT = '''%(bold)sIRCrypt-KeyEx command options: %(normal)s
list List public key fingerprints
start [-server <server>] <nick> Start key exchange with nick
remove-public-key [-server <server>] <nick> Remove public key id for nick
%(bold)sExamples: %(normal)s
Start key exchange with a user
/ircrypt-keyex start nick
Remove public key identifier for a user:
/ircrypt-keyex remove-public-key nick
%(bold)sConfiguration: %(normal)s
Tip: You can list all options and what they are currently set to by executing:
/set ircrypt-keyex.*
%(bold)sircrypt-keyex.general.binary %(normal)s
This will set the GnuPG binary used for encryption and decryption. IRCrypt-keyex
will try to set this automatically.
''' % {'bold':weechat.color('bold'), 'normal':weechat.color('-bold')}
# Maximum characters per encrypted message part; presumably keeps each part
# within IRC line limits — confirm against the main IRCrypt plug-in.
MAX_PART_LEN     = 300
# Seconds after which stored message parts are considered stale and dropped.
MSG_PART_TIMEOUT = 300 # 5min

# Global variables and memory used to store message parts, pending requests,
# configuration options, keys, etc.
ircrypt = None                    # handle to the main IRCrypt plug-in (set elsewhere)
ircrypt_sym_key_memory = {}
ircrypt_config_file = None
ircrypt_config_section = {}
ircrypt_config_option = {}
ircrypt_asym_id = {}              # "server/nick" (lowercased) -> gpg key id
ircrypt_pub_keys_memory = {}
ircrypt_key_ex_memory = {}        # "server/nick" (lowercased) -> KeyExchange
ircrypt_gpg_homedir = None        # GnuPG home dir, usually ~/.weechat/ircrypt
ircrypt_gpg_id = None             # fingerprint of our own key (ircrypt_gpg_init)
class MeassageParts:
    '''Class used for storing parts of messages which were split after
    encryption due to their length.

    NOTE(review): the class name keeps its original (misspelled) form
    "Meassage" since external code may reference it.
    '''
    # Timestamp (time.time()) of the last update; 0 means never updated.
    modified = 0
    # Id of the most recently received part; parts are expected to arrive
    # with decreasing ids (next id == previous id - 1).
    last_id = None
    # Message content assembled so far (parts are prepended).
    message = ''

    def update(self, id, msg):
        '''This method updates an already existing message part by adding a new
        part to the old ones and updating the identifier of the latest received
        message part.
        '''
        # Check if id is correct. If not, throw away old parts.
        # NOTE(review): `self.last_id and ...` also skips this check when
        # last_id == 0 (falsy) — confirm ids never reach 0 here.
        if self.last_id and self.last_id != id+1:
            self.message = ''
        # Discard parts that are so old they probably belong to a different
        # message.
        if time.time() - self.modified > MSG_PART_TIMEOUT:
            self.message = ''
        self.last_id = id
        self.message = msg + self.message
        self.modified = time.time()
class KeyExchange:
    '''State of one pairwise key exchange.

    @pub_key_receive indicates whether the peer's public key has not yet
        been received
    @pub_key_send indicates whether our public key has not yet been sent
    @parts counts how many symmetric key parts have been folded in
    @sym_key is the symmetric key accumulated so far
    @sym_received indicates whether the symmetric key is complete
    '''
    pub_key_receive = False
    pub_key_send = False
    parts = 0
    sym_key = ''
    sym_received = False

    def __init__(self, pub_key_receive, pub_key_send):
        '''Record which public keys still need to travel in each direction.'''
        self.pub_key_receive = pub_key_receive
        self.pub_key_send = pub_key_send

    def update(self, keypart):
        '''Fold one more key part into the symmetric key.

        The first part is taken verbatim; every later part is combined
        with the accumulated key character-wise via XOR.
        '''
        if not self.sym_key:
            self.sym_key = keypart
        else:
            mixed = [chr(ord(a) ^ ord(b)) for a, b in zip(self.sym_key, keypart)]
            self.sym_key = ''.join(mixed)
        self.parts += 1
def ircrypt_gpg_init():
    '''Initialize GnuPG for the key-exchange plug-in.

    Creates the plug-in's private GPG home directory, looks for an existing
    secret key there and, if none is found, starts asynchronous generation
    of a 2048-bit RSA key pair via a weechat process hook. Returns a
    weechat return code (WEECHAT_RC_OK / WEECHAT_RC_ERROR).
    '''
    global ircrypt_gpg_homedir, ircrypt_gpg_id
    # This should usually be ~/.weechat/ircrypt
    ircrypt_gpg_homedir = '%s/ircrypt' % weechat.info_get("weechat_dir", "")
    try:
        os.mkdir(ircrypt_gpg_homedir, 0o700)
    except OSError:
        # Directory already exists (or cannot be created); carry on either way.
        pass

    # Probe for GPG key
    (ret, out, err) = ircrypt.ircrypt_gnupg(b'', '--homedir', ircrypt_gpg_homedir,
            '--list-secret-keys', '--with-fingerprint', '--with-colon')

    # GnuPG returncode
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        return weechat.WEECHAT_RC_ERROR
    elif err:
        ircrypt.ircrypt_warn(err.decode('utf-8'), '')

    # There is a secret key
    if out:
        try:
            # Pull the fingerprint out of GnuPG's colon-format listing.
            ircrypt_gpg_id = out.decode('utf-8').split('fpr')[-1].split('\n')[0].strip(':')
            ircrypt.ircrypt_info('Found private gpg key with fingerprint %s' %
                    ircrypt_gpg_id, '')
            return weechat.WEECHAT_RC_OK
        except:
            # NOTE(review): bare except kept to preserve behavior; if parsing
            # fails we fall through to generating a fresh key.
            ircrypt.ircrypt_error('Unable to get key id', '')

    # Try to generate a key.
    # Fixed: several of the concatenated fragments below previously ran
    # together ("public keyauthentication", "does notaffect") because of
    # missing separator spaces.
    ircrypt.ircrypt_warn('No private key for assymetric encryption was found in the '
            + 'IRCrypt GPG keyring. IRCrypt will now try to automatically generate a '
            + 'new key. This might take quite some time as this procedure depends on '
            + 'the gathering of enough entropy for generating cryptographically '
            + 'strong random numbers. You cannot use the key exchange (public key '
            + 'authentication) until this process is done. However, it does not '
            + 'affect the symmetric encryption which can already be used. You '
            + 'will be notified once the process is done.')
    binary = weechat.config_string(weechat.config_get('ircrypt.general.binary'))
    hook = weechat.hook_process_hashtable(binary, {
        'stdin': '1',
        'arg1': '--batch',
        'arg2': '--no-tty',
        'arg3': '--quiet',
        'arg4': '--homedir',
        'arg5': ircrypt_gpg_homedir,
        'arg6': '--gen-key'},
        0, 'ircrypt_key_generated_cb', '')
    # Unattended key parameters fed to `gpg --gen-key` on stdin.
    gen_command = 'Key-Type: RSA\n' \
            + 'Key-Length: 2048\n' \
            + 'Subkey-Type: RSA\n' \
            + 'Subkey-Length: 2048\n' \
            + 'Name-comment: ircrypt\n' \
            + 'Expire-Date: 0\n' \
            + '%commit'
    weechat.hook_set(hook, 'stdin', gen_command)
    weechat.hook_set(hook, 'stdin_close', '')
    return weechat.WEECHAT_RC_OK
def ircrypt_key_generated_cb(data, command, errorcode, out, err):
    '''Callback for the process hook that generates the RSA key.

    Reports GnuPG errors/warnings; on success re-runs ircrypt_gpg_init()
    so the freshly generated key is picked up. Returns a weechat return
    code.
    '''
    # Key generation failed outright.
    if errorcode:
        ircrypt.ircrypt_error(err, '')
        return weechat.WEECHAT_RC_ERROR
    elif err:
        ircrypt.ircrypt_warn(err)
    # Fixed: the two fragments previously ran together as
    # "successfullygenerated" because of a missing separator space.
    ircrypt.ircrypt_info('A private key for asymmetric encryption was successfully '
            + 'generated and can now be used for communication.')
    return ircrypt_gpg_init()
def ircrypt_receive_key_ex_ping(server, args, info):
    '''This function handles incoming >KEY-EX-PING notices.

    :param server: server the notice was received from
    :param args:   raw IRC message containing the >KEY-EX-PING marker
    :param info:   parsed IRC message (hashtable with e.g. 'nick')
    '''
    global ircrypt_gpg_id, ircrypt_key_ex_memory
    # Check for the IRCrypt base plugin.
    # Fix: the function must be *called*; the bare function object is always
    # truthy, so the original guard could never trigger.
    if not ircrypt_check_ircrypt():
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        return ''
    # Check if own gpg key exists
    if not ircrypt_gpg_id:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        return ''
    # Get the (optional) fingerprint from the message; the trailing
    # " (...)" part is a human-readable hint and is cut off
    try:
        fingerprint = args.split('>KEY-EX-PING')[-1].split(' (')[0].lstrip(' ')
    except:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        return ''
    # Wrong fingerprint: Error
    if fingerprint and fingerprint != ircrypt_gpg_id:
        ircrypt.ircrypt_error('%s tries key exchange with wrong fingerprint' \
            % info['nick'], weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-PING-WITH-INVALID-FINGERPRINT' % (server, info['nick']))
        return ''
    # Send back a >KEY-EX-PONG with optional fingerprint and create an
    # instance of the class KeyExchange for this target
    target = ('%s/%s' % (server, info['nick'])).lower()
    gpg_id = ircrypt_asym_id.get(target)
    if gpg_id:
        # We already know the counterpart's key: echo its id so it can skip
        # sending the public key again
        weechat.command('','/mute -all notice -server %s %s >KEY-EX-PONG %s' \
            % (server, info['nick'], gpg_id))
        if fingerprint:
            ircrypt_key_ex_memory[target] = KeyExchange(False, False)
        else:
            ircrypt_key_ex_memory[target] = KeyExchange(False, True)
    else:
        weechat.command('','/mute -all notice -server %s %s >KEY-EX-PONG' \
            % (server, info['nick']))
        if fingerprint:
            ircrypt_key_ex_memory[target] = KeyExchange(True, False)
        else:
            ircrypt_key_ex_memory[target] = KeyExchange(True, True)
    return ''
def ircrypt_receive_key_ex_pong(server, args, info):
    '''This function handles incoming >KEY-EX-PONG notices.

    :param server: server the notice was received from
    :param args:   raw IRC message containing the >KEY-EX-PONG marker
    :param info:   parsed IRC message (hashtable with e.g. 'nick')
    '''
    global ircrypt_gpg_id, ircrypt_key_ex_memory
    target = ('%s/%s' % (server, info['nick'])).lower()
    # No instance of KeyExchange: Error -- a PONG only makes sense after we
    # started an exchange with >KEY-EX-PING
    if not ircrypt_key_ex_memory.get(target):
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, info['nick']))
        return ''
    # Optional fingerprint appended to the PONG (empty string if absent)
    fingerprint = args.split('>KEY-EX-PONG')[-1].lstrip(' ')
    # Wrong fingerprint: Error and try to delete instance of KeyExchange
    if fingerprint and fingerprint != ircrypt_gpg_id:
        ircrypt.ircrypt_error('%s tries key exchange with wrong fingerprint' \
            % info['nick'], weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-PING-WITH-INVALID-FINGERPRINT' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # If correct fingerprint, the public key must not be sent (the
    # counterpart already has it)
    if fingerprint:
        ircrypt_key_ex_memory[target].pub_key_send = False
    # Notice to start next phase
    weechat.command('','/mute -all notice -server %s %s >KEY-EX-NEXT-PHASE' \
        % (server, info['nick']))
    # If no public key must be sent, start symmetric key exchange. Otherwise
    # send public key, if necessary
    if (ircrypt_key_ex_memory[target].pub_key_send,
            ircrypt_key_ex_memory[target].pub_key_receive) == (False, False):
        ircrypt_sym_key_send(server, info['nick'])
    elif ircrypt_key_ex_memory[target].pub_key_send:
        ircrypt_public_key_send(server, info['nick'])
    return ''
def ircrypt_receive_next_phase(server, args, info):
    '''This function handles incoming >KEY-EX-NEXT-PHASE notices.'''
    global ircrypt_gpg_id, ircrypt_key_ex_memory
    nick = info['nick']
    target = ('%s/%s' % (server, nick)).lower()
    key_ex = ircrypt_key_ex_memory.get(target)
    # Without a running key exchange this notice is a protocol violation
    if not key_ex:
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, nick))
        return ''
    # Public key exchange finished in both directions: start the symmetric
    # key exchange; otherwise send our public key if still required
    if not key_ex.pub_key_send and not key_ex.pub_key_receive:
        ircrypt_sym_key_send(server, nick)
    elif key_ex.pub_key_send:
        ircrypt_public_key_send(server, nick)
    return ''
def ircrypt_public_key_send(server, nick):
    '''Export the own public key and send it to nick in multiple notices.

    :param server: server on which to send the key
    :param nick:   nick to which the key is sent
    '''
    global ircrypt_gpg_homedir, ircrypt_gpg_id, ircrypt_key_ex_memory
    # Fix: this function receives the plain nick -- there is no parsed
    # "info" message here.  The original referenced the undefined names
    # `info` and `target` (see the old TODO), raising NameError in every
    # error path.
    target = ('%s/%s' % (server, nick)).lower()
    # Without an own key there is nothing to export
    if not ircrypt_gpg_id:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, nick))
        return ''
    # Export own public key; print error if necessary
    (ret, out, err) = ircrypt.ircrypt_gnupg(b'', '--homedir', ircrypt_gpg_homedir,
        '--export', ircrypt_gpg_id)
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, nick))
        # Abort the key exchange for this target
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    elif err:
        ircrypt.ircrypt_warn(err.decode('utf-8'))
    # Fix: decode the base64 data so the '%s' formatting below does not
    # emit a literal "b'...'" wrapper under Python 3.
    pub_key = base64.b64encode(out).decode('utf-8')
    # Partition the public key and send it away (highest part number first,
    # part 0 last, so the receiver knows when the transfer is complete)
    for i in range(1 + (len(pub_key) // MAX_PART_LEN))[::-1]:
        msg = '>PUB-EX-%i %s' % (i, pub_key[i*MAX_PART_LEN:(i+1)*MAX_PART_LEN])
        weechat.command('','/mute -all notice -server %s %s %s' % (server, nick, msg))
    return ''
def ircrypt_public_key_get(server, args, info):
    '''This function handles incoming >PUB-EX- messages (base64-encoded
    parts of the counterpart's public key).

    :param server: server the message was received from
    :param args:   raw IRC message containing the >PUB-EX- marker
    :param info:   parsed IRC message (hashtable with e.g. 'nick')
    '''
    global ircrypt_pub_keys_memory, ircrypt_asym_id, ircrypt_key_ex_memory
    # Get prefix, number and message
    pre, message = args.split('>PUB-EX-', 1)
    number, message = message.split(' ', 1)
    target = ('%s/%s' % (server, info['nick'])).lower()
    # Check if we got the last part of the message (part number 0);
    # otherwise put the message into a global memory and quit
    if int(number):
        if not target in ircrypt_pub_keys_memory:
            # - First element is list of requests
            # - Second element is currently received request
            ircrypt_pub_keys_memory[target] = ircrypt.MessageParts()
        # Add parts to current request
        ircrypt_pub_keys_memory[target].update(int(number), message)
        return ''
    # No instance of KeyExchange: Error
    if not ircrypt_key_ex_memory.get(target):
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, info['nick']))
        return ''
    # If no request for a public key: Error and try to delete instance of
    # KeyExchange
    if not ircrypt_key_ex_memory[target].pub_key_receive:
        ircrypt.ircrypt_error('%s sends his public key without inquiry' % info['nick'],
            weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-REQUEST-FOR-PUBLIC-KEY' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # If there is already a public identifier stored for this target:
    # Error and try to delete instance of KeyExchange
    key_id = ircrypt_asym_id.get(target)
    if key_id:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Get whole message (append any previously buffered parts)
    try:
        message = message + ircrypt_pub_keys_memory[target].message
        del ircrypt_pub_keys_memory[target]
    except KeyError:
        pass
    # Decode base64 encoded message
    try:
        message = base64.b64decode(message)
    except:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Import public key into the IRCrypt GPG keyring
    (ret, out, err) = ircrypt.ircrypt_gnupg(message, '--homedir', ircrypt_gpg_homedir,
        '--keyid-format', '0xlong', '--import')
    # Print error (stderr carries the information about the imported public
    # key) and quit key exchange if necessary
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Try to get the public key identifier contained in the stderr output
    # (gpg prints the imported key id in 0xlong format).
    try:
        gpg_id = err.decode('utf-8').split('0x',1)[1].split(':',1)[0]
    except:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Probe for the GPG fingerprint of the imported key
    (ret, out, err) = ircrypt.ircrypt_gnupg(b'', '--homedir', ircrypt_gpg_homedir,
        '--fingerprint', '--with-colon')
    # Print error and quit key exchange if necessary
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    elif err:
        ircrypt.ircrypt_warn(err.decode('utf-8'))
    # Extract the "fpr:" line that belongs to the imported key id and strip
    # it down to the bare fingerprint
    try:
        out = [ line for line in out.decode('utf-8').split('\n') \
            if (gpg_id + ':') in line and line.startswith('fpr:') ][-1]
        gpg_id = out.split('fpr')[-1].strip(':')
    except:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Set asymmetric identifier and remember that the public key was received
    ircrypt_asym_id[target] = gpg_id
    ircrypt_key_ex_memory[target].pub_key_receive = False
    # Send status back
    weechat.command('','/mute -all notice -server %s %s '
        '>KEY-EX-PUB-RECEIVED' % (server, info['nick']))
    # Start symmetric key exchange if public key exchange is closed
    if (ircrypt_key_ex_memory[target].pub_key_send,
            ircrypt_key_ex_memory[target].pub_key_receive) == (False,False):
        ircrypt_sym_key_send(server, info['nick'])
    return ''
def ircrypt_receive_key_ex_pub_received(server, args, info):
    '''This function handles incoming >KEY-EX-PUB-RECEIVED notices.'''
    global ircrypt_gpg_id, ircrypt_key_ex_memory
    nick = info['nick']
    target = ('%s/%s' % (server, nick)).lower()
    key_ex = ircrypt_key_ex_memory.get(target)
    # No running key exchange for this target: report a protocol error
    if not key_ex:
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, nick))
        return ''
    # The counterpart confirmed our public key; nothing left to send
    key_ex.pub_key_send = False
    # Start the symmetric key exchange once both directions are done
    if not key_ex.pub_key_receive:
        ircrypt_sym_key_send(server, nick)
    return ''
def ircrypt_sym_key_send(server, nick):
    '''Create the own part of the symmetric key, sign and encrypt it with
    GPG and send it to nick in multiple notices.

    :param server: server on which to send the key part
    :param nick:   nick to which the key part is sent
    '''
    global ircrypt_asym_id, ircrypt_key_ex_memory, ircrypt_sym_keys_memory
    # Create part of the key (64 random bytes)
    keypart = os.urandom(64)
    target = ('%s/%s' % (server, nick)).lower()
    # Sign with the own key and encrypt with the counterpart's public key
    (ret, out, err) = ircrypt.ircrypt_gnupg(keypart, '--homedir', ircrypt_gpg_homedir,
        '-s', '--trust-model', 'always', '-e', '-r', ircrypt_asym_id[target])
    # Print error and quit key exchange if necessary
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        # Fix: this function only knows the plain nick; the original used
        # the undefined name `info`, raising NameError in this error path.
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, nick))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    elif err:
        ircrypt.ircrypt_warn(err.decode('utf-8'))
    # Update the symmetric key with the own key part
    ircrypt_key_ex_memory[target].update(keypart)
    # If the symmetric key is complete, send status back
    if ircrypt_key_ex_memory[target].parts == 2:
        weechat.command('','/mute -all notice -server %s %s '
            '>KEY-EX-SYM-RECEIVED' % (server, nick))
        # If the counterpart also has the complete key, set the symmetric key
        if ircrypt_key_ex_memory[target].sym_received:
            weechat.command('','/ircrypt set-key -server %s %s %s' \
                % (server, nick,
                base64.b64encode(ircrypt_key_ex_memory[target].sym_key).decode('utf-8')))
    # Print encrypted part of the symmetric key in multiple notices.
    # Fix: use floor division (//) -- true division yields a float and makes
    # range() raise TypeError under Python 3 (the sibling loop in
    # ircrypt_public_key_send already used //).  Also decode the base64 data
    # so '%s' does not format it as "b'...'".
    out = base64.b64encode(out).decode('utf-8')
    for i in range(1 + (len(out) // MAX_PART_LEN))[::-1]:
        msg = '>SYM-EX-%i %s' % (i, out[i*MAX_PART_LEN:(i+1)*MAX_PART_LEN])
        weechat.command('','/mute -all notice -server %s %s %s' % (server, nick, msg))
def ircrypt_sym_key_get(server, args, info):
    '''This function handles incoming >SYM-EX- messages (base64-encoded,
    GPG-encrypted parts of the counterpart's symmetric key part).

    :param server: server the message was received from
    :param args:   raw IRC message containing the >SYM-EX- marker
    :param info:   parsed IRC message (hashtable with 'nick', 'channel')
    '''
    # NOTE(review): the global statement names ircrypt_pub_keys_memory, but
    # this function actually mutates ircrypt_sym_key_memory -- mutating a
    # module-level dict works without `global`, so behavior is unaffected.
    global ircrypt_pub_keys_memory, ircrypt_asym_id, ircrypt_key_ex_memory
    # Get prefix, number and message
    pre, message = args.split('>SYM-EX-', 1)
    number, message = message.split(' ', 1)
    catchword = (server, info['channel'], info['nick'])
    # Decrypt only if we got the last part of the message (part number 0);
    # otherwise put the message into a global memory and quit
    if int(number) != 0:
        if not catchword in ircrypt_sym_key_memory:
            ircrypt_sym_key_memory[catchword] = ircrypt.MessageParts()
        ircrypt_sym_key_memory[catchword].update(int(number), message)
        return ''
    # Get whole message (append any previously buffered parts)
    try:
        message = message + ircrypt_sym_key_memory[catchword].message
    except KeyError:
        pass
    target = ('%s/%s' % (server, info['nick'])).lower()
    # No instance of KeyExchange: Error
    if not ircrypt_key_ex_memory.get(target):
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, info['nick']))
        return ''
    # No request for symmetric key exchange (public key phase still
    # pending): Error and try to delete instance
    if (ircrypt_key_ex_memory[target].pub_key_send or
            ircrypt_key_ex_memory[target].pub_key_receive):
        ircrypt.ircrypt_error('%s sends symmetric key without inquiry' % info['nick'],
            weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-REQUEST-FOR-SYMMETRIC-KEY' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Decode base64 encoded message
    try:
        message = base64.b64decode(message)
    except TypeError:
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Decrypt with the own private key
    (ret, out, err) = ircrypt.ircrypt_gnupg(message, '--homedir',
        ircrypt_gpg_homedir, '-d')
    # Print error and quit key exchange if necessary
    if ret:
        ircrypt.ircrypt_error(err.decode('utf-8'), weechat.current_buffer())
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-INTERNAL-ERROR' % (server, info['nick']))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Remove old messages from memory
    try:
        del ircrypt_sym_key_memory[catchword]
    except KeyError:
        pass
    target = ('%s/%s' % (server, info['nick'])).lower() # TODO: Necessary?
    # Update symmetric key with the received key part
    ircrypt_key_ex_memory[target].update(out)
    # If symmetric key is complete, send status back
    if ircrypt_key_ex_memory[target].parts == 2:
        weechat.command('','/mute -all notice -server %s %s '
            '>KEY-EX-SYM-RECEIVED' % (server, info['nick']))
        # If the symmetric key is also complete at the counterpart, set
        # the symmetric key
        if ircrypt_key_ex_memory[target].sym_received:
            weechat.command('','/ircrypt set-key -server %s %s %s' \
                % (server, info['nick'],
                base64.b64encode(ircrypt_key_ex_memory[target].sym_key)))
    return ''
def ircrypt_receive_key_ex_sym_received(server, args, info):
    '''This function handles incoming >KEY-EX-SYM-RECEIVED notices.

    :param server: server the notice was received from
    :param args:   raw IRC message
    :param info:   parsed IRC message (hashtable with e.g. 'nick')
    '''
    global ircrypt_gpg_id, ircrypt_key_ex_memory
    nick = info['nick']
    target = ('%s/%s' % (server, nick)).lower()
    # No instance of KeyExchange: Error
    if not ircrypt_key_ex_memory.get(target):
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-KEY-EXCHANGE' % (server, nick))
        return ''
    # No request for symmetric key exchange: Error and try to delete instance
    if (ircrypt_key_ex_memory[target].pub_key_send or
            ircrypt_key_ex_memory[target].pub_key_receive):
        ircrypt.ircrypt_error('Error in IRCrypt key exchange', weechat.current_buffer())
        # Fix: the first literal was missing its trailing space, producing
        # "<nick>>UCRY-NO-REQUEST-FOR-SYMMETRIC-KEY" on the wire (all
        # sibling notices use '%s %s ' + '>UCRY-...').
        weechat.command('','/mute -all notice -server %s %s '
            '>UCRY-NO-REQUEST-FOR-SYMMETRIC-KEY' % (server, nick))
        try:
            del ircrypt_key_ex_memory[target]
        except KeyError:
            pass
        return ''
    # Remember that the counterpart has received the symmetric key
    ircrypt_key_ex_memory[target].sym_received = True
    # If the own symmetric key is complete as well, set the symmetric key.
    # Fix: decode the base64 data so '%s' does not format it as "b'...'"
    # under Python 3.
    if ircrypt_key_ex_memory[target].parts == 2:
        weechat.command('','/ircrypt set-key -server %s %s %s' \
            % (server, nick,
            base64.b64encode(ircrypt_key_ex_memory[target].sym_key).decode('utf-8')))
    return ''
def ircrypt_config_init():
    '''Initialize the configuration file: create sections and options in
    memory and prepare the handling of key sections.
    '''
    global ircrypt_config_file, ircrypt_config_section, ircrypt_config_option
    ircrypt_config_file = weechat.config_new('ircrypt-keyex',
        'ircrypt_config_reload_cb', '')
    if not ircrypt_config_file:
        return
    # Section holding the public key identifiers (one option per target)
    section = weechat.config_new_section(
        ircrypt_config_file, 'asym_id', 0, 0,
        'ircrypt_config_asym_id_read_cb', '',
        'ircrypt_config_asym_id_write_cb', '', '', '', '', '', '', '')
    ircrypt_config_section['asym_id'] = section
    # Creating the section failed: release the configuration file again
    if not section:
        weechat.config_free(ircrypt_config_file)
def ircrypt_config_reload_cb(data, config_file):
    '''Handle a reload of the configuration file.

    :param data:        callback data (unused)
    :param config_file: pointer to the configuration file
    :returns: WEECHAT_CONFIG_READ_OK (reload always accepted)
    '''
    return weechat.WEECHAT_CONFIG_READ_OK
def ircrypt_config_read():
    '''Read the IRCrypt-KeyEx configuration file (ircrypt-keyex.conf).

    :returns: weechat read status code
    '''
    global ircrypt_config_file
    return weechat.config_read(ircrypt_config_file)
def ircrypt_config_write():
    '''Write the IRCrypt-KeyEx configuration file to disk.

    :returns: weechat write status code
    '''
    global ircrypt_config_file
    return weechat.config_write(ircrypt_config_file)
def ircrypt_config_asym_id_read_cb(data, config_file, section_name, option_name,
    value):
    '''Read one element of the key section from the configuration file.

    Option names are "server/nick" targets; they are stored lowercased in
    the in-memory mapping of asymmetric key identifiers.
    '''
    global ircrypt_asym_id
    ircrypt_asym_id[option_name.lower()] = value
    return weechat.WEECHAT_CONFIG_OPTION_SET_OK_CHANGED
def ircrypt_config_asym_id_write_cb(data, config_file, section_name):
    '''Write the known public key identifiers to the key section of the
    configuration file.'''
    global ircrypt_asym_id
    weechat.config_write_line(config_file, section_name, '')
    # Deterministic (sorted) output keeps the config file diff-friendly
    for target, asym_id in sorted(ircrypt_asym_id.items()):
        weechat.config_write_line(config_file, target.lower(), asym_id)
    return weechat.WEECHAT_RC_OK
def ircrypt_command_list():
    '''ircrypt command to list all known fingerprints.'''
    lines = [' %s : %s' % item for item in ircrypt_asym_id.items()]
    if lines:
        ircrypt.ircrypt_info('Fingerprint:\n' + '\n'.join(lines))
    else:
        ircrypt.ircrypt_info('No known Fingerprints')
    return weechat.WEECHAT_RC_OK
def ircrypt_command_start(server, nick):
    '''Start a key exchange with nick on server (the "start" subcommand).

    :param server: server on which the exchange takes place
    :param nick:   nick with which to exchange keys
    '''
    global ircrypt_asym_id, ircrypt_key_ex_memory, ircrypt_gpg_id
    # Check for the IRCrypt base plugin.
    # Fix: the function must be *called*; the bare function object is always
    # truthy, so the original guard was dead code.
    if not ircrypt_check_ircrypt():
        return weechat.WEECHAT_RC_OK
    # Check if own gpg key exists
    if not ircrypt_gpg_id:
        # NOTE(review): other ircrypt_error call sites pass a buffer as
        # second argument -- confirm ircrypt_error has a default for it.
        ircrypt.ircrypt_error('No GPG key generated')
        return weechat.WEECHAT_RC_ERROR
    # Send >KEY-EX-PING with optional gpg fingerprint and create instance of
    # KeyExchange
    target = ('%s/%s' % (server, nick)).lower()
    gpg_id = ircrypt_asym_id.get(target)
    # Fix: typo "initialte" -> "initiate" in the human-readable hint
    text = '(Trying to initiate key exchange via IRCrypt-KeyEx)'
    if gpg_id:
        # Counterpart's key is already known: echo its id with the PING
        weechat.command('','/mute -all notice -server %s %s >KEY-EX-PING %s %s' \
            % (server, nick, gpg_id, text))
        ircrypt_key_ex_memory[target] = KeyExchange(False, True)
    else:
        weechat.command('','/mute -all notice -server %s %s >KEY-EX-PING %s' \
            % (server, nick, text))
        ircrypt_key_ex_memory[target] = KeyExchange(True, True)
    # print information
    ircrypt.ircrypt_info('Start key exchange with %s on server %s. This may take some '
        'time. The exchange will be ignored if %s has no IRCrypt-KeyEx.' \
        % (nick, server, nick))
    return weechat.WEECHAT_RC_OK
def ircrypt_command_remove_public_key(target):
    '''Remove the stored public key for target (a server/nick combination).'''
    global ircrypt_asym_id
    key = target.lower()
    # Without a stored key there is nothing to remove
    if key not in ircrypt_asym_id:
        ircrypt.ircrypt_error('No existing public key for %s.' % target,
            weechat.current_buffer())
        return weechat.WEECHAT_RC_ERROR
    # First delete the key from the gpg keyring, then drop the identifier
    # from the configuration, reporting status in the current buffer
    (ret, out, err) = ircrypt.ircrypt_gnupg(b'', '--yes', '--homedir',
        ircrypt_gpg_homedir, '--delete-key', ircrypt_asym_id[key])
    if ret:
        ircrypt.ircrypt_error('Could not delete public key in gpg',
            weechat.current_buffer())
        return weechat.WEECHAT_RC_ERROR
    elif err:
        ircrypt.ircrypt_warn(err.decode('utf-8'))
    del ircrypt_asym_id[key]
    ircrypt.ircrypt_info('Removed asymmetric identifier for %s' % target)
    return weechat.WEECHAT_RC_OK
def ircrypt_command(data, buffer, args):
    '''Hook to handle the /ircrypt-keyex weechat command.

    :param data:   hook data (unused)
    :param buffer: buffer from which the command was issued
    :param args:   command argument string
    '''
    global ircrypt_asym_id
    argv = [a for a in args.split(' ') if a]
    if argv and not argv[0] in ['list', 'remove-public-key', 'start']:
        # NOTE(review): the leading '%s' looks like a leftover format
        # placeholder (possibly meant for a weechat error prefix) and is
        # printed literally -- confirm the intended output.
        ircrypt.ircrypt_error('%sUnknown command. Try /help ircrypt-keyex', buffer)
        return weechat.WEECHAT_RC_ERROR
    # list (also the default when called without arguments)
    if not argv or argv == ['list']:
        return ircrypt_command_list()
    # Check if a server was set explicitly via "-server <name>"
    if (len(argv) > 2 and argv[1] == '-server'):
        server = argv[2]
        del argv[2]
        del argv[1]
        args = args.split(' ', 2)[-1]
    else:
        # Try to determine the server automatically from the buffer
        server = weechat.buffer_get_string(buffer, 'localvar_server')
    # All remaining commands need a server name
    if not server:
        # if no server was set print message in ircrypt buffer and throw error
        ircrypt.ircrypt_error('Unknown Server. Please use -server to specify server', buffer)
        return weechat.WEECHAT_RC_ERROR
    # For the remaining commands we need at least one additional argument
    # (the nick)
    if len(argv) < 2:
        return weechat.WEECHAT_RC_ERROR
    target = ('%s/%s' % (server, argv[1])).lower()
    # Start key exchange with another user
    if argv[0] == 'start':
        if len(argv) == 2:
            return ircrypt_command_start(server, argv[1])
        return weechat.WEECHAT_RC_ERROR
    # Remove public key from another user
    if argv[0] == 'remove-public-key':
        if len(argv) != 2:
            return weechat.WEECHAT_RC_ERROR
        return ircrypt_command_remove_public_key(target)
    # Error if command was unknown
    return weechat.WEECHAT_RC_ERROR
def ircrypt_notice_hook(data, msgtype, server, args):
    '''Modifier hook for incoming notices: dispatch IRCrypt-KeyEx protocol
    messages and report errors sent by the counterpart.

    :returns: '' when the notice was consumed by the key exchange protocol,
              otherwise the unmodified message.
    '''
    info = weechat.info_get_hashtable('irc_message_parse', { 'message': args })
    # Generic error reports from the counterpart (all share one message)
    if '>UCRY-INTERNAL-ERROR' in args or '>UCRY-NO-KEY-EXCHANGE' in args:
        ircrypt.ircrypt_error('%s on server %s reported an error during the key exchange' \
            % (info['nick'], server), weechat.current_buffer())
        return ''
    elif '>UCRY-PING-WITH-INVALID-FINGERPRINT' in args:
        # Fix: the two concatenated literals were missing a separating
        # space ("doesnot" in the user-visible message).
        ircrypt.ircrypt_error('%s on server %s reported that your fingerprint known does '
            'not match his own fingerprint' % (info['nick'], server),
            weechat.current_buffer())
        return ''
    elif ('>UCRY-NO-REQUEST-FOR-PUBLIC-KEY' in args
            or '>UCRY-NO-REQUEST-FOR-SYMMETRIC-KEY' in args):
        ircrypt.ircrypt_error('%s on server %s reported an error during the key exchange' \
            % (info['nick'], server), weechat.current_buffer())
        return ''
    # Protocol messages of the key exchange (order preserved from the
    # original dispatch chain)
    elif '>KEY-EX-PING' in args:
        return ircrypt_receive_key_ex_ping(server, args, info)
    elif '>KEY-EX-PONG' in args:
        return ircrypt_receive_key_ex_pong(server, args, info)
    elif '>KEY-EX-NEXT-PHASE' in args:
        return ircrypt_receive_next_phase(server, args, info)
    elif '>KEY-EX-PUB-RECEIVED' in args:
        return ircrypt_receive_key_ex_pub_received(server, args, info)
    elif '>SYM-EX-' in args:
        return ircrypt_sym_key_get(server, args, info)
    elif '>KEY-EX-SYM-RECEIVED' in args:
        return ircrypt_receive_key_ex_sym_received(server, args, info)
    elif '>PUB-EX-' in args:
        return ircrypt_public_key_get(server, args, info)
    # Not an IRCrypt-KeyEx message: pass through unchanged
    return args
def ircrypt_load(data, signal, ircrypt_path):
    '''Signal callback fired whenever a python script is loaded; finishes
    initialization once the IRCrypt base script appears.

    :param data:         callback data (unused)
    :param signal:       signal name (unused)
    :param ircrypt_path: path of the script that was just loaded
    '''
    global ircrypt
    # Only react on the IRCrypt main script itself
    if ircrypt_path.endswith('ircrypt.py'):
        ircrypt = imp.load_source('ircrypt', ircrypt_path)
        ircrypt_init()
    return weechat.WEECHAT_RC_OK
def ircrypt_check_ircrypt():
    '''Return the file name of the loaded IRCrypt base script, or an empty
    string if the script is not loaded.'''
    infolist = weechat.infolist_get('python_script', '', 'ircrypt')
    weechat.infolist_next(infolist)
    script_path = weechat.infolist_string(infolist, 'filename')
    weechat.infolist_free(infolist)
    return script_path
def ircrypt_init():
    '''Initialize the IRCrypt-KeyEx addon: read the configuration, set up
    GPG and register the weechat hooks.'''
    # Initialize configuration
    ircrypt_config_init()
    ircrypt_config_read()
    # Look for the GnuPG binary (option provided by the IRCrypt base script)
    if weechat.config_string(weechat.config_get('ircrypt.general.binary')):
        # Initialize public key authentification
        ircrypt_gpg_init()
        # Register Hooks: intercept incoming notices and provide the
        # /ircrypt-keyex command with completion
        weechat.hook_modifier('irc_in_notice', 'ircrypt_notice_hook', '')
        weechat.hook_command('ircrypt-keyex', 'Commands of the Addon IRCrypt-keyex',
            '[list] '
            '| remove-public-key [-server <server>] <nick> '
            '| start [-server <server>] <nick> ',
            SCRIPT_HELP_TEXT,
            'list '
            '|| remove-public-key %(nicks)|-server %(irc_servers) %- '
            '|| start %(nicks)|-server %(irc_servers) %- ',
            'ircrypt_command', '')
    else:
        ircrypt.ircrypt_error('GnuPG not found', weechat.current_buffer())
# register plugin
if __name__ == '__main__' and weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
        SCRIPT_DESC, 'ircrypt_unload_script', 'UTF-8'):
    ircrypt_path = ircrypt_check_ircrypt()
    if ircrypt_path:
        # IRCrypt base script is already loaded: initialize immediately
        ircrypt = imp.load_source('ircrypt', ircrypt_path)
        ircrypt_init()
    else:
        # Defer initialization until the IRCrypt base script gets loaded
        weechat.hook_signal('python_script_loaded', 'ircrypt_load', '')
def ircrypt_unload_script():
    '''Hook to ensure the configuration is properly written to disk when the
    script is unloaded.

    :returns: WEECHAT_RC_OK
    '''
    ircrypt_config_write()
    return weechat.WEECHAT_RC_OK
|
IRCrypt/ircrypt-weechat
|
ircrypt-keyex.py
|
Python
|
gpl-3.0
| 34,597
|
[
"VisIt"
] |
1a87f02a7288ea0cacd5299c6e0a76525c3ba77416de219fb09a57fd5af6f6dc
|
# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
# Modifications to create of the HMMLearn module: Gael Varoquaux
"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""
import string
import numpy as np
from numpy.random import multivariate_normal, normal
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from sklearn.base import BaseEstimator
from sklearn.mixture import (
GMM, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from sklearn import cluster
from scipy.stats import (poisson, expon)
from copy import deepcopy
from .utils.fixes import (log_multivariate_normal_density,
log_poisson_pmf, log_exponential_density)
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
def identity(x):
    """Return the argument unchanged (identity function)."""
    return x
def _do_estep(seq, modelBroadcast):
    """Run one E-step (forward-backward) for a single observation sequence.

    :param seq:            one observation sequence
    :param modelBroadcast: wrapper exposing the HMM via ``.value``
                           (presumably a Spark-style broadcast variable --
                           TODO confirm against callers)
    :returns: tuple ``(stats, lpr)`` of accumulated sufficient statistics
              and the sequence log likelihood
    """
    model = modelBroadcast.value
    stats = model._initialize_sufficient_statistics()
    framelogprob = model._compute_log_likelihood(seq)
    # Forward pass yields the sequence log likelihood; the backward pass
    # supplies the complementary lattice for the posteriors
    lpr, fwdlattice = model._do_forward_pass(framelogprob)
    bwdlattice = model._do_backward_pass(framelogprob)
    gamma = fwdlattice + bwdlattice
    # Normalize per frame in log space for numerical stability
    posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
    model._accumulate_sufficient_statistics(stats,
                                            seq,
                                            framelogprob,
                                            posteriors,
                                            fwdlattice,
                                            bwdlattice,
                                            model.params)
    return stats, lpr
def _score(seq, modelBroadcast):
model = modelBroadcast.value
seq = np.asarray(seq)
framelogprob = model._compute_log_likelihood(seq)
lpr, _ = model._do_forward_pass(framelogprob)
return lpr
def merge_sum(x, y):
    """Merge two statistics dicts by element-wise addition.

    Values that are lists are added element by element; all other values
    are added directly (works for numbers and numpy arrays).

    Fix: the original used the Python-2-only ``xrange``; the module is
    otherwise Python-3 compatible (``print()`` calls), so iterate with
    ``zip`` instead -- same behavior on both versions.

    :param x: dict of statistics (keys define the result's keys)
    :param y: dict with at least the same keys and matching value shapes
    :returns: new dict with the element-wise sums
    """
    merged = {}
    for key, x_val in x.items():
        y_val = y[key]
        if isinstance(x_val, list):
            merged[key] = [a + b for a, b in zip(x_val, y_val)]
        else:
            merged[key] = x_val + y_val
    return merged
def log_normalize(A, axis=None):
    """Exponentiate log-domain values, shifting by the per-axis maximum for
    numerical stability, and normalize the result to sum to one."""
    rolled = np.rollaxis(A, axis)
    peak = rolled.max(axis=axis)
    return normalize(np.exp((rolled.T - peak).T))
def normalize(A, axis=None):
    """ Normalize the input array so that it sums to 1.
    WARNING: The HMM module and its functions will be removed in 0.17
    as it no longer falls within the project's scope and API.
    Parameters
    ----------
    A: array, shape (n_samples, n_features)
       Non-normalized input data
    axis: int
       dimension along which normalization is performed
    Returns
    -------
    normalized_A: array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis
    WARNING: Modifies inplace the array
    """
    # Add a tiny constant so all-zero slices do not divide by zero; note
    # this mutates A in place (see the docstring warning).
    A += EPS
    Asum = A.sum(axis)
    # NOTE(review): for axis=0 the reshape branch is skipped because
    # ``axis and`` is falsy; broadcasting still divides correctly for 2-D
    # input in that case -- confirm for higher dimensions.
    if axis and A.ndim > 1:
        # Make sure we don't divide by zero.
        Asum[Asum == 0] = 1
        # Keep the summed dimension so the division broadcasts along `axis`
        shape = list(A.shape)
        shape[axis] = 1
        Asum.shape = shape
    return A / Asum
def randomize(A, axis=None):
    """Add small uniform noise (in [0, 0.1)) to A and renormalize along the
    given axis."""
    noise = np.random.rand(*A.shape) / 10.
    return normalize(A + noise, axis)
class VerboseReporter(object):
    """Reports verbose output to stdout.

    If ``verbose == 1`` output is printed only once in a while (whenever
    the iteration number is a multiple of ``verbose_mod``); for larger
    values output is printed for every update.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        # Column layout: iteration, log likelihood, improvement
        self.verbose_fmt = '{iter:>10d} {lpr:>16.4f} {improvement:>16.4f}'
        # Print every `verbose_mod`-th iteration (grows in powers of 10)
        self.verbose_mod = 1

    def init(self):
        """Print the table header."""
        header_fields = ['Iter', 'Log Likelihood', 'Log Improvement']
        header_fmt = '%10s ' + '%16s ' * (len(header_fields) - 1)
        print(header_fmt % tuple(header_fields))

    def update(self, i, lpr, improvement):
        """Update reporter with new iteration. """
        # we need to take into account if we fit additional estimators.
        iteration = i + 1
        if iteration % self.verbose_mod == 0:
            print(self.verbose_fmt.format(iter=iteration,
                                          lpr=lpr,
                                          improvement=improvement))
            if self.verbose == 1 and iteration // (self.verbose_mod * 10) > 0:
                # adjust verbose frequency (powers of 10)
                self.verbose_mod *= 10
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_states`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_states`, `n_states`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_states`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emmission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emmission parameters. Defaults to all
parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0):
self.n_states = n_states
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
if startprob_prior is None:
startprob_prior = np.ones(n_states)
self.startprob_prior = startprob_prior
self.transmat_ = transmat
if transmat_prior is None:
transmat_prior = np.ones((n_states,
n_states))
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
self.verbose = verbose
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : list of array_like, shape (n, n_states)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
logprob = 0
posteriors = []
for seq in obs:
seq = np.asarray(seq)
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors.append(np.exp(gamma.T - logsumexp(gamma, axis=1)).T)
posteriors[-1] += np.finfo(np.float32).eps
posteriors[-1] /= np.sum(posteriors, axis=1).reshape((-1, 1))
logprob += lpr
return logprob, posteriors
def score(self, sc, data):
"""Compute the log probability under the model.
Parameters
----------
obs : list of array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
modelBroadcast = sc.broadcast(self)
logprob = data.map(lambda seq: _score(seq, modelBroadcast)).reduce(lambda a, b: a + b)
return logprob
def aic(self, sc, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
aic_score : float
The Aikaike Information Criterion.
"""
logprob = self.score(sc, obs)
n_pars = self._n_free_parameters()
aic_score = 2 * n_pars - 2 * logprob
return aic_score
def bic(self, sc, obs):
"""Computes the Aikaike Information Criterion of the model and
set of observations.
Parameters
----------
obs : list of arrays
List of observation sequences.
Returns
-------
bic_score : float
The Aikaike Information Criterion.
"""
logprob = self.score(sc, obs)
n_pars = self._n_free_parameters()
n_data = sum([len(seq) for seq in obs])
bic_score = n_pars * (np.log(n_data) - np.log(2 * np.pi)) - 2 * logprob
return bic_score
    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : list of array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        viterbi_logprobs : array_like, shape (n,)
            Log probability of the maximum likelihood path through the HMM,
            one entry per input sequence.
        state_sequences : list of array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model
        """
        # One Viterbi log-probability per sequence; the state paths are
        # kept in a list because sequences may differ in length.
        viterbi_logprobs = np.zeros(len(obs))
        state_sequences = []
        for n, seq in enumerate(obs):
            seq = np.asarray(seq)
            framelogprob = self._compute_log_likelihood(seq)
            viterbi_logprobs[n], state_sequence = self._do_viterbi_pass(
                framelogprob)
            state_sequences.append(state_sequence)
        return viterbi_logprobs, state_sequences
    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the maximum a posteriori estimation (per-frame argmax of the
        posteriors computed by :meth:`score_samples`).

        Parameters
        ----------
        obs : list of array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        map_logprobs : array_like, shape (n,)
            Sum of per-frame maximum posterior log masses, one per sequence.
        state_sequences : list of array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        map_logprobs = np.zeros(len(obs))
        state_sequences = []
        _, posteriors = self.score_samples(obs)
        for n, post in enumerate(posteriors):
            # Per-frame most probable state.
            state_sequences.append(np.argmax(post, axis=1))
            map_logprobs[n] = np.max(post, axis=1).sum()
        return map_logprobs, state_sequences
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprobs : array_like, shape (n,)
Log probability of the maximum likelihood path through the HMM
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprobs, state_sequences = decoder[algorithm](obs)
return logprobs, state_sequences
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequences : list of array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequences = self.decode(obs, algorithm)
return state_sequences
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
posteriors : list of array-like, shape (n, n_states)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n_seq=1, n_min=10, n_max=20, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_seq : int
Number of observation sequences to generate.
n_min : int
Minimum number of observations for a sequence.
n_max : int
Maximum number of observations for a sequence.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : list of array_like, length `n_seq` List of samples
states : list of array_like, length `n_seq` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
obs = []
states = []
for _ in range(n_seq):
n = np.random.randint(n_min, n_max, size=1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
state_seq = [currstate]
obs_seq = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
state_seq.append(currstate)
obs_seq.append(self._generate_sample_from_state(
currstate, random_state=random_state))
obs.append(deepcopy(np.array(obs_seq)))
states.append(deepcopy(np.array(state_seq, dtype=int)))
return obs, states
    def fit(self, sc, data, warm_start=False):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        sc : SparkContext
            Used to broadcast the model to the workers each iteration.
        data : RDD
            Distributed collection of array-like observation sequences,
            each of which has shape (n_i, n_features), where n_i is the
            length of the i_th observation.
        warm_start : bool
            When True, skip parameter re-initialization and continue EM
            from the current parameters.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small). You can fix this by getting more training data,
        or strengthening the appropriate subclass-specific regularization
        parameter.
        """
        # Fall back to Viterbi if an unrecognised decoder was configured.
        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"
        if not warm_start:
            self._init(data, self.init_params)
        if self.verbose:
            verbose_reporter = VerboseReporter(self.verbose)
            verbose_reporter.init()
        # The RDD is traversed once per EM iteration; cache it up front.
        data.cache()
        logprob = []
        for i in range(self.n_iter):
            # Expectation step: each worker computes per-sequence
            # sufficient statistics against the broadcast model.
            modelBroadcast = sc.broadcast(self)
            results = data.map(lambda seq: _do_estep(seq, modelBroadcast)).cache()
            stats = results.keys().reduce(merge_sum)
            curr_logprob = results.values().reduce(lambda a, b:
                                                   a + b)
            logprob.append(curr_logprob)
            if i > 0:
                improvement = logprob[-1] - logprob[-2]
            else:
                improvement = np.inf
            if self.verbose:
                verbose_reporter.update(i, curr_logprob, improvement)
            # Check for convergence.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break
            # Maximization step
            self._do_mstep(stats, self.params)
        return self
    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm
    def _set_algorithm(self, algorithm):
        # Only values listed in the module-level ``decoder_algorithms``
        # are accepted.
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm
    # Public accessor for the decoding algorithm with validation on set.
    algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_states, self.n_states)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_states:
raise ValueError('startprob must have length n_states')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_states,
(self.n_states, self.n_states))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_states, self.n_states)):
raise ValueError('transmat must have shape '
'(n_states, n_states)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
    def _do_viterbi_pass(self, framelogprob):
        # Delegate to the C extension; returns the path log probability
        # and the best state sequence for one sequence's frame log
        # likelihoods.
        n_observations, n_states = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_states, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def _do_forward_pass(self, framelogprob):
        # Run the C-implemented forward algorithm, filling ``fwdlattice``
        # in place; the sequence log likelihood is logsumexp of the last
        # row of the lattice.
        n_observations, n_states = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_states))
        _hmmc._forward(n_observations, n_states, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        # Flush effectively-zero probabilities to -inf.
        fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob):
        # Run the C-implemented backward algorithm, filling ``bwdlattice``
        # in place.
        n_observations, n_states = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_states))
        _hmmc._backward(n_observations, n_states, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        # Flush effectively-zero probabilities to -inf.
        bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
        return bwdlattice
    def _compute_log_likelihood(self, obs):
        # Abstract hook: subclasses return an (n, n_states) matrix of
        # per-frame log emission likelihoods.
        pass
    def _generate_sample_from_state(self, state, random_state=None):
        # Abstract hook: subclasses draw one observation from the given
        # state's emission distribution.
        pass
    def _init(self, obs, params):
        # Draw initial start/transition distributions from their Dirichlet
        # priors. NOTE(review): uses the global ``np.random`` generator
        # rather than ``self.random_state`` — confirm whether seeded
        # initialization is required.
        if 's' in params:
            self.startprob_ = np.random.dirichlet(self.startprob_prior)
        if 't' in params:
            # One Dirichlet draw per row of the transition matrix.
            self.transmat_ = np.vstack([np.random.dirichlet(
                self.transmat_prior[i])
                for i in xrange(self.n_states)])
# Methods used by self.fit()
    def _initialize_sufficient_statistics(self):
        # Zeroed accumulators for one E-step: number of sequences seen,
        # expected initial-state occupancy, expected transition counts.
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_states),
                 'trans': np.zeros((self.n_states, self.n_states))}
        return stats
    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        # Fold one sequence's expected counts into ``stats``; ``params``
        # selects which statistics are tracked ('s' startprob,
        # 't' transmat).
        stats['nobs'] += 1
        if 's' in params:
            # Expected occupancy of the initial frame.
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_states = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations > 1:
                lneta = np.zeros((n_observations - 1,
                                  n_states,
                                  n_states))
                lnP = logsumexp(fwdlattice[-1])
                _hmmc._compute_lneta(n_observations, n_states, fwdlattice,
                                     self._log_transmat, bwdlattice,
                                     framelogprob, lnP, lneta)
                # Clip at 700 before exponentiating: exp(>~709) overflows
                # float64 to inf.
                stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if 's' in params:
            # Floor at 1e-20 so states with zero expected counts keep a
            # tiny probability instead of collapsing to exactly zero.
            self.startprob_ = normalize(np.maximum(stats['start'], 1e-20))
        if 't' in params:
            # Row-wise normalization of expected transition counts.
            self.transmat_ = normalize(np.maximum(stats['trans'], 1e-20), 1)
    def _n_free_parameters(self):
        # Abstract hook: subclasses return the number of free model
        # parameters (consumed by aic/bic).
        pass
class GaussianHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian emissions

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    n_states : int
        Number of states.
    ``_covariance_type`` : string
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.

    Attributes
    ----------
    ``_covariance_type`` : string
        String describing the type of covariance parameters used by
        the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
    n_features : int
        Dimensionality of the Gaussian emissions.
    n_states : int
        Number of states in the model.
    transmat : array, shape (`n_states`, `n_states`)
        Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
        Initial state occupation distribution.
    means : array, shape (`n_states`, `n_features`)
        Mean parameters for each state.
    covars : array
        Covariance parameters for each state. The shape depends on
        ``_covariance_type``::
            (`n_states`,) if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_states`, `n_features`) if 'diag',
            (`n_states`, `n_features`, `n_features`) if 'full'
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means, and 'c' for covars.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means, and 'c' for
        covars. Defaults to all parameters.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more iterations the lower the frequency). If
        greater than 1 then it prints progress and performance for every
        iteration.

    Examples
    --------
    >>> from hmmlearn.hmm import GaussianHMM
    >>> GaussianHMM(n_states=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...

    See Also
    --------
    GMM : Gaussian mixture model
    """
    def __init__(self, n_states=1, covariance_type='diag', startprob=None,
                 transmat=None, startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", means_var=1.0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters,
                 verbose=0):
        _BaseHMM.__init__(self, n_states, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          thresh=thresh, params=params,
                          init_params=init_params, verbose=verbose)
        self._covariance_type = covariance_type
        if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type')
        # Variance of the Gaussian jitter added around k-means centers
        # when initializing the state means in _init().
        self.means_var = means_var
        # Regularization for the covariance M-step.
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight
    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _get_means(self):
        """Mean parameters for each state."""
        return self._means_
    def _set_means(self, means):
        means = np.asarray(means)
        if (hasattr(self, 'n_features')
                and means.shape != (self.n_states, self.n_features)):
            raise ValueError('means must have shape '
                             '(n_states, n_features)')
        self._means_ = means.copy()
        # Setting means fixes the emission dimensionality.
        self.n_features = self._means_.shape[1]
    means_ = property(_get_means, _set_means)
    def _get_covars(self):
        """Return covars as a full matrix."""
        # Expand the compact internal storage of each covariance type to
        # one full (n_features, n_features) matrix per state.
        if self._covariance_type == 'full':
            return self._covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self._covars_]
        elif self._covariance_type == 'tied':
            return [self._covars_] * self.n_states
        elif self._covariance_type == 'spherical':
            return [np.eye(self.n_features) * f for f in self._covars_]
    def _set_covars(self, covars):
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_states)
        self._covars_ = covars.copy()
    covars_ = property(_get_covars, _set_covars)
    def _compute_log_likelihood(self, obs):
        # (n, n_states) per-frame Gaussian log densities.
        return log_multivariate_normal_density(
            obs, self._means_, self._covars_, self._covariance_type)
    def _generate_sample_from_state(self, state, random_state=None):
        if self._covariance_type == 'tied':
            # All states share one covariance matrix.
            cv = self._covars_
        else:
            cv = self._covars_[state]
        return sample_gaussian(self._means_[state], cv, self._covariance_type,
                               random_state=random_state)
    def _init(self, obs, params='stmc'):
        super(GaussianHMM, self)._init(obs, params=params)
        # Pull a ~1% sample of the distributed observations to the driver
        # to estimate the data dimensionality, means and covariances.
        concat_obs = np.vstack(obs
                               .flatMap(identity)
                               .sample(False, 0.01)
                               .map(np.atleast_2d)
                               .collect())
        if (hasattr(self, 'n_features')
                and self.n_features != concat_obs.shape[1]):
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (concat_obs.shape[1],
                                              self.n_features))
        self.n_features = concat_obs.shape[1]
        if 'm' in params:
            # Initialize state means near k-means centers, jittered by a
            # Gaussian with variance ``means_var``.
            clu = cluster.KMeans(n_clusters=self.n_states).fit(
                concat_obs)
            self._means_ = np.array([multivariate_normal(
                mean,
                np.eye(self.n_features) * self.means_var)
                for mean in clu.cluster_centers_])
        if 'c' in params:
            cv = np.cov(concat_obs.T)
            if not cv.shape:
                # 1-D data: np.cov returns a scalar.
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self._covariance_type, self.n_states)
            # Avoid exactly-zero variances.
            self._covars_[self._covars_ == 0] = 1e-5
    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        # Per-state posterior mass, weighted observation sums and
        # (type-dependent) second-moment accumulators.
        stats['post'] = np.zeros(self.n_states)
        stats['obs'] = np.zeros((self.n_states, self.n_features))
        stats['obs**2'] = np.zeros((self.n_states, self.n_features))
        if self._covariance_type in ('tied', 'full'):
            stats['obs*obs.T'] = np.zeros((self.n_states, self.n_features,
                                           self.n_features))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'm' in params or 'c' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)
        if 'c' in params:
            if self._covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self._covariance_type in ('tied', 'full'):
                # Accumulate full outer products weighted by posteriors.
                for t, o in enumerate(obs):
                    obsobsT = np.outer(o, o)
                    for c in range(self.n_states):
                        stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
    def _do_mstep(self, stats, params):
        super(GaussianHMM, self)._do_mstep(stats, params)
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            # Posterior-weighted mean of the observations per state.
            self._means_ = stats['obs'] / stats['post'][:, np.newaxis]
        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0
            if self._covariance_type in ('spherical', 'diag'):
                # NOTE(review): upstream hmmlearn uses
                # ``means_weight * meandiff**2`` as the first term here;
                # this fork uses ``means_**2`` — confirm intentional.
                cv_num = ((self._means_) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den,
                                                                     1e-5)
                if self._covariance_type == 'spherical':
                    # Average the per-feature variances into one value.
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_states, self.n_features,
                                  self.n_features))
                for c in range(self.n_states):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])
                    cvnum[c] = (np.outer(self._means_[c],
                                         self._means_[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight + stats['post'][:, None, None]))
    def _n_free_parameters(self):
        # Start/transition parameters plus per-state means.
        n_pars = (self.n_states - 1) * (self.n_states + 1)
        n_pars += self.n_states * self.n_features
        # Covariance parameters depend on the covariance type.
        if self._covariance_type == 'spherical':
            n_pars += self.n_states
        elif self._covariance_type == 'tied':
            n_pars += ((self.n_features + 1) * self.n_features) / 2
        elif self._covariance_type == 'diag':
            n_pars += self.n_states * self.n_features
        elif self._covariance_type == 'full':
            n_pars += self.n_states * ((self.n_features + 1)
                                       * self.n_features) / 2
        return n_pars
    def fit(self, sc, obs, warm_start=False):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        sc : SparkContext
            Used to broadcast the model to the workers.
        obs : RDD
            Distributed collection of array-like observation sequences,
            each of which has shape (n_i, n_features), where n_i is the
            length of the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
        more training data, or increasing covars_prior.
        """
        return super(GaussianHMM, self).fit(sc, obs, warm_start)
class MultinomialHMM(_BaseHMM):
    """Hidden Markov Model with multinomial (discrete) emissions

    Attributes
    ----------
    n_states : int
        Number of states in the model.
    n_symbols : int
        Number of possible symbols emitted by the model (in the observations).
    transmat : array, shape (`n_states`, `n_states`)
        Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
        Initial state occupation distribution.
    emissionprob : array, shape (`n_states`, `n_symbols`)
        Probability of emitting a given symbol when in each state.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more iterations the lower the frequency). If
        greater than 1 then it prints progress and performance for every
        iteration.

    Examples
    --------
    >>> from hmmlearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_states=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_states=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 emissionprob_prior=None, algorithm="viterbi",
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters, init_params=string.ascii_letters,
                 verbose=0):
        """Create a hidden Markov model with multinomial emissions.

        Parameters
        ----------
        n_states : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_states, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params,
                          verbose=verbose)
        # Dirichlet prior over each state's emission distribution; a flat
        # prior is substituted lazily in _init() when None.
        self.emissionprob_prior = emissionprob_prior
    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        # Stored internally in log space.
        return np.exp(self._log_emissionprob)
    def _set_emissionprob(self, emissionprob):
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_states, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_states, n_symbols)')
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(emissionprob):
            normalize(emissionprob)
        self._log_emissionprob = np.log(emissionprob)
        # Replace log(0) = nan underflow with -inf.
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]
    emissionprob_ = property(_get_emissionprob, _set_emissionprob)
    def _compute_log_likelihood(self, obs):
        # ``obs`` is a sequence of integer symbols; fancy-indexing the
        # per-state log probabilities yields an (n, n_states) matrix.
        return self._log_emissionprob[:, obs].T
    def _generate_sample_from_state(self, state, random_state=None):
        # Inverse-CDF sampling of one symbol from the state's
        # emission distribution.
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        return symbol
    def _init(self, obs, params='ste'):
        super(MultinomialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)
        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                # Infer the alphabet size from the distinct symbols in the
                # (distributed) observations.
                symbols = set(obs.flatMap(identity).distinct().collect())
                self.n_symbols = len(symbols)
            if self.emissionprob_prior is None:
                self.emissionprob_prior = np.ones((self.n_states,
                                                   self.n_symbols))
            # One Dirichlet draw per state's emission row.
            emissionprob = np.vstack([np.random.dirichlet(
                self.emissionprob_prior[i])
                for i in xrange(self.n_states)])
            self.emissionprob_ = emissionprob
    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
        # Expected symbol counts per state.
        stats['obs'] = np.zeros((self.n_states, self.n_symbols))
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'e' in params:
            # Posterior-weighted occurrence counts of each symbol.
            for t, symbol in enumerate(obs):
                stats['obs'][:, symbol] += posteriors[t]
    def _do_mstep(self, stats, params):
        super(MultinomialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            # Row-normalize expected counts into probabilities.
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])
    def _check_input_symbols(self, obs):
        """check if input can be used for Multinomial.fit input must be both
        positive integer array and every element must be continuous.
        e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not

        (The continuity of the symbol alphabet itself is checked globally
        in :meth:`fit`.)
        """
        symbols = obs.copy()
        if symbols.dtype.kind != 'i':
            # input symbols must be integer
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains negative integer
            return False
        return True
    def _n_free_parameters(self):
        # Start/transition parameters plus (n_symbols - 1) free emission
        # probabilities per state.
        n_pars = (self.n_states - 1) * (self.n_states + 1)
        n_pars += self.n_states * (self.n_symbols - 1)
        return n_pars
    def fit(self, sc, data, warm_start=False, **kwargs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        sc : SparkContext
            Used to broadcast the model to the workers.
        data : RDD
            Distributed collection of integer symbol sequences.
        """
        err_msg = ("Input must be a list of non-negative integer arrays where "
                   "in all, every element must be continuous, but %s was "
                   "given.")
        modelBroadcast = sc.broadcast(self)
        # Every sequence must be a non-negative integer array...
        if not data.map(lambda x: modelBroadcast.value._check_input_symbols(x)).min():
            raise ValueError(err_msg % data.take(5))
        # ...and the union of observed symbols must be contiguous
        # (no gaps in the alphabet).
        elif np.any(np.diff(data.flatMap(identity).distinct().sortBy(identity).collect()) > 1):
            raise ValueError(err_msg % data.take(5))
        return super(MultinomialHMM, self).fit(sc, data, warm_start, **kwargs)
class PoissonHMM(_BaseHMM):
"""Hidden Markov Model with Poisson (discrete) emissions
Attributes
----------
n_states : int
Number of states in the model.
transmat : array, shape (`n_states`, `n_states`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_states`,)
Initial state occupation distribution.
rates : array, shape ('n_states`,)
Poisson rate parameters for each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more iterations the lower the frequency). If
greater than 1 then it prints progress and performance for every
iteration.
Examples
--------
>>> from hmmlearn.hmm import PoissonHMM
>>> PoissonHMM(n_states=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PoissonHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_states=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
rates_var=1.0, algorithm="viterbi",
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters, verbose=0):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_states : int
Number of states.
"""
_BaseHMM.__init__(self, n_states, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params,
verbose=verbose)
self.rates_var = rates_var
def _get_rates(self):
"""Emission rate for each state."""
return self._rates
def _set_rates(self, rates):
rates = np.asarray(rates)
self._rates = rates.copy()
rates_ = property(_get_rates, _set_rates)
def _compute_log_likelihood(self, obs):
return log_poisson_pmf(obs, self._rates)
def _generate_sample_from_state(self, state, random_state=None):
return poisson.rvs(self._rates[state])
def _init(self, obs, params='str'):
super(PoissonHMM, self)._init(obs, params=params)
concat_obs = np.array(obs.flatMap(identity).sample(False, 0.01).collect())
if 'r' in params:
clu = cluster.KMeans(n_clusters=self.n_states).fit(
np.atleast_2d(concat_obs).T)
rates = normal(0, self.rates_var, self.n_states) + \
clu.cluster_centers_.T[0]
self._rates = np.maximum(0.1, rates)
def _initialize_sufficient_statistics(self):
stats = super(PoissonHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_states)
stats['obs'] = np.zeros((self.n_states,))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(PoissonHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'r' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
def _do_mstep(self, stats, params):
super(PoissonHMM, self)._do_mstep(stats, params)
if 'r' in params:
self._rates = stats['obs'] / stats['post']
def _check_input_symbols(self, obs):
"""check if input can be used for PoissonHMM. Input must be a list
of non-negative integers.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, -1, 3, 5, 10] not
"""
symbols = obs.copy()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains negative intiger
return False
return True
def _n_free_parameters(self):
n_pars = (self.n_states - 1) * (self.n_states + 1)
n_pars += self.n_states
return n_pars
def fit(self, sc, data, warm_start=False):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation. Alternatively, a list of strings,
each of which is a filepath to a pickled object, being
a list of array-like observation sequences.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
more components becomminging too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
err_msg = ("Input must be a list of non-negative integer arrays, \
but %s was given.")
modelBroadcast = sc.broadcast(self)
if not data.map(lambda x: modelBroadcast.value._check_input_symbols(x)).min():
raise ValueError(err_msg % data.take(5))
return super(PoissonHMM, self).fit(sc, data, warm_start)
class ExponentialHMM(_BaseHMM):
    """Hidden Markov Model with Exponential (continuous) emissions
    Attributes
    ----------
    n_states : int
        Number of states in the model.
    transmat : array, shape (`n_states`, `n_states`)
        Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
        Initial state occupation distribution.
    rates : array, shape (`n_states`,)
        Exponential rate parameters for each state.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'r' for rates.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'r' for rates.
        Defaults to all parameters.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more iterations the lower the frequency). If
        greater than 1 then it prints progress and performance for every
        iteration.
    Examples
    --------
    >>> from hmmlearn.hmm import ExponentialHMM
    >>> ExponentialHMM(n_states=2)
    ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    ExponentialHMM(algorithm='viterbi',...
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_states=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 rates_var=1.0, algorithm="viterbi",
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters, verbose=0):
        """Create a hidden Markov model with exponential emissions.
        Parameters
        ----------
        n_states : int
            Number of states.
        rates_var : float, optional
            Variance of the Gaussian jitter applied to the initial rates
            derived from k-means in ``_init``.
        """
        _BaseHMM.__init__(self, n_states, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params,
                          verbose=verbose)
        self.rates_var = rates_var
    def _get_rates(self):
        """Emission rate for each state."""
        return self._rates
    def _set_rates(self, rates):
        rates = np.asarray(rates)
        self._rates = rates.copy()
    rates_ = property(_get_rates, _set_rates)
    def _compute_log_likelihood(self, obs):
        # Per-observation, per-state exponential log-density matrix.
        return log_exponential_density(obs, self._rates)
    def _generate_sample_from_state(self, state, random_state=None):
        # scipy's expon is parameterized by scale = 1 / rate.
        # NOTE(review): random_state is accepted but not forwarded to rvs.
        return expon.rvs(scale=1. / self._rates[state])
    def _init(self, obs, params='str'):
        # Initialize rates from k-means centers of a 1% observation sample;
        # rate = 1 / mean for an exponential distribution.
        super(ExponentialHMM, self)._init(obs, params=params)
        concat_obs = np.array(obs
                              .flatMap(identity)
                              .sample(False, 0.01)
                              .collect())
        if 'r' in params:
            clu = cluster.KMeans(n_clusters=self.n_states).fit(
                np.atleast_2d(concat_obs).T)
            rates = normal(0, self.rates_var, self.n_states) + \
                1. / clu.cluster_centers_.T[0]
            # Clip at 0.1 to keep all rates strictly positive.
            self._rates = np.maximum(0.1, rates)
    def _initialize_sufficient_statistics(self):
        stats = super(ExponentialHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_states)    # sum of state posteriors
        stats['obs'] = np.zeros((self.n_states,))  # posterior-weighted obs sums
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(ExponentialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'r' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)
    def _do_mstep(self, stats, params):
        super(ExponentialHMM, self)._do_mstep(stats, params)
        if 'r' in params:
            # MLE of an exponential rate: inverse of the weighted mean
            # (note the numerator/denominator are flipped vs. PoissonHMM).
            self._rates = stats['post'] / stats['obs']
    def _check_input_symbols(self, obs):
        """check if input can be used for ExponentialHMM. Input must be a list
        of non-negative reals.
        e.g. x = [0., 0.5, 2.3] is OK and y = [0.0, -1.0, 3.3, 5.4, 10.9] not
        """
        symbols = obs.copy()
        if symbols.dtype.kind not in ('f', 'i'):
            # input must be numeric (float or integer)
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains negative values
            return False
        return True
    def _n_free_parameters(self):
        # Free transition/start parameters plus one rate per state.
        n_pars = (self.n_states - 1) * (self.n_states + 1)
        n_pars += self.n_states
        return n_pars
    def fit(self, sc, data, warm_start=False):
        """Estimate model parameters.
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.
        Parameters
        ----------
        sc : SparkContext
            Used to broadcast the model to the executors.
        data : RDD
            RDD of array-like observation sequences.
        warm_start : bool, optional
            If True, reuse the current parameters instead of re-initializing.
        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. the covariance parameter on one or
        more components becoming too small). You can fix this by getting
        more training data, or increasing covars_prior.
        """
        err_msg = ("Input must be a list of non-negative real arrays, \
                   but %s was given.")
        # Broadcast once so executors validate sequences without
        # re-serializing the model per task.
        modelBroadcast = sc.broadcast(self)
        if not data.map(lambda x: modelBroadcast.value._check_input_symbols(x)).min():
            raise ValueError(err_msg % data.take(5))
        return super(ExponentialHMM, self).fit(sc, data, warm_start)
class MultinomialExponentialHMM(_BaseHMM):
    """Hidden Markov Model with joint multinomial and exponential emissions

    Observations are two-column arrays: column 0 holds the discrete symbol,
    column 1 the continuous (exponential) value; the two are assumed
    conditionally independent given the state.
    Attributes
    ----------
    n_states : int
        Number of states in the model.
    n_symbols : int
        Number of possible symbols emitted by the model (in the observations).
    transmat : array, shape (`n_states`, `n_states`)
        Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
        Initial state occupation distribution.
    emissionprob : array, shape (`n_states`, `n_symbols`)
        Probability of emitting a given symbol when in each state.
    rates : array, shape (`n_states`,)
        Exponential rate parameters for each state.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob, 'r' for rates.
        Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob,
        'r' for rates.
        Defaults to all parameters.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more iterations the lower the frequency). If
        greater than 1 then it prints progress and performance for every
        iteration.
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_states=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 emissionprob_prior=None, rates_var=1.0, algorithm="viterbi",
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters, init_params=string.ascii_letters,
                 verbose=0):
        """Create a hidden Markov model with joint multinomial and
        exponential emissions.
        Parameters
        ----------
        n_states : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_states, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params,
                          verbose=verbose)
        # Dirichlet prior for the per-state emission distributions and the
        # jitter variance for the initial exponential rates.
        self.emissionprob_prior = emissionprob_prior
        self.rates_var = rates_var
    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        return np.exp(self._log_emissionprob)
    def _set_emissionprob(self, emissionprob):
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_states, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_states, n_symbols)')
        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(emissionprob):
            normalize(emissionprob)
        self._log_emissionprob = np.log(emissionprob)
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]
    emissionprob_ = property(_get_emissionprob, _set_emissionprob)
    def _get_rates(self):
        """Emission rate for each state."""
        return self._rates
    def _set_rates(self, rates):
        rates = np.asarray(rates)
        self._rates = rates.copy()
    rates_ = property(_get_rates, _set_rates)
    def _compute_log_likelihood(self, obs):
        # Joint log-likelihood = symbol term + exponential term.
        # NOTE(review): indexing with `map(...)` only works on Python 2,
        # where map returns a list; on Python 3 this needs list(map(...)).
        return self._log_emissionprob[:, map(int, obs[:, 0])].T + \
            log_exponential_density(obs[:, 1], self._rates)
    def _generate_sample_from_state(self, state, random_state=None):
        # Inverse-CDF sampling for the discrete symbol; scipy for the
        # exponential component (scale = 1 / rate).
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        expon_obs = expon.rvs(scale=1. / self._rates[state])
        return symbol, expon_obs
    def _init(self, obs, params='ster'):
        super(MultinomialExponentialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)
        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                # NOTE(review): this counts distinct flattened rows, i.e.
                # distinct (symbol, value) pairs, not distinct symbols --
                # verify n_symbols is computed as intended.
                symbols = set(obs.flatMap(identity).distinct().collect())
                self.n_symbols = len(symbols)
            if self.emissionprob_prior is None:
                self.emissionprob_prior = np.ones((self.n_states,
                                                   self.n_symbols))
            emissionprob = np.vstack([np.random.dirichlet(
                self.emissionprob_prior[i])
                for i in xrange(self.n_states)])
            self.emissionprob_ = emissionprob
        # 1% sample of the continuous column for k-means rate initialization.
        concat_obs = obs.flatMap(lambda seq: seq[:, 1]).sample(False, 0.01).collect()
        if 'r' in params:
            clu = cluster.KMeans(n_clusters=self.n_states).fit(
                np.atleast_2d(concat_obs).T)
            rates = normal(0, self.rates_var, self.n_states) + \
                1. / clu.cluster_centers_.T[0]
            self._rates = np.maximum(0.1, rates)
    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialExponentialHMM,
                      self)._initialize_sufficient_statistics()
        stats['obs'] = np.zeros((self.n_states, self.n_symbols))  # symbol counts
        stats['post'] = np.zeros(self.n_states)                   # posterior sums
        stats['expon_obs'] = np.zeros((self.n_states,))           # weighted values
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(MultinomialExponentialHMM,
              self)._accumulate_sufficient_statistics(stats,
                                                      obs,
                                                      framelogprob,
                                                      posteriors,
                                                      fwdlattice,
                                                      bwdlattice,
                                                      params)
        if 'e' in params:
            for t, symbol in enumerate(obs[:, 0]):
                stats['obs'][:, int(symbol)] += posteriors[t]
        if 'r' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['expon_obs'] += np.dot(posteriors.T, obs[:, 1])
    def _do_mstep(self, stats, params):
        super(MultinomialExponentialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])
        if 'r' in params:
            # MLE of the exponential rate: inverse posterior-weighted mean.
            self._rates = stats['post'] / stats['expon_obs']
    def _check_input_symbols(self, obs):
        """check if input can be used for MultinomialExponentialHMM.fit:
        column 0 must be non-negative integers (symbols) and column 1
        non-negative reals; symbol values must also be contiguous
        (checked separately in ``fit``).
        e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
        """
        symbols = obs[:, 0]
        if symbols.dtype.kind not in ('i', 'f'):
            # symbol column must be numeric
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # symbol column contains negative values
            return False
        symbols = obs[:, 1]
        if symbols.dtype.kind not in ('f', 'i'):
            # continuous column must be numeric
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # continuous column contains negative values
            return False
        return True
    def _n_free_parameters(self):
        # Transition/start parameters, per-state emission distributions,
        # plus one exponential rate per state.
        n_pars = (self.n_states - 1) * (self.n_states + 1)
        n_pars += self.n_states * (self.n_symbols - 1)
        n_pars += self.n_states
        return n_pars
    def fit(self, sc, data, warm_start=False, **kwargs):
        """Estimate model parameters.
        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.
        Parameters
        ----------
        sc : SparkContext
            Used to broadcast the model to the executors.
        data : RDD
            RDD of array-like observation sequences with two columns
            (symbol, continuous value).
        warm_start : bool, optional
            If True, reuse the current parameters instead of re-initializing.
        """
        err_msg = ("Input must be a list of non-negative integer arrays where "
                   "in all, every element must be continuous, but %s was "
                   "given.")
        modelBroadcast = sc.broadcast(self)
        cleaned_data = data.map(lambda seq: np.array(seq))
        if not cleaned_data.map(lambda seq: modelBroadcast.value._check_input_symbols(seq)).min():
            raise ValueError(err_msg % cleaned_data.take(5))
        # Reject symbol alphabets with gaps (e.g. {0, 2}): the emission
        # matrix is indexed directly by symbol value.
        elif np.any(np.diff(cleaned_data.flatMap(lambda seq: seq[:, 0]).distinct().sortBy(identity).collect()) > 1):
            raise ValueError(err_msg % cleaned_data.take(5))
        return super(MultinomialExponentialHMM, self).fit(sc, cleaned_data, warm_start, **kwargs)
class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions
    Attributes
    ----------
    init_params : string, optional
        Controls which parameters are initialized prior to training. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.
    params : string, optional
        Controls which parameters are updated in the training process. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm' for
        means, and 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.
    n_states : int
        Number of states in the model.
    transmat : array, shape (`n_states`, `n_states`)
        Matrix of transition probabilities between states.
    startprob : array, shape (`n_states`,)
        Initial state occupation distribution.
    gmms : array of GMM objects, length `n_states`
        GMM emission distributions for each state.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more iterations the lower the frequency). If
        greater than 1 then it prints progress and performance for every
        iteration.
    means_var : float, default: 1.0
        Variance parameter to randomize the initialization of the GMM objects.
        The larger means_var, the greater the randomization.
    Examples
    --------
    >>> from hmmlearn.hmm import GMMHMM
    >>> GMMHMM(n_states=2, n_mix=10, covariance_type='diag')
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...
    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """
    def __init__(self, n_states=1, n_mix=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters,
                 verbose=0, means_var=1.0):
        """Create a hidden Markov model with GMM emissions.
        Parameters
        ----------
        n_states : int
            Number of states.
        n_mix : int
            Number of mixture components per state.
        """
        _BaseHMM.__init__(self, n_states, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params,
                          verbose=verbose)
        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        self.gmms = gmms
        if gmms is None:
            # Build one GMM per state with the requested covariance type.
            gmms = []
            for x in range(self.n_states):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms
        self.means_var = means_var
    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.
        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type
    def _compute_log_likelihood(self, obs):
        # One column of per-observation scores per state's GMM.
        return np.array([g.score(obs) for g in self.gmms_]).T
    def _generate_sample_from_state(self, state, random_state=None):
        return self.gmms_[state].sample(1, random_state=random_state).flatten()
    def _init(self, obs, params='stwmc'):
        super(GMMHMM, self)._init(obs, params=params)
        # Fit every state's GMM on the same 1% sample, then jitter the
        # means with Gaussian noise so states do not start identical.
        concat_obs = np.vstack(obs
                               .flatMap(lambda x: x)
                               .sample(False, 0.01)
                               .map(np.atleast_2d)
                               .collect())
        n_features = concat_obs.shape[1]
        for g in self.gmms_:
            g.set_params(init_params=params, n_iter=0)
            g.fit(concat_obs)
            means = np.array([multivariate_normal(
                mean,
                np.eye(n_features) * self.means_var)
                for mean in g.means_])
            g.means_ = means
    def _initialize_sufficient_statistics(self):
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
        stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
        stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
        return stats
    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        for state, g in enumerate(self.gmms_):
            _, tmp_gmm_posteriors = g.score_samples(obs)
            # Combine per-mixture and per-state posteriors in log space;
            # eps guards against log(0).
            lgmm_posteriors = np.log(tmp_gmm_posteriors
                                     + np.finfo(np.float).eps) + \
                np.log(posteriors[:, state][:, np.newaxis]
                       + np.finfo(np.float).eps)
            gmm_posteriors = np.exp(lgmm_posteriors)
            # Run one GMM M-step on a scratch model to get this sequence's
            # contribution without touching the state's live GMM.
            tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
            n_features = g.means_.shape[1]
            tmp_gmm._set_covars(
                distribute_covar_matrix_to_match_covariance_type(
                    np.eye(n_features), g.covariance_type,
                    g.n_components))
            norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
            if np.any(np.isnan(tmp_gmm.covars_)):
                raise ValueError
            stats['norm'][state] += norm
            if 'm' in params:
                stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
            if 'c' in params:
                if tmp_gmm.covariance_type == 'tied':
                    stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
                else:
                    # Reshape norm so it broadcasts against covars_ of any
                    # covariance type (diag/spherical/full).
                    cvnorm = np.copy(norm)
                    shape = np.ones(tmp_gmm.covars_.ndim)
                    shape[0] = np.shape(tmp_gmm.covars_)[0]
                    cvnorm.shape = shape
                    stats['covars'][state] += tmp_gmm.covars_ * cvnorm
    def _do_mstep(self, stats, params):
        super(GMMHMM, self)._do_mstep(stats, params)
        # All that is left to do is to apply covars_prior to the
        # parameters updated in _accumulate_sufficient_statistics.
        for state, g in enumerate(self.gmms_):
            n_features = g.means_.shape[1]
            norm = stats['norm'][state]
            if 'w' in params:
                g.weights_ = normalize(norm)
            if 'm' in params:
                g.means_ = stats['means'][state] / norm[:, np.newaxis]
            if 'c' in params:
                if g.covariance_type == 'tied':
                    g.covars_ = ((stats['covars'][state]
                                  + self.covars_prior * np.eye(n_features))
                                 / norm.sum())
                else:
                    cvnorm = np.copy(norm)
                    shape = np.ones(g.covars_.ndim)
                    shape[0] = np.shape(g.covars_)[0]
                    cvnorm.shape = shape
                    if (g.covariance_type in ['spherical', 'diag']):
                        g.covars_ = (stats['covars'][state] +
                                     self.covars_prior) / cvnorm
                    elif g.covariance_type == 'full':
                        eye = np.eye(n_features)
                        g.covars_ = ((stats['covars'][state]
                                      + self.covars_prior * eye[np.newaxis])
                                     / cvnorm)
    def _n_free_parameters(self):
        # Transition/start parameters plus, per state: mixture weights,
        # means, and the covariance parameters of its covariance type.
        n_pars = (self.n_states - 1) * (self.n_states + 1)
        for g in self.gmms_:
            n_components = g.means_.shape[0]
            n_features = g.means_.shape[1]
            n_pars += n_components - 1
            n_pars += n_components * n_features
            if g.covariance_type == 'spherical':
                n_pars += n_components
            elif g.covariance_type == 'tied':
                n_pars += ((n_features + 1) * n_features) / 2
            elif g.covariance_type == 'diag':
                n_pars += n_components * n_features
            elif g.covariance_type == 'full':
                n_pars += n_components * ((n_features + 1) * n_features) / 2
        return n_pars
|
mvictor212/hmmlearn
|
hmmlearn/hmmspark.py
|
Python
|
bsd-3-clause
| 81,101
|
[
"Gaussian"
] |
65a3faef7d7bbd7896b3536954d378cef1fae80ffbeb1ea6b60cbde949fdd318
|
from .algo_utils import XYZVelocity, MovingPixel, SimpleXYZVelocity, Velocity
import random
class FireFly(object):
    # A single firefly: a chain of MovingPixel objects that share one
    # velocity, with each pixel further down the tail progressively dimmer.
    def __init__(self, display, location, rgb_color, velocity, tail_len):
        assert isinstance(velocity, XYZVelocity)
        assert len(location) == 3
        assert len(rgb_color) == 3
        assert tail_len > 0
        r, g, b = rgb_color
        ir, ig, ib = rgb_color  # head intensities, kept for the dimming math
        pix_list = []
        for inx in range(0, tail_len):
            new_pixel = MovingPixel(display, location, velocity, (r,g,b))
            # Advance the pixels created so far, so the tail is spread out
            # behind the head before the next (dimmer) pixel is added.
            for pixel in pix_list:
                pixel.do_move()
            pix_list.append(new_pixel)
            # Dim each successive tail pixel by 1/(inx+2).
            # NOTE(review): integer division under Python 2, float under
            # Python 3 -- confirm which semantics are intended.
            r = ir / (inx + 2)
            g = ig / (inx + 2)
            b = ib / (inx + 2)
        self.__pixels = pix_list
    def tick_cb(self):
        # Forward the display tick to every pixel in the tail.
        for pixel in self.__pixels:
            pixel.tick_cb()
class FireFlyGroup(object):
    """A swarm of randomly generated FireFly objects driven for a fixed
    number of ticks.

    Parameters
    ----------
    display : display object
        Target display; must provide ``refresh_physical()``.
    count : int
        Number of fireflies to create.
    duration : int
        Number of ticks to run before the process exits.
    """
    def __init__(self, display, count, duration):
        ffl = []
        for inx in range(0, count):
            dxt = random.randint(1, 10)     # ticks per x-step
            mv = random.choice([-1, 1])     # x direction
            tail = random.randint(3,10)     # tail length
            # Split a budget of 255 across the three color channels so the
            # combined brightness stays bounded.
            t = 255
            r = random.randint(0,255)
            t -= r
            g = random.randint(0,t)
            t -= g
            b = random.randint(0,t)
            x = random.randint(0,99)
            z = random.randint(0,1)
            y = 0
            vel = SimpleXYZVelocity(display, dxt, 0, 0, mv, 0, 0) # XXX ick
            ff = FireFly(display, (x, y, z), (r, g, b), vel, tail)
            ffl.append(ff)
        self.__fireflys = ffl
        self.__display = display
        self.__duration = duration
    def tick_cb(self):
        """Advance every firefly one tick and refresh the display.

        Exits the whole process once ``duration`` ticks have elapsed.
        """
        for ff in self.__fireflys:
            ff.tick_cb()
        if self.__duration == 0:
            # NOTE(review): hard process exit when the run is over -- confirm
            # this is the intended shutdown path for the caller.
            import sys
            sys.exit(0)
        self.__duration -= 1
        if self.__duration % 1000 == 0:
            # BUG FIX: was the Python 2 statement `print self.__duration`,
            # which is a SyntaxError on Python 3; the call form works on both.
            print(self.__duration)
        self.__display.refresh_physical()
class TestFly(object):
    # Fixed (non-random) fireflies for manually eyeballing movement:
    # a slow red fly, a fast green fly and -- since the `if True` always
    # fires -- a medium blue fly that also moves in z.
    def __init__(self, display):
        ffl = []  # NOTE(review): assigned but never used; fireflies are
                  # collected in self.__fireflys below.
        slow = SimpleXYZVelocity(display, 10, 0, 0, 1, 0, 0)
        fast = SimpleXYZVelocity(display, 1, 0, 0, 1, 0, 0)
        ffslow = FireFly(display, (50, 0, 0), (255, 0, 0), slow, 5)
        fffast = FireFly(display, (0, 0, 0), (0, 255, 0), fast, 8)
        self.__fireflys = [ffslow, fffast]
        if True:
            med = SimpleXYZVelocity(display, 2, 0, 2, 1, 0, 1)
            fmed = FireFly(display, (80, 0, 0), (0, 0, 255), med, 20)
            self.__fireflys.append(fmed)
        self.__display = display
    def tick_cb(self):
        # Advance every test fly one tick and refresh the display.
        for ff in self.__fireflys:
            ff.tick_cb()
        self.__display.refresh_physical()
|
stuart-stanley/stormlight-archive
|
src/algos/firefly.py
|
Python
|
apache-2.0
| 2,706
|
[
"Firefly"
] |
9b43597a58ef78624915baee6541afc5e8540b67f0bea77a914a246c218af5f3
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 04 14:44:15 2017
@author: DanielM
"""
import unittest
class TestNeuron(unittest.TestCase):
    """
    Imports a test neuron as specified by ouropy.tests.testneuron
    and the parameters in ouropy/tests/testneuronparams.txt
    The tests check that all parameters were set as defined.
    """
    def setUp(self):
        """Create a generic neuron"""
        import ouropy.tests.testneuron
        self.testneuron = ouropy.tests.testneuron.TestNeuron()
    def test_soma_general(self):
        # Placeholder -- no general soma checks implemented yet.
        pass
    def test_soma_geometry(self):
        # Expected values come from ouropy/tests/testneuronparams.txt.
        self.assertEqual(self.testneuron.soma.L, 16.8)
        self.assertEqual(self.testneuron.soma.diam, 20)
    def test_dendrites_general(self):
        # Two dendrites, each with four segments.
        self.assertEqual(len(self.testneuron.dendrites),2)
        self.assertEqual(len(self.testneuron.dendrites[0]), 4)
        self.assertEqual(len(self.testneuron.dendrites[1]), 4)
    def test_dendrites_geometry(self):
        # Per-segment expected lengths and (tapering) diameters.
        length_dends = [50.0, 150.0, 150.0, 150.0]
        diam_dends = [5,4,3,2]
        for dend in self.testneuron.dendrites:
            for seg_idx, seg in enumerate(dend):
                self.assertEqual(seg.L, length_dends[seg_idx])
                self.assertEqual(seg.diam, diam_dends[seg_idx])
    def test_soma_biophysics(self):
        # Placeholder -- biophysics checks not implemented yet.
        pass
class TestGenNeuronMethods(unittest.TestCase):
"""
Imports the genneuron module and tests the methods of the GenNeuron class.
"""
def setUp(self):
from ouropy.genneuron import GenNeuron
self.neuron = GenNeuron()
def test_mk_soma(self):
self.neuron.mk_soma(15,20)
self.assertEqual(self.neuron.soma.diam, 15)
self.assertEqual(self.neuron.soma.L, 20)
self.neuron.mk_soma(20,25)
self.assertEqual(self.neuron.soma.diam, 20)
self.assertEqual(self.neuron.soma.L, 25)
def test_new(self):
print(self.neuron.)
if __name__ == '__main__':
unittest.main()
|
danielmuellernai/ouropy
|
tests/genneuron_test.py
|
Python
|
mit
| 2,037
|
[
"NEURON"
] |
9db17498b10a88d1834fad62541825c1cba283a5a2ab15fcafc4e6cb7bda3494
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Tools/Utilities/Generate SoundEx Codes"""
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.soundex import soundex
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.autocomp import fill_combo
from gramps.gen.ggettext import sgettext as _
from gramps.gui.plug import tool
from gramps.gui.glade import Glade
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Generate_SoundEx_codes')
#-------------------------------------------------------------------------
#
# SoundGen.py
#
#-------------------------------------------------------------------------
class SoundGen(tool.Tool, ManagedWindow):
    # Interactive tool window: type (or pick) a surname and see its SoundEx
    # code recomputed live.  NOTE(review): Python 2 code (uses `unicode`).
    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        self.label = _('SoundEx code generator')
        tool.Tool.__init__(self, dbstate, options_class, name)
        ManagedWindow.__init__(self,uistate,[],self.__class__)
        self.glade = Glade()
        self.glade.connect_signals({
            "destroy_passed_object" : self.close,
            "on_help_clicked" : self.on_help_clicked,
            "on_delete_event" : self.close,
            })
        window = self.glade.toplevel
        self.set_window(window,self.glade.get_object('title'),self.label)
        self.value = self.glade.get_object("value")
        self.autocomp = self.glade.get_object("name_list")
        self.name = self.autocomp.get_child()
        # Recompute the code on every keystroke in the name entry.
        self.name.connect('changed',self.on_apply_clicked)
        # Collect the distinct surnames in the database for autocompletion.
        names = []
        person = None
        for person in self.db.iter_people():
            lastname = person.get_primary_name().get_surname()
            if lastname not in names:
                names.append(lastname)
        names.sort()
        fill_combo(self.autocomp, names)
        if person:
            # Pre-fill with the surname of the last person iterated above.
            n = person.get_primary_name().get_surname()
            self.name.set_text(n)
            try:
                se_text = soundex(n)
            except UnicodeEncodeError:
                # soundex cannot encode the name; fall back to the empty code.
                se_text = soundex('')
            self.value.set_text(se_text)
        else:
            self.name.set_text("")
        self.show()
    def on_help_clicked(self, obj):
        """Display the relevant portion of GRAMPS manual"""
        display_help(WIKI_HELP_PAGE , WIKI_HELP_SEC)
    def build_menu_names(self, obj):
        # (window title, menu label) pair used by ManagedWindow.
        return (self.label,None)
    def on_apply_clicked(self, obj):
        # 'changed' handler: recompute the SoundEx code of the entry text.
        try:
            se_text = soundex(unicode(obj.get_text()))
        except UnicodeEncodeError:
            se_text = soundex('')
        self.value.set_text(se_text)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class SoundGenOptions(tool.ToolOptions):
    """Option container for the SoundEx generator tool.

    The tool exposes no configurable options, so this subclass merely
    forwards construction to the base class.
    """
    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)
|
arunkgupta/gramps
|
gramps/plugins/tool/soundgen.py
|
Python
|
gpl-2.0
| 4,204
|
[
"Brian"
] |
a278494bd7fca06bd30c9789a9691c2025e06c2a6e401b52eef67c3e63d5cb76
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes that operate on points or vectors in 3D space.
"""
import re
import string
import warnings
from math import cos, pi, sin, sqrt
import numpy as np
from monty.json import MSONable
from pymatgen.electronic_structure.core import Magmom
from pymatgen.util.string import transformation_to_string
from pymatgen.util.typing import ArrayLike
__author__ = "Shyue Ping Ong, Shyam Dwaraknath, Matthew Horton"
class SymmOp(MSONable):
    """
    A symmetry operation in cartesian space. Consists of a rotation plus a
    translation. Implementation is as an affine transformation matrix of rank 4
    for efficiency. Read: http://en.wikipedia.org/wiki/Affine_transformation.

    .. attribute:: affine_matrix

        A 4x4 numpy.array representing the symmetry operation.
    """

    def __init__(self, affine_transformation_matrix: ArrayLike, tol=0.01):
        """
        Initializes the SymmOp from a 4x4 affine transformation matrix.
        In general, this constructor should not be used unless you are
        transferring rotations. Use the static constructors instead to
        generate a SymmOp from proper rotations and translation.

        Args:
            affine_transformation_matrix (4x4 array): Representing an
                affine transformation.
            tol (float): Tolerance for determining if matrices are equal.

        Raises:
            ValueError: If the supplied matrix is not 4x4.
        """
        affine_transformation_matrix = np.array(affine_transformation_matrix)
        if affine_transformation_matrix.shape != (4, 4):
            raise ValueError("Affine Matrix must be a 4x4 numpy array!")
        self.affine_matrix = affine_transformation_matrix
        self.tol = tol

    @staticmethod
    def from_rotation_and_translation(
        rotation_matrix: ArrayLike = ((1, 0, 0), (0, 1, 0), (0, 0, 1)),
        translation_vec: ArrayLike = (0, 0, 0),
        tol=0.1,
    ):
        """
        Creates a symmetry operation from a rotation matrix and a translation
        vector.

        Args:
            rotation_matrix (3x3 array): Rotation matrix.
            translation_vec (3x1 array): Translation vector.
            tol (float): Tolerance to determine if rotation matrix is valid.

        Returns:
            SymmOp object

        Raises:
            ValueError: If the rotation matrix is not 3x3 or the translation
                vector is not of length 3.
        """
        rotation_matrix = np.array(rotation_matrix)
        translation_vec = np.array(translation_vec)
        if rotation_matrix.shape != (3, 3):
            raise ValueError("Rotation Matrix must be a 3x3 numpy array.")
        if translation_vec.shape != (3,):
            raise ValueError("Translation vector must be a rank 1 numpy array " "with 3 elements.")
        affine_matrix = np.eye(4)
        affine_matrix[0:3][:, 0:3] = rotation_matrix
        affine_matrix[0:3][:, 3] = translation_vec
        return SymmOp(affine_matrix, tol)

    def __eq__(self, other):
        # Equality is tolerance-based: two operations compare equal when
        # their affine matrices agree element-wise to within self.tol.
        # Returning NotImplemented (rather than raising AttributeError)
        # lets comparisons against arbitrary objects fall back to Python's
        # default handling.
        if not isinstance(other, SymmOp):
            return NotImplemented
        return np.allclose(self.affine_matrix, other.affine_matrix, atol=self.tol)

    def __hash__(self):
        # Because equality is tolerance-based, no hash can consistently
        # distinguish "almost equal" operations; a constant hash keeps the
        # __eq__/__hash__ contract valid (equal objects share a hash) at the
        # cost of hash-table performance.
        return 7

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        output = [
            "Rot:",
            str(self.affine_matrix[0:3][:, 0:3]),
            "tau",
            str(self.affine_matrix[0:3][:, 3]),
        ]
        return "\n".join(output)

    def operate(self, point):
        """
        Apply the operation on a point.

        Args:
            point: Cartesian coordinate.

        Returns:
            Coordinates of point after operation.
        """
        # Homogeneous coordinates: append 1 so that the affine matrix applies
        # rotation and translation in a single product.
        affine_point = np.array([point[0], point[1], point[2], 1])
        return np.dot(self.affine_matrix, affine_point)[0:3]

    def operate_multi(self, points):
        """
        Apply the operation on a list of points.

        Args:
            points: List of Cartesian coordinates

        Returns:
            Numpy array of coordinates after operation
        """
        points = np.array(points)
        # Append a trailing column of ones (homogeneous coordinates), apply
        # the affine matrix to all points at once, then drop the column.
        affine_points = np.concatenate([points, np.ones(points.shape[:-1] + (1,))], axis=-1)
        return np.inner(affine_points, self.affine_matrix)[..., :-1]

    def apply_rotation_only(self, vector: ArrayLike):
        """
        Vectors should only be operated by the rotation matrix and not the
        translation vector.

        Args:
            vector (3x1 array): A vector.

        Returns:
            The rotated vector.
        """
        return np.dot(self.rotation_matrix, vector)

    def transform_tensor(self, tensor: np.ndarray):
        """
        Applies rotation portion to a tensor. Note that tensor has to be in
        full form, not the Voigt form.

        Args:
            tensor (numpy array): a rank n tensor with all dimensions equal
                to 3.

        Returns:
            Transformed tensor.
        """
        dim = tensor.shape
        rank = len(dim)
        assert all(i == 3 for i in dim)
        # Build einstein sum string, e.g. "ad,be,cf,def->abc" for a rank 3
        # tensor: one rotation matrix is contracted against each index.
        lc = string.ascii_lowercase
        indices = lc[:rank], lc[rank : 2 * rank]
        einsum_string = ",".join([a + i for a, i in zip(*indices)])
        einsum_string += ",{}->{}".format(*indices[::-1])
        einsum_args = [self.rotation_matrix] * rank + [tensor]
        return np.einsum(einsum_string, *einsum_args)

    def are_symmetrically_related(self, point_a: ArrayLike, point_b: ArrayLike, tol: float = 0.001) -> bool:
        """
        Checks if two points are symmetrically related.

        Args:
            point_a (3x1 array): First point.
            point_b (3x1 array): Second point.
            tol (float): Absolute tolerance for checking distance.

        Returns:
            True if self.operate(point_a) == point_b or vice versa.
        """
        # Test the mapping in both directions, since the operation may relate
        # the points either way round.
        if np.allclose(self.operate(point_a), point_b, atol=tol):
            return True
        if np.allclose(self.operate(point_b), point_a, atol=tol):
            return True
        return False

    @property
    def rotation_matrix(self) -> np.ndarray:
        """
        A 3x3 numpy.array representing the rotation matrix.
        """
        return self.affine_matrix[0:3][:, 0:3]

    @property
    def translation_vector(self) -> np.ndarray:
        """
        A rank 1 numpy.array of dim 3 representing the translation vector.
        """
        return self.affine_matrix[0:3][:, 3]

    def __mul__(self, other):
        """
        Returns a new SymmOp which is equivalent to apply the "other" SymmOp
        followed by this one.
        """
        new_matrix = np.dot(self.affine_matrix, other.affine_matrix)
        return SymmOp(new_matrix)

    @property
    def inverse(self) -> "SymmOp":
        """
        Returns inverse of transformation.
        """
        invr = np.linalg.inv(self.affine_matrix)
        return SymmOp(invr)

    @staticmethod
    def from_axis_angle_and_translation(
        axis: ArrayLike, angle: float, angle_in_radians: bool = False, translation_vec: ArrayLike = (0, 0, 0)
    ) -> "SymmOp":
        """
        Generates a SymmOp for a rotation about a given axis plus translation.

        Args:
            axis: The axis of rotation in cartesian space. For example,
                [1, 0, 0] indicates rotation about x-axis.
            angle (float): Angle of rotation.
            angle_in_radians (bool): Set to True if angles are given in
                radians. Or else, units of degrees are assumed.
            translation_vec: A translation vector. Defaults to zero.

        Returns:
            SymmOp for a rotation about given axis and translation.
        """
        # asarray accepts any array-like (tuple, list, ndarray, ...) and
        # avoids a copy when the input is already an ndarray; the previous
        # isinstance(tuple, list) check silently missed other array-likes.
        axis = np.asarray(axis)
        vec = np.array(translation_vec)
        a = angle if angle_in_radians else angle * pi / 180
        cosa = cos(a)
        sina = sin(a)
        # Rodrigues' rotation formula about the normalized axis u, written
        # out element by element.
        u = axis / np.linalg.norm(axis)
        r = np.zeros((3, 3))
        r[0, 0] = cosa + u[0] ** 2 * (1 - cosa)
        r[0, 1] = u[0] * u[1] * (1 - cosa) - u[2] * sina
        r[0, 2] = u[0] * u[2] * (1 - cosa) + u[1] * sina
        r[1, 0] = u[0] * u[1] * (1 - cosa) + u[2] * sina
        r[1, 1] = cosa + u[1] ** 2 * (1 - cosa)
        r[1, 2] = u[1] * u[2] * (1 - cosa) - u[0] * sina
        r[2, 0] = u[0] * u[2] * (1 - cosa) - u[1] * sina
        r[2, 1] = u[1] * u[2] * (1 - cosa) + u[0] * sina
        r[2, 2] = cosa + u[2] ** 2 * (1 - cosa)
        return SymmOp.from_rotation_and_translation(r, vec)

    @staticmethod
    def from_origin_axis_angle(
        origin: ArrayLike, axis: ArrayLike, angle: float, angle_in_radians: bool = False
    ) -> "SymmOp":
        """
        Generates a SymmOp for a rotation about a given axis through an
        origin.

        Args:
            origin (3x1 array): The origin which the axis passes through.
            axis (3x1 array): The axis of rotation in cartesian space. For
                example, [1, 0, 0] indicates rotation about x-axis.
            angle (float): Angle of rotation.
            angle_in_radians (bool): Set to True if angles are given in
                radians. Or else, units of degrees are assumed.

        Returns:
            SymmOp.
        """
        theta = angle * pi / 180 if not angle_in_radians else angle
        a = origin[0]  # type: ignore
        b = origin[1]  # type: ignore
        c = origin[2]  # type: ignore
        u = axis[0]  # type: ignore
        v = axis[1]  # type: ignore
        w = axis[2]  # type: ignore
        # Set some intermediate values.
        u2 = u * u  # type: ignore
        v2 = v * v  # type: ignore
        w2 = w * w  # type: ignore
        cos_t = cos(theta)
        sin_t = sin(theta)
        l2 = u2 + v2 + w2  # type: ignore
        l = sqrt(l2)  # type: ignore
        # Build the matrix entries element by element.
        m11 = (u2 + (v2 + w2) * cos_t) / l2  # type: ignore
        m12 = (u * v * (1 - cos_t) - w * l * sin_t) / l2  # type: ignore
        m13 = (u * w * (1 - cos_t) + v * l * sin_t) / l2  # type: ignore
        m14 = (  # type: ignore
            a * (v2 + w2)  # type: ignore
            - u * (b * v + c * w)  # type: ignore
            + (u * (b * v + c * w) - a * (v2 + w2)) * cos_t  # type: ignore
            + (b * w - c * v) * l * sin_t  # type: ignore
        ) / l2  # type: ignore
        m21 = (u * v * (1 - cos_t) + w * l * sin_t) / l2  # type: ignore
        m22 = (v2 + (u2 + w2) * cos_t) / l2  # type: ignore
        m23 = (v * w * (1 - cos_t) - u * l * sin_t) / l2  # type: ignore
        m24 = (  # type: ignore
            b * (u2 + w2)  # type: ignore
            - v * (a * u + c * w)  # type: ignore
            + (v * (a * u + c * w) - b * (u2 + w2)) * cos_t  # type: ignore
            + (c * u - a * w) * l * sin_t  # type: ignore
        ) / l2  # type: ignore
        m31 = (u * w * (1 - cos_t) - v * l * sin_t) / l2  # type: ignore
        m32 = (v * w * (1 - cos_t) + u * l * sin_t) / l2  # type: ignore
        m33 = (w2 + (u2 + v2) * cos_t) / l2  # type: ignore
        m34 = (  # type: ignore
            c * (u2 + v2)  # type: ignore
            - w * (a * u + b * v)  # type: ignore
            + (w * (a * u + b * v) - c * (u2 + v2)) * cos_t  # type: ignore
            + (a * v - b * u) * l * sin_t  # type: ignore
        ) / l2
        return SymmOp(
            [  # type: ignore
                [m11, m12, m13, m14],
                [m21, m22, m23, m24],
                [m31, m32, m33, m34],
                [0, 0, 0, 1],
            ]
        )

    @staticmethod
    def reflection(normal: ArrayLike, origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
        """
        Returns reflection symmetry operation.

        Args:
            normal (3x1 array): Vector of the normal to the plane of
                reflection.
            origin (3x1 array): A point in which the mirror plane passes
                through.

        Returns:
            SymmOp for the reflection about the plane
        """
        # Normalize the normal vector first.
        n = np.array(normal, dtype=float) / np.linalg.norm(normal)
        u, v, w = n
        translation = np.eye(4)
        translation[0:3, 3] = -np.array(origin)
        # Householder reflection matrix for a plane through the origin.
        xx = 1 - 2 * u ** 2
        yy = 1 - 2 * v ** 2
        zz = 1 - 2 * w ** 2
        xy = -2 * u * v
        xz = -2 * u * w
        yz = -2 * v * w
        mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0], [0, 0, 0, 1]]
        # Conjugate by the translation only when the plane does not pass
        # through the origin.
        if np.linalg.norm(origin) > 1e-6:
            mirror_mat = np.dot(np.linalg.inv(translation), np.dot(mirror_mat, translation))
        return SymmOp(mirror_mat)

    @staticmethod
    def inversion(origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
        """
        Inversion symmetry operation about axis.

        Args:
            origin (3x1 array): Origin of the inversion operation. Defaults
                to [0, 0, 0].

        Returns:
            SymmOp representing an inversion operation about the origin.
        """
        mat = -np.eye(4)
        mat[3, 3] = 1
        mat[0:3, 3] = 2 * np.array(origin)
        return SymmOp(mat)

    @staticmethod
    def rotoreflection(axis: ArrayLike, angle: float, origin: ArrayLike = (0, 0, 0)) -> "SymmOp":
        """
        Returns a roto-reflection symmetry operation

        Args:
            axis (3x1 array): Axis of rotation / mirror normal
            angle (float): Angle in degrees
            origin (3x1 array): Point left invariant by roto-reflection.
                Defaults to (0, 0, 0).

        Return:
            Roto-reflection operation
        """
        rot = SymmOp.from_origin_axis_angle(origin, axis, angle)
        refl = SymmOp.reflection(axis, origin)
        m = np.dot(rot.affine_matrix, refl.affine_matrix)
        return SymmOp(m)

    def as_dict(self) -> dict:
        """
        :return: MSONable dict.
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "matrix": self.affine_matrix.tolist(),
            "tolerance": self.tol,
        }

    def as_xyz_string(self) -> str:
        """
        Returns a string of the form 'x, y, z', '-x, -y, z',
        '-y+1/2, x+1/2, z+1/2', etc. Only works for integer rotation matrices
        """
        # test for invalid rotation matrix
        if not np.all(np.isclose(self.rotation_matrix, np.round(self.rotation_matrix))):
            warnings.warn("Rotation matrix should be integer")
        return transformation_to_string(self.rotation_matrix, translation_vec=self.translation_vector, delim=", ")

    @staticmethod
    def from_xyz_string(xyz_string: str) -> "SymmOp":
        """
        Args:
            xyz_string: string of the form 'x, y, z', '-x, -y, z',
                '-2y+1/2, 3x+1/2, z-y+1/2', etc.

        Returns:
            SymmOp
        """
        rot_matrix = np.zeros((3, 3))
        trans = np.zeros(3)
        toks = xyz_string.strip().replace(" ", "").lower().split(",")
        re_rot = re.compile(r"([+-]?)([\d\.]*)/?([\d\.]*)([x-z])")
        re_trans = re.compile(r"([+-]?)([\d\.]+)/?([\d\.]*)(?![x-z])")
        for i, tok in enumerate(toks):
            # build the rotation matrix
            for m in re_rot.finditer(tok):
                factor = -1.0 if m.group(1) == "-" else 1.0
                if m.group(2) != "":
                    factor *= float(m.group(2)) / float(m.group(3)) if m.group(3) != "" else float(m.group(2))
                # 'x' -> column 0, 'y' -> 1, 'z' -> 2 (ord('x') == 120).
                j = ord(m.group(4)) - 120
                rot_matrix[i, j] = factor
            # build the translation vector
            for m in re_trans.finditer(tok):
                factor = -1 if m.group(1) == "-" else 1
                num = float(m.group(2)) / float(m.group(3)) if m.group(3) != "" else float(m.group(2))
                trans[i] = num * factor
        return SymmOp.from_rotation_and_translation(rot_matrix, trans)

    @classmethod
    def from_dict(cls, d) -> "SymmOp":
        """
        :param d: dict
        :return: SymmOp from dict representation.
        """
        return cls(d["matrix"], d["tolerance"])
class MagSymmOp(SymmOp):
    """
    Thin wrapper around SymmOp to extend it to support magnetic symmetry
    by including a time reversal operator. Magnetic symmetry is similar
    to conventional crystal symmetry, except symmetry is reduced by the
    addition of a time reversal operator which acts on an atom's magnetic
    moment.
    """

    def __init__(self, affine_transformation_matrix: ArrayLike, time_reversal: int, tol: float = 0.01):
        """
        Initializes the MagSymmOp from a 4x4 affine transformation matrix
        and time reversal operator.
        In general, this constructor should not be used unless you are
        transferring rotations. Use the static constructors instead to
        generate a SymmOp from proper rotations and translation.

        Args:
            affine_transformation_matrix (4x4 array): Representing an
                affine transformation.
            time_reversal (int): 1 or -1
            tol (float): Tolerance for determining if matrices are equal.

        Raises:
            ValueError: If time_reversal is neither +1 nor -1.
        """
        SymmOp.__init__(self, affine_transformation_matrix, tol=tol)
        if time_reversal not in (-1, 1):
            # ValueError is more precise than the bare Exception raised
            # previously; callers catching Exception are unaffected.
            raise ValueError(
                "Time reversal operator not well defined: {0}, {1}".format(time_reversal, type(time_reversal))
            )
        self.time_reversal = time_reversal

    def __eq__(self, other):
        # Tolerance-based matrix comparison plus an exact match on the time
        # reversal operator. Returning NotImplemented for foreign types lets
        # Python fall back to its default comparison handling instead of
        # raising AttributeError.
        if not isinstance(other, MagSymmOp):
            return NotImplemented
        return np.allclose(self.affine_matrix, other.affine_matrix, atol=self.tol) and (
            self.time_reversal == other.time_reversal
        )

    def __str__(self):
        return self.as_xyzt_string()

    def __repr__(self):
        output = [
            "Rot:",
            str(self.affine_matrix[0:3][:, 0:3]),
            "tau",
            str(self.affine_matrix[0:3][:, 3]),
            "Time reversal:",
            str(self.time_reversal),
        ]
        return "\n".join(output)

    def __hash__(self):
        # useful for obtaining a set of unique MagSymmOps
        # NOTE(review): this hashes the exact matrix while __eq__ is
        # tolerance-based, so two ops that compare equal within tol may hash
        # differently — confirm whether that is acceptable for callers.
        hashable_value = tuple(self.affine_matrix.flatten()) + (self.time_reversal,)
        return hashable_value.__hash__()

    def operate_magmom(self, magmom):
        """
        Apply time reversal operator on the magnetic moment. Note that
        magnetic moments transform as axial vectors, not polar vectors.
        See 'Symmetry and magnetic structures', Rodríguez-Carvajal and
        Bourée for a good discussion. DOI: 10.1051/epjconf/20122200010

        Args:
            magmom: Magnetic moment as electronic_structure.core.Magmom
                class or as list or np array-like

        Returns:
            Magnetic moment after operator applied as Magmom class
        """
        magmom = Magmom(magmom)  # type casting to handle lists as input
        # Axial-vector transformation: the det() factor corrects for
        # improper rotations (reflections/inversions), and the time
        # reversal operator flips the moment when equal to -1.
        transformed_moment = (
            self.apply_rotation_only(magmom.global_moment) * np.linalg.det(self.rotation_matrix) * self.time_reversal
        )
        # retains input spin axis if different from default
        return Magmom.from_global_moment_and_saxis(transformed_moment, magmom.saxis)

    @classmethod
    def from_symmop(cls, symmop, time_reversal) -> "MagSymmOp":
        """
        Initialize a MagSymmOp from a SymmOp and time reversal operator.

        Args:
            symmop (SymmOp): SymmOp
            time_reversal (int): Time reversal operator, +1 or -1.

        Returns:
            MagSymmOp object
        """
        magsymmop = cls(symmop.affine_matrix, time_reversal, symmop.tol)
        return magsymmop

    @staticmethod
    def from_rotation_and_translation_and_time_reversal(
        rotation_matrix: ArrayLike = ((1, 0, 0), (0, 1, 0), (0, 0, 1)),
        translation_vec: ArrayLike = (0, 0, 0),
        time_reversal: int = 1,
        tol: float = 0.1,
    ) -> "MagSymmOp":
        """
        Creates a symmetry operation from a rotation matrix, translation
        vector and time reversal operator.

        Args:
            rotation_matrix (3x3 array): Rotation matrix.
            translation_vec (3x1 array): Translation vector.
            time_reversal (int): Time reversal operator, +1 or -1.
            tol (float): Tolerance to determine if rotation matrix is valid.

        Returns:
            MagSymmOp object
        """
        symmop = SymmOp.from_rotation_and_translation(
            rotation_matrix=rotation_matrix, translation_vec=translation_vec, tol=tol
        )
        return MagSymmOp.from_symmop(symmop, time_reversal)

    @staticmethod
    def from_xyzt_string(xyzt_string: str) -> "MagSymmOp":
        """
        Args:
            xyzt_string: string of the form 'x, y, z, +1', '-x, -y, z, -1',
                '-2y+1/2, 3x+1/2, z-y+1/2, +1', etc.

        Returns:
            MagSymmOp object

        Raises:
            ValueError: If the trailing time reversal operator is missing or
                cannot be parsed as an integer.
        """
        symmop = SymmOp.from_xyz_string(xyzt_string.rsplit(",", 1)[0])
        try:
            time_reversal = int(xyzt_string.rsplit(",", 1)[1])
        except (ValueError, IndexError) as exc:
            # Narrowed from a bare `except Exception` and chained so the
            # offending token remains visible; ValueError still satisfies
            # callers that catch Exception.
            raise ValueError("Time reversal operator could not be parsed.") from exc
        return MagSymmOp.from_symmop(symmop, time_reversal)

    def as_xyzt_string(self) -> str:
        """
        Returns a string of the form 'x, y, z, +1', '-x, -y, z, -1',
        '-y+1/2, x+1/2, z+1/2, +1', etc. Only works for integer rotation matrices
        """
        xyzt_string = SymmOp.as_xyz_string(self)
        return xyzt_string + ", {:+}".format(self.time_reversal)

    def as_dict(self) -> dict:
        """
        :return: MSONable dict
        """
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "matrix": self.affine_matrix.tolist(),
            "tolerance": self.tol,
            "time_reversal": self.time_reversal,
        }

    @classmethod
    def from_dict(cls, d: dict) -> "MagSymmOp":
        """
        :param d: dict
        :return: MagneticSymmOp from dict representation.
        """
        return cls(d["matrix"], tol=d["tolerance"], time_reversal=d["time_reversal"])
|
gmatteo/pymatgen
|
pymatgen/core/operations.py
|
Python
|
mit
| 21,863
|
[
"CRYSTAL",
"pymatgen"
] |
d9069e8d517374ff92304f9934f2687fa6cbffe17c43d22146d368462ea93c8c
|
"""
Normalizing flows for transforming probability distributions.
"""
import numpy as np
import logging
from typing import List, Iterable, Optional, Tuple, Sequence, Any, Callable
import tensorflow as tf
from tensorflow.keras.layers import Lambda
import deepchem as dc
from deepchem.models.losses import Loss
from deepchem.models.models import Model
from deepchem.models.keras_model import KerasModel
from deepchem.models.optimizers import Optimizer, Adam
from deepchem.utils.typing import OneOrMany
from deepchem.utils.data_utils import load_from_disk, save_to_disk
# Module-level logger named after this module, per the logging convention.
logger = logging.getLogger(__name__)
class NormalizingFlow(tf.keras.models.Model):
  """Base class for normalizing flow.

  A normalizing flow maps a simple base distribution (one that is easy to
  sample from and whose density is easy to evaluate) through a learned
  transformation y=g(x) to model a richer distribution p(y) fitted to data.

  Normalizing flows combine the advantages of autoregressive models
  (which provide likelihood estimation but do not learn features) and
  variational autoencoders (which learn feature representations but
  do not provide marginal likelihoods).
  """

  def __init__(self,
               base_distribution,
               flow_layers: Sequence,
               event_shape: Optional[List[int]] = None,
               **kwargs) -> None:
    """Create a new NormalizingFlow.

    Parameters
    ----------
    base_distribution: tfd.Distribution
      Probability distribution to be transformed, typically an
      N-dimensional multivariate Gaussian.
    flow_layers: Sequence[tfb.Bijector]
      An iterable of bijectors that comprise the flow.
    event_shape: Optional[List[int]]
      Dimensionality of inputs, e.g. [2] for 2D inputs.
    **kwargs
    """
    try:
      import tensorflow_probability as tfp
    except ModuleNotFoundError:
      raise ImportError(
          "This class requires tensorflow-probability to be installed.")
    tfd = tfp.distributions
    tfb = tfp.bijectors
    self.base_distribution = base_distribution
    self.flow_layers = flow_layers
    self.event_shape = event_shape
    # A chain of bijectors is itself a bijector; tfb.Chain composes
    # right-to-left, so reverse the layers to apply them in list order.
    chained_bijector = tfb.Chain(list(reversed(self.flow_layers)))
    # The transformed distribution is an instance of
    # tfd.TransformedDistribution.
    self.flow = tfd.TransformedDistribution(
        distribution=self.base_distribution,
        bijector=chained_bijector,
        event_shape=self.event_shape)
    super(NormalizingFlow, self).__init__(**kwargs)

  def __call__(self, *inputs, training=True):
    """Apply the forward transformation of the chained bijector."""
    return self.flow.bijector.forward(*inputs)
class NormalizingFlowModel(KerasModel):
  """A base distribution and normalizing flow for applying transformations.

  Normalizing flows are effective for any application requiring
  a probabilistic model that can both sample from a distribution and
  compute marginal likelihoods, e.g. generative modeling,
  unsupervised learning, or probabilistic inference. For a thorough review
  of normalizing flows, see [1]_.

  A distribution implements two main operations:
    1. Sampling from the transformed distribution
    2. Calculating log probabilities

  A normalizing flow implements three main operations:
    1. Forward transformation
    2. Inverse transformation
    3. Calculating the Jacobian

  Deep Normalizing Flow models require normalizing flow layers where
  input and output dimensions are the same, the transformation is invertible,
  and the determinant of the Jacobian is efficient to compute and
  differentiable. The determinant of the Jacobian of the transformation
  gives a factor that preserves the probability volume to 1 when transforming
  between probability densities of different random variables.

  References
  ----------
  .. [1] Papamakarios, George et al. "Normalizing Flows for Probabilistic Modeling and Inference." (2019). https://arxiv.org/abs/1912.02762.
  """

  def __init__(self, model: NormalizingFlow, **kwargs) -> None:
    """Creates a new NormalizingFlowModel.

    In addition to the following arguments, this class also accepts all the keyword arguments from KerasModel.

    Parameters
    ----------
    model: NormalizingFlow
      An instance of NormalizingFlow.

    Examples
    --------
    >> import tensorflow_probability as tfp
    >> tfd = tfp.distributions
    >> tfb = tfp.bijectors
    >> flow_layers = [
    ..    tfb.RealNVP(
    ..        num_masked=2,
    ..        shift_and_log_scale_fn=tfb.real_nvp_default_template(
    ..            hidden_layers=[8, 8]))
    ..]
    >> base_distribution = tfd.MultivariateNormalDiag(loc=[0., 0., 0.])
    >> nf = NormalizingFlow(base_distribution, flow_layers)
    >> nfm = NormalizingFlowModel(nf)
    >> dataset = NumpyDataset(
    ..    X=np.random.rand(5, 3).astype(np.float32),
    ..    y=np.random.rand(5,),
    ..    ids=np.arange(5))
    >> nfm.fit(dataset)
    """
    try:
      # Probe for tensorflow-probability up front so a missing dependency
      # fails fast with a clear message (tfd/tfb themselves are not used
      # directly in this constructor).
      import tensorflow_probability as tfp
      tfd = tfp.distributions
      tfb = tfp.bijectors
    except ModuleNotFoundError:
      raise ImportError(
          "This class requires tensorflow-probability to be installed.")
    # The loss deliberately ignores labels and weights: training minimizes
    # the negative log likelihood of the inputs themselves (unsupervised
    # density estimation).
    self.nll_loss_fn = lambda input, labels, weights: self.create_nll(input)
    super(NormalizingFlowModel, self).__init__(
        model=model, loss=self.nll_loss_fn, **kwargs)
    self.flow = self.model.flow  # normalizing flow
    # TODO: Incompability between TF and TFP means that TF doesn't track
    # trainable variables in the flow; must override `_create_gradient_fn`
    # self._variables = self.flow.trainable_variables

  def create_nll(self, input: OneOrMany[tf.Tensor]) -> tf.Tensor:
    """Create the negative log likelihood loss function.

    The default implementation is appropriate for most cases. Subclasses can
    override this if there is a need to customize it.

    Parameters
    ----------
    input: OneOrMany[tf.Tensor]
      A batch of data.

    Returns
    -------
    A Tensor equal to the loss function to use for optimization.
    """
    return -tf.reduce_mean(self.flow.log_prob(input, training=True))

  def save(self):
    """Saves model to disk using joblib."""
    save_to_disk(self.model, self.get_model_filename(self.model_dir))

  def reload(self):
    """Loads model from joblib file on disk."""
    self.model = load_from_disk(self.get_model_filename(self.model_dir))

  def _create_gradient_fn(self,
                          variables: Optional[List[tf.Variable]]) -> Callable:
    """Create a function that computes gradients and applies them to the model.

    Because of the way TensorFlow function tracing works, we need to create a
    separate function for each new set of variables.

    Parameters
    ----------
    variables: Optional[List[tf.Variable]]
      Variables to track during training. When None, the flow's own
      trainable variables are used.

    Returns
    -------
    Callable function that applies gradients for batch of training data.
    """

    @tf.function(experimental_relax_shapes=True)
    def apply_gradient_for_batch(inputs, labels, weights, loss):
      with tf.GradientTape() as tape:
        # Explicitly watch the flow's variables: TF does not track the TFP
        # bijector variables automatically (see TODO in __init__).
        tape.watch(self.flow.trainable_variables)
        if isinstance(inputs, tf.Tensor):
          inputs = [inputs]
        if self._loss_outputs is not None:
          inputs = [inputs[i] for i in self._loss_outputs]
        batch_loss = loss(inputs, labels, weights)
      if variables is None:
        vars = self.flow.trainable_variables
      else:
        vars = variables
      grads = tape.gradient(batch_loss, vars)
      self._tf_optimizer.apply_gradients(zip(grads, vars))
      self._global_step.assign_add(1)
      return batch_loss

    return apply_gradient_for_batch
class NormalizingFlowLayer(object):
  """Abstract base class for custom normalizing flow layers.

  Use this to implement normalizing flow layers that are not available in
  tfb; it should not be instantiated directly.

  A normalizing flow transforms random variables into new random variables.
  Each learnable layer is a bijection — an invertible transformation
  between two probability distributions — and pushing a simple initial
  density through the flow produces a richer, more multi-modal
  distribution. Three operations define a flow:

  1. Forward
    Transform a distribution. Useful for generating new samples.
  2. Inverse
    Reverse a transformation, useful for computing conditional probabilities.
  3. Log(|det(Jacobian)|) [LDJ]
    Compute the determinant of the Jacobian of the transformation,
    which is a scaling that conserves the probability "volume" to equal 1.

  For examples of customized normalizing flows applied to toy problems,
  see [1]_.

  References
  ----------
  .. [1] Saund, Brad. "Normalizing Flows." (2020). https://github.com/bsaund/normalizing_flows.

  Notes
  -----
  - A sequence of normalizing flows is a normalizing flow.
  - The Jacobian is the matrix of first-order derivatives of the transform.
  """

  def __init__(self, **kwargs):
    """Create a new NormalizingFlowLayer."""
    pass

  def _forward(self, x: tf.Tensor) -> tf.Tensor:
    """Forward transformation x = g(y).

    Parameters
    ----------
    x: tf.Tensor
      Input tensor.

    Returns
    -------
    fwd_x: tf.Tensor
      Transformed tensor.
    """
    raise NotImplementedError("Forward transform must be defined.")

  def _inverse(self, y: tf.Tensor) -> tf.Tensor:
    """Inverse transformation x = g^{-1}(y).

    Parameters
    ----------
    y: tf.Tensor
      Input tensor.

    Returns
    -------
    inv_y: tf.Tensor
      Inverted tensor.
    """
    raise NotImplementedError("Inverse transform must be defined.")

  def _forward_log_det_jacobian(self, x: tf.Tensor) -> tf.Tensor:
    """Log |Determinant(Jacobian(x)|, where x = g^{-1}(y).

    Parameters
    ----------
    x: tf.Tensor
      Input tensor.

    Returns
    -------
    ldj: tf.Tensor
      Log of absolute value of determinant of Jacobian of x.
    """
    raise NotImplementedError("LDJ must be defined.")

  def _inverse_log_det_jacobian(self, y: tf.Tensor) -> tf.Tensor:
    """Inverse LDJ, where x = g^{-1}(y).

    The ILDJ = -LDJ: map y back through the inverse transform, then negate
    the forward LDJ evaluated there.

    Parameters
    ----------
    y: tf.Tensor
      Input tensor.

    Returns
    -------
    ildj: tf.Tensor
      Log of absolute value of determinant of Jacobian of y.
    """
    x = self._inverse(y)
    return -self._forward_log_det_jacobian(x)
|
lilleswing/deepchem
|
deepchem/models/normalizing_flows.py
|
Python
|
mit
| 10,502
|
[
"Gaussian"
] |
9110c50a451ab414602406ea429049d93836e7aec64dfff261d9900d2f766343
|
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides access to Iris-specific configuration values.
The default configuration values can be overridden by creating the file
``iris/etc/site.cfg``. If it exists, this file must conform to the format
defined by :mod:`ConfigParser`.
----------
.. py:data:: iris.config.TEST_DATA_DIR
Local directory where test data exists. Defaults to "test_data"
sub-directory of the Iris package install directory. The test data
directory supports the subset of Iris unit tests that require data.
Directory contents accessed via :func:`iris.tests.get_data_path`.
.. py:data:: iris.config.PALETTE_PATH
The full path to the Iris palette configuration directory
.. py:data:: iris.config.IMPORT_LOGGER
The [optional] name of the logger to notify when first imported.
----------
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from six.moves import configparser
import contextlib
import os.path
import warnings
# Returns simple string options
def get_option(section, option, default=None):
    """
    Return the value of *option* within *section* of the site
    configuration, or *default* when the section/option pair is absent.
    """
    if config.has_option(section, option):
        return config.get(section, option)
    return default
# Returns directory path options
def get_dir_option(section, option, default=None):
    """
    Return the directory path held by *section*/*option* of the site
    configuration, falling back to *default* when the option is absent or
    does not name an existing directory (a warning is issued in the
    latter case).
    """
    if not config.has_option(section, option):
        return default
    candidate = config.get(section, option)
    if os.path.isdir(candidate):
        return candidate
    msg = 'Ignoring config item {!r}:{!r} (section:option) as {!r}' \
          ' is not a valid directory path.'
    warnings.warn(msg.format(section, option, candidate))
    return default
# Figure out the full path to the "iris" package.
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
# The full path to the configuration directory of the active Iris instance.
CONFIG_PATH = os.path.join(ROOT_PATH, 'etc')
# Load the optional "site.cfg" file if it exists.
# (ConfigParser.read silently skips missing files, so an absent site.cfg
# simply leaves every option at its default.)
config = configparser.SafeConfigParser()
config.read([os.path.join(CONFIG_PATH, 'site.cfg')])
##################
# Resource options
_RESOURCE_SECTION = 'Resources'
# Default test-data location is the "test_data" directory alongside this
# module, unless overridden via site.cfg.
TEST_DATA_DIR = get_dir_option(_RESOURCE_SECTION, 'test_data_dir',
                               default=os.path.join(os.path.dirname(__file__),
                                                    'test_data'))
# Override the data repository if the appropriate environment variable
# has been set. This is used in setup.py in the TestRunner command to
# enable us to simulate the absence of external data.
override = os.environ.get("OVERRIDE_TEST_DATA_REPOSITORY")
if override:
    # Deliberately fall back to None (not the default path) when the
    # override does not name a real directory: this simulates the data
    # repository being absent.
    TEST_DATA_DIR = None
    if os.path.isdir(os.path.expanduser(override)):
        TEST_DATA_DIR = os.path.abspath(override)
PALETTE_PATH = get_dir_option(_RESOURCE_SECTION, 'palette_path',
                              os.path.join(CONFIG_PATH, 'palette'))
# Runtime options
class NetCDF(object):
    """Control Iris NetCDF options."""

    def __init__(self, conventions_override=None):
        """
        Set up NetCDF processing options for Iris.

        Currently accepted kwargs:

        * conventions_override (bool):
            Define whether the CF Conventions version (e.g. `CF-1.6`) set when
            saving a cube to a NetCDF file should be defined by
            Iris (the default) or the cube being saved.

            If `False` (the default), specifies that Iris should set the
            CF Conventions version when saving cubes as NetCDF files.
            If `True`, specifies that the cubes being saved to NetCDF should
            set the CF Conventions version for the saved NetCDF files.

        Example usages:

        * Specify, for the lifetime of the session, that we want all cubes
          written to NetCDF to define their own CF Conventions versions::

            iris.config.netcdf.conventions_override = True
            iris.save('my_cube', 'my_dataset.nc')
            iris.save('my_second_cube', 'my_second_dataset.nc')

        * Specify, with a context manager, that we want a cube written to
          NetCDF to define its own CF Conventions version::

            with iris.config.netcdf.context(conventions_override=True):
                iris.save('my_cube', 'my_dataset.nc')

        """
        # Define allowed `__dict__` keys first.
        # (__setattr__ below rejects any name not already present in
        # __dict__, so the full option set must be seeded here before any
        # setattr call.)
        self.__dict__['conventions_override'] = None
        # Now set specific values.
        # Routing the value through setattr applies the default-substitution
        # and validation logic in __setattr__.
        setattr(self, 'conventions_override', conventions_override)

    def __repr__(self):
        msg = 'NetCDF options: {}.'
        # Automatically populate with all currently accepted kwargs.
        options = ['{}={}'.format(k, v)
                   for k, v in six.iteritems(self.__dict__)]
        joined = ', '.join(options)
        return msg.format(joined)

    def __setattr__(self, name, value):
        # Option names form a closed set: only names seeded into __dict__
        # by __init__ may ever be assigned.
        if name not in self.__dict__:
            # Can't add new names.
            msg = 'Cannot set option {!r} for {} configuration.'
            raise AttributeError(msg.format(name, self.__class__.__name__))
        if value is None:
            # Set an unset value to the name's default.
            value = self._defaults_dict[name]['default']
        if self._defaults_dict[name]['options'] is not None:
            # Replace a bad value with a good one if there is a defined set of
            # specified good values. If there isn't, we can assume that
            # anything goes.
            if value not in self._defaults_dict[name]['options']:
                good_value = self._defaults_dict[name]['default']
                wmsg = ('Attempting to set invalid value {!r} for '
                        'attribute {!r}. Defaulting to {!r}.')
                warnings.warn(wmsg.format(value, name, good_value))
                value = good_value
        self.__dict__[name] = value

    @property
    def _defaults_dict(self):
        # Set this as a property so that it isn't added to `self.__dict__`.
        # Maps each option name to its default value and (optionally) the
        # set of permitted values; an 'options' of None means any value is
        # accepted.
        return {'conventions_override': {'default': False,
                                         'options': [True, False]},
                }

    @contextlib.contextmanager
    def context(self, **kwargs):
        """
        Allow temporary modification of the options via a context manager.
        Accepted kwargs are the same as can be supplied to the Option.
        """
        # Snapshot the starting state for restoration at the end of the
        # contextmanager block.
        starting_state = self.__dict__.copy()
        # Update the state to reflect the requested changes.
        for name, value in six.iteritems(kwargs):
            setattr(self, name, value)
        try:
            yield
        finally:
            # Return the state to the starting state.
            self.__dict__.clear()
            self.__dict__.update(starting_state)
# Module-level singleton controlling NetCDF save options for the session.
netcdf = NetCDF()
|
QuLogic/iris
|
lib/iris/config.py
|
Python
|
gpl-3.0
| 7,891
|
[
"NetCDF"
] |
904d3b635f9fa9bce908a98f3392f4913a89e99372a2546bc7388d0b4febb968
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
import math
from collections import defaultdict
import itertools
from itertools import combinations
from itertools import product
from typing import Dict, Any
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.special import comb
import pytest
import joblib
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import _convert_container
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import skip_if_no_parallel
from sklearn.utils.fixes import parse_version
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree._classes import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# Larger classification sample used for testing feature importances
X_large, y_large = datasets.make_classification(
    n_samples=500, n_features=10, n_informative=3, n_redundant=0,
    n_repeated=0, shuffle=False, random_state=0)

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# Make regression dataset
X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10,
                                        random_state=1)

# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)

# Get the default backend in joblib to test parallelism and interaction with
# different backends
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__

# Estimator registries keyed by class name; tests below parametrize over
# these dicts so every forest flavour is exercised.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}

FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}

FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}

# Union registries built from the above.
FOREST_ESTIMATORS: Dict[str, Any] = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)

FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy()
FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Fit once with default max_features and once restricted to a single
    # feature per split; both must recover the toy labelling exactly.
    for extra in ({}, {"max_features": 1}):
        forest = ForestClassifier(n_estimators=10, random_state=1, **extra)
        forest.fit(X, y)
        assert_array_equal(forest.predict(T), true_result)
        assert 10 == len(forest)

    # also test apply: one leaf id per (sample, tree) pair.
    leaf_indices = forest.apply(X)
    assert leaf_indices.shape == (len(X), forest.n_estimators)


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_classification_toy(name):
    check_classification_toy(name)
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Full-feature forests should score very well; restricting to two
    # features per split still has to beat a weak baseline.
    for extra, threshold in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = ForestClassifier(n_estimators=10, criterion=criterion,
                               random_state=1, **extra)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert score > threshold, ("Failed with criterion %s and score = %f"
                                   % (criterion, score))


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
@pytest.mark.parametrize('criterion', ("gini", "entropy"))
def test_iris(name, criterion):
    check_iris_criterion(name, criterion)
def check_regression_criterion(name, criterion):
    # Check consistency on regression dataset.
    ForestRegressor = FOREST_REGRESSORS[name]

    # Unrestricted max_features: training R^2 must be high.
    full_reg = ForestRegressor(n_estimators=5, criterion=criterion,
                               random_state=1)
    full_reg.fit(X_reg, y_reg)
    full_score = full_reg.score(X_reg, y_reg)
    assert full_score > 0.93, ("Failed with max_features=None, criterion %s "
                               "and score = %f" % (criterion, full_score))

    # Restricting splits to 6 of the 10 features barely lowers the bar.
    sub_reg = ForestRegressor(n_estimators=5, criterion=criterion,
                              max_features=6, random_state=1)
    sub_reg.fit(X_reg, y_reg)
    sub_score = sub_reg.score(X_reg, y_reg)
    assert sub_score > 0.92, ("Failed with max_features=6, criterion %s "
                              "and score = %f" % (criterion, sub_score))


@pytest.mark.parametrize('name', FOREST_REGRESSORS)
@pytest.mark.parametrize('criterion', ("mse", "mae", "friedman_mse"))
def test_regression(name, criterion):
    check_regression_criterion(name, criterion)
def check_regressor_attributes(name):
    # Regression models should not have a classes_ attribute,
    # neither before nor after fitting.
    reg = FOREST_REGRESSORS[name](random_state=0)
    for attr in ("classes_", "n_classes_"):
        assert not hasattr(reg, attr)

    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert not hasattr(reg, attr)


@pytest.mark.parametrize('name', FOREST_REGRESSORS)
def test_regressor_attributes(name):
    check_regressor_attributes(name)
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               max_features=1, max_depth=1)
        clf.fit(iris.data, iris.target)
        proba = clf.predict_proba(iris.data)
        # Each row of the probability matrix must sum to one.
        assert_array_almost_equal(proba.sum(axis=1),
                                  np.ones(iris.data.shape[0]))
        # Log-probabilities must be consistent with the probabilities.
        assert_array_almost_equal(proba,
                                  np.exp(clf.predict_log_proba(iris.data)))


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_probability(name):
    check_probability(name)
def check_importances(name, criterion, dtype, tolerance):
    # Cast the large classification sample to the requested dtype.
    X = X_large.astype(dtype, copy=False)
    y = y_large.astype(dtype, copy=False)

    ForestEstimator = FOREST_ESTIMATORS[name]

    est = ForestEstimator(n_estimators=10, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_

    # Only the first 3 of the 10 features are informative; the estimator
    # must single them out.
    assert importances.shape[0] == 10
    assert np.sum(importances > 0.1) == 3
    assert np.all(importances[:3] > 0.1)

    # Parallel computation must reproduce the same importances.
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)

    # Sample weights: importances stay non-negative, and a global rescaling
    # of the weights leaves them (approximately) unchanged.
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=10, random_state=0,
                          criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert np.all(importances >= 0.0)

    for scale in [0.5, 100]:
        est = ForestEstimator(n_estimators=10, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert np.abs(importances - importances_bis).mean() < tolerance


@pytest.mark.parametrize('dtype', (np.float64, np.float32))
@pytest.mark.parametrize(
    'name, criterion',
    itertools.chain(product(FOREST_CLASSIFIERS,
                            ["gini", "entropy"]),
                    product(FOREST_REGRESSORS,
                            ["mse", "friedman_mse", "mae"])))
def test_importances(dtype, name, criterion):
    # MAE-based importances are noisier, so the tolerance is loosened.
    tolerance = 0.05 if (name in FOREST_REGRESSORS and
                         criterion == "mae") else 0.01
    check_importances(name, criterion, dtype, tolerance)
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).

    def binomial(k, n):
        # Binomial coefficient C(n, k); zero outside the valid range.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        # Empirical Shannon entropy (in bits) of an integer label array.
        n_samples = len(samples)
        entropy = 0.

        for count in np.bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)

        return entropy

    def mdi_importance(X_m, X, y):
        # Theoretical Mean Decrease of Impurity for feature X_m, averaged
        # over all conditioning subsets B of the other features.
        n_samples, n_features = X.shape

        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]

        imp = 0.

        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))

            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=bool)

                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]

                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)

                    if n_samples_b > 0:
                        children = []

                        # Partition the conditioned subsample by X_m's value.
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])

                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))

        return imp

    # Small exact dataset: 7 binary features, labels in the last column.
    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])

    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]

    # Compute true importances
    true_importances = np.zeros(n_features)

    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)

    # Estimate importances with totally randomized trees
    # (max_features=1 makes each split-variable choice uniformly random).
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)

    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators

    # Check correctness: the importances decompose the entropy of y, and
    # each estimate is close to its theoretical value.
    assert_almost_equal(entropy(y), sum(importances))
    assert np.abs(true_importances - importances).mean() < 0.01
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_unfitted_feature_importances(name):
    # Accessing feature_importances_ before fit() must raise NotFittedError.
    err_msg = ("This {} instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this estimator."
               .format(name))
    with pytest.raises(NotFittedError, match=err_msg):
        FOREST_ESTIMATORS[name]().feature_importances_
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_accuracy",
    [
        (
            *datasets.make_classification(
                n_samples=300, n_classes=2, random_state=0
            ),
            0.9,
        ),
        (
            *datasets.make_classification(
                n_samples=1000, n_classes=3, n_informative=6, random_state=0
            ),
            0.65,
        ),
        (
            # non-contiguous class labels (1, 3, 5)
            iris.data, iris.target * 2 + 1, 0.65,
        ),
        (
            # multilabel target (2-D y)
            *datasets.make_multilabel_classification(
                n_samples=300, random_state=0
            ),
            0.18,
        ),
    ],
)
def test_forest_classifier_oob(
    ForestClassifier, X, y, X_type, lower_bound_accuracy
):
    """Check that OOB score is close to score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0,
    )
    classifier = ForestClassifier(
        n_estimators=40, bootstrap=True, oob_score=True, random_state=0,
    )

    # OOB attributes must not exist before fitting.
    assert not hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_decision_function_")

    classifier.fit(X_train, y_train)

    # The OOB accuracy approximates the held-out accuracy.
    test_score = classifier.score(X_test, y_test)
    assert abs(test_score - classifier.oob_score_) <= 0.1
    assert classifier.oob_score_ >= lower_bound_accuracy

    # Classifiers expose a decision function, not raw OOB predictions.
    assert hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_prediction_")
    assert hasattr(classifier, "oob_decision_function_")

    if y.ndim == 1:
        expected_shape = (X_train.shape[0], len(set(y)))
    else:
        # Multilabel target: one probability slab per output column.
        expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
    assert classifier.oob_decision_function_.shape == expected_shape
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_r2",
    [
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=1, random_state=0
            ),
            0.7,
        ),
        (
            # multi-target regression (2-D y)
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=2, random_state=0
            ),
            0.55,
        ),
    ],
)
def test_forest_regressor_oob(
    ForestRegressor, X, y, X_type, lower_bound_r2
):
    """Check that forest-based regressor provide an OOB score close to the
    score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0,
    )
    regressor = ForestRegressor(
        n_estimators=50, bootstrap=True, oob_score=True, random_state=0,
    )

    # OOB attributes must not exist before fitting.
    assert not hasattr(regressor, "oob_score_")
    assert not hasattr(regressor, "oob_prediction_")

    regressor.fit(X_train, y_train)

    # The OOB R^2 approximates the held-out R^2.
    test_score = regressor.score(X_test, y_test)
    assert abs(test_score - regressor.oob_score_) <= 0.1
    assert regressor.oob_score_ >= lower_bound_r2

    # Regressors expose raw OOB predictions, not a decision function.
    assert hasattr(regressor, "oob_score_")
    assert hasattr(regressor, "oob_prediction_")
    assert not hasattr(regressor, "oob_decision_function_")

    if y.ndim == 1:
        expected_shape = (X_train.shape[0],)
    else:
        # Multi-target: one prediction column per output.
        expected_shape = (X_train.shape[0], y.ndim)
    assert regressor.oob_prediction_.shape == expected_shape
@pytest.mark.parametrize(
    "ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()
)
def test_forest_oob_warning(ForestEstimator):
    """Check that a warning is raised when there are too few estimators
    for the OOB estimates to cover every sample."""
    forest = ForestEstimator(
        n_estimators=1, oob_score=True, bootstrap=True, random_state=0,
    )
    with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
        forest.fit(iris.data, iris.target)
@pytest.mark.parametrize(
    "ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values()
)
@pytest.mark.parametrize(
    "X, y, params, err_msg",
    [
        # OOB estimation requires bootstrapping.
        (iris.data, iris.target, {"oob_score": True, "bootstrap": False},
         "Out of bag estimation only available if bootstrap=True"),
        # Multiclass-multioutput targets have no OOB support.
        (iris.data, rng.randint(low=0, high=5, size=(iris.data.shape[0], 2)),
         {"oob_score": True, "bootstrap": True},
         "The type of target cannot be used to compute OOB estimates")
    ]
)
def test_forest_oob_error(ForestEstimator, X, y, params, err_msg):
    # Invalid OOB configurations must raise ValueError at fit time.
    estimator = ForestEstimator(**params)
    with pytest.raises(ValueError, match=err_msg):
        estimator.fit(X, y)
@pytest.mark.parametrize("oob_score", [True, False])
def test_random_trees_embedding_raise_error_oob(oob_score):
    # RandomTreesEmbedding rejects OOB scoring both at construction time
    # and through the private hook inherited from the forest base class.
    with pytest.raises(TypeError, match="got an unexpected keyword argument"):
        RandomTreesEmbedding(oob_score=oob_score)
    with pytest.raises(NotImplementedError, match="OOB score not supported"):
        RandomTreesEmbedding()._set_oob_score_and_attributes(X, y)
def check_gridsearch(name):
    # Grid-search a couple of forest hyper-parameters end to end.
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_gridsearch(name):
    # Check that base trees can be grid-searched.
    check_gridsearch(name)
def check_parallel(name, X, y):
    """Check parallel computations in classification"""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3,
                                     random_state=0)
    forest.fit(X, y)
    assert len(forest) == 10

    # Predictions must not depend on the degree of parallelism.
    predictions = []
    for n_jobs in (1, 2):
        forest.set_params(n_jobs=n_jobs)
        predictions.append(forest.predict(X))
    assert_array_almost_equal(predictions[0], predictions[1], 3)


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
def test_parallel(name):
    if name in FOREST_CLASSIFIERS:
        check_parallel(name, iris.data, iris.target)
    elif name in FOREST_REGRESSORS:
        check_parallel(name, X_reg, y_reg)
def check_pickle(name, X, y):
    # Check pickability: a pickle round-trip must preserve both the
    # estimator type and its score.
    est = FOREST_ESTIMATORS[name](random_state=0)
    est.fit(X, y)
    score_before = est.score(X, y)

    restored = pickle.loads(pickle.dumps(est))
    assert type(restored) == est.__class__
    assert restored.score(X, y) == score_before


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
def test_pickle(name):
    if name in FOREST_CLASSIFIERS:
        X, y = iris.data, iris.target
    elif name in FOREST_REGRESSORS:
        X, y = X_reg, y_reg
    # Subsample to keep the pickled model small and the test fast.
    check_pickle(name, X[::2], y[::2])
def check_multioutput(name):
    # Check estimators on multi-output problems.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)

    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # One probability array per output: the first output has 2
            # classes, the second has 4.
            proba = est.predict_proba(X_test)
            assert len(proba) == 2
            assert proba[0].shape == (4, 2)
            assert proba[1].shape == (4, 4)

            log_proba = est.predict_log_proba(X_test)
            assert len(log_proba) == 2
            assert log_proba[0].shape == (4, 2)
            assert log_proba[1].shape == (4, 4)


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
def test_multioutput(name):
    check_multioutput(name)
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_multioutput_string(name):
    # Check estimators on multi-output problems with string outputs.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [["red", "blue"], ["red", "blue"], ["red", "blue"],
               ["green", "green"], ["green", "green"], ["green", "green"],
               ["red", "purple"], ["red", "purple"], ["red", "purple"],
               ["green", "yellow"], ["green", "yellow"], ["green", "yellow"]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [["red", "blue"], ["green", "green"],
              ["red", "purple"], ["green", "yellow"]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_equal(y_pred, y_test)

    with np.errstate(divide="ignore"):
        # One probability array per output: 2 classes then 4 classes.
        proba = est.predict_proba(X_test)
        assert len(proba) == 2
        assert proba[0].shape == (4, 2)
        assert proba[1].shape == (4, 4)

        log_proba = est.predict_log_proba(X_test)
        assert len(log_proba) == 2
        assert log_proba[0].shape == (4, 2)
        assert log_proba[1].shape == (4, 4)
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    ForestClassifier = FOREST_CLASSIFIERS[name]

    # Single output: scalar n_classes_ and a flat classes_ array.
    clf = ForestClassifier(random_state=0).fit(X, y)
    assert clf.n_classes_ == 2
    assert_array_equal(clf.classes_, [-1, 1])

    # Multi-output: per-output class counts and label arrays.
    y_multi = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, y_multi)
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_classes_shape(name):
    check_classes_shape(name)
def test_random_trees_dense_type():
    # With sparse_output=False, RandomTreesEmbedding must return a plain
    # dense ndarray rather than a scipy sparse matrix.
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = hasher.fit_transform(X)
    assert type(transformed) == np.ndarray
def test_random_trees_dense_equal():
    # The sparse_output flag only changes the container: for a fixed
    # random_state, dense and sparse embeddings must hold the same values.
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)

    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)

    # test fit and transform: separate fit()+transform() must match
    # fit_transform() for the same random_state.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())

    # one leaf active per data point per forest
    assert X_transformed.shape[0] == X.shape[0]
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)

    # The embedded circles must be perfectly separable by a linear SVM
    # even after reduction to two SVD components.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert linear_clf.score(X_reduced, y) == 1.
def test_random_hasher_sparse_data():
    # Dense and sparse (CSC) inputs must hash to identical embeddings.
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)

    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    # Fitting with different n_jobs must give identical models.
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    clfs = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                               random_state=12345).fit(X_train, y_train)
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]

    X_test = rng.randn(n_samples, n_features)
    # All degrees of parallelism must produce the same probabilities.
    probas = [clf.predict_proba(X_test) for clf in clfs]
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    rng = check_random_state(12321)

    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500

    reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)

    # Serialise each tree's structure as a "feature,threshold/" string so
    # that structurally identical trees can be counted.
    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))

        uniques[tree] += 1

    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])

    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert len(uniques) == 5
    assert 0.20 > uniques[0][0]  # Rough approximation of 1/6.
    assert 0.20 > uniques[1][0]
    assert 0.20 > uniques[2][0]
    assert 0.20 > uniques[3][0]
    assert uniques[4][0] > 0.3
    assert uniques[4][1] == "0,1/0,0/--0,2/--"

    # Two variables, one with 2 values, one with 3 values
    # NOTE(review): these two draws use the *global* np.random stream
    # instead of the seeded `rng` above, so the data depends on global RNG
    # state — confirm whether switching to `rng.randint` keeps the final
    # assertion valid before changing it.
    X = np.empty((1000, 2))
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)

    reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)

    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))

        uniques[tree] += 1

    uniques = [(count, tree) for tree, count in uniques.items()]
    # The test expects exactly 8 distinct tree structures here.
    assert len(uniques) == 8
def check_max_leaf_nodes_max_depth(name):
    # Test precedence of max_leaf_nodes over max_depth.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    # With both limits set, the resulting tree is still depth 1.
    both_limits = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                                  n_estimators=1, random_state=0).fit(X, y)
    assert both_limits.estimators_[0].get_depth() == 1

    # max_depth alone also yields depth 1.
    depth_only = ForestEstimator(max_depth=1, n_estimators=1,
                                 random_state=0).fit(X, y)
    assert depth_only.estimators_[0].get_depth() == 1


@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_max_leaf_nodes_max_depth(name):
    check_max_leaf_nodes_max_depth(name)
def check_min_samples_split(name):
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test boundary value: invalid settings must raise at fit time.
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=0).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=1.1).fit, X, y)

    est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
    est.fit(X, y)
    # Internal (non-leaf) nodes are those with a left child.
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]

    assert np.min(node_samples) > len(X) * 0.5 - 1, (
        "Failed with {0}".format(name))

    # A float min_samples_split is interpreted as a fraction of len(X).
    est = ForestEstimator(min_samples_split=0.5, n_estimators=1,
                          random_state=0)
    est.fit(X, y)
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]

    assert np.min(node_samples) > len(X) * 0.5 - 1, (
        "Failed with {0}".format(name))


@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_min_samples_split(name):
    check_min_samples_split(name)
def check_min_samples_leaf(name):
    X, y = hastie_X, hastie_y

    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test boundary value: invalid settings must raise at fit time.
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=0).fit, X, y)

    est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert np.min(leaf_count) > 4, "Failed with {0}".format(name)

    # A float min_samples_leaf is interpreted as a fraction of len(X).
    est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                          random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert np.min(leaf_count) > len(X) * 0.25 - 1, (
        "Failed with {0}".format(name))


@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_min_samples_leaf(name):
    check_min_samples_leaf(name)
def check_min_weight_fraction_leaf(name):
    X, y = hastie_X, hastie_y

    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    # NOTE(review): this loop never actually sets max_leaf_nodes, so only
    # one builder appears to be exercised — confirm against git history.
    for frac in np.linspace(0, 0.5, 6):
        est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
                              random_state=0)
        if "RandomForest" in name:
            # Disable bootstrap so every sample's weight reaches the tree.
            est.bootstrap = False

        est.fit(X, y, sample_weight=weights)
        out = est.estimators_[0].tree_.apply(X)
        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert (
            np.min(leaf_weights) >=
            total_weight * est.min_weight_fraction_leaf), (
                "Failed with {0} min_weight_fraction_leaf={1}".format(
                    name, est.min_weight_fraction_leaf))


@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_min_weight_fraction_leaf(name):
    check_min_weight_fraction_leaf(name)
def check_sparse_input(name, X, X_sparse, y):
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Fitting on dense vs. sparse input must yield the same model outputs.
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)

    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))

    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())


@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
@pytest.mark.parametrize('sparse_matrix',
                         (csr_matrix, csc_matrix, coo_matrix))
def test_sparse_input(name, sparse_matrix):
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)

    check_sparse_input(name, X, sparse_matrix(X), y)
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)

    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_almost_equal(est.fit(X, y).predict(X), y)

    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_almost_equal(est.fit(X, y).predict(X), y)

    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_almost_equal(est.fit(X, y).predict(X), y)

    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_almost_equal(est.fit(X, y).predict(X), y)

    # Sparse layouts are only exercised when the underlying tree splitter
    # supports sparse input.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_almost_equal(est.fit(X, y).predict(X), y)

        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_almost_equal(est.fit(X, y).predict(X), y)

        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_almost_equal(est.fit(X, y).predict(X), y)

    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_almost_equal(est.fit(X, y).predict(X), y)


@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
@pytest.mark.parametrize('dtype', (np.float64, np.float32))
def test_memory_layout(name, dtype):
    check_memory_layout(name, dtype)
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_1d_input(name):
    """1-D input handling for every forest estimator."""
    X = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target
    with ignore_warnings():
        check_1d_input(name, X, X_2d, y)
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    """Feature importances must agree between equivalent class_weight and
    sample_weight specifications (single- and multi-output)."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_class_weights(name):
    """Parametrized entry point for check_class_weights."""
    check_class_weights(name)
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output.
    """Smoke test: 'balanced', per-output dict list, and
    'balanced_subsample' class_weight must all fit a multi-output y."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_class_weight_balanced_and_bootstrap_multi_output(name):
    """Parametrized entry point for the multi-output class_weight check."""
    check_class_weight_balanced_and_bootstrap_multi_output(name)
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    """Invalid class_weight values must raise ValueError; warm_start with a
    preset class_weight must warn."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)
    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='balanced', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)
    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
    # Incorrect length list for multi-output
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS)
def test_class_weight_errors(name):
    """Parametrized entry point for check_class_weight_errors."""
    check_class_weight_errors(name)
def check_warm_start(name, random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    ForestEstimator = FOREST_ESTIMATORS[name]
    est_ws = None
    # Grow 5 trees, then warm-start up to 10; the second fit must only add
    # the missing 5.
    for n_estimators in [5, 10]:
        if est_ws is None:
            est_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            est_ws.set_params(n_estimators=n_estimators)
        est_ws.fit(X, y)
        assert len(est_ws) == n_estimators
    est_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    est_no_ws.fit(X, y)
    # Same master random_state => same pool of per-tree seeds.
    assert (set([tree.random_state for tree in est_ws]) ==
            set([tree.random_state for tree in est_no_ws]))
    assert_array_equal(est_ws.apply(X), est_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_warm_start(name):
    """Parametrized entry point for check_warm_start."""
    check_warm_start(name)
def check_warm_start_clear(name):
    # Test if fit clears state and grows a new forest when warm_start==False.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    est_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                            random_state=2)
    est_2.fit(X, y)  # inits state
    # Switching warm_start off must discard the random_state=2 forest and
    # refit from scratch, reproducing `est` exactly.
    est_2.set_params(warm_start=False, random_state=1)
    est_2.fit(X, y)  # clears old state and equals est
    assert_array_almost_equal(est_2.apply(X), est.apply(X))
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_warm_start_clear(name):
    """Parametrized entry point for check_warm_start_clear."""
    check_warm_start_clear(name)
def check_warm_start_smaller_n_estimators(name):
    # Test if warm start second fit with smaller n_estimators raises error.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
    est.fit(X, y)
    # Shrinking the ensemble under warm_start is not supported.
    est.set_params(n_estimators=4)
    assert_raises(ValueError, est.fit, X, y)
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_warm_start_smaller_n_estimators(name):
    """Parametrized entry point for check_warm_start_smaller_n_estimators."""
    check_warm_start_smaller_n_estimators(name)
def check_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    est.fit(X, y)
    est_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    est_2.fit(X, y)
    # Now est_2 equals est.
    est_2.set_params(random_state=2)
    assert_warns(UserWarning, est_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(est.apply(X), est_2.apply(X))
@pytest.mark.parametrize('name', FOREST_ESTIMATORS)
def test_warm_start_equal_n_estimators(name):
    """Parametrized entry point for check_warm_start_equal_n_estimators."""
    check_warm_start_equal_n_estimators(name)
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    est = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    est.fit(X, y)
    est_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    est_2.fit(X, y)
    # Growing 5 -> 15 trees with oob_score switched on must reproduce the
    # one-shot 15-tree OOB score exactly.
    est_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    est_2.fit(X, y)
    assert hasattr(est_2, 'oob_score_')
    assert est.oob_score_ == est_2.oob_score_
    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    est_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    est_3.fit(X, y)
    assert not hasattr(est_3, 'oob_score_')
    est_3.set_params(oob_score=True)
    ignore_warnings(est_3.fit)(X, y)
    assert est.oob_score_ == est_3.oob_score_
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
def test_warm_start_oob(name):
    """Parametrized entry point for check_warm_start_oob."""
    check_warm_start_oob(name)
def test_dtype_convert(n_classes=15):
    """String class labels must survive a fit/predict round trip intact."""
    clf = RandomForestClassifier(random_state=0, bootstrap=False)
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    predictions = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, y)
    assert_array_equal(predictions, y)
def check_decision_path(name):
    """decision_path() must return a node-indicator matrix consistent with
    tree node counts and with the leaves reported by apply()."""
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    indicator, n_nodes_ptr = est.decision_path(X)
    assert indicator.shape[1] == n_nodes_ptr[-1]
    assert indicator.shape[0] == n_samples
    assert_array_equal(np.diff(n_nodes_ptr),
                       [e.tree_.node_count for e in est.estimators_])
    # Assert that leaves index are correct
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
                           for i, j in enumerate(leaves[:, est_id])]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
def test_decision_path(name):
    """Parametrized entry point for check_decision_path."""
    check_decision_path(name)
def test_min_impurity_split():
    # Test if min_impurity_split of base estimators is set
    # Regression test for #8006
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    all_estimators = [RandomForestClassifier, RandomForestRegressor,
                      ExtraTreesClassifier, ExtraTreesRegressor]
    for Estimator in all_estimators:
        est = Estimator(min_impurity_split=0.1)
        # The deprecated parameter must still be forwarded to every tree,
        # while emitting the FutureWarning pointing at min_impurity_decrease.
        est = assert_warns_message(FutureWarning,
                                   "min_impurity_decrease",
                                   est.fit, X, y)
        for tree in est.estimators_:
            assert tree.min_impurity_split == 0.1
def test_min_impurity_decrease():
    """min_impurity_decrease must be forwarded to every base tree."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    all_estimators = [RandomForestClassifier, RandomForestRegressor,
                      ExtraTreesClassifier, ExtraTreesRegressor]
    for Estimator in all_estimators:
        est = Estimator(min_impurity_decrease=0.1)
        est.fit(X, y)
        for tree in est.estimators_:
            # Simply check if the parameter is passed on correctly. Tree tests
            # will suffice for the actual working of this param
            assert tree.min_impurity_decrease == 0.1
# mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type
class MyBackend(DEFAULT_JOBLIB_BACKEND):  # type: ignore
    """Joblib backend that counts how many parallel calls were started."""
    def __init__(self, *args, **kwargs):
        # number of times start_call() was invoked
        self.count = 0
        super().__init__(*args, **kwargs)
    def start_call(self):
        self.count += 1
        return super().start_call()
# Make the counting backend available as joblib.parallel_backend("testing").
joblib.register_parallel_backend('testing', MyBackend)
@pytest.mark.skipif(parse_version(joblib.__version__) < parse_version('0.12'),
                    reason='tests not yet supported in joblib <0.12')
@skip_if_no_parallel
def test_backend_respected():
    """fit() must go through the active joblib backend; predict_proba must
    not (it requires shared memory)."""
    clf = RandomForestClassifier(n_estimators=10, n_jobs=2)
    with joblib.parallel_backend("testing") as (ba, n_jobs):
        clf.fit(X, y)
    assert ba.count > 0
    # predict_proba requires shared memory. Ensure that's honored.
    with joblib.parallel_backend("testing") as (ba, _):
        clf.predict_proba(X)
    assert ba.count == 0
def test_forest_feature_importances_sum():
    """feature_importances_ must sum to 1."""
    X, y = make_classification(n_samples=15, n_informative=3, random_state=1,
                               n_classes=3)
    clf = RandomForestClassifier(min_samples_leaf=5, random_state=42,
                                 n_estimators=200).fit(X, y)
    assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7)
def test_forest_degenerate_feature_importances():
    # build a forest of single node trees. See #13636
    # With constant X every tree is a single leaf; importances must be all
    # zeros rather than NaN.
    X = np.zeros((10, 10))
    y = np.ones((10,))
    gbr = RandomForestRegressor(n_estimators=10).fit(X, y)
    assert_array_equal(gbr.feature_importances_,
                       np.zeros(10, dtype=np.float64))
@pytest.mark.parametrize('name', FOREST_CLASSIFIERS_REGRESSORS)
@pytest.mark.parametrize(
    'max_samples, exc_type, exc_msg',
    [(int(1e9), ValueError,
      "`max_samples` must be in range 1 to 6 but got value 1000000000"),
     (1.0, ValueError,
      r"`max_samples` must be in range \(0, 1\) but got value 1.0"),
     (2.0, ValueError,
      r"`max_samples` must be in range \(0, 1\) but got value 2.0"),
     (0.0, ValueError,
      r"`max_samples` must be in range \(0, 1\) but got value 0.0"),
     (np.nan, ValueError,
      r"`max_samples` must be in range \(0, 1\) but got value nan"),
     (np.inf, ValueError,
      r"`max_samples` must be in range \(0, 1\) but got value inf"),
     ('str max_samples?!', TypeError,
      r"`max_samples` should be int or float, but got "
      r"type '\<class 'str'\>'"),
     (np.ones(2), TypeError,
      r"`max_samples` should be int or float, but got type "
      r"'\<class 'numpy.ndarray'\>'")]
)
def test_max_samples_exceptions(name, max_samples, exc_type, exc_msg):
    # Check invalid `max_samples` values
    est = FOREST_CLASSIFIERS_REGRESSORS[name](max_samples=max_samples)
    with pytest.raises(exc_type, match=exc_msg):
        est.fit(X, y)
def test_forest_y_sparse():
    """Sparse multilabel y is unsupported and must raise a clear error."""
    X = [[1, 2, 3]]
    y = csr_matrix([4, 5, 6])
    est = RandomForestClassifier()
    msg = "sparse multilabel-indicator for y is not supported."
    with pytest.raises(ValueError, match=msg):
        est.fit(X, y)
@pytest.mark.parametrize(
    'ForestClass', [RandomForestClassifier, RandomForestRegressor]
)
def test_little_tree_with_small_max_samples(ForestClass):
    """A tiny max_samples must yield visibly smaller trees."""
    rng = np.random.RandomState(1)
    X = rng.randn(10000, 2)
    y = rng.randn(10000) > 0
    # First fit with no restriction on max samples
    est1 = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=None,
    )
    # Second fit with max samples restricted to just 2
    est2 = ForestClass(
        n_estimators=1,
        random_state=rng,
        max_samples=2,
    )
    est1.fit(X, y)
    est2.fit(X, y)
    tree1 = est1.estimators_[0].tree_
    tree2 = est2.estimators_[0].tree_
    msg = "Tree without `max_samples` restriction should have more nodes"
    assert tree1.node_count > tree2.node_count, msg
|
lesteve/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 51,281
|
[
"Brian"
] |
6de5a6c94b94e84734165a706c0852d2004e12e59de3f51342ed67f6990b37c8
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, bad-whitespace
# pylint: disable=too-many-lines, too-many-locals, len-as-condition
# pylint: disable=import-outside-toplevel
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import math
from math import cos, sin
import random
import warnings
import numpy as np
from numpy.linalg import inv
import scipy.linalg as linalg
import scipy.sparse as sp
import scipy.sparse.linalg as spln
from scipy.stats import norm, multivariate_normal
# Older versions of scipy do not support the allow_singular keyword. I could
# check the version number explicitly, but perhaps this is clearer.
# Probe once at import time; logpdf() below branches on this flag.
_support_singular = True
try:
    multivariate_normal.logpdf(1, 1, 1, allow_singular=True)
except TypeError:
    warnings.warn(
        'You are using a version of SciPy that does not support the '\
        'allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). '\
        'Future versions of FilterPy will require a version of SciPy that '\
        'implements this keyword',
        DeprecationWarning)
    _support_singular = False
def _validate_vector(u, dtype=None):
# this is taken from scipy.spatial.distance. Internal function, so
# redefining here.
u = np.asarray(u, dtype=dtype).squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def mahalanobis(x, mean, cov):
    """
    Mahalanobis distance of state vector `x` from the Gaussian `mean` with
    covariance `cov` - i.e. how many standard deviations `x` lies from the
    mean (a result of 3 means x is 3 std away).

    Parameters
    ----------
    x : (N,) array_like, or float
        Input state vector
    mean : (N,) array_like, or float
        mean of multivariate Gaussian
    cov : (N, N) array_like or float
        covariance of the multivariate Gaussian

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `x` and `mean`

    Examples
    --------
    >>> mahalanobis(x=3., mean=3.5, cov=4.**2)  # univariate case
    0.125
    >>> mahalanobis(x=3., mean=6, cov=1)  # univariate, 3 std away
    3.0
    >>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
    0.42533327058913922
    """
    # validation inlined from scipy.spatial.distance's _validate_vector
    u = np.atleast_1d(np.asarray(x).squeeze())
    v = np.atleast_1d(np.asarray(mean).squeeze())
    if u.ndim > 1 or v.ndim > 1:
        raise ValueError("Input vector should be 1-D.")
    if u.shape != v.shape:
        raise ValueError("length of input vectors must be the same")

    delta = u - v
    S = np.atleast_2d(cov)
    # squared distance delta' S^-1 delta, then take the root
    sq_dist = float(np.dot(np.dot(delta.T, inv(S)), delta))
    return math.sqrt(sq_dist)
def log_likelihood(z, x, P, H, R):
    """
    Log-likelihood of measurement `z`, given the Gaussian posterior
    (`x`, `P`), the measurement function `H` and the measurement noise
    covariance `R`.
    """
    # innovation covariance S = H P H' + R
    S = R + np.dot(H, np.dot(P, H.T))
    return logpdf(z, np.dot(H, x), S)
def likelihood(z, x, P, H, R):
    """
    Likelihood of measurement `z`, given the Gaussian posterior (`x`, `P`),
    the measurement function `H` and the measurement noise covariance `R`.
    """
    ll = log_likelihood(z, x, P, H, R)
    return np.exp(ll)
def logpdf(x, mean=None, cov=1, allow_singular=True):
    """
    Log of the probability density of N(mean, cov) at `x`; univariate or
    multivariate.

    Thin wrapper around scipy.stats.multivariate_normal.logpdf that copes
    with SciPy versions older than 0.15.0, where the ``allow_singular``
    keyword did not exist. On such versions the keyword is dropped, and a
    singular / non-PSD `cov` may raise.

    `x` and `mean` may be column vectors, row vectors, or lists.
    """
    flat_x = np.asarray(x).flatten()
    flat_mean = None if mean is None else np.asarray(mean).flatten()

    # _support_singular is probed once at import time (module level above)
    if not _support_singular:
        return multivariate_normal.logpdf(flat_x, flat_mean, cov)
    return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)
def gaussian(x, mean, var, normed=True):
    """
    returns probability density function (pdf) for x given a Gaussian with the
    specified mean and variance. All must be scalars.
    gaussian (1,2,3) is equivalent to scipy.stats.norm(2, math.sqrt(3)).pdf(1)
    It is quite a bit faster albeit much less flexible than the latter.
    Parameters
    ----------
    x : scalar or array-like
        The value(s) for which we compute the distribution
    mean : scalar
        Mean of the Gaussian
    var : scalar
        Variance of the Gaussian
    normed : bool, default True
        Normalize the output if the input is an array of values, so the
        returned values sum to 1 (raw pdf values are returned for scalars
        and when normed=False).
    Returns
    -------
    pdf : float
        probability distribution of x for the Gaussian (mean, var). E.g. 0.101 denotes
        10.1%.
    Examples
    --------
    >>> gaussian(8, 1, 2)
    1.3498566943461957e-06
    >>> gaussian([8, 7, 9], 1, 2, normed=False)
    array([1.34985669e-06, 3.48132630e-05, 3.17455867e-08])
    """
    pdf = ((2*math.pi*var)**-.5) * np.exp((-0.5*(np.asarray(x)-mean)**2.) / var)
    if normed and len(np.shape(pdf)) > 0:
        pdf = pdf / sum(pdf)
    return pdf
def mul(mean1, var1, mean2, var2):
    """
    Multiply Gaussian (mean1, var1) by (mean2, var2) and return the result
    as the tuple (mean, var).

    The product of two Gaussian PDFs is, strictly, a Gaussian function
    rather than a Gaussian PDF - but it is proportional to one, so it is
    safe to treat the result as a PDF in any Bayes-equation filter, which
    normalizes anyway.

    Parameters
    ----------
    mean1 : scalar
        mean of first Gaussian
    var1 : scalar
        variance of first Gaussian
    mean2 : scalar
        mean of second Gaussian
    var2 : scalar
        variance of second Gaussian

    Returns
    -------
    mean : scalar
        mean of product
    var : scalar
        variance of product

    Examples
    --------
    >>> mul(1, 2, 3, 4)
    (1.6666666666666667, 1.3333333333333333)

    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    product_var = 1 / (1/var1 + 1/var2)
    product_mean = (var1*mean2 + var2*mean1) / (var1 + var2)
    return (product_mean, product_var)
def mul_pdf(mean1, var1, mean2, var2):
    """
    Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
    results as a tuple (mean, var, scale_factor).
    Strictly speaking the product of two Gaussian PDFs is a Gaussian
    function, not Gaussian PDF. It is, however, proportional to a Gaussian
    PDF. `scale_factor` provides this proportionality constant
    Parameters
    ----------
    mean1 : scalar
         mean of first Gaussian
    var1 : scalar
         variance of first Gaussian
    mean2 : scalar
         mean of second Gaussian
    var2 : scalar
         variance of second Gaussian
    Returns
    -------
    mean : scalar
        mean of product
    var : scalar
        variance of product
    scale_factor : scalar
        proportionality constant
    Examples
    --------
    >>> mul_pdf(1, 2, 3, 4)  # doctest: +ELLIPSIS
    (1.6666666666666667, 1.3333333333333333, 0.1166...)
    References
    ----------
    Bromily. "Products and Convolutions of Gaussian Probability Functions",
    Tina Memo No. 2003-003.
    http://www.tina-vision.net/docs/memos/2003-003.pdf
    """
    mean = (var1*mean2 + var2*mean1) / (var1 + var2)
    var = 1. / (1./var1 + 1./var2)
    # scale factor: value at `mean` of the Gaussian N(mean2, var1+var2)
    # evaluated at mean1 - see the Bromily reference above.
    S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
        math.sqrt(2 * math.pi * (var1 + var2))
    return mean, var, S
def add(mean1, var1, mean2, var2):
    """
    Sum of two independent Gaussians (mean1, var1) and (mean2, var2),
    returned as the tuple (mean, var). Means add, and so do the
    variances (sigma squared in the usual parlance).
    """
    summed = (mean1 + mean2, var1 + var2)
    return summed
def multivariate_gaussian(x, mu, cov):
    """
    This is designed to replace scipy.stats.multivariate_normal
    which is not available before version 0.14. You may either pass in a
    multivariate set of data:
    .. code-block:: Python
       multivariate_gaussian (array([1,1]), array([3,4]), eye(2)*1.4)
       multivariate_gaussian (array([1,1,1]), array([3,4,5]), 1.4)
    or unidimensional data:
    .. code-block:: Python
       multivariate_gaussian(1, 3, 1.4)
    In the multivariate case if cov is a scalar it is interpreted as eye(n)*cov
    The function gaussian() implements the 1D (univariate)case, and is much
    faster than this function.
    equivalent calls:
    .. code-block:: Python
      multivariate_gaussian(1, 2, 3)
      scipy.stats.multivariate_normal(2,3).pdf(1)
    Parameters
    ----------
    x : float, or np.array-like
       Value to compute the probability for. May be a scalar if univariate,
       or any type that can be converted to an np.array (list, tuple, etc).
       np.array is best for speed.
    mu :  float, or np.array-like
       mean for the Gaussian . May be a scalar if univariate,  or any type
       that can be converted to an np.array (list, tuple, etc).np.array is
       best for speed.
    cov :  float, or np.array-like
       Covariance for the Gaussian . May be a scalar if univariate,  or any
       type that can be converted to an np.array (list, tuple, etc).np.array is
       best for speed.
    Returns
    -------
    probability : float
        probability for x for the Gaussian (mu,cov)
    """
    warnings.warn(
        ("This was implemented before SciPy version 0.14, which implemented "
         "scipy.stats.multivariate_normal. This function will be removed in "
         "a future release of FilterPy"), DeprecationWarning)
    # force all to numpy.array type, and flatten in case they are vectors
    x = np.array(x, copy=False, ndmin=1).flatten()
    mu = np.array(mu, copy=False, ndmin=1).flatten()
    nx = len(mu)
    # _to_cov (defined elsewhere in this module) expands scalar cov to eye(n)*cov
    cov = _to_cov(cov, nx)
    # log of the normalization constant, via slogdet for numerical stability
    norm_coeff = nx*math.log(2*math.pi) + np.linalg.slogdet(cov)[1]
    err = x - mu
    # quadratic form err' cov^-1 err, using a sparse or dense solve as needed
    if sp.issparse(cov):
        numerator = spln.spsolve(cov, err).T.dot(err)
    else:
        numerator = np.linalg.solve(cov, err).T.dot(err)
    return math.exp(-0.5*(norm_coeff + numerator))
def multivariate_multiply(m1, c1, m2, c2):
    """
    Multiply two multivariate Gaussians together and return the result as
    the tuple (mean, covariance).

    Examples
    --------
    .. code-block:: Python
        m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
                                     [3.2, 0], [[8.0, 1.1], [1.1,8.0]])

    Parameters
    ----------
    m1 : array-like
        Mean of first Gaussian. Must be convertable to an 1D array via
        numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.
    c1 : matrix-like
        Covariance of first Gaussian. Must be convertable to an 2D array via
        numpy.asarray().
    m2 : array-like
        Mean of second Gaussian. Must be convertable to an 1D array via
        numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.
    c2 : matrix-like
        Covariance of second Gaussian. Must be convertable to an 2D array via
        numpy.asarray().

    Returns
    -------
    m : ndarray
        mean of the result
    c : ndarray
        covariance of the result
    """
    mean_a = np.asarray(m1)
    mean_b = np.asarray(m2)
    cov_a = np.asarray(c1)
    cov_b = np.asarray(c2)

    # (C1 + C2)^-1 appears in both the mean and covariance formulas
    inv_sum = np.linalg.inv(cov_a + cov_b)
    new_cov = np.dot(cov_a, inv_sum).dot(cov_b)
    new_mean = (np.dot(cov_b, inv_sum).dot(mean_a) +
                np.dot(cov_a, inv_sum).dot(mean_b))
    return new_mean, new_cov
def plot_discrete_cdf(xs, ys, ax=None, xlabel=None, ylabel=None,
                      label=None):
    """
    Plots the cumulative distribution of the discrete probabilities `ys`.
    The x-axis contains the values `xs`, the y-axis the cumulative
    probability.
    Parameters
    ----------
    xs : list-like of scalars
        x values corresponding to the values in `y`s. Can be `None`, in which
        case range(len(ys)) will be used.
    ys : list-like of scalars
        list of probabilities to be plotted which should sum to 1.
    ax : matplotlib axes object, optional
        If provided, the axes to draw on, otherwise plt.gca() is used.
    xlabel : str,optional
        label for the x-axis
    ylabel : str, optional
        label for the y-axis
    label : str, optional
        label for the legend
    Returns
    -------
    axis of plot
    """
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    if xs is None:
        xs = range(len(ys))
    # cumulative sum turns the pmf into a cdf
    ys = np.cumsum(ys)
    ax.plot(xs, ys, label=label)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_gaussian_cdf(mean=0., variance=1.,
                      ax=None,
                      xlim=None, ylim=(0., 1.),
                      xlabel=None, ylabel=None,
                      label=None):
    """
    Plots a normal distribution CDF with the given mean and variance.
    x-axis contains the mean, the y-axis shows the cumulative probability.
    Parameters
    ----------
    mean : scalar, default 0.
        mean for the normal distribution.
    variance : scalar, default 1.
        variance for the normal distribution.
    ax : matplotlib axes object, optional
        If provided, the axes to draw on, otherwise plt.gca() is used.
    xlim, ylim: (float,float), optional
        specify the limits for the x or y axis as tuple (low,high).
        If not specified, limits will be automatically chosen to be 'nice'
    xlabel : str,optional
        label for the x-axis
    ylabel : str, optional
        label for the y-axis
    label : str, optional
        label for the legend
    Returns
    -------
    axis of plot
    """
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    sigma = math.sqrt(variance)
    n = norm(mean, sigma)
    # default x range covers the central 99.8% of the distribution
    if xlim is None:
        xlim = [n.ppf(0.001), n.ppf(0.999)]
    xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
    cdf = n.cdf(xs)
    ax.plot(xs, cdf, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_gaussian_pdf(mean=0.,
                      variance=1.,
                      std=None,
                      ax=None,
                      mean_line=False,
                      xlim=None, ylim=None,
                      xlabel=None, ylabel=None,
                      label=None):
    """
    Plots a normal distribution PDF with the given mean and variance.
    x-axis contains the mean, the y-axis shows the probability density.
    Parameters
    ----------
    mean : scalar, default 0.
        mean for the normal distribution.
    variance : scalar, default 1., optional
        variance for the normal distribution.
    std: scalar, default=None, optional
        standard deviation of the normal distribution. Use instead of
        `variance` if desired. NOTE: because `variance` defaults to 1.,
        callers must also pass variance=None explicitly for `std` to be
        accepted; otherwise the mutual-exclusion check below raises.
    ax : matplotlib axes object, optional
        If provided, the axes to draw on, otherwise plt.gca() is used.
    mean_line : boolean
        draws a line at x=mean
    xlim, ylim: (float,float), optional
        specify the limits for the x or y axis as tuple (low,high).
        If not specified, limits will be automatically chosen to be 'nice'
    xlabel : str,optional
        label for the x-axis
    ylabel : str, optional
        label for the y-axis
    label : str, optional
        label for the legend
    Returns
    -------
    axis of plot
    """
    import matplotlib.pyplot as plt
    if ax is None:
        ax = plt.gca()
    if variance is not None and std is not None:
        raise ValueError('Specify only one of variance and std')
    if variance is None and std is None:
        raise ValueError('Specify variance or std')
    if variance is not None:
        std = math.sqrt(variance)
    n = norm(mean, std)
    # default x range covers the central 99.8% of the distribution
    if xlim is None:
        xlim = [n.ppf(0.001), n.ppf(0.999)]
    xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
    ax.plot(xs, n.pdf(xs), label=label)
    ax.set_xlim(xlim)
    if ylim is not None:
        ax.set_ylim(ylim)
    if mean_line:
        plt.axvline(mean)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    return ax
def plot_gaussian(mean=0., variance=1.,
                  ax=None,
                  mean_line=False,
                  xlim=None,
                  ylim=None,
                  xlabel=None,
                  ylabel=None,
                  label=None):
    """
    DEPRECATED. Use plot_gaussian_pdf() instead. This is poorly named, as
    there are multiple ways to plot a Gaussian.

    All parameters are forwarded unchanged to plot_gaussian_pdf(); see that
    function for their meaning. Returns the matplotlib axis of the plot.
    """
    warnings.warn('This function is deprecated. It is poorly named. '\
                  'A Gaussian can be plotted as a PDF or CDF. This '\
                  'plots a PDF. Use plot_gaussian_pdf() instead,',
                  DeprecationWarning)
    # Forward by keyword: plot_gaussian_pdf() has an extra `std` parameter
    # third in its signature, so positional forwarding would silently bind
    # `ax` to `std` and shift every later argument by one slot (dropping
    # `label` entirely).
    return plot_gaussian_pdf(mean=mean, variance=variance, ax=ax,
                             mean_line=mean_line, xlim=xlim, ylim=ylim,
                             xlabel=xlabel, ylabel=ylabel, label=label)
def covariance_ellipse(P, deviations=1):
    """
    Compute the ellipse that represents the 2-dimensional covariance
    matrix `P`.

    Parameters
    ----------
    P : nd.array shape (2,2)
       covariance matrix
    deviations : int (optional, default = 1)
       # of standard deviations. Default is 1.

    Returns (angle_radians, width_radius, height_radius)
    """
    # SVD gives the principal axes: singular values are the squared radii,
    # the first left-singular vector gives the orientation.
    U, s, _ = linalg.svd(P)
    angle = math.atan2(U[1, 0], U[0, 0])
    width, height = (deviations * math.sqrt(sv) for sv in s[:2])

    if height > width:
        raise ValueError('width must be greater than height')
    return (angle, width, height)
def _eigsorted(cov, asc=True):
"""
Computes eigenvalues and eigenvectors of a covariance matrix and returns
them sorted by eigenvalue.
Parameters
----------
cov : ndarray
covariance matrix
asc : bool, default=True
determines whether we are sorted smallest to largest (asc=True),
or largest to smallest (asc=False)
Returns
-------
eigval : 1D ndarray
eigenvalues of covariance ordered largest to smallest
eigvec : 2D ndarray
eigenvectors of covariance matrix ordered to match `eigval` ordering.
I.e eigvec[:, 0] is the rotation vector for eigval[0]
"""
eigval, eigvec = np.linalg.eigh(cov)
order = eigval.argsort()
if not asc:
# sort largest to smallest
order = order[::-1]
return eigval[order], eigvec[:, order]
def plot_3d_covariance(mean, cov, std=1.,
                       ax=None, title=None,
                       color=None, alpha=1.,
                       label_xyz=True,
                       N=60,
                       shade=True,
                       limit_xyz=True,
                       **kwargs):
    """
    Plots a covariance matrix `cov` as a 3D ellipsoid centered around
    the `mean`.

    Parameters
    ----------
    mean : 3-vector
        mean in x, y, z. Can be any type convertable to a row vector.

    cov : ndarray 3x3
        covariance matrix

    std : double, default=1
        standard deviation of ellipsoid

    ax : matplotlib.axes._subplots.Axes3DSubplot, optional
        Axis to draw on. If not provided, a new 3d axis will be generated
        for the current figure

    title : str, optional
        If provided, specifies the title for the plot

    color : any value convertible to a color
        if specified, color of the ellipsoid.

    alpha : float, default 1.
        Alpha value of the ellipsoid. <1 makes is semi-transparent.

    label_xyz: bool, default True
        Gives labels 'X', 'Y', and 'Z' to the axis.

    N : int, default=60
        Number of segments to compute ellipsoid in u,v space. Large numbers
        can take a very long time to plot. Default looks nice.

    shade : bool, default=True
        Use shading to draw the ellipse

    limit_xyz : bool, default=True
        Limit the axis range to fit the ellipse

    **kwargs : optional
        keyword arguments to supply to the call to plot_surface()

    Raises
    ------
    ValueError
        if mean is not convertible to a 1x3 vector, cov is not 3x3, or
        cov is not positive definite.
    """
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt

    # force mean to be a 1d vector no matter its shape when passed in
    mean = np.atleast_2d(mean)
    if mean.shape[1] == 1:
        mean = mean.T

    if not(mean.shape[0] == 1 and mean.shape[1] == 3):
        raise ValueError('mean must be convertible to a 1x3 row vector')
    mean = mean[0]

    # force covariance to be 3x3 np.array
    cov = np.asarray(cov)
    if cov.shape[0] != 3 or cov.shape[1] != 3:
        raise ValueError("covariance must be 3x3")

    # The idea is simple - find the 3 axis of the covariance matrix
    # by finding the eigenvalues and vectors. The eigenvalues are the
    # radii (squared, since covariance has squared terms), and the
    # eigenvectors give the rotation. So we make an ellipse with the
    # given radii and then rotate it to the proper orientation.
    eigval, eigvec = _eigsorted(cov, asc=True)

    # FIX: validate positive-definiteness *before* taking the square root.
    # Previously radii was computed first, so a negative eigenvalue produced
    # NaNs (and a RuntimeWarning) before the ValueError was ever raised.
    if eigval[0] < 0:
        raise ValueError("covariance matrix must be positive definite")
    radii = std * np.sqrt(np.real(eigval))

    # calculate cartesian coordinates for the ellipsoid surface
    u = np.linspace(0.0, 2.0 * np.pi, N)
    v = np.linspace(0.0, np.pi, N)
    x = np.outer(np.cos(u), np.sin(v)) * radii[0]
    y = np.outer(np.sin(u), np.sin(v)) * radii[1]
    z = np.outer(np.ones_like(u), np.cos(v)) * radii[2]

    # rotate data with eigenvector and center on mu
    a = np.kron(eigvec[:, 0], x)
    b = np.kron(eigvec[:, 1], y)
    c = np.kron(eigvec[:, 2], z)

    data = a + b + c
    N = data.shape[0]
    x = data[:, 0:N] + mean[0]
    y = data[:, N:N*2] + mean[1]
    z = data[:, N*2:] + mean[2]

    fig = plt.gcf()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')

    ax.plot_surface(x, y, z,
                    rstride=3, cstride=3, linewidth=0.1, alpha=alpha,
                    shade=shade, color=color, **kwargs)

    # now make it pretty!
    if label_xyz:
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')

    if limit_xyz:
        r = radii.max()
        ax.set_xlim(-r + mean[0], r + mean[0])
        ax.set_ylim(-r + mean[1], r + mean[1])
        ax.set_zlim(-r + mean[2], r + mean[2])

    if title is not None:
        plt.title(title)

    #pylint: disable=pointless-statement
    Axes3D #kill pylint warning about unused import
    return ax
def plot_covariance_ellipse(
        mean, cov=None, variance=1.0, std=None,
        ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
        facecolor=None, edgecolor=None,
        fc='none', ec='#004080',
        alpha=1.0, xlim=None, ylim=None,
        ls='solid'):
    """
    Deprecated function to plot a covariance ellipse. Use plot_covariance
    instead.

    See Also
    --------
    plot_covariance
    """
    warnings.warn("deprecated, use plot_covariance instead", DeprecationWarning)

    # This wrapper exists only for backwards compatibility: forward every
    # argument, unchanged, to the replacement implementation.
    forwarded = dict(
        mean=mean, cov=cov, variance=variance, std=std,
        ellipse=ellipse, title=title, axis_equal=axis_equal,
        show_semiaxis=show_semiaxis, facecolor=facecolor,
        edgecolor=edgecolor, fc=fc, ec=ec, alpha=alpha,
        xlim=xlim, ylim=ylim, ls=ls)
    plot_covariance(**forwarded)
def _std_tuple_of(var=None, std=None, interval=None):
"""
Convienence function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var)
def plot_covariance(
        mean, cov=None, variance=1.0, std=None, interval=None,
        ellipse=None, title=None, axis_equal=True,
        show_semiaxis=False, show_center=True,
        facecolor=None, edgecolor=None,
        fc='none', ec='#004080',
        alpha=1.0, xlim=None, ylim=None,
        ls='solid'):
    """
    Plots the covariance ellipse for the 2D normal defined by (mean, cov)

    `variance` is the normal sigma^2 that we want to plot. If list-like,
    ellipses for all ellipses will be ploted. E.g. [1,2] will plot the
    sigma^2 = 1 and sigma^2 = 2 ellipses. Alternatively, use std for the
    standard deviation, in which case `variance` will be ignored.

    ellipse is a (angle,width,height) tuple containing the angle in radians,
    and width and height radii.

    You may provide either cov or ellipse, but not both.

    Parameters
    ----------
    mean : row vector like (2x1)
        The mean of the normal

    cov : ndarray-like
        2x2 covariance matrix

    variance : float, default 1, or iterable float, optional
        Variance of the plotted ellipse. May specify std or interval instead.
        If iterable, such as (1, 2**2, 3**2), then ellipses will be drawn
        for all in the list.

    std : float, or iterable float, optional
        Standard deviation of the plotted ellipse. If specified, variance
        is ignored, and interval must be `None`.
        If iterable, such as (1, 2, 3), then ellipses will be drawn
        for all in the list.

    interval : float range [0,1), or iterable float, optional
        Confidence interval for the plotted ellipse. For example, .68 (for
        68%) gives roughly 1 standand deviation. If specified, variance
        is ignored and `std` must be `None`
        If iterable, such as (.68, .95), then ellipses will be drawn
        for all in the list.

    ellipse: (float, float, float)
        Instead of a covariance, plots an ellipse described by (angle, width,
        height), where angle is in radians, and the width and height are the
        minor and major sub-axis radii. `cov` must be `None`.

    title: str, optional
        title for the plot

    axis_equal: bool, default=True
        Use the same scale for the x-axis and y-axis to ensure the aspect
        ratio is correct.

    show_semiaxis: bool, default=False
        Draw the semiaxis of the ellipse

    show_center: bool, default=True
        Mark the center of the ellipse with a cross

    facecolor, fc: color, default=None
        If specified, fills the ellipse with the specified color. `fc` is an
        allowed abbreviation

    edgecolor, ec: color, default=None
        If specified, overrides the default color sequence for the edge color
        of the ellipse. `ec` is an allowed abbreviation

    alpha: float range [0,1], default=1.
        alpha value for the ellipse

    xlim: float or (float,float), default=None
        specifies the limits for the x-axis

    ylim: float or (float,float), default=None
        specifies the limits for the y-axis

    ls: str, default='solid':
        line style for the edge of the ellipse
    """
    from matplotlib.patches import Ellipse
    import matplotlib.pyplot as plt

    # cov and ellipse are mutually exclusive ways of describing the shape;
    # exactly one must be supplied
    if cov is not None and ellipse is not None:
        raise ValueError('You cannot specify both cov and ellipse')

    if cov is None and ellipse is None:
        raise ValueError('Specify one of cov or ellipse')

    # fc/ec are abbreviations; the long-form names win when both are given
    if facecolor is None:
        facecolor = fc

    if edgecolor is None:
        edgecolor = ec

    # convert the covariance to the (angle, width, height) representation
    # used for drawing
    if cov is not None:
        ellipse = covariance_ellipse(cov)

    if axis_equal:
        plt.axis('equal')

    if title is not None:
        plt.title(title)

    ax = plt.gca()

    # Ellipse wants the angle in degrees and full widths, not radii
    angle = np.degrees(ellipse[0])
    width = ellipse[1] * 2.
    height = ellipse[2] * 2.

    # normalize variance/std/interval into a tuple of std multipliers and
    # draw one ellipse per entry
    std = _std_tuple_of(variance, std, interval)
    for sd in std:
        e = Ellipse(xy=mean, width=sd*width, height=sd*height, angle=angle,
                    facecolor=facecolor,
                    edgecolor=edgecolor,
                    alpha=alpha,
                    lw=2, ls=ls)
        ax.add_patch(e)
    x, y = mean
    if show_center:
        plt.scatter(x, y, marker='+', color=edgecolor)

    if xlim is not None:
        ax.set_xlim(xlim)

    if ylim is not None:
        ax.set_ylim(ylim)

    # optionally draw the two semiaxes from the center
    # NOTE(review): cos/sin here appear to come from a file-level
    # `from math import ...` not visible in this chunk — confirm
    if show_semiaxis:
        a = ellipse[0]
        h, w = height/4, width/4
        plt.plot([x, x+ h*cos(a+np.pi/2)], [y, y + h*sin(a+np.pi/2)])
        plt.plot([x, x+ w*cos(a)], [y, y + w*sin(a)])
def norm_cdf(x_range, mu, var=1, std=None):
    """
    Computes the probability that a Gaussian distribution lies
    within a range of values.

    Parameters
    ----------
    x_range : (float, float)
        tuple of range to compute probability for

    mu : float
        mean of the Gaussian

    var : float, optional
        variance of the Gaussian. Ignored if `std` is provided

    std : float, optional
        standard deviation of the Gaussian. This overrides the `var` parameter

    Returns
    -------
    probability : float
        probability that Gaussian is within x_range. E.g. .1 means 10%.
    """
    # std overrides var; derive it from the variance only when not given
    sigma = math.sqrt(var) if std is None else std
    lo, hi = x_range
    # abs() makes the result independent of the order of the range endpoints
    return abs(norm.cdf(lo, loc=mu, scale=sigma) -
               norm.cdf(hi, loc=mu, scale=sigma))
def _to_cov(x, n):
"""
If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a 2D numpy array then it is returned unchanged.
Raises ValueError if not positive definite
"""
if np.isscalar(x):
if x < 0:
raise ValueError('covariance must be > 0')
return np.eye(n) * x
x = np.atleast_2d(x)
try:
# quickly find out if we are positive definite
np.linalg.cholesky(x)
except:
raise ValueError('covariance must be positive definit')
return x
def rand_student_t(df, mu=0, std=1):
    """
    return random number distributed by student's t distribution with
    `df` degrees of freedom with the specified mean and standard deviation.
    """
    # standard construction: N(0, std) divided by sqrt(chi2(df)/df),
    # then shifted by mu. gammavariate(df/2, 2) * 2 realizes the chi-square.
    gauss_sample = random.gauss(0, std)
    chi2_sample = 2.0 * random.gammavariate(0.5 * df, 2.0)
    return mu + gauss_sample / math.sqrt(chi2_sample / df)
def NEES(xs, est_xs, ps):
    """
    Computes the normalized estimated error squared (NEES) test on a sequence
    of estimates. The estimates are optimal if the mean error is zero and
    the covariance matches the Kalman filter's covariance. If this holds,
    then the mean of the NEES should be equal to or less than the dimension
    of x.

    Examples
    --------
    .. code-block: Python

        xs = ground_truth()
        est_xs, ps, _, _ = kf.batch_filter(zs)
        NEES(xs, est_xs, ps)

    Parameters
    ----------
    xs : list-like
        sequence of true values for the state x

    est_xs : list-like
        sequence of estimates from an estimator (such as Kalman filter)

    ps : list-like
        sequence of covariance matrices from the estimator

    Returns
    -------
    errs : list of floats
        list of NEES computed for each estimate
    """
    residuals = xs - est_xs
    # per-step NEES is the squared Mahalanobis length of the residual:
    # r' P^-1 r for each (residual, covariance) pair
    return [np.dot(r.T, linalg.inv(p)).dot(r)
            for r, p in zip(residuals, ps)]
|
rlabbe/filterpy
|
filterpy/stats/stats.py
|
Python
|
mit
| 32,181
|
[
"Gaussian"
] |
576b407938a36b7292f862cf414527e1adc8419d19241f529b6ec2942f31f374
|
#!/usr/bin/env python
import shutil
import tempfile
import configparser
from textwrap import dedent
import tarfile
import pyaml
import hashlib
import os
import re
import bs4
import urllib
from urllib import request
from urllib import parse
from urllib import error
from collections import OrderedDict
import logging
import requests
# Module-level logging configuration: tag every message with the script name
# and a timestamp.
logging.basicConfig(level=logging.INFO, format='[bioconductor_skeleton.py %(asctime)s]: %(message)s')
logger = logging.getLogger()
# silence per-request chatter from the requests library
logging.getLogger("requests").setLevel(logging.WARNING)

# root URL of all Bioconductor package pages; a package's page is
# base_url + package name
base_url = 'http://bioconductor.org/packages/'

# Packages that might be specified in the DESCRIPTION of a package as
# dependencies, but since they're built-in we don't need to specify them in
# the meta.yaml.
#
# Note: this list is from:
#
# conda create -n rtest -c r r
# R -e "rownames(installed.packages())"
BASE_R_PACKAGES = ["base", "boot", "class", "cluster", "codetools", "compiler",
                   "datasets", "foreign", "graphics", "grDevices", "grid",
                   "KernSmooth", "lattice", "MASS", "Matrix", "methods",
                   "mgcv", "nlme", "nnet", "parallel", "rpart", "spatial",
                   "splines", "stats", "stats4", "survival", "tcltk", "tools",
                   "utils"]

# A list of packages, in recipe name format, that require gcc/llvm at
# build time (see BioCProjectPage.dependencies)
GCC_PACKAGES = ['r-rcpp']

# directory containing this script; used to locate the tarball cache
HERE = os.path.abspath(os.path.dirname(__file__))
class PageNotFoundError(Exception):
    """Raised when a Bioconductor page or tarball URL cannot be retrieved."""
class BioCProjectPage(object):
    # Scrapes a single package page on bioconductor.org and exposes the
    # metadata needed to build a conda recipe: version, tarball URL, md5,
    # parsed DESCRIPTION fields, dependency list, and a rendered meta.yaml.

    def __init__(self, package):
        """
        Represents a single Bioconductor package page and provides access to
        scraped data.

        >>> x = BioCProjectPage('DESeq2')
        >>> x.tarball_url
        'http://bioconductor.org/packages/release/bioc/src/contrib/DESeq2_1.8.2.tar.gz'
        """
        self.base_url = base_url
        self.package = package
        # lazily-computed caches backing the corresponding properties
        self._md5 = None
        self._cached_tarball = None
        self._dependencies = None
        self.build_number = 0
        self.request = requests.get(os.path.join(base_url, package))
        # a requests.Response is falsy for 4xx/5xx status codes
        if not self.request:
            raise PageNotFoundError('Error {0.status_code} ({0.reason})'.format(self.request))

        # Since we provide the "short link" we will get redirected. Using
        # requests allows us to keep track of the final destination URL, which
        # we need for reconstructing the tarball URL.
        self.url = self.request.url

        # The table at the bottom of the page has the info we want. An earlier
        # draft of this script parsed the dependencies from the details table.
        # That's still an option if we need a double-check on the DESCRIPTION
        # fields.
        self.soup = bs4.BeautifulSoup(
            self.request.content,
            'html.parser')
        self.details_table = self.soup.find_all(attrs={'class': 'details'})[0]

        # However, it is helpful to get the version info from this table. That
        # way we can try getting the bioaRchive tarball and cache that.
        # NOTE(review): if no 'Version' cell exists, `version` is unbound
        # here and the next line raises NameError — confirm the page layout
        # guarantees the cell.
        for td in self.details_table.findAll('td'):
            if td.getText() == 'Version':
                version = td.findNext().getText()
                break
        self.version = version
        # set True as a side effect of the `dependencies` property when
        # a dependency is in GCC_PACKAGES
        self.depends_on_gcc = False

    @property
    def bioaRchive_url(self):
        """
        Returns the bioaRchive URL if one exists for this version of this
        package, otherwise returns None.

        Note that to get the package version, we're still getting the
        bioconductor tarball to extract the DESCRIPTION file.
        """
        url = 'https://bioarchive.galaxyproject.org/{0.package}_{0.version}.tar.gz'.format(self)
        response = requests.get(url)
        if response:
            return url
        elif response.status_code == 404:
            # 404 simply means bioaRchive does not host this version
            return
        else:
            raise PageNotFoundError("Unexpected error: {0.status_code} ({0.reason})".format(response))

    @property
    def bioconductor_tarball_url(self):
        """
        Return the url to the tarball from the bioconductor site.
        """
        # match hrefs like '<package>_<version>.tar.gz' on the package page
        r = re.compile('{0}.*\.tar.gz'.format(self.package))

        def f(href):
            return href and r.search(href)

        results = self.soup.find_all(href=f)
        assert len(results) == 1, (
            "Found {0} tags with '.tar.gz' in href".format(len(results)))
        s = list(results[0].stripped_strings)
        assert len(s) == 1

        # build the actual URL based on the identified package name and the
        # relative URL from the source. Here we're just hard-coding
        # '../src/contrib' based on the structure of the bioconductor site.
        return os.path.join(parse.urljoin(self.url, '../src/contrib'), s[0])

    @property
    def tarball_url(self):
        # prefer the version-pinned bioaRchive tarball when one exists;
        # otherwise fall back to the bioconductor release tarball
        url = self.bioaRchive_url
        if url:
            return url
        return self.bioconductor_tarball_url

    @property
    def tarball_basename(self):
        # filename portion of the tarball URL, e.g. 'DESeq2_1.8.2.tar.gz'
        return os.path.basename(self.tarball_url)

    @property
    def cached_tarball(self):
        """
        Downloads the tarball to the `cached_bioconductor_tarballs` dir if one
        hasn't already been downloaded for this package.

        This is because we need the whole tarball to get the DESCRIPTION file
        and to generate an md5 hash, so we might as well save it somewhere.
        """
        if self._cached_tarball:
            return self._cached_tarball
        cache_dir = os.path.join(HERE, 'cached_bioconductor_tarballs')
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        fn = os.path.join(cache_dir, self.tarball_basename)
        if os.path.exists(fn):
            self._cached_tarball = fn
            return fn
        # download to a temp file first so a failed download can't leave a
        # truncated file at the cache path
        tmp = tempfile.NamedTemporaryFile(delete=False).name
        with open(tmp, 'wb') as fout:
            logger.info('Downloading {0} to {1}'.format(self.tarball_url, fn))
            response = requests.get(self.tarball_url)
            if response:
                fout.write(response.content)
            else:
                raise PageNotFoundError('Unexpected error {0.status_code} ({0.reason})'.format(response))
        shutil.move(tmp, fn)
        self._cached_tarball = fn
        return fn

    @property
    def description(self):
        """
        Extract the DESCRIPTION file from the tarball and parse it.

        Returns a plain dict of lower-cased DESCRIPTION field names to their
        (newline-joined) string values.
        """
        t = tarfile.open(self.cached_tarball)
        d = t.extractfile(os.path.join(self.package, 'DESCRIPTION')).read()
        self._contents = d
        c = configparser.ConfigParser()

        # On-spec config files need a "section", but the DESCRIPTION file
        # doesn't have one. So we just add a fake section, and let the
        # configparser take care of the details of parsing.
        c.read_string('[top]\n' + d.decode('UTF-8'))
        e = c['top']

        # Glue together newlines
        for k in e.keys():
            e[k] = e[k].replace('\n', ' ')
        return dict(e)

    #@property
    #def version(self):
    #    return self.description['version']

    @property
    def license(self):
        # License field straight from the parsed DESCRIPTION file
        return self.description['license']

    @property
    def imports(self):
        # 'Imports:' field of DESCRIPTION as a list; empty list if absent
        try:
            return self.description['imports'].split(', ')
        except KeyError:
            return []

    @property
    def depends(self):
        # 'Depends:' field of DESCRIPTION as a list; empty list if absent
        try:
            return self.description['depends'].split(', ')
        except KeyError:
            return []

    def _parse_dependencies(self, items):
        """
        The goal is to go from

        ['package1', 'package2', 'package3 (>= 0.1)', 'package4']

        to::

            [
                ('package1', ""),
                ('package2', ""),
                ('package3', " >=0.1"),
                ('package1', ""),
            ]
        """
        results = []
        for item in items:
            # split off a parenthesized version spec, if any
            toks = [i.strip() for i in item.split('(')]
            if len(toks) == 1:
                results.append((toks[0], ""))
            elif len(toks) == 2:
                assert ')' in toks[1]
                toks[1] = toks[1].replace(')', '').replace(' ', '')
                results.append(tuple(toks))
            else:
                raise ValueError("Found {0} toks: {1}".format(len(toks), toks))
        return results

    @property
    def dependencies(self):
        # Conda-style dependency strings ('bioconductor-foo >=1.0', 'r-bar',
        # 'r'), derived from DESCRIPTION's Imports and Depends fields.
        # Side effect: sets self.depends_on_gcc for GCC_PACKAGES deps.
        if self._dependencies:
            return self._dependencies
        results = []

        # Some packages specify a minimum R version, which we'll need to keep
        # track of
        specific_r_version = False

        # Sometimes a version is specified only in the `depends` and not in the
        # `imports`. We keep the most specific version of each.
        version_specs = list(
            set(
                self._parse_dependencies(self.imports) +
                self._parse_dependencies(self.depends)
            )
        )
        versions = {}
        for name, version in version_specs:
            if name in versions:
                # prefer a non-empty version spec over an empty one
                if not versions[name] and version:
                    versions[name] = version
            else:
                versions[name] = version
        for name, version in sorted(versions.items()):
            # DESCRIPTION notes base R packages, but we don't need to specify
            # them in the dependencies.
            if name in BASE_R_PACKAGES:
                continue

            # Try finding the dependency on the bioconductor site; if it can't
            # be found then we assume it's in CRAN.
            try:
                BioCProjectPage(name)
                prefix = 'bioconductor-'
            except PageNotFoundError:
                prefix = 'r-'
            logger.info('{0:>12} dependency: name="{1}" version="{2}"'.format(
                {'r-': 'R', 'bioconductor-': 'BioConductor'}[prefix],
                name, version))

            # add padding to version string
            if version:
                version = " " + version
            if name.lower() == 'r':
                # "r >=2.5" rather than "r-r >=2.5"
                specific_r_version = True
                results.append(name.lower() + version)
            else:
                results.append(prefix + name.lower() + version)
            if prefix + name.lower() in GCC_PACKAGES:
                self.depends_on_gcc = True

        # Add R itself if no specific version was specified
        if not specific_r_version:
            results.append('r')
        self._dependencies = results
        return self._dependencies

    @property
    def md5(self):
        """
        Calculate the md5 hash of the tarball so it can be filled into the
        meta.yaml.
        """
        if self._md5 is None:
            self._md5 = hashlib.md5(
                open(self.cached_tarball, 'rb').read()).hexdigest()
        return self._md5

    @property
    def meta_yaml(self):
        """
        Build the meta.yaml string based on discovered values.

        Here we use a nested OrderedDict so that all meta.yaml files created by
        this script have the same consistent format. Otherwise we're at the
        mercy of Python dict sorting.

        We use pyaml (rather than yaml) because it has better handling of
        OrderedDicts.

        However pyaml does not support comments, but if there are gcc and llvm
        dependencies then they need to be added with preprocessing selectors
        for `# [linux]` and `# [osx]`.

        We do this with a unique placeholder (not a jinja or $-based
        string.Template so as to avoid conflicting with the conda jinja
        templating or the `$R` in the test commands, and replace the text once
        the yaml is written.
        """
        url = self.bioaRchive_url
        if not url:
            url = self.tarball_url
        DEPENDENCIES = sorted(self.dependencies)
        d = OrderedDict((
            (
                'package', OrderedDict((
                    ('name', 'bioconductor-' + self.package.lower()),
                    ('version', self.version),
                )),
            ),
            (
                'source', OrderedDict((
                    ('fn', self.tarball_basename),
                    ('url', url),
                    ('md5', self.md5),
                )),
            ),
            (
                'build', OrderedDict((
                    ('number', self.build_number),
                    ('rpaths', ['lib/R/lib/', 'lib/']),
                )),
            ),
            (
                'requirements', OrderedDict((
                    # If you don't make copies, pyaml sees these as the same
                    # object and tries to make a shortcut, causing an error in
                    # decoding unicode. Possible pyaml bug? Anyway, this fixes
                    # it.
                    ('build', DEPENDENCIES[:]),
                    ('run', DEPENDENCIES[:]),
                )),
            ),
            (
                'test', OrderedDict((
                    ('commands',
                     ['''$R -e "library('{package}')"'''.format(
                         package=self.package)]),
                )),
            ),
            (
                'about', OrderedDict((
                    ('home', self.url),
                    ('license', self.license),
                    ('summary', self.description['description']),
                )),
            ),
        ))
        if self.depends_on_gcc:
            d['requirements']['build'].append('GCC_PLACEHOLDER')
            d['requirements']['build'].append('LLVM_PLACEHOLDER')
        rendered = pyaml.dumps(d).decode('utf-8')
        # swap the placeholders for comment-bearing selector lines that pyaml
        # itself cannot emit
        rendered = rendered.replace('GCC_PLACEHOLDER', 'gcc # [linux]')
        rendered = rendered.replace('LLVM_PLACEHOLDER', 'llvm # [osx]')
        return rendered
def write_recipe(package, recipe_dir, force=False):
    """
    Write the meta.yaml and build.sh files.

    Parameters
    ----------
    package : str
        Bioconductor package name; scraped via BioCProjectPage.
    recipe_dir : str
        Parent directory; the recipe is written to
        <recipe_dir>/bioconductor-<package>.
    force : bool, default False
        If False and the recipe directory already exists, raise ValueError
        instead of overwriting.
    """
    proj = BioCProjectPage(package)
    recipe_dir = os.path.join(recipe_dir, 'bioconductor-' + proj.package.lower())
    if os.path.exists(recipe_dir) and not force:
        raise ValueError("{0} already exists, aborting".format(recipe_dir))
    else:
        if not os.path.exists(recipe_dir):
            print('creating %s' % recipe_dir)
            os.makedirs(recipe_dir)

    # If the version number has not changed but something else in the recipe
    # *has* changed, then bump the version number.
    meta_file = os.path.join(recipe_dir, 'meta.yaml')
    if os.path.exists(meta_file):
        # NOTE(review): pyaml.yaml.load without an explicit Loader is
        # deprecated/unsafe in newer PyYAML — confirm pinned version.
        updated_meta = pyaml.yaml.load(proj.meta_yaml)
        current_meta = pyaml.yaml.load(open(meta_file))

        # pop off the version and build numbers so we can compare the rest of
        # the dicts
        updated_version = updated_meta['package'].pop('version')
        current_version = current_meta['package'].pop('version')
        updated_build_number = updated_meta['build'].pop('number')
        current_build_number = current_meta['build'].pop('number')
        if (
            (updated_version == current_version)
            and
            (updated_meta != current_meta)
        ):
            # same upstream version but a changed recipe: bump build number
            proj.build_number = int(current_build_number) + 1

    with open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fout:
        fout.write(proj.meta_yaml)
    with open(os.path.join(recipe_dir, 'build.sh'), 'w') as fout:
        fout.write(dedent(
            """
            #!/bin/bash
            # R refuses to build packages that mark themselves as
            # "Priority: Recommended"
            mv DESCRIPTION DESCRIPTION.old
            grep -v '^Priority: ' DESCRIPTION.old > DESCRIPTION
            #
            $R CMD INSTALL --build .
            #
            # # Add more build steps here, if they are necessary.
            #
            # See
            # http://docs.continuum.io/conda/build.html
            # for a list of environment variables that are set during the build
            # process.
            # """
        )
        )
if __name__ == "__main__":
    # Command-line interface: generate (or refresh) the recipe for a single
    # Bioconductor package.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('package', help='Bioconductor package name')
    parser.add_argument('--recipes', default='recipes',
                        help='Recipe will be created in <recipe-dir>/<package>')
    parser.add_argument('--force', action='store_true',
                        help='Overwrite the contents of an existing recipe')
    cli_args = parser.parse_args()
    write_recipe(cli_args.package, cli_args.recipes, cli_args.force)
|
yesimon/bioconda-recipes
|
scripts/bioconductor/bioconductor_skeleton.py
|
Python
|
mit
| 16,419
|
[
"Bioconductor"
] |
71ee474cea4956a31fd273ec0e97fd0a1cf2c725ef26760a3630775ef3143f6a
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.