input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>mutaihillary/mycalculator
# Copyright (c) 2003-2013 <NAME>.A. (Paris, FRANCE).
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method are based on the Tab Nanny std module.
"""
import keyword
import sys
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
from astroid import nodes
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope, OPTION_RGX
# Keywords that may be followed by a parenthesized expression;
# _check_keyword_parentheses flags redundant parens after these.
_KEYWORD_TOKENS = ['assert', 'del', 'elif', 'except', 'for', 'if', 'in', 'not',
                   'raise', 'return', 'while', 'yield']
# On Python 2 'print' is a keyword statement, so treat it like the others.
if sys.version_info < (3, 0):
    _KEYWORD_TOKENS.append('print')

# Operators around which spacing is checked (C0326); includes the
# deprecated Python 2 '<>' inequality operator.
_SPACED_OPERATORS = ['==', '<', '>', '!=', '<>', '<=', '>=',
                     '+=', '-=', '*=', '**=', '/=', '//=', '&=', '|=', '^=',
                     '%=', '>>=', '<<=']
_OPENING_BRACKETS = ['(', '[', '{']
_CLOSING_BRACKETS = [')', ']', '}']
# Token types that end a physical line (logical newline, non-logical
# newline, or a comment running to end of line).
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])

# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2

# Whitespace checking config constants
_DICT_SEPARATOR = 'dict-separator'
_TRAILING_COMMA = 'trailing-comma'
_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR]
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s)', # was W0302
'too-many-lines',
'Used when a module has too much lines, reducing its readability.'
),
'C0303': ('Trailing whitespace',
'trailing-whitespace',
'Used when there is whitespace between the end of a line and the '
'newline.'),
'C0304': ('Final newline missing',
'missing-final-newline',
'Used when the last line in a file is missing a newline.'),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of indentation\'s tabulations or '
'spaces has been found.'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
'Used when more than on statement are found on the same line.',
{'scope': WarningScope.NODE}),
'C0325' : ('Unnecessary parens after %r keyword',
'superfluous-parens',
'Used when a single item in parentheses follows an if, for, or '
'other keyword.'),
'C0326': ('%s space %s %s %s\n%s',
'bad-whitespace',
('Used when a wrong number of spaces is used around an operator, '
'bracket or block opener.'),
{'old_names': [('C0323', 'no-space-after-operator'),
('C0324', 'no-space-after-comma'),
('C0322', 'no-space-before-operator')]})
}
if sys.version_info < (3, 0):
MSGS.update({
'W0331': ('Use of the <> operator',
'old-ne-operator',
'Used when the deprecated "<>" operator is used instead \
of "!=".'),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
'should use a upper case "L" since the letter "l" looks too much '
'like the digit "1"'),
'W0333': ('Use of the `` operator',
'backtick',
'Used when the deprecated "``" (backtick) operator is used '
'instead of the str() function.',
{'scope': WarningScope.NODE}),
})
def _underline_token(token):
length = token[3][1] - token[2][1]
offset = token[2][1]
return token[4] + (' ' * offset) + ('^' * length)
def _column_distance(token1, token2):
if token1 == token2:
return 0
if token2[3] < token1[3]:
token1, token2 = token2, token1
if token1[3][0] != token2[2][0]:
return None
return token2[2][1] - token1[3][1]
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
* use of <> instead of !=
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 80, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('ignore-long-lines',
{'type': 'regexp', 'metavar': '<regexp>',
'default': r'^\s*(# )?<?https?://\S+>?$',
'help': ('Regexp for a line that is allowed to be longer than '
'the limit.')}),
('single-line-if-stmt',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : ('Allow the body of an if to be on the same '
'line as the test if there is no else.')}),
('no-space-check',
{'default': ','.join(_NO_SPACE_CHECK_CHOICES),
'type': 'multiple_choice',
'choices': _NO_SPACE_CHECK_CHOICES,
'help': ('List of optional constructs for which whitespace '
'checking is disabled')}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually \
" " (4 spaces) or "\\t" (1 tab).'}),
)
    def __init__(self, linter=None):
        """Initialize the checker; per-run state starts out unset and is
        (re)created by the token-processing callbacks."""
        BaseTokenChecker.__init__(self, linter)
        # Maps line numbers to their raw content; filled in by new_line().
        self._lines = None
        # Tracks which lines were already reported on — presumably reset
        # per module by process_tokens (not visible here); TODO confirm.
        self._visited_lines = None
def new_line(self, tok_type, line, line_num, junk):
"""a new line has been encountered, process it if necessary"""
if not tok_type in junk:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
    def process_module(self, module):
        """Record keywords that may legitimately be followed by parens.

        'from __future__ import print_function' turns print into a
        function, so parentheses after 'print' must not be reported as
        superfluous in that module.
        """
        self._keywords_with_parens = set()
        for node in module.body:
            # Only top-level __future__ imports are honored here (Python
            # itself requires them at the top of the module anyway).
            if (isinstance(node, nodes.From) and node.modname == '__future__'
                    and any(name == 'print_function' for name, _ in node.names)):
                self._keywords_with_parens.add('print')
    def _check_keyword_parentheses(self, tokens, start):
        """Check that there are not unnecessary parens after a keyword.

        Parens are unnecessary if there is exactly one balanced outer pair on
        a line, and it is followed by a colon, and contains no commas (i.e. is
        not a tuple).

        Args:
            tokens: list of Tokens; the entire list of Tokens.
            start: int; the position of the keyword in the token list.
        """
        # If the next token is not a paren, we're fine.
        if tokens[start+1][1] != '(':
            return
        found_and_or = False
        # `depth` tracks paren nesting; the scan ends when the pair opened
        # right after the keyword closes (depth returns to 0).
        depth = 0
        keyword_token = tokens[start][1]
        line_num = tokens[start][2][0]
        for i in xrange(start, len(tokens) - 1):
            token = tokens[i]
            # If we hit a newline, then assume any parens were for continuation.
            if token[0] == tokenize.NL:
                return
            if token[1] == '(':
                depth += 1
            elif token[1] == ')':
                depth -= 1
                if not depth:
                    # ')' can't happen after if (foo), since it would be a syntax error.
                    if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
                            tokens[i+1][0] in (tokenize.NEWLINE, tokenize.ENDMARKER,
                                               tokenize.COMMENT)):
                        # The empty tuple () is always accepted.
                        if i == start + 2:
                            return
                        if keyword_token == 'not':
                            # 'not (a and b)' needs its parens; only flag
                            # 'not (x)' with no lower-precedence operator inside.
                            if not found_and_or:
                                self.add_message('C0325', line=line_num,
                                                 args=keyword_token)
                        elif keyword_token in ('return', 'yield'):
                            self.add_message('C0325', line=line_num,
                                             args=keyword_token)
                        elif keyword_token not in self._keywords_with_parens:
                            if not (tokens[i+1][1] == 'in' and found_and_or):
                                self.add_message('C0325', line=line_num,
                                                 args=keyword_token)
                    return
            elif depth == 1:
                # This is a tuple, which is always acceptable.
                if token[1] == ',':
                    return
                # 'and' and 'or' are the only boolean operators with lower precedence
                # than 'not', so parens are only required when they are found.
                elif token[1] in ('and', 'or'):
                    found_and_or = True
                # A yield inside an expression must always be in parentheses,
                # quit early without error.
                elif token[1] == 'yield':
                    return
                # A generator expression always has a 'for' token in it, and
                # the 'for' token is only legal inside parens when it is in a
                # generator expression. The parens are necessary here, so bail
                # without an error.
                elif token[1] == 'for':
                    return
    def _opening_bracket(self, tokens, i):
        """Push tokens[i] (an opening bracket) and check spacing around it.

        The policy pair passed to _check_space is presumably
        (before-bracket, after-bracket) — _check_space is not visible
        here; TODO confirm.
        """
        self._bracket_stack.append(tokens[i][1])
        # Special case: ignore slices
        if tokens[i][1] == '[' and tokens[i+1][1] == ':':
            return
        # NOTE(review): 'and' binds tighter than 'or', so this condition reads
        # as (previous token is a non-keyword NAME) or (previous token is a
        # closing bracket) — i.e. a call or subscript, where a space before
        # the bracket is forbidden.
        if (i > 0 and (tokens[i-1][0] == tokenize.NAME and
                       not (keyword.iskeyword(tokens[i-1][1]))
                       or tokens[i-1][1] in _CLOSING_BRACKETS)):
            self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
        else:
            self._check_space(tokens, i, (_IGNORE, _MUST_NOT))
def _closing_bracket(self, tokens, i):
self._bracket_stack.pop()
# Special case: ignore slices
if tokens[i-1][1] == ':' and tokens[i][1] == ']':
return
policy_before = _MUST_NOT
if tokens[i][1] in _CLOSING_BRACKETS and tokens[i-1][1] == | |
2,
"course_project": False
},
{
"id": 26062,
"name": "Основы естественнонаучных процессов",
"term": 2,
"course_project": False
},
{
"id": 36991,
"name": "Основы иммунологии/ Basics of Immunology",
"term": 2,
"course_project": False
},
{
"id": 19214,
"name": "Основы информационных оптических технологий",
"term": 6,
"course_project": False
},
{
"id": 30801,
"name": "Основы кибербезопасности",
"term": 4,
"course_project": False
},
{
"id": 26051,
"name": "Основы композиции",
"term": 6,
"course_project": False
},
{
"id": 26051,
"name": "Основы композиции",
"term": 6,
"course_project": True
},
{
"id": 26030,
"name": "Основы компьютерного дизайна",
"term": 4,
"course_project": False
},
{
"id": 33909,
"name": "Основы компьютерного моделирования в электродинамике / Basics of computer modeling in electromagnetics",
"term": 2,
"course_project": False
},
{
"id": 26031,
"name": "Основы конструирования механизмов и машин",
"term": 6,
"course_project": False
},
{
"id": 26031,
"name": "Основы конструирования механизмов и машин",
"term": 6,
"course_project": True
},
{
"id": 6741,
"name": "Основы конструирования оптико-электронных приборов и систем",
"term": 6,
"course_project": False
},
{
"id": 7073,
"name": "Основы конструирования оптико-электронных приборов и систем специального назначения",
"term": 6,
"course_project": False
},
{
"id": 7073,
"name": "Основы конструирования оптико-электронных приборов и систем специального назначения",
"term": 6,
"course_project": True
},
{
"id": 6765,
"name": "Основы конструирования оптических и лазерных приборов и систем",
"term": 6,
"course_project": False
},
{
"id": 6862,
"name": "Основы конструирования электронных средств",
"term": 6,
"course_project": False
},
{
"id": 31309,
"name": "Основы криологии",
"term": 4,
"course_project": False
},
{
"id": 26033,
"name": "Основы культивирования заквасочных микроорганизмов",
"term": 2,
"course_project": False
},
{
"id": 6730,
"name": "Основы лазерной техники",
"term": 6,
"course_project": False
},
{
"id": 6730,
"name": "Основы лазерной техники",
"term": 8,
"course_project": False
},
{
"id": 5335,
"name": "Основы логистики",
"term": 8,
"course_project": False
},
{
"id": 26066,
"name": "Основы математического моделирования",
"term": 6,
"course_project": False
},
{
"id": 26018,
"name": "Основы математического моделирования оборудования систем производства и транспортирования углеводородных энергоносителей",
"term": 6,
"course_project": False
},
{
"id": 19013,
"name": "Основы математической физики",
"term": 6,
"course_project": False
},
{
"id": 19455,
"name": "Основы моделирования физических процессов",
"term": 8,
"course_project": False
},
{
"id": 33511,
"name": "<NAME>",
"term": 8,
"course_project": False
},
{
"id": 31119,
"name": "Основы научной визуализации и визуального сторителлинга",
"term": 2,
"course_project": False
},
{
"id": 18765,
"name": "Основы научной деятельности",
"term": 8,
"course_project": False
},
{
"id": 35069,
"name": "Основы научной деятельности",
"term": 2,
"course_project": False
},
{
"id": 2954,
"name": "Основы научных исследований",
"term": 8,
"course_project": False
},
{
"id": 18791,
"name": "Основы научных исследований в биотехнологии и биоинженерии",
"term": 8,
"course_project": False
},
{
"id": 34710,
"name": "Основы нефтегазового дела",
"term": 2,
"course_project": False
},
{
"id": 35578,
"name": "Основы обработки естественного языка",
"term": 2,
"course_project": False
},
{
"id": 26042,
"name": "Основы обработки мультимедийных данных",
"term": 2,
"course_project": False
},
{
"id": 26042,
"name": "Основы обработки мультимедийных данных",
"term": 6,
"course_project": False
},
{
"id": 6940,
"name": "Основы оптики природных сред",
"term": 8,
"course_project": False
},
{
"id": 1632,
"name": "Основы оптоинформатики",
"term": 8,
"course_project": False
},
{
"id": 7112,
"name": "Основы организации применения систем специального назначения",
"term": 10,
"course_project": False
},
{
"id": 31492,
"name": "Основы препаративной химии",
"term": 2,
"course_project": False
},
{
"id": 1288,
"name": "Основы прикладного телевидения",
"term": 6,
"course_project": False
},
{
"id": 31034,
"name": "Основы проектирования баз данных",
"term": 6,
"course_project": False
},
{
"id": 31118,
"name": "Основы проектирования биотехнологических предприятий",
"term": 2,
"course_project": False
},
{
"id": 30392,
"name": "Основы проектирования приборов и систем",
"term": 6,
"course_project": False
},
{
"id": 26075,
"name": "Основы профессиональной деятельности",
"term": 2,
"course_project": False
},
{
"id": 33205,
"name": "Основы профессиональной деятельности / Computer Basics",
"term": 2,
"course_project": False
},
{
"id": 5348,
"name": "Основы работоспособности технических систем",
"term": 8,
"course_project": False
},
{
"id": 6953,
"name": "Основы разработки компиляторов",
"term": 6,
"course_project": False
},
{
"id": 26096,
"name": "Основы разработки программно-аппаратных средств",
"term": 6,
"course_project": False
},
{
"id": 6947,
"name": "Основы распределенных вычислений",
"term": 6,
"course_project": False
},
{
"id": 30952,
"name": "Основы расчета систем очистки газовоздушных выбросов",
"term": 2,
"course_project": False
},
{
"id": 26055,
"name": "Основы рисунка",
"term": 6,
"course_project": False
},
{
"id": 34354,
"name": "Основы робототехники",
"term": 2,
"course_project": False
},
{
"id": 34354,
"name": "Основы робототехники",
"term": 2,
"course_project": True
},
{
"id": 31583,
"name": "Основы системного анализа",
"term": 2,
"course_project": False
},
{
"id": 26099,
"name": "Основы современного естествознания и научной коммуникации",
"term": 2,
"course_project": False
},
{
"id": 28257,
"name": "Основы спиртового брожения / Basics of Alcoholic Fermentation",
"term": 2,
"course_project": False
},
{
"id": 19005,
"name": "Основы статистической физики",
"term": 6,
"course_project": False
},
{
"id": 6879,
"name": "Основы теории идентификации",
"term": 8,
"course_project": False
},
{
"id": 29646,
"name": "Основы теории кондиционирования воздуха",
"term": 4,
"course_project": False
},
{
"id": 248,
"name": "Основы теории управления",
"term": 6,
"course_project": False
},
{
"id": 26082,
"name": "Основы теории эксперимента",
"term": 2,
"course_project": False
},
{
"id": 26044,
"name": "Основы термодинамики и теплопередачи",
"term": 2,
"course_project": False
},
{
"id": 18710,
"name": "Основы тестирования программного обеспечения",
"term": 8,
"course_project": False
},
{
"id": 26083,
"name": "Основы технического зрения",
"term": 6,
"course_project": False
},
{
"id": 27330,
"name": "Основы технологии программно-конфигурируемых сетей",
"term": 2,
"course_project": False
},
{
"id": 34430,
"name": "Основы технологии программно-конфигурируемых сетей / Software define networks basic",
"term": 2,
"course_project": False
},
{
"id": 30498,
"name": "Основы технологий быстрого прототипирования",
"term": 6,
"course_project": False
},
{
"id": 26084,
"name": "Основы транспортного планирования",
"term": 2,
"course_project": False
},
{
"id": 5,
"name": "Основы трибоники",
"term": 8,
"course_project": False
},
{
"id": 26106,
"name": "Основы управления и эксплуатации сетей радиосвязи и радиодоступа",
"term": 6,
"course_project": False
},
{
"id": 26106,
"name": "Основы управления и эксплуатации сетей радиосвязи и радиодоступа",
"term": 6,
"course_project": True
},
{
"id": 9837,
"name": "Основы устройства и эксплуатации инженерных сетей",
"term": 8,
"course_project": False
},
{
"id": 26107,
"name": "Основы физики полупроводниковых лазеров",
"term": 2,
"course_project": False
},
{
"id": 1975,
"name": "Основы философии",
"term": 6,
"course_project": False
},
{
"id": 34602,
"name": "Основы финансовой грамотности",
"term": 2,
"course_project": False
},
{
"id": 1722,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 26108,
"name": "Основы цифрового производства",
"term": 2,
"course_project": False
},
{
"id": 26108,
"name": "Основы цифрового производства",
"term": 4,
"course_project": False
},
{
"id": 1976,
"name": "<NAME>",
"term": 8,
"course_project": False
},
{
"id": 34802,
"name": "<NAME> замкнутого цикла / The Basics of Circular Economy",
"term": 2,
"course_project": False
},
{
"id": 27356,
"name": "Основы экономического анализа и экономики пространства",
"term": 2,
"course_project": False
},
{
"id": 30457,
"name": "Основы электрических измерений",
"term": 6,
"course_project": False
},
{
"id": 26087,
"name": "Основы электротехники",
"term": 6,
"course_project": False
},
{
"id": 34877,
"name": "Особенности регулирования новых рынков",
"term": 2,
"course_project": False
},
{
"id": 30629,
"name": "Особенности формирования языковых профессиональных компетенций",
"term": 6,
"course_project": False
},
{
"id": 5372,
"name": "<NAME>",
"term": 8,
"course_project": False
},
{
"id": 28292,
"name": "Оценка восприятия (зрительное, когнитивное, эмоциональное) / Assessment of Perception (Visual, Cognitive, Emotional)",
"term": 2,
"course_project": False
},
{
"id": 26119,
"name": "Оценка жизненного цикла",
"term": 2,
"course_project": False
},
{
"id": 34805,
"name": "Оценка жизненного цикла / Life Cycle Assessment",
"term": 2,
"course_project": False
},
{
"id": 32489,
"name": "Оценка жизненного цикла продукции",
"term": 2,
"course_project": False
},
{
"id": 21701,
"name": "Оценка имущественного комплекса инновационных предприятий",
"term": 8,
"course_project": False
},
{
"id": 34905,
"name": "Оценка проектов на новых рынках",
"term": 2,
"course_project": False
},
{
"id": 34895,
"name": "Оценка стоимости проектов в секторе Life Sciences",
"term": 2,
"course_project": False
},
{
"id": 6647,
"name": "Оценка экономической эффективности",
"term": 8,
"course_project": False
},
{
"id": 34314,
"name": "Оценка эффективности информатизации предприятия",
"term": 2,
"course_project": False
},
{
"id": 34318,
"name": "Оценка эффективности программных продуктов",
"term": 2,
"course_project": False
},
{
"id": 18694,
"name": "Оценка эффективности проекта",
"term": 8,
"course_project": False
},
{
"id": 26128,
"name": "Пакеты | |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow ops for directed graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from syntaxnet.util import check
def ArcPotentialsFromTokens(source_tokens, target_tokens, weights):
  r"""Returns arc potentials computed from token activations and weights.

  For each batch of source and target token activations, computes a scalar
  potential for each arc as the 3-way product between the activation vectors of
  the source and target of the arc and the |weights|.  Specifically,

    arc[b,s,t] =
        \sum_{i,j} source_tokens[b,s,i] * weights[i,j] * target_tokens[b,t,j]

  Note that the token activations can be extended with bias terms to implement a
  "biaffine" model (Dozat and Manning, 2017).

  Args:
    source_tokens: [B,N,S] tensor of batched activations for the source token
        in each arc.
    target_tokens: [B,N,T] tensor of batched activations for the target token
        in each arc.
    weights: [S,T] matrix of weights.

    B,N may be statically-unknown, but S,T must be statically-known.  The dtype
    of all arguments must be compatible.

  Returns:
    [B,N,N] tensor A of arc potentials where A_{b,s,t} is the potential of the
    arc from s to t in batch element b.  The dtype of A is the same as that of
    the arguments.  Note that the diagonal entries (i.e., where s==t) represent
    self-loops and may not be meaningful.
  """
  # NOTE: Local-variable suffixes encode tensor shapes; e.g. |targets_bnxt|
  # is a [B*N,T] matrix and |arcs_bxnxn| is a [B,N,N] tensor.
  # All arguments must have statically-known rank.
  check.Eq(source_tokens.get_shape().ndims, 3, 'source_tokens must be rank 3')
  check.Eq(target_tokens.get_shape().ndims, 3, 'target_tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
  # All activation dimensions must be statically-known.
  num_source_activations = weights.get_shape().as_list()[0]
  num_target_activations = weights.get_shape().as_list()[1]
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.NotNone(num_target_activations, 'unknown target activation dimension')
  check.Eq(source_tokens.get_shape().as_list()[2], num_source_activations,
           'dimension mismatch between weights and source_tokens')
  check.Eq(target_tokens.get_shape().as_list()[2], num_target_activations,
           'dimension mismatch between weights and target_tokens')
  # All arguments must share the same type.
  check.Same([weights.dtype.base_dtype,
              source_tokens.dtype.base_dtype,
              target_tokens.dtype.base_dtype],
             'dtype mismatch')
  source_tokens_shape = tf.shape(source_tokens)
  target_tokens_shape = tf.shape(target_tokens)
  batch_size = source_tokens_shape[0]
  num_tokens = source_tokens_shape[1]
  # The dynamic B,N dims of both inputs are checked for equality at graph
  # execution time via these assert ops.
  with tf.control_dependencies([
      tf.assert_equal(batch_size, target_tokens_shape[0]),
      tf.assert_equal(num_tokens, target_tokens_shape[1])]):
    # Flatten out the batch dimension so we can use one big multiplication.
    targets_bnxt = tf.reshape(target_tokens, [-1, num_target_activations])

    # Matrices are row-major, so we arrange for the RHS argument of each matmul
    # to have its transpose flag set. That way no copying is required to align
    # the rows of the LHS with the columns of the RHS.
    weights_targets_bnxs = tf.matmul(targets_bnxt, weights, transpose_b=True)

    # The next computation is over pairs of tokens within each batch element, so
    # restore the batch dimension.
    weights_targets_bxnxs = tf.reshape(
        weights_targets_bnxs, [batch_size, num_tokens, num_source_activations])

    # Note that this multiplication is repeated across the batch dimension,
    # instead of being one big multiplication as in the first matmul. There
    # doesn't seem to be a way to arrange this as a single multiplication given
    # the pairwise nature of this computation.
    arcs_bxnxn = tf.matmul(source_tokens, weights_targets_bxnxs,
                           transpose_b=True)
    return arcs_bxnxn
def ArcSourcePotentialsFromTokens(tokens, weights):
  r"""Returns arc source potentials computed from tokens and weights.

  For each batch of token activations, computes a scalar potential for each arc
  as the product between the activations of the source token and the |weights|.
  Specifically,

    arc[b,s,:] = \sum_{i} weights[i] * tokens[b,s,i]

  Args:
    tokens: [B,N,S] tensor of batched activations for source tokens.
    weights: [S] vector of weights.

    B,N may be statically-unknown, but S must be statically-known.  The dtype
    of all arguments must be compatible.

  Returns:
    [B,N,N] tensor A of arc potentials as defined above.  The dtype of A is
    the same as that of the arguments.  Note that the diagonal entries (i.e.,
    where s==t) represent self-loops and may not be meaningful.
  """
  # NOTE: Local-variable suffixes encode tensor shapes; e.g. |tokens_bnxs|
  # is a [B*N,S] matrix.
  # All arguments must have statically-known rank.
  check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 1, 'weights must be a vector')
  # All activation dimensions must be statically-known.
  num_source_activations = weights.get_shape().as_list()[0]
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.Eq(tokens.get_shape().as_list()[2], num_source_activations,
           'dimension mismatch between weights and tokens')
  # All arguments must share the same type.
  check.Same([weights.dtype.base_dtype,
              tokens.dtype.base_dtype],
             'dtype mismatch')
  tokens_shape = tf.shape(tokens)
  batch_size = tokens_shape[0]
  num_tokens = tokens_shape[1]

  # Flatten out the batch dimension so we can use a couple big matmuls.
  tokens_bnxs = tf.reshape(tokens, [-1, num_source_activations])
  weights_sx1 = tf.expand_dims(weights, 1)
  # One score per source token; tiling copies it across the N target
  # positions, since the potential does not depend on the target.
  sources_bnx1 = tf.matmul(tokens_bnxs, weights_sx1)
  sources_bnxn = tf.tile(sources_bnx1, [1, num_tokens])

  # Restore the batch dimension in the output.
  sources_bxnxn = tf.reshape(sources_bnxn, [batch_size, num_tokens, num_tokens])
  return sources_bxnxn
def RootPotentialsFromTokens(root, tokens, weights_arc, weights_source):
  r"""Returns root selection potentials computed from tokens and weights.

  For each batch of token activations, computes a scalar potential for each
  root selection as the 3-way product between the activations of the artificial
  root token, the token activations, and the |weights|.  Specifically,

    roots[b,r] = \sum_{i,j} root[i] * weights[i,j] * tokens[b,r,j]

  Args:
    root: [S] vector of activations for the artificial root token.
    tokens: [B,N,T] tensor of batched activations for root tokens.
    weights_arc: [S,T] matrix of weights.
    weights_source: [S] vector of weights.

    B,N may be statically-unknown, but S,T must be statically-known.  The dtype
    of all arguments must be compatible.

  Returns:
    [B,N] matrix R of root-selection potentials as defined above.  The dtype of
    R is the same as that of the arguments.
  """
  # NOTE: Local-variable suffixes encode tensor shapes; e.g. |root_1xs| is a
  # [1,S] matrix and |roots_1xbn| is a [1,B*N] matrix.
  # All arguments must have statically-known rank.
  check.Eq(root.get_shape().ndims, 1, 'root must be a vector')
  check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
  check.Eq(weights_arc.get_shape().ndims, 2, 'weights_arc must be a matrix')
  check.Eq(weights_source.get_shape().ndims, 1,
           'weights_source must be a vector')
  # All activation dimensions must be statically-known.
  num_source_activations = weights_arc.get_shape().as_list()[0]
  num_target_activations = weights_arc.get_shape().as_list()[1]
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.NotNone(num_target_activations, 'unknown target activation dimension')
  check.Eq(root.get_shape().as_list()[0], num_source_activations,
           'dimension mismatch between weights_arc and root')
  check.Eq(tokens.get_shape().as_list()[2], num_target_activations,
           'dimension mismatch between weights_arc and tokens')
  check.Eq(weights_source.get_shape().as_list()[0], num_source_activations,
           'dimension mismatch between weights_arc and weights_source')
  # All arguments must share the same type.
  check.Same([
      weights_arc.dtype.base_dtype, weights_source.dtype.base_dtype,
      root.dtype.base_dtype, tokens.dtype.base_dtype
  ], 'dtype mismatch')
  root_1xs = tf.expand_dims(root, 0)
  weights_source_sx1 = tf.expand_dims(weights_source, 1)
  tokens_shape = tf.shape(tokens)
  batch_size = tokens_shape[0]
  num_tokens = tokens_shape[1]

  # Flatten out the batch dimension so we can use a couple big matmuls.
  tokens_bnxt = tf.reshape(tokens, [-1, num_target_activations])
  weights_targets_bnxs = tf.matmul(tokens_bnxt, weights_arc, transpose_b=True)
  roots_1xbn = tf.matmul(root_1xs, weights_targets_bnxs, transpose_b=True)

  # Add in the score for selecting the root as a source.  The matmul below is
  # [1,S] x [S,1] = [1,1], so broadcasting adds the same scalar to every
  # entry of |roots_1xbn|.
  roots_1xbn += tf.matmul(root_1xs, weights_source_sx1)

  # Restore the batch dimension in the output.
  roots_bxn = tf.reshape(roots_1xbn, [batch_size, num_tokens])
  return roots_bxn
def CombineArcAndRootPotentials(arcs, roots):
  """Combines arc and root potentials into a single set of potentials.

  The root potential for token t overwrites the diagonal entry [t,t] of the
  arc potentials, which would otherwise represent a meaningless self-loop.

  Args:
    arcs: [B,N,N] tensor of batched arc potentials.
    roots: [B,N] matrix of batched root potentials.

  Returns:
    [B,N,N] tensor P of combined potentials where
      P_{b,s,t} = s == t ? roots[b,t] : arcs[b,s,t]
  """
  # Static rank checks.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')

  # Both inputs must share one base dtype.
  dtype = arcs.dtype.base_dtype
  check.Same([dtype, roots.dtype.base_dtype], 'dtype mismatch')

  # Dynamic shape compatibility is enforced at graph execution time via
  # assert ops attached as control dependencies.
  shape_of_roots = tf.shape(roots)
  shape_of_arcs = tf.shape(arcs)
  num_batches = shape_of_roots[0]
  tokens_per_batch = shape_of_roots[1]
  assertions = [tf.assert_equal(num_batches, shape_of_arcs[0]),
                tf.assert_equal(tokens_per_batch, shape_of_arcs[1]),
                tf.assert_equal(tokens_per_batch, shape_of_arcs[2])]
  with tf.control_dependencies(assertions):
    return tf.matrix_set_diag(arcs, roots)
def LabelPotentialsFromTokens(tokens, weights):
r"""Computes label potentials from tokens and weights.
For each batch of token activations, computes a scalar potential for each
label as the product between the activations of the source token and the
|weights|. Specifically,
labels[b,t,l] = \sum_{i} weights[l,i] * tokens[b,t,i]
Args:
tokens: [B,N,T] tensor of batched token activations.
weights: [L,T] matrix of weights.
B,N may be dynamic, but L,T must be static. The dtype of all arguments must
be compatible.
Returns:
[B,N,L] tensor of label potentials as defined above, with the same dtype as
the arguments.
"""
check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
num_labels = weights.get_shape().as_list()[0]
num_activations = weights.get_shape().as_list()[1]
check.NotNone(num_labels, 'unknown number of labels')
check.NotNone(num_activations, 'unknown activation dimension')
check.Eq(tokens.get_shape().as_list()[2], num_activations,
'activation mismatch between weights and tokens')
| |
__source__ = 'https://github.com/kamyu104/LeetCode/blob/master/Python/count-of-smaller-numbers-after-self.py'
# https://leetcode.com/problems/count-of-smaller-numbers-after-self/#/description
# Time: O(nlogn)
# Space: O(n)
#
# Description: Leetcode # 315. Count of Smaller Numbers After Self
#
# You are given an integer array nums and you have to
# return a new counts array. The counts array has the
# property where counts[i] is the number of smaller
# elements to the right of nums[i].
#
# Example:
#
# Given nums = [5, 2, 6, 1]
#
# To the right of 5 there are 2 smaller elements (2 and 1).
# To the right of 2 there is only 1 smaller element (1).
# To the right of 6 there is 1 smaller element (1).
# To the right of 1 there is 0 smaller element.
# Return the array [2, 1, 1, 0].
#
# Companies
# Google
# Related Topics
# Divide and Conquer Binary Indexed Tree Segment Tree Binary Search Tree
# Similar Questions
# Count of Range Sum Queue Reconstruction by Height Reverse Pairs
#
import unittest
# BIT solution.
class Solution(object):
    def countSmaller(self, nums):
        """Count, for each element, the smaller elements to its right.

        Uses a binary indexed (Fenwick) tree over the ranks of the values;
        O(n log n) time, O(n) space.

        :type nums: List[int]
        :rtype: List[int]
        """
        # Leftmost index where compare(target, A[mid]) holds; with the
        # 'x <= y' comparator below this yields the rank of `target` in A.
        def binarySearch(A, target, compare):
            start, end = 0, len(A) - 1
            while start <= end:
                # Floor division: the original used '/', which is only
                # integer division under Python 2 and breaks under Python 3.
                mid = (start + end) // 2
                if compare(target, A[mid]):
                    end = mid - 1
                else:
                    start = mid + 1
            return start

        class BIT(object):
            """Binary indexed (Fenwick) tree over 1-based indices."""

            def __init__(self, n):
                self.__bit = [0] * n

            # Named 'add' rather than '__add__': the original hijacked the
            # '+' operator dunder with a non-standard (i, val) signature,
            # which breaks the operator protocol and had to be invoked as
            # bit.__add__(i, v) anyway.
            def add(self, i, val):
                """Add `val` at index i and propagate to the ancestors."""
                while i < len(self.__bit):
                    self.__bit[i] += val
                    i += (i & -i)

            def query(self, i):
                """Return the prefix sum of indices 1..i."""
                ret = 0
                while i > 0:
                    ret += self.__bit[i]
                    i -= (i & -i)
                return ret

        # Get the place (position in the ascending order) of each number.
        sorted_nums, places = sorted(nums), [0] * len(nums)
        for i, num in enumerate(nums):
            places[i] = binarySearch(sorted_nums, num, lambda x, y: x <= y)
        # Scan right-to-left: everything already in the tree lies to the
        # right, so a prefix-sum query counts the smaller elements.
        # ('range' replaces the Python-2-only 'xrange'; identical results.)
        ans, bit = [0] * len(nums), BIT(len(nums) + 1)
        for i in reversed(range(len(nums))):
            ans[i] = bit.query(places[i])
            bit.add(places[i] + 1, 1)
        return ans
# Time: O(nlogn)
# Space: O(n)
# BST solution.
class Solution2(object):
    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Insert elements right-to-left into a BST that tracks, per node, the
        size of its left subtree; querying a value then yields how many
        already-inserted (i.e. later) values are strictly smaller.
        """
        res = [0] * len(nums)
        bst = self.BST()
        # Insert into BST and get left count.
        # (range replaces the Python-2-only xrange.)
        for i in reversed(range(len(nums))):
            bst.insertNode(nums[i])
            res[i] = bst.query(nums[i])
        return res

    class BST(object):
        class BSTreeNode(object):
            def __init__(self, val):
                self.val = val
                self.count = 0  # number of nodes in this node's left subtree
                self.left = self.right = None

        def __init__(self):
            self.root = None

        # Insert a value, maintaining the left-subtree counts.
        def insertNode(self, val):
            node = self.BSTreeNode(val)
            if not self.root:
                self.root = node
                return
            curr = self.root
            while curr:
                # Insert left if smaller.
                if node.val < curr.val:
                    curr.count += 1  # new node lands in curr's left subtree
                    if curr.left:
                        curr = curr.left
                    else:
                        curr.left = node
                        break
                else:  # Insert right if larger or equal.
                    if curr.right:
                        curr = curr.right
                    else:
                        curr.right = node
                        break

        # Count how many stored values are strictly smaller than val.
        def query(self, val):
            count = 0
            curr = self.root
            while curr:
                if val < curr.val:
                    curr = curr.left
                elif val > curr.val:
                    count += 1 + curr.count  # curr plus its left subtree
                    curr = curr.right
                else:  # Equal: everything in the left subtree is smaller.
                    return count + curr.count
            return 0
# MergeSort
# Thought: https://discuss.leetcode.com/topic/31162/mergesort-solution
# 128ms 64.78%
class Solution3(object):
    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Merge-sort (index, value) pairs.  While merging from the back,
        whenever the left half's largest remaining element beats the right
        half's, every element still in the right half is a smaller element
        that originally sat to its right.
        """
        def sort(enum):
            # `//` keeps integer division on Python 3 (the original `/`
            # produced a float there and broke slicing).
            half = len(enum) // 2
            if half:
                left, right = sort(enum[:half]), sort(enum[half:])
                # Fill enum from the back with the larger of the two tails.
                for i in reversed(range(len(enum))):
                    if not right or left and left[-1][1] > right[-1][1]:
                        smaller[left[-1][0]] += len(right)
                        enum[i] = left.pop()
                    else:
                        enum[i] = right.pop()
            return enum

        smaller = [0] * len(nums)
        sort(list(enumerate(nums)))
        return smaller
class TestMethods(unittest.TestCase):
    """Smoke tests for the solutions in this module."""

    def test_Local(self):
        # Trivial sanity check that the test harness itself runs.
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Run the unit tests when this file is executed as a script.
    unittest.main()
Java = '''
Thought:
1. building BST:
Every node will maintain a val sum recording the total of number on it's left bottom side,
dup counts the duplication. For example, [3, 2, 2, 6, 1], from back to beginning,we would have:
1(0, 1)
\
6(3, 1)
/
2(0, 2)
\
3(0, 1)
When we try to insert a number,
the total number of smaller number would be adding dup and sum of the nodes where we turn right.
for example, if we insert 5,
it should be inserted on the way down to the right of 3,
the nodes where we turn right is 1(0,1), 2,(0,2), 3(0,1), so the answer should be (0 + 1)+(0 + 2)+ (0 + 1) = 4
if we insert 7, the right-turning nodes are 1(0,1), 6(3,1), so answer should be (0 + 1) + (3 + 1) = 5
# 5ms 97.81%
class Solution {
class Node {
Node left, right;
int val, sum, dup = 1;
public Node(int v, int s) {
val = v;
sum = s;
}
}
public List<Integer> countSmaller(int[] nums) {
Integer[] ans = new Integer[nums.length];
Node root = null;
for (int i = nums.length - 1; i >= 0; i--) {
root = insert(nums[i], root, ans, i, 0);
}
return Arrays.asList(ans);
}
private Node insert(int num, Node node, Integer[] ans, int i, int preSum) {
if (node == null) {
node = new Node(num, 0);
ans[i] = preSum;
} else if (node.val == num) {
node.dup++;
ans[i] = preSum + node.sum;
} else if (node.val > num) {
node.sum++;
node.left = insert(num, node.left, ans, i, preSum);
} else {
node.right = insert(num, node.right, ans, i, preSum + node.dup + node.sum);
}
return node;
}
}
# building BST
# 10ms 58.31%
class Solution {
public List<Integer> countSmaller(int[] nums) {
List<Integer> result = new ArrayList<>();
Tree tree = new Tree();
for (int i = nums.length - 1; i >= 0; i--) {
result.add(tree.insert(nums[i]));
}
Collections.reverse(result);
return result;
}
private class Tree {
private TreeNode root;
private int insert(int val) {
if (root == null) {
root = new TreeNode(val);
return 0;
}
int count = 0;
TreeNode cur = root;
while (true) {
if (cur.val > val) {
cur.leftCount++;
if (cur.left != null) {
cur = cur.left;
} else {
cur.left = new TreeNode(val);
break;
}
} else if (cur.val < val) {
count += cur.leftCount + cur.selfCount;
if (cur.right != null) {
cur = cur.right;
} else {
cur.right = new TreeNode(val);
break;
}
} else {
cur.selfCount++;
count += cur.leftCount;
break;
}
}
return count;
}
}
private class TreeNode {
private TreeNode left;
private TreeNode right;
private int val;
private int leftCount;
private int selfCount;
TreeNode(int val) {
this.val = val;
this.selfCount = 1;
}
}
}
# BIT
# 3ms 100%
public class Solution {
public List<Integer> countSmaller(int[] nums) {
if (nums == null || nums.length == 0) {
return new ArrayList<Integer>();
}
makePositive(nums);
int max = findMax(nums);
int[] tree = new int[max + 1];
Integer[] result = new Integer[nums.length];
for (int i = nums.length - 1; i >= 0; i--) {
result[i] = findResult(nums[i], tree);
updateTree(nums[i] + 1, tree);
}
return Arrays.asList(result);
}
private void makePositive(int[] nums) {
int minus = 0;
for (int num : nums) {
minus = Math.min(minus, num);
}
if (minus < 0) {
minus = -minus + 1;
for (int i = 0; i < nums.length; i++) {
nums[i] += minus;
}
}
}
private int findMax(int[] nums) {
int max = 0;
for (int num : nums) {
max = Math.max(max, num);
}
return max;
}
private void updateTree(int index, int[] tree) {
while (index < tree.length && index > 0) {
tree[index]++;
index += index & (-index);
}
}
private int findResult(int index, int[] tree) {
int result = 0;
while (index > 0) {
result += tree[index];
index &= index - 1;
}
return result;
}
}
2.Traverse from the back to the beginning of the array, maintain an sorted array of numbers have been visited.
Use findIndex() to find the first element in the sorted array which is larger or equal to target number.
For example, [5,2,3,6,1], when we reach 2, we have a sorted array[1,3,6], findIndex() returns 1,
which is the index where 2 should be inserted and is also the number smaller than 2.
Then we insert 2 into the sorted array to form [1,2,3,6].
Due to the O(n) complexity of ArrayList | |
<reponame>agilman/django_maps2
from maps.models import *
from maps.serializers import *
from maps.forms import *
from django.http import JsonResponse
from collections import OrderedDict
from django.contrib.auth.models import User
from rest_framework.parsers import JSONParser
from django.views.decorators.csrf import csrf_exempt
from django_maps2 import settings
from datetime import datetime
import pytz
import os
import PIL
from PIL import Image
from PIL.ExifTags import TAGS
@csrf_exempt
def userInfo(request, userId=None):
    """GET a user's profile bundle, or POST to create/update the user's bio.

    GET returns {"adventures": [...], "bio": ..., "profile_pictures": [...]}.
    POST expects JSON {"userId": ..., "bio": ...} and upserts the UserBio row.
    """
    if request.method == 'GET':
        adventures = Adventure.objects.filter(owner_id=userId)
        advSerializer = AdventureSerializer(adventures, many=True)
        bio = UserBio.objects.filter(user=userId).first()
        bioSerializer = None
        # .first() returns None when the user has no bio yet.
        # (The original repeated this bio/picture lookup twice verbatim;
        # the duplicate has been removed.)
        if bio is not None:
            bioSerializer = UserBioSerializer(bio, many=False).data
        userPicture = UserProfilePicture.objects.filter(user=userId)
        userPicData = None
        if userPicture is not None:
            userPicData = UserProfilePictureSerializer(userPicture, many=True).data
        total = {"adventures": advSerializer.data, "bio": bioSerializer, "profile_pictures": userPicData}
        return JsonResponse(total, safe=False)
    elif request.method == 'POST':  # NO PUT, only POST
        data = JSONParser().parse(request)
        user = User.objects.get(pk=int(data["userId"]))
        # Update the bio in place if one exists, otherwise create it.
        bioQuery = UserBio.objects.filter(user=user)
        if bioQuery.exists():
            bioQuery.update(bio=data["bio"])
            bio = bioQuery.first()
        else:
            bio = UserBio(user=user, bio=data["bio"])
            bio.save()
        serialized = UserBioSerializer(bio)
        return JsonResponse(serialized.data, safe=False)
@csrf_exempt
def adventures(request,advId=None):
    """Create (POST), delete (DELETE) or replace (PUT) an Adventure.

    POST expects JSON with owner/name/advType/advStatus, saves the row and
    creates the adventure's media directories on disk.
    PUT overwrites the adventure row identified by advId.
    """
    if request.method == 'POST':
        data = JSONParser().parse(request)
        userId = int(data["owner"])
        #TODO: VALIDATION
        user = User.objects.get(pk=userId)
        advName = data["name"]
        advType = data["advType"]
        advStatus = data["advStatus"]
        #TODO
        #If advStatus = active, need to unset previous active.
        adv = Adventure(name=advName,owner=user,advType=advType,advStatus=advStatus)
        adv.save()
        # Create the media directory for this adventure plus a gear subfolder.
        media_root = settings.USER_MEDIA_ROOT
        os.mkdir(media_root + "/" + str(userId)+"/"+str(adv.id))
        os.mkdir(media_root + "/" + str(userId)+"/"+str(adv.id)+"/gear")
        serialized = AdventureSerializer(adv)
        return JsonResponse(serialized.data,safe=False)
    elif request.method == "DELETE":
        advToDel = Adventure.objects.get(pk=advId)
        advToDel.delete()
        serialized = AdventureSerializer(advToDel)
        #TODO Probably should return success code instead of object...
        # NOTE(review): `serialized` is computed but an empty list is returned.
        return JsonResponse([],safe=False)
    elif request.method == "PUT":
        data = JSONParser().parse(request)
        owner = User.objects.get(pk=int(data["owner"]))
        advName = data["name"]
        advType = data["advType"]
        advStatus = data["advStatus"]
        #If advStatus = active, need to unset previous active.
        # Saving a model with an explicit id overwrites the existing row.
        adv = Adventure(id=advId,name=advName,owner=owner,advType=advType,advStatus=advStatus)
        adv.save()
        serialized = AdventureSerializer(adv)
        return JsonResponse(serialized.data,safe=False)
@csrf_exempt
def advsOverview(request,userId):
    """This returns all start and end points from all the segments in all the maps, for all adventures.
    The goal is to visualize roughly all the travelling the user has done.

    Response is a GeoJSON FeatureCollection with one LineString feature per
    adventure, carrying total distance and first/last segment timestamps.
    """
    if request.method=="GET":
        allAdvs = []
        #this is awful
        advs = Adventure.objects.filter(owner_id=userId).all()
        for adv in advs:
            advCoordinates = []
            distance = 0
            startTime = None
            endTime = None
            # startTime comes from the first segment of the first map,
            # endTime from the last segment of the last map (when present).
            advMaps = adv.maps.all()
            if advMaps.count()>0:
                startSegments = advMaps[0].segments.all()
                endSegments=[]
                if startSegments.count()>0:
                    startTime = startSegments[0].startTime
                endSegments = advMaps[advMaps.count()-1].segments.all()
                if endSegments.count()>0:
                    endTime = endSegments[endSegments.count()-1].endTime
            for advMap in advMaps:
                segments = advMap.segments.all()
                for segment in segments:
                    start = segment.coordinates.first()
                    startPoint = [float(start.lat),float(start.lng)]
                    end = segment.coordinates.last()
                    endPoint = [float(end.lat),float(end.lng)]
                    ###TODO: allow for non-continuous lines?
                    #Add first segment
                    if len(advCoordinates) == 0:
                        advCoordinates.append(startPoint)
                        advCoordinates.append(endPoint)
                    #If this is not the first segment, check if startPoint is same as last endPoint
                    # (avoids duplicating a shared junction point).
                    else:
                        if advCoordinates[len(advCoordinates)-1]==startPoint:
                            advCoordinates.append(endPoint)
                        else:
                            advCoordinates.append(startPoint)
                            advCoordinates.append(endPoint)
                    distance += segment.distance
            advGeoJson = {'type':'Feature',
                          'properties':{'advId':adv.id,
                                        'distance': distance,
                                        'startTime': startTime,
                                        'endTime': endTime,
                                        'status': adv.advStatus},
                          'geometry':{'type':'LineString',
                                      'coordinates': advCoordinates}}
            allAdvs.append(advGeoJson)
        adventuresGeoJson = {'type':'FeatureCollection','properties':{'userId':userId},'features': allAdvs}
        return JsonResponse(adventuresGeoJson, safe=False)
@csrf_exempt
def mapsOverview(request,advId):
    """GET a GeoJSON FeatureCollection with one LineString feature per map
    of the given adventure, carrying cumulative distance and the
    adventure-level start/end timestamps.
    """
    if request.method=="GET":
        adv = Adventure.objects.get(id=advId)
        advCoordinates = []
        distance = 0
        startTime = None
        endTime = None
        # startTime from the first segment of the first map, endTime from the
        # last segment of the last map (when present).
        advMaps = adv.maps.all()
        if advMaps.count()>0:
            startSegments = advMaps[0].segments.all()
            if startSegments.count()>0:
                startTime = startSegments[0].startTime
            endSegments = advMaps[advMaps.count()-1].segments.all()
            if endSegments.count()>0:
                endTime = endSegments[endSegments.count()-1].endTime
        results =[]
        # NOTE(review): advCoordinates and distance are NOT reset per map, so
        # each successive map feature contains the accumulated coordinates and
        # distance of all previous maps as well -- confirm this is intended.
        for advMap in advMaps:
            segments = advMap.segments.all()
            for segment in segments:
                start = segment.coordinates.first()
                startPoint = [float(start.lat),float(start.lng)]
                end = segment.coordinates.last()
                endPoint = [float(end.lat),float(end.lng)]
                ###TODO: allow for non-continuous lines?
                #Add first segment
                if len(advCoordinates) == 0:
                    advCoordinates.append(startPoint)
                    advCoordinates.append(endPoint)
                #If this is not the first segment, check if startPoint is same as last endPoint
                else:
                    if advCoordinates[len(advCoordinates)-1]==startPoint:
                        advCoordinates.append(endPoint)
                    else:
                        advCoordinates.append(startPoint)
                        advCoordinates.append(endPoint)
                distance += segment.distance
            mapGeoJson = {'type':'Feature',
                          'properties':{'mapId':advMap.id,
                                        'mapName': advMap.name,
                                        'distance': distance,
                                        'startTime': startTime,
                                        'endTime': endTime},
                          'geometry':{'type':'LineString',
                                      'coordinates': advCoordinates}}
            results.append(mapGeoJson)
        adventuresGeoJson = {'type':'FeatureCollection','properties':{'advId':advId},'features': results}
        return JsonResponse(adventuresGeoJson, safe=False)
def handle_uploaded_profilePhoto(userId,f):
    """Persist an uploaded profile photo for a user.

    Writes the uploaded file to disk as-is, creates a UserProfilePicture row,
    then renames the file to <picture_id>.png.

    :param userId: primary key of the owning User
    :param f: Django UploadedFile from request.FILES
    :return: the saved UserProfilePicture instance
    """
    #write file as is, convert to decided format, add to db, delete old ?
    #save file as is
    target = settings.USER_MEDIA_ROOT+'/'+str(userId)+'/profile_pictures/'+f.name
    with open(target, 'wb+') as destination:
        # chunks() streams the upload instead of loading it all into memory.
        for chunk in f.chunks():
            destination.write(chunk)
    #convert,resize,thumbs
    #add to db
    user = User.objects.get(pk=int(userId))
    my_date = datetime.now(pytz.timezone('US/Pacific'))
    profilePicture = UserProfilePicture(user=user,uploadTime=my_date,active=True)
    profilePicture.save()
    #temp solution... need to convert to target file with right extension, and then delete the old file.
    # Rename to <picture_id>.png -- NOTE(review): no actual format conversion
    # happens; a JPEG upload ends up with a .png name. TODO confirm/convert.
    newName = settings.USER_MEDIA_ROOT+'/'+str(userId)+'/profile_pictures/'+str(profilePicture.id)+".png"
    os.rename(target,newName)
    return profilePicture
@csrf_exempt
def pictures(request,advId=None,albumId=None):
    """Upload a photo to an album (POST) or list an album's pictures (GET).

    POST validates AlbumPhotoUploadForm and delegates file handling to
    handle_uploaded_albumPhoto (presumably defined elsewhere in this module
    -- not visible here).
    """
    if request.method == 'POST':
        form = AlbumPhotoUploadForm(request.POST,request.FILES)
        if form.is_valid():
            f = request.FILES['file']
            userId= request.user.id
            pic = handle_uploaded_albumPhoto(userId,advId,albumId,f)
            serialized = PictureSerializer(pic)
            return JsonResponse(serialized.data,safe=False)
        else:
            return JsonResponse({"msg":"FAIL"},safe=False)
    if request.method == 'GET':
        album = Album.objects.get(id=albumId)
        pics = Picture.objects.filter(album=album).all()
        serialized = PictureSerializer(pics,many=True)
        return JsonResponse(serialized.data,safe=False)
@csrf_exempt
def advPictures(request, advId=None):
    """GET every picture across all albums of the given adventure."""
    if request.method == 'GET':
        adventure = Adventure.objects.get(id=advId)
        album_ids = list(Album.objects.filter(adv=adventure).values_list('id', flat=True))
        adv_pics = Picture.objects.filter(album__in=album_ids).all()
        return JsonResponse(PictureSerializer(adv_pics, many=True).data, safe=False)
@csrf_exempt
def deletePictures(request, advId=None, albumId=None):
    """Bulk-delete pictures: remove the DB rows and the image files on disk.

    POST body is a JSON list of picture ids; returns the list of ids that
    were processed.
    """
    if request.method == 'POST':
        data = JSONParser().parse(request)
        deleted = []
        for picId in data:
            pic = Picture.objects.get(id=picId)
            # Build the file paths BEFORE calling delete(): Django clears the
            # instance's pk (pic.id becomes None) once the row is deleted.
            # The id must also be stringified before concatenation -- the
            # original concatenated an int into the path and raised TypeError.
            base = settings.USER_MEDIA_ROOT + "/" + str(request.user.pk) + "/" + str(advId) + "/" + str(albumId)
            name = str(pic.id) + ".jpg"
            path = base + "/" + name
            thpath = base + "/.th/" + name
            mipath = base + "/.mi/" + name
            pic.delete()
            # Remove the full-size image plus thumbnail and midsize variants.
            os.remove(path)
            os.remove(thpath)
            os.remove(mipath)
            # check for success? only push on success...
            deleted.append(picId)
        return JsonResponse(deleted, safe=False)
@csrf_exempt
def geotagPictures(request):  # Bulk geotag: attach one lat/lng point to many pictures.
    """POST {"pictures": [ids...], "tag": {"lat": ..., "lng": ...}}.

    Upserts a PicMeta row per picture with the given coordinates and returns
    the serialized metadata for each.
    """
    if request.method == 'POST':
        data = JSONParser().parse(request)
        pictures = data["pictures"]
        point = data['tag']
        results = []
        for i in pictures:
            #create pic meta
            pic = Picture.objects.get(id=i)
            metaQuery = PicMeta.objects.filter(picture=pic)
            #Check if meta object already exists -> update if exists, create new one otherwise
            if metaQuery.exists():
                metaQuery.update(lat=point['lat'],lng=point['lng'])
                serialized = PicMetaSerializer(metaQuery.first())
                results.append(serialized.data)
            else:
                newMeta = PicMeta(picture = pic, lat=point['lat'], lng=point['lng'])
                newMeta.save()
                serialized = PicMetaSerializer(newMeta)
                results.append(serialized.data)
        return JsonResponse(results,safe=False)
@csrf_exempt
def profilePhoto(request):
    """POST a new profile photo (multipart form with 'userId' and 'file').

    Delegates storage and DB bookkeeping to handle_uploaded_profilePhoto and
    returns the new picture's id, or {"msg": "FAIL"} when the form is invalid.
    """
    if request.method == 'POST':
        form = ProfilePhotoUploadForm(request.POST,request.FILES)
        if form.is_valid():
            userId = form.data['userId']
            f = request.FILES['file']
            userPic = handle_uploaded_profilePhoto(userId,f)
            return JsonResponse({"picId":userPic.id},safe=False)
        else:
            return JsonResponse({"msg":"FAIL"},safe=False)
@csrf_exempt
def advMaps(request,advId=None):
    """Used to get list of maps, no coordinates.

    GET lists {id, name, distance} per map; POST creates a map and, as a
    side effect, an identically named Album plus its directories on disk.
    """
    if request.method == 'GET':
        queryset = Map.objects.filter(adv=advId)
        results = []
        # TODO Write a serializer...
        for i in queryset.all():
            myMap = {"id":i.id,"name":i.name,"distance":i.total_distance()}
            results.append(myMap)
        return JsonResponse(results,safe=False)
    if request.method == 'POST':
        data = JSONParser().parse(request)
        adv = Adventure.objects.get(id=int(data["advId"]))
        # NOTE: `map` shadows the builtin of the same name (local to this view).
        map = Map(name=data["name"],adv=adv)
        map.save()
        #Side effect of creating a map: album created with same name, folder created with albumId.
        album = Album(adv = adv, advMap=map, title=data["name"])
        album.save()
        #create dir..
        createAlbumDirs(request.user.id,advId,album.id)
        #Hmm, maybe I should just get a serializer...
        result = {"id":map.id,"name":map.name,"features":[],"distance":0 }
        return JsonResponse(result,safe=False)
def createAlbumDirs(userId, advId, newAlbumId):
    """Create the directory tree for a newly created album.

    Lays out <media_root>/<userId>/<advId>/<albumId> with hidden
    subdirectories for thumbnails (.th) and midsize images (.mi).
    """
    album_path = "/".join([settings.USER_MEDIA_ROOT, str(userId), str(advId), str(newAlbumId)])
    if not os.path.exists(album_path):
        os.mkdir(album_path)
        os.mkdir(album_path + "/.th")  # thumbnails
        os.mkdir(album_path + "/.mi")  # midsize images
@csrf_exempt
def map(request, mapId=None):
    """Fetch (GET) or delete (DELETE) a single map.

    GET returns the map's segments as GeoJSON; an unknown id yields [].
    (Note: this view name shadows the builtin `map` at module level.)
    """
    if request.method == 'GET':
        map = Map.objects.filter(id=mapId).first()
        results = []
        # .first() returns None when no row matches; compare with `is not None`
        # rather than `!=` (identity test against the None singleton).
        if map is not None:
            results = makeGeoJsonFromMap(map)
        return JsonResponse(results, safe=False)
    elif request.method == 'DELETE':
        mapToDel = Map.objects.get(id=mapId)
        mapToDel.delete()
        serialized = MapSerializer(mapToDel)
        return JsonResponse(serialized.data, safe=False)
@csrf_exempt
def advMapSegments(request, advId=None):
    """GET the GeoJSON for every map belonging to an adventure."""
    if request.method == 'GET':
        adventure = Adventure.objects.get(id=advId)
        adv_maps = Map.objects.filter(adv=adventure).all()
        geo_jsons = [makeGeoJsonFromMap(m) for m in adv_maps]
        return JsonResponse(geo_jsons, safe=False)
def makeGeoJsonFromMap(map):
    """Serialize all segments of a map as a GeoJSON FeatureCollection.

    Only the *first* day note of each segment is included (unlike
    makeGeoJsonFromSegment, which returns all of them).

    :param map: a Map model instance (name shadows the builtin, kept for
        compatibility with existing callers)
    :return: dict in GeoJSON FeatureCollection form
    """
    features = []
    for segment in map.segments.all():
        coordinates = [[float(c.lat), float(c.lng)] for c in segment.coordinates.all()]
        geometry = {"type": "LineString", "coordinates": coordinates}
        # .first() returns None for an empty relation; use `is not None`
        # instead of the original `type(x) != type(None)` anti-idiom.
        first_note = segment.dayNotes.first()
        notes = [first_note.note] if first_note is not None else []
        features.append({"type": "Feature",
                         "properties": {"segmentId": segment.id,
                                        'distance': segment.distance,
                                        'startTime': segment.startTime,
                                        'endTime': segment.endTime,
                                        'delay': segment.delay,
                                        'notes': notes},
                         "geometry": geometry})
    mapDict = {"type": "FeatureCollection",
               "properties": {"mapId": map.id, "mapName": map.name},
               "features": features}
    return mapDict
#TODO : Use makeGeoJsonFromSegment inside makeGeoJsonFromMap...
def makeGeoJsonFromSegment(segment):
    """Serialize a single map segment as a GeoJSON Feature.

    Unlike makeGeoJsonFromMap, this includes ALL of the segment's day notes.
    """
    coords = [[float(c.lat), float(c.lng)] for c in segment.coordinates.all()]
    day_notes = [n.note for n in segment.dayNotes.all()]
    return {"type": "Feature",
            "properties": {"segmentId": segment.id,
                           "distance": segment.distance,
                           "delay": segment.delay,
                           "notes": day_notes,
                           'startTime': segment.startTime,
                           'endTime': segment.endTime},
            "geometry": {"type": "LineString", "coordinates": coords}}
@csrf_exempt
def mapSegment(request,segmentId=None):
if request.method=='POST':
data = JSONParser().parse(request)
#Try validation with serializers...
if "mapId" in data.keys() and data["mapId"] is not None:
map = Map.objects.get(id=int(data["mapId"]))
startTime = None
endTime = None
dayNotes = None
if "startTime" in data.keys():
startTime = data["startTime"]
if "endTime" in data.keys():
endTime = data["endTime"]
distance = data["distance"]
waypoints = data["waypoints"]
if 'dayNotes' in data.keys():
dayNotes = data['dayNotes']
delay = data['delay']
#create segment
mapSegment = MapSegment(map=map,
startTime=startTime,
endTime=endTime,
distance = distance,
delay=delay)
mapSegment.save()
if dayNotes:
dayNoteObj = DayNote(segment = mapSegment,note = dayNotes)
dayNoteObj.save()
#create | |
AttributeError as atre:
raise atre
@property
def scaler(self):
try:
return self._scaler
except AttributeError as atre:
raise atre
@scaler.setter
def scaler(self, scaler):
"""
Setter for the model scaler.
:param scaler: The object which will handle data scaling.
:type scaler: ChemometricsScaler object, scaling/preprocessing objects from scikit-learn or None
:raise AttributeError: If there is a problem changing the scaler and resetting the model.
:raise TypeError: If the new scaler provided is not a valid object.
"""
try:
if not (isinstance(scaler, TransformerMixin) or scaler is None):
raise TypeError("Scikit-learn Transformer-like object or None")
if scaler is None:
scaler = ChemometricsScaler(0, with_std=False)
self._scaler = scaler
self.pca_algorithm = clone(self.pca_algorithm, safe=True)
self.modelParameters = None
self.loadings = None
self.scores = None
self.cvParameters = None
return None
except AttributeError as atre:
raise atre
except TypeError as typerr:
raise typerr
    def hotelling_T2(self, comps=None, alpha=0.05):
        """
        Obtain the parameters for the Hotelling T2 ellipse at the desired significance level.
        :param list comps: Components to use (None selects the first self.ncomps components).
        :param float alpha: Significance level
        :return: The Hotelling T2 ellipsoid radii at vertex
        :rtype: numpy.ndarray
        :raise AtributeError: If the model is not fitted
        :raise ValueError: If the components requested are higher than the number of components in the model
        :raise TypeError: If comps is not None or list/numpy 1d array and alpha a float
        """
        try:
            if self._isfitted is False:
                raise AttributeError("Model is not fitted")
            nsamples = self.scores.shape[0]
            # Mean squared score per component over all samples.
            if comps is None:
                ncomps = self.ncomps
                ellips = self.scores[:, range(self.ncomps)] ** 2
                ellips = 1 / nsamples * (ellips.sum(0))
            else:
                ncomps = len(comps)
                ellips = self.scores[:, comps] ** 2
                ellips = 1 / nsamples * (ellips.sum(0))
            # F stat
            # NOTE(review): scaling factor uses (nsamples**2 - 1); the commonly
            # quoted T2 limit uses ncomps*(n-1)*(n+1)/(n*(n-ncomps)) -- confirm
            # the intended formulation against the reference used.
            fs = (nsamples - 1) / nsamples * ncomps * (nsamples ** 2 - 1) / (nsamples * (nsamples - ncomps))
            fs = fs * st.f.ppf(1-alpha, ncomps, nsamples - ncomps)
            hoteling_t2 = list()
            for comp in range(ncomps):
                # Radius along each component axis.
                hoteling_t2.append(np.sqrt((fs * ellips[comp])))
            return np.array(hoteling_t2)
        except AttributeError as atre:
            raise atre
        except ValueError as valerr:
            raise valerr
        except TypeError as typerr:
            raise typerr
def _residual_ssx(self, x):
"""
:param x: Data matrix [n samples, m variables]
:return: The residual Sum of Squares per sample
"""
pred_scores = self.transform(x)
x_reconstructed = self.scaler.transform(self.inverse_transform(pred_scores))
xscaled = self.scaler.transform(x)
residuals = np.sum((xscaled - x_reconstructed)**2, axis=1)
return residuals
    def x_residuals(self, x, scale=True):
        """
        :param x: data matrix [n samples, m variables]
        :param scale: Return the residuals in the scale the model is using or in the raw data scale
        :return: X matrix model residuals
        """
        # Same computation as _residual_ssx: per-sample squared reconstruction error.
        pred_scores = self.transform(x)
        x_reconstructed = self.scaler.transform(self.inverse_transform(pred_scores))
        xscaled = self.scaler.transform(x)
        x_residuals = np.sum((xscaled - x_reconstructed)**2, axis=1)
        if scale:
            # NOTE(review): applying scaler.inverse_transform to a per-sample
            # SSX vector (length n_samples) looks dimensionally suspect -- the
            # scaler was fitted on [n_samples, m variables]. Confirm intent.
            x_residuals = self.scaler.inverse_transform(x_residuals)
        return x_residuals
def dmodx(self, x):
"""
Normalised DmodX measure
:param x: data matrix [n samples, m variables]
:return: The Normalised DmodX measure for each sample
"""
resids_ssx = self._residual_ssx(x)
s = np.sqrt(resids_ssx/(self.loadings.shape[1] - self.ncomps))
dmodx = np.sqrt((s/self.modelParameters['S0'])**2)
return dmodx
def leverages(self):
"""
Calculate the leverages for each observation
:return: The leverage (H) for each observation
:rtype: numpy.ndarray
"""
return np.diag(np.dot(self.scores, np.dot(np.linalg.inv(np.dot(self.scores.T, self.scores)), self.scores.T)))
    def cross_validation(self, x, cv_method=KFold(7, True), outputdist=False, press_impute=True):
        """
        Cross-validation method for the model. Calculates cross-validated estimates for Q2X and other
        model parameters using row-wise cross validation.
        :param x: Data matrix.
        :type x: numpy.ndarray, shape [n_samples, n_features]
        :param cv_method: An instance of a scikit-learn CrossValidator object.
        :type cv_method: BaseCrossValidator
        :param bool outputdist: Output the whole distribution for the cross validated parameters.
        Useful when using ShuffleSplit or CrossValidators other than KFold.
        :param bool press_impute: Use imputation of test set observations instead of row wise cross-validation.
        Slower but more reliable.
        :return: Adds a dictionary cvParameters to the object, containing the cross validation results
        :rtype: dict
        :raise TypeError: If the cv_method passed is not a scikit-learn CrossValidator object.
        :raise ValueError: If the x data matrix is invalid.
        """
        # NOTE(review): the default KFold(7, True) passes `shuffle` positionally;
        # recent scikit-learn versions require shuffle as a keyword argument --
        # confirm against the pinned sklearn version.
        try:
            if not (isinstance(cv_method, BaseCrossValidator) or isinstance(cv_method, BaseShuffleSplit)):
                raise TypeError("Scikit-learn cross-validation object please")
            # Check if global model is fitted... and if not, fit it using all of X
            if self._isfitted is False or self.loadings is None:
                self.fit(x)
            # Make a copy of the object, to ensure the internal state doesn't come out differently from the
            # cross validation method call...
            cv_pipeline = deepcopy(self)
            # Initialise predictive residual sum of squares variable (for whole CV routine)
            total_press = 0
            # Calculate Sum of Squares SS in whole dataset
            ss = np.sum((cv_pipeline.scaler.transform(x)) ** 2)
            # Initialise list for loadings and for the VarianceExplained in the test set values
            # Check if model has loadings, as in case of kernelPCA these are not available
            if hasattr(self.pca_algorithm, 'components_'):
                loadings = []
            # cv_varexplained_training is a list containing lists with the SingularValue/Variance Explained metric
            # as obtained in the training set during fitting.
            # cv_varexplained_test is a single R2X measure obtained from using the
            # model fitted with the training set in the test set.
            cv_varexplained_training = []
            cv_varexplained_test = []
            # Default version (press_impute = False) will perform
            # Row/Observation-Wise CV - Faster computationally, but has some limitations
            # See <NAME>. et al, Cross-validation of component models: A critical look at current methods,
            # Analytical and Bioanalytical Chemistry 2008
            # press_impute method requires computational optimization, and is under construction
            for xtrain, xtest in cv_method.split(x):
                cv_pipeline.fit(x[xtrain, :])
                # Calculate R2/Variance Explained in test set
                # To calculate an R2X in the test set
                xtest_scaled = cv_pipeline.scaler.transform(x[xtest, :])
                # Total sum of squares of the (scaled) test fold.
                tss = np.sum((xtest_scaled) ** 2)
                # Append the var explained in training set for this round and loadings for this round
                cv_varexplained_training.append(cv_pipeline.pca_algorithm.explained_variance_ratio_)
                if hasattr(self.pca_algorithm, 'components_'):
                    loadings.append(cv_pipeline.loadings)
                if press_impute is True:
                    # Column-wise imputation PRESS: predict each variable from the rest.
                    press_testset = 0
                    for column in range(0, x[xtest, :].shape[1]):
                        xpred = cv_pipeline.scaler.transform(cv_pipeline._press_impute_pinv(x[xtest, :], column))
                        press_testset += np.sum(np.square(xtest_scaled[:, column] - xpred[:, column]))
                    cv_varexplained_test.append(1 - (press_testset / tss))
                    total_press += press_testset
                else:
                    # RSS for row wise cross-validation
                    pred_scores = cv_pipeline.transform(x[xtest, :])
                    pred_x = cv_pipeline.scaler.transform(cv_pipeline.inverse_transform(pred_scores))
                    rss = np.sum(np.square(xtest_scaled - pred_x))
                    total_press += rss
                    cv_varexplained_test.append(1 - (rss / tss))
            # Create matrices for each component loading containing the cv values in each round
            # nrows = nrounds, ncolumns = n_variables
            # Check that the PCA model has loadings
            if hasattr(self.pca_algorithm, 'components_'):
                cv_loads = []
                for comp in range(0, self.ncomps):
                    cv_loads.append(np.array([x[comp] for x in loadings]))
                # Align loadings due to sign indeterminacy.
                # The solution followed here is to select the sign that gives a more similar profile to the
                # Loadings calculated with the whole data.
                # TODO add scores for CV scores, but still need to check the best way to do it properly
                # Don't want to enforce the common "just average everything" and interpret score plot behaviour...
                # NOTE(review): this indexes cv_method.n_splits; ShuffleSplit-like
                # validators also expose n_splits, but confirm for custom validators.
                for cvround in range(0, cv_method.n_splits):
                    for currload in range(0, self.ncomps):
                        choice = np.argmin(np.array([np.sum(np.abs(self.loadings - cv_loads[currload][cvround, :])),
                                                     np.sum(
                                                         np.abs(self.loadings - cv_loads[currload][cvround, :] * -1))]))
                        if choice == 1:
                            cv_loads[currload][cvround, :] = -1 * cv_loads[currload][cvround, :]
            # Calculate total sum of squares
            # Q^2X
            q_squared = 1 - (total_press / ss)
            # Assemble the dictionary and data matrices
            if self.cvParameters is not None:
                self.cvParameters['Mean_VarExpRatio_Training'] = np.array(cv_varexplained_training).mean(axis=0)
                self.cvParameters['Stdev_VarExpRatio_Training'] = np.array(cv_varexplained_training).std(axis=0)
                self.cvParameters['Mean_VarExp_Test'] = np.mean(cv_varexplained_test)
                self.cvParameters['Stdev_VarExp_Test'] = np.std(cv_varexplained_test)
                self.cvParameters['Q2X'] = q_squared
            else:
                self.cvParameters = {'Mean_VarExpRatio_Training': np.array(cv_varexplained_training).mean(axis=0),
                                     'Stdev_VarExpRatio_Training': np.array(cv_varexplained_training).std(axis=0),
                                     'Mean_VarExp_Test': np.mean(cv_varexplained_test),
                                     'Stdev_VarExp_Test': np.std(cv_varexplained_test),
                                     'Q2X': q_squared}
            if outputdist is True:
                self.cvParameters['CV_VarExpRatio_Training'] = cv_varexplained_training
                self.cvParameters['CV_VarExp_Test'] = cv_varexplained_test
            # Check that the PCA model has loadings
            if hasattr(self.pca_algorithm, 'components_'):
                self.cvParameters['Mean_Loadings'] = [np.mean(x, 0) for x in cv_loads]
                self.cvParameters['Stdev_Loadings'] = [np.std(x, 0) for x in cv_loads]
                if outputdist is True:
                    self.cvParameters['CV_Loadings'] = cv_loads
            return None
        except TypeError as terp:
            raise terp
        except ValueError as verr:
            raise verr
#@staticmethod
#def stop_cond(model, x):
# stop_check = getattr(model, modelParameters)
# if stop_check > 0:
# return True
# else:
# return False
def _screecv_optimize_ncomps(self, x, total_comps=5, cv_method=KFold(7, True), stopping_condition=None):
"""
Routine to optimize number of components quickly using Cross validation and stabilization of Q2X.
:param numpy.ndarray x: Data
:param int total_comps:
:param sklearn.BaseCrossValidator cv_method:
:param None or float stopping_condition:
:return:
"""
models = list()
for ncomps in range(1, total_comps + 1):
currmodel = deepcopy(self)
currmodel.ncomps = ncomps
currmodel.fit(x)
currmodel.cross_validation(x, outputdist=False, cv_method=cv_method, press_impute=False)
models.append(currmodel)
| |
import array
import errno
import json
import os
import random
import zipfile
from argparse import ArgumentParser
from collections import Counter
from os.path import dirname, abspath
import nltk
import six
import torch
from six.moves.urllib.request import urlretrieve
from tqdm import trange, tqdm
# Download locations for the pre-trained GloVe embedding archives.
URL = {
    'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',
    'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',
    'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
    'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip'
}
def _str2bool(value):
    """Parse a command-line boolean string.

    Accepts 'true'/'1'/'yes'/'y' -> True and 'false'/'0'/'no'/'n' -> False
    (case-insensitive). Raising ValueError lets argparse report an
    invalid-value error instead of silently accepting garbage.
    """
    lowered = value.strip().lower()
    if lowered in ('true', '1', 'yes', 'y'):
        return True
    if lowered in ('false', '0', 'no', 'n'):
        return False
    raise ValueError('expected a boolean, got %r' % value)


def get_args(argv=None):
    """Build and parse the R-net command-line arguments.

    :param argv: optional list of argument strings; defaults to sys.argv[1:].
        (New, backward-compatible parameter — existing callers are unaffected —
        that also makes the parser unit-testable.)
    :return: argparse.Namespace with all options.
    """
    parser = ArgumentParser(description='PyTorch R-net')
    parser.add_argument('--name', type=str, default="r-net")
    parser.add_argument('--device_id', type=int, default=None)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--epoch_num', type=int, default=50)
    parser.add_argument('--batch_size', type=int, default=48)
    parser.add_argument('--batch_size_dev', type=int, default=64)
    # BUG FIX: the original used type=bool, for which bool("False") is True —
    # every non-empty string parsed as True. _str2bool parses the text properly.
    parser.add_argument('--debug', type=_str2bool, default=False)
    parser.add_argument('--checkpoint_path', type=str, default="checkpoint")
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--train_json', type=str, default="./data/squad/train-v1.1.json")
    parser.add_argument('--dev_json', type=str, default="./data/squad/dev-v1.1.json")
    parser.add_argument('--update_word_embedding', type=_str2bool, default=False)
    parser.add_argument('--update_char_embedding', type=_str2bool, default=True)
    parser.add_argument('--hidden_size', type=int, default=75)
    parser.add_argument('--attention_size', type=int, default=75)
    parser.add_argument('--dropout', type=float, default=0.2)
    parser.add_argument('--residual', type=_str2bool, default=False)
    parser.add_argument('--bidirectional', type=_str2bool, default=True)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--app_path', type=str, default=dirname(dirname(abspath(__file__))))
    parser.add_argument('--pin_memory', type=_str2bool, default=False)
    args = parser.parse_args(argv)
    return args
def reporthook(t):
    """Adapt a tqdm instance *t* into a urlretrieve-compatible reporthook.

    Pattern taken from https://github.com/tqdm/tqdm. The returned closure
    converts urlretrieve's cumulative block count into the delta updates
    that tqdm expects.
    """
    state = {'last_b': 0}

    def inner(b=1, bsize=1, tsize=None):
        """
        b: int, optionala
            Number of blocks just transferred [default: 1].
        bsize: int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize: int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            t.total = tsize
        # tqdm.update takes a delta, so convert from cumulative block counts.
        t.update((b - state['last_b']) * bsize)
        state['last_b'] = b

    return inner
def load_word_vectors(root, wv_type, dim):
    """
    From https://github.com/pytorch/text/
    BSD 3-Clause License
    Copyright (c) <NAME> and <NAME> 2016,
    All rights reserved.
    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright notice, this
    list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.
    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    """
    """Load word vectors from a path, trying .pt, .txt, and .zip extensions."""
    # Normalize an int dimension (e.g. 300) to the GloVe filename suffix (e.g. '300d').
    if isinstance(dim, int):
        dim = str(dim) + 'd'
    fname = os.path.join(root, wv_type + '.' + dim)
    # Fast path: a previously converted torch cache (.pt) file.
    if os.path.isfile(fname + '.pt'):
        fname_pt = fname + '.pt'
        print('loading word vectors from', fname_pt)
        return torch.load(fname_pt)
    # Raw text vectors on disk: slurp all lines for parsing below.
    if os.path.isfile(fname + '.txt'):
        fname_txt = fname + '.txt'
        cm = open(fname_txt, 'rb')
        cm = [line for line in cm]
    elif os.path.basename(wv_type) in URL:
        # Not on disk but a known corpus: download the zip, extract, then retry recursively.
        url = URL[wv_type]
        print('downloading word vectors from {}'.format(url))
        filename = os.path.basename(fname)
        if not os.path.exists(root):
            os.makedirs(root)
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
            with zipfile.ZipFile(fname, "r") as zf:
                print('extracting word vectors into {}'.format(root))
                zf.extractall(root)
        if not os.path.isfile(fname + '.txt'):
            raise RuntimeError('no word vectors of requested dimension found')
        return load_word_vectors(root, wv_type, dim)
    else:
        raise RuntimeError('unable to load word vectors %s from %s' % (wv_type, root))
    # Parse the text format: one token per line, token followed by its vector entries.
    wv_tokens, wv_arr, wv_size = [], array.array('d'), None
    if cm is not None:
        print("Loading word vectors from {}".format(fname_txt))
        for line in trange(len(cm)):
            entries = cm[line].strip().split(b' ')
            word, entries = entries[0], entries[1:]
            # Vector width is taken from the first line and assumed constant thereafter.
            if wv_size is None:
                wv_size = len(entries)
            try:
                if isinstance(word, six.binary_type):
                    word = word.decode('utf-8')
            except:
                # NOTE(review): bare except silently drops any undecodable token;
                # consider narrowing to UnicodeDecodeError.
                print('non-UTF8 token', repr(word), 'ignored')
                continue
            wv_arr.extend(float(x) for x in entries)
            wv_tokens.append(word)
    wv_dict = {word: i for i, word in enumerate(wv_tokens)}
    wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)
    ret = (wv_dict, wv_arr, wv_size)
    # Cache the parsed result as .pt so the next call takes the fast path above.
    torch.save(ret, fname + '.pt')
    return ret
class RawExample(object):
    """Plain attribute container for one (passage, question, answer) example.

    Fields (title, passage, question, question_id, answer_start, answer_text)
    are assigned ad hoc by read_train_json / read_dev_json.
    """
    pass
def make_dirs(name):
    """Create directory *name* (including parents), tolerating pre-existing dirs.

    Portable across Python 2 and 3 (no exist_ok): an OSError is swallowed only
    when the path already exists as a directory; anything else is re-raised.
    """
    try:
        os.makedirs(name)
    except OSError as ex:
        already_a_dir = ex.errno == errno.EEXIST and os.path.isdir(name)
        if not already_a_dir:
            # a different error happened
            raise
class TqdmUpTo(tqdm):
    """tqdm subclass exposing `update_to`, a urlretrieve-style progress callback."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """
        b : int, optional
            Number of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        # tqdm.update takes a delta; this also leaves self.n == downloaded.
        self.update(downloaded - self.n)
def maybe_download(url, download_path, filename):
    """Download *url* into download_path/filename with a progress bar, unless it already exists.

    NOTE(review): the log message concatenates `url + filename`, but urlretrieve
    is called with `url` alone — presumably url already names the full file;
    confirm against callers.
    """
    if not os.path.exists(os.path.join(download_path, filename)):
        try:
            print("Downloading file {}...".format(url + filename))
            with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
                local_filename, _ = urlretrieve(url, os.path.join(download_path, filename), reporthook=t.update_to)
        except AttributeError as e:
            # Download machinery unavailable/incompatible: tell the user to fetch manually, then re-raise.
            print("An error occurred when downloading the file! Please get the dataset using a browser.")
            raise e
def read_train_json(path, debug_mode, debug_len, delete_long_context=True, delete_long_question=True,
                    longest_context=300, longest_question=30):
    """Parse a SQuAD v1.1 train JSON file into a flat list of RawExample objects.

    Contexts/questions longer (in nltk tokens) than the given limits are
    skipped when the corresponding delete_* flag is set. In debug mode,
    parsing stops as soon as debug_len examples have been collected.
    """
    with open(path) as fin:
        dataset = json.load(fin)
    examples = []
    for article in dataset["data"]:
        article_title = article["title"]
        for paragraph in article['paragraphs']:
            context = paragraph['context']
            if delete_long_context and len(nltk.word_tokenize(context)) > longest_context:
                continue
            for qa in paragraph['qas']:
                question_text = qa["question"]
                if delete_long_question and len(nltk.word_tokenize(question_text)) > longest_question:
                    continue
                qid = qa["id"]
                # One RawExample per annotated answer of this question.
                for ans in qa["answers"]:
                    example = RawExample()
                    example.title = article_title
                    example.passage = context
                    example.question = question_text
                    example.question_id = qid
                    example.answer_start = int(ans["answer_start"])
                    example.answer_text = ans["text"]
                    examples.append(example)
                # Debug cutoff is checked per question, after all its answers are added.
                if debug_mode and len(examples) >= debug_len:
                    return examples
    print("train examples :%s" % len(examples))
    return examples
def get_counter(*seqs):
    """Count word and character frequencies across sequences of tokenized documents.

    :param seqs: each argument is a sequence of documents; each document is a list of words
    :return: (word_counter, char_counter) — plain dicts mapping token -> frequency
    """
    word_counter = {}
    char_counter = {}
    for seq in seqs:
        for doc in seq:
            for word in doc:
                word_counter[word] = word_counter.get(word, 0) + 1
                for ch in word:
                    char_counter[ch] = char_counter.get(ch, 0) + 1
    return word_counter, char_counter
def read_dev_json(path, debug_mode, debug_len):
    """Parse a SQuAD v1.1 dev JSON file into a list of RawExample objects.

    For each question, the canonical answer is chosen by majority vote over
    the annotated answer_start positions; when every start is unique, one
    annotation is picked uniformly at random.

    :param path: path to dev-v1.1.json
    :param debug_mode: if True, stop after debug_len examples
    :param debug_len: maximum number of examples in debug mode
    :return: list of RawExample
    """
    with open(path) as fin:
        data = json.load(fin)
    examples = []
    for topic in data["data"]:
        title = topic["title"]
        for p in topic['paragraphs']:
            qas = p['qas']
            context = p['context']
            for qa in qas:
                question = qa["question"]
                answers = qa["answers"]
                question_id = qa["id"]
                answer_start_list = [ans["answer_start"] for ans in answers]
                c = Counter(answer_start_list)
                most_common_answer, freq = c.most_common()[0]
                answer_text = None
                answer_start = None
                if freq > 1:
                    # Majority case: take the first annotation matching the modal start.
                    for i, ans_start in enumerate(answer_start_list):
                        if ans_start == most_common_answer:
                            answer_text = answers[i]["text"]
                            answer_start = answers[i]["answer_start"]
                            break
                else:
                    # BUG FIX: the original drew two independent random indices, so
                    # answer_text and answer_start could come from DIFFERENT
                    # annotations. Draw a single index and use it for both fields.
                    idx = random.randrange(len(answers))
                    answer_text = answers[idx]["text"]
                    answer_start = answers[idx]["answer_start"]
                e = RawExample()
                e.title = title
                e.passage = context
                e.question = question
                e.question_id = question_id
                e.answer_start = answer_start
                e.answer_text = answer_text
                examples.append(e)
                if debug_mode and len(examples) >= debug_len:
                    return examples
    return examples
def tokenized_by_answer(context, answer_text, answer_start, tokenizer):
    """Tokenize *context* and locate the answer span at token level.

    The char-level answer position is converted to token indices by tokenizing
    the text before, inside, and after the answer separately (snippet adapted
    from https://github.com/haichao592/squad-tf/blob/master/dataset.py).

    :param context: passage text
    :param answer_text: answer substring of the passage
    :param answer_start: answer start position (char level)
    :param tokenizer: tokenize function (str -> list of tokens)
    :return: (tokens, answer start index, answer end index inclusive),
        or None if the answer tokenizes differently in isolation vs. in context
    """
    answer_end = answer_start + len(answer_text)
    before_tokens = tokenizer(context[:answer_start])
    answer_tokens = tokenizer(context[answer_start:answer_end])
    after_tokens = tokenizer(context[answer_end:])
    # Sanity check: the answer tokenized alone must match its in-context tokens.
    for isolated, in_context in zip(tokenizer(answer_text), answer_tokens):
        if isolated != in_context:
            return None
    tokens = []
    tokens.extend(before_tokens)
    tokens.extend(answer_tokens)
    tokens.extend(after_tokens)
    first = len(before_tokens)
    last = first + len(answer_tokens) - 1
    return tokens, first, last
def truncate_word_counter(word_counter, max_symbols):
    """Keep at most *max_symbols* entries of a {word: freq} counter.

    NOTE(review): the ascending (freq, word) sort keeps the LOWEST-frequency
    words — preserved here for behavioral compatibility, but confirm whether
    the most frequent words were intended instead.
    """
    ranked = sorted((freq, word) for word, freq in word_counter.items())
    return {word: freq for freq, word in ranked[:max_symbols]}
def read_embedding(root, word_type, dim):
    """Thin wrapper around load_word_vectors.

    :return: (word -> index dict, embedding tensor, vector size) as a 3-tuple.
    """
    return load_word_vectors(root, word_type, dim)
def get_rnn(rnn_type):
    """Map an RNN type name ('gru' or 'lstm', case-insensitive) to its torch.nn class.

    :raises ValueError: for any other name.
    """
    lookup = {"gru": torch.nn.GRU, "lstm": torch.nn.LSTM}
    key = rnn_type.lower()
    if key not in lookup:
        raise ValueError("Invalid RNN type %s" % key)
    return lookup[key]
def sort_idx(seq):
    """Return the index permutation that sorts *seq* ascending by element value.

    :param seq: 1-D indexable with a .size(0) (e.g. a tensor/variable)
    :return: list of int indices
    """
    indices = range(seq.size(0))
    return sorted(indices, key=seq.__getitem__)
def prepare_data():
make_dirs("data/cache")
make_dirs("data/embedding/char")
make_dirs("data/embedding/word")
make_dirs("data/squad")
make_dirs("data/trained_model")
make_dirs("checkpoint")
nltk.download("punkt")
train_filename = "train-v1.1.json"
dev_filename = "dev-v1.1.json"
squad_base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
train_url = os.path.join(squad_base_url, train_filename)
dev_url = os.path.join(squad_base_url, dev_filename)
download_prefix = os.path.join("data", "squad")
maybe_download(train_url, download_prefix, train_filename)
maybe_download(dev_url, download_prefix, dev_filename)
char_embedding_pretrain_url | |
#@String input_file_location
#@String subdir
#@String out_subdir_tag
#@String rows
#@String columns
#@String imperwell
#@String stitchorder
#@String channame
#@String size
#@String overlap_pct
#@String tileperside
#@String filterstring
#@String scalingstring
#@String awsdownload
#@String bucketname
#@String localtemp
#@String downloadfilter
#@String round_or_square
#@String final_tile_size
#@String xoffset_tiles
#@String yoffset_tiles
#@String compress
from ij import IJ, WindowManager
import os
import string
import sys
import time
from loci.plugins.out import Exporter
from loci.plugins import LociExporter
# Shared Bio-Formats exporter instance; configured per image in savefile() via plugin.arg.
plugin = LociExporter()
def tiffextend(imname):
    """Return imname unchanged if it already contains '.tif'; otherwise force a '.tiff' extension.

    Any other extension is replaced (everything from the first '.' onward);
    an extensionless name simply gets '.tiff' appended.
    """
    if '.tif' in imname:
        return imname
    if '.' in imname:
        base = imname[:imname.index('.')]
        return base + '.tiff'
    return imname + '.tiff'
def savefile(im,imname,plugin,compress='false'):
    """Save ImagePlus *im* to *imname* (name forced to a .tif/.tiff extension).

    With compress != 'true' this is a plain ImageJ "tiff" save; otherwise the
    Bio-Formats exporter writes LZW-compressed output, retrying up to 5 times.
    NOTE(review): when all 5 attempts fail, the failure is only printed — the
    image is silently NOT saved; confirm this best-effort behavior is intended.
    """
    attemptcount = 0
    imname = tiffextend(imname)
    print('Saving ',imname,im.width,im.height)
    if compress.lower()!='true':
        IJ.saveAs(im, "tiff",imname)
    else:
        while attemptcount <5:
            try:
                # The Bio-Formats exporter reads all of its options from plugin.arg.
                plugin.arg="outfile="+imname+" windowless=true compression=LZW saveROI=false"
                exporter = Exporter(plugin, im)
                exporter.run()
                print('Succeeded after attempt ',attemptcount)
                return
            except:
                attemptcount +=1
        print('failed 5 times at saving')
# Root folder for all stitched / cropped / downsampled outputs.
top_outfolder = 'output'
if not os.path.exists(top_outfolder):
    os.mkdir(top_outfolder)
# Define and create the parent folders where the images will be output
# NOTE(review): assumes subdir uses '/' separators and that [-2] is the
# pipeline-step name (i.e. subdir ends with a trailing slash) — confirm.
step_being_stitched = subdir.split("/")[-2]
outfolder = os.path.join(top_outfolder,(step_being_stitched + '_stitched'))
tile_outdir = os.path.join(top_outfolder,(step_being_stitched + '_cropped'))
downsample_outdir = os.path.join(top_outfolder,(step_being_stitched + '_stitched_10X'))
if not os.path.exists(outfolder):
    os.mkdir(outfolder)
if not os.path.exists(tile_outdir):
    os.mkdir(tile_outdir)
if not os.path.exists(downsample_outdir):
    os.mkdir(downsample_outdir)
# Define and create the batch-specific subfolders where the images will be output
out_subdir=os.path.join(outfolder, out_subdir_tag)
tile_subdir=os.path.join(tile_outdir, out_subdir_tag)
downsample_subdir=os.path.join(downsample_outdir, out_subdir_tag)
if not os.path.exists(tile_subdir):
    os.mkdir(tile_subdir)
if not os.path.exists(downsample_subdir):
    os.mkdir(downsample_subdir)
if not os.path.exists(out_subdir):
    os.mkdir(out_subdir)
# Resolve the input directory relative to the mounted input location.
subdir=os.path.join(input_file_location,subdir)
if awsdownload == 'True':
    if not os.path.exists(localtemp):
        os.mkdir(localtemp)
    import subprocess
    # Sync only the files matching downloadfilter from S3 into the local temp folder.
    # NOTE(review): assumes subdir contains 'ubuntu/bucket/' exactly once — confirm mount layout.
    cmd = ['aws','s3','sync','--exclude', '*', '--include', str(downloadfilter)]
    cmd += ['s3://'+str(bucketname)+'/'+ str(subdir).split('ubuntu/bucket/')[1], str(localtemp)]
    print('Running', cmd)
    subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Stream the sync output line by line until the process exits.
    while True:
        output= subp.stdout.readline()
        if output== '' and subp.poll() is not None:
            break
        if output:
            print(output.strip())
    # Examine all downloaded files and make a list of the tifs
    tiflist=[]
    for root, dirs, files in os.walk(localtemp):
        for name in files:
            if '.tif' in name:
                if 'Overlay' not in name:
                    tiflist.append([root,name])
    print(len(tiflist), 'tifs found')
    for eachtif in tiflist:
        # If the tif is in a subfolder
        if eachtif[0]!=localtemp:
            # Flatten the layout: move every tif up into localtemp itself.
            os.rename(os.path.join(eachtif[0],eachtif[1]),os.path.join(localtemp,eachtif[1]))
            print ("Successfully moved", os.path.join(localtemp,eachtif[1]))
    # From here on, stitch from the local copy instead of the mounted bucket.
    subdir = localtemp
if os.path.isdir(subdir):
    # Scan the input directory and collect the wells and channel (prefix, suffix)
    # pairs present. File names are assumed to look like:
    #   <prefix>_Well_<well>_Site_<site>_<channel suffix>
    dirlist=os.listdir(subdir)
    welllist=[]
    presuflist = []
    # permprefix/permsuffix record the reference channel (matching channame)
    # that drives the stitching registration.
    permprefix = None
    permsuffix = None
    for eachfile in dirlist:
        if '.tif' in eachfile:
            if filterstring in eachfile:
                if 'Overlay' not in eachfile:
                    prefixBeforeWell,suffixWithWell=eachfile.split('_Well_')
                    Well,suffixAfterWell=suffixWithWell.split('_Site_')
                    channelSuffix = suffixAfterWell[suffixAfterWell.index('_')+1:]
                    if (prefixBeforeWell,channelSuffix) not in presuflist:
                        presuflist.append((prefixBeforeWell,channelSuffix))
                    if Well not in welllist:
                        welllist.append(Well)
                    if channame in channelSuffix:
                        if permprefix == None:
                            permprefix=prefixBeforeWell
                            permsuffix=channelSuffix
    # Drop pairs whose suffix is not a .tif/.tiff file name.
    # NOTE(review): removing items from a list while iterating it can skip
    # elements — confirm this is acceptable here.
    for eachpresuf in presuflist:
        if eachpresuf[1][-4:]!='.tif':
            if eachpresuf[1][-5:]!='.tiff':
                presuflist.remove(eachpresuf)
    presuflist.sort()
    print welllist, presuflist
if round_or_square == 'square':
stitchedsize=int(rows)*int(size)
tileperside=int(tileperside)
scale_factor=float(scalingstring)
rounded_scale_factor=int(round(scale_factor))
upscaledsize=int(stitchedsize*rounded_scale_factor)
if upscaledsize > 46340:
upscaledsize = 46340
tilesize=int(upscaledsize/tileperside)
for eachwell in welllist:
standard_grid_instructions=["type=["+stitchorder+"] order=[Right & Down ] grid_size_x="+rows+" grid_size_y="+columns+" tile_overlap="+overlap_pct+" first_file_index_i=0 directory="+subdir+" file_names=",
" output_textfile_name=TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save computation time (but use more RAM)] image_output=[Fuse and display]"]
copy_grid_instructions="type=[Positions from file] order=[Defined by TileConfiguration] directory="+subdir+" layout_file=TileConfiguration.registered_copy.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 ignore_z_stage computation_parameters=[Save computation time (but use more RAM)] image_output=[Fuse and display]"
filename=permprefix+'_Well_'+eachwell+'_Site_{i}_'+permsuffix
fileoutname='Stitched'+filename.replace("{i}","")
IJ.run("Grid/Collection stitching", standard_grid_instructions[0] + filename + standard_grid_instructions[1])
im=IJ.getImage()
#We're going to overwrite this file later, but it gives is a chance for an early checkpoint
#This doesn't seem to play nicely with the compression option on, it doesn't get overwritten later and bad things happen
if compress.lower()!='true':
savefile(im,os.path.join(out_subdir,fileoutname),plugin,compress=compress)
IJ.run("Close All")
for eachpresuf in presuflist:
thisprefix, thissuffix=eachpresuf
thissuffixnicename = thissuffix.split('.')[0]
if thissuffixnicename[0]=='_':
thissuffixnicename=thissuffixnicename[1:]
tile_subdir_persuf = os.path.join(tile_subdir,thissuffixnicename)
if not os.path.exists(tile_subdir_persuf):
os.mkdir(tile_subdir_persuf)
filename=thisprefix+'_Well_'+eachwell+'_Site_{i}_'+thissuffix
fileoutname='Stitched'+filename.replace("{i}","")
with open(os.path.join(subdir, 'TileConfiguration.registered.txt'),'r') as infile:
with open(os.path.join(subdir, 'TileConfiguration.registered_copy.txt'),'w') as outfile:
for line in infile:
line=line.replace(permprefix,thisprefix)
line=line.replace(permsuffix,thissuffix)
outfile.write(line)
IJ.run("Grid/Collection stitching", copy_grid_instructions)
im=IJ.getImage()
width = str(int(round(im.width*float(scalingstring))))
height = str(int(round(im.height*float(scalingstring))))
print("Scale...", "x="+scalingstring+" y="+scalingstring+" width="+width+" height="+height+" interpolation=Bilinear average create")
IJ.run("Scale...", "x="+scalingstring+" y="+scalingstring+" width="+width+" height="+height+" interpolation=Bilinear average create")
time.sleep(15)
im2=IJ.getImage()
print("Canvas Size...", "width="+str(upscaledsize)+" height="+str(upscaledsize)+" position=Top-Left zero")
IJ.run("Canvas Size...", "width="+str(upscaledsize)+" height="+str(upscaledsize)+" position=Top-Left zero")
time.sleep(15)
im3=IJ.getImage()
savefile(im3,os.path.join(out_subdir,fileoutname),plugin,compress=compress)
im=IJ.getImage()
print("Scale...", "x=0.1 y=0.1 width="+str(im.width/10)+" height="+str(im.width/10)+" interpolation=Bilinear average create")
im_10=IJ.run("Scale...", "x=0.1 y=0.1 width="+str(im.width/10)+" height="+str(im.width/10)+" interpolation=Bilinear average create")
im_10=IJ.getImage()
savefile(im_10,os.path.join(downsample_subdir,fileoutname),plugin,compress=compress)
IJ.run("Close All")
im=IJ.open(os.path.join(out_subdir,fileoutname))
im = IJ.getImage()
for eachxtile in range(tileperside):
for eachytile in range(tileperside):
each_tile_num = eachxtile*tileperside + eachytile + 1
IJ.makeRectangle(eachxtile*tilesize, eachytile*tilesize,tilesize,tilesize)
im_tile=im.crop()
savefile(im_tile,os.path.join(tile_subdir_persuf,thissuffixnicename+'_Site_'+str(each_tile_num)+'.tiff'),plugin,compress=compress)
IJ.run("Close All")
elif round_or_square == 'round':
if imperwell == '1364':
row_widths = [8,14,18,22,26,28,30,
32,34,34,36,36,38,38,
40,40,40,42,42,42,42,
42,42,42,42,40,40,40,
38,38,36,36,34,34,32,
30,28,26,22,18,14,8]
elif imperwell == '1332':
row_widths = [14,18,22,26,28,30,
32,34,34,36,36,38,38,
40,40,40,40,40,40,40,
40,40,40,40,40,40,40,
38,38,36,36,34,34,32,
30,28,26,22,18,14]
elif imperwell == '320':
row_widths = [4, 8, 12, 14, 16,
18, 18, 20, 20, 20,
20, 20, 20, 20, 18,
18, 16, 14, 12, 8, 4]
elif imperwell == '316':
row_widths = [6, 10, 14, 16, 16,
18, 18, 20, 20, 20,
20, 20, 20, 18, 18,
16, 16, 14, 10, 6]
else:
print(imperwell, "images/well for a round well is not currently supported")
sys.exit()
rows = str(len(row_widths))
columns = str(max(row_widths))
# xoffset_tiles and yoffset_tiles can be used if you need to adjust the "where to draw the line between quarters"
# by a whole tile. You may want to add more padding if you do this
top_rows = str((int(rows)/2)+int(yoffset_tiles))
left_columns = str((int(columns)/2)+int(xoffset_tiles))
bot_rows = str(int(rows)-int(top_rows))
right_columns = str(int(columns)-int(left_columns))
scale_factor=float(scalingstring)
rounded_scale_factor=int(round(scale_factor))
#For upscaled row and column size, we're always going to use the biggest number, we'd rather pad than miss stuff
#Because we can't assure same final tile size now either, now we need to specify it, ugh, and make sure the padding is big enough
max_val = max(int(top_rows),int(bot_rows),int(left_columns),int(right_columns))
upscaled_row_size=int(size)*max_val*rounded_scale_factor
tiles_per_quarter = int(tileperside)/2
tilesize=int(final_tile_size)
if tilesize * tiles_per_quarter > upscaled_row_size:
upscaled_row_size = tilesize * tiles_per_quarter
upscaled_col_size=upscaled_row_size
pixels_to_crop = int(round(int(size)*float(overlap_pct)/200))
pos_dict = {}
count = 0
for row in range(len(row_widths)):
row_width = row_widths[row]
left_pos = int((int(columns)-row_width)/2)
for col in range(row_width):
if row%2 == 0:
pos_dict[(int(left_pos + col), row)] = str(count)
count += 1
else:
right_pos = left_pos + row_width - 1
pos_dict[(int(right_pos - col), row)]= str(count)
count += 1
filled_positions = pos_dict.keys()
emptylist = []
for eachwell in welllist:
for eachpresuf in presuflist:
thisprefix, thissuffix=eachpresuf
for x in range(int(columns)):
for y in range(int(rows)):
out_name = thisprefix+'_Well_'+eachwell+'_x_'+ '%02d'%x+'_y_'+'%02d'%y+ '_'+ thissuffix
if (x,y) in filled_positions:
series = pos_dict[(x,y)]
in_name = thisprefix+'_Well_'+eachwell+'_Site_'+str(series)+'_'+thissuffix
IJ.open(os.path.join(subdir,in_name))
else:
IJ.newImage("Untitled", "16-bit noise",int(size),int(size), 1)
IJ.run("Divide...", "value=300") #get the noise value below the real camera noise level
emptylist.append(out_name)
im = IJ.getImage()
IJ.saveAs(im,'tiff',os.path.join(subdir, out_name))
IJ.run("Close All")
if (x,y) in filled_positions:
try: #try to clean up after yourself, but don't die if you can't
os.remove(os.path.join(subdir,in_name))
except:
pass
print("Renamed all files for prefix "+thisprefix+" and suffix "+thissuffix+" in well "+eachwell)
imagelist = os.listdir(subdir)
print(len(imagelist), 'files in subdir')
print(imagelist[:10])
#top left quarter
print('Running top left')
#Change per quarter
standard_grid_instructions=["type=[Filename defined position] order=[Defined by filename ] grid_size_x="+str(left_columns)+" grid_size_y="+top_rows+" tile_overlap="+overlap_pct+" first_file_index_x=0 first_file_index_y=0 directory="+subdir+" file_names=",
" output_textfile_name=TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save computation time (but use more RAM)] image_output=[Fuse and display]"]
copy_grid_instructions="type=[Positions from file] order=[Defined by TileConfiguration] directory="+subdir+" layout_file=TileConfiguration.registered_copy.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 ignore_z_stage computation_parameters=[Save computation time (but use more RAM)] image_output=[Fuse and display]"
filename=permprefix+'_Well_'+eachwell+'_x_{xx}_y_{yy}_'+permsuffix
#Change per quarter
fileoutname='StitchedTopLeft'+filename.replace("{xx}","").replace("{yy}","")
instructions = standard_grid_instructions[0] + filename + standard_grid_instructions[1]
print(instructions)
IJ.run("Grid/Collection stitching", instructions)
im=IJ.getImage()
#We're going to overwrite this file later, but it gives us a chance for an early checkpoint
#This doesn't seem to play nicely with the compression option on, it doesn't get overwritten later and bad things happen
if compress.lower()!='true':
savefile(im,os.path.join(out_subdir,fileoutname),plugin,compress=compress)
IJ.run("Close All")
for eachpresuf in presuflist:
thisprefix, thissuffix=eachpresuf
thissuffixnicename = thissuffix.split('.')[0]
if thissuffixnicename[0]=='_':
thissuffixnicename=thissuffixnicename[1:]
tile_subdir_persuf = os.path.join(tile_subdir,thissuffixnicename)
if not os.path.exists(tile_subdir_persuf):
os.mkdir(tile_subdir_persuf)
filename=thisprefix+'_Well_'+eachwell+'_x_{xx}_y_{yy}_'+thissuffix
#Change per quarter
fileoutname='StitchedTopLeft'+filename.replace("{xx}","").replace("{yy}","")
with open(os.path.join(subdir, 'TileConfiguration.registered.txt'),'r') as infile:
with open(os.path.join(subdir, 'TileConfiguration.registered_copy.txt'),'w') as outfile:
for line in infile:
if not any([empty in line for empty in emptylist]):
line=line.replace(permprefix,thisprefix)
line=line.replace(permsuffix,thissuffix)
outfile.write(line)
IJ.run("Grid/Collection stitching", copy_grid_instructions)
im0=IJ.getImage()
#chop off the bottom and right
#Change per quarter
IJ.makeRectangle(0,0,im0.width-pixels_to_crop,im0.height-pixels_to_crop)
im1=im0.crop()
width = str(int(round(im1.width*float(scalingstring))))
height = str(int(round(im1.height*float(scalingstring))))
print("Scale...", "x="+scalingstring+" y="+scalingstring+" width="+width+" height="+height+" interpolation=Bilinear average create")
IJ.run("Scale...", "x="+scalingstring+" y="+scalingstring+" width="+width+" height="+height+" interpolation=Bilinear average create")
time.sleep(15)
im2=IJ.getImage()
#Chnage per quarter
print("Canvas Size...", "width="+str(upscaled_col_size)+" height="+str(upscaled_row_size)+" position=Bottom-Right zero")
IJ.run("Canvas Size...", "width="+str(upscaled_col_size)+" height="+str(upscaled_row_size)+" position=Bottom-Right zero")
time.sleep(15)
im3=IJ.getImage()
savefile(im3,os.path.join(out_subdir,fileoutname),plugin,compress=compress)
im=IJ.getImage()
print("Scale...", "x=0.1 y=0.1 width="+str(im.width/10)+" height="+str(im.width/10)+" interpolation=Bilinear average create")
im_10=IJ.run("Scale...", "x=0.1 y=0.1 width="+str(im.width/10)+" height="+str(im.width/10)+" interpolation=Bilinear average create")
im_10=IJ.getImage()
savefile(im_10,os.path.join(downsample_subdir,fileoutname),plugin,compress=compress)
IJ.run("Close All")
im=IJ.open(os.path.join(out_subdir,fileoutname))
im = IJ.getImage()
tile_offset = upscaled_row_size - (tilesize * tiles_per_quarter)
for eachxtile in range(tiles_per_quarter):
for eachytile in range(tiles_per_quarter):
#Change per quarter
each_tile_num = eachxtile*int(tileperside) + eachytile + 1
#Change per quarter
IJ.makeRectangle((eachxtile*tilesize)+tile_offset, (eachytile*tilesize)+tile_offset,tilesize,tilesize)
im_tile=im.crop()
savefile(im_tile,os.path.join(tile_subdir_persuf,thissuffixnicename+'_Site_'+str(each_tile_num)+'.tiff'),plugin,compress=compress)
IJ.run("Close All")
#top right quarter
print('Running top right')
#Change per quarter
standard_grid_instructions=["type=[Filename defined position] order=[Defined by filename ] grid_size_x="+str(right_columns)+" grid_size_y="+top_rows+" tile_overlap="+overlap_pct+" first_file_index_x="+str(left_columns)+" first_file_index_y=0 directory="+subdir+" file_names=",
" output_textfile_name=TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap computation_parameters=[Save computation time (but use | |
INT, self.FOLLOW_INT_in_assign_types2319)
INT206_tree = self._adaptor.createWithPayload(INT206)
self._adaptor.addChild(root_0, INT206_tree)
elif alt33 == 6:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:248:78: BOOL
pass
root_0 = self._adaptor.nil()
BOOL207 = self.match(self.input, BOOL, self.FOLLOW_BOOL_in_assign_types2323)
BOOL207_tree = self._adaptor.createWithPayload(BOOL207)
self._adaptor.addChild(root_0, BOOL207_tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "assign_types"
    class math_op_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the math_op rule; carries the rule's AST in .tree."""
        def __init__(self):
            super().__init__()
            self.tree = None
# $ANTLR start "math_op"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:249:5: math_op : val_range ( PLUS | MINUS ) val_range ;
    def math_op(self, ):
        # ANTLR-generated rule method for: math_op : val_range ( PLUS | MINUS ) val_range ;
        # Do not hand-edit the logic; regenerate from ProtoCC.g instead.
        retval = self.math_op_return()
        retval.start = self.input.LT(1)
        root_0 = None
        set209 = None
        val_range208 = None
        val_range210 = None
        set209_tree = None
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:249:13: ( val_range ( PLUS | MINUS ) val_range )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:249:15: val_range ( PLUS | MINUS ) val_range
                pass 
                root_0 = self._adaptor.nil()
                # Left operand.
                self._state.following.append(self.FOLLOW_val_range_in_math_op2334)
                val_range208 = self.val_range()
                self._state.following.pop()
                self._adaptor.addChild(root_0, val_range208.tree)
                # Operator: match either PLUS or MINUS.
                set209 = self.input.LT(1)
                if self.input.LA(1) in {MINUS, PLUS}:
                    self.input.consume()
                    self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set209))
                    self._state.errorRecovery = False
                else:
                    mse = MismatchedSetException(None, self.input)
                    raise mse
                # Right operand.
                self._state.following.append(self.FOLLOW_val_range_in_math_op2344)
                val_range210 = self.val_range()
                self._state.following.pop()
                self._adaptor.addChild(root_0, val_range210.tree)
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException as re:
                # Standard ANTLR recovery: report, resync, and emit an error node.
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
# $ANTLR end "math_op"
    class conditional_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the conditional rule; carries the rule's AST in .tree."""
        def __init__(self):
            super().__init__()
            self.tree = None
# $ANTLR start "conditional"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:252:1: conditional : ( if_stmt | ifnot_stmt );
    def conditional(self, ):
        # ANTLR-generated rule method for: conditional : ( if_stmt | ifnot_stmt );
        # Do not hand-edit the logic; regenerate from ProtoCC.g instead.
        retval = self.conditional_return()
        retval.start = self.input.LT(1)
        root_0 = None
        if_stmt211 = None
        ifnot_stmt212 = None
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:252:12: ( if_stmt | ifnot_stmt )
                alt34 = 2
                LA34_0 = self.input.LA(1)
                # Both alternatives start with IF; the second lookahead token
                # (NEG) selects ifnot_stmt, otherwise if_stmt.
                if (LA34_0 == IF) :
                    LA34_1 = self.input.LA(2)
                    if (LA34_1 == NEG) :
                        alt34 = 2
                    elif (LA34_1 in {BOOL, ID, INT, OBRACE}) :
                        alt34 = 1
                    else:
                        nvae = NoViableAltException("", 34, 1, self.input)
                        raise nvae
                else:
                    nvae = NoViableAltException("", 34, 0, self.input)
                    raise nvae
                if alt34 == 1:
                    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:252:14: if_stmt
                    pass 
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_if_stmt_in_conditional2353)
                    if_stmt211 = self.if_stmt()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, if_stmt211.tree)
                elif alt34 == 2:
                    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:252:24: ifnot_stmt
                    pass 
                    root_0 = self._adaptor.nil()
                    self._state.following.append(self.FOLLOW_ifnot_stmt_in_conditional2357)
                    ifnot_stmt212 = self.ifnot_stmt()
                    self._state.following.pop()
                    self._adaptor.addChild(root_0, ifnot_stmt212.tree)
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException as re:
                # Standard ANTLR recovery: report, resync, and emit an error node.
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
# $ANTLR end "conditional"
class if_stmt_return(ParserRuleReturnScope):
    # ANTLR-generated return scope for the "if_stmt" rule.
    # `tree` receives the rewritten IFELSE_ AST built by if_stmt().
    def __init__(self):
        super().__init__()
        self.tree = None
# $ANTLR start "if_stmt"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:253:5: if_stmt : IF cond_comb OCBRACE if_expression CCBRACE ( ELSE OCBRACE else_expression CCBRACE )* -> {t_else}? ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ( else_expression )* ENDIF_ ) ) -> ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ENDIF_ ) ) ;
def if_stmt(self, ):
    """Parse `if_stmt`: IF cond_comb { if_expression } [ ELSE { else_expression } ]*.

    ANTLR-generated. The grammar action sets `t_else` to 1 when at least
    one ELSE clause is matched; the AST rewrite then emits one of two
    IFELSE_ tree shapes (with or without the else branch's expressions),
    as spelled out in the grammar comment above.
    """
    retval = self.if_stmt_return()
    retval.start = self.input.LT(1)

    root_0 = None

    IF213 = None
    OCBRACE215 = None
    CCBRACE217 = None
    ELSE218 = None
    OCBRACE219 = None
    CCBRACE221 = None
    cond_comb214 = None
    if_expression216 = None
    else_expression220 = None

    IF213_tree = None
    OCBRACE215_tree = None
    CCBRACE217_tree = None
    ELSE218_tree = None
    OCBRACE219_tree = None
    CCBRACE221_tree = None
    stream_OCBRACE = RewriteRuleTokenStream(self._adaptor, "token OCBRACE")
    stream_ELSE = RewriteRuleTokenStream(self._adaptor, "token ELSE")
    stream_IF = RewriteRuleTokenStream(self._adaptor, "token IF")
    stream_CCBRACE = RewriteRuleTokenStream(self._adaptor, "token CCBRACE")
    stream_else_expression = RewriteRuleSubtreeStream(self._adaptor, "rule else_expression")
    stream_if_expression = RewriteRuleSubtreeStream(self._adaptor, "rule if_expression")
    stream_cond_comb = RewriteRuleSubtreeStream(self._adaptor, "rule cond_comb")
    t_else = 0
    try:
        try:
            # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:254:5: ( IF cond_comb OCBRACE if_expression CCBRACE ( ELSE OCBRACE else_expression CCBRACE )* -> {t_else}? ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ( else_expression )* ENDIF_ ) ) -> ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ENDIF_ ) ) )
            # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:254:7: IF cond_comb OCBRACE if_expression CCBRACE ( ELSE OCBRACE else_expression CCBRACE )*
            pass
            IF213 = self.match(self.input, IF, self.FOLLOW_IF_in_if_stmt2376)
            stream_IF.add(IF213)

            self._state.following.append(self.FOLLOW_cond_comb_in_if_stmt2378)
            cond_comb214 = self.cond_comb()
            self._state.following.pop()
            stream_cond_comb.add(cond_comb214.tree)

            OCBRACE215 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_if_stmt2380)
            stream_OCBRACE.add(OCBRACE215)

            self._state.following.append(self.FOLLOW_if_expression_in_if_stmt2382)
            if_expression216 = self.if_expression()
            self._state.following.pop()
            stream_if_expression.add(if_expression216.tree)

            CCBRACE217 = self.match(self.input, CCBRACE, self.FOLLOW_CCBRACE_in_if_stmt2384)
            stream_CCBRACE.add(CCBRACE217)

            # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:255:5: ( ELSE OCBRACE else_expression CCBRACE )*
            while True: #loop35
                alt35 = 2
                LA35_0 = self.input.LA(1)

                if (LA35_0 == ELSE) :
                    alt35 = 1

                if alt35 == 1:
                    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:255:6: ELSE OCBRACE else_expression CCBRACE
                    pass
                    ELSE218 = self.match(self.input, ELSE, self.FOLLOW_ELSE_in_if_stmt2391)
                    stream_ELSE.add(ELSE218)

                    #action start
                    t_else=1
                    #action end

                    OCBRACE219 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_if_stmt2395)
                    stream_OCBRACE.add(OCBRACE219)

                    self._state.following.append(self.FOLLOW_else_expression_in_if_stmt2397)
                    else_expression220 = self.else_expression()
                    self._state.following.pop()
                    stream_else_expression.add(else_expression220.tree)

                    CCBRACE221 = self.match(self.input, CCBRACE, self.FOLLOW_CCBRACE_in_if_stmt2399)
                    stream_CCBRACE.add(CCBRACE221)

                else:
                    break #loop35

            # AST Rewrite
            # elements: cond_comb, if_expression, cond_comb, else_expression, cond_comb, if_expression, cond_comb
            # token labels:
            # rule labels: retval
            # token list labels:
            # rule list labels:
            # wildcard labels:
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

            root_0 = self._adaptor.nil()
            # Choose the rewrite: t_else is truthy iff an ELSE clause matched.
            if t_else:
                # 256:5: -> {t_else}? ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ( else_expression )* ENDIF_ ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:18: ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ( else_expression )* ENDIF_ ) )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IFELSE_, "IFELSE_")
                    , root_1)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:28: ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ )
                root_2 = self._adaptor.nil()
                root_2 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IF_, "IF_")
                    , root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:34: ^( COND_ cond_comb )
                root_3 = self._adaptor.nil()
                root_3 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(COND_, "COND_")
                    , root_3)
                self._adaptor.addChild(root_3, stream_cond_comb.nextTree())
                self._adaptor.addChild(root_2, root_3)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:53: ( if_expression )*
                while stream_if_expression.hasNext():
                    self._adaptor.addChild(root_2, stream_if_expression.nextTree())
                stream_if_expression.reset();
                self._adaptor.addChild(root_2,
                    self._adaptor.createFromType(ENDIF_, "ENDIF_")
                    )
                self._adaptor.addChild(root_1, root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:76: ^( IF_ ^( NCOND_ cond_comb ) ( else_expression )* ENDIF_ )
                root_2 = self._adaptor.nil()
                root_2 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IF_, "IF_")
                    , root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:82: ^( NCOND_ cond_comb )
                root_3 = self._adaptor.nil()
                root_3 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(NCOND_, "NCOND_")
                    , root_3)
                self._adaptor.addChild(root_3, stream_cond_comb.nextTree())
                self._adaptor.addChild(root_2, root_3)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:256:102: ( else_expression )*
                while stream_else_expression.hasNext():
                    self._adaptor.addChild(root_2, stream_else_expression.nextTree())
                stream_else_expression.reset();
                self._adaptor.addChild(root_2,
                    self._adaptor.createFromType(ENDIF_, "ENDIF_")
                    )
                self._adaptor.addChild(root_1, root_2)
                self._adaptor.addChild(root_0, root_1)
            else:
                # 257:5: -> ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ENDIF_ ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:8: ^( IFELSE_ ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( NCOND_ cond_comb ) ENDIF_ ) )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IFELSE_, "IFELSE_")
                    , root_1)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:18: ^( IF_ ^( COND_ cond_comb ) ( if_expression )* ENDIF_ )
                root_2 = self._adaptor.nil()
                root_2 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IF_, "IF_")
                    , root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:24: ^( COND_ cond_comb )
                root_3 = self._adaptor.nil()
                root_3 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(COND_, "COND_")
                    , root_3)
                self._adaptor.addChild(root_3, stream_cond_comb.nextTree())
                self._adaptor.addChild(root_2, root_3)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:43: ( if_expression )*
                while stream_if_expression.hasNext():
                    self._adaptor.addChild(root_2, stream_if_expression.nextTree())
                stream_if_expression.reset();
                self._adaptor.addChild(root_2,
                    self._adaptor.createFromType(ENDIF_, "ENDIF_")
                    )
                self._adaptor.addChild(root_1, root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:66: ^( IF_ ^( NCOND_ cond_comb ) ENDIF_ )
                root_2 = self._adaptor.nil()
                root_2 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(IF_, "IF_")
                    , root_2)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:257:72: ^( NCOND_ cond_comb )
                root_3 = self._adaptor.nil()
                root_3 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(NCOND_, "NCOND_")
                    , root_3)
                self._adaptor.addChild(root_3, stream_cond_comb.nextTree())
                self._adaptor.addChild(root_2, root_3)
                self._adaptor.addChild(root_2,
                    self._adaptor.createFromType(ENDIF_, "ENDIF_")
                    )
                self._adaptor.addChild(root_1, root_2)
                self._adaptor.addChild(root_0, root_1)

            retval.tree = root_0

            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

        except RecognitionException as re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

    finally:
        pass
    return retval
# $ANTLR end "if_stmt"
class ifnot_stmt_return(ParserRuleReturnScope):
    # ANTLR-generated return scope for the "ifnot_stmt" rule.
    # `tree` receives the rewritten IFELSE_ AST built by ifnot_stmt().
    def __init__(self):
        super().__init__()
        self.tree = None
# $ANTLR start "ifnot_stmt"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:260:5: ifnot_stmt : IF NEG cond_comb OCBRACE if_expression CCBRACE ( ELSE OCBRACE else_expression CCBRACE )* -> {t_else}? ^( IFELSE_ ^( IF_ ^( NCOND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( COND_ cond_comb ) ( else_expression )* ENDIF_ ) ) -> ^( IFELSE_ ^( IF_ ^( NCOND_ cond_comb ) ( if_expression )* ENDIF_ ) ^( IF_ ^( COND_ cond_comb ) ENDIF_ ) ) ;
def ifnot_stmt(self, ):
retval = self.ifnot_stmt_return()
retval.start = self.input.LT(1)
root_0 = None
IF222 = None
NEG223 = None
OCBRACE225 = None
CCBRACE227 = None
| |
import collections
import copy
import tensorflow as tf
import numpy as np
import os
import multiprocessing
import functools
from abc import ABC, abstractmethod
from rl.tools.utils.misc_utils import unflatten, flatten, cprint
# Canonical tensorflow dtypes used throughout this module.
tf_float = tf.float32
tf_int = tf.int32

"""
For compatibility with stop_gradient
"""
def tf_flatten(variables):
    """Concatenate the flattened values of `variables` into a single 1-D tensor."""
    flat_pieces = [tf.reshape(variable, [-1]) for variable in variables]
    return tf.concat(flat_pieces, axis=0)
def gradients(tensor, var_list):
    """Like tf.gradients, but substitutes a zeros tensor for every
    disconnected (None) gradient, so the result aligns with var_list."""
    raw_grads = tf.gradients(tensor, var_list)
    filled = []
    for var, grad in zip(var_list, raw_grads):
        filled.append(tf.zeros_like(var) if grad is None else grad)
    return filled
"""
Wrapper of tensorflow graphs
"""
class tfObjectManager(object):
    """
    An object manager that makes sure each one has an unique name.
    """

    def __init__(self, default_name='tfObject'):
        self._name = default_name  # fallback base name when none is given
        self._dict = collections.defaultdict(lambda: None)   # base name -> next suffix
        self._table = collections.defaultdict(lambda: False)  # issued names

    def get_name(self, name=None):
        """ automatically get a unique name for the constructing a tfObject instance """
        base = str(self._name if name is None else name)
        # Keep proposing names (base, base_1, base_2, ...) until one is free.
        while True:
            suffix = self._dict[base]
            if suffix is None:
                candidate = str(base)
                self._dict[base] = 1
            else:
                candidate = str(base) + '_' + str(suffix)
                self._dict[base] += 1
            if not self._table[candidate]:
                self._table[candidate] = True
                break
        if base != candidate:
            cprint('An tfObject under {} already exists. A new name {} is created by tfObjectManager'.format(base, candidate))
        return candidate


# Module-level singleton: makes sure every tfObject instance has an unique name.
_tfOBJECT_MANAGER = tfObjectManager()
class tfObject(ABC):
    """
    A helper class for defining custom classes based on tensorflow.
    It makes sure that each instance of tfObject has an unique name (realized
    as tf.variable_scope) and support basic functionalities, like
    copy.deepcopy, assign, save, and restore.

    Usage guidelines:
        The user needs to define _build_graph and use tfObject.save_init_args
        to decorate its __init__. Note stateful non-tensorflow attributes
        (which change during the use of the instance, like a counter) should be
        NOT created inside _build_graph. The decorator tfObject.save_init_args
        is used to record input arguments to __init__ for deepcopying. It
        should be used to decorate a child class's __init__ when its signature
        or default value changes.

        In order to maintain desired deepcopy behaviors during inheritance, the
        user should modify _pre_deepcopy_list and _post_deepcopy_list methods
        to add the name of attributes that should be copied during deepcopy.
        By default, an tfObject instance does not deepcopy any attribute,
        except for those provided by the user. This convention is chosen for
        robust behaviors in case of potential future behavior changes of
        tensorflow. When copy.deepcopy is called, tfObject calls the __init__
        function defined by the custom class, in which before _build_graph is
        called (through __init__ of tfObject) the attributes in
        _pre_deepcopy_list will be deepcopied, and then deepcopies the
        attributes in the _post_deepcopy_list. As a rule of thumb,
        _pre_deepcopy_list should contain stateful attributes that pertain to
        the tensorflow graph creation (i.e. those created before calling
        __init__ ) _post_deepcopy_list contains other stateful attributes.
        Note when defining _pre_deepcopy and _post_deepcopy_list, make sure it
        contains the contents from the parent class.

    Public attributes:
        ts_vars, ts_allvars

    Public methods:
        copy, __deepcopy__, assign, save, restore
    """

    def __init__(self, name='tfObject', max_to_keep=None, bg_kwargs=None):
        """
        The tensorflow graph constructor.

        Args:
            name: the name of the object
            max_to_keep: the maximal number of copies to save
            bg_kwargs: the additional kwargs of _build_graph.
        """
        assert hasattr(self, '_tfObject__args') and hasattr(self, '_tfObject__kwargs'), \
            'Must use save_init_args decorator on __init__'
        if bg_kwargs is None:
            bg_kwargs = {}

        # pre-processing: hooks installed by __deepcopy before re-initialization
        if hasattr(self, '_tfObject__pre_init_fun'):
            self._tfObject__pre_init_fun()
        if hasattr(self, '_tfObject__default_name'):
            name = self._tfObject__default_name  # force using a default name

        # create the tensorflow graph under a globally unique variable scope
        self.__name = _tfOBJECT_MANAGER.get_name(name)  # get a unique name
        with tf.variable_scope(self._tfObject__name):
            self.__scope = tf.get_variable_scope().name  # for later tensors retrieval
            self._build_graph(**bg_kwargs)

        # build getters and setters (np.ndarray)
        # NOTE(review): build_get/build_set are defined elsewhere in this
        # module -- presumably they read/write variable values; confirm.
        if len(self.ts_vars) > 0:
            self.__get_vars = build_get(self.ts_vars)
            self.__set_vars = build_set(self.ts_vars)
        if len(self.ts_allvars) > 0:
            self.__get_allvars = build_get(self.ts_allvars)
            self.__set_allvars = build_set(self.ts_allvars)

        if len(self.ts_allvars) > 0:
            self.__saver = tf.train.Saver(self.ts_allvars, max_to_keep=max_to_keep)

        # for deepcopy
        self.__pre_deepcopy_list = []  # attributes should be deep copied
        self.__pre_deepcopy_list.extend(self._pre_deepcopy_list)
        # NOTE(review): the scope attribute is stored under its mangled name
        # '_tfObject__scope', so '_scope' never matches in __update_attrs --
        # confirm whether this entry is intentional.
        self.__post_deepcopy_list = ['_scope']  # attributes should be deep copied
        self.__post_deepcopy_list.extend(self._post_deepcopy_list)

    # Some functions for the user to define
    @abstractmethod
    def _build_graph(self, **kwargs):
        """ Build the tensorflow graph """

    @property
    def _pre_deepcopy_list(self):
        """ Return a list of strings of attribute names that should be deep
        copied before calling tfObject.__init__ """
        return []

    @property
    def _post_deepcopy_list(self):
        """ Return a list of strings of attribute names that should be deep
        copied before calling self.__init__ """
        return []

    # Functions for correct deepcopy
    @staticmethod
    def save_init_args(deepcopy_args=False):
        """ A decorator for child class's __init__, which saves the input
        arguments for performing deepcopying"""
        if deepcopy_args:  # whether to deepcopy the input arguments
            def safe_copy(val):
                try:
                    return copy.deepcopy(val)
                except:
                    return copy.copy(val)
        else:
            def safe_copy(val): return val

        def decorator(fun):
            @functools.wraps(fun)
            def wrapper(self, *args, **kwargs):
                if hasattr(self, '_tfObject__args_saved'):
                    if self._tfObject__args_saved:  # make sure it's only called once
                        return fun(self, *args, **kwargs)
                # save the input arguments
                self.__args, self.__kwargs = [], {}
                self.__args = [safe_copy(arg) for arg in args]
                self.__kwargs = {k: safe_copy(v) for k, v in kwargs.items()}
                self.__args_saved = True
                return fun(self, *args, **kwargs)
            return wrapper
        return decorator

    def copy(self, new_name):
        """ Like copy.deepcopy but with a new custom name """
        return self.__deepcopy(name=new_name, memo={})

    def __deepcopy__(self, memo):
        # we need to overload this because of tensorflow graph
        return self._tfObject__deepcopy(memo=memo)

    def __deepcopy(self, memo, name=None):
        # create new instance without running __init__ yet
        tfobj = type(self).__new__(type(self), *self._tfObject__args, **self._tfObject__kwargs)
        memo[id(self)] = tfobj  # prevent forming a loop
        # customize the behavior of tfObject.__init__
        if name is not None:
            tfobj.__default_name = name  # use a new name

        def _pre_deepcopy():  # deepcopy attributes before _build_graph
            tfobj._tfObject__update_attrs(self, self._tfObject__pre_deepcopy_list, memo)
        tfobj.__pre_init_fun = _pre_deepcopy
        # initialize the instance as usual
        tfobj.__init__(*self._tfObject__args, **self._tfObject__kwargs)
        # post deepcopying
        tfobj._tfObject__update_attrs(self, self._tfObject__post_deepcopy_list, memo)
        # update tf.Variables
        tfobj.assign(self)
        return tfobj

    def __update_attrs(self, src, attrs, memo):
        # try to deepcopy attrs from src to self (only names src actually has)
        for k in list(set(attrs) & set(src.__dict__.keys())):
            setattr(self, k, copy.deepcopy(getattr(src, k), memo))

    # Miscellaneous functions
    def assign(self, other):
        """Set the tf.Variables of self as that of the other """
        assert type(self) == type(other)
        if len(self.ts_allvars) > 0:
            self._tfObject__set_allvars(*other._tfObject__get_allvars())  # update tf.Variables

    @property
    def ts_vars(self):  # list of trainable tf.Variables
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._tfObject__scope)

    @ts_vars.setter
    def ts_vars(self, vals):  # list of values to set to trainable tf.Variables
        self._tfObject__set_vars(*vals)

    @property
    def ts_allvars(self):  # list of all tf.Variables, including non-trainable ones
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self._tfObject__scope)

    @ts_allvars.setter
    def ts_allvars(self, vals):  # list of all tf.Variables, including non-trainable ones
        self._tfObject__set_allvars(*vals)

    # TODO make this object pickle-compatible?
    def save(self, path):
        """ Save the ts_allvars to path """
        if len(self.ts_allvars) > 0:
            path = self._tfObject__saver.save(tf.get_default_session(), path)
        return path

    def restore(self, path, saved_name=None):
        """Recover ts_allvars from path saved with saved_name"""
        if len(self.ts_allvars) > 0:
            if saved_name is None:
                saved_name = self._tfObject__name
            ts_dict = {}
            for ts in self.ts_allvars:
                splits = ts.name.split('/')
                # XXX only restore the vars match the saved_name!!!!
                if splits[0] != saved_name:
                    continue
                # NOTE(review): after the filter above this rename is a no-op;
                # confirm whether the filter was meant to use the live name.
                splits[0] = saved_name
                saved_ts_name = '/'.join(splits)
                assert saved_ts_name.split(':')[1] == '0'
                saved_ts_name = saved_ts_name.split(':')[0]
                ts_dict[saved_ts_name] = ts
            saver = tf.train.Saver(ts_dict)
            saver.restore(tf.get_default_session(), path)
"""
Session management.
"""
def make_session(num_cpu=None, make_default=False):
    """Returns a session that will use <num_cpu> CPU's only"""
    if num_cpu is None:
        # Allow an environment override, otherwise use every available core.
        num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
    config = tf.ConfigProto(
        inter_op_parallelism_threads=num_cpu,
        intra_op_parallelism_threads=num_cpu)
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.allow_growth = True
    session_cls = tf.InteractiveSession if make_default else tf.Session
    return session_cls(config=config)
def single_threaded_session():
    """Returns a session which will only use a single CPU"""
    # Delegates to make_session with both inter- and intra-op parallelism = 1.
    return make_session(num_cpu=1)
"""
Placeholder cache. Create if necessary.
"""
_PLACEHOLDER_CACHE = {}  # name -> (placeholder, dtype, shape)


def get_placeholder(name, dtype=None, shape=None):
    """Return the placeholder cached under `name`, creating it on first use.

    On a cache hit, `dtype` and `shape` must be omitted (the cached
    placeholder already fixed them).
    """
    cached = _PLACEHOLDER_CACHE.get(name)
    if cached is not None:
        assert dtype is None
        assert shape is None
        return cached[0]
    out = tf.placeholder(dtype=dtype, shape=shape, name=name)
    _PLACEHOLDER_CACHE[name] = (out, dtype, shape)
    return out
"""
Simple functions that construct tensors from tensors.
"""
def squared_sum(sy_x, axis=None):
    """Return the sum of element-wise squares of `sy_x` along `axis`."""
    return tf.reduce_sum(tf.square(sy_x), axis=axis)
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value (int or bool).

    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    Args:
      condition: scalar tensor.
      then_expression: TensorFlow operation.
      else_expression: TensorFlow operation.
    """
    # tf.cond drops static shape info, so remember it and re-apply afterwards.
    static_shape = copy.copy(then_expression.get_shape())
    selected = tf.cond(tf.cast(condition, 'bool'),
                       lambda: then_expression,
                       lambda: else_expression)
    selected.set_shape(static_shape)
    return selected
def build_multilayer_perceptron(
scope,
sy_input,
output_size,
n_layers=2,
size=64,
activation=tf.tanh,
hid_layer_std=1.0,
output_activation=None,
output_init_std=1.0,
reuse=False,
):
with tf.variable_scope(scope, reuse=reuse):
sy_y = sy_input
for _ in range(n_layers):
sy_y = tf.layers.dense(sy_y, size, activation=activation,
kernel_initializer=normc_initializer(hid_layer_std))
sy_y = tf.layers.dense(sy_y, output_size, activation=output_activation,
kernel_initializer=normc_initializer(output_init_std))
return | |
there
#> StringInsert["abcdefghijklm", "X", -15]
: Cannot insert at position -15 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, -15]
>> StringInsert["adac", "he", {1, 5}]
= headache
#> StringInsert["abcdefghijklm", "X", {1, -1, 14, -14}]
= XXabcdefghijklmXX
#> StringInsert["abcdefghijklm", "X", {1, 0}]
: Cannot insert at position 0 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, {1, 0}]
#> StringInsert["", "X", {1}]
= X
#> StringInsert["", "X", {1, -1}]
= XX
#> StringInsert["", "", {1}]
= #<--#
#> StringInsert["", "X", {1, 2}]
: Cannot insert at position 2 in .
= StringInsert[, X, {1, 2}]
#> StringInsert["abcdefghijklm", "", {1, 2, 3, 4 ,5, -6}]
= abcdefghijklm
#> StringInsert["abcdefghijklm", "X", {}]
= abcdefghijklm
>> StringInsert[{"something", "sometimes"}, " ", 5]
= {some thing, some times}
#> StringInsert[{"abcdefghijklm", "Mathics"}, "X", 13]
: Cannot insert at position 13 in Mathics.
= {abcdefghijklXm, StringInsert[Mathics, X, 13]}
#> StringInsert[{"", ""}, "", {1, 1, 1, 1}]
= {, }
#> StringInsert[{"abcdefghijklm", "Mathics"}, "X", {0, 2}]
: Cannot insert at position 0 in abcdefghijklm.
: Cannot insert at position 0 in Mathics.
= {StringInsert[abcdefghijklm, X, {0, 2}], StringInsert[Mathics, X, {0, 2}]}
#> StringInsert[{"abcdefghijklm", Mathics}, "X", {1, 2}]
: String or list of strings expected at position 1 in StringInsert[{abcdefghijklm, Mathics}, X, {1, 2}].
= StringInsert[{abcdefghijklm, Mathics}, X, {1, 2}]
#> StringInsert[{"", "Mathics"}, "X", {1, 1, -1}]
= {XXX, XXMathicsX}
>> StringInsert["1234567890123456", ".", Range[-16, -4, 3]]
= 1.234.567.890.123.456 """
# Message templates for StringInsert; the backquoted slots (`1`, `2`, ...)
# are filled with the offending arguments when a message is emitted.
messages = {
    'strse': 'String or list of strings expected at position `1` in `2`.',
    'string': 'String expected at position `1` in `2`.',
    'ins': 'Cannot insert at position `1` in `2`.',
    'psl': 'Position specification `1` in `2` is not a machine-sized integer or a list of machine-sized integers.',
}
def _insert(self, str, add, lpos, evaluation):
for pos in lpos:
if abs(pos) < 1 or abs(pos) > len(str)+1:
evaluation.message('StringInsert', 'ins', Integer(pos), String(str))
return evaluation.format_output(Expression('StringInsert', str, add, lpos[0] if len(lpos) == 1 else lpos))
# Create new list of position which are rearranged
pos_limit = len(str) + 2
listpos = [p if p > 0 else pos_limit+p for p in lpos]
listpos.sort()
result = ''
start = 0
for pos in listpos:
stop = pos - 1
result += str[start:stop] + add
start = stop
else:
result += str[start:len(str)]
return result
def apply(self, strsource, strnew, pos, evaluation):
    'StringInsert[strsource_, strnew_, pos_]'
    # NOTE: the string above is the rule pattern and must stay unchanged.
    unevaluated = Expression('StringInsert', strsource, strnew, pos)

    py_strnew = strnew.get_string_value()
    if py_strnew is None:
        return evaluation.message('StringInsert', 'string', Integer(2), unevaluated)

    # Collect the insertion position(s) into a flat list of Python ints.
    if pos.has_form('List', None):
        leaves = pos.get_leaves()
        if not leaves:
            return strsource
        listpos = []
        for posi in leaves:
            py_posi = posi.get_int_value()
            if py_posi is None:
                return evaluation.message('StringInsert', 'psl', pos, unevaluated)
            listpos.append(py_posi)
    else:
        py_pos = pos.get_int_value()
        if py_pos is None:
            return evaluation.message('StringInsert', 'psl', pos, unevaluated)
        listpos = [py_pos]

    # Perform the insertion on a single string, or map it over a list.
    if strsource.has_form('List', None):
        py_sources = [sub.get_string_value() for sub in strsource.leaves]
        if any(sub is None for sub in py_sources):
            return evaluation.message('StringInsert', 'strse', Integer(1), unevaluated)
        return Expression(
            'List',
            *[String(self._insert(s, py_strnew, listpos, evaluation))
              for s in py_sources])

    py_source = strsource.get_string_value()
    if py_source is None:
        return evaluation.message('StringInsert', 'strse', Integer(1), unevaluated)
    return String(self._insert(py_source, py_strnew, listpos, evaluation))
def _pattern_search(name, string, patt, evaluation, options, matched):
    """Shared engine for StringContainsQ/StringFreeQ.

    Compiles `patt` (a single pattern or a List of patterns) to regexes
    and tests `string` (a single string or a List of strings) against
    them; `matched` selects whether a hit maps to True (ContainsQ) or
    False (FreeQ).
    """
    # Get the pattern list and check validity for each
    patts = patt.get_leaves() if patt.has_form('List', None) else [patt]
    re_patts = []
    for p in patts:
        py_p = to_regex(p, evaluation)
        if py_p is None:
            return evaluation.message('StringExpression', 'invld', p, patt)
        re_patts.append(py_p)

    flags = re.MULTILINE
    if options['System`IgnoreCase'] == SymbolTrue:
        flags |= re.IGNORECASE

    def check_one(s):
        hit = any(re.search(p, s, flags=flags) for p in re_patts)
        if matched:
            return SymbolTrue if hit else SymbolFalse
        return SymbolFalse if hit else SymbolTrue

    # Check string validity and perform the regex search.
    if string.has_form('List', None):
        py_strings = [s.get_string_value() for s in string.leaves]
        if any(s is None for s in py_strings):
            return evaluation.message(name, 'strse', Integer(1),
                                      Expression(name, string, patt))
        return Expression('List', *[check_one(s) for s in py_strings])

    py_string = string.get_string_value()
    if py_string is None:
        return evaluation.message(name, 'strse', Integer(1),
                                  Expression(name, string, patt))
    return check_one(py_string)
class StringContainsQ(Builtin):
    """
    <dl>
    <dt>'StringContainsQ["$string$", $patt$]'
        <dd>returns True if any part of $string$ matches $patt$, and returns False otherwise.
    <dt>'StringContainsQ[{"s1", "s2", ...}, patt]'
        <dd>returns the list of results for each element of string list.
    <dt>'StringContainsQ[patt]'
        <dd>represents an operator form of StringContainsQ that can be applied to an expression.
    </dl>

    >> StringContainsQ["mathics", "m" ~~ __ ~~ "s"]
     = True

    >> StringContainsQ["mathics", "a" ~~ __ ~~ "m"]
     = False

    #> StringContainsQ["Hello", "o"]
     = True

    #> StringContainsQ["a"]["abcd"]
     = True

    #> StringContainsQ["Mathics", "ma", IgnoreCase -> False]
     = False

    >> StringContainsQ["Mathics", "MA" , IgnoreCase -> True]
     = True

    #> StringContainsQ["", "Empty String"]
     = False

    #> StringContainsQ["", ___]
     = True

    #> StringContainsQ["Empty Pattern", ""]
     = True

    #> StringContainsQ[notastring, "n"]
     : String or list of strings expected at position 1 in StringContainsQ[notastring, n].
     = StringContainsQ[notastring, n]

    #> StringContainsQ["Welcome", notapattern]
     : Element notapattern is not a valid string or pattern element in notapattern.
     = StringContainsQ[Welcome, notapattern]

    >> StringContainsQ[{"g", "a", "laxy", "universe", "sun"}, "u"]
     = {False, False, False, True, True}

    #> StringContainsQ[{}, "list of string is empty"]
     = {}

    >> StringContainsQ["e" ~~ ___ ~~ "u"] /@ {"The Sun", "Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"}
     = {True, True, True, False, False, False, False, False, True}

    ## special cases, Mathematica allows list of patterns
    #> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}]
     = {False, False, True, True, False}

    #> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}, IgnoreCase -> True]
     = {False, False, True, True, True}

    #> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {}]
     = {False, False, False, False, False}

    #> StringContainsQ[{"A", Galaxy, "Far", "Far", Away}, {"F" ~~ __ ~~ "r", "aw" ~~ ___}]
     : String or list of strings expected at position 1 in StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}].
     = StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}]

    #> StringContainsQ[{"A", "Galaxy", "Far", "Far", "Away"}, {F ~~ __ ~~ "r", aw ~~ ___}]
     : Element F ~~ __ ~~ r is not a valid string or pattern element in {F ~~ __ ~~ r, aw ~~ ___}.
     = StringContainsQ[{A, Galaxy, Far, Far, Away}, {F ~~ __ ~~ r, aw ~~ ___}]

    ## Mathematica can detemine correct invalid element in the pattern, it reports error:
    ## Element F is not a valid string or pattern element in {F ~~ __ ~~ r, aw ~~ ___}.
    """

    options = {
        'IgnoreCase': 'False',
    }

    rules = {
        # Operator form: StringContainsQ[patt][expr] -> StringContainsQ[expr, patt]
        'StringContainsQ[patt_][expr_]': 'StringContainsQ[expr, patt]',
    }

    messages = {
        'strse': 'String or list of strings expected at position `1` in `2`.',
    }

    def apply(self, string, patt, evaluation, options):
        'StringContainsQ[string_, patt_, OptionsPattern[%(name)s]]'
        # Delegate to the shared matcher; matched=True maps a regex hit to True.
        return _pattern_search(self.__class__.__name__, string, patt, evaluation, options, True)
class StringFreeQ(Builtin):
"""
<dl>
<dt>'StringFreeQ["$string$", $patt$]'
<dd>returns True if no substring in $string$ matches the string expression $patt$, and returns False otherwise.
<dt>'StringFreeQ[{"s1", "s2", ...}, patt]'
<dd>returns the list of results for each element of string list.
<dt>'StringFreeQ["string", {p1, p2, ...}]'
<dd>returns True if no substring matches any of the $pi$.
<dt>'StringFreeQ[patt]'
<dd>represents an operator form of StringFreeQ that can be applied to an expression.
</dl>
>> StringFreeQ["mathics", "m" ~~ __ ~~ "s"]
= False
>> StringFreeQ["mathics", "a" ~~ __ ~~ "m"]
= True
#> StringFreeQ["Hello", "o"]
= False
#> StringFreeQ["a"]["abcd"]
= False
#> StringFreeQ["Mathics", "ma", IgnoreCase -> False]
= True
>> StringFreeQ["Mathics", "MA" , IgnoreCase -> True]
= False
#> StringFreeQ["", "Empty String"]
= True
#> StringFreeQ["", ___]
= False
#> StringFreeQ["Empty Pattern", ""]
= False
#> StringFreeQ[notastring, "n"]
: String or list of strings expected at position 1 in StringFreeQ[notastring, n].
= StringFreeQ[notastring, n]
#> StringFreeQ["Welcome", notapattern]
: Element notapattern is not a valid string or pattern element in notapattern.
= StringFreeQ[Welcome, notapattern]
>> StringFreeQ[{"g", "a", "laxy", "universe", "sun"}, "u"]
= {True, True, True, False, False}
#> StringFreeQ[{}, "list of string is empty"]
= {}
>> StringFreeQ["e" ~~ ___ | |
u'徭'),
(0xFA86, 'M', u'惘'),
(0xFA87, 'M', u'慎'),
(0xFA88, 'M', u'愈'),
(0xFA89, 'M', u'憎'),
(0xFA8A, 'M', u'慠'),
(0xFA8B, 'M', u'懲'),
(0xFA8C, 'M', u'戴'),
(0xFA8D, 'M', u'揄'),
(0xFA8E, 'M', u'搜'),
(0xFA8F, 'M', u'摒'),
(0xFA90, 'M', u'敖'),
(0xFA91, 'M', u'晴'),
(0xFA92, 'M', u'朗'),
(0xFA93, 'M', u'望'),
(0xFA94, 'M', u'杖'),
(0xFA95, 'M', u'歹'),
(0xFA96, 'M', u'殺'),
(0xFA97, 'M', u'流'),
(0xFA98, 'M', u'滛'),
(0xFA99, 'M', u'滋'),
(0xFA9A, 'M', u'漢'),
(0xFA9B, 'M', u'瀞'),
(0xFA9C, 'M', u'煮'),
(0xFA9D, 'M', u'瞧'),
(0xFA9E, 'M', u'爵'),
(0xFA9F, 'M', u'犯'),
(0xFAA0, 'M', u'猪'),
(0xFAA1, 'M', u'瑱'),
(0xFAA2, 'M', u'甆'),
(0xFAA3, 'M', u'画'),
(0xFAA4, 'M', u'瘝'),
(0xFAA5, 'M', u'瘟'),
(0xFAA6, 'M', u'益'),
(0xFAA7, 'M', u'盛'),
(0xFAA8, 'M', u'直'),
(0xFAA9, 'M', u'睊'),
(0xFAAA, 'M', u'着'),
(0xFAAB, 'M', u'磌'),
(0xFAAC, 'M', u'窱'),
]
def _seg_43():
    """Return one segment of the (codepoint, status[, mapping]) table.

    Statuses appear to follow UTS #46 (IDNA) conventions: 'M' mapped,
    'V' valid, 'X' disallowed, '3' disallowed_STD3_mapped -- TODO confirm
    against the table-generating script.  NOTE(review): generated data,
    split into ``_seg_NN`` helpers; do not edit by hand.
    """
    return [
        (0xFAAD, 'M', u'節'),
        (0xFAAE, 'M', u'类'),
        (0xFAAF, 'M', u'絛'),
        (0xFAB0, 'M', u'練'),
        (0xFAB1, 'M', u'缾'),
        (0xFAB2, 'M', u'者'),
        (0xFAB3, 'M', u'荒'),
        (0xFAB4, 'M', u'華'),
        (0xFAB5, 'M', u'蝹'),
        (0xFAB6, 'M', u'襁'),
        (0xFAB7, 'M', u'覆'),
        (0xFAB8, 'M', u'視'),
        (0xFAB9, 'M', u'調'),
        (0xFABA, 'M', u'諸'),
        (0xFABB, 'M', u'請'),
        (0xFABC, 'M', u'謁'),
        (0xFABD, 'M', u'諾'),
        (0xFABE, 'M', u'諭'),
        (0xFABF, 'M', u'謹'),
        (0xFAC0, 'M', u'變'),
        (0xFAC1, 'M', u'贈'),
        (0xFAC2, 'M', u'輸'),
        (0xFAC3, 'M', u'遲'),
        (0xFAC4, 'M', u'醙'),
        (0xFAC5, 'M', u'鉶'),
        (0xFAC6, 'M', u'陼'),
        (0xFAC7, 'M', u'難'),
        (0xFAC8, 'M', u'靖'),
        (0xFAC9, 'M', u'韛'),
        (0xFACA, 'M', u'響'),
        (0xFACB, 'M', u'頋'),
        (0xFACC, 'M', u'頻'),
        (0xFACD, 'M', u'鬒'),
        (0xFACE, 'M', u'龜'),
        (0xFACF, 'M', u'𢡊'),
        (0xFAD0, 'M', u'𢡄'),
        (0xFAD1, 'M', u'𣏕'),
        (0xFAD2, 'M', u'㮝'),
        (0xFAD3, 'M', u'䀘'),
        (0xFAD4, 'M', u'䀹'),
        (0xFAD5, 'M', u'𥉉'),
        (0xFAD6, 'M', u'𥳐'),
        (0xFAD7, 'M', u'𧻓'),
        (0xFAD8, 'M', u'齃'),
        (0xFAD9, 'M', u'龎'),
        (0xFADA, 'X'),
        (0xFB00, 'M', u'ff'),
        (0xFB01, 'M', u'fi'),
        (0xFB02, 'M', u'fl'),
        (0xFB03, 'M', u'ffi'),
        (0xFB04, 'M', u'ffl'),
        (0xFB05, 'M', u'st'),
        (0xFB07, 'X'),
        (0xFB13, 'M', u'մն'),
        (0xFB14, 'M', u'մե'),
        (0xFB15, 'M', u'մի'),
        (0xFB16, 'M', u'վն'),
        (0xFB17, 'M', u'մխ'),
        (0xFB18, 'X'),
        (0xFB1D, 'M', u'יִ'),
        (0xFB1E, 'V'),
        (0xFB1F, 'M', u'ײַ'),
        (0xFB20, 'M', u'ע'),
        (0xFB21, 'M', u'א'),
        (0xFB22, 'M', u'ד'),
        (0xFB23, 'M', u'ה'),
        (0xFB24, 'M', u'כ'),
        (0xFB25, 'M', u'ל'),
        (0xFB26, 'M', u'ם'),
        (0xFB27, 'M', u'ר'),
        (0xFB28, 'M', u'ת'),
        (0xFB29, '3', u'+'),
        (0xFB2A, 'M', u'שׁ'),
        (0xFB2B, 'M', u'שׂ'),
        (0xFB2C, 'M', u'שּׁ'),
        (0xFB2D, 'M', u'שּׂ'),
        (0xFB2E, 'M', u'אַ'),
        (0xFB2F, 'M', u'אָ'),
        (0xFB30, 'M', u'אּ'),
        (0xFB31, 'M', u'בּ'),
        (0xFB32, 'M', u'גּ'),
        (0xFB33, 'M', u'דּ'),
        (0xFB34, 'M', u'הּ'),
        (0xFB35, 'M', u'וּ'),
        (0xFB36, 'M', u'זּ'),
        (0xFB37, 'X'),
        (0xFB38, 'M', u'טּ'),
        (0xFB39, 'M', u'יּ'),
        (0xFB3A, 'M', u'ךּ'),
        (0xFB3B, 'M', u'כּ'),
        (0xFB3C, 'M', u'לּ'),
        (0xFB3D, 'X'),
        (0xFB3E, 'M', u'מּ'),
        (0xFB3F, 'X'),
        (0xFB40, 'M', u'נּ'),
        (0xFB41, 'M', u'סּ'),
        (0xFB42, 'X'),
        (0xFB43, 'M', u'ףּ'),
        (0xFB44, 'M', u'פּ'),
        (0xFB45, 'X'),
    ]
def _seg_44():
    """Return one segment of the (codepoint, status[, mapping]) table.

    Statuses appear to follow UTS #46 (IDNA) conventions: 'M' mapped,
    'V' valid, 'X' disallowed -- TODO confirm against the generating
    script.  NOTE(review): generated data; do not edit by hand.
    """
    return [
        (0xFB46, 'M', u'צּ'),
        (0xFB47, 'M', u'קּ'),
        (0xFB48, 'M', u'רּ'),
        (0xFB49, 'M', u'שּ'),
        (0xFB4A, 'M', u'תּ'),
        (0xFB4B, 'M', u'וֹ'),
        (0xFB4C, 'M', u'בֿ'),
        (0xFB4D, 'M', u'כֿ'),
        (0xFB4E, 'M', u'פֿ'),
        (0xFB4F, 'M', u'אל'),
        (0xFB50, 'M', u'ٱ'),
        (0xFB52, 'M', u'ٻ'),
        (0xFB56, 'M', u'پ'),
        (0xFB5A, 'M', u'ڀ'),
        (0xFB5E, 'M', u'ٺ'),
        (0xFB62, 'M', u'ٿ'),
        (0xFB66, 'M', u'ٹ'),
        (0xFB6A, 'M', u'ڤ'),
        (0xFB6E, 'M', u'ڦ'),
        (0xFB72, 'M', u'ڄ'),
        (0xFB76, 'M', u'ڃ'),
        (0xFB7A, 'M', u'چ'),
        (0xFB7E, 'M', u'ڇ'),
        (0xFB82, 'M', u'ڍ'),
        (0xFB84, 'M', u'ڌ'),
        (0xFB86, 'M', u'ڎ'),
        (0xFB88, 'M', u'ڈ'),
        (0xFB8A, 'M', u'ژ'),
        (0xFB8C, 'M', u'ڑ'),
        (0xFB8E, 'M', u'ک'),
        (0xFB92, 'M', u'گ'),
        (0xFB96, 'M', u'ڳ'),
        (0xFB9A, 'M', u'ڱ'),
        (0xFB9E, 'M', u'ں'),
        (0xFBA0, 'M', u'ڻ'),
        (0xFBA4, 'M', u'ۀ'),
        (0xFBA6, 'M', u'ہ'),
        (0xFBAA, 'M', u'ھ'),
        (0xFBAE, 'M', u'ے'),
        (0xFBB0, 'M', u'ۓ'),
        (0xFBB2, 'V'),
        (0xFBC2, 'X'),
        (0xFBD3, 'M', u'ڭ'),
        (0xFBD7, 'M', u'ۇ'),
        (0xFBD9, 'M', u'ۆ'),
        (0xFBDB, 'M', u'ۈ'),
        (0xFBDD, 'M', u'ۇٴ'),
        (0xFBDE, 'M', u'ۋ'),
        (0xFBE0, 'M', u'ۅ'),
        (0xFBE2, 'M', u'ۉ'),
        (0xFBE4, 'M', u'ې'),
        (0xFBE8, 'M', u'ى'),
        (0xFBEA, 'M', u'ئا'),
        (0xFBEC, 'M', u'ئە'),
        (0xFBEE, 'M', u'ئو'),
        (0xFBF0, 'M', u'ئۇ'),
        (0xFBF2, 'M', u'ئۆ'),
        (0xFBF4, 'M', u'ئۈ'),
        (0xFBF6, 'M', u'ئې'),
        (0xFBF9, 'M', u'ئى'),
        (0xFBFC, 'M', u'ی'),
        (0xFC00, 'M', u'ئج'),
        (0xFC01, 'M', u'ئح'),
        (0xFC02, 'M', u'ئم'),
        (0xFC03, 'M', u'ئى'),
        (0xFC04, 'M', u'ئي'),
        (0xFC05, 'M', u'بج'),
        (0xFC06, 'M', u'بح'),
        (0xFC07, 'M', u'بخ'),
        (0xFC08, 'M', u'بم'),
        (0xFC09, 'M', u'بى'),
        (0xFC0A, 'M', u'بي'),
        (0xFC0B, 'M', u'تج'),
        (0xFC0C, 'M', u'تح'),
        (0xFC0D, 'M', u'تخ'),
        (0xFC0E, 'M', u'تم'),
        (0xFC0F, 'M', u'تى'),
        (0xFC10, 'M', u'تي'),
        (0xFC11, 'M', u'ثج'),
        (0xFC12, 'M', u'ثم'),
        (0xFC13, 'M', u'ثى'),
        (0xFC14, 'M', u'ثي'),
        (0xFC15, 'M', u'جح'),
        (0xFC16, 'M', u'جم'),
        (0xFC17, 'M', u'حج'),
        (0xFC18, 'M', u'حم'),
        (0xFC19, 'M', u'خج'),
        (0xFC1A, 'M', u'خح'),
        (0xFC1B, 'M', u'خم'),
        (0xFC1C, 'M', u'سج'),
        (0xFC1D, 'M', u'سح'),
        (0xFC1E, 'M', u'سخ'),
        (0xFC1F, 'M', u'سم'),
        (0xFC20, 'M', u'صح'),
        (0xFC21, 'M', u'صم'),
        (0xFC22, 'M', u'ضج'),
        (0xFC23, 'M', u'ضح'),
        (0xFC24, 'M', u'ضخ'),
        (0xFC25, 'M', u'ضم'),
        (0xFC26, 'M', u'طح'),
    ]
def _seg_45():
    """Return one segment of the (codepoint, status[, mapping]) table.

    Statuses appear to follow UTS #46 (IDNA) conventions: 'M' mapped,
    '3' disallowed_STD3_mapped -- TODO confirm against the generating
    script.  NOTE(review): generated data; do not edit by hand.
    """
    return [
        (0xFC27, 'M', u'طم'),
        (0xFC28, 'M', u'ظم'),
        (0xFC29, 'M', u'عج'),
        (0xFC2A, 'M', u'عم'),
        (0xFC2B, 'M', u'غج'),
        (0xFC2C, 'M', u'غم'),
        (0xFC2D, 'M', u'فج'),
        (0xFC2E, 'M', u'فح'),
        (0xFC2F, 'M', u'فخ'),
        (0xFC30, 'M', u'فم'),
        (0xFC31, 'M', u'فى'),
        (0xFC32, 'M', u'في'),
        (0xFC33, 'M', u'قح'),
        (0xFC34, 'M', u'قم'),
        (0xFC35, 'M', u'قى'),
        (0xFC36, 'M', u'قي'),
        (0xFC37, 'M', u'كا'),
        (0xFC38, 'M', u'كج'),
        (0xFC39, 'M', u'كح'),
        (0xFC3A, 'M', u'كخ'),
        (0xFC3B, 'M', u'كل'),
        (0xFC3C, 'M', u'كم'),
        (0xFC3D, 'M', u'كى'),
        (0xFC3E, 'M', u'كي'),
        (0xFC3F, 'M', u'لج'),
        (0xFC40, 'M', u'لح'),
        (0xFC41, 'M', u'لخ'),
        (0xFC42, 'M', u'لم'),
        (0xFC43, 'M', u'لى'),
        (0xFC44, 'M', u'لي'),
        (0xFC45, 'M', u'مج'),
        (0xFC46, 'M', u'مح'),
        (0xFC47, 'M', u'مخ'),
        (0xFC48, 'M', u'مم'),
        (0xFC49, 'M', u'مى'),
        (0xFC4A, 'M', u'مي'),
        (0xFC4B, 'M', u'نج'),
        (0xFC4C, 'M', u'نح'),
        (0xFC4D, 'M', u'نخ'),
        (0xFC4E, 'M', u'نم'),
        (0xFC4F, 'M', u'نى'),
        (0xFC50, 'M', u'ني'),
        (0xFC51, 'M', u'هج'),
        (0xFC52, 'M', u'هم'),
        (0xFC53, 'M', u'هى'),
        (0xFC54, 'M', u'هي'),
        (0xFC55, 'M', u'يج'),
        (0xFC56, 'M', u'يح'),
        (0xFC57, 'M', u'يخ'),
        (0xFC58, 'M', u'يم'),
        (0xFC59, 'M', u'يى'),
        (0xFC5A, 'M', u'يي'),
        (0xFC5B, 'M', u'ذٰ'),
        (0xFC5C, 'M', u'رٰ'),
        (0xFC5D, 'M', u'ىٰ'),
        (0xFC5E, '3', u' ٌّ'),
        (0xFC5F, '3', u' ٍّ'),
        (0xFC60, '3', u' َّ'),
        (0xFC61, '3', u' ُّ'),
        (0xFC62, '3', u' ِّ'),
        (0xFC63, '3', u' ّٰ'),
        (0xFC64, 'M', u'ئر'),
        (0xFC65, 'M', u'ئز'),
        (0xFC66, 'M', u'ئم'),
        (0xFC67, 'M', u'ئن'),
        (0xFC68, 'M', u'ئى'),
        (0xFC69, 'M', u'ئي'),
        (0xFC6A, 'M', u'بر'),
        (0xFC6B, 'M', u'بز'),
        (0xFC6C, 'M', u'بم'),
        (0xFC6D, 'M', u'بن'),
        (0xFC6E, 'M', u'بى'),
        (0xFC6F, 'M', u'بي'),
        (0xFC70, 'M', u'تر'),
        (0xFC71, 'M', u'تز'),
        (0xFC72, 'M', u'تم'),
        (0xFC73, 'M', u'تن'),
        (0xFC74, 'M', u'تى'),
        (0xFC75, 'M', u'تي'),
        (0xFC76, 'M', u'ثر'),
        (0xFC77, 'M', u'ثز'),
        (0xFC78, 'M', u'ثم'),
        (0xFC79, 'M', u'ثن'),
        (0xFC7A, 'M', u'ثى'),
        (0xFC7B, 'M', u'ثي'),
        (0xFC7C, 'M', u'فى'),
        (0xFC7D, 'M', u'في'),
        (0xFC7E, 'M', u'قى'),
        (0xFC7F, 'M', u'قي'),
        (0xFC80, 'M', u'كا'),
        (0xFC81, 'M', u'كل'),
        (0xFC82, 'M', u'كم'),
        (0xFC83, 'M', u'كى'),
        (0xFC84, 'M', u'كي'),
        (0xFC85, 'M', u'لم'),
        (0xFC86, 'M', u'لى'),
        (0xFC87, 'M', u'لي'),
        (0xFC88, 'M', u'ما'),
        (0xFC89, 'M', u'مم'),
        (0xFC8A, 'M', u'نر'),
    ]
def _seg_46():
return [
(0xFC8B, 'M', u'نز'),
(0xFC8C, 'M', u'نم'),
(0xFC8D, 'M', u'نن'),
(0xFC8E, 'M', u'نى'),
(0xFC8F, 'M', u'ني'),
(0xFC90, 'M', u'ىٰ'),
(0xFC91, 'M', u'ير'),
(0xFC92, 'M', u'يز'),
(0xFC93, 'M', u'يم'),
(0xFC94, 'M', u'ين'),
(0xFC95, 'M', u'يى'),
(0xFC96, 'M', u'يي'),
(0xFC97, 'M', u'ئج'),
(0xFC98, 'M', u'ئح'),
(0xFC99, 'M', u'ئخ'),
(0xFC9A, 'M', u'ئم'),
(0xFC9B, 'M', u'ئه'),
(0xFC9C, 'M', u'بج'),
(0xFC9D, 'M', u'بح'),
(0xFC9E, 'M', u'بخ'),
(0xFC9F, 'M', u'بم'),
(0xFCA0, 'M', u'به'),
(0xFCA1, 'M', u'تج'),
(0xFCA2, 'M', u'تح'),
(0xFCA3, 'M', u'تخ'),
(0xFCA4, 'M', u'تم'),
(0xFCA5, 'M', u'ته'),
(0xFCA6, 'M', u'ثم'),
(0xFCA7, 'M', u'جح'),
(0xFCA8, 'M', u'جم'),
(0xFCA9, 'M', u'حج'),
(0xFCAA, 'M', u'حم'),
(0xFCAB, 'M', u'خج'),
(0xFCAC, 'M', u'خم'),
(0xFCAD, 'M', u'سج'),
(0xFCAE, 'M', u'سح'),
(0xFCAF, 'M', u'سخ'),
(0xFCB0, 'M', u'سم'),
(0xFCB1, 'M', u'صح'),
(0xFCB2, 'M', u'صخ'),
(0xFCB3, 'M', u'صم'),
(0xFCB4, 'M', u'ضج'),
(0xFCB5, 'M', u'ضح'),
(0xFCB6, 'M', u'ضخ'),
(0xFCB7, 'M', u'ضم'),
(0xFCB8, 'M', u'طح'),
(0xFCB9, 'M', u'ظم'),
(0xFCBA, 'M', u'عج'),
(0xFCBB, 'M', u'عم'),
(0xFCBC, 'M', u'غج'),
(0xFCBD, 'M', u'غم'),
(0xFCBE, 'M', u'فج'),
(0xFCBF, 'M', u'فح'),
(0xFCC0, 'M', u'فخ'),
(0xFCC1, 'M', u'فم'),
(0xFCC2, 'M', u'قح'),
(0xFCC3, 'M', u'قم'),
(0xFCC4, 'M', u'كج'),
(0xFCC5, 'M', u'كح'),
(0xFCC6, 'M', u'كخ'),
(0xFCC7, 'M', u'كل'),
(0xFCC8, 'M', u'كم'),
(0xFCC9, 'M', u'لج'),
(0xFCCA, 'M', u'لح'),
(0xFCCB, 'M', u'لخ'),
(0xFCCC, 'M', u'لم'),
(0xFCCD, 'M', u'له'),
(0xFCCE, 'M', u'مج'),
(0xFCCF, 'M', u'مح'),
(0xFCD0, 'M', u'مخ'),
(0xFCD1, 'M', u'مم'),
(0xFCD2, 'M', u'نج'),
(0xFCD3, 'M', u'نح'),
(0xFCD4, 'M', u'نخ'),
(0xFCD5, 'M', u'نم'),
(0xFCD6, 'M', u'نه'),
(0xFCD7, 'M', u'هج'),
(0xFCD8, 'M', u'هم'),
(0xFCD9, 'M', u'هٰ'),
(0xFCDA, 'M', u'يج'),
(0xFCDB, 'M', u'يح'),
(0xFCDC, 'M', u'يخ'),
(0xFCDD, 'M', u'يم'),
(0xFCDE, 'M', | |
unit = 1
else:
unit = u.Unit(unitstr)
value = fitsext[keyword]
is_string = isinstance(value, str)
is_iterable = isinstance(value, Iterable)
if is_string or (is_iterable and isinstance(value[0], str)):
return value
else:
return value * unit
def adjust_temperature_size_rough(temp, comparison_array):
    """Adjust the size of the temperature array by cropping or edge-padding.

    If ``temp`` is longer than ``comparison_array``, a centered slice of
    ``temp`` is returned.  If it is shorter, ``temp`` is centered in a new
    array (with the dtype of ``comparison_array``, as before) and the edges
    are filled with the first/last values of ``temp``.

    Parameters
    ----------
    temp : array-like
        Temperature values to resize.
    comparison_array : array-like
        Array whose length the result must match.

    Returns
    -------
    numpy.ndarray
        Array with the same length as ``comparison_array``.

    Examples
    --------
    >>> temp = [1, 2, 3, 4]
    >>> adjust_temperature_size_rough(temp, [5, 6, 7])
    array([1, 2, 3])
    >>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4])
    array([1, 2, 3, 4, 4])
    >>> adjust_temperature_size_rough(temp, [5, 6])
    array([2, 3])
    >>> adjust_temperature_size_rough(temp, [5, 6, 7, 5, 4, 6])
    array([1, 1, 2, 3, 4, 4])
    """
    # np.asarray + ndarray slicing/copying replace the previous
    # function-local ``import copy`` / ``copy.deepcopy`` round-trip.
    temp = np.asarray(temp)
    comparison_array = np.asarray(comparison_array)
    sizediff = temp.size - comparison_array.size
    if sizediff > 0:
        # Too long: keep a centered slice.
        start = sizediff // 2
        return temp[start : start + comparison_array.size]
    if sizediff < 0:
        # Too short: center ``temp`` and extend both edges with its
        # first/last values (rough nearest-edge padding).
        sizediff = -sizediff
        start = sizediff // 2
        padded = np.zeros_like(comparison_array)
        padded[start : start + temp.size] = temp
        padded[:start] = temp[0]
        padded[start + temp.size :] = temp[-1]
        return padded
    return temp
def adjust_temperature_size(temp, comparison_array):
    """Resample ``temp`` to the length of ``comparison_array``.

    The values are linearly interpolated over a normalized [0, 1] grid,
    so the first and last temperatures are always preserved.

    Examples
    --------
    >>> temp = [1, 2, 3, 4]
    >>> np.allclose(adjust_temperature_size(temp, [5, 6]), [1.0, 4.0])
    True
    >>> temp = [1, 2, 3, 4]
    >>> np.allclose(adjust_temperature_size(temp, [5, 6, 4, 5]), temp)
    True
    """
    values = np.asarray(temp)
    reference = np.asarray(comparison_array)
    n_in = values.shape[0]
    n_out = reference.shape[0]
    # Nothing to do when the lengths already agree.
    if n_in == n_out:
        return values
    interpolator = interp1d(np.linspace(0, 1, n_in), values)
    return interpolator(np.linspace(0, 1, n_out))
# from memory_profiler import profile
# @profile
def _read_data_fitszilla(lchdulist):
"""Open a fitszilla FITS file and read all relevant information."""
is_new_fitszilla = np.any(["coord" in i.name.lower() for i in lchdulist])
# ----------- Extract generic observation information ------------------
headerdict = dict(lchdulist[0].header.items())
source = lchdulist[0].header["SOURCE"]
site = lchdulist[0].header["ANTENNA"].lower()
receiver = lchdulist[0].header["RECEIVER CODE"]
ra = lchdulist[0].header["RIGHTASCENSION"] * u.rad
dec = lchdulist[0].header["DECLINATION"] * u.rad
ra_offset = dec_offset = az_offset = el_offset = 0 * u.rad
if "RightAscension Offset" in lchdulist[0].header:
ra_offset = lchdulist[0].header["RightAscension Offset"] * u.rad
if "Declination Offset" in lchdulist[0].header:
dec_offset = lchdulist[0].header["Declination Offset"] * u.rad
if "Azimuth Offset" in lchdulist[0].header:
az_offset = lchdulist[0].header["Azimuth Offset"] * u.rad
if "Elevation Offset" in lchdulist[0].header:
el_offset = lchdulist[0].header["Elevation Offset"] * u.rad
# ----------- Read the list of channel ids ------------------
section_table_data = lchdulist["SECTION TABLE"].data
chan_ids = get_value_with_units(section_table_data, "id")
nbin_per_chan = get_value_with_units(section_table_data, "bins")
sample_rate = get_value_with_units(section_table_data, "sampleRate")
try:
bw_section = get_value_with_units(section_table_data, "bandWidth")
fr_section = get_value_with_units(section_table_data, "frequency")
except KeyError:
bw_section = None
fr_section = None
integration_time = lchdulist["SECTION TABLE"].header["Integration"] * u.ms
if len(list(set(nbin_per_chan))) > 1:
raise ValueError(
"Only datasets with the same nbin per channel are "
"supported at the moment"
)
nbin_per_chan = list(set(nbin_per_chan))[0]
types = get_value_with_units(section_table_data, "type")
if "stokes" in types:
is_polarized = True
else:
is_polarized = False
# Check. If backend is not specified, use Total Power
try:
backend = lchdulist[0].header["BACKEND NAME"]
except Exception:
if "stokes" in types:
if nbin_per_chan == 2048:
backend = "XARCOS"
else:
backend = "SARDARA"
elif "spectra" in types:
backend = "SARDARA"
else:
backend = "TP"
# ----------- Read the list of RF inputs, feeds, polarization, etc. --
rf_input_data = lchdulist["RF INPUTS"].data
feeds = get_value_with_units(rf_input_data, "feed")
IFs = get_value_with_units(rf_input_data, "ifChain")
polarizations = get_value_with_units(rf_input_data, "polarization")
sections = get_value_with_units(rf_input_data, "section")
frequencies_rf = get_value_with_units(rf_input_data, "frequency")
bandwidths_rf = get_value_with_units(rf_input_data, "bandWidth")
local_oscillator = get_value_with_units(rf_input_data, "localOscillator")
try:
cal_mark_temp = get_value_with_units(rf_input_data, "calibrationMark")
except KeyError:
# Old, stupid typo
cal_mark_temp = get_value_with_units(rf_input_data, "calibratonMark")
if bw_section is not None:
bandwidths_section = [bw_section[i] for i in sections]
frequencies_section = [fr_section[i] for i in sections]
frequencies_section = [
f + l for (f, l) in zip(frequencies_section, local_oscillator)
]
if backend == "TP" or bw_section is None:
frequencies, bandwidths = frequencies_rf, bandwidths_rf
else:
frequencies, bandwidths = frequencies_section, bandwidths_section
combinations = list(zip(frequencies, bandwidths))
combination_idx = np.arange(len(combinations))
# Solve stupid problem with old CCB data
if receiver.lower() == "ccb":
feeds[:] = 0
if len(set(combinations)) > 1:
chan_names = [
_chan_name(f, p, c)
for f, p, c in zip(feeds, polarizations, combination_idx)
]
else:
chan_names = [_chan_name(f, p) for f, p in zip(feeds, polarizations)]
# ----- Read the offsets of different feeds (nonzero only if multifeed)--
feed_input_data = lchdulist["FEED TABLE"].data
# Add management of historical offsets.
# Note that we need to add the units by hand in this case.
xoffsets = get_value_with_units(feed_input_data, "xOffset", default="rad")
yoffsets = get_value_with_units(feed_input_data, "yOffset", default="rad")
relpowers = get_value_with_units(feed_input_data, "relativePower")
# -------------- Read data!-----------------------------------------
datahdu = lchdulist["DATA TABLE"]
# N.B.: there is an increase in memory usage here. This is just because
# data are being read from the file at this point, not before.
data_table_data = Table(datahdu.data)
tempdata = Table(lchdulist["ANTENNA TEMP TABLE"].data)
for col in data_table_data.colnames:
if col == col.lower():
continue
data_table_data.rename_column(col, col.lower())
for col in tempdata.colnames:
if col == col.lower():
continue
tempdata.rename_column(col, col.lower())
is_old_spectrum = "SPECTRUM" in list(datahdu.header.values())
if is_old_spectrum:
data_table_data.rename_column("spectrum", "ch0")
sections = np.array([0, 0])
unsupported_temperature = False
if len(tempdata[tempdata.colnames[0]].shape) == 2:
try:
tempdata_new = Table()
for i, (feed, ifnum) in enumerate(zip(feeds, IFs)):
tempdata_new[f"ch{i}"] = tempdata[f"ch{feed}"][:, ifnum]
tempdata = tempdata_new
except Exception: # pragma: no cover
warnings.warn("Temperature format not supported", UserWarning)
unsupported_temperature = True
pass
existing_columns = [
chn for chn in data_table_data.colnames if chn.startswith("ch")
]
if existing_columns == []:
raise ValueError("Invalid data")
is_spectrum = nbin_per_chan > 1
is_single_channel = len(set(combinations)) == 1
good = np.ones(len(feeds), dtype=bool)
for i, s in enumerate(sections):
section_name = "ch{}".format(s)
if section_name not in existing_columns:
good[i] = False
allfeeds = feeds
feeds = allfeeds[good]
IFs = IFs[good]
polarizations = polarizations[good]
sections = sections[good]
if is_spectrum:
nchan = len(chan_ids)
sample_channel = existing_columns[0]
_, nbins = data_table_data[sample_channel].shape
# Development version of SARDARA -- will it remain the same?
if nbin_per_chan == nbins:
IFs = np.zeros_like(IFs)
if nbin_per_chan * nchan * 2 == nbins and not is_polarized:
warnings.warn(
"Data appear to contain polarization information "
"but are classified as simple, not stokes, in the "
"Section table."
)
is_polarized = True
if (
nbin_per_chan != nbins
and nbin_per_chan * nchan != nbins
and nbin_per_chan * nchan * 2 != nbins
and not is_polarized
):
raise ValueError(
"Something wrong with channel subdivision: "
"{} bins/channel, {} channels, "
"{} total bins".format(nbin_per_chan, nchan, nbins)
)
for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
c = s
if is_single_channel:
c = None
section_name = "ch{}".format(s)
ch = _chan_name(f, p, c)
start, end = ic * nbin_per_chan, (ic + 1) * nbin_per_chan
data_table_data[ch] = data_table_data[section_name][:, start:end]
if is_polarized:
# for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
for s in list(set(sections)):
f = feeds[sections == s][0]
c = s
if is_single_channel:
c = None
section_name = "ch{}".format(s)
qname, uname = _chan_name(f, "Q", c), _chan_name(f, "U", c)
qstart, qend = 2 * nbin_per_chan, 3 * nbin_per_chan
ustart, uend = 3 * nbin_per_chan, 4 * nbin_per_chan
data_table_data[qname] = data_table_data[section_name][
:, qstart:qend
]
data_table_data[uname] = data_table_data[section_name][
:, ustart:uend
]
chan_names += [qname, uname]
for f, ic, p, s in zip(feeds, IFs, polarizations, sections):
section_name = "ch{}".format(s)
if section_name in data_table_data.colnames:
data_table_data.remove_column(section_name)
else:
for ic, ch in enumerate(chan_names):
data_table_data[ch] = data_table_data["ch{}".format(chan_ids[ic])]
# ----------- Read temperature data, if possible ----------------
for ic, ch in enumerate(chan_names):
data_table_data[ch + "-Temp"] = 0.0
if unsupported_temperature:
continue
if len(chan_ids) <= ic:
continue
ch_string = f"ch{chan_ids[ic]}"
if ch_string not in tempdata.colnames:
continue
td = np.asarray(tempdata[ch_string])
data_table_data[ch + "-Temp"] = adjust_temperature_size(
td, data_table_data[ch + "-Temp"]
)
info_to_retrieve = [
"time",
"derot_angle",
"weather",
"par_angle",
"flag_track",
"flag_cal",
] + [ch + "-Temp" for ch in chan_names]
new_table = Table()
new_table.meta.update(headerdict)
new_table.meta["SOURCE"] = source
new_table.meta["site"] = site
new_table.meta["backend"] = backend
new_table.meta["receiver"] = receiver
new_table.meta["RA"] = ra
new_table.meta["Dec"] = dec
new_table.meta["channels"] = nbin_per_chan
new_table.meta["VLSR"] = new_table.meta["VLSR"] * u.Unit("km/s")
for i, off in zip(
"ra,dec,el,az".split(","),
[ra_offset, dec_offset, el_offset, az_offset],
):
new_table.meta[i + "_offset"] = off
for info in info_to_retrieve:
new_table[info] = data_table_data[info]
if not _check_derotator(new_table["derot_angle"]):
log.debug("Derotator angle looks weird. Setting to 0")
new_table["derot_angle"][:] = 0
# Duplicate raj and decj columns (in order to be corrected later)
Nfeeds = np.max(allfeeds) + 1
new_table["ra"] = np.tile(
data_table_data["raj2000"], (Nfeeds, 1)
).transpose()
new_table["dec"] = np.tile(
| |
<reponame>dougthor42/pynuget
# -*- coding: utf-8 -*-
"""
"""
import os
import re
import shutil
import subprocess
import sys
from datetime import datetime as dt
from pathlib import Path
import requests
from sqlalchemy import create_engine
from pynuget import db
from pynuget import _logging
logger = _logging.setup_logging(True, False, "./pynuget-cli.log")
def init(server_path, package_dir, db_name, db_backend, apache_config,
         replace_wsgi=False, replace_apache=False):
    """
    Initialize the PyNuGet server.

    Parameters
    ----------
    server_path : str
        The directory that you want the server to live in. Must be an
        absolute path. Defaults to /var/www/pynuget
    package_dir : str
        The directory that the packages will be saved in and served from. If
        this value is not an absolute path, `server_path` will be used as
        the root. Defaults to 'nuget_packages' in `server_path`
    db_name : str
        The name of the database to use. If db_backend == 'sqlite', then
        this is the relative or absolute path to the SQLite file to use. If
        db_backend != 'sqlite', then this is the name of the Schema to create.
        Defaults to 'packages.db' in `server_path`.
    db_backend : str
        One of ('sqlite', 'mysql', 'postgresql'). Defaults to 'sqlite'.
    apache_config : str
        The name of the apache configuration file. Defaults to 'pynuget.conf'.
    replace_wsgi : bool
        If True, replace an existing wsgi.py without prompting.
    replace_apache : bool
        If True, replace an existing Apache site config without prompting.

    Returns
    -------
    bool
        Always True on success.

    Details
    -------
    + Create /var/www/pynuget if it doesn't exist.
    + Create /var/www/pynuget/package_files if it doesn't exist
    + Create /var/www/pynuget/wsgi.py if it doesn't exist
    + Copy Apache config if it doesn't exist
    + Enable Apache site.
    + Create the DB file or schema if it doesn't exist.
    """
    # Snapshot the call arguments *before* any other local is created, so
    # that only the parameters end up in the saved configuration.
    args = dict(locals())
    _check_permissions()
    _create_directories(server_path, package_dir, "/var/log/pynuget")
    _create_db(db_backend, db_name, server_path)
    wsgi_file = _copy_wsgi(server_path, replace_wsgi)
    _update_wsgi(wsgi_file)
    conf = _copy_apache_config(apache_config, replace_apache)
    _enable_apache_conf(conf.resolve())
    default_config_file = Path(__file__).parent / "default_config.py"
    _save_config(default_config_file, **args)
    _set_permissions([server_path, "/var/log/pynuget"])
    _reload_apache()
    return True
def clear(server_path, force=False):
    """
    Delete all packages and recreate the empty database and directories.

    Parameters
    ----------
    server_path : str
        The server directory, which must contain the ``config.py`` file.
    force : bool
        If True, skip the interactive confirmation prompt.  (Previously
        this parameter existed but was ignored.)

    Returns
    -------
    bool
        Always False.
    """
    # Confirm, unless the caller explicitly asked to skip the prompt.
    if not force:
        ans = input("Are you sure you want to delete all packages? [yN]")
        if ans.lower() in ('y', 'yes'):
            pass
        elif ans.lower() in ('n', 'no'):
            logger.debug("User aborted.")
            sys.exit(0)
        else:
            # Anything else (including an empty answer) aborts with an error.
            logger.debug("Unknown response '%s'. Aborting." % ans)
            sys.exit(1)
    # Read the config file to find our other locations
    # TODO: I hate this...
    sys.path.append(server_path)
    import config
    # Delete all the packages
    pkg_path = Path(config.SERVER_PATH) / Path(config.PACKAGE_DIR)
    try:
        shutil.rmtree(str(pkg_path))
    except FileNotFoundError:
        logger.warn("Path '%s' does not exist." % str(pkg_path))
    # Delete/drop the database
    if config.DB_BACKEND == 'sqlite':
        sqlite_path = Path(config.SERVER_PATH) / Path(config.DB_NAME)
        try:
            sqlite_path.unlink()
        except FileNotFoundError:
            logger.warn("Path '%s' does not exist." % str(pkg_path))
    # And receate the directories and database based on the config file.
    logger.info("Recreating database and package dir.")
    _create_directories(server_path, config.PACKAGE_DIR, "/var/log/pynuget")
    _create_db(config.DB_BACKEND, config.DB_NAME, server_path)
    return False
def rebuild():
    """Rebuild the package database.

    Not yet implemented: everything below the ``raise`` is a sketch of the
    intended reconciliation (DB contents vs. on-disk packages) and is
    currently unreachable.  It also references names (``session``) that do
    not exist yet -- see the TODO below.
    """
    raise NotImplementedError
    import config
    # First let's get a list of all the packages in the database.
    # TODO: create the session.
    logger.debug("Getting database packages and versions.")
    db_data = db.search_packages(session, include_prerelease=True)
    db_data = _db_data_to_dict(db_data)
    # Then we'll get the list of all the packages in the package directory
    # Same data structure as the db data.
    pkg_path = Path(config.SERVER_PATH) / Path(config.PACKAGE_DIR)
    file_data = _get_packages_from_files(pkg_path)
    _add_packages_to_db(file_data)
    _remove_packages_from_db(file_data, db_data)
    return False
def push(file, source, key):
    """
    Push a package to a nuget server.

    Mimics the basic functionality of::

      nuget.exe push -ApiKey $key -Source $source $file

    This is needed because the standard NuGet.exe is not sending
    Content-Length in the header during HTTP PUT. I also can't figure out how
    to get Apache to ignore Content-Length (as it's a SHOULD not a MUST in
    the spec), and the Content-Length apache direcive isn't available until
    Apache 2.5...

    Parameters
    ----------
    file : str
        The path to the file to upload
    source : str
        The URL for the (py)NuGet server to push to.
    key : str
        The ApiKey value.

    Returns
    -------
    bool or None
        True on HTTP 201, False on any other status, None if `file` does
        not exist.
    """
    logger.debug("push('%s', '%s', '<redacted>')" % (file, source))
    if not Path(file).exists():
        logger.error("File '%s' does not exist. Aborting." % file)
        return
    header = {
        'X-Nuget-ApiKey': key,
        'User-Agent': 'PyNuGet',
    }
    # Normalize the URL: strip a single trailing slash before appending
    # the API endpoint.
    if source[-1] == "/":
        source = source[:-1]
    source += '/api/v2/package/'
    # Use a context manager so the file handle is always closed -- the
    # previous version leaked the handle opened for the multipart upload.
    with open(file, 'rb') as pkg_file:
        files = {'package': pkg_file}
        resp = requests.put(source, headers=header, files=files)
    logger.debug("{} {}".format(resp, resp.text))
    # 201 = Item Created. Means we were successful.
    return resp.status_code == 201
def _create_dir(path):
    """
    Create (and chown to www-data) a given directory.

    Parameters
    ----------
    path : pathlib.Path

    Returns
    -------
    None
    """
    logger.debug("Creating '%s'" % path)
    try:
        # u=rwx,g=srwx,o=rx
        path.mkdir(parents=True, exist_ok=True)
    except PermissionError:
        # Log enough context to diagnose the failure, then give up on
        # this directory (no chown attempt).
        logger.warn("Unable to make dir %s" % path)
        logger.warn(path.parent.stat())
        logger.warn("Parent Mode: %s" % (path.parent.stat().st_mode & 0o0777))
        return
    # Only attempt the ownership change once the directory exists.
    try:
        shutil.chown(str(path), 'www-data', 'www-data')
    except PermissionError:
        logger.warn("Unable to change owner of %s" % path)
def _replace_prompt(path):
    """Ask the user whether *path* should be replaced; True means replace.

    An empty answer defaults to "no"; any unrecognized answer re-prompts.
    """
    yes_answers = ('y', 'yes')
    no_answers = ('n', 'no', '')
    while True:
        answer = input("{} already exists. Replace? [yN] ".format(path)).lower()
        if answer in yes_answers:
            return True
        if answer in no_answers:
            return False
        print("Invalid answer. Please answer 'yes' or 'no'.")
def _now_str():
"""Return the current local time as a str for appending to a filename."""
return dt.now().strftime("%y%m%d-%H%M%S")
def _copy_file_with_replace_prompt(src, dst, replace=None):
    """
    Copy a file, prompting to replace if it exists. Always save old file.

    Parameters
    ----------
    src : :class:`pathlib.Path`
        The source file to copy.
    dst : :class:`pathlib.Path`
        The location to copy to.
    replace : bool or None
        If None, prompt the user interactively.

    Returns
    -------
    modified : bool
        If True, the file was modified (created or replaced). If False, the
        existing file was not modified.
    """
    # Resolve the replace decision up front (possibly via a prompt).
    if replace is None:
        replace = _replace_prompt(dst)
    if not dst.exists():
        logger.debug("Copying new file to {}".format(dst))
        shutil.copy(str(src.resolve()), str(dst))
        return True
    logger.debug("Path {} already exists".format(dst))
    if not replace:
        logger.debug("Not replacing")
        return False
    logger.debug("Replacing (and saving previous version)")
    # Keep the previous file around by renaming it with a timestamp suffix.
    dst.rename(Path(str(dst) + "." + _now_str()))
    shutil.copy(str(src.resolve()), str(dst))
    return True
def _db_data_to_dict(db_data):
    """
    Convert the result of db.search_packages into a dict of
    {'pkg': ['vers1', 'vers2', ...]}

    Parameters
    ----------
    db_data : iterable
        Rows exposing ``.package.name`` and ``.version`` attributes.

    Returns
    -------
    dict
        Maps each package name to the list of its versions.
    """
    data = {}
    for row in db_data:
        # setdefault replaces the old try/except-KeyError list-init probe.
        data.setdefault(row.package.name, []).append(row.version)
    logger.debug("Found %d database packages." % len(data))
    logger.debug("Found %d database versions." % sum(len(v) for v
                                                     in data.values()))
    return data
def _get_packages_from_files(pkg_path):
    """
    Get a list of packages from the package directory.

    Walks the tree; for each visited directory, the path relative to
    ``pkg_path`` is interpreted as ``<package>/<version>`` (parent is the
    package name, leaf is the version) -- presumably matching the layout
    the server writes; confirm against the upload code.

    Parameters
    ----------
    pkg_path : :class:`pathlib.Path` or str
        The path to the package directory.

    Returns
    -------
    data : dict
        Dict of {'pkg_name': ['vers1', 'vers2', ...], ...}
    """
    logger.debug("Getting list of packages in package dir.")
    if not isinstance(pkg_path, Path):
        pkg_path = Path(pkg_path)
    data = {}
    # XXX: There's got to be a better way!
    for root, dirs, _ in os.walk(str(pkg_path)):
        rel_path = Path(root).relative_to(pkg_path)
        pkg = str(rel_path.parent)
        # '.' means we're at the top level or a package dir, not a version.
        if pkg != '.':
            # setdefault replaces the old try/except-KeyError probe.
            data.setdefault(pkg, []).append(rel_path.name)
    logger.debug("Found %d packages." % len(data))
    logger.debug("Found %d versions." % sum(len(v) for v in data.values()))
    return data
def _add_packages_to_db(file_data):
    """Insert on-disk packages/versions that are missing from the database.

    Not yet implemented: the loop below the ``raise`` is an unreachable
    sketch and references names (``package_in_db``, ``version_in_db``,
    ``session``) that are not defined anywhere yet.
    """
    logger.debug("Adding packages to database.")
    raise NotImplementedError
    for pkg, versions in file_data.items():
        # TODO
        # Check that the package exists in the database.
        if not package_in_db(pkg):
            db.insert_or_update_package(session, None, pkg, versions[0])
        for version in versions:
            if not version_in_db(pkg, version):
                db.insert_version(session, package_id=None, title=pkg,
                                  version=version)
def _remove_packages_from_db(file_data, db_data):
    """Delete database packages/versions that no longer exist on disk.

    Not yet implemented: the loop below the ``raise`` is an unreachable
    sketch of the intended reconciliation.
    """
    logger.debug("Removing packages from database.")
    raise NotImplementedError
    for pkg, versions in db_data.items():
        if pkg not in file_data.keys():
            db.delete_version(pkg)
        else:
            for version in versions:
                if version not in file_data[pkg]:
                    db.delete_version(pkg, version)
def _check_permissions():
    """Warn (do not raise) when we're not running as root/sudo.

    Note: despite the original docstring, no PermissionError is raised --
    the script just warns and continues.
    """
    if os.getuid() != 0:
        logger.warn("This script probably needs `sudo`. Trying anyway.")
def _set_permissions(paths):
    """
    Recursively set owner (www-data:www-data) and mode (2775) on each path.

    Failures are logged with a traceback but do not abort processing of
    the remaining commands or paths.

    Parameters
    ----------
    paths : iterable of str or iterable of pathlib.Path objects
    """
    logger.debug("Setting owner and permissions for %s" % paths)
    for path in paths:
        # Run chown and chmod independently, so a chown failure still
        # lets us attempt the chmod (same behavior as the two separate
        # try blocks this replaces).
        for cmd in (['chown', '-R', 'www-data:www-data', str(path)],
                    ['chmod', '-R', '2775', str(path)]):
            try:
                subprocess.run(cmd, check=True)
            except subprocess.CalledProcessError:
                # Fixed typo in the log message (was "Unlable").
                logger.exception("Unable to %s %s" % (cmd[0], path))
def _create_directories(server_path, package_dir, log_dir):
"""Create the server directories if they don't exist."""
logger.info("Creating directories (if they don't exist).")
server_path = Path(server_path)
package_dir = Path(package_dir)
log_path = Path(log_dir)
if not server_path.is_absolute():
server_path = Path.cwd() / server_path
logger.warn("'server_path' is not absolute, setting to %s" % server_path)
if not package_dir.is_absolute():
package_dir = server_path / package_dir
logger.warn("'package_dir' is not absolue, setting to %s" % package_dir)
if not log_path.is_absolute():
log_path = Path.cwd() / log_dir
logger.warn("'log_dir' is not absolute, setting to %s" % log_path)
_create_dir(server_path)
| |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for serializing TensorFlow computations."""
import os
import os.path
import shutil
import tempfile
import types
from typing import Dict, Optional, Set, MutableSequence
import zipfile
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.context_stack import context_stack_base
from tensorflow_federated.python.core.impl.tensorflow_context import tensorflow_computation_context
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import function_utils
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
from tensorflow_federated.python.tensorflow_libs import variable_utils
class SerializationError(Exception):
  """Raised when value serialization or deserialization fails."""
def finalize_binding(binding, tensor_info_map):
  """Mutates binding by filling in actual tensor names.

  Recursively walks the binding message: oneof wrappers are unwrapped,
  struct bindings are visited element-by-element, and each leaf tensor
  binding has its placeholder `tensor_name` replaced with the final name
  taken from `tensor_info_map`.

  Args:
    binding: A `pb.Binding` or one of its submessages.
    tensor_info_map: A dict mapping the placeholder `tensor_name`s found in
      `binding` to final tensor names.

  Raises:
    ValueError: If the binding is empty while the map is not, if a
      placeholder name is missing from `tensor_info_map`, or if an
      unsupported binding message type is encountered.
  """
  if not binding:
    # An empty binding is only consistent with an empty map; anything else
    # means the caller's binding and tensor info are out of sync.
    if tensor_info_map:
      raise ValueError('Empty binding, but non-empty tensor_info_map {}'.format(
          tensor_info_map))
    return
  if isinstance(binding, pb.TensorFlow.Binding):
    # Unwrap the oneof and recurse on whichever variant is set.
    sub_binding = getattr(binding, binding.WhichOneof('binding'))
    finalize_binding(sub_binding, tensor_info_map)
  elif isinstance(binding, pb.TensorFlow.TensorBinding):
    # Leaf case: rewrite the placeholder name in place.
    name = binding.tensor_name
    if name not in tensor_info_map:
      raise ValueError(
          'Did not find tensor_name {} in provided tensor_info_map with keys {}'
          .format(name, list(tensor_info_map.keys())))
    binding.tensor_name = tensor_info_map[name].name
  elif isinstance(binding, pb.TensorFlow.StructBinding):
    for sub_binding in binding.element:
      finalize_binding(sub_binding, tensor_info_map)
  else:
    raise ValueError('Unsupported binding type {}'.format(
        py_typecheck.type_string(type(binding))))
def serialize_tf2_as_tf_computation(target, parameter_type, unpack=None):
  """Serializes the 'target' as a TF computation with a given parameter type.

  Args:
    target: The entity to convert into and serialize as a TF computation. This
      can currently only be a Python function or `tf.function`, with arguments
      matching the 'parameter_type'.
    parameter_type: The parameter type specification if the target accepts a
      parameter, or `None` if the target doesn't declare any parameters. Either
      an instance of `types.Type`, or something that's convertible to it by
      `types.to_type()`.
    unpack: Whether to always unpack the parameter_type. Necessary for support
      of polymorphic tf2_computations.

  Returns:
    A tuple of the constructed `pb.Computation` instance (with the
    `pb.TensorFlow` variant set) and the annotated
    `computation_types.FunctionType` of the computation.

  Raises:
    TypeError: If the arguments are of the wrong types.
    ValueError: If the signature of the target is not compatible with the given
      parameter type.
  """
  py_typecheck.check_callable(target)
  parameter_type = computation_types.to_type(parameter_type)
  signature = function_utils.get_signature(target)
  if signature.parameters and parameter_type is None:
    raise ValueError(
        'Expected the target to declare no parameters, found {!r}.'.format(
            signature.parameters))
  # In the codepath for TF V1 based serialization (tff.tf_computation),
  # we get the "wrapped" function to serialize. Here, target is the
  # raw function to be wrapped; however, we still need to know if
  # the parameter_type should be unpacked into multiple args and kwargs
  # in order to construct the TensorSpecs to be passed in the call
  # to get_concrete_fn below.
  unpack = function_utils.infer_unpack_needed(target, parameter_type, unpack)
  arg_typespecs, kwarg_typespecs, parameter_binding = (
      tensorflow_utils.get_tf_typespec_and_binding(
          parameter_type,
          arg_names=list(signature.parameters.keys()),
          unpack=unpack))

  # Pseudo-global to be appended to once when target_poly below is traced.
  type_and_binding_slot = []

  # N.B. To serialize a tf.function or eager python code,
  # the return type must be a flat list, tuple, or dict. However, the
  # tff.tf_computation must be able to handle structured inputs and outputs.
  # Thus, we intercept the result of calling the original target fn, introspect
  # its structure to create a result_type and bindings, and then return a
  # flat dict output. It is this new "unpacked" tf.function that we will
  # serialize using tf.saved_model.save.
  #
  # TODO(b/117428091): The return type limitation is primarily a limitation of
  # SignatureDefs and therefore of the signatures argument to
  # tf.saved_model.save. tf.functions attached to objects and loaded back with
  # tf.saved_model.load can take/return nests; this might offer a better
  # approach to the one taken here.
  @tf.function
  def target_poly(*args, **kwargs):
    result = target(*args, **kwargs)
    result_dict, result_type, result_binding = (
        tensorflow_utils.get_tf2_result_dict_and_binding(result))
    assert not type_and_binding_slot
    # A "side channel" python output.
    type_and_binding_slot.append((result_type, result_binding))
    return result_dict

  # Triggers tracing so that type_and_binding_slot is filled.
  cc_fn = target_poly.get_concrete_function(*arg_typespecs, **kwarg_typespecs)
  assert len(type_and_binding_slot) == 1
  result_type, result_binding = type_and_binding_slot[0]

  # N.B. Note that cc_fn does *not* accept the same args and kwargs as the
  # Python target_poly; instead, it must be called with **kwargs based on the
  # unique names embedded in the TensorSpecs inside arg_typespecs and
  # kwarg_typespecs. The (preliminary) parameter_binding tracks the mapping
  # between these tensor names and the components of the (possibly nested) TFF
  # input type. When cc_fn is serialized, concrete tensors for each input are
  # introduced, and the call finalize_binding(parameter_binding,
  # sigs['serving_default'].inputs) updates the bindings to reference these
  # concrete tensors.

  # Associate vars with unique names and explicitly attach to the Checkpoint:
  var_dict = {
      'var{:02d}'.format(i): v for i, v in enumerate(cc_fn.graph.variables)
  }
  saveable = tf.train.Checkpoint(fn=target_poly, **var_dict)

  # Create the temporary directory *before* entering the try block: if
  # mkdtemp itself fails there is nothing to clean up yet, and the finally
  # clause below can then safely assume `outdir` is defined. (Previously
  # mkdtemp lived inside the try, so a failure there raised NameError in
  # the finally clause instead of the real error.)
  outdir = tempfile.mkdtemp('savedmodel')
  try:
    # TODO(b/122081673): All we really need is the meta graph def, we could
    # probably just load that directly, e.g., using parse_saved_model from
    # tensorflow/python/saved_model/loader_impl.py, but I'm not sure we want to
    # depend on that presumably non-public symbol. Perhaps TF can expose a way
    # to just get the MetaGraphDef directly without saving to a tempfile? This
    # looks like a small change to v2.saved_model.save().
    tf.saved_model.save(saveable, outdir, signatures=cc_fn)

    graph = tf.Graph()
    with tf.compat.v1.Session(graph=graph) as sess:
      mgd = tf.compat.v1.saved_model.load(
          sess, tags=[tf.saved_model.SERVING], export_dir=outdir)
  finally:
    shutil.rmtree(outdir)
  sigs = mgd.signature_def

  # TODO(b/123102455): Figure out how to support the init_op. The meta graph def
  # contains sigs['__saved_model_init_op'].outputs['__saved_model_init_op']. It
  # probably won't do what we want, because it will want to read from
  # Checkpoints, not just run Variable initializers (?). The right solution may
  # be to grab the target_poly.get_initialization_function(), and save a sig for
  # that.

  # Now, traverse the signature from the MetaGraphDef to find
  # the actual tensor names and write them into the bindings.
  finalize_binding(parameter_binding, sigs['serving_default'].inputs)
  finalize_binding(result_binding, sigs['serving_default'].outputs)

  annotated_type = computation_types.FunctionType(parameter_type, result_type)

  return pb.Computation(
      type=pb.Type(
          function=pb.FunctionType(
              parameter=type_serialization.serialize_type(parameter_type),
              result=type_serialization.serialize_type(result_type))),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(mgd.graph_def),
          parameter=parameter_binding,
          result=result_binding)), annotated_type
def serialize_py_fn_as_tf_computation(target, parameter_type, context_stack):
"""Serializes the 'target' as a TF computation with a given parameter type.
See also `serialize_tf2_as_tf_computation` for TensorFlow 2
serialization.
Args:
target: The entity to convert into and serialize as a TF computation. This
can currently only be a Python function. In the future, we will add here
support for serializing the various kinds of non-eager and eager
functions, and eventually aim at full support for and compliance with TF
2.0. This function is currently required to declare either zero parameters
if `parameter_type` is `None`, or exactly one parameter if it's not
`None`. The nested structure of this parameter must correspond to the
structure of the 'parameter_type'. In the future, we may support targets
with multiple args/keyword args (to be documented in the API and
referenced from here).
parameter_type: The parameter type specification if the target accepts a
parameter, or `None` if the target doesn't declare any parameters. Either
an instance of `types.Type`, or something that's convertible to it by
`types.to_type()`.
context_stack: The context stack to use.
Returns:
A tuple of (`pb.Computation`, `tff.Type`), where the computation contains
the instance with the `pb.TensorFlow` variant set, and the type is an
instance of `tff.Type`, potentially including Python container annotations,
for use by TensorFlow computation wrappers.
Raises:
TypeError: If the arguments are of the wrong types.
ValueError: If the signature of the target is not compatible with the given
parameter type.
"""
# TODO(b/113112108): Support a greater variety of target type signatures,
# with keyword args or multiple args corresponding to elements of a tuple.
# Document all accepted forms with examples in the API, and point to there
# from here.
py_typecheck.check_type(target, types.FunctionType)
py_typecheck.check_type(context_stack, context_stack_base.ContextStack)
parameter_type = computation_types.to_type(parameter_type)
signature = function_utils.get_signature(target)
with tf.Graph().as_default() as graph:
if parameter_type is not None:
if len(signature.parameters) != 1:
raise ValueError(
'Expected the target to declare exactly one parameter, found {!r}.'
.format(signature.parameters))
parameter_name = next(iter(signature.parameters))
parameter_value, | |
# Lv2_average_ps_methods.py -- from repo masonng-astro/nicerpy_xrayanalysis
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tues Jul 16 1:48pm 2019
Getting averaged power spectra from M segments to the whole data, where the data
was pre-processed using NICERsoft!
"""
from __future__ import division, print_function
import numpy as np
from scipy import stats, signal
from tqdm import tqdm
import matplotlib.pyplot as plt
from astropy.io import fits
#from presto import binary_psr
import Lv2_presto_subroutines,Lv3_detection_level
import pathlib
import subprocess
import os
import glob
import Lv0_dirs
Lv0_dirs.global_par()
def do_demodulate(eventfile,segment_length,mode,par_file):
    """
    Do orbital demodulation on the original events.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    segment_length - length of the segments
    par_file - orbital parameter file for input into binary_psr
    mode - "all", "t" or "E" ; basically to tell the function where to access files to run do_demodulate
    """
    # NOTE(review): this function calls `binary_psr`, but its import is
    # commented out at the top of the module ('#from presto import binary_psr'),
    # so calling this as-is raises NameError. Re-enable the presto import
    # before using this function.
    TIMEZERO = -1  # NICER clock offset (s) applied before MET -> MJD conversion
    if mode == "all":
        parent_folder = str(pathlib.Path(eventfile).parent) + '/'
    elif mode == "t":
        parent_folder = str(pathlib.Path(eventfile).parent) + '/accelsearch_' + str(segment_length) + 's/'
    elif mode == "E":
        parent_folder = str(pathlib.Path(eventfile).parent) + '/accelsearch_E/'
    else:
        raise ValueError("mode should either of 'all', 't', or 'E'!")
    eventfiles = sorted(glob.glob(parent_folder + '*.evt')) #get absolute paths of all event FITS files
    for i in range(len(eventfiles)): #for every event file (e.g., for each segment)
        oldfile = eventfiles[i] #old event FITS file
        # Skip segments with no events; nothing to demodulate.
        if len(fits.open(oldfile)[1].data['TIME']) == 0:
            continue
        newfile = eventfiles[i][:-4]+'_demod.evt' #new event FITS file, to be demodulated
        # Work on a copy so the original event file is left untouched.
        subprocess.run(['cp',oldfile,newfile])
        with fits.open(newfile,mode='update') as fitsfile_demod:
            MJDREFI = fitsfile_demod[1].header['MJDREFI'] #integer for MJD reference
            MJDREFF = fitsfile_demod[1].header['MJDREFF'] #float decimal for MJD reference
            times = fitsfile_demod[1].data['TIME'] #original time series
            gtis_start = fitsfile_demod[2].data['START'] #original GTI start times
            gtis_stop = fitsfile_demod[2].data['STOP'] #original GTI end times
            times_MJD = MJDREFI + MJDREFF + (TIMEZERO+times)/86400 #converting METs to MJD
            gtis_start_MJD = MJDREFI + MJDREFF + (TIMEZERO+gtis_start)/86400 #converting GTIs in METs to MJD
            gtis_stop_MJD = MJDREFI + MJDREFF + (TIMEZERO+gtis_stop)/86400 #converting GTIs in METs to MJD
            # Demodulate both the event times and the GTIs so they stay consistent.
            times_demod = binary_psr.binary_psr(par_file).demodulate_TOAs(times_MJD) #demodulated event times
            gtis_start_demod = binary_psr.binary_psr(par_file).demodulate_TOAs(gtis_start_MJD) #demodulated GTI start times
            gtis_stop_demod = binary_psr.binary_psr(par_file).demodulate_TOAs(gtis_stop_MJD) #demodulated GTI end times
            fitsfile_demod[1].data['TIME'] = (times_demod - MJDREFI - MJDREFF) * 86400 #convert back to METs
            fitsfile_demod[2].data['START'] = (gtis_start_demod - MJDREFI - MJDREFF) * 86400 #convert back to METs
            fitsfile_demod[2].data['STOP'] = (gtis_stop_demod - MJDREFI - MJDREFF) * 86400 #convert back to METs
            fitsfile_demod.flush()
    return
def do_nicerfits2presto(eventfile,tbin,segment_length):
    """
    Using nicerfits2presto.py to bin the data, and to convert into PRESTO-readable format.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    tbin - size of the bins in time
    segment_length - length of the individual segments for combining power spectra
    """
    parent_folder = str(pathlib.Path(eventfile).parent)
    event_header = fits.open(eventfile)[1].header
    obsid = event_header['OBS_ID'] #used below to collect PRESTO's output files
    # (Removed the unused `obj_name` read of the OBJECT keyword.)
    eventfiles = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*.evt')) #get absolute paths of all demodulated event FITS files
    print('Now converting NICER event FITS files into the PRESTO-readable binary format!')
    for i in tqdm(range(len(eventfiles))):
        # Skip segments that were already converted (the .dat file exists).
        if os.path.exists(eventfiles[i][:-3] + 'dat'):
            continue
        try:
            subprocess.run(['nicerfits2presto.py','--dt='+str(tbin),eventfiles[i]])
        except (ValueError,subprocess.CalledProcessError):
            # Best-effort: a failed conversion of one segment should not stop the rest.
            pass
    # nicerfits2presto.py writes into the CWD; move its products next to the data.
    presto_files = glob.glob('*'+obsid+'*')
    if 'merged' in eventfile:
        presto_files = glob.glob('merged*')
    for i in range(len(presto_files)):
        subprocess.run(['mv',presto_files[i],parent_folder+'/accelsearch_'+str(segment_length)+'s/'])
def edit_inf(eventfile,tbin,segment_length):
    """
    Editing the .inf file, as it seems like accelsearch uses some information from the .inf file!
    Mainly need to edit the "Number of bins in the time series".
    This is only for when we make segments by time though!

    eventfile - path to the event file. Segment .inf files are located relative to it.
    tbin - size of the bins in time
    segment_length - length of the individual segments
    """
    # (Removed the unused FITS header read: obj_name/obsid were never used here,
    # so opening the event file was needless I/O.)
    parent_folder = str(pathlib.Path(eventfile).parent)
    inf_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*.inf')) #not the .evt file; some .evt files will be empty
    no_desired_bins = float(segment_length)/float(tbin)
    print('Editing the .inf files!')
    for i in tqdm(range(len(inf_files))):
        with open(inf_files[i],'r') as inf_file:
            contents = inf_file.read().split('\n')
        # Line index 9 of a PRESTO .inf file holds "Number of bins in the time series".
        nobins_equal = contents[9].index('=') #find the '=' sign for the "Number of bins..." line
        newstring = contents[9][:nobins_equal+1] + ' ' + str(int(no_desired_bins)) #replace old line with new line containing updated number of bins!
        with open(inf_files[i],'w') as inf_file:
            for j in range(len(contents)):
                if j != 9:
                    inf_file.write(contents[j]+'\n')
                else:
                    inf_file.write(newstring+'\n')
    return
def edit_binary(eventfile,tbin,segment_length):
    """
    To pad the binary file so that it will be as long as the desired segment length.
    The value to pad with for each time bin, is the average count rate in THAT segment!
    Jul 10: Do zero-padding instead... so that number of counts is consistent!
    Again, this is only for when we make segments by time!

    eventfile - path to the event file. Segment .dat files are located relative to it.
    tbin - size of the bins in time
    segment_length - length of the individual segments
    """
    # (Removed the unused FITS header read: obj_name/obsid were never used here.)
    parent_folder = str(pathlib.Path(eventfile).parent)
    dat_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*.dat')) #not that order matters here I think, but just in case
    no_desired_bins = float(segment_length)/float(tbin) #TOTAL number of desired bins for the segment
    print('Editing the binary .dat files!')
    for i in tqdm(range(len(dat_files))):
        bins = np.fromfile(dat_files[i],dtype='<f',count=-1) #reads the binary file ; converts to little endian, count=-1 means grab everything
        no_padded = int(no_desired_bins - len(bins)) #number of bins needed to reach the TOTAL number of desired bins
        if no_padded >= 0:
            padding = np.zeros(no_padded,dtype=np.float32) #zero-pad so the number of counts is consistent
            # BUGFIX: use np.concatenate to keep float32. The previous
            # np.array(list(bins) + list(padding)) promoted the data to
            # float64, writing a .dat file in the wrong binary format.
            new_bins = np.concatenate((bins, padding))
            new_bins.tofile(dat_files[i]) #don't need to do mv since obsdir already has absolute path to the SSD
        else:
            new_bins = bins[:int(no_desired_bins)] #truncate the original series; say we had a 1000s segment, but
            #nicerfits2presto went up to 1008s, so take that last 8s away because there's no data in it anyways...
            new_bins.tofile(dat_files[i])
    return
def realfft(eventfile,segment_length):
    """
    Performing PRESTO's realfft on the binned data (.dat)

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    segment_length - length of the individual segments
    """
    parent_folder = str(pathlib.Path(eventfile).parent)
    dat_files = sorted(glob.glob(parent_folder+'/accelsearch_' + str(segment_length) + 's/*.dat')) #not that order matters here I think, but just in case
    # recall that un-truncated data is "*bary.dat", so "*bary_*.dat" is truncated data!
    logfile = parent_folder + '/accelsearch_' + str(segment_length) + 's/realfft.log'
    print('Doing realfft now!')
    with open(logfile,'w') as logtextfile:
        for i in tqdm(range(len(dat_files))):
            # Only transform files that don't already have a .fft product.
            # (Was `== False`; also dropped the redundant close() the original
            # called inside the with-block -- the context manager closes the file.)
            if not os.path.exists(dat_files[i][:-3] + 'fft'):
                output = subprocess.run(['realfft',dat_files[i]],capture_output=True,text=True)
                logtextfile.write(output.stdout)
                logtextfile.write('*------------------------------* \n')
                logtextfile.write(output.stderr)
    return
def presto_dat(eventfile,segment_length,demod,PI1,PI2,t1,t2):
"""
Obtain the dat files that were generated from PRESTO
eventfile - path to the event file. Will extract ObsID from this for the NICER files.
segment_length - length of the segments
demod - whether we're dealing with demodulated data or not!
PI1 - lower bound of PI (not energy in keV!) desired for the energy range
PI2 - upper bound of PI (not energy in keV!) desired for the energy range
t1 - starting time for calculation of averaged power spectra
t2 - ending time for calculation of averaged power spectra
(note that t=0 corresponds to the MET of the FIRST event in the eventfile, so will need to inspect light curve with Lv2_lc.py to get times)
"""
if demod != True and demod != False:
raise ValueError("demod should either be True or False!")
parent_folder = str(pathlib.Path(eventfile).parent)
if PI1 != '': #if we're doing energy cuts instead
dat_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4) + '*.dat'))
demod_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4) + '*demod.dat'))
else:
dat_files = []
demod_files = []
all_dat_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*.dat'))
all_demod_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*demod.dat'))
for i in range(len(all_dat_files)):
if 'E' not in str(pathlib.Path(all_dat_files[i]).name):
dat_files.append(all_dat_files[i])
for i in range(len(all_demod_files)):
if 'E' not in str(pathlib.Path(all_demod_files[i]).name):
demod_files.append(all_demod_files[i])
if t1 != 0 or t2 != 0: #if both starting and ending times are not zero; otherwise default is to use ALL the data in the eventfile
gti_start = int(t1/segment_length)
gti_end = np.ceil(t2/segment_length)
filt_dat_files = np.array([dat_files[i] for i in range(len(dat_files)) if (int(dat_files[i][dat_files[i].index('GTI')+3:dat_files[i].index('GTI')+9]) >= gti_start) and (int(dat_files[i][dat_files[i].index('GTI')+3:dat_files[i].index('GTI')+9]) <= gti_end)])
filt_demod_files = np.array([demod_files[i] for | |
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
    def __new__(
        cls,
        start=None,
        stop=None,
        step=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable = None,
    ) -> RangeIndex:
        # dtype/copy are accepted only for interface homogeneity with other
        # Index types; dtype must describe a signed integer and is otherwise
        # unused, copy is ignored.
        cls._validate_dtype(dtype)
        name = maybe_extract_name(name, start, cls)

        # RangeIndex
        if isinstance(start, RangeIndex):
            return start.copy(name=name)
        elif isinstance(start, range):
            return cls._simple_new(start, name=name)

        # validate the arguments
        if com.all_none(start, stop, step):
            raise TypeError("RangeIndex(...) must be called with integers")

        start = ensure_python_int(start) if start is not None else 0

        # Mirror builtin range(): a lone first argument is interpreted as stop.
        if stop is None:
            start, stop = 0, start
        else:
            stop = ensure_python_int(stop)

        step = ensure_python_int(step) if step is not None else 1
        if step == 0:
            raise ValueError("Step must not be zero")

        rng = range(start, stop, step)
        return cls._simple_new(rng, name=name)
    @classmethod
    def from_range(
        cls, data: range, name=None, dtype: Dtype | None = None
    ) -> RangeIndex:
        """
        Create RangeIndex from a range object.

        Parameters
        ----------
        data : range
            The ``range`` to wrap; its start/stop/step are adopted directly.
        name : Hashable, optional
            Name to be stored in the index.
        dtype : Dtype, optional
            Unused, accepted for homogeneity with other index types.

        Returns
        -------
        RangeIndex

        Raises
        ------
        TypeError
            If `data` is not a built-in ``range``.
        """
        if not isinstance(data, range):
            raise TypeError(
                f"{cls.__name__}(...) must be called with object coercible to a "
                f"range, {repr(data)} was passed"
            )
        cls._validate_dtype(dtype)
        return cls._simple_new(data, name=name)
    @classmethod
    def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
        """
        Fastpath constructor: wrap an existing ``range`` without validation.

        Bypasses ``__new__`` argument processing entirely; `values` must
        already be a built-in ``range`` object.
        """
        result = object.__new__(cls)

        assert isinstance(values, range)

        result._range = values
        result._name = name
        # Start with an empty cache for the lazily-computed attributes
        # (e.g. ``_data``) managed by cache_readonly.
        result._cache = {}
        result._reset_identity()
        return result
# --------------------------------------------------------------------
    @cache_readonly
    def _constructor(self) -> type[Int64Index]:
        """ return the class to use for construction """
        # Operations that cannot preserve a range produce an Int64Index.
        return Int64Index

    @cache_readonly
    def _data(self) -> np.ndarray:
        """
        An int array that for performance reasons is created only when needed.

        The constructed array is saved in ``_cache``.
        """
        return np.arange(self.start, self.stop, self.step, dtype=np.int64)

    @cache_readonly
    def _cached_int64index(self) -> Int64Index:
        # Materialized Int64Index equivalent of this index, built lazily.
        return Int64Index._simple_new(self._data, name=self.name)

    @property
    def _int64index(self) -> Int64Index:
        # wrap _cached_int64index so we can be sure its name matches self.name
        res = self._cached_int64index
        res._name = self._name
        return res

    def _get_data_as_items(self):
        """ return a list of tuples of start, stop, step """
        rng = self._range
        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
    def __reduce__(self):
        # Pickle support: reconstruct via _new_Index from the attribute dict
        # plus the defining start/stop/step values.
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
        # na_rep is accepted for interface compatibility but is irrelevant:
        # a RangeIndex can never contain NA values.
        if not len(self._range):
            return header
        # Left-pad all values to the width of the widest endpoint so the
        # rendered column lines up.
        first_val_str = str(self._range[0])
        last_val_str = str(self._range[-1])
        max_length = max(len(first_val_str), len(last_val_str))
        return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
    # Warning template used by the deprecated underscore-prefixed aliases below.
    _deprecation_message = (
        "RangeIndex.{} is deprecated and will be "
        "removed in a future version. Use RangeIndex.{} "
        "instead"
    )

    @property
    def start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).
        """
        # GH 25710
        return self._range.start

    @property
    def _start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``start`` instead.
        """
        warnings.warn(
            self._deprecation_message.format("_start", "start"),
            FutureWarning,
            stacklevel=2,
        )
        return self.start

    @property
    def stop(self) -> int:
        """
        The value of the `stop` parameter.
        """
        return self._range.stop

    @property
    def _stop(self) -> int:
        """
        The value of the `stop` parameter.

        .. deprecated:: 0.25.0
            Use ``stop`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_stop", "stop"),
            FutureWarning,
            stacklevel=2,
        )
        return self.stop

    @property
    def step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).
        """
        # GH 25710
        return self._range.step

    @property
    def _step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``step`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_step", "step"),
            FutureWarning,
            stacklevel=2,
        )
        return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        # `deep` makes no difference here: a RangeIndex stores no element
        # objects, so the shallow size is the full size.
        return self.nbytes
    @property
    def dtype(self) -> np.dtype:
        # RangeIndex always represents int64 values.
        return np.dtype(np.int64)

    @property
    def is_unique(self) -> bool:
        """ return if the index has unique values """
        # A range never repeats values, so this is unconditionally True.
        return True

    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        # Positive step implies increasing; length <= 1 is trivially monotonic.
        return self._range.step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        # Negative step implies decreasing; length <= 1 is trivially monotonic.
        return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
    @property
    def inferred_type(self) -> str:
        # By construction the values are always integers.
        return "integer"
# --------------------------------------------------------------------
# Indexing Methods
    @doc(Int64Index.get_loc)
    def get_loc(self, key, method=None, tolerance=None):
        # Fast path: an exact integer lookup can be answered by range.index()
        # without materializing the underlying int64 array.
        if method is None and tolerance is None:
            if is_integer(key) or (is_float(key) and key.is_integer()):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
                except ValueError as err:
                    raise KeyError(key) from err
            raise KeyError(key)
        # Inexact lookups (method/tolerance) fall back to the generic path.
        return super().get_loc(key, method=method, tolerance=tolerance)
    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> np.ndarray:
        # -> np.ndarray[np.intp]
        if com.any_not_none(method, tolerance, limit):
            return super()._get_indexer(
                target, method=method, tolerance=tolerance, limit=limit
            )

        if self.step > 0:
            start, stop, step = self.start, self.stop, self.step
        else:
            # GH 28678: work on reversed range for simplicity
            reverse = self._range[::-1]
            start, stop, step = reverse.start, reverse.stop, reverse.step

        if not is_signed_integer_dtype(target):
            # checks/conversions/roundings are delegated to general method
            return super()._get_indexer(target, method=method, tolerance=tolerance)

        target_array = np.asarray(target)
        # A value v belongs to the (now increasing) range iff v - start is a
        # non-negative multiple of step and v < stop; its position is then
        # (v - start) // step. Misses are marked with -1.
        locs = target_array - start
        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
        locs[~valid] = -1
        locs[valid] = locs[valid] / step

        if step != self.step:
            # We reversed this range: transform to original locs
            locs[valid] = len(self) - 1 - locs[valid]
        return ensure_platform_int(locs)
# --------------------------------------------------------------------
    def repeat(self, repeats, axis=None) -> Int64Index:
        # Repetition breaks the monotonic-range invariant; delegate to the
        # materialized Int64Index.
        return self._int64index.repeat(repeats, axis=axis)

    def delete(self, loc) -> Int64Index:  # type: ignore[override]
        # Deletion generally breaks the range invariant; delegate likewise.
        return self._int64index.delete(loc)

    def take(
        self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
    ) -> Int64Index:
        # Arbitrary takes cannot stay a RangeIndex; delegate, rewriting the
        # delegate's class name in any raised message so it reads as RangeIndex.
        with rewrite_exception("Int64Index", type(self).__name__):
            return self._int64index.take(
                indices,
                axis=axis,
                allow_fill=allow_fill,
                fill_value=fill_value,
                **kwargs,
            )
    def tolist(self) -> list[int]:
        """Return the values as a built-in list of Python ints."""
        return list(self._range)

    @doc(Int64Index.__iter__)
    def __iter__(self):
        yield from self._range

    @doc(Int64Index._shallow_copy)
    def _shallow_copy(self, values, name: Hashable = no_default):
        name = self.name if name is no_default else name

        # The result dtype follows the supplied values: float data becomes a
        # Float64Index, anything else an Int64Index (the values no longer
        # necessarily form a range).
        if values.dtype.kind == "f":
            return Float64Index(values, name=name)
        return Int64Index._simple_new(values, name=name)

    def _view(self: RangeIndex) -> RangeIndex:
        # Shallow view sharing the same backing range and attribute cache;
        # no data is copied.
        result = type(self)._simple_new(self._range, name=self._name)
        result._cache = self._cache
        return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
| |
item
from the list(action_past[n])
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular
Optional display name (if the data_type method is not defined) for the
type of data that receives the action. ("Key Pair", "Floating IP", etc.)
.. attribute:: data_type_plural
Optional plural word (if the data_type method is not defined) for the
type of data being acted on. Defaults to appending 's'. Relying on the
default is bad for translations and should not be done, so its absence
will raise a DeprecationWarning. It is currently kept as optional for
legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
.. attribute:: success_url
Optional location to redirect after completion of the delete
action. Defaults to the current page.
.. attribute:: help_text
Optional message for providing an appropriate help text for
the horizon user.
"""
help_text = _("This action cannot be undone.")
def __init__(self, **kwargs):
super(BatchAction, self).__init__(**kwargs)
action_present_method = False
if hasattr(self, 'action_present'):
if callable(self.action_present):
action_present_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_present '
'method instead of attribute.' % self.__class__.__name__
))
action_past_method = False
if hasattr(self, 'action_past'):
if callable(self.action_past):
action_past_method = True
else:
warnings.warn(PendingDeprecationWarning(
'The %s BatchAction class must have an action_past '
'method instead of attribute.' % self.__class__.__name__
))
action_methods = action_present_method and action_past_method
has_action_method = action_present_method or action_past_method
if has_action_method and not action_methods:
raise NotImplementedError(
'The %s BatchAction class must have both action_past and'
'action_present methods.' % self.__class__.__name__
)
if not action_methods:
if not kwargs.get('data_type_singular'):
raise NotImplementedError(
'The %s BatchAction class must have a data_type_singular '
'attribute when action_past and action_present attributes '
'are used.' % self.__class__.__name__
)
self.data_type_singular = kwargs.get('data_type_singular')
self.data_type_plural = kwargs.get('data_type_plural',
self.data_type_singular + 's')
# TODO(ygbo): get rid of self.use_action_method once action_present and
# action_past are changed to methods handling plurals.
self.use_action_method = action_methods
self.success_url = kwargs.get('success_url', None)
# If setting a default name, don't initialize it too early
self.verbose_name = kwargs.get('verbose_name', self._get_action_name)
self.verbose_name_plural = kwargs.get(
'verbose_name_plural',
lambda: self._get_action_name('plural'))
self.current_present_action = 0
self.current_past_action = 0
# Keep record of successfully handled objects
self.success_ids = []
self.help_text = kwargs.get('help_text', self.help_text)
def _allowed(self, request, datum=None):
# Override the default internal action method to prevent batch
# actions from appearing on tables with no data.
if not self.table.data and not datum:
return False
return super(BatchAction, self)._allowed(request, datum)
    def _get_action_name(self, items=None, past=False):
        """Builds combinations like 'Delete Object' and 'Deleted
        Objects' based on the number of items and `past` flag.
        :param items:
            A list or tuple of items (or container with a __len__ method) to
            count the number of concerned items for which this method is
            called.
            When this method is called for a single item (by the BatchAction
            itself), this parameter can be omitted and the number of items
            will be considered as "one".
            If we want to evaluate to "zero" this parameter must not be omitted
            (and should be an empty container).
        :param past:
            Boolean flag indicating if the action took place in the past.
            By default a present action is considered.
        """
        action_type = "past" if past else "present"
        if items is None:
            # Called without items parameter (by a single instance.)
            count = 1
        else:
            count = len(items)
        # TODO(ygbo): get rid of self.use_action_method once action_present and
        # action_past are changed to methods handling plurals.
        action_attr = getattr(self, "action_%s" % action_type)
        if self.use_action_method:
            # Modern form: the method returns the correctly pluralized verb.
            action_attr = action_attr(count)
        # NOTE(review): ``basestring`` is Python 2 only; under Python 3 a
        # compatibility alias must be in scope -- confirm.
        if isinstance(action_attr, (basestring, Promise)):
            action = action_attr
        else:
            # Legacy attribute form may be an indexable pair of verb forms,
            # toggled by the current_present_action/current_past_action index.
            toggle_selection = getattr(self, "current_%s_action" % action_type)
            action = action_attr[toggle_selection]
        if self.use_action_method:
            return action
        # TODO(ygbo): get rid of all this below once action_present and
        # action_past are changed to methods handling plurals.
        data_type = ungettext_lazy(
            self.data_type_singular,
            self.data_type_plural,
            count
        )
        if '%(data_type)s' in action:
            # If full action string is specified, use action as format string.
            msgstr = action
        else:
            if action_type == "past":
                msgstr = pgettext_lazy(u"past", "%(action)s %(data_type)s")
            else:
                msgstr = pgettext_lazy(u"present", "%(action)s %(data_type)s")
        return msgstr % {'action': action, 'data_type': data_type}
def action(self, request, datum_id):
"""Required. Accepts a single object id and performs the specific
action.
Return values are discarded, errors raised are caught and logged.
"""
def update(self, request, datum):
"""Switches the action verbose name, if needed."""
if getattr(self, 'action_present', False):
self.verbose_name = self._get_action_name()
self.verbose_name_plural = self._get_action_name('plural')
def get_success_url(self, request=None):
"""Returns the URL to redirect to after a successful action."""
if self.success_url:
return self.success_url
return request.get_full_path()
def get_default_attrs(self):
"""Returns a list of the default HTML attributes for the action."""
attrs = super(BatchAction, self).get_default_attrs()
attrs.update({'data-batch-action': 'true'})
return attrs
    def handle(self, table, request, obj_ids):
        """Run the batch action over every selected object id.

        Per-object results are bucketed into success / failure / not-allowed,
        aggregate messages are reported to the user, and the response
        redirects to the success URL.
        """
        action_success = []
        action_failure = []
        action_not_allowed = []
        for datum_id in obj_ids:
            datum = table.get_object_by_id(datum_id)
            datum_display = table.get_object_display(datum) or datum_id
            # Skip objects the action is not permitted to touch.
            if not table._filter_action(self, request, datum):
                action_not_allowed.append(datum_display)
                LOG.info('Permission denied to %s: "%s"' %
                         (self._get_action_name(past=True).lower(),
                          datum_display))
                continue
            try:
                self.action(request, datum_id)
                # Call update to invoke changes if needed
                self.update(request, datum)
                action_success.append(datum_display)
                self.success_ids.append(datum_id)
                LOG.info('%s: "%s"' %
                         (self._get_action_name(past=True), datum_display))
            except Exception as ex:
                # Handle the exception but silence it since we'll display
                # an aggregate error message later. Otherwise we'd get
                # multiple error messages displayed to the user.
                if getattr(ex, "_safe_message", None):
                    ignore = False
                else:
                    ignore = True
                action_failure.append(datum_display)
                exceptions.handle(request, ignore=ignore)
        # Begin with success message class, downgrade to info if problems.
        success_message_level = messages.success
        if action_not_allowed:
            msg = _('You are not allowed to %(action)s: %(objs)s')
            params = {"action":
                      self._get_action_name(action_not_allowed).lower(),
                      "objs": functions.lazy_join(", ", action_not_allowed)}
            messages.error(request, msg % params)
            success_message_level = messages.info
        if action_failure:
            msg = _('Unable to %(action)s: %(objs)s')
            params = {"action": self._get_action_name(action_failure).lower(),
                      "objs": functions.lazy_join(", ", action_failure)}
            messages.error(request, msg % params)
            success_message_level = messages.info
        if action_success:
            msg = _('%(action)s: %(objs)s')
            params = {"action":
                      self._get_action_name(action_success, past=True),
                      "objs": functions.lazy_join(", ", action_success)}
            success_message_level(request, msg % params)
        return shortcuts.redirect(self.get_success_url(request))
class DeleteAction(BatchAction):
"""A table action used to perform delete operations on table data.
.. attribute:: name
A short name or "slug" representing this action.
Defaults to 'delete'
.. method:: action_present
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_present (PendingDeprecation)
A string containing the transitive verb describing the delete action.
Defaults to 'Delete'
NOTE: action_present attribute is bad for translations and should be
avoided. Please use the action_present method instead.
This form is kept for legacy.
.. method:: action_past
Method accepting an integer/long parameter and returning the display
forms of the name properly pluralised (depending on the integer) and
translated in a string or tuple/list.
.. attribute:: action_past (PendingDeprecation)
A string set to the past tense of action_present.
Defaults to 'Deleted'
NOTE: action_past attribute is bad for translations and should be
avoided. Please use the action_past method instead.
This form is kept for legacy.
.. attribute:: data_type_singular (PendingDeprecation)
A string used to name the data to be deleted.
.. attribute:: data_type_plural (PendingDeprecation)
Optional. Plural of ``data_type_singular``.
Defaults to ``data_type_singular`` appended with an 's'. Relying on
        the default is bad for translations and should not be done, so its
absence will raise a DeprecationWarning. It is currently kept as
optional for legacy code.
NOTE: data_type_singular and data_type_plural attributes are bad for
translations and should be avoided. Please use the action_present and
action_past methods. This form is kept for legacy.
"""
name = "delete"
    def __init__(self, **kwargs):
        """Initialize the delete action, defaulting the legacy verb forms.

        ``action_present``/``action_past`` default to "Delete"/"Deleted" only
        when the subclass has not already defined them.
        """
        super(DeleteAction, self).__init__(**kwargs)
        self.name = kwargs.get('name', self.name)
        if not hasattr(self, "action_present"):
            self.action_present = kwargs.get('action_present', _("Delete"))
        if not hasattr(self, "action_past"):
            self.action_past = kwargs.get('action_past', _("Deleted"))
        # Icon name used for the action button; presumably resolved by the
        # theme/frontend -- confirm against the rendering code.
        self.icon = "remove"
def action(self, request, obj_id):
"""Action entry point. Overrides base class' action method.
Accepts a single object id passing it over to the delete method
responsible for the object's destruction.
"""
return self.delete(request, obj_id)
def delete(self, request, obj_id):
"""Required. Deletes an object referenced by obj_id.
Override to provide delete functionality specific to your data.
"""
def get_default_classes(self):
"""Appends | |
<filename>sunpy/map/mapbase.py<gh_stars>1-10
"""
Map is a generic Map class from which all other Map classes inherit.
"""
import copy
import html
import textwrap
import warnings
import webbrowser
from io import BytesIO
from base64 import b64encode
from tempfile import NamedTemporaryFile
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.figure import Figure
import astropy.units as u
import astropy.wcs
from astropy.coordinates import Latitude, Longitude, SkyCoord, UnitSphericalRepresentation
from astropy.nddata import NDData
from astropy.visualization import AsymmetricPercentileInterval, HistEqStretch, ImageNormalize
from astropy.visualization.wcsaxes import WCSAxes
# The next two are not used but are called to register functions with external modules
import sunpy.coordinates
import sunpy.io as io
import sunpy.visualization.colormaps
from sunpy import config
from sunpy.coordinates import HeliographicCarrington, HeliographicStonyhurst, get_earth, sun
from sunpy.coordinates.utils import get_rectangle_coordinates
from sunpy.image.resample import resample as sunpy_image_resample
from sunpy.image.resample import reshape_image_to_4d_superpixel
from sunpy.sun import constants
from sunpy.time import is_time, parse_time
from sunpy.util import expand_list
from sunpy.util.decorators import deprecate_positional_args_since, deprecated
from sunpy.util.exceptions import SunpyMetadataWarning, SunpyUserWarning
from sunpy.util.functools import seconddispatch
from sunpy.visualization import axis_labels_from_ctype, peek_show, wcsaxes_compat
# Display format for observation times, read from the user configuration.
TIME_FORMAT = config.get("general", "time_format")
# Lightweight two-value containers used for pixel (x, y) and spatial
# (axis1, axis2) property return values.
PixelPair = namedtuple('PixelPair', 'x y')
SpatialPair = namedtuple('SpatialPair', 'axis1 axis2')
# Documentation link referenced from metadata-validation error messages.
_META_FIX_URL = 'https://docs.sunpy.org/en/stable/code_ref/map.html#fixing-map-metadata'
__all__ = ['GenericMap']
class MapMetaValidationError(AttributeError):
    """Raised when a map's metadata fails validation."""
class GenericMap(NDData):
"""
A Generic spatially-aware 2D data array
Parameters
----------
data : `numpy.ndarray`, list
A 2d list or ndarray containing the map data.
header : dict
A dictionary of the original image header tags.
plot_settings : dict, optional
Plot settings.
Other Parameters
----------------
**kwargs :
Additional keyword arguments are passed to `~astropy.nddata.NDData`
init.
Examples
--------
>>> import sunpy.map
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA
>>> aia # doctest: +REMOTE_DATA
<sunpy.map.sources.sdo.AIAMap object at 0x...>
SunPy Map
---------
Observatory: SDO
Instrument: AIA 3
Detector: AIA
Measurement: 171.0 Angstrom
Wavelength: 171.0 Angstrom
Observation Date: 2011-06-07 06:33:02
Exposure Time: 0.234256 s
Dimension: [1024. 1024.] pix
Coordinate System: helioprojective
Scale: [2.402792 2.402792] arcsec / pix
Reference Pixel: [512.5 512.5] pix
Reference Coord: [3.22309951 1.38578135] arcsec
array([[ -95.92475 , 7.076416 , -1.9656711, ..., -127.96519 ,
-127.96519 , -127.96519 ],
[ -96.97533 , -5.1167884, 0. , ..., -98.924576 ,
-104.04137 , -127.919716 ],
[ -93.99607 , 1.0189276, -4.0757103, ..., -5.094638 ,
-37.95505 , -127.87541 ],
...,
[-128.01454 , -128.01454 , -128.01454 , ..., -128.01454 ,
-128.01454 , -128.01454 ],
[-127.899666 , -127.899666 , -127.899666 , ..., -127.899666 ,
-127.899666 , -127.899666 ],
[-128.03072 , -128.03072 , -128.03072 , ..., -128.03072 ,
-128.03072 , -128.03072 ]], dtype=float32)
>>> aia.spatial_units # doctest: +REMOTE_DATA
SpatialPair(axis1=Unit("arcsec"), axis2=Unit("arcsec"))
>>> aia.peek() # doctest: +SKIP
Notes
-----
A number of the properties of this class are returned as two-value named
tuples that can either be indexed by position ([0] or [1]) or be accessed
by the names (.x and .y) or (.axis1 and .axis2). Things that refer to pixel
axes use the ``.x``, ``.y`` convention, where x and y refer to the FITS
axes (x for columns y for rows). Spatial axes use ``.axis1`` and ``.axis2``
which correspond to the first and second axes in the header. ``axis1``
corresponds to the coordinate axis for ``x`` and ``axis2`` corresponds to
``y``.
This class makes some assumptions about the WCS information contained in
the meta data. The first and most extensive assumption is that it is
FITS-like WCS information as defined in the FITS WCS papers.
Within this scope it also makes some other assumptions.
* In the case of APIS convention headers where the CROTAi/j arguments are
provided it assumes that these can be converted to the standard PCi_j
notation using equations 32 in Thompson (2006).
* If a CDi_j matrix is provided it is assumed that it can be converted to a
PCi_j matrix and CDELT keywords as described in
`Greisen & Calabretta (2002) <https://doi.org/10.1051/0004-6361:20021327>`_
* The 'standard' FITS keywords that are used by this class are the PCi_j
matrix and CDELT, along with the other keywords specified in the WCS
papers. All subclasses of this class must convert their header
information to this formalism. The CROTA to PCi_j conversion is done in
this class.
.. warning::
This class currently assumes that a header with the CDi_j matrix
information also includes the CDELT keywords, without these keywords
this class will not process the WCS.
Also the rotation_matrix does not work if the CDELT1 and CDELT2
keywords are exactly equal.
        Also, if a file with more than two dimensions is fed into the class,
only the first two dimensions (NAXIS1, NAXIS2) will be loaded and the
rest will be discarded.
"""
_registry = dict()
    def __init_subclass__(cls, **kwargs):
        """
        An __init_subclass__ hook initializes all of the subclasses of a given class.
        So for each subclass, it will call this block of code on import.
        This replicates some metaclass magic without the need to be aware of metaclasses.
        Here we use this to register each subclass in a dict that has the
        `is_datasource_for` attribute.
        This is then passed into the Map Factory so we can register them.
        """
        super().__init_subclass__(**kwargs)
        # Only subclasses declaring a matcher participate in factory dispatch.
        if hasattr(cls, 'is_datasource_for'):
            cls._registry[cls] = cls.is_datasource_for
    def __init__(self, data, header, plot_settings=None, **kwargs):
        """Build the map from a data array and a metadata header, then
        fix missing metadata, validate it and set up plot defaults."""
        # If the data has more than two dimensions, the first dimensions
        # (NAXIS1, NAXIS2) are used and the rest are discarded.
        ndim = data.ndim
        if ndim > 2:
            # We create a slice that removes all but the 'last' two
            # dimensions. (Note dimensions in ndarray are in reverse order)
            new_2d_slice = [0]*(ndim-2)
            new_2d_slice.extend([slice(None), slice(None)])
            data = data[tuple(new_2d_slice)]
            # Warn the user that the data has been truncated
            warnings.warn("This file contains more than 2 dimensions. "
                          "Data will be truncated to the first two dimensions.", SunpyUserWarning)
        super().__init__(data, meta=header, **kwargs)
        # Correct possibly missing meta keywords
        self._fix_date()
        self._fix_naxis()
        # Setup some attributes
        self._nickname = None
        # These are placeholders for default attributes, which are only set
        # once if their data isn't present in the map metadata.
        self._default_time = None
        self._default_dsun = None
        self._default_carrington_longitude = None
        self._default_heliographic_latitude = None
        self._default_heliographic_longitude = None
        # Validate header
        # TODO: This should be a function of the header, not of the map
        self._validate_meta()
        self._shift = SpatialPair(0 * u.arcsec, 0 * u.arcsec)
        # 8-bit data is displayed unscaled; everything else gets a linear norm.
        if self.dtype == np.uint8:
            norm = None
        else:
            # Put import here to reduce sunpy.map import time
            from matplotlib import colors
            norm = colors.Normalize()
        # Visualization attributes
        self.plot_settings = {'cmap': 'gray',
                              'norm': norm,
                              'interpolation': 'nearest',
                              'origin': 'lower'
                              }
        if plot_settings:
            self.plot_settings.update(plot_settings)
def __getitem__(self, key):
""" This should allow indexing by physical coordinate """
raise NotImplementedError(
"The ability to index Map by physical"
" coordinate is not yet implemented.")
def _text_summary(self):
return textwrap.dedent("""\
SunPy Map
---------
Observatory:\t\t {obs}
Instrument:\t\t {inst}
Detector:\t\t {det}
Measurement:\t\t {meas}
Wavelength:\t\t {wave}
Observation Date:\t {date}
Exposure Time:\t\t {dt:f}
Dimension:\t\t {dim}
Coordinate System:\t {coord}
Scale:\t\t\t {scale}
Reference Pixel:\t {refpix}
Reference Coord:\t {refcoord}\
""").format(obs=self.observatory, inst=self.instrument, det=self.detector,
meas=self.measurement, wave=self.wavelength,
date=self.date.strftime(TIME_FORMAT),
dt=self.exposure_time,
dim=u.Quantity(self.dimensions),
scale=u.Quantity(self.scale),
coord=self._coordinate_frame_name,
refpix=u.Quantity(self.reference_pixel),
refcoord=u.Quantity((self._reference_longitude,
self._reference_latitude)),
tmf=TIME_FORMAT)
def __repr__(self):
return object.__repr__(self) + "\n" + self._text_summary() + "\n" + self.data.__repr__()
def _repr_html_(self):
"""
Produce an HTML summary with plots for use in Jupyter notebooks.
"""
# Convert the text repr to an HTML table
partial_html = self._text_summary()[20:].replace('\n', '</td></tr><tr><th>')\
.replace(':\t', '</th><td>')
text_to_table = textwrap.dedent(f"""\
<table style='text-align:left'>
<tr><th>{partial_html}</td></tr>
</table>""").replace('\n', '')
# Handle bad values (infinite and NaN) in the data array
finite_data = self.data[np.isfinite(self.data)]
count_nan = np.isnan(self.data).sum()
count_inf = np.isinf(self.data).sum()
# Assemble an informational string with the counts of bad pixels
bad_pixel_text = ""
if count_nan + count_inf > 0:
bad_pixel_text = "Bad pixels are shown in red: "
text_list = []
if count_nan > 0:
text_list.append(f"{count_nan} NaN")
if count_inf > 0:
text_list.append(f"{count_inf} infinite")
bad_pixel_text += ", ".join(text_list)
# Use a grayscale colormap with histogram equalization (and red for bad values)
# Make a copy of the colormap to avoid modifying the matplotlib instance when
# doing set_bad()
cmap = copy.copy(cm.get_cmap('gray'))
cmap.set_bad(color='red')
norm = ImageNormalize(stretch=HistEqStretch(finite_data))
# Plot the image in pixel space
fig = Figure(figsize=(5.2, 4.8))
# Figure instances in matplotlib<3.1 do not create a canvas by default
if fig.canvas is None:
FigureCanvasBase(fig)
ax = fig.subplots()
ax.imshow(self.data, origin='lower', interpolation='nearest', cmap=cmap, norm=norm)
ax.set_xlabel('X pixel')
ax.set_ylabel('Y pixel')
ax.set_title('In pixel space')
pixel_src = _figure_to_base64(fig)
bounds = ax.get_position().bounds # save these axes bounds for later use
# | |
source: str, reference: datetime, date_ers: [ExtractResult],
time_ers: [ExtractResult]) -> List[Token]:
tokens = []
ers_datetime = self.config.single_date_time_extractor.extract(source, reference)
time_points = []
# Handle the overlap problem
j = 0
for er_datetime in ers_datetime:
time_points.append(er_datetime)
while j < len(time_ers) and time_ers[j].start + time_ers[j].length < er_datetime.start:
time_points.append(time_ers[j])
j += 1
while j < len(time_ers) and time_ers[j].overlap(er_datetime):
j += 1
while j < len(time_ers):
time_points.append(time_ers[j])
j += 1
time_points = sorted(time_points, key=lambda x: x.start)
# Merge "{TimePoint} to {TimePoint}", "between {TimePoint} and {TimePoint}"
index = 0
while index < len(time_points) - 1:
if time_points[index].type == Constants.SYS_DATETIME_TIME and time_points[index + 1].type == \
Constants.SYS_DATETIME_TIME:
index += 1
break
middle_begin = time_points[index].start + time_points[index].length
middle_end = time_points[index + 1].start
middle_str = source[middle_begin:middle_end].strip().lower()
# Handle "{TimePoint} to {TimePoint}"
if RegExpUtility.is_exact_match(self.config.till_regex, middle_str, True):
period_begin = time_points[index].start
period_end = time_points[index + 1].start + time_points[index + 1].length
# Handle "from"
before_str = source[0:period_begin].strip()
match_from = self.config.get_from_token_index(before_str)
from_token_index = match_from if match_from.matched else self.config.get_between_token_index(
before_str)
if from_token_index.matched:
period_begin = from_token_index.index
elif self.config.check_both_before_after:
after_str = source[period_end:len(source) - period_end]
after_token_index = self.config.get_between_token_index(after_str)
if after_token_index.matched:
# Handle "between" in after_str
period_end += after_token_index.index
tokens.append(Token(period_begin, period_end))
index += 2
break
# Handle "between {TimePoint} and {TimePoint}"
if self.config.has_connector_token(middle_str):
period_begin = time_points[index].start
period_end = time_points[index + 1].start + time_points[index + 1].length
before_str = source[0:period_begin].strip()
before_token_index = self.config.get_between_token_index(before_str)
if before_token_index.matched:
period_begin = before_token_index.index
tokens.append(Token(period_begin, period_end))
index += 2
break
index += 1
# Regarding the phrase as-- {Date} {TimePeriod}, like "2015-9-23 1pm to 4"
        # Or {TimePeriod} and {Date}, like "1:30 to 4 2015-9-23"
ers_time_period = self.config.time_period_extractor.extract(source, reference)
for er_time_period in ers_time_period:
if not er_time_period.meta_data:
date_ers.append(er_time_period)
points: List[ExtractResult] = sorted(date_ers, key=lambda x: x.start)
index = 0
while index < len(points) - 1:
if points[index].type == points[index + 1].type:
break
mid_begin = points[index].start + points[index].length
mid_end = points[index + 1].start
if mid_end - mid_begin > 0:
mid_str = source[mid_begin:mid_end]
if not mid_str.strip() or mid_str.strip().startswith(self.config.token_before_date):
# Extend date extraction for cases like "Monday evening next week"
extended_str = points[index].text + source[int(points[index + 1].start + points[index + 1].length):]
extended_date_str = self.config.single_date_extractor.extract(extended_str)
offset = 0
if extended_date_str is not None and extended_date_str.index == 0:
offset = int(len(extended_date_str) - points[index].length)
tokens.append(Token(points[index].start,
offset + points[index + 1].start + points[index + 1].length))
index += 2
index += 1
return tokens
    def match_duration(self, source: str, reference: datetime) -> List[Token]:
        """Extract datetime-range tokens anchored on a duration mention.

        Durations containing a time unit are merged with surrounding range
        markers: "within/next/last" prefixes, leading cardinals ("2 upcoming
        days"), or trailing "ago"/"from now"-style suffixes.
        """
        tokens: List[Token] = list()
        source = source.strip().lower()
        ers_duration: List[ExtractResult] = self.config.duration_extractor.extract(
            source, reference)
        durations: List[Token] = list()
        # Keep only durations that actually mention a time unit.
        for extracted_result in ers_duration:
            if regex.search(self.config.time_unit_regex, extracted_result.text):
                durations.append(Token(extracted_result.start, extracted_result.start + extracted_result.length))
        for duration in durations:
            before_str = source[0:duration.start].strip()
            after_str = source[duration.start + duration.length:].strip()
            # NOTE(review): ``break`` stops processing ALL remaining durations
            # once one has empty context; ``continue`` looks intended -- confirm.
            if not before_str and not after_str:
                break
            # within (the) (next) "Seconds/Minutes/Hours" should be handled as datetimeRange here
            # within (the) (next) XX days/months/years + "Seconds/Minutes/Hours" should
            # also be handled as datetimeRange here
            token = self.match_within_next_prefix(before_str, source, duration, True)
            if token.start >= 0:
                tokens.append(token)
                break
            # check also afterStr
            if self.config.check_both_before_after:
                token = self.match_within_next_prefix(after_str, source, duration, False)
                if token.start >= 0:
                    tokens.append(token)
                    break
            match = RegExpUtility.match_end(self.config.previous_prefix_regex, before_str, True)
            index = -1
            if match and match.success:
                index = match.index
            if index < 0:
                # For cases like 'next five days'
                match = RegExpUtility.match_end(self.config.next_prefix_regex, before_str, True)
                if match and match.success:
                    index = match.index
            if index >= 0:
                prefix = before_str[0: index].strip()
                # NOTE(review): slice uses ``duration.length`` as the *end*
                # index; ``duration.end`` looks intended -- confirm Token
                # semantics.
                duration_text = source[duration.start: duration.length]
                numbers_in_prefix = self.config.cardinal_extractor.extract(prefix)
                numbers_in_duration = self.config.cardinal_extractor.extract(duration_text)
                # Cases like "2 upcoming days", should be supported here
                # Cases like "2 upcoming 3 days" is invalid, only extract "upcoming 3 days" by default
                if any(numbers_in_prefix) and not any(numbers_in_duration):
                    last_number = sorted(numbers_in_prefix, key=lambda t: t.start + t.length).pop()
                    # Prefix should end with the last number
                    if last_number.start + last_number.length == len(prefix):
                        tokens.append(Token(last_number.start, duration.end))
                else:
                    tokens.append(Token(index, duration.end))
                continue
            match_date_unit = regex.search(self.config.date_unit_regex, after_str)
            if not match_date_unit:
                # Match suffix
                match = RegExpUtility.match_begin(self.config.previous_prefix_regex, after_str, True)
                if match and match.success:
                    tokens.append(Token(duration.start, duration.end + match.index + match.length + 1))
                    continue
                match = RegExpUtility.match_begin(self.config.next_prefix_regex, after_str, True)
                if match and match.success:
                    tokens.append(Token(duration.start, duration.end + match.index + match.length))
                    continue
                match = RegExpUtility.match_begin(self.config.future_suffix_regex, after_str, True)
                if match and match.success:
                    tokens.append(Token(duration.start, duration.end + match.index + match.length))
                    continue
        return tokens
def match_within_next_prefix(self, sub_str: str, source: str, duration: Token, in_prefix: bool) -> Token:
start_out = end_out = -1
success = False
match = self.config.within_next_prefix_regex.match(sub_str)
if self.match_prefix_regex_in_segment(sub_str, match, in_prefix):
if in_prefix:
start_token = source.index(match.group())
end_token = duration.end + 0
else:
start_token = duration.start
end_token = duration.end + (source.index(match.group()) + duration.length)
match = self.config.time_unit_regex.match(source[duration.start: duration.length])
success = match
if not in_prefix:
# Match prefix for "next"
before_str = source[0:duration.start]
match_next = self.config.next_prefix_regex.match(before_str)
success = match or match_next
if self.match_prefix_regex_in_segment(before_str, match_next, True):
start_token = match_next.start
if success:
start_out, end_out = start_token, end_token
return Token(start_out, end_out)
    def match_night(self, source: str, reference: datetime) -> List[Token]:
        """Extract time-of-day period tokens (e.g. "tonight", "Friday evening").

        Combines standalone time-of-day mentions with adjacent date extractions
        and then extends the resulting tokens with adjacent time periods.
        """
        tokens: List[Token] = list()
        source = source.strip().lower()
        matches = regex.finditer(
            self.config.specific_time_of_day_regex, source)
        tokens.extend(map(lambda x: Token(x.start(), x.end()), matches))
        ers_date: List[ExtractResult] = self.config.single_date_extractor.extract(
            source, reference)
        for extracted_result in ers_date:
            # Time-of-day phrase directly after the date.
            after_str = source[extracted_result.start + extracted_result.length:]
            match = regex.search(
                self.config.period_time_of_day_with_date_regex, after_str)
            if match:
                if not after_str[0:match.start()].strip():
                    tokens.append(
                        Token(extracted_result.start, extracted_result.start + extracted_result.length + match.end()))
                else:
                    pause_match = regex.search(
                        self.config.middle_pause_regex, after_str[0:match.start()].strip())
                    if pause_match:
                        suffix = after_str[match.end():].strip()
                        ending_match = regex.search(
                            self.config.general_ending_regex, suffix)
                        if ending_match:
                            tokens.append(
                                Token(extracted_result.start, extracted_result.start +
                                      extracted_result.length + match.end()))
            # Time-of-day phrase directly before the date.
            before_str = source[0:extracted_result.start]
            match = regex.search(
                self.config.period_time_of_day_with_date_regex, before_str)
            if match:
                if not before_str[match.end():].strip():
                    middle_str = source[match.end():extracted_result.start]
                    if middle_str == ' ':
                        tokens.append(
                            Token(match.start(), extracted_result.start + extracted_result.length))
                else:
                    pause_match = regex.search(
                        self.config.middle_pause_regex, before_str[match.end():])
                    if pause_match:
                        suffix = source[extracted_result.start + extracted_result.length:].strip()
                        ending_match = regex.search(
                            self.config.general_ending_regex, suffix)
                        if ending_match:
                            tokens.append(
                                Token(match.start(), extracted_result.start + extracted_result.length))
        # check whether there are adjacent time period strings, before or after
        # NOTE(review): this loop appends to ``tokens`` while iterating it, so
        # newly added tokens are themselves revisited -- confirm intentional.
        for token in tokens:
            # try to extract a time period in before-string
            if token.start > 0:
                before_str = source[0:token.start].strip()
                if before_str:
                    ers_time = self.config.time_period_extractor.extract(
                        before_str, reference)
                    for er_time in ers_time:
                        middle_str = before_str[er_time.start +
                                                er_time.length:].strip()
                        if not middle_str:
                            tokens.append(Token(er_time.start,
                                                er_time.start + er_time.length + len(middle_str) + token.length))
            if token.start + token.length <= len(source):
                after_str = source[token.start + token.length:]
                if after_str:
                    ers_time = self.config.time_period_extractor.extract(
                        after_str, reference)
                    for er_time in ers_time:
                        middle_str = after_str[0:er_time.start]
                        if not middle_str:
                            token_end = token.start + token.length + \
                                len(middle_str) + er_time.length
                            tokens.append(Token(token.start, token_end))
        return tokens
@staticmethod
def match_prefix_regex_in_segment(string: str, match: Match, in_prefix: bool):
substring = ''
if match:
substring = string[match.start(): match.end()] if in_prefix else string[0: match.start()]
return match and substring.strip() is not ''
def match_relative_unit(self, source: str) -> List[Token]:
tokens: List[Token] = list()
matches = list(regex.finditer(
self.config.relative_time_unit_regex, source))
if not matches:
matches = list(regex.finditer(
self.config.rest_of_date_time_regex, source))
tokens.extend(map(lambda x: Token(x.start(), x.end()), matches))
return tokens
class DateTimePeriodParserConfiguration:
    @property
    @abstractmethod
    def future_suffix_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def within_next_prefix_regex(self):
        """Abstract: "within (the) (next)" pattern; supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def previous_prefix_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def cardinal_extractor(self):
        """Abstract: cardinal-number extractor supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def am_desc_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def pm_desc_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def before_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def after_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def prefix_day_regex(self):
        """Abstract: pattern supplied by a concrete (language-specific) configuration."""
        raise NotImplementedError
    @property
    @abstractmethod
    def token_before_date(self):
        """Abstract: connective token preceding dates; supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def token_before_time(self):
        """Abstract: connective token preceding times; supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def check_both_before_after(self) -> bool:
        """Abstract: whether both prefix and suffix contexts are examined."""
        raise NotImplementedError
    @property
    @abstractmethod
    def pure_number_from_to_regex(self) -> Pattern:
        """Abstract: compiled pattern supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def pure_number_between_and_regex(self) -> Pattern:
        """Abstract: compiled pattern supplied by concrete configurations."""
        raise NotImplementedError
    @property
    @abstractmethod
    def period_time_of_day_with_date_regex(self) -> Pattern:
        """Abstract: compiled pattern supplied by concrete configurations."""
        raise NotImplementedError
@property
@abstractmethod
def specific_time_of_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def time_of_day_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def past_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def future_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def relative_time_unit_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def rest_of_date_time_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def numbers(self) -> Dict[str, int]:
raise NotImplementedError
@property
@abstractmethod
def unit_map(self) -> Dict[str, str]:
raise NotImplementedError
@property
@abstractmethod
def date_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_zone_parser(self) -> DateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_parser(self) -> DateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def time_parser(self) -> DateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_time_parser(self) -> DateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def time_period_parser(self) | |
<reponame>efortuna/minigo
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
from tqdm import tqdm
import numpy as np
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from absl import flags
import utils
# Command-line configuration for the Bigtable connection.  All three flags
# together identify a single table.
flags.DEFINE_string('cbt_project', None,
                    'The project used to connect to the cloud bigtable ')

# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
                    'The identifier of the cloud bigtable instance in cbt_project')

# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
                    'The table within the cloud bigtable instance to use')

FLAGS = flags.FLAGS

# Constants

# Row-key prefixes: game rows live under g_<counter>_ and per-game counter
# rows under ct_<counter>_, with counters zero-padded to 10 digits so that
# lexicographic row order matches numeric order.
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'

# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100

## Column family and qualifier constants.

#### Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'

#### Column Qualifiers
#### Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'

# Patterns
# Parse move row keys ('g_<game>_m_<move>') and counter row keys ('ct_<game>_').
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')

# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
    'BigtableSpec',
    ['project', 'instance', 'table'])
def cbt_intvalue(value):
    """Decode a big-endian uint64.

    Cloud Bigtable stores integers as big-endian uint64,
    and performs this translation when integers are being
    set.  But when being read, the values need to be
    decoded.
    """
    (decoded,) = struct.unpack('>q', value)
    return int(decoded)
def make_single_array(ds, batch_size=8*1024):
    """Create a single numpy array from a dataset.

    The dataset must have only one dimension, that is,
    the length of its `output_shapes` and `output_types`
    is 1, and its output shape must be `[]`, that is,
    every tensor in the dataset must be a scalar.

    Args:
      ds: a TF Dataset.
      batch_size: how many elements to read per pass

    Returns:
      a single numpy array.
    """
    # Reject tuple-structured datasets: exactly one output tensor is required.
    if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
        raise ValueError('Dataset must have a single type and shape')
    nshapes = len(ds.output_shapes)
    # A scalar has TensorShape([]) -> zero dimensions.
    if nshapes > 0:
        raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
    batches = []
    with tf.Session() as sess:
        ds = ds.batch(batch_size)
        iterator = ds.make_initializable_iterator()
        # TF1 API: initializer must run before the first get_next() evaluation.
        sess.run(iterator.initializer)
        get_next = iterator.get_next()
        with tqdm(desc='Elements', unit_scale=1) as pbar:
            try:
                # Drain the dataset; TF signals exhaustion via OutOfRangeError.
                while True:
                    batches.append(sess.run(get_next))
                    pbar.update(len(batches[-1]))
            except tf.errors.OutOfRangeError:
                pass
    if batches:
        return np.concatenate(batches)
    # Empty dataset: return an empty array with the dataset's dtype.
    return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
    """Given dataset of key names, return histogram of moves/game.

    Move counts are written by the game players, so
    this is mostly useful for repair or backfill.

    Args:
      sess: TF session
      ds: TF dataset containing game move keys.
      batch_size: performance tuning parameter
    """
    counts = collections.Counter()
    batched = ds.batch(batch_size)
    # Reduce 'g_0000001234_m_133' to its game prefix 'g_0000001234'
    batched = batched.map(lambda key: tf.strings.substr(key, 0, 12))
    iterator = batched.make_initializable_iterator()
    sess.run(iterator.initializer)
    next_batch = iterator.get_next()
    while True:
        try:
            counts.update(sess.run(next_batch))
        except tf.errors.OutOfRangeError:
            break
    # NOTE: Cannot be truly sure the count is right till the end.
    return counts
def _game_keys_as_array(ds):
    """Turn keys of a Bigtable dataset into an array.

    Take g_GGG_m_MMM and create GGG.MMM numbers.

    Valuable when visualizing the distribution of a given dataset in
    the game keyspace.
    """
    keys_only = ds.map(lambda row_key, cell: row_key)

    def key_to_number(key):
        # 'g_0000001234_m_133' becomes 0000001234.133 and so forth
        game_part = tf.strings.substr(key, 2, 10)
        move_part = tf.strings.substr(key, 15, 3)
        return tf.strings.to_number(game_part + '.' + move_part,
                                    out_type=tf.float64)

    return make_single_array(keys_only.map(key_to_number))
def _delete_rows(args):
    """Delete the given row keys from the given Bigtable.

    The args are (BigtableSpec, row_keys), but are passed
    as a single argument in order to work with
    multiprocessing.Pool.map.  This is also the reason why this is a
    top-level function instead of a method.
    """
    btspec, row_keys = args
    client = bigtable.Client(btspec.project)
    table = client.instance(btspec.instance).table(btspec.table)
    pending = [table.row(key) for key in row_keys]
    for row in pending:
        row.delete()
    table.mutate_rows(pending)
    return row_keys
class GameQueue:
    """Queue of games stored in a Cloud Bigtable.

    The state of the table is stored in the `table_state`
    row, which includes the columns `metadata:game_counter`.
    Game rows live under the `g_<counter>_` prefix and per-game
    move-count rows under `ct_<counter>_`.
    """
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = tf.contrib.cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@property
def latest_game_number(self):
"""Return the number of the next game to be written."""
table_state = self.bt_table.read_row(
TABLE_STATE,
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, GAME_COUNTER, GAME_COUNTER))
if table_state is None:
return 0
return cbt_intvalue(table_state.cell_value(METADATA, GAME_COUNTER))
@latest_game_number.setter
def latest_game_number(self, latest):
table_state = self.bt_table.row(TABLE_STATE)
table_state.set_cell(METADATA, GAME_COUNTER, int(latest))
table_state.commit()
def games_by_time(self, start_game, end_game):
"""Given a range of games, return the games sorted by time.
Returns [(time, game_number), ...]
The time will be a `datetime.datetime` and the game
number is the integer used as the basis of the row ID.
Note that when a cluster of self-play nodes are writing
concurrently, the game numbers may be out of order.
"""
move_count = b'move_count'
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(start_game),
ROWCOUNT_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, move_count, move_count))
def parse(r):
rk = str(r.row_key, 'utf-8')
game = _game_from_counter.match(rk).groups()[0]
return (r.cells[METADATA][move_count][0].timestamp, game)
return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
    def delete_row_range(self, format_str, start_game, end_game):
        """Delete rows related to the given game range.

        Args:
          format_str: a string to `.format()` by the game numbers
            in order to create the row prefixes.
          start_game: the starting game number of the deletion.
          end_game: the ending game number of the deletion.
        """
        # Enumerate the affected row keys via the TF dataset API.
        row_keys = make_single_array(
            self.tf_table.keys_by_range_dataset(
                format_str.format(start_game),
                format_str.format(end_game)))
        row_keys = list(row_keys)
        if not row_keys:
            utils.dbg('No rows left for games %d..%d' % (
                start_game, end_game))
            return
        utils.dbg('Deleting %d rows: %s..%s' % (
            len(row_keys), row_keys[0], row_keys[-1]))

        # Reverse the keys so that the queue is left in a more
        # sensible end state if you change your mind (say, due to a
        # mistake in the timestamp) and abort the process: there will
        # be a bit trimmed from the end, rather than a bit
        # trimmed out of the middle.
        row_keys.reverse()
        total_keys = len(row_keys)
        utils.dbg('Deleting total of %d keys' % total_keys)
        concurrency = min(MAX_BT_CONCURRENCY,
                          multiprocessing.cpu_count() * 2)
        with multiprocessing.Pool(processes=concurrency) as pool:
            batches = []
            with tqdm(desc='Keys', unit_scale=2, total=total_keys) as pbar:
                # Chunk the keys so each mutate_rows call stays within
                # Bigtable's per-request mutation limit.
                for b in utils.iter_chunks(bigtable.row.MAX_MUTATIONS,
                                           row_keys):
                    pbar.update(len(b))
                    batches.append((self.btspec, b))
                    # Once a full wave (one batch per worker) has
                    # accumulated, dispatch it to the process pool.
                    if len(batches) >= concurrency:
                        pool.map(_delete_rows, batches)
                        batches = []
                # Flush the final, possibly partial, wave.
                pool.map(_delete_rows, batches)
                batches = []
    def trim_games_since(self, t, max_games=500000):
        """Trim off the games since the given time.

        Search back no more than max_games for this time point, locate
        the game there, and remove all games since that game,
        resetting the latest game counter.

        If `t` is a `datetime.timedelta`, then the target time will be
        found by subtracting that delta from the time of the last
        game.  Otherwise, it will be the target time.
        """
        latest = self.latest_game_number
        earliest = int(latest - max_games)
        gbt = self.games_by_time(earliest, latest)
        if not gbt:
            utils.dbg('No games between %d and %d' % (earliest, latest))
            return
        most_recent = gbt[-1]
        # A timedelta is interpreted relative to the most recent game's time.
        if isinstance(t, datetime.timedelta):
            target = most_recent[0] - t
        else:
            target = t
        # gbt is sorted by time, so bisect finds the first game at/after target.
        i = bisect.bisect_right(gbt, (target,))
        if i >= len(gbt):
            utils.dbg('Last game is already at %s' % gbt[-1][0])
            return
        when, which = gbt[i]
        # most_recent is a (time, game) tuple; '%' expands it as two args.
        utils.dbg('Most recent: %s %s' % most_recent)
        utils.dbg('     Target: %s %s' % (when, which))
        which = int(which)
        # Remove both the game rows and the per-game counter rows, then
        # rewind the counter so new games reuse the trimmed numbers.
        self.delete_row_range(ROW_PREFIX, which, latest)
        self.delete_row_range(ROWCOUNT_PREFIX, which, latest)
        self.latest_game_number = which
def bleakest_moves(self, start_game, end_game):
"""Given a range of games, return the bleakest moves.
Returns a list of (game, move, q) sorted by q.
"""
bleak = b'bleakest_q'
rows = self.bt_table.read_rows(
ROW_PREFIX.format(start_game),
ROW_PREFIX.format(end_game),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, bleak, bleak))
def parse(r):
rk = str(r.row_key, | |
from typing import Any, List, Optional
import numpy as np
from rdkit.Chem import rdchem, rdmolfiles, rdmolops, rdDistGeom, rdPartialCharges
class MolFeatureExtractionError(Exception):
    """Raised when features cannot be extracted from a molecule."""
    pass
def one_hot(x: Any, allowable_set: List[Any]) -> List[int]:
    """One hot encode labels.

    If label `x` is not included in the set, it is mapped to the last
    element in the list (conventionally a catch-all such as 'other').

    Params:
    -------
    x: Any
        Label to one hot encode.

    allowed_set: list of Any
        All possible values the label can have.

    Returns:
    --------
    vec: list of int
        One hot encoded vector of the features with the label `x` as
        the `True` label.

    Examples:
    ---------
    ```python
    >>> one_hot(x='Si', allowable_set=['C', 'O', 'N', 'S', 'Cl', 'F',
    ...         'Br', 'P', 'I', 'Si', 'B', 'Na', 'Sn', 'Se', 'other'])
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    ```
    """
    # Use last element of set if x is not in set.
    # Bug fix: the original assigned `allowable_set[:-1]` (a list slice), so
    # unknown labels compared unequal to every element and produced an
    # all-zero vector instead of flagging the catch-all entry.
    if x not in allowable_set:
        x = allowable_set[-1]
    return [int(x == s) for s in allowable_set]
def check_num_atoms(mol: rdchem.Mol, max_num_atoms: Optional[int]=-1) -> None:
    """Check number of atoms in `mol` does not exceed `max_num_atoms`.

    If number of atoms in `mol` exceeds the number `max_num_atoms`, it
    will raise `MolFeatureExtractionError` exception.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        The molecule to check.

    num_max_atoms: int, optional, default=-1
        Maximum allowed number of atoms in a molecule.  If negative,
        check passes unconditionally.
    """
    num_atoms = mol.GetNumAtoms()
    # A negative limit disables the check entirely.
    if max_num_atoms < 0:
        return
    if num_atoms > max_num_atoms:
        raise MolFeatureExtractionError("Atoms in mol (N={}) exceeds " \
            "num_max_atoms (N={}).".format(num_atoms, max_num_atoms))
def construct_mol_features(mol: rdchem.Mol, out_size: Optional[int]=-1) -> np.ndarray:
    """Returns the atom features of all the atoms in the molecule.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        Molecule of interest.

    out_size: int, optional, default=-1
        The size of the returned array. If this option is negative, it
        does not take any effect. Otherwise, it must be larger than or
        equal to the number of atoms in the input molecule. If so, the
        end of the array is padded with zeros.

    Returns:
    --------
    mol_feats: np.ndarray, shape=(n,m)
        Where `n` is the total number of atoms within the molecule, and
        `m` is the number of feats.
    """
    # Calculate charges and chirality of atoms within molecule
    rdPartialCharges.ComputeGasteigerCharges(mol) # stored under _GasteigerCharge
    rdmolops.AssignStereochemistry(mol) # stored under _CIPCode, see doc for more info

    # Retrieve atom index locations of matches
    HYDROGEN_DONOR = rdmolfiles.MolFromSmarts("[$([N;!H0;v3,v4&+1]),$([O,S;H1;+0])" +
                                              ",n&H1&+0]")
    # NOTE: fixed local-name typo (was HYROGEN_ACCEPTOR).
    HYDROGEN_ACCEPTOR = rdmolfiles.MolFromSmarts("[$([O,S;H1;v2;!$(*-*=[O,N,P,S])])" +
        ",$([O,S;H0;v2]),$([O,S;-]),$([N;v3;!$(N-*=[O,N,P,S])]),n&H0&+0," +
        "$([o,s;+0;!$([o,s]:n);!$([o,s]:c:n)])]")
    ACIDIC = rdmolfiles.MolFromSmarts("[$([C,S](=[O,S,P])-[O;H1,-1])]")
    BASIC = rdmolfiles.MolFromSmarts("[#7;+,$([N;H2&+0][$([C,a]);!$([C,a](=O))])" +
        ",$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))])," +
        "$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))])]")
    hydrogen_donor_match = sum(mol.GetSubstructMatches(HYDROGEN_DONOR), ())
    hydrogen_acceptor_match = sum(mol.GetSubstructMatches(HYDROGEN_ACCEPTOR), ())
    acidic_match = sum(mol.GetSubstructMatches(ACIDIC), ())
    basic_match = sum(mol.GetSubstructMatches(BASIC), ())

    # Get ring info
    ring = mol.GetRingInfo()

    mol_feats = []
    n_atoms = mol.GetNumAtoms()
    for atom_idx in range(n_atoms):
        atom = mol.GetAtomWithIdx(atom_idx)
        atom_feats = []
        # Element, degree, hybridization, valence and formal-charge one-hots.
        atom_feats += one_hot(atom.GetSymbol(), ['C', 'O', 'N', 'S', 'Cl', 'F', 'Br', 'P',
                                                 'I', 'Si', 'B', 'Na', 'Sn', 'Se', 'other'])
        atom_feats += one_hot(atom.GetDegree(), [1, 2, 3, 4, 5, 6])
        atom_feats += one_hot(atom.GetHybridization(), list(rdchem.HybridizationType.names.values()))
        atom_feats += one_hot(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6])
        atom_feats += one_hot(atom.GetFormalCharge(), [-3, -2, -1, 0, 1, 2, 3])

        # Gasteiger charge can come out NaN for exotic atoms; fall back to 0.
        g_charge = float(atom.GetProp("_GasteigerCharge"))
        atom_feats += [g_charge] if not np.isnan(g_charge) else [0.]

        atom_feats += [atom.GetIsAromatic()]
        atom_feats += [ring.IsAtomInRingOfSize(atom_idx, size) for size in range(3, 9)]
        atom_feats += one_hot(atom.GetTotalNumHs(), [0, 1, 2, 3, 4])

        # Chirality: '_CIPCode' is only set on stereocenters; GetProp raises
        # KeyError when the property is absent.  (Was a bare `except`, which
        # also swallowed unrelated errors.)
        try:
            atom_feats += one_hot(atom.GetProp('_CIPCode'), ["R", "S"]) + [atom.HasProp("_ChiralityPossible")]
        except KeyError:
            atom_feats += [False, False] + [atom.HasProp("_ChiralityPossible")]

        # Hydrogen bonding
        atom_feats += [atom_idx in hydrogen_donor_match]
        atom_feats += [atom_idx in hydrogen_acceptor_match]

        # Is Acidic/Basic
        atom_feats += [atom_idx in acidic_match]
        atom_feats += [atom_idx in basic_match]

        mol_feats.append(atom_feats)

    if out_size < 0:
        # `np.float` was deprecated in NumPy 1.20 and later removed; the
        # builtin `float` is the documented replacement (same float64 dtype).
        return np.array(mol_feats, dtype=float)
    elif out_size >= n_atoms:
        # 'empty' padding for `mol_feats`. Generate(s) feature matrix of same size for all mols
        # NOTE: len(mol_feats[0]) is the number of feats
        padded_mol_feats = np.zeros((out_size, len(mol_feats[0])), dtype=float)
        padded_mol_feats[:n_atoms] = np.array(mol_feats, dtype=float)
        return padded_mol_feats
    else:
        raise ValueError('`out_size` (N={}) must be negative or larger than or '
                         'equal to the number of atoms in the input molecules (N={}).'.format(out_size, n_atoms))
def construct_adj_matrix(mol: rdchem.Mol,
                         out_size: Optional[int]=-1,
                         add_self_loops: Optional[bool]=True,
                         normalize: Optional[bool]=True) -> np.ndarray:
    """Returns the adjacency matrix of the molecule.

    Normalization of the matrix is highly recommened. When we apply a
    layer propogation rule defined by,

    .. ::math: `f(H^{(l)}, A) = \\sigma(A H^{(l)} W^{(l)})

    multiplication with `A` will completely change the scale of the
    features vectors, which we can observe by looking into the eigenvals
    of A. By performing :math: `D^{-1}A`, where `D` is the diagonal
    degree node matrix, the rows become normalized to 1. However, in
    practice, it is better to use symmetric normalization (i.e.
    :math:`D^{-\\frac{1/2}} \\hat{A} D^{-\\frac{1/2}}) as that has been
    observed to yield better results.

    Additionally, when multiplying by `A`, for every node, we sum up
    all the feature vectors of all neighboring nodes but not the node
    itself (unless there are self-loops in the graph). We can "fix" this
    by adding self-loops in the graph: aka add an identity matrix `I` to `A`.

    See https://tkipf.github.io/graph-convolutional-networks/ for a
    more in-depth overview.

    Params:
    -------
    mol: rdkit.Chem.rdchem.Mol
        Molecule of interest.

    out_size: int, optional, default=-1
        The size of the returned array. If this option is negative, it
        does not take any effect. Otherwise, it must be larger than or
        equal to the number of atoms in the input molecule. If so, the
        end of the array is padded with zeros.

    add_self_loops: bool, optional, default=True
        Whether or not to add the `I` matrix (aka self-connections).
        If normalize is True, this option is ignored.

    normalize: bool, optional, default=True
        Whether or not to normalize the matrix. If `True`, the diagonal
        elements are filled with 1, and symmetric normalization is
        performed: :math:`D^{-\\frac{1/2}} * \\hat{A} * D^{-\\frac{1/2}}`

    Returns:
    --------
    adj: np.ndarray
        Adjacency matrix of input molecule. If `out_size` is non-negative,
        the returned matrix is equal to that value. Otherwise, it is
        equal to the number of atoms in the the molecule.
    """
    adj = rdmolops.GetAdjacencyMatrix(mol)
    s1, s2 = adj.shape # shape=(n_atoms, n_atoms)

    # Normalize using D^(-1/2) * A_hat * D^(-1/2)
    if normalize:
        adj = adj + np.eye(s1)  # A_hat: adjacency with self-loops
        degree = np.array(adj.sum(1))
        deg_inv_sqrt = np.power(degree, -0.5)
        deg_inv_sqrt[np.isinf(deg_inv_sqrt)] = 0.
        deg_inv_sqrt = np.diag(deg_inv_sqrt)
        # Bug fix: the original assigned `adj = deg_inv_sqrt`, discarding the
        # adjacency entirely and returning only the D^(-1/2) degree matrix.
        # Apply the symmetric normalization promised by the docstring.
        adj = deg_inv_sqrt @ adj @ deg_inv_sqrt
    elif add_self_loops:
        adj = adj + np.eye(s1)

    if out_size < 0:
        return adj
    elif out_size >= s1:
        # 'empty' padding for `adj`. Useful to generate adj matrix of same size for all mols
        # (`np.float` was removed in NumPy 1.20+; builtin `float` is equivalent.)
        padded_adj = np.zeros(shape=(out_size, out_size), dtype=float)
        padded_adj[:s1, :s2] = adj
        return padded_adj
    else:
        raise ValueError('`out_size` (N={}) must be negative or larger than or equal to the '
                         'number of atoms in the input molecules (N={}).'.format(out_size, s1))
def construct_pos_matrix(mol: rdchem.Mol, out_size: Optional[int]=-1) -> np.ndarray:
"""Construct relative positions from each atom within the molecule.
Params:
-------
mol: rdkit.Chem.rdchem.Mol
Molecule of interest.
out_size: int, optional, default=-1
The size of the returned array. If this option is negative, it
does not take any effect. Otherwise, it must be larger than or
equal to the number of atoms in the input molecule. If so, the
end of the array is padded with zeros.
Returns:
--------
pos_matrix: np.ndarray, shape=(n,n,3)
Relative position (XYZ) coordinates from one atom the others in
the mol.
Examples:
---------
```python
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> smiles = 'N[C@@]([H])([C@]([H])(O2)C)C(=O)N[C@@]([H])(CC(=O)N)C(=O)N[C@@]([H])([C@]([H])' \
'(O)C)C(=O)N[C@@]([H])(Cc1ccc(O)cc1)C(=O)2'
>>> mol = Chem.MolFromSmiles(smiles)
>>> mol = Chem.AddHs(mol, addCoords=True)
>>> AllChem.EmbedMolecule(mol, AllChem.ETKDG())
>>> mol = Chem.RemoveHs(mol)
>>> pos_matrix = construct_pos_matrix(mol, out_size=-1)
>>> pos_matrix.shape
(34,34,3)
>>> pos_matrix = construct_pos_matrix(mol, out_size=49)
>>> pos_matrix.shape
(49,49,3)
```
"""
# Obtain initial distance geometry between atoms, if unavilable
if mol.GetNumConformers() == 0:
mol = rdmolops.AddHs(mol, addCoords=True)
rdDistGeom.EmbedMolecule(mol, rdDistGeom.ETKDG())
mol = rdmolops.RemoveHs(mol)
coords = mol.GetConformer().GetPositions() # shape=(N,3)
N = mol.GetNumAtoms()
# Determine appropiate output size to generate feature matrix of same size for all mols.
if out_size < 0:
size = N
elif | |
-3): (0, 1),
(9, 2, -5, -2): (0, 0),
(9, 2, -5, -1): (0, 1),
(9, 2, -5, 0): (0, 1),
(9, 2, -5, 1): (0, 1),
(9, 2, -5, 2): (0, 1),
(9, 2, -5, 3): (0, 1),
(9, 2, -5, 4): (0, 1),
(9, 2, -5, 5): (0, 1),
(9, 2, -4, -5): (-1, 1),
(9, 2, -4, -4): (-1, 1),
(9, 2, -4, -3): (-1, 1),
(9, 2, -4, -2): (-1, 0),
(9, 2, -4, -1): (0, 1),
(9, 2, -4, 0): (0, 1),
(9, 2, -4, 1): (0, 1),
(9, 2, -4, 2): (0, 1),
(9, 2, -4, 3): (0, 1),
(9, 2, -4, 4): (1, 1),
(9, 2, -4, 5): (1, 0),
(9, 2, -3, -5): (0, 1),
(9, 2, -3, -4): (0, 1),
(9, 2, -3, -3): (0, 1),
(9, 2, -3, -2): (-1, 1),
(9, 2, -3, -1): (-1, 1),
(9, 2, -3, 0): (1, 1),
(9, 2, -3, 1): (1, 1),
(9, 2, -3, 2): (1, 1),
(9, 2, -3, 3): (1, 1),
(9, 2, -3, 4): (1, 1),
(9, 2, -3, 5): (1, 0),
(9, 2, -2, -5): (1, 0),
(9, 2, -2, -4): (1, 0),
(9, 2, -2, -3): (1, 0),
(9, 2, -2, -2): (1, 0),
(9, 2, -2, -1): (1, 1),
(9, 2, -2, 0): (1, 1),
(9, 2, -2, 1): (1, 1),
(9, 2, -2, 2): (1, 1),
(9, 2, -2, 3): (1, 1),
(9, 2, -2, 4): (1, 1),
(9, 2, -2, 5): (1, 0),
(9, 2, -1, -5): (1, 0),
(9, 2, -1, -4): (1, 0),
(9, 2, -1, -3): (1, 0),
(9, 2, -1, -2): (1, 0),
(9, 2, -1, -1): (1, 1),
(9, 2, -1, 0): (1, 1),
(9, 2, -1, 1): (1, 1),
(9, 2, -1, 2): (1, 1),
(9, 2, -1, 3): (1, 1),
(9, 2, -1, 4): (1, 1),
(9, 2, -1, 5): (1, 0),
(9, 2, 0, -5): (0, 1),
(9, 2, 0, -4): (0, 1),
(9, 2, 0, -3): (0, 1),
(9, 2, 0, -2): (0, 0),
(9, 2, 0, -1): (0, 1),
(9, 2, 0, 0): (0, 1),
(9, 2, 0, 1): (0, 1),
(9, 2, 0, 2): (0, 1),
(9, 2, 0, 3): (0, 1),
(9, 2, 0, 4): (0, 1),
(9, 2, 0, 5): (0, 1),
(9, 2, 1, -5): (0, 1),
(9, 2, 1, -4): (0, 1),
(9, 2, 1, -3): (0, 1),
(9, 2, 1, -2): (0, 1),
(9, 2, 1, -1): (0, 1),
(9, 2, 1, 0): (-1, 1),
(9, 2, 1, 1): (-1, 1),
(9, 2, 1, 2): (-1, 1),
(9, 2, 1, 3): (-1, 1),
(9, 2, 1, 4): (-1, 1),
(9, 2, 1, 5): (-1, 1),
(9, 2, 2, -5): (0, 1),
(9, 2, 2, -4): (0, 1),
(9, 2, 2, -3): (0, 1),
(9, 2, 2, -2): (0, 1),
(9, 2, 2, -1): (0, 1),
(9, 2, 2, 0): (0, 1),
(9, 2, 2, 1): (0, 1),
(9, 2, 2, 2): (0, 1),
(9, 2, 2, 3): (0, 1),
(9, 2, 2, 4): (0, 1),
(9, 2, 2, 5): (0, 1),
(9, 2, 3, -5): (0, 1),
(9, 2, 3, -4): (0, 1),
(9, 2, 3, -3): (0, 1),
(9, 2, 3, -2): (0, 1),
(9, 2, 3, -1): (0, 1),
(9, 2, 3, 0): (0, 1),
(9, 2, 3, 1): (0, 1),
(9, 2, 3, 2): (0, 1),
(9, 2, 3, 3): (0, 1),
(9, 2, 3, 4): (0, 1),
(9, 2, 3, 5): (0, 1),
(9, 2, 4, -5): (0, 1),
(9, 2, 4, -4): (0, 1),
(9, 2, 4, -3): (0, 1),
(9, 2, 4, -2): (0, 1),
(9, 2, 4, -1): (0, 1),
(9, 2, 4, 0): (0, 1),
(9, 2, 4, 1): (0, 1),
(9, 2, 4, 2): (0, 1),
(9, 2, 4, 3): (0, 1),
(9, 2, 4, 4): (0, 1),
(9, 2, 4, 5): (0, 1),
(9, 2, 5, -5): (0, 1),
(9, 2, 5, -4): (0, 1),
(9, 2, 5, -3): (0, 1),
(9, 2, 5, -2): (0, 1),
(9, 2, 5, -1): (0, 1),
(9, 2, 5, 0): (0, 1),
(9, 2, 5, 1): (0, 1),
(9, 2, 5, 2): (0, 1),
(9, 2, 5, 3): (0, 1),
(9, 2, 5, 4): (0, 1),
(9, 2, 5, 5): (0, 1),
(9, 3, -5, -5): (0, 1),
(9, 3, -5, -4): (0, 1),
(9, 3, -5, -3): (0, 0),
(9, 3, -5, -2): (0, 1),
(9, 3, -5, -1): (0, 1),
(9, 3, -5, 0): (0, 1),
(9, 3, -5, 1): (0, 1),
(9, 3, -5, 2): (0, 1),
(9, 3, -5, 3): (0, 1),
(9, 3, -5, 4): (0, 1),
(9, 3, -5, 5): (0, 1),
(9, 3, -4, -5): (-1, 1),
(9, 3, -4, -4): (-1, 1),
(9, 3, -4, -3): (-1, 0),
(9, 3, -4, -2): (0, 1),
(9, 3, -4, -1): (0, 1),
(9, 3, -4, 0): (0, 1),
(9, 3, -4, 1): (0, 1),
(9, 3, -4, 2): (0, 1),
(9, 3, -4, 3): (0, 1),
(9, 3, -4, 4): (1, 1),
(9, 3, -4, 5): (1, 0),
(9, 3, -3, -5): (0, 1),
(9, 3, -3, -4): (0, 1),
(9, 3, -3, -3): (-1, 1),
(9, 3, -3, -2): (-1, 1),
(9, 3, -3, -1): (-1, 1),
(9, 3, -3, 0): (1, 1),
(9, 3, -3, 1): (1, 1),
(9, 3, -3, 2): (1, 1),
(9, 3, -3, 3): (1, 1),
(9, 3, -3, 4): (1, 1),
(9, 3, -3, 5): (1, 0),
(9, 3, -2, -5): (1, 0),
(9, 3, -2, -4): (1, 0),
(9, 3, -2, -3): (1, 0),
(9, 3, -2, -2): (1, -1),
(9, 3, -2, -1): (1, 1),
(9, 3, -2, 0): (1, 1),
(9, 3, -2, 1): (1, 1),
(9, 3, -2, 2): (1, 1),
(9, 3, -2, 3): (1, 1),
(9, 3, -2, 4): (1, 1),
(9, 3, -2, 5): (1, 0),
(9, 3, -1, -5): (1, 0),
(9, 3, -1, -4): (1, 0),
(9, 3, -1, -3): (1, 0),
(9, 3, -1, -2): (1, -1),
(9, 3, -1, -1): (1, 1),
(9, 3, -1, 0): (1, 1),
(9, 3, -1, 1): (1, 1),
(9, 3, -1, 2): (1, 1),
(9, 3, -1, 3): (1, 1),
(9, 3, -1, 4): (1, 1),
(9, 3, -1, 5): (1, 0),
(9, 3, 0, -5): (0, 1),
(9, 3, 0, -4): (0, 1),
(9, 3, 0, -3): (0, 0),
(9, 3, 0, -2): (1, 1),
(9, 3, 0, -1): (0, 1),
(9, 3, 0, 0): (0, 1),
(9, 3, 0, 1): (0, 1),
(9, 3, 0, 2): (0, 1),
(9, 3, 0, 3): (0, 1),
(9, 3, 0, 4): (0, 1),
(9, 3, 0, 5): (0, 1),
(9, 3, 1, -5): (0, 1),
(9, 3, 1, -4): (0, 1),
(9, 3, 1, -3): (0, 1),
(9, 3, 1, -2): (0, 1),
(9, 3, 1, -1): (0, 1),
(9, 3, 1, 0): (-1, 1),
(9, 3, 1, 1): (-1, 1),
(9, 3, 1, 2): (-1, 1),
(9, 3, 1, 3): (-1, 1),
(9, 3, 1, 4): (-1, 1),
(9, 3, 1, 5): (-1, 1),
(9, 3, 2, -5): (0, 1),
(9, 3, 2, -4): (0, 1),
(9, 3, 2, -3): (0, 1),
(9, 3, 2, -2): (0, 1),
(9, 3, 2, -1): (0, 1),
(9, 3, 2, 0): (0, 1),
(9, 3, 2, 1): (0, 1),
(9, 3, 2, 2): (0, 1),
(9, 3, 2, 3): (0, 1),
(9, 3, 2, 4): (0, 1),
(9, 3, 2, 5): (0, 1),
(9, 3, 3, -5): (0, 1),
(9, 3, 3, -4): (0, 1),
(9, 3, 3, -3): (0, 1),
(9, 3, 3, -2): (0, 1),
(9, 3, 3, -1): (0, 1),
(9, 3, 3, 0): (0, 1),
(9, 3, 3, 1): (0, | |
+
0.3016*m.x383*m.x191 - m.x167*m.x391 == 0)
m.c193 = Constraint(expr=0.2369*m.x328*m.x136 + 0.0415*m.x336*m.x144 + 0.1653*m.x360*m.x168 + 0.2546*m.x368*m.x176 +
0.3016*m.x384*m.x192 - m.x168*m.x392 == 0)
m.c194 = Constraint(expr=0.07*m.x321*m.x129 + 0.0293*m.x329*m.x137 + 0.068*m.x337*m.x145 + 0.0442*m.x353*m.x161 + 0.677*
m.x361*m.x169 + 0.0505*m.x369*m.x177 + 0.061*m.x377*m.x185 - m.x169*m.x385 == 0)
m.c195 = Constraint(expr=0.07*m.x322*m.x130 + 0.0293*m.x330*m.x138 + 0.068*m.x338*m.x146 + 0.0442*m.x354*m.x162 + 0.677*
m.x362*m.x170 + 0.0505*m.x370*m.x178 + 0.061*m.x378*m.x186 - m.x170*m.x386 == 0)
m.c196 = Constraint(expr=0.07*m.x323*m.x131 + 0.0293*m.x331*m.x139 + 0.068*m.x339*m.x147 + 0.0442*m.x355*m.x163 + 0.677*
m.x363*m.x171 + 0.0505*m.x371*m.x179 + 0.061*m.x379*m.x187 - m.x171*m.x387 == 0)
m.c197 = Constraint(expr=0.07*m.x324*m.x132 + 0.0293*m.x332*m.x140 + 0.068*m.x340*m.x148 + 0.0442*m.x356*m.x164 + 0.677*
m.x364*m.x172 + 0.0505*m.x372*m.x180 + 0.061*m.x380*m.x188 - m.x172*m.x388 == 0)
m.c198 = Constraint(expr=0.07*m.x325*m.x133 + 0.0293*m.x333*m.x141 + 0.068*m.x341*m.x149 + 0.0442*m.x357*m.x165 + 0.677*
m.x365*m.x173 + 0.0505*m.x373*m.x181 + 0.061*m.x381*m.x189 - m.x173*m.x389 == 0)
m.c199 = Constraint(expr=0.07*m.x326*m.x134 + 0.0293*m.x334*m.x142 + 0.068*m.x342*m.x150 + 0.0442*m.x358*m.x166 + 0.677*
m.x366*m.x174 + 0.0505*m.x374*m.x182 + 0.061*m.x382*m.x190 - m.x174*m.x390 == 0)
m.c200 = Constraint(expr=0.07*m.x327*m.x135 + 0.0293*m.x335*m.x143 + 0.068*m.x343*m.x151 + 0.0442*m.x359*m.x167 + 0.677*
m.x367*m.x175 + 0.0505*m.x375*m.x183 + 0.061*m.x383*m.x191 - m.x175*m.x391 == 0)
m.c201 = Constraint(expr=0.07*m.x328*m.x136 + 0.0293*m.x336*m.x144 + 0.068*m.x344*m.x152 + 0.0442*m.x360*m.x168 + 0.677*
m.x368*m.x176 + 0.0505*m.x376*m.x184 + 0.061*m.x384*m.x192 - m.x176*m.x392 == 0)
m.c202 = Constraint(expr=0.0244*m.x329*m.x137 + 0.0332*m.x337*m.x145 + 0.013*m.x345*m.x153 + 0.0612*m.x361*m.x169 +
0.8682*m.x369*m.x177 - m.x177*m.x385 == 0)
m.c203 = Constraint(expr=0.0244*m.x330*m.x138 + 0.0332*m.x338*m.x146 + 0.013*m.x346*m.x154 + 0.0612*m.x362*m.x170 +
0.8682*m.x370*m.x178 - m.x178*m.x386 == 0)
m.c204 = Constraint(expr=0.0244*m.x331*m.x139 + 0.0332*m.x339*m.x147 + 0.013*m.x347*m.x155 + 0.0612*m.x363*m.x171 +
0.8682*m.x371*m.x179 - m.x179*m.x387 == 0)
m.c205 = Constraint(expr=0.0244*m.x332*m.x140 + 0.0332*m.x340*m.x148 + 0.013*m.x348*m.x156 + 0.0612*m.x364*m.x172 +
0.8682*m.x372*m.x180 - m.x180*m.x388 == 0)
m.c206 = Constraint(expr=0.0244*m.x333*m.x141 + 0.0332*m.x341*m.x149 + 0.013*m.x349*m.x157 + 0.0612*m.x365*m.x173 +
0.8682*m.x373*m.x181 - m.x181*m.x389 == 0)
m.c207 = Constraint(expr=0.0244*m.x334*m.x142 + 0.0332*m.x342*m.x150 + 0.013*m.x350*m.x158 + 0.0612*m.x366*m.x174 +
0.8682*m.x374*m.x182 - m.x182*m.x390 == 0)
m.c208 = Constraint(expr=0.0244*m.x335*m.x143 + 0.0332*m.x343*m.x151 + 0.013*m.x351*m.x159 + 0.0612*m.x367*m.x175 +
0.8682*m.x375*m.x183 - m.x183*m.x391 == 0)
m.c209 = Constraint(expr=0.0244*m.x336*m.x144 + 0.0332*m.x344*m.x152 + 0.013*m.x352*m.x160 + 0.0612*m.x368*m.x176 +
0.8682*m.x376*m.x184 - m.x184*m.x392 == 0)
# --- Machine-generated Pyomo constraint definitions (left unchanged on review) ---
# c210-c217: bilinear balance constraints of the form
#   sum_k a_k * x_i * x_j  -  x_m * x_n == 0
# with fixed weights (0.1076, 0.027, 0.8654); indices advance by one per constraint.
m.c210 = Constraint(expr=0.1076*m.x353*m.x161 + 0.027*m.x361*m.x169 + 0.8654*m.x377*m.x185 - m.x185*m.x385 == 0)
m.c211 = Constraint(expr=0.1076*m.x354*m.x162 + 0.027*m.x362*m.x170 + 0.8654*m.x378*m.x186 - m.x186*m.x386 == 0)
m.c212 = Constraint(expr=0.1076*m.x355*m.x163 + 0.027*m.x363*m.x171 + 0.8654*m.x379*m.x187 - m.x187*m.x387 == 0)
m.c213 = Constraint(expr=0.1076*m.x356*m.x164 + 0.027*m.x364*m.x172 + 0.8654*m.x380*m.x188 - m.x188*m.x388 == 0)
m.c214 = Constraint(expr=0.1076*m.x357*m.x165 + 0.027*m.x365*m.x173 + 0.8654*m.x381*m.x189 - m.x189*m.x389 == 0)
m.c215 = Constraint(expr=0.1076*m.x358*m.x166 + 0.027*m.x366*m.x174 + 0.8654*m.x382*m.x190 - m.x190*m.x390 == 0)
m.c216 = Constraint(expr=0.1076*m.x359*m.x167 + 0.027*m.x367*m.x175 + 0.8654*m.x383*m.x191 - m.x191*m.x391 == 0)
m.c217 = Constraint(expr=0.1076*m.x360*m.x168 + 0.027*m.x368*m.x176 + 0.8654*m.x384*m.x192 - m.x192*m.x392 == 0)
# c218-c330: recurrence constraints.  Each one, rearranged, enforces
#   x_next == x_cur - 0.1092 * x_cur * u
# (written as -(x_cur - 0.1092*x_cur*u) + x_next == 0), in runs of seven
# consecutive constraints per variable chain (e.g. c218-c224 chain x193..x200
# driven by x1..x7, then the indices jump to the next chain).
m.c218 = Constraint(expr=-(m.x193 - 0.1092*m.x193*m.x1) + m.x194 == 0)
m.c219 = Constraint(expr=-(m.x194 - 0.1092*m.x194*m.x2) + m.x195 == 0)
m.c220 = Constraint(expr=-(m.x195 - 0.1092*m.x195*m.x3) + m.x196 == 0)
m.c221 = Constraint(expr=-(m.x196 - 0.1092*m.x196*m.x4) + m.x197 == 0)
m.c222 = Constraint(expr=-(m.x197 - 0.1092*m.x197*m.x5) + m.x198 == 0)
m.c223 = Constraint(expr=-(m.x198 - 0.1092*m.x198*m.x6) + m.x199 == 0)
m.c224 = Constraint(expr=-(m.x199 - 0.1092*m.x199*m.x7) + m.x200 == 0)
m.c225 = Constraint(expr=-(m.x201 - 0.1092*m.x201*m.x9) + m.x202 == 0)
m.c226 = Constraint(expr=-(m.x202 - 0.1092*m.x202*m.x10) + m.x203 == 0)
m.c227 = Constraint(expr=-(m.x203 - 0.1092*m.x203*m.x11) + m.x204 == 0)
m.c228 = Constraint(expr=-(m.x204 - 0.1092*m.x204*m.x12) + m.x205 == 0)
m.c229 = Constraint(expr=-(m.x205 - 0.1092*m.x205*m.x13) + m.x206 == 0)
m.c230 = Constraint(expr=-(m.x206 - 0.1092*m.x206*m.x14) + m.x207 == 0)
m.c231 = Constraint(expr=-(m.x207 - 0.1092*m.x207*m.x15) + m.x208 == 0)
m.c232 = Constraint(expr=-(m.x209 - 0.1092*m.x209*m.x17) + m.x210 == 0)
m.c233 = Constraint(expr=-(m.x210 - 0.1092*m.x210*m.x18) + m.x211 == 0)
m.c234 = Constraint(expr=-(m.x211 - 0.1092*m.x211*m.x19) + m.x212 == 0)
m.c235 = Constraint(expr=-(m.x212 - 0.1092*m.x212*m.x20) + m.x213 == 0)
m.c236 = Constraint(expr=-(m.x213 - 0.1092*m.x213*m.x21) + m.x214 == 0)
m.c237 = Constraint(expr=-(m.x214 - 0.1092*m.x214*m.x22) + m.x215 == 0)
m.c238 = Constraint(expr=-(m.x215 - 0.1092*m.x215*m.x23) + m.x216 == 0)
m.c239 = Constraint(expr=-(m.x217 - 0.1092*m.x217*m.x25) + m.x218 == 0)
m.c240 = Constraint(expr=-(m.x218 - 0.1092*m.x218*m.x26) + m.x219 == 0)
m.c241 = Constraint(expr=-(m.x219 - 0.1092*m.x219*m.x27) + m.x220 == 0)
m.c242 = Constraint(expr=-(m.x220 - 0.1092*m.x220*m.x28) + m.x221 == 0)
m.c243 = Constraint(expr=-(m.x221 - 0.1092*m.x221*m.x29) + m.x222 == 0)
m.c244 = Constraint(expr=-(m.x222 - 0.1092*m.x222*m.x30) + m.x223 == 0)
m.c245 = Constraint(expr=-(m.x223 - 0.1092*m.x223*m.x31) + m.x224 == 0)
m.c246 = Constraint(expr=-(m.x225 - 0.1092*m.x225*m.x33) + m.x226 == 0)
m.c247 = Constraint(expr=-(m.x226 - 0.1092*m.x226*m.x34) + m.x227 == 0)
m.c248 = Constraint(expr=-(m.x227 - 0.1092*m.x227*m.x35) + m.x228 == 0)
m.c249 = Constraint(expr=-(m.x228 - 0.1092*m.x228*m.x36) + m.x229 == 0)
m.c250 = Constraint(expr=-(m.x229 - 0.1092*m.x229*m.x37) + m.x230 == 0)
m.c251 = Constraint(expr=-(m.x230 - 0.1092*m.x230*m.x38) + m.x231 == 0)
m.c252 = Constraint(expr=-(m.x231 - 0.1092*m.x231*m.x39) + m.x232 == 0)
m.c253 = Constraint(expr=-(m.x233 - 0.1092*m.x233*m.x41) + m.x234 == 0)
m.c254 = Constraint(expr=-(m.x234 - 0.1092*m.x234*m.x42) + m.x235 == 0)
m.c255 = Constraint(expr=-(m.x235 - 0.1092*m.x235*m.x43) + m.x236 == 0)
m.c256 = Constraint(expr=-(m.x236 - 0.1092*m.x236*m.x44) + m.x237 == 0)
m.c257 = Constraint(expr=-(m.x237 - 0.1092*m.x237*m.x45) + m.x238 == 0)
m.c258 = Constraint(expr=-(m.x238 - 0.1092*m.x238*m.x46) + m.x239 == 0)
m.c259 = Constraint(expr=-(m.x239 - 0.1092*m.x239*m.x47) + m.x240 == 0)
m.c260 = Constraint(expr=-(m.x241 - 0.1092*m.x241*m.x49) + m.x242 == 0)
m.c261 = Constraint(expr=-(m.x242 - 0.1092*m.x242*m.x50) + m.x243 == 0)
m.c262 = Constraint(expr=-(m.x243 - 0.1092*m.x243*m.x51) + m.x244 == 0)
m.c263 = Constraint(expr=-(m.x244 - 0.1092*m.x244*m.x52) + m.x245 == 0)
m.c264 = Constraint(expr=-(m.x245 - 0.1092*m.x245*m.x53) + m.x246 == 0)
m.c265 = Constraint(expr=-(m.x246 - 0.1092*m.x246*m.x54) + m.x247 == 0)
m.c266 = Constraint(expr=-(m.x247 - 0.1092*m.x247*m.x55) + m.x248 == 0)
m.c267 = Constraint(expr=-(m.x249 - 0.1092*m.x249*m.x57) + m.x250 == 0)
m.c268 = Constraint(expr=-(m.x250 - 0.1092*m.x250*m.x58) + m.x251 == 0)
m.c269 = Constraint(expr=-(m.x251 - 0.1092*m.x251*m.x59) + m.x252 == 0)
m.c270 = Constraint(expr=-(m.x252 - 0.1092*m.x252*m.x60) + m.x253 == 0)
m.c271 = Constraint(expr=-(m.x253 - 0.1092*m.x253*m.x61) + m.x254 == 0)
m.c272 = Constraint(expr=-(m.x254 - 0.1092*m.x254*m.x62) + m.x255 == 0)
m.c273 = Constraint(expr=-(m.x255 - 0.1092*m.x255*m.x63) + m.x256 == 0)
m.c274 = Constraint(expr=-(m.x257 - 0.1092*m.x257*m.x65) + m.x258 == 0)
m.c275 = Constraint(expr=-(m.x258 - 0.1092*m.x258*m.x66) + m.x259 == 0)
m.c276 = Constraint(expr=-(m.x259 - 0.1092*m.x259*m.x67) + m.x260 == 0)
m.c277 = Constraint(expr=-(m.x260 - 0.1092*m.x260*m.x68) + m.x261 == 0)
m.c278 = Constraint(expr=-(m.x261 - 0.1092*m.x261*m.x69) + m.x262 == 0)
m.c279 = Constraint(expr=-(m.x262 - 0.1092*m.x262*m.x70) + m.x263 == 0)
m.c280 = Constraint(expr=-(m.x263 - 0.1092*m.x263*m.x71) + m.x264 == 0)
m.c281 = Constraint(expr=-(m.x265 - 0.1092*m.x265*m.x73) + m.x266 == 0)
m.c282 = Constraint(expr=-(m.x266 - 0.1092*m.x266*m.x74) + m.x267 == 0)
m.c283 = Constraint(expr=-(m.x267 - 0.1092*m.x267*m.x75) + m.x268 == 0)
m.c284 = Constraint(expr=-(m.x268 - 0.1092*m.x268*m.x76) + m.x269 == 0)
m.c285 = Constraint(expr=-(m.x269 - 0.1092*m.x269*m.x77) + m.x270 == 0)
m.c286 = Constraint(expr=-(m.x270 - 0.1092*m.x270*m.x78) + m.x271 == 0)
m.c287 = Constraint(expr=-(m.x271 - 0.1092*m.x271*m.x79) + m.x272 == 0)
m.c288 = Constraint(expr=-(m.x273 - 0.1092*m.x273*m.x81) + m.x274 == 0)
m.c289 = Constraint(expr=-(m.x274 - 0.1092*m.x274*m.x82) + m.x275 == 0)
m.c290 = Constraint(expr=-(m.x275 - 0.1092*m.x275*m.x83) + m.x276 == 0)
m.c291 = Constraint(expr=-(m.x276 - 0.1092*m.x276*m.x84) + m.x277 == 0)
m.c292 = Constraint(expr=-(m.x277 - 0.1092*m.x277*m.x85) + m.x278 == 0)
m.c293 = Constraint(expr=-(m.x278 - 0.1092*m.x278*m.x86) + m.x279 == 0)
m.c294 = Constraint(expr=-(m.x279 - 0.1092*m.x279*m.x87) + m.x280 == 0)
m.c295 = Constraint(expr=-(m.x281 - 0.1092*m.x281*m.x89) + m.x282 == 0)
m.c296 = Constraint(expr=-(m.x282 - 0.1092*m.x282*m.x90) + m.x283 == 0)
m.c297 = Constraint(expr=-(m.x283 - 0.1092*m.x283*m.x91) + m.x284 == 0)
m.c298 = Constraint(expr=-(m.x284 - 0.1092*m.x284*m.x92) + m.x285 == 0)
m.c299 = Constraint(expr=-(m.x285 - 0.1092*m.x285*m.x93) + m.x286 == 0)
m.c300 = Constraint(expr=-(m.x286 - 0.1092*m.x286*m.x94) + m.x287 == 0)
m.c301 = Constraint(expr=-(m.x287 - 0.1092*m.x287*m.x95) + m.x288 == 0)
m.c302 = Constraint(expr=-(m.x289 - 0.1092*m.x289*m.x97) + m.x290 == 0)
m.c303 = Constraint(expr=-(m.x290 - 0.1092*m.x290*m.x98) + m.x291 == 0)
m.c304 = Constraint(expr=-(m.x291 - 0.1092*m.x291*m.x99) + m.x292 == 0)
m.c305 = Constraint(expr=-(m.x292 - 0.1092*m.x292*m.x100) + m.x293 == 0)
m.c306 = Constraint(expr=-(m.x293 - 0.1092*m.x293*m.x101) + m.x294 == 0)
m.c307 = Constraint(expr=-(m.x294 - 0.1092*m.x294*m.x102) + m.x295 == 0)
m.c308 = Constraint(expr=-(m.x295 - 0.1092*m.x295*m.x103) + m.x296 == 0)
m.c309 = Constraint(expr=-(m.x297 - 0.1092*m.x297*m.x105) + m.x298 == 0)
m.c310 = Constraint(expr=-(m.x298 - 0.1092*m.x298*m.x106) + m.x299 == 0)
m.c311 = Constraint(expr=-(m.x299 - 0.1092*m.x299*m.x107) + m.x300 == 0)
m.c312 = Constraint(expr=-(m.x300 - 0.1092*m.x300*m.x108) + m.x301 == 0)
m.c313 = Constraint(expr=-(m.x301 - 0.1092*m.x301*m.x109) + m.x302 == 0)
m.c314 = Constraint(expr=-(m.x302 - 0.1092*m.x302*m.x110) + m.x303 == 0)
m.c315 = Constraint(expr=-(m.x303 - 0.1092*m.x303*m.x111) + m.x304 == 0)
m.c316 = Constraint(expr=-(m.x305 - 0.1092*m.x305*m.x113) + m.x306 == 0)
m.c317 = Constraint(expr=-(m.x306 - 0.1092*m.x306*m.x114) + m.x307 == 0)
m.c318 = Constraint(expr=-(m.x307 - 0.1092*m.x307*m.x115) + m.x308 == 0)
m.c319 = Constraint(expr=-(m.x308 - 0.1092*m.x308*m.x116) + m.x309 == 0)
m.c320 = Constraint(expr=-(m.x309 - 0.1092*m.x309*m.x117) + m.x310 == 0)
m.c321 = Constraint(expr=-(m.x310 - 0.1092*m.x310*m.x118) + m.x311 == 0)
m.c322 = Constraint(expr=-(m.x311 - 0.1092*m.x311*m.x119) + m.x312 == 0)
m.c323 = Constraint(expr=-(m.x313 - 0.1092*m.x313*m.x121) + m.x314 == 0)
m.c324 = Constraint(expr=-(m.x314 - 0.1092*m.x314*m.x122) + m.x315 == 0)
m.c325 = Constraint(expr=-(m.x315 - 0.1092*m.x315*m.x123) + m.x316 == 0)
m.c326 = Constraint(expr=-(m.x316 - 0.1092*m.x316*m.x124) + m.x317 == 0)
m.c327 = Constraint(expr=-(m.x317 - 0.1092*m.x317*m.x125) + m.x318 == 0)
m.c328 = Constraint(expr=-(m.x318 - 0.1092*m.x318*m.x126) + m.x319 == 0)
m.c329 = Constraint(expr=-(m.x319 - 0.1092*m.x319*m.x127) + m.x320 == 0)
m.c330 = Constraint(expr=-(m.x321 - 0.1092*m.x321*m.x129) + m.x322 == 0)
m.c331 = Constraint(expr=-(m.x322 | |
sensor.index)
elif sensor.sens_type == 6 and sensor.multi_type == 6:
try:
state = sensor.last_read_value[6] # multi Moisture
except:
sensor.last_read_value[6] = -127
pass
if state == -127:
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Probe Error'), sensor.index)
else:
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Moisture {}%').format(state), sensor.index)
if state != sensor.prev_read_value:
sensor.prev_read_value = state
changed_state = True
major_change = False
status_update = False
if state > float(sensor.trigger_high_threshold) and changed_state:
(major_change, status_update) = self._check_high_trigger(sensor)
sensor.last_high_report = now()
action = _(u'High Trigger') if major_change else _(u'High Value')
if status_update:
if sensor.log_samples:
self.update_log(sensor, 'lgs', state, action) # wait for reading to be updated
if major_change:
self._trigger_programs(sensor, sensor.trigger_high_program)
elif state < float(sensor.trigger_low_threshold) and changed_state:
(major_change, status_update) = self._check_low_trigger(sensor)
sensor.last_low_report = now()
action = _(u'Low Trigger') if major_change else _(u'Low Value')
if status_update:
if sensor.log_samples:
self.update_log(sensor, 'lgs', state, action) # wait for reading to be updated
if major_change:
self._trigger_programs(sensor, sensor.trigger_low_program)
else:
if changed_state:
(major_change, status_update) = self._check_good_trigger(sensor)
sensor.last_good_report = now()
action = _(u'Normal Trigger') if major_change else _(u'Normal Value')
if status_update:
self.update_log(sensor, 'lgs', state, action) # wait for reading to be updated
if major_change:
if sensor.send_email:
text = _(u'Sensor') + u': {} ({})'.format(sensor.name, self.status[sensor.index][1])
subj = _(u'Sensor Change')
body = _(u'Sensor Change') + u': {} ({})'.format(sensor.name, self.status[sensor.index][1])
self._try_send_mail(body, text, attachment=None, subject=subj)
if sensor.log_samples: # sensor is enabled and enabled log samples
if int(now() - sensor.last_log_samples) >= int(sensor.sample_rate):
sensor.last_log_samples = now()
self.update_log(sensor, 'lgs', state) # lge is event, lgs is samples
if sensor.sens_type == 5:
sensor.err_msg[0] = 1
if sensor.sens_type == 6 and sensor.multi_type == 0:
sensor.err_msg[0] = 1
if sensor.sens_type == 6 and sensor.multi_type == 1:
sensor.err_msg[1] = 1
if sensor.sens_type == 6 and sensor.multi_type == 2:
sensor.err_msg[2] = 1
if sensor.sens_type == 6 and sensor.multi_type == 3:
sensor.err_msg[3] = 1
for i in range(4):
if sensor.last_msg[i] != sensor.err_msg[i]:
sensor.last_msg[i] = sensor.err_msg[i]
logging.warning(_(u'Sensor: {} now response').format(sensor.name))
if sensor.send_email:
text = _(u'Now response')
subj = _(u'Sensor {}').format(sensor.name)
body = text
self._try_send_mail(body, text, attachment=None, subject=subj)
if sensor.log_event:
self.update_log(sensor, 'lge', _(u'Now response'))
else:
if sensor.sens_type == 5:
sensor.err_msg[0] = 0
if sensor.sens_type == 6 and sensor.multi_type == 0:
sensor.err_msg[0] = 0
if sensor.sens_type == 6 and sensor.multi_type == 1:
sensor.err_msg[1] = 0
if sensor.sens_type == 6 and sensor.multi_type == 2:
sensor.err_msg[2] = 0
if sensor.sens_type == 6 and sensor.multi_type == 3:
sensor.err_msg[3] = 0
for i in range(4):
if sensor.last_msg[i] != sensor.err_msg[i]:
sensor.last_msg[i] = sensor.err_msg[i]
if sensor.enabled:
logging.warning(_(u'Sensor: {} not response!').format(sensor.name))
if sensor.send_email:
if sensor.enabled:
text = _(u'Not response!')
else:
text = _(u'Out of order')
subj = _(u'Sensor {}').format(sensor.name)
body = text
self._try_send_mail(body, text, attachment=None, subject=subj)
if sensor.log_event:
if sensor.enabled:
self.update_log(sensor, 'lge', _(u'Not response!'))
else:
self.update_log(sensor, 'lge', _(u'Out of order'))
if sensor.show_in_footer:
if sensor.enabled:
self.start_status(sensor.name, _(u'Not response!'), sensor.index)
else:
self.start_status(sensor.name, _(u'Out of order'), sensor.index)
### Leak Detector, Multi Leak Detector ###
if sensor.sens_type == 2 or (sensor.sens_type == 6 and sensor.multi_type == 5):
if sensor.response and sensor.enabled: # sensor is enabled and response is OK
state = -127
liters_per_sec = -1
if sensor.sens_type == 2:
try:
state = sensor.last_read_value[5] # type is Leak Detector
liters_per_sec = float(sensor.liter_per_pulses)*state
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Leak {}l/s').format(liters_per_sec), sensor.index)
except:
sensor.last_read_value[5] = -127.0
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Probe Error'), sensor.index)
pass
if sensor.last_read_value[5] != sensor.prev_read_value[5]:
sensor.prev_read_value[5] = sensor.last_read_value[5]
changed_state = True
elif sensor.sens_type == 6 and sensor.multi_type == 5:
try:
state = sensor.last_read_value[5] # multi Leak Detector
liters_per_sec = float(sensor.liter_per_pulses)*state
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Leak {}l/s').format(liters_per_sec), sensor.index)
except:
sensor.last_read_value[5] = -127
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Probe Error'), sensor.index)
pass
if sensor.last_read_value[5] != sensor.prev_read_value:
sensor.prev_read_value = sensor.last_read_value[5]
changed_state = True
# todo reaction Leak Detector(run progams)
if sensor.log_samples: # sensor is enabled and enabled log samples
if int(now() - sensor.last_log_samples) >= int(sensor.sample_rate):
sensor.last_log_samples = now()
self.update_log(sensor, 'lgs', liters_per_sec) # lge is event, lgs is samples
sensor.err_msg[5] = 1
if sensor.last_msg[5] != sensor.err_msg[5]:
sensor.last_msg[5] = sensor.err_msg[5]
logging.warning(_(u'Sensor: {} now response').format(sensor.name))
if sensor.send_email:
text = _(u'Now response')
subj = _(u'Sensor {}').format(sensor.name)
body = text
self._try_send_mail(body, text, attachment=None, subject=subj)
if sensor.log_event:
self.update_log(sensor, 'lge', _(u'Now response'))
else:
sensor.err_msg[5] = 0
if sensor.last_msg[5] != sensor.err_msg[5]:
sensor.last_msg[5] = sensor.err_msg[5]
logging.warning(_(u'Sensor: {} not response!').format(sensor.name))
if sensor.send_email:
if sensor.enabled:
text = _(u'Not response!')
else:
text = _(u'Out of order')
subj = _(u'Sensor {}').format(sensor.name)
body = text
self._try_send_mail(body, text, attachment=None, subject=subj)
if sensor.log_event:
if sensor.enabled:
self.update_log(sensor, 'lge', _(u'Not response!'))
else:
self.update_log(sensor, 'lge', _(u'Out of order'))
if sensor.show_in_footer:
if sensor.enabled:
self.start_status(sensor.name, _(u'Not response!'), sensor.index)
else:
self.start_status(sensor.name, _(u'Out of order'), sensor.index)
### Multi Sonic ###
if sensor.sens_type == 6 and sensor.multi_type == 8:
if sensor.response and sensor.enabled: # sensor is enabled and response is OK
state = -127
try:
state = sensor.last_read_value[8] # multi Sonic
except:
pass
sensor.last_read_value[8] = -127
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Probe Error'), sensor.index)
if sensor.use_water_stop: # If the level sensor fails, the above selected stations in the scheduler will stop
self.set_stations_in_scheduler_off(sensor)
if sensor.last_read_value[8] != sensor.prev_read_value:
sensor.prev_read_value = sensor.last_read_value[8]
changed_state = True
level_in_tank = 0
volume_in_tank = 0
percent_in_tank = 0
if state > 0:
if changed_state:
sensor.aux_reg_p = 1
level_in_tank = self.get_tank_cm(state, sensor.distance_bottom, sensor.distance_top) # tank level in cm from ping
percent_in_tank = self.get_percent(level_in_tank, sensor.distance_bottom, sensor.distance_top) # percent in tank from tank level
if sensor.check_liters:
# in liters
volume_in_tank = self.get_volume(level_in_tank, sensor.diameter, True) # volume in tank from tank level in liters
tempText = str(volume_in_tank) + ' ' + _(u'liters') + ', ' + str(level_in_tank) + ' ' + _(u'cm') + ' (' + str(percent_in_tank) + ' ' + (u'%)')
else:
# in m3
volume_in_tank = self.get_volume(level_in_tank, sensor.diameter, False) # volume in tank from tank level in m3
tempText = str(volume_in_tank) + ' ' + _(u'm3') + ', ' + str(level_in_tank) + ' ' + _(u'cm') + ' (' + str(percent_in_tank) + ' ' + (u'%)')
if sensor.show_in_footer:
self.start_status(sensor.name, u'{}'.format(tempText), sensor.index)
else:
if sensor.show_in_footer:
self.start_status(sensor.name, _(u'Probe Error'), sensor.index)
if sensor.use_water_stop: # If the level sensor fails, the above selected stations in the scheduler will stop
if int(sensor.aux_reg_p)==1:
sensor.aux_reg_p = 0
self.set_stations_in_scheduler_off(sensor)
if sensor.log_event:
self.update_log(sensor, 'lge', _(u'Probe Error'))
if sensor.send_email: # Send Email?
text = _(u'Sensor') + u': {}'.format(sensor.name)
subj = _(u'Sensor {}').format(sensor.name)
body = _(u'Sensor Notification') + u': ' + _(u'Probe Error')
self._try_send_mail(body, text, attachment=None, subject=subj)
### regulation water in tank if enable regulation ###
if level_in_tank > 0 and sensor.enable_reg: # if enable regulation "maximum water level"
reg_station = stations.get(int(sensor.reg_output))
### level > regulation maximum ###
if level_in_tank > int(sensor.reg_max): # if actual level in tank > set maximum water level
if int(sensor.aux_reg_u)==1:
sensor.aux_reg_u = 0
sensor.aux_reg_d = 1
regulation_text = _(u'Regulation set ON.') + ' ' + ' (' + _(u'Output') + ' ' + str(reg_station.index+1) + ').'
start = datetime.datetime.now()
sid = reg_station.index
end = datetime.datetime.now() + datetime.timedelta(seconds=int(sensor.reg_ss), minutes=int(sensor.reg_mm))
new_schedule = {
'active': True,
'program': -1,
'station': sid,
'program_name': u'{}'.format(sensor.name),
'fixed': True,
'cut_off': 0,
'manual': True,
'blocked': False,
'start': start,
'original_start': start,
'end': end,
'uid': '%s-%s-%d' % (str(start), "Manual", sid),
'usage': stations.get(sid).usage
}
log.start_run(new_schedule)
stations.activate(new_schedule['station'])
if sensor.log_event:
self.update_log(sensor, 'lge', u'{}'.format(regulation_text))
### level < regulation minimum ###
if level_in_tank < int(sensor.reg_min):
if int(sensor.aux_reg_d)==1:
sensor.aux_reg_u = 1
sensor.aux_reg_d = 0
regulation_text = _(u'Regulation set OFF.') + ' ' + ' (' + _(u'Output') + ' ' + str(reg_station.index+1) + ').'
sid = reg_station.index
stations.deactivate(sid)
active = log.active_runs()
for interval in active:
if interval['station'] == sid:
log.finish_run(interval)
if sensor.log_event:
self.update_log(sensor, 'lge', u'{}'.format(regulation_text))
### level in tank has minimum +5cm refresh ###
if level_in_tank > int(sensor.water_minimum)+5 and int(sensor.aux_mini)==0:
sensor.aux_mini = 1
action = _(u'Normal Trigger')
if sensor.log_samples:
self.update_log(sensor, 'lgs', level_in_tank, action)
delaytime = int(sensor.delay_duration) # if the level in the tank rises above the minimum +5 cm, the delay is deactivated
regulation_text = _(u'Water in Tank') + ' > ' + str(int(sensor.water_minimum)+5) + _(u'cm')
if sensor.log_event:
self.update_log(sensor, 'lge', u'{}'.format(regulation_text))
rd_text = None
if delaytime > 0:
if sensor.name in rain_blocks:
del rain_blocks[sensor.name]
rd_text = _(u'Removing Rain delay')
if sensor.log_event:
self.update_log(sensor, 'lge', u'{}'.format(rd_text))
if sensor.send_email: # Send Email?
text = _(u'Sensor') + u': {} | |
'choro_graph.relayoutData':
if type(click_value).__name__ == 'dict' and 'mapbox.zoom' in click_value.keys() and toggle_value is True:
fig_info['data'][0]['marker']['size'] = click_value['mapbox.zoom'] * 4
# fig_info['data'][0]['radius'] = math.ceil(click_value['mapbox.zoom'] * 3 + 1)
return 'output_tab', toggle_value, store_state, False, fig_info
elif click_value != {'autosize': True}:
print("HERE")
raise PreventUpdate
# Get the cached contents of the data file here instead of rereading every time
data = cache.get(data_state)
if data is not None:
df = data[0]
file_info = data[1]
else:
return 'info_tab', False, store_state, True, fig_info
# Process inputs (years, data) and set up variables
year_list = xvu.get_target_years(start, end, through_options)
# Determine if viewing by country or basin to set up data calls
df_per_area = None
if area_type == "gcam":
if toggle_value is False:
df_per_area = xvu.data_per_basin(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
features = basin_features
else:
if toggle_value is False:
df_per_area = xvu.data_per_country(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
features = country_features
# If the user clicked the reset button then reset graph selection store data to empty
if click_info == 'reset_btn.n_clicks':
if area_type == "gcam":
df_per_area = xvu.data_per_basin(df, statistic, year_list, df_ref, months, filename, units)
else:
df_per_area = xvu.data_per_country(df, statistic, year_list, df_ref, months, filename, units)
df_per_area['var'] = round(df_per_area['var'], 2)
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end, file_info, months,
area_type, units)
store_state = None
return 'output_tab', False, store_state, False, fig
# Generate figure based on type of click data (click, area select, or initial load)
if selected_data is not None and click_info == 'choro_graph.selectedData':
store_state = selected_data
if len(selected_data['points']) == 0:
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end, file_info,
months, area_type, units)
else:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type,
units)
elif click_info == "grid_toggle.on":
if store_state is None:
selected_data = None
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type, units)
else:
if store_state is None:
selected_data = None
if selected_data is not None and len(selected_data['points']) != 0:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.update_choro_select(df_ref, df_per_area, features, year_list, mapbox_token,
selected_data, start, end, statistic, file_info, months, area_type,
units)
else:
if toggle_value is True:
fig = xvu.update_choro_grid(df_ref, df, features, year_list, mapbox_token, selected_data,
start, end, statistic, file_info, months, area_type, units, filename)
else:
fig = xvu.plot_choropleth(df_per_area, features, mapbox_token, statistic, start, end,
file_info, months, area_type, units)
return 'output_tab', toggle_value, store_state, False, fig
# If no contents, just return the blank map with instruction
else:
raise PreventUpdate
# Callback to set start year options when file is uploaded and store data in disk cache
@app.callback(
    [Output("start_year", "options"), Output("start_year", "value"), Output("upload-data", "children"),
     Output("data_store", 'data'), Output("months_select", "options"), Output("units", "options"),
     Output("units", "value")],
    [Input("upload-data", "contents")], [State('upload-data', 'filename'), State('upload-data', 'last_modified')],
    prevent_initial_call=True
)
def update_options(contents, filename, filedate):
    """Set start year options based on uploaded file's data.

    :param contents: Contents of uploaded file
    :type contents: str
    :param filename: Name(s) of uploaded file
    :type filename: list
    :param filedate: Date of uploaded file
    :type filedate: str
    :return: year options, initial year value, upload widget text, cache key,
             month options, unit options, initial unit value
    """
    if not contents:
        # Bug fix: the original fell through and implicitly returned None,
        # which is an error for a multi-output Dash callback.  Abort instead.
        raise PreventUpdate
    # Process contents for available years and months
    target_years, months_list = xvu.process_input_years(contents, filename, filedate)
    months = [] if months_list is None else xvu.get_available_months(months_list)
    name = filename[0]
    # Truncate long file names in the upload widget label
    label = "Using file " + (name[:25] + '...' if len(name) > 25 else name)
    new_text = html.Div([label])
    data = xvu.process_file(contents, filename, filedate, years=None)
    xanthos_data = data[0]
    df = xvu.prepare_data(xanthos_data, df_ref)
    # Create id key for the data store; the disk cache holds the heavy data,
    # and only this small key travels through the Dash store component.
    file_id = str(uuid.uuid4())
    data_state = file_id
    cache.set(file_id, [df, data[1]])
    # Evaluate and set unit options; initial unit is guessed from the file name
    unit_options = xvu.get_unit_options(data[1])
    if 'km3' in name:
        unit_val = 'km³'
    elif 'mm' in name:
        unit_val = 'mm'
    else:
        unit_val = 'm³/s'
    return target_years, target_years[0]['value'], new_text, data_state, months, unit_options, unit_val
# Callback to set through year options when start year changes
@app.callback(
    [Output('through_year', 'options'), Output('through_year', 'value')],
    [Input('start_year', 'value'), Input('start_year', 'options')], [State('through_year', 'value')],
    prevent_initial_call=True)
def set_through_year_list(value, options, current_value):
    """Assign through/end year options based on the start year options and value.

    :param value: Start year's selected value
    :type value: int
    :param options: Start year's option list (list of {'label', 'value'} dicts)
    :type options: list
    :param current_value: Current value of through_year, if any
    :type current_value: int
    :return: Through/end year options and initial value
    """
    # (Removed leftover debug print(value) from the original.)
    last_year = options[-1]['value']  # fallback when no valid selection exists
    if current_value is None:
        year_list = xvu.available_through_years(options, options[0]['value'])
        new_value = last_year
    else:
        year_list = xvu.available_through_years(options, value)
        # Keep the user's previous selection only if it is still a valid option.
        if any(opt['value'] == current_value for opt in options):
            new_value = current_value
        else:
            new_value = last_year
    return year_list, new_value
# Callback to load the hydro graph when user clicks on choropleth graph
@app.callback(
Output('hydro_graph', 'figure'),
[Input('choro_graph', 'clickData'), Input("submit_btn", 'n_clicks')],
[State('start_year', 'value'), State('through_year', 'value'), State("upload-data", "contents"),
State('upload-data', 'filename'), State('upload-data', 'last_modified'), State("through_year", "options"),
State('months_select', 'value'), State('area_select', 'value'), State("hydro_graph", 'figure'),
State("units", "value"), State("data_store", "data")],
prevent_initial_call=True
)
def update_hydro(click_data, n_click, start, end, contents, filename, filedate, year_options, months, area_type,
hydro_state, units, data_state):
"""Generate choropleth figure based on input values and type of click event
:param click_data: Click event data for the choropleth graph
:type click_data: dict
:param n_click Submit button click event
:type n_click object
:param start Start year value
:type start str
:param end End year value
:type end str
:param contents: Contents of uploaded file
:type contents: str
:param filename: Name of uploaded file
:type filename: list
:param filedate: Date of uploaded file
:type filedate: str
:param year_options: List of year range
:type year_options: dict
:param months: List of selected months
:type months: list
:param area_type: Indicates if user is viewing by country or basin
:type area_type: str
:param hydro_state: Current state of hydro figure
:type hydro_state: dict
:param units: Chosen units
:type units: str
:param data_state: File cache data
:type data_state: dict
:return: Choropleth figure
"""
if contents is not None:
# If invalid end date then don't do anything and output message
if start >= end:
return {
'data': [],
'layout': {
'title': 'Please choose an end year that is greater than the start year'
}
}
# If there wasn't a click event on choro graph then do not load new hydro graph
if click_data is None:
return {
'data': [],
'layout': {
'title': 'Single Basin Data per Year (Click on a basin to load)'
}
}
# Get data from cache
data = cache.get(data_state)
if data is not None:
df = data[0]
file_info = data[1]
else:
raise PreventUpdate
# Evaluate chosen area type (basin or country) and set dynamic parameter values
if area_type == "gcam":
area_name = "basin_name"
area_id = "basin_id"
feature_id = "properties.basin_id"
area_loc = "basin_id"
area_title = "Basin"
area_custom_index = 0
else:
area_name = "country_name"
area_id = "country_id"
feature_id = "properties.name"
area_loc = "country_name"
area_title = "Country"
area_custom_index = 1
# Get data from user click
points = click_data['points']
context = dash.callback_context.triggered[0]['prop_id']
# Evaluate current state and only update if user made a different selection
if context != 'choro_graph.clickData' and 'data' in hydro_state.keys() and len(hydro_state['data']) > 0:
hydro_type = hydro_state['data'][0]['customdata'][0][0]
if hydro_type == "basin_id" and area_type == "country":
raise PreventUpdate
elif hydro_type == "country_name" and area_type == "gcam":
raise PreventUpdate
# Evaluate click event to determine if user clicked on an area or a grid cell
if 'cell_id' not in points[0]['customdata'].keys():
location = points[0]['customdata'][area_loc]
location_type = area_title
else:
location = points[0]['customdata']['cell_id']
location_type = 'cell'
# Process years, basin/cell information
years = xvu.get_target_years(start, end, year_options)
if location_type == 'Basin':
hydro_data = xvu.data_per_year_area(df, location, years, months, area_loc, filename, units, df_ref)
return xvu.plot_hydrograph(hydro_data, location, df_ref, 'basin_id', file_info, units)
elif location_type == 'Country':
hydro_data = xvu.data_per_year_area(df, location, years, months, area_loc, filename, units, df_ref)
return xvu.plot_hydrograph(hydro_data, location, df_ref, 'country_name', file_info, units)
elif location_type == 'cell':
hydro_data = | |
* qa * qc) ** 0.5) / (2 * qa)
cuts.append(x1 + x)
if len(cuts) == npieces - 1:
return cuts
segment_remaining -= needed
needed = size
needed -= segment_remaining
return qc
def func_23576a1c407d4bfca4fd093d67be1f4f(npieces, x2, h1, x1, cuts, size, h2):
    """Auto-generated variant of an equal-area trapezoid cutter: walks the
    segment [x1, x2] with heights h1..h2, appending cut positions to ``cuts``
    (returned once npieces-1 cuts exist); this variant otherwise returns ``x``.

    NOTE(review): ``segment_remaining`` and ``needed`` are assigned inside the
    loop, making them local, yet the ``while`` condition reads them first --
    calling this function raises UnboundLocalError.
    """
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            # Rectangle: offset is area divided by the constant height.
            x = area_from_left / h1
        else:
            # Trapezoid: solve qa*x**2 + qb*x + qc == 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
        needed -= segment_remaining
    return x
def func_e5d9d276d9864bf892d3e11c4cb342c1(npieces, x2, h1, x1, cuts, size, h2):
    """Auto-generated variant of the equal-area trapezoid cutter; this
    variant returns ``width`` after the loop (or ``cuts`` once filled).

    NOTE(review): ``segment_remaining`` and ``needed`` are local (assigned in
    the loop) but read first in the ``while`` condition -- UnboundLocalError.
    """
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
        needed -= segment_remaining
    return width
def func_142e2ded8668483299c11b71b55833c7(npieces, x2, h1, x1, cuts, size, h2):
    """Auto-generated variant of the equal-area trapezoid cutter; this
    variant returns ``qa`` after the loop (or ``cuts`` once filled).

    NOTE(review): ``segment_remaining`` and ``needed`` are local (assigned in
    the loop) but read first in the ``while`` condition -- UnboundLocalError.
    """
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
        needed -= segment_remaining
    return qa
def func_7a4f8abeb261461aa2a63a6409324a5c(npieces, x2, h1, x1, cuts, size, h2):
    """Auto-generated variant of the equal-area trapezoid cutter; this
    variant returns ``needed`` after the loop (or ``cuts`` once filled).

    NOTE(review): ``segment_remaining`` and ``needed`` are local (assigned in
    the loop) but read first in the ``while`` condition -- UnboundLocalError.
    """
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
        needed -= segment_remaining
    return needed
def func_0bd74ffe29ef485cb99780a529ba7a06(npieces, x2, h1, x1, cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # Both `segment_remaining` and `needed` are read before ever being
    # assigned, so calling this raises UnboundLocalError at the `while`
    # condition -- confirm against the source algorithm before use.
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return area_from_left
def func_645b604a84d94fdcbe12614868bdaf6c(npieces, x2, h1, x1, cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # Both `segment_remaining` and `needed` are read before ever being
    # assigned, so calling this raises UnboundLocalError at the `while`
    # condition -- confirm against the source algorithm before use.
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return segment_remaining
def func_5f1b28a4d8ad48abb0c5631bddc03ed6(npieces, x2, h1, x1, cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # Both `segment_remaining` and `needed` are read before ever being
    # assigned, so calling this raises UnboundLocalError at the `while`
    # condition -- confirm against the source algorithm before use.
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return qb
def func_b4e8bbd0f98a4651a9b9620f250943ce(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return qb
def func_f849a07ce77b4c019e7fbf9dbfcd6261(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return area_from_left
def func_8fed5ef91e044718bc56b5d8f6d8d36a(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return qa
def func_c68e157ddbbc45d09ad16e6253b134a5(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return width
def func_fad014cd8c53449fb1def762cd640790(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return needed
def func_745a8e9a96ec49fa9fb77777a522228b(npieces, x2, h1, x1, segment_area,
    cuts, size, h2):
    # NOTE(review): auto-generated variant of an "equal-area cuts" routine.
    # `needed` is read before ever being assigned, so calling this raises
    # UnboundLocalError at the `while` condition -- confirm against the
    # source algorithm before use.
    segment_remaining = segment_area
    area_from_left = 0
    while segment_remaining >= needed:
        area_from_left += needed
        width = x2 - x1
        if h1 == h2:
            x = area_from_left / h1
        else:
            # Trapezoid segment: solve qa*x^2 + qb*x + qc = 0 for the cut offset.
            qa = (h2 - h1) / (2 * width)
            qb = h1
            qc = -area_from_left
            x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
        cuts.append(x1 + x)
        if len(cuts) == npieces - 1:
            return cuts
        segment_remaining -= needed
        needed = size
    needed -= segment_remaining
    return x
def func_df76a225017b4d7b834707716d86c5f1(npieces, x2, h1, x1, segment_area,
cuts, size, h2):
segment_remaining = segment_area
area_from_left = 0
while segment_remaining >= needed:
area_from_left += needed
width = x2 - x1
if h1 == h2:
x = area_from_left / h1
else:
qa = (h2 - h1) / (2 * width)
qb = h1
qc = -area_from_left
x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
cuts.append(x1 + x)
if len(cuts) == npieces - 1:
return | |
<gh_stars>1-10
import numpy as np
import random
import copy
from collections import namedtuple, deque
import torch
from ddpg_agent import Agent
import torch.nn.functional as F
# Default hyperparameters for MADDPG training.
SEED = 10                          # Random seed
NB_EPISODES = 10000                # Max nb of episodes
NB_STEPS = 1000                    # Max nb of steps per episodes
UPDATE_EVERY_NB_EPISODE = 4        # Nb of episodes between learning process
MULTIPLE_LEARN_PER_UPDATE = 3      # Nb of learning passes performed in a row per update
BUFFER_SIZE = int(1e5)             # replay buffer size
BATCH_SIZE = 200                   # minibatch size
ACTOR_FC1_UNITS = 400  #256        # Number of units for the layer 1 in the actor model
ACTOR_FC2_UNITS = 300  #128        # Number of units for the layer 2 in the actor model
CRITIC_FCS1_UNITS = 400  #256      # Number of units for the layer 1 in the critic model
CRITIC_FC2_UNITS = 300  #128       # Number of units for the layer 2 in the critic model
NON_LIN = F.relu  #F.leaky_relu    # Non linearity operator used in the model
LR_ACTOR = 1e-4  #1e-4             # learning rate of the actor
LR_CRITIC = 5e-3  #2e-3            # learning rate of the critic
WEIGHT_DECAY = 0  #0.0001          # L2 weight decay
GAMMA = 0.995  #0.99               # Discount factor
TAU = 1e-3                         # For soft update of target parameters
CLIP_CRITIC_GRADIENT = False       # Clip gradient during Critic optimization
ADD_OU_NOISE = True                # Add Ornstein-Uhlenbeck noise to actions
MU = 0.                            # Ornstein-Uhlenbeck noise parameter (long-run mean)
THETA = 0.15                       # Ornstein-Uhlenbeck noise parameter (mean-reversion rate)
SIGMA = 0.2                        # Ornstein-Uhlenbeck noise parameter (volatility)
NOISE = 1.0                        # Initial Noise Amplitude
NOISE_REDUCTION = 1.0  # 0.995     # Noise amplitude decay ratio
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer:
    """Fixed-size FIFO buffer of experience tuples for off-policy training."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Initialize a ReplayBuffer object.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum number of stored experiences
            batch_size (int): size of each training batch
            seed (int): random seed
        """
        self.action_size = action_size
        # Oldest experiences are evicted automatically once maxlen is reached.
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Append a single transition to the buffer."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a random mini-batch and stack it into float tensors on `device`."""
        drawn = [e for e in random.sample(self.memory, k=self.batch_size)
                 if e is not None]

        def _stack(rows):
            # vstack -> float32 tensor on the training device
            return torch.from_numpy(np.vstack(rows)).float().to(device)

        states = _stack([e.state for e in drawn])
        actions = _stack([e.action for e in drawn])
        rewards = _stack([e.reward for e in drawn])
        next_states = _stack([e.next_state for e in drawn])
        # Booleans are converted to 0/1 before becoming float tensors.
        dones = torch.from_numpy(
            np.vstack([e.done for e in drawn]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Number of experiences currently stored."""
        return len(self.memory)
# NOTE(review): re-declares `device` with the same expression as earlier in
# this module; harmless but redundant -- consider keeping a single definition.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Maddpg():
    """MADDPG meta-agent: owns one DDPG ``Agent`` per player plus a replay
    buffer shared between them, and orchestrates acting and learning."""
    def __init__(self, state_size, action_size, num_agents, random_seed):
        """Initialize a MADDPG Agent object.
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            num_agents (int): number of agents
            random_seed (int): random seed
        """
        super(Maddpg, self).__init__()
        self.state_size = state_size
        self.action_size = action_size
        self.num_agents = num_agents
        self.seed = random.seed(random_seed)
        # Instantiate one DDPG agent per player
        self.agents = [ Agent(state_size,action_size, random_seed, num_agents)
                       for i in range(num_agents) ]
        # Instantiate Memory replay Buffer (shared between agents)
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
    def reset(self):
        """Reset (the noise state of) all the agents"""
        for agent in self.agents:
            agent.reset()
    def act(self, states, noise):
        """Return the action each agent takes for its own observation (per policy)"""
        return [ agent.act(state, noise) for agent, state in zip(self.agents, states) ]
    def step(self, states, actions, rewards, next_states, dones, num_current_episode):
        """Save the joint experience in replay memory and, every
        UPDATE_EVERY_NB_EPISODE episodes, learn from random batches."""
        # Flatten the per-agent states/actions into single rows because the
        # ReplayBuffer was written for single-agent experiences.
        self.memory.add(encode(states),
                        encode(actions),
                        rewards,
                        encode(next_states),
                        dones)
        # If enough samples in the replay memory and if it is time to update
        if (len(self.memory) > BATCH_SIZE) and (num_current_episode % UPDATE_EVERY_NB_EPISODE ==0) :
            # Note: this code only expects 2 agents
            assert(len(self.agents)==2)
            # Allow to learn several time in a row in the same episode
            for i in range(MULTIPLE_LEARN_PER_UPDATE):
                # Sample a batch of experience from the replay buffer
                experiences = self.memory.sample()
                # Update Agent #0
                self.maddpg_learn(experiences, own_idx=0, other_idx=1)
                # Sample another batch of experience from the replay buffer
                experiences = self.memory.sample()
                # Update Agent #1
                self.maddpg_learn(experiences, own_idx=1, other_idx=0)
    def maddpg_learn(self, experiences, own_idx, other_idx, gamma=GAMMA):
        """
        Update the policy of the MADDPG "own" agent. The actors have only access to agent own
        information, whereas the critics have access to all agents information.
        Update policy and value parameters using given batch of experience tuples.
        Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
        where:
            actor_target(states) -> action
            critic_target(all_states, all_actions) -> Q-value
        Params
        ======
            experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
            own_idx (int) : index of the own agent to update in self.agents
            other_idx (int) : index of the other agent to update in self.agents
            gamma (float): discount factor
        """
        states, actions, rewards, next_states, dones = experiences
        # Filter out the agent OWN states, actions and next_states batch
        own_states = decode(self.state_size, self.num_agents, own_idx, states)
        own_actions = decode(self.action_size, self.num_agents, own_idx, actions)
        own_next_states = decode(self.state_size, self.num_agents, own_idx, next_states)
        # Filter out the OTHER agent states, actions and next_states batch
        other_states = decode(self.state_size, self.num_agents, other_idx, states)
        other_actions = decode(self.action_size, self.num_agents, other_idx, actions)
        other_next_states = decode(self.state_size, self.num_agents, other_idx, next_states)
        # Concatenate both agent information (own agent first, other agent in second position)
        all_states=torch.cat((own_states, other_states), dim=1).to(device)
        all_actions=torch.cat((own_actions, other_actions), dim=1).to(device)
        all_next_states=torch.cat((own_next_states, other_next_states), dim=1).to(device)
        agent = self.agents[own_idx]
        # ---------------------------- update critic ---------------------------- #
        # Get predicted next-state actions and Q values from target models
        # NOTE(review): the target actions below are computed from the *current*
        # states (own_states/other_states); the docstring (and standard DDPG)
        # calls for actor_target(next_states) -- confirm this is intentional.
        all_next_actions = torch.cat((agent.actor_target(own_states), agent.actor_target(other_states)),
                                     dim =1).to(device)
        Q_targets_next = agent.critic_target(all_next_states, all_next_actions)
        # Compute Q targets for current states (y_i)
        Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
        # Compute critic loss
        Q_expected = agent.critic_local(all_states, all_actions)
        critic_loss = F.mse_loss(Q_expected, Q_targets)
        # Minimize the loss
        agent.critic_optimizer.zero_grad()
        critic_loss.backward()
        if (CLIP_CRITIC_GRADIENT):
            # NOTE(review): clip_grad_norm is deprecated in newer torch
            # releases in favor of clip_grad_norm_.
            torch.nn.utils.clip_grad_norm(agent.critic_local.parameters(), 1)
        agent.critic_optimizer.step()
        # ---------------------------- update actor ---------------------------- #
        # Compute actor loss (other agent's actions are detached: only the own
        # actor receives gradient)
        all_actions_pred = torch.cat((agent.actor_local(own_states), agent.actor_local(other_states).detach()),
                                     dim = 1).to(device)
        actor_loss = -agent.critic_local(all_states, all_actions_pred).mean()
        # Minimize the loss
        agent.actor_optimizer.zero_grad()
        actor_loss.backward()
        agent.actor_optimizer.step()
        # ----------------------- update target networks ----------------------- #
        agent.soft_update(agent.critic_local, agent.critic_target, TAU)
        agent.soft_update(agent.actor_local, agent.actor_target, TAU)
    def checkpoints(self):
        """Save local and target network weights for all Agents"""
        for idx, agent in enumerate(self.agents):
            actor_local_filename = 'model_dir/checkpoint_actor_local_' + str(idx) + '.pth'
            critic_local_filename = 'model_dir/checkpoint_critic_local_' + str(idx) + '.pth'
            actor_target_filename = 'model_dir/checkpoint_actor_target_' + str(idx) + '.pth'
            critic_target_filename = 'model_dir/checkpoint_critic_target_' + str(idx) + '.pth'
            torch.save(agent.actor_local.state_dict(), actor_local_filename)
            torch.save(agent.critic_local.state_dict(), critic_local_filename)
            torch.save(agent.actor_target.state_dict(), actor_target_filename)
            torch.save(agent.critic_target.state_dict(), critic_target_filename)
def encode(sa):
    """Concatenate a per-agent list of state/action arrays into one flat
    1-D numpy array, so multi-agent data fits a single-agent replay buffer.

    Params
    ======
        sa (list) : per-agent Environment states or actions arrays
    """
    stacked = np.array(sa)
    return stacked.reshape(1, -1).squeeze()
def decode(size, num_agents, id_agent, sa, debug=False):
"""
Decode a batch of Environment states or actions, which have been previously concatened to store
multiple agent information into a buffer memmory originally not designed to handle multiple
agents information(such as in the context of MADDPG)
This returns a batch of Environment states or actions (torch.tensor) containing the data
of only the agent specified.
Params
======
size (int): size of the action space of state spaec to decode
num_agents (int) : Number of agent in the environment (and for which info hasbeen concatenetaded)
id_agent (int): index of the agent whose informationis going to be retrieved
sa (torch.tensor) : Batch | |
<filename>src/autogluon_contrib_nlp/data/tokenizers/huggingface.py
__all__ = ['HuggingFaceTokenizer', 'HuggingFaceBPETokenizer', 'HuggingFaceWordPieceTokenizer',
'HuggingFaceByteBPETokenizer']
import os
import json
from pkg_resources import parse_version
from typing import Optional, Union, List, Tuple
from collections import OrderedDict
from uuid import uuid4
from .base import *
from ..vocab import Vocab, load_vocab
from ...utils.lazy_imports import try_import_huggingface_tokenizers
# Disable the TOKENIZERS_PARALLEL as suggested by the huggingface.
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def is_new_version_model_file(model_file_path: str) -> bool:
    """Return True when the model file was produced by HuggingFace
    Tokenizers >= 0.8 (a single JSON file), False for the legacy format.

    Parameters
    ----------
    model_file_path
        Path to the model file

    Returns
    -------
    is_new_version
        Whether the model file is generated by the new version of huggingface tokenizer.
    """
    with open(model_file_path, 'r', encoding='utf-8') as model_file:
        try:
            json.load(model_file)
        except Exception:
            # Legacy files (merges/vocab text) are not valid JSON.
            return False
    return True
def hf_encode(model, sentences, output_type: type = str):
    """Encode one sentence or a batch of sentences with a HuggingFace model.

    Parameters
    ----------
    model
        Model object in HuggingFace tokenizer
    sentences
        A single sentence or a list of sentences
    output_type
        ``str`` for tokens, ``int`` for token ids

    Returns
    -------
    ret
        Encoded output; a single result when the input was a single sentence.
    """
    single_input = not isinstance(sentences, list)
    batch = [sentences] if single_input else sentences
    encoded = model.encode_batch(batch, add_special_tokens=False)
    if output_type is str:
        converted = [item.tokens for item in encoded]
    elif output_type is int:
        converted = [item.ids for item in encoded]
    else:
        raise TokenTypeNotSupportedError(output_type)
    return converted[0] if single_input else converted
def hf_encode_with_offsets(model, sentences, output_type: type = str):
    """Encode like :func:`hf_encode`, additionally returning the character
    offsets of every token in the original sentence(s)."""
    single_input = not isinstance(sentences, list)
    batch = [sentences] if single_input else sentences
    encoded = model.encode_batch(batch, add_special_tokens=False)
    if output_type is str:
        data = [item.tokens for item in encoded]
    elif output_type is int:
        data = [item.ids for item in encoded]
    else:
        raise TokenTypeNotSupportedError(output_type)
    spans = [item.offsets for item in encoded]
    if single_input:
        return data[0], spans[0]
    return data, spans
def hf_decode(model, tokens):
    """Decode tokens (str) or token ids (int) back into sentence text.

    Accepts a single sentence's tokens or a batch; returns a single string
    for single-sentence input.
    """
    single_input = not is_tokens_from_multiple_sentences(tokens)
    batch = [tokens] if single_input else tokens
    token_type = get_token_type(batch)
    if token_type is str:
        # String tokens must be mapped to ids before batch decoding.
        id_batch = [[model.token_to_id(tok) for tok in sentence]
                    for sentence in batch]
        decoded = model.decode_batch(id_batch)
    elif token_type is int:
        decoded = model.decode_batch(batch)
    else:
        raise TokenTypeNotSupportedError(token_type)
    return decoded[0] if single_input else decoded
@TOKENIZER_REGISTRY.register('hf_tokenizer')
class HuggingFaceTokenizer(BaseTokenizerWithVocab):
    """Tokenizer backed by a HuggingFace ``tokenizers`` (>=0.8) JSON model file.

    Wraps ``tokenizers.Tokenizer`` and exposes a GluonNLP ``Vocab`` that is
    verified to agree with the HuggingFace vocabulary.
    """
    def __init__(self, model_path: Optional[str] = None,
                 vocab: Optional[str] = None):
        """
        Parameters
        ----------
        model_path
            Path of the JSON model file saved by tokenizers>=0.8.
        vocab
            Optional GluonNLP vocabulary; when None, the vocabulary is
            reconstructed from the model file itself.
        """
        tokenizers = try_import_huggingface_tokenizers()
        assert parse_version(tokenizers.__version__) >= parse_version('0.8'), \
            'Only support tokenizers>=0.8. You can upgrade tokenizers via ' \
            '`python3 -m pip install --upgrade tokenizers`.'
        self._model_path = model_path
        self._model = tokenizers.Tokenizer.from_file(model_path)
        hf_vocab = self._model.get_vocab()
        with open(model_path, 'r', encoding='utf-8') as f:
            model_info = json.load(f)
        self._model_info = model_info
        added_tokens = model_info['added_tokens']
        if vocab is not None:
            self._vocab = load_vocab(vocab)
        else:
            # Rebuild the vocab from the HuggingFace mapping; ids must form a
            # contiguous 0..N-1 range (checked below).
            sorted_hf_vocab_kv = sorted(list(hf_vocab.items()), key=lambda x: x[1])
            for i, ele in enumerate(sorted_hf_vocab_kv):
                assert ele[1] == i
            all_tokens = [ele[0] for ele in sorted_hf_vocab_kv]
            special_tokens = [token['content'] for token in added_tokens]
            special_token_keys = []
            no_valid_name_key_cnt = 0
            for special_token in special_tokens:
                # '<xyz>' becomes the Vocab keyword 'xyz_token'; anything else
                # gets a generated 'special{i}_token' name.
                if special_token.startswith('<') and special_token.endswith('>') \
                        and len(special_token) > 2:
                    key = special_token[1:-1] + '_token'
                else:
                    key = 'special{}_token'.format(no_valid_name_key_cnt)
                    no_valid_name_key_cnt += 1
                assert key not in special_token_keys
                special_token_keys.append(key)
            self._vocab = Vocab(all_tokens,
                                **{key: token
                                   for key, token in zip(special_token_keys, special_tokens)})
        # Verify the special tokens
        for added_token in added_tokens:
            assert self._vocab[added_token['content']] == added_token['id']
            assert added_token['content'] in self._vocab.special_tokens
        # Verify all tokens exist
        for token, idx in hf_vocab.items():
            assert self._vocab[token] == idx
        # Pre-compute subword-boundary id sets for the decoder type in use.
        if self._model_info['decoder']['type'] == 'BPEDecoder':
            self._last_subtoken_id_set =\
                frozenset([i for i, ele in enumerate(self._vocab.all_tokens)
                           if ele.endswith('</w>')])
        elif self._model_info['decoder']['type'] == 'WordPiece':
            self._first_subtoken_id_set =\
                frozenset([i for i, ele in enumerate(self._vocab.all_tokens)
                           if not ele.startswith('##')])
    def encode(self, sentences: SentencesType,
               output_type: type = str) -> Union[TokensType, TokenIDsType]:
        """Tokenize into tokens (str) or token ids (int)."""
        return hf_encode(self._model, sentences, output_type)
    def decode(self, tokens: Union[TokensType, TokenIDsType]) -> SentencesType:
        """Recover text from tokens or token ids."""
        return hf_decode(self._model, tokens)
    def encode_with_offsets(self, sentences: SentencesType,
                            output_type: type = str) -> Tuple[Union[TokensType, TokenIDsType],
                                                              TokenOffsetsType]:
        """Tokenize and also return each token's character offsets."""
        return hf_encode_with_offsets(self._model, sentences, output_type)
    @property
    def model_type(self):
        """Decoder type declared in the model file (e.g. 'BPEDecoder')."""
        return self._model_info['decoder']['type']
    @property
    def model_info(self):
        """Get the model info."""
        return self._model_info
    def is_last_subword(self, tokens):
        """Whether the sub-token is the last sub-token in a split token list.
        Only supports the case when the tokenizer is a HuggingFaceBPETokenizer
        Parameters
        ----------
        tokens
            A single token or a list of tokens
        Returns
        -------
        ret
            The results
        """
        assert self.model_type == 'BPEDecoder',\
            'Only supports BPE model. The model_type={}'.format(self.model_type)
        if isinstance(tokens, str):
            return tokens.endswith('</w>')
        elif isinstance(tokens, int):
            return tokens in self._last_subtoken_id_set
        elif isinstance(tokens, list):
            if len(tokens) == 0:
                return []
            if isinstance(tokens[0], str):
                # Fix: the original returned a `(list, False)` tuple here (and
                # in the int branch below), inconsistent with is_first_subword
                # and with HuggingFaceBPETokenizer.is_last_subword; return the
                # list of booleans only.
                return [ele.endswith('</w>') for ele in tokens]
            elif isinstance(tokens[0], int):
                return [ele in self._last_subtoken_id_set for ele in tokens]
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
    def is_first_subword(self, tokens):
        """Whether the sub-token is the first sub-token in a token list.
        Only supports the case when the tokenizer is a HuggingFaceWordPieceTokenizer
        Parameters
        ----------
        tokens
            A single token or a list of tokens
        Returns
        -------
        ret
            The results
        """
        assert self.model_type == 'WordPiece', \
            'Only supports WordPiece model. The model_type={}'.format(self.model_type)
        if isinstance(tokens, str):
            return not tokens.startswith('##')
        elif isinstance(tokens, int):
            return tokens in self._first_subtoken_id_set
        elif isinstance(tokens, list):
            if len(tokens) == 0:
                return []
            if isinstance(tokens[0], str):
                return [not ele.startswith('##') for ele in tokens]
            elif isinstance(tokens[0], int):
                return [ele in self._first_subtoken_id_set for ele in tokens]
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
    @property
    def vocab(self) -> Optional[Vocab]:
        """The GluonNLP vocabulary mirroring the HuggingFace one."""
        return self._vocab
    def set_vocab(self, vocab):
        raise NotImplementedError('Cannot set vocabulary for the HuggingFaceTokenizer.')
    def __repr__(self):
        ret = '{}(\n' \
              '   type = {}\n' \
              '   model_path = {}\n' \
              '   normalizer = {}\n' \
              '   vocab = {}\n' \
              ')'.format(self.__class__.__name__,
                         self._model_info['decoder']['type'],
                         self._model_path,
                         self._model_info['normalizer'],
                         self._vocab)
        return ret
class LegacyHuggingFaceTokenizer(BaseTokenizerWithVocab):
    """Shared base for tokenizers built on pre-0.8 HuggingFace model files.

    Subclasses are expected to populate ``self._model`` and ``self._vocab``;
    encoding/decoding is delegated to the module-level ``hf_*`` helpers.
    """

    def __init__(self):
        self._vocab = None
        self._model = None

    def encode(self, sentences: SentencesType,
               output_type: type = str) -> Union[TokensType, TokenIDsType]:
        """Tokenize into tokens (str) or token ids (int)."""
        encoded = hf_encode(self._model, sentences, output_type)
        return encoded

    def decode(self, tokens: Union[TokensType, TokenIDsType]) -> SentencesType:
        """Recover text from tokens or token ids."""
        decoded = hf_decode(self._model, tokens)
        return decoded

    def encode_with_offsets(self, sentences: SentencesType,
                            output_type: type = str) -> Tuple[Union[TokensType, TokenIDsType],
                                                              TokenOffsetsType]:
        """Tokenize and also return each token's character offsets."""
        return hf_encode_with_offsets(self._model, sentences, output_type)

    @property
    def vocab(self) -> Optional[Vocab]:
        """The attached vocabulary (None until a subclass builds it)."""
        return self._vocab

    def set_vocab(self, vocab):
        raise NotImplementedError('Cannot set vocabulary for the HuggingFaceTokenizer.')
@TOKENIZER_REGISTRY.register('hf_bpe')
class HuggingFaceBPETokenizer(LegacyHuggingFaceTokenizer):
def __init__(self, merges_file: Optional[str] = None,
vocab_file: Optional[str] = None,
unk_token: Optional[str] = Vocab.UNK_TOKEN,
suffix: Optional[str] = '</w>',
dropout: Optional[float] = None,
lowercase: bool = False):
"""
Parameters
----------
merges_file
The merges file saved by HuggingFace
vocab_file
Vocabulary file in GluonNLP
unk_token
The unknown token
suffix
The suffix for sub-tokens. For example, "Sunnyvale" will be "Sunny vale</w>"
dropout
Ratio of the BPE-Dropout
lowercase
Whether to lowercase the input before tokenizer
"""
super().__init__()
self._merges_file = merges_file
self._vocab_file = vocab_file
self._unk_token = unk_token
self._suffix = suffix
self._dropout = dropout
self._lowercase = lowercase
self.__rebuild_tokenizer()
self._last_subword_id_set = frozenset([self._vocab[ele]
for ele in self._vocab.all_tokens
if ele.endswith(self._suffix)])
def is_last_subword(self, tokens: Union[str, int, List[str], List[int]]) \
-> Union[bool, List[bool]]:
"""Whether the token is the last subword token. This can be used for whole-word masking.
Parameters
----------
tokens
The input tokens
Returns
-------
ret
Whether the token is the last subword token in the list of subwords.
"""
if isinstance(tokens, str):
return tokens.endswith(self._suffix)
elif isinstance(tokens, int):
return tokens in self._last_subword_id_set
elif isinstance(tokens, list):
if len(tokens) == 0:
return []
if isinstance(tokens[0], str):
return [ele.endswith(self._suffix) for ele in tokens]
elif isinstance(tokens[0], int):
return [ele in self._last_subword_id_set for ele in tokens]
else:
raise NotImplementedError
else:
raise NotImplementedError
    def set_bpe_dropout(self, bpe_dropout: float):
        """Set the BPE Dropout of the tokenizer.

        Parameters
        ----------
        bpe_dropout
            The BPE Dropout ratio
        """
        self._dropout = bpe_dropout
        # The underlying CharBPETokenizer is immutable w.r.t. dropout, so it
        # must be rebuilt for the new ratio to take effect.
        self.__rebuild_tokenizer()
    def set_lowercase(self, lowercase: bool):
        """Set the lowercase flag and rebuild the underlying tokenizer.

        Parameters
        ----------
        lowercase
            Whether to lowercase the input
        """
        self._lowercase = lowercase
        self.__rebuild_tokenizer()
    @property
    def lowercase(self) -> bool:
        """Whether input text is lowercased before tokenization."""
        return self._lowercase
    def __rebuild_tokenizer(self):
        # Recreate the underlying CharBPETokenizer from the stored settings.
        # Called from __init__ and whenever dropout/lowercase change.
        tokenizers = try_import_huggingface_tokenizers()
        # Load the merge file from Huggingface tokenizers < 0.8
        try:
            # using Vocab obj file
            self._vocab = load_vocab(self._vocab_file)
            all_tokens = self._vocab.all_tokens
            hf_vocab = OrderedDict()
            for i in range(len(all_tokens)):
                hf_vocab[all_tokens[i]] = i
            # CharBPETokenizer only accepts a vocab *file*, so dump the
            # GluonNLP vocab into a uniquely-named temporary JSON file.
            temp_hf_vocab_file = str(uuid4()) + '.hf_vocab'
            with open(temp_hf_vocab_file, 'w', encoding='utf-8') as ftv:
                json.dump(hf_vocab, ftv, ensure_ascii=False)
        except TypeError:
            # using hf_bpe vocab file
            with open(self._vocab_file, 'r', encoding='utf-8') as fv:
                hf_vocab = json.load(fv)
            hf_vocab = sorted(list(hf_vocab.items()), key=lambda x: x[1])
            all_tokens = [x[0] for x in hf_vocab]
            # default special tokens corresponding to the default
            # special_tokens setting in CharBPETokenizer.train
            # and the default special_tokens=[unk]
            self._vocab = Vocab(all_tokens, unk_token=self._unk_token)
            temp_hf_vocab_file = None
        except Exception as exp:
            raise exp
        assert self._unk_token == self._vocab.unk_token
        # NOTE(review): if CharBPETokenizer construction raises, the temporary
        # vocab file above is never removed -- consider a try/finally.
        self._model = tokenizers.CharBPETokenizer(
            vocab=temp_hf_vocab_file if temp_hf_vocab_file else self._vocab_file,
            merges=self._merges_file,
            unk_token=self._unk_token, suffix=self._suffix, dropout=self._dropout,
            lowercase=self._lowercase)
        if temp_hf_vocab_file:
            os.remove(temp_hf_vocab_file)
    @property
    def vocab(self):
        """The GluonNLP vocabulary built by __rebuild_tokenizer."""
        return self._vocab
    def set_vocab(self, vocab):
        """The vocabulary is derived from the model files and is read-only."""
        raise NotImplementedError('Cannot set vocabulary for HuggingFaceBPETokenizer.')
def __repr__(self):
ret = '{}(\n' \
' merges_file = {}\n' \
' vocab_file = {}\n' | |
import re
from sympy import S, Symbol, EmptySet, Interval, FiniteSet
from sympy.solvers import solveset
import numpy as np
from src.solveminmax.minmax_term import MinMaxTerm
from src.solveminmax.cons_var_term import ConsVarTerm
# TODO: what if the equation starts with a -?
# TODO: what if the interval is infinity on one end?
def get_lhs(equation: str) -> str:
    """Return the substring of *equation* before the first '='.

    Args:
        equation: The string of the equation to be solved.

    Returns:
        Everything to the left of the first '=' sign.
    """
    split_at = equation.find("=")
    return equation[:split_at]
def get_constants(equation):
    """Collect the standalone numeric tokens on the LHS of the equation.

    Args:
        equation (str): A string of the equation to be solved.

    Returns:
        list: LHS whitespace-separated tokens that parse as floats, in order.
    """
    numbers = []
    for token in get_lhs(equation).split():
        try:
            numbers.append(float(token))
        except ValueError:
            continue
    return numbers
def get_minmax_terms(equation):
    """Parse every min()/max() term of the equation into a MinMaxTerm.

    Args:
        equation (str): A string of the equation to be solved.

    Returns:
        list: One MinMaxTerm object per matched min/max occurrence.
    """
    pattern = (r"([\+\-])\s*(\d*)\s*\**\s*(min|max)"
               r"(\([^\)]+\))")
    return [MinMaxTerm(match) for match in re.findall(pattern, equation)]
def get_cons_var_terms(equation):
    """Parse the constant*variable terms (e.g. ``+ 2*a``) of the equation.

    The regex's ``[a-z]+`` group also matches the ``min``/``max`` name of
    minmax terms (e.g. the ``min`` in ``2*min(a, 30)``), so those false
    positives are filtered out before wrapping.

    Args:
        equation (str): A string of the equation to be solved.

    Returns:
        list: A list of cons_var_term objects.
    """
    matches = re.findall(r"(\+|\-)\s*(\d+)\s*\*\s*([a-z]+)", equation)
    # Replaces the original in-place pop/while loop (which also maintained a
    # dead counter variable) with an equivalent filter.
    matches = [m for m in matches
               if "min" not in m[2] and "max" not in m[2]]
    return [ConsVarTerm(m) for m in matches]
def find_set_points(minmax_terms, var_name):
    """Return the sorted break points implied by the min/max terms.

    Args:
        minmax_terms (list): A list of minmax_term objects.
        var_name (str): A character, which is the name of the variable.

    Returns:
        list: Sorted set points; empty when no term actually involves the
        variable with a non-zero coefficient (e.g. ``min(30, 0*a)`` or
        ``min(30, 30)``).
    """
    break_points = set()
    for term in minmax_terms:
        left_num, right_num = term.left_right_nums()
        left_half, right_half = term.left_right_half()
        if var_name in left_half:
            numerator, denominator = right_num, left_num
        elif var_name in right_half:
            numerator, denominator = left_num, right_num
        else:
            continue
        try:
            break_points.add(numerator / denominator)
        except ZeroDivisionError:
            # Zero coefficient on the variable: no break point contributed.
            pass
    return sorted(break_points)
def create_intervals(set_points, low=0, high=1, left_open=True,
                     right_open=True):
    """Create a list of intervals based on set_points.

    Args:
        set_points (list): A list of sorted set points (may be empty).
        low (float): The lower bound of the variable.
        high (float): The higher bound of the variable.
        left_open (bool): True if the bound is open on the left.
        right_open (bool): True if the bound is open on the right.

    Returns:
        list: A list of Interval objects covering [low, high] split at the
        set points.
    """
    # Fix: the original unconditionally indexed set_points[0] and crashed
    # with IndexError when there are no set points (find_set_points documents
    # that it can return an empty list). The whole variable range is then the
    # single interval.
    if not set_points:
        return [Interval(low, high, left_open=left_open,
                         right_open=right_open)]
    intervals = []
    i = -1
    j = 0
    while i < len(set_points):
        if i == -1:
            # Leading interval from the lower bound to the first set point.
            interval = Interval(low, set_points[j],
                                left_open=left_open, right_open=True)
            if interval is not EmptySet:
                intervals.append(interval)
        elif j == len(set_points):
            # Trailing interval from the last set point to the upper bound.
            interval = Interval(set_points[i], high,
                                left_open=True, right_open=right_open)
            # NOTE(review): `is not EmptySet` compares against the imported
            # EmptySet object; depending on the sympy version an empty
            # Interval may not be that exact object -- confirm this check.
            if interval is not EmptySet:
                intervals.append(interval)
        else:
            intervals.append(Interval(set_points[i], set_points[j],
                                      left_open=True, right_open=True))
        i += 1
        j += 1
    return intervals
def get_validate_eq(equation):
    """Return the equation rewritten as a Python equality expression.

    Args:
        equation (str): The equation to be solved.

    Returns:
        str: The equation with every '=' replaced by '=='.
    """
    rewritten = equation.replace("=", "==")
    return rewritten
def random_interval(interval):
    """Draw a uniform random sample from the interval.

    Args:
        interval (:object:Interval): An object exposing ``start`` and ``end``.

    Returns:
        float: A number drawn uniformly from [interval.start, interval.end).
    """
    lower, upper = interval.start, interval.end
    return np.random.uniform(lower, upper)
def get_value_term(equation):
    """Extract the value term on the right-hand side of the equation.

    Args:
        equation (str): The equation to be solved.

    Returns:
        str: The value term as a string.
    """
    rhs_matches = re.findall(r"=\s*(.*\d+)", equation)
    return rhs_matches[0]
def knit_solver(interval, minmax_terms, cons_var_terms, var_name):
    """Generate a string to be fed into the solver.

    Args:
        interval (:object:Interval): An Interval object.
        minmax_terms (list): A list of minmax_term objects.
        cons_var_terms (list): A list of cons_var_term objects.
        var_name (str): The character of the variable.

    Returns:
        str: A knitted string of the equation, or None if evaluating a
        min/max term raised an exception.
    """
    solver = ""
    for term in minmax_terms:
        if var_name not in term.minmax_tuple():
            # Constant min/max term: evaluate it once, e.g. "min(3, 5)".
            # NOTE(review): eval on a built string - assumes the term text
            # comes from a trusted source.
            val = eval(f"{term.minmax_op()}{term.minmax_tuple()}")
            solver += f"{term.operator()}{term.coef()}*{val}"
        else:
            # Substitute a random sample from the interval for the variable
            # to decide which half of the min/max is active there.
            rand = random_interval(interval)
            index = term.minmax_tuple().rfind(var_name)
            replaced = (term.minmax_tuple()[:index]
                        + "rand" + term.minmax_tuple()[index + 1:])
            replaced = f"{term.minmax_op()}{replaced}"
            left, right = term.left_right_half()
            if var_name not in left:
                non_var_part = left
                var_part = right
            else:
                non_var_part = right
                var_part = left
            try:
                # The string references the local variable "rand" by name,
                # so the variable above must keep that exact name.
                val = eval(replaced)
            except Exception as e:
                print(e)
                return
            if val != float(non_var_part):
                # The variable half "wins" on this interval.
                solver += f"{term.operator()}{term.coef()}*{var_part}"
            else:
                solver += f"{term.operator()}{term.coef()}*{non_var_part}"
    for term in cons_var_terms:
        solver += f"{term.operator()}{term.coef()}*{term.var()}"
    return solver
def reformat_and_solve(knit, value_term):
    """Reformat the knit str then solve with solveset.

    Args:
        knit (str): The knitted equation string.
        value_term (str): The value term of the equation.

    Returns:
        Solution after being solved by solveset.
    """
    # Move everything to one side: knit - value_term == 0.
    s = f"{knit} - {value_term}"
    # The eval'd expression refers to the symbol by the literal name "a",
    # so this local variable must be called exactly "a".
    a = Symbol("a")
    # NOTE(review): eval on a built string - assumes equation text is trusted.
    return solveset(eval(s), a)
def solve_linear_eq(cons_var_terms, value_term, low, high,
                    left_open, right_open):
    """Solve a linear equation if the equation does not contain minmax_terms.

    Args:
        cons_var_terms (list): A list of cons_var_term objects.
        value_term (str): The value term of the equation.
        low (float): The lower bound of the variable.
        high (float): The upper bound of the variable.
        left_open (bool): True if the bound is open on the left.
        right_open (bool): True if the bound is open on the right.

    Returns:
        Solution after being solved by solveset.
    """
    pieces = [f"{term.operator()}{term.coef()}*{term.var()}"
              for term in cons_var_terms]
    result = reformat_and_solve("".join(pieces), value_term)
    if result is not S.Complexes:
        return result
    # Every complex number satisfies the equation; restrict the answer to
    # the variable's allowed range instead.
    bounds = Interval(low, high, left_open=left_open, right_open=right_open)
    return result.intersect(bounds)
def extract_val_from_str(s):
    """Extract the numeric coefficient from a string such as "200*a".

    The previous implementation used ``s.find("*")`` directly; when no "*"
    was present, ``find`` returned -1 and the final character was silently
    dropped (``float(s[:-1])``).  Splitting on "*" handles both forms.

    Args:
        s (str): A string that looks like "200*a".

    Returns:
        float: The float from the string.
    """
    return float(s.split("*", 1)[0])
def minmax_replace_zeros(minmax_terms):
    """Replace zero coefficients minmax_terms with reformatted minmax_terms.

    Mutates ``minmax_terms`` in place: a term such as ``min(0*a, x)`` is
    rewritten as ``min(0, x)`` so the variable disappears from it.

    Args:
        minmax_terms (list): A list of minmax_term objects.

    Returns:
        None
    """
    i = 0
    n = len(minmax_terms)
    while i < n:
        term = minmax_terms[i]
        left, right = term.left_right_half()
        # NOTE(review): the variable name "a" is hard-coded here, while
        # other helpers take a var_name parameter - confirm this is intended.
        if "a" in left and "*" in left:
            num = extract_val_from_str(left)
            non_var_term = right
        elif "a" in right and "*" in right:
            num = extract_val_from_str(right)
            non_var_term = left
        else:
            # If the term does not contain variables.
            i += 1
            continue
        if num == 0:
            # Zero coefficient: the variable half always evaluates to 0, so
            # replace the tuple with the constant form "(0, <other half>)".
            minmax_terms[i] = MinMaxTerm((term.operator(), term.coef(),
                                          term.minmax_op(),
                                          f"(0,{non_var_term})"))
        i += 1
def solve_no_minmax_var(minmax_terms, cons_var_terms, value_term):
    """Solve the equation when there are no variables in minmax_terms.

    For example, when minmax_terms look like min(30, 30) or min(30, 0*a).

    Args:
        minmax_terms (list): A list of minmax_term objects.
        cons_var_terms (list): A list of cons_var_term objects.
        value_term (str): The value term.

    Returns:
        Solution returned by solveset.
    """
    parts = []
    for term in minmax_terms:
        parts.append(f"{term.operator()}{term.coef()}*"
                     f"{term.minmax_op()}{term.minmax_tuple()}")
    for term in cons_var_terms:
        parts.append(f"{term.operator()}{term.coef()}*{term.var()}")
    return reformat_and_solve("".join(parts), value_term)
def get_next(result):
    """Get next item in the result set.

    Args:
        result (:object:FiniteSet): The result returned by solveset.

    Returns:
        The next item in result.

    Raises:
        StopIteration: If ``result`` is empty.
    """
    iterator = iter(result)
    return next(iterator)
def get_next_eval(result):
    """Return the evaluate next item from result.

    Args:
        result (:object:FiniteSet): The result returned by solveset.

    Returns:
        The next item in result, evaluated by calling .evalf().
    """
    # Equivalent to get_next(result).evalf() with the helper inlined.
    return next(iter(result)).evalf()
def find_intersect(interval, low, high, left_open, right_open):
    """Return the intersect of the interval with the required interval
    specified by low, high, left_open, and right_open.

    Args:
        interval (:object:Interval): An Interval object.
        low (float): The lower bound of the variable.
        high (float): The upper bound of the variable.
        left_open (bool): True if the bound is open on the left.
        right_open (bool): True if the bound is open on the right.

    Returns:
        :object:Interval: The intersection of the interval with required
        interval.
    """
    bounds = Interval(low, high, left_open, right_open)
    return interval.intersect(bounds)
def append_interval_complexes(interval, eq, low, high, results):
    """Handle the case when the result is S.Complexes.

    Checks whether the interval's own endpoints also satisfy the equation
    and, if so, unions them into the interval before storing it.

    Args:
        interval (:object:Interval): An Interval object.
        eq (str): The equation to be solved.
        low (float): The lower bound of the variable.
        high (float): The upper bound of the variable.
        results (list): The list of final results; mutated in place.

    Returns:
        None
    """
    temp_interval = interval
    validate_eq = get_validate_eq(eq)
    # The eval'd equation references the variable by the literal name "a",
    # so the local variable below must keep that exact name.
    a = interval.start
    if a != low and eval(validate_eq):
        temp_interval = temp_interval.union(FiniteSet(a))
    a = interval.end
    if a != high and eval(validate_eq):
        temp_interval = temp_interval.union(FiniteSet(a))
    results.append(temp_interval)
def append_interval_endpoints(interval, eq, result, results, low, high):
"""Handle the cases when result is interval.start or is interval.end.
Args:
interval (:object:Interval): An Interval object.
eq (str): The equation to | |
import ncls
from collections import defaultdict
import numpy as np
import vcf
import operator
import datetime
import pkg_resources
import copy
import pickle
from sys import stderr, stdin
import gzip
import time
from io import StringIO
import networkx as nx
from collections import Counter
import pandas as pd
import os
# Public names exported by "from svbench import *".
__all__ = ["Col", "CallSet", "concat_dfs"]
class Col:
    """Declarative description of how to extract one value from a record.

    The input column must be index-able by the 'col' argument.  Subfields
    are accessed using the 'key' argument.  Values can be further encoded
    by providing a dict with the required mappings using the 'encoding'
    argument.  The 'bins' argument can be used to stratify input into
    predefined bins.

    :param col: The key of the primary data field
    :rtype col: str, optional
    :param key: The key of the secondary data field, optional
    :rtype key: str, optional
    """
    def __init__(self, col, key=None, encoding=None, bins=None, norm=None, op=None, thresh=None, add=None):
        self.col = col
        self.key = key
        self.encoding = encoding
        self.bins = bins
        self.norm = norm
        self.op = op
        self.thresh = thresh
        # Direct assignment; equivalent to "default None, overwrite if given".
        self.add = add

    def __repr__(self):
        return f"svbench.Col(add={self.add} col={self.col}, key={self.key}, encoding={self.encoding}, bins={self.bins}, norm={self.norm}, op={self.op}, thresh={self.thresh})"
class Operate:
    """Dispatch table of operators from :mod:`operator`, looked up by name.

    Maps every public name in the operator module (e.g. "eq", "lt", "add")
    to its callable, so filter thresholds can be expressed as strings.
    """
    def __init__(self):
        # getattr is the safe equivalent of the old eval('operator.' + k).
        self.opps = {k: getattr(operator, k) for k in dir(operator) if "_" not in k}

    @staticmethod
    def _coerce(value):
        """Unwrap one-element lists (empty list -> None), parse numeric strings."""
        if isinstance(value, list):
            value = value[0] if value else None
        if isinstance(value, str):
            try:
                value = float(value)
            except ValueError:
                pass
        return value

    def test(self, o, a, b):
        """Apply the operator named ``o`` to coerced ``a`` and ``b``.

        Bug fix: the original code set ``a = None`` when *b* was an empty
        list, clobbering ``a`` and leaving ``b`` as ``[]``.
        """
        a = self._coerce(a)
        b = self._coerce(b)
        try:
            v = self.opps[o](a, b)
        except TypeError:
            print(f"Failed operation using op={o}, a={a} {type(a)}, b={b} {type(b)}", file=stderr)
            quit()
        return v
class NSV:
    """Pickle-friendly wrapper around an ncls.NCLS interval index.

    Keeps the construction arrays so the NCLS index can be rebuilt when
    unpickling (the NCLS object itself cannot be pickled).
    """
    def __init__(self, starts, ends, ids):
        # Retain the inputs purely so __reduce__ can reconstruct the index.
        self.starts = starts
        self.ends = ends
        self.ids = ids
        self.ncls = ncls.NCLS(starts, ends, ids)
    def __reduce__(self):  # Problems pickling original ncls object, so made a new reduce method
        # Recreate the object from the stored arrays on unpickle.
        return self.__class__, (self.starts, self.ends, self.ids)
def get_interval_arrays(regions, slop, interval_type="breakpoint"):
    """Convert region tuples into per-chromosome start/end/index arrays.

    Args:
        regions: Iterable of tuples.  For "breakpoint" and "bed" the shape
            is (chrA, start, chrB, end, index); for "bedpe" it is
            (chrA, start1, end1, chrB, start2, end2, index).
        slop (int): Padding added either side of each breakpoint
            ("breakpoint" mode only).
        interval_type (str): One of "breakpoint", "bedpe" or "bed".

    Returns:
        tuple: Three dicts keyed by chromosome, each mapping to a numpy int
        array of interval starts, ends and record indexes respectively.

    Raises:
        ValueError: If ``interval_type`` is not a supported name (the
            original silently returned empty output in that case).
    """
    chrom_interval_start = defaultdict(list)
    chrom_interval_end = defaultdict(list)
    chrom_interval_index = defaultdict(list)
    if interval_type == "breakpoint":
        # Make a +/- slop region around each of the two breakpoints.
        for c1, s, c2, e, index in regions:
            chrom_interval_start[c1].append(s - slop)
            chrom_interval_end[c1].append(s + slop)
            chrom_interval_index[c1].append(index)
            chrom_interval_start[c2].append(e - slop)
            chrom_interval_end[c2].append(e + slop)
            chrom_interval_index[c2].append(index)
    elif interval_type == "bedpe":
        # Both ends already come as explicit intervals.
        for c1, s1, e1, c2, s2, e2, index in regions:
            chrom_interval_start[c1].append(s1)
            chrom_interval_end[c1].append(e1)
            chrom_interval_index[c1].append(index)
            chrom_interval_start[c2].append(s2)
            chrom_interval_end[c2].append(e2)
            chrom_interval_index[c2].append(index)
    elif interval_type == "bed":
        # One interval per record spanning start..end.
        for c1, s, c2, e, index in regions:
            chrom_interval_start[c1].append(s)
            chrom_interval_end[c1].append(e)
            chrom_interval_index[c1].append(index)
    else:
        raise ValueError(f"Unknown interval_type: {interval_type!r}")
    return ({k: np.array(v).astype(int) for k, v in chrom_interval_start.items()},
            {k: np.array(v).astype(int) for k, v in chrom_interval_end.items()},
            {k: np.array(v).astype(int) for k, v in chrom_interval_index.items()})
def make_ncls_table(chrom_interval_start, chrom_interval_end, chrom_interval_index):
    """Build one NSV interval index per chromosome.

    :param chrom_interval_start: dict of chromosome -> array of interval start positions
    :param chrom_interval_end: dict of chromosome -> array of interval end positions
    :param chrom_interval_index: dict of chromosome -> array of record indexes
    :return: dict of chromosome -> NSV interval index
    :rtype: dict"""
    return {k: NSV(chrom_interval_start[k], chrom_interval_end[k], chrom_interval_index[k])
            for k in chrom_interval_start}
def make_tree(regions, slop=250, interval_type="breakpoint"):
    """Build per-chromosome NSV interval indexes from region tuples."""
    arrays = get_interval_arrays(regions, slop, interval_type)
    return make_ncls_table(*arrays)
def col_parser(r, col, key, first=True):
    """Extract one value from a vcf record attribute.

    Args:
        r: A vcf record (pyvcf-style, attributes accessed by name).
        col (str): Attribute name, e.g. "INFO" or "FORMAT".
        key: Sub-field name within the column, or None for the whole column.
        first (bool): Present for call-site compatibility; the code paths
            that consulted it are commented out below.

    Returns:
        The parsed value: converted to float where possible, a list when
        multiple samples are present, or None when the key is missing.
    """
    if col == "FORMAT":
        # FORMAT data lives on the per-sample records, not on the row itself.
        col = r.__getattribute__("samples")
        if len(col) > 1:
            # Return the value from all the samples
            try:
                ck = [i[key] for i in col]
            except IndexError:
                raise IndexError(f"Error parsing key {key}")
            # raise ValueError("SVBench only supports one FORMAT column per vcf file, {} found", len(col))
        else:
            # Single sample: take its value directly; missing keys map to None.
            try:
                ck = col[0][key]
            except (IndexError, AttributeError):
                ck = None
                # raise IndexError(f"The FORMAT column is missing a name: {key}")
    else:
        try:
            col = r.__getattribute__(col)
        except KeyError:
            raise KeyError("Column argument not understood, did you mean 'INFO'?")
        if key is not None:
            # Sub-field lookup; absent keys yield None rather than raising.
            if key in col:
                ck = col[key]
            else:
                ck = None
        else:
            ck = col
    if isinstance(ck, list):
        if len(ck) >= 1:  # Get first item of list or use None
            v = ck[0]
            # Prefer a float when the first element parses as one.
            try:
                v = float(v)
                return v
            except ValueError:
                return v
        # elif len(ck) > 1:
        #     if first:
        #         ck = ck[0]
        else:
            # Empty list is returned unchanged.
            return ck
        # else:
        #     ck = None
    return ck
def check_passed(operations, r, keep):
    """Return True only if record ``r`` satisfies every filter in ``keep``."""
    for item in keep:
        if item.op is None:
            raise ValueError("Col.op must be set using 'keep' argument e.g. "
                             "Col('INFO', 'SU', op=eq, thresh=4)")
        ck = col_parser(r, item.col, item.key)
        if ck is None and item.col != "FILTER":
            raise KeyError("TypeError: >= 'NoneType' and 'int'. Check the svbench.Col key exists {}".format(str(keep)))
        if not operations.test(item.op, ck, item.thresh):
            # Early exit: a single failed filter fails the record.
            return False
    return True
def get_strata(r, strat):
    """Extract the stratification value for record ``r``.

    Follows the chain of ``Col.add`` links, summing every parsed value.
    Bug fixes over the original:
      * the loop never advanced ``current``, so a chain with more than one
        ``add`` link spun forever; ``current = current.add`` fixes that;
      * a None primary value no longer crashes ``sum`` when add-links exist.

    Args:
        r: Input record (e.g. a vcf record).
        strat (Col): Column description, optionally chained via ``add``.

    Returns:
        float: The (possibly summed) stratification value; 0.0 when missing.
    """
    cp = col_parser(r, strat.col, strat.key)
    if strat.add is not None:
        vals = [cp] if cp is not None else []
        current = strat.add
        while current is not None:
            v = col_parser(r, current.col, current.key)
            if v is not None:
                vals.append(v)
            current = current.add  # walk the chain instead of looping forever
        cp = sum(vals)
    if cp is None:
        cp = 0
    if isinstance(cp, list):
        cp = float(cp[0])
    return float(cp)
def parse_cols(r, item):
    """Parse one Col description against record ``r``.

    Args:
        r: Input record (vcf record or mapping-like row).
        item (Col): Column description; ``item.parsed_value`` is set as a
            side effect on most paths.

    Returns:
        Either a ("name", value) tuple, or a dict of name -> value when a
        keyed lookup produced a list (one entry per list element).

    Raises:
        ValueError: When an encoding is supplied but the parsed value is
            absent from it and no None fallback exists.
    """
    encoding = item.encoding
    col = item.col
    key = item.key
    if encoding:
        if key is not None:
            p = col_parser(r, item.col, item.key)
            if p in encoding:
                item.parsed_value = encoding[p]
                return f"{col}:{key}", item.parsed_value  # encoding[p]
            elif None in encoding:
                # None acts as the encoding's catch-all fallback.
                item.parsed_value = encoding[None]
                return f"{col}:{key}", item.parsed_value
            else:
                raise ValueError("value from column={}:{} could not be found in encoding. Value was {}, of type {}".format(col, key, p, type(p)), item)
        else:
            if type(r) == vcf.model._Record:
                # vcf records expose columns as attributes; lists collapse
                # to their first element (empty list -> None).
                p = r.__getattribute__(col)
                if isinstance(p, list):
                    if len(p) == 0:
                        p = None
                    else:
                        p = p[0]
            else:
                p = r[col]
            if p in encoding:
                item.parsed_value = encoding[p]
                return col, item.parsed_value
            else:
                # NOTE(review): assumes encoding always has a None fallback
                # here - a missing one raises KeyError. Confirm intended.
                item.parsed_value = encoding[None]
                return col, item.parsed_value
    else:
        if key is not None:
            p = col_parser(r, col, key, first=False)
            if isinstance(p, list):
                # Multi-sample value: emit one column per list element.
                d = dict()
                for index, v in enumerate(p):
                    d[f"{col}:{key}:{index}"] = v
                return d
            else:
                item.parsed_value = p
                return f"{col}:{key}", item.parsed_value
        else:
            # Mapping-style access first; fall back to attribute access.
            try:
                item.parsed_value = r[col]
            except TypeError:
                item.parsed_value = r.__getattribute__(col)
            return col, item.parsed_value
def parse_cols_list(r, parse_list, data):
    """Parse every Col in ``parse_list`` against ``r``, merging into ``data``."""
    for item in parse_list:
        parsed = parse_cols(r, item)
        if isinstance(parsed, dict):
            # Multi-valued result: merge all generated columns at once.
            data.update(parsed)
        else:
            name, value = parsed
            data[name] = value
    return data
def parse_all_cols(r):
    """Parse every INFO and FORMAT field of a vcf record into a dict."""
    all_cols = ([Col("INFO", k) for k in r.INFO.keys()]
                + [Col("FORMAT", k) for k in r.FORMAT.split(":")])
    return parse_cols_list(r, all_cols, {})
def check_args(stratify, weight_field, keep, other_cols, size_range):
    """Validate the Col-typed arguments accepted by CallSet loaders."""
    if size_range is not None:
        if size_range[0] is not None and size_range[1] is not None:
            assert size_range[0] < size_range[1]
    for item in (keep, weight_field, stratify):
        if item is None:
            continue
        # A bare Col is treated like a one-element list of Cols.
        candidates = item if isinstance(item, list) else [item]
        for candidate in candidates:
            assert isinstance(candidate, Col)
    if isinstance(other_cols, list):
        for candidate in other_cols:
            assert isinstance(candidate, Col)
    # elif other_cols != "all":
    #     assert isinstance(other_cols, Col)
    if stratify is not None and (stratify.bins is None or not hasattr(stratify.bins, '__iter__')):
        raise ValueError(
            "Stratify must have an iterable for 'bins' argument e.g. Col('FORMAT', 'DV', bins=range(0, 60, 5))")
class CallSet:
"""
This class holds instructions for parsing an input file containing variants. Supported formats include
'vcf', 'bed', 'bedpe', 'csv'.
Raw data may optionally be saved within the object.
:param dataset: A name for the input dataset (optional)
:type dataset: str
:param caller: The name of the variant caller used (optional)
:type caller: str
:param kwargs: Allows CallSet attributes to be set during initialization - if any kwargs are not found in the \
self.default_params.keys() list, then the key: value pair is added to the self.required dictionary. The purpose of \
this feature is to allow the setting of required arguments, which can be useful when creating new class instances.
:returns: CallSet instance
"""
def __init__(self, dataset=None, caller=None, **kwargs):
# Primary attributes
self.dataset = dataset
self.caller = caller
self.name = None
self.kwargs = kwargs
self.required = {} # Set any number of required arguments. Note, arguments are accessed via kwargs
# Secondary attributes
self.bedpe = False
self.path = None
self.tree = None
self.breaks_df = None
self.extra_cols = None
self.weight_field = None
self.no_translocations = True
self.allowed_svtypes = None
self.keep = None
self.stratify = None
self.stratify_range = None
self.allowed_chroms = None
self.min_size = None
self.max_size = None
self.soft_size_filter = None
self.other_cols = None
self.model = None
self.slop = None
self.break_cols = "chrA,posA,chrB,posB"
self.sep = ","
self.drop_first_row = False
self.svtype_field = "svtype"
self.scores = None
self.size_scores = None
self.false_negative_indexes = None
| |
y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
fill='green', anchor='nw')
else:
self.canvas.create_text(x + 5, y + 5, text=self.logReader.currentSystem, font="Ebrima 13 bold",
fill='orange', anchor='nw')
self.canvas.create_rectangle(x + 150, y, x + 500, y + 30, fill='black')
self.canvas.create_text(x + 158, y + 5, text='>> ', font="Ebrima 13 bold", fill='orange', anchor='nw')
if self.nextSystem == clip:
self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='green',
anchor='nw')
else:
self.canvas.create_text(x + 190, y + 5, text=self.nextSystem, font="Ebrima 13 bold", fill='orange',
anchor='nw')
self.canvas.create_rectangle(x + 340, y, x + 500, y + 30, fill='black')
timeSince = time.time() - self.logReader.lastJumpRequest
timeSince = self.maxCountdown - timeSince
if timeSince > 0:
if timeSince < 10 and self.data['alarm']:
winsound.Beep(3000, 100)
mins = str(round(timeSince // 60))
seconds = str(math.floor(timeSince % 60))
if len(mins) == 1:
mins = '0' + mins
if len(seconds) == 1:
seconds = '0' + seconds
text = mins + ':' + seconds
else:
text = 'Ready'
text = '| ' + text + ' |'
self.canvas.create_text(x + 350, y + 5, text=text, font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 420, y + 5, text='☰', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 440, y + 5, text='📁', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 463, y + 5, text='⚙', font="Ebrima 13 bold", fill='orange', anchor='nw')
if self.data['topmost'] == 1:
self.canvas.create_text(x + 485, y + 5, text='⮝', font="Ebrima 13 bold", fill='orange', anchor='nw')
else:
self.canvas.create_text(x + 485, y + 5, text='⮟', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_text(x + 500, y + 5, text='✘', font="Ebrima 13 bold", fill='orange', anchor='nw')
self.canvas.create_line(x, y, x + 520, y, fill='orange')
self.canvas.create_line(x, y + 30, x + 520, y + 30, fill='orange')
if self.data['more']:
self.createDashboard()
def createDashboard(self):
mouseX, mouseY = mousePosition()
x, y = self.data['window position'][0], self.data['window position'][1]
try:
self.canvas.create_rectangle(x, y + 35, x + 520, y + 600, fill='black', outline='orange')
# pannel backgrounds
self.canvas.create_rectangle(x + 10, y + 40, x + 510, y + 150, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 160, x + 510, y + 270, fill='#111111', outline='#333333')
self.canvas.create_rectangle(x + 10, y + 280, x + 510, y + 540, fill='#111111', outline='#333333')
above = False
for i in [0] + self.stopLocations:
horPos = i / len(self.currentFileData) * 480 + 20
if above and (mouseX - (x + horPos)) ** 2 + (mouseY - (y + 80)) ** 2 < 25:
if horPos < 250:
anchor = 'w'
else:
anchor = 'e'
self.canvas.create_rectangle(x + horPos - 8, y + 45, x + 500, y + 80, fill='#111111',
outline='#111111')
self.canvas.create_line(x + horPos, y + 70, x + horPos, y + 80, fill='orange')
jumps=i - self.position
if jumps > 0:
eta = ' | ' + self.getETA(jumps=jumps)
else:
eta=''
self.canvas.create_text(x + horPos, y + 60,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']] + eta,
font="Ebrima 10 bold", fill='orange', anchor=anchor)
self.canvas.create_oval(x + horPos - 5, y + 75, x + horPos + 5, y + 85, fill='orange',
outline='orange')
elif not above:
self.canvas.create_rectangle(x + horPos - 8, y + 80, x + 500, y + 120, fill='#111111',
outline='#111111')
self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
self.canvas.create_text(x + horPos, y + 95,
text=self.currentFileData[i][self.currentFileDataKeys['System Name']],
font="Ebrima 10 bold", fill='orange', anchor='w')
above = True
horPos = 500
jumps = len(self.currentFileData) - 1 - self.position
if jumps > 0:
eta = ' | ' + self.getETA(jumps=jumps)
else:
eta = ''
self.canvas.create_rectangle(x + horPos - 10, y + 80, x + 500, y + 120, fill='#111111', outline='#111111')
self.canvas.create_line(x + horPos, y + 80, x + horPos, y + 90, fill='orange')
self.canvas.create_text(x + horPos, y + 95,
text=" " + self.currentFileData[-1][self.currentFileDataKeys['System Name']] + eta,
font="Ebrima 10 bold", fill='orange', anchor='e')
##print(self.stopLocations)
horPos = self.position / len(self.currentFileData) * 480 + 20
self.canvas.create_line(x + 20, y + 80, x + horPos, y + 80, fill='orange', width=2, dash=10)
self.canvas.create_line(x + horPos, y + 80, x + 500, y + 80, fill='orange', width=2)
self.canvas.create_oval(x + 15, y + 75, x + 25, y + 85, fill='orange', outline='orange')
self.canvas.create_oval(x + 495, y + 75, x + 505, y + 85, fill='orange', outline='orange')
self.canvas.create_text(x + 20, y + 130, text="Jumps | Completed: " + str(self.position),
font="Ebrima 13 bold", fill='orange', anchor='w')
found = False
for i in self.stopLocations:
diff = i - self.position
if diff >= 0:
self.canvas.create_text(x + 220, y + 130, text="| To Waypoint: " + str(diff), font="Ebrima 13 bold",
fill='orange', anchor='w')
found = True
break
if not found:
self.canvas.create_text(x + 220, y + 130,
text="| To Waypoint: " + str(len(self.currentFileData) - self.position - 1),
font="Ebrima 13 bold", fill='orange', anchor='w')
self.canvas.create_text(x + 380, y + 130,
text="| Left: " + str(len(self.currentFileData) - self.position - 1),
font="Ebrima 13 bold", fill='orange', anchor='w')
for i in self.stopLocations:
if i < self.position:
fill = 'orange'
outline = 'orange'
else:
fill = 'orange'
outline = 'orange'
horPos = i / len(self.currentFileData) * 480 + 20
self.canvas.create_oval(x + horPos - 3, y + 77, x + horPos + 3, y + 83, fill=fill, outline=outline)
##print('h',horPos)
##print(self.stopLocations)
horPos = self.position / len(self.currentFileData) * 480 + 20
self.canvas.create_polygon(x + horPos - 5, y + 85, x + horPos, y + 75, x + horPos + 5, y + 85,
fill='#00ff00', outline='#00ff00')
try:
reqFuel = self.currentFileData[self.position][self.currentFileDataKeys['Tritium in market']]
reqFuel = int(reqFuel)
if reqFuel > 0:
reqFuel += 1000
else:
for i in range(self.position, len(self.currentFileData)):
reqFuel += int(self.currentFileData[i][self.currentFileDataKeys['Fuel Used']])
reqFuel -= int(self.currentFileData[self.position][self.currentFileDataKeys['Fuel Used']])
except IndexError:
reqFuel = 'Error'
tankFuel = self.logReader.carrierFuel
shipFuel = self.logReader.shipInventory - self.data['shipCargo']
carrierFuel = self.logReader.carrierInventory - self.data['carrierCargo']
self.canvas.create_text(x + 20, y + 180, text="Tritium | ", font="Ebrima 13 bold", fill='orange',
anchor='w')
self.canvas.create_text(x + 95, y + 180, text="Tank: " + str(tankFuel), font="Ebrima 13 bold", fill='green',
anchor='w')
self.canvas.create_text(x + 190, y + 180, text="| Ship: " + str(shipFuel), font="Ebrima 13 bold",
fill='blue', anchor='w')
self.canvas.create_text(x + 280, y + 180, text="| Cargo: " + str(carrierFuel), font="Ebrima 13 bold",
fill='orange', anchor='w')
self.canvas.create_text(x + 400, y + 180, text="| Min: " + str(reqFuel), font="Ebrima 13 bold", fill='red',
anchor='w')
fuelTotal = tankFuel + shipFuel + carrierFuel
if reqFuel == 'Error':
reqFuel = 0
width = max(fuelTotal, reqFuel) / 480
self.canvas.create_rectangle(x + 20, y + 210, x + 20 + reqFuel / width, y + 230, fill='red', outline='red',
stipple='gray25')
self.canvas.create_rectangle(x + 20, y + 210, x + 20 + tankFuel / width, y + 230, fill='green',
outline='green')
self.canvas.create_rectangle(x + 20 + tankFuel / width, y + 210,
x + 20 + shipFuel / width + tankFuel / width, y + 230, fill='blue',
outline='blue')
self.canvas.create_rectangle(x + 20 + shipFuel / width + tankFuel / width, y + 210,
x + 20 + shipFuel / width + tankFuel / width + carrierFuel / width, y + 230,
fill='orange', outline='orange')
self.canvas.create_rectangle(x + 20 + reqFuel / width - 2, y + 210, x + 20 + reqFuel / width, y + 230,
fill='red', outline='red')
diff = fuelTotal - reqFuel
if diff >= 0:
self.canvas.create_text(x + 260, y + 250, text="You are " + str(diff) + " Tritium in excess",
font="Ebrima 13 bold", fill='green')
else:
self.canvas.create_text(x + 260, y + 250, text="Warning! You are " + str(-diff) + " Tritium short!",
font="Ebrima 13 bold", fill='red')
self.canvas.create_text(x + 260, y + 197,
text="Please note you need to open the carrier management page to update this.",
font="Ebrima 8 bold", fill='orange')
# routeList
length = 10
self.scrollLength = length
verticalSpacing = 25
self.verticalSpacing = verticalSpacing
boxHeight = 20
self.boxHeight = boxHeight
startY = 290
self.scrollHeight = verticalSpacing * (length - 1) + boxHeight
barHeight = min(length / len(self.currentFileData) * self.scrollHeight, self.scrollHeight)
self.barCentre = barHeight / 2
barPosition = y + (self.position + self.scroll) / len(self.currentFileData) * self.scrollHeight + startY
clipboard = pyperclip.paste()
for i in range(length):
if self.position + self.scroll + i < len(self.currentFileData):
if self.currentFileData[self.position + self.scroll + i][
self.currentFileDataKeys['System Name']] == clipboard:
boxFill = 'green'
textFill = 'black'
elif self.scroll + i == 0:
boxFill = 'orange'
textFill = 'black'
elif self.position + self.scroll + i in self.stopLocations | |
import tensorflow as tf
from tensorflow.image import rot90 as img_rot90
from tensorflow.image import flip_left_right as img_flip
from tensorflow.keras.optimizers import Adam
import logging
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import pandas as pd
import copy
import os
import time
class CustomModel(tf.keras.Model):
    """
    tf.keras.Model subclass that evaluates under fixed augmentations.

    Overriding ``test_step`` lets the validation set (or any set passed to
    ``model.evaluate()``) be scored under a deterministic family of
    augmentations, so the val metrics better represent the cell-image
    population.  More info on:
    - https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit#providing_your_own_evaluation_step

    ``test_step`` contains the mathematical logic for one evaluation step:
    the forward pass, loss calculation, and metrics updates.  It receives a
    nested structure of Tensors and returns a dict of metric values passed
    to ``tf.keras.callbacks.CallbackList.on_train_batch_end``.
    """
    def __init__(self, **kwargs):
        """
        Replace tf.keras.Model __init__ in case arguments need to be given
        to CustomModel later; currently just forwards everything.
        """
        # Run original tf.keras.Model __init__ method
        super().__init__(**kwargs)
    def _apply_aug(self, x=None, k=0, flip=False):
        """
        Rotate ``x`` by k*90 degrees, optionally flipping left-right first.
        """
        if flip:
            return img_flip(img_rot90(x, k=k))
        else:
            return img_rot90(x, k=k)
    def _predict_targets_with_augmentation(self, x, y):
        """
        Predict the batch under 8 fixed augmentations (identity, 90/180/270
        degree rotations, and the same four flipped), effectively
        multiplying the evaluation set by 8.  Returns the tiled targets and
        the concatenated predictions.
        """
        # Compute predictions over imgs with k*90 deg (k=0..3) rotations,
        # with and without a left-right flip.
        counter = 0
        for flip in [True, False]:
            for k in range(0,4):
                counter += 1
                temp_pred = self(self._apply_aug(x, k=k, flip=flip), training=False)
                if counter == 1:
                    # First pass initialises the accumulators.
                    y_pred = tf.identity(temp_pred)
                    y_new = tf.identity(y)
                else:
                    y_pred = tf.concat((y_pred, temp_pred), axis=0)
                    y_new = tf.concat((y_new, y), axis=0)
        return y_new, y_pred
    def test_step(self, data):
        """One evaluation step over the augmented copies of the batch."""
        #data = data_adapter.expand_1d(data)
        # Unpack the data
        #x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        x, y = data
        # predict targets with and without data augmentation
        y, y_pred = self._predict_targets_with_augmentation(x, y)
        # Updates the metrics tracking the loss
        #self.compiled_loss(
        #    y, y_pred, sample_weight, regularization_losses=self.losses)
        self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Update the metrics.
        #self.compiled_metrics.update_state(y, y_pred, sample_weight)
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        return {m.name: m.result() for m in self.metrics}
class Individual_Model_Training():
"""
The idea of this clase is to join all the needed process to train a model into one class, so training several models using the same parameters can be easy.
"""
def __init__(self, input_shape=(224, 224, 38), input_ids=None):
self.log = logging.getLogger(self.__class__.__name__)
self._print_stdout_and_log("Individual_Model_Training class initialed")
self.model = None
self.metrics = ['mse', 'mean_absolute_error']
self.callbacks = []
self.input_ids = input_ids
if input_shape is None:
msg = 'Please specify the input shape! E.g.:\n'
msg += 'input_shape=(224, 224, 37)'
print(msg)
raise Exception(msg)
else:
self.input_shape = tuple(input_shape)
self.projection_tensor = self._get_projection_tensor(input_shape, input_ids)
    def set_model(self, arch_name='baseline_CNN', pre_training=False, conv_reg=[0,0], dense_reg=[0,0], bias_l2_reg=0, use_custom_model=True):
        """Instantiate the requested architecture as ``self.model``.

        arch_name: one of 'baseline_CNN'/'simple_CNN', 'ResNet50V2',
            'Xception' or 'Linear_Regression'.
        pre_training: stored on the instance; presumably consumed by the
            architecture builders - TODO confirm.
        conv_reg / dense_reg: [l1, l2] regularization factors.
        bias_l2_reg: l2 regularization factor for biases.
        use_custom_model: wrap in CustomModel (augmented evaluation) when
            True, otherwise a plain tf.keras Model.

        Raises NotImplementedError for unknown architecture names.
        NOTE(review): the mutable default arguments ([0,0]) are shared
        across calls; safe only while they are never mutated.
        """
        msg = 'Model {} selected!'.format(arch_name)
        self._print_stdout_and_log(msg)
        self.pre_training = pre_training
        self._set_regularization(conv_reg, dense_reg, bias_l2_reg)
        # init model architecture
        self.input_layer = tf.keras.Input(shape=self.input_shape, name='InputLayer')
        # Filter unwanted channels
        x = self._filter_channels(self.input_layer)
        if (arch_name == 'baseline_CNN') or (arch_name == 'simple_CNN'):
            prediction = self._get_baseline_CNN(x)
        elif arch_name == 'ResNet50V2':
            prediction = self._get_ResNet50V2(x)
        elif arch_name == 'Xception':
            prediction = self._get_Xception(x)
        elif arch_name == 'Linear_Regression':
            prediction = self._get_Linear_Regression(x)
        else:
            msg = 'Specified model {} not implemented!'.format(arch_name)
            self.log.error(msg)
            raise NotImplementedError(arch_name)
        # Instantiate tf model class
        if use_custom_model:
            self.model = CustomModel(inputs=self.input_layer, outputs=prediction)
        else:
            self.model = tf.keras.models.Model(inputs=self.input_layer, outputs=prediction)
        # Print model summary
        self.model.summary(print_fn=self._print_stdout_and_log)
        # Sanity check: print model losses (one for each layer regularized (1 for bias reg and 1 for kernel reg))
        self._print_stdout_and_log('Losses:\n{}'.format(self.model.losses))
def build_model(self, loss_name='huber', learning_rate=0.001):
# Select the loss function
if loss_name == 'mse':
loss = tf.keras.losses.MeanSquaredError()
elif loss_name == 'huber':
loss = tf.keras.losses.Huber(delta=1.0)
elif loss_name == 'mean_absolute_error':
loss = tf.keras.losses.MeanAbsoluteError()
self._print_stdout_and_log('{} loss function selected. Building the model...'.format(loss_name))
self.model.compile(optimizer=Adam(learning_rate=learning_rate),
loss=loss,
metrics=self.metrics
)
self._print_stdout_and_log('Model compiled!')
def fit_model(self, train_data, val_data, n_epochs, verbose_level):
self._print_stdout_and_log('Starting model training...')
# Save time before training
tic = time.time()
# Fit model
self.history = self.model.fit(train_data,
validation_data=val_data,
epochs=n_epochs,
callbacks=self.callbacks,
verbose=verbose_level
)
toc = time.time()
self._print_stdout_and_log('Training time (in mins): {}'.format((toc-tic)/60))
    def _print_stdout_and_log(self, msg):
        """Echo ``msg`` both to the class logger (INFO level) and to stdout."""
        self.log.info(msg)
        print(msg)
def _set_regularization(self, conv_reg, dense_reg, bias_l2_reg):
# Set regularization parameters
# regularization for dense layers
self.dense_reg = None
if sum(dense_reg) != 0:
self.dense_reg = tf.keras.regularizers.l1_l2(l1=dense_reg[0], l2=dense_reg[1])
# Reg for conv layers
self.conv_reg = None
if sum(conv_reg) != 0:
self.conv_reg = tf.keras.regularizers.l1_l2(l1=conv_reg[0], l2=conv_reg[1])
# reg for bias
self.bias_reg = None
if bias_l2_reg != 0:
self.bias_reg = tf.keras.regularizers.l2(bias_l2_reg)
msg = '\nRegularization:'
msg += '\nconv_l1_reg: {}, conv_l2_reg: {}'.format(conv_reg[0], conv_reg[1])
msg += '\ndense_l1_reg: {}, dense_l2_reg: {}'.format(dense_reg[0], dense_reg[1])
msg += '\nBias l2 reg: {}'.format(bias_l2_reg)
self._print_stdout_and_log(msg)
def _get_projection_tensor(self, input_shape, input_ids):
"""This function returns a tensor used as preprocessing to filter the input channels.
return a Tensor of shape (n_total_channels, n_selected_channels), where values for selected channels are 1 in the diagonal an 0 otherwise.
"""
n_channels = input_shape[-1]
n_selected_channels = input_ids.shape[-1]
projection_matrix = np.zeros(shape=(n_channels, n_selected_channels))
for col, row in enumerate(input_ids):
projection_matrix[row,col] = 1
return tf.constant(projection_matrix, dtype=tf.float32)
    #@tf.function
    def _filter_channels(self, x):
        """Keep only the selected input channels of a batch of images.

        x: Tensor of shape (batch_size, img_size, img_size, n_channels).
        Right-multiplying by the 0/1 projection tensor (built in
        _get_projection_tensor) drops the non-selected channels.
        """
        return x @ self.projection_tensor
    def _get_baseline_CNN(self, x):
        """
        Baseline Model
        Architecture:
            Input -> Conv3x3_64 -> BN -> ReLu -> MaxPool2x2
                  -> Conv3x3_128 -> BN -> ReLu -> MaxPool2x2
                  -> Prediction Layers
        """
        # Block 1: 64-filter 3x3 conv ('same' padding keeps spatial size),
        # then BatchNorm + ReLU + 2x2 max-pool (halves spatial size).
        x = tf.keras.layers.Conv2D(64, (3,3),
                                   padding='same',
                                   kernel_regularizer=self.conv_reg,
                                   bias_regularizer=self.bias_reg,
                                   input_shape=self.input_shape
                                   )(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.ReLU()(x)
        x = tf.keras.layers.MaxPooling2D((2,2), strides=2)(x)
        # Block 2: same pattern with 128 filters.
        x = tf.keras.layers.Conv2D(128, (3,3),
                                   padding='same',
                                   kernel_regularizer=self.conv_reg,
                                   bias_regularizer=self.bias_reg
                                   )(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.ReLU()(x)
        x = tf.keras.layers.MaxPooling2D((2,2), strides=2)(x)
        # Add prediction layers to predict the TR:
        return self._add_prediction_leyers(x)
    def _get_Linear_Regression(self, x):
        """
        Linear regression model:
            Input -> GlobalAveragePooling (Per-Channel average)
                  -> Dense_1 (Prediction)
        """
        # Average each channel over the spatial dimensions, then a single
        # linear unit (no activation) produces the scalar prediction.
        x = tf.keras.layers.GlobalAveragePooling2D()(x)
        x = tf.keras.layers.Dense(units=1,
                                  kernel_regularizer=self.dense_reg,
                                  bias_regularizer=self.bias_reg,
                                  )(x)
        return x
    def _get_ResNet50V2(self, x):
        """
        ResNet50V2 + Dense layers on the top.
        Architecture:
            Input -> ResNet50V2 without top layers (Dense prediction layers)
                  -> Prediction Layers
        """
        # First, we need to load the ResNet50V2 arch. However, since our input
        # shape is different from the original one, it is not possible to load
        # the arch. and the pretrained weights at the same time. Moreover, we
        # need to create first a separate input layer with the shape of our data:
        arch_input_shape = x.shape[1:]
        arch_input_layer = tf.keras.Input(shape=arch_input_shape, name='temp_input')
        # Now we load the base ResNet50V2 arch. using our input layer:
        base_model = tf.keras.applications.ResNet50V2(
            include_top=False,
            weights=None,
            input_tensor=arch_input_layer,
            pooling=None,
            classifier_activation=None
        )
        # load pretrained weights and biases
        if self.pre_training:
            base_model = self._set_ResNet50V2_pretrained_w_and_b(base_model, arch_input_shape)
        # For testing: re-apply kernel/bias regularization to the loaded layers.
        base_model = self._apply_regularization_to_prebuilt_model(base_model, 2)
        # Add prediction layers to predict the TR:
        return self._add_prediction_leyers(base_model(x))
    def _get_Xception(self, x):
        """
        Xception + Dense layers on the top.
        Architecture:
            Input -> Xception without top layers (Dense prediction layers)
                  -> Prediction Layers
        """
        # First, we need to load the arch. However, since our input
        # shape is different from the original one, it is not possible to load
        # the arch. and the pretrained weights at the same time. Moreover, we
        # need to create first a separate input layer with the shape of our data:
        arch_input_shape = x.shape[1:]
        arch_input_layer = tf.keras.Input(shape=arch_input_shape, name='temp_input')
        # Now we load the base arch. using our input layer:
        base_model = tf.keras.applications.Xception(
            include_top=False,
            weights=None,
            input_tensor=arch_input_layer,
            pooling=None,
            classifier_activation=None
        )
        # Optionally copy pretrained weights/biases into the fresh architecture.
        if self.pre_training:
            base_model = self._set_Xception_pretrained_w_and_b(base_model, arch_input_shape)
        # For testing: re-apply kernel/bias regularization to the loaded layers.
        base_model = self._apply_regularization_to_prebuilt_model(base_model, 1)
        # Add prediction layers to predict the TR:
        return self._add_prediction_leyers(base_model(x))
def _add_prediction_leyers(self, x):
"""
This function add the final prediction layers to the model.
Architecture:
Input -> Base Model
-> GlobalAveragePooling (Flatten, output vector of size 2048)
-> Dense_256 -> BN -> ReLu
-> Dense_128 -> BN -> ReLu
-> Dense_1 (Prediction)
"""
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(
units=256,
kernel_regularizer=self.dense_reg,
bias_regularizer=self.bias_reg,
#activity_regularizer=tf.keras.regularizers.l2(1e-5)
)(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.Dense(
units=128,
kernel_regularizer=self.dense_reg,
bias_regularizer=self.bias_reg,
#activity_regularizer=tf.keras.regularizers.l2(1e-5)
)(x)
x = | |
= QtGui.QPushButton(self.nw_lpvp) #load Profile
self.pushButton_adp.setGeometry(QtCore.QRect(18, 18, 100, 28))
self.pushButton_adp.setText("Add Profile")
self.pushButton_adp.setFont(font)
self.pushButton_rmp = QtGui.QPushButton(self.nw_lpvp)
self.pushButton_rmp.setGeometry(QtCore.QRect(135, 18, 100, 28))
self.pushButton_rmp.setText("Remove")
self.pushButton_rmp.setFont(font)
self.pushButton_plots = QtGui.QPushButton(self.nw_lpvp)
self.pushButton_plots.setGeometry(QtCore.QRect(363, 18, 100, 28))
self.pushButton_plots.setText("Plot")
self.pushButton_plots.setFont(font)
self.listWidget_lpvp = QtGui.QListWidget(self.nw_lpvp)
self.listWidget_lpvp.setGeometry(QtCore.QRect(18, 60, 445, 240))
self.listWidget_lpvp.setFont(font)
self.pushButton_adp.clicked.connect(self.lpvp_adp)
self.pushButton_rmp.clicked.connect(self.lpvp_rmp)
self.pushButton_plots.clicked.connect(self.lpvp_plots)
self.scrollArea_lpvp.show()
    def lpvp_adp(self):
        """Ask the user for a two-column csv profile, remember its path in the
        module-level ``profile_loc`` array and add a checkable list entry."""
        global profile_loc
        path_profile, _ =QtGui.QFileDialog.getOpenFileName(None, "Open csv file...",os.getcwd(), "text file (*.csv)")
        if path_profile == "": return
        try:
            data = np.genfromtxt(path_profile,dtype=float,delimiter=',')
        except UnicodeDecodeError:
            # Legacy files may be big5-encoded; temporarily switch the default
            # encoding (Python 2 only) and retry, then restore utf-8.
            reload(sys)
            sys.setdefaultencoding('big5')
            data = np.genfromtxt(path_profile,dtype=float,delimiter=',')
            sys.setdefaultencoding('utf-8')
        # A valid profile has exactly two columns: wavelength, intensity.
        if data[0].size != 2:
            print 'Open wrong file!!'
            return
        filename = path_profile.split('/')[len(path_profile.split('/'))-1]
        profile_loc = np.append(profile_loc,path_profile)
        # Show the file as a checked (plot-enabled) entry in the list widget.
        item = QtGui.QListWidgetItem()
        item.setText(filename)
        item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
        item.setCheckState(QtCore.Qt.Checked)#Unchecked
        self.listWidget_lpvp.addItem(item)
def lpvp_rmp(self):
global profile_loc
if profile_loc.size == 0:return
item = self.listWidget_lpvp.item(profile_loc.size-1)
self.listWidget_lpvp.removeItemWidget(item)
self.listWidget_lpvp.takeItem(profile_loc.size-1)
profile_loc = profile_loc[0:profile_loc.size-1]
    def lpvp_plots(self):
        """Plot every checked profile of the list widget in one figure."""
        if profile_loc.size == 0: return
        # Collect the indices of the checked (plot-enabled) entries.
        plot_idx = []
        for idx in range(profile_loc.size):
            item = self.listWidget_lpvp.item(idx)
            if item.checkState() == QtCore.Qt.Checked : plot_idx.append(idx)
        if len(plot_idx) == 0: return
        fig_pv, ax_pv = plt.subplots()
        plt.gcf().canvas.set_window_title('Profiles Plot')
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Intensity')
        plt.minorticks_on()
        plt.grid(True)
        plt.tight_layout()
        plt.locator_params(axis='x', tight=True,nbins=20) #set ticks range
        # Re-read each selected csv (with the same big5 fallback used when it
        # was loaded) and draw wavelength vs. intensity labelled by filename.
        for idx in plot_idx:
            loc = profile_loc[idx]
            try:
                data = np.genfromtxt(loc,dtype=float,delimiter=',')
            except UnicodeDecodeError:
                reload(sys)
                sys.setdefaultencoding('big5')
                data = np.genfromtxt(loc,dtype=float,delimiter=',')
                sys.setdefaultencoding('utf-8')
            filename = loc.split('/')[len(loc.split('/'))-1]
            plt.plot(data[:,0],data[:,1],label=filename)
        plt.legend(loc='best')
        plt.show()
########################################################################
########################################################################
    def continuum_nw(self):
        """Build and show the 'Continuum normalization' window.

        Resets the module-level state shared with the cn* slots (profile
        path, abs/emi choice, picked points, result array ``z``), creates the
        widgets for both normalization methods (tab A: AsLS smoothing, tab B:
        manual pseudocontinuum-point fitting) and connects all signals.
        """
        # --- shared module-level state used by the cn* slots ---
        global profile_path #stored profile path
        profile_path = ""
        global cnabem_state #stored chk_state
        cnabem_state = ''
        global method
        method = ''
        global xlist,ylist
        xlist = []
        ylist = []
        global z #continuum normalization result
        z = np.array([],dtype=float)
        # --- window and shared font ---
        self.nw_cn = QtGui.QWidget()
        self.nw_cn.resize(575, 380)
        self.nw_cn.setWindowTitle('Continuum normalization')
#        self.nw_cn.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        # --- top row: profile loader button + loaded-file label ---
        self.pushButton_cnlp = QtGui.QPushButton(self.nw_cn) #load Profile
        self.pushButton_cnlp.setGeometry(QtCore.QRect(8, 13, 110, 27))
        self.pushButton_cnlp.setText("Load Profile")
        font.setBold(True)
        self.pushButton_cnlp.setFont(font)
        font.setBold(False)
        self.label_cnpn = QtGui.QLabel(self.nw_cn) #profile name
        self.label_cnpn.setGeometry(QtCore.QRect(126, 16, 440, 25))
        self.label_cnpn.setTextFormat(QtCore.Qt.PlainText)#"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbaaaaba"
        font.setPointSize(11)
        self.label_cnpn.setFont(font)
        # --- spectrum-type checkboxes (mutually exclusive) and range entry ---
        self.checkBox_cnab = QtGui.QCheckBox(self.nw_cn)
        self.checkBox_cnab.setGeometry(QtCore.QRect(9, 43, 195, 25))
        self.checkBox_cnab.setText("Absorption spectrum")
        font.setPointSize(12)
        self.checkBox_cnab.setFont(font)
        self.checkBox_cnem = QtGui.QCheckBox(self.nw_cn)
        self.checkBox_cnem.setGeometry(QtCore.QRect(204, 43, 180, 25))
        self.checkBox_cnem.setText("Emission spectrum")
        self.checkBox_cnem.setFont(font)
        self.label_cnrange = QtGui.QLabel(self.nw_cn)
        self.label_cnrange.setGeometry(QtCore.QRect(389, 43, 59, 25))
        self.label_cnrange.setText("Range :")
        self.label_cnrange.setFont(font)
        self.lineEdit_cnrange = QtGui.QLineEdit(self.nw_cn)# y1
        self.lineEdit_cnrange.setGeometry(QtCore.QRect(448, 45, 86, 22))
        self.lineEdit_cnrange.setFont(font)
        self.label_cnrangeu = QtGui.QLabel(self.nw_cn)
        self.label_cnrangeu.setGeometry(QtCore.QRect(543, 43, 22, 25))
        self.label_cnrangeu.setText("nm")
        self.label_cnrangeu.setFont(font)
        # --- tab widget holding the two normalization methods ---
        self.tabWidget_cn = QtGui.QTabWidget(self.nw_cn)
        self.tabWidget_cn.setGeometry(QtCore.QRect(9, 77, 560, 190))
        self.tabWidget_cn.setMouseTracking(False)
        self.tabWidget_cn.setTabShape(QtGui.QTabWidget.Triangular)
        self.tabWidget_cn.setFont(font)
        self.tabcn_1 = QtGui.QWidget()
        self.tabWidget_cn.addTab(self.tabcn_1, "Method A - AsLS Smoothing")
        self.tabcn_2 = QtGui.QWidget()
        self.tabWidget_cn.addTab(self.tabcn_2, "Method B - Pseudocontinuum points Fitting")
        # --- tab A: AsLS description, S/P spin boxes, generate button ---
        self.label_cnd1 = QtGui.QLabel(self.tabcn_1)
        self.label_cnd1.setGeometry(QtCore.QRect(QtCore.QRect(5, 1, 552, 22)))
        self.label_cnd1.setText("The higher S the more smoothness; the higher P the higher baseline. Generally, the S between ")
        self.label_cnd2 = QtGui.QLabel(self.tabcn_1)
        self.label_cnd2.setGeometry(QtCore.QRect(QtCore.QRect(5, 19, 552, 22)))
        self.label_cnd2.setText("3 and 6 is a good choice both for emission and absroption spectrum, the P between 0.01 and ")
        self.label_cnd3 = QtGui.QLabel(self.tabcn_1)
        self.label_cnd3.setGeometry(QtCore.QRect(QtCore.QRect(5, 37, 552, 22)))
        self.label_cnd3.setText("0.2 is suitable for emission spectrum but need to be greater than 0.9 for absorption spectrum.")
        font.setPointSize(10)
        self.label_cnd1.setFont(font)
        self.label_cnd2.setFont(font)
        self.label_cnd3.setFont(font)
        self.label_cnsp1 = QtGui.QLabel(self.tabcn_1)
        self.label_cnsp1.setGeometry(QtCore.QRect(QtCore.QRect(165, 65, 117, 20)))
        self.label_cnsp1.setText("S (1~9) : ")
        font.setPointSize(12)
        self.label_cnsp1.setFont(font)
        self.spbox_cn1 = QtGui.QSpinBox(self.tabcn_1)
        self.spbox_cn1.setGeometry(QtCore.QRect(QtCore.QRect(315, 65, 65, 24)))
        self.spbox_cn1.setSingleStep(1)
        self.spbox_cn1.setRange(1, 9)
        self.label_cnsp2 = QtGui.QLabel(self.tabcn_1)
        self.label_cnsp2.setGeometry(QtCore.QRect(QtCore.QRect(165, 95, 165, 20)))
        self.label_cnsp2.setText("P (0.01~0.99) : ")
        self.label_cnsp2.setFont(font)
        self.spbox_cn2 = QtGui.QDoubleSpinBox(self.tabcn_1)
        self.spbox_cn2.setGeometry(QtCore.QRect(QtCore.QRect(315, 95, 65, 24)))
        self.spbox_cn2.setSingleStep(0.01)
        self.spbox_cn2.setRange(0.01, 0.99)
        self.pushButton_cngc = QtGui.QPushButton(self.tabcn_1) #generate comtinuum function
        self.pushButton_cngc.setGeometry(QtCore.QRect(130, 134, 300, 27))
        self.pushButton_cngc.setText("Generate Continuum Function")
        self.pushButton_cngc.setFont(font)
        # --- bottom controls: plot-with-image, plot, save/load result ---
        self.checkBox_cnpi = QtGui.QCheckBox(self.nw_cn)#plot with image
        self.checkBox_cnpi.setGeometry(QtCore.QRect(9, 275, 550, 27))
        self.checkBox_cnpi.setText("Plot with image")
        self.checkBox_cnpi.setFont(font)
#        if path == "": self.checkBox_cnpi.setEnabled(False)
        self.label_cnin = QtGui.QLabel(self.nw_cn) #image name
        self.label_cnin.setGeometry(QtCore.QRect(27, 294, 540, 24))
        self.label_cnin.setTextFormat(QtCore.Qt.PlainText)#"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbaaaaba"
        self.label_cnin.setText('(need to load corresponding image and select the image area from main screen)')
        font.setPointSize(11)
        self.label_cnin.setFont(font)
        self.pushButton_cnplot = QtGui.QPushButton(self.nw_cn) #Plot Profile in Relative Intensity
        self.pushButton_cnplot.setGeometry(QtCore.QRect(16, 345, 288, 27))
        self.pushButton_cnplot.setText("Plot normalized spectrum")
        font.setPointSize(12)
        font.setBold(True)
        self.pushButton_cnplot.setFont(font)
        self.pushButton_cnsave = QtGui.QPushButton(self.nw_cn) #save result
        self.pushButton_cnsave.setGeometry(QtCore.QRect(325, 345, 110, 27))
        self.pushButton_cnsave.setText("Save Result")
        font.setBold(False)
        self.pushButton_cnsave.setFont(font)
        self.pushButton_cnload = QtGui.QPushButton(self.nw_cn) #load
        self.pushButton_cnload.setGeometry(QtCore.QRect(448, 345, 110, 27))
        self.pushButton_cnload.setText("Load result")
        self.pushButton_cnload.setFont(font)
        ###################################### tab B: manual point picking
        self.label_cndt2_1 = QtGui.QLabel(self.tabcn_2)
        self.label_cndt2_1.setGeometry(QtCore.QRect(QtCore.QRect(5, 1, 552, 22)))
        self.label_cndt2_1.setText("Pick out continuum points along pseudocontinuum curve by your eyes in ~10 nm interval on")
        self.label_cndt2_2 = QtGui.QLabel(self.tabcn_2)
        self.label_cndt2_2.setGeometry(QtCore.QRect(QtCore.QRect(5, 19, 552, 22)))
        self.label_cndt2_2.setText("the profile figure. Press 'a' to add point; 'd' to remove current point; 'c' to remove all points.")
        font.setPointSize(10)
        self.label_cndt2_1.setFont(font)
        self.label_cndt2_2.setFont(font)
        self.text_cnpickup = QtGui.QTextEdit(self.tabcn_2)
        self.text_cnpickup.setGeometry(QtCore.QRect(190, 45, 190, 84))#(9, 20, 389, 286)
        self.text_cnpickup.setFont(font)
        self.text_cnpickup.setReadOnly(True)
        self.text_cnpickup.setText('X(nm)      Y(intensity)')#space 6
        self.pushButton_cngc2 = QtGui.QPushButton(self.tabcn_2) #generate comtinuum function
        self.pushButton_cngc2.setGeometry(QtCore.QRect(130, 134, 300, 27))
        self.pushButton_cngc2.setText("Generate Continuum Function")
        font.setPointSize(12)
        self.pushButton_cngc2.setFont(font)
        ###################################### signal wiring
        self.pushButton_cnlp.clicked.connect(self.cnlp_ldpf)#load profile
        self.checkBox_cnab.stateChanged.connect(self.cnab)
        self.checkBox_cnem.stateChanged.connect(self.cnem)
        self.pushButton_cngc.clicked.connect(self.cngc)#generate cn function
        self.pushButton_cngc2.clicked.connect(self.cngc2)#generate cn function
        self.checkBox_cnpi.stateChanged.connect(self.cnpi_ck) #plot with img
        self.pushButton_cnplot.clicked.connect(self.cnplot)#plot
        self.pushButton_cnsave.clicked.connect(self.cnsave)#save
        self.pushButton_cnload.clicked.connect(self.cnload)#load
        self.label_cnpn.mousePressEvent = self.reopenprofile#reopen_profile
        self.nw_cn.show()
        # Default AsLS parameters: S=5, P=0.01.
        self.spbox_cn1.setValue(5)
        self.spbox_cn2.setValue(0.01)
    def cnlp_ldpf(self):
        """Load a two-column csv profile for continuum normalization, fill in
        the wavelength-range field, and open an interactive figure where
        Method B continuum points can be picked with the 'a'/'d'/'c' keys."""
        global profile_path #stored profile path
        global xlist,ylist
        path_profile, _ =QtGui.QFileDialog.getOpenFileName(None, "Open csv file...",os.getcwd(), "text file (*.csv)") #"TXT(*.txt);;AllFiles(*.*)"
        if path_profile == "": return
        profile_path = path_profile
        try:
            data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
        except UnicodeDecodeError:
            # Legacy files may be big5-encoded (Python 2 workaround).
            reload(sys)
            sys.setdefaultencoding('big5')
            data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
            sys.setdefaultencoding('utf-8')
        # A valid profile has exactly two columns: wavelength, intensity.
        if data[0].size != 2:
            print 'Open wrong file!!'
            return
        # Pre-fill the range entry with the full wavelength span of the file.
        nm_range1 = int(np.min(data[:,0]))
        nm_range2 = int(np.max(data[:,0]))
        self.lineEdit_cnrange.setText('%d-%d'%(nm_range1, nm_range2))
        filename = path_profile.split('/')[len(path_profile.split('/'))-1]
        self.label_cnpn.setText(filename)
        x_nm = data[:,0]#nm
        y_ii = data[:,1]#intensity
        def cnlp_onkey(event):
            # Only react to key presses inside the profile axes.
            if event.inaxes!=ax_cnlp.axes: return
            onkey_chk(event.key,event.xdata,event.ydata)
        def onkey_chk(key,x,y):
            # 'a' adds a point, 'c' clears all points, 'd' deletes the last one.
            global xlist,ylist
            if key == u'a':
                for n in xlist:
                    if n == x: return #chk same element
                xlist.append(x)
                ylist.append(y)
                ax_cnlp.plot(x,y,'bo')
                self.text_cnpickup.append('%.3f      %.3f'%(x,y))
                fig_cnlp.canvas.draw()
            if key == u'c':
                if len(xlist) > 0:
                    xlist = []
                    ylist = []
                    self.text_cnpickup.setText('X(nm)      Y(intensity)')
                    # Keep line 0 (the spectrum itself); drop all point markers.
                    last_step = len(ax_cnlp.lines)
                    del ax_cnlp.lines[1:last_step]
                    fig_cnlp.canvas.draw()
            if key == u'd':
                if len(xlist) > 0:
                    xlist = xlist[0:len(xlist)-1]
                    ylist = ylist[0:len(ylist)-1]
                    last_step = len(ax_cnlp.lines)
                    del ax_cnlp.lines[last_step-1]
                    fig_cnlp.canvas.draw()
                    self.text_cnpickup.undo()
        fig_cnlp, ax_cnlp = plt.subplots()
        plt.gcf().canvas.set_window_title("Profile")
        plt.title('Press key "a" to add point in Method B')
        plt.xlabel('x (nm)')
        plt.ylabel('intensity (counts)')
        plt.tight_layout()
        plt.minorticks_on()
        plt.grid(True)
        plt.ion()
        ax_cnlp.plot(x_nm, y_ii,'k-')
        plt.show()
        cid_cnlp = fig_cnlp.canvas.mpl_connect('key_press_event', cnlp_onkey)
    def reopenprofile(self,event):
        """Mouse-press handler on the profile-name label: re-open the already
        loaded profile in a fresh interactive figure.

        NOTE(review): this duplicates the plotting/key-handling logic of
        cnlp_ldpf; consider extracting a shared helper.
        """
        global xlist,ylist
        if profile_path == "": return
        try:
            data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
        except UnicodeDecodeError:
            # Legacy files may be big5-encoded (Python 2 workaround).
            reload(sys)
            sys.setdefaultencoding('big5')
            data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
            sys.setdefaultencoding('utf-8')
        x_nm = data[:,0]#nm
        y_ii = data[:,1]#intensity
        def cnlp_onkey(event):
            # Only react to key presses inside the profile axes.
            if event.inaxes!=ax_cnlp.axes: return
            onkey_chk(event.key,event.xdata,event.ydata)
        def onkey_chk(key,x,y):
            # 'a' adds a point, 'c' clears all points, 'd' deletes the last one.
            global xlist,ylist
            if key == u'a':
                for n in xlist:
                    if n == x: return #chk same element
                xlist.append(x)
                ylist.append(y)
                ax_cnlp.plot(x,y,'bo')
                self.text_cnpickup.append('%.3f      %.3f'%(x,y))
                fig_cnlp.canvas.draw()
            if key == u'c':
                if len(xlist) > 0:
                    xlist = []
                    ylist = []
                    self.text_cnpickup.setText('X(nm)      Y(intensity)')
                    # Keep line 0 (the spectrum itself); drop all point markers.
                    last_step = len(ax_cnlp.lines)
                    del ax_cnlp.lines[1:last_step]
                    fig_cnlp.canvas.draw()
            if key == u'd':
                if len(xlist) > 0:
                    xlist = xlist[0:len(xlist)-1]
                    ylist = ylist[0:len(ylist)-1]
                    last_step = len(ax_cnlp.lines)
                    del ax_cnlp.lines[last_step-1]
                    fig_cnlp.canvas.draw()
                    self.text_cnpickup.undo()
        fig_cnlp, ax_cnlp = plt.subplots()
        plt.gcf().canvas.set_window_title("Profile")
        plt.title('Press key "a" to add point in Method B')
        plt.xlabel('x (nm)')
        plt.ylabel('intensity (counts)')
        plt.tight_layout()
        plt.minorticks_on()
        plt.grid(True)
        plt.ion()
        ax_cnlp.plot(x_nm, y_ii,'k-')
        plt.show()
        cid_cnlp = fig_cnlp.canvas.mpl_connect('key_press_event', cnlp_onkey)
def cnab(self):
global cnabem_state #stored chk_state
if self.checkBox_cnab.isChecked() == True:
cnabem_state = 'Abs'
self.checkBox_cnem.setCheckState(QtCore.Qt.Unchecked)
else:
cnabem_state = 'Emi'
self.checkBox_cnem.setCheckState(QtCore.Qt.Checked)
def cnem(self):
global cnabem_state #stored chk_state
if self.checkBox_cnem.isChecked() == True:
cnabem_state = 'Emi'
self.checkBox_cnab.setCheckState(QtCore.Qt.Unchecked)
else:
cnabem_state = 'Abs'
self.checkBox_cnab.setCheckState(QtCore.Qt.Checked)
def cngc(self):
if profile_path == "": return
if cnabem_state == '': return
global z
global method
global nm_range
s_value = self.spbox_cn1.value()#1-9
p_value = self.spbox_cn2.value()#0.01-0.99
try:
data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
except UnicodeDecodeError:
reload(sys)
sys.setdefaultencoding('big5')
data = np.genfromtxt(profile_path,dtype=float,delimiter=',')
sys.setdefaultencoding('utf-8')
nm_range = self.lineEdit_cnrange.text().split('-')
if nm_range == '' or len(nm_range) != 2:
print 'Please input the wavelength range with correct format.'
return
nm_range = [int(nm_range[0]),int(nm_range[1])]
if nm_range[1] == nm_range[0]: nm_range[1] = nm_range[1]+1
if nm_range[1]-nm_range[0]<0: nm_range = [min(nm_range),max(nm_range)]
x_nm = data[:,0]#nm
y_ii = data[:,1]#intensity
nm_range_idx = np.where((x_nm>=nm_range[0])&(x_nm<=nm_range[1]))
x_nm = x_nm[nm_range_idx]
y_ii = y_ii[nm_range_idx]
from scipy import sparse
from scipy.sparse.linalg import spsolve
def baseline_als(y, lam = 5e5, p = 0.01, niter=10): #lam = 1e7, p = 0.05 0.03
L = y.size#2e4
D = sparse.csc_matrix(np.diff(np.eye(L), 2))
w = np.ones(L)
for i in xrange(niter):
W = sparse.spdiags(w, 0, L, L)
Z = W + lam | |
import json
from collections import Counter
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.loading import get_model
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit, ResizeCanvas
from common.models import EmptyModelBase, ResultBase
from photos.models import Photo
from common.utils import compute_entropy, get_content_tuple, \
get_opensurfaces_storage
from common.geom import bbox_svg_transform
# Project-configured storage backend shared by all shape image fields below.
STORAGE = get_opensurfaces_storage()
##
## Categories
##
class ShapeName(EmptyModelBase):
    """ Object category, e.g. "Kettle", "Cat", ... """
    #: name of the category, e.g. "Kettle", "Cat", ...
    name = models.CharField(max_length=127, unique=True)
    #: (currently not used) an optional parent category, if caregories
    #: are arranged in a tree
    parent = models.ForeignKey('self', blank=True, null=True)
    #: text description of this object category
    description = models.TextField(blank=True)
    #: a shape that can be shown as an example
    representative_shape = models.ForeignKey(
        'MaterialShape', blank=True, null=True)
    #: if True, this is actually a special failure case category
    fail = models.BooleanField(default=False)
    #: values of ``name`` that are considered "fail"
    FAIL_NAMES = ("Not on list", "More than one object", "I can't tell")
    def material_shape_count(self):
        """Count the correct, sufficiently large shapes labeled with this name."""
        return get_model('shapes', 'MaterialShape').objects.filter(
            pixel_area__gt=Shape.MIN_PIXEL_AREA, correct=True,
            name=self,
        ).count()
    def __unicode__(self):
        return self.name
    class Meta:
        # failure categories first, then alphabetical
        ordering = ['-fail', 'name']
    def save(self, *args, **kwargs):
        # Automatically flag the special failure categories before saving.
        if self.name in ShapeName.FAIL_NAMES:
            self.fail = True
        super(ShapeName, self).save(*args, **kwargs)
class ShapeSubstance(EmptyModelBase):
    """ Material category, e.g. "wood", "brick", ... """
    # name of the material category, unique across the table
    name = models.CharField(max_length=127, unique=True)
    # optional parent category (tree structure, mirrors ShapeName.parent)
    parent = models.ForeignKey('self', blank=True, null=True)
    # text description of this material category
    description = models.TextField(blank=True)
    # a shape that can be shown as an example of this material
    representative_shape = models.ForeignKey(
        'MaterialShape', blank=True, null=True)
    # if True, this is actually a special failure case category
    fail = models.BooleanField(default=False)
    # if True, this is shown as an option for new labels
    active = models.BooleanField(default=False)
    # each substance group corresponds to a different (possibly overlapping)
    # set of potential object names
    group = models.ForeignKey(
        'ShapeSubstanceGroup', blank=True, null=True,
        related_name='substances')
    #: values of ``name`` that are considered "fail"
    FAIL_NAMES = ("Not on list", "More than one material", "I can't tell")
    def material_shape_count(self):
        """Count the correct, sufficiently large shapes with this substance."""
        return get_model('shapes', 'MaterialShape').objects.filter(
            pixel_area__gt=Shape.MIN_PIXEL_AREA, correct=True,
            substance=self,
        ).count()
    def save(self, *args, **kwargs):
        # Automatically flag the special failure categories before saving.
        if self.name in ShapeSubstance.FAIL_NAMES:
            self.fail = True
        super(ShapeSubstance, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.name
    class Meta:
        # failure categories first, then alphabetical
        ordering = ['-fail', 'name']
class ShapeSubstanceGroup(EmptyModelBase):
    """ Grouping of substances; each substance group is assigned a list of
    names that can be used """
    # if False, this group is no longer offered for new labels
    active = models.BooleanField(default=True)
    # object names that may be combined with substances in this group
    names = models.ManyToManyField(ShapeName, related_name='substance_groups')
##
## Shapes
##
class Shape(ResultBase):
""" Abstract parent describing a complex polygon. Shapes are represented
as a bag of vertices, triangles, and line segments. Users submit
instances of SubmittedShapes, which are then intersected and triangulated
to form subclasses of Shape. """
#: min size of a shape
MIN_PIXEL_AREA = 4096
#: min size of a shape for rectification
MIN_PLANAR_AREA = 16384
#: Vertices format: x1,y1,x2,y2,x3,y3,... (coords are fractions of width/height)
#: (this format allows easy embedding into javascript)
vertices = models.TextField()
#: num_vertices should be equal to len(points.split(','))//2
num_vertices = models.IntegerField(db_index=True)
#: Triangles format: p1,p2,p3,p2,p3,p4..., where p_i is an index into
#: vertices, and p1-p2-p3 is a triangle. Each triangle is three indices
#: into points; all triangles are listed together. This format allows easy
#: embedding into javascript.
triangles = models.TextField()
#: num_triangles should be equal to len(triangles.split(','))//3
num_triangles = models.IntegerField()
#: Segments format: "p1,p2,p2,p3,...", where p_i is an index into vertices,
#: and p1-p2, p2-p3, ... are the line segments. The segments are unordered.
#: Each line segment is two indices into points; all segments are listed
#: together. This format allows easy embedding into javascript.
segments = models.TextField()
#: num_segments should be equal to len(segments.split(','))//2
num_segments = models.IntegerField()
## Line segments re-grouped as poly-lines "[[p1,p2,p3,p4],[p1,p2,p3],...]",
## json encoded. Each p_i is an index into vertices. This is the exact same
## data as the segments field, but with line segments properly grouped.
#polylines = models.TextField()
## Number of unique polylines; should equal len(json.loads(polylines))
#num_polylines = models.IntegerField()
#: Area in normalized units. To get the pixel area, multiply this by the
#: total photo area.
area = models.FloatField()
#: Area in pixels
pixel_area = models.IntegerField(null=True, db_index=True)
#: flag to separate out this shape
synthetic = models.BooleanField(default=False)
synthetic_slug = models.CharField(max_length=32, blank=True)
#: if true, enough users voted this to be the correct type of segmentation
correct = models.NullBooleanField()
#: further from 0: more confident in assignment of correct
correct_score = models.FloatField(
blank=True, null=True, db_index=True)
#: if true, enough users voted this to be flat
planar = models.NullBooleanField()
#: CUBAM score for the planar field. further from 0: more confident in
#: assignment of planar.
planar_score = models.FloatField(blank=True, null=True, db_index=True)
#: method by which the planar field was set
PLANAR_METHODS = (('A', 'admin'), ('C', 'CUBAM'), ('M', 'majority vote'))
planar_method_to_str = dict((k, v) for (k, v) in PLANAR_METHODS)
planar_method = models.CharField(
max_length=1, choices=PLANAR_METHODS, blank=True, null=True)
#: Photo masked by the shape and cropped to the bounding box. The masked
#: (excluded) region has pixels that are white with no opacity (ARGB value
#: (0, 255, 255, 255)).
image_crop = models.ImageField(
upload_to='shapes', blank=True, max_length=255, storage=STORAGE)
#: square thumbnail with whitebackground
image_square_300 = ImageSpecField(
[ResizeToFit(300, 300), ResizeCanvas(300, 300, color=(255, 255, 255))],
source='image_crop', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)
#: bbox: photo cropped out to the bounding box of this shape
image_bbox = models.ImageField(
upload_to='bbox', blank=True, max_length=255, storage=STORAGE)
#: bbox resized to fit in 512x512
image_bbox_512 = ImageSpecField(
[ResizeToFit(512, 512)],
source='image_bbox', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)
#: bbox resized to fit in 1024x1024 (used by opengl widget in rectify task)
image_bbox_1024 = ImageSpecField(
[ResizeToFit(1024, 1024)],
source='image_bbox', format='JPEG', options={'quality': 90}, cachefile_storage=STORAGE)
#: position to show a label (normalized coordinates)
label_pos_x = models.FloatField(blank=True, null=True)
label_pos_y = models.FloatField(blank=True, null=True)
## json-encoded array [min x, min y, max x, max y] indicating the position
## of the bounding box. as usual, positions are specified as fractions of
## width and height.
#bbox = models.TextField(blank=True)
## bbox width/height aspect ratio
#bbox_aspect_ratio = models.FloatField(null=True, blank=True)
#: padded bounding box image. this is the bounding box, expanded by 25% on
#: each side (as a fraction of the bbox width,height), and then the smaller
#: dimension is expanded to as quare.
image_pbox = models.ImageField(
upload_to='pbox', blank=True, max_length=255, storage=STORAGE)
image_pbox_300 = ImageSpecField(
[ResizeToFit(300, 300)],
source='image_pbox', format='JPEG', options={'quality': 90},
cachefile_storage=STORAGE)
image_pbox_512 = ImageSpecField(
[ResizeToFit(512, 512)],
source='image_pbox', format='JPEG', options={'quality': 90},
cachefile_storage=STORAGE)
image_pbox_1024 = ImageSpecField(
[ResizeToFit(1024, 1024)],
source='image_pbox', format='JPEG', options={'quality': 90},
cachefile_storage=STORAGE)
# pbox width/height aspect ratio (as a ratio of pixel lengths)
pbox_aspect_ratio = models.FloatField(null=True, blank=True)
# json-encoded array [min x, min y, max x, max y] indicating the position
# of the padded box. as usual, positions are specified as fractions of
# width and height.
pbox = models.TextField(blank=True)
## The THREE.js vertices are re-normalized so that the aspect ratio is
## correct. The x-coordinate is now in units of height, not width.
## THREE.js json file
#three_js = models.FileField(
#upload_to='three', blank=True, max_length=255)
## THREE.js buffer file
#three_bin = models.FileField(
#upload_to='three', blank=True, max_length=255)
# approximate area of the rectified texture (in pixels)
rectified_area = models.FloatField(null=True, blank=True)
# dominant color of this shape
dominant_r = models.FloatField(null=True, blank=True)
dominant_g = models.FloatField(null=True, blank=True)
dominant_b = models.FloatField(null=True, blank=True)
# top 4 dominant colors written as #rrggbb (for easy HTML viewing)
# (in decreasing order of frequency)
dominant_rgb0 = models.CharField(max_length=7, blank=True, default='')
dominant_rgb1 = models.CharField(max_length=7, blank=True, default='')
dominant_rgb2 = models.CharField(max_length=7, blank=True, default='')
dominant_rgb3 = models.CharField(max_length=7, blank=True, default='')
# difference between top two colors
dominant_delta = models.FloatField(null=True, blank=True)
def has_fov(self):
return self.photo.fov > 0
    def publishable(self):
        """A shape is publishable exactly when its photo is (delegation)."""
        return self.photo.publishable()
    def image_pbox_height(self, width):
        """ Returns the height of image_pbox_<width> """
        # pbox_aspect_ratio is presumably width/height (see pbox fields above
        # -- TODO confirm); the min() caps the reported height at `width`.
        return min(width, width / self.pbox_aspect_ratio)
    def label_pos_x_scaled(self):
        """ Returns the label position normalized by height instead of width
        """
        # label_pos_x is a fraction of width; multiplying by the photo's
        # aspect ratio re-expresses it as a fraction of height.
        return self.label_pos_x * self.photo.aspect_ratio
    def label_pos_2_y_512(self):
        # Second label line: shift down by 1.25 line-heights at the 512px font size.
        return self.label_pos_y + 1.25 * self.photo.font_size_512()
# helpers for templates
    def image_pbox_height_1024(self):
        # Template-friendly wrapper (templates cannot pass arguments).
        return self.image_pbox_height(1024)
    def image_pbox_height_512(self):
        # Template-friendly wrapper (templates cannot pass arguments).
        return self.image_pbox_height(512)
def save(self, *args, **kwargs):
# compute counts:
if not self.num_vertices:
self.num_vertices = len(self.vertices.split(',')) // 2
if not self.num_triangles:
self.num_triangles = len(self.triangles.split(',')) // 3
if not self.num_segments:
self.num_segments = len(self.segments.split(',')) // 2
if not self.area:
from shapes.utils import complex_polygon_area
self.area = complex_polygon_area(self.vertices, self.triangles)
if not self.pixel_area:
self.pixel_area = (self.area * self.photo.image_orig.width *
self.photo.image_orig.height)
if not self.synthetic:
self.synthetic = self.photo.synthetic
if not self.label_pos_x or not self.label_pos_y:
from shapes.utils import update_shape_label_pos
update_shape_label_pos(self, save=False)
| |
header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
virtual_machine_name=virtual_machine_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/stop'} # type: ignore
async def _reimage_initial(
self,
resource_group_name: str,
lab_name: str,
virtual_machine_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reimage_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
virtual_machine_name=virtual_machine_name,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/reimage'} # type: ignore
    @distributed_trace_async
    async def begin_reimage(
        self,
        resource_group_name: str,
        lab_name: str,
        virtual_machine_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Re-image a lab virtual machine.

        Re-image a lab virtual machine. The virtual machine will be deleted and recreated using the
        latest published snapshot of the reference environment of the lab.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within containing lab account.
         Used in resource URIs.
        :type lab_name: str
        :param virtual_machine_name: The ID of the virtual machine that uniquely identifies it within
         the containing lab. Used in resource URIs.
        :type virtual_machine_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Remove LRO-control keywords from kwargs up front so that only
        # request-level options are forwarded to the initial call below.
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The lambda returns the
            # raw PipelineResponse unchanged so the poller can drive the LRO.
            raw_result = await self._reimage_initial(
                resource_group_name=resource_group_name,
                lab_name=lab_name,
                virtual_machine_name=virtual_machine_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop 'error_map' so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialization callback: apply the caller-supplied 'cls' hook
            # to the final response, if one was provided (otherwise None).
            if cls:
                return cls(pipeline_response, None, {})

        # 'polling' may be True (default ARM polling), False (no polling), or a
        # caller-supplied AsyncPollingMethod instance.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/reimage'}  # type: ignore
async def _redeploy_initial(
self,
resource_group_name: str,
lab_name: str,
virtual_machine_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_redeploy_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
virtual_machine_name=virtual_machine_name,
template_url=self._redeploy_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/redeploy'} # type: ignore
    @distributed_trace_async
    async def begin_redeploy(
        self,
        resource_group_name: str,
        lab_name: str,
        virtual_machine_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Redeploy a lab virtual machine to a different compute node. For troubleshooting connectivity.

        Action to redeploy a lab virtual machine to a different compute node. For troubleshooting
        connectivity.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param lab_name: The name of the lab that uniquely identifies it within containing lab account.
         Used in resource URIs.
        :type lab_name: str
        :param virtual_machine_name: The ID of the virtual machine that uniquely identifies it within
         the containing lab. Used in resource URIs.
        :type virtual_machine_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Remove LRO-control keywords from kwargs up front so that only
        # request-level options are forwarded to the initial call below.
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial request. The lambda returns the
            # raw PipelineResponse unchanged so the poller can drive the LRO.
            raw_result = await self._redeploy_initial(
                resource_group_name=resource_group_name,
                lab_name=lab_name,
                virtual_machine_name=virtual_machine_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Drop 'error_map' so it is not forwarded to the polling method below.
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialization callback: apply the caller-supplied 'cls' hook
            # to the final response, if one was provided (otherwise None).
            if cls:
                return cls(pipeline_response, None, {})

        # 'polling' may be True (default ARM polling), False (no polling), or a
        # caller-supplied AsyncPollingMethod instance.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume polling from the saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/redeploy'}  # type: ignore
async def _reset_password_initial(
self,
resource_group_name: str,
lab_name: str,
virtual_machine_name: str,
body: "_models.ResetPasswordBody",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'ResetPasswordBody')
request = build_reset_password_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_name=lab_name,
virtual_machine_name=virtual_machine_name,
content_type=content_type,
json=_json,
template_url=self._reset_password_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_password_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labs/{labName}/virtualMachines/{virtualMachineName}/resetPassword'} # type: ignore
@distributed_trace_async
async def begin_reset_password(
self,
resource_group_name: str,
lab_name: str,
virtual_machine_name: str,
body: "_models.ResetPasswordBody",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Reset a lab virtual machine password.
Resets a lab virtual machine password.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_name: The name of the lab that uniquely identifies it within containing lab account.
Used in resource URIs.
:type lab_name: str
:param virtual_machine_name: The ID of the virtual machine that uniquely identifies it within
the containing lab. Used in resource URIs.
:type virtual_machine_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.ResetPasswordBody
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_password_initial(
resource_group_name=resource_group_name,
lab_name=lab_name,
virtual_machine_name=virtual_machine_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: | |
= ("table_id", table_id, "lookup_id", lookup_id)
_result = _execute.execute(b"TPUEmbeddingActivations", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"TPUEmbeddingActivations", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def tpu_ordinal_selector(name=None):
  r"""A TPU core selector Op.

  This Op produces a set of TPU cores (for warm-up) or a single TPU core
  (for regular inference) to execute the TPU program on. The output is
  consumed by TPUPartitionedCall.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "TPUOrdinalSelector", name,
          tld.op_callbacks)
      return _result
    except _core._FallbackException:
      # Fast path rejected the call; retry via the slower Python fallback.
      try:
        return tpu_ordinal_selector_eager_fallback(
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUOrdinalSelector", name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # This op has no attrs and no inputs beyond the graph op itself.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUOrdinalSelector", _inputs_flat, _attrs, _result)
  # Unpack the single output tensor from the output list.
  _result, = _result
  return _result
# Export the op under tf.raw_ops with the raw (keyword-only) calling convention.
TPUOrdinalSelector = tf_export("raw_ops.TPUOrdinalSelector")(_ops.to_raw_op(tpu_ordinal_selector))
def tpu_ordinal_selector_eager_fallback(name, ctx):
  """Slow-path eager execution of TPUOrdinalSelector via _execute.execute."""
  flat_inputs = []
  op_attrs = None
  outputs = _execute.execute(
      b"TPUOrdinalSelector", 1, inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
      name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("TPUOrdinalSelector", flat_inputs, op_attrs,
                             outputs)
  # The op has exactly one output; unpack it from the result list.
  result, = outputs
  return result
def tpu_partitioned_call(args, device_ordinal, Tout, f, autotuner_thresh=0, name=None):
  r"""Calls a function placed on a specified TPU device.

  Args:
    args: A list of `Tensor` objects. The arguments to the function.
    device_ordinal: A `Tensor` of type `int32`.
      The TPU device ordinal to run the function on.
    Tout: A list of `tf.DTypes`. The types of the outputs of the function.
    f: A function decorated with @Defun. The function to call.
    autotuner_thresh: An optional `int`. Defaults to `0`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "TPUPartitionedCall", name,
          tld.op_callbacks, args, device_ordinal, "Tout", Tout, "f", f,
          "autotuner_thresh", autotuner_thresh)
      return _result
    except _core._FallbackException:
      # Fast path rejected the call; retry via the slower Python fallback.
      try:
        return tpu_partitioned_call_eager_fallback(
            args, device_ordinal, Tout=Tout, f=f,
            autotuner_thresh=autotuner_thresh, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Validate and coerce attrs before building the graph op.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'tpu_partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  if autotuner_thresh is None:
    autotuner_thresh = 0
  autotuner_thresh = _execute.make_int(autotuner_thresh, "autotuner_thresh")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedCall", args=args, device_ordinal=device_ordinal,
                              Tout=Tout, f=f,
                              autotuner_thresh=autotuner_thresh, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f",
              _op.get_attr("f"), "autotuner_thresh",
              _op._get_attr_int("autotuner_thresh"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedCall", _inputs_flat, _attrs, _result)
  return _result
# Export the op under tf.raw_ops with the raw (keyword-only) calling convention.
TPUPartitionedCall = tf_export("raw_ops.TPUPartitionedCall")(_ops.to_raw_op(tpu_partitioned_call))
def tpu_partitioned_call_eager_fallback(args, device_ordinal, Tout, f, autotuner_thresh, name, ctx):
  """Slow-path eager execution of TPUPartitionedCall via _execute.execute."""
  # Validate and coerce attrs, mirroring the graph-mode path.
  if not isinstance(Tout, (list, tuple)):
    raise TypeError(
        "Expected list for 'Tout' argument to "
        "'tpu_partitioned_call' Op, not %r." % Tout)
  Tout = [_execute.make_type(_t, "Tout") for _t in Tout]
  autotuner_thresh = _execute.make_int(
      0 if autotuner_thresh is None else autotuner_thresh, "autotuner_thresh")
  # Convert the heterogeneous input list and the device ordinal to tensors.
  _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx)
  device_ordinal = _ops.convert_to_tensor(device_ordinal, _dtypes.int32)
  flat_inputs = list(args)
  flat_inputs.append(device_ordinal)
  op_attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f,
              "autotuner_thresh", autotuner_thresh)
  results = _execute.execute(b"TPUPartitionedCall", len(Tout),
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("TPUPartitionedCall", flat_inputs, op_attrs,
                             results)
  return results
def tpu_replicate_metadata(num_replicas, num_cores_per_replica=1, topology="", use_tpu=True, device_assignment=[], computation_shape=[], host_compute_core=[], padding_map=[], step_marker_location="STEP_MARK_AT_ENTRY", allow_soft_placement=False, name=None):
  r"""Metadata indicating how the TPU computation should be replicated.

  This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.

  Args:
    num_replicas: An `int` that is `>= 0`.
      Number of replicas of the computation
    num_cores_per_replica: An optional `int`. Defaults to `1`.
      Number of cores per replica. Used for model parallelism.
    topology: An optional `string`. Defaults to `""`.
      TopologyProto indicating the topology of the TPU pod slice.
    use_tpu: An optional `bool`. Defaults to `True`.
      Whether to place the computation on the TPU.
    device_assignment: An optional list of `ints`. Defaults to `[]`.
      The assignment of devices for the computation.
    computation_shape: An optional list of `ints`. Defaults to `[]`.
      DEPRECATED. Use num_cores_per_replica instead.
    host_compute_core: An optional list of `strings`. Defaults to `[]`.
    padding_map: An optional list of `strings`. Defaults to `[]`.
    step_marker_location: An optional `string`. Defaults to `"STEP_MARK_AT_ENTRY"`.
    allow_soft_placement: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: dispatch directly to the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx._context_handle, tld.device_name, "TPUReplicateMetadata", name,
          tld.op_callbacks, "num_replicas", num_replicas,
          "num_cores_per_replica", num_cores_per_replica, "topology", topology,
          "use_tpu", use_tpu, "device_assignment", device_assignment,
          "computation_shape", computation_shape, "host_compute_core",
          host_compute_core, "padding_map", padding_map, "step_marker_location",
          step_marker_location, "allow_soft_placement", allow_soft_placement)
      return _result
    except _core._FallbackException:
      # Fast path rejected the call; retry via the slower Python fallback.
      try:
        return tpu_replicate_metadata_eager_fallback(
            num_replicas=num_replicas,
            num_cores_per_replica=num_cores_per_replica, topology=topology,
            use_tpu=use_tpu, device_assignment=device_assignment,
            computation_shape=computation_shape,
            host_compute_core=host_compute_core, padding_map=padding_map,
            step_marker_location=step_marker_location,
            allow_soft_placement=allow_soft_placement, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  # Normalize every attr: apply defaults for None, validate list-typed attrs,
  # and coerce each value to its declared attr type.
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  if num_cores_per_replica is None:
    num_cores_per_replica = 1
  num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica")
  if topology is None:
    topology = ""
  topology = _execute.make_str(topology, "topology")
  if use_tpu is None:
    use_tpu = True
  use_tpu = _execute.make_bool(use_tpu, "use_tpu")
  if device_assignment is None:
    device_assignment = []
  if not isinstance(device_assignment, (list, tuple)):
    raise TypeError(
        "Expected list for 'device_assignment' argument to "
        "'tpu_replicate_metadata' Op, not %r." % device_assignment)
  device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment]
  if computation_shape is None:
    computation_shape = []
  if not isinstance(computation_shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'computation_shape' argument to "
        "'tpu_replicate_metadata' Op, not %r." % computation_shape)
  computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape]
  if host_compute_core is None:
    host_compute_core = []
  if not isinstance(host_compute_core, (list, tuple)):
    raise TypeError(
        "Expected list for 'host_compute_core' argument to "
        "'tpu_replicate_metadata' Op, not %r." % host_compute_core)
  host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core]
  if padding_map is None:
    padding_map = []
  if not isinstance(padding_map, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_map' argument to "
        "'tpu_replicate_metadata' Op, not %r." % padding_map)
  padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map]
  if step_marker_location is None:
    step_marker_location = "STEP_MARK_AT_ENTRY"
  step_marker_location = _execute.make_str(step_marker_location, "step_marker_location")
  if allow_soft_placement is None:
    allow_soft_placement = False
  allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUReplicateMetadata", num_replicas=num_replicas,
                                num_cores_per_replica=num_cores_per_replica,
                                topology=topology, use_tpu=use_tpu,
                                device_assignment=device_assignment,
                                computation_shape=computation_shape,
                                host_compute_core=host_compute_core,
                                padding_map=padding_map,
                                step_marker_location=step_marker_location,
                                allow_soft_placement=allow_soft_placement,
                                name=name)
  # This op produces no tensors; return the Operation itself.
  return _op
# Export the op under tf.raw_ops with the raw (keyword-only) calling convention.
TPUReplicateMetadata = tf_export("raw_ops.TPUReplicateMetadata")(_ops.to_raw_op(tpu_replicate_metadata))
def tpu_replicate_metadata_eager_fallback(num_replicas, num_cores_per_replica, topology, use_tpu, device_assignment, computation_shape, host_compute_core, padding_map, step_marker_location, allow_soft_placement, name, ctx):
  """Slow-path eager execution of TPUReplicateMetadata via _execute.execute."""
  # Normalize every attr: apply defaults for None, validate list-typed attrs,
  # and coerce each value to its declared attr type (mirrors the graph path).
  num_replicas = _execute.make_int(num_replicas, "num_replicas")
  if num_cores_per_replica is None:
    num_cores_per_replica = 1
  num_cores_per_replica = _execute.make_int(num_cores_per_replica, "num_cores_per_replica")
  if topology is None:
    topology = ""
  topology = _execute.make_str(topology, "topology")
  if use_tpu is None:
    use_tpu = True
  use_tpu = _execute.make_bool(use_tpu, "use_tpu")
  if device_assignment is None:
    device_assignment = []
  if not isinstance(device_assignment, (list, tuple)):
    raise TypeError(
        "Expected list for 'device_assignment' argument to "
        "'tpu_replicate_metadata' Op, not %r." % device_assignment)
  device_assignment = [_execute.make_int(_i, "device_assignment") for _i in device_assignment]
  if computation_shape is None:
    computation_shape = []
  if not isinstance(computation_shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'computation_shape' argument to "
        "'tpu_replicate_metadata' Op, not %r." % computation_shape)
  computation_shape = [_execute.make_int(_i, "computation_shape") for _i in computation_shape]
  if host_compute_core is None:
    host_compute_core = []
  if not isinstance(host_compute_core, (list, tuple)):
    raise TypeError(
        "Expected list for 'host_compute_core' argument to "
        "'tpu_replicate_metadata' Op, not %r." % host_compute_core)
  host_compute_core = [_execute.make_str(_s, "host_compute_core") for _s in host_compute_core]
  if padding_map is None:
    padding_map = []
  if not isinstance(padding_map, (list, tuple)):
    raise TypeError(
        "Expected list for 'padding_map' argument to "
        "'tpu_replicate_metadata' Op, not %r." % padding_map)
  padding_map = [_execute.make_str(_s, "padding_map") for _s in padding_map]
  if step_marker_location is None:
    step_marker_location = "STEP_MARK_AT_ENTRY"
  step_marker_location = _execute.make_str(step_marker_location, "step_marker_location")
  if allow_soft_placement is None:
    allow_soft_placement = False
  allow_soft_placement = _execute.make_bool(allow_soft_placement, "allow_soft_placement")
  # The op has no tensor inputs; everything is carried in attrs.
  _inputs_flat = []
  _attrs = ("num_replicas", num_replicas, "num_cores_per_replica",
  num_cores_per_replica, "topology", topology, "use_tpu", use_tpu,
  "device_assignment", device_assignment, "computation_shape",
  computation_shape, "host_compute_core", host_compute_core, "padding_map",
  padding_map, "step_marker_location", step_marker_location,
  "allow_soft_placement", allow_soft_placement)
  _result = _execute.execute(b"TPUReplicateMetadata", 0, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  # Zero outputs: the eager fallback returns None.
  _result = None
  return _result
def tpu_replicated_input(inputs, is_mirrored_variable=False, index=-1, name=None):
r"""Connects N inputs to an N-way replicated TPU computation.
This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
Each replicated input has the same shape and type alongside the output.
For example:
```
%a = "tf.opA"()
%b = "tf.opB"()
%replicated_input = "tf.TPUReplicatedInput"(%a, %b)
%computation = "tf.Computation"(%replicated_input)
```
The above computation has | |
consistent_kwargs_dict['plot_projection'] = \
do_allowed_projections[ppl]
else:
consistent_kwargs_dict['plot_projection'] = 'stereo'
except Exception:
pass
if args.plot_show_upper_hemis:
consistent_kwargs_dict['plot_show_upper_hemis'] = True
if args.plot_n_points and args.plot_n_points > 360:
consistent_kwargs_dict['plot_n_points'] = args.plot_n_points
if args.plot_size:
try:
if 0.01 < args.plot_size <= 1:
consistent_kwargs_dict['plot_size'] = \
args.plot_size * 10 / 2.54
elif 1 < args.plot_size < 45:
consistent_kwargs_dict['plot_size'] = \
args.plot_size / 2.54
else:
consistent_kwargs_dict['plot_size'] = 5
consistent_kwargs_dict['plot_aux_plot_size'] = \
consistent_kwargs_dict['plot_size']
except Exception:
pass
if args.plot_pressure_colour:
try:
sec_colour_raw = args.plot_pressure_colour.split(',')
if len(sec_colour_raw) == 1:
if sec_colour_raw[0].lower()[0] in 'bgrcmykw':
consistent_kwargs_dict['plot_pressure_colour'] = \
sec_colour_raw[0].lower()[0]
else:
raise
elif len(sec_colour_raw) == 3:
for sc in sec_colour_raw:
if not 0 <= (int(sc)) <= 255:
raise
consistent_kwargs_dict['plot_pressure_colour'] = \
(float(sec_colour_raw[0]) / 255.,
float(sec_colour_raw[1]) / 255.,
float(sec_colour_raw[2]) / 255.)
else:
raise
except Exception:
pass
if args.plot_tension_colour:
try:
sec_colour_raw = args.plot_tension_colour.split(',')
if len(sec_colour_raw) == 1:
if sec_colour_raw[0].lower()[0] in 'bgrcmykw':
consistent_kwargs_dict['plot_tension_colour'] = \
sec_colour_raw[0].lower()[0]
else:
raise
elif len(sec_colour_raw) == 3:
for sc in sec_colour_raw:
if not 0 <= (int(float(sc))) <= 255:
raise
consistent_kwargs_dict['plot_tension_colour'] = \
(float(sec_colour_raw[0]) / 255.,
float(sec_colour_raw[1]) / 255.,
float(sec_colour_raw[2]) / 255.)
else:
raise
except Exception:
pass
if args.plot_total_alpha:
if not 0 <= args.plot_total_alpha <= 1:
consistent_kwargs_dict['plot_total_alpha'] = 1
else:
consistent_kwargs_dict['plot_total_alpha'] = \
args.plot_total_alpha
if args.plot_show_1faultplane:
consistent_kwargs_dict['plot_show_1faultplane'] = True
try:
fp_args = args.plot_show_1faultplane
if not int(fp_args[0]) in [1, 2]:
consistent_kwargs_dict['plot_show_FP_index'] = 1
else:
consistent_kwargs_dict['plot_show_FP_index'] = \
int(fp_args[0])
if not 0 < float(fp_args[1]) <= 20:
consistent_kwargs_dict['plot_faultplane_width'] = 2
else:
consistent_kwargs_dict['plot_faultplane_width'] = \
float(fp_args[1])
try:
sec_colour_raw = fp_args[2].split(',')
if len(sec_colour_raw) == 1:
sc = sec_colour_raw[0].lower()[0]
if sc not in 'bgrcmykw':
raise
consistent_kwargs_dict['plot_faultplane_colour'] = \
sec_colour_raw[0].lower()[0]
elif len(sec_colour_raw) == 3:
for sc in sec_colour_raw:
if not 0 <= (int(sc)) <= 255:
raise
consistent_kwargs_dict['plot_faultplane_colour'] = \
(float(sec_colour_raw[0]) / 255.,
float(sec_colour_raw[1]) / 255.,
float(sec_colour_raw[2]) / 255.)
else:
raise
except Exception:
consistent_kwargs_dict['plot_faultplane_colour'] = 'k'
try:
if 0 <= float(fp_args[3]) <= 1:
consistent_kwargs_dict['plot_faultplane_alpha'] = \
float(fp_args[3])
except Exception:
consistent_kwargs_dict['plot_faultplane_alpha'] = 1
except Exception:
pass
if args.plot_show_faultplanes:
consistent_kwargs_dict['plot_show_faultplanes'] = True
consistent_kwargs_dict['plot_show_1faultplane'] = False
if args.plot_dpi:
if 200 <= args.plot_dpi <= 2000:
consistent_kwargs_dict['plot_dpi'] = args.plot_dpi
if args.plot_only_lines:
consistent_kwargs_dict['plot_fill_flag'] = False
if args.plot_outerline:
consistent_kwargs_dict['plot_outerline'] = True
try:
fp_args = args.plot_outerline
if not 0 < float(fp_args[0]) <= 20:
consistent_kwargs_dict['plot_outerline_width'] = 2
else:
consistent_kwargs_dict['plot_outerline_width'] = \
float(fp_args[0])
try:
sec_colour_raw = fp_args[1].split(',')
if len(sec_colour_raw) == 1:
if sec_colour_raw[0].lower()[0] in 'bgrcmykw':
consistent_kwargs_dict['plot_outerline_colour'] = \
sec_colour_raw[0].lower()[0]
else:
raise
elif len(sec_colour_raw) == 3:
for sc in sec_colour_raw:
if not 0 <= (int(sc)) <= 255:
raise
consistent_kwargs_dict['plot_outerline_colour'] = \
(float(sec_colour_raw[0]) / 255.,
float(sec_colour_raw[1]) / 255.,
float(sec_colour_raw[2]) / 255.)
else:
raise
except Exception:
consistent_kwargs_dict['plot_outerline_colour'] = 'k'
try:
if 0 <= float(fp_args[2]) <= 1:
consistent_kwargs_dict['plot_outerline_alpha'] = \
float(fp_args[2])
except Exception:
consistent_kwargs_dict['plot_outerline_alpha'] = 1
except Exception:
pass
if args.plot_nodalline:
consistent_kwargs_dict['plot_nodalline'] = True
try:
fp_args = args.plot_nodalline
if not 0 < float(fp_args[0]) <= 20:
consistent_kwargs_dict['plot_nodalline_width'] = 2
else:
consistent_kwargs_dict['plot_nodalline_width'] = \
float(fp_args[0])
try:
sec_colour_raw = fp_args[1].split(',')
if len(sec_colour_raw) == 1:
if sec_colour_raw[0].lower()[0] in 'bgrcmykw':
consistent_kwargs_dict['plot_nodalline_colour'] = \
sec_colour_raw[0].lower()[0]
else:
raise
elif len(sec_colour_raw) == 3:
for sc in sec_colour_raw:
if not 0 <= (int(sc)) <= 255:
raise
consistent_kwargs_dict['plot_nodalline_colour'] = \
(float(sec_colour_raw[0]) / 255.,
float(sec_colour_raw[1]) / 255.,
float(sec_colour_raw[2]) / 255.)
else:
raise
except Exception:
consistent_kwargs_dict['plot_nodalline_colour'] = 'k'
try:
if 0 <= float(fp_args[2]) <= 1:
consistent_kwargs_dict['plot_nodalline_alpha'] = \
float(fp_args[2])
except Exception:
consistent_kwargs_dict['plot_nodalline_alpha'] = 1
except Exception:
pass
if args.plot_show_princ_axes:
consistent_kwargs_dict['plot_show_princ_axes'] = True
try:
fp_args = args.plot_show_princ_axes
if not 0 < float(fp_args[0]) <= 40:
consistent_kwargs_dict['plot_princ_axes_symsize'] = 10
else:
consistent_kwargs_dict['plot_princ_axes_symsize'] = \
float(fp_args[0])
if not 0 < float(fp_args[1]) <= 20:
consistent_kwargs_dict['plot_princ_axes_lw '] = 3
else:
consistent_kwargs_dict['plot_princ_axes_lw '] = \
float(fp_args[1])
try:
if 0 <= float(fp_args[2]) <= 1:
consistent_kwargs_dict['plot_princ_axes_alpha'] = \
float(fp_args[2])
except Exception:
consistent_kwargs_dict['plot_princ_axes_alpha'] = 1
except Exception:
pass
if args.plot_show_basis_axes:
consistent_kwargs_dict['plot_show_basis_axes'] = True
consistent_kwargs_dict['in_system'] = args.plot_input_system
if args.plot_isotropic_part:
consistent_kwargs_dict['plot_isotropic_part'] = \
args.plot_isotropic_part
return consistent_kwargs_dict
def _build_parsers():
"""
build dictionary with 4 (5 incl. 'save') sets of options, belonging to
the 4 (5) possible calls
"""
from argparse import (ArgumentParser,
RawDescriptionHelpFormatter,
RawTextHelpFormatter,
SUPPRESS)
parser = ArgumentParser(prog='obspy-mopad',
formatter_class=RawDescriptionHelpFormatter,
description="""
###############################################################################
################################ MoPaD ################################
################ Moment tensor Plotting and Decomposition tool ################
###############################################################################
Multi method tool for:
- Plotting and saving of focal sphere diagrams ('Beachballs').
- Decomposition and Conversion of seismic moment tensors.
- Generating coordinates, describing a focal sphere diagram, to be
piped into GMT's psxy (Useful where psmeca or pscoupe fail.)
For more help, please run ``%(prog)s {command} --help''.
-------------------------------------------------------------------------------
Example:
To generate a beachball for a normal faulting mechanism (a snake's eye type):
%(prog)s plot 0,45,-90 or %(prog)s plot p 0,1,-1,0,0,0
""")
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
mechanism = ArgumentParser(add_help=False,
formatter_class=RawTextHelpFormatter)
mechanism.add_argument('mechanism', metavar='source-mechanism',
help="""
The 'source mechanism' as a comma-separated list of length:
3:
strike, dip, rake;
4:
strike, dip, rake, moment;
6:
M11, M22, M33, M12, M13, M23;
7:
M11, M22, M33, M12, M13, M23, moment;
9:
full moment tensor
(With all angles to be given in degrees)
""")
subparsers = parser.add_subparsers(title='commands')
# Case-insensitive typing
class caps(str):
def __new__(self, content):
return str.__new__(self, content.upper())
# Possible basis systems
ALLOWED_BASES = ['NED', 'USE', 'XYZ', 'NWU']
# gmt
help = "return the beachball as a string, to be piped into GMT's psxy"
desc = """Tool providing strings to be piped into the 'psxy' from GMT.
Either a string describing the fillable area (to be used with option
'-L' within psxy) or the nodallines or the coordinates of the principle
axes are given.
"""
parser_gmt = subparsers.add_parser('gmt', help=help, description=desc,
parents=[mechanism])
group_type = parser_gmt.add_argument_group('Output')
group_show = parser_gmt.add_argument_group('Appearance')
group_geo = parser_gmt.add_argument_group('Geometry')
group_type.add_argument(
'-t', '--type', dest='GMT_string_type', metavar='<type>',
type=caps, default='FILL',
help='choice of psxy data: area to fill (fill), nodal lines '
'(lines), or eigenvector positions (ev)')
group_show.add_argument(
'-s', '--scaling', dest='GMT_scaling', metavar='<scaling factor>',
type=float, default=1.0,
help='spatial scaling of the beachball')
group_show.add_argument(
'-r', '--color1', '--colour1', dest='GMT_tension_colour',
metavar='<tension colour>', type=int, default=1,
help="-Z option's key for the tension colour of the beachball - "
'type: integer')
group_show.add_argument(
'-w', '--color2', '--colour2', dest='GMT_pressure_colour',
metavar='<pressure colour>', type=int, default=0,
help="-Z option's key for the pressure colour of the beachball - "
'type: integer')
group_show.add_argument(
'-D', '--faultplanes', dest='GMT_show_2FP2', action='store_true',
help='boolean key, if 2 faultplanes shall be shown')
group_show.add_argument(
'-d', '--show-1fp', dest='GMT_show_1FP', metavar='<FP index>',
type=int, choices=[1, 2], default=False,
help='integer key (1,2), what faultplane shall be shown '
'[%(default)s]')
group_geo.add_argument(
'-V', '--viewpoint', dest='plot_viewpoint',
metavar='<lat,lon,azi>',
help='coordinates of the viewpoint - 3-tuple of angles in degree')
group_geo.add_argument(
'-p', '--projection', dest='GMT_projection',
metavar='<projection>', type=caps,
help='projection of the sphere')
group_show.add_argument(
'-I', '--show-isotropic-part', dest='GMT_plot_isotropic_part',
action='store_true',
help='if isotropic part shall be considered for plotting '
'[%(default)s]')
parser_gmt.set_defaults(call=_call_gmt, build=_build_gmt_dict)
# convert
help = 'convert a mechanism to/in (strike,dip,slip-rake) from/to ' \
'matrix form *or* convert a matrix, vector, tuple into ' \
'different basis representations'
desc = """Tool providing converted input.
Choose between the conversion from/to matrix-moment-tensor
form (-t), the change of the output basis system for a given
moment tensor (-b), or the change of basis for a 3D vector
(-v).
"""
parser_convert = subparsers.add_parser('convert', help=help,
description=desc,
parents=[mechanism])
group_type = parser_convert.add_argument_group('Type conversion')
group_basis = parser_convert.add_argument_group('M conversion')
group_vector = parser_convert.add_argument_group('Vector conversion')
# group_show = parser_convert.add_argument_group('Appearance')
group_type.add_argument(
'-t', '--type', dest='type_conversion',
type=caps, choices=['SDR', 'T'],
help='type conversion - convert to: strike,dip,rake (sdr) or '
'Tensor (T)')
group_basis.add_argument(
'-b', '--basis', dest='basis_conversion',
type=caps, choices=ALLOWED_BASES, nargs=2,
help='basis conversion for M - provide 2 arguments: input and '
'output bases')
group_vector.add_argument(
'-v', '--vector', dest='vector_conversion',
type=caps, choices=ALLOWED_BASES, nargs=2,
help='basis conversion for a vector - provide M as a 3Dvector '
'and 2 option-arguments of -v: input and output bases')
parser_convert.add_argument(
'-y', '--fancy', dest='fancy_conversion', action='store_true',
help='output in a stylish way')
parser_convert.set_defaults(call=_call_convert,
build=_build_convert_dict)
# plot
help = 'plot a beachball projection of the provided mechanism'
desc = """Plots a beachball diagram of the provided mechanism.
Several styles and configurations are available. Also saving
on the fly can be enabled.
"""
parser_plot = subparsers.add_parser('plot', help=help,
description=desc,
parents=[mechanism])
group_save = parser_plot.add_argument_group('Saving')
group_type = parser_plot.add_argument_group('Type of plot')
group_quality = parser_plot.add_argument_group('Quality')
group_colours = parser_plot.add_argument_group('Colours')
group_misc = parser_plot.add_argument_group('Miscellaneous')
group_dc = parser_plot.add_argument_group('Fault planes')
group_geo = parser_plot.add_argument_group('Geometry')
group_app = parser_plot.add_argument_group('Appearance')
group_save.add_argument(
'-f', '--output-file', dest='plot_outfile', metavar='<filename>',
help='filename for saving')
group_type.add_argument(
'-P', '--pa-system', dest='plot_pa_plot', action='store_true',
help='if principal axis system shall be plotted instead')
group_type.add_argument(
'-O', '--full-sphere', dest='plot_full_sphere',
action='store_true',
help='if full sphere shall be plotted instead')
group_geo.add_argument(
'-V', '--viewpoint', dest='plot_viewpoint',
metavar='<lat,lon,azi>',
help='coordinates of the | |
<filename>seaice/data/test/test_getter.py
from datetime import date
from unittest.mock import patch
import copy
import datetime as dt
import os
import unittest
from nose.tools import assert_equals, assert_true, raises
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as pdt
from seaice.data.errors import DateOutOfRangeError
from seaice.data.errors import YearMonthOutOfRangeError
import seaice.data.getter as getter
import seaice.data.gridset_filters as gridset_filters
import seaice.data.locator as locator
from .util import mock_today
import seaice.nasateam as nt
# Root of the on-disk test fixtures: ../../../test_data/seaice.data
TEST_DATA = os.path.join(os.path.dirname(__file__),
                         os.path.pardir, os.path.pardir, os.path.pardir,
                         'test_data', 'seaice.data')
# Sample daily NASA Team concentration files (southern / northern hemisphere).
SOUTH_DAILY_FILE = os.path.join(TEST_DATA, 'nt_19871118_f08_v01_s.bin')
NORTH_DAILY_FILE = os.path.join(TEST_DATA, 'nt_20010107_f13_v01_n.bin')
# Grid cell values used by these tests; 253/254/255 match the coast/land/missing
# flag values seen in nt.FLAGS usage below.
OCEAN = 0
ICE = 1
COAST = 253
LAND = 254
MISSING = 255
# Minimal gridset dict mirroring the {'data', 'metadata'} shape returned by
# seaice.data.getter functions; tests copy and override pieces as needed.
GRIDSET_STUB = {'data': np.array([]), 'metadata': {'period': None,
                                                   'temporality': 'D',
                                                   'period_index': pd.PeriodIndex([], freq='D'),
                                                   'valid_data_range': (0, 100),
                                                   'flags': {},
                                                   'missing_value': None,
                                                   'hemi': 'N',
                                                   'files': []}}
class Test_concentration_daily(unittest.TestCase):
    """Tests for getter.concentration_daily: file lookup, interpolation
    behaviour, and the valid date range (1978-10-26 .. yesterday)."""

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.getter.empty_gridset')
    @patch('os.walk')
    def test_daily_no_file_gets_empty_grid(self, mock_walk, mock_empty_gridset,
                                           mock_get_bad_days_for_hemisphere):
        mock_get_bad_days_for_hemisphere.return_value = []
        # no files found
        mock_walk.return_value = [('/anyroot', [], [])]
        date_ = date(2015, 9, 1)
        hemisphere = nt.NORTH
        search_paths = ['/anyroot']
        # np.int was removed in NumPy 1.24; the builtin is the same dtype.
        mock_empty_gridset.return_value = {
            'data': np.full((448, 304), 255, dtype=int),
            'metadata': {}
        }
        # act
        getter.concentration_daily(hemisphere, date_, search_paths)
        # assert
        getter.empty_gridset.assert_called_with((448, 304), 'D')

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.gridset_filters._interpolate_missing')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    def test_daily_single_file_not_interpolated(self, mock_daily_file_path,
                                                _mockgridset_by_filelist,
                                                mock__interpolate_missing,
                                                mock_get_bad_days_for_hemisphere):
        # A complete single-day result must not trigger interpolation.
        mock_get_bad_days_for_hemisphere.return_value = []
        files = ['files.1_s.bin']
        gridset = {'data': [], 'metadata': {'files': []}}
        mock_daily_file_path.return_value = files
        _mockgridset_by_filelist.return_value = gridset
        mock__interpolate_missing.return_value = []
        date_ = date(2015, 9, 1)
        hemisphere = nt.NORTH
        search_paths = ['/anyroot']
        # act
        getter.concentration_daily(hemisphere, date_, search_paths, 1)
        # assert
        getter._concentration_gridset_by_filelist.assert_called_with(files)
        gridset_filters._interpolate_missing.assert_not_called()

    @mock_today(1995, 11, 24)
    @raises(DateOutOfRangeError)
    def test_daily_throws_error_for_dates_today_or_later(self):
        getter.concentration_daily(nt.NORTH, date(1995, 11, 24), ['/who/cares'])

    @mock_today(1990, 11, 24)
    @raises(DateOutOfRangeError)
    def test_daily_throws_error_for_future_date(self):
        getter.concentration_daily(nt.NORTH, date(1992, 1, 10), ['/who/cares'])

    @raises(DateOutOfRangeError)
    def test_daily_throws_error_before_october_26_1978(self):
        getter.concentration_daily(nt.NORTH, date(1978, 10, 25), ['/who/cares'])

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @mock_today(2014, 11, 24)
    def test_daily_works_with_yesterday(self, mock_get_bad_days_for_hemisphere):
        mock_get_bad_days_for_hemisphere.return_value = []
        actual = getter.concentration_daily(nt.NORTH, date(2014, 11, 23), ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    def test_daily_works_with_october_26_1978(self, mock_get_bad_days_for_hemisphere):
        mock_get_bad_days_for_hemisphere.return_value = []
        actual = getter.concentration_daily(nt.NORTH, date(1978, 10, 26), ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    @patch('seaice.data.gridset_filters._interpolate_missing')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    def test_interpolation_with_skipped_day_in_SMMR_period(self,
                                                           mock_daily_file_path,
                                                           mock__gridset_by_filelist,
                                                           mock__interpolate_missing,
                                                           mock_get_bad_days_for_hemisphere):
        # SMMR acquired data every other day; the metadata should list the
        # neighbouring files actually used, not the (absent) target day.
        mock_get_bad_days_for_hemisphere.return_value = []
        files = ['nt_19810529_n07_v1.1_s.bin',
                 'nt_19810531_n07_v1.1_s.bin']
        gridset = {'data': np.full((2, 2, 2), 4, dtype=int),
                   'metadata': {'files': files}}
        mock_daily_file_path.return_value = files
        mock__gridset_by_filelist.return_value = gridset
        mock__interpolate_missing.return_value = np.full((2, 2), 4, dtype=int)
        interpolation_radius = 1
        nt_hemi = {'short_name': 'N'}
        anydate = dt.date(1981, 5, 30)
        actual_gridset = getter.concentration_daily(nt_hemi,
                                                    anydate,
                                                    ['/anypaths'],
                                                    interpolation_radius=interpolation_radius)
        actual = actual_gridset['metadata']['files']
        expected = ['nt_19810529_n07_v1.1_s.bin', 'nt_19810531_n07_v1.1_s.bin']
        self.assertEqual(actual, expected)
class Test_concentration_daily___failed_qa_logic(unittest.TestCase):
    """Behaviour of concentration_daily for a day flagged as bad by QA."""

    def setUp(self):
        # Three consecutive days of data; the middle (target) day has a
        # missing corner that temporal interpolation could fill.
        # NOTE(review): np.int was removed in NumPy 1.24 — use the builtin
        # int here when this module is modernised.
        self.day_before_grid = np.full(nt.NORTH['shape'], 1, dtype=np.int)
        target_grid = np.full(nt.NORTH['shape'], 2, dtype=np.int)
        target_grid[0:3, 0:3] = nt.FLAGS['missing']
        self.target_grid = target_grid.copy()
        self.day_after_grid = np.full(nt.NORTH['shape'], 11, dtype=np.int)
        self.cube = np.dstack([self.day_before_grid, target_grid, self.day_after_grid])
        # Expected corner value if the neighbours were averaged in.
        target_grid[0:3, 0:3] = (1 + 11) / 2
        self.interpolated_grid = target_grid.copy()
        self.empty_grid = np.full(nt.NORTH['shape'], nt.FLAGS['missing'], dtype=np.int)
        self.target_date = dt.date(1980, 10, 25)
        self.file_list = ['nt_19801024_n07_v1.1_n.bin',
                          'nt_19801025_n07_v1.1_n.bin',
                          'nt_19801026_n07_v1.1_n.bin']

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.locator.daily_file_path')
    @patch('seaice.datastore.get_bad_days_for_hemisphere')
    def test_returns_bad_data_gridset(self,
                                      mock_get_bad_days_for_hemisphere,
                                      mock_daily_file_path,
                                      mock__concentration_gridset_by_filelist):
        # With interpolation disabled (radius 0), a bad day is returned as-is.
        interpolation_radius = 0
        mock_get_bad_days_for_hemisphere.return_value = [pd.Period(self.target_date, 'D')]
        file_list = self.file_list[1:2]
        mock_daily_file_path.return_value = file_list
        gridset = {'data': self.target_grid,
                   'metadata': {'files': file_list}}
        mock__concentration_gridset_by_filelist.return_value = gridset
        actual = getter.concentration_daily(nt.NORTH,
                                            self.target_date,
                                            ['/who/cares'],
                                            interpolation_radius=interpolation_radius)
        expected_grid = self.target_grid
        npt.assert_array_equal(actual['data'], expected_grid)
        expected_files = self.file_list[1:2]
        self.assertEqual(actual['metadata']['files'], expected_files)
class Test_concentration_monthly(unittest.TestCase):
    """Tests for getter.concentration_monthly: choice between final monthly
    files and averaging daily files, plus the valid year/month range."""

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_gets_data_when_at_least_twenty_days_present(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        locator.all_daily_file_paths_for_month.return_value = ['nt_20120901_f08_v01_n.bin'] * 20
        locator.monthly_file_path.return_value = 'nt_201209_f08_v01_n.bin'
        getter.empty_gridset.return_value = None
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.array([1, 2]),
            'metadata': {}
        }
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        # With enough daily files present, the final monthly file is used.
        getter._concentration_gridset_by_filelist.assert_called_with(['nt_201209_f08_v01_n.bin'])

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_gets_data_when_more_than_twenty_files_present_simmr(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        # Same behaviour for a month in the SMMR era (1978-11).
        locator.all_daily_file_paths_for_month.return_value = ['nt_19781101_n07_v01_n.bin'] * 20
        locator.monthly_file_path.return_value = 'nt_197811_n07_v01_n.bin'
        getter.empty_gridset.return_value = None
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.array([1, 2]),
            'metadata': {}
        }
        year = 1978
        month = 11
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter._concentration_gridset_by_filelist.assert_called_with(['nt_197811_n07_v01_n.bin'])
        npt.assert_array_equal(actual['data'], np.ma.array([1, 2]))

    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_uses_daily_for_nrt(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_empty_gridset,
            _mockgridset_by_filelist
    ):
        # No final monthly file: the month is averaged from the daily files.
        daily_files = ['nt_20120915_f08_v01_n.bin'] * 20
        locator.all_daily_file_paths_for_month.return_value = daily_files
        locator.monthly_file_path.return_value = None
        getter.empty_gridset.return_value = None
        day1_grid = np.ma.array([[10., 30.], [50., 60.]])
        day2_grid = np.ma.array([[20., 50.], [80., 100.]])
        getter._concentration_gridset_by_filelist.return_value = {
            'data': np.ma.dstack([day1_grid, day2_grid]),
            'metadata': {'missing_value': 255., 'valid_data_range': (0., 100.)}
        }
        year = 1979
        month = 3
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_monthly(hemisphere, year, month, search_paths)
        # Cell-wise mean of the two daily grids.
        expected = np.ma.array([[15., 40.], [65., 80.]])
        getter._concentration_gridset_by_filelist.assert_called_with(daily_files)
        npt.assert_array_equal(expected, actual['data'])

    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_under_threshold_empty_grid(self, mock_monthly_file_path,
                                                mock_all_daily_file_paths_for_month,
                                                mock_empty_gridset):
        # Monthly file exists but too few daily files -> empty grid.
        locator.all_daily_file_paths_for_month.return_value = []
        locator.monthly_file_path.return_value = 'nt_201209_f08_v01_n.bin'
        getter.empty_gridset.return_value = None
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter.empty_gridset.assert_called_with((448, 304), 'M')

    @patch('seaice.data.getter.empty_gridset')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_missing_empty_grid(self, mock_monthly_file_path,
                                        mock_all_daily_file_paths_for_month,
                                        mock_empty_gridset):
        # Neither monthly nor daily files -> empty grid.
        locator.all_daily_file_paths_for_month.return_value = []
        locator.monthly_file_path.return_value = None
        getter.empty_gridset.return_value = None
        year = 2012
        month = 9
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths)
        getter.empty_gridset.assert_called_with((448, 304), 'M')

    @patch('seaice.nasateam.LAST_DAY_WITH_VALID_FINAL_DATA', date(2005, 4, 30))
    @patch('seaice.data.getter._concentration_average_gridset_from_daily_filelist')
    @patch('seaice.data.getter._concentration_gridset_by_filelist')
    @patch('seaice.data.getter.double_weight_smmr_files')
    @patch('seaice.data.locator.all_daily_file_paths_for_month')
    @patch('seaice.data.locator.monthly_file_path')
    def test_monthly_uses_daily_when_final_month_is_outside_of_valid_final_data(
            self,
            mock_monthly_file_path,
            mock_all_daily_file_paths_for_month,
            mock_double_weight_smmr_files,
            mock__concentration_gridset_by_filelist,
            mock__concentration_average_gridset_from_daily_filelist
    ):
        # A final monthly file after LAST_DAY_WITH_VALID_FINAL_DATA must be
        # ignored in favour of averaging the daily files.
        daily_files = ['some', 'daily', 'files']
        mock_monthly_file_path.return_value = ['final_monthly_file']
        mock_all_daily_file_paths_for_month.return_value = daily_files
        mock_double_weight_smmr_files.return_value = daily_files
        mock__concentration_gridset_by_filelist.return_value = {'data': np.array([]),
                                                                'metadata': {}}
        hemisphere = nt.NORTH
        year = 2005
        month = 5
        search_paths = ['wherever']
        getter.concentration_monthly(hemisphere, year, month, search_paths, 3)
        # technically _concentration_gridset_by_filelist is called by
        # _concentration_average_gridset_from_daily_filelist, but here they are
        # both mocked, so they return right away and we can only worry about
        # which of these two functions concentration_monthly() calls directly
        getter._concentration_gridset_by_filelist.assert_not_called()
        getter._concentration_average_gridset_from_daily_filelist.assert_called_with(daily_files)

    @mock_today(1995, 11, 24)
    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_for_current_month(self):
        getter.concentration_monthly(nt.NORTH, 1995, 11, ['/who/cares'])

    @mock_today(2014, 11, 24)
    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_for_future_month(self):
        getter.concentration_monthly(nt.NORTH, 2014, 12, ['/who/cares'])

    @mock_today(2014, 11, 24)
    def test_monthly_works_with_last_month(self):
        actual = getter.concentration_monthly(nt.NORTH, 2014, 10, ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    def test_monthly_works_with_october_1978(self):
        actual = getter.concentration_monthly(nt.NORTH, 1978, 10, ['/who/cares'])
        assert_equals(actual['data'].shape, (448, 304))

    @raises(YearMonthOutOfRangeError)
    def test_monthly_throws_error_before_october_1978(self):
        getter.concentration_monthly(nt.NORTH, 1978, 9, ['/who/cares'])
class Test_concentration_seasonal(unittest.TestCase):
    """Tests for getter.concentration_seasonal: three-month averaging,
    metadata assembly, and handling of missing months."""

    @patch('seaice.data.getter.concentration_monthly')
    def test_metadata(self, _mock_concentration_monthly):
        # One mocked monthly gridset per month of the season.
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201209_f08_v01_n.bin']}
            },
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201210_f08_v01_n.bin']}
            },
            {
                'data': np.ma.array([]),
                'metadata': {'files': ['nt_201211_f08_v01_n.bin']}
            }
        ]
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        # Seasonal metadata keeps the monthly file lists grouped per month.
        expected_metadata = {
            'files': [['nt_201209_f08_v01_n.bin'],
                      ['nt_201210_f08_v01_n.bin'],
                      ['nt_201211_f08_v01_n.bin']],
            'temporality': 'seasonal',
            'hemi': 'N',
            'season': (2012, (9, 10, 11)),
            'search_paths': ['wherever'],
            'valid_data_range': (0.0, 100.0),
            'missing_value': 255,
            'flags': {
                'pole': 251,
                'unused': 252,
                'coast': 253,
                'land': 254
            }
        }
        for key, expected_value in expected_metadata.items():
            self.assertEqual(actual['metadata'][key], expected_value)

    @patch('seaice.data.getter.concentration_monthly')
    def test_averages_monthly_data(self, _mock_concentration_monthly):
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[5, 7],
                                     [5, 7]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[9, 3.5],
                                     [9, 3.5]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6],
                                     [10, 6]]),
                'metadata': {'files': []}
            }
        ]
        # Cell-wise mean of the three monthly grids.
        expected_data = np.array([[8, 5.5],
                                  [8, 5.5]])
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 9, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 10, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 2012, 11, ['wherever'], 20
        )
        npt.assert_array_equal(actual['data'], expected_data)

    @patch('seaice.data.getter.concentration_monthly')
    def test_uses_december_from_previous_year(self, _mock_concentration_monthly):
        # A (12, 1, 2) season labelled 2012 starts with December 2011.
        getter.concentration_monthly.return_value = {
            'data': np.ma.array([[]]),
            'metadata': {'files': []}
        }
        year = 2012
        months = (12, 1, 2)
        hemisphere = nt.SOUTH
        search_paths = ['wherever']
        min_days_for_valid_month = 20
        getter.concentration_seasonal(hemisphere,
                                      year,
                                      months,
                                      search_paths,
                                      min_days_for_valid_month)
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2011, 12, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2012, 1, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.SOUTH, 2012, 2, ['wherever'], 20
        )

    @patch('seaice.data.getter.concentration_monthly')
    def test_does_not_average_missing_but_fills_with_flags(self, _mock_concentration_monthly):
        # A fully-missing month (255) is excluded from the average; flag
        # values (251 pole) carry through to the seasonal grid.
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[9, 5, 251]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6, 251]]),
                'metadata': {'files': []}
            }
        ]
        year = 2012
        months = (9, 10, 11)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        expected_data = np.array([[9.5, 5.5, 251]])
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        npt.assert_array_equal(actual['data'], expected_data)

    @patch('seaice.data.getter.concentration_monthly')
    def test_takes_values_from_one_month_if_others_are_missing(
            self,
            _mock_concentration_monthly
    ):
        getter.concentration_monthly.side_effect = [
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[255, 255, 255]]),
                'metadata': {'files': []}
            },
            {
                'data': np.ma.array([[10, 6, 7]]),
                'metadata': {'files': []}
            }
        ]
        year = 1988
        months = (12, 1, 2)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        expected_data = np.array([[10, 6, 7]])
        actual = getter.concentration_seasonal(hemisphere, year, months, search_paths)
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1987, 12, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1988, 1, ['wherever'], 20
        )
        getter.concentration_monthly.assert_any_call(
            nt.NORTH, 1988, 2, ['wherever'], 20
        )
        npt.assert_array_equal(actual['data'], expected_data)
class Test_concentration_seasonal_over_years(unittest.TestCase):
    """Tests for getter.concentration_seasonal_over_years."""

    @patch('seaice.data.getter.concentration_seasonal')
    def test_calls_concentration_seasonal_for_every_year_inclusive(
            self,
            _mock_concentration_seasonal
    ):
        months = (12, 1, 2)
        hemisphere = nt.NORTH
        search_paths = ['wherever']
        min_valid_days = 20
        # Both endpoints of the year range should be used (inclusive).
        years = [1980, 1981, 1982, 1983, 1984,
                 1985, 1986, 1987, 1988, 1989,
                 1990, 1991, 1992, 1993, 1994,
                 1995, 1996, 1997, 1998, 1999,
                 2000]
        start_year = years[0]
        end_year = years[-1]
        getter.concentration_seasonal_over_years(
            hemisphere, start_year, end_year, months, search_paths, min_valid_days
        )
| |
from __future__ import division
import numpy as np
import pandas as pd
from sklearn import metrics
import lightgbm as lgb
import time
from multiprocessing import cpu_count
import warnings
from sklearn.cross_validation import train_test_split
warnings.filterwarnings('ignore')
# Constants define
# Root directory containing the competition input files.
ROOT_PATH = './'
# Toggle between offline evaluation and online-submission mode.
ONLINE = 1
# Name of the label column in the training data.
target = 'label'
# Number of labelled training rows — presumably the count of uids in
# uid_train.txt; TODO confirm against the data files.
train_len = 4999
# Probability cut-off used to binarize predictions (see eval_auc_f1).
threshold = 0.5
########################################### Helper function ###########################################
def log(info):
    """Print *info* prefixed with the current local timestamp."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('{0} {1}'.format(stamp, str(info)))
def merge_feat_count(df, df_feat, columns_groupby, new_column_name, type='int'):
    """Left-join the group sizes of *df_feat* onto *df*.

    Returns the merged frame and the list of added column names.
    """
    counts = pd.DataFrame(df_feat.groupby(columns_groupby).size())
    counts = counts.fillna(0).astype(type).reset_index()
    counts.columns = columns_groupby + [new_column_name]
    merged = df.merge(counts, on=columns_groupby, how='left')
    return merged, [new_column_name]
def merge_feat_onehot_count(df, df_feat, columns_groupby, prefix, type='int'):
    """One-hot-style counts: pivot the second groupby key into columns.

    New columns are named '<prefix>_<value>'.  Returns the merged frame
    and the list of added column names.
    """
    key = columns_groupby[0]
    pivot = df_feat.groupby(columns_groupby).size().unstack()
    pivot = pivot.fillna(0).astype(type).reset_index()
    pivot.columns = [c if c == key else prefix + '_' + str(c)
                     for c in pivot.columns]
    merged = df.merge(pivot, on=key, how='left')
    return merged, list(pivot.columns[1:])
def merge_feat_nunique(df, df_feat, columns_groupby, column, new_column_name, type='int'):
    """Left-join the per-group count of distinct *column* values onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].nunique())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_min(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group minimum of *column* onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].min())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_max(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group maximum of *column* onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].max())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_mean(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group mean of *column* onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].mean())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_std(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group sample std-dev of *column* onto *df*.

    Single-member groups yield NaN from pandas' std and are filled with 0.
    """
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].std())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_median(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group median of *column* onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].median())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def merge_feat_sum(df, df_feat, columns_groupby, column, new_column_name, type='float'):
    """Left-join the per-group sum of *column* onto *df*."""
    agg = pd.DataFrame(df_feat.groupby(columns_groupby)[column].sum())
    agg = agg.fillna(0).astype(type).reset_index()
    agg.columns = columns_groupby + [new_column_name]
    return df.merge(agg, on=columns_groupby, how='left'), [new_column_name]
def eval_auc_f1(preds, dtrain):
    """Custom LightGBM eval metric: 0.6 * AUC + 0.4 * F1.

    Predictions are binarized at the module-level ``threshold`` for the F1
    component.  Returns the (name, value, is_higher_better) triple that
    LightGBM expects from an ``feval`` callable.
    """
    frame = pd.DataFrame({'y_true': dtrain.get_label(), 'y_score': preds})
    frame['y_pred'] = (frame['y_score'] >= threshold).astype(int)
    auc = metrics.roc_auc_score(frame.y_true, frame.y_score)
    f1 = metrics.f1_score(frame.y_true, frame.y_pred)
    return 'feval', (auc * 0.6 + f1 * 0.4), True
def lgb_cv(train_x, train_y, params, rounds, folds):
    """Run LightGBM cross-validation with the custom feval metric.

    Returns (best_round, final feval mean, final auc mean), where the
    final entries of the cv history correspond to the stopping iteration.
    """
    # time.clock() was removed in Python 3.8; prefer perf_counter when
    # available and fall back gracefully on older interpreters.
    timer = getattr(time, 'perf_counter', getattr(time, 'clock', time.time))
    start = timer()
    log(str(train_x.columns))
    dtrain = lgb.Dataset(train_x, label=train_y)
    log('run cv: ' + 'round: ' + str(rounds))
    res = lgb.cv(params, dtrain, rounds, nfold=folds,
                 metrics=['eval_auc_f1', 'auc'], feval=eval_auc_f1,
                 early_stopping_rounds=200, verbose_eval=5)
    elapsed = timer() - start
    log('Time used:' + str(elapsed) + 's')
    best_round = len(res['feval-mean'])
    return best_round, res['feval-mean'][-1], res['auc-mean'][-1]
def lgb_train_predict(train_x, train_y, test_x, params, rounds):
    """Train a LightGBM model on the full training set and score *test_x*.

    Returns (fitted model, predicted probabilities for test_x).
    """
    # time.clock() was removed in Python 3.8; prefer perf_counter when
    # available and fall back gracefully on older interpreters.
    timer = getattr(time, 'perf_counter', getattr(time, 'clock', time.time))
    start = timer()
    log(str(train_x.columns))
    dtrain = lgb.Dataset(train_x, label=train_y)
    # Evaluate on the training set itself during training (no holdout here).
    valid_sets = [dtrain]
    model = lgb.train(params, dtrain, rounds, valid_sets, feval=eval_auc_f1, verbose_eval=5)
    pred = model.predict(test_x)
    elapsed = timer() - start
    log('Time used:' + str(elapsed) + 's')
    return model, pred
def store_result(test_index, pred, threshold, name):
    """Write a headerless submission CSV (uid, label) under ./0606V1/.

    Rows are sorted by descending probability; label is 1 where prob is
    strictly greater than *threshold*. Returns the sorted result frame.
    """
    result = (pd.DataFrame({'uid': test_index, 'prob': pred})
              .sort_values('prob', ascending=False))
    result['label'] = (result['prob'] > threshold).astype(int)
    result.to_csv('./0606V1/' + name + '.csv', index=0, header=0,
                  columns=['uid', 'label'])
    return result
########################################### Read data ###########################################
# All inputs are headerless, tab-separated text files located under ROOT_PATH.
train = pd.read_csv(ROOT_PATH + '/uid_train.txt', header=None, sep='\t')
train.columns = ['uid', 'label']
train_voice = pd.read_csv(ROOT_PATH + '/voice_train.txt', header=None, sep='\t')
train_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
train_sms = pd.read_csv(ROOT_PATH + '/sms_train.txt', header=None, sep='\t')
train_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
train_wa = pd.read_csv(ROOT_PATH + '/wa_train.txt', header=None, sep='\t')
train_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
# Test uids are the fixed range u7000..u9999; there is no label file for them.
test = pd.DataFrame({'uid': ['u' + str(i) for i in range(7000, 10000)]})
test_voice = pd.read_csv(ROOT_PATH + '/voice_test_b.txt', header=None, sep='\t')
test_voice.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'end_time', 'call_type', 'in_out']
test_sms = pd.read_csv(ROOT_PATH + '/sms_test_b.txt', header=None, sep='\t')
test_sms.columns = ['uid', 'opp_num', 'opp_head', 'opp_len', 'start_time', 'in_out']
test_wa = pd.read_csv(ROOT_PATH + '/wa_test_b.txt', header=None, sep='\t')
test_wa.columns = ['uid', 'wa_name', 'visit_cnt', 'visit_dura', 'up_flow', 'down_flow', 'wa_type', 'date']
############################## Next I will sperate the start date of voice (date and hour) ################################
# start_time looks like a ddhhmmss digit string: chars [0:2] = day, [2:4] = hour.
# NOTE(review): Series.as_matrix() was removed in pandas 1.0 — .to_numpy() is
# the modern equivalent; left unchanged here.
# NOTE(review): the duration code below pads 7/6-char values, which suggests
# leading zeros may be missing here too, in which case these fixed slices
# would be misaligned — verify the input format.
time_feature = test_voice['start_time']
time_feature = time_feature.as_matrix()
voice_start_date = []
voice_start_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    voice_start_date.append(tmp[0:2])
    voice_start_hour.append(tmp[2:4])
voice_start_date = pd.DataFrame(voice_start_date)
voice_start_hour = pd.DataFrame(voice_start_hour)
voice_start_date.columns = ['voice_start_date']
voice_start_hour.columns = ['voice_start_hour']
test_voice['voice_start_date'] = voice_start_date
test_voice['voice_start_hour'] = voice_start_hour
# Same transformation for the training voice records.
time_feature = train_voice['start_time']
time_feature = time_feature.as_matrix()
voice_start_date = []
voice_start_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    voice_start_date.append(tmp[0:2])
    voice_start_hour.append(tmp[2:4])
voice_start_date = pd.DataFrame(voice_start_date)
voice_start_hour = pd.DataFrame(voice_start_hour)
voice_start_date.columns = ['voice_start_date']
voice_start_hour.columns = ['voice_start_hour']
train_voice['voice_start_date'] = voice_start_date
train_voice['voice_start_hour'] = voice_start_hour
############################## Next I will sperate the start date of sms (date and hour) ################################
# Split the sms start_time digit string into day ([0:2]) and hour ([2:4])
# columns, for the test set and then the train set.
time_feature = test_sms['start_time']
time_feature = time_feature.as_matrix()
sms_start_date = []
sms_start_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    #print(tmp)
    sms_start_date.append(tmp[0:2])
    sms_start_hour.append(tmp[2:4])
sms_start_date = pd.DataFrame(sms_start_date)
sms_start_hour = pd.DataFrame(sms_start_hour)
sms_start_date.columns = ['sms_start_date']
sms_start_hour.columns = ['sms_start_hour']
test_sms['sms_start_date'] = sms_start_date
test_sms['sms_start_hour'] = sms_start_hour
# Same transformation for the training sms records.
time_feature = train_sms['start_time']
time_feature = time_feature.as_matrix()
sms_start_date = []
sms_start_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    #print(tmp)
    sms_start_date.append(tmp[0:2])
    sms_start_hour.append(tmp[2:4])
sms_start_date = pd.DataFrame(sms_start_date)
sms_start_hour = pd.DataFrame(sms_start_hour)
sms_start_date.columns = ['sms_start_date']
sms_start_hour.columns = ['sms_start_hour']
train_sms['sms_start_date'] = sms_start_date
train_sms['sms_start_hour'] = sms_start_hour
############################## Next I will sperate the end date of voice(date and hour) ################################
# Split the voice end_time digit string into day ([0:2]) and hour ([2:4])
# columns, for the test set and then the train set.
time_feature = test_voice['end_time']
time_feature = time_feature.as_matrix()
voice_end_date = []
voice_end_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    #print(tmp)
    voice_end_date.append(tmp[0:2])
    voice_end_hour.append(tmp[2:4])
voice_end_date = pd.DataFrame(voice_end_date)
voice_end_hour = pd.DataFrame(voice_end_hour)
voice_end_date.columns = ['voice_end_date']
voice_end_hour.columns = ['voice_end_hour']
test_voice['voice_end_date'] = voice_end_date
test_voice['voice_end_hour'] = voice_end_hour
# Same transformation for the training voice records.
time_feature = train_voice['end_time']
time_feature = time_feature.as_matrix()
voice_end_date = []
voice_end_hour = []
print(np.size(time_feature,0))
for i in range(np.size(time_feature,0)):
    tmp = str(time_feature[i])
    #print(tmp)
    voice_end_date.append(tmp[0:2])
    voice_end_hour.append(tmp[2:4])
voice_end_date = pd.DataFrame(voice_end_date)
voice_end_hour = pd.DataFrame(voice_end_hour)
voice_end_date.columns = ['voice_end_date']
voice_end_hour.columns = ['voice_end_hour']
train_voice['voice_end_date'] = voice_end_date
train_voice['voice_end_hour'] = voice_end_hour
############################## Next I will sperate the duration of train_voice (date and hour) ################################
# Convert train_voice start/end times (dd hh mm ss digit strings) to absolute
# seconds since the start of the month, then derive a min-max-normalised call
# duration column. Shorter strings are assumed to have lost leading zeros and
# are left-padded ('0' for 7 chars, '00' otherwise).
# NOTE(review): the final else pads exactly two zeros, i.e. assumes len == 6;
# any shorter value would still mis-parse — confirm the input domain.
duraVStart = train_voice['start_time']
duraVStart = duraVStart.as_matrix()
print(type(duraVStart))
duraSec = np.zeros(np.size(duraVStart,0))
for index in range(np.size(duraVStart,0)):
    duraVStartt = str(duraVStart[index])
    if len(duraVStartt) == 8:
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    elif len(duraVStartt) == 7:
        duraVStartt = '0'+ duraVStartt
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    else:
        duraVStartt = '00'+ duraVStartt
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    duraSec[index] = ddSec+hhSec+mmSec+ssSec
print(duraSec)
# Same conversion for end times.
duraVEnd = train_voice['end_time']
duraVEnd = duraVEnd.as_matrix()
print(type(duraVEnd))
duraSec1 = np.zeros(np.size(duraVEnd,0))
for index in range(np.size(duraVEnd,0)):
    duraVEndd = str(duraVEnd[index])
    if len(duraVEndd) == 8:
        duraVEndd = duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    elif len(duraVEndd) == 7:
        duraVEndd = '0' + duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    else:
        duraVEndd = '00' + duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    duraSec1[index] = ddSec+hhSec+mmSec+ssSec
print(duraSec1)
duration = np.zeros(np.size(duraSec1,0))
for i in range(np.size(duraSec1,0)):
    if duraSec1[i] >= duraSec[i]:
        duration[i] = duraSec1[i] - duraSec[i]
    else:
        # End before start: presumably the call wrapped past midnight, so one
        # day (86400 s) is added — TODO confirm this interpretation.
        print('1..',duraSec1[i],'2..',duraSec[i])
        duration[i] = duraSec1[i]+86400-duraSec[i]
# Min-max normalise the duration to [0, 1].
maxx = duration.max()
minn = duration.min()
meann = duration.mean()
print('maxx..',maxx,'minn..',minn, 'mean..',meann)
duration = (duration-minn)/(maxx-minn)
print(duration)
duration = pd.DataFrame(duration)
train_voice['duration'] = duration
############################## Next I will sperate the duration of test_voice (date and hour) ################################
# Same pipeline as the preceding train_voice duration section, applied to the
# test set: parse ddhhmmss start/end strings to seconds, compute durations
# (wrapping past midnight when end < start), then min-max normalise.
duraVStart = test_voice['start_time']
duraVStart = duraVStart.as_matrix()
print(type(duraVStart))
duraSec = np.zeros(np.size(duraVStart,0))
for index in range(np.size(duraVStart,0)):
    duraVStartt = str(duraVStart[index])
    if len(duraVStartt) == 8:
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    elif len(duraVStartt) == 7:
        duraVStartt = '0'+ duraVStartt
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    else:
        duraVStartt = '00'+ duraVStartt
        ddSec = int(duraVStartt[:2])*86400
        hhSec = int(duraVStartt[2:4])*3600
        mmSec = int(duraVStartt[4:6])*60
        ssSec = int(duraVStartt[6:8])
    duraSec[index] = ddSec+hhSec+mmSec+ssSec
print(duraSec)
duraVEnd = test_voice['end_time']
duraVEnd = duraVEnd.as_matrix()
print(type(duraVEnd))
duraSec1 = np.zeros(np.size(duraVEnd,0))
for index in range(np.size(duraVEnd,0)):
    duraVEndd = str(duraVEnd[index])
    if len(duraVEndd) == 8:
        duraVEndd = duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    elif len(duraVEndd) == 7:
        duraVEndd = '0' + duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    else:
        duraVEndd = '00' + duraVEndd
        ddSec = int(duraVEndd[:2])*86400
        hhSec = int(duraVEndd[2:4])*3600
        mmSec = int(duraVEndd[4:6])*60
        ssSec = int(duraVEndd[6:8])
    duraSec1[index] = ddSec+hhSec+mmSec+ssSec
print(duraSec1)
duration = np.zeros(np.size(duraSec1,0))
for i in range(np.size(duraSec1,0)):
    if duraSec1[i] >= duraSec[i]:
        duration[i] = duraSec1[i] - duraSec[i]
    else:
        print('1..',duraSec1[i],'2..',duraSec[i])
        duration[i] = duraSec1[i]+86400-duraSec[i]
maxx = duration.max()
minn = duration.min()
meann = duration.mean()
print('maxx..',maxx,'minn..',minn, 'mean..',meann)
duration = (duration-minn)/(maxx-minn)
print(duration)
duration = pd.DataFrame(duration)
test_voice['duration'] = duration
############################## Normalize visist_cnt and visit_dura ################################
# Min-max normalise visit_cnt and visit_dura (independently for train and
# test), then rescale to [0, 10000].
test_visit_cnt_normal = test_wa['visit_cnt']
maxx = test_visit_cnt_normal.max()
minn = test_visit_cnt_normal.min()
meann = test_visit_cnt_normal.mean()
print('maxx..', maxx, 'minn..', minn, 'mean..', meann)
test_visit_cnt_normal = (test_visit_cnt_normal - minn) / (maxx - minn)
test_visit_cnt_normal = test_visit_cnt_normal * 10000
test_wa['visit_cnt'] = test_visit_cnt_normal
train_visit_cnt_normal = train_wa['visit_cnt']
maxx = train_visit_cnt_normal.max()
minn = train_visit_cnt_normal.min()
meann = train_visit_cnt_normal.mean()
print('maxx..', maxx, 'minn..', minn, 'mean..', meann)
train_visit_cnt_normal = (train_visit_cnt_normal - minn) / (maxx - minn)
# BUG FIX: this line previously assigned to a misspelled name
# ('trainvisit_cnt_normal'), so the train visit_cnt was written back WITHOUT
# the x10000 scaling applied to the test set — an inconsistent feature scale.
train_visit_cnt_normal = train_visit_cnt_normal * 10000
train_wa['visit_cnt'] = train_visit_cnt_normal
train_visit_dura_normal = train_wa['visit_dura']
maxx = train_visit_dura_normal.max()
minn = train_visit_dura_normal.min()
meann = train_visit_dura_normal.mean()
print('maxx..', maxx, 'minn..', minn, 'mean..', meann)
train_visit_dura_normal = (train_visit_dura_normal - minn) / (maxx - minn)
train_visit_dura_normal = train_visit_dura_normal * 10000
train_wa['visit_dura'] = train_visit_dura_normal
test_visit_dura_normal = test_wa['visit_dura']
maxx = test_visit_dura_normal.max()
minn = test_visit_dura_normal.min()
meann = test_visit_dura_normal.mean()
print('maxx..', maxx, 'minn..', minn, 'mean..', meann)
test_visit_dura_normal = (test_visit_dura_normal - minn) / (maxx - minn)
test_visit_dura_normal = test_visit_dura_normal * 10000
test_wa['visit_dura'] = test_visit_dura_normal
#################################################### Feature engineer ###########################################
# Stack train and test so every feature is computed uniformly over both.
df = pd.concat([train, test]).reset_index(drop=True)
df_voice = pd.concat([train_voice, test_voice]).reset_index(drop=True)
df_sms = pd.concat([train_sms, test_sms]).reset_index(drop=True)
df_wa = pd.concat([train_wa, test_wa]).reset_index(drop=True)
predictors = []
#-------------------------------------------- To get wa_name_number freature ---------------------------------------------
# Encode each wa_name as its index in a precomputed 15160-entry name list;
# missing names (NaN filled with 0) get the out-of-vocabulary code 15161.
wa_name = df_wa['wa_name'].fillna(0)
wa_name = np.array(wa_name)
Size = np.size(wa_name,0)
wa_name_set = pd.read_csv('./wa_name_set_strr.csv', header=None, sep='\t')
wa_name_set = np.array(wa_name_set)
wa_name_set = np.reshape(wa_name_set,(15160,))
print(wa_name_set[0])
wa_name_set = np.array(wa_name_set).tolist()
#print(wa_name_set.shape)
wa_name_number = np.zeros(Size)
for i in range(Size):
    cur_wa_name = wa_name[i]
    # NOTE(review): printing every row and using list.index (O(n) per lookup)
    # is very slow for large frames; a dict {name: idx} would be O(1).
    print(cur_wa_name)
    if(cur_wa_name!=0 and cur_wa_name!='00934'):
        wa_name_number[i] = wa_name_set.index(cur_wa_name)
    elif cur_wa_name == '00934':
        # NOTE(review): this branch produces the same result the first branch
        # would for '00934'; presumably a leftover special case — confirm.
        wa_name_number[i] = wa_name_set.index('00934')
    else:
        wa_name_number[i] = 15161
df_wa['wa_name_number'] = wa_name_number
#------------------------------------------------------------- To get uid --------------------------------------------
# Per-uid record counts from each log source; each call left-joins a count
# column onto df and appends the new column name to predictors.
df, predictors_tmp = merge_feat_count(df, df_voice, ['uid'], 'count_gb_uid_in_voice'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_sms, ['uid'], 'count_gb_uid_in_sms'); predictors += predictors_tmp
df, predictors_tmp = merge_feat_count(df, df_wa, ['uid'], 'count_gb_uid_in_wa'); predictors += predictors_tmp
#---------------------------------------------------- To get one-hot feature ------------------------------------------
df, predictors_tmp | |
call signatures
if int(vim_eval("g:jedi#show_call_signatures")) == 2:
vim_command('echo ""')
return
cursor = vim.current.window.cursor
e = vim_eval('g:jedi#call_signature_escape')
# We need two turns here to search and replace certain lines:
# 1. Search for a line with a call signature and save the appended
# characters
# 2. Actually replace the line and redo the status quo.
py_regex = r'%sjedi=([0-9]+), (.*?)%s.*?%sjedi%s'.replace(
'%s', re.escape(e))
for i, line in enumerate(vim.current.buffer):
match = re.search(py_regex, line)
if match is not None:
# Some signs were added to minimize syntax changes due to call
# signatures. We have to remove them again. The number of them is
# specified in `match.group(1)`.
after = line[match.end() + int(match.group(1)):]
line = line[:match.start()] + match.group(2) + after
vim.current.buffer[i] = line
vim.current.window.cursor = cursor
# Module-level call-signature cache state.
cached_signatures = None  # last call_signatures() result, or None when cleared
cached_line = -1  # NOTE(review): not referenced in the visible code — confirm use
show_signature = True  # toggled off by begin_completion() to suppress display
@catch_and_print_exceptions
def show_cache_signatures():
    """Re-enable signature display and redraw the cached signatures, if any."""
    global show_signature
    show_signature = True
    if cached_signatures is not None:
        show_call_signatures(cached_signatures)
@catch_and_print_exceptions
def clear_cache_signatures():
    """Drop the signature cache and wipe any signatures drawn in the buffer."""
    global cached_signatures
    cached_signatures = None
    clear_call_signatures()
def begin_completion():
    """Suppress call-signature display while a completion is in progress."""
    global show_signature
    show_signature = False
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def show_call_signatures(signatures=()):
    """Display jedi call signatures for the current cursor position.

    With g:jedi#show_call_signatures == 2 the signatures go to the command
    line; otherwise each signature is written into the buffer line above the
    call, wrapped in escape markers that the syntax file conceals. Falls back
    to the cached signatures when available, and returns early when display
    is suppressed or conceal support is missing.
    """
    global show_signature
    if show_signature is False:
        return
    global cached_signatures
    line = vim_eval("getline('.')")
    if cached_signatures is not None and len(line.strip()) > 2:
        if len(cached_signatures) > 0:
            signatures = cached_signatures
    if int(vim_eval("has('conceal') && g:jedi#show_call_signatures")) == 0:
        return
    if signatures == ():
        signatures = get_script().call_signatures()
        cached_signatures = signatures
    clear_call_signatures()
    if not signatures:
        return
    if int(vim_eval("g:jedi#show_call_signatures")) == 2:
        return cmdline_call_signatures(signatures)
    seen_sigs = []
    for i, signature in enumerate(signatures):
        line, column = signature.bracket_start
        # signatures are listed above each other
        line_to_replace = line - i - 1
        # because there's a space before the bracket
        insert_column = column - 1
        if insert_column < 0 or line_to_replace <= 0:
            # Edge cases, when the call signature has no space on the screen.
            break
        # TODO check if completion menu is above or below
        line = vim_eval("getline(%s)" % line_to_replace)
        # Descriptions are usually looking like `param name`, remove the param.
        params = [p.description.replace('\n', '').replace('param ', '', 1)
                  for p in signature.params]
        try:
            # *_*PLACEHOLDER*_* makes something fat. See after/syntax file.
            params[signature.index] = '*_*%s*_*' % params[signature.index]
        except (IndexError, TypeError):
            pass
        # Skip duplicates.
        if params in seen_sigs:
            continue
        seen_sigs.append(params)
        # This stuff is reaaaaally a hack! I cannot stress enough, that
        # this is a stupid solution. But there is really no other yet.
        # There is no possibility in VIM to draw on the screen, but there
        # will be one (see :help todo Patch to access screen under Python.
        # (<NAME>, 2010 Jul 18))
        text = " (%s) " % ', '.join(params)
        text = ' ' * (insert_column - len(line)) + text
        end_column = insert_column + len(text) - 2  # -2 due to bold symbols
        # Need to decode it with utf8, because vim returns always a python 2
        # string even if it is unicode.
        e = vim_eval('g:jedi#call_signature_escape')
        if hasattr(e, 'decode'):
            e = e.decode('UTF-8')
        # replace line before with cursor
        regex = "xjedi=%sx%sxjedix".replace('x', e)
        prefix, replace = line[:insert_column], line[insert_column:end_column]
        # Check the replace stuff for strings, to append them
        # (don't want to break the syntax)
        regex_quotes = r'''\\*["']+'''
        # `add` are all the quotation marks.
        # join them with a space to avoid producing '''
        add = ' '.join(re.findall(regex_quotes, replace))
        # search backwards
        if add and replace[0] in ['"', "'"]:
            a = re.search(regex_quotes + '$', prefix)
            add = ('' if a is None else a.group(0)) + add
        tup = '%s, %s' % (len(add), replace)
        repl = prefix + (regex % (tup, text)) + add + line[end_column:]
        vim_eval('setline(%s, %s)' % (line_to_replace, repr(PythonToVimStr(repl))))
@catch_and_print_exceptions
def cmdline_call_signatures(signatures):
    """Render call signatures in Vim's command line.

    Used when g:jedi#show_call_signatures == 2. Elides the left/right/center
    parameter groups with ELLIPSIS until the message fits in the available
    columns (avoiding hit-enter prompts), then echoes the signature with the
    current parameter highlighted.
    """
    def get_params(s):
        # Descriptions look like `param name`; strip the leading 'param '.
        return [p.description.replace('\n', '').replace('param ', '', 1) for p in s.params]
    def escape(string):
        return string.replace('"', '\\"').replace(r'\n', r'\\n')
    def join():
        return ', '.join(filter(None, (left, center, right)))
    def too_long():
        return len(join()) > max_msg_len
    if len(signatures) > 1:
        # Several overloads: show the i-th params of each, column-aligned.
        params = zip_longest(*map(get_params, signatures), fillvalue='_')
        params = ['(' + ', '.join(p) + ')' for p in params]
    else:
        params = get_params(signatures[0])
    index = next(iter(s.index for s in signatures if s.index is not None), None)
    # Allow 12 characters for showcmd plus 18 for ruler - setting
    # noruler/noshowcmd here causes incorrect undo history
    max_msg_len = int(vim_eval('&columns')) - 12
    if int(vim_eval('&ruler')):
        max_msg_len -= 18
    max_msg_len -= len(signatures[0].name) + 2  # call name + parentheses
    if max_msg_len < (1 if params else 0):
        return
    elif index is None:
        text = escape(', '.join(params))
        if params and len(text) > max_msg_len:
            text = ELLIPSIS
        elif max_msg_len < len(ELLIPSIS):
            return
    else:
        left = escape(', '.join(params[:index]))
        center = escape(params[index])
        right = escape(', '.join(params[index + 1:]))
        while too_long():
            if left and left != ELLIPSIS:
                left = ELLIPSIS
                continue
            if right and right != ELLIPSIS:
                right = ELLIPSIS
                continue
            if (left or right) and center != ELLIPSIS:
                left = right = None
                center = ELLIPSIS
                continue
            if too_long():
                # Should never reach here
                return
    max_num_spaces = max_msg_len
    if index is not None:
        max_num_spaces -= len(join())
    _, column = signatures[0].bracket_start
    spaces = min(int(vim_eval('g:jedi#first_col +'
                              'wincol() - col(".")')) +
                 column - len(signatures[0].name),
                 max_num_spaces) * ' '
    if index is not None:
        vim_command('          echon "%s" | '
                    'echohl Function | echon "%s" | '
                    'echohl None | echon "(" | '
                    'echohl jediFunction | echon "%s" | '
                    'echohl jediFat | echon "%s" | '
                    'echohl jediFunction | echon "%s" | '
                    'echohl None | echon ")"'
                    % (spaces, signatures[0].name,
                       left + ', ' if left else '',
                       center, ', ' + right if right else ''))
    else:
        vim_command('          echon "%s" | '
                    'echohl Function | echon "%s" | '
                    'echohl None | echon "(%s)"'
                    % (spaces, signatures[0].name, text))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def rename():
    """Two-phase interactive rename, driven from Vim.

    First call (no Vim args): saves cursor and undo state, deletes the word
    under the cursor, enters insert mode, and installs an InsertLeave
    autocommand that re-invokes jedi#rename with the saved state.
    Second call (args present, from that autocommand): reads what the user
    typed, undoes the temporary edit, restores the cursor, and delegates the
    actual project-wide rename to do_rename().
    """
    if not int(vim.eval('a:0')):
        # Need to save the cursor position before insert mode
        cursor = vim.current.window.cursor
        changenr = vim.eval('changenr()')  # track undo tree
        vim_command('augroup jedi_rename')
        vim_command('autocmd InsertLeave <buffer> call jedi#rename'
                    '({}, {}, {})'.format(cursor[0], cursor[1], changenr))
        vim_command('augroup END')
        vim_command("let s:jedi_replace_orig = expand('<cword>')")
        line = vim_eval('getline(".")')
        vim_command('normal! diw')
        if re.match(r'\w+$', line[cursor[1]:]):
            # In case the deleted word is at the end of the line we need to
            # move the cursor to the end.
            vim_command('startinsert!')
        else:
            vim_command('startinsert')
    else:
        # Remove autocommand.
        vim_command('autocmd! jedi_rename InsertLeave')
        args = vim.eval('a:000')
        cursor = tuple(int(x) for x in args[:2])
        changenr = args[2]
        # Get replacement, if there is something on the cursor.
        # This won't be the case when the user ends insert mode right away,
        # and `<cword>` would pick up the nearest word instead.
        if vim_eval('getline(".")[getpos(".")[2]-1]') != ' ':
            replace = vim_eval("expand('<cword>')")
        else:
            replace = None
        vim_command('undo {}'.format(changenr))
        vim.current.window.cursor = cursor
        if replace:
            return do_rename(replace)
def rename_visual():
    """Rename the visually selected text to a name prompted from the user."""
    new_name = vim.eval('input("Rename to: ")')
    selection = vim.eval('getline(".")[(getpos("\'<")[2]-1):getpos("\'>")[2]]')
    do_rename(new_name, selection)
def do_rename(replace, orig=None):
    """Replace every usage of *orig* with *replace* across all buffers.

    *orig* defaults to the word saved by rename() in s:jedi_replace_orig.
    Usages are collected via usages(), edited in reverse position order so
    earlier offsets stay valid, and the original tab/window is restored at
    the end. Reports the number of renames (and buffers) performed.
    """
    if not len(replace):
        echo_highlight('No rename possible without name.')
        return
    if orig is None:
        orig = vim_eval('s:jedi_replace_orig')
    # Save original window / tab.
    saved_tab = int(vim_eval('tabpagenr()'))
    saved_win = int(vim_eval('winnr()'))
    temp_rename = usages(visuals=False)
    # Sort the whole thing reverse (positions at the end of the line
    # must be first, because they move the stuff before the position).
    temp_rename = sorted(temp_rename, reverse=True,
                         key=lambda x: (x.module_path, x.line, x.column))
    buffers = set()
    for r in temp_rename:
        if r.in_builtin_module():
            continue
        if os.path.abspath(vim.current.buffer.name) != r.module_path:
            assert r.module_path is not None
            result = new_buffer(r.module_path)
            if not result:
                echo_highlight('Failed to create buffer window for %s!' % (
                    r.module_path))
                continue
        buffers.add(vim.current.buffer.name)
        # Save view.
        saved_view = vim_eval('string(winsaveview())')
        # Replace original word.
        vim.current.window.cursor = (r.line, r.column)
        vim_command('normal! c{0:d}l{1}'.format(len(orig), replace))
        # Restore view.
        vim_command('call winrestview(%s)' % saved_view)
        highlight_usages([r], length=len(replace))
    # Restore previous tab and window.
    vim_command('tabnext {0:d}'.format(saved_tab))
    vim_command('{0:d}wincmd w'.format(saved_win))
    if len(buffers) > 1:
        echo_highlight('Jedi did {0:d} renames in {1:d} buffers!'.format(
            len(temp_rename), len(buffers)))
    else:
        echo_highlight('Jedi did {0:d} renames!'.format(len(temp_rename)))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def py_import():
    """Open the source file of the module named in the Vim a:args string.

    Resolves 'import <last arg>' with jedi; remaining args are forwarded to
    the :edit-style command used to open the buffer. Builtin modules and
    unresolvable imports are reported instead of opened.
    """
    # args are the same as for the :edit command
    args = shsplit(vim.eval('a:args'))
    import_path = args.pop()
    text = 'import %s' % import_path
    scr = jedi.Script(text, 1, len(text), '', environment=get_environment())
    try:
        completion = scr.goto_assignments()[0]
    except IndexError:
        echo_highlight('Cannot find %s in sys.path!' % import_path)
    else:
        if completion.in_builtin_module():
            echo_highlight('%s is a builtin module.' % import_path)
        else:
            cmd_args = ' '.join([a.replace(' ', '\\ ') for a in args])
            new_buffer(completion.module_path, cmd_args)
@catch_and_print_exceptions
def py_import_completions():
argl = | |
originals, and point to the same
`Component <Component>` object : `Graph`
"""
g = Graph()
for vertex in self.vertices:
g.add_vertex(Vertex(vertex.component, feedback=vertex.feedback))
for i in range(len(self.vertices)):
g.vertices[i].parents = [g.comp_to_vertex[parent_vertex.component] for parent_vertex in
self.vertices[i].parents]
g.vertices[i].children = [g.comp_to_vertex[parent_vertex.component] for parent_vertex in
self.vertices[i].children]
return g
def add_component(self, component, feedback=False):
if component in [vertex.component for vertex in self.vertices]:
logger.info('Component {1} is already in graph {0}'.format(component, self))
else:
vertex = Vertex(component, feedback=feedback)
self.comp_to_vertex[component] = vertex
self.add_vertex(vertex)
def add_vertex(self, vertex):
if vertex in self.vertices:
logger.info('Vertex {1} is already in graph {0}'.format(vertex, self))
else:
self.vertices.append(vertex)
self.comp_to_vertex[vertex.component] = vertex
def remove_component(self, component):
try:
self.remove_vertex(self.comp_to_vertex[component])
except KeyError as e:
raise CompositionError('Component {1} not found in graph {2}: {0}'.format(e, component, self))
def remove_vertex(self, vertex):
try:
for parent in vertex.parents:
parent.children.remove(vertex)
for child in vertex.children:
child.parents.remove(vertex)
self.vertices.remove(vertex)
del self.comp_to_vertex[vertex.component]
# TODO:
# check if this removal puts the graph in an inconsistent state
except ValueError as e:
raise CompositionError('Vertex {1} not found in graph {2}: {0}'.format(e, vertex, self))
    def connect_components(self, parent, child):
        """Create an edge from *parent* component's Vertex to *child*'s.

        Raises CompositionError (with a Projection-specific message) if
        either component has not yet been assigned a vertex.
        """
        try:
            self.connect_vertices(self.comp_to_vertex[parent], self.comp_to_vertex[child])
        except KeyError as e:
            if parent not in self.comp_to_vertex:
                raise CompositionError("Sender ({}) of {} ({}) not (yet) assigned".
                                       format(repr(parent.name), Projection.__name__, repr(child.name)))
            elif child not in self.comp_to_vertex:
                raise CompositionError("{} ({}) to {} not (yet) assigned".
                                       format(Projection.__name__, repr(parent.name), repr(child.name)))
            else:
                raise KeyError(e)
def connect_vertices(self, parent, child):
if child not in parent.children:
parent.children.append(child)
if parent not in child.parents:
child.parents.append(parent)
def get_parents_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose parents will be returned
Returns
-------
A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`]
"""
return self.comp_to_vertex[component].parents
def get_children_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose children will be returned
Returns
-------
A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`]
"""
return self.comp_to_vertex[component].children
def get_forward_children_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose parents will be returned
Returns
-------
# FIX 8/12/19: MODIFIED FEEDBACK -
# IS THIS A CORRECT DESCRIPTION? (SAME AS get_forward_parents_from_component)
A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component**: list[`Vertex`]
"""
forward_children = []
for child in self.comp_to_vertex[component].children:
if component not in self.comp_to_vertex[child.component].backward_sources:
forward_children.append(child)
return forward_children
def get_forward_parents_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose parents will be returned
Returns
-------
# FIX 8/12/19: MODIFIED FEEDBACK -
# IS THIS A CORRECT DESCRIPTION? (SAME AS get_forward_children_from_component)
A list[Vertex] of the parent `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`]
"""
forward_parents = []
for parent in self.comp_to_vertex[component].parents:
if parent.component not in self.comp_to_vertex[component].backward_sources:
forward_parents.append(parent)
return forward_parents
def get_backward_children_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose children will be returned
Returns
-------
A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`]
"""
backward_children = []
for child in self.comp_to_vertex[component].children:
if component in self.comp_to_vertex[child.component].backward_sources:
backward_children.append(child)
return backward_children
def get_backward_parents_from_component(self, component):
"""
Arguments
---------
component : Component
the Component whose children will be returned
Returns
-------
A list[Vertex] of the child `Vertices <Vertex>` of the Vertex associated with **component** : list[`Vertex`]
"""
return list(self.comp_to_vertex[component].backward_sources)
@property
def dependency_dict(self):
return dict((v.component,set(d.component for d in v.parents)) for v in self.vertices)
# Options for show_node_structure argument of show_graph()
# (string keyword tokens selecting which function parameters are displayed).
MECH_FUNCTION_PARAMS = "MECHANISM_FUNCTION_PARAMS"
STATE_FUNCTION_PARAMS = "STATE_FUNCTION_PARAMS"
class Composition(Composition_Base, metaclass=ComponentsMeta):
"""
Composition(
controller=None,
enable_controller=None,
controller_mode=AFTER,
controller_condition=Always,
enable_learning=True,
name=None,
prefs=Composition.classPreferences
context=None)
Base class for Composition.
Arguments
---------
controller: `OptimizationControlmechanism` : default None
specifies the `OptimizationControlMechanism` to use as the Composition's `controller
<Composition.controller>` (see `Composition_Controller` for details).
enable_controller: bool : default None
specifies whether the Composition's `controller <Composition.controller>` is executed when the
Composition is executed. Set to True by default if **controller** specified; if set to False,
the `controller <Composition.controller>` is ignored when the Composition is executed.
controller_mode: Enum[BEOFRE|AFTER] : default AFTER
specifies whether the controller is executed before or after the rest of the Composition
in each trial. Must be either the keyword *BEFORE* or *AFTER*.
controller_condition: Condition : default Always
specifies when the Composition's `controller <Composition.controller>` is executed in a trial.
enable_learning: bool : default True
specifies whether `LearningMechanisms <LearningMechanism>` in the Composition are executed when it is
executed.
name : str : default see `name <Composition.name>`
specifies the name of the Composition.
prefs : PreferenceSet or specification dict : default Composition.classPreferences
specifies the `PreferenceSet` for the Composition; see `prefs <Composition.prefs>` for details.
Attributes
----------
graph : `Graph`
the full `Graph` associated with this Composition. Contains both Nodes (`Mechanisms <Mechanism>` or
`Compositions <Composition>`) and `Projections <Projection>`
nodes : `list[Mechanisms and Compositions]`
a list of all Nodes (`Mechanisms <Mechanism>` and/or `Compositions <Composition>`) contained in
this Composition
input_CIM : `CompositionInterfaceMechanism`
mediates input values for the INPUT nodes of the Composition. If the Composition is nested, then the
input_CIM and its InputPorts serve as proxies for the Composition itself in terms of afferent projections.
input_CIM_ports : dict
a dictionary in which keys are InputPorts of INPUT Nodes in a composition, and values are lists
containing two items: the corresponding InputPort and OutputPort on the input_CIM.
afferents : ContentAddressableList
a list of all of the `Projections <Projection>` to the Composition's `input_CIM`.
output_CIM : `CompositionInterfaceMechanism`
aggregates output values from the OUTPUT nodes of the Composition. If the Composition is nested, then the
output_CIM and its OutputPorts serve as proxies for Composition itself in terms of efferent projections.
output_CIM_ports : dict
a dictionary in which keys are OutputPorts of OUTPUT Nodes in a composition, and values are lists
containing two items: the corresponding InputPort and OutputPort on the input_CIM.
efferents : ContentAddressableList
a list of all of the `Projections <Projection>` from the Composition's `output_CIM`.
env : Gym Forager Environment : default: None
stores a Gym Forager Environment so that the Composition may interact with this environment within a
single call to `run <Composition.run>`.
shadows : dict
a dictionary in which the keys are all in the Composition and the values are lists of any Nodes that
`shadow <InputPort_Shadow_Inputs>` the original Node's input.
controller : OptimizationControlMechanism
identifies the `OptimizationControlMechanism` used as the Composition's controller
(see `Composition_Controller` for details).
enable_controller : bool
determines whether the Composition's `controller <Composition.controller>` is executed in each trial
(see controller_mode <Composition.controller_mode>` for timing of execution). Set to True by default
if `controller <Composition.controller>` is specified. Setting it to False suppresses exectuion of the
`controller <Composition.controller>`.
controller_mode : BEFORE or AFTER
determines whether the controller is executed before or after the rest of the `Composition`
is executed on each trial.
controller_condition : Condition
specifies whether the controller is executed in a given trial. The default is `Always`, which
executes the controller on every trial.
default_execution_id
if no *context* is specified in a call to run, this *context* is used; by default,
it is the Composition's `name <Composition.name>`.
execution_ids : set
stores all execution_ids used by this Composition.
enable_learning: bool : default True
determines whether `LearningMechanisms <LearningMechanism>` in the Composition are executed when it is
executed.
learning_components : list
contains the learning-related components in the Composition, all or many of which may have been
created automatically in a call to one of its `add_<*learning_type*>_pathway' methods (see
`Composition_Learning` for details). This does *not* contain the `ProcessingMechanisms
<ProcessingMechanism>` or `MappingProjections <MappingProjection>` in the pathway(s) being learned;
those are contained in `learning_pathways <Composition.learning_pathways>` attribute.
learned_components : list[list]
contains a list of the components subject to learning in the Composition (`ProcessingMechanisms
<ProcessingMechanism>` and `MappingProjections <MappingProjection>`); this does *not* contain the
components used for learning; those are contained in `learning_components
<Composition.learning_components>` attribute.
COMMENT:
learning_pathways : list[list]
contains a list of the learning pathways specified for the Composition; each item contains a list of the
`ProcessingMechanisms <ProcessingMechanism>` and `MappingProjection(s) <MappingProjection>` specified a
a call to one of the Composition's `add_<*learning_type*>_pathway' methods (see `Composition_Learning`
for details). This does *not* contain the components used for learning; those are contained in
`learning_components <Composition.learning_components>` attribute.
COMMENT
results : 3d array
stores the `output_values <Mechanism_Base.output_values>` of the `OUTPUT` Mechanisms | |
import unittest
from datetime import datetime
from rx.observable import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime
from rx.disposables import SerialDisposable
# Short aliases for the ReactiveTest record factories, so the test bodies
# below can build expected message sequences concisely.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
    """Exception type used by these tests to mark deliberately raised errors."""
    pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
    """Raise *ex* wrapped in an RxException (callable from a lambda body)."""
    raise RxException(ex)
class TestWhen(unittest.TestCase):
def test_then1(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, 1),
on_completed(220)
)
def create():
def selector(a):
return a
return Observable.when(xs.then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_next(210, 1),
on_completed(220)
)
def test_then1_error(self):
ex = Exception()
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_error(210, ex)
)
def create():
def selector(a):
return a
return Observable.when(xs.then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_then1_throws(self):
ex = Exception()
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, 1),
on_completed(220)
)
def create():
def selector(a):
raise ex
return Observable.when(xs.then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_and2(self):
scheduler = TestScheduler()
N = 2
obs = []
for n in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b):
return a + b
return Observable.when(obs[0].and_(obs[1]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_next(210, N),
on_completed(220)
)
def test_and2_error(self):
ex = Exception()
N = 2
for n in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == n:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b):
return a + b
return Observable.when(obs[0].and_(obs[1]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_then2_throws(self):
scheduler = TestScheduler()
ex = Exception()
obs = []
N = 2
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b):
raise ex
return Observable.when(obs[0].and_(obs[1]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_and3(self):
scheduler = TestScheduler()
obs = []
N = 3
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c):
return a + b + c
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_next(210, N),
on_completed(220)
)
def test_and3_error(self):
ex = Exception()
N = 3
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c):
return a + b + c
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_then3_throws(self):
ex = Exception()
N = 3
scheduler = TestScheduler()
obs = []
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c):
raise ex
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(
on_error(210, ex)
)
def test_and4(self):
N = 4
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d):
return a + b + c + d
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and4_error(self):
ex = 'ex'
N = 4
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d):
return a + b + c + d
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_then4_throws(self):
ex = 'ex'
N = 4
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_and5(self):
N = 5
scheduler = TestScheduler()
obs = []
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e):
return a + b + c + d + e
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and5_error(self):
ex = 'ex'
N = 5
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e):
return a + b + c + d + e
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_then5_throws(self):
ex = 'ex'
N = 5
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_and6(self):
N = 6
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f):
return a + b + c + d + e + f
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and6_error(self):
ex = 'ex'
N = 6
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f):
return a + b + c + d + e + f
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_Then6Throws(self):
ex = 'ex'
N = 6
scheduler = TestScheduler()
obs = []
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(*args):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_and7(self):
N = 7
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g):
return a + b + c + d + e + f + g
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and7_error(self):
ex = 'ex'
N = 7
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g):
return a + b + c + d + e + f + g
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_then7_throws(self):
ex = 'ex'
N = 7
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(*args):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_and8(self):
N = 8
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g, h):
return a + b + c + d + e + f + g + h
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and8_error(self):
ex = 'ex'
N = 8
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g, h):
return a + b + c + d + e + f + g + h
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_then8_throws(self):
ex = 'ex'
N = 8
scheduler = TestScheduler()
obs = []
for _ in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(*args):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_And9(self):
N = 9
scheduler = TestScheduler()
obs = []
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g, h, _i):
return a + b + c + d + e + f + g + h + _i
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).and_(obs[8]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, N), on_completed(220))
def test_and9_error(self):
ex = 'ex'
N = 9
for i in range(N):
scheduler = TestScheduler()
obs = []
for j in range(N):
if j == i:
obs.append(scheduler.create_hot_observable(on_error(210, ex)))
else:
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(a, b, c, d, e, f, g, h, _i):
return a + b + c + d + e + f + g + h + _i
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).and_(obs[8]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_then9_throws(self):
ex = 'ex'
N = 9
scheduler = TestScheduler()
obs = []
for i in range(N):
obs.append(scheduler.create_hot_observable(on_next(210, 1), on_completed(220)))
def create():
def selector(*args):
raise Exception(ex)
return Observable.when(obs[0].and_(obs[1]).and_(obs[2]).and_(obs[3]).and_(obs[4]).and_(obs[5]).and_(obs[6]).and_(obs[7]).and_(obs[8]).then_do(selector))
results = scheduler.start(create)
results.messages.assert_equal(on_error(210, ex))
def test_WhenMultipleDataSymmetric(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, 1),
on_next(220, 2),
on_next(230, 3),
on_completed(240)
)
ys = scheduler.create_hot_observable(
on_next(240, 4),
on_next(250, 5),
on_next(260, | |
import string
def filter(c):
    """Map *c* to itself if it is an ASCII letter, otherwise to '#'.

    Mirrors the convention of Cowan's trigram table, which represents every
    non-letter as '#'.  NOTE: this intentionally shadows the builtin
    filter() to match existing callers in this module.
    """
    # string.ascii_letters exists on both Python 2 and 3; string.letters
    # was removed in Python 3 and raised AttributeError here.
    if c in string.ascii_letters:
        return c
    return '#'
def totcscore(str):
    """Score text by summed single-character frequencies.

    Sums the Tale-of-Two-Cities unigram count (`totc`) for every letter,
    digit, or space in the input, scaled by `totc_magic`.

    NOTE(review): ``chr(str[i])`` in the original only works when items are
    integer code points (Python 3 ``bytes``) -- confirm intended input type.
    """
    score = 0
    for code in str:
        symbol = chr(code)
        # Only letters, digits, and spaces are scored.  The original used
        # string.letters (Python 2 only) inside a bare except, which on
        # Python 3 silently swallowed the AttributeError and always
        # returned 0; ascii_letters plus dict.get fixes both problems.
        if symbol in (string.ascii_letters + string.digits + ' '):
            score = score + totc.get(symbol, 0) * totc_magic
    return score
# Score based on presence of digraphs, weighted according to popularity
# of candidate digraph in very large English-language corpus described
# at http://norvig.com/mayzner.html
def norvigscore(str):
    """Score text by digraph (two-character) frequencies.

    Uses the `norvig` table of digraph percentages from the English-corpus
    counts described at http://norvig.com/mayzner.html.

    NOTE(review): ``chr(mystr[i])`` assumes integer items (Python 3
    ``bytes``) -- confirm intended input type.
    """
    score = 0
    mystr = str.upper()
    for i in range(len(mystr) - 1):
        digram = chr(mystr[i]) + chr(mystr[i + 1])
        # Digraphs absent from the table contribute nothing; only missing
        # keys are ignored, so genuine errors are no longer hidden by a
        # bare except.
        try:
            score = score + norvig[digram] / 100.0
        except KeyError:
            pass
    return score
# Score based on trigraphs, from http://home.ccil.org/~cowan/trigrams
# Since Cowan uses '#' for non-letters, we transform input string accordingly
def cowanscore(str):
    """Score text by trigraph frequencies from Cowan's trigram table.

    Cowan's table uses '#' for non-letters, so each character is passed
    through filter() (defined above) before lookup.
    """
    score = 0
    for i in range(len(str) - 2):
        trigram = ''.join(
            filter(chr(str[i + k])).upper() for k in range(3))
        # Trigraphs absent from the table contribute nothing; only missing
        # keys are ignored, so genuine errors are no longer hidden by a
        # bare except.
        try:
            score = score + cowan[trigram]
        except KeyError:
            pass
    return score
# Unigram counts from Proj. Gutenberg copy of Tale of Two Cities
# normalize by multiplying by totc_magic
# Scaling factor applied to each unigram count when scoring (see totcscore).
totc_magic = 0.000001578733824

# Raw character counts from the corpus, highest frequency first.
totc = {
    'e': 74424, 't': 52356, 'a': 47273, 'o': 46209, 'n': 41973, 'i': 38145,
    'h': 38087, 'r': 36990, 's': 36692, 'd': 27056, 'l': 21213, 'u': 16657,
    'm': 13670, 'w': 13516, ',': 13274, 'c': 13146, 'f': 13127, 'g': 12163,
    'y': 11748, 'p': 9464, 'b': 7887, '.': 6815, '"': 5681, 'v': 5088,
    'k': 4732, 'I': 2874, '-': 2431, 'T': 1695, 'M': 1625, '\'': 1269,
    ';': 1108, 'D': 990, '!': 955, '?': 913, 'A': 897, 'S': 896, 'H': 887,
    'L': 834, 'C': 753, 'x': 663, 'q': 637, 'W': 605, 'B': 535, 'P': 496,
    'Y': 438, 'F': 437, 'j': 433, 'E': 413, 'N': 412, 'G': 385, 'O': 328,
    'J': 282, ':': 269, 'R': 220, 'z': 215, '_': 182, ')': 151, '(': 151,
    'V': 115, '*': 90, 'U': 81, '1': 64, 'X': 60, 'K': 55, 'Q': 29,
    '/': 24, '0': 20, '9': 18, '8': 16, '2': 14, '7': 13, '5': 13,
    '3': 13, '4': 10, '6': 9, ']': 2, '[': 2, '@': 2, '$': 2,
    '%': 1, '#': 1
}
# from http://home.ccil.org/~cowan/trigrams
cowan = { '#TH': 230,
'THE': 192,
'HE#': 172,
'#S#': 129,
'ED#': 86,
'#OF': 81,
'#AN': 78,
'OF#': 77,
'ND#': 75,
'#IN': 71,
'AND': 70,
'#TO': 64,
'ING': 62,
'NG#': 62,
'ER#': 61,
'TO#': 58,
'ON#': 58,
'IN#': 54,
'IS#': 49,
'#A#': 48,
'#CO': 47,
'ES#': 47,
'ION': 47,
'RE#': 46,
'AS#': 42,
'AT#': 41,
'ENT': 38,
'#BE': 38,
'TIO': 37,
'OR#': 37,
'#HE': 36,
'#RE': 34,
'LY#': 34,
'#HA': 33,
'HER': 32,
'FOR': 32,
'#FO': 31,
'#WA': 31,
'EN#': 31,
'AL#': 30,
'AN#': 30,
'#WH': 29,
'NT#': 28,
'#MA': 27,
'#WI': 27,
'HAT': 27,
'TER': 26,
'THA': 26,
'ST#': 26,
'ATI': 26,
'HIS': 26,
'#ON': 26,
'ERE': 26,
'#PR': 25,
'#ST': 25,
'#HI': 25,
'TH#': 25,
'LL#': 24,
'#IT': 23,
'LE#': 23,
'CE#': 23,
'#IS': 23,
'#NO': 23,
'TS#': 23,
'ATE': 22,
'IT#': 22,
'VE#': 22,
'SE#': 21,
'ALL': 21,
'WAS': 20,
'UT#': 20,
'VER': 20,
'#SE': 20,
'#AS': 20,
'#WE': 20,
'#DE': 19,
'#CA': 19,
'RS#': 19,
'CH#': 19,
'ME#': 19,
'CON': 19,
'ITH': 18,
'LD#': 18,
'THI': 18,
'RES': 18,
'#SO': 18,
'TED': 18,
'#MO': 18,
'NCE': 17,
'WIT': 17,
'#SH': 17,
'ERS': 17,
'MEN': 17,
'NE#': 17,
'#AR': 16,
'NS#': 16,
'ONS': 16,
'ARE': 16,
'#DI': 16,
'#AL': 16,
'PRO': 16,
'RY#': 16,
'REA': 15,
'EVE': 15,
'#FR': 15,
'STA': 15,
'#WO': 15,
'TE#': 15,
'#LI': 15,
'ESS': 15,
'AD#': 15,
'#SU': 14,
'COM': 14,
'#AT': 14,
'IVE': 14,
'#BU': 14,
'#PA': 14,
'TY#': 14,
'#ME': 14,
'ONE': 14,
'BE#': 14,
'EST': 14,
'EAR': 14,
'NOT': 13,
'AY#': 13,
'#FI': 13,
'PER': 13,
'OUT': 13,
'INT': 13,
'#SA': 13,
'ECT': 13,
'OT#': 13,
'ILL': 13,
'OME': 12,
'#PO': 12,
'MAN': 12,
'#NE': 12,
'#OR': 12,
'#CH': 12,
'IST': 12,
'#HO': 12,
'#DO': 12,
'ICA': 12,
'AVE': 12,
'GHT': 11,
'BY#': 11,
'TIN': 11,
'OUL': 11,
'OM#': 11,
'AIN': 11,
'IGH': 11,
'OVE': 11,
'IDE': 11,
'SS#': 11,
'#UN': 11,
'ULD': 11,
'#EX': 11,
'INE': 11,
'#BY': 11,
'DER': 11,
'#LO': 11,
'ORE': 11,
'CTI': 11,
'STR': 11,
'#YO': 10,
'#FA': 10,
'NTE': 10,
'EY#': 10,
'ROM': 10,
'RED': 10,
'#LE': 10,
'#I#': 10,
'#PE': 10,
'OUR': 10,
'RAT': 10,
'OUN': 10,
'#LA': 10,
'HAD': 10,
'#TR': 10,
'BLE': 10,
'PRE': 10,
'HIN': 10,
'WHI': 10,
'AR#': 10,
'OTH': 10,
'STI': 10,
'#MI': 10,
'FRO': 10,
'TRA': 10,
'YOU': 10,
'STE': 10,
'OW#': 10,
'#SI': 10,
'ITI': 10,
'UND': 10,
'BUT': 10,
'ET#': 10,
'CAL': 9 ,
'ANT': 9 ,
'ART': 9 ,
'IC#': 9 ,
'HOU': 9 ,
'GE#': 9 ,
'HAN': 9 ,
'#GR': 9 ,
'WER': 9 ,
'#BA': 9 ,
'ERI': 9 ,
'ACT': 9 ,
'ORT': 9 ,
'DE#': 9 ,
'COU': 9 ,
'URE': 9 ,
'NTI': 9 ,
'TUR': 9 ,
'SHE': 9 ,
'TIC': 9 ,
'HAV': 9 ,
'HEN': 9 ,
'IND': 9 ,
'ERA': 9 ,
'HT#': 9 ,
'ENC': 9 ,
'#GO': 9 ,
'#BO': 9 ,
'EEN': 8 ,
'THO': 8 ,
'HIC': 8 ,
'#OU': 8 ,
'US#': 8 ,
'AME': 8 ,
'OUS': 8 ,
'DS#': 8 ,
'PAR': 8 ,
'ITY': 8 ,
'ID#': 8 ,
'NDE': 8 ,
'ROU': 8 ,
'WHE': 8 ,
'UGH': 8 ,
'#EN': 8 ,
'RIN': 8 ,
'ICH': 8 ,
'#TE': 8 ,
'USE': 8 ,
'IES': 8 ,
'LIN': 8 ,
'CAN': 8 ,
'TEN': 8 ,
'EAS': 8 ,
'TOR': 8 ,
'MIN': 7 ,
'#AC': 7 ,
'NED': 7 ,
'UST': 7 ,
'EAT': 7 ,
'KE#': 7 ,
'PLA': 7 ,
'HEY': 7 ,
'SSI': 7 ,
'#SP': 7 ,
'ARD': 7 ,
'TAT': 7 ,
'LIT': 7 ,
'OUG': 7 ,
'NAL': 7 ,
'#PL': 7 ,
'AGE': 7 ,
'MOR': 7 ,
'LES': 7 ,
'IR#': 7 ,
'ANC': 7 ,
'ABL': 7 ,
'LLY': 7 ,
'END': 7 ,
'WOR': 7 ,
'DEN': 7 ,
'RD#': 7 ,
'AST': 7 ,
'ITE': 7 ,
'#FE': 7 ,
'#RA': 7 ,
'ELL': 7 ,
'NY#': 7 ,
'#CL': 7 ,
'#BR': 7 ,
'TTE': 7 ,
'ESE': 7 ,
'ANY': 7 ,
'CK#': 7 ,
'#EV': 7 ,
'VEN': 7 ,
'RT#': 7 ,
'OU#': 7 ,
'IAL': 7 ,
'PLE': 6 ,
'DIN': 6 ,
'OSE': 6 ,
'EW#': 6 ,
'#TA': 6 ,
'CES': 6 ,
'OWN': 6 ,
'ONT': 6 ,
'ASS': 6 ,
'REC': 6 ,
'INC': 6 ,
'RAN': 6 ,
'INS': 6 ,
'#MU': 6 ,
'SIN': 6 ,
'NIN': 6 ,
'SED': 6 ,
'TAN': 6 ,
'#AB': 6 ,
'LAT': 6 ,
'LAN': 6 ,
'NAT': 6 ,
'HIM': 6 ,
'IME': 6 ,
'#DA': 6 ,
'CHA': 6 ,
'NTS': 6 ,
'CT#': 6 ,
'ONG': 6 ,
'EAD': 6 ,
'REN': 6 ,
'SO#': 6 ,
'SHO': 6 ,
'TIM': 6 ,
'EME': 6 ,
'RIE': 6 ,
'ICE': 6 ,
'SIO': 6 ,
'UR#': 6 ,
'LLE': 6 ,
'REE': 6 ,
'DIS': 6 ,
'WHO': 6 ,
'LEA': 6 ,
'OST': 6 ,
'LED': 6 ,
'RAL': 6 ,
'EM#': 6 ,
'TIV': 6 ,
'NES': 6 ,
'#RO': 6 ,
'CHE': 6 ,
'GH#': 6 ,
'BER': 6 ,
'WN#': 6 ,
'ONA': 6 ,
'HES': 6 ,
'IM#': 6 ,
'LS#': 6 ,
'ACE': 6 ,
'APP': 6 ,
'#TI': 6 ,
'HAS': 6 ,
'SOM': 6 ,
'SON': 6 ,
'WIL': 6 ,
'AKE': 6 ,
'RIC': 6 ,
'TRI': 6 ,
'ERN': 5 ,
'HEI': 5 ,
'POS': 5 ,
'SEN': 5 ,
'WE#': 5 ,
'WOU': 5 ,
'ACH': 5 ,
'GRE': 5 ,
'#AF': 5 ,
'ADE': 5 ,
'UNI': 5 ,
'MER': 5 ,
'KIN': 5 ,
'#GE': 5 ,
'TES': 5 ,
'STO': 5 ,
'SPE': 5 ,
'ITS': 5 ,
'NOW': 5 ,
'OMP': 5 ,
'OPE': 5 ,
'EIR': 5 ,
'NDI': 5 ,
'ENE': 5 ,
'#UP': 5 ,
'#SC': 5 ,
'EE#': 5 ,
'IRE': 5 ,
'HEA': 5 ,
'CHI': 5 ,
'POR': 5 ,
'ACK': 5 ,
'#YE': 5 ,
'BEE': 5 ,
'OD#': 5 ,
'NER': 5 ,
'DED': 5 ,
'#PU': 5 ,
'#EA': 5 ,
'LAR': 5 ,
'CAT': 5 ,
'SEL': 5 ,
'#AD': 5 ,
'RIT': 5 ,
'ATT': 5 ,
'SID': 5 ,
'ORM': 5 ,
'GRA': 5 ,
'ABO': 5 ,
'UNT': 5 ,
'HAR': 5 ,
'#US': 5 ,
'MAT': 5 ,
'OLD': 5 ,
'NGE': 5 ,
'NEW': 5 ,
'ANG': 5 ,
'UCH': 5 ,
'ANS': 5 ,
'#AG': 5 ,
'NST': 5 ,
'#NA': 5 ,
'#DR': 5 ,
'ENS': 5 ,
'ORD': 5 ,
'THR': 5 ,
'DES': 5 ,
'UP#': 5 ,
'ELY': 5 ,
'MS#': 5 ,
'HOW': 5 ,
'HEM': 5 ,
'EXP': 5 ,
'NSI': 5 ,
'#AP': 5 ,
'AID': 5 ,
'INA': 5 ,
'ILE': 5 ,
'ECO': 4 ,
'ARI': 4 ,
'LOW': 4 ,
'ALI': 4 ,
'MON': 4 ,
'#IM': 4 ,
'SER': 4 ,
'MPL': 4 ,
'SIT': 4 ,
'CEN': 4 ,
'UAL': 4 ,
'ERY': 4 ,
'NO#': 4 ,
'FIC': 4 ,
'WAR': 4 ,
'MAR': 4 ,
'TAL': 4 ,
'HO#': 4 ,
'IEN': 4 ,
'PEN': 4 ,
'SEE': 4 ,
'WAY': 4 ,
'ARY': 4 ,
'#CR': 4 ,
'#AM': 4 ,
'OWE': 4 ,
'ISH': 4 ,
'LIC': 4 ,
'SES': 4 ,
'PRI': 4 ,
'NTR': 4 ,
'ATH': 4 ,
'EAC': 4 ,
'SSE': 4 ,
'OMM': 4 ,
'ELE': 4 ,
'NLY': 4 ,
'ERT': 4 ,
'OOD': 4 ,
'SUR': 4 ,
'LON': 4 ,
'#CE': 4 ,
'IF#': 4 ,
'TAI': 4 ,
'#RI': 4 ,
'ASE': 4 ,
'WHA': 4 ,
'INI': 4 ,
'#IF': 4 ,
'FAC': 4 ,
'#OT': 4 ,
'DAY': 4 ,
'ALS': 4 ,
'CRE': 4 ,
'OND': 4 ,
'VEL': 4 ,
'ELI': 4 ,
'OOK': 4 ,
'NTO': 4 ,
'BOU': 4 ,
'OLL': 4 ,
'RAC': 4 ,
'PEC': 4 ,
'IMP': 4 ,
'ILI': 4 ,
'CIA': 4 ,
'REL': 4 ,
'NIT': 4 ,
'#VI': 4 ,
'MIL': 4 ,
'FIR': 4 ,
'HIL': 4 ,
'TRO': 4 ,
'KED': 4 ,
'FFE': 4 ,
'GEN': 4 ,
'#JU': 4 ,
'TRE': 4 ,
'ARS': 4 ,
'RIS': 4 ,
'EAL': 4 ,
'NTA': 4 ,
'CAR': 4 ,
'RST': 4 ,
'MED': 4 ,
'#TW': 4 ,
'LF#': 4 ,
'EL#': 4 ,
'LAS': 4 ,
'#T#': 4 ,
'FER': 4 ,
'FIN': 4 ,
'TLE': 4 ,
'#KN': 4 ,
'DY#': 4 ,
'#OP': 4 ,
'#QU': 4 ,
'LIS': 4 ,
'MBE': 4 ,
'IKE': 4 ,
'EED': 4 ,
'SAI': 4 ,
'RTI': 4 ,
'MOS': 4 ,
'TLY': 4 ,
'RN#': 3 ,
'TIE': 3 ,
'ITT': 3 ,
'ORK': 3 ,
'SHI': 3 ,
'#VE': 3 ,
'ORI': 3 ,
'IVI': 3 ,
'ETE': 3 ,
'REP': 3 ,
'YEA': 3 ,
'LLI': 3 ,
'QUI': 3 ,
'GER': 3 ,
'REM': 3 ,
'OLI': 3 ,
'HED': 3 ,
'NDS': 3 ,
'ANI': 3 ,
'TEM': 3 ,
'UE#': 3 ,
'#FU': 3 ,
'PPE': 3 ,
'FTE': 3 ,
'GS#': 3 ,
'GIN': 3 ,
'COR': 3 ,
'GRO': 3 ,
'#GA': 3 ,
'RK#': 3 ,
'VES': 3 ,
'OFF': 3 ,
'VED': 3 ,
'ONL': 3 ,
'ETH': 3 ,
'OIN': 3 ,
'ERV': 3 ,
'YS#': 3 ,
'IAN': 3 ,
'DUC': 3 ,
'ESI': 3 ,
'RIA': 3 ,
'MET': 3 ,
'TIL': 3 ,
'MUS': 3 ,
'ISE': 3 ,
'ONC': 3 ,
'RGE': 3 ,
'CER': 3 ,
'ULT': 3 ,
'FUL': 3 ,
'#OV': 3 ,
'SUC': 3 ,
'ULA': 3 ,
'LIE': 3 ,
'#PI': 3 ,
'QUE': 3 ,
'RET': 3 ,
'ARR': 3 ,
'LIK': 3 ,
'TAR': 3 ,
'TEL': 3 ,
'SH#': 3 ,
'COL': 3 ,
'USI': 3 ,
'BEC': 3 ,
'TRU': 3 ,
'WIN': 3 ,
'MY#': 3 ,
'IRS': 3 ,
'IED': 3 ,
'LAC': 3 ,
'#VA': 3 ,
'ETT': 3 ,
'ITA': 3 ,
'EQU': 3 ,
'HOL': 3 ,
'LOO': 3 ,
'#GI': 3 ,
'ORS': 3 ,
'RSE': 3 ,
'LET': 3 ,
'IGN': 3 ,
'IOU': 3 ,
'TON': 3 ,
'MAY': 3 ,
'VIN': 3 ,
'ERM': 3 ,
'ROP': 3 ,
'TOO': 3 ,
'#BI': 3 ,
'OSS': 3 ,
'RTH': 3 ,
'MIS': 3 ,
'IL#': 3 ,
'#HU': 3 ,
'ISS': 3 ,
'MES': 3 ,
'ECE': 3 ,
'ECI': 3 ,
'OTE': 3 ,
'#FL': 3 ,
'URN': 3 ,
'AM#': 3 ,
'CLE': 3 ,
'CHO': 3 ,
'RAI': 3 ,
'ICI': 3 ,
'#BL': 3 ,
'RON': 3 ,
'AIL': 3 ,
'PON': 3 ,
'NGS': 3 ,
'#JO': 3 ,
'TWO': 3 ,
'ARL': 3 ,
'AIR': 3 ,
'BLI': 3 ,
'ELF': 3 ,
'CED': 3 ,
'SEC': 3 ,
'GET': 3 ,
'#EL': 3 ,
'HOS': 3 ,
'PEA': 3 ,
'LIG': 3 ,
'NSE': 3 ,
'#CU': 3 ,
'LLO': 3 ,
'CTE': 3 ,
'ISI': 3 ,
'EAN': 3 ,
'RCH': 3 ,
'WO#': 3 ,
'UES': 3 ,
'HEL': 3 ,
'ATU': 3 ,
'#MY': 3 ,
'DIT': 3 ,
'EDI': 3 ,
'KS#': 3 ,
'MAL': 3 ,
'HRO': 3 ,
'VID': 3 ,
'CUL': 3 ,
'ANN': 3 ,
'RTA': 3 ,
'HOR': 3 ,
'DID': 3 ,
'DO#': 3 ,
'UTI': 3 ,
'RIG': 3 ,
'RMA': 3 ,
'#RU': 3 ,
'LEC': 3 ,
'NIS': 3 ,
'AFT': 3 ,
'AYS': 3 ,
'WEL': 3 ,
'CLA': 3 ,
'ETI': 3 ,
'POL': 3 ,
'PS#': 3 ,
'GES': 3 ,
'IZE': 3 ,
'ROV': 3 ,
'DEA': 3 ,
'FFI': 3 ,
'SHA': 3 ,
'WEE': 3 ,
'SIS': 3 ,
'RAD': 2 ,
'AUS': 2 ,
'OMI': 2 ,
'ARK': 2 ,
'ECA': 2 ,
'GAI': 2 ,
'KNO': 2 ,
'#DU': 2 ,
'NOR': 2 ,
'URI': 2 ,
'RNE': 2 ,
'MEA': 2 ,
'EPT': 2 ,
'XPE': 2 ,
'ALE': 2 ,
'EMB': 2 ,
'UCT': 2 ,
'DOW': 2 ,
'ACC': 2 ,
'ICT': 2 ,
'#CI': 2 ,
'UTH': 2 ,
'AGA': 2 ,
'RIO': 2 ,
'RCE': 2 ,
'ADI': 2 ,
'EET': 2 ,
'#MR': 2 ,
'URS': 2 ,
'ARG': 2 ,
'CAU': 2 ,
'ROW': 2 ,
'ESP': 2 ,
'LEN': 2 ,
'TTL': 2 ,
'EMP': 2 ,
'STU': 2 ,
'TAK': 2 ,
'LOS': 2 ,
'ARA': 2 ,
'ILD': 2 ,
'PAT': 2 ,
'FOU': 2 ,
'BRO': 2 ,
'TCH': 2 ,
'TIT': 2 ,
'MAK': 2 ,
'EFO': 2 ,
'EXT': 2 ,
'NNE': 2 ,
'RM#': 2 ,
'ICK': 2 ,
'CIE': 2 ,
'SOU': 2 ,
'PED': 2 ,
'QUA': 2 ,
'MIT': 2 ,
'SPO': 2 ,
'OL#': 2 ,
'IMA': 2 ,
'FRE': 2 ,
'BET': 2 ,
'IFI': 2 ,
'OLE': 2 ,
'BAC': 2 ,
'SIG': 2 ,
'ASI': 2 ,
'MAD': 2 ,
'OOL': 2 ,
'STS': 2 ,
'SCH': 2 ,
'ENG': 2 ,
'RDE': 2 ,
'ROB': 2 ,
'HOO': 2 ,
'ELO': 2 ,
'IAT': 2 ,
'BRI': 2 ,
'CIT': 2 ,
'UBL': 2 ,
'MPA': 2 ,
'BIL': 2 ,
'ORY': 2 ,
'NCI': 2 ,
'#SL': 2 ,
'VAL': 2 ,
'LT#': 2 ,
'CAM': 2 ,
'OCK': 2 ,
'RRI': 2 ,
'RTE': 2 ,
'RLY': 2 ,
'DIC': 2 ,
'ITU': 2 ,
'HAL': 2 ,
'DRE': 2 ,
'ORA': 2 ,
'RAM': 2 ,
'IFF': 2 ,
'#EM': 2 ,
'OK#': 2 ,
'JUS': 2 ,
'REF': 2 ,
'OLO': 2 ,
'CUR': 2 ,
'LSO': 2 ,
'FE#': 2 ,
'REV': 2 ,
'MPO': 2 ,
'#KI': 2 ,
'#OB': 2 ,
'#SM': 2 ,
'LEM': 2 ,
'ULL': 2 ,
'EMA': 2 ,
'ARM': 2 ,
'SUP': 2 ,
'NGL': 2 ,
'TAB': 2 ,
'EVI': 2 ,
'OVI': 2 ,
'UDE': 2 ,
'DON': 2 ,
'NIC': 2 ,
'LAY': 2 ,
'REG': 2 ,
'UTE': 2 ,
'IBL': 2 ,
'ROS': 2 ,
'RRE': 2 ,
'YIN': 2 ,
'IFE': 2 ,
'OCI': 2 ,
'#TU': 2 ,
'TUD': 2 ,
'EIN': 2 ,
'BEL': 2 ,
'UNC': 2 ,
'URA': 2 ,
'TRY': 2 ,
'VIS': 2 ,
'#PH': 2 ,
'CAS': 2 ,
'ISC': 2 ,
'TIA': 2 ,
'PIN': 2 ,
'#WR': 2 ,
'LIF': 2 ,
'RIV': 2 ,
'SOC': 2 ,
'GAN': 2 ,
'EMS': 2 ,
'ALT': 2 ,
'NTL': 2 ,
'PAN': 2 ,
'SUB': 2 ,
'#KE': 2 ,
'#SY': 2 ,
'PE#': 2 ,
'DEC': 2 ,
'CKE': 2 ,
'GO#': 2 ,
'CTO': 2 ,
'FEE': 2 ,
'EFF': 2 ,
'CTU': 2 ,
'HOM': 2 ,
'HAP': 2 ,
'UL#': 2 ,
'CRI': 2 ,
'DEV': 2 ,
'#AU': 2 ,
'ELA': 2 ,
'ROO': 2 ,
'#NU': 2 ,
'AF#': 2 ,
'AMP': 2 ,
'LER': 2 ,
'PPO': 2 ,
'LOC': 2 ,
'AMI': 2 ,
'FF#': 2 ,
'ILY': 2 ,
'OSI': 2 ,
'EGA': 2 ,
'SIB': 2 ,
'#NI': 2 ,
'NDA': 2 ,
'GIV': 2 ,
'VIC': 2 ,
'HIG': 2 ,
'EMO': 2 ,
'ROA': 2 ,
'SIC': 2 ,
'IA#': 2 ,
'#ES': 2 ,
'DIF': 2 ,
'PAS': 2 ,
'CLU': 2 ,
'MME': 2 ,
'TIS': 2 ,
'TWE': 2 ,
'DEP': 2 ,
'NK#': 2 ,
'TUA': 2 ,
'LEG': 2 ,
'IP#': 2 ,
'ANO': 2 ,
'CY#': 2 ,
'SPI': 2 ,
'PLI': 2 ,
'NEE': 2 ,
'NCH': 2 ,
'#VO': 2 ,
'ATO': 2 ,
'#GU': 2 ,
'GED': 2 ,
'ANE': 2 ,
'OGR': 2 ,
'EEM': 2 ,
'IMI': 2 ,
'CLO': 2 ,
'SOL': 2 ,
'INK': 2 ,
'RDS': 2 ,
'BEF': 2 ,
'MIC': 2 ,
'ROD': 2 ,
'CTS': 2 ,
'DIE': 2 ,
'#OW': 2 ,
'BOR': 2 ,
'HRE': 2 ,
'MEM': 2 ,
'TEE': 2 ,
'BOT': 2 ,
'HIP': 2 ,
'ROC': 2 ,
'SET': 2 ,
'ROL': 2 ,
'DGE': 2 ,
'ODE': 2 ,
'ENI': 2 ,
'DIA': 2 ,
'ANA': 2 ,
'UMB': 2 ,
'#EF': 2 ,
'AUT': 2 ,
'SAM': 2 ,
'MOT': 2 ,
'SSU': 2 ,
'IET': 2 ,
'URT': 2 ,
'WS#': 2 ,
'EIG': 2 ,
'JEC': 2 ,
'ORG': 2 ,
'RNI': 2 ,
'TTI': 2 ,
'KEN': 2 ,
'ORL': 2 ,
'RUS': 2 ,
'RVE': 2 ,
'BAS': 2 ,
'MOV': 2 ,
'FIE': 2 ,
'OKE': 1 ,
'ELD': 1 ,
'WAT': 1 ,
'ORN': 1 ,
'INF': 1 ,
'OTI': 1 ,
'PT#': 1 ,
'UPP': 1 ,
'MAI': 1 ,
'GOO': 1 ,
'MUC': 1 ,
'EGI': 1 ,
'TOW': 1 ,
'POI': 1 ,
'EOP': 1 ,
'#OL': 1 ,
'OPL': 1 ,
'AVI': 1 ,
'ROG': 1 ,
'PTI': 1 ,
'SMA': 1 ,
'SEA': 1 ,
'ROT': 1 ,
'ETA': 1 ,
'EDU': 1 ,
'URC': 1 ,
'UME': 1 ,
'FT#': 1 ,
'RMI': 1 ,
'FAR': 1 ,
'CEP': 1 ,
'PUB': 1 ,
'PEO': 1 ,
'CRO': 1 ,
'LOR': 1 ,
'RIB': 1 ,
'LUE': 1 ,
'SCO': 1 ,
'NEV': 1 ,
'EXC': 1 ,
'EEP': 1 ,
'RTS': 1 ,
'MOU': 1 ,
'LVE': 1 ,
'RVI': 1 ,
'APE': 1 ,
'ISM': 1 ,
'LOG': 1 ,
'ERC': 1 ,
'MSE': 1 ,
'LOP': 1 ,
'FEC': 1 ,
'RSO': 1 ,
'#JA': 1 ,
'EPA': 1 ,
'LAI': 1 ,
'RLD': 1 ,
'TEA': 1 ,
'RKE': 1 ,
'UNG': 1 ,
'YST': 1 ,
'AW#': 1 ,
'ADD': 1 ,
'SCR': 1 ,
'RIM': 1 ,
'DIV': 1 ,
'ODU': 1 ,
'#ED': 1 ,
'LIV': 1 ,
'OLU': 1 ,
'RAG': 1 ,
'ASH': 1 ,
'UM#': 1 ,
'OES': 1 ,
'INV': 1 ,
'HY#': 1 ,
'ONO': 1 ,
'INU': 1 ,
'DEL': 1 ,
'TOM': 1 ,
'VOL': 1 ,
'CAP': 1 ,
'ONI': 1 ,
'ICU': 1 ,
'OOR': 1 ,
'#AI': 1 ,
'DUR': 1 ,
'BAN': 1 ,
'EEL': 1 ,
'OMA': 1 ,
'TME': 1 ,
'ANK': 1 ,
'ARC': 1 ,
'PAI': 1 ,
'OPP': 1 ,
'BLY': 1 ,
'UIT': 1 ,
'EXA': 1 ,
'LIA': 1 ,
'VIE': 1 ,
'OO#': 1 ,
'WAL': 1 ,
'SAY': 1 ,
'OCA': 1 ,
'SAL': 1 ,
'ERR': 1 ,
'#SW': 1 ,
'DIR': 1 ,
'NUM': 1 ,
'CCE': 1 ,
'EDE': 1 ,
'MR#': 1 ,
'AWA': 1 ,
'LLA': 1 ,
'FOL': 1 ,
'ORC': 1 ,
'GAT': 1 ,
'OAD': 1 ,
'AMO': 1 ,
'OP#': 1 ,
'DEM': 1 ,
'EEK': 1 ,
'DET': 1 ,
'BUR': 1 ,
'SIM': 1 ,
'PIC': 1 ,
'RME': 1 ,
'IE#': 1 ,
'MAG': 1 ,
'PUT': 1 ,
'MUN': 1 ,
'#ID': 1 ,
'NTH': 1 ,
'MPE': 1 ,
'RUC': 1 ,
'SAN': 1 ,
'KER': 1 ,
'NME': 1 ,
'WED': 1 ,
'ETW': 1 ,
'ASK': 1 ,
'#AW': 1 ,
'ELS': 1 ,
'SM#': 1 ,
'SUM': 1 ,
'FRA': 1 ,
'SSA': 1 ,
'NCL': 1 ,
'SLA': 1 ,
'RAP': 1 ,
'OUP': 1 ,
'CRA': 1 ,
'NOM': 1 ,
'BEI': 1 ,
'PIT': 1 ,
'HUR': 1 ,
'ARO': 1 ,
'ZED': 1 ,
'RID': 1 ,
'EAK': 1 ,
'FRI': 1 ,
'ALO': 1 ,
'MIG': 1 ,
'BEA': 1 ,
'MMU': 1 ,
'WRI': 1 ,
'IMS': 1 ,
'TEC': 1 ,
'PPR': 1 ,
'ONF': 1 ,
'HIT': 1 ,
'UAT': 1 ,
'IER': 1 ,
'NVE': 1 ,
'SOR': 1 ,
'EP#': 1 ,
'PPL': 1 ,
'REQ': 1 ,
'BAR': 1 ,
'EMI': 1 ,
'FAI': 1 ,
'EA#': 1 ,
'NNI': 1 ,
'EFE': 1 ,
'ENO': 1 ,
'WEN': 1 ,
'NDU': 1 ,
'ERF': 1 ,
'DRI': 1 ,
'NOU': 1 ,
'LAB': 1 ,
'BRA': 1 ,
'WAN': 1 ,
'EGR': 1 ,
'RTY': 1 ,
'PUR': 1 ,
'DAR': 1 ,
'ALK': 1 ,
'GOV': 1 ,
'ETS': 1 ,
'LLS': 1 ,
'SEV': 1 ,
'BEG': 1 ,
'ARN': 1 ,
'ASO': 1 ,
'ESU': 1 ,
'#GL': 1 ,
'UAR': 1 ,
'WIS': 1 ,
'MAS': 1 ,
'LIZ': 1 ,
'UDI': 1 | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from django import forms
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.forms.widgets import Textarea
# from google.appengine.api import users
from framework import users
from internals import models
from internals import processes
class MultiEmailField(forms.Field):
  """Form field that accepts a comma-separated list of email addresses."""

  def to_python(self, value):
    """Normalize raw input into a list of strings (empty input -> [])."""
    return value.split(',') if value else []

  def validate(self, value):
    """Reject the value unless every entry is a valid email address."""
    # Let the parent handle required-ness and other generic checks first.
    super(MultiEmailField, self).validate(value)
    for address in value:
      validate_email(address.strip())
def validate_url(value):
  """Raise ValidationError unless *value* matches the single-URL regex."""
  # The original `if match: pass / else: raise` inverted the condition
  # through an empty branch; test the failure case directly instead.
  if not re.match(URL_REGEX, value):
    raise ValidationError('Invalid URL', code=None, params={'value': value})
class MultiUrlField(forms.Field):
  """Form field that accepts multiple URLs, one per line."""

  def to_python(self, value):
    """Normalize raw input into a list of strings (empty input -> [])."""
    return value.split('\n') if value else []

  def validate(self, value):
    """Reject the value unless every line is a valid URL."""
    # Let the parent handle required-ness and other generic checks first.
    super(MultiUrlField, self).validate(value)
    for line in value:
      validate_url(line.strip())
# Help text shown next to the "shipped" milestone fields on the feature form.
SHIPPED_HELP_TXT = (
    'First milestone to ship with this status. Applies to: Enabled by '
    'default, Browser Intervention, Deprecated and Removed.')

# Variant of the above used for the WebView milestone field.
SHIPPED_WEBVIEW_HELP_TXT = ('First milestone to ship with this status. '
                            'Applies to Enabled by default, Browser '
                            'Intervention, Deprecated, and Removed.')

# Placeholder text displayed inside the (initially empty) summary textarea.
SUMMARY_PLACEHOLDER_TXT = (
    'NOTE: Text in the beta release post, the enterprise release notes, '
    'and other external sources will be based on this text.\n\n'
    'Begin with one line explaining what the feature does. Add one or two '
    'lines explaining how this feature helps developers. Avoid language such '
    'as "a new feature". They all are or have been new features.\n\n'
    'Write it from a web developer\'s point of view.\n\n'
    'Follow the example link below for more guidance.')
# Patterns from https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s01.html
# Removing single quote ('), backtick (`), and pipe (|) since they are risky unless properly escaped everywhere.
# Also removing ! and % because they have special meaning for some older email routing systems.
# Raw strings are used so that regex backslash escapes (e.g. \.) are not
# interpreted as (invalid) Python string escape sequences, which raise
# SyntaxWarning on Python 3.12+.  The string values are unchanged.
USER_REGEX = r'[A-Za-z0-9_#$&*+/=?{}~^.-]+'
DOMAIN_REGEX = r'(([A-Za-z0-9-]+\.)+[A-Za-z]{2,6})'

# One complete address: local part, '@', domain.
EMAIL_ADDRESS_REGEX = USER_REGEX + '@' + DOMAIN_REGEX
# One or more addresses separated by commas (optional surrounding spaces).
EMAIL_ADDRESSES_REGEX = EMAIL_ADDRESS_REGEX + '([ ]*,[ ]*' + EMAIL_ADDRESS_REGEX + ')*'

# HTML attributes applied to the multi-email input widget.
MULTI_EMAIL_FIELD_ATTRS = {
    'title': "Enter one or more comma-separated complete email addresses.",
    'multiple': True,
    'placeholder': '<EMAIL>, <EMAIL>',
    'pattern': EMAIL_ADDRESSES_REGEX
}
# From https://rodneyrehm.de/t/url-regex.html#imme_emosol+ht-%26f-tp%28s%29
# Using imme_emosol but without ftp, torrent, image, and irc
# Raw string so regex backslash escapes (\., \s) are not treated as (invalid)
# Python string escape sequences (SyntaxWarning on Python 3.12+).
# The string value is unchanged.
URL_REGEX = r'[ ]*(https?)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?[ ]*'

# Multiple URLs, one per line (r'\n' is the two-character regex escape,
# identical to the previous '\\n').
MULTI_URL_REGEX = URL_REGEX + r'(\n' + URL_REGEX + ')*'

# HTML attributes applied to the single-URL input widget.
URL_FIELD_ATTRS = {
    'title': 'Enter a full URL https://...',
    'placeholder': 'https://...',
    'pattern': URL_REGEX
}

# HTML attributes applied to the multi-URL textarea widget.
MULTI_URL_FIELD_ATTRS = {
    'title': 'Enter one or more full URLs, one per line:\nhttps://...\nhttps://...',
    'placeholder': 'https://...\nhttps://...',
    'rows': 4, 'cols': 50, 'maxlength': 5000
    # 'pattern': MULTI_URL_REGEX,  # pattern is not yet used with textarea.
}
# We define all form fields here so that they can be include in one or more
# stage-specific fields without repeating the details and help text.
ALL_FIELDS = {
'name': forms.CharField(
required=True, label='Feature name',
# Use a specific autocomplete value to avoid "name" autofill.
# https://bugs.chromium.org/p/chromium/issues/detail?id=468153#c164
widget=forms.TextInput(attrs={'autocomplete': 'feature-name'}),
help_text=
('Capitalize only the first letter and the beginnings of '
'proper nouns. '
'<a target="_blank" href="'
'https://github.com/GoogleChrome/chromium-dashboard/wiki/'
'EditingHelp#feature-name">Learn more</a>. '
'<a target="_blank" href="'
'https://github.com/GoogleChrome/chromium-dashboard/wiki/'
'EditingHelp#feature-name-example">Example</a>.'
)),
'summary': forms.CharField(
required=True,
widget=forms.Textarea(
attrs={'cols': 50, 'maxlength': 500,
'placeholder': SUMMARY_PLACEHOLDER_TXT}),
help_text=
('<a target="_blank" href="'
'https://github.com/GoogleChrome/chromium-dashboard/wiki/'
'EditingHelp#summary-example">Guidelines and example</a>.'
)),
'owner': MultiEmailField(
required=True, label='Feature owners',
widget=forms.EmailInput(attrs=MULTI_EMAIL_FIELD_ATTRS),
help_text=('Comma separated list of full email addresses. '
'Prefer @chromium.org.')),
'category': forms.ChoiceField(
required=False,
help_text=('Select the most specific category. If unsure, '
'leave as "%s".' % models.FEATURE_CATEGORIES[models.MISC]),
initial=models.MISC,
choices=sorted(models.FEATURE_CATEGORIES.items(), key=lambda x: x[1])),
'feature_type': forms.ChoiceField(
required=False,
help_text=('Select the feature type.'),
initial=models.FEATURE_TYPE_INCUBATE_ID,
choices=sorted(models.FEATURE_TYPES.items())),
'intent_stage': forms.ChoiceField(
required=False, label='Process stage',
help_text='Select the appropriate process stage.',
initial=models.INTENT_IMPLEMENT,
choices=list(models.INTENT_STAGES.items())),
'motivation': forms.CharField(
label='Motivation', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text=
('Explain why the web needs this change. It may be useful '
'to describe what web developers are forced to do without '
'it. When possible, add links to your explainer '
'backing up your claims. '
'<a target="_blank" href="'
'https://github.com/GoogleChrome/chromium-dashboard/wiki/'
'EditingHelp#motivation-example">Example</a>.'
)),
'deprecation_motivation': forms.CharField( # Sets motivation DB field.
label='Motivation', required=False,
widget=forms.Textarea(attrs={'cols': 50, 'maxlength': 1480}),
help_text=
('Deprecations and removals must have strong reasons, backed up '
'by measurements. There must be clear and actionable paths forward '
'for developers. Please see '
'<a target="_blank" href="'
'https://docs.google.com/a/chromium.org/document/d/'
'1LdqUfUILyzM5WEcOgeAWGupQILKrZHidEXrUxevyi_Y/edit?usp=sharing'
'">Removal guidelines</a>.'
)),
'doc_links': MultiUrlField(
label='Doc link(s)', required=False,
widget=forms.Textarea(attrs=MULTI_URL_FIELD_ATTRS),
help_text=('Links to design doc(s) (one URL per line), if and when '
'available. [This is not required to send out an Intent '
'to Prototype. Please update the intent thread with the '
'design doc when ready]. An explainer and/or design doc '
'is sufficient to start this process. [Note: Please '
'include links and data, where possible, to support any '
'claims.]')),
'measurement': forms.CharField(
label='Measurement', required=False,
widget=forms.Textarea(
attrs={'rows': 4, 'cols': 50, 'maxlength': 500}),
help_text=
('It\'s important to measure the adoption and success of web-exposed '
'features. Note here what measurements you have added to track the '
'success of this feature, such as a link to the UseCounter(s) you '
'have set up.')),
# 'standardization' is deprecated
'standard_maturity': forms.ChoiceField(
required=False, label='Standard maturity',
choices=list(models.STANDARD_MATURITY_CHOICES.items()),
initial=models.PROPOSAL_STD,
help_text=('How far along is the standard that this '
'feature implements?')),
'unlisted': forms.BooleanField(
required=False, initial=False,
help_text=('Check this box for draft features that should not appear '
'in the feature list. Anyone with the link will be able to '
'view the feature on the detail page.')),
'spec_link': forms.URLField(
required=False, label='Spec link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('Link to spec, if and when available. Please update the '
'chromestatus.com entry and the intent thread(s) with the '
'spec link when available.')),
'api_spec': forms.BooleanField(
required=False, initial=False, label='API spec',
help_text=('The spec document has details in a specification language '
'such as Web IDL, or there is an exsting MDN page.')),
'spec_mentors': MultiEmailField(
required=False, label='Spec mentor',
widget=forms.EmailInput(attrs=MULTI_EMAIL_FIELD_ATTRS),
help_text=
('Experienced <a target="_blank" '
'href="https://www.chromium.org/blink/spec-mentors">'
'spec mentors</a> are available to help you improve your '
'feature spec.')),
'explainer_links': MultiUrlField(
label='Explainer link(s)', required=False,
widget=forms.Textarea(attrs=MULTI_URL_FIELD_ATTRS),
help_text=('Link to explainer(s) (one URL per line). You should have '
'at least an explainer in hand and have shared it on a '
'public forum before sending an Intent to Prototype in '
'order to enable discussion with other browser vendors, '
'standards bodies, or other interested parties.')),
'security_review_status': forms.ChoiceField(
required=False,
choices=list(models.REVIEW_STATUS_CHOICES.items()),
initial=models.REVIEW_PENDING,
help_text=('Status of the security review.')),
'privacy_review_status': forms.ChoiceField(
required=False,
choices=list(models.REVIEW_STATUS_CHOICES.items()),
initial=models.REVIEW_PENDING,
help_text=('Status of the privacy review.')),
'tag_review': forms.CharField(
label='TAG Review', required=False,
widget=forms.Textarea(attrs={'rows': 2, 'cols': 50, 'maxlength': 1480}),
help_text=('Link(s) to TAG review(s), or explanation why this is '
'not needed.')),
'tag_review_status': forms.ChoiceField(
required=False,
choices=list(models.REVIEW_STATUS_CHOICES.items()),
initial=models.REVIEW_PENDING,
help_text=('Status of the tag review.')),
'intent_to_implement_url': forms.URLField(
required=False, label='Intent to Prototype link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('After you have started the "Intent to Prototype" '
' discussion thread, link to it here.')),
'intent_to_ship_url': forms.URLField(
required=False, label='Intent to Ship link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('After you have started the "Intent to Ship" discussion '
'thread, link to it here.')),
'ready_for_trial_url': forms.URLField(
required=False, label='Ready for Trial link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('After you have started the "Ready for Trial" discussion '
'thread, link to it here.')),
'intent_to_experiment_url': forms.URLField(
required=False, label='Intent to Experiment link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('After you have started the "Intent to Experiment" '
' discussion thread, link to it here.')),
'intent_to_extend_experiment_url': forms.URLField(
required=False, label='Intent to Extend Experiment link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('If this feature has an "Intent to Extend Experiment" '
' discussion thread, link to it here.')),
'r4dt_url': forms.URLField( # Sets intent_to_experiment_url in DB
required=False, label='Request for Deprecation Trial link',
widget=forms.URLInput(attrs=URL_FIELD_ATTRS),
help_text=('After you have started the "Request for Deprecation Trial" '
'discussion thread, link to it here.')),
'interop_compat_risks': forms.CharField(
required=False, label='Interoperability and Compatibility Risks',
| |
"-5",
"гов": "-5",
"ебар": "-5",
"насасыва": "-5",
"отъеба": "-5",
"шибздик": "-5",
"блядствова": "-5",
"дристун": "-5",
"пиздецов": "-5",
"наебк": "-5",
"кун": "0",
"шиш": "0",
"анус": "-5",
"покака": "-5",
"жопк": "-5",
"целк": "-5",
"подмахива": "-5",
"минетчиц": "-5",
"поджопник": "-5",
"накака": "-5",
"блядун": "-5",
"херак": "-5",
"блядова": "-5",
"высра": "-5",
"вакханк": "-5",
"бздет": "-5",
"сри": "-5",
"трепак": "-5",
"кодеш": "-5",
"бара": "-5",
"хар": "-5",
"говнист": "-5",
"захерач": "-5",
"говновоз": "-5",
"говномер": "-5",
"минетк": "-5",
"ебун": "-5",
"закака": "-5",
"бздун": "-5",
"уссыва": "-5",
"надроч": "-5",
"бзднут": "-5",
"говнода": "-5",
"захренач": "-5",
"пох": "-5",
"гузн": "-5",
"зажоп": "-5",
"ебот": "-5",
"мандец": "-5",
"жополизан": "-5",
"блудилищ": "-5",
"дриста": "-5",
"проблем": "-5",
"смерт": "-5",
"ошибк": "-5",
"конфликт": "-5",
"протест": "-5",
"нарушен": "-5",
"убийств": "-5",
"жертв": "-5",
"враг": "-5",
"преступлен": "-5",
"отсутств": "0",
"сожален": "-5",
"тюрьм": "-5",
"коррупц": "-5",
"битв": "-5",
"грех": "-5",
"угроз": "-5",
"трудност": "-5",
"сканда": "-5",
"сомнен": "-5",
"взрыв": "-5",
"вор": "-5",
"ужас": "-5",
"заболеван": "-5",
"авар": "-5",
"наркотик": "-5",
"критик": "-5",
"потер": "-5",
"наказан": "-5",
"обвинен": "-5",
"катастроф": "-5",
"трагед": "-5",
"пожар": "-5",
"паден": "-5",
"отказ": "-5",
"убийц": "-5",
"арест": "-5",
"дурак": "-5",
"поражен": "-5",
"происшеств": "-5",
"жалоб": "-5",
"гибел": "-5",
"рабств": "-5",
"насил": "-5",
"обид": "-5",
"нападен": "-5",
"ненавист": "-5",
"столкновен": "-5",
"неудач": "-5",
"террорист": "-5",
"драк": "-5",
"агресс": "-5",
"страдан": "-5",
"преступник": "-5",
"погиб": "-5",
"беспорядок": "-5",
"травм": "-5",
"уничтожен": "-5",
"жест": "-5",
"теракт": "-5",
"сопротивлен": "-5",
"претенз": "-5",
"заключен": "-5",
"фальсификац": "-5",
"глупост": "-5",
"депресс": "-5",
"лишен": "-5",
"обыск": "-5",
"пытк": "-5",
"ущерб": "-5",
"предупрежден": "-5",
"труп": "-5",
"захват": "-5",
"козел": "-5",
"ликвидац": "-5",
"тревог": "-5",
"отчаян": "-5",
"хаос": "-5",
"апокалипсис": "-5",
"дьявол": "-5",
"самоубийств": "-5",
"стрельб": "-5",
"поступлен": "-5",
"произвол": "-5",
"проститутк": "-5",
"подозрен": "0",
"осужден": "-5",
"разочарован": "-5",
"хищен": "-5",
"предательств": "-5",
"фашизм": "-5",
"сплетн": "-5",
"яд": "-5",
"блят": "-5",
"кримина": "-5",
"сражен": "-5",
"крах": "-5",
"обл": "0",
"триллер": "-5",
"крушен": "-5",
"избиен": "-5",
"краж": "-5",
"восстан": "-5",
"педоф": "-5",
"плен": "-5",
"опасен": "-5",
"чп": "-5",
"забастовк": "-5",
"несчаст": "-5",
"помех": "-5",
"паник": "-5",
"бомж": "-5",
"воровств": "-5",
"быдл": "-5",
"покушен": "-5",
"предател": "-5",
"волнен": "-5",
"твар": "-5",
"терроризм": "-5",
"диктатор": "-5",
"экстремизм": "-5",
"изнасилован": "-5",
"умерш": "-5",
"непотребнаг": "-5",
"порн": "-5",
"заложник": "-5",
"расстройств": "-5",
"террор": "-5",
"деградац": "-5",
"нищет": "-5",
"деб": "-5",
"рассеян": "-5",
"безум": "-5",
"поврежден": "-5",
"расстрел": "-5",
"сарказм": "-5",
"похищен": "-5",
"гордын": "-5",
"фу": "-5",
"маньяк": "-5",
"утрат": "-5",
"хулига": "-5",
"упадок": "-5",
"нашеств": "-5",
"унижен": "-5",
"подонок": "-5",
"хитрост": "-5",
"порок": "-5",
"недовер": "-5",
"грабител": "-5",
"нацист": "-5",
"сум": "0",
"грабеж": "-5",
"порнограф": "-5",
"подлец": "-5",
"правонарушен": "-5",
"издевательств": "-5",
"тормоз": "-5",
"нарушител": "-5",
"ограблен": "-5",
"скорб": "-5",
"отравлен": "-5",
"арестова": "-5",
"мертвец": "-5",
"пипец": "-5",
"неправд": "-5",
"подсудим": "-5",
"жадност": "-5",
"кончин": "-5",
"ярост": "-5",
"злоупотреблен": "-5",
"гадост": "-5",
"взлом": "-5",
"сноб": "-5",
"злост": "-5",
"придурок": "-5",
"обстрел": "-5",
"подлост": "-5",
"пофиг": "-5",
"вран": "-5",
"гопник": "-5",
"злоумышленик": "-5",
"педофил": "-5",
"ухудшен": "-5",
"ужесточен": "-5",
"цинизм": "-5",
"хулиганств": "-5",
"хамств": "-5",
"нафиг": "-5",
"жид": "-5",
"бездейств": "-5",
"смертност": "-5",
"соболезнован": "-5",
"неприязн": "-5",
"мраз": "-5",
"напаст": "-5",
"бойн": "-5",
"фря": "0",
"экстремист": "-5",
"высокомер": "-5",
"джихад": "-5",
"наглост": "-5",
"принужден": "-5",
"авиакатастроф": "-5",
"пессимизм": "-5",
"апат": "-5",
"недоразумен": "-5",
"нацизм": "-5",
"потрясен": "-5",
"неудобств": "-5",
"проституц": "-5",
"мерзост": "-5",
"резн": "-5",
"заражен": "-5",
"вымогательств": "-5",
"антисемитизм": "-5",
"лжец": "-5",
"гонен": "-5",
"сговор": "-5",
"жут": "-5",
"ппц": "-5",
"отвращен": "-5",
"шпионаж": "-5",
"тьфу": "-5",
"фиф": "-5",
"пидарас": "-5",
"безразлич": "-5",
"травл": "-5",
"писец": "-5",
"капец": "-5",
"упрямств": "-5",
"трэш": "-5",
"троллинг": "-5",
"конфл": "-5",
"порабощен": "-5",
"завоеван": "-5",
"мерзавец": "-5",
"мракобес": "-5",
"бомбардировк": "-5",
"порк": "-5",
"извращенец": "-5",
"переживан": "-5",
"покойник": "-5",
"фейк": "-5",
"чс": "-5",
"чудак": "-5",
"бе": "-5",
"легкомысл": "-5",
"смертник": "-5",
"захватчик": "-5",
"обрушен": "-5",
"бухар": "-5",
"вскрыт": "-5",
"поджог": "-5",
"нахр": "-5",
"фрик": "-5",
"гоблин": "-5",
"уголовник": "-5",
"глупец": "-5",
"параной": "-5",
"отст": "-5",
"пенис": "-5",
"шантаж": "-5",
"ахтунг": "-5",
"фак": "-5",
"мутк": "-5",
"шайта": "-5",
"грудк": "-5",
"гандапас": "-5",
"глюк": "-5",
"автокатастроф": "-5",
"ляп": "-5",
"отморозок": "-5",
"прыщ": "-5",
"эмпат": "-5",
"фи": "-5",
"падал": "-5",
"гомофоб": "-5",
"жертвоприношен": "-5",
"побо": "-5",
"пошлост": "-5",
"упыр": "-5",
"дикар": "-5",
"отмазк": "-5",
"агрессор": "-5",
"ер": "-5",
"тварин": "-5",
"неповиновен": "-5",
"грубост": "-5",
"вымиран": "-5",
"бездеятельн": "-5",
"угон": "-5",
"трусост": "-5",
"подавлен": "-5",
"извращен": "-5",
"зверств": "-5",
"изг": "-5",
"психоз": "-5",
"анарх": "-5",
"ирод": "-5",
"нахер": "-5",
"треш": "-5",
"антисем": "-5",
"вандализм": "-5",
"урон": "-5",
"обеспокоен": "-5",
"пренебрежен": "-5",
"стычк": "-5",
"мля": "-5",
"разграблен": "-5",
"корыст": "-5",
"лузер": "-5",
"жлоб": "-5",
"троя": "-5",
"бандитизм": "-5",
"рыл": "-5",
"несдержан": "-5",
"тошнот": "-5",
"троллит": "-5",
"панаце": "-5",
"умершаг": "-5",
"нечистаг": "-5",
"мерзкаг": "-5",
"разб": "-5",
"нифиг": "-5",
"хач": "-5",
"скин": "-5",
"ничтожеств": "-5",
"червяк": "-5",
"сумасшеств": "-5",
"ругательств": "-5",
"ненаст": "-5",
"гнид": "-5",
"гондурас": "-5",
"экстрема": "-5",
"стеб": "-5",
"непродуман": "-5",
"едош": "-5",
"кровопролит": "-5",
"позорищ": "-5",
"кретин": "-5",
"стукач": "-5",
"крейз": "-5",
"баба": "-5",
"выродок": "-5",
"черн": "0",
"показух": "-5",
"задрот": "-5",
"прот": "-5",
"жал": "-5",
"страшн": "-3.3",
"сложн": "-3.3",
"безумн": "-3.3",
"скандальн": "-3.3",
"грешн": "-3.3",
"безразличн": "-3.3",
"недопустим": "-3.3",
"чертов": "-3.3",
"душн": "-3.3",
"заразн": "-3.3",
"мелочн": "-3.3",
"тягостн": "-3.3",
"негож": "-3.3",
"гуля": "-3.3",
"зазорн": "-3.3",
"чумн": "-3.3",
"муторн": "-3.3",
"лев": "0",
"стар": "-3.3",
"невозможн": "-3.3",
"уголовн": "-3.3",
"минусов": "-3.3",
"лишн": "-3.3",
"остр": "-3.3",
"темн": "-3.3",
"строг": "-3.3",
"толст": "-3.3",
"вооружен": "-3.3",
"бездомн": "-3.3",
"недалек": "-3.3",
"психическ": "-3.3",
"критическ": "-3.3",
"бессмыслен": "-3.3",
"болезн": "-3.3",
"судн": "-3.3",
"вирусн": "-3.3",
"глух": "-3.3",
"козлов": "-3.3",
"потеря": "-3.3",
"тюремн": "-3.3",
"роков": "-3.3",
"рискова": "-3.3",
"избит": "-3.3",
"пиратск": "-3.3",
"недоступн": "-3.3",
"властн": "-3.3",
"мят": "0",
"необоснова": "-3.3",
"безработн": "-3.3",
"несанкционирова": "-3.3",
"запретн": "-3.3",
"ветх": "-3.3",
"чужд": "-3.3",
"противоречив": "-3.3",
"безжалостн": "-3.3",
"дорогостоя": "-3.3",
"повин": "-3.3",
"онкологическ": "-3.3",
"провокацион": "-3.3",
"обвинительн": "-3.3",
"поверхностн": "-3.3",
"тума": "-3.3",
"совк": "-3.3",
"подпольн": "-3.3",
"непосильн": "-3.3",
"дождлив": "-3.3",
"неподходя": "-3.3",
"хаотичн": "-3.3",
"кисл": "-3.3",
"пресловут": "-3.3",
"избыточн": "-3.3",
"инвалидн": "-3.3",
"радиоактивн": "-3.3",
"навязчив": "-3.3",
"окровавлен": "-3.3",
"рабск": "-3.3",
"тенев": "-3.3",
"бродяч": "-3.3",
"адочайш": "-3.3",
"туг": "-3.3",
"поспешн": "-3.3",
"сереньк": "-3.3",
"чиновнич": "-3.3",
"липов": "-3.3",
"мним": "-3.3",
"натянут": "-3.3",
"бюрократическ": "-3.3",
"козловск": "-3.3",
"тощ": "-3.3",
"ржав": "-3.3",
"рван": "-3.3",
"пресн": "-3.3",
"травматическ": "-3.3",
"пьющ": "-3.3",
"раним": "-3.3",
"нереалистичн": "-3.3",
"нашист": "-3.3",
"анальн": "-3.3",
"токсичн": "-3.3",
"патологическ": "-3.3",
"тускл": "-3.3",
"нелегитимн": "-3.3",
"несостоя": "-3.3",
"ломк": "-3.3",
"болев": "-3.3",
"истощен": "-3.3",
"опальн": "-3.3",
"малоимущ": "-3.3",
"оккупацион": "-3.3",
"неуверен": "-3.3",
"тормозн": "-3.3",
"несовершен": "-3.3",
"глухонем": "-3.3",
"порушен": "-3.3",
"нежил": "-3.3",
"неровн": "-3.3",
"непоправим": "-3.3",
"сквернаг": "-3.3",
"непопулярн": "-3.3",
"слезоточив": "-3.3",
"несбыточн": "-3.3",
"психотропн": "-3.3",
"беспорядочн": "-3.3",
"непредвиден": "-3.3",
"неуправля": "-3.3",
"бесплодн": "-3.3",
"безлик": "-3.3",
"неспокойн": "-3.3",
"обманчив": "-3.3",
"неработа": "-3.3",
"бесцельн": "-3.3",
"неестествен": "-3.3",
"безбашен": "-3.3",
"неопозна": "-3.3",
"скова": "-3.3",
"дыряв": "-3.3",
"супертяжел": "-3.3",
"зациклен": "-3.3",
"препятствен": "-3.3",
"антиправительствен": "-3.3",
"желчн": "-3.3",
"повержен": "-3.3",
"шальн": "-3.3",
"вспыльчив": "-3.3",
"старческ": "-3.3",
"разгромн": "-3.3",
"людоедск": "-3.3",
"мещанск": "-3.3",
"непослушн": "-3.3",
"инакомысля": "-3.3",
"тупиков": "-3.3",
"заблудш": "-3.3",
"скоропостижн": "-3.3",
"незатейлив": "-3.3",
"дорогущ": "-3.3",
"нерешен": "-3.3",
"ненастоя": "-3.3",
"затратн": "-3.3",
"скрытн": "-3.3",
"грибков": "-3.3",
"душераздира": "-3.3",
"срны": "-3.3",
"немощн": "-3.3",
"феминистск": "-3.3",
"неотвратим": "-3.3",
"онкобольн": "-3.3",
"неохотн": "-3.3",
"недоношен": "-3.3",
"нестерпим": "-3.3",
"нелогичн": "-3.3",
"небогат": "-3.3",
"помят": "-3.3",
"леденя": "-3.3",
"гробов": "-3.3",
"демоническ": "-3.3",
"невзрачн": "-3.3",
"стыдлив": "-3.3",
"нечестив": "-3.3",
"неисправим": "-3.3",
"кровопролитн": "-3.3",
"несогласова": "-3.3",
"летальн": "-3.3",
"суетлив": "-3.3",
"апокалиптическ": "-3.3",
"неправедн": "-3.3",
"незван": "-3.3",
"непозволительн": "-3.3",
"неудовлетворительн": "-3.3",
"душевнобольн": "-3.3",
"неположен": "-3.3",
"расчетлив": "-3.3",
"ненадлежа": "-3.3",
"обидчив": "-3.3",
"неисправн": "-3.3",
"эгоистическ": "-3.3",
"брезглив": "-3.3",
"заклят": "-3.3",
"жидовск": "-3.3",
"безвкусн": "-3.3",
"зловон": "-3.3",
"неразборчив": "-3.3",
"издевательск": "-3.3",
"необразова": "-3.3",
"шлаков": "-3.3",
"контрафактн": "-3.3",
"антиконституцион": "-3.3",
"заносчив": "-3.3",
"несвободн": "-3.3",
"безрук": "-3.3",
"беспросветн": "-3.3",
"раздут": "-3.3",
"расстрельн": "-3.3",
"диктаторск": "-3.3",
"окая": "-3.3",
"беспринципн": "-3.3",
"непроходим": "-3.3",
"неактивн": "-3.3",
"тривиальн": "-3.3",
"несостоятельн": "-3.3",
"каверзн": "-3.3",
"нецелесообразн": "-3.3",
"безвольн": "-3.3",
"злополучн": "-3.3",
"беспризорн": "-3.3",
"вероломн": "-3.3",
"промозгл": "-3.3",
"шопоголик": "-3.3",
"незадачлив": "-3.3",
"погребальн": "-3.3",
"противоестествен": "-3.3",
"дефицитн": "-3.3",
"нереализова": "-3.3",
"могильн": "-3.3",
"безучастн": "-3.3",
"беззуб": "-3.3",
"бесовск": "-3.3",
"отъявлен": "-3.3",
"потн": "-3.3",
"показн": | |
in the batch
errorEpoch += errorBatch/batchSize
# print(errorBatch)
# Get the derivative of the output cost function wrt to the output vector of the output layer
# The input arguments should always be an array
dc_daL = gradErrorFunc(a[nLayers], outExpected)
# if np.isnan(dc_daL).any():
# print('loss grad is nan')
# print(a[nLayers])
# print(iEpoch)
# print(iBatch)
# Average it out
dc_daL = dc_daL/batchSize
if iEpoch==0 and iBatch==0:
opt_expr = generateExpressions(nLayers, batchSize, z, a, dc_daL, activationFunc, weights)
# Perform Back Propagation and get the derivatives wrt the weights and biases
derWeights, derBiases, weights, biases = back_propagation_fast(z, a, activationFunc, nLayers, batchSize, weights, biases, eeta, dc_daL,opt_expr)
# Average over the batches
errors.append(errorEpoch/nBatches)
if get_accuracy:
accuracies.append(accuracy_epoch/nBatches)
if(iEpoch==0):
print('Average Error with initial weights and biases:', errorEpoch/nBatches)
if get_accuracy:
return weights, biases, errors, accuracies
else:
return weights, biases, errors
# act_func_dict = {'Sigmoid':activation.Sigmoid,'ReLU':activation.ReLU,'Softmax':activation.Softmax}
# act_func_grad_dict = {'Sigmoid':activation.Sigmoid_grad,'ReLU':activation.ReLU_grad,'Softmax':activation.Softmax_grad}
##------------------CUPY----------------------
# act_func_dict_cupy = {'Sigmoid':activation.Sigmoid_cupy,'ReLU':activation.ReLU_cupy,'Softmax':activation.Softmax_cupy}
# act_func_grad_dict_cupy = {'Sigmoid':activation.Sigmoid_grad_cupy,'ReLU':activation.ReLU_grad_cupy,'Softmax':activation.Softmax_grad_cupy}
def init_params_cupy(nInputs, neurons_per_layer, method='random2', dtype='float32'):
    '''
    Initialize the network's weight matrices and bias vectors on the GPU (CuPy).

    Parameters:
        nInputs: number of input features (size of the input layer)
        neurons_per_layer: list with the number of nodes in each layer
            (excluding the input layer)
        method: weight-initialization scheme; one of 'random1', 'random2',
            'random3', 'Xavier', 'NormXavier', 'He'
        dtype: if 'float32', weights and biases are cast to cp.float32

    Returns:
        weights: list of 2D cupy arrays of shape (nodes_in_layer, fan_in)
        biases: list of 1D cupy arrays (all zeros), one per layer

    References:
        https://machinelearningmastery.com/weight-initialization-for-deep-learning-neural-networks/
        https://towardsdatascience.com/weight-initialization-in-neural-networks-a-journey-from-the-basics-to-kaiming-954fb9b47c79
        A very good read, for implementing future methods:
        https://adityassrana.github.io/blog/theory/2020/08/26/Weight-Init.html
        TODO Fixup init https://paperswithcode.com/method/fixup-initialization
    '''
    nLayers = len(neurons_per_layer)
    weights = [None] * nLayers
    biases = [None] * nLayers
    for layer in range(nLayers):
        # Fan-in of this layer: the input features for the first layer,
        # otherwise the size of the preceding layer.
        fanIn = nInputs if layer == 0 else neurons_per_layer[layer - 1]
        shape = (neurons_per_layer[layer], fanIn)
        if method == 'random1':
            # Uniform random numbers in [0.0, 1.0]
            weights[layer] = cp.random.uniform(low=0.0, high=1.0, size=shape)
        elif method == 'random2':
            # Uniform random numbers in [-0.3, 0.3]
            weights[layer] = cp.random.uniform(low=-0.3, high=0.3, size=shape)
        elif method == 'random3':
            # Uniform random numbers in [-1.0, 1.0]
            weights[layer] = cp.random.uniform(low=-1.0, high=1.0, size=shape)
        elif method == 'Xavier':
            # Reference: https://paperswithcode.com/method/xavier-initialization
            # Uniform in [-1/sqrt(N), 1/sqrt(N)] where N is the fan-in.
            sqrtN = cp.sqrt(fanIn)
            weights[layer] = cp.random.uniform(
                low=-1. / sqrtN, high=1. / sqrtN, size=shape)
        elif method == 'NormXavier':
            # Reference: https://paperswithcode.com/method/xavier-initialization
            # NOTE(review): this uses 6/(sqrt(N)+sqrt(M)); normalized Xavier is
            # usually written with sqrt(6) -- kept exactly as in the original.
            sqrtN = cp.sqrt(fanIn)
            sqrtM = cp.sqrt(neurons_per_layer[layer])
            weights[layer] = cp.random.uniform(
                low=-6. / (sqrtN + sqrtM), high=6. / (sqrtN + sqrtM), size=shape)
        elif method == 'He':
            # Normal distribution with scale sqrt(2/N), N being the fan-in.
            weights[layer] = cp.random.normal(
                loc=0.0, scale=cp.sqrt(2. / fanIn), size=shape)
        # Biases start at zero.
        biases[layer] = cp.zeros(neurons_per_layer[layer])
    # Cast to the requested dtype so downstream math matches expectations.
    if dtype == 'float32':
        for i in range(len(weights)):
            weights[i] = weights[i].astype(cp.float32)
            biases[i] = biases[i].astype(cp.float32)
    return weights, biases
def forward_feed_cupy(x, nLayers, weights, biases, activationFunc):
    '''
    Run a forward pass through the network (CuPy/GPU version).

    Parameters:
        x: 2D array of inputs of size m x n; m: batch size, n: no. of
            inputs/features
        nLayers: the no. of layers in the network excluding the input layer
        weights: python list (length nLayers) of 2D weight arrays;
            weights[i] has size (k, n) where k is the number of nodes in the
            current layer and n the number of nodes in the preceding layer
        biases: python list (length nLayers) of bias arrays, one per layer
        activationFunc: list (length nLayers) of names of supported
            activation functions, one per layer

    Returns:
        a: list of activated outputs coming out of each layer, including the
            input layer (length nLayers+1, a[0] is x itself)
        z: list of inputs going into each layer, excluding the input layer
            (length nLayers)
    '''
    a = [None] * (nLayers + 1)
    z = [None] * nLayers
    a[0] = x
    for layer in range(nLayers):
        # z = a_prev . W^T + b, evaluated with opt_einsum on the GPU backend.
        z[layer] = contract('ij,kj->ik', a[layer], weights[layer],
                            backend='cupy') + biases[layer]
        # Apply this layer's activation function.
        actFunc = utils.act_func_dict_cupy[activationFunc[layer]]
        a[layer + 1] = actFunc(z[layer])
    return a, z
def back_propagation_cupy(z, a, sigmaPrime, nLayers, nSamples, weights, biases, eeta, dc_daL):
    '''
    Backpropagate errors and take one gradient-descent step (CuPy/GPU version).

    Parameters:
        z: list of input vectors (different sizes) at each layer
        a: list of output vectors after the application of act func
            (different sizes) at each layer; a[0] is the batch input
        sigmaPrime: list of activation-function names; their gradient
            functions are looked up in utils.act_func_grad_dict_cupy
        nLayers: number of layers excluding the input layer
        nSamples: batch size (recomputed below from a[0])
        weights, biases: current parameters, one entry per layer
        eeta: learning rate
        dc_daL: a vector that gives the derivative of the Cost function wrt
            the output vector coming out of the output layer

    Returns:
        derWeights, derBiases: gradients wrt weights and biases
        newWeights, newBiases: parameters after one update step
    '''
    nSamples = a[0].shape[0]
    delta = [None] * (nLayers+1)
    derWeights = [None] * nLayers
    derBiases = [None] * nLayers
    # Delta at the output layer; Softmax needs a Jacobian-times-vector product.
    sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[nLayers-1]] # Act func gradient for this layer
    if sigmaPrime[nLayers-1] == 'Softmax':
        delta[nLayers] = softmaxTimesVector_cupy(
            sigmaPrime_layer(z[nLayers-1]).astype(cp.float32),
            dc_daL.astype(cp.float32))
    else:
        delta[nLayers] = sigmaPrime_layer(z[nLayers-1])*dc_daL
    # Shallow copies: entries are replaced below; the input lists themselves
    # are not mutated.
    newWeights = weights[:]
    newBiases = biases[:]
    derWeights[nLayers-1] = cp.einsum('ji,jk->ik', delta[nLayers], a[nLayers-1])
    newWeights[nLayers-1] = weights[nLayers-1] - eeta*derWeights[nLayers-1]
    derBiases[nLayers-1] = cp.sum(delta[nLayers], axis=0)
    newBiases[nLayers-1] = biases[nLayers-1] - eeta*derBiases[nLayers-1]
    # Propagate the deltas backwards through the hidden layers.
    for l in range(nLayers-1, 0, -1):
        temp = cp.einsum('ik,lk->li', weights[l].T, delta[l+1])
        sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[l-1]] # Act func gradient for this layer
        if sigmaPrime[l-1] == 'Softmax':
            delta[l] = softmaxTimesVector_cupy(
                sigmaPrime_layer(z[l-1]).astype(cp.float32), temp)
        else:
            delta[l] = sigmaPrime_layer(z[l-1])*temp
        derWeights[l-1] = cp.einsum('ji,jk->ik', delta[l], a[l-1])
        # FIX: was `cp.asarray(np.sum(delta[l], axis=0))` -- summing a CuPy
        # array with np.sum relies on NumPy's dispatch mechanism; use cp.sum
        # directly, consistent with the output-layer branch above.
        derBiases[l-1] = cp.sum(delta[l], axis=0)
        newWeights[l-1] = weights[l-1] - eeta*derWeights[l-1]
        newBiases[l-1] = biases[l-1] - eeta*derBiases[l-1]
    return derWeights, derBiases, newWeights, newBiases
def softmaxTimesVector_cupy(a, b):
    '''
    Batched matrix-vector product: for each sample i, multiply the 2D
    Jacobian a[i] with the vector b[i], giving output[i] = a[i] @ b[i].

    Reference: https://stackoverflow.com/questions/59289754/numpy-multiply-3d-array-with-2d-array
    (cp.einsum and the `a @ b[..., newaxis]` form are equally fast
    alternatives; opt_einsum's contract is used here.)
    '''
    return contract('ijk,ik->ij', a, b, backend='cupy')
def back_propagation_fast_cupy(z, a, sigmaPrime, nLayers, nSamples, weights, biases, eeta, dc_daL, opt_einsum_expr):
    '''
    Backpropagation on the GPU (CuPy), using pre-compiled opt_einsum
    contraction expressions (built by generateExpressions_cupy) for the
    weight-gradient einsums.

    z: list of input vectors (different sizes) at each layer
    a: list of output vectors after the application of act func (different sizes) at each layer
    sigmaPrime: list of activation-function names; the derivative function is
        looked up per layer in utils.act_func_grad_dict_cupy
    nLayers: number of layers excluding the input layer
    nSamples: ignored on entry; recomputed from a[0] below
    weights, biases: per-layer parameter arrays
    eeta: learning rate for the gradient-descent update
    dc_daL: a vector that gives the derivative of the Cost function wrt to the output
        vector coming out of the output layer
    opt_einsum_expr: pre-compiled 'ji,jk->ik' contraction expressions;
        entry 0 is the output layer, later entries the hidden layers in
        reverse order

    Returns (derWeights, derBiases, newWeights, newBiases).
    '''
    nSamples = a[0].shape[0]
    # delta[l] is the error term of layer l (1-indexed; delta[0] unused)
    delta = [None] * (nLayers+1)
    derWeights = [None] * nLayers
    derBiases = [None] * nLayers
    # --- output layer ---
    sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[nLayers-1]] # Act func gradient for this layer
    if sigmaPrime[nLayers-1] =='Softmax':
        # softmax derivative is a per-sample Jacobian; apply it to dc_daL
        delta[nLayers] = softmaxTimesVector_cupy(sigmaPrime_layer(z[nLayers-1]).astype(z[0].dtype),dc_daL.astype(z[0].dtype))
    else:
        delta[nLayers] = sigmaPrime_layer(z[nLayers-1])*dc_daL
    # shallow copies: per-layer arrays are replaced below, never mutated
    newWeights = weights[:]#.copy()
    newBiases = biases[:]#.copy()
    derWeights[nLayers-1] = opt_einsum_expr[0](delta[nLayers],a[nLayers-1], backend='cupy')
    newWeights[nLayers-1] = weights[nLayers-1] - eeta*derWeights[nLayers-1]
    derBiases[nLayers-1] = cp.sum(delta[nLayers],axis=0)
    newBiases[nLayers-1] = biases[nLayers-1] - eeta*derBiases[nLayers-1]
    # --- hidden layers, walked backwards ---
    ioptexpr=1
    for l in range(nLayers-1,0,-1):
        # back-propagate the error of layer l+1 through its weights
        temp = contract('ik,lk->li',weights[l].T, delta[l+1], backend='cupy',dtype=z[0].dtype)
        sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[l-1]] # Act func gradient for this layer
        if sigmaPrime[l-1] =='Softmax':
            delta[l] = softmaxTimesVector_cupy(sigmaPrime_layer(z[l-1]).astype(z[0].dtype),temp)
        else:
            delta[l] = sigmaPrime_layer(z[l-1])*temp
        derWeights[l-1] = opt_einsum_expr[ioptexpr](delta[l],a[l-1], backend='cupy')
        ioptexpr=ioptexpr+1
        derBiases[l-1] = cp.asarray(cp.sum(delta[l],axis=0))
        newWeights[l-1] = weights[l-1] - eeta*derWeights[l-1]
        newBiases[l-1] = biases[l-1] - eeta*derBiases[l-1]
    return derWeights, derBiases, newWeights, newBiases
def generateExpressions_cupy(nLayers, nSamples, z, a, dc_daL, sigmaPrime, weights):
    """Pre-compile opt_einsum contraction expressions for the weight gradients.

    Runs one dry backward pass (on CuPy arrays) purely to discover the shapes
    of delta and a at every layer, and builds a contract_expression for each
    'ji,jk->ik' weight-gradient einsum.  The returned list is consumed by
    back_propagation_fast_cupy: entry 0 belongs to the output layer, later
    entries to the hidden layers in reverse order.

    nSamples is kept in the signature for backward compatibility but is no
    longer used (shapes are taken directly from the arrays).
    """
    delta = [None] * (nLayers+1)
    opt_einsum_expr = []
    sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[nLayers-1]]  # Act func gradient for this layer
    if sigmaPrime[nLayers-1] == 'Softmax':
        # BUGFIX: use the CuPy variant, consistent with every other *_cupy
        # routine here; the NumPy softmaxTimesVector would fail on (or force
        # a transfer of) these GPU arrays.
        delta[nLayers] = softmaxTimesVector_cupy(sigmaPrime_layer(z[nLayers-1]).astype(cp.float32), dc_daL.astype(cp.float32))
    else:
        delta[nLayers] = sigmaPrime_layer(z[nLayers-1])*dc_daL
    opt_einsum_expr.append(contract_expression('ji,jk->ik', delta[nLayers].shape, a[nLayers-1].shape))
    for l in range(nLayers-1, 0, -1):
        # Single einsum instead of a per-sample cp.dot loop: same result,
        # and consistent with how back_propagation_cupy computes temp.
        temp = cp.einsum('ik,lk->li', weights[l].T, delta[l+1])
        sigmaPrime_layer = utils.act_func_grad_dict_cupy[sigmaPrime[l-1]]  # Act func gradient for this layer
        if sigmaPrime[l-1] == 'Softmax':
            delta[l] = softmaxTimesVector_cupy(sigmaPrime_layer(z[l-1]).astype(cp.float32), temp)
        else:
            delta[l] = sigmaPrime_layer(z[l-1])*temp
        opt_einsum_expr.append(contract_expression('ji,jk->ik', delta[l].shape, a[l-1].shape))
    return opt_einsum_expr
def nn_optimize_cupy(inputs, outputs, activationFunc, nLayers, nEpochs=10, batchSize=None, eeta=0.5, weights=None, biases=None, | |
"""
Code here stores automatic analysis routines for ABF files given their protocol.
There are several analysis routines which are general (show all sweeps
continuously, show sweeps stacked, show sweeps overlayed, etc) and can be used
for almost any protocol (or ABFs with unknown protocol).
Some analysis routines are specific for specific protocols.
These routines are highly specific to the nature of the scientific work I do,
and this document may not be useful to others beyond an example of how to use
pyABF to set-up an automatic analysis pipeline for electrophysiology data.
"""
import os
PATH_HERE = os.path.dirname(__file__)
PATH_DATA = os.path.abspath(os.path.dirname(__file__)+"/../../data/abfs/")
import sys
sys.path.insert(0, PATH_HERE+"/../../src/")
sys.path.append(R"C:\Users\swharden\Documents\GitHub\pyABF\src")
import pyabf
import os
import numpy as np
import matplotlib.pyplot as plt
import logging
log = logging.getLogger(__name__)
log.debug(f"autoabf imported")
log.setLevel(level=logging.WARN)
# default size of the images being made
FIGSIZE = (8, 6)
FIGSIZE_WIDE = (FIGSIZE[0]*1.6, FIGSIZE[1]*1)
# automatically generated figures are saved in this subfolder
from abfnav import DATAFOLDER
# Little operations to apply on graphs
def _secLookUp(abf, timeSec1, timeSec2, returnPoints=False):
    """Resolve optional start/end times to concrete values.

    A None start defaults to 0 and a None end defaults to the sweep length.
    Returns (start, end) in seconds, or as integer point indices when
    returnPoints is True.
    """
    assert isinstance(abf, pyabf.ABF)
    start = 0 if timeSec1 is None else timeSec1
    end = abf.sweepLengthSec if timeSec2 is None else timeSec2
    if returnPoints:
        return int(start * abf.dataRate), int(end * abf.dataRate)
    return start, end
def shadeDigitalOutput(abf, digitalOutputChannel=4, color='r'):
    """In sweep view, shade every epoch whose digital output is high (1)."""
    log.debug("shading digital outputs")
    digitalWaveforms = pyabf.stimulus.digitalWaveformEpochs(abf)
    epochPoints = pyabf.stimulus.epochPoints(abf)
    stateByEpoch = digitalWaveforms[digitalOutputChannel]
    for epochIndex, state in enumerate(stateByEpoch):
        if state != 1:
            continue
        spanStart = epochPoints[epochIndex] * abf.dataSecPerPoint
        spanEnd = epochPoints[epochIndex + 1] * abf.dataSecPerPoint
        plt.axvspan(spanStart, spanEnd, color=color, alpha=.3, lw=0)
def shadeAllBackgrounds(color=(1.0, 1.0, 0.9)):
    """Apply a single background color to every subplot of the current figure.

    color: any matplotlib-accepted color (default is a pale yellow RGB tuple).
    """
    # BUGFIX: the argument must go through logging's lazy %-formatting.
    # log.debug("msg", color) has no placeholder for the extra argument and
    # raises TypeError inside logging whenever DEBUG records are emitted.
    log.debug("shading all backgrounds %s", color)
    for ax in plt.gcf().axes:
        ax.set_facecolor(color)
def addComments(abf, minutes=False, showLabels=True):
    """Mark every ABF tag comment on the current time-domain plot.

    Draws a dashed vertical line at each tag time (converted to minutes when
    ``minutes`` is True) and, when ``showLabels`` is True, a vertical text
    label just below the top of the axes.
    """
    log.debug("adding comments to graphs")
    assert isinstance(abf, pyabf.ABF)
    if not abf.tagComments:
        return
    for labelText, tagSec in zip(abf.tagComments, abf.tagTimesSec):
        xPos = tagSec / 60.0 if minutes else tagSec
        plt.axvline(xPos, color='r', lw=2, alpha=.5, ls='--')
        _, _, yBottom, yTop = plt.axis()
        labelY = yTop - abs(yTop - yBottom) * .02
        if showLabels:
            plt.text(xPos, labelY, labelText, color='r', rotation='vertical',
                     ha='right', va='top', weight='bold', alpha=.5, size=8)
### Code here acts on the active matplotlib figure or subplot ###
def plotFigNew(abf, figsize=FIGSIZE):
    """Open a fresh matplotlib figure for the given ABF (size in inches)."""
    log.debug("creating new figure")
    plt.figure(figsize=figsize)
def plotFigSave(abf, tag="", tight=True, closeToo=True, grid=True,
                unknown=False, title=None, labelAxes=True):
    """Decorate the current figure and save it next to the ABF file.

    The image is written to <abf dir>/<DATAFOLDER>/<abfID>_<tag>.png (with an
    extra "_UNKNOWN_" marker when unknown=True).  Single-subplot figures can
    get a title and axis labels; all subplots can get a grid; x axes labeled
    in seconds that span more than two minutes are relabeled in minutes.
    NOTE(review): when unknown=True this mutates abf.protocol in place by
    appending "(UNKNOWN)" -- calling twice appends twice.
    """
    log.debug("saving figure outputs")
    assert isinstance(abf, pyabf.ABF)
    # apply title/axis labels only to single-subplot figures
    if len(plt.gcf().axes) == 1:
        if title:
            plt.title(title)
        if labelAxes:
            plt.ylabel(abf.sweepLabelY)
            plt.xlabel(abf.sweepLabelX)
    # apply a grid to all subplots
    if grid:
        for i, ax in enumerate(plt.gcf().axes):
            ax.grid(alpha=.5, ls="--")
    # decorate unknown-protocol plots specially: red label + pink background
    shade_unknown_graphs = True
    if unknown:
        abf.protocol = abf.protocol + "(UNKNOWN)"
        protocolColor = "r"
        if unknown and shade_unknown_graphs:
            for i, ax in enumerate(plt.gcf().axes):
                ax.set_facecolor((1.0, 0.9, 0.9))
    else:
        protocolColor = '.5'
    # optionally tighten subplot spacing
    if tight:
        plt.tight_layout()
    # convert horizontal units to minutes for long second-labeled axes
    for ax in plt.gcf().axes:
        if not "sec" in ax.get_xlabel():
            continue
        if ax.get_xticks()[-1] < 120:
            continue
        xticks = ["%.02f" % (x/60) for x in ax.get_xticks()]
        ax.set_xticklabels(xticks)
        ax.set_xlabel("time (minutes)")
    # stamp the ABF ID and protocol into the lower-left corner
    plt.gcf().text(0.005, 0.005, f"{abf.abfID}\n{abf.protocol}",
                   transform=plt.gca().transAxes, fontsize=10,
                   verticalalignment='bottom', family='monospace',
                   color=protocolColor)
    abfDir = os.path.dirname(abf.abfFilePath)
    if unknown:
        fnOut = abf.abfID+"_UNKNOWN_"+tag+".png"
    else:
        fnOut = abf.abfID+"_"+tag+".png"
    pathOut = os.path.join(abfDir, DATAFOLDER, fnOut)
    # create the output folder on first use (single level: parent must exist)
    if not os.path.exists(os.path.dirname(pathOut)):
        log.info(f"creating {os.path.dirname(pathOut)}")
        os.mkdir(os.path.dirname(pathOut))
    log.debug(f"saving {fnOut}")
    plt.savefig(pathOut)
    if closeToo:
        plt.close()
    return
# Code here indicates how to make common graphs
def generic_ap_steps(abf):
    """Four-panel summary for action-potential step protocols.

    Panels: (1) all sweeps overlayed, (2) all sweeps stacked, (3) the first
    sweep containing an AP (zoomed to the first second), (4) an AP gain
    (frequency vs applied current) curve for selected epochs.
    """
    log.debug("generic plot: AP steps")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    # all sweeps overlayed
    axOvr = plt.gcf().add_subplot(2, 2, 1)
    pyabf.plot.sweeps(abf, axis=axOvr, alpha=.5)
    axOvr.set_title(f"Sweep Overlay")
    # all sweeps stacked, offset by 100 sweep units each
    axStack = plt.gcf().add_subplot(2, 2, 2)
    pyabf.plot.sweeps(abf, axis=axStack, alpha=.5, offsetYunits=100)
    axStack.set_title(f"Sweeps Stacked")
    # find the first sweep whose first second exceeds 10 sweep units
    # (presumably mV, i.e. the sweep contains an AP -- confirm with protocol)
    axAp = plt.gcf().add_subplot(2, 2, 3)
    p1, p2 = _secLookUp(abf, 0, 1, True)
    for sweep in abf.sweepList:
        abf.setSweep(sweep)
        if np.max(abf.sweepY[p1:p2]) > 10:
            break
    # note: if no sweep crosses threshold, the last sweep is plotted
    pyabf.plot.sweeps(abf, sweepNumbers=[abf.sweepNumber], axis=axAp, alpha=1)
    axAp.axis([p1/abf.dataRate, p2/abf.dataRate, None, None])
    axAp.set_title(f"First Action Potential")
    # AP gain curve for epochs 1 and 4 (skipped if the protocol has fewer epochs)
    axGain = plt.gcf().add_subplot(2, 2, 4)
    for epochNumber, color in zip([1, 4], ['C0', 'C1']):
        if epochNumber >= len(pyabf.stimulus.epochValues(abf)[0]):
            continue
        currents = pyabf.stimulus.epochValues(abf)[:, epochNumber]
        epochSec1 = pyabf.stimulus.epochPoints(abf)[epochNumber]/abf.dataRate
        epochSec2 = pyabf.stimulus.epochPoints(abf)[epochNumber+1]/abf.dataRate
        # NOTE(review): assumed binned vs first-interval AP frequency --
        # confirm against pyabf.ap.ap_freq_per_sweep
        [apFreqInBin, apFreqFirst] = pyabf.ap.ap_freq_per_sweep(
            abf, epochNumber)
        axGain.plot(currents, apFreqInBin, '.-', color=color)
        axGain.plot(currents, apFreqFirst, '.:', color=color)
        # shade the analyzed epoch on the stacked-sweeps panel
        axStack.axvspan(epochSec1, epochSec2, color=color, alpha=.1)
    axGain.set_title(f"AP Gain Curve")
    axGain.set_ylabel("AP Frequency (Hz)")
    axGain.set_xlabel("Applied Current (pA)")
    # 40 Hz reference line
    axGain.axhline(40, color='r', alpha=.2, ls='--', lw=2)
    plotFigSave(abf, tag="generic-overlay", labelAxes=False)
def generic_iv(abf, timeSec1, timeSec2, sweepStepMv, firstSweepMv, filter=True):
    """Plot an I/V relationship measured between two time points.

    timeSec1/timeSec2: measurement window within each sweep (seconds).
    sweepStepMv/firstSweepMv: used to reconstruct the command voltage of
        each sweep as firstSweepMv + sweepNumber*sweepStepMv.
    filter: apply a gaussian low-pass filter before measuring.
        (NOTE(review): parameter shadows the builtin 'filter'; renaming
        would break keyword callers, so it is left as-is.)
    Left panel: source sweeps with the measurement window shaded.
    Right panel: mean current vs command voltage.
    """
    log.debug("generic plot: IV curve")
    # enable lowpass filter
    if filter:
        pyabf.filter.gaussian(abf, 2)
    # measure currents for each step
    currentAvg = pyabf.stats.rangeAverage(abf, timeSec1, timeSec2)
    currentErr = pyabf.stats.rangeStdev(abf, timeSec1, timeSec2)  # NOTE(review): computed but never used
    voltage = np.arange(abf.sweepCount)*sweepStepMv+firstSweepMv
    plotFigNew(abf, figsize=FIGSIZE_WIDE)  # double wide
    ax1 = plt.gcf().add_subplot(1, 2, 1)
    ax2 = plt.gcf().add_subplot(1, 2, 2)
    # create the overlay figure
    pyabf.plot.sweeps(abf, axis=ax1, linewidth=2, alpha=.8)
    ax1.axvspan(timeSec1, timeSec2, color='r', alpha=.1)
    ax1.set_title(f"{abf.abfID} I/V Source Sweeps")
    # zoom the y axis to the measured current range plus a 20% margin
    dY = (np.nanmax(currentAvg) - np.nanmin(currentAvg))*.2
    ax1.axis([None, None, np.nanmin(currentAvg)-dY, np.nanmax(currentAvg)+dY])
    # create the IV curve with zero-current and -70 mV reference lines
    ax2.axhline(0, ls='--', alpha=.5, color='k')
    ax2.axvline(-70, ls='--', alpha=.5, color='k')
    ax2.plot(voltage, currentAvg, '.-', lw=2, ms=20)
    ax2.set_ylabel("Current (pA)")
    ax2.set_xlabel("Voltage (mV)")
    ax2.set_title(f"{abf.abfID} I/V Relationship")
    plotFigSave(abf, tag="IV", labelAxes=False)
def generic_overlay(abf, color=None, unknown=False, alpha=None):
    """Overlay all sweeps (one subplot per channel) and save the figure."""
    log.debug("generic plot: overlay")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    fig = plt.gcf()
    for ch in abf.channelList:
        axis = fig.add_subplot(abf.channelCount, 1, ch + 1)
        pyabf.plot.sweeps(abf, axis=axis, color=color, channel=ch, alpha=alpha)
        axis.set_title(f"{abf.abfID} (Ch{ch+1}) Sweep Overlay")
    plotFigSave(abf, tag="generic-overlay", unknown=unknown)
def generic_overlay_average(abf, baselineSec1=None, baselineSec2=None):
    """Overlay all sweeps per channel (faint blue) plus their average (black).

    When a baseline window is given, sweeps are baseline-subtracted first
    and a dotted zero line is drawn.
    """
    log.debug("generic plot: overlay average")
    assert isinstance(abf, pyabf.ABF)
    applyBaseline = bool(baselineSec2)
    if applyBaseline:
        abf.sweepBaseline(baselineSec1, baselineSec2)
    plotFigNew(abf)
    fig = plt.gcf()
    for ch in abf.channelList:
        axis = fig.add_subplot(abf.channelCount, 1, ch + 1)
        if applyBaseline:
            axis.axhline(0, color='k', ls=':')
        pyabf.plot.sweeps(abf, axis=axis, color='C0', channel=ch, alpha=.2)
        axis.set_title(f"{abf.abfID} (Ch{ch+1}) Sweep Overlay")
        axis.plot(abf.sweepX, pyabf.sweep.averageTrace(abf, ch), color='k')
    plotFigSave(abf, tag="generic-overlay")
def generic_continuous(abf, unknown=False, alpha=1):
    """Plot the full recording as one continuous trace per channel."""
    log.debug("generic plot: continuous")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    fig = plt.gcf()
    for ch in abf.channelList:
        axis = fig.add_subplot(abf.channelCount, 1, ch + 1)
        pyabf.plot.sweeps(abf, axis=axis, continuous=True, channel=ch,
                          color='b', alpha=alpha, linewidth=.5)
        axis.set_title(f"{abf.abfID} (Ch{ch+1}) Continuous Signal")
    addComments(abf)
    plotFigSave(abf, tag="generic-continuous", unknown=unknown)
def generic_first_sweep(abf, timeSec1=None, timeSec2=None):
    """Plot only the first sweep of each channel, optionally windowed in time.

    (The original docstring was copy-pasted from generic_continuous and was
    wrong: this plots sweep 0, not every sweep.)
    """
    log.debug("generic plot: first sweep")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channel in abf.channelList:
        ax = plt.gcf().add_subplot(abf.channelCount, 1, channel+1)
        pyabf.plot.sweeps(abf, sweepNumbers=[0], axis=ax,
                          channel=channel, color='b', alpha=1,
                          startAtSec=timeSec1, endAtSec=timeSec2)
        ax.set_title(f"{abf.abfID} (Ch{channel+1}) First Sweep")
    plotFigSave(abf, tag="generic-first-sweep")
    return
def generic_average_over_time(abf, timeSec1=None, timeSec2=None):
    """Plot the windowed average of every sweep as a function of time.

    For each channel, averages the signal over [timeSec1, timeSec2] within
    each sweep and plots that value (with stdev error bars) against the
    sweep's start time.  Comment tags are marked on the plot.
    """
    log.debug("generic plot: average over time")
    assert isinstance(abf, pyabf.ABF)
    plotFigNew(abf)
    for channel in abf.channelList:
        ax = plt.gcf().add_subplot(abf.channelCount, 1, channel+1)
        sweepTimes = np.arange(abf.sweepCount)*abf.sweepLengthSec
        sweepAvgs = pyabf.stats.rangeAverage(
            abf, timeSec1, timeSec2, channel=channel)
        sweepErr = pyabf.stats.rangeStdev(
            abf, timeSec1, timeSec2, channel=channel)
        # many sweeps: faint error band under a line; few sweeps: discrete
        # error bars with caps
        if len(sweepTimes) > 20:
            ax.errorbar(sweepTimes, sweepAvgs, sweepErr, alpha=.3)
            ax.plot(sweepTimes, sweepAvgs, ".-", color='C0')
            ax.margins(0, .1)
        else:
            ax.errorbar(sweepTimes, sweepAvgs, sweepErr, alpha=1,
                        ms=10, marker='.', ls='-', capsize=5)
        timeNote = "%.02f - %.02f sec" % (_secLookUp(abf, timeSec1, timeSec2))
        ax.set_title(f"{abf.abfID} (Ch{channel+1}) [{timeNote}]")
    addComments(abf)
    plotFigSave(abf, tag=f"generic-average-over-time")
    return
def generic_memtest_over_time(abf):
    """Plot membrane-test values (Ih, Rm, Ra, Cm) sweep-by-sweep over time.

    Assumes the first epoch of each sweep is a voltage-clamp step suitable
    for membrane-test analysis.  Produces a 2x2 grid (one panel per value)
    with experiment time in minutes on every x axis; comment tags are marked
    on each panel (lines only, no labels).
    """
    log.debug("generic plot: memtest analysis")
    assert isinstance(abf, pyabf.ABF)
    IhBySweep, RmBySweep, RaBySweep, CmBySweep = pyabf.memtest.step_valuesBySweep(abf)
    sweepTimesMin = np.arange(abf.sweepCount)*abf.sweepLengthSec/60
    plotFigNew(abf)
    # holding current
    ax1 = plt.gcf().add_subplot(2, 2, 1)
    ax1.plot(sweepTimesMin, IhBySweep, '.', color='C0')
    ax1.set_title("Holding Current (Ih)")
    ax1.set_ylabel("Clamp Current (pA)")
    ax1.set_xlabel("Experiment Time (min)")
    ax1.margins(0,.4)
    addComments(abf, True, False)
    # membrane resistance (y axis pinned to start at zero)
    ax2 = plt.gcf().add_subplot(2, 2, 2)
    ax2.plot(sweepTimesMin, RmBySweep, '.', color='C3')
    ax2.set_title("Membrane Resistance (Rm)")
    ax2.set_ylabel("Resistance (MOhm)")
    ax2.set_xlabel("Experiment Time (min)")
    ax2.margins(0,.4)
    ax2.axis([None,None,0,None])
    addComments(abf, True, False)
    # access resistance (y axis pinned to start at zero)
    ax3 = plt.gcf().add_subplot(2, 2, 3)
    ax3.plot(sweepTimesMin, RaBySweep, '.', color='C7')
    ax3.set_title("Access Resistance (Ra)")
    ax3.set_ylabel("Resistance (MOhm)")
    ax3.set_xlabel("Experiment Time (min)")
    ax3.margins(0,.4)
    ax3.axis([None,None,0,None])
    addComments(abf, True, False)
    # membrane capacitance (y axis pinned to start at zero)
    ax4 = plt.gcf().add_subplot(2, 2, 4)
    ax4.plot(sweepTimesMin, CmBySweep, '.', color='C1')
    ax4.set_title("Membrane Capacitance (Cm)")
    ax4.set_ylabel("Capacitance (pF)")
    ax4.set_xlabel("Experiment Time (min)")
    ax4.margins(0,.4)
    ax4.axis([None,None,0,None])
    addComments(abf, True, False)
    plotFigSave(abf, tag=f"generic-memtest", labelAxes=False)
    return
def generic_paired_pulse(abf, p1sec1, p1sec2, p2sec1, p2sec2):
"""single pulse or paired pulse analysis."""
log.debug("generic plot: pulse analysis")
assert isinstance(abf, pyabf.ABF)
sweepTimes = np.arange(abf.sweepCount)*abf.sweepLengthSec
# PULSE 1
plotFigNew(abf)
ax = plt.gcf().add_subplot(2, 1, 1)
sweepAvgs1 = pyabf.stats.rangeAverage(abf, p1sec1, p1sec2)
sweepErr1 = pyabf.stats.rangeStdev(abf, p1sec1, p1sec2)
ax.errorbar(sweepTimes, sweepAvgs1, sweepErr1, ms=10,
marker='.', ls='-', capsize=5, color='r')
timeNote = "%.02f - %.02f sec" % (_secLookUp(abf, p1sec1, p1sec2))
ax.set_title(f"{abf.abfID} Pulse 1 [{timeNote}]")
ax.set_ylabel(abf.sweepLabelY)
ax.set_xlabel(abf.sweepLabelX)
addComments(abf)
# PULSE 2
ax = plt.gcf().add_subplot(2, 1, 2)
sweepAvgs2 = pyabf.stats.rangeAverage(abf, p2sec1, p2sec2)
sweepErr2 = pyabf.stats.rangeStdev(abf, p2sec1, p2sec2)
ax.errorbar(sweepTimes, sweepAvgs1, sweepErr2, ms=10,
marker='.', ls='-', capsize=5, color='r')
timeNote = "%.02f - %.02f sec" % (_secLookUp(abf, p2sec1, | |
<filename>improved_str_r-tree/improved_str_r-tree.py
# 2015-08-29
# stored items in containers are id values
# have root entry instead of solely root node
# updated on 2016-08-22 to fix some auxiliary bugs
# updated on 2016-08-23 to fix traditional/non-traditional isLeafNode() distinction
# updated on 2016-11-03 to re-structure and modify adjustTree();
# stop at root instead of non-existent parent of root;
# note that there is a bug with setting M to two;
# also, we implement delete(); note that our tree
# lacks entry-aware nodes; made bug fix for adjustTree()
# feel free to ignore topic/question-related parts
# note that we don't necessarily need Image, ImageDraw, or PythonMagick
import sys
import Image, ImageDraw
import PythonMagick
# from priority_queue.QuickRemovePriorityQueue import *
def getTopicDistance(topic_id, topic_id_to_point_dict, query_point):
    """Return the Euclidean distance from the query point to a topic's location."""
    return getDistance(topic_id_to_point_dict[topic_id], query_point)
def getQuestionDistance(question_id, question_id_to_topic_id_dict, topic_id_to_point_dict, query_point):
    """Return the distance from the query point to the question's nearest topic."""
    topic_ids = question_id_to_topic_id_dict[question_id]
    return min(getDistance(topic_id_to_point_dict[t], query_point) for t in topic_ids)
# a proper key for a topic would be (distance, topic_id)
def getTopicKey(topic_id, topic_id_to_point_dict, query_point):
    """Build a (negated distance, topic_id) priority pair for a topic.

    Negating the distance makes the min-heap surface the FARTHEST kept item.
    """
    distance = getTopicDistance(topic_id, topic_id_to_point_dict, query_point)
    return (-1 * distance, topic_id)
# a proper key for a question would be (distance, question_id)
def getQuestionKey(question_id, question_id_to_topic_id_dict, topic_id_to_point_dict, query_point):
    """Build a (negated distance, question_id) priority pair for a question.

    The distance used is the question's nearest-topic distance, negated so
    the min-heap surfaces the FARTHEST kept item.
    """
    distance = getQuestionDistance(question_id, question_id_to_topic_id_dict, topic_id_to_point_dict, query_point)
    return (-1 * distance, question_id)
# equivalent to having a min-heap priority queue
# with (-1 * dist, id) as priority
def compare_items(pair_a, pair_b):
    """Compare two (negated-distance, id) pairs; returns -1, 0, or 1.

    Distances within 0.001 of each other count as a tie (the 0.002-wide
    tolerance band); ties are broken by id value in DESCENDING order.
    """
    # undo the negation used for heap ordering; distances are non-negative
    dist_a, id_a = -1 * pair_a[0], pair_a[1]
    dist_b, id_b = -1 * pair_b[0], pair_b[1]
    if dist_a < dist_b - 0.001:
        return -1
    if dist_a > dist_b + 0.001:
        return 1
    # distances effectively equal: larger id sorts first
    if id_a > id_b:
        return -1
    if id_a < id_b:
        return 1
    return 0
import heapq
class PriorityQueue:
    """Min-heap priority queue that also tracks per-item membership counts.

    Priorities are (-1 * distance, id-value) pairs, so the heap top is the
    entry with the largest distance.  The same item may be pushed multiple
    times with different priorities; haveItem() reports whether at least one
    copy is currently queued.
    """

    def __init__(self):
        self.heap = []
        self.item_count_dict = {}

    def push(self, item, priority):
        """Queue an item under the given priority and bump its count."""
        heapq.heappush(self.heap, (priority, item))
        counts = self.item_count_dict
        counts[item] = counts.get(item, 0) + 1

    def pop(self):
        """Remove and return the lowest-priority item.

        Assumes the item's count already exists (it was pushed earlier).
        """
        priority, item = heapq.heappop(self.heap)
        self.item_count_dict[item] -= 1
        return item

    def isEmpty(self):
        return len(self.heap) == 0

    def peek(self):
        """Return the top (priority, item) pair without removing it."""
        return self.heap[0]

    def toList(self):
        """Return queued items in internal heap order (not fully sorted)."""
        return [entry[1] for entry in self.heap]

    def getSize(self):
        return len(self.heap)

    def haveItem(self, item):
        """True when at least one copy of the item is currently queued."""
        return self.item_count_dict.get(item, 0) > 0
import math
def getDistance(point1, point2):
    """Return the Euclidean distance between two 2-D points."""
    (x1, y1), (x2, y2) = point1, point2
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx ** 2 + dy ** 2)
def getDistanceComponent(point1, point2, axis):
    """Return the absolute coordinate difference between two 2-D points.

    axis: 0 for the x component, 1 for the y component.
    Raises ValueError for any other axis.  (The original silently fell
    through and returned None for an invalid axis, hiding caller bugs.)
    """
    x1, y1 = point1
    x2, y2 = point2
    if axis == 0:
        return abs(x2 - x1)
    if axis == 1:
        return abs(y2 - y1)
    raise ValueError("axis must be 0 or 1, got %r" % (axis,))
class NearestNeighbor:
    """Mutable record of the closest item seen so far and its distance.

    Starts out "empty": no close item and an infinite distance, so any real
    candidate improves on it.
    """

    def __init__(self, close_item=None, distance=float("inf")):
        self.close_item = close_item
        self.distance = distance

    def getCloseItem(self):
        return self.close_item

    def getDistance(self):
        return self.distance

    def setCloseItem(self, close_item):
        self.close_item = close_item

    def setDistance(self, distance):
        self.distance = distance

    def toString(self):
        """Return a human-readable '<item> <distance>' summary."""
        return "%s %s" % (self.close_item, self.distance)
class KNearestNeighbor:
    """Accumulates (approximately) the k nearest topic ids to a query point.

    Backed by a PriorityQueue keyed on (-1 * distance, id), so the heap top
    is always the FARTHEST currently-kept item -- the one to evict when the
    structure is full.  Because of the 0.001 distance tolerance, slightly
    more or fewer than k items may be kept.
    """
    def __init__(self, query_point, close_item_pq, topic_id_to_point_dict, k = 100):
        self.query_point = query_point
        # priority queue we use uses a min-heap
        # as a result, we negate priorities
        self.close_item_pq = close_item_pq
        self.topic_id_to_point_dict = topic_id_to_point_dict
        self.k = k
    # note that we do not always retrieve k items - could be less or could be more due to threshold
    def getCloseItems(self):
        """Return kept item ids (internal heap order, not sorted by distance)."""
        return (self.close_item_pq).toList()
    # have special behavior for when no items are in queue
    def getFarthestCloseDistance(self):
        """Distance of the farthest kept item; inf when nothing is kept yet."""
        if self.getNumCloseItems() == 0:
            return float("inf")
        else:
            result = (self.close_item_pq).peek()
            priority, item = result
            # priority is (-1 * distance, id-value); undo the negation
            distance = -1 * priority[0]
            id_value = priority[1]
            return distance
    def addCloseItem(self, close_item):
        """Push a topic id onto the queue with priority (-distance, id)."""
        id_value = close_item
        point_location = self.topic_id_to_point_dict[id_value]
        query_point = self.query_point
        distance = getDistance(query_point, point_location)
        priority = (-1 * distance, id_value)
        (self.close_item_pq).push(close_item, priority)
    def removeCloseItem(self):
        """Evict the farthest kept item (the heap top)."""
        (self.close_item_pq).pop()
    def getNumCloseItems(self):
        return (self.close_item_pq).getSize()
    def addAndRemoveIfNecessary(self, close_item):
        """Add an item, evicting the farthest kept item when full -- unless
        the new item is itself beyond the farthest by more than the 0.001
        tolerance (then both are kept, per the threshold semantics)."""
        # use this so that we have enough to work with when we sort and cull
        do_remove = self.isFull() == True and self.passesThresholdForFarthestCloseItem(close_item) == False
        self.addCloseItem(close_item)
        if do_remove == True:
            self.removeCloseItem()
    def isFull(self):
        return self.getNumCloseItems() >= self.k
    # returns True if distance for item 'close_item' is >= 0.001 that of the farthest close item
    def passesThresholdForFarthestCloseItem(self, close_item):
        """True when close_item is more than 0.001 farther from the query
        point than the current farthest kept item."""
        distance = self.getFarthestCloseDistance()
        query_point = self.query_point
        topic_id_value = close_item
        point_location = (self.topic_id_to_point_dict)[topic_id_value]
        curr_distance = getDistance(query_point, point_location)
        return curr_distance > distance + 0.001
class TopicKNearestNeighbor(KNearestNeighbor):
    # Thin subclass: k-nearest-neighbor collection where the close items are
    # topic id values; all behavior is inherited from KNearestNeighbor.
    def __init__(self, query_point, close_item_pq, topic_id_to_point_dict, k = 100):
        KNearestNeighbor.__init__(self, query_point, close_item_pq, topic_id_to_point_dict, k)
class QuestionKNearestNeighbor(KNearestNeighbor):
def __init__(self, query_point, close_item_pq, topic_id_to_point_dict, question_id_to_topic_id_dict,
k = 100):
KNearestNeighbor.__init__(self, query_point, close_item_pq, topic_id_to_point_dict, k)
# priority queue we use uses a min-heap
# as a result, we negate priorities
self.question_id_to_topic_id_dict = question_id_to_topic_id_dict
def addCloseItem(self, close_item, question_key):
# provided a question id value, we take associated topics
# and use their locations to update
# point = close_item
question_id = close_item
# id_value = point.getIDValue()
query_point = self.query_point
# point_location = (point.getX(), point.getY())
# id_value = point.getIDValue()
# distance = getDistance(query_point, point_location)
# distance = getDistance(query_point, point_location)
# priority = (-1 * distance, id_value)
priority = question_key
# print "priority:", priority
(self.close_item_pq).push(close_item, priority)
# close_items is a list of question id values
# add questions associated with a topic
def addMultipleAndRemoveIfNecessary(self, close_items, topic_id):
# print "close items:", close_items
question_id_values = close_items
for question_id_value in question_id_values:
"""
if not (self.close_item_pq).haveItem(question_id_value):
# (self.close_item_pq).removeItem(question_id_value)
"""
self.addAndRemoveIfNecessary(question_id_value, topic_id)
def addAndRemoveIfNecessary(self, question_id_value, topic_id):
# question_id_value = close_item
query_location = self.query_point
# print close_item, self.isFull()
# do_remove = self.isFull() == True
# use this so that we have enough to work with when we sort and cull
# print "close item:", close_item
# question_key = getQuestionKey(question_id_value, self.question_id_to_topic_id_dict, self.topic_id_to_point_dict, query_location)
question_key = (-1 * getTopicDistance(topic_id, self.topic_id_to_point_dict, query_location), question_id_value)
do_add = (self.close_item_pq).haveItem(question_id_value) == False
do_remove = self.isFull() == True and self.passesThresholdForFarthestCloseItem(question_id_value, question_key) == False
if do_add == True:
# do_remove = self.isFull() == True
self.addCloseItem(question_id_value, question_key)
if do_remove == True:
self.removeCloseItem()
# returns True if distance for item 'close_item' is >= 0.001 that of the farthest close item
def passesThresholdForFarthestCloseItem(self, question_id_value, question_key):
distance = self.getFarthestCloseDistance()
query_point = self.query_point
# point = close_item
# question_id_value = close_item
distance = -1 * question_key[0]
# point_location = (point.getX(), point.getY())
# curr_distance = getDistance(query_point, point_location)
curr_distance = distance
return curr_distance > | |
: False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
For complex regex patterns, it can be useful to avoid the need for complex
quoting and escape sequences by making use of YAML's multiline string
syntax.
.. code-block:: yaml
complex_search_and_replace:
file.replace:
# <...snip...>
- pattern: |
CentOS \(2.6.32[^\n]+\n\s+root[^\n]+\n\)+
.. note::
When using YAML multiline string syntax in ``pattern:``, make sure to
also use that syntax in the ``repl:`` part, or you might lose line
feeds.
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.replace')
check_res, check_msg = _check_file(name)
if not check_res:
if ignore_if_missing and 'file not found' in check_msg:
ret['comment'] = 'No changes needed to be made'
return ret
else:
return _error(ret, check_msg)
changes = __salt__['file.replace'](name,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes,
ignore_if_missing=ignore_if_missing,
backslash_literal=backslash_literal)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def blockreplace(
name,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
show_changes=True):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal. This can help you
maintaining an un-managed file containing manual edits.
Note: this function will store two copies of the file in-memory
(the original version and the edited version) in order to detect changes
and only edit the targeted file if necessary.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output. Note: you can use file.accumulated and target this state.
All accumulated data dictionaries content will be added as new lines in
the content
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.blockreplace:
- name: /etc/example.conf
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template
The named templating engine will be used to render the downloaded file.
Defaults to ``jinja``. The following templates are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
append_if_not_found
If markers are not found and set to True then the markers and content
will be appended to the file. Default is ``False``
prepend_if_not_found
If markers are not found and set to True then the markers and content
will be prepended to the file. Default is ``False``
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run
Don't make any edits to the file
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made
Example of usage with an accumulator and with a variable:
.. code-block:: yaml
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.blockreplace')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
filtered = [a for a in accumulator]
for acc in filtered:
acc_content = accumulator[acc]
for line in acc_content:
if content == '':
content = line
else:
content += "\n" + line
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
for index, item in enumerate(text):
content += str(item)
changes = __salt__['file.blockreplace'](
name,
marker_start,
marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes
)
if changes:
ret['pchanges'] = {'diff': changes}
if __opts__['test']:
ret['changes']['diff'] = ret['pchanges']['diff']
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['changes']['diff'] = ret['pchanges']['diff']
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = | |
"""Tests for API endpoints that performs stack analysis."""
import requests
import time
import os
from behave import given, then, when
from urllib.parse import urljoin
import jsonschema
from src.attribute_checks import *
from src.parsing import *
from src.utils import *
from src.json_utils import *
from src.authorization_tokens import *
STACK_ANALYSIS_CONSTANT_FILE_URL = "https://raw.githubusercontent.com/" \
"fabric8-analytics/fabric8-analytics-stack-analysis/master/" \
"analytics_platform/kronos/pgm/src/pgm_constants.py"
STACK_ANALYSIS_OUTLIER_PROBABILITY_CONSTANT_NAME = \
"KRONOS_OUTLIER_PROBABILITY_THRESHOLD_VALUE"
def contains_alternate_node(json_resp):
    """Check for the existence of alternate node in the stack analysis.

    Returns True only when the response carries a non-empty 'result' list
    whose first entry has a non-None 'recommendation.alternate' node.
    """
    result = json_resp.get('result')
    if not result or not isinstance(result, list):
        return False
    # 'recommendation' may be present but None/empty -> normalize to {}
    recommendation = result[0].get('recommendation', {}) or {}
    return recommendation.get('alternate') is not None
@when("I wait for stack analysis to finish")
@when("I wait for stack analysis to finish {token} authorization token")
@when("I wait for stack analysis version {version:d} to finish {token} authorization token")
def wait_for_stack_analysis_completion(context, version=3, token="without"):
"""Try to wait for the stack analysis to be finished.
This step assumes that stack analysis has been started previously and
thus that the job ID is known
Current API implementation returns just three HTTP codes:
200 OK : analysis is already finished
202 Accepted: analysis is started or is in progress (or other state!)
401 UNAUTHORIZED : missing or improper authorization token
"""
timeout = context.stack_analysis_timeout # in seconds
sleep_amount = 10 # we don't have to overload the API with too many calls
use_token = parse_token_clause(token)
id = context.response.json().get("id")
context.stack_analysis_id = id
# log.info("REQUEST ID: {}\n\n".format(context.stack_analysis_id))
url = urljoin(stack_analysis_endpoint(context, version), id)
# log.info("RECOMMENDER API URL: {}\n\n".format(url))
for _ in range(timeout // sleep_amount):
if use_token:
context.response = requests.get(url, headers=authorization(context))
else:
context.response = requests.get(url)
status_code = context.response.status_code
# log.info("%r" % context.response.json())
if status_code == 200:
json_resp = context.response.json()
if contains_alternate_node(json_resp):
# log.info('Recommendation found')
break
# 401 code should be checked later
elif status_code == 401:
break
elif status_code != 202:
raise Exception('Bad HTTP status code {c}'.format(c=status_code))
time.sleep(sleep_amount)
else:
raise Exception('Timeout waiting for the stack analysis results')
@when("I post a valid {manifest} to {url}")
def perform_valid_manifest_post(context, manifest, url):
"""Post a manifest to selected core API endpont."""
filename = "data/{manifest}".format(manifest=manifest)
files = {'manifest[]': open(filename, 'rb')}
endpoint = "{coreapi_url}{url}".format(coreapi_url=context.coreapi_url, url=url)
response = requests.post(endpoint, files=files)
response.raise_for_status()
context.response = response.json()
print(response.json())
def send_manifest_to_stack_analysis(context, manifest, name, endpoint, use_token):
    """Send the selected manifest file to stack analysis.

    The raw ``requests`` response object is stored in ``context.response``.
    """
    filename = 'data/{manifest}'.format(manifest=manifest)
    manifest_file_dir = os.path.dirname(filename)
    path_to_manifest_file = os.path.abspath(manifest_file_dir)
    # please note that the trick with (None, path_to_manifest_file) has to be
    # used here so the REST API call would work properly. It is similar to use
    # curl -F 'manifest[]=@filename' -F 'filePath[]=PATH_TO_FILE'
    # use a context manager so the manifest file is closed even when the
    # POST raises (the original code leaked the file handle)
    with open(filename, 'rb') as manifest_file:
        files = {'manifest[]': (name, manifest_file),
                 'filePath[]': (None, path_to_manifest_file)}
        if use_token:
            response = requests.post(endpoint, files=files,
                                     headers=authorization(context))
        else:
            response = requests.post(endpoint, files=files)
    context.response = response
def stack_analysis_endpoint(context, version):
    """Return endpoint for the stack analysis of selected version.

    Raises an Exception when the version is out of range.
    """
    # Two available endpoints for stack analysis are /stack-analyses and /analyse
    # /analyse endpoint was developed to meet the performance norms at production
    known_endpoints = ("/api/v1/stack-analyses-v1",
                       "/api/v1/analyse",
                       "/api/v1/stack-analyses/")
    if not 1 <= version <= len(known_endpoints):
        raise Exception("Wrong version specified: {v}".format(v=version))
    return urljoin(context.coreapi_url, known_endpoints[version - 1])
@when("I send NPM package manifest {manifest} to stack analysis")
@when("I send NPM package manifest {manifest} to stack analysis {token} authorization token")
@when("I send NPM package manifest {manifest} to stack analysis version {version:d}")
@when("I send NPM package manifest {manifest} to stack analysis version {version:d} {token} "
"authorization token")
def npm_manifest_stack_analysis(context, manifest, version=3, token="without"):
"""Send the NPM package manifest file to the stack analysis."""
endpoint = stack_analysis_endpoint(context, version)
use_token = parse_token_clause(token)
send_manifest_to_stack_analysis(context, manifest, 'package.json',
endpoint, use_token)
@when("I send Python package manifest {manifest} to stack analysis")
@when("I send Python package manifest {manifest} to stack analysis {token} authorization token")
@when("I send Python package manifest {manifest} to stack analysis version {version:d}")
@when("I send Python package manifest {manifest} to stack analysis version {version:d} {token} "
"authorization token")
def python_manifest_stack_analysis(context, manifest, version=3, token="without"):
"""Send the Python package manifest file to the stack analysis."""
endpoint = stack_analysis_endpoint(context, version)
use_token = parse_token_clause(token)
send_manifest_to_stack_analysis(context, manifest, 'requirements.txt',
endpoint, use_token)
@when("I send Maven package manifest {manifest} to stack analysis")
@when("I send Maven package manifest {manifest} to stack analysis {token} authorization token")
@when("I send Maven package manifest {manifest} to stack analysis version {version:d}")
@when("I send Maven package manifest {manifest} to stack analysis version {version:d} {token} "
"authorization token")
def maven_manifest_stack_analysis(context, manifest, version=3, token="without"):
"""Send the Maven package manifest file to the stack analysis."""
endpoint = stack_analysis_endpoint(context, version)
use_token = parse_token_clause(token)
send_manifest_to_stack_analysis(context, manifest, 'pom.xml',
endpoint, use_token)
@then("stack analyses response is available via {url}")
def check_stack_analyses_response(context, url):
"""Check the stack analyses response available on the given URL."""
response = context.response
resp = response.json()
assert len(resp["results"]) >= 1
request_id = resp["results"][0]["id"]
url = "{base_url}{endpoint}{request_id}".format(
base_url=context.coreapi_url,
endpoint=url, request_id=request_id)
get_resp = requests.get(url)
if get_resp.status_code == 202: # in progress
# Allowing enough retries for stack analyses to complete
retry_count = 30
retry_interval = 20
iter = 0
while iter < retry_count:
iter += 1
get_resp = requests.get(url)
if get_resp.status_code != 202: # not in progress
break
time.sleep(retry_interval)
if iter == retry_count:
err = "Stack analyses could not be completed within {t} seconds".format(
t=iter * retry_interval)
resp_json = get_resp.json()
# ensure that the stack analyses result has been asserted in the loop
assert resp_json.get("status") == "success", err
# ensure that the response is in accordance to the Stack Analyses schema
schema = requests.get(resp_json["schema"]["url"]).json()
jsonschema.validate(resp_json, schema)
@when('I download and parse outlier probability threshold value')
def download_and_parse_outlier_probability_threshold_value(context):
    """Download and parse outlier probability threshold value.

    This Special step that is needed to get the stack analysis outlier
    probability threshold.
    """
    raw_content = download_file_from_url(STACK_ANALYSIS_CONSTANT_FILE_URL)
    threshold = parse_float_value_from_text_stream(
        raw_content, STACK_ANALYSIS_OUTLIER_PROBABILITY_CONSTANT_NAME)
    context.outlier_probability_threshold = threshold
@then('I should have outlier probability threshold value between {min:f} and {max:f}')
def check_outlier_probability_threshold_value(context, min, max):
    """Check that the outlier probability falls between selected range."""
    # NOTE: parameter names `min`/`max` are dictated by the step text above,
    # so the builtin shadowing cannot be avoided here.
    threshold = context.outlier_probability_threshold
    assert threshold is not None
    assert min <= threshold <= max
def check_outlier_probability(usage_outliers, package_name, threshold_value):
    """Check the outlier probability.

    Try to find outlier probability for given package is found and that
    its probability is within permitted range.
    """
    # NOTE: there's a typo in the attribute name (issue #73)
    # the following line should be updated after the issue ^^^ will be fixed
    attribute = "outlier_prbability"
    for record in usage_outliers:
        if record["package_name"] != package_name:
            continue
        # found the package: its probability must exist and be in range
        assert attribute in record, \
            "'%s' attribute is expected in the node, " \
            "found: %s attributes " % (attribute, ", ".join(record.keys()))
        probability = record[attribute]
        assert probability is not None
        value = float(probability)
        assert threshold_value <= value <= 1.0, \
            "outlier_prbability value should fall within %f..1.0 range, "\
            "found %f value instead" % (threshold_value, value)
        return
    raise Exception("Can not find usage outlier for the package {p}".format(p=package_name))
@then('I should find the proper outlier record for the {component} component')
def stack_analysis_check_outliers(context, component):
    """Check the outlier record in the stack analysis."""
    response_data = context.response.json()
    outliers = get_value_using_path(
        response_data, "result/0/recommendation/usage_outliers")
    # threshold was downloaded+parsed by a previous step
    check_outlier_probability(outliers, component,
                              context.outlier_probability_threshold)
@then('I should find that total {count} outliers are reported')
def check_outlier_count(context, count=2):
    """Check the number of outliers in the stack analysis."""
    outliers = get_value_using_path(context.response.json(),
                                    "result/0/recommendation/usage_outliers")
    assert len(outliers) == int(count)
@then('I should find that valid outliers are reported')
def check_outlier_validity(context):
    """Check the outlier validity in the stack analysis."""
    threshold = 0.9
    outliers = get_value_using_path(context.response.json(),
                                    "result/0/recommendation/usage_outliers")
    # every reported outlier must itself pass the probability check
    for record in outliers:
        check_outlier_probability(outliers, record["package_name"], threshold)
@then('I should find that greater than {min_count} companions are reported')
def check_companion_count(context, min_count=0):
    """Check that we have more than min_count companions."""
    companions = get_value_using_path(context.response.json(),
                                      "result/0/recommendation/companion")
    assert len(companions) > int(min_count)
def check_licenses(licenses, expected_licenses):
    """Compare list of read licenses with list of expected licenses.

    Check that all expected licenses and only such licenses can be found in the list of licenses.
    Raises an Exception naming the first offending license.
    """
    assert licenses is not None
    # no license outside the expected set may appear
    for found in licenses:
        if found not in expected_licenses:
            raise Exception("Unexpected license found: {license}".format(
                license=found))
    # every expected license must be present
    for wanted in expected_licenses:
        if wanted not in licenses:
            raise Exception("Required license could not be found: {license}".format(
                license=wanted))
@then('I should find the following licenses ({licenses}) under the path {path}')
def stack_analysis_check_licenses(context, licenses, path):
    """Check the license(s) in the stack analysis."""
    expected = split_comma_separated_list(licenses)
    found = get_value_using_path(context.response.json(), path)
    assert found is not None
    check_licenses(found, expected)
def get_attribute_values(list, attribute_name):
    """Return attribute values as a sequence.

    NOTE(review): the parameter name shadows the builtin `list`; it is kept
    unchanged for backward compatibility with possible keyword callers.
    """
    return [element[attribute_name] for element in list]
def get_analyzed_packages(json_data):
| |
# Set default on both
member_set.__dict__[DEFAULT_OPTION_NAME] = defaultValue
member_get.__dict__[DEFAULT_OPTION_NAME] = defaultValue
# Add to list
setters[name.lower()] = member_set
# Done
return setters
def __setOptions(self, setters, options):
"""Sets the options, given the list-of-tuples methods and an
options dict.
"""
# List of invalid keys
invalidKeys = []
# Set options
for key1 in options:
key2 = key1.lower()
# Allow using the setter name
if key2.startswith("set"):
key2 = key2[3:]
# Check if exists. If so, call!
if key2 in setters:
fun = setters[key2]
val = options[key1]
fun(val)
else:
invalidKeys.append(key1)
# Check if invalid keys were given
if invalidKeys:
print("Warning, invalid options given: " + ", ".join(invalidKeys))
def __initOptions(self, options=None):
"""Init the options with their default values.
Also applies the docstrings of one to the other.
"""
# Make options an empty dict if not given
if not options:
options = {}
# Get setters
setters = self.__getOptionSetters()
# Set default value
for member_set in setters.values():
defaultVal = member_set.__dict__[DEFAULT_OPTION_NAME]
if defaultVal != DEFAULT_OPTION_NONE:
try:
member_set(defaultVal)
except Exception:
print("Error initing option ", member_set.__name__)
# Also set using given opions?
if options:
self.__setOptions(setters, options)
def setOptions(self, options=None, **kwargs):
"""setOptions(options=None, **kwargs)
Set the code editor options (e.g. highlightCurrentLine) using
a dict-like object, or using keyword arguments (options given
in the latter overrule opions in the first).
The keys in the dict are case insensitive and one can use the
option's setter or getter name.
"""
# Process options
if options:
D = {}
for key in options:
D[key] = options[key]
D.update(kwargs)
else:
D = kwargs
# Get setters
setters = self.__getOptionSetters()
# Go
self.__setOptions(setters, D)
## Font
def setFont(self, font=None):
"""setFont(font=None)
Set the font for the editor. Should be a monospace font. If not,
Qt will select the best matching monospace font.
"""
defaultFont = Manager.defaultFont()
# Get font object
if font is None:
font = defaultFont
elif isinstance(font, QtGui.QFont):
pass
elif isinstance(font, str):
font = QtGui.QFont(font)
else:
raise ValueError("setFont accepts None, QFont or string.")
# Hint Qt that it should be monospace
font.setStyleHint(font.TypeWriter, font.PreferDefault)
# Get family, fall back to default if qt could not produce monospace
fontInfo = QtGui.QFontInfo(font)
if fontInfo.fixedPitch():
family = fontInfo.family()
else:
family = defaultFont.family()
# Get size: default size + zoom
size = defaultFont.pointSize() + self.__zoom
# Create font instance
font = QtGui.QFont(family, size)
# Set, emit and return
QtWidgets.QPlainTextEdit.setFont(self, font)
self.fontChanged.emit()
return font
def setZoom(self, zoom):
"""setZoom(zoom)
Set the zooming of the document. The font size is always the default
font size + the zoom factor.
The final zoom is returned, this may not be the same as the given
zoom factor if the given factor is too small.
"""
# Set zoom (limit such that final pointSize >= 1)
size = Manager.defaultFont().pointSize()
self.__zoom = int(max(1 - size, zoom))
# Set font
self.setFont(self.fontInfo().family())
# Return zoom
return self.__zoom
## Syntax / styling
@classmethod
def getStyleElementDescriptions(cls):
"""getStyleElementDescriptions()
This classmethod returns a list of the StyleElementDescription
instances used by this class. This includes the descriptions for
the syntax highlighting of all parsers.
"""
# Collect members by walking the class bases
elements = []
def collectElements(cls, iter=1):
# Valid class?
if cls is object or cls is QtWidgets.QPlainTextEdit:
return
# Check members
if hasattr(cls, "_styleElements"):
for element in cls._styleElements:
elements.append(element)
# Recurse
for c in cls.__bases__:
collectElements(c, iter + 1)
collectElements(cls)
# Make style element descriptions
# (Use a dict to ensure there are no duplicate keys)
elements2 = {}
for element in elements:
# Check
if isinstance(element, StyleElementDescription):
pass
elif isinstance(element, tuple):
element = StyleElementDescription(*element)
else:
print("Warning: invalid element: " + repr(element))
# Store using the name as a key to prevent duplicates
elements2[element.key] = element
# Done
return list(elements2.values())
def getStyleElementFormat(self, name):
"""getStyleElementFormat(name)
Get the style format for the style element corresponding with
the given name. The name is case insensitive and invariant to
the use of spaces.
"""
key = name.replace(" ", "").lower()
try:
return self.__style[key]
except KeyError:
raise KeyError('Not a known style element name: "%s".' % name)
def setStyle(self, style=None, **kwargs):
"""setStyle(style=None, **kwargs)
Updates the formatting per style element.
The style consists of a dictionary that maps style names to
style formats. The style names are case insensitive and invariant
to the use of spaces.
For convenience, keyword arguments may also be used. In this case,
underscores are interpreted as dots.
This function can also be called without arguments to force the
editor to restyle (and rehighlight) itself.
Use getStyleElementDescriptions() to get information about the
available styles and their default values.
Examples
--------
# To make the classname in underline, but keep the color and boldness:
setStyle(syntax_classname='underline')
# To set all values for function names:
setStyle(syntax_functionname='#883,bold:no,italic:no')
# To set line number and indent guides colors
setStyle({ 'editor.LineNumbers':'fore:#000,back:#777',
'editor.indentationGuides':'#f88' })
"""
# Combine user input
D = {}
if style:
for key in style:
D[key] = style[key]
if True:
for key in kwargs:
key2 = key.replace("_", ".")
D[key2] = kwargs[key]
# List of given invalid style element names
invalidKeys = []
# Set style elements
for key in D:
normKey = key.replace(" ", "").lower()
if normKey in self.__style:
# self.__style[normKey] = StyleFormat(D[key])
self.__style[normKey].update(D[key])
else:
invalidKeys.append(key)
# Give warning for invalid keys
if invalidKeys:
print("Warning, invalid style names given: " + ",".join(invalidKeys))
# Notify that style changed, adopt a lazy approach to make loading
# quicker.
if self.isVisible():
callLater(self.styleChanged.emit)
self.__styleChangedPending = False
else:
self.__styleChangedPending = True
def showEvent(self, event):
super(CodeEditorBase, self).showEvent(event)
# Does the style need updating?
if self.__styleChangedPending:
callLater(self.styleChanged.emit)
self.__styleChangedPending = False
def __afterSetStyle(self):
"""_afterSetStyle()
Method to call after the style has been set.
"""
# Set text style using editor style sheet
format = self.getStyleElementFormat("editor.text")
ss = "QPlainTextEdit{ color:%s; background-color:%s; }" % (
format["fore"],
format["back"],
)
self.setStyleSheet(ss)
# Make sure the style is applied
self.viewport().update()
# Re-highlight
callLater(self.__highlighter.rehighlight)
## Some basic options
    @ce_option(4)  # default indent width: 4 columns
    def indentWidth(self):
        """Get the width of a tab character, and also the amount of spaces
        to use for indentation when indentUsingSpaces() is True.
        """
        # Backed by self.__indentWidth; change it via setIndentWidth()
        return self.__indentWidth
def setIndentWidth(self, value):
value = int(value)
if value <= 0:
raise ValueError("indentWidth must be >0")
self.__indentWidth = value
self.setTabStopWidth(self.fontMetrics().width("i" * self.__indentWidth))
    @ce_option(False)  # default: indent with tabs
    def indentUsingSpaces(self):
        """Get whether to use spaces (if True) or tabs (if False) to indent
        when the tab key is pressed
        """
        # Backed by self.__indentUsingSpaces; change via setIndentUsingSpaces()
        return self.__indentUsingSpaces
    def setIndentUsingSpaces(self, value):
        # Coerce to bool, then rehighlight so whitespace rendering updates
        self.__indentUsingSpaces = bool(value)
        self.__highlighter.rehighlight()
## Misc
    def gotoLine(self, lineNumber):
        """gotoLine(lineNumber)

        Move the cursor to the block given by the line number
        (first line is number 1) and show that line.
        """
        # Lines are 1-based while blocks are 0-based
        return self.gotoBlock(lineNumber - 1)
def gotoBlock(self, blockNumber):
"""gotoBlock(blockNumber)
Move the cursor to the block given by the block number
(first block is number 0) and show that line.
"""
# Two implementatios. I know that the latter works, so lets
# just use that.
cursor = self.textCursor()
# block = self.document().findBlockByNumber( blockNumber )
# cursor.setPosition(block.position())
cursor.movePosition(cursor.Start) # move to begin of the document
cursor.movePosition(cursor.NextBlock, n=blockNumber) # n blocks down
try:
self.setTextCursor(cursor)
except Exception:
pass # File is smaller then the caller thought
# TODO make this user configurable (setting relativeMargin to anything above
# 0.5 will cause cursor to center on each move)
relativeMargin = 0.2 # 20% margin on both sides of the window
margin = self.height() * relativeMargin
cursorRect = self.cursorRect(cursor)
if cursorRect.top() < margin or cursorRect.bottom() + margin > self.height():
self.centerCursor()
def doForSelectedBlocks(self, function):
"""doForSelectedBlocks(function)
Call the given function(cursor) for all blocks in the current selection
A block is considered to be in the current selection if a part of it is in
the current selection
The supplied cursor will be located at the beginning of each block. This
cursor may be modified by the function as required
"""
# Note: a 'TextCursor' does not represent the actual on-screen cursor, so
# movements do not move the on-screen cursor
# Note 2: when the text is changed, the cursor and selection start/end
# positions of | |
#!/usr/bin/env python3
import unittest as ut
import os.path
import c4.cmany.util as util
import c4.cmany.vsinfo as vsinfo
_sfx = " Win64" if util.in_64bit() else ""
_arc = "x64" if util.in_64bit() else "Win32"
class Test01VisualStudioInfo(ut.TestCase):
    """Sanity tests for vsinfo.VisualStudioInfo on Windows hosts."""

    def test00_instances(self):
        """Instantiate every known VS version; check paths of installed ones."""
        if not util.in_windows():
            return
        for k in vsinfo.order:
            vs = vsinfo.VisualStudioInfo(k)
            self.assertIsInstance(vs.year, int)

            def c(w):
                # Fail with a readable message when an expected path is missing
                if not os.path.exists(w):
                    self.fail(vs.name + ": " + w + " does not exist")

            if vs.is_installed:
                print("\nFound", vs.name, "aka", str(vs.gen) + ":", vs.dir)
                for i in ('dir', 'vcvarsall', 'devenv', 'msbuild', 'cxx_compiler', 'c_compiler'):
                    print(i, "----", getattr(vs, i))
                c(vs.dir)
                c(vs.vcvarsall)
                c(vs.devenv)
                c(vs.msbuild)
                c(vs.cxx_compiler)
                c(vs.c_compiler)
            else:
                print("not installed:", k, vs.name)

    def test01_find_any(self):
        """find_any() must locate at least one VS installation on Windows."""
        if not util.in_windows():
            return
        # renamed from `any` so the builtin any() is not shadowed
        found = vsinfo.find_any()
        if found is None:
            self.fail("could not find any VS installation")
        self.assertIsNotNone(found)
class Test00VisualStudioAliases(ut.TestCase):
def test01_name_to_gen(self):
def c(a, s):
sc = vsinfo.to_gen(a)
if sc != s:
self.fail(f"{a} should be '{s}' but is '{sc}'")
c('vs2019' , ['Visual Studio 16 2019', '-A', _arc])
c('vs2019_32' , ['Visual Studio 16 2019', '-A', 'Win32'])
c('vs2019_64' , ['Visual Studio 16 2019', '-A', 'x64'])
c('vs2019_arm' , ['Visual Studio 16 2019', '-A', 'ARM'])
c('vs2019_arm32', ['Visual Studio 16 2019', '-A', 'ARM'])
c('vs2019_arm64', ['Visual Studio 16 2019', '-A', 'ARM64'])
c('vs2017' , 'Visual Studio 15 2017' + _sfx )
c('vs2017_32' , 'Visual Studio 15 2017' )
c('vs2017_64' , 'Visual Studio 15 2017 Win64' )
c('vs2017_arm' , 'Visual Studio 15 2017 ARM' )
c('vs2015' , 'Visual Studio 14 2015' + _sfx )
c('vs2015_32' , 'Visual Studio 14 2015' )
c('vs2015_64' , 'Visual Studio 14 2015 Win64' )
c('vs2015_arm' , 'Visual Studio 14 2015 ARM' )
c('vs2013' , 'Visual Studio 12 2013' + _sfx )
c('vs2013_32' , 'Visual Studio 12 2013' )
c('vs2013_64' , 'Visual Studio 12 2013 Win64' )
c('vs2013_arm' , 'Visual Studio 12 2013 ARM' )
c('vs2012' , 'Visual Studio 11 2012' + _sfx )
c('vs2012_32' , 'Visual Studio 11 2012' )
c('vs2012_64' , 'Visual Studio 11 2012 Win64' )
c('vs2012_arm' , 'Visual Studio 11 2012 ARM' )
c('vs2010' , 'Visual Studio 10 2010' + _sfx )
c('vs2010_32' , 'Visual Studio 10 2010' )
c('vs2010_64' , 'Visual Studio 10 2010 Win64' )
c('vs2010_ia64' , 'Visual Studio 10 2010 IA64' )
c('vs2008' , 'Visual Studio 9 2008' + _sfx )
c('vs2008_32' , 'Visual Studio 9 2008' )
c('vs2008_64' , 'Visual Studio 9 2008 Win64' )
c('vs2008_ia64' , 'Visual Studio 9 2008 IA64' )
c('vs2005' , 'Visual Studio 8 2005' + _sfx )
c('vs2005_32' , 'Visual Studio 8 2005' )
c('vs2005_64' , 'Visual Studio 8 2005 Win64' )
c('Visual Studio 16 2019' + _sfx , 'Visual Studio 16 2019' + _sfx )
c('Visual Studio 16 2019' , 'Visual Studio 16 2019' )
c('Visual Studio 16 2019 Win64' , 'Visual Studio 16 2019 Win64' )
c('Visual Studio 16 2019 ARM' , 'Visual Studio 16 2019 ARM' )
c('Visual Studio 15 2017' + _sfx , 'Visual Studio 15 2017' + _sfx )
c('Visual Studio 15 2017' , 'Visual Studio 15 2017' )
c('Visual Studio 15 2017 Win64' , 'Visual Studio 15 2017 Win64' )
c('Visual Studio 15 2017 ARM' , 'Visual Studio 15 2017 ARM' )
c('Visual Studio 14 2015' + _sfx , 'Visual Studio 14 2015' + _sfx )
c('Visual Studio 14 2015' , 'Visual Studio 14 2015' )
c('Visual Studio 14 2015 Win64' , 'Visual Studio 14 2015 Win64' )
c('Visual Studio 14 2015 ARM' , 'Visual Studio 14 2015 ARM' )
c('Visual Studio 12 2013' + _sfx , 'Visual Studio 12 2013' + _sfx )
c('Visual Studio 12 2013' , 'Visual Studio 12 2013' )
c('Visual Studio 12 2013 Win64' , 'Visual Studio 12 2013 Win64' )
c('Visual Studio 12 2013 ARM' , 'Visual Studio 12 2013 ARM' )
c('Visual Studio 11 2012' + _sfx , 'Visual Studio 11 2012' + _sfx )
c('Visual Studio 11 2012' , 'Visual Studio 11 2012' )
c('Visual Studio 11 2012 Win64' , 'Visual Studio 11 2012 Win64' )
c('Visual Studio 11 2012 ARM' , 'Visual Studio 11 2012 ARM' )
c('Visual Studio 10 2010' + _sfx , 'Visual Studio 10 2010' + _sfx )
c('Visual Studio 10 2010' , 'Visual Studio 10 2010' )
c('Visual Studio 10 2010 Win64' , 'Visual Studio 10 2010 Win64' )
c('Visual Studio 10 2010 IA64' , 'Visual Studio 10 2010 IA64' )
c('Visual Studio 9 2008' + _sfx , 'Visual Studio 9 2008' + _sfx )
c('Visual Studio 9 2008' , 'Visual Studio 9 2008' )
c('Visual Studio 9 2008 Win64' , 'Visual Studio 9 2008 Win64' )
c('Visual Studio 9 2008 IA64' , 'Visual Studio 9 2008 IA64' )
c('Visual Studio 8 2005' + _sfx , 'Visual Studio 8 2005' + _sfx )
c('Visual Studio 8 2005' , 'Visual Studio 8 2005' )
c('Visual Studio 8 2005 Win64' , 'Visual Studio 8 2005 Win64' )
def test02_gen_to_name(self):
def c(a, s):
sc = vsinfo.to_name(a)
if sc != s:
self.fail("{} should be '{}' but is '{}'".format(a, s, sc))
c('Visual Studio 16 2019' , 'vs2019_32' )
c('Visual Studio 16 2019 Win64' , 'vs2019_64' )
c('Visual Studio 16 2019 ARM' , 'vs2019_arm' )
c('Visual Studio 16 2019 ARM32' , 'vs2019_arm32')
c('Visual Studio 16 2019 ARM64' , 'vs2019_arm64')
c('Visual Studio 15 2017' , 'vs2017_32' )
c('Visual Studio 15 2017 Win64' , 'vs2017_64' )
c('Visual Studio 15 2017 ARM' , 'vs2017_arm' )
c('Visual Studio 14 2015' , 'vs2015_32' )
c('Visual Studio 14 2015 Win64' , 'vs2015_64' )
c('Visual Studio 14 2015 ARM' , 'vs2015_arm' )
c('Visual Studio 12 2013' , 'vs2013_32' )
c('Visual Studio 12 2013 Win64' , 'vs2013_64' )
c('Visual Studio 12 2013 ARM' , 'vs2013_arm' )
c('Visual Studio 11 2012' , 'vs2012_32' )
c('Visual Studio 11 2012 Win64' , 'vs2012_64' )
c('Visual Studio 11 2012 ARM' , 'vs2012_arm' )
c('Visual Studio 10 2010' , 'vs2010_32' )
c('Visual Studio 10 2010 Win64' , 'vs2010_64' )
c('Visual Studio 10 2010 IA64' , 'vs2010_ia64' )
c('Visual Studio 9 2008' , 'vs2008_32' )
c('Visual Studio 9 2008 Win64' , 'vs2008_64' )
c('Visual Studio 9 2008 IA64' , 'vs2008_ia64' )
c('Visual Studio 8 2005' , 'vs2005_32' )
c('Visual Studio 8 2005 Win64' , 'vs2005_64' )
c('vs2019' , 'vs2019' )
c('vs2019_32' , 'vs2019_32' )
c('vs2019_64' , 'vs2019_64' )
c('vs2019_arm' , 'vs2019_arm' )
c('vs2019_arm32', 'vs2019_arm32')
c('vs2019_arm64', 'vs2019_arm64')
c('vs2017' , 'vs2017' )
c('vs2017_32' , 'vs2017_32' )
c('vs2017_64' , 'vs2017_64' )
c('vs2017_arm' , 'vs2017_arm' )
c('vs2015' , 'vs2015' )
c('vs2015_32' , 'vs2015_32' )
c('vs2015_64' , 'vs2015_64' )
c('vs2015_arm' , 'vs2015_arm' )
c('vs2013' , 'vs2013' )
c('vs2013_32' , 'vs2013_32' )
c('vs2013_64' , 'vs2013_64' )
c('vs2013_arm' , 'vs2013_arm' )
c('vs2012' , 'vs2012' )
c('vs2012_32' , 'vs2012_32' )
c('vs2012_64' , 'vs2012_64' )
c('vs2012_arm' , 'vs2012_arm' )
c('vs2010' , 'vs2010' )
c('vs2010_32' , 'vs2010_32' )
c('vs2010_64' , 'vs2010_64' )
c('vs2010_ia64' , 'vs2010_ia64' )
c('vs2008' , 'vs2008' )
c('vs2008_32' , 'vs2008_32' )
c('vs2008_64' , 'vs2008_64' )
c('vs2008_ia64' , 'vs2008_ia64' )
c('vs2005' , 'vs2005' )
c('vs2005_32' , 'vs2005_32' )
c('vs2005_64' , 'vs2005_64' )
def test03_parse_toolset(self):
def t(spec, name_vs, ts_vs):
cname_vs,cts_vs = vsinfo.sep_name_toolset(spec)
if cname_vs != name_vs:
self.fail("{} should be '{}' but is '{}'".format(spec, name_vs, cname_vs))
if cts_vs != ts_vs:
self.fail("{} should be '{}' but is '{}'".format(spec, ts_vs, cts_vs))
t('vs2019' , 'vs2019' , None )
t('vs2019_clang' , 'vs2019' , 'v142_clang_c2')
t('vs2019_xp' , 'vs2019' , 'v142_xp' )
t('vs2019_v142' , 'vs2019' , 'v142' )
t('vs2019_v142_xp' , 'vs2019' , 'v142_xp' )
t('vs2019_v142_clang' , 'vs2019' , 'v142_clang_c2')
t('vs2019_v141' , 'vs2019' , 'v141' )
t('vs2019_v141_xp' , 'vs2019' , 'v141_xp' )
t('vs2019_v141_clang' , 'vs2019' , 'v141_clang_c2')
t('vs2019_v140' , 'vs2019' , 'v140' )
t('vs2019_v140_xp' , 'vs2019' , 'v140_xp' )
t('vs2019_v140_clang' , 'vs2019' , 'v140_clang_c2')
t('vs2019_v120' , 'vs2019' , 'v120' )
t('vs2019_v120_xp' , 'vs2019' , 'v120_xp' )
t('vs2019_v110' , 'vs2019' , 'v110' )
t('vs2019_v110_xp' , 'vs2019' , 'v110_xp' )
t('vs2019_v100' , 'vs2019' , 'v100' )
t('vs2019_v100_xp' , 'vs2019' , 'v100_xp' )
t('vs2019_v90' , 'vs2019' , 'v90' )
t('vs2019_v90_xp' , 'vs2019' , 'v90_xp' )
t('vs2019_v80' , 'vs2019' , 'v80' )
t('vs2019_32' , 'vs2019_32' , None )
t('vs2019_32_clang' , 'vs2019_32' , 'v142_clang_c2')
t('vs2019_32_xp' , 'vs2019_32' , 'v142_xp' | |
*any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 @= other._proxy_3141592
else:
self._proxy_3141592 @= other
return self
def __itruediv__(self, other):
''' Wrap __itruediv__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 /= other._proxy_3141592
else:
self._proxy_3141592 /= other
return self
def __ifloordiv__(self, other):
''' Wrap __ifloordiv__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 //= other._proxy_3141592
else:
self._proxy_3141592 //= other
return self
def __imod__(self, other):
''' Wrap __imod__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 %= other._proxy_3141592
else:
self._proxy_3141592 %= other
return self
def __ipow__(self, other):
''' Wrap __ipow__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 **= other._proxy_3141592
else:
self._proxy_3141592 **= other
return self
def __ilshift__(self, other):
''' Wrap __ilshift__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 <<= other._proxy_3141592
else:
self._proxy_3141592 <<= other
return self
def __irshift__(self, other):
''' Wrap __irshift__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 >>= other._proxy_3141592
else:
self._proxy_3141592 >>= other
return self
def __iand__(self, other):
''' Wrap __iand__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 &= other._proxy_3141592
else:
self._proxy_3141592 &= other
return self
def __ixor__(self, other):
''' Wrap __ixor__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 ^= other._proxy_3141592
else:
self._proxy_3141592 ^= other
return self
def __ior__(self, other):
''' Wrap __ior__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
# Note that no incremental operations are PASSED *args or **kwargs
# We could do this to *any* ObjBase, but I don't like the idea of
# forcibly upgrading those, since they might do, for example, some
# different comparison operation or something. This seems like a
# much safer bet.
if isinstance(other, ProxyBase):
# Other proxies are very likely to fail, since the reveresed call
# would normally have already been called -- but try them anyways.
self._proxy_3141592 |= other._proxy_3141592
else:
self._proxy_3141592 |= other
return self
def __neg__(self):
''' Wrap __neg__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
return -(self._proxy_3141592)
def __pos__(self):
''' Wrap __pos__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
return +(self._proxy_3141592)
def __abs__(self):
''' Wrap __abs__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
return abs(self._proxy_3141592)
def __invert__(self):
''' Wrap __invert__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
return ~(self._proxy_3141592)
def __complex__(self):
''' Wrap __complex__ to pass into the _proxy object.
This method was (partially?) programmatically generated by a
purpose-built script.
'''
return complex(self._proxy_3141592)
def __int__(self):
''' Wrap __int__ to pass into | |
35s","nikto4",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto5",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto6",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto7",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto8",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto9",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto10",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto11",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto12",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto13",["ERROR: Cannot resolve hostname","0 item(s) reported","No web server found","0 host(s) tested"]],
["0 item(s) reported",1,proc_low," < 35s","nikto14","ERROR: Cannot resolve hostname , 0 item(s) reported"],
["#1",0,proc_high," < 30m","dnsmap_brute",["[+] 0 (sub)domains and 0 IP address(es) found"]],
["open",0,proc_low," < 15s","nmapmssql",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmapmysql",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmaporacle",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmapudprdp",["Failed to resolve"]],
["open",0,proc_low," < 15s","nmaptcprdp",["Failed to resolve"]],
["open",0,proc_high," > 50m","nmapfulltcp",["Failed to resolve"]],
["open",0,proc_high," > 75m","nmapfulludp",["Failed to resolve"]],
["open",0,proc_low," < 30s","nmapsnmp",["Failed to resolve"]],
["Microsoft SQL Server Error Log",0,proc_low," < 30s","elmahxd",["unable to resolve host address","Connection timed out"]],
["open",0,proc_low," < 20s","nmaptcpsmb",["Failed to resolve"]],
["open",0,proc_low," < 20s","nmapudpsmb",["Failed to resolve"]],
["Host:",0,proc_med," < 5m","wapiti",["none"]],
["WebDAV is ENABLED",0,proc_low," < 40s","nmapwebdaviis",["Failed to resolve"]],
["X-XSS-Protection[1",1,proc_med," < 3m","whatweb",["Timed out","Socket error","X-XSS-Protection[1"]]
]
# Vulnerabilities and Remediation
tools_fix = [
[1, "Not a vulnerability, just an informational alert. The host does not have IPv6 support. IPv6 provides more security as IPSec (responsible for CIA - Confidentiality, Integrity and Availablity) is incorporated into this model. So it is good to have IPv6 Support.",
"It is recommended to implement IPv6. More information on how to implement IPv6 can be found from this resource. https://www.cisco.com/c/en/us/solutions/collateral/enterprise/cisco-on-cisco/IPv6-Implementation_CS.html"],
[2, "Sensitive Information Leakage Detected. The ASP.Net application does not filter out illegal characters in the URL. The attacker injects a special character (%7C~.aspx) to make the application spit sensitive information about the server stack.",
"It is recommended to filter out special charaters in the URL and set a custom error page on such situations instead of showing default error messages. This resource helps you in setting up a custom error page on a Microsoft .Net Application. https://docs.microsoft.com/en-us/aspnet/web-forms/overview/older-versions-getting-started/deploying-web-site-projects/displaying-a-custom-error-page-cs"],
[3, "It is not bad to have a CMS in WordPress. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of WordPress. This resource contains more information on how to secure your WordPress Blog. https://codex.wordpress.org/Hardening_WordPress"],
[4, "It is not bad to have a CMS in Drupal. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of Drupal. This resource contains more information on how to secure your Drupal Blog. https://www.drupal.org/docs/7/site-building-best-practices/ensure-that-your-site-is-secure"],
[5, "It is not bad to have a CMS in Joomla. There are chances that the version may contain vulnerabilities or any third party scripts associated with it may possess vulnerabilities",
"It is recommended to conceal the version of Joomla. This resource contains more information on how to secure your Joomla Blog. https://www.incapsula.com/blog/10-tips-to-improve-your-joomla-website-security.html"],
[6, "Sometimes robots.txt or sitemap.xml may contain rules such that certain links that are not supposed to be accessed/indexed by crawlers and search engines. Search engines may skip those links but attackers will be able to access it directly.",
"It is a good practice not to include sensitive links in the robots or sitemap files."],
[7, "Without a Web Application Firewall, An attacker may try to inject various attack patterns either manually or using automated scanners. An automated scanner may send hordes of attack vectors and patterns to validate an attack, there are also chances for the application to get DoS`ed (Denial of Service)",
"Web Application Firewalls offer great protection against common web attacks like XSS, SQLi, etc. They also provide an additional line of defense to your security infrastructure. This resource contains information on web application firewalls that could suit your application. https://www.gartner.com/reviews/market/web-application-firewall"],
[8, "Open Ports give attackers a hint to exploit the services. Attackers try to retrieve banner information through the ports and understand what type of service the host is running",
"It is recommended to close the ports of unused services and use a firewall to filter the ports wherever necessary. This resource may give more insights. https://security.stackexchange.com/a/145781/6137"],
[9, "Chances are very less to compromise a target with email addresses. However, attackers use this as a supporting data to gather information around the target. An attacker may make use of the username on the email address and perform brute-force attacks on not just email servers, but also on other legitimate panels like SSH, CMS, etc with a password list as they have a legitimate name. This is however a shoot in the dark scenario, the attacker may or may not be successful depending on the level of interest",
"Since the chances of exploitation is feeble there is no need to take action. Perfect remediation would be choosing different usernames for different services will be more thoughtful."],
[10, "Zone Transfer reveals critical topological information about the target. The attacker will be able to query all records and will have more or less complete knowledge about your host.",
"Good practice is to restrict the Zone Transfer by telling the Master which are the IPs of the slaves that can be given access for the query. This SANS resource provides more information. https://www.sans.org/reading-room/whitepapers/dns/securing-dns-zone-transfer-868"],
[11, "The email address of the administrator and other information (address, phone, etc) is available publicly. An attacker may use these information to leverage an attack. This may not be used to carry out a direct attack as this is not a vulnerability. However, an attacker makes use of these data to build information about the target.",
"Some administrators intentionally would have made this information public, in this case it can be ignored. If not, it is recommended to mask the information. This resource provides information on this fix. http://www.name.com/blog/how-tos/tutorial-2/2013/06/protect-your-personal-information-with-whois-privacy/"],
[12, "As the target is lacking this header, older browsers will be prone to Reflected XSS attacks.",
"Modern browsers does not face any issues with this vulnerability (missing headers). However, older browsers are strongly recommended to be upgraded."],
[13, "This attack works by opening multiple simultaneous connections to the web server and it keeps them alive as long as possible by continously sending partial HTTP requests, which never gets completed. They easily slip through IDS by sending partial requests.",
"If you are using Apache Module, `mod_antiloris` would help. For other setup you can find more detailed remediation on this resource. https://www.acunetix.com/blog/articles/slow-http-dos-attacks-mitigate-apache-http-server/"],
[14, "This vulnerability seriously leaks private information of your host. An attacker can keep the TLS connection alive and can retrieve a maximum of 64K of data per heartbeat.",
"PFS (Perfect Forward Secrecy) can be implemented to make decryption difficult. Complete remediation and resource information is available here. http://heartbleed.com/"],
[15, "By exploiting this vulnerability, an attacker will be able gain access to sensitive data in a n encrypted session such as session ids, cookies and with those data obtained, will be able to impersonate that particular user.",
"This is a flaw in the SSL 3.0 Protocol. A better remediation would be to disable using the SSL 3.0 protocol. For more information, check this resource. https://www.us-cert.gov/ncas/alerts/TA14-290A"],
[16, "This attacks takes place in the SSL Negotiation (Handshake) which makes the client unaware of the attack. By successfully altering the handshake, the attacker will be able to pry on all the information that is sent from the client | |
<reponame>MatthewTe/ETL_pipelines
# Importing Data Manipulation packages:
import pandas as pd
import sqlite3
import bonobo
import os
# Python Reddit API Wrapper:
import praw
# Importing the Base Pipeline API Object:
from ETL_pipelines.base_pipeline import Pipeline
class RedditContentPipeline(Pipeline):
"""An object that contains all the logic and methods
necessary to construct a ETL pipeline for extracting
and ingesting daily relevant subreddit posts to a database.
It inherits from the Pipeline API object with allows the extract,
transform and load methods to be overwritten but the graph creation
and pipeline execution methods to be inherited.
The object extracts filings from the "Top" and "Rising" tabs of a subreddit.
Each of these Tab's context is extracted by a sperate Extraction method which
are then both fed into a transformation method which normalizes the data into
a standard format to be written to the database.
See graphviz plots of the bonobo graph for a structure outline of how data flows.
Once again all credit goes to Bonobo and Pandas for the actual heavy lifting.
Example:
test_pipeline = EDGARFilingsPipeline("test.sqlite", "learnpython")
Arguments:
dbpath (str): The relative or absoloute database URL pointing to
the database where stock price data should be written.
subreddit (str): The string that indicates the specific subreddit
that the data is to be scraped from.
"""
    def __init__(self, dbpath, subreddit_name, **kwargs):
        """Initialize the praw reddit client and immediately run the ETL pipeline.

        Note: the whole pipeline executes as a side effect of construction
        (see the execute_pipeline() call at the end).

        Arguments:
            dbpath (str): Relative or absolute URL of the sqlite database
                that scraped posts are written to.
            subreddit_name (str): Name of the subreddit to scrape.
            **kwargs: Currently unused; reserved for praw configuration
                overrides (see TODO below).
        """
        # Initalizing the parent Pipeline object:
        super(RedditContentPipeline, self).__init__(dbpath)
        self.subreddit_name = subreddit_name
        # Creating a reddit praw instance based on specified subreddit:
        # TODO: Add logic to extract praw config from KWARGS instead of env params.
        # NOTE(review): credentials come from the CLIENT_ID, CLIENT_SECRET and
        # USER_AGENT environment variables; a KeyError is raised if any is missing.
        self.reddit = praw.Reddit(
            client_id = os.environ["CLIENT_ID"],
            client_secret= os.environ["CLIENT_SECRET"],
            user_agent = os.environ["USER_AGENT"]
        )
        self.subreddit = self.reddit.subreddit(self.subreddit_name)
        print(f"Reddit Instance Initalized with Read Status:{self.reddit.read_only}")
        # Executing all of the ETL functions mapped in the graph:
        self.execute_pipeline()
def extract_rising_posts(self):
"""Method extracts the current rising reddit submissions from a subreddit
via the praw API wrapper.
The generator yields a dictionary containing relevant information extracted
from each post generated from the subreddit.rising() praw method. All rising posts
are compiled into this dict that is then passed into a data transformation method.
The data is compiled into a dict for speed as it is then converted into a dataframe
in the data transformation method. All seaching and transformation of raw data is done
prior to it being converted to a dataframe.
Yields: Dict
A dictionary containing all the relevant information for each reddit post
necessary to compile a dataframe:
{
id1: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
id2: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
...
idn: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
}
"""
posts_dict = {}
# Iterating through the rising posts constructing and generating dicts:
for post in self.subreddit.rising():
# Building the single dict key-value pair:
post_content_lst = [
post.title,
post.selftext,
post.upvote_ratio,
post.score,
post.num_comments,
post.created_utc,
post.stickied,
post.over_18,
post.spoiler,
post.permalink,
post.author
]
posts_dict[post.id] = post_content_lst
yield posts_dict
def extract_daily_top_posts(self):
"""Method extracts the daily top reddit submissions from a subreddit
via the praw API wrapper.
The generator yields a dictionary containing relevant information extracted
from each post generated from the subreddit.top(day) praw method. All top posts
are compiled into this dict that is then passed into a data transformation method.
The data is compiled into a dict for speed as it is then converted into a dataframe
in the data transformation method. All seaching and transformation of raw data is done
prior to it being converted to a dataframe.
Yields: Dict
A dictionary containing all the relevant information for each reddit post
necessary to compile a dataframe:
{
id1: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
id2: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
...
idn: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink ,author],
}
"""
posts_dict = {}
# Iterating through the rising posts constructing and generating dicts:
for post in self.subreddit.top("day"):
# Building the single dict key-value pair:
post_content_lst = [
post.title,
post.selftext,
post.upvote_ratio,
post.score,
post.num_comments,
post.created_utc,
post.stickied,
post.over_18,
post.spoiler,
post.permalink,
post.author
]
posts_dict[post.id] = post_content_lst
yield posts_dict
def transform_posts(self, *args):
"""The method recieves a length 1 tuple containing a dict of reddit posts generated
from the extraction methods and performs transformation on the dict to convert it
into a dataframe of elements that are not already stored in the database.
The dictionary recieved from the extraction methods are in the format:
{
id1: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author],
idn: [title, content, upvote_ratio, score, num_comments, created_on, stickied, over_18, spoiler, permalink, author]
}
The transformation method queries the database for existing posts posted on the
day that the pipeline is executed. It compares the index of the database data
and the index of the data recieved from the extraction methods. Only unique
elements not already in the database are passed into the load method.
When converting a dictionary to a unique elements Dataframe the method unpacks
the information and converts all data to the correct data types.
Yields: DataFrame
A DataFrame containing all of the relevant information for each submission in
the format:
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
|id (index)| title |content|upvote_ratio|score|num_comments|created_on|stickied|over_18|spoiler|permalink|author|author_is_gold|author_mod|author_has_verified_email|author_created|comment_karma|
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
| string | string| string| float | int | int | datetime | Bool | Bool | Bool | str | str | Bool | Bool | Bool | str | int |
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
"""
# Unpacking Args Tuple:
posts_dict = args[0]
# Querying existing posts from the database during current day:
con = sqlite3.connect(self.dbpath)
# TODO: Refine SQL Query to only extract data from database from the current day:
existing_posts_id = []
try:
existing_posts = pd.read_sql_query(f"SELECT * FROM {self.subreddit_name}_posts", con, index_col="id")
existing_posts_id = existing_posts.index
except:
pass
# Extracting unqiue keys from the posts_dict.keys() that are not present in the existing_post_id:
unique_id_keys = list(set(posts_dict.keys()) - set(existing_posts_id))
# Unpacking the "Author" parameter and extending Author derived params to the end of the content
# list for each dict key-value pair that is unique (not in the database):
unique_posts_dict = {
# Unpacking list for faster appending:
post_id:self._transform_post_content_lst(content_lst) for post_id, content_lst
in posts_dict.items() if post_id in unique_id_keys
}
#print(unique_posts_dict.items())
# Converting Dictionary of Unique Post elements to a dataframe:
posts_df = pd.DataFrame.from_dict(
unique_posts_dict,
orient='index',
columns=[
"title", "content", "upvote_ratio", "score", "num_comments", "created_on", "stickied", "over_18",
"spolier", "permalink", "author", "author_gold", "mod_status", "verified_email_status", "acc_created_on",
"comment_karma"])
# Converting 'author' column data type to string:
posts_df['author'] = posts_df.author.astype(str)
yield posts_df
def load_posts(self, *args):
"""Method writes the reddit posts dataframe into
the sqlite database.
The reddit posts dataframe that is wrtiten to the database is in the following
format:
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
|id (index)| title |content|upvote_ratio|score|num_comments|created_on|stickied|over_18|spoiler|permalink|author|author_is_gold|author_mod|author_has_verified_email|author_created|comment_karma|
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
| string | string| string| float | int | int | datetime | Bool | Bool | Bool | str | str | Bool | Bool | Bool | str | int |
+----------+-------+-------+------------+-----+------------+----------+--------+-------+-------+---------+------+--------------+----------+-------------------------+--------------+-------------+
Arguments:
args (tuple): The arguments passed into the load method by the transform method
containing the dataframe.
"""
posts_df = args[0]
# Creating connection to the database:
con = sqlite3.connect(self.dbpath)
# Writing the data to the database via pandas API:
posts_df.to_sql(f"{self.subreddit_name}_posts", con, if_exists="append", index_label="id")
def build_graph(self, **options):
"""The method that is used to construct a Bonobo ETL pipeline
DAG that schedules the following ETL methods:
- Extraction: extract_daily_top_posts, extract_rising_posts
- Transformation: transform_posts
- Loading: load_posts
Returns:
bonobo.Graph: The Bonobo Graph that is declared as an instance
parameter and that will be executed by the self.execute_pipeline method.
"""
# Building the Graph:
self.graph = bonobo.Graph()
# Creating the main method chain for the graph:
self.graph.add_chain(
self.transform_posts,
self.load_posts,
_input=None # Input set to None so self.transform_posts does not stat untill params are passed.
)
# Adding the first leg that extracts the top posts for the day:
| |
= child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'indSitPJ')
self.indSitPJ = ival_
# end class situacaoPJ
class indSitPJ(GeneratedsSuper):
    """Generated binding for the childless ``indSitPJ`` XML element.

    The element carries no attributes and no children, so most of the
    generated hooks below are intentionally empty.
    """
    subclass = None
    superclass = None
    def __init__(self):
        self.original_tagname_ = None
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass when one is available.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, indSitPJ)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if indSitPJ.subclass:
            return indSitPJ.subclass(*args_, **kwargs_)
        else:
            return indSitPJ(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # The element defines no children and no text content, so it can
        # never have content.  The generated code read "if (\n):", an empty
        # condition which is a SyntaxError; return False directly instead.
        return False
    def export(self, outfile, level, namespace_='', name_='indSitPJ', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('indSitPJ')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='indSitPJ')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='indSitPJ', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Always taken: the element is rendered as a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='indSitPJ'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='indSitPJ', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class indSitPJ
class situacaoPF(GeneratedsSuper):
    """Complementary information for a natural person
    (original: "Informações Complementares - Pessoa Física").
    Holds a single optional integer child element ``indSitPF``.
    """
    subclass = None
    superclass = None
    def __init__(self, indSitPF=None):
        # Tag name the instance was built from, if different from the class name.
        self.original_tagname_ = None
        # Integer situation indicator; None when the child element is absent.
        self.indSitPF = indSitPF
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass when one is available.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, situacaoPF)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if situacaoPF.subclass:
            return situacaoPF.subclass(*args_, **kwargs_)
        else:
            return situacaoPF(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_indSitPF(self): return self.indSitPF
    def set_indSitPF(self, indSitPF): self.indSitPF = indSitPF
    def hasContent_(self):
        # Content exists only when the single child element is set.
        if (
            self.indSitPF is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='situacaoPF', namespacedef_='', pretty_print=True):
        # Serialize this element (and its child, if any) as XML to outfile.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('situacaoPF')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='situacaoPF')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='situacaoPF', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='situacaoPF'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='situacaoPF', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.indSitPF is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sindSitPF>%s</%sindSitPF>%s' % (namespace_, self.gds_format_integer(self.indSitPF, input_name='indSitPF'), namespace_, eol_))
    def build(self, node):
        # Populate this instance from an etree node; returns self for chaining.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Parse the optional integer child element, raising a parse error
        # (not a bare exception) when the text is not a valid integer.
        if nodeName_ == 'indSitPF':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'indSitPF')
            self.indSitPF = ival_
# end class situacaoPF
class indSitPF(GeneratedsSuper):
    """Generated binding for the childless ``indSitPF`` XML element.

    The element carries no attributes and no children, so most of the
    generated hooks below are intentionally empty.
    """
    subclass = None
    superclass = None
    def __init__(self):
        self.original_tagname_ = None
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass when one is available.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, indSitPF)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if indSitPF.subclass:
            return indSitPF.subclass(*args_, **kwargs_)
        else:
            return indSitPF(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # The element defines no children and no text content, so it can
        # never have content.  The generated code read "if (\n):", an empty
        # condition which is a SyntaxError; return False directly instead.
        return False
    def export(self, outfile, level, namespace_='', name_='indSitPF', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('indSitPF')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='indSitPF')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='indSitPF', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Always taken: the element is rendered as a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='indSitPF'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='indSitPF', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class indSitPF
class TPeriodoValidade(GeneratedsSuper):
    """Validity period with optional ``iniValid`` (start) and ``fimValid``
    (end) string children.
    NOTE(review): the values are handled as plain strings here — presumably
    year-month stamps per the eSocial schema; verify against the XSD.
    """
    subclass = None
    superclass = None
    def __init__(self, iniValid=None, fimValid=None):
        # Tag name the instance was built from, if different from the class name.
        self.original_tagname_ = None
        # Start of the validity period; None when absent.
        self.iniValid = iniValid
        # End of the validity period; None when absent.
        self.fimValid = fimValid
    def factory(*args_, **kwargs_):
        # Prefer an externally registered subclass when one is available.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TPeriodoValidade)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TPeriodoValidade.subclass:
            return TPeriodoValidade.subclass(*args_, **kwargs_)
        else:
            return TPeriodoValidade(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_iniValid(self): return self.iniValid
    def set_iniValid(self, iniValid): self.iniValid = iniValid
    def get_fimValid(self): return self.fimValid
    def set_fimValid(self, fimValid): self.fimValid = fimValid
    def hasContent_(self):
        # Content exists when at least one of the two child elements is set.
        if (
            self.iniValid is not None or
            self.fimValid is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TPeriodoValidade', namespacedef_='', pretty_print=True):
        # Serialize this element (and its children, if any) as XML to outfile.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('TPeriodoValidade')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TPeriodoValidade')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TPeriodoValidade', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TPeriodoValidade'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='TPeriodoValidade', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.iniValid is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%siniValid>%s</%siniValid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.iniValid), input_name='iniValid')), namespace_, eol_))
        if self.fimValid is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sfimValid>%s</%sfimValid>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.fimValid), input_name='fimValid')), namespace_, eol_))
    def build(self, node):
        # Populate this instance from an etree node; returns self for chaining.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Both children are validated as plain strings.
        if nodeName_ == 'iniValid':
            iniValid_ = child_.text
            iniValid_ = self.gds_validate_string(iniValid_, node, 'iniValid')
            self.iniValid = iniValid_
        elif nodeName_ == 'fimValid':
            fimValid_ = child_.text
            fimValid_ = self.gds_validate_string(fimValid_, node, 'fimValid')
            self.fimValid = fimValid_
# end class TPeriodoValidade
# Maps root-element tag names to the generated binding class used to
# deserialize them (consulted first by get_root_tag() below).
GDSClassesMapping = {
    'ideEmpregador': TEmpregador,
    'ideEvento': TIdeCadastro,
    'idePeriodo': TIdePeriodo,
    'infoCadastro': TInfoEmpregador,
    'novaValidade': TPeriodoValidade,
}
# Command-line help text printed by usage().
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the command-line usage text and abort with exit status 1."""
    print(USAGE_TEXT)
    raise SystemExit(1)
def get_root_tag(node):
    """Map the etree root *node* to its tag name and generated binding class.

    Falls back to a module-global class of the same name when the tag is not
    listed in GDSClassesMapping; returns ``(tag, None)`` when neither exists.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    binding_class = GDSClassesMapping.get(tag)
    if binding_class is None:
        binding_class = globals().get(tag)
    return tag, binding_class
def parse(inFileName, silence=False):
    """Parse an XML file into the generated binding tree and return the root.

    Unless *silence* is true, the reconstructed document is echoed to stdout
    with pretty-printing enabled.
    """
    doc = parsexml_(inFileName, None)
    xml_root = doc.getroot()
    rootTag, rootClass = get_root_tag(xml_root)
    if rootClass is None:
        # Unknown root element: fall back to the document root class.
        rootTag = 'eSocial'
        rootClass = eSocial
    rootObj = rootClass.factory()
    rootObj.build(xml_root)
    doc = None  # let Python reclaim the space used by the DOM
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='',
            pretty_print=True)
    return rootObj
def parseEtree(inFileName, silence=False):
    """Parse an XML file and return the binding root plus an etree mirror.

    Returns ``(rootObj, rootElement, mapping, reverse_mapping)`` where
    *mapping* links binding objects to etree elements and *reverse_mapping*
    is its inverse.  Unless *silence* is true the document is echoed to
    stdout.
    """
    parser = None
    doc = parsexml_(inFileName, parser)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: fall back to the document root class.
        rootTag = 'eSocial'
        rootClass = eSocial
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    mapping = {}
    rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
    reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            rootElement, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        # tostring() returns bytes when an explicit encoding is given;
        # decode before writing so this works on Python 3's text stdout.
        sys.stdout.write(content.decode('utf-8'))
        sys.stdout.write('\n')
    return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse XML from a byte string and return the binding-tree root object.

    Unless *silence* is true, the document is re-exported to stdout.
    """
    # StringIO on Python 2 accepts byte strings; BytesIO is required on 3.
    if sys.version_info.major == 2:
        from StringIO import StringIO as IOBuffer
    else:
        from io import BytesIO as IOBuffer
    doc = parsexml_(IOBuffer(inString), None)
    xml_root = doc.getroot()
    rootTag, rootClass = get_root_tag(xml_root)
    if rootClass is None:
        # Unknown root element: fall back to the document root class.
        rootTag = 'eSocial'
        rootClass = eSocial
    rootObj = rootClass.factory()
    rootObj.build(xml_root)
    doc = None  # let Python reclaim the space used by the DOM
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='')
    return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'eSocial'
rootClass = eSocial
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from evtInfoEmpregador import *\n\n')
sys.stdout.write('import evtInfoEmpregador as model_\n\n')
| |
<reponame>odinn13/Tilings
import json
from itertools import chain, product
import pytest
import sympy
from permuta import Perm
from tilings import GriddedPerm, Tiling
from tilings.exception import InvalidOperationError
@pytest.fixture
def compresstil():
    """Returns a tiling that has both obstructions and requirements. For
    testing compression and json."""
    return Tiling(
        obstructions=(
            # Length-1 and length-2 obstructions.
            GriddedPerm(Perm((0,)), ((1, 0),)),
            GriddedPerm(Perm((0,)), ((2, 1),)),
            GriddedPerm(Perm((0, 1)), ((1, 1), (1, 1))),
            GriddedPerm(Perm((0, 1)), ((2, 0), (2, 0))),
            GriddedPerm(Perm((1, 0)), ((1, 1), (1, 1))),
            GriddedPerm(Perm((1, 0)), ((1, 1), (2, 0))),
            GriddedPerm(Perm((1, 0)), ((2, 0), (2, 0))),
            # Length-3 obstructions.
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 0), (0, 1))),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 0), (2, 0))),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 1), (0, 1))),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 1), (1, 1))),
            GriddedPerm(Perm((0, 2, 1)), ((0, 0), (0, 0), (0, 0))),
            GriddedPerm(Perm((0, 2, 1)), ((0, 0), (0, 0), (2, 0))),
            GriddedPerm(Perm((1, 0, 2)), ((0, 1), (0, 0), (1, 1))),
            GriddedPerm(Perm((2, 0, 1)), ((0, 0), (0, 0), (0, 0))),
            # Length-4 obstructions.
            GriddedPerm(Perm((0, 1, 3, 2)), ((0, 1), (0, 1), (0, 1), (0, 1))),
            GriddedPerm(Perm((0, 1, 3, 2)), ((0, 1), (0, 1), (0, 1), (1, 1))),
            GriddedPerm(Perm((0, 2, 1, 3)), ((0, 1), (0, 1), (0, 1), (0, 1))),
            GriddedPerm(Perm((0, 2, 1, 3)), ((0, 1), (0, 1), (0, 1), (1, 1))),
            GriddedPerm(Perm((0, 2, 3, 1)), ((0, 1), (0, 1), (0, 1), (0, 1))),
            GriddedPerm(Perm((0, 2, 3, 1)), ((0, 1), (0, 1), (0, 1), (1, 1))),
            GriddedPerm(Perm((2, 0, 1, 3)), ((0, 1), (0, 1), (0, 1), (0, 1))),
            GriddedPerm(Perm((2, 0, 1, 3)), ((0, 1), (0, 1), (0, 1), (1, 1))),
        ),
        requirements=(
            # One disjunctive requirement (either pattern) and one single-pattern one.
            (GriddedPerm(Perm((0,)), ((1, 1),)), GriddedPerm(Perm((0,)), ((2, 0),))),
            (GriddedPerm(Perm((1, 0, 2)), ((0, 0), (0, 0), (0, 0))),),
        ),
    )
@pytest.fixture
def empty_tiling():
    """A tiling whose requirements contradict its obstructions.

    Both cells (0, 0) and (0, 1) must contain a point, yet both relative
    orders of a point from each cell are forbidden, so no gridded
    permutation satisfies the tiling.
    """
    return Tiling(
        obstructions=(
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 1))),
            GriddedPerm(Perm((1, 0)), ((0, 1), (0, 0))),
        ),
        requirements=(
            (GriddedPerm(Perm((0,)), ((0, 0),)),),
            (GriddedPerm(Perm((0,)), ((0, 1),)),),
        ),
    )
@pytest.fixture
def finite_tiling():
    """A tiling containing only finitely many gridded permutations.

    Increasing pairs and decreasing triples are forbidden within each cell
    of column 0, which bounds the length of any gridded permutation.
    """
    return Tiling(
        obstructions=(
            GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
            GriddedPerm(Perm((0, 1)), ((0, 1), (0, 1))),
            GriddedPerm(Perm((2, 1, 0)), ((0, 0), (0, 0), (0, 0))),
            GriddedPerm(Perm((2, 1, 0)), ((0, 1), (0, 1), (0, 1))),
            GriddedPerm(Perm((3, 2, 1, 0)), ((0, 1), (0, 1), (0, 0), (0, 0))),
        ),
        requirements=((GriddedPerm(Perm((0,)), ((0, 0),)),),),
    )
@pytest.fixture
def factorable_tiling():
    # Obstructions and requirements touch several disjoint groups of cells,
    # so the tiling should factor into independent components.
    return Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 0), (0, 0))),
            GriddedPerm(Perm((0, 2, 1)), ((1, 0), (1, 0), (1, 0))),
            GriddedPerm(Perm((2, 1, 0)), ((2, 2), (2, 2), (2, 2))),
            GriddedPerm(Perm((2, 0, 1)), ((2, 3), (2, 3), (2, 3))),
            GriddedPerm(Perm((1, 0, 2)), ((5, 4), (5, 4), (5, 4))),
            GriddedPerm(Perm((2, 0, 1)), ((5, 4), (5, 4), (5, 4))),
            GriddedPerm(Perm((1, 2, 0)), ((4, 6), (4, 6), (4, 6))),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (0, 0), (2, 2))),
            GriddedPerm(Perm((0, 1, 2, 3)), ((2, 2), (2, 2), (2, 3), (2, 3))),
            GriddedPerm(Perm((0, 1)), ((6, 4), (6, 4))),
            GriddedPerm(Perm((1, 0)), ((6, 4), (6, 4))),
            GriddedPerm(Perm((0, 1)), ((7, 7), (7, 7))),
        ],
        requirements=[
            [
                GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
                GriddedPerm(Perm((1, 0)), ((4, 6), (4, 6))),
            ],
            [GriddedPerm(Perm((0,)), ((6, 4),))],
        ],
    )
@pytest.fixture
def obs_inf_til():
    # Single-column tiling; the one requirement forces a descent with the
    # larger value in cell (0, 1) and the smaller in cell (0, 0).
    return Tiling(
        obstructions=[
            GriddedPerm(Perm((0, 1)), ((0, 1), (0, 1))),
            GriddedPerm(Perm((1, 0)), ((0, 0), (0, 0))),
            GriddedPerm(Perm((1, 0)), ((0, 1), (0, 1))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 0), (0, 2), (0, 1), (0, 0))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 0), (0, 2), (0, 2), (0, 0))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 0), (0, 2), (0, 2), (0, 1))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 0), (0, 2), (0, 2), (0, 2))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 1), (0, 2), (0, 2), (0, 2))),
            GriddedPerm(Perm((0, 3, 2, 1)), ((0, 2), (0, 2), (0, 2), (0, 2))),
            GriddedPerm(Perm((1, 0, 3, 2)), ((0, 1), (0, 0), (0, 2), (0, 2))),
            GriddedPerm(Perm((1, 0, 3, 2)), ((0, 2), (0, 0), (0, 2), (0, 2))),
            GriddedPerm(Perm((1, 0, 3, 2)), ((0, 2), (0, 1), (0, 2), (0, 2))),
            GriddedPerm(Perm((1, 0, 3, 2)), ((0, 2), (0, 2), (0, 2), (0, 2))),
        ],
        requirements=[[GriddedPerm(Perm((1, 0)), ((0, 1), (0, 0)))]],
    )
@pytest.fixture
def typical_redundant_obstructions():
    """Returns a very typical list of obstructions clustered together in a
    corner of a tiling. """
    # Deliberately includes obstructions implied by shorter ones, so the
    # Tiling constructor's simplification/minimization can be exercised.
    return [
        GriddedPerm(Perm((0, 1)), ((1, 0), (1, 0))),
        GriddedPerm(Perm((0, 1)), ((1, 0), (2, 0))),
        GriddedPerm(Perm((0, 1)), ((1, 0), (3, 0))),
        GriddedPerm(Perm((0, 1)), ((2, 0), (2, 0))),
        GriddedPerm(Perm((0, 1)), ((2, 0), (3, 0))),
        GriddedPerm(Perm((0, 1)), ((3, 1), (3, 1))),
        GriddedPerm(Perm((1, 0)), ((3, 0), (3, 0))),
        GriddedPerm(Perm((1, 0)), ((3, 1), (3, 0))),
        GriddedPerm(Perm((1, 0)), ((3, 1), (3, 1))),
        GriddedPerm(Perm((0, 1, 2)), ((3, 0), (3, 0), (3, 0))),
        GriddedPerm(Perm((0, 1, 2)), ((3, 0), (3, 0), (3, 1))),
        GriddedPerm(Perm((2, 1, 0)), ((1, 0), (1, 0), (1, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((1, 0), (1, 0), (2, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((1, 0), (1, 0), (3, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((1, 0), (2, 0), (2, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((1, 0), (2, 0), (3, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((2, 0), (2, 0), (2, 0))),
        GriddedPerm(Perm((2, 1, 0)), ((2, 0), (2, 0), (3, 0))),
        GriddedPerm(Perm((3, 2, 1, 0)), ((1, 1), (2, 0), (2, 0), (2, 0))),
        GriddedPerm(Perm((3, 2, 1, 0)), ((2, 1), (2, 1), (3, 0), (3, 0))),
    ]
@pytest.fixture
def typical_redundant_requirements():
    """Returns a very typical list of requirements of a tiling. """
    # Four requirement lists; each inner list is a disjunction of gridded
    # permutations, at least one of which must occur.
    return [
        [
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (2, 3))),
            GriddedPerm(Perm((0, 1, 2)), ((0, 0), (1, 0), (2, 4))),
            GriddedPerm(Perm((1, 0, 2)), ((0, 0), (1, 0), (2, 3))),
            GriddedPerm(Perm((1, 0, 2)), ((0, 1), (1, 0), (2, 3))),
        ],
        [
            GriddedPerm(Perm((0, 1, 2)), ((2, 3), (2, 3), (2, 3))),
            GriddedPerm(Perm((1, 0, 2)), ((0, 0), (0, 0), (0, 0))),
            GriddedPerm(Perm((0, 1, 2)), ((1, 0), (1, 0), (1, 0))),
        ],
        [
            GriddedPerm(Perm((0, 1)), ((1, 0), (3, 0))),
            GriddedPerm(Perm((0, 1)), ((2, 0), (2, 0))),
            GriddedPerm(Perm((0, 1)), ((2, 0), (3, 0))),
            GriddedPerm(Perm((0, 1)), ((2, 0), (3, 1))),
        ],
        [
            GriddedPerm(Perm((1, 0)), ((3, 3), (3, 1))),
            GriddedPerm(Perm((1, 0)), ((3, 1), (3, 1))),
            GriddedPerm(Perm((1, 0)), ((3, 1), (3, 0))),
        ],
    ]
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_constructor_no_requirements(typical_redundant_obstructions):
"""Tests the constructor of Tiling, thereby the minimization methods used
in the constructor with different options for remove_empty_rows_and_cols and
derive_empty. Proper update of the dimensions of the tiling and proper
computation of empty and active cells.
Tests without any requirements.
"""
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=False,
derive_empty=False,
simplify=False,
)
assert len(tiling._obstructions) == 20
assert len(tiling._requirements) == 0
(i, j) = tiling.dimensions
assert i == 4
assert j == 2
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=False,
derive_empty=False,
simplify=True,
)
assert len(tiling._obstructions) == 18
assert len(tiling._requirements) == 0
(i, j) = tiling.dimensions
assert i == 4
assert j == 2
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=False,
derive_empty=True,
simplify=False,
)
assert len(tiling._obstructions) == 22
assert len(tiling._requirements) == 0
(i, j) = tiling.dimensions
assert i == 4
assert j == 2
assert tiling.empty_cells == {(0, 0), (0, 1)}
assert tiling.active_cells == {(1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)}
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=False,
derive_empty=True,
simplify=True,
)
assert len(tiling._obstructions) == 22
assert len(tiling._requirements) == 0
(i, j) = tiling.dimensions
assert i == 4
assert j == 2
assert tiling.empty_cells == {(0, 0), (0, 1), (1, 1), (2, 1)}
assert tiling.active_cells == {(1, 0), (2, 0), (3, 0), (3, 1)}
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=True,
derive_empty=True,
simplify=False,
)
(i, j) = tiling.dimensions
assert i == 3
assert j == 2
assert tiling.empty_cells == set()
assert tiling.active_cells == {(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)}
assert len(tiling._obstructions) == 20
assert len(tiling._requirements) == 0
tiling = Tiling(
obstructions=typical_redundant_obstructions,
remove_empty_rows_and_cols=True,
derive_empty=True,
simplify=True,
)
(i, j) = tiling.dimensions
assert i == 3
assert j == 2
assert tiling.empty_cells == {(0, 1), (1, 1)}
assert tiling.active_cells == {(0, 0), (1, 0), (2, 0), (2, 1)}
assert len(tiling._obstructions) == 20
assert len(tiling._requirements) == 0
tiling2 = Tiling(
obstructions=[
GriddedPerm(Perm((0, 1)), ((0, 0), (0, 0))),
GriddedPerm(Perm((0, 1)), ((0, 0), (1, 0))),
GriddedPerm(Perm((0, 1)), ((0, 0), (2, 0))),
GriddedPerm(Perm((0, 1)), ((1, 0), (1, 0))),
GriddedPerm(Perm((0, 1)), ((1, 0), (2, 0))),
GriddedPerm(Perm((0, 1)), ((2, 1), (2, 1))),
GriddedPerm(Perm((1, 0)), ((2, 0), (2, 0))),
GriddedPerm(Perm((1, 0)), ((2, 1), (2, 0))),
GriddedPerm(Perm((1, 0)), ((2, 1), (2, 1))),
GriddedPerm(Perm((0, 1, 2)), ((2, 0), (2, 0), (2, 0))),
GriddedPerm(Perm((0, 1, | |
/ (1 + w * 1j * t_values[50]))
+ (R_values[51] / (1 + w * 1j * t_values[51]))
+ (R_values[52] / (1 + w * 1j * t_values[52]))
+ (R_values[53] / (1 + w * 1j * t_values[53]))
+ (R_values[54] / (1 + w * 1j * t_values[54]))
+ (R_values[55] / (1 + w * 1j * t_values[55]))
+ (R_values[56] / (1 + w * 1j * t_values[56]))
+ (R_values[57] / (1 + w * 1j * t_values[57]))
+ (R_values[58] / (1 + w * 1j * t_values[58]))
+ (R_values[59] / (1 + w * 1j * t_values[59]))
+ (R_values[60] / (1 + w * 1j * t_values[60]))
+ (R_values[61] / (1 + w * 1j * t_values[61]))
+ (R_values[62] / (1 + w * 1j * t_values[62]))
+ (R_values[63] / (1 + w * 1j * t_values[63]))
)
def KK_RC65(w, Rs, R_values, t_values):
    """
    Kramers-Kronig function for a series -RC- circuit with 65 parallel RC
    elements:  Z(w) = Rs + sum_k R_k / (1 + j*w*t_k),  k = 0..64.

    Parameters
    ----------
    w : float or array-like
        Angular frequency (scalar or NumPy array).
    Rs : float
        Series (ohmic) resistance.
    R_values : sequence of float
        Resistances of the RC elements; exactly indices 0..64 are used
        (an IndexError is raised if fewer are supplied, as before).
    t_values : sequence of float
        Time constants of the RC elements; indices 0..64 are used.

    Returns
    -------
    complex or numpy.ndarray
        Complex impedance at frequency ``w``.
    """
    # Identical to the original 65 hand-written terms, expressed as a loop;
    # indexing (rather than zip) preserves the IndexError on short inputs.
    return Rs + sum(
        R_values[k] / (1 + w * 1j * t_values[k]) for k in range(65)
    )
def KK_RC66(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
+ (R_values[12] / (1 + w * 1j * t_values[12]))
+ (R_values[13] / (1 + w * 1j * t_values[13]))
+ (R_values[14] / (1 + w * 1j * t_values[14]))
+ (R_values[15] / (1 + w * 1j * t_values[15]))
+ (R_values[16] / (1 + w * 1j * t_values[16]))
+ (R_values[17] / (1 + w * 1j * t_values[17]))
+ (R_values[18] / (1 + w * 1j * t_values[18]))
+ (R_values[19] / (1 + w * 1j * t_values[19]))
+ (R_values[20] / (1 + w * 1j * t_values[20]))
+ (R_values[21] / (1 + w * 1j * t_values[21]))
+ (R_values[22] / (1 + w * 1j * t_values[22]))
+ (R_values[23] / (1 + w * 1j * t_values[23]))
+ (R_values[24] / (1 + w * 1j * t_values[24]))
+ (R_values[25] / (1 + w * 1j * t_values[25]))
+ (R_values[26] / (1 + w * 1j * t_values[26]))
+ (R_values[27] / (1 + w * 1j * t_values[27]))
+ (R_values[28] / (1 + w * 1j * t_values[28]))
+ (R_values[29] / (1 + w * 1j * t_values[29]))
+ (R_values[30] / (1 + w * 1j * t_values[30]))
+ (R_values[31] / (1 + w * 1j * t_values[31]))
+ (R_values[32] / (1 + w * 1j * t_values[32]))
+ (R_values[33] / (1 + w * 1j * t_values[33]))
+ (R_values[34] / (1 + w * 1j * t_values[34]))
+ (R_values[35] / (1 + w * 1j * t_values[35]))
+ (R_values[36] / (1 + w * 1j * t_values[36]))
+ (R_values[37] / (1 + w * 1j * t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j | |
<reponame>pkarande/Benchmarks-1
#! /usr/bin/env python
"""Multilayer Perceptron for drug response problem"""
from __future__ import division, print_function
import argparse
import csv
import logging
import sys
import numpy as np
import pandas as pd
from itertools import tee, islice
from keras import backend as K
from keras import metrics
from keras.models import Sequential
from keras.layers import Activation, BatchNormalization, Dense, Dropout, LocallyConnected1D, Conv1D, MaxPooling1D, Flatten
from keras.callbacks import Callback, ModelCheckpoint, ProgbarLogger
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
# For non-interactive plotting
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import p1b3
from p1b3 import logger
# Model and Training parameters

# Seed for random generation
SEED = 2016
# Size of batch for training
BATCH_SIZE = 100
# Number of training epochs
EPOCHS = 20
# Number of data generator workers
WORKERS = 1
# Percentage of dropout used in training
DROP = 0.1
# Activation function (options: 'relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear')
ACTIVATION = 'relu'
LOSS = 'mse'
OPTIMIZER = 'sgd'
# OPTIMIZER = 'adam'
# Type of feature scaling (options: 'maxabs': to [-1,1]
#                                   'minmax': to [0,1]
#                                   None    : standard normalization)
SCALING = 'std'
# Features to (randomly) sample from cell lines or drug descriptors
# FEATURE_SUBSAMPLE = 500
FEATURE_SUBSAMPLE = 0
# Number of units in fully connected (dense) layers
D1 = 1000
D2 = 500
D3 = 100
D4 = 50
DENSE_LAYERS = [D1, D2, D3, D4]
# Number of units per convolution layer or locally connected layer
CONV_LAYERS = [0, 0, 0]  # filters, filter_len, stride
POOL = 10
MIN_LOGCONC = -5.
MAX_LOGCONC = -4.
CATEGORY_CUTOFFS = [0.]
VAL_SPLIT = 0.2
TEST_CELL_SPLIT = 0.15

# np.nan is not a valid 'threshold' on newer NumPy releases (it raises a
# ValueError); sys.maxsize gives the intended "print the whole array"
# behaviour on every version.  `sys` is imported at the top of this file.
np.set_printoptions(threshold=sys.maxsize)
np.random.seed(SEED)
def get_parser():
    """Create the command-line argument parser for the p1b3 baseline run.

    Every option defaults to the module-level constant of the same meaning,
    so running with no arguments reproduces the baseline configuration.
    """
    arg_parser = argparse.ArgumentParser(
        prog='p1b3_baseline',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="increase output verbosity")
    arg_parser.add_argument(
        "-a", "--activation", default=ACTIVATION,
        help="keras activation function to use in inner layers: relu, tanh, sigmoid...")
    arg_parser.add_argument(
        "-e", "--epochs", type=int, default=EPOCHS,
        help="number of training epochs")
    arg_parser.add_argument(
        '-l', '--log', dest='logfile', default=None,
        help="log file")
    arg_parser.add_argument(
        "-z", "--batch_size", type=int, default=BATCH_SIZE,
        help="batch size")
    arg_parser.add_argument(
        "--batch_normalization", action="store_true",
        help="use batch normalization")
    arg_parser.add_argument(
        "--conv", nargs='+', type=int, default=CONV_LAYERS,
        help="integer array describing convolution layers: conv1_filters, conv1_filter_len, conv1_stride, conv2_filters, conv2_filter_len, conv2_stride ...")
    arg_parser.add_argument(
        "--dense", nargs='+', type=int, default=DENSE_LAYERS,
        help="number of units in fully connected layers in an integer array")
    arg_parser.add_argument(
        "--drop", type=float, default=DROP,
        help="ratio of dropout used in fully connected layers")
    arg_parser.add_argument(
        "--locally_connected", action="store_true", default=False,
        help="use locally connected layers instead of convolution layers")
    arg_parser.add_argument(
        "--optimizer", default=OPTIMIZER,
        help="keras optimizer to use: sgd, rmsprop, ...")
    arg_parser.add_argument(
        "--loss", default=LOSS,
        help="keras loss function to use: mse, ...")
    arg_parser.add_argument(
        "--pool", type=int, default=POOL,
        help="pooling layer length")
    # NOTE(review): choices list 'minabs' while the SCALING comment above
    # says 'maxabs' — confirm the intended spelling against p1b3.py.
    arg_parser.add_argument(
        "--scaling", default=SCALING,
        choices=['minabs', 'minmax', 'std', 'none'],
        help="type of feature scaling; 'minabs': to [-1,1]; 'minmax': to [0,1], 'std': standard unit normalization; 'none': no normalization")
    arg_parser.add_argument(
        "--cell_features", nargs='+', default=['expression'],
        choices=['expression', 'mirna', 'proteome', 'all', 'categorical'],
        help="use one or more cell line feature sets: 'expression', 'mirna', 'proteome', 'all'; or use 'categorical' for one-hot encoding of cell lines")
    arg_parser.add_argument(
        "--drug_features", nargs='+', default=['descriptors'],
        choices=['descriptors', 'latent', 'all', 'noise'],
        help="use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder, or both, or random features; 'descriptors','latent', 'all', 'noise'")
    arg_parser.add_argument(
        "--feature_subsample", type=int, default=FEATURE_SUBSAMPLE,
        help="number of features to randomly sample from each category (cellline expression, drug descriptors, etc), 0 means using all features")
    arg_parser.add_argument(
        "--min_logconc", type=float, default=MIN_LOGCONC,
        help="min log concentration of dose response data to use: -3.0 to -7.0")
    arg_parser.add_argument(
        "--max_logconc", type=float, default=MAX_LOGCONC,
        help="max log concentration of dose response data to use: -3.0 to -7.0")
    arg_parser.add_argument(
        "--subsample", default='naive_balancing',
        choices=['naive_balancing', 'none'],
        help="dose response subsample strategy; 'none' or 'naive_balancing'")
    arg_parser.add_argument(
        "--category_cutoffs", nargs='+', type=float, default=CATEGORY_CUTOFFS,
        help="list of growth cutoffs (between -1 and +1) seperating non-response and response categories")
    arg_parser.add_argument(
        "--val_split", type=float, default=VAL_SPLIT,
        help="fraction of data to use in validation")
    arg_parser.add_argument(
        "--test_cell_split", type=float, default=TEST_CELL_SPLIT,
        help="cell lines to use in test; if None use predefined unseen cell lines instead of sampling cell lines used in training")
    arg_parser.add_argument(
        "--train_steps", type=int, default=0,
        help="overrides the number of training batches per epoch if set to nonzero")
    arg_parser.add_argument(
        "--val_steps", type=int, default=0,
        help="overrides the number of validation batches per epoch if set to nonzero")
    arg_parser.add_argument(
        "--test_steps", type=int, default=0,
        help="overrides the number of test batches per epoch if set to nonzero")
    arg_parser.add_argument(
        "--save", default='save',
        help="prefix of output files")
    arg_parser.add_argument(
        "--scramble", action="store_true",
        help="randomly shuffle dose response data")
    arg_parser.add_argument(
        "--workers", type=int, default=WORKERS,
        help="number of data generator workers")
    return arg_parser
def extension_from_parameters(args):
    """Build a file-name suffix that encodes the run's hyperparameters.

    Each active setting contributes one '.KEY=VALUE' fragment, e.g.
    '.A=relu.B=32...'; the joined string is used to annotate saved models.
    """
    parts = ['.A={}'.format(args.activation),
             '.B={}'.format(args.batch_size),
             '.D={}'.format(args.drop),
             '.E={}'.format(args.epochs)]
    if args.feature_subsample:
        parts.append('.F={}'.format(args.feature_subsample))
    if args.conv:
        prefix = 'LC' if args.locally_connected else 'C'
        # args.conv is a flat list of (filters, filter_len, stride) triples.
        for idx, base in enumerate(range(0, len(args.conv), 3)):
            nb_filters = args.conv[base]
            filter_len = args.conv[base + 1]
            stride = args.conv[base + 2]
            # A non-positive entry terminates the conv-layer spec.
            if nb_filters <= 0 or filter_len <= 0 or stride <= 0:
                break
            parts.append('.{}{}={},{},{}'.format(prefix, idx + 1, nb_filters, filter_len, stride))
        if args.pool and args.conv[0] and args.conv[1]:
            parts.append('.P={}'.format(args.pool))
    for pos, width in enumerate(args.dense):
        if width:
            parts.append('.D{}={}'.format(pos + 1, width))
    if args.batch_normalization:
        parts.append('.BN')
    parts.append('.S={}'.format(args.scaling))
    return ''.join(parts)
def evaluate_keras_metric(y_true, y_pred, metric):
    """Evaluate the named Keras metric on two arrays and return a concrete value.

    Looks the metric callable up by name, applies it to build the symbolic
    objective, and forces evaluation through the backend (`K.eval`).
    """
    return K.eval(metrics.get(metric)(y_true, y_pred))
def evaluate_model(model, generator, steps, metric, category_cutoffs=None):
    """Evaluate `model` on `steps` batches drawn from `generator`.

    :param model: Keras model supporting `predict_on_batch`.
    :param generator: iterator yielding (x_batch, y_batch) pairs.
    :param steps: number of batches to consume.
    :param metric: name of the Keras metric used for the loss value.
    :param category_cutoffs: growth cutoffs used to discretize regression
        targets into classes; defaults to [0.].
    :return: (loss, acc, y_true, y_pred, y_true_class, y_pred_class)
    """
    # Avoid the mutable-default-argument pitfall; None stands in for [0.].
    if category_cutoffs is None:
        category_cutoffs = [0.]
    # Accumulate per-batch arrays and concatenate once at the end;
    # np.concatenate inside the loop is quadratic in the number of batches.
    y_true_batches, y_pred_batches = [], []
    for _ in range(steps):
        x_batch, y_batch = next(generator)
        y_true_batches.append(y_batch)
        y_pred_batches.append(model.predict_on_batch(x_batch).ravel())
    y_true = np.concatenate(y_true_batches)
    y_pred = np.concatenate(y_pred_batches)
    loss = evaluate_keras_metric(y_true.astype(np.float32), y_pred.astype(np.float32), metric)
    y_true_class = np.digitize(y_true, category_cutoffs)
    y_pred_class = np.digitize(y_pred, category_cutoffs)
    # theano does not like integer input
    acc = evaluate_keras_metric(y_true_class.astype(np.float32), y_pred_class.astype(np.float32), 'binary_accuracy')  # works for multiclass labels as well
    return loss, acc, y_true, y_pred, y_true_class, y_pred_class
def plot_error(y_true, y_pred, batch, file_ext, file_pre='save', subsample=1000):
    """Every 10th batch, save two PNG diagnostics: a histogram of prediction
    errors (in percentage growth) and a measured-vs-predicted scatter plot.
    File names are built from `file_pre`, `file_ext` and the batch number.
    """
    if batch % 10:
        return
    total = len(y_true)
    if subsample and subsample < total:
        # Plot a random subset to keep the figures legible and cheap.
        chosen = np.random.choice(total, size=subsample, replace=False)
        y_true = y_true[chosen]
        y_pred = y_pred[chosen]
    # Express fractional growth values as percentages.
    y_true = y_true * 100
    y_pred = y_pred * 100
    edges = np.linspace(-200, 200, 100)
    if batch == 0:
        # Baseline: the error distribution of randomly shuffled truth values.
        shuffled = np.random.permutation(y_true)
        plt.hist(shuffled - y_true, edges, alpha=0.5, label='Random')
    plt.hist(y_pred - y_true, edges, alpha=0.3, label='Epoch {}'.format(batch+1))
    plt.title("Histogram of errors in percentage growth")
    plt.legend(loc='upper right')
    plt.savefig(file_pre+'.histogram'+file_ext+'.b'+str(batch)+'.png')
    plt.close()
    # Plot measured vs. predicted values
    fig, ax = plt.subplots()
    plt.grid('on')
    ax.scatter(y_true, y_pred, color='red', s=10)
    ax.plot([y_true.min(), y_true.max()],
            [y_true.min(), y_true.max()], 'k--', lw=4)
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.savefig(file_pre+'.diff'+file_ext+'.b'+str(batch)+'.png')
    plt.close()
class MyLossHistory(Callback):
    """Keras callback that, at the end of every epoch, evaluates the model on
    the validation and test generators, forwards the extra metrics to the
    progress bar, and plots prediction errors when validation loss improves.
    """
    def __init__(self, progbar, val_gen, test_gen, val_steps, test_steps, metric, category_cutoffs=None, ext='', pre='save'):
        """Store evaluation generators/settings.

        :param progbar: MyProgbarLogger instance receiving extra log values.
        :param category_cutoffs: growth cutoffs for classification accuracy;
            defaults to [0.] (None avoids a shared mutable default).
        :param ext: file-name extension used for plots.
        :param pre: file-name prefix used for plots.
        """
        super(MyLossHistory, self).__init__()
        self.progbar = progbar
        self.val_gen = val_gen
        self.test_gen = test_gen
        self.val_steps = val_steps
        self.test_steps = test_steps
        self.metric = metric
        self.category_cutoffs = [0.] if category_cutoffs is None else category_cutoffs
        self.pre = pre
        self.ext = ext
    def on_train_begin(self, logs=None):
        # Best-so-far trackers; np.inf (np.Inf was removed in NumPy 2.0).
        self.best_val_loss = np.inf
        self.best_val_acc = -np.inf
    def on_epoch_end(self, batch, logs=None):
        logs = logs or {}
        val_loss, val_acc, y_true, y_pred, y_true_class, y_pred_class = evaluate_model(self.model, self.val_gen, self.val_steps, self.metric, self.category_cutoffs)
        test_loss, test_acc, _, _, _, _ = evaluate_model(self.model, self.test_gen, self.test_steps, self.metric, self.category_cutoffs)
        self.progbar.append_extra_log_values([('val_acc', val_acc), ('test_loss', test_loss), ('test_acc', test_acc)])
        # Only produce plots when validation loss improved, to limit output.
        if float(logs.get('val_loss', 0)) < self.best_val_loss:
            plot_error(y_true, y_pred, batch, self.ext, self.pre)
        self.best_val_loss = min(float(logs.get('val_loss', 0)), self.best_val_loss)
        self.best_val_acc = max(float(logs.get('val_acc', 0)), self.best_val_acc)
class MyProgbarLogger(ProgbarLogger):
    """Keras progress-bar logger extended with externally supplied log values
    (e.g. test metrics) that are appended to each epoch summary line.

    NOTE(review): relies on ProgbarLogger internals (self.seen, self.target,
    self.progbar, self.epochs, self.params) -- presumably populated by the
    Keras base class during training; confirm against the installed version.
    """
    def __init__(self, samples):
        # count_mode='samples': report progress in samples, not batches.
        super(MyProgbarLogger, self).__init__(count_mode='samples')
        self.samples = samples
    def on_train_begin(self, logs=None):
        """Force verbose output and advertise the total sample count."""
        super(MyProgbarLogger, self).on_train_begin(logs)
        self.verbose = 1
        self.extra_log_values = []
        self.params['samples'] = self.samples
    def on_batch_begin(self, batch, logs=None):
        # Clear per-batch values only while the epoch is still in progress.
        if self.seen < self.target:
            self.log_values = []
            self.extra_log_values = []
    def append_extra_log_values(self, tuples):
        """Queue (name, value) pairs to be shown at the end of the epoch."""
        for k, v in tuples:
            self.extra_log_values.append((k, v))
    def on_epoch_end(self, epoch, logs=None):
        """Emit the epoch summary, including the queued extra values."""
        logs = logs or {}
        epoch_log = 'Epoch {}/{}'.format(epoch + 1, self.epochs)
        # Standard Keras metrics first, in the order declared in params.
        for k in self.params['metrics']:
            if k in logs:
                self.log_values.append((k, logs[k]))
                epoch_log += ' - {}: {:.4f}'.format(k, logs[k])
        # Then the externally supplied values (val_acc, test_loss, ...).
        for k, v in self.extra_log_values:
            self.log_values.append((k, v))
            epoch_log += ' - {}: {:.4f}'.format(k, float(v))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values, force=True)
        # Mirror the epoch summary into the log file at debug level.
        logger.debug(epoch_log)
def main():
parser = get_parser()
args = parser.parse_args()
ext = extension_from_parameters(args)
logfile = args.logfile if args.logfile else args.save+ext+'.log'
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if args.verbose else logging.INFO)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.addHandler(sh)
logger.info('Args: {}'.format(args))
loader = p1b3.DataLoader(val_split=args.val_split,
test_cell_split=args.test_cell_split,
cell_features=args.cell_features,
drug_features=args.drug_features,
feature_subsample=args.feature_subsample,
scaling=args.scaling,
scramble=args.scramble,
min_logconc=args.min_logconc,
max_logconc=args.max_logconc,
subsample=args.subsample,
category_cutoffs=args.category_cutoffs)
gen_shape = None
out_dim = 1
model = Sequential()
if args.conv and args.conv[0]:
gen_shape = 'add_1d'
layer_list = list(range(0, len(args.conv), 3))
for l, i in enumerate(layer_list):
filters = args.conv[i]
filter_len = args.conv[i+1]
stride = args.conv[i+2]
if filters <= 0 or filter_len <= 0 or stride <= 0:
break
if args.locally_connected:
model.add(LocallyConnected1D(filters, filter_len, strides=stride, input_shape=(loader.input_dim, 1)))
else:
model.add(Conv1D(filters, filter_len, strides=stride, input_shape=(loader.input_dim, 1)))
if args.batch_normalization:
model.add(BatchNormalization())
model.add(Activation(args.activation))
if args.pool:
model.add(MaxPooling1D(pool_size=args.pool))
model.add(Flatten())
for layer in args.dense:
if layer:
model.add(Dense(layer, input_dim=loader.input_dim))
if args.batch_normalization:
model.add(BatchNormalization())
model.add(Activation(args.activation))
if args.drop:
model.add(Dropout(args.drop))
model.add(Dense(out_dim))
model.summary()
logger.debug('Model: {}'.format(model.to_json()))
model.compile(loss=args.loss, optimizer=args.optimizer)
train_gen = p1b3.DataGenerator(loader, batch_size=args.batch_size, shape=gen_shape, name='train_gen').flow()
val_gen = p1b3.DataGenerator(loader, partition='val', batch_size=args.batch_size, shape=gen_shape, name='val_gen').flow()
val_gen2 = p1b3.DataGenerator(loader, partition='val', | |
# lib/services/vautoscaling/ncloud_vautoscaling/api/v2_api.py
# coding: utf-8
"""
vautoscaling
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ncloud_vautoscaling.api_client import ApiClient
class V2Api(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_auto_scaling_group(self, create_auto_scaling_group_request, **kwargs): # noqa: E501
"""create_auto_scaling_group # noqa: E501
오토스케일링그룹생성 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_auto_scaling_group(create_auto_scaling_group_request, async=True)
>>> result = thread.get()
:param async bool
:param CreateAutoScalingGroupRequest create_auto_scaling_group_request: createAutoScalingGroupRequest (required)
:return: CreateAutoScalingGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_auto_scaling_group_with_http_info(create_auto_scaling_group_request, **kwargs) # noqa: E501
else:
(data) = self.create_auto_scaling_group_with_http_info(create_auto_scaling_group_request, **kwargs) # noqa: E501
return data
    def create_auto_scaling_group_with_http_info(self, create_auto_scaling_group_request, **kwargs): # noqa: E501
        """create_auto_scaling_group # noqa: E501
        Create an auto scaling group. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_auto_scaling_group_with_http_info(create_auto_scaling_group_request, async=True)
        >>> result = thread.get()
        :param async bool
        :param CreateAutoScalingGroupRequest create_auto_scaling_group_request: createAutoScalingGroupRequest (required)
        :return: CreateAutoScalingGroupResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated (swagger-codegen). `params = locals()`
        # below captures locals by name, so locals must NOT be renamed.
        all_params = ['create_auto_scaling_group_request'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject unknown keyword arguments and flatten the rest into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_auto_scaling_group" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'create_auto_scaling_group_request' is set
        if ('create_auto_scaling_group_request' not in params or
                params['create_auto_scaling_group_request'] is None):
            raise ValueError("Missing the required parameter `create_auto_scaling_group_request` when calling `create_auto_scaling_group`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        query_params.append(('responseFormatType', 'json')) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request object is sent as the HTTP body.
        body_params = None
        if 'create_auto_scaling_group_request' in params:
            body_params = params['create_auto_scaling_group_request']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/x-www-form-urlencoded']) # noqa: E501
        # Authentication setting
        auth_settings = ['x-ncp-iam'] # noqa: E501
        return self.api_client.call_api(
            '/createAutoScalingGroup', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CreateAutoScalingGroupResponse', # noqa: E501
            auth_settings=auth_settings,
            _async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def create_launch_configuration(self, create_launch_configuration_request, **kwargs): # noqa: E501
"""create_launch_configuration # noqa: E501
론치설정생성 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_launch_configuration(create_launch_configuration_request, async=True)
>>> result = thread.get()
:param async bool
:param CreateLaunchConfigurationRequest create_launch_configuration_request: createLaunchConfigurationRequest (required)
:return: CreateLaunchConfigurationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_launch_configuration_with_http_info(create_launch_configuration_request, **kwargs) # noqa: E501
else:
(data) = self.create_launch_configuration_with_http_info(create_launch_configuration_request, **kwargs) # noqa: E501
return data
    def create_launch_configuration_with_http_info(self, create_launch_configuration_request, **kwargs): # noqa: E501
        """create_launch_configuration # noqa: E501
        Create a launch configuration. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_launch_configuration_with_http_info(create_launch_configuration_request, async=True)
        >>> result = thread.get()
        :param async bool
        :param CreateLaunchConfigurationRequest create_launch_configuration_request: createLaunchConfigurationRequest (required)
        :return: CreateLaunchConfigurationResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated (swagger-codegen). `params = locals()`
        # below captures locals by name, so locals must NOT be renamed.
        all_params = ['create_launch_configuration_request'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject unknown keyword arguments and flatten the rest into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_launch_configuration" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'create_launch_configuration_request' is set
        if ('create_launch_configuration_request' not in params or
                params['create_launch_configuration_request'] is None):
            raise ValueError("Missing the required parameter `create_launch_configuration_request` when calling `create_launch_configuration`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        query_params.append(('responseFormatType', 'json')) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request object is sent as the HTTP body.
        body_params = None
        if 'create_launch_configuration_request' in params:
            body_params = params['create_launch_configuration_request']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/x-www-form-urlencoded']) # noqa: E501
        # Authentication setting
        auth_settings = ['x-ncp-iam'] # noqa: E501
        return self.api_client.call_api(
            '/createLaunchConfiguration', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CreateLaunchConfigurationResponse', # noqa: E501
            auth_settings=auth_settings,
            _async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_auto_scaling_group(self, delete_auto_scaling_group_request, **kwargs): # noqa: E501
"""delete_auto_scaling_group # noqa: E501
오토스케일링그룹삭제 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_auto_scaling_group(delete_auto_scaling_group_request, async=True)
>>> result = thread.get()
:param async bool
:param DeleteAutoScalingGroupRequest delete_auto_scaling_group_request: deleteAutoScalingGroupRequest (required)
:return: DeleteAutoScalingGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_auto_scaling_group_with_http_info(delete_auto_scaling_group_request, **kwargs) # noqa: E501
else:
(data) = self.delete_auto_scaling_group_with_http_info(delete_auto_scaling_group_request, **kwargs) # noqa: E501
return data
    def delete_auto_scaling_group_with_http_info(self, delete_auto_scaling_group_request, **kwargs): # noqa: E501
        """delete_auto_scaling_group # noqa: E501
        Delete an auto scaling group. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_auto_scaling_group_with_http_info(delete_auto_scaling_group_request, async=True)
        >>> result = thread.get()
        :param async bool
        :param DeleteAutoScalingGroupRequest delete_auto_scaling_group_request: deleteAutoScalingGroupRequest (required)
        :return: DeleteAutoScalingGroupResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated (swagger-codegen). `params = locals()`
        # below captures locals by name, so locals must NOT be renamed.
        all_params = ['delete_auto_scaling_group_request'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject unknown keyword arguments and flatten the rest into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_auto_scaling_group" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'delete_auto_scaling_group_request' is set
        if ('delete_auto_scaling_group_request' not in params or
                params['delete_auto_scaling_group_request'] is None):
            raise ValueError("Missing the required parameter `delete_auto_scaling_group_request` when calling `delete_auto_scaling_group`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        query_params.append(('responseFormatType', 'json')) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request object is sent as the HTTP body.
        body_params = None
        if 'delete_auto_scaling_group_request' in params:
            body_params = params['delete_auto_scaling_group_request']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/x-www-form-urlencoded']) # noqa: E501
        # Authentication setting
        auth_settings = ['x-ncp-iam'] # noqa: E501
        return self.api_client.call_api(
            '/deleteAutoScalingGroup', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeleteAutoScalingGroupResponse', # noqa: E501
            auth_settings=auth_settings,
            _async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_launch_configuration(self, delete_launch_configuration_request, **kwargs): # noqa: E501
"""delete_launch_configuration # noqa: E501
론치설정삭제 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_launch_configuration(delete_launch_configuration_request, async=True)
>>> result = thread.get()
:param async bool
:param DeleteLaunchConfigurationRequest delete_launch_configuration_request: deleteLaunchConfigurationRequest (required)
:return: DeleteLaunchConfigurationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_launch_configuration_with_http_info(delete_launch_configuration_request, **kwargs) # noqa: E501
else:
(data) = self.delete_launch_configuration_with_http_info(delete_launch_configuration_request, **kwargs) # noqa: E501
return data
    def delete_launch_configuration_with_http_info(self, delete_launch_configuration_request, **kwargs): # noqa: E501
        """delete_launch_configuration # noqa: E501
        Delete a launch configuration. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_launch_configuration_with_http_info(delete_launch_configuration_request, async=True)
        >>> result = thread.get()
        :param async bool
        :param DeleteLaunchConfigurationRequest delete_launch_configuration_request: deleteLaunchConfigurationRequest (required)
        :return: DeleteLaunchConfigurationResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated (swagger-codegen). `params = locals()`
        # below captures locals by name, so locals must NOT be renamed.
        all_params = ['delete_launch_configuration_request'] # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject unknown keyword arguments and flatten the rest into `params`.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_launch_configuration" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'delete_launch_configuration_request' is set
        if ('delete_launch_configuration_request' not in params or
                params['delete_launch_configuration_request'] is None):
            raise ValueError("Missing the required parameter `delete_launch_configuration_request` when calling `delete_launch_configuration`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        query_params.append(('responseFormatType', 'json')) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # The request object is sent as the HTTP body.
        body_params = None
        if 'delete_launch_configuration_request' in params:
            body_params = params['delete_launch_configuration_request']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/x-www-form-urlencoded']) # noqa: E501
        # Authentication setting
        auth_settings = ['x-ncp-iam'] # noqa: E501
        return self.api_client.call_api(
            '/deleteLaunchConfiguration', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeleteLaunchConfigurationResponse', # noqa: E501
            auth_settings=auth_settings,
            _async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_scaling_policy(self, delete_scaling_policy_request, **kwargs): # noqa: E501
"""delete_scaling_policy # noqa: E501
스케일링정책삭제 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread | |
lw_entrances.append('Desert Palace Entrance (West)')
else:
lw_dungeon_entrances_must_exit.append('Desert Palace Entrance (West)')
lw_entrances.append('Desert Palace Entrance (North)')
dungeon_exits.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)']
else:
lw_entrances.append('Inverted Ganons Tower')
dungeon_exits.append('Inverted Ganons Tower Exit')
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)', 'Inverted Ganons Tower']
# shuffle aga door first. If it's on HC ledge, remaining HC ledge door must be must-exit
all_entrances_aga = lw_entrances + dw_entrances
aga_doors = [i for i in all_entrances_aga]
world.random.shuffle(aga_doors)
aga_door = aga_doors.pop()
if aga_door in hc_ledge_entrances:
lw_entrances.remove(aga_door)
hc_ledge_entrances.remove(aga_door)
world.random.shuffle(hc_ledge_entrances)
hc_ledge_must_exit = hc_ledge_entrances.pop()
lw_entrances.remove(hc_ledge_must_exit)
lw_dungeon_entrances_must_exit.append(hc_ledge_must_exit)
if aga_door in lw_entrances:
lw_entrances.remove(aga_door)
elif aga_door in dw_entrances:
dw_entrances.remove(aga_door)
connect_two_way(world, aga_door, 'Inverted Agahnims Tower Exit', player)
dungeon_exits.remove('Inverted Agahnims Tower Exit')
connect_mandatory_exits(world, lw_entrances, dungeon_exits, lw_dungeon_entrances_must_exit, player)
connect_caves(world, lw_entrances, dw_entrances, dungeon_exits, player)
elif world.shuffle[player] == 'simple':
simple_shuffle_dungeons(world, player)
old_man_entrances = list(Inverted_Old_Man_Entrances)
caves = list(Cave_Exits)
three_exit_caves = list(Cave_Three_Exits)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
# we shuffle all 2 entrance caves as pairs as a start
# start with the ones that need to be directed
two_door_caves = list(Inverted_Two_Door_Caves_Directional)
world.random.shuffle(two_door_caves)
world.random.shuffle(caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# now the remaining pairs
two_door_caves = list(Inverted_Two_Door_Caves)
world.random.shuffle(two_door_caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# place links house
links_house_doors = [i for i in bomb_shop_doors + blacksmith_doors if
i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = world.random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in bomb_shop_doors:
bomb_shop_doors.remove(links_house)
if links_house in blacksmith_doors:
blacksmith_doors.remove(links_house)
if links_house in old_man_entrances:
old_man_entrances.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in bomb_shop_doors]
sanc_door = world.random.choice(sanc_doors)
bomb_shop_doors.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
lw_dm_entrances = ['Paradox Cave (Bottom)', 'Paradox Cave (Middle)', 'Paradox Cave (Top)', 'Old Man House (Bottom)',
'Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Top)', 'Spiral Cave (Bottom)', 'Old Man Cave (East)',
'Death Mountain Return Cave (East)', 'Spiral Cave', 'Old Man House (Top)', 'Spectacle Rock Cave',
'Spectacle Rock Cave Peak', 'Spectacle Rock Cave (Bottom)']
# place old man, bumper cave bottom to DDM entrances not in east bottom
world.random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, 'Bumper Cave (Bottom)', 'Old Man Cave Exit (West)', player)
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
if old_man_exit == 'Spike Cave':
bomb_shop_doors.remove('Spike Cave')
bomb_shop_doors.extend(old_man_entrances)
# add old man house to ensure it is always somewhere on light death mountain
caves.extend(list(Old_Man_House))
caves.extend(list(three_exit_caves))
# connect rest
connect_caves(world, lw_dm_entrances, [], caves, player)
# scramble holes
scramble_inverted_holes(world, player)
# place blacksmith, has limited options
blacksmith_doors = [door for door in blacksmith_doors[:]]
world.random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
bomb_shop_doors = [door for door in bomb_shop_doors[:]]
world.random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'restricted':
simple_shuffle_dungeons(world, player)
lw_entrances = list(Inverted_LW_Entrances + Inverted_LW_Single_Cave_Doors)
dw_entrances = list(Inverted_DW_Entrances + Inverted_DW_Single_Cave_Doors + Inverted_Old_Man_Entrances)
lw_must_exits = list(Inverted_LW_Entrances_Must_Exit)
old_man_entrances = list(Inverted_Old_Man_Entrances)
caves = list(Cave_Exits + Cave_Three_Exits + Old_Man_House)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Inverted_Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
# place links house
links_house_doors = [i for i in lw_entrances + dw_entrances + lw_must_exits if
i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = world.random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in lw_entrances:
lw_entrances.remove(links_house)
elif links_house in dw_entrances:
dw_entrances.remove(links_house)
elif links_house in lw_must_exits:
lw_must_exits.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in dw_entrances]
sanc_door = world.random.choice(sanc_doors)
dw_entrances.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place must exits
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in dw_entrances]
world.random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
dw_entrances.remove(old_man_exit)
# place blacksmith, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in all_entrances]
world.random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
if blacksmith_hut in lw_entrances:
lw_entrances.remove(blacksmith_hut)
if blacksmith_hut in dw_entrances:
dw_entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in all_entrances]
world.random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
if bomb_shop in lw_entrances:
lw_entrances.remove(bomb_shop)
if bomb_shop in dw_entrances:
dw_entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere in the dark world
world.random.shuffle(dw_entrances)
old_man_entrance = dw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_inverted_holes(world, player)
doors = lw_entrances + dw_entrances
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'full':
skull_woods_shuffle(world, player)
lw_entrances = list(Inverted_LW_Entrances + Inverted_LW_Dungeon_Entrances + Inverted_LW_Single_Cave_Doors)
dw_entrances = list(Inverted_DW_Entrances + Inverted_DW_Dungeon_Entrances + Inverted_DW_Single_Cave_Doors + Inverted_Old_Man_Entrances)
lw_must_exits = list(Inverted_LW_Dungeon_Entrances_Must_Exit + Inverted_LW_Entrances_Must_Exit)
old_man_entrances = list(Inverted_Old_Man_Entrances + Old_Man_Entrances + ['Inverted Agahnims Tower', 'Tower of Hera'])
caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits) # don't need to consider three exit caves, have one exit caves to avoid parity issues
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Inverted_Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
old_man_house = list(Old_Man_House)
# randomize which desert ledge door is a must-exit
if world.random.randint(0, 1) == 0:
lw_must_exits.append('Desert Palace Entrance (North)')
lw_entrances.append('Desert Palace Entrance (West)')
else:
lw_must_exits.append('Desert Palace Entrance (West)')
lw_entrances.append('Desert Palace Entrance (North)')
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
caves.append(tuple(world.random.sample(
['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'], 3)))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)']
else:
lw_entrances.append('Inverted Ganons Tower')
caves.append('Inverted Ganons Tower Exit')
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)', 'Inverted Ganons Tower']
# shuffle aga door first. if it's on hc ledge, then one other hc ledge door has to be must_exit
all_entrances_aga = lw_entrances + dw_entrances
aga_doors = [i for i in all_entrances_aga]
world.random.shuffle(aga_doors)
aga_door = aga_doors.pop()
if aga_door in hc_ledge_entrances:
lw_entrances.remove(aga_door)
hc_ledge_entrances.remove(aga_door)
world.random.shuffle(hc_ledge_entrances)
hc_ledge_must_exit = hc_ledge_entrances.pop()
lw_entrances.remove(hc_ledge_must_exit)
lw_must_exits.append(hc_ledge_must_exit)
if aga_door in lw_entrances:
lw_entrances.remove(aga_door)
elif aga_door in dw_entrances:
dw_entrances.remove(aga_door)
connect_two_way(world, aga_door, 'Inverted Agahnims Tower Exit', player)
caves.remove('Inverted Agahnims Tower Exit')
# place links house
links_house_doors = [i for i in lw_entrances + dw_entrances + lw_must_exits if
i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = world.random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in lw_entrances:
lw_entrances.remove(links_house)
if links_house in dw_entrances:
dw_entrances.remove(links_house)
if links_house in lw_must_exits:
lw_must_exits.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in dw_entrances]
sanc_door = world.random.choice(sanc_doors)
dw_entrances.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# place old man house
# no dw must exits in inverted, but we randomize whether cave is in light or dark world
if world.random.randint(0, 1) == 0:
caves += old_man_house
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
try:
caves.remove(old_man_house[0])
except ValueError:
pass
else: # if the cave wasn't placed we get here
connect_caves(world, lw_entrances, [], old_man_house, | |
0)
toolbar = Gtk.Toolbar()
save_btn = Gtk.ToolButton.new_from_stock(Gtk.STOCK_SAVE)
save_btn.connect("clicked", self.on_bat_name_save_clicked)
toolbar.insert(save_btn, 1)
# bat_name_box.pack_start(toolbar, False, True, 0)
scrolledwindow = Gtk.ScrolledWindow()
scrolledwindow.set_hexpand(True)
scrolledwindow.set_vexpand(True)
self.textview = Gtk.TextView()
self.textbuffer = self.textview.get_buffer()
scrolledwindow.add(self.textview)
bat_name_box.pack_start(scrolledwindow, True, True, 0)
selected_file = '/home/tegwyn/ultrasonic_classifier/helpers/bat_name.txt'
with open(selected_file, 'r') as f:
data = f.read()
self.textbuffer.set_text(data)
#######################################################################################################################
#######################################################################################################################
hbox2 = Gtk.Box(spacing=6)
hbox2.set_orientation(Gtk.Orientation.VERTICAL)
# buttonW4 = Gtk.RadioButton.new_with_label_from_widget(None, "Button 4")
# buttonW4.connect("toggled", self.on_button_toggled_2, "empty 2")
# hbox2.pack_start(buttonW4, False, False, 0) # This button does nothing except remove default 'clicked' radio box.
# buttonW1 = Gtk.RadioButton.new_with_label_from_widget(buttonW4, "Text reporting")
# buttonW1.connect("toggled", self.on_button_toggled_2, "text")
# hbox2.pack_start(buttonW1, False, False, 0)
# buttonW2 = Gtk.RadioButton.new_with_label_from_widget(buttonW4, "Spectogram")
# buttonW2.connect("toggled", self.on_button_toggled_2, "spectogram")
# hbox2.pack_start(buttonW2, False, False, 0)
buttonW1 = Gtk.Button.new_with_mnemonic("_Text reporting")
buttonW1.connect("clicked", self.text_reporting_clicked)
buttonW1.set_margin_top(10)
hbox2.pack_start(buttonW1, False, False, 0)
buttonW2 = Gtk.Button.new_with_mnemonic("_Spectograms")
buttonW2.connect("clicked", self.spectogram_clicked)
hbox2.pack_start(buttonW2, False, False, 0)
buttonW3 = Gtk.Button.new_with_mnemonic("_Graphical reporting")
buttonW3.connect("clicked", self.graph_clicked)
hbox2.pack_start(buttonW3, False, False, 0)
# hbox2.set_position(300)
# buttonW3 = Gtk.RadioButton.new_with_label_from_widget(buttonW4, "Button 3")
# buttonW3.connect("toggled", self.on_button_toggled_2, "empty 1")
# hbox2.pack_start(buttonW3, False, False, 0)
#######################################################################################################################
#######################################################################################################################
start_media_box = Gtk.EventBox()
start_image = Gtk.Image()
pixbuf_start = GdkPixbuf.Pixbuf.new_from_file_at_size("/home/tegwyn/ultrasonic_classifier/images/start_250.png", 90, 90)
start_image.set_from_pixbuf(pixbuf_start)
start_media_box.add(start_image)
start_media_box.connect("button_press_event",self.start) # Starts the window of results in app.
# start_media_box.connect("button_press_event",self.record_and_classify)
# record_and_calssify does not connect until 'stop' is pressed! Not useful!!!!
stop_media_box = Gtk.EventBox()
stop_image = Gtk.Image()
pixbuf_stop = GdkPixbuf.Pixbuf.new_from_file_at_size("/home/tegwyn/ultrasonic_classifier/images/stop_250.png", 90, 90)
stop_image.set_from_pixbuf(pixbuf_stop)
stop_media_box.add(stop_image)
stop_media_box.connect("button_press_event",self.stop)
grid_03.add(media_box)
grid_04.add(start_media_box) # Record
grid_04.attach(stop_media_box, 1, 0, 1, 1) # Stop
box1 = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
box1.set_homogeneous(False)
#####################################################################################################
self.label1 = Gtk.Label()
self.label1.set_width_chars(60)
file = '/home/tegwyn/ultrasonic_classifier/instructions.txt'
if os.path.isfile(file):
with open(file) as fp:
text2 = fp.read()
fp.close()
self.label1.set_text(text2)
box1.pack_start(self.label1, True, True, 0)
########################################################################
self.label2 = Gtk.Label()
self.label2.set_width_chars(60)
box1.pack_start(self.label2, True, True, 0)
# grid_05.add(box1)
#######################################################################################################################
#######################################################################################################################
# if (textToggled == "record") and (textToggled2 == "spectogram"):
# grid_05.add(specto_box)
# elif (textToggled == "record") and (textToggled2 == "text"):
# grid_05.add(box1)
if (textToggled == "record") and (textToggled2 == "spectogram"):
grid_05.add(specto_box)
elif (textToggled == "record") and (textToggled2 == "graph"):
grid_05.add(graph_box)
elif (textToggled == "record") and (textToggled2 == "text"):
grid_05.add(box1)
if (textToggled == "process") and (textToggled2 == "spectogram"):
grid_05.add(specto_box)
elif (textToggled == "process") and (textToggled2 == "graph"):
grid_05.add(graph_box)
elif (textToggled == "process") and (textToggled2 == "text"):
grid_05.add(box1)
if (textToggled2 == "settings"):
# print("Try to view the settings spin buttons.")
grid_05.add(settings_box_1)
grid_05.attach(settings_box_2, 0, 1, 1, 1)
grid_05.attach(settings_box_3, 0, 2, 1, 1)
grid_05.attach(settings_box_4, 0, 3, 1, 1)
grid_05.attach(settings_box_5, 0, 4, 1, 1)
grid_05.attach(settings_box_6, 0, 5, 1, 1)
grid_05.attach(settings_box_7, 0, 6, 1, 1)
grid_05.attach(bat_name_box, 0, 7, 1, 1)
# grid_05.attach_next_to(settings_box_6, bat_name_box, Gtk.PositionType.RIGHT, 1, 1)
# grid_05.attach(settings_box_7, 0, 7, 1, 1)
# grid_01.attach(button2, 0, 1, 1, 1) # Drink another coffee
# grid_01.attach_next_to(button3, button2, Gtk.PositionType.RIGHT, 1, 1) # Shutdown the pi.
#######################################################################################################################
#######################################################################################################################
# self.add(vboxCombo)
vp3.add(vboxCombo) # species / genus combo boxes.
vp3.add(hbox2) # Spectogram check boxes
hp3.add1(vp3)
hp3.add2(grid_05) # Display text file
hp3.set_position(200)
##########################################################################
hp2.add1(grid_03) # Goat logo
hp2.add2(grid_04) # Record / Stop recording.
#hp2.set_position(310)
##########################################################################
vp1.add1(hp1) # Check boxes and buttons
vp1.add2(hp3) # Species / genus combo boxes and main display box.
vp1.set_position(130)
##########################################################################
vp2.add1(vp1) # TODO: vp2 may be unnecessary!
vp2.set_position(370)
vp2.add2(hp2) # Got logo and recording start stop controls.
##########################################################################
self.add(vp2)
#######################################################################################################################
#######################################################################################################################
# selected_folder = "/home/tegwyn/ultrasonic_classifier/my_audio"
# The following 2 lines are from progressbar_example.py and update the battery and temperature info:
self.timeout_id = GLib.timeout_add(5000, self.on_timeout, None)
self.activity_mode = False
# waittime = 3
# while Gtk.events_pending():
# Gtk.main_iteration()
# t.sleep(waittime)
def on_bat_name_save_clicked(self, widget):
save_file = '/home/tegwyn/ultrasonic_classifier/helpers/bat_name.txt'
start_iter = self.textbuffer.get_start_iter()
end_iter = self.textbuffer.get_end_iter()
text = self.textbuffer.get_text(start_iter, end_iter, True)
print("This is the text: ",text)
with open(save_file, 'w') as f:
f.write(text)
def on_fullscreen_clicked(self, wid):
if wid.get_active():
self.set_title("")
state = "on"
value = "HIGH"
else:
state = "off"
value = "LOW"
print("Button was turned", state)
print(value)
file = "/home/tegwyn/ultrasonic_classifier/helpers/fullscreen.txt"
f= open(file, "w+") # Create the file fullscreen.txt
f.write(value)
f.close()
def on_specto_res_clicked(self, wid):
if wid.get_active():
self.set_title("")
state = "on"
value = "HIGH"
else:
state = "off"
value = "LOW"
print("Button was turned", state)
print(value)
file = "/home/tegwyn/ultrasonic_classifier/helpers/specto_resolution.txt"
f= open(file, "w+") # Create the file specto_resolution.txt
f.write(value)
f.close()
# callback function: the signal of the spinbutton is used to change the text of the label
def spin_selected_1(self, event):
# print("Tried to update spin file !!!!!")
value = str(self.spinbutton_01.get_value_as_int())
# print(value)
# self.spinLabel.set_text("Threshold value selected is: " + value + ".")
file = "/home/tegwyn/ultrasonic_classifier/helpers/threshold.txt"
f= open(file, "w+") # Create the file threshold.txt
f.write(value)
f.close()
def spin_selected_2(self, event):
# print("Tried to update spin file !!!!!")
value = str(self.spinbutton_02.get_value_as_int())
# print(value)
# self.spinLabel.set_text("Threshold value selected is: " + value + ".")
file = "/home/tegwyn/ultrasonic_classifier/helpers/barchart_time.txt"
f= open(file, "w+") # Create the file threshold.txt
f.write(value)
f.close()
def spin_selected_7(self, event):
# print("Tried to update spin file !!!!!")
value = str(self.spinbutton_07.get_value_as_int())
# print(value)
# self.spinLabel.set_text("Threshold value selected is: " + value + ".")
file = "/home/tegwyn/ultrasonic_classifier/helpers/chunk_size_record.txt"
f= open(file, "w+") # Create the file threshold.txt
f.write(value)
f.close()
def spin_selected_8(self, event):
# print("Tried to update spin file !!!!!")
value = str(self.spinbutton_08.get_value_as_int())
# print(value)
# self.spinLabel.set_text("Threshold value selected is: " + value + ".")
file = "/home/tegwyn/ultrasonic_classifier/helpers/chunk_size_process.txt"
f= open(file, "w+") # Create the file threshold.txt
f.write(value)
f.close()
def on_timeout(self, user_data):
"""
Update status, battery and temperature info
"""
# print("\nFrom GUI.py: Update the battery info .... ")
file = '/home/tegwyn/ultrasonic_classifier/helpers/battery_info.txt'
if os.path.isfile(file):
with open(file, "r") as fp:
battery = fp.read()
fp.close()
self.label3.set_text(battery)
# print("\nFrom GUI.py: Update the status info .... ")
file = '/home/tegwyn/ultrasonic_classifier/helpers/status_update.txt'
if os.path.isfile(file):
with open(file, "r") as fp:
status = fp.read()
fp.close()
self.label4.set_text(status)
# As this is a timeout function, return True so that it
# continues to get called
return True
########################################################################################################################
########################################################################################################################
def restart_clicked(self, button):
print("\"Click me\" button was clicked")
file = "/home/tegwyn/ultrasonic_classifier/alert_sounds/Go_for_Deploy.wav"
os.system("aplay " + file)
restartFile = "/home/tegwyn/ultrasonic_classifier/helpers/restart.txt"
f= open(restartFile, "w+")
print("restart File created !!")
f.close()
def on_close_clicked(self, button):
print("Stopping application")
file = "/home/tegwyn/ultrasonic_classifier/alert_sounds/Go_for_Deploy.wav"
os.system("aplay " + file)
# os.system(exit) # This is close the app.
# os.system(return [n])
print("Attempting to close down the app .........")
close_app = "/home/tegwyn/ultrasonic_classifier/helpers/close_app.txt"
f= open(close_app, "w+")
if os.path.isfile(startFile):
os.remove(startFile)
print("start file removed")
print("close_appfile created !!")
f.close()
def editPixbuf(self, button):
self.image = GdkPixbuf.Pixbuf.new_from_file(self.spectoFile)
self.image_renderer.set_from_pixbuf (self.image)
print(self.spectoFile)
def open_some_files(self): # Toggled values are read.
print("opening some files ..... ")
file = "/home/tegwyn/ultrasonic_classifier/helpers/toggled_01.txt"
with open(file) as fp:
textToggled = fp.read()
fp.close()
print(textToggled)
file = "/home/tegwyn/ultrasonic_classifier/helpers/toggled_02.txt"
with open(file) as fp:
textToggled2 = fp.read()
fp.close()
print(textToggled2)
def replace_line(self, file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
def on_name_combo1_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter is not None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
self.replace_line('/home/tegwyn/ultrasonic_classifier/helpers/combo_01.txt', 0, name + '\n')
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def on_name_combo2_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter is not None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
self.replace_line('/home/tegwyn/ultrasonic_classifier/helpers/combo_01.txt', 1, name + '\n')
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def on_name_combo3_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter is not None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
self.replace_line('/home/tegwyn/ultrasonic_classifier/helpers/combo_01.txt', 2, name + '\n')
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def select_folder_clicked(self, widget):
# selected_folder = "/home/tegwyn/ultrasonic_classifier/my_audio"
dialog = Gtk.FileChooserDialog("Please choose the folder containing your audio files", self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
response = dialog.run()
# selected_folder = "/home/tegwyn/ultrasonic_classifier/my_audio"
selected_folder = dialog.get_filename()
if response == Gtk.ResponseType.OK:
selected_folder = dialog.get_filename()
print(selected_folder)
file = "/home/tegwyn/ultrasonic_classifier/helpers/audio_files_path.txt"
f= open(file, "w+") # Create the file audio_files_path.txt
f.write(selected_folder)
f.close()
self.textbuffer.set_text(selected_folder)
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
dialog.destroy()
def on_timeout_pulse(self, user_data):
if self.activity_mode:
self.activitybar.pulse()
# As this is a timeout function, return True so that it
# continues to get called
return True
def record_and_classify(self, button, event):
print("\"Open\" button was clicked")
file = "/home/tegwyn/ultrasonic_classifier/bash_app"
os.system("bash " + file)
def stop(self, widget, event): # Stop image.
print("stop")
stopFile = "/home/tegwyn/ultrasonic_classifier/helpers/stop.txt"
startFile = "/home/tegwyn/ultrasonic_classifier/helpers/start.txt"
f= open(stopFile, "w+")
if os.path.isfile(startFile):
os.remove(startFile)
print("start file removed")
print("stop file created !!")
f.close()
def shut_down_clicked(self, button): # Shut down Pi.
| |
<filename>pyscf/dh/polar/udfdh.py<gh_stars>1-10
from __future__ import annotations
# dh import
try:
from dh.udfdh import UDFDH
from dh.polar.rdfdh import Polar as RPolar
from dh.dhutil import gen_batch, get_rho_from_dm_gga, tot_size, hermi_sum_last2dim
except ImportError:
from pyscf.dh.udfdh import UDFDH
from pyscf.dh.polar.rdfdh import Polar as RPolar
from pyscf.dh.dhutil import gen_batch, get_rho_from_dm_gga, tot_size, hermi_sum_last2dim
# pyscf import
from pyscf import gto, lib, dft
from pyscf.dft.numint import _scale_ao
from pyscf.lib.numpy_helper import ANTIHERMI
# other import
import numpy as np
import itertools
einsum = lib.einsum
α, β = 0, 1
αα, αβ, ββ = 0, 1, 2
def xc_kernel_full(f, nr, ng):
    """Expand packed symmetry-unique kernel components into a full tensor.

    Parameters
    ----------
    f : np.ndarray
        Packed derivative components, shape (ngrid, ncomp); column i holds the
        i-th symmetry-unique component (ordered by the sorted index tuples).
    nr : int
        Number of leading indices running over 2 values (spin-density channels).
    ng : int
        Number of trailing indices running over 3 values (sigma channels).

    Returns
    -------
    np.ndarray
        Tensor of shape (2,)*nr + (3,)*ng + (ngrid,), symmetric separately in
        the first nr and the last ng indices.
    """
    def all_indices():
        return itertools.product(*([range(2)] * nr + [range(3)] * ng))

    def canonical(idx):
        # Sort the 2-valued and 3-valued index groups independently: packed
        # storage keeps only one representative of each symmetric tuple.
        return tuple(sorted(idx[:nr]) + sorted(idx[nr:]))

    unique_keys = sorted({canonical(idx) for idx in all_indices()})
    position = {key: slot for slot, key in enumerate(unique_keys)}
    # Map every full index tuple to the column of its canonical representative.
    comp = np.zeros([2] * nr + [3] * ng, dtype=int)
    for idx in all_indices():
        comp[idx] = position[canonical(idx)]
    expanded = np.array([f.T[i] for i in comp.flatten()])
    return expanded.reshape(comp.shape + (f.shape[0],))
def _uks_gga_wv2_generator(fxc, kxc, weight):
    """Build a closure evaluating second-order UKS GGA xc weighted vectors.

    Parameters
    ----------
    fxc : tuple
        Packed second xc derivatives (frr, frg, fgg) as returned by eval_xc
        with deriv>=2 — presumably in libxc's spin-packed layout; expanded
        here via xc_kernel_full.
    kxc : tuple
        Packed third xc derivatives (frrr, frrg, frgg, fggg).
    weight : np.ndarray
        Grid integration weights, applied to the returned vectors.

    Returns
    -------
    callable
        ``inner(rho0, rho1, rho2) -> (wva, wvb)`` giving per-spin weighted
        vectors of shape (4, ngrid); the density component (index 0) carries a
        factor 0.5 — the caller is expected to symmetrize (v + v.T).
    """
    # Expand each packed derivative block to its full symmetric tensor:
    # nr counts 2-valued spin-rho indices, ng counts 3-valued sigma indices.
    frr = xc_kernel_full(fxc[0], 2, 0)
    frg = xc_kernel_full(fxc[1], 1, 1)
    fgg = xc_kernel_full(fxc[2], 0, 2)
    frrr = xc_kernel_full(kxc[0], 3, 0)
    frrg = xc_kernel_full(kxc[1], 2, 1)
    frgg = xc_kernel_full(kxc[2], 1, 2)
    fggg = xc_kernel_full(kxc[3], 0, 3)
    ngrid = frr.shape[-1]
    z = np.zeros((ngrid,))  # zero row reused when assembling pd1_x1
    # First derivatives of (fr, fg) w.r.t. the 5 variables (rho_a, rho_b,
    # sigma_aa, sigma_ab, sigma_bb), stacked along axis 1.
    pd1_fr = np.concatenate([frr, frg], axis=1)
    pd1_fg = np.concatenate([frg.transpose(1, 0, 2), fgg], axis=1)
    # Second derivatives assembled into full 5x5 blocks per output channel.
    pd2_fr = np.zeros((2, 5, 5, ngrid))
    pd2_fr[:, :2, :2] = frrr
    pd2_fr[:, :2, 2:] = frrg
    pd2_fr[:, 2:, :2] = frrg.swapaxes(1, 2)
    pd2_fr[:, 2:, 2:] = frgg
    pd2_fg = np.zeros((3, 5, 5, ngrid))
    pd2_fg[:, :2, :2] = frrg.swapaxes(0, 2)
    pd2_fg[:, :2, 2:] = frgg.swapaxes(0, 1)
    pd2_fg[:, 2:, :2] = frgg.swapaxes(0, 2)
    pd2_fg[:, 2:, 2:] = fggg
    def _uks_gga_wv2_inner(rho0, rho1, rho2):
        # rho0: unperturbed density; rho1, rho2: two perturbed densities.
        # Each has per-spin rows [rho, grad_x, grad_y, grad_z] on the grid.
        rho0 = np.asarray(rho0)
        rho1 = np.asarray(rho1)
        rho2 = np.asarray(rho2)
        r0, r1, r2 = rho0[:, 0], rho1[:, 0], rho2[:, 0]
        n0, n1, n2 = rho0[:, 1:4], rho1[:, 1:4], rho2[:, 1:4]
        # Gradient dot products between the density sets (spin-resolved).
        g01 = einsum("atg, btg -> abg", n0, n1)
        g02 = einsum("atg, btg -> abg", n0, n2)
        g12 = einsum("atg, btg -> abg", n1, n2)
        # 5-vectors of first-order variable variations for each perturbation.
        x1 = np.array([r1[0], r1[1], 2 * g01[0, 0], g01[0, 1] + g01[1, 0], 2 * g01[1, 1]])
        x2 = np.array([r2[0], r2[1], 2 * g02[0, 0], g02[0, 1] + g02[1, 0], 2 * g02[1, 1]])
        # Second-order variation of the sigma variables (rho part is zero).
        pd1_x1 = np.array([z, z, 2 * g12[0, 0], g12[0, 1] + g12[1, 0], 2 * g12[1, 1]])
        wv = np.zeros((2, 4, ngrid))
        wva, wvb = wv
        # Density-channel contributions: quadratic (pd2) + second variation (pd1).
        wva[0] += np.einsum("xyg, xg, yg -> g", pd2_fr[0], x1, x2)
        wvb[0] += np.einsum("xyg, xg, yg -> g", pd2_fr[1], x1, x2)
        wva[0] += np.einsum("xg, xg -> g", pd1_fr[0], pd1_x1)
        wvb[0] += np.einsum("xg, xg -> g", pd1_fr[1], pd1_x1)
        # Gradient-channel contributions weighted by the unperturbed gradients.
        wva[1:] += np.einsum("xyg, xg, yg -> g", pd2_fg[0], x1, x2) * n0[0] * 2
        wva[1:] += np.einsum("xyg, xg, yg -> g", pd2_fg[1], x1, x2) * n0[1]
        wvb[1:] += np.einsum("xyg, xg, yg -> g", pd2_fg[1], x1, x2) * n0[0]
        wvb[1:] += np.einsum("xyg, xg, yg -> g", pd2_fg[2], x1, x2) * n0[1] * 2
        wva[1:] += np.einsum("xg, xg -> g", pd1_fg[0], pd1_x1) * n0[0] * 2
        wva[1:] += np.einsum("xg, xg -> g", pd1_fg[1], pd1_x1) * n0[1]
        wvb[1:] += np.einsum("xg, xg -> g", pd1_fg[1], pd1_x1) * n0[0]
        wvb[1:] += np.einsum("xg, xg -> g", pd1_fg[2], pd1_x1) * n0[1] * 2
        # Cross terms with the perturbed gradients n1 / n2.
        wva[1:] += np.einsum("xg, xg -> g", pd1_fg[0], x1) * n2[0] * 2
        wva[1:] += np.einsum("xg, xg -> g", pd1_fg[1], x1) * n2[1]
        wvb[1:] += np.einsum("xg, xg -> g", pd1_fg[1], x1) * n2[0]
        wvb[1:] += np.einsum("xg, xg -> g", pd1_fg[2], x1) * n2[1] * 2
        wva[1:] += einsum("xg, xg -> g", pd1_fg[0], x2) * n1[0] * 2
        wva[1:] += einsum("xg, xg -> g", pd1_fg[1], x2) * n1[1]
        wvb[1:] += einsum("xg, xg -> g", pd1_fg[1], x2) * n1[0]
        wvb[1:] += einsum("xg, xg -> g", pd1_fg[2], x2) * n1[1] * 2
        wva *= weight
        wva[0] *= .5  # v+v.T should be applied in the caller
        wvb *= weight
        wvb[0] *= .5  # v+v.T should be applied in the caller
        return wva, wvb
    return _uks_gga_wv2_inner
class Polar(UDFDH, RPolar):
def __init__(self, mol: gto.Mole, *args, skip_construct=False, **kwargs):
if not skip_construct:
super(Polar, self).__init__(mol, *args, **kwargs)
self.pol_scf = NotImplemented
self.pol_corr = NotImplemented
self.pol_tot = NotImplemented
self.de = NotImplemented
def prepare_H_1(self):
tensors = self.tensors
mol, C = self.mol, self.C
H_1_ao = - mol.intor("int1e_r")
H_1_mo = np.array([C[σ].T @ H_1_ao @ C[σ] for σ in (α, β)])
tensors.create("H_1_ao", H_1_ao)
tensors.create("H_1_mo", H_1_mo)
return self
def prepare_U_1(self):
tensors = self.tensors
sv, so = self.sv, self.so
H_1_mo = tensors.load("H_1_mo")
H_1_ai = [H_1_mo[σ, :, sv[σ], so[σ]] for σ in (α, β)]
U_1_ai = self.solve_cpks(H_1_ai)
U_1 = np.zeros_like(H_1_mo)
for σ in (α, β):
U_1[σ, :, sv[σ], so[σ]] = U_1_ai[σ]
U_1[σ, :, so[σ], sv[σ]] = - U_1_ai[σ].swapaxes(-1, -2)
tensors.create("U_1", U_1)
return self
    def prepare_dms(self):
        """Build perturbed AO density matrices, their grid densities, and the xc kernel.

        Caches "rhoU" (densities from the U_1 response), "rhoR" (density from
        the relaxed difference density D_r) and "kxc<xc>" (third xc
        derivatives on the grid). Returns self for chaining.
        """
        tensors = self.tensors
        U_1 = tensors.load("U_1")
        D_r = tensors.load("D_r")
        rho = tensors.load("rho")
        C, Co = self.C, self.Co
        so = self.so
        mol, grids, xc = self.mol, self.grids, self.xc
        # ni = dft.numint.NumInt() # intended not to use self.ni, and xcfun as engine
        # ni.libxc = dft.xcfun
        ni = self.ni
        # AO-basis density response from the occupied columns of U_1,
        # symmetrized below (dm + dm.T).
        dmU = np.array([C[σ] @ U_1[σ, :, :, so[σ]] @ C[σ, :, so[σ]].T for σ in (α, β)])
        dmU += dmU.swapaxes(-1, -2)
        # AO-basis density from the relaxation matrix D_r, also symmetrized.
        dmR = np.array([C[σ] @ D_r[σ] @ C[σ].T for σ in (α, β)])
        dmR += dmR.swapaxes(-1, -2)
        # Stack so a single grid evaluation covers all perturbations + D_r.
        dmX = np.concatenate([dmU, dmR[:, None]], axis=1)
        rhoX = get_rho_from_dm_gga(ni, mol, grids, dmX)
        # deriv=3: only the third-derivative kernel (kxc) is kept here.
        _, _, _, kxc = ni.eval_xc(xc, rho, spin=1, deriv=3)
        tensors.create("rhoU", rhoX[:, :-1])
        tensors.create("rhoR", rhoX[:, -1])
        tensors.create("kxc" + xc, kxc)
        return self
    def prepare_pdA_F_0_mo(self):
        """Assemble the MO-basis Fock derivative pdA_F_0_mo (and its xc_n variant).

        Combines the dipole perturbation H_1_mo, orbital-energy/U_1 response
        terms, and the orbital-Hessian contraction Ax0_Core. When a secondary
        functional ``mf_n`` is present, an analogous tensor "pdA_F_0_mo_n" is
        built with that functional's Fock matrix. Returns self for chaining.
        """
        tensors = self.tensors
        so, sa = self.so, self.sa
        U_1 = tensors.load("U_1")
        # Occupied columns of U_1 per spin, fed to the Ax0 contraction.
        U_1_pi = [U_1[σ, :, :, so[σ]] for σ in (α, β)]
        pdA_F_0_mo = tensors.load("H_1_mo").copy()
        # Orbital-energy response terms from the canonical diagonal Fock.
        pdA_F_0_mo += einsum("sApq, sp -> sApq", U_1, self.e)
        pdA_F_0_mo += einsum("sAqp, sq -> sApq", U_1, self.e)
        pdA_F_0_mo += self.Ax0_Core(sa, sa, sa, so)(U_1_pi)
        tensors.create("pdA_F_0_mo", pdA_F_0_mo)
        if self.mf_n:
            # Same construction with the non-canonical Fock matrix of the
            # secondary functional xc_n (full F_0_mo_n instead of energies).
            F_0_ao_n = self.mf_n.get_fock(dm=self.D)
            F_0_mo_n = einsum("sup, suv, svq -> spq", self.C, F_0_ao_n, self.C)
            pdA_F_0_mo_n = np.array(tensors.load("H_1_mo"))
            pdA_F_0_mo_n += einsum("sAmp, smq -> sApq", U_1, F_0_mo_n)
            pdA_F_0_mo_n += einsum("sAmq, spm -> sApq", U_1, F_0_mo_n)
            pdA_F_0_mo_n += self.Ax0_Core(sa, sa, sa, so, xc=self.xc_n)(U_1_pi)
            tensors.create("pdA_F_0_mo_n", pdA_F_0_mo_n)
        return self
    def prepare_pdA_Y_ia_ri(self):
        """Build the perturbed occ-vir 3-index RI tensors pdA_Y_ia_ri per spin.

        For each spin, contracts the orbital response U_1 with the stored RI
        integrals Y_mo_ri, batching over the auxiliary index to bound memory.
        Caches "pdA_Y_ia_ri0"/"pdA_Y_ia_ri1". Returns self for chaining.
        """
        tensors = self.tensors
        U_1 = tensors.load("U_1")
        Y_mo_ri = [tensors["Y_mo_ri" + str(σ)] for σ in (α, β)]
        nocc, nvir, nmo, naux = self.nocc, self.nvir, self.nmo, self.df_ri.get_naoaux()
        # Per-spin maxima used for the memory estimate below.
        mocc, mvir = max(nocc), max(nvir)
        so, sv = self.so, self.sv
        nprop = self.nprop
        # Batch size over the auxiliary index, sized against held memory.
        nbatch = self.calc_batch_size(8 * nmo**2, U_1.size + nprop*naux*mocc*mvir)
        for σ in (α, β):
            pdA_Y_ia_ri = np.zeros((nprop, naux, nocc[σ], nvir[σ]))
            for saux in gen_batch(0, naux, nbatch):
                # Occ and vir rotations of the RI integrals by U_1.
                pdA_Y_ia_ri[:, saux] = (
                    + einsum("Ami, Pma -> APia", U_1[σ][:, :, so[σ]], Y_mo_ri[σ][saux, :, sv[σ]])
                    + einsum("Ama, Pmi -> APia", U_1[σ][:, :, sv[σ]], Y_mo_ri[σ][saux, :, so[σ]]))
            tensors.create("pdA_Y_ia_ri" + str(σ), pdA_Y_ia_ri)
        return self
def prepare_pt2_deriv(self):
tensors = self.tensors
cc, c_os, c_ss = self.cc, self.c_os, self.c_ss
nocc, nvir, nmo, naux = self.nocc, self.nvir, self.nmo, self.df_ri.get_naoaux()
mocc, mvir = max(nocc), max(nvir)
so, sv = self.so, self.sv
eo, ev = self.eo, self.ev
nprop = self.nprop
pdA_D_rdm1 = tensors.create("pdA_D_rdm1", shape=(2, nprop, nmo, nmo))
if not self.eval_pt2:
return self
pdA_F_0_mo = tensors.load("pdA_F_0_mo")
Y_ia_ri = [tensors["Y_mo_ri" + str(σ)][:, so[σ], sv[σ]] for σ in (α, β)]
pdA_Y_ia_ri = [tensors["pdA_Y_ia_ri" + str(σ)] for σ in (α, β)]
pdA_G_ia_ri = [tensors.create("pdA_G_ia_ri" + str(σ), shape=(nprop, naux, nocc[σ], nvir[σ])) for σ in (α, β)]
nbatch = self.calc_batch_size(8*mocc*mvir**2, tot_size(Y_ia_ri, pdA_Y_ia_ri, pdA_G_ia_ri, pdA_F_0_mo, pdA_D_rdm1))
eval_ss = True if abs(c_ss) > 1e-7 else False
for σς, σ, ς in (αα, α, α), (αβ, α, β), (ββ, β, β):
if σς in (αα, ββ) and not eval_ss:
continue
D_jab = eo[ς][:, None, None] - ev[σ][None, :, None] - ev[ς][None, None, :]
for sI in gen_batch(0, nocc[σ], nbatch):
t_ijab = np.asarray(tensors["t_ijab" + str(σς)][sI])
D_ijab = eo[σ][sI, None, None, None] + D_jab
pdA_t_ijab = einsum("APia, Pjb -> Aijab", pdA_Y_ia_ri[σ][:, :, sI], Y_ia_ri[ς])
pdA_t_ijab += einsum("APjb, Pia -> Aijab", pdA_Y_ia_ri[ς], Y_ia_ri[σ][:, sI])
for sK in gen_batch(0, nocc[σ], nbatch):
t_kjab = t_ijab if sK == sI else tensors["t_ijab" + str(σς)][sK]
pdA_t_ijab -= einsum("Aki, kjab -> Aijab", pdA_F_0_mo[σ][:, sK, sI], t_kjab)
pdA_t_ijab -= einsum("Akj, ikab -> Aijab", pdA_F_0_mo[ς][:, so[ς], so[ς]], t_ijab)
pdA_t_ijab += einsum("Aca, ijcb -> Aijab", pdA_F_0_mo[σ][:, sv[σ], sv[σ]], t_ijab)
pdA_t_ijab += einsum("Acb, ijac -> Aijab", pdA_F_0_mo[ς][:, sv[ς], sv[ς]], t_ijab)
pdA_t_ijab | |
<reponame>Z1R343L/dislash.py<filename>dislash/application_commands/slash_client.py
import asyncio
import discord
from discord.abc import Messageable
from discord.state import ConnectionState
from discord.http import Route
from discord.ext.commands import Context
from typing import Any, Dict, List
from .slash_core import slash_command
from .context_menus_core import user_command, message_command
from .utils import ClickListener, _on_button_click
from ._decohub import _HANDLER
from ..interactions import (
ComponentType,
BaseInteraction,
SlashInteraction,
MessageInteraction,
ContextMenuInteraction,
ApplicationCommand,
ApplicationCommandPermissions,
application_command_factory
)
__all__ = ("InteractionClient", "SlashClient")
class InteractionClient:
"""
The main purpose of this class is to track ``INTERACTION_CREATE`` API event.
Parameters
----------
client : :class:`commands.Bot` | :class:`commands.AutoShardedBot`
The discord.py Bot instance
show_warnings : :class:`bool`
Whether to show the warnings or not. Defaults to ``True``
modify_send : :class:`bool`
Whether to modify :class:`Messageable.send` and :class:`Message.edit`.
Modified methods allow to specify the ``components`` parameter.
Attributes
----------
client : :class:`commands.Bot` | :class:`commands.AutoShardedBot`
an instance of any class inherited from :class:`discord.Client`
application_id : :class:`int`
the ID of the application your bot is related to
global_commands : List[:class:`ApplicationCommand`]
All registered global application commands
slash_commands : :class:`Dict[str, CommandParent]`
All invokable slash commands from your code
user_commands : :class:`Dict[str, InvokableUserCommand]`
All invokable user commands from your code
message_commands : :class:`Dict[str, InvokableMessageCommand]`
All invokable message commands from your code
commands : :class:`Dict[str, InvokableApplicationCommand]`
All invokable application commands from your code
is_ready : bool
Equals to ``True`` if SlashClient is ready, otherwise it's ``False``
"""
def __init__(self, client, *, test_guilds: List[int] = None, sync_commands: bool = True,
show_warnings: bool = True, modify_send: bool = True):
self._uses_discord_2 = hasattr(client, "add_view")
_HANDLER.client = client
self.client = _HANDLER.client
self.application_id = None
self.events = {}
self._listeners = {}
self._global_commands = {}
self._guild_commands = {}
self._cogs_with_err_listeners = {
"on_slash_command_error": [],
"on_user_command_error": [],
"on_message_command_error": []
}
self._test_guilds = test_guilds
self._sync_commands = sync_commands
self._show_warnings = show_warnings
self._modify_send = modify_send
self.active_shard_count = 0
self.is_ready = False
# Add listeners
self._register_listeners()
# Modify old discord.py methods
self._modify_discord()
# Link the slash ext to client if doesn't exist yet
if not hasattr(self.client, "slash"):
self.client.slash = self
# Inject cogs that are already loaded
for cog in self.client.cogs.values():
self._inject_cogs(cog)
def _register_listeners(self):
self.client.add_listener(self._on_guild_remove, 'on_guild_remove')
self.client.add_listener(self._on_socket_response, 'on_socket_response')
if isinstance(self.client, discord.AutoShardedClient):
self.client.add_listener(self._on_shard_connect, 'on_shard_connect')
self.client.add_listener(self._on_ready, 'on_ready')
else:
self.client.add_listener(self._on_connect, 'on_connect')
# For nice click listener
self.client.add_listener(_on_button_click, 'on_button_click')
def _modify_discord(self):
    """Monkey-patch discord.py classes so slash/component helpers are reachable
    from ordinary discord.py objects (Client, Guild, Message, Context).

    This wraps cog (un)loading, attaches wait-for helpers to Message/Context,
    and attaches guild-level application-command CRUD methods to Guild.
    """
    # Modify cog loader: inject our command handlers before the original add_cog
    _add_cog = self.client.add_cog
    def add_cog_2(cog):
        self._inject_cogs(cog)
        _add_cog(cog)
    self.client.add_cog = add_cog_2
    # Modify cog unloader: eject our command handlers before the original remove_cog
    _rem_cog = self.client.remove_cog
    def rem_cog_2(name):
        self._eject_cogs(name)
        _rem_cog(name)
    self.client.remove_cog = rem_cog_2
    # Multiple wait for
    self.client.multiple_wait_for = self.multiple_wait_for
    # Change other class methods.
    # The closures below capture `self` so they can be assigned as plain
    # functions onto discord.py classes (they receive the patched instance
    # as their first argument).
    async def ctx_wait_for_button_click(ctx, check=None, timeout=None):
        return await self.wait_for_button_click(check=check, timeout=timeout)
    async def message_wait_for_button_click(message, check=None, timeout=None):
        if check is None:
            check = lambda inter: True
        # Restrict the user-supplied check to interactions on this message
        def auto_check(inter):
            if message.id != inter.message.id:
                return False
            return check(inter)
        return await self.wait_for_button_click(auto_check, timeout)
    async def message_wait_for_dropdown(message, check=None, timeout=None):
        if check is None:
            check = lambda inter: True
        def auto_check(inter):
            if message.id != inter.message.id:
                return False
            return check(inter)
        return await self.wait_for_dropdown(auto_check, timeout)
    async def fetch_commands(guild):
        return await self.fetch_guild_commands(guild.id)
    async def fetch_command(guild, command_id):
        return await self.fetch_guild_command(guild.id, command_id)
    async def edit_command(guild, command_id, slash_command):
        return await self.edit_guild_slash_command(guild.id, command_id, slash_command)
    async def edit_command_permissions(guild, command_id, permissions):
        return await self.edit_guild_command_permissions(guild.id, command_id, permissions)
    async def batch_edit_command_permissions(guild, permissions):
        return await self.batch_edit_guild_command_permissions(guild.id, permissions)
    async def delete_command(guild, command_id):
        return await self.delete_guild_command(guild.id, command_id)
    async def delete_commands(guild):
        return await self.delete_guild_commands(guild.id)
    def get_commands(guild):
        return self.get_guild_commands(guild.id)
    def get_command(guild, command_id):
        return self.get_guild_command(guild.id, command_id)
    def get_command_named(guild, name):
        return self.get_guild_command_named(guild.id, name)
    def create_click_listener(message, timeout=None):
        return ClickListener(message.id, timeout)
    if self._modify_send:
        # Pick the send/edit patches matching the installed discord.py major version
        if self._uses_discord_2:
            from ._modifications.new import (
                send as send_with_components,
                edit as edit_with_components
            )
        else:
            from ._modifications.old import (
                create_message_with_components,
                send_with_components,
                edit_with_components
            )
            # create_message only needs patching on old discord.py
            ConnectionState.create_message = create_message_with_components
        Messageable.send = send_with_components
        discord.Message.edit = edit_with_components
    Context.wait_for_button_click = ctx_wait_for_button_click
    discord.Message.create_click_listener = create_click_listener
    discord.Message.wait_for_button_click = message_wait_for_button_click
    discord.Message.wait_for_dropdown = message_wait_for_dropdown
    discord.Guild.get_commands = get_commands
    discord.Guild.get_command = get_command
    discord.Guild.get_command_named = get_command_named
    discord.Guild.fetch_commands = fetch_commands
    discord.Guild.fetch_command = fetch_command
    discord.Guild.edit_command = edit_command
    discord.Guild.edit_command_permissions = edit_command_permissions
    discord.Guild.batch_edit_command_permissions = batch_edit_command_permissions
    discord.Guild.delete_command = delete_command
    discord.Guild.delete_commands = delete_commands
def teardown(self):
    '''Cleanup the client by removing all registered listeners and caches.'''
    remove = self.client.remove_listener
    remove(self._on_guild_remove, 'on_guild_remove')
    remove(self._on_socket_response, 'on_socket_response')
    if isinstance(self.client, discord.AutoShardedClient):
        remove(self._on_shard_connect, 'on_shard_connect')
        remove(self._on_ready, 'on_ready')
    else:
        remove(self._on_connect, 'on_connect')
    # Drop every cached structure owned by this extension
    for cache in (self.events, self._listeners,
                  self._global_commands, self._guild_commands):
        cache.clear()
    if hasattr(self.client, "slash"):
        del self.client.slash
    self.is_ready = False
@property
def slash_commands(self):
    """Mapping of registered slash command names to their command objects."""
    return _HANDLER.slash_commands
@property
def user_commands(self):
    """Mapping of registered user (context menu) command names to objects."""
    return _HANDLER.user_commands
@property
def message_commands(self):
    """Mapping of registered message (context menu) command names to objects."""
    return _HANDLER.message_commands
@property
def commands(self):
    """Merged mapping of all application commands (slash, user, message).

    NOTE(review): dict(**a, **b, **c) raises TypeError when the same name
    exists in two categories -- presumably names are unique per type; verify.
    """
    return dict(
        **_HANDLER.slash_commands,
        **_HANDLER.user_commands,
        **_HANDLER.message_commands
    )
@property
def global_commands(self):
    """List of the cached globally-registered application commands."""
    # list() instead of the redundant identity comprehension
    return list(self._global_commands.values())
def event(self, func):
    """
    Decorator that registers a coroutine as a SlashClient event listener.
    ::

        @slash.event
        async def on_ready():
            print("SlashClient is ready")

    | All possible events:
    | ``on_ready``, ``on_auto_register``,
    | ``on_slash_command``, ``on_slash_command_error``
    """
    if not asyncio.iscoroutinefunction(func):
        raise TypeError(f'<{func.__qualname__}> must be a coroutine function')
    # Strip the conventional "on_" prefix before storing the listener
    event_name = func.__name__
    if event_name.startswith('on_'):
        event_name = event_name[3:]
    self.events[event_name] = func
    return func
def slash_command(self, *args, **kwargs):
    """
    A decorator that allows to build a slash command.

    Parameters
    ----------
    auto_sync : :class:`bool`
        whether to automatically register the command or not. Defaults to ``True``
    name : :class:`str`
        name of the slash command you want to respond to (equals to function name by default).
    description : :class:`str`
        the description of the slash command. It will be visible in Discord.
    options : :class:`List[Option]`
        the list of slash command options. The options will be visible in Discord.
    default_permission : :class:`bool`
        whether the command is enabled by default when the app is added to a guild.
    guild_ids : :class:`List[int]`
        if specified, the client will register a command in these guilds.
        Otherwise this command will be registered globally.
    connectors : :class:`dict`
        which function param states for each option. If the name
        of an option already matches the corresponding function param,
        you don't have to specify the connectors. Connectors template:
        ``{"option-name": "param_name", ...}``
    """
    # Delegate to the module-level decorator factory of the same name
    return slash_command(*args, **kwargs)
def user_command(self, *args, **kwargs):
    """A decorator that builds a user (context menu) command.

    Delegates to the module-level ``user_command`` decorator; accepts the
    same keyword arguments as ``slash_command`` where applicable.
    """
    return user_command(*args, **kwargs)
def message_command(self, *args, **kwargs):
    """A decorator that builds a message (context menu) command.

    Delegates to the module-level ``message_command`` decorator; accepts the
    same keyword arguments as ``slash_command`` where applicable.
    """
    return message_command(*args, **kwargs)
# Getters
def get_global_command(self, command_id: int):
    """
    Look up a cached global command by its ID.

    Parameters
    ----------
    command_id : int
        the ID of the command

    Returns
    -------
    slash_command : SlashCommand | None
    """
    cache = self._global_commands
    return cache.get(command_id)
def get_global_command_named(self, name: str):
    """
    Look up a cached global command by name.

    Parameters
    ----------
    name : str
        the name of the command

    Returns
    -------
    slash_command : SlashCommand | None
    """
    return next(
        (cmd for cmd in self._global_commands.values() if cmd.name == name),
        None
    )
def get_guild_command(self, guild_id: int, command_id: int):
    """
    Look up a cached command registered in a specific guild.

    Parameters
    ----------
    guild_id : int
        the ID of the guild
    command_id : int
        the ID of the command

    Returns
    -------
    slash_command : SlashCommand | None
    """
    guild_cache = self._guild_commands.get(guild_id)
    if guild_cache is None:
        return None
    return guild_cache.get(command_id)
def get_guild_command_named(self, guild_id: int, name: str):
    """
    Look up a cached guild command by name.

    Parameters
    ----------
    guild_id : int
        the ID of the guild
    name : str
        the name of the command

    Returns
    -------
    slash_command : SlashCommand | None
    """
    guild_cache = self._guild_commands.get(guild_id)
    if guild_cache is None:
        return None
    return next(
        (cmd for cmd in guild_cache.values() if cmd.name == name),
        None
    )
def get_guild_commands(self, guild_id: int):
    """
    Get cached guild commands

    Parameters
    ----------
    guild_id : int
        the ID of the guild

    Returns
    -------
    ~:class:`List[ApplicationCommand]`
    """
    # list() instead of the redundant identity comprehension
    return list(self._guild_commands.get(guild_id, {}).values())
# Straight references to API
async def fetch_global_commands(self):
    """
    Requests a list of global registered commands from the API

    Returns
    -------
    global_commands : List[ApplicationCommand]
    """
    # GET /applications/{application_id}/commands
    data = await self.client.http.request(
        Route(
            'GET',
            '/applications/{application_id}/commands',
            application_id=self.application_id
        )
    )
    # Wrap each raw payload in the matching ApplicationCommand subclass
    return [application_command_factory(dat) for dat in data]
async def fetch_guild_commands(self, guild_id: int):
    """
    Requests a list of registered commands for a specific guild

    Parameters
    ----------
    guild_id : int
        the ID of the guild to query

    Returns
    -------
    guild_commands : List[ApplicationCommand]
    """
    # GET /applications/{application_id}/guilds/{guild_id}/commands
    data = await self.client.http.request(
        Route(
            'GET', '/applications/{application_id}/guilds/{guild_id}/commands',
            application_id=self.application_id,
            guild_id=guild_id
        )
    )
    return [application_command_factory(dat) for dat in data]
async def fetch_global_command(self, command_id: int):
    """
    Requests a registered global command

    Parameters
    ----------
    command_id : int
        the ID of the command to fetch

    Returns
    -------
    global_command : ApplicationCommand
    """
    # GET /applications/{application_id}/commands/{cmd_id}
    data = await self.client.http.request(
        Route(
            "GET",
            "/applications/{application_id}/commands/{cmd_id}",
            application_id=self.application_id,
            cmd_id=command_id
        )
    )
    return application_command_factory(data)
async def fetch_guild_command(self, guild_id: int, command_id: int):
    """
    Requests a registered guild command

    Parameters
    ----------
    guild_id : int
        the ID of the guild the command belongs to
    command_id : int
        the ID of the command to fetch

    Returns
    -------
    guild_command : ApplicationCommand
    """
    # GET /applications/{application_id}/guilds/{guild_id}/commands/{cmd_id}
    data = await self.client.http.request(
        Route(
            "GET",
            "/applications/{application_id}/guilds/{guild_id}/commands/{cmd_id}",
            application_id=self.application_id,
            guild_id=guild_id,
            cmd_id=command_id
        )
    )
    return application_command_factory(data)
async def register_global_command(self, app_command: ApplicationCommand):
"""
Registers a global application command
Parameters
----------
app_command : ApplicationCommand
"""
if not isinstance(app_command, ApplicationCommand):
| |
<gh_stars>10-100
""" Routines to:
Parse cat files
Run SPFIT and/or SPCAT
"""
import os
import subprocess
import shutil
import json
import types
from typing import List, Any, Union, Dict, Tuple
from glob import glob
from warnings import warn
import ruamel.yaml as yaml
import numpy as np
import joblib
import paramiko
def run_spcat(filename: str, temperature=None):
    """Run the SPCAT executable on ``filename`` and optionally extract the
    partition function at a given temperature.

    Parameters
    ----------
    filename : str
        Base name (no extension) of the .int and .var/.par files.
    temperature
        Substring matched against SPCAT's stdout lines; if found, the second
        whitespace-separated field of the matching line is returned.
        NOTE(review): must be a string for the ``in`` test below -- a numeric
        value would raise TypeError; confirm callers pass str.

    Returns
    -------
    float or None
        The partition function Q if ``temperature`` was given and matched a
        stdout line; otherwise None.
    """
    # Run SPCAT
    parameter_file = filename + ".var"
    if os.path.isfile(filename + ".var") is False:
        print("VAR file unavailable. Attempting to run with PAR file.")
        if os.path.isfile(filename + ".par") is False:
            raise FileNotFoundError("No .var or .par file found.")
        else:
            # Fall back to the .par parameters by copying them to .var
            shutil.copy2(filename + ".par", parameter_file)
    process = subprocess.Popen(
        ["spcat", filename + ".int", parameter_file],
        stdout=subprocess.PIPE,  # suppress stdout
    )
    process.wait()
    # Extract the partition function at the specified temperature
    if temperature is not None:
        # Read in the piped standard output, and format into a list
        stdout = str(process.communicate()[0]).split("\\n")
        for line in stdout:
            if temperature in line:
                # If the specified temperature is found, get the partition
                # function
                Q = float(line.split()[1])
                return Q
def run_calbak(filename: str):
    """Runs the calbak routine, which generates a .lin file from the .cat

    Parameters
    ----------
    filename : str
        Base name (no extension) of the .cat file.

    Raises
    ------
    FileNotFoundError
        If the .cat file is missing.
    RuntimeError
        If the produced .lin file contains no lines.
    """
    if os.path.isfile(filename + ".cat") is False:
        raise FileNotFoundError(filename + ".cat is missing; cannot run calbak.")
    process = subprocess.Popen(
        ["calbak", filename + ".cat", filename + ".lin"], stdout=subprocess.DEVNULL
    )
    process.wait()
    with open(filename + ".lin") as read_file:
        # BUG FIX: the original compared the *list* of lines to 0, which is
        # never true; count the lines instead.
        lin_length = len(read_file.readlines())
    if lin_length == 0:
        raise RuntimeError("No lines produced in calbak! Check .cat file.")
def run_spfit(filename: str):
    """Run the SPFIT executable on the .lin/.par pair for ``filename``.

    Parameters
    ----------
    filename : str
        Base name (no extension) of the .lin and .par files.

    Raises
    ------
    OSError
        If spfit exits with a non-zero return code.
    subprocess.TimeoutExpired
        If the fit takes longer than 20 seconds.
    """
    process = subprocess.run(
        ["spfit", filename + ".lin", filename + ".par"],
        timeout=20.0,
        capture_output=True,
    )
    if process.returncode != 0:
        raise OSError("SPFIT failed to run.")
def list_chunks(target: List[Any], n: int):
    """
    Split ``target`` into consecutive chunks of length ``n``; the final chunk
    holds whatever elements remain.

    Parameters
    ----------
    target: list
        List to split into chunks
    n: int
        Number of elements per chunk

    Returns
    -------
    split_list: list
        Nested list of chunks
    """
    chunks = []
    for start in range(0, len(target), n):
        chunks.append(target[start:start + n])
    return chunks
def human2pickett(name: str, reduction="A", linear=True, nuclei=0):
    """Translate a human-readable Hamiltonian parameter name into a Pickett
    identifier.

    Parameters
    ----------
    name : str
        Human-readable parameter name (e.g. "B", "eQq", "D_J").
    reduction : str
        Hamiltonian reduction; selects the identifier for distortion terms.
    nuclei : int
        Nucleus index substituted into hyperfine identifiers.
    linear : bool
        Whether the molecule is linear; selects the identifier for "B".

    Returns
    -------
    identifier
        Pickett parameter code.

    Raises
    ------
    KeyError
        If ``name`` is not found in the lookup table.
    """
    # BUG FIX: the original used `is` for string comparison, which tests
    # object identity and is not guaranteed for equal strings.
    if name == "B" and linear is True:
        # Haven't thought of a clever way of doing this yet...
        return 100
    if name == "B" and linear is False:
        return 20000
    # Only non-"B" terms need the lookup table, so load it lazily here
    pickett_parameters = read_yaml(
        os.path.expanduser("~") + "/.pyspectools/pickett_terms.yml"
    )
    if name in ["eQq", "eQq/2"]:
        # Hyperfine terms take the nucleus index
        identifier = str(pickett_parameters[name]).format(nuclei)
    elif "D_" in name or "del" in name:
        # Centrifugal distortion terms depend on the reduction
        identifier = str(pickett_parameters[name][reduction])
    else:
        try:
            identifier = pickett_parameters[name]
        except KeyError:
            # BUG FIX: previously fell through and raised UnboundLocalError
            # on the return statement; report and re-raise instead.
            print("Parameter name unknown!")
            raise
    return identifier
def read_json(json_filepath: str) -> Dict[Any, Any]:
    """
    Deserialize a JSON file from disk into a Python dictionary.

    Parameters
    ----------
    json_filepath : str
        Path to the JSON file

    Returns
    -------
    Dict[Any, Any]
        Dictionary from JSON file
    """
    with open(json_filepath, "r") as read_file:
        return json.load(read_file)
def dump_json(json_filepath: str, json_dict: Dict[Any, Any]):
    """
    Serialize ``json_dict`` to ``json_filepath`` as pretty-printed JSON
    (4-space indent, keys sorted).

    Parameters
    ----------
    json_filepath : str
        Path to the JSON file to save to
    json_dict : Dict[Any, Any]
        Dictionary to be serialized
    """
    with open(json_filepath, "w+") as write_file:
        json.dump(json_dict, write_file, sort_keys=True, indent=4)
def read_yaml(yaml_filepath: str) -> Dict[Any, Any]:
    """
    Deserialize a YAML file from disk into a Python dictionary.

    Parameters
    ----------
    yaml_filepath : str
        Path to the YAML file

    Returns
    -------
    Dict[Any, Any]
        Dictionary based on the YAML contents
    """
    with open(yaml_filepath) as read_file:
        return yaml.load(read_file, Loader=yaml.Loader)
def dump_yaml(yaml_filepath: str, yaml_dict: Dict[Any, Any]):
    """
    Serialize a Python dictionary to a YAML file with ruamel.yaml defaults.

    Parameters
    ----------
    yaml_filepath : str
        Path to the YAML file to save to
    yaml_dict : Dict[Any, Any]
        Dictionary to be serialized
    """
    with open(yaml_filepath, "w+") as write_file:
        yaml.dump(yaml_dict, write_file)
def generate_folder():
    """
    Create the folder for the next calculation in the current directory.

    Calculation folders are named with consecutive integers; the new folder
    is max(existing numbers) + 1 (or 1 when none exist yet).

    Returns
    -------
    int
        The number of the newly created calculation folder.
    """
    numbered = []
    for entry in list_directories():
        # Ignore folders whose names are not integers
        try:
            numbered.append(int(entry))
        except ValueError:
            pass
    lastcalc = max(numbered) if numbered else 0
    os.mkdir(str(lastcalc + 1))
    return lastcalc + 1
def format_uncertainty(value: float, uncertainty: float):
    """Rescale ``uncertainty`` so its decimal precision matches ``value``.

    The number of decimal places of each number is measured via
    ``decimal_length`` on its str() form, and the uncertainty is shifted by
    powers of ten accordingly.
    NOTE(review): the ``[1]`` indexing below assumes str() of both numbers
    contains a decimal point; an int-valued input would raise IndexError --
    confirm callers always pass true floats.
    """
    # Convert the value into a string, then determine the length by
    # splitting at the decimal point
    decimal_places = decimal_length(value)
    uncertainty = float(uncertainty)  # make sure we're dealing floats
    uncertainty_places = decimal_length(uncertainty)
    # Force the uncertainty into decimals
    uncertainty = uncertainty * 10 ** -uncertainty_places[1]
    # Work out how many places we've moved now
    uncertainty_places = decimal_length(uncertainty)
    # Move the precision of the uncertainty to match the precision of the value
    uncertainty = uncertainty * 10 ** (uncertainty_places[1] - decimal_places[1])
    return uncertainty
def decimal_length(value: float):
    """Return the digit counts of ``value``'s string form, split at the
    decimal point: [integer_digits, fractional_digits]."""
    parts = str(value).split(".")
    lengths = []
    for part in parts:
        lengths.append(len(part))
    return lengths
def copy_template():
    """Interactively copy one of the bundled JSON templates into the current
    working directory as ``parameters.json``.

    Lists the templates shipped in the package's ``templates/`` folder,
    prompts the user to pick one, and copies it. An invalid choice only
    prints a message; the user must re-run the script.
    """
    script_location = os.path.dirname(os.path.realpath(__file__))
    templates_folder = script_location + "/templates/"
    available_templates = glob(templates_folder + "*.json")
    # Reduce full paths to bare filenames for display and matching
    available_templates = [template.split("/")[-1] for template in available_templates]
    print("The templates available are:")
    for template in available_templates:
        print(template)
    target = input("Please specify which template to copy: ")
    if target not in available_templates:
        print("Not a template; probably a typo.")
        print("Please re-run the script.")
    else:
        shutil.copy2(templates_folder + target, os.getcwd() + "/parameters.json")
        print("Copied template " + target + " to your folder as parameters.json.")
        print("Edit the .json input file and re-run the script.")
def flatten_list(input_list: List[List[Any]]):
    """
    Flatten a singly-nested list. Iterable elements (including strings) are
    expanded one level; non-iterable elements are appended as-is, following
    the EAFP idiom via TypeError.

    Parameters
    ----------
    input_list: list
        List of values, where some of the elements are lists

    Returns
    -------
    output_list: list
        Flattened version of input_list
    """
    flattened = []
    for item in input_list:
        try:
            flattened.extend(item)
        except TypeError:
            # Not iterable -- keep the scalar as-is
            flattened.append(item)
    return flattened
def list_directories():
    """Return the names of all subdirectories of the current working directory."""
    return list(filter(os.path.isdir, os.listdir()))
def backup_files(molecule_name, save_location):
    """Copy the standard Pickett files for ``molecule_name`` (.cat, .var,
    .par, .int, .json, .lin) into ``save_location``; missing files are
    silently skipped."""
    for ext in [".cat", ".var", ".par", ".int", ".json", ".lin"]:
        filename = molecule_name + ext
        if not os.path.isfile(filename):
            continue
        shutil.copy2(filename, save_location)
        print("Backing up " + filename + " to " + save_location)
def isnotebook():
    """Detect the execution environment: True only inside a Jupyter
    notebook/qtconsole; False in a terminal IPython shell, a plain Python
    interpreter, or any other shell type."""
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # get_ipython is not defined in a standard interpreter
        return False
    # ZMQInteractiveShell backs the notebook and qtconsole frontends
    return shell_name == "ZMQInteractiveShell"
def save_obj(obj: Any, filepath: str, **kwargs):
    """
    Serialize an object to disk with joblib.dump.

    Defaults to gzip level-6 compression and pickle protocol 4; any keyword
    arguments override these defaults and are passed through to joblib.dump.

    parameters:
    ---------------
    obj - instance of object to be serialized
    filepath - filepath to save to
    """
    options = dict(compress=("gzip", 6), protocol=4)
    options.update(kwargs)
    joblib.dump(obj, filepath, **options)
def read_obj(filepath: str):
    """
    Load a joblib-serialized object from disk.

    parameters:
    ---------------
    filepath - path to object
    """
    return joblib.load(filepath)
def dump_packages():
"""
Function that will return a list of packages that
have been loaded and their version numbers.
This function will ignore system packages:
sys, __builtins__, types, os
as well as modules with no version.
This is not working the way I want it to...
returns:
-------------
mod_dict - dict with keys corresponding to module name,
and values the version number.
"""
mod_dict = dict()
sys_packages = ["sys", "__builtins__", "types", "os"]
for name, module | |
"""
Some utility functions for the data analysis project.
"""
import numpy as np
import healpy as hp
import pylab as plt
import os
from pixell import curvedsky
from pspy import pspy_utils, so_cov, so_spectra, so_mcm, so_map_preprocessing
from pspy.cov_fortran.cov_fortran import cov_compute as cov_fortran
from pspy.mcm_fortran.mcm_fortran import mcm_compute as mcm_fortran
from pixell import enmap
import gc
def get_filtered_map(orig_map, binary, filter, inv_pixwin_lxly=None, weighted_filter=False, tol=1e-4, ref=0.9):
    """Filter the map in Fourier space using a predefined filter. Note that we mutliply the maps by a binary mask before
    doing this operation in order to remove pathological pixels
    We also include an option for removing the pixel window function

    Parameters
    ---------
    orig_map: ``so_map``
        the map to be filtered
    binary: ``so_map``
        a binary mask removing pathological pixels
    filter: 2d array
        a filter applied in fourier space
        (note: the parameter name shadows the ``filter`` builtin, kept for
        API compatibility)
    inv_pixwin_lxly: 2d array
        the inverse of the pixel window function in fourier space
    weighted_filter: boolean
        wether to use weighted filter a la sigurd
    tol, ref: floats
        only in use in the case of the weighted filter, these arg
        remove crazy pixels value in the weight applied
    """
    if weighted_filter == False:
        # Simple path: one multiplication in Fourier space (optionally
        # folding in the inverse pixel window)
        if inv_pixwin_lxly is not None:
            orig_map = fourier_mult(orig_map, binary, filter * inv_pixwin_lxly)
        else:
            orig_map = fourier_mult(orig_map, binary, filter)
    else:
        orig_map.data *= binary.data
        # Subtract the (1 - filter) component, weighted by the same
        # operation applied to the binary mask, instead of multiplying
        # by the filter directly
        one_mf = (1 - filter)
        rhs = enmap.ifft(one_mf * enmap.fft(orig_map.data, normalize=True), normalize=True).real
        gc.collect()
        div = enmap.ifft(one_mf * enmap.fft(binary.data, normalize=True), normalize=True).real
        del one_mf
        gc.collect()
        # Clip tiny weights to avoid dividing by near-zero pixels
        div = np.maximum(div, np.percentile(binary.data[::10, ::10], ref * 100) * tol)
        orig_map.data -= rhs / div
        del rhs
        del div
        gc.collect()
        if inv_pixwin_lxly is not None:
            # Remove the pixel window in a separate Fourier pass
            ft = enmap.fft(orig_map.data, normalize=True)
            ft *= inv_pixwin_lxly
            orig_map.data = enmap.ifft(ft, normalize=True).real
        gc.collect()
    return orig_map
def fourier_mult(orig_map, binary, fourier_array):
    """do a fourier multiplication of the FFT of the orig_map with a fourier array, binary help to remove pathological pixels

    Modifies ``orig_map.data`` in place and also returns the map.

    Parameters
    ---------
    orig_map: ``so_map``
        the map to be filtered
    binary: ``so_map``
        a binary mask removing pathological pixels
    fourier_array: 2d array
        the fourier array we want to multiply the FFT of the map with
    """
    # Mask first so pathological pixels do not leak into the FFT
    orig_map.data *= binary.data
    ft = enmap.fft(orig_map.data, normalize=True)
    ft *= fourier_array
    orig_map.data = enmap.ifft(ft, normalize=True).real
    return orig_map
def get_coadded_map(orig_map, coadd_map, coadd_mask):
    """Co-add a map with another map given its associated mask.

    Note: both ``coadd_map`` and ``orig_map`` are modified in place; the
    mask is applied to ``coadd_map`` before the addition.

    Parameters
    ---------
    orig_map: ``so_map``
        the original map without point sources
    coadd_map: ``so_map``
        the map to be co-added
    coadd_mask: ``so_map``
        the mask associated to the coadd_map
    """
    if coadd_map.ncomp == 1:
        coadd_map.data *= coadd_mask.data
    else:
        # Broadcast the single mask over all (T, Q, U) components
        coadd_map.data[:] *= coadd_mask.data
    orig_map.data += coadd_map.data
    return orig_map
def fill_sym_mat(mat):
    """Symmetrize a triangular matrix: add the transpose, then subtract one
    copy of the diagonal (which would otherwise be counted twice).

    Parameters
    ----------
    mat : 2d array
        the matrix we want symmetric
    """
    transpose_sum = mat + mat.T
    return transpose_sum - np.diag(mat.diagonal())
def get_nspec(dict):
    """Count the cross, noise and auto spectra implied by the survey/array
    configuration in ``dict``.

    Pairs are counted without double counting: survey pairs with
    id_sv1 <= id_sv2 and, within the same survey, array pairs with
    id_ar1 <= id_ar2. Noise and auto spectra only exist for same-survey
    pairs. (The parameter name shadows the builtin but is kept for API
    compatibility.)
    """
    surveys = dict["surveys"]
    nspec = {"cross": 0, "noise": 0, "auto": 0}
    for id_sv1, sv1 in enumerate(surveys):
        for id_ar1, ar1 in enumerate(dict["arrays_%s" % sv1]):
            for id_sv2, sv2 in enumerate(surveys):
                if id_sv1 > id_sv2:
                    continue
                for id_ar2, ar2 in enumerate(dict["arrays_%s" % sv2]):
                    if id_sv1 == id_sv2 and id_ar1 > id_ar2:
                        continue
                    nspec["cross"] += 1
                    if sv1 == sv2:
                        nspec["noise"] += 1
                        nspec["auto"] += 1
    return nspec
def get_noise_matrix_spin0and2(noise_dir, survey, arrays, lmax, nsplits):
    """This function uses the measured noise power spectra
    and generate a three dimensional array of noise power spectra [n_arrays, n_arrays, lmax] for temperature
    and polarisation.
    The different entries ([i,j,:]) of the arrays contain the noise power spectra
    for the different array pairs.
    for example nl_array_t[0,0,:] => nl^{TT}_{ar_{0},ar_{0}), nl_array_t[0,1,:] => nl^{TT}_{ar_{0},ar_{1})
    this allows to consider correlated noise between different arrays.

    Parameters
    ----------
    noise_dir : string
        the folder containing the noise power spectra
    survey : string
        the survey to consider
    arrays: 1d array of string
        the arrays we consider
    lmax: integer
        the maximum multipole for the noise power spectra
    nsplits: integer
        the number of data splits we want to simulate
        nl_per_split = nl * n_{splits}
    """
    spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
    n_arrays = len(arrays)
    nl_array_t = np.zeros((n_arrays, n_arrays, lmax))
    nl_array_pol = np.zeros((n_arrays, n_arrays, lmax))
    for c1, ar1 in enumerate(arrays):
        for c2, ar2 in enumerate(arrays):
            # Only the upper triangle is read; symmetrized below
            if c1 > c2: continue
            l, nl = so_spectra.read_ps("%s/mean_%sx%s_%s_noise.dat" % (noise_dir, ar1, ar2, survey), spectra=spectra)
            nl_t = nl["TT"][:lmax]
            # Average EE and BB into a single polarisation noise spectrum
            nl_pol = (nl["EE"][:lmax] + nl["BB"][:lmax]) / 2
            l = l[:lmax]
            # Convert Dl-like spectra to Cl and scale by the number of splits
            # NOTE(review): assumes the on-disk l column starts such that
            # l*(l+1) is never zero over [:lmax] -- confirm file format.
            nl_array_t[c1, c2, :] = nl_t * nsplits * 2 * np.pi / (l * (l + 1))
            nl_array_pol[c1, c2, :] = nl_pol * nsplits * 2 * np.pi / (l * (l + 1))
    # Mirror the upper triangle into a full symmetric matrix at each multipole
    for i in range(lmax):
        nl_array_t[:, :, i] = fill_sym_mat(nl_array_t[:, :, i])
        nl_array_pol[:, :, i] = fill_sym_mat(nl_array_pol[:, :, i])
    return l, nl_array_t, nl_array_pol
def get_foreground_matrix(fg_dir, all_freqs, lmax):
    """This function uses the best fit foreground power spectra
    and generate a three dimensional array of foregroung power spectra [nfreqs, nfreqs, lmax].
    The different entries ([i,j,:]) of the array contains the fg power spectra for the different
    frequency channel pairs.
    for example fl_array_T[0,0,:] => fl_{f_{0},f_{0}), fl_array_T[0,1,:] => fl_{f_{0},f_{1})
    this allows to have correlated fg between different frequency channels.
    (Not that for now, no fg are including in pol)

    Parameters
    ----------
    fg_dir : string
        the folder containing the foreground power spectra
    all_freqs: 1d array of string
        the frequencies we consider
    lmax: integer
        the maximum multipole for the noise power spectra
    """
    nfreqs = len(all_freqs)
    fl_array = np.zeros((nfreqs, nfreqs, lmax))
    for c1, freq1 in enumerate(all_freqs):
        for c2, freq2 in enumerate(all_freqs):
            # Only the upper triangle is read; symmetrized below
            if c1 > c2: continue
            l, fl_all = np.loadtxt("%s/fg_%sx%s_TT.dat" % (fg_dir, freq1, freq2), unpack=True)
            # Convert Dl-like spectra to Cl
            fl_all *= 2 * np.pi / (l * (l + 1))
            # Multipoles below l=2 are left at zero
            # NOTE(review): the slice assumes the file's l column starts at
            # l=2 -- confirm against the fg file format.
            fl_array[c1, c2, 2:lmax] = fl_all[:lmax - 2]
    # Mirror the upper triangle into a full symmetric matrix at each multipole
    for i in range(lmax):
        fl_array[:, :, i] = fill_sym_mat(fl_array[:, :, i])
    return l, fl_array
def multiply_alms(alms, bl, ncomp):
    """This routine mutliply the alms by a function bl

    The input alms are copied first, so the originals are untouched.

    Parameters
    ----------
    alms : 1d array
        the alms to be multiplied
    bl : 1d array
        the function to multiply the alms
    ncomp: interger
        the number of components
        ncomp = 3 if T,Q,U
        ncomp = 1 if T only
    """
    alms_mult = alms.copy()
    if ncomp == 1:
        alms_mult = hp.sphtfunc.almxfl(alms_mult, bl)
    else:
        # Apply bl to each component's alm independently
        for i in range(ncomp):
            alms_mult[i] = hp.sphtfunc.almxfl(alms_mult[i], bl)
    return alms_mult
def generate_noise_alms(nl_array_t, lmax, n_splits, ncomp, nl_array_pol=None, dtype=np.complex128):
    """This function generates the alms corresponding to the noise power spectra matrices
    nl_array_t, nl_array_pol. The function returns a dictionnary nlms["T", i].
    The entry of the dictionnary are for example nlms["T", i] where i is the index of the split.
    note that nlms["T", i] is a (narrays, size(alm)) array, it is the harmonic transform of
    the noise realisation for the different frequencies.

    Parameters
    ----------
    nl_array_t : 3d array [narrays, narrays, lmax]
        noise power spectra matrix for temperature data
    lmax : integer
        the maximum multipole for the noise power spectra
    n_splits: integer
        the number of data splits we want to simulate
    ncomp: interger
        the number of components
        ncomp = 3 if T,Q,U
        ncomp = 1 if T only
    nl_array_pol : 3d array [narrays, narrays, lmax]
        noise power spectra matrix for polarisation data
        (in use if ncomp==3)
    """
    nlms = {}
    if ncomp == 1:
        # Temperature-only: keys are just the split index
        for k in range(n_splits):
            nlms[k] = curvedsky.rand_alm(nl_array_t, lmax=lmax, dtype=dtype)
    else:
        # T/E/B: keys are (component, split index); E and B share the
        # polarisation noise matrix but are drawn independently
        for k in range(n_splits):
            nlms["T", k] = curvedsky.rand_alm(nl_array_t, lmax=lmax, dtype=dtype)
            nlms["E", k] = curvedsky.rand_alm(nl_array_pol, lmax=lmax, dtype=dtype)
            nlms["B", k] = curvedsky.rand_alm(nl_array_pol, lmax=lmax, dtype=dtype)
    return nlms
def remove_mean(so_map, window, ncomp):
    """Subtract the window-weighted mean from a map, in place.

    Parameters
    ----------
    so_map : so_map
        the map we want to subtract the mean from
    window : so_map or so_map tuple
        the window function; if ncomp == 3 expects (win_t, win_pol)
    ncomp : integer
        1 for T only, 3 for T, Q, U
    """
    if ncomp == 1:
        so_map.data -= np.mean(so_map.data * window.data)
        return so_map
    # Temperature uses the T window; both polarisation components share
    # the polarisation window.
    for comp, win in zip(range(3), (window[0], window[1], window[1])):
        so_map.data[comp] -= np.mean(so_map.data[comp] * win.data)
    return so_map
def deconvolve_tf(lb, ps, tf1, tf2, ncomp, lmax=None):
"""This function deconvolves the transfer function
Parameters
----------
ps : dict or 1d array
the power spectra with tf applied
tf1 : 1d array
| |
= get_ontology().get_graph(clean)
for t in ontology[::]:
graph.add(t)
return graph
# Provide information about the things described by the knowledge graph
def check_property_value(self, property_uri, value):
    """Checks if the value is valid and transforms it into a corresponding rdflib Literal or URIRef
    Returns a pair of a validity message and the Literal/URIRef
    If the value is valid the validity message is an emptystring.
    If the value is not valid instead of the literal, None is returned

    Validation is driven by the property's type (object vs datatype), its
    range, and whether the range is an owl:oneOf enumeration; the final
    fall-through return builds a typed Literal for plain datatype ranges.
    """
    ontology = get_ontology()
    is_object_property = ontology.get_property_type(property_uri) == 'ObjectProperty'
    is_described = ontology.is_property_described(property_uri)
    range_uri = ontology.get_property_range(property_uri)
    # First element of the owl:oneOf list, or None if the range is not an enumeration
    one_of = next(ontology.graph[range_uri:OWL.oneOf:], None)
    if value == '':
        # Empty values are allowed but will be deleted on reloading the page
        # todo they are persisted in the db though because I have not found out what's the best occasion to delete
        # them yet..
        return '', Literal(value)
    elif is_object_property and is_described:
        # we trust that the individual has the correct class, namely self.range_uri
        uri = value if type(value) == URIRef else URIRef(value)
        return '', uri
    elif is_object_property and ontology.is_property_add_custom_option_allowed(property_uri):
        # option field with objects as options, not literals
        options = ontology.get_tokens(range_uri)
        uri = value if type(value) == URIRef else URIRef(value)
        if uri in options:
            return '', uri
        else:
            return 'Choose an option from the list.', None
    elif is_object_property:
        # option field with objects as options that are described at some other place in the tool
        # (note: options come from self, not the ontology, in this branch)
        options = self.get_tokens(range_uri)
        uri = value if type(value) == URIRef else URIRef(value)
        if uri in options:
            return '', uri
        else:
            return 'Choose an option from the list.', None
    elif one_of:
        # option field with literals as options defined as a list in the ontology
        options = ontology.construct_list(one_of)
        lit = Literal(value, datatype=XSD.boolean) if range_uri == XSD.boolean else Literal(value)
        if lit in options:
            return '', lit
        else:
            return 'Choose an option from the list.', None
    elif range_uri == XSD.boolean:
        # option field with option True and False
        lit = Literal(value, datatype=XSD.boolean) if range_uri == XSD.boolean else Literal(value)
        if lit in (TRUE, FALSE):
            return '', lit
        else:
            return 'Choose an TRUE or FALSE.', None
    elif range_uri == RDFS.Literal:
        # Free-text ranges need no validation; fall through to the final return
        pass
    elif range_uri == XSD.string:
        pass
    elif range_uri == XSD.float:
        try:
            float(value)
        except (ValueError, SyntaxError):
            return '{} is not a valid float.'.format(value), None
    elif range_uri == XSD.integer:
        try:
            int(value)
        except (ValueError, SyntaxError):
            return '{} is not a valid integer.'.format(value), None
    elif range_uri == XSD.nonNegativeInteger:
        try:
            i = int(value)
        except (ValueError, SyntaxError):
            return '{} is not a valid integer.'.format(value), None
        if i < 0:
            return '{} is not a non-negative value.'.format(value), None
    elif range_uri == XSD.positiveInteger:
        try:
            i = int(value)
        except (ValueError, SyntaxError):
            return '{} is not a valid integer.'.format(value), None
        if i <= 0:
            return '{} is not a positive value.'.format(value), None
    else:
        return 'Unknown Datatype: {}'.format(range_uri), None
    # Valid datatype value: wrap it in a Literal typed with the range
    return '', Literal(value, datatype=range_uri)
def get_property_values(self, individual_uri, property_uri):
    """Return the stored values for (individual, property), keyed by index,
    keeping only values whose triples are still in the graph and that are
    non-empty when stringified."""
    individual_uri = URIRef(individual_uri) if type(individual_uri) == str else individual_uri
    property_uri = URIRef(property_uri) if type(property_uri) == str else property_uri
    values = self.properties[(individual_uri, property_uri)]
    # Objects currently asserted for this subject/predicate pair
    current = set(self.graph[individual_uri:property_uri:])
    return {
        index: value
        for index, value in values.items()
        if value in current and str(value) != ''
    }
def get_property_free_index(self, entity_uri, property_uri):
    """Return the next unused value index for (entity, property):
    max existing index + 1, or 1 when no values exist yet."""
    entity_uri = URIRef(entity_uri) if type(entity_uri) == str else entity_uri
    property_uri = URIRef(property_uri) if type(property_uri) == str else property_uri
    indices = self.properties[(entity_uri, property_uri)].keys()
    return max(indices, default=0) + 1
def get_individual_children(self, uri):
    """Collect the values of every described 'child' property of the
    individual's class."""
    if type(uri) == str:
        uri = URIRef(uri)
    ontology = get_ontology()
    children = []
    for property_uri in ontology.get_class_child_properties(self.get_individual_class(uri)):
        # Only properties that are described in this tool contribute children
        if not ontology.is_property_described(property_uri):
            continue
        children.extend(self.get_property_values(uri, property_uri).values())
    return children
def is_individual_deletable(self, uri):
    """An individual may be deleted unless it is the root individual or any
    property pointing at it is marked non-deletable in the ontology."""
    if type(uri) == str:
        uri = URIRef(uri)
    ontology = get_ontology()
    if uri in self.graph[:RATIO.isRoot:TRUE]:
        return False
    return all(
        ontology.is_property_deletable(parent_property)
        for parent_property in self.graph.predicates(object=uri)
    )
def get_subgraph_knowledge(subgraph_id):
    """Get SubgraphKnowledge of a certain subgraph.

    The connection is unique for each request (stored on Flask's ``g``) and
    will be reused if this is called again within the same request.
    """
    if 'knowledge' not in g:
        g.knowledge = {}
    cache = g.knowledge
    # construct lazily so no connection is opened when already cached
    if subgraph_id not in cache:
        cache[subgraph_id] = SubgraphKnowledge(subgraph_id)
    return cache[subgraph_id]
class Field:
    """Represents a possible owl:ObjectProperty or owl:DatatypeProperty of an Entity

    This is used to provide the information about a field for rendering to Jinja.
    Don't use it to check things like field.label - ontology.get_label(property_uri) is more efficient.
    """

    def __init__(self, property_uri, label, comment,
                 type_, is_described, is_deletable, is_functional,
                 range_uri, range_label,
                 order, width, values, free_index, is_add_option_allowed, options=None):
        self.property_uri = property_uri  # URI of the owl property
        self.label = label  # rdfs:label of the property
        self.comment = comment  # rdfs:comment of the property
        self.type = type_  # 'ObjectProperty', 'DatatypeProperty' or 'Subheading'
        self.is_described = is_described  # values are individuals with their own sub-form
        self.is_deletable = is_deletable
        self.is_functional = is_functional
        self.range_uri = range_uri  # URI of the property's range class / datatype
        self.range_label = range_label
        self.order = order  # position of the field in the rendered form
        self.width = width
        self.values = values  # dict index -> value (raw term, Entity or Option)
        self.free_index = free_index  # next unused index for a new value
        self.is_add_option_allowed = is_add_option_allowed  # user may add custom options
        self.options = options  # selectable options, None for plain datatype fields

    @property
    def is_object_property(self):
        """True if this field renders an owl:ObjectProperty."""
        return self.type == 'ObjectProperty'

    @property
    def is_datatype_property(self):
        """True if this field renders an owl:DatatypeProperty."""
        return self.type == 'DatatypeProperty'

    @property
    def is_subheading(self):
        """True if this is a pseudo-field that only renders as a heading."""
        return self.type == 'Subheading'

    def get_sorted_values(self):
        """Get (index, value)-pairs for values sorted by index."""
        return sorted(self.values.items(), key=lambda i: i[0])

    # Factories

    @classmethod
    def from_knowledge(cls, subgraph_id, individual_uri, property_uri):
        """Build the field including the values stored for *individual_uri* in the subgraph."""
        field = Field.new(subgraph_id, property_uri)
        if field.type == 'Subheading':
            # subheadings carry no values
            return field
        knowledge = get_subgraph_knowledge(subgraph_id)
        field.values = knowledge.get_property_values(individual_uri, property_uri)
        field.free_index = knowledge.get_property_free_index(individual_uri, property_uri)
        if field.is_described:
            # values are individuals that get their own sub-form
            field.values = {i: Entity.from_knowledge(subgraph_id, field.values[i]) for i in field.values}
        elif field.type == 'ObjectProperty':
            if field.is_add_option_allowed:
                # for options that are not described by the user in a different field of the interface
                field.values = {i: Option.from_ontology(field.values[i]) for i in field.values}
            else:
                # for options that are described by the user in a different field of the interface
                field.values = {i: Option.from_knowledge(field.values[i], subgraph_id) for i in field.values}
        return field

    @classmethod
    def new(cls, subgraph_id, property_uri):
        """Build an empty field (no values yet) from the ontology definition of the property."""
        knowledge = get_subgraph_knowledge(subgraph_id)
        ontology = get_ontology()
        label = ontology.get_label(property_uri)
        comment = ontology.get_comment(property_uri)
        order = ontology.get_property_order(property_uri)
        type_ = ontology.get_property_type(property_uri)
        if type_ == 'Subheading':
            # subheadings have no range, options or values
            return cls.subheading(property_uri, label, comment, order)
        range_class_uri = ontology.get_property_range(property_uri)
        range_label = ontology.get_label(range_class_uri)
        is_functional = ontology.is_property_functional(property_uri)
        is_described = ontology.is_property_described(property_uri)
        is_deletable = ontology.is_property_deletable(property_uri)
        is_add_option_allowed = ontology.is_property_add_custom_option_allowed(property_uri)
        width = ontology.get_property_width(property_uri)
        values = dict()
        free_index = 1
        # owl:oneOf marks an enumerated range (fixed list of choices)
        one_of = next(ontology.graph[range_class_uri:OWL.oneOf:], None)
        if type_ == 'ObjectProperty' and not is_described:
            if is_add_option_allowed:
                # options come from the ontology itself
                options = [Option.from_ontology(uri) for uri in ontology.get_tokens(range_class_uri)]
            else:
                # options come from individuals described elsewhere in the subgraph
                options = [Option.from_knowledge(uri, subgraph_id) for uri in knowledge.get_tokens(range_class_uri)]
            options.sort(key=lambda option: option.label)
        elif one_of is not None:
            options = ontology.construct_list(one_of)
        elif range_class_uri == XSD.boolean:
            options = [TRUE, FALSE]
        else:
            # free-form datatype field: no option list
            options = None
        return cls(property_uri, label, comment, type_, is_described, is_deletable, is_functional,
                   range_class_uri, range_label, order, width, values, free_index, is_add_option_allowed, options)

    @classmethod
    def subheading(cls, property_uri, label, comment, order):
        """Build a pseudo-field that only renders as a heading."""
        return cls(property_uri, label, comment, 'Subheading', False, False, True,
                   None, None, order, None, dict(), None, False, [])
class Option:
    """Represents a possible owl:NamedIndividual that can be added to a non-described owl:ObjectProperty field

    This is used to provide the information about an option for rendering to Jinja.
    Don't use it to check things like option.label - ontology.get_label(uri) is more efficient.
    """

    def __init__(self, uri, label, class_uri, creator=None, comment=None):
        self.uri = uri
        self.label = label
        self.class_uri = class_uri
        # only custom options have a creator, otherwise None
        self.creator = creator
        self.comment = comment

    # Factories

    @classmethod
    def from_ontology(cls, uri):
        """Build an option defined in the ontology (not described by the user
        in a different field of the interface)."""
        ontology = get_ontology()
        label = ontology.get_label(uri)
        comment = ontology.get_comment(uri)
        class_uri = ontology.get_individual_class(uri)
        if class_uri is None:
            raise KeyError('No type found for option ' + str(uri))
        creator = ontology.get_creator(uri)
        return cls(uri, label, class_uri, creator=creator, comment=comment)

    @classmethod
    def from_knowledge(cls, uri, subgraph_id):
        """Build an option for an individual described by the user in a
        different field of the interface."""
        knowledge = get_subgraph_knowledge(subgraph_id)
        label = knowledge.get_label(uri)
        class_uri = knowledge.get_individual_class(uri)
        if class_uri is None:
            raise KeyError('No type found for option ' + str(uri))
        return cls(uri, label, class_uri)
class Entity:
"""Represents a owl:NamedIndividual
This is used to provide the information about an individual for rendering to Jinja.
Don't use it to check things like entity.label - knowledge.get_label(uri) is more efficient.
"""
def __init__(self, uri, label, comment, class_uri, class_label, fields):
self.uri = uri
self.label = label
self.comment = comment
self.class_uri = class_uri
self.class_label = class_label
self.fields = fields # fields s.t. field.property_uri rdfs:domain self.uri
# Factories
@classmethod
def from_knowledge(cls, subgraph_id, uri):
knowledge = get_subgraph_knowledge(subgraph_id)
ontology = get_ontology()
class_uri = knowledge.get_individual_class(uri)
if class_uri is None:
raise ValueError('No type found for individual ' + str(uri))
label = knowledge.get_label(uri)
class_label = ontology.get_label(class_uri)
comment = ontology.get_comment(class_uri)
fields = [
Field.from_knowledge(subgraph_id, | |
<filename>mlprodict/testing/einsum/einsum_impl_classes.py
# pylint: disable=C0302
"""
@file
@brief Classes representing the sequence of matrix operations to
implement einsum computation.
"""
import numpy
from onnx import helper, numpy_helper
from skl2onnx.common.data_types import guess_proto_type
from ...onnx_tools.onnx2py_helper import guess_proto_dtype
from ...tools.asv_options_helper import (
get_opset_number_from_onnx, get_ir_version_from_onnx)
from .blas_lapack import gemm_dot
from .einsum_impl_ext import (
numpy_extended_dot, numpy_diagonal,
_numpy_extended_dot_equation,
numpy_extended_dot_python,
numpy_extended_dot_matrix)
def single_axes(axes):
    """
    *axes* contains positive values, then it is the position
    of this axis in the original matrix, otherwise it is -1
    meaning this axis is an added single dimension to align
    all the dimensions based on the einsum equation.

    :param axes: axes described above
    :return: list of integer in set `{1, 2}`, 1 for
        a single (inserted) axis, 2 for a real one;
        None is passed through unchanged
    """
    if axes is None:
        return None
    result = []
    for axis in axes:
        result.append(1 if axis == -1 else 2)
    return result
class EinsumSubOp:
"""
Defines a sub operation used in Einsum decomposition.
:param name: name (reshape, transpose, reduce_sum, matmul, id,
squeeze, diagonal, mul, batch_dot)
:param inputs: inputs
:param kwargs: arguments
Operator suffixed by `_mm` (*transpose_mm*, *reduce_sum_mm*)
are equivalent to the same operator without the suffix
but takes two inputs and only changes the first one.
Attributes `_info` summarizes the known information
about dimensions. Many of them are empty because inserted.
Value `1` means it was the case, `2` means it is a plain dimension.
"""
_allowed = {'expand_dims', 'transpose', 'reduce_sum', 'matmul', 'id',
'squeeze', 'diagonal', 'mul', 'batch_dot',
'transpose_mm', 'reduce_sum_mm'}
    def __init__(self, full_dim, name, *inputs, **kwargs):
        # full_dim: number of axes of the rows handled by this operator
        self.full_dim = full_dim
        # name: operator kind, must belong to EinsumSubOp._allowed
        self.name = name
        # inputs: one or two operands, each an integer id or another EinsumSubOp
        self.inputs = inputs
        # kwargs: operator-specific arguments (perm, axes, left, right, ...)
        self.kwargs = kwargs
        # _info: known information about dimensions, filled in later
        self._info = {}
        if name not in EinsumSubOp._allowed:
            raise ValueError(
                "Unexpected name %r. It should be in %r."
                "" % (name, EinsumSubOp._allowed))
        if len(inputs) not in (1, 2):
            raise RuntimeError(
                "Inputs must contains 1 or 2 inputs not %d." % len(inputs))
        if name == 'matmul' and len(inputs) != 2:
            raise RuntimeError(
                "Inputs must contains 2 inputs not %d for operator 'matmul'."
                "" % len(inputs))
        for i, inp in enumerate(inputs):
            if not isinstance(inp, (int, EinsumSubOp)):
                raise TypeError(
                    "Input %d has type %r, int or EinsumSubOp is expected."
                    "" % (i, type(inp)))
        # operator-specific validation of kwargs
        self._check_()
    def _check_(self):
        """Validate operator-specific kwargs; called from __init__.

        Only 'transpose' and 'matmul' carry constraints worth checking here;
        other operators are validated lazily in their compute methods.
        """
        if self.name == 'transpose':
            self._check_arg_('perm', tuple)
            perm = self.kwargs['perm']
            # a permutation must not repeat an axis
            if len(perm) != len(set(perm)):
                raise RuntimeError(  # pragma: no cover
                    "perm has duplicated values %r (name=%r)."
                    "" % (perm, self.name))
            # the identity permutation is a no-op and should have been removed
            # by the decomposition
            if list(perm) == list(range(len(perm))):
                raise ValueError(  # pragma: no cover
                    "Transpose = identity perm={}. It must be removed."
                    "".format(perm))
        elif self.name == 'matmul':
            self._check_arg_('axes', tuple)
            self._check_arg_('left', tuple)
            self._check_arg_('right', tuple)
            axes = self.kwargs['axes']
            left = self.kwargs['left']
            right = self.kwargs['right']
            # an axis is either summed (axes) or kept on a side, never all three
            for a in axes:
                if a in left and a in right:
                    raise RuntimeError(  # pragma: no cover
                        "One axis belongs to every set (axes, left, right). "
                        "axes=%r, left=%r, right=%r." % (axes, left, right))
def __repr__(self):
inps = ", ".join(map(str, self.inputs))
kw = ", ".join("%s=%r" % (k, w) for k, w in self.kwargs.items())
m = "%s(%r, %s, %s)" % (
self.__class__.__name__, self.name, inps, kw)
return m
    def dot_label(self):
        """
        Displays some informations useful to understand the operator.

        Returns an extra label for matmul nodes (their extended dot
        equation), None for every other operator.
        """
        if self.name == "matmul":
            ndim = self.kwargs['ndim']
            axes = self.kwargs['axes']
            left = self.kwargs['left']
            right = self.kwargs['right']
            eq = _numpy_extended_dot_equation(ndim, ndim, axes, left, right)
            # escape '>' so the dot renderer does not treat it as markup
            eq = eq.replace(">", "\\\\>")
            return "~" + eq
        return None
def _check_arg_(self, name, typ, empty=False):
if name not in self.kwargs:
raise RuntimeError( # pragma: no cover
"Parameter %r not found for operator %r." % (name, self.name))
if empty and self.kwargs[name] is None:
return
if not isinstance(self.kwargs[name], typ):
raise TypeError( # pragma: no cover
"Unexpected type %r for parameter %r and parameter %r."
"" % (type(self.kwargs[name]), name, self.name))
def _check_row_(self, row, inp=False, verbose=False):
"""
Checks input or output is valid.
"""
if verbose:
if inp:
print('<<' if inp else '>>', self.name, row, self.kwargs)
else:
print('<<' if inp else '>>', self.name, row)
    def _compute_output_row_id(self, row, row2=None, ab=False, verbose=False):
        """Identity operator: copy *row2* into *row* in place."""
        if ab:
            raise RuntimeError("ab option not allowed.")  # pragma: no cover
        self._check_row_(row, True, verbose=verbose)
        # NOTE(review): assumes row2 is always provided for 'id'; a None
        # row2 would raise TypeError on the slice below -- confirm with callers.
        row[:] = row2[:]
        self._check_row_(row, verbose=verbose)
def _compute_output_row_transpose(self, row, row2=None, ab=False, verbose=False):
if ab:
self._compute_output_row_transpose(row2, verbose=verbose)
return
self._check_row_(row, True, verbose=verbose)
self._check_arg_('perm', tuple)
if len(self.kwargs['perm']) != len(row):
raise RuntimeError( # pragma: no cover
"Unexpected permutation %r (row=%r)."
"" % (self.kwargs['perm'], row))
perm = self.kwargs['perm']
cpy = row.copy()
for i, p in enumerate(perm):
row[i] = cpy[p]
self._check_row_(row, verbose=verbose)
    def _compute_output_row_transpose_mm(self, row, row2=None, ab=False, verbose=False):
        """Two-input transpose: only the first input row is permuted,
        the second is left untouched."""
        if not ab:
            raise RuntimeError("ab must be True.")  # pragma: no cover
        self._check_row_(row, True, verbose=verbose)
        if row2 is None:
            raise RuntimeError(  # pragma: no cover
                "transpose_mm expects a second input.")
        # delegate to the single-input version on the first row only
        self._compute_output_row_transpose(row, row2=None, verbose=verbose)
def _compute_output_row_expand_dims(self, row, row2=None, ab=False, verbose=False):
if ab:
raise RuntimeError("ab option not allowed.") # pragma: no cover
self._check_row_(row, True, verbose=verbose)
self._check_arg_('axes', tuple)
axes = self.kwargs['axes']
for axis in axes:
if not isinstance(axis, tuple):
raise TypeError( # pragma: no cover
"Parameter axes of expand_dims should be a tuple of "
"tuple, axes=%r." % axes)
if row[axis[1]] != -1:
raise RuntimeError( # pragma: no cover
"Dimension should be -1 in row %r axis=%r." % (
row, self.kwargs['axis']))
self._check_row_(row, verbose=verbose)
def _compute_output_row_reduce_sum(self, row, row2=None, ab=False, verbose=False):
if ab:
raise RuntimeError("ab option not allowed.") # pragma: no cover
self._check_row_(row, True, verbose=verbose)
self._check_arg_('axes', tuple)
for a in self.kwargs['axes']:
row[a] = -1
self._check_row_(row, verbose=verbose)
    def _compute_output_row_reduce_sum_mm(self, row, row2=None, ab=False, verbose=False):
        """Two-input reduce_sum: the reduction is applied to the first row only."""
        if not ab:
            raise RuntimeError("ab must be true.")  # pragma: no cover
        # NOTE(review): row2 is traced before the None test below; with
        # row2=None and verbose=True this merely prints None (no crash).
        self._check_row_(row2, True, verbose=verbose)
        if row2 is None:
            raise RuntimeError(  # pragma: no cover
                "reduce_sum_mm expects a second input.")
        # delegate to the single-input version on the first row
        self._compute_output_row_reduce_sum(row, row2=None, verbose=verbose)
def _compute_output_row_squeeze(self, row, row2=None, ab=False, verbose=False):
if ab:
raise RuntimeError("ab option not allowed.") # pragma: no cover
self._check_row_(row, True, verbose=verbose)
self._check_arg_('axes', tuple)
for a in self.kwargs['axes']:
row[a] = -1
self._check_row_(row, verbose=verbose)
    def _compute_output_row_diagonal(self, row, row2=None, ab=False, verbose=False):
        """Apply a diagonal extraction: for every (choice, choices) pair in
        kwargs['diag'], merge the axes of *choices* into the kept axis
        *choice*, then renumber the remaining axes, all in place."""
        if ab:
            raise RuntimeError("ab option not allowed.")  # pragma: no cover
        self._check_row_(row, True, verbose=verbose)
        self._check_arg_('diag', list)
        to_remove = []
        for choice, choices in self.kwargs['diag']:
            # every axis of the group except the kept one disappears
            for ch in choices:
                if ch != choice:
                    to_remove.append(ch)
            # redirect row entries of the group to the kept axis
            for i in range(len(row)):  # pylint: disable=C0200
                if row[i] in choices:
                    if row[i] != choice:
                        row[i] = choice
        to_remove.sort()
        # renumber: removing axis r shifts every greater axis down by one;
        # finding r still present means the merge above failed
        for r in to_remove:
            for i in range(len(row)):  # pylint: disable=C0200
                if row[i] == r:
                    raise RuntimeError(  # pragma: no cover
                        "Unexpected result r=%r row=%r to_remove=%r "
                        "diag=%r." % (
                            r, row, to_remove, self.kwargs['diag']))
                if row[i] > r:
                    row[i] -= 1
        self._check_row_(row, verbose=verbose)
    def _compute_output_row_matmul(self, row, row2=None, ab=False, verbose=False):
        """Compute the output row of a matmul into *row2* in place:
        element-wise maximum of both rows, then every summed axis that is
        not kept on the right side is marked removed (-1)."""
        if not ab:
            raise RuntimeError("ab must be True.")  # pragma: no cover
        self._check_row_(row, True, verbose=verbose)
        self._check_row_(row2, True, verbose=verbose)
        self._check_arg_('axes', tuple)
        self._check_arg_('left', tuple)
        self._check_arg_('right', tuple)
        self._check_arg_('ndim', int)
        if row2 is None:
            raise RuntimeError(
                "matmul expects two inputs.")  # pragma: no cover
        if verbose:
            ndim = self.kwargs['ndim']
            axes = self.kwargs['axes']
            left = self.kwargs['left']
            right = self.kwargs['right']
            print(" MATMUL %r @ %r axes=%r left=%r right=%r - eq=%s" % (
                row, row2, axes, left, right,
                _numpy_extended_dot_equation(ndim, ndim, axes, left, right)))
        # an axis survives if either operand carries it ...
        row2[:] = numpy.maximum(row, row2)
        # ... unless it is summed away and not part of the right output
        for a in self.kwargs['axes']:
            if a not in self.kwargs['right']:
                row2[a] = -1
        self._check_row_(row2, verbose=verbose)
    def _compute_output_row_batch_dot(self, row, row2=None, ab=False, verbose=False):
        """Compute the output row of a batched dot product into *row2* in
        place: element-wise maximum of both rows, then every summed axis
        not kept on the right side is marked removed (-1)."""
        if not ab:
            raise RuntimeError("ab must be True.")  # pragma: no cover
        self._check_row_(row, True, verbose=verbose)
        self._check_row_(row2, True, verbose=verbose)
        self._check_arg_('batch_axes', tuple)
        # keep_axes may legitimately be None
        self._check_arg_('keep_axes', tuple, empty=True)
        self._check_arg_('sum_axes', tuple)
        self._check_arg_('left', tuple)
        self._check_arg_('right', tuple)
        self._check_arg_('ndim', int)
        if row2 is None:
            raise RuntimeError(
                "batch_dot expects two inputs.")  # pragma: no cover
        if verbose:
            batch_axes = self.kwargs['batch_axes']
            keep_axes = self.kwargs['keep_axes']
            sum_axes = self.kwargs['sum_axes']
            left = self.kwargs['left']
            right = self.kwargs['right']
            ndim = self.kwargs['ndim']
            print(" BATCH_DOT batch_axes=%r keep_axes=%r sum_axes=%r "
                  "left=%r right=%r eq=%r" % (
                      batch_axes, keep_axes, sum_axes, left, right,
                      _numpy_extended_dot_equation(ndim, ndim, sum_axes, left, right)))
        # an axis survives if either operand carries it ...
        row2[:] = numpy.maximum(row, row2)
        # ... unless it is summed away and not part of the right output
        for a in self.kwargs['sum_axes']:
            if a not in self.kwargs['right']:
                row2[a] = -1
        self._check_row_(row2, verbose=verbose)
def _compute_output_row_mul(self, row, row2=None, ab=False, verbose=False):
if not ab:
raise RuntimeError("ab must be True.") # pragma: no cover
self._check_row_(row, True, verbose=verbose)
self._check_row_(row2, True, verbose=verbose)
if row2 is None:
raise RuntimeError("mul expects two inputs.") # pragma: no cover
if verbose:
print(" MUL %r @ %r" % (row, row2))
row2[:] = numpy.maximum(row, row2)
self._check_row_(row2, verbose=verbose)
def compute_output_row(self, row, row2=None, ab=False, verbose=False):
"""
Updates *row* based on the operator.
"""
method_name = "_compute_output_row_%s" % self.name
meth = getattr(self, method_name, None)
if meth is None:
raise | |
# MINLP written by GAMS Convert at 04/21/18 13:55:13
#
# Equation counts
# Total E G L N X C B
# 384 180 64 140 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 630 182 56 0 392 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 2479 2363 116 0
#
# Reformulation has removed 1 variable and 1 equation
# GAMS Convert emits a star import by convention; it supplies ConcreteModel,
# Var, Reals, Binary, ... used by all generated declarations below.
from pyomo.environ import *

# single model instance; `m` is the short alias used throughout the file
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x57 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x58 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x59 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x60 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x61 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x62 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x63 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x64 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x65 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x66 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x67 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x68 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x69 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x70 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x71 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x72 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x73 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x74 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x75 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x76 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x77 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x78 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x79 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x80 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x81 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x82 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x83 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x84 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x85 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x86 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x87 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x88 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x89 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x90 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x91 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x92 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x93 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x94 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x95 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x96 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x97 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x98 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x99 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x100 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x101 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x102 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x103 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x104 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x105 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x106 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x107 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x108 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x109 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x110 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x111 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x112 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x113 = Var(within=Reals,bounds=(6.5,None),initialize=11.5)
m.x114 = Var(within=Reals,bounds=(3.25,None),initialize=8.25)
m.x115 = Var(within=Reals,bounds=(16.58,None),initialize=21.58)
m.x116 = Var(within=Reals,bounds=(14.92,None),initialize=19.92)
m.x117 = Var(within=Reals,bounds=(12.925,None),initialize=17.925)
m.x118 = Var(within=Reals,bounds=(12.26,None),initialize=17.26)
m.x119 = Var(within=Reals,bounds=(8.76,None),initialize=13.76)
m.x120 = Var(within=Reals,bounds=(16.08,None),initialize=21.08)
m.x121 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x122 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x123 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x124 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x125 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x126 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x127 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x128 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x129 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x130 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x131 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x132 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x133 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x134 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x135 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x136 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x137 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x138 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x139 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x140 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x141 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x142 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x143 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x144 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x145 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x146 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x147 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x148 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x149 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x150 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x151 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x152 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x153 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x154 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x155 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x156 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x157 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x158 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x159 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x160 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x161 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x162 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x163 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x164 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x165 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x166 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x167 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x168 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x169 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x170 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x171 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x172 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x173 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x174 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x175 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x176 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,2.5),initialize=0.961470588235294)
m.x178 = Var(within=Reals,bounds=(0,6),initialize=2.30752941176471)
m.x179 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x180 = Var(within=Reals,bounds=(None,None),initialize=0)
m.x181 = Var(within=Reals,bounds=(None,None),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b210 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b211 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b212 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b213 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b214 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b215 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b216 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b217 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b218 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b219 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b220 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b221 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b222 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b223 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b224 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b225 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b226 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b227 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b228 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b229 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b230 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b231 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b232 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b233 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b234 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b235 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b236 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b237 = Var(within=Binary,bounds=(0,1),initialize=0.5)
m.b238 = Var(within=Binary,bounds=(0,1),initialize=0.5)
# NOTE(review): `CannotHandle` is not a Pyomo domain. GAMS Convert emits it as
# a placeholder for variable types it could not translate -- per the header
# comment, the 392 `s1s*` variables are SOS1 members. As written, these lines
# raise NameError when the module is imported under Pyomo; the SOS1 structure
# needs to be modelled explicitly (e.g. NonNegativeReals plus SOSConstraint).
# Confirm against the original GAMS source before changing.
m.s1s239 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s240 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s241 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s242 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s243 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s244 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s245 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s246 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s247 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s248 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s249 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s250 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s251 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s252 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s253 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s254 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s255 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s256 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s257 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s258 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s259 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s260 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s261 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s262 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s263 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s264 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s265 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s266 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s267 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s268 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s269 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s270 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s271 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s272 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s273 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s274 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s275 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s276 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s277 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s278 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s279 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s280 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s281 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s282 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s283 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s284 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s285 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s286 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s287 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s288 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s289 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s290 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s291 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s292 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s293 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s294 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s295 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s296 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s297 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s298 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s299 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s300 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s301 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s302 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s303 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s304 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s305 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s306 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s307 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s308 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s309 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s310 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s311 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s312 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s313 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s314 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s315 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s316 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s317 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s318 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s319 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s320 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s321 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s322 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s323 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s324 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s325 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s326 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s327 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s328 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s329 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s330 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s331 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s332 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s333 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s334 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s335 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s336 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s337 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s338 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s339 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s340 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s341 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s342 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s343 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s344 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s345 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s346 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s347 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s348 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s349 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s350 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s351 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s352 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s353 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s354 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s355 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s356 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s357 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s358 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s359 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s360 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s361 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s362 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s363 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s364 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s365 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s366 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s367 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s368 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s369 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s370 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s371 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s372 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s373 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s374 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s375 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s376 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s377 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s378 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s379 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s380 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s381 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s382 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s383 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s384 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s385 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s386 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s387 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s388 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s389 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s390 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s391 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s392 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s393 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s394 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s395 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s396 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s397 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s398 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s399 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s400 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s401 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s402 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s403 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s404 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s405 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s406 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s407 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s408 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s409 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s410 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s411 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s412 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s413 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s414 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s415 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s416 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s417 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s418 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s419 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s420 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s421 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s422 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s423 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s424 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s425 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s426 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s427 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s428 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s429 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s430 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s431 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s432 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s433 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s434 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s435 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s436 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s437 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s438 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s439 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s440 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s441 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s442 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s443 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s444 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s445 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s446 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s447 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s448 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s449 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s450 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s451 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s452 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s453 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s454 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s455 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s456 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s457 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s458 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s459 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s460 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s461 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s462 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s463 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s464 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s465 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s466 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s467 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s468 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s469 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s470 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s471 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s472 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s473 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s474 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s475 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s476 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s477 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s478 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s479 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s480 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s481 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s482 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s483 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s484 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s485 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s486 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s487 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s488 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s489 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s490 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s491 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s492 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s493 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s494 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s495 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s496 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s497 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s498 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s499 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s500 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s501 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s502 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s503 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s504 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s505 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s506 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s507 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s508 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s509 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s510 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s511 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s512 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s513 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s514 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s515 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s516 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s517 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s518 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s519 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s520 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s521 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s522 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s523 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s524 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s525 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s526 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s527 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s528 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s529 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s530 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s531 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s532 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s533 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s534 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s535 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s536 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s537 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s538 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s539 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s540 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s541 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s542 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s543 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s544 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s545 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s546 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s547 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s548 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s549 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s550 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s551 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s552 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s553 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s554 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s555 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s556 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s557 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s558 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s559 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s560 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s561 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s562 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s563 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s564 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s565 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s566 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s567 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s568 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s569 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s570 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s571 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s572 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s573 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s574 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s575 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s576 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s577 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s578 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s579 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s580 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s581 = Var(within=CannotHandle,bounds=(0,None),initialize=0.0714285714285714)
m.s1s582 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s583 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s584 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s585 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s586 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s587 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s588 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s589 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s590 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s591 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s592 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s593 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s594 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s595 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s596 = Var(within=CannotHandle,bounds=(0,None),initialize=0)
m.s1s597 | |
# encoding: utf-8
# ------------------------------------------------------------------------
# Copyright 2020 All Histolab Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import math
import operator
from functools import reduce
from typing import Any, Callable
import numpy as np
import PIL
import PIL.ImageOps
import skimage.color as sk_color
import skimage.exposure as sk_exposure
import skimage.feature as sk_feature
import skimage.filters as sk_filters
import skimage.future as sk_future
import skimage.morphology as sk_morphology
import skimage.segmentation as sk_segmentation
from ..util import apply_mask_image, np_to_pil, threshold_to_mask, warn
from .util import mask_percent
def adaptive_equalization(
    img: PIL.Image.Image, nbins: int = 256, clip_limit: float = 0.01
) -> PIL.Image.Image:
    """Increase image contrast using adaptive equalization.

    Contrast in local regions of the input image (gray or RGB) is increased
    using Contrast Limited Adaptive Histogram Equalization (CLAHE).

    Parameters
    ----------
    img : PIL.Image.Image
        Input image (gray or RGB)
    nbins : int
        Number of histogram bins. Default is 256.
    clip_limit : float, optional
        Clipping limit where higher value increases contrast. Default is 0.01

    Returns
    -------
    PIL.Image.Image
        image with contrast enhanced by adaptive equalization.

    Raises
    ------
    ValueError
        If ``nbins`` is not a positive integer.
    """
    if not (isinstance(nbins, int) and nbins > 0):
        raise ValueError("Number of histogram bins must be a positive integer")
    img_arr = np.array(img)
    # BUG FIX: the previous call ``equalize_adapthist(img_arr, nbins, clip_limit)``
    # passed ``nbins`` positionally into skimage's ``kernel_size`` parameter
    # (signature: equalize_adapthist(image, kernel_size=None, clip_limit=0.01,
    # nbins=256)), so the requested bin count was silently used as the tile
    # size and the histogram kept its default 256 bins. Pass both by keyword.
    adapt_equ = sk_exposure.equalize_adapthist(
        img_arr, clip_limit=clip_limit, nbins=nbins
    )
    return np_to_pil(adapt_equ)
def blue_pen_filter(img: PIL.Image.Image) -> PIL.Image.Image:
    """Filter out blue pen marks from a diagnostic slide.

    The final mask is the intersection (logical AND) of several blue filters,
    each run with a different triple of RGB channel thresholds.

    Parameters
    ---------
    img : PIL.Image.Image
        Input RGB image

    Returns
    -------
    PIL.Image.Image
        Input image with the blue pen marks filtered out.
    """
    threshold_sets = [
        {"red_thresh": 60, "green_thresh": 120, "blue_thresh": 190},
        {"red_thresh": 120, "green_thresh": 170, "blue_thresh": 200},
        {"red_thresh": 175, "green_thresh": 210, "blue_thresh": 230},
        {"red_thresh": 145, "green_thresh": 180, "blue_thresh": 210},
        {"red_thresh": 37, "green_thresh": 95, "blue_thresh": 160},
        {"red_thresh": 30, "green_thresh": 65, "blue_thresh": 130},
        {"red_thresh": 130, "green_thresh": 155, "blue_thresh": 180},
        {"red_thresh": 40, "green_thresh": 35, "blue_thresh": 85},
        {"red_thresh": 30, "green_thresh": 20, "blue_thresh": 65},
        {"red_thresh": 90, "green_thresh": 90, "blue_thresh": 140},
        {"red_thresh": 60, "green_thresh": 60, "blue_thresh": 120},
        {"red_thresh": 110, "green_thresh": 110, "blue_thresh": 175},
    ]
    # Accumulate the AND of all per-threshold masks instead of using reduce().
    combined_mask = None
    for thresholds in threshold_sets:
        mask = blue_filter(img, **thresholds)
        combined_mask = mask if combined_mask is None else combined_mask & mask
    return apply_mask_image(img, combined_mask)
def dab_channel(img: PIL.Image.Image) -> PIL.Image.Image:
    """Obtain DAB channel from RGB image.

    The image is converted to HED space via color deconvolution; the
    Hematoxylin and Eosin channels are zeroed out and the result is
    converted back to RGB, leaving only the DAB staining.

    Parameters
    ----------
    img : PIL.Image.Image
        Input RGB image

    Returns
    -------
    PIL.Image.Image
        RGB image with DAB staining separated.
    """
    if img.mode not in ("RGB", "RGBA"):
        raise ValueError("Input image must be RGB/RGBA.")
    hed_arr = rgb_to_hed(img)
    zeros = np.zeros_like(hed_arr[:, :, 0])
    # DAB lives in channel 2 of the HED decomposition; blank the other two.
    dab_only = np.stack((zeros, zeros, hed_arr[:, :, 2]), axis=-1)
    return hed_to_rgb(dab_only)
def eosin_channel(img: PIL.Image.Image) -> PIL.Image.Image:
    """Obtain Eosin channel from RGB image.

    The image is converted to HED space via color deconvolution; the
    Hematoxylin and DAB channels are zeroed out and the result is
    converted back to RGB, leaving only the Eosin staining.

    Parameters
    ----------
    img : PIL.Image.Image
        Input RGB image

    Returns
    -------
    PIL.Image.Image
        RGB image with Eosin staining separated.
    """
    if img.mode not in ("RGB", "RGBA"):
        raise ValueError("Input image must be RGB/RGBA.")
    hed_arr = rgb_to_hed(img)
    zeros = np.zeros_like(hed_arr[:, :, 0])
    # Eosin lives in channel 1 of the HED decomposition; blank the other two.
    eosin_only = np.stack((zeros, hed_arr[:, :, 1], zeros), axis=-1)
    return hed_to_rgb(eosin_only)
def green_pen_filter(img: PIL.Image.Image) -> PIL.Image.Image:
    """Filter out green pen marks from a diagnostic slide.

    The final mask is the intersection (logical AND) of several green filters,
    each run with a different triple of RGB channel thresholds.

    Parameters
    ---------
    img : PIL.Image.Image
        Input RGB image

    Returns
    -------
    PIL.Image.Image
        Input image with the green pen marks filtered out.
    """
    threshold_sets = [
        {"red_thresh": 150, "green_thresh": 160, "blue_thresh": 140},
        {"red_thresh": 70, "green_thresh": 110, "blue_thresh": 110},
        {"red_thresh": 45, "green_thresh": 115, "blue_thresh": 100},
        {"red_thresh": 30, "green_thresh": 75, "blue_thresh": 60},
        {"red_thresh": 195, "green_thresh": 220, "blue_thresh": 210},
        {"red_thresh": 225, "green_thresh": 230, "blue_thresh": 225},
        {"red_thresh": 170, "green_thresh": 210, "blue_thresh": 200},
        {"red_thresh": 20, "green_thresh": 30, "blue_thresh": 20},
        {"red_thresh": 50, "green_thresh": 60, "blue_thresh": 40},
        {"red_thresh": 30, "green_thresh": 50, "blue_thresh": 35},
        {"red_thresh": 65, "green_thresh": 70, "blue_thresh": 60},
        {"red_thresh": 100, "green_thresh": 110, "blue_thresh": 105},
        {"red_thresh": 165, "green_thresh": 180, "blue_thresh": 180},
        {"red_thresh": 140, "green_thresh": 140, "blue_thresh": 150},
        {"red_thresh": 185, "green_thresh": 195, "blue_thresh": 195},
    ]
    # Accumulate the AND of all per-threshold masks instead of using reduce().
    combined_mask = None
    for thresholds in threshold_sets:
        mask = green_filter(img, **thresholds)
        combined_mask = mask if combined_mask is None else combined_mask & mask
    return apply_mask_image(img, combined_mask)
def hematoxylin_channel(img: PIL.Image.Image) -> PIL.Image.Image:
    """Separate the Hematoxylin stain from an RGB image.

    The image is converted to the HED space and only the Hematoxylin channel
    is kept before converting back to RGB (color deconvolution).

    Parameters
    ----------
    img : PIL.Image.Image
        Input RGB image

    Returns
    -------
    PIL.Image.Image
        RGB image with Hematoxylin staining separated.

    Raises
    ------
    ValueError
        If ``img`` mode is neither RGB nor RGBA.
    """
    if img.mode != "RGB" and img.mode != "RGBA":
        raise ValueError("Input image must be RGB/RGBA.")
    hed_arr = rgb_to_hed(img)
    empty = np.zeros_like(hed_arr[:, :, 0])
    # Keep only the Hematoxylin plane (index 0); zero out Eosin and DAB.
    hematoxylin_only = np.stack((hed_arr[:, :, 0], empty, empty), axis=-1)
    return hed_to_rgb(hematoxylin_only)
def histogram_equalization(img: PIL.Image.Image, nbins: int = 256) -> PIL.Image.Image:
    """Increase image contrast using histogram equalization.

    The input image (gray or RGB) is filtered using histogram equalization
    to increase contrast. Note that for RGB input the flattened histogram
    is equalized jointly across all channels.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image.
    nbins : int, optional
        Number of histogram bins. Default is 256.

    Returns
    -------
    PIL.Image.Image
        Image with contrast enhanced by histogram equalization.
    """
    arr = np.array(img)
    # Equalize over the flattened array, then restore the original shape.
    equalized = sk_exposure.equalize_hist(arr.flatten(), nbins=nbins).reshape(arr.shape)
    return np_to_pil(equalized)
def hysteresis_threshold(
    img: PIL.Image.Image, low: int = 50, high: int = 100
) -> PIL.Image.Image:
    """Apply two-level (hysteresis) threshold to an image.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image
    low : int, optional
        low threshold. Default is 50.
    high : int, optional
        high threshold. Default is 100.

    Returns
    -------
    PIL.Image.Image
        Image with the hysteresis threshold applied

    Raises
    ------
    ValueError
        If either threshold is None.
    """
    if None in (low, high):
        raise ValueError("thresholds cannot be None")
    mask = sk_filters.apply_hysteresis_threshold(np.array(img), low, high)
    return apply_mask_image(img, mask)
def invert(img: PIL.Image.Image) -> PIL.Image.Image:
    """Invert an image, i.e. take the complement of the correspondent array.

    For RGBA input only the color bands are inverted; the alpha band is
    carried over unchanged.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image

    Returns
    -------
    PIL.Image.Image
        Inverted image
    """
    if img.mode != "RGBA":
        return PIL.ImageOps.invert(img)
    # ImageOps.invert does not support RGBA: invert RGB, re-attach alpha.
    red, green, blue, alpha = img.split()
    inverted_rgb = PIL.ImageOps.invert(PIL.Image.merge("RGB", (red, green, blue)))
    red, green, blue = inverted_rgb.split()
    return PIL.Image.merge("RGBA", (red, green, blue, alpha))
def kmeans_segmentation(
    img: PIL.Image.Image, n_segments: int = 800, compactness: float = 10.0
) -> PIL.Image.Image:
    """Segment an image with K-means segmentation.

    Each SLIC segment (color/space proximity clustering) is recolored with
    the average color of that segment.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image
    n_segments : int, optional
        The number of segments. Default is 800.
    compactness : float, optional
        Color proximity versus space proximity factor. Default is 10.0.

    Returns
    -------
    PIL.Image.Image
        RGB image where each segment has been colored based on the average
        color for that segment.

    Raises
    ------
    ValueError
        If ``img`` mode is RGBA.
    """
    if img.mode == "RGBA":
        raise ValueError("Input image cannot be RGBA")
    arr = np.array(img)
    segment_labels = sk_segmentation.slic(arr, n_segments, compactness, start_label=0)
    recolored = sk_color.label2rgb(segment_labels, arr, kind="avg", bg_label=-1)
    return np_to_pil(recolored)
def lab_to_rgb(
    img: PIL.Image.Image, illuminant: str = "D65", observer: str = "2"
) -> PIL.Image.Image:
    """Lab to RGB color space conversion.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image in Lab space.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        The name of the illuminant (the function is NOT case sensitive).
        Default is "D65".
    observer : {"2", "10"}, optional
        The aperture angle of the observer. Default is "2".
        NOTE: the annotation previously said ``int`` while the default and
        skimage's accepted values are strings; fixed to ``str``.

    Returns
    -------
    PIL.Image.Image
        Image in RGB space.
    """
    img_arr = np.array(img)
    rgb_arr = sk_color.lab2rgb(img_arr, illuminant=illuminant, observer=observer)
    return np_to_pil(rgb_arr)
def local_equalization(img: PIL.Image.Image, disk_size: int = 50) -> PIL.Image.Image:
    """Filter gray image using local equalization.

    Local equalization method uses local histograms based on a disk
    structuring element.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image. Notice that it must be 2D
    disk_size : int, optional
        Radius of the disk structuring element used for the local histograms.
        Default is 50.

    Returns
    -------
    PIL.Image.Image
        2D image with contrast enhanced using local equalization.

    Raises
    ------
    ValueError
        If the input image is not two-dimensional.
    """
    arr = np.array(img)
    if arr.ndim != 2:
        raise ValueError("Input must be 2D.")
    equalized = sk_filters.rank.equalize(arr, selem=sk_morphology.disk(disk_size))
    return np_to_pil(equalized)
def local_otsu_threshold(
img: PIL.Image.Image, disk_size: float = 3.0
) -> | |
<gh_stars>0
import inspect as insp
import dask
import numpy as np
from edt import edt
import operator as op
import scipy.ndimage as spim
from skimage.morphology import reconstruction
from skimage.segmentation import clear_border
from skimage.morphology import ball, disk, square, cube, diamond, octahedron
from porespy.tools import _check_for_singleton_axes
from porespy.tools import get_border, subdivide, recombine
from porespy.tools import unpad, extract_subsection
from porespy.tools import ps_disk, ps_ball
from porespy import settings
from porespy.tools import get_tqdm
from loguru import logger
tqdm = get_tqdm()
def apply_padded(im, pad_width, func, pad_val=1, **kwargs):
    r"""
    Applies padding to an image before sending to ``func``, then extracts
    the result corresponding to the original image shape.

    Parameters
    ----------
    im : ndarray
        The image to which ``func`` should be applied
    pad_width : int or list of ints
        The amount of padding to apply to each axis. Refer to
        ``numpy.pad`` documentation for more details.
    pad_val : scalar
        The value to place into the padded voxels. The default is 1 (or
        ``True``) which extends the pore space.
    func : function handle
        The function to apply to the padded image.
    kwargs
        Additional keyword arguments are collected and passed to ``func``.

    Notes
    -----
    A use case for this is when using ``skimage.morphology.skeletonize_3d``
    to ensure that the skeleton extends beyond the edges of the image.

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/apply_padded.html>`_
    to view online example.
    """
    padded_im = np.pad(
        im, pad_width=pad_width, mode='constant', constant_values=pad_val
    )
    processed = func(padded_im, **kwargs)
    # Strip the padding again so the output matches the input shape.
    return unpad(im=processed, pad_width=pad_width)
def trim_small_clusters(im, size=1):
    r"""
    Remove isolated voxels or clusters of a given size or smaller

    Parameters
    ----------
    im : ndarray
        The binary image from which voxels are to be removed.
    size : scalar
        The threshold size of clusters to trim. As clusters with this
        many voxels or fewer will be trimmed. The default is 1 so only
        single voxels are removed.

    Returns
    -------
    im : ndarray
        A copy of ``im`` with clusters of voxels smaller than the given
        ``size`` removed.

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/trim_small_clusters.html>`_
    to view online example.
    """
    # Pick the structuring element matching the image dimensionality.
    try:
        strel = {2: disk, 3: ball}[im.ndim](1)
    except KeyError:
        raise Exception("Only 2D or 3D images are accepted")
    result = np.copy(im)
    labels, n_labels = spim.label(result, structure=strel)
    # Cluster sizes indexed by label (index 0 is the background).
    cluster_sizes = np.array(spim.sum(im, labels, range(n_labels + 1)))
    too_small = cluster_sizes <= size
    result[too_small[labels]] = 0
    return result
def hold_peaks(im, axis=-1, ascending=True):
    r"""
    Replaces each voxel with the highest value along the given axis.

    Parameters
    ----------
    im : ndarray
        A greyscale image whose peaks are to be found.
    axis : int
        The axis along which the operation is to be applied.
    ascending : bool
        If ``True`` (default) the given ``axis`` is scanned from 0 to end.
        If ``False``, it is scanned in reverse order from end to 0.

    Returns
    -------
    result : ndarray
        A copy of ``im`` with each voxel is replaced with the highest value
        along the given axis.

    Notes
    -----
    "im" must be a greyscale image. In case a Boolean image is fed into this
    method, it will be converted to float values [0.0, 1.0] before proceeding.

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/hold_peaks.html>`_
    to view online example.
    """
    values = im.astype(float)
    # Work with the axis of interest moved to the last position.
    scanned = np.swapaxes(values, axis, -1)
    if ascending is False:
        # Reverse the scan direction by flipping the working axis.
        scanned = np.flip(scanned, axis=-1)
    # First differences along the working axis, with -1 sentinels at both
    # ends so every run has a detectable rise and fall.
    deltas = np.empty((*scanned.shape[:-1], scanned.shape[-1] + 1), scanned.dtype)
    deltas[..., 0] = -1
    deltas[..., -1] = -1
    np.subtract(scanned[..., 1:], scanned[..., :-1], out=deltas[..., 1:-1])
    change_idx = np.where(deltas)
    changes = deltas[change_idx]
    # A peak is a rise followed by a fall; positions at the start of a scan
    # line (last index 0) are always treated as peaks.
    (peak_pos,) = np.where(
        (changes[:-1] > 0) & (changes[1:] < 0) | (change_idx[-1][:-1] == 0)
    )
    peak_idx = tuple(arr[peak_pos] for arr in change_idx)
    # Scatter peak-to-peak differences, then cumulative-sum to "hold" each
    # peak value until the next one.
    filled = np.zeros_like(values)
    view = filled.swapaxes(axis, -1)
    view[tuple(arr[1:] for arr in peak_idx)] = np.diff(scanned[peak_idx])
    view[..., 0] = scanned[..., 0]
    held = filled.cumsum(axis=axis)
    if ascending is False:
        # NOTE(review): this flips the global last axis, not `axis`, which
        # looks suspicious when axis != -1 — confirm upstream before changing.
        held = np.flip(held, axis=-1)
    return held
def distance_transform_lin(im, axis=0, mode="both"):
    r"""
    Replaces each void voxel with the linear distance to the nearest solid
    voxel along the specified axis.

    Parameters
    ----------
    im : ndarray
        The image of the porous material with ``True`` values indicating
        the void phase (or phase of interest).
    axis : int
        The direction along which the distance should be measured, the
        default is 0 (i.e. along the x-direction).
    mode : str
        Controls how the distance is measured. Options are:

        'forward'
            Distances are measured in the increasing direction
            along the specified axis
        'reverse'
            Distances are measured in the reverse direction.
            'backward' is also accepted.
        'both'
            Distances are calculated in both directions (by
            recursively calling itself), then reporting the minimum value
            of the two results.

    Returns
    -------
    image : ndarray
        A copy of ``im`` with each foreground voxel containing the
        distance to the nearest background along the specified axis.

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/distance_transform_lin.html>`_
    to view online example.
    """
    _check_for_singleton_axes(im)
    if mode in ["backward", "reverse"]:
        # A reverse scan is a forward scan over the flipped image,
        # flipped back afterwards.
        im = np.flip(im, axis)
        im = distance_transform_lin(im=im, axis=axis, mode="forward")
        im = np.flip(im, axis)
        return im
    elif mode in ["both"]:
        # Minimum of the two one-directional transforms.
        im_f = distance_transform_lin(im=im, axis=axis, mode="forward")
        im_b = distance_transform_lin(im=im, axis=axis, mode="backward")
        return np.minimum(im_f, im_b)
    # Forward mode: running count of foreground voxels along `axis`.
    b = np.cumsum(im > 0, axis=axis)
    # Changes of the count masked to background voxels; its running minimum
    # (negative) encodes the offset back to the last background position.
    c = np.diff(b * (im == 0), axis=axis)
    d = np.minimum.accumulate(c, axis=axis)
    # np.diff shortened the array by one along `axis`; re-pad with a single
    # leading zero on that axis. The `ax` tables select which axis of the
    # pad_width spec receives the [1, 0] padding.
    if im.ndim == 1:
        e = np.pad(d, pad_width=[1, 0], mode="constant", constant_values=0)
    elif im.ndim == 2:
        ax = [[[1, 0], [0, 0]], [[0, 0], [1, 0]]]
        e = np.pad(d, pad_width=ax[axis], mode="constant", constant_values=0)
    elif im.ndim == 3:
        ax = [
            [[1, 0], [0, 0], [0, 0]],
            [[0, 0], [1, 0], [0, 0]],
            [[0, 0], [0, 0], [1, 0]],
        ]
        e = np.pad(d, pad_width=ax[axis], mode="constant", constant_values=0)
    # Mask the accumulated distances back onto the foreground phase.
    f = im * (b + e)
    return f
def find_disconnected_voxels(im, conn=None, surface=False):
    r"""
    Identifies all voxels that are not connected to the edge of the image.

    Parameters
    ----------
    im : ndarray
        A Boolean image, with ``True`` values indicating the phase for which
        disconnected voxels are sought.
    conn : int
        For 2D the options are 4 and 8 for square and diagonal neighbors,
        while for the 3D the options are 6 and 26, similarily for square
        and diagonal neighbors. The default is the maximum option.
    surface : bool
        If ``True`` any isolated regions touching the edge of the image are
        considered disconnected.

    Returns
    -------
    image : ndarray
        An ndarray the same size as ``im``, with ``True`` values indicating
        voxels of the phase of interest (i.e. ``True`` values in the original
        image) that are not connected to the outer edges.

    See Also
    --------
    fill_blind_pores, trim_floating_solid

    Notes
    -----
    This function is just a convenient wrapper around the ``clear_border``
    function of ``scikit-image``.

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/find_disconnected_voxels.html>`_
    to view online example.
    """
    _check_for_singleton_axes(im)
    # Translate the connectivity argument into a structuring element.
    if im.ndim == 2:
        if conn == 4:
            strel = disk(1)
        elif conn is None or conn == 8:
            strel = square(3)
        else:
            raise Exception("Received conn is not valid")
    elif im.ndim == 3:
        if conn == 6:
            strel = ball(1)
        elif conn is None or conn == 26:
            strel = cube(3)
        else:
            raise Exception("Received conn is not valid")
    labels, N = spim.label(input=im, structure=strel)
    if surface:
        # Keep only the largest region; everything else is "disconnected".
        counts = np.bincount(labels.flatten())[1:]
        keep = np.where(counts == counts.max())[0] + 1
        holes = (labels != keep) * im
    else:
        holes = clear_border(labels=labels) > 0
    return holes
def fill_blind_pores(im, conn=None, surface=False):
    r"""
    Fills all blind pores that are isolated from the main void space.

    Parameters
    ----------
    im : ndarray
        The image of the porous material
    conn : int
        For 2D the options are 4 and 8 for square and diagonal neighbors,
        while for the 3D the options are 6 and 26, similarily for square
        and diagonal neighbors. The default is the maximum option.
    surface : bool
        If ``True``, any isolated pore regions that are connected to the
        sufaces of the image are also removed. When this is enabled, only
        the voxels belonging to the largest region are kept. This can be
        problematic if image contains non-intersecting tube-like structures,
        for instance, since only the largest tube will be preserved.

    Returns
    -------
    im : ndarray
        A version of ``im`` but with all the disconnected pores removed.

    See Also
    --------
    find_disconnected_voxels
    trim_nonpercolating_paths

    Examples
    --------
    `Click here
    <https://porespy.org/examples/filters/reference/fill_blind_pores.html>`_
    to view online example.
    """
    result = np.copy(im)
    # Locate isolated void clusters and turn them into solid.
    isolated = find_disconnected_voxels(result, conn=conn, surface=surface)
    result[isolated] = False
    return result
def trim_floating_solid(im, conn=None, surface=False):
r"""
Removes all solid that that is not attached to main solid | |
self . packet [ 0 : 60 ] ) ) )
return
if 60 - 60: i11iIiiIii . O0 * iIii1I11I1II1 * OoOoOO00
if 99 - 99: iIii1I11I1II1 - oO0o - OoOoOO00 / iIii1I11I1II1 * Oo0Ooo - oO0o
if ( s_or_r . find ( "Receive" ) != - 1 ) :
o0ooo0oooO = "decap"
o0ooo0oooO += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
else :
o0ooo0oooO = s_or_r
if ( o0ooo0oooO in [ "Send" , "Replicate" ] or o0ooo0oooO . find ( "Fragment" ) != - 1 ) :
o0ooo0oooO = "encap"
if 89 - 89: i1IIi
if 19 - 19: ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
iI1i1Iiii = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
self . outer_dest . print_address_no_iid ( ) )
if 15 - 15: Ii1I
if 17 - 17: OoOoOO00 - I1IiiI
if 63 - 63: OoOoOO00 - oO0o / iIii1I11I1II1 - Ii1I / I1Ii111
if 34 - 34: iII111i / o0oOOo0O0Ooo + OOooOOo - o0oOOo0O0Ooo + Oo0Ooo . oO0o
if 97 - 97: i1IIi
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
oooOo = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
if 46 - 46: I1ii11iIi11i
oooOo += bold ( "control-packet" , False ) + ": {} ..."
if 30 - 30: OoO0O00 / O0 * o0oOOo0O0Ooo * I1Ii111 + OoooooooOO * iII111i
dprint ( oooOo . format ( bold ( s_or_r , False ) , red ( iI1i1Iiii , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport ,
self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
return
else :
oooOo = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
if 23 - 23: I11i
if 36 - 36: IiII . iII111i - i1IIi + I1Ii111
if 54 - 54: OoooooooOO . oO0o - iII111i
if 76 - 76: I1Ii111
if ( self . lisp_header . k_bits ) :
if ( o0ooo0oooO == "encap" ) : o0ooo0oooO = "encrypt/encap"
if ( o0ooo0oooO == "decap" ) : o0ooo0oooO = "decap/decrypt"
if 61 - 61: ooOoO0o / II111iiii * ooOoO0o * OoOoOO00 * I1Ii111 . i11iIiiIii
if 26 - 26: I1Ii111 / ooOoO0o - OoO0O00 . iIii1I11I1II1
I1i11 = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
self . inner_dest . print_address ( ) )
if 83 - 83: ooOoO0o % Ii1I / Oo0Ooo - iII111i / O0
dprint ( oooOo . format ( bold ( s_or_r , False ) , red ( iI1i1Iiii , False ) ,
self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
green ( I1i11 , False ) , self . inner_tos , self . inner_ttl ,
len ( self . packet ) , self . lisp_header . print_header ( o0ooo0oooO ) ,
lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
if 97 - 97: iIii1I11I1II1 * I11i
if 95 - 95: OoO0O00
def print_eid_tuple(self):
    """Return a printable string for this packet's inner source/dest EID pair."""
    return lisp_print_eid_tuple(self.inner_source, self.inner_dest)
def get_raw_socket(self):
    """
    Return the raw send socket for this packet's instance-id, or None.

    None is returned when the instance-id is 0, when no interface is
    registered for the instance-id, or when the interface has no socket
    and LISP_ENFORCE_BINDTODEVICE is set in the environment.
    """
    iid = str(self.lisp_header.get_instance_id())
    if iid == "0":
        return None
    if iid not in lisp_iid_to_interface:
        return None

    interface = lisp_iid_to_interface[iid]
    raw_socket = interface.get_socket()
    if raw_socket is None:
        so_bind = bold("SO_BINDTODEVICE", False)
        enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") is not None)
        lprint("{} required for multi-tenancy support, {} packet".format(
            so_bind, "drop" if enforce else "forward"))
        if enforce:
            return None

    iid_str = bold(iid, False)
    device_str = bold(interface.device, False)
    dprint("Send packet on instance-id {} interface {}".format(
        iid_str, device_str))
    return raw_socket
def log_flow(self, encap):
    """
    Append this packet to the in-memory flow log.

    When the log reaches LISP_FLOW_LOG_SIZE entries, or a "./log-flows"
    marker file exists, the accumulated entries are handed to a background
    thread that writes them out, and the in-memory log is reset.
    """
    global lisp_flow_log

    flush_requested = os.path.exists("./log-flows")
    if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or flush_requested):
        batch = [lisp_flow_log]
        lisp_flow_log = []
        threading.Thread(target=lisp_write_flow_log, args=batch).start()
        if flush_requested:
            os.system("rm ./log-flows")
        return

    timestamp = datetime.datetime.now()
    lisp_flow_log.append([timestamp, encap, self.packet, self])
def print_flow ( self , ts , encap , packet ) :
ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
o00oOo0OoO0oO = "{}: {}" . format ( ts , "encap" if encap else "decap" )
if 84 - 84: i1IIi / i1IIi - i1IIi . oO0o . OoO0O00 * I1ii11iIi11i
oOO000000oO0 = red ( self . outer_source . print_address_no_iid ( ) , False )
o0o00oo0OOo0O00OO0O = red ( self . outer_dest . print_address_no_iid ( ) , False )
oOO0oOOoO = green ( self . inner_source . print_address ( ) , False )
oo0O000O00 = green ( self . inner_dest . print_address ( ) , False )
if 99 - 99: o0oOOo0O0Ooo + OOooOOo
if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
o00oOo0OoO0oO += " {}:{} -> {}:{}, LISP control message type {}\n"
o00oOo0OoO0oO = o00oOo0OoO0oO . format ( oOO000000oO0 , self . udp_sport , o0o00oo0OOo0O00OO0O , self . udp_dport ,
self . inner_version )
return ( o00oOo0OoO0oO )
if 34 - 34: I1Ii111 * o0oOOo0O0Ooo . I1IiiI % i11iIiiIii
if 61 - 61: iIii1I11I1II1 + oO0o * I11i - i1IIi % oO0o
if ( self . outer_dest . is_null ( ) == False ) :
o00oOo0OoO0oO += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
o00oOo0OoO0oO = o00oOo0OoO0oO . format ( oOO000000oO0 , self . udp_sport , o0o00oo0OOo0O00OO0O , self . udp_dport ,
len ( packet ) , self . outer_tos , self . outer_ttl )
if 76 - 76: oO0o / OoOoOO00
if 12 - 12: I1Ii111
if 58 - 58: OoO0O00 + iIii1I11I1II1 % O0 + I11i + OoOoOO00 * OoooooooOO
if 41 - 41: oO0o * I1IiiI
if 76 - 76: oO0o . O0 * OoooooooOO + ooOoO0o
if ( self . lisp_header . k_bits != 0 ) :
oo0O00 = "\n"
if ( self . packet_error != "" ) :
oo0O00 = " ({})" . format ( self . packet_error ) | |
we find non-null data for each column in `sample`
sample = [col for col in df.columns if df[col].dtype == "object"]
if sample and schema == "infer":
delayed_schema_from_pandas = delayed(pa.Schema.from_pandas)
for i in range(df.npartitions):
# Keep data on worker
_s = delayed_schema_from_pandas(
df[sample].to_delayed()[i]
).compute()
for name, typ in zip(_s.names, _s.types):
if typ != "null":
i = _schema.get_field_index(name)
j = _s.get_field_index(name)
_schema = _schema.set(i, _s.field(j))
sample.remove(name)
if not sample:
break
# Final (inferred) schema
schema = _schema
# Check that target directory exists
fs.mkdirs(path, exist_ok=True)
if append and division_info is None:
ignore_divisions = True
# Extract metadata and get file offset if appending
fmd, i_offset, append = cls._get_dataset_offset(
path, fs, append, ignore_divisions
)
# Inspect the intial metadata if appending
if append:
arrow_schema = fmd.schema.to_arrow_schema()
names = arrow_schema.names
has_pandas_metadata = (
arrow_schema.metadata is not None and b"pandas" in arrow_schema.metadata
)
if has_pandas_metadata:
pandas_metadata = json.loads(
arrow_schema.metadata[b"pandas"].decode("utf8")
)
categories = [
c["name"]
for c in pandas_metadata["columns"]
if c["pandas_type"] == "categorical"
]
else:
categories = None
dtypes = _get_pyarrow_dtypes(arrow_schema, categories)
if set(names) != set(df.columns) - set(partition_on):
raise ValueError(
"Appended columns not the same.\n"
"Previous: {} | New: {}".format(names, list(df.columns))
)
elif (pd.Series(dtypes).loc[names] != df[names].dtypes).any():
# TODO Coerce values for compatible but different dtypes
raise ValueError(
"Appended dtypes differ.\n{}".format(
set(dtypes.items()) ^ set(df.dtypes.iteritems())
)
)
# Check divisions if necessary
if division_info["name"] not in names:
ignore_divisions = True
if not ignore_divisions:
old_end = None
row_groups = [fmd.row_group(i) for i in range(fmd.num_row_groups)]
for row_group in row_groups:
for i, name in enumerate(names):
if name != division_info["name"]:
continue
column = row_group.column(i)
if column.statistics:
if not old_end:
old_end = column.statistics.max
else:
old_end = max(old_end, column.statistics.max)
break
divisions = division_info["divisions"]
if divisions[0] < old_end:
raise ValueError(
"Appended divisions overlapping with the previous ones"
" (set ignore_divisions=True to append anyway).\n"
"Previous: {} | New: {}".format(old_end, divisions[0])
)
return fmd, schema, i_offset
@classmethod
def _pandas_to_arrow_table(
    cls, df: pd.DataFrame, preserve_index=False, schema=None
) -> pa.Table:
    """Convert a pandas DataFrame to a single-threaded pyarrow Table.

    ``nthreads=1`` keeps the conversion on the calling thread so the
    surrounding scheduler controls parallelism.
    """
    return pa.Table.from_pandas(
        df, nthreads=1, preserve_index=preserve_index, schema=schema
    )
@classmethod
def write_partition(
    cls,
    df,
    path,
    fs,
    filename,
    partition_on,
    return_metadata,
    fmd=None,
    compression=None,
    index_cols=None,
    schema=None,
    head=False,
    custom_metadata=None,
    **kwargs,
):
    """Write a single DataFrame partition to parquet.

    Converts ``df`` to a pyarrow Table and writes it either as one file
    (``<path>/<filename>``) or, when ``partition_on`` is set, as a
    hive-partitioned directory tree. Returns collected row-group metadata
    when ``return_metadata`` is True, otherwise an empty list.

    NOTE(review): ``fmd`` is accepted but never referenced in this body —
    presumably kept for engine-interface compatibility; confirm.
    """
    _meta = None
    preserve_index = False
    # Only move index_cols into the index if the schema expects them.
    if _index_in_schema(index_cols, schema):
        df.set_index(index_cols, inplace=True)
        preserve_index = True
    else:
        index_cols = []
    t = cls._pandas_to_arrow_table(df, preserve_index=preserve_index, schema=schema)
    if custom_metadata:
        # Merge user-supplied key/value metadata into the table schema.
        _md = t.schema.metadata
        _md.update(custom_metadata)
        t = t.replace_schema_metadata(metadata=_md)
    if partition_on:
        # Hive-style partitioned write; one metadata object per leaf file.
        md_list = _write_partitioned(
            t,
            path,
            filename,
            partition_on,
            fs,
            index_cols=index_cols,
            compression=compression,
            **kwargs,
        )
        if md_list:
            # Fold all per-file metadata into a single object.
            _meta = md_list[0]
            for i in range(1, len(md_list)):
                _append_row_groups(_meta, md_list[i])
    else:
        md_list = []
        with fs.open(fs.sep.join([path, filename]), "wb") as fil:
            pq.write_table(
                t,
                fil,
                compression=compression,
                metadata_collector=md_list,
                **kwargs,
            )
        if md_list:
            _meta = md_list[0]
            # Record the relative file path inside the collected metadata.
            _meta.set_file_path(filename)
    # Return the schema needed to write the metadata
    if return_metadata:
        d = {"meta": _meta}
        if head:
            # Only return schema if this is the "head" partition
            d["schema"] = t.schema
        return [d]
    else:
        return []
@staticmethod
def write_metadata(parts, fmd, fs, path, append=False, **kwargs):
    """Write the global ``_metadata`` and ``_common_metadata`` files.

    ``parts`` is the list of per-partition results produced by
    ``write_partition``; entries without collected metadata are skipped.
    On append, aggregation starts from the pre-existing ``fmd``.
    """
    schema = parts[0][0].get("schema", None)
    parts = [p for p in parts if p[0]["meta"] is not None]
    if not parts:
        return

    if not append:
        # Write _common_metadata, forwarding only kwargs that
        # pq.write_metadata actually accepts.
        common_metadata_path = fs.sep.join([path, "_common_metadata"])
        keywords = getargspec(pq.write_metadata).args
        kwargs_meta = {k: v for k, v in kwargs.items() if k in keywords}
        with fs.open(common_metadata_path, "wb") as fil:
            pq.write_metadata(schema, fil, **kwargs_meta)

    # Aggregate all row-group metadata into one object and write _metadata.
    metadata_path = fs.sep.join([path, "_metadata"])
    if append and fmd is not None:
        aggregated = fmd
        remaining = parts
    else:
        aggregated = parts[0][0]["meta"]
        remaining = parts[1:]
    for part in remaining:
        _append_row_groups(aggregated, part[0]["meta"])
    with fs.open(metadata_path, "wb") as fil:
        aggregated.write_metadata_file(fil)
#
# Private Class Methods
#
@classmethod
def _gather_metadata(
    cls,
    paths,
    fs,
    split_row_groups,
    gather_statistics,
    filters,
    index,
    dataset_kwargs,
):
    """pyarrow.dataset version of _gather_metadata

    Use pyarrow.dataset API to collect list of row-group fragments.
    Also, collect other information necessary for parquet-to-ddf
    mapping (e.g. schema, partition_info).

    Returns a 6-tuple of (schema, metadata, base, partition_info,
    split_row_groups, gather_statistics), where ``metadata`` is the
    list of (fragment, row_group_info) tuples.

    This method is overridden in `ArrowLegacyEngine`.
    """
    # Use pyarrow.dataset API
    ds = None
    valid_paths = None  # Only used if `paths` is a list containing _metadata

    # Discover Partitioning - Note that we need to avoid creating
    # this factory until it is actually used. The `partitioning`
    # object can be overridden if a "partitioning" kwarg is passed
    # in, containing a `dict` with a required "obj" argument and
    # optional "arg" and "kwarg" elements. Note that the "obj"
    # value must support the "discover" attribute.
    partitioning = dataset_kwargs.get(
        "partitioning",
        {"obj": pa_ds.HivePartitioning},
    )

    if len(paths) == 1 and fs.isdir(paths[0]):
        # Case: a single directory path was given.
        # Use _analyze_paths to avoid relative-path
        # problems (see GH#5608)
        paths, base, fns = _sort_and_analyze_paths(paths, fs)
        paths = fs.sep.join([base, fns[0]])
        meta_path = fs.sep.join([paths, "_metadata"])
        if fs.exists(meta_path):
            # Use _metadata file
            ds = pa_ds.parquet_dataset(
                meta_path,
                filesystem=fs,
                partitioning=partitioning["obj"].discover(
                    *partitioning.get("args", []),
                    **partitioning.get("kwargs", {}),
                ),
            )
            # A global _metadata file makes statistics cheap to gather.
            if gather_statistics is None:
                gather_statistics = True
    elif len(paths) > 1:
        # Case: an explicit list of file paths was given.
        paths, base, fns = _sort_and_analyze_paths(paths, fs)
        meta_path = fs.sep.join([base, "_metadata"])
        if "_metadata" in fns:
            # Pyarrow cannot handle "_metadata" when `paths` is a list
            # Use _metadata file
            ds = pa_ds.parquet_dataset(
                meta_path,
                filesystem=fs,
                partitioning=partitioning["obj"].discover(
                    *partitioning.get("args", []),
                    **partitioning.get("kwargs", {}),
                ),
            )
            if gather_statistics is None:
                gather_statistics = True

            # Populate valid_paths, since the original path list
            # must be used to filter the _metadata-based dataset
            fns.remove("_metadata")
            valid_paths = fns

    if ds is None:
        # Fallback: no usable _metadata file; let pyarrow discover the
        # dataset directly from the path(s).
        ds = pa_ds.dataset(
            paths,
            filesystem=fs,
            format="parquet",
            partitioning=partitioning["obj"].discover(
                *partitioning.get("args", []),
                **partitioning.get("kwargs", {}),
            ),
        )

    schema = ds.schema
    base = ""

    # At this point, we know if `split_row_groups` should be
    # set to `True` by default. If the user has not specified
    # this option, we will only collect statistics if there is
    # a global "_metadata" file available, otherwise we will
    # opt for `gather_statistics=False`. For `ArrowDatasetEngine`,
    # statistics are only required to calculate divisions
    # and/or aggregate row-groups using `chunksize` (not for
    # filtering).
    #
    # By default, we will create an output partition for each
    # row group in the dataset (`split_row_groups=True`).
    # However, we will NOT split by row-group if
    # `gather_statistics=False`, because this can be
    # interpreted as an indication that metadata overhead should
    # be avoided at all costs.
    if gather_statistics is None:
        gather_statistics = False
    if split_row_groups is None:
        if gather_statistics:
            split_row_groups = True
        else:
            split_row_groups = False

    # Generate list of (fragment, row_group_info) tuples
    # and call it `metadata`
    metadata, partition_info = _collect_pyarrow_dataset_frags(
        ds,
        filters,
        valid_paths,
        fs,
        split_row_groups,
        gather_statistics,
    )

    # Store dict needed to produce a `partitioning`
    # factory at IO time. This object is needed to
    # reproduce a `fragment` (for row-wise filtering)
    # on the worker.
    partition_info["partitioning"] = partitioning

    return (
        schema,
        metadata,
        base,
        partition_info,
        split_row_groups,
        gather_statistics,
    )
@classmethod
def _generate_dd_meta(cls, schema, index, categories, partition_info):
"""Use parquet metadata to construct DataFrame metadata.
This method is used by both `ArrowDatasetEngine`
and `ArrowLegacyEngine`.
"""
partition_obj = partition_info["partitions"]
partitions = partition_info["partition_names"]
columns = None
pandas_metadata = _get_pandas_metadata(schema)
if pandas_metadata:
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(pandas_metadata)
if categories is None:
categories = []
for col in pandas_metadata["columns"]:
if (col["pandas_type"] == "categorical") and (
col["name"] not in categories
):
categories.append(col["name"])
else:
# No pandas metadata implies no index, unless selected by the user
index_names = []
column_names = schema.names
storage_name_mapping = {k: k for k in column_names}
column_index_names = [None]
if index is None and index_names:
index = index_names
# Ensure that there is no overlap between partition columns
# and explicit column storage
if partitions:
_partitions = [p for p in partitions if p not in column_names]
if not _partitions:
partitions = []
partition_info["partitions"] = None
partition_info["partition_keys"] = {}
partition_info["partition_names"] = partitions
elif len(_partitions) != len(partitions):
raise ValueError(
"No partition-columns should be written in the \n"
"file unless they are ALL written in the file.\n"
"columns: {} | partitions: {}".format(column_names, partitions)
)
column_names, index_names = _normalize_index_columns(
columns, column_names + partitions, index, index_names
)
all_columns = index_names + column_names
# Check that categories are included in columns
if categories and not set(categories).intersection(all_columns):
raise ValueError(
"categories not in available columns.\n"
"categories: {} | columns: {}".format(categories, list(all_columns))
)
dtypes = _get_pyarrow_dtypes(schema, categories)
dtypes = | |
a pipeline, all commands to this instance will be
pipelined.
:param pipe: optional Pipeline or NestedPipeline
"""
self._pipe = pipe
@property
def pipe(self):
    """
    Get a fresh pipeline() to be used in a `with` block.

    :return: Pipeline or NestedPipeline with autoexec set to true.
    """
    fresh_pipe = autoexec(self._pipe)
    return fresh_pipe
def get(self, key):
    """
    Return the value of the key or None if the key doesn't exist

    :param key: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        return core.hget(self.shard(key), key)
def delete(self, key, *args):
    """
    Remove one or more keys, counting how many fields were deleted.

    :param key: str the first key to remove
    :param args: additional keys to remove
    :return: Future() resolving to the total number of deleted fields
    """
    keys = self._parse_values(key, args)
    response = Future()
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        pending = [core.hdel(self.shard(k), k) for k in keys]

        def on_done():
            # Sum the per-key delete counts once the pipeline executes.
            response.set(sum(pending))

        pipe.on_execute(on_done)
    return response
def set(self, name, value, nx=False):
    """
    Set the value at key ``name`` to ``value``

    ``nx`` if set to True, set the value at key ``name`` to ``value`` if it
    does not already exist.

    :param name: str the name of the redis key
    :param value: value to store
    :param nx: bool, only set when the key does not exist
    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        if nx:
            return core.hsetnx(self.shard(name), name, value)
        return core.hset(self.shard(name), name, value)
def setnx(self, name, value):
    """
    Set the value as a string in the key only if the key doesn't exist.

    :param name: str the name of the redis key
    :param value: value to store
    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        return core.hsetnx(self.shard(name), name, value)
def strlen(self, name):
    """
    Number of bytes stored in the value of the key.

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        return core.hstrlen(self.shard(name), name)
def incr(self, name, amount=1):
    """
    Increment the value for key by ``amount`` (default 1).

    :param name: str the name of the redis key
    :param amount: int
    :return: Future()
    """
    # Identical operation to incrby(); delegate so there is a single
    # implementation of the HINCRBY call.
    return self.incrby(name, amount=amount)
def incrby(self, name, amount=1):
    """
    Increment the integer stored at key by ``amount``.

    :param name: str the name of the redis key
    :param amount: int
    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        return core.hincrby(self.shard(name), name, amount=amount)
def incrbyfloat(self, name, amount=1.0):
    """
    Increment the float stored at key by ``amount``.

    :param name: str the name of the redis key
    :param amount: float
    :return: Future()
    """
    # Docstring fix: the default (1.0) and HINCRBYFLOAT take a float,
    # not an int as previously documented.
    with self.pipe as pipe:
        return self.core(pipe=pipe).hincrbyfloat(
            self.shard(name), name, amount=amount)
def __getitem__(self, name):
    """
    Dictionary-style element access: ``value = obj[name]``.

    :param name: str the name of the redis key
    :return: Future()
    """
    return self.get(name)
def __setitem__(self, name, value):
    """
    Dictionary-style assignment: ``obj[name] = value``.

    :param name: str the name of the redis key
    :param value: value to store
    """
    self.set(name, value)
def mget(self, keys, *args):
    """
    Fetch several keys at once; the resolved list preserves the order
    of ``keys``.

    :return: Future()
    """
    with self.pipe as pipe:
        core = self.core(pipe=pipe)
        wanted = list(self._parse_values(keys, args))
        # One HGET per key, remembered by key so order can be restored.
        pending = {k: core.hget(self.shard(k), k) for k in wanted}
        result = Future()

        def resolve():
            result.set([pending[k] for k in wanted])

        pipe.on_execute(resolve)
        return result
def scan_iter(self, match=None, count=None):
    """
    Make an iterator using the hscan command so that the client doesn't
    need to remember the cursor position.

    ``match`` allows for filtering the keys by pattern
    ``count`` allows for hint the minimum number of returns
    """
    core = self.core()
    # BUG FIX: range(0, self.shard_count - 1) stopped one short and never
    # scanned the last shard; iterate all indices 0..shard_count-1.
    for shard_index in range(self.shard_count):
        cursor = 0
        while True:
            cursor, elements = core.hscan(
                shard_index, cursor=cursor, match=match, count=count)
            if elements:
                for k, v in elements.items():
                    yield k, v
            # HSCAN signals completion by returning cursor 0.
            if cursor == 0:
                break
class Set(Keyspace):
"""
Manipulate a Set key in redis.
"""
def sdiff(self, keys, *args):
    """
    Difference of the sets specified by ``keys``, decoded to a set.

    :param keys: list
    :param args: tuple
    :return: Future()
    """
    redis_keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        raw = pipe.sdiff(*redis_keys)
        future = Future()

        def decode():
            future.set({self.valueparse.decode(v) for v in raw.result})

        pipe.on_execute(decode)
        return future
def sdiffstore(self, dest, *keys):
    """
    Store the difference of the sets specified by ``keys`` into a new
    set named ``dest``; returns the number of keys in the new set.
    """
    source_keys = [self.redis_key(k) for k in self._parse_values(keys)]
    with self.pipe as pipe:
        return pipe.sdiffstore(self.redis_key(dest), *source_keys)
def sinter(self, keys, *args):
    """
    Intersection of the sets specified by ``keys``, decoded to a set.

    :param keys: list or str
    :param args: tuple
    :return: Future
    """
    redis_keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        raw = pipe.sinter(*redis_keys)
        future = Future()

        def decode():
            future.set({self.valueparse.decode(v) for v in raw.result})

        pipe.on_execute(decode)
        return future
def sinterstore(self, dest, keys, *args):
    """
    Store the intersection of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of keys in the new set.
    """
    keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        # Consistency fix: unpack the key list like sdiffstore/sunionstore
        # do; previously the list itself was passed as a single argument.
        return pipe.sinterstore(self.redis_key(dest), *keys)
def sunion(self, keys, *args):
    """
    Union of the sets specified by ``keys``, decoded to a set.

    :param keys: list or str
    :param args: tuple
    :return: Future()
    """
    redis_keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        raw = pipe.sunion(*redis_keys)
        future = Future()

        def decode():
            future.set({self.valueparse.decode(v) for v in raw.result})

        pipe.on_execute(decode)
        return future
def sunionstore(self, dest, keys, *args):
    """
    Store the union of sets specified by ``keys`` into a new set named
    ``dest``; returns the number of members in the new set.
    """
    source_keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        return pipe.sunionstore(self.redis_key(dest), *source_keys)
def sadd(self, name, values, *args):
    """
    Add the specified members to the Set.

    :param name: str the name of the redis key
    :param values: a list of values or a simple value.
    :return: Future()
    """
    with self.pipe as pipe:
        encode = self.valueparse.encode
        members = [encode(v) for v in self._parse_values(values, args)]
        return pipe.sadd(self.redis_key(name), *members)
def srem(self, name, *values):
    """
    Remove the values from the Set if they are present.

    :param name: str the name of the redis key
    :param values: a list of values or a simple value.
    :return: Future()
    """
    with self.pipe as pipe:
        members = [self.valueparse.encode(v)
                   for v in self._parse_values(values)]
        return pipe.srem(self.redis_key(name), *members)
def spop(self, name):
    """
    Remove and return (pop) a random element from the Set.

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        raw = pipe.spop(self.redis_key(name))
        future = Future()

        def decode():
            future.set(self.valueparse.decode(raw.result))

        pipe.on_execute(decode)
        return future
def smembers(self, name):
    """
    All members of the Set for ``name``, decoded.

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        raw = pipe.smembers(self.redis_key(name))
        future = Future()

        def decode():
            future.set({self.valueparse.decode(v) for v in raw.result})

        pipe.on_execute(decode)
        return future
def scard(self, name):
    """
    Cardinality (number of members) of the Set.

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        target = self.redis_key(name)
        return pipe.scard(target)
def sismember(self, name, value):
    """
    Membership test: is ``value`` in the Set?

    :param name: str the name of the redis key
    :param value: str
    :return: Future()
    """
    with self.pipe as pipe:
        member = self.valueparse.encode(value)
        return pipe.sismember(self.redis_key(name), member)
def srandmember(self, name, number=None):
    """
    Return a random member of the set (or ``number`` members when given).

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        future = Future()
        raw = pipe.srandmember(self.redis_key(name), number=number)

        def decode():
            # Redis returns a single element without ``number`` and a
            # list of elements with it; mirror that shape after decoding.
            if number is None:
                future.set(self.valueparse.decode(raw.result))
            else:
                future.set([self.valueparse.decode(v) for v in raw.result])

        pipe.on_execute(decode)
        return future
def sscan(self, name, cursor=0, match=None, count=None):
    """
    One SSCAN step: resolves to ``(next_cursor, [decoded members])``.

    ``match`` allows for filtering the keys by pattern
    ``count`` allows for hint the minimum number of returns

    :param name: str the name of the redis key
    :param cursor: int
    :param match: str
    :param count: int
    """
    with self.pipe as pipe:
        raw = pipe.sscan(self.redis_key(name), cursor=cursor,
                         match=match, count=count)
        future = Future()

        def decode():
            members = [self.valueparse.decode(v) for v in raw[1]]
            future.set((raw[0], members))

        pipe.on_execute(decode)
        return future
def sscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` allows for hint the minimum number of returns
:param name: str the name of the redis key
:param match: str
:param count: | |
import logging
import math
from protocols import reports_4_0_0 as reports_4_0_0
from protocols import reports_5_0_0 as reports_5_0_0
from protocols import opencb_1_3_0 as opencb_1_3_0
from protocols.migration import MigrationParticipants103To110, MigrationParticipants100To103
from protocols.migration.base_migration import MigrationError, BaseMigrateReports400And500
class MigrateReports400To500(BaseMigrateReports400And500):
old_model = reports_4_0_0
new_model = reports_5_0_0
participant_migrator = MigrationParticipants100To103()
def migrate_interpretation_request_rd(self, old_instance, assembly):
    """
    Migrates a reports_4_0_0.InterpretationRequestRD into a
    reports_5_0_0.InterpretationRequestRD; fields with no 5.0.0
    equivalent are preserved in ``additionalInfo``.

    :type old_instance: reports_4_0_0.InterpretationRequestRD
    :type assembly: reports_5_0_0.Assembly
    :rtype: reports_5_0_0.InterpretationRequestRD
    """
    # Docstring fix: this method produces an InterpretationRequestRD, not an
    # InterpretedGenomeRD as the copy-pasted summary previously claimed.
    if assembly is None:
        raise MigrationError("Parameter <assembly> is required if version is older than 5.0.0")
    new_instance = self.convert_class(
        self.new_model.InterpretationRequestRD, old_instance)  # type: reports_5_0_0.InterpretationRequestRD
    new_instance.genomeAssembly = assembly
    new_instance.pedigree = self._migrate_pedigree(old_instance.pedigree)
    # NOTE: store fields in additional fields that are lost otherwise
    if not new_instance.additionalInfo:
        new_instance.additionalInfo = {}
    if old_instance.analysisVersion:
        new_instance.additionalInfo['analysisVersion'] = old_instance.analysisVersion
    if old_instance.analysisReturnUri:
        new_instance.additionalInfo['analysisReturnUri'] = old_instance.analysisReturnUri
    if old_instance.tieringVersion:
        new_instance.additionalInfo['tieringVersion'] = old_instance.tieringVersion
    if old_instance.complexGeneticPhenomena:
        new_instance.additionalInfo['complexGeneticPhenomena'] = str(old_instance.complexGeneticPhenomena)
    if old_instance.cellbaseVersion:
        new_instance.additionalInfo['cellbaseVersion'] = old_instance.cellbaseVersion
    if old_instance.interpretGenome:
        new_instance.additionalInfo['interpretGenome'] = str(old_instance.interpretGenome)
    return self.validate_object(
        object_to_validate=new_instance, object_type=self.new_model.InterpretationRequestRD
    )
def migrate_interpretation_request_rd_to_interpreted_genome_rd(
        self, old_instance, assembly, interpretation_service,
        reference_database_versions, software_versions, report_url=None, comments=None):
    """
    Migrates an InterpretationRequestRD into an InterpretedGenomeRD; fields
    that do not exist in the 4.0.0 request are supplied by the caller.

    :type old_instance: reports_4_0_0.InterpretationRequestRD
    :type assembly: reports_5_0_0.Assembly
    :type interpretation_service: str
    :type reference_database_versions: dict
    :type software_versions: dict
    :type report_url: str
    :type comments: list
    :rtype: reports_5_0_0.InterpretedGenomeRD
    """
    migrated = self.convert_class(self.new_model.InterpretedGenomeRD, old_instance)
    migrated.interpretationService = interpretation_service
    migrated.referenceDatabasesVersions = reference_database_versions
    # Guarantee a dict and record the tiering version alongside the rest.
    versions = software_versions if isinstance(software_versions, dict) else {}
    versions['tiering'] = old_instance.tieringVersion
    migrated.softwareVersions = versions
    migrated.reportUrl = report_url
    migrated.comments = comments
    # Convert every tiered variant into the new reported-variant shape.
    migrated.variants = self.convert_collection(
        old_instance.tieredVariants, self._migrate_reported_variant, assembly=assembly)
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.InterpretedGenomeRD
    )
def migrate_interpreted_genome_rd(self, old_instance, assembly, interpretation_request_version):
    """
    Upgrade a 4.0.0 InterpretedGenomeRD to the 5.0.0 model.

    :type old_instance: reports_4_0_0.InterpretedGenomeRD
    :type assembly: reports_5_0_0.Assembly
    :type interpretation_request_version: int
    :rtype: reports_5_0_0.InterpretedGenomeRD
    """
    # Both extra parameters are mandatory when migrating pre-5.0.0 payloads.
    if assembly is None or interpretation_request_version is None:
        raise MigrationError(
            "Parameters <assembly> and <interpretation_request_version> are required for models earlier than 5.0.0"
        )
    migrated = self.convert_class(
        self.new_model.InterpretedGenomeRD, old_instance)  # type: reports_5_0_0.InterpretedGenomeRD
    migrated.interpretationRequestVersion = interpretation_request_version
    migrated.interpretationService = old_instance.companyName
    migrated.variants = self.convert_collection(
        old_instance.reportedVariants, self._migrate_reported_variant, assembly=assembly)
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.InterpretedGenomeRD
    )
def migrate_clinical_report_rd(self, old_instance, assembly):
    """
    Upgrade a 4.0.0 ClinicalReportRD to the 5.0.0 model.

    :type old_instance: reports_4_0_0.ClinicalReportRD
    :type assembly: reports_5_0_0.Assembly
    :rtype: reports_5_0_0.ClinicalReportRD
    """
    if assembly is None:
        raise MigrationError("Parameter <assembly> is required to migrate model versions earlier than 5.0.0")
    migrated = self.convert_class(
        self.new_model.ClinicalReportRD, old_instance)  # type: reports_5_0_0.ClinicalReportRD
    try:
        migrated.interpretationRequestVersion = self.convert_string_to_integer(
            old_instance.interpretationRequestVersion)
    except MigrationError as ex:
        # Record the unparseable value before re-raising so it can be traced.
        logging.error("Error converting 'interpretationRequestVersion' to integer from value '{}'".format(
            old_instance.interpretationRequestVersion))
        raise ex
    migrated.references = old_instance.supportingEvidence
    migrated.variants = self.convert_collection(
        old_instance.candidateVariants, self._migrate_reported_variant, assembly=assembly)
    if old_instance.additionalAnalysisPanels is not None:
        migrated_panels = []
        for source_panel in old_instance.additionalAnalysisPanels:
            target_panel = self.new_model.AdditionalAnalysisPanel()  # :type reports_5_0_0.AdditionalAnalysisPanel
            target_panel.specificDisease = source_panel.specificDisease
            target_panel.panel = self.new_model.GenePanel(
                panelName=source_panel.panelName, panelVersion=source_panel.panelVersion)
            migrated_panels.append(target_panel)
        migrated.additionalAnalysisPanels = migrated_panels
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.ClinicalReportRD
    )
def migrate_cancer_interpretation_request(self, old_instance, assembly):
    """
    Upgrade a 4.0.0 CancerInterpretationRequest to the 5.0.0 model,
    preserving fields the new model drops inside ``additionalInfo``.

    :type old_instance: reports_4_0_0.CancerInterpretationRequest
    :rtype: reports_5_0_0.CancerInterpretationRequest
    """
    if assembly is None:
        raise MigrationError(
            "Parameter <assembly> is required to migrate cancer interpretation request to version 5")
    migrated = self.convert_class(
        self.new_model.CancerInterpretationRequest, old_instance
    )  # :type: reports_5_0_0.CancerInterpretationRequest
    migrated.interpretationRequestId = old_instance.reportRequestId
    migrated.interpretationRequestVersion = old_instance.reportVersion
    migrated.genomeAssembly = assembly
    migrated.cancerParticipant = self._migrate_cancer_participant(old_participant=old_instance.cancerParticipant)
    # Reuse any existing additionalInfo dict; otherwise start a fresh one.
    extras = migrated.additionalInfo or {}
    if old_instance.analysisUri:
        extras['analysisUri'] = old_instance.analysisUri
    if old_instance.analysisVersion:
        extras['analysisVersion'] = old_instance.analysisVersion
    if old_instance.tieringVersion:
        extras['tieringVersion'] = old_instance.tieringVersion
    if old_instance.interpretGenome:
        extras['interpretGenome'] = str(old_instance.interpretGenome)
    migrated.additionalInfo = extras
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.CancerInterpretationRequest
    )
def migrate_cancer_interpretation_request_to_cancer_interpreted_genome(
        self, old_instance, assembly, interpretation_service,
        reference_database_versions, software_versions, report_url=None, comments=None):
    """
    Build a reports_5_0_0.CancerInterpretedGenome out of a 4.0.0
    CancerInterpretationRequest.

    NOTE: we migrate from a model where only one sample and one participant
    is supported, thus we do not need a list of samples or participants.

    :type old_instance: reports_4_0_0.CancerInterpretationRequest
    :type assembly: reports_5_0_0.Assembly
    :type interpretation_service: str
    :type reference_database_versions: dict
    :type software_versions: dict
    :type report_url: str
    :type comments: list
    :rtype: reports_5_0_0.CancerInterpretedGenome
    """
    migrated = self.convert_class(
        self.new_model.CancerInterpretedGenome, old_instance)  # :type: reports_5_0_0.CancerInterpretedGenome
    migrated.interpretationRequestId = old_instance.reportRequestId
    migrated.interpretationRequestVersion = old_instance.reportVersion
    migrated.interpretationService = interpretation_service
    migrated.referenceDatabasesVersions = reference_database_versions
    # Guarantee a dict and record the tiering version alongside the rest.
    versions = software_versions if isinstance(software_versions, dict) else {}
    versions['tiering'] = old_instance.tieringVersion
    migrated.softwareVersions = versions
    migrated.reportUrl = report_url
    migrated.comments = comments
    participant = old_instance.cancerParticipant
    tumor_samples = participant.tumourSamples
    # Exactly one tumour sample is supported by this migration.
    if not tumor_samples:
        raise MigrationError("There is no tumour sample to perform the migration")
    elif len(tumor_samples) > 1:
        raise MigrationError("There are several tumour samples, cannot decide which to use '{}'"
                             .format(str(tumor_samples)))
    germline_samples = participant.germlineSamples
    sample_ids = {
        'germline_variant': germline_samples[0].sampleId if germline_samples else None,
        'somatic_variant': tumor_samples[0].sampleId
    }
    migrated.variants = self.convert_collection(
        old_instance.tieredVariants, self._migrate_reported_variant_cancer,
        assembly=assembly, participant_id=participant.individualId, sample_ids=sample_ids)
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.CancerInterpretedGenome
    )
def migrate_cancer_interpreted_genome(self, old_instance,
                                      assembly, participant_id, sample_ids,
                                      interpretation_request_version, interpretation_service):
    """
    Upgrade a 4.0.0 CancerInterpretedGenome to the 5.0.0 model.

    NOTE: we migrate from a model where only one sample and one participant
    is supported, thus we do not need a list of samples or participants.

    :type old_instance: reports_4_0_0.CancerInterpretedGenome
    :type assembly: reports_5_0_0.Assembly
    :type participant_id: str
    :type sample_ids: map[str (alleleOrigin)]: str - {'germline_variant': 'LP...', 'somatic_variant': 'LP...'}
    :type interpretation_request_version: int
    :type interpretation_service: str
    :rtype: reports_5_0_0.CancerInterpretedGenome
    """
    # Fail fast when any of the caller-supplied values is missing.
    self._check_required_parameters(
        assembly=assembly, participant_id=participant_id, sample_ids=sample_ids,
        interpretation_request_version=interpretation_request_version,
        interpretation_service=interpretation_service
    )
    migrated = self.convert_class(
        self.new_model.CancerInterpretedGenome, old_instance)  # :type: reports_5_0_0.CancerInterpretedGenome
    migrated.interpretationRequestId = old_instance.reportRequestId
    migrated.interpretationRequestVersion = interpretation_request_version
    migrated.interpretationService = interpretation_service
    migrated.reportUrl = old_instance.reportUri
    migrated.variants = self.convert_collection(
        old_instance.reportedVariants, self._migrate_reported_variant_cancer,
        assembly=assembly, participant_id=participant_id, sample_ids=sample_ids)
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.CancerInterpretedGenome
    )
def migrate_cancer_clinical_report(self, old_instance, assembly, participant_id, sample_ids):
    """
    Upgrade a 4.0.0 ClinicalReportCancer to the 5.0.0 model.

    NOTE: we migrate from a model where only one sample and one participant
    is supported, thus we do not need a list of samples or participants.

    :type old_instance: reports_4_0_0.ClinicalReportCancer
    :type assembly: reports_5_0_0.Assembly
    :type participant_id: str
    :type sample_ids: map[str (alleleOrigin)]: str - {'germline_variant': 'LP...', 'somatic_variant': 'LP...'}
    :rtype: reports_5_0_0.ClinicalReportCancer
    """
    if not sample_ids or not assembly or not participant_id:
        raise MigrationError("Missing required fields to migrate cancer clinical report from 4.0.0 to 5.0.0")
    migrated = self.convert_class(
        self.new_model.ClinicalReportCancer, old_instance)  # :type: reports_5_0_0.ClinicalReportCancer
    try:
        migrated.interpretationRequestVersion = self.convert_string_to_integer(
            old_instance.interpretationRequestVersion)
    except MigrationError as ex:
        # Record the unparseable value before re-raising so it can be traced.
        logging.error("Error converting 'interpretationRequestVersion' to integer from value '{}'".format(
            old_instance.interpretationRequestVersion))
        raise ex
    migrated.variants = self.convert_collection(
        old_instance.candidateVariants, self._migrate_reported_variant_cancer,
        assembly=assembly, participant_id=participant_id, sample_ids=sample_ids)
    return self.validate_object(
        object_to_validate=migrated, object_type=self.new_model.ClinicalReportCancer
    )
def _migrate_reported_variant(self, old_instance, assembly, migrate_frequencies=False):
    """
    Convert a 4.0.0 ReportedVariant into the 5.0.0 shape.

    :type old_instance: reports_4_0_0.ReportedVariant
    :param migrate_frequencies: when True, derive alleleFrequencies from
        the old additionalNumericVariantAnnotations.
    :rtype: reports_5_0_0.ReportedVariant
    """
    migrated = self.convert_class(
        self.new_model.ReportedVariant, old_instance)  # :type: reports_5_0_0.ReportedVariant
    migrated.variantCoordinates = reports_5_0_0.VariantCoordinates(
        chromosome=old_instance.chromosome,
        position=old_instance.position,
        reference=old_instance.reference,
        alternate=old_instance.alternate,
        assembly=self._migrate_assembly(assembly)
    )
    migrated.variantCalls = self.convert_collection(
        old_instance.calledGenotypes, self._migrate_called_genotype_to_variant_call, default=[])
    # Report events are merged pairwise: old event + its pre-converted twin.
    migrated.reportEvents = self.convert_collection(
        list(zip(old_instance.reportEvents, migrated.reportEvents)), self._migrate_report_event)
    migrated.references = old_instance.evidenceIds
    migrated.alleleOrigins = [reports_5_0_0.AlleleOrigin.germline_variant]
    if migrate_frequencies:
        migrated.alleleFrequencies = self._migrate_allele_frequencies(
            old_instance.additionalNumericVariantAnnotations)
    # TODO: fields that are not filled: variantAttributes, alleleFrequencies,
    # TODO: dbSnpId, cosmicIds, clinVarIds, genomicChange, cdnaChanges, proteinChanges
    return migrated
def _migrate_assembly(self, assembly):
    """
    Normalise a free-form assembly string to a reports_5_0_0.Assembly
    value; 'hg19' maps to GRCh37, and None passes through as None.

    :raises MigrationError: when the string matches no known assembly.
    """
    if assembly is None:
        return None
    lowered = assembly.lower()
    if lowered.startswith(reports_5_0_0.Assembly.GRCh37.lower()) or lowered.startswith('hg19'):
        return reports_5_0_0.Assembly.GRCh37
    if lowered.startswith(reports_5_0_0.Assembly.GRCh38.lower()):
        return reports_5_0_0.Assembly.GRCh38
    raise MigrationError("Assembly does not match any known value '{}'".format(assembly))
def _migrate_called_genotype_to_variant_call(self, old_instance):
    """
    Map a 4.0.0 CalledGenotype onto a 5.0.0 VariantCall.

    NOTE: copyNumber is lost; vaf and true alleleOrigins cannot be filled
    from the old model (germline is assumed).
    """
    variant_call = self.convert_class(
        self.new_model.VariantCall, old_instance)  # :type: reports_5_0_0.VariantCall
    variant_call.participantId = old_instance.gelId
    variant_call.zygosity = old_instance.genotype
    variant_call.alleleOrigins = [reports_5_0_0.AlleleOrigin.germline_variant]
    return variant_call
def _migrate_report_event(self, report_events):
    """
    Merge a 4.0.0 ReportEvent into its pre-converted 5.0.0 counterpart.

    :param report_events: pair (old reports_4_0_0.ReportEvent,
        new reports_5_0_0.ReportEvent)
    """
    old_event, new_event = report_events
    new_event.phenotypes = [old_event.phenotype]
    if old_event.panelName is not None:
        new_event.genePanel = self.new_model.GenePanel(panelName=old_event.panelName)
        if old_event.panelVersion is not None:
            new_event.genePanel.panelVersion = old_event.panelVersion
    new_event.genomicEntities = [self._migrate_genomic_feature(old_event.genomicFeature)]
    if old_event.variantClassification is not None:
        vc = reports_4_0_0.VariantClassification
        cs = reports_5_0_0.ClinicalSignificance
        significance_by_classification = {
            vc.benign_variant: cs.benign,
            vc.likely_benign_variant: cs.likely_benign,
            vc.variant_of_unknown_clinical_significance: cs.uncertain_significance,
            vc.likely_pathogenic_variant: cs.likely_pathogenic,
            vc.pathogenic_variant: cs.pathogenic,
            vc.not_assessed: None
        }
        significance = significance_by_classification[old_event.variantClassification]
        # ``not_assessed`` maps to None and is deliberately dropped.
        if significance is not None:
            new_event.variantClassification = self.new_model.VariantClassification(
                clinicalSignificance=significance
            )
    # NOTE: variant consequences cannot be filled, but the field is not
    # nullable so an empty list is used.
    new_event.variantConsequences = []
    if new_event.score == -999.0:  # tag value marking nulls in the reverse migration
        new_event.score = None
    return new_event
def _migrate_genomic_feature(self, old_instance):
    """
    Convert a 4.0.0 GenomicFeature into a 5.0.0 GenomicEntity.
    """
    entity = self.convert_class(self.new_model.GenomicEntity, old_instance)
    entity.geneSymbol = old_instance.hgnc
    type_by_feature = {
        reports_4_0_0.FeatureTypes.Transcript: reports_5_0_0.GenomicEntityType.transcript,
        reports_4_0_0.FeatureTypes.RegulatoryRegion: reports_5_0_0.GenomicEntityType.regulatory_region,
        reports_4_0_0.FeatureTypes.Gene: reports_5_0_0.GenomicEntityType.gene
    }
    entity.type = type_by_feature[old_instance.featureType]
    return entity
def _migrate_reported_variant_cancer(self, old_instance, assembly, participant_id, sample_ids):
    """
    Convert a 4.0.0 somatic reported-variant wrapper into a 5.0.0
    ReportedVariantCancer.

    :raises MigrationError: when no sample id is known for the variant's
        first allele origin.
    """
    source = old_instance.reportedVariantCancer
    migrated = self.convert_class(
        self.new_model.ReportedVariantCancer, source)  # :type: reports_5_0_0.ReportedVariant
    migrated.variantCoordinates = self.convert_class(reports_5_0_0.VariantCoordinates, source)
    migrated.variantCoordinates.assembly = self._migrate_assembly(assembly)
    if source.cDnaChange:
        migrated.cdnaChanges = [source.cDnaChange]
    if source.proteinChange:
        migrated.proteinChanges = [source.proteinChange]
    # NOTE: missing fields: genomicChanges
    origin = old_instance.alleleOrigins[0]
    sample_id = sample_ids.get(origin, None)
    if not sample_id:
        raise MigrationError('Couldn\'t retrieve Sample ID for {}'.format(origin))
    # builds up the VariantCall object
    # NOTE: fields that cannot be filled "phaseSet"
    migrated.variantCalls = [reports_5_0_0.VariantCall(
        depthReference=source.depthReference,
        depthAlternate=source.depthAlternate,
        vaf=source.vaf,
        zygosity=reports_5_0_0.Zygosity.na,
        alleleOrigins=old_instance.alleleOrigins,
        participantId=participant_id,
        sampleId=sample_id
    )]
    if source.commonAf is not None:
        # commonAf is stored as a percentage string; convert to a fraction.
        migrated.alleleFrequencies = [reports_5_0_0.AlleleFrequency(
            study='genomics_england',
            population='ALL',
            alternateFrequency=self.convert_string_to_float(source.commonAf) / 100
        )]
    # NOTE: some fields cannot be filled: "fdp50", "recurrentlyReported", "others"
    migrated.variantAttributes = reports_5_0_0.VariantAttributes(ihp=source.ihp)
    migrated.alleleOrigins = old_instance.alleleOrigins
    migrated.reportEvents = self.convert_collection(
        list(zip(source.reportEvents, migrated.reportEvents)), self._migrate_report_event_cancer)
    return migrated
def _migrate_report_event_cancer(self, report_events):
old_instance = report_events[0]
new_instance | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['LXCDiskArgs', 'LXCDisk']
@pulumi.input_type
class LXCDiskArgs:
    # Auto-generated by the Pulumi Terraform Bridge (tfgen); code must stay
    # byte-identical to the generator's output — documentation only here.
    def __init__(__self__, *,
                 container: pulumi.Input[str],
                 mp: pulumi.Input[str],
                 size: pulumi.Input[str],
                 slot: pulumi.Input[int],
                 storage: pulumi.Input[str],
                 acl: Optional[pulumi.Input[bool]] = None,
                 backup: Optional[pulumi.Input[bool]] = None,
                 mountoptions: Optional[pulumi.Input['LXCDiskMountoptionsArgs']] = None,
                 quota: Optional[pulumi.Input[bool]] = None,
                 replicate: Optional[pulumi.Input[bool]] = None,
                 shared: Optional[pulumi.Input[bool]] = None,
                 volume: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a LXCDisk resource.

        ``container``, ``mp``, ``size``, ``slot`` and ``storage`` are
        required inputs; the remaining arguments are optional and are only
        recorded on the resource when a value is supplied.
        """
        # Required inputs: always recorded.
        pulumi.set(__self__, "container", container)
        pulumi.set(__self__, "mp", mp)
        pulumi.set(__self__, "size", size)
        pulumi.set(__self__, "slot", slot)
        pulumi.set(__self__, "storage", storage)
        # Optional inputs: recorded only when explicitly provided.
        if acl is not None:
            pulumi.set(__self__, "acl", acl)
        if backup is not None:
            pulumi.set(__self__, "backup", backup)
        if mountoptions is not None:
            pulumi.set(__self__, "mountoptions", mountoptions)
        if quota is not None:
            pulumi.set(__self__, "quota", quota)
        if replicate is not None:
            pulumi.set(__self__, "replicate", replicate)
        if shared is not None:
            pulumi.set(__self__, "shared", shared)
        if volume is not None:
            pulumi.set(__self__, "volume", volume)

    # The getter/setter pairs below are generated boilerplate mirroring the
    # constructor arguments one-to-one via pulumi.get/pulumi.set.
    @property
    @pulumi.getter
    def container(self) -> pulumi.Input[str]:
        return pulumi.get(self, "container")

    @container.setter
    def container(self, value: pulumi.Input[str]):
        pulumi.set(self, "container", value)

    @property
    @pulumi.getter
    def mp(self) -> pulumi.Input[str]:
        return pulumi.get(self, "mp")

    @mp.setter
    def mp(self, value: pulumi.Input[str]):
        pulumi.set(self, "mp", value)

    @property
    @pulumi.getter
    def size(self) -> pulumi.Input[str]:
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: pulumi.Input[str]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter
    def slot(self) -> pulumi.Input[int]:
        return pulumi.get(self, "slot")

    @slot.setter
    def slot(self, value: pulumi.Input[int]):
        pulumi.set(self, "slot", value)

    @property
    @pulumi.getter
    def storage(self) -> pulumi.Input[str]:
        return pulumi.get(self, "storage")

    @storage.setter
    def storage(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage", value)

    @property
    @pulumi.getter
    def acl(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "acl")

    @acl.setter
    def acl(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "acl", value)

    @property
    @pulumi.getter
    def backup(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "backup")

    @backup.setter
    def backup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "backup", value)

    @property
    @pulumi.getter
    def mountoptions(self) -> Optional[pulumi.Input['LXCDiskMountoptionsArgs']]:
        return pulumi.get(self, "mountoptions")

    @mountoptions.setter
    def mountoptions(self, value: Optional[pulumi.Input['LXCDiskMountoptionsArgs']]):
        pulumi.set(self, "mountoptions", value)

    @property
    @pulumi.getter
    def quota(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "quota")

    @quota.setter
    def quota(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "quota", value)

    @property
    @pulumi.getter
    def replicate(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "replicate")

    @replicate.setter
    def replicate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "replicate", value)

    @property
    @pulumi.getter
    def shared(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "shared")

    @shared.setter
    def shared(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "shared", value)

    @property
    @pulumi.getter
    def volume(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "volume")

    @volume.setter
    def volume(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume", value)
@pulumi.input_type
class _LXCDiskState:
    # Auto-generated state bag for LXCDisk lookups. The @pulumi.input_type
    # decorator introspects __init__ and the @pulumi.getter properties, so the
    # structure of this class must not be changed by hand.
    def __init__(__self__, *,
                 acl: Optional[pulumi.Input[bool]] = None,
                 backup: Optional[pulumi.Input[bool]] = None,
                 container: Optional[pulumi.Input[str]] = None,
                 mountoptions: Optional[pulumi.Input['LXCDiskMountoptionsArgs']] = None,
                 mp: Optional[pulumi.Input[str]] = None,
                 quota: Optional[pulumi.Input[bool]] = None,
                 replicate: Optional[pulumi.Input[bool]] = None,
                 shared: Optional[pulumi.Input[bool]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 slot: Optional[pulumi.Input[int]] = None,
                 storage: Optional[pulumi.Input[str]] = None,
                 volume: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering LXCDisk resources.
        """
        # Only record explicitly supplied values, so the engine can tell
        # "not provided" apart from an explicit None.
        if acl is not None:
            pulumi.set(__self__, "acl", acl)
        if backup is not None:
            pulumi.set(__self__, "backup", backup)
        if container is not None:
            pulumi.set(__self__, "container", container)
        if mountoptions is not None:
            pulumi.set(__self__, "mountoptions", mountoptions)
        if mp is not None:
            pulumi.set(__self__, "mp", mp)
        if quota is not None:
            pulumi.set(__self__, "quota", quota)
        if replicate is not None:
            pulumi.set(__self__, "replicate", replicate)
        if shared is not None:
            pulumi.set(__self__, "shared", shared)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if slot is not None:
            pulumi.set(__self__, "slot", slot)
        if storage is not None:
            pulumi.set(__self__, "storage", storage)
        if volume is not None:
            pulumi.set(__self__, "volume", volume)

    # Generated accessors: each property reads/writes the matching input above.
    @property
    @pulumi.getter
    def acl(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "acl")

    @acl.setter
    def acl(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "acl", value)

    @property
    @pulumi.getter
    def backup(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "backup")

    @backup.setter
    def backup(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "backup", value)

    @property
    @pulumi.getter
    def container(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "container")

    @container.setter
    def container(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container", value)

    @property
    @pulumi.getter
    def mountoptions(self) -> Optional[pulumi.Input['LXCDiskMountoptionsArgs']]:
        return pulumi.get(self, "mountoptions")

    @mountoptions.setter
    def mountoptions(self, value: Optional[pulumi.Input['LXCDiskMountoptionsArgs']]):
        pulumi.set(self, "mountoptions", value)

    @property
    @pulumi.getter
    def mp(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "mp")

    @mp.setter
    def mp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mp", value)

    @property
    @pulumi.getter
    def quota(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "quota")

    @quota.setter
    def quota(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "quota", value)

    @property
    @pulumi.getter
    def replicate(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "replicate")

    @replicate.setter
    def replicate(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "replicate", value)

    @property
    @pulumi.getter
    def shared(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "shared")

    @shared.setter
    def shared(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "shared", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter
    def slot(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "slot")

    @slot.setter
    def slot(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "slot", value)

    @property
    @pulumi.getter
    def storage(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "storage")

    @storage.setter
    def storage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage", value)

    @property
    @pulumi.getter
    def volume(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "volume")

    @volume.setter
    def volume(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "volume", value)
class LXCDisk(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 acl: Optional[pulumi.Input[bool]] = None,
                 backup: Optional[pulumi.Input[bool]] = None,
                 container: Optional[pulumi.Input[str]] = None,
                 mountoptions: Optional[pulumi.Input[pulumi.InputType['LXCDiskMountoptionsArgs']]] = None,
                 mp: Optional[pulumi.Input[str]] = None,
                 quota: Optional[pulumi.Input[bool]] = None,
                 replicate: Optional[pulumi.Input[bool]] = None,
                 shared: Optional[pulumi.Input[bool]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 slot: Optional[pulumi.Input[int]] = None,
                 storage: Optional[pulumi.Input[str]] = None,
                 volume: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a LXCDisk resource with the given unique name, props, and options.
        Typing-only overload: properties passed as individual keyword arguments.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: LXCDiskArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a LXCDisk resource with the given unique name, props, and options.
        Typing-only overload: properties passed as one LXCDiskArgs bag.
        :param str resource_name: The name of the resource.
        :param LXCDiskArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above: if callers
        # passed an LXCDiskArgs bag, expand it into keyword arguments; otherwise
        # forward positional/keyword args to _internal_init unchanged.
        resource_args, opts = _utilities.get_resource_args_opts(LXCDiskArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       acl: Optional[pulumi.Input[bool]] = None,
                       backup: Optional[pulumi.Input[bool]] = None,
                       container: Optional[pulumi.Input[str]] = None,
                       mountoptions: Optional[pulumi.Input[pulumi.InputType['LXCDiskMountoptionsArgs']]] = None,
                       mp: Optional[pulumi.Input[str]] = None,
                       quota: Optional[pulumi.Input[bool]] = None,
                       replicate: Optional[pulumi.Input[bool]] = None,
                       shared: Optional[pulumi.Input[bool]] = None,
                       size: Optional[pulumi.Input[str]] = None,
                       slot: Optional[pulumi.Input[int]] = None,
                       storage: Optional[pulumi.Input[str]] = None,
                       volume: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body: validates options, builds the property bag
        # and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource; only then is a
        # pre-built __props__ bag legal. Otherwise populate it from arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = LXCDiskArgs.__new__(LXCDiskArgs)

            __props__.__dict__["acl"] = acl
            __props__.__dict__["backup"] = backup
            # Required inputs are enforced unless adopting via URN.
            if container is None and not opts.urn:
                raise TypeError("Missing required property 'container'")
            __props__.__dict__["container"] = container
            __props__.__dict__["mountoptions"] = mountoptions
            if mp is None and not opts.urn:
                raise TypeError("Missing required property 'mp'")
            __props__.__dict__["mp"] = mp
            __props__.__dict__["quota"] = quota
            __props__.__dict__["replicate"] = replicate
            __props__.__dict__["shared"] = shared
            if size is None and not opts.urn:
                raise TypeError("Missing required property 'size'")
            __props__.__dict__["size"] = size
            if slot is None and not opts.urn:
                raise TypeError("Missing required property 'slot'")
            __props__.__dict__["slot"] = slot
            if storage is None and not opts.urn:
                raise TypeError("Missing required property 'storage'")
            __props__.__dict__["storage"] = storage
            __props__.__dict__["volume"] = volume
        super(LXCDisk, __self__).__init__(
            'proxmox:index/lXCDisk:LXCDisk',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
acl: Optional[pulumi.Input[bool]] = None,
backup: Optional[pulumi.Input[bool]] = None,
container: Optional[pulumi.Input[str]] = None,
mountoptions: Optional[pulumi.Input[pulumi.InputType['LXCDiskMountoptionsArgs']]] = None,
mp: Optional[pulumi.Input[str]] = None,
quota: Optional[pulumi.Input[bool]] = None,
replicate: Optional[pulumi.Input[bool]] = None,
shared: Optional[pulumi.Input[bool]] = None,
size: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[int]] = None,
storage: Optional[pulumi.Input[str]] = None,
volume: Optional[pulumi.Input[str]] = None) -> 'LXCDisk':
"""
Get an existing LXCDisk resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LXCDiskState.__new__(_LXCDiskState)
__props__.__dict__["acl"] = acl
__props__.__dict__["backup"] = backup
__props__.__dict__["container"] = container
__props__.__dict__["mountoptions"] = mountoptions
__props__.__dict__["mp"] = mp
__props__.__dict__["quota"] = quota
__props__.__dict__["replicate"] = replicate
__props__.__dict__["shared"] = shared
__props__.__dict__["size"] = size
__props__.__dict__["slot"] = slot
__props__.__dict__["storage"] = | |
# -*- coding: utf-8 -*-
from django.db import models
from const.country import *
from const.postaladdressprefix import *
from const.purpose import *
from const.status import *
from datetime import *
from django.utils.translation import ugettext as _
from decimal import Decimal
from django.core import serializers
from exceptions import TemplateSetMissing
from exceptions import UserExtensionMissing
from exceptions import OpenInterestAccountMissing
import djangoUserExtension
from django.contrib import auth
from lxml import etree
import accounting
import settings
import copy
from subprocess import *
class Currency (models.Model):
    """A billing currency: display name, short code shown after prices, and
    the rounding step applied to calculated amounts."""
    description = models.CharField(verbose_name = _("Description"), max_length=100)
    shortName = models.CharField(verbose_name = _("Displayed Name After Price In The Position"), max_length=3)
    # Rounding granularity (e.g. 0.05); optional.
    rounding = models.DecimalField(max_digits=5, decimal_places=2, verbose_name = _("Rounding"), blank=True, null=True)

    def __unicode__(self):
        return self.shortName

    class Meta:
        app_label = "crm"
        verbose_name = _('Currency')
        verbose_name_plural = _('Currency')
class PostalAddress(models.Model):
    """A postal address; all fields optional so partial addresses can be stored."""
    prefix = models.CharField(max_length=1, choices=POSTALADDRESSPREFIX, verbose_name = _("Prefix"), blank=True, null=True)
    name = models.CharField(max_length=100, verbose_name = _("Name"), blank=True, null=True)
    prename = models.CharField(max_length=100, verbose_name = _("Prename"), blank=True, null=True)
    addressline1 = models.CharField(max_length=200, verbose_name = _("Addressline 1"), blank=True, null=True)
    addressline2 = models.CharField(max_length=200, verbose_name = _("Addressline 2"), blank=True, null=True)
    addressline3 = models.CharField(max_length=200, verbose_name = _("Addressline 3"), blank=True, null=True)
    addressline4 = models.CharField(max_length=200, verbose_name = _("Addressline 4"), blank=True, null=True)
    # NOTE(review): IntegerField cannot hold alphanumeric or zero-prefixed
    # postal codes (UK, NL, leading-zero US ZIPs) -- confirm intended locales.
    zipcode = models.IntegerField(verbose_name = _("Zipcode"), blank=True, null=True)
    town = models.CharField(max_length=100, verbose_name = _("City"), blank=True, null=True)
    state = models.CharField(max_length=100, verbose_name = _("State"), blank=True, null=True)
    # Two-letter code; display label taken from column 3 of the COUNTRIES table.
    country = models.CharField(max_length=2, choices=[(x[0], x[3]) for x in COUNTRIES], verbose_name = _("Country"), blank=True, null=True)

    class Meta:
        app_label = "crm"
        verbose_name = _('Postal Address')
        verbose_name_plural = _('Postal Address')
class PhoneAddress(models.Model):
    """A single phone number."""
    phone = models.CharField(max_length=20, verbose_name = _("Phone Number"))

    class Meta:
        app_label = "crm"
        verbose_name = _('Phone Address')
        verbose_name_plural = _('Phone Address')
class EmailAddress(models.Model):
    """A single e-mail address."""
    email = models.EmailField(max_length=200, verbose_name = _("Email Address"))

    class Meta:
        app_label = "crm"
        verbose_name = _('Email Address')
        verbose_name_plural = _('Email Address')
class Contact(models.Model):
    """Base model for parties (Customer, Supplier): name plus audit fields."""
    name = models.CharField(max_length=300, verbose_name = _("Name"))
    # NOTE(review): auto_now / auto_now_add appear swapped -- "created at"
    # should normally use auto_now_add (set once) and "last modified" auto_now
    # (refreshed on every save). Fixing this changes DB behaviour and needs a
    # schema/data review, so it is only flagged here.
    dateofcreation = models.DateTimeField(verbose_name = _("Created at"), auto_now=True)
    lastmodification = models.DateTimeField(verbose_name = _("Last modified"), auto_now_add=True)
    lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True, verbose_name = _("Last modified by"), editable=True)

    class Meta:
        app_label = "crm"
        verbose_name = _('Contact')
        verbose_name_plural = _('Contact')
class CustomerBillingCycle(models.Model):
    """Named payment-term preset: number of days a customer has to pay."""
    name = models.CharField(max_length=300, verbose_name = _("Name"))
    timeToPaymentDate = models.IntegerField(verbose_name = _("Days To Payment Date"))

    class Meta:
        app_label = "crm"
        verbose_name = _('Customer Billing Cycle')
        verbose_name_plural = _('Customer Billing Cycle')

    def __unicode__(self):
        return str(self.id) + ' ' + self.name
class CustomerGroup(models.Model):
    """A named grouping of customers (used e.g. for group-specific pricing)."""
    name = models.CharField(max_length=300)

    def __unicode__(self):
        return str(self.id) + ' ' + self.name

    class Meta:
        app_label = "crm"
        verbose_name = _('Customer Group')
        verbose_name_plural = _('Customer Groups')
class Customer(Contact):
    """A customer contact with a default billing cycle and group memberships."""
    defaultCustomerBillingCycle = models.ForeignKey('CustomerBillingCycle', verbose_name= _('Default Billing Cycle'))
    ismemberof = models.ManyToManyField(CustomerGroup, verbose_name = _('Is member of'), blank=True, null=True)

    def createContract(self, request):
        # Create and persist a Contract pre-filled with this customer and the
        # requesting user's default currency (taken from their UserExtension).
        contract = Contract()
        contract.defaultcustomer = self
        contract.defaultcurrency = djangoUserExtension.models.UserExtension.objects.filter(user=request.user.id)[0].defaultCurrency
        contract.lastmodifiedby = request.user
        contract.staff = request.user
        contract.save()
        return contract

    def createInvoice(self):
        # NOTE(review): createContract() requires a `request` argument, so this
        # call raises TypeError as written -- confirm whether this helper is
        # actually reachable before relying on it.
        contract = self.createContract()
        invoice = contract.createInvoice()
        return invoice

    def createQuote(self):
        # NOTE(review): same missing-`request` problem as createInvoice().
        contract = self.createContract()
        quote = contract.createQuote()
        return quote

    def isInGroup(self, customerGroup):
        # Returns 1/0 (legacy int-truthiness style) depending on membership.
        for customerGroupMembership in self.ismemberof.all():
            if (customerGroupMembership.id == customerGroup.id):
                return 1
        return 0

    class Meta:
        app_label = "crm"
        verbose_name = _('Customer')
        verbose_name_plural = _('Customers')

    def __unicode__(self):
        return str(self.id) + ' ' + self.name
class Supplier(Contact):
    """A supplier contact; flags whether it ships directly to customers."""
    offersShipmentToCustomers = models.BooleanField(verbose_name=_("Offers Shipment to Customer"))

    class Meta:
        app_label = "crm"
        verbose_name = _('Supplier')
        verbose_name_plural = _('Supplier')

    def __unicode__(self):
        return str(self.id) + ' ' + self.name
class Contract(models.Model):
    """The central sales object: factory for invoices, quotes and purchase
    orders pre-filled with this contract's defaults."""
    staff = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True, verbose_name = _("Staff"), related_name="db_relcontractstaff", null=True)
    description = models.TextField(verbose_name = _("Description"))
    defaultcustomer = models.ForeignKey(Customer, verbose_name = _("Default Customer"), null=True, blank=True)
    defaultSupplier = models.ForeignKey(Supplier, verbose_name = _("Default Supplier"), null=True, blank=True)
    defaultcurrency = models.ForeignKey(Currency, verbose_name=_("Default Currency"), blank=False, null=False)
    # NOTE(review): auto_now/auto_now_add look swapped (see Contact) -- flagged
    # only; fixing changes DB behaviour.
    dateofcreation = models.DateTimeField(verbose_name = _("Created at"), auto_now=True)
    lastmodification = models.DateTimeField(verbose_name = _("Last modified"), auto_now_add=True)
    lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, verbose_name = _("Last modified by"), related_name="db_contractlstmodified")

    class Meta:
        app_label = "crm"
        verbose_name = _('Contract')
        verbose_name_plural = _('Contracts')

    def createInvoice(self):
        # Build and persist an Invoice seeded from this contract's defaults.
        # Status 'C' presumably means "created" -- see INVOICESTATUS choices.
        invoice = Invoice()
        invoice.contract = self
        invoice.discount = 0
        invoice.staff = self.staff
        invoice.customer = self.defaultcustomer
        invoice.status = 'C'
        invoice.currency = self.defaultcurrency
        # Payment deadline derives from the customer's billing cycle.
        invoice.payableuntil = date.today()+timedelta(days=self.defaultcustomer.defaultCustomerBillingCycle.timeToPaymentDate)
        # NOTE(review): dateofcreation uses auto_now, so this manual assignment
        # is overwritten on save().
        invoice.dateofcreation = date.today().__str__()
        invoice.save()
        return invoice

    def createQuote(self):
        # Build and persist a Quote seeded from this contract's defaults.
        quote = Quote()
        quote.contract = self
        quote.discount = 0
        quote.staff = self.staff
        quote.customer = self.defaultcustomer
        quote.status = 'C'
        quote.currency = self.defaultcurrency
        quote.validuntil = date.today().__str__()
        quote.dateofcreation = date.today().__str__()
        quote.save()
        return quote

    def createPurchaseOrder(self):
        # Build and persist a PurchaseOrder for this contract's default supplier.
        purchaseorder = PurchaseOrder()
        purchaseorder.contract = self
        purchaseorder.description = self.description
        purchaseorder.discount = 0
        purchaseorder.currency = self.defaultcurrency
        purchaseorder.supplier = self.defaultSupplier
        purchaseorder.status = 'C'
        purchaseorder.dateofcreation = date.today().__str__()
        # TODO: today is not correct it has to be replaced
        purchaseorder.save()
        return purchaseorder

    def __unicode__(self):
        return _("Contract") + " " + str(self.id)
class PurchaseOrder(models.Model):
    """An order placed with a supplier; caches its last computed price/tax and
    can render itself to PDF via an external FOP run."""
    contract = models.ForeignKey(Contract, verbose_name = _("Contract"))
    externalReference = models.CharField(verbose_name = _("External Reference"), max_length=100, blank=True, null=True)
    supplier = models.ForeignKey(Supplier, verbose_name = _("Supplier"))
    description = models.CharField(verbose_name=_("Description"), max_length=100, blank=True, null=True)
    lastPricingDate = models.DateField(verbose_name = _("Last Pricing Date"), blank=True, null=True)
    lastCalculatedPrice = models.DecimalField(max_digits=17, decimal_places=2, verbose_name=_("Last Calculted Price With Tax"), blank=True, null=True)
    lastCalculatedTax = models.DecimalField(max_digits=17, decimal_places=2, verbose_name=_("Last Calculted Tax"), blank=True, null=True)
    status = models.CharField(max_length=1, choices=PURCHASEORDERSTATUS)
    staff = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True, verbose_name = _("Staff"), related_name="db_relpostaff", null=True)
    currency = models.ForeignKey(Currency, verbose_name=_("Currency"), blank=False, null=False)
    dateofcreation = models.DateTimeField(verbose_name = _("Created at"), auto_now=True)
    lastmodification = models.DateTimeField(verbose_name = _("Last modified"), auto_now_add=True)
    lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, verbose_name = _("Last modified by"), related_name="db_polstmodified")

    def recalculatePrices(self, pricingDate):
        # Recomputes and caches lastCalculatedPrice/Tax from the positions.
        # NOTE(review): this model defines no `discount` or `customer` field,
        # yet self.discount / self.customer are referenced below -- this looks
        # copy-pasted from SalesContract and would raise AttributeError when
        # those branches run. Confirm against callers before fixing.
        price = 0
        tax = 0
        try:
            positions = PurchaseOrderPosition.objects.filter(contract=self.id)
            if type(positions) == PurchaseOrderPosition:
                if type(self.discount) == Decimal:
                    price = int(positions.recalculatePrices(pricingDate, self.customer, self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                    tax = int(positions.recalculateTax(self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                else:
                    price = positions.recalculatePrices(pricingDate, self.customer, self.currency)
                    tax = positions.recalculateTax(self.currency)
            else:
                for position in positions:
                    if type(self.discount) == Decimal:
                        price += int(position.recalculatePrices(pricingDate, self.customer, self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                        tax += int(position.recalculateTax(self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                    else:
                        price += position.recalculatePrices(pricingDate, self.customer, self.currency)
                        tax += position.recalculateTax(self.currency)
            self.lastCalculatedPrice = price
            self.lastCalculatedTax = tax
            self.lastPricingDate = pricingDate
            self.save()
            return 1
        except Quote.DoesNotExist, e:
            # NOTE(review): this handler is broken debug code -- `listOfLines`
            # and `self.sourcefile` are undefined here (NameError if reached)
            # and exit() would kill the whole process. Flagged, not fixed, to
            # avoid guessing the intended recovery behaviour.
            print "ERROR "+e.__str__()
            print "Der Fehler trat beim File: "+ self.sourcefile +" / Cell: "+listOfLines[0][listOfLines[0].find("cell ")+4:listOfLines[0].find("(cellType ")-1]+" auf!"
            exit()
            return 0

    def createPDF(self, whatToExport):
        # Serialise this order plus every related object to XML, then invoke
        # Apache FOP to transform it to PDF using the staff user's TemplateSet.
        XMLSerializer = serializers.get_serializer("xml")
        xml_serializer = XMLSerializer()
        out = open(settings.PDF_OUTPUT_ROOT+"purchaseorder_"+str(self.id)+".xml", "w")
        objectsToSerialize = list(PurchaseOrder.objects.filter(id=self.id))
        objectsToSerialize += list(Contact.objects.filter(id=self.supplier.id))
        objectsToSerialize += list(Currency.objects.filter(id=self.currency.id))
        objectsToSerialize += list(PurchaseOrderPosition.objects.filter(contract=self.id))
        for position in list(PurchaseOrderPosition.objects.filter(contract=self.id)):
            objectsToSerialize += list(Position.objects.filter(id=position.id))
            objectsToSerialize += list(Product.objects.filter(id=position.product.id))
            objectsToSerialize += list(Unit.objects.filter(id=position.unit.id))
        objectsToSerialize += list(auth.models.User.objects.filter(id=self.staff.id))
        userExtension = djangoUserExtension.models.UserExtension.objects.filter(user=self.staff.id)
        if (len(userExtension) == 0):
            raise UserExtensionMissing(_("During PurchaseOrder PDF Export"))
        phoneAddress = djangoUserExtension.models.UserExtensionPhoneAddress.objects.filter(userExtension=userExtension[0].id)
        objectsToSerialize += list(userExtension)
        objectsToSerialize += list(phoneAddress)
        templateset = djangoUserExtension.models.TemplateSet.objects.filter(id=userExtension[0].defaultTemplateSet.id)
        if (len(templateset) == 0):
            raise TemplateSetMissing(_("During PurchaseOrder PDF Export"))
        objectsToSerialize += list(templateset)
        objectsToSerialize += list(auth.models.User.objects.filter(id=self.lastmodifiedby.id))
        objectsToSerialize += list(PostalAddressForContact.objects.filter(person=self.supplier.id))
        for address in list(PostalAddressForContact.objects.filter(person=self.supplier.id)):
            objectsToSerialize += list(PostalAddress.objects.filter(id=address.id))
        xml_serializer.serialize(objectsToSerialize, stream=out, indent=3)
        out.close()
        # External FOP invocation; raises CalledProcessError on failure.
        check_output(['/usr/bin/fop', '-c', userExtension[0].defaultTemplateSet.fopConfigurationFile.path, '-xml', settings.PDF_OUTPUT_ROOT+'purchaseorder_'+str(self.id)+'.xml', '-xsl', userExtension[0].defaultTemplateSet.purchaseorderXSLFile.xslfile.path, '-pdf', settings.PDF_OUTPUT_ROOT+'purchaseorder_'+str(self.id)+'.pdf'], stderr=STDOUT)
        return settings.PDF_OUTPUT_ROOT+"purchaseorder_"+str(self.id)+".pdf"

    class Meta:
        app_label = "crm"
        verbose_name = _('Purchase Order')
        verbose_name_plural = _('Purchase Order')

    def __unicode__(self):
        return _("Purchase Order")+ ": " + str(self.id) + " "+ _("from Contract") + ": " + str(self.contract.id)
class SalesContract(models.Model):
    """Abstract-ish base for customer-facing documents (Quote, Invoice):
    shared pricing fields plus price/tax recalculation over its positions.

    Fix: recalculatePrices() contained the typo `selof.currency` (L in the
    single-position Decimal-discount branch), which raised NameError whenever
    that branch executed; corrected to `self.currency`.
    """
    contract = models.ForeignKey(Contract, verbose_name=_('Contract'))
    externalReference = models.CharField(verbose_name = _("External Reference"), max_length=100, blank=True)
    # Discount in percent; None means no discount.
    discount = models.DecimalField(max_digits=5, decimal_places=2, verbose_name = _("Discount"), blank=True, null=True)
    description = models.CharField(verbose_name=_("Description"), max_length=100, blank=True, null=True)
    lastPricingDate = models.DateField(verbose_name = _("Last Pricing Date"), blank=True, null=True)
    lastCalculatedPrice = models.DecimalField(max_digits=17, decimal_places=2, verbose_name=_("Last Calculted Price With Tax"), blank=True, null=True)
    lastCalculatedTax = models.DecimalField(max_digits=17, decimal_places=2, verbose_name=_("Last Calculted Tax"), blank=True, null=True)
    customer = models.ForeignKey(Customer, verbose_name = _("Customer"))
    staff = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True, verbose_name = _("Staff"), related_name="db_relscstaff", null=True)
    currency = models.ForeignKey(Currency, verbose_name=_("Currency"), blank=False, null=False)
    # NOTE(review): auto_now/auto_now_add look swapped (see Contact) -- flagged only.
    dateofcreation = models.DateTimeField(verbose_name = _("Created at"), auto_now=True)
    lastmodification = models.DateTimeField(verbose_name = _("Last modified"), auto_now_add=True)
    lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, verbose_name = _("Last modified by"), related_name="db_lstscmodified", null=True, blank="True")

    def recalculatePrices(self, pricingDate):
        """Recompute and cache lastCalculatedPrice/Tax from this contract's
        positions, applying the percentage discount and currency rounding.
        Returns 1 on success, 0 if the positions could not be found."""
        price = 0
        tax = 0
        try:
            positions = SalesContractPosition.objects.filter(contract=self.id)
            if type(positions) == SalesContractPosition:
                if type(self.discount) == Decimal:
                    # Round down to the currency's rounding step after discount.
                    # (Bug fix: was `selof.currency`, a NameError.)
                    price = int(positions.recalculatePrices(pricingDate, self.customer, self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                    tax = int(positions.recalculateTax(self.currency)*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                else:
                    price = positions.recalculatePrices(pricingDate, self.customer, self.currency)
                    tax = positions.recalculateTax(self.currency)
            else:
                # QuerySet of several positions: sum first, then discount/round.
                for position in positions:
                    price += position.recalculatePrices(pricingDate, self.customer, self.currency)
                    tax += position.recalculateTax(self.currency)
                if type(self.discount) == Decimal:
                    price = int(price*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
                    tax = int(tax*(1-self.discount/100)/self.currency.rounding)*self.currency.rounding
            self.lastCalculatedPrice = price
            self.lastCalculatedTax = tax
            self.lastPricingDate = pricingDate
            self.save()
            return 1
        except Quote.DoesNotExist:
            return 0

    class Meta:
        app_label = "crm"
        verbose_name = _('Sales Contract')
        verbose_name_plural = _('Sales Contracts')

    def __unicode__(self):
        return _("Sales Contract")+ ": " + str(self.id) + " "+_("from Contract")+": " + str(self.contract.id)
class Quote(SalesContract):
validuntil = models.DateField(verbose_name = _("Valid until"))
status = models.CharField(max_length=1, choices=QUOTESTATUS, verbose_name=_('Status'))
def createInvoice(self):
invoice = Invoice()
invoice.contract = self.contract
invoice.description = self.description
invoice.discount = self.discount
invoice.customer = self.customer
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
import collections
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
def _RemoveNoDocs(item):
  """Recursively strips 'nodoc'-flagged dicts out of |item| in place.

  Returns True if |item| itself is a dict marked nodoc (the caller should
  drop it entirely); otherwise returns False after pruning children.
  """
  if json_parse.IsDict(item):
    if item.get('nodoc', False):
      return True
    # Iterate over a snapshot: we delete keys during traversal, which is only
    # accidentally safe on Python 2's list-returning items().
    for key, value in list(item.items()):
      if _RemoveNoDocs(value):
        del item[key]
  elif isinstance(item, list):
    # Collect first, remove after: mutating a list mid-iteration skips items.
    to_remove = []
    for i in item:
      if _RemoveNoDocs(i):
        to_remove.append(i)
    for i in to_remove:
      item.remove(i)
  return False
def _InlineDocs(schema):
  """Replace '$ref's that refer to inline_docs with the json for those docs.

  Mutates |schema| in place: types flagged 'inline_doc' are removed from the
  type list and spliced into every node that referenced them via '$ref'.
  (Python 2 only: relies on dict.iteritems and collections.Mapping.)
  """
  types = schema.get('types')
  if types is None:
    return
  inline_docs = {}
  types_without_inline_doc = []
  # Gather the types with inline_doc.
  for type_ in types:
    if type_.get('inline_doc'):
      inline_docs[type_['id']] = type_
      # Strip metadata so the inlined copy reads as an anonymous type.
      if type_.get('description'):
        del type_['description']
      del type_['inline_doc']
      del type_['id']
    else:
      types_without_inline_doc.append(type_)
  schema['types'] = types_without_inline_doc
  # Recursively splice the gathered docs into every '$ref' that names them.
  def apply_inline(node):
    if isinstance(node, list):
      for i in node:
        apply_inline(i)
    elif isinstance(node, collections.Mapping):
      ref = node.get('$ref')
      if ref and ref in inline_docs:
        node.update(inline_docs[ref])
        del node['$ref']
      for k, v in node.iteritems():
        apply_inline(v)
  apply_inline(schema)
def _CreateId(node, prefix):
  """Builds a page anchor id: prefix-[parent-]name for |node|."""
  parts = [prefix]
  parent = node.parent
  # Namespace-level parents don't contribute to the anchor.
  if parent is not None and not isinstance(parent, model.Namespace):
    parts.append(parent.simple_name)
  parts.append(node.simple_name)
  return '-'.join(parts)
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
class _JSCModel(object):
  """Uses a Model from the JSON Schema Compiler and generates a dict that
  a Handlebar template can use for a data source.

  Fix: _GenerateProperty computed an array-aware |properties| local (item
  properties for ARRAY types) but then read type_.properties when building
  the dict, silently ignoring the special case; it now uses the local.
  """
  def __init__(self, json, ref_resolver, disable_refs):
    self._ref_resolver = ref_resolver
    self._disable_refs = disable_refs
    # Work on a deep copy: _RemoveNoDocs/_InlineDocs mutate the schema.
    clean_json = copy.deepcopy(json)
    if _RemoveNoDocs(clean_json):
      self._namespace = None
    else:
      _InlineDocs(clean_json)
      self._namespace = model.Namespace(clean_json, clean_json['namespace'])

  def _FormatDescription(self, description):
    # With refs disabled, pass descriptions through untouched.
    if self._disable_refs:
      return description
    return self._ref_resolver.ResolveAllLinks(description,
                                              namespace=self._namespace.name)

  def _GetLink(self, link):
    # With refs disabled, synthesize a local same-page anchor.
    if self._disable_refs:
      type_name = link.split('.', 1)[-1]
      return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
    return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)

  def ToDict(self):
    """Returns the template-ready dict, or {} for a fully-nodoc namespace."""
    if self._namespace is None:
      return {}
    return {
      'name': self._namespace.name,
      'types': self._GenerateTypes(self._namespace.types.values()),
      'functions': self._GenerateFunctions(self._namespace.functions),
      'events': self._GenerateEvents(self._namespace.events),
      'properties': self._GenerateProperties(self._namespace.properties)
    }

  def _GenerateTypes(self, types):
    return [self._GenerateType(t) for t in types]

  def _GenerateType(self, type_):
    type_dict = {
      'name': type_.simple_name,
      'description': self._FormatDescription(type_.description),
      'properties': self._GenerateProperties(type_.properties),
      'functions': self._GenerateFunctions(type_.functions),
      'events': self._GenerateEvents(type_.events),
      'id': _CreateId(type_, 'type')
    }
    self._RenderTypeInformation(type_, type_dict)
    return type_dict

  def _GenerateFunctions(self, functions):
    return [self._GenerateFunction(f) for f in functions.values()]

  def _GenerateFunction(self, function):
    function_dict = {
      'name': function.simple_name,
      'description': self._FormatDescription(function.description),
      'callback': self._GenerateCallback(function.callback),
      'parameters': [],
      'returns': None,
      'id': _CreateId(function, 'method')
    }
    if (function.parent is not None and
        not isinstance(function.parent, model.Namespace)):
      function_dict['parent_name'] = function.parent.simple_name
    if function.returns:
      function_dict['returns'] = self._GenerateType(function.returns)
    for param in function.params:
      function_dict['parameters'].append(self._GenerateProperty(param))
    if function.callback is not None:
      # Show the callback as an extra parameter.
      function_dict['parameters'].append(
          self._GenerateCallbackProperty(function.callback))
    # 'last' lets templates know where to stop printing separators.
    if len(function_dict['parameters']) > 0:
      function_dict['parameters'][-1]['last'] = True
    return function_dict

  def _GenerateEvents(self, events):
    return [self._GenerateEvent(e) for e in events.values()]

  def _GenerateEvent(self, event):
    event_dict = {
      'name': event.simple_name,
      'description': self._FormatDescription(event.description),
      'parameters': [self._GenerateProperty(p) for p in event.params],
      'callback': self._GenerateCallback(event.callback),
      'filters': [self._GenerateProperty(f) for f in event.filters],
      'conditions': [self._GetLink(condition)
                     for condition in event.conditions],
      'actions': [self._GetLink(action) for action in event.actions],
      'supportsRules': event.supports_rules,
      'id': _CreateId(event, 'event')
    }
    if (event.parent is not None and
        not isinstance(event.parent, model.Namespace)):
      event_dict['parent_name'] = event.parent.simple_name
    if event.callback is not None:
      # Show the callback as an extra parameter.
      event_dict['parameters'].append(
          self._GenerateCallbackProperty(event.callback))
    if len(event_dict['parameters']) > 0:
      event_dict['parameters'][-1]['last'] = True
    return event_dict

  def _GenerateCallback(self, callback):
    if not callback:
      return None
    callback_dict = {
      'name': callback.simple_name,
      'simple_type': {'simple_type': 'function'},
      'optional': callback.optional,
      'parameters': []
    }
    for param in callback.params:
      callback_dict['parameters'].append(self._GenerateProperty(param))
    if (len(callback_dict['parameters']) > 0):
      callback_dict['parameters'][-1]['last'] = True
    return callback_dict

  def _GenerateProperties(self, properties):
    return [self._GenerateProperty(v) for v in properties.values()]

  def _GenerateProperty(self, property_):
    if not hasattr(property_, 'type_'):
      # Debugging aid for malformed model nodes: dump public attributes.
      for d in dir(property_):
        if not d.startswith('_'):
          print ('%s -> %s' % (d, getattr(property_, d)))
    type_ = property_.type_
    # Make sure we generate property info for arrays, too.
    # TODO(kalman): what about choices?
    if type_.property_type == model.PropertyType.ARRAY:
      properties = type_.item_type.properties
    else:
      properties = type_.properties
    property_dict = {
      'name': property_.simple_name,
      'optional': property_.optional,
      'description': self._FormatDescription(property_.description),
      # Bug fix: use the array-aware |properties| computed above (was
      # type_.properties, which ignored the ARRAY case entirely).
      'properties': self._GenerateProperties(properties),
      'functions': self._GenerateFunctions(type_.functions),
      'parameters': [],
      'returns': None,
      'id': _CreateId(property_, 'property')
    }
    if type_.property_type == model.PropertyType.FUNCTION:
      function = type_.function
      for param in function.params:
        property_dict['parameters'].append(self._GenerateProperty(param))
      if function.returns:
        property_dict['returns'] = self._GenerateType(function.returns)
    if (property_.parent is not None and
        not isinstance(property_.parent, model.Namespace)):
      property_dict['parent_name'] = property_.parent.simple_name
    value = property_.value
    if value is not None:
      if isinstance(value, int):
        property_dict['value'] = _FormatValue(value)
      else:
        property_dict['value'] = value
    else:
      self._RenderTypeInformation(type_, property_dict)
    return property_dict

  def _GenerateCallbackProperty(self, callback):
    property_dict = {
      'name': callback.simple_name,
      'description': self._FormatDescription(callback.description),
      'optional': callback.optional,
      'id': _CreateId(callback, 'property'),
      'simple_type': 'function',
    }
    if (callback.parent is not None and
        not isinstance(callback.parent, model.Namespace)):
      property_dict['parent_name'] = callback.parent.simple_name
    return property_dict

  def _RenderTypeInformation(self, type_, dst_dict):
    """Adds type-kind-specific keys (choices/link/array/enum/simple_type)."""
    dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
    if type_.property_type == model.PropertyType.CHOICES:
      dst_dict['choices'] = self._GenerateTypes(type_.choices)
      # We keep track of which == last for knowing when to add "or" between
      # choices in templates.
      if len(dst_dict['choices']) > 0:
        dst_dict['choices'][-1]['last'] = True
    elif type_.property_type == model.PropertyType.REF:
      dst_dict['link'] = self._GetLink(type_.ref_type)
    elif type_.property_type == model.PropertyType.ARRAY:
      dst_dict['array'] = self._GenerateType(type_.item_type)
    elif type_.property_type == model.PropertyType.ENUM:
      dst_dict['enum_values'] = []
      for enum_value in type_.enum_values:
        dst_dict['enum_values'].append({'name': enum_value})
      if len(dst_dict['enum_values']) > 0:
        dst_dict['enum_values'][-1]['last'] = True
    elif type_.instance_of is not None:
      dst_dict['simple_type'] = type_.instance_of.lower()
    else:
      dst_dict['simple_type'] = type_.property_type.name.lower()
class _LazySamplesGetter(object):
  """This class is needed so that an extensions API page does not have to fetch
  the apps samples page and vice versa.
  """
  def __init__(self, api_name, samples):
    # |samples| must expose FilterSamples(key, api_name) -- see get() below.
    self._api_name = api_name
    self._samples = samples
  def get(self, key):
    """Returns the samples filtered by |key| for this API."""
    return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, compiled_fs_factory, base_path):
def create_compiled_fs(fn, category):
return compiled_fs_factory.Create(fn, APIDataSource, category=category)
self._permissions_cache = create_compiled_fs(self._LoadPermissions,
'permissions')
self._json_cache = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, False),
'json')
self._idl_cache = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, False),
'idl')
# These caches are used if an APIDataSource does not want to resolve the
# $refs in an API. This is needed to prevent infinite recursion in
# ReferenceResolver.
self._json_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadJsonAPI(api, True),
'json-no-refs')
self._idl_cache_no_refs = create_compiled_fs(
lambda api_name, api: self._LoadIdlAPI(api, True),
'idl-no-refs')
self._idl_names_cache = create_compiled_fs(self._GetIDLNames, 'idl-names')
self._names_cache = create_compiled_fs(self._GetAllNames, 'names')
self._base_path = base_path
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
def Create(self, request, disable_refs=False):
"""Create an APIDataSource. |disable_refs| specifies whether $ref's in
APIs being processed by the |ToDict| method of _JSCModel follows $ref's
in the API. This prevents endless recursion in ReferenceResolver.
"""
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
if not disable_refs and self._ref_resolver_factory is None:
logging.error('ReferenceResolver.Factory was never set in '
'APIDataSource.Factory.')
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._json_cache_no_refs,
self._idl_cache_no_refs,
self._names_cache,
self._idl_names_cache,
self._base_path,
samples,
disable_refs)
def _LoadPermissions(self, file_name, json_str):
return json_parse.Parse(json_str)
def _LoadJsonAPI(self, api, disable_refs):
return _JSCModel(
json_parse.Parse(api)[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _LoadIdlAPI(self, api, disable_refs):
idl = idl_parser.IDLParser().ParseData(api)
return _JSCModel(
idl_schema.IDLSchema(idl).process()[0],
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs).ToDict()
def _GetIDLNames(self, base_dir, apis):
return self._GetExtNames(apis, ['idl'])
def _GetAllNames(self, base_dir, apis):
return self._GetExtNames(apis, ['json', 'idl'])
def _GetExtNames(self, apis, exts):
return [model.UnixName(os.path.splitext(api)[0]) for api in apis
if os.path.splitext(api)[1][1:] in exts]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
json_cache_no_refs,
idl_cache_no_refs,
names_cache,
idl_names_cache,
base_path,
samples,
disable_refs):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._json_cache_no_refs = json_cache_no_refs
self._idl_cache_no_refs = idl_cache_no_refs
self._names_cache = names_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
self._disable_refs = disable_refs
def _GetFeatureFile(self, filename):
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
def _GetFeatureData(self, path):
# Remove 'experimental_' from path name to match the keys in
# _permissions_features.json.
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
feature_data = self._GetFeatureFile(filename).get(path, None)
if feature_data is not None:
break
# There are specific cases in which the feature is actually a list of
# features where only one needs to match; but currently these are only
# used to whitelist | |
<reponame>byrdie/ndcube
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import astropy.units as u
try:
from sunpy.visualization.animator import ImageAnimatorWCS, LineAnimator
except ImportError:
from sunpy.visualization.imageanimator import ImageAnimatorWCS, LineAnimator
from ndcube import utils
__all__ = ['NDCubeSequencePlotMixin']
# Error-message constant: sub-cube unit attributes incompatible with the
# user-supplied data_unit.
NON_COMPATIBLE_UNIT_MESSAGE = \
    "All sequence sub-cubes' unit attribute are not compatible with data_unit set by user."
# Error-message constant: an axes_units entry given where the corresponding
# axes_coordinate is neither None nor a Quantity.
AXES_UNIT_ERRONESLY_SET_MESSAGE = \
    "axes_units element must be None unless corresponding axes_coordinate is None or a Quantity."
class NDCubeSequencePlotMixin:
def plot(self, axes=None, plot_axis_indices=None,
axes_coordinates=None, axes_units=None, data_unit=None, **kwargs):
"""
Visualizes data in the NDCubeSequence with the sequence axis as a separate dimension.
Based on the dimensionality of the sequence and value of plot_axis_indices kwarg,
a Line/Image Animation/Plot is produced.
Parameters
----------
axes: `astropy.visualization.wcsaxes.core.WCSAxes` or ??? or None.
The axes to plot onto. If None the current axes will be used.
plot_axis_indices: `int` or iterable of one or two `int`.
If two axis indices are given, the sequence is visualized as an image or
2D animation, assuming the sequence has at least 2 dimensions.
The dimension indicated by the 0th index is displayed on the
x-axis while the dimension indicated by the 1st index is displayed on the y-axis.
If only one axis index is given (either as an int or a list of one int),
then a 1D line animation is produced with the indicated dimension on the x-axis
and other dimensions represented by animations sliders.
Default=[-1, -2]. If sequence only has one dimension,
plot_axis_indices is ignored and a static 1D line plot is produced.
axes_coordinates: `None` or `list` of `None` `astropy.units.Quantity` `numpy.ndarray` `str`
Denotes physical coordinates for plot and slider axes.
If None coordinates derived from the WCS objects will be used for all axes.
If a list, its length should equal either the number sequence dimensions or
the length of plot_axis_indices.
If the length equals the number of sequence dimensions, each element describes
the coordinates of the corresponding sequence dimension.
If the length equals the length of plot_axis_indices,
the 0th entry describes the coordinates of the x-axis
while (if length is 2) the 1st entry describes the coordinates of the y-axis.
Slider axes are implicitly set to None.
If the number of sequence dimensions equals the length of plot_axis_indices,
the latter convention takes precedence.
The value of each entry should be either
`None` (implies derive the coordinates from the WCS objects),
an `astropy.units.Quantity` or a `numpy.ndarray` of coordinates for each pixel,
or a `str` denoting a valid extra coordinate.
axes_units: `None or `list` of `None`, `astropy.units.Unit` and/or `str`
If None units derived from the WCS objects will be used for all axes.
If a list, its length should equal either the number sequence dimensions or
the length of plot_axis_indices.
If the length equals the number of sequence dimensions, each element gives the
unit in which the coordinates along the corresponding sequence dimension should
displayed whether they be a plot axes or a slider axes.
If the length equals the length of plot_axis_indices,
the 0th entry describes the unit in which the x-axis coordinates should be displayed
while (if length is 2) the 1st entry describes the unit in which the y-axis should
be displayed. Slider axes are implicitly set to None.
If the number of sequence dimensions equals the length of plot_axis_indices,
the latter convention takes precedence.
The value of each entry should be either
`None` (implies derive the unit from the WCS object of the 0th sub-cube),
`astropy.units.Unit` or a valid unit `str`.
data_unit: `astropy.unit.Unit` or valid unit `str` or None
Unit in which data be displayed. If the length of plot_axis_indices is 2,
a 2D image/animation is produced and data_unit determines the unit represented by
the color table. If the length of plot_axis_indices is 1,
a 1D plot/animation is produced and data_unit determines the unit in which the
y-axis is displayed.
Returns
-------
ax: `matplotlib.axes.Axes`, `ndcube.mixins.sequence_plotting.ImageAnimatorNDCubeSequence` or `ndcube.mixins.sequence_plotting.ImageAnimatorCubeLikeNDCubeSequence`
Axes or animation object depending on dimensionality of NDCubeSequence
"""
# Check kwargs are in consistent formats and set default values if not done so by user.
naxis = len(self.dimensions)
plot_axis_indices, axes_coordinates, axes_units = _prep_axes_kwargs(
naxis, plot_axis_indices, axes_coordinates, axes_units)
if naxis == 1:
# Make 1D line plot.
ax = self._plot_1D_sequence(axes_coordinates,
axes_units, data_unit, **kwargs)
else:
if len(plot_axis_indices) == 1:
# Since sequence has more than 1 dimension and number of plot axes is 1,
# produce a 1D line animation.
if axes_units is not None:
unit_x_axis = axes_units[plot_axis_indices[0]]
else:
unit_x_axis = None
ax = LineAnimatorNDCubeSequence(self, plot_axis_indices[0], axes_coordinates,
unit_x_axis, data_unit, **kwargs)
elif len(plot_axis_indices) == 2:
if naxis == 2:
# Since sequence has 2 dimensions and number of plot axes is 2,
# produce a 2D image.
ax = self._plot_2D_sequence(plot_axis_indices, axes_coordinates,
axes_units, data_unit, **kwargs)
else:
# Since sequence has more than 2 dimensions and number of plot axes is 2,
# produce a 2D animation.
ax = ImageAnimatorNDCubeSequence(
self, plot_axis_indices=plot_axis_indices,
axes_coordinates=axes_coordinates, axes_units=axes_units, **kwargs)
return ax
def plot_as_cube(self, axes=None, plot_axis_indices=None,
axes_coordinates=None, axes_units=None, data_unit=None, **kwargs):
"""
Visualizes data in the NDCubeSequence with the sequence axis folded into the common axis.
Based on the cube-like dimensionality of the sequence and value of plot_axis_indices
kwarg, a Line/Image Plot/Animation is produced.
Parameters
----------
axes: `astropy.visualization.wcsaxes.core.WCSAxes` or ??? or None.
The axes to plot onto. If None the current axes will be used.
plot_axis_indices: `int` or iterable of one or two `int`.
If two axis indices are given, the sequence is visualized as an image or
2D animation, assuming the sequence has at least 2 cube-like dimensions.
The cube-like dimension indicated by the 0th index is displayed on the
x-axis while the cube-like dimension indicated by the 1st index is
displayed on the y-axis. If only one axis index is given (either as an int
or a list of one int), then a 1D line animation is produced with the indicated
cube-like dimension on the x-axis and other cube-like dimensions represented
by animations sliders.
Default=[-1, -2]. If sequence only has one cube-like dimension,
plot_axis_indices is ignored and a static 1D line plot is produced.
axes_coordinates: `None` or `list` of `None` `astropy.units.Quantity` `numpy.ndarray` `str`
Denotes physical coordinates for plot and slider axes.
If None coordinates derived from the WCS objects will be used for all axes.
If a list, its length should equal either the number cube-like dimensions or
the length of plot_axis_indices.
If the length equals the number of cube-like dimensions, each element describes
the coordinates of the corresponding cube-like dimension.
If the length equals the length of plot_axis_indices,
the 0th entry describes the coordinates of the x-axis
while (if length is 2) the 1st entry describes the coordinates of the y-axis.
Slider axes are implicitly set to None.
If the number of cube-like dimensions equals the length of plot_axis_indices,
the latter convention takes precedence.
The value of each entry should be either
`None` (implies derive the coordinates from the WCS objects),
an `astropy.units.Quantity` or a `numpy.ndarray` of coordinates for each pixel,
or a `str` denoting a valid extra coordinate.
axes_units: `None or `list` of `None`, `astropy.units.Unit` and/or `str`
If None units derived from the WCS objects will be used for all axes.
If a list, its length should equal either the number cube-like dimensions or
the length of plot_axis_indices.
If the length equals the number of cube-like dimensions, each element gives the
unit in which the coordinates along the corresponding cube-like dimension should
displayed whether they be a plot axes or a slider axes.
If the length equals the length of plot_axis_indices,
the 0th entry describes the unit in which the x-axis coordinates should be displayed
while (if length is 2) the 1st entry describes the unit in which the y-axis should
be displayed. Slider axes are implicitly set to None.
If the number of cube-like dimensions equals the length of plot_axis_indices,
the latter convention takes precedence.
The value of each entry should be either
`None` (implies derive the unit from the WCS | |
new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetNetworkInterfaceArgs']]]] network_interfaces: One or more `network_interface` blocks as defined below.
:param pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetOsDiskArgs']] os_disk: An `os_disk` block as defined below.
:param pulumi.Input[bool] overprovision: Should Azure over-provision Virtual Machines in this Scale Set? This means that multiple Virtual Machines will be provisioned and Azure will keep the instances which become available first - which improves provisioning success rates and improves deployment time. You're not billed for these over-provisioned VM's and they don't count towards the Subscription Quota. Defaults to `true`.
:param pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetPlanArgs']] plan: A `plan` block as documented below.
:param pulumi.Input[int] platform_fault_domain_count: Specifies the number of fault domains that are used by this Linux Virtual Machine Scale Set. Changing this forces a new resource to be created.
:param pulumi.Input[str] priority: The Priority of this Virtual Machine Scale Set. Possible values are `Regular` and `Spot`. Defaults to `Regular`. Changing this value forces a new resource.
:param pulumi.Input[bool] provision_vm_agent: Should the Azure VM Agent be provisioned on each Virtual Machine in the Scale Set? Defaults to `true`. Changing this value forces a new resource to be created.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group in which the Virtual Machine Scale Set should be assigned to. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the Windows Virtual Machine Scale Set should be exist. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetRollingUpgradePolicyArgs']] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is Required and can only be specified when `upgrade_mode` is set to `Automatic` or `Rolling`.
:param pulumi.Input[str] scale_in_policy: The scale-in policy rule that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled in. Possible values for the scale-in policy rules are `Default`, `NewestVM` and `OldestVM`, defaults to `Default`. For more information about scale in policy, please [refer to this doc](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-scale-in-policy).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetSecretArgs']]]] secrets: One or more `secret` blocks as defined below.
:param pulumi.Input[bool] single_placement_group: Should this Virtual Machine Scale Set be limited to a Single Placement Group, which means the number of instances will be capped at 100 Virtual Machines. Defaults to `true`.
:param pulumi.Input[str] sku: The Virtual Machine SKU for the Scale Set, such as `Standard_F2`.
:param pulumi.Input[str] source_image_id: The ID of an Image which each Virtual Machine in this Scale Set should be based on.
:param pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetSourceImageReferenceArgs']] source_image_reference: A `source_image_reference` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to this Virtual Machine Scale Set.
:param pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetTerminateNotificationArgs']] terminate_notification: A `terminate_notification` block as defined below.
:param pulumi.Input[str] timezone: Specifies the time zone of the virtual machine, [the possible values are defined here](https://jackstromberg.com/2017/01/list-of-time-zones-consumed-by-azure/).
:param pulumi.Input[str] unique_id: The Unique ID for this Windows Virtual Machine Scale Set.
:param pulumi.Input[str] upgrade_mode: Specifies how Upgrades (e.g. changing the Image/SKU) should be performed to Virtual Machine Instances. Possible values are `Automatic`, `Manual` and `Rolling`. Defaults to `Manual`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WindowsVirtualMachineScaleSetWinrmListenerArgs']]]] winrm_listeners: One or more `winrm_listener` blocks as defined below.
:param pulumi.Input[bool] zone_balance: Should the Virtual Machines in this Scale Set be strictly evenly distributed across Availability Zones? Defaults to `false`. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of Availability Zones in which the Virtual Machines in this Scale Set should be created in. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["additional_capabilities"] = additional_capabilities
__props__["additional_unattend_contents"] = additional_unattend_contents
__props__["admin_password"] = <PASSWORD>
__props__["admin_username"] = admin_username
__props__["automatic_instance_repair"] = automatic_instance_repair
__props__["automatic_os_upgrade_policy"] = automatic_os_upgrade_policy
__props__["boot_diagnostics"] = boot_diagnostics
__props__["computer_name_prefix"] = computer_name_prefix
__props__["custom_data"] = custom_data
__props__["data_disks"] = data_disks
__props__["do_not_run_extensions_on_overprovisioned_machines"] = do_not_run_extensions_on_overprovisioned_machines
__props__["enable_automatic_updates"] = enable_automatic_updates
__props__["encryption_at_host_enabled"] = encryption_at_host_enabled
__props__["eviction_policy"] = eviction_policy
__props__["extensions"] = extensions
__props__["health_probe_id"] = health_probe_id
__props__["identity"] = identity
__props__["instances"] = instances
__props__["license_type"] = license_type
__props__["location"] = location
__props__["max_bid_price"] = max_bid_price
__props__["name"] = name
__props__["network_interfaces"] = network_interfaces
__props__["os_disk"] = os_disk
__props__["overprovision"] = overprovision
__props__["plan"] = plan
__props__["platform_fault_domain_count"] = platform_fault_domain_count
__props__["priority"] = priority
__props__["provision_vm_agent"] = provision_vm_agent
__props__["proximity_placement_group_id"] = proximity_placement_group_id
__props__["resource_group_name"] = resource_group_name
__props__["rolling_upgrade_policy"] = rolling_upgrade_policy
__props__["scale_in_policy"] = scale_in_policy
__props__["secrets"] = secrets
__props__["single_placement_group"] = single_placement_group
__props__["sku"] = sku
__props__["source_image_id"] = source_image_id
__props__["source_image_reference"] = source_image_reference
__props__["tags"] = tags
__props__["terminate_notification"] = terminate_notification
__props__["timezone"] = timezone
__props__["unique_id"] = unique_id
__props__["upgrade_mode"] = upgrade_mode
__props__["winrm_listeners"] = winrm_listeners
__props__["zone_balance"] = zone_balance
__props__["zones"] = zones
return WindowsVirtualMachineScaleSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="additionalCapabilities")
def additional_capabilities(self) -> pulumi.Output[Optional['outputs.WindowsVirtualMachineScaleSetAdditionalCapabilities']]:
"""
A `additional_capabilities` block as defined below.
"""
return pulumi.get(self, "additional_capabilities")
@property
@pulumi.getter(name="additionalUnattendContents")
def additional_unattend_contents(self) -> pulumi.Output[Optional[Sequence['outputs.WindowsVirtualMachineScaleSetAdditionalUnattendContent']]]:
"""
One or more `additional_unattend_content` blocks as defined below.
"""
return pulumi.get(self, "additional_unattend_contents")
@property
@pulumi.getter(name="adminPassword")
def admin_password(self) -> pulumi.Output[str]:
"""
The Password which should be used for the local-administrator on this Virtual Machine. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "admin_password")
@property
@pulumi.getter(name="adminUsername")
def admin_username(self) -> pulumi.Output[str]:
"""
The username of the local administrator on each Virtual Machine Scale Set instance. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "admin_username")
@property
@pulumi.getter(name="automaticInstanceRepair")
def automatic_instance_repair(self) -> pulumi.Output['outputs.WindowsVirtualMachineScaleSetAutomaticInstanceRepair']:
"""
A `automatic_instance_repair` block as defined below. To enable the automatic instance repair, this Virtual Machine Scale Set must have a valid `health_probe_id` or an [Application Health Extension](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-health-extension).
"""
return pulumi.get(self, "automatic_instance_repair")
@property
@pulumi.getter(name="automaticOsUpgradePolicy")
def automatic_os_upgrade_policy(self) -> pulumi.Output[Optional['outputs.WindowsVirtualMachineScaleSetAutomaticOsUpgradePolicy']]:
"""
A `automatic_os_upgrade_policy` block as defined below. This can only be specified when `upgrade_mode` is set to `Automatic`.
"""
return pulumi.get(self, "automatic_os_upgrade_policy")
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> pulumi.Output[Optional['outputs.WindowsVirtualMachineScaleSetBootDiagnostics']]:
"""
A `boot_diagnostics` block as defined below.
"""
return pulumi.get(self, "boot_diagnostics")
@property
@pulumi.getter(name="computerNamePrefix")
def computer_name_prefix(self) -> pulumi.Output[str]:
"""
The prefix which should be used for the name of the Virtual Machines in this Scale Set. If unspecified this defaults to the value for the `name` field. If the value of the `name` field is not a valid `computer_name_prefix`, then you must specify `computer_name_prefix`.
"""
return pulumi.get(self, "computer_name_prefix")
@property
@pulumi.getter(name="customData")
def custom_data(self) -> pulumi.Output[Optional[str]]:
"""
The Base64-Encoded Custom Data which should be used for this Virtual Machine Scale Set.
"""
return pulumi.get(self, "custom_data")
@property
@pulumi.getter(name="dataDisks")
def data_disks(self) -> pulumi.Output[Optional[Sequence['outputs.WindowsVirtualMachineScaleSetDataDisk']]]:
"""
One or more `data_disk` blocks as defined below.
"""
return pulumi.get(self, "data_disks")
@property
@pulumi.getter(name="doNotRunExtensionsOnOverprovisionedMachines")
def do_not_run_extensions_on_overprovisioned_machines(self) -> pulumi.Output[Optional[bool]]:
"""
Should Virtual Machine Extensions be run on Overprovisioned Virtual Machines in the Scale Set? Defaults to `false`.
"""
return pulumi.get(self, "do_not_run_extensions_on_overprovisioned_machines")
@property
@pulumi.getter(name="enableAutomaticUpdates")
def enable_automatic_updates(self) -> pulumi.Output[Optional[bool]]:
"""
Are automatic updates enabled for this Virtual Machine? Defaults to `true`.
"""
return pulumi.get(self, "enable_automatic_updates")
@property
@pulumi.getter(name="encryptionAtHostEnabled")
def encryption_at_host_enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should all of the disks (including the temp disk) attached to this Virtual Machine be encrypted by enabling Encryption at Host?
"""
return pulumi.get(self, "encryption_at_host_enabled")
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> pulumi.Output[Optional[str]]:
"""
The Policy which should be used Virtual Machines are Evicted from the Scale Set. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "eviction_policy")
@property
@pulumi.getter
def extensions(self) -> pulumi.Output[Sequence['outputs.WindowsVirtualMachineScaleSetExtension']]:
"""
One or more `extension` blocks as defined below
"""
return pulumi.get(self, "extensions")
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of a Load Balancer Probe which should be used to determine the health of an instance. Changing this forces a new resource to be created. This is Required and can only be specified when `upgrade_mode` is set to `Automatic` or `Rolling`.
"""
return pulumi.get(self, "health_probe_id")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.WindowsVirtualMachineScaleSetIdentity']]:
"""
A `identity` block as defined below.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def instances(self) -> pulumi.Output[int]:
"""
The number of Virtual Machines in the Scale Set.
"""
return pulumi.get(self, "instances")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the type of on-premise license (also known as [Azure Hybrid Use Benefit](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing)) which should be used for this Virtual Machine Scale Set. Possible values are `None`, `Windows_Client` and `Windows_Server`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The Azure location where the Windows | |
#!/usr/bin/python
import pymysql
import pymysql.cursors
import subprocess as sp
import json
from datetime import date
from random import randint
# Open database connection
# a = 5005
# prepare a cursor object using cursor() method
# abstraction meant for data set traversal
def execute_query(query, x):
    """Execute *query* against the module-level database connection.

    When x == 1, also prints every fetched row and the row count.
    Returns the fetched rows on success, or -1 after a rollback on failure.

    NOTE(review): relies on module-level `db` and `cursor` globals that are
    not defined in this chunk -- confirm they are initialized before use.
    NOTE(review): callers pass pre-built SQL strings; nothing here guards
    against SQL injection.
    """
    try:
        cursor.execute(query)
        # Commit so INSERT/UPDATE statements are persisted.
        db.commit()
        rows = cursor.fetchall()
        if x == 1:
            for row in rows:
                print(row)
            print("\nRow count: ", cursor.rowcount)
        return rows
    except Exception as e:
        # Best-effort rollback, then report the failure to the caller.
        db.rollback()
        print(">>>>>>>>>>>>>", e)
        # BUG FIX: previously returned the string "-1", but every caller
        # compares the result against the integer -1, so failures were
        # silently treated as success.
        return -1
def make(a):
    """Strip parentheses, commas and single quotes from str(a).

    Used to clean up single-value SQL result tuples for display.
    """
    return str(a).translate(str.maketrans('', '', "(),'"))
def Bill(PID, doctor_fee=1000):
    """Print an itemized bill (room, doctor, medicine, treatment) for patient PID.

    doctor_fee generalizes the previously hard-coded flat doctor charge of 1000.
    NOTE(review): queries are built by string concatenation; if PID comes from
    user input this is SQL-injectable -- parameterized queries would be safer.
    """
    def _scalar(rows):
        # First column of the first row as an int; 0 when the query failed,
        # returned no rows, or the value was NULL.
        try:
            return int(rows[0][0]) if rows[0][0] is not None else 0
        except (TypeError, IndexError, ValueError):
            return 0

    room = _scalar(execute_query(
        "select Cost from Rooms where Room_No=(select Room_No from Patient where Patient_ID = " + str(PID) + ");", 0))
    print("Room Bill = " + str(room) + "\n")
    # Treatment_ID is fetched as in the original, although only the bill
    # amount below is actually used.
    TID = _scalar(execute_query(
        "select Treatment_ID from Treatment where Patient_ID = " + str(PID) + ";", 0))
    treatment = _scalar(execute_query(
        "select Bill from Treatment where Patient_ID = " + str(PID) + ";", 0))
    medicine = _scalar(execute_query(
        "select sum(Cost) from Medicine where Medicine_ID in (select Medicine_ID from Prescription where Patient_ID = " + str(PID) + ");", 0))
    print("Doctor Bill = " + str(doctor_fee) + "\n")
    print("Medicine Bill = " + str(medicine) + "\n")
    print("Treatment Bill = " + str(treatment) + "\n")
    print("Total Bill = " + str(medicine + room + doctor_fee + treatment) + "\n")
def Admit_Patient(ID, RID, Fname, Mname, Lname, Address, email, sex, DOB, Disease_ID, phnos):
    """Admit a new patient.

    Inserts rows into Patient, Patient_numbers, Illness and Treatment, assigns
    a random doctor, and creates the corresponding Bill row. Returns 0 in all
    cases, including when the Patient_ID already exists.

    NOTE(review): SQL is built via string concatenation and is injectable if
    the arguments come from user input.
    """
    # Assign a random doctor in [1, count(Doctor)]; fall back to 1 on an
    # empty table or a failed query.
    mxi = execute_query("select count(Doctor_ID) from Doctor;", 0)
    if mxi != -1 and mxi and mxi[0][0]:
        mxi = int(mxi[0][0])
    else:
        mxi = 1
    DID = randint(1, mxi)
    # Allocate the next Treatment_ID. BUG FIX: max() returns NULL on an empty
    # table, and the original `TID[0][0] > 0` comparison raises TypeError on
    # None under Python 3; a truthiness check handles NULL safely.
    TID = execute_query("select max(Treatment_ID) from Treatment;", 0)
    if TID != -1 and TID and TID[0][0]:
        TID = int(TID[0][0]) + 1
    else:
        TID = 1
    x1 = execute_query(
        "select Patient_ID from Patient where Patient_ID=" + str(ID) + ";", 0)
    if x1 != ():
        print("Patient_ID already present")
        return 0
    y = execute_query("INSERT INTO Patient values ( '" + str(ID) + "' , '" + str(RID) + "','" + str(DID) + "','" + Fname + "','" + Mname + "','" +
                      Lname + "','" + str(Address) + "','" + str(email) + "','" + str(sex) + "','" + str(DOB) + "' , 'Alive');", 1)
    if y == -1:
        print("Not inserted")
    for i in phnos:
        y = execute_query("insert into Patient_numbers values ('" +
                          str(ID) + "','" + i + "' );", 1)
        if y == -1:
            print("Not inserted")
    y = execute_query("INSERT INTO Illness values ( '" +
                      str(Disease_ID) + "','" + str(ID) + "');", 1)
    if y == -1:
        print("Not inserted")
    y = execute_query("INSERT INTO Treatment values ('" + str(TID) + "','" + str(ID) +
                      "','" + str(randint(1000, 1500)) + "','" + str(randint(2, 10)) + "');", 1)
    if y == -1:
        print("Not inserted")
    New_Bill(DID, 0, RID, TID)
    return 0
def Hire_Nurse(ID, RID, Fname, Mname, Lname, sex, Qual, Phno):
    """Insert a nurse record plus one Nurse_Number row per phone number."""
    nurse_query = ("insert into Nurse values ( '" + str(ID) + "','" + str(RID) + "','" +
                   Fname + "','" + Mname + "','" + Lname + "','" + sex + "','" + Qual + "' );")
    if execute_query(nurse_query, 1) == -1:
        print("Not inserted")
    for number in Phno:
        phone_query = ("insert into Nurse_Number values ('" +
                       str(ID) + "','" + number + "' );")
        if execute_query(phone_query, 1) == -1:
            print("Not inserted")
def Hire_Doctor(ID, sex, Email, Class, Fname, Mname, Lname, address, DOB, Qual):
    """Insert a doctor record plus one Doctor_Qualification row per entry in Qual."""
    doctor_query = ("insert into Doctor values('" + str(ID) + "','" + sex + "','" + Email + "','" +
                    Class + "','" + Fname + "','" + Mname + "','" + Lname + "','" + address + "','" + DOB + "');")
    if execute_query(doctor_query, 1) == -1:
        print("Not inserted")
    for qualification in Qual:
        qual_query = ("insert into Doctor_Qualification values ('" +
                      str(ID) + "','" + qualification + "' );")
        if execute_query(qual_query, 1) == -1:
            print("Not inserted")
def New_Bill(DId, MId, RId, TId):
    """Create a Bill row linking a doctor, medicine, room and treatment.

    Prints "Not inserted" when execute_query reports failure (-1).
    """
    values = ",".join("'" + str(v) + "'" for v in (DId, MId, RId, TId))
    status = execute_query("insert into Bill values (" + values + ");", 1)
    if status == -1:
        print("Not inserted")
def insert_into_table():
    """Interactively insert one row into a hospital table.

    Shows a fixed seven-entry menu, reads a choice from stdin and either
    delegates to the dedicated helpers (Admit_Patient, Hire_Nurse,
    Hire_Doctor) or, for choices 1-4, builds a generic INSERT against the
    table resolved from SHOW TABLES.  Returns 0 on every path.
    """
    cnt = 1
    a = execute_query("SHOW TABLES;", 0)
    print("\nSelect Table Number\n")
    for i in ["Disease", "Medicine", "Patient_Attender", "Rooms", "Patient", "Nurse", "Doctor"]:
        print(str(cnt) + ") " + make(i))
        cnt += 1
    cnt = 1
    print()
    x = input("Enter Choice : ")
    if x == '':
        print("Invalid Input")
        print()
        return 0
    # BUG FIX: a non-numeric choice used to crash on int(x)
    try:
        x = int(x)
    except ValueError:
        print("Invalid Input")
        print()
        return 0
    # map menu position onto the index of the table in the SHOW TABLES result
    if x == 1:
        x = 2
    elif x == 2:
        x = 6
    elif x == 3:
        x = 10
    elif x == 4:
        x = 13
    elif x == 5:
        print("Enter the following separated by a space :")
        print("ID , Room_No , Fname , Mname , Lname , Address , E-Mail ID , sex , DOB , Disease_ID , Phone numbers")
        inp = input().split()
        if len(inp) != 11:
            print("Invalid Input\n")
            return 0
        arr = inp[10:]  # phone numbers (everything after the fixed fields)
        Admit_Patient(int(inp[0]), int(inp[1]), inp[2], inp[3],
                      inp[4], inp[5], inp[6], inp[7], inp[8], int(inp[9]), arr)
        return 0
    elif x == 6:
        print("Enter the following separated by a space :")
        print("ID , RID , Fname , Mname , Lname , sex , Qualifications, Phone numbers")
        inp = input().split()
        if len(inp) < 8:
            print("Invalid Input\n")
            return 0
        arr = inp[7:]  # phone numbers
        Hire_Nurse(int(inp[0]), int(inp[1]), inp[2],
                   inp[3], inp[4], inp[5], inp[6], arr)
        return 0
    elif x == 7:
        print("Enter the following separated by a space :")
        print("ID , sex , E-mail ID , Class ,Fname , Mname , Lname , Address, D.O.B, Qualifications ")
        inp = input().split()
        if len(inp) < 10:
            print("Invalid Input\n")
            return 0
        arr = inp[9:]  # qualifications
        Hire_Doctor(int(inp[0]), inp[1], inp[2], inp[3],
                    inp[4], inp[5], inp[6], inp[7], inp[8], arr)
        return 0
    else:
        print("Invalid Input")
        print()
        # BUG FIX: previously fell through and indexed a[x-1] with an
        # out-of-range choice, crashing on the DESCRIBE below
        return 0
    # generic path for menu choices 1-4: describe the table, prompt for values
    a1 = execute_query("DESCRIBE " + make(a[x - 1]) + ";", 0)
    print("Enter the values of ", end="")
    for i in a1:
        print(i[0], end=" ")
    print("separated by a space")
    inp = input().split(" ")
    if len(inp) != len(a1):
        print("Invalid Input\n")
        return 0
    # build "('v1','v2',...)" — NOTE(review): values are concatenated directly
    # into the SQL (injection risk); parameterize if execute_query allows it
    st = "(" + ",".join("'" + str(i) + "'" for i in inp) + ")"
    execute_query("INSERT INTO " + make(a[x - 1]) + " values " + st + ";", 1)
def show():
    """List all tables, prompt for one by number, and dump its contents.

    Prints the column names (from DESCRIBE) followed by every row of the
    chosen table.  Returns 0 on invalid input, None otherwise.
    """
    cnt = 1
    a = execute_query("SHOW TABLES;", 0)
    print("Select Table Number")
    print()
    for i in a:
        print(str(cnt) + ") " + make(i))
        cnt += 1
    print()
    y = input("Enter Choice : ")
    if y == '':
        print("Invalid Input")
        return 0
    # BUG FIX: non-numeric input used to crash on int(y), and an
    # out-of-range number indexed a[x-1] incorrectly
    try:
        x = int(y)
    except ValueError:
        print("Invalid Input")
        return 0
    if not 1 <= x <= len(a):
        print("Invalid Input")
        return 0
    # print the header (column names) then the table body
    a1 = execute_query("DESCRIBE " + make(a[x - 1]) + ";", 0)
    for i in a1:
        print(i[0], end=" ")
    print()
    execute_query("SELECT * FROM " + make(a[x - 1]) + ";", 1)
def Delete_Medicine(Medicine_ID):
    """Remove a medicine and any prescriptions that reference it.

    :param Medicine_ID: identifier interpolated into both DELETE statements
    Prints an error message when a delete fails (execute_query returns -1).
    """
    y = execute_query("delete from Medicine where Medicine_ID = " +
                      str(Medicine_ID) + ";", 1)
    if y == -1:
        # BUG FIX: the failure message used to read "Not inserted" for a DELETE
        print("Not deleted")
    y = execute_query("delete from Prescription where Medicine_ID = " +
                      str(Medicine_ID) + ";", 1)
    if y == -1:
        print("Not deleted")
def Discharge_Patient(Patient_ID):
x = execute_query("delete from Patient_numbers where Patient_ID = " + str(Patient_ID) + ";", 1)
if x == -1:
print("Not Discharged")
x = execute_query("delete from Prescription where Patient_ID = " +
str(Patient_ID) + ";", 1)
if x == -1:
print("Not Discharged")
x = execute_query(
"delete from Patient_Attender where Patient_ID = " + str(Patient_ID) + ";", 1)
if x == -1:
print("Not Discharged")
x = execute_query("delete from Illness where | |
#!/usr/local/sci/bin/python
#*****************************
#
# Repeated Streaks Check (RSC)
#
# Checks for replication of
# 1) checks for consecutive repeating values
# 2) checks if one year has more repeating strings than expected
# 3) checks for repeats at a given hour across a number of days
# 4) checks for repeats for whole days - all 24 hourly values
#
#
# Some thresholds now determined dynamically
#
#************************************************************************
# SVN Info
#$Rev:: 114 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2017-01-17 17:26:42 +0000 (Tue, 17 Jan 2017) $: Date of last commit
#************************************************************************
import numpy as np
import scipy as sp
import datetime as dt
import copy
# RJHD routines
import qc_utils as utils
# threshold values for low, mid and high resolution for each of the variables tested
# consecutive repeats (ignoring missing data); spread over N days; same at each hour of day for N days; full day repeats
# Each dict is keyed by reporting resolution; the four-element list holds the
# thresholds in the order listed above.
T = {1: [40, 14, 25, 10], 0.5: [30, 10, 20, 7], 0.1: [24, 7, 15, 5]}  # temperatures
D = {1: [80, 14, 25, 10], 0.5: [60, 10, 20, 7], 0.1: [48, 7, 15, 2]}  # dewpoints
S = {1: [120, 28, 25, 10], 0.5:[100, 21, 20, 7], 0.1:[72, 14, 15, 5]}  # sea-level pressure
WS = {1: [40, 14, 25, 10], 0.5: [30, 10, 20, 7], 0.1: [24, 7, 15, 5]}  # wind speeds
WD = {90: [120, 28, 28, 10], 45: [96, 28, 28, 10], 22: [72, 21, 21, 7], 10: [48, 14, 14, 7], 1: [24, 7, 14, 5]}  # wind directions (keys presumably directional resolution in degrees — TODO confirm)
# lookup from variable name to its threshold table
limits_dict = {"temperatures": T, "dewpoints": D, "slp": S, "windspeeds": WS, "winddirs": WD}
# minimum wind-speed value per reporting resolution (used by the wind streak check)
WIND_MIN_VALUE = {1:0.5, 0.5:1.0, 0.1:0.5}
#*********************************************
def linear(X, p):
    """Evaluate the straight line p[0] + p[1] * X.

    p[0] is the intercept, p[1] the slope (used by the line-fitting code).
    """
    intercept = p[0]
    slope = p[1]
    return intercept + slope * X  # linear
#*********************************************
def residuals_LS(p, Y, X):
    """Squared residual of Y from the straight line p[0] + p[1]*X.

    Least-squares cost contribution; works elementwise on arrays as well
    as on scalars.
    """
    predicted = p[1] * X + p[0]  # inlined straight-line model
    return (Y - predicted) ** 2.0  # ResidualsLS
#************************************************************************
def rsc_get_straight_string_threshold(st_var, start, end, reporting = 0., diagnostics = False, plots = False, old_threshold = 0):
    '''
    Derive threshold number for strings/streaks of repeating values

    :param object st_var: station variable object
    :param datetime start: start of data
    :param datetime end: end of data
    :param float reporting: reporting accuracy
    :param bool diagnostics: do diagnostic output
    :param bool plots: do plots
    :param float old_threshold: old threshold to use as comparison
    :returns: critical streak length derived from the distribution of streaks
    '''
    all_filtered = utils.apply_filter_flags(st_var)
    # find and count the length of all repeating strings
    prev_value = st_var.mdi
    this_string = []
    string_lengths =[]
    # run through all obs, the inefficient (non-pythonic) way
    for o, obs in enumerate(all_filtered):
        if all_filtered.mask[o] == False:
            if obs != prev_value:
                # if different value to before — close out the finished streak
                # NOTE(review): the very first change appends len([]) == 0;
                # presumably harmless since the histogram starts at binmin=1 — confirm.
                string_lengths += [len(this_string)]
                this_string = [o]
            else:
                # if same value as before, note and continue
                this_string += [o]
            prev_value = obs
    if plots:
        import calendar
        title = "Straight String Distribution"
        line_label = st_var.name
        xlabel = "String length"
    else:
        title, line_label, xlabel = "","",""
    # critical value from the distribution of streak lengths; capped against
    # old_threshold by the caller if required
    threshold = utils.get_critical_values(string_lengths, binmin = 1, binwidth = 1, plots = plots, diagnostics = diagnostics, title = title, line_label = line_label, xlabel = xlabel, old_threshold = old_threshold)
    return threshold # rsc_get_straight_string_threshold
#************************************************************************
def rsc_diagnostics_and_plot(time, data, flags, title, start, plots = False):
    ''' plots time series of data with flagged streaks highlighted

    :param array time: time stamps in hours since
    :param array data: data to be plotted
    :param list flags: locations of obs to be flagged (must be non-empty)
    :param string title: title of plot (parameter name, used for the y-label lookup)
    :param datetime start: dataset start date
    :param bool plots: do the plot
    '''
    YLABELS = {"temperatures":"Temperature (C)", "dewpoints":"Dewpoints (C)", "slp":"SLP (hPa)", "windspeeds":"Wind Speed (m/s)", "winddirs":"Degrees"}
    # get period to plot and convert times
    extra = 48  # hours of context shown either side of the streak
    min_t = flags[0] - extra
    max_t = flags[-1] + extra
    if min_t < 0: min_t = 0
    time = utils.times_hours_to_datetime(time[min_t:max_t], start)
    # Python 2 print statement (this module predates a py3 port).
    # NOTE(review): time[extra] assumes min_t was NOT clamped to 0 above;
    # for a streak within 48h of the record start the printed time is off — confirm.
    print "Streak at %s, %i observations" % (dt.datetime.strftime(time[extra], "%Y %m %d %H:%M"), len(flags))
    if plots:
        import matplotlib.pyplot as plt
        plt.clf()
        plt.plot(time, data[min_t:max_t], 'bo', ls = '-')
        # re-base flag indices onto the plotted window
        flag_time = np.array(flags) - min_t
        plt.plot(time[flag_time], data[flags], 'ro', markersize = 10)
        plt.title(title.capitalize())
        plt.ylabel(YLABELS[title])
        plt.show()
    return # rsc_plots
#************************************************************************
def rsc_annual_string_expectance(all_filtered, value_starts, value_lengths, flags, start, end, st_var, times, diagnostics = False, plots = False):
    '''
    Find years where have more strings than expected, but not long enough to set off test

    :param array all_filtered: data filtered by all flags set so far
    :param array value_starts: locations of start of strings/streaks of data
    :param array value_lengths: lengths of each streak
    :param array flags: array of flags to be set
    :param datetime start: start of data
    :param datetime end: end of data
    :param object st_var: station variable object (supplies mdi and name)
    :param array times: time stamps (hours since start), used for plotting
    :param bool diagnostics: do diagnostic output
    :param bool plots: do plots
    :returns: the (updated) flags array
    '''
    month_starts = utils.month_starts(start,end)
    month_starts = np.array(month_starts).reshape(-1,12)  # one row of month offsets per year
    # proportion of each year's good data that sits inside streaks; mdi = not assessable
    year_proportions = np.zeros(month_starts.shape[0])
    year_proportions.fill(st_var.mdi)
    # churn through each year in turn
    for y in range(month_starts.shape[0]):
        if y != month_starts.shape[0] -1:
            year = all_filtered[month_starts[y,0] : month_starts[y+1,0]]
        else:
            year = all_filtered[month_starts[y,0] :]
        # require a minimum amount of unmasked data before judging the year
        if len(year.compressed()) >= 200:
            # if there are strings (streaks of same value) in this year
            if y != month_starts.shape[0] -1:
                string_starts = np.where(np.logical_and((value_starts >= month_starts[y,0]),(value_starts < month_starts[y+1,0])))
            else:
                string_starts = np.where(value_starts >= month_starts[y,0])
            year_proportions[y] = 0
            if len(string_starts[0]) >= 1:
                # work out the proportion of the amount of data held in streaks
                year_proportions[y] = np.sum(value_lengths[string_starts[0]])/float(len(year.compressed()))
    # if enough assessable years to form a robust median
    good_years = np.where(year_proportions != st_var.mdi)
    if len(good_years[0]) >= 10:
        median = np.median(year_proportions[good_years])
        if median < 0.005 : median = 0.005  # floor the median to avoid a tiny baseline
        # find the number which have proportions > 5 x median
        bad_years = np.where(year_proportions > 5.*median)
        if len(bad_years[0]) >= 1:
            for bad in bad_years[0]:
                # and flag every streak observation in that year
                if bad == month_starts.shape[0]-1:
                    # if last year, just select all
                    locs, = np.where(value_starts >= month_starts[bad,0])
                else:
                    locs, = np.where((value_starts >= month_starts[bad,0]) & (value_starts <= month_starts[bad+1,0]))
                for loc in locs:
                    # need to account for missing values here 26/9/2014
                    goods, = np.where(all_filtered.mask[value_starts[loc]:] == False)
                    flags[value_starts[loc]+goods[:value_lengths[loc]]] = 1
                if plots or diagnostics:
                    # NOTE(review): indexes month_starts[bad+1,0], which would
                    # raise for the final year — confirm an upstream guard exists.
                    plot_year = all_filtered[month_starts[bad,0]:month_starts[bad+1,0]]
                    plot_time = times[month_starts[bad,0]:month_starts[bad+1,0]]
                    plot_flags = np.where(flags[month_starts[bad,0]:month_starts[bad+1,0]] == 1)[0]
                    rsc_diagnostics_and_plot(plot_time, plot_year, plot_flags, st_var.name, start, plots = plots)
    return flags # rsc_annual_string_expectance
#************************************************************************
def rsc_straight_strings(st_var, times, n_obs, n_days, start, end, wind = False, reporting = 0., diagnostics = False, plots = False, dynamic = True):
'''
Check for strings/streaks of repeating values
:param object st_var: station variable object
:param int n_days: number of days to exceed
:param int n_obs: number of observations to exceed
:param datetime start: start of data
:param datetime end: end of data
:param float reporting: reporting accuracy
:param bool wind: whether there is wind data to account for - extra minimum value
:param bool diagnostics: do diagnostic output
:param bool plots: do plots
:param bool dynamic: calculate threshold of number of observations dynamically rather than using n_obs
'''
# January 2015 - changed to dynamically calculating the thresholds, but only use if less than current ^RJHD
if st_var.name == "winddirs":
# remove calm periods for this check.
wd_st_var = copy.deepcopy(st_var)
calms, = np.ma.where(st_var.data == 0) # True calms have direction set to 0, northerlies to 360
wd_st_var.data[calms] = wd_st_var.mdi
if dynamic:
threshold = rsc_get_straight_string_threshold(wd_st_var, start, end, reporting = reporting, diagnostics = diagnostics, plots = plots, old_threshold = n_obs)
if threshold < n_obs: n_obs = threshold
all_filtered = utils.apply_filter_flags(wd_st_var) # calms have been removed
else:
if dynamic:
threshold = rsc_get_straight_string_threshold(st_var, start, end, reporting = reporting, diagnostics = diagnostics, plots = plots, old_threshold = n_obs)
if threshold < n_obs: n_obs = threshold
all_filtered = utils.apply_filter_flags(st_var)
flags = np.zeros(len(all_filtered))
''' Look for continuous straight strings '''
prev_value = st_var.mdi
string_points = []
# storage for excess over years
value_starts = []
value_lengths =[]
for o, obs in enumerate(all_filtered):
if all_filtered.mask[o] == False:
if obs != prev_value:
if (st_var.name == "winddirs") and (prev_value == 0):
# this was a calm as a string of zeros.
# shouldn't be necessary - but just in case!
pass
else:
# if different value to before, which is long enough (and large enough for Wind)
if len(string_points) >= 10:
if wind == False or (wind | |
from __future__ import print_function
import kosh
import shutil
import getpass
import socket
import random
import os
import shlex
from subprocess import Popen, PIPE
import sys
sys.path.insert(0, "tests")
from koshbase import KoshTest # noqa
# Current user and host name — available to tests that build remote
# ("user@host:/path") source/destination locations.
user = getpass.getuser()
hostname = socket.gethostname()
def create_file(filename):
    """Create a small placeholder text file at *filename*."""
    with open(filename, "w") as handle:
        handle.write("whatever\n")
def run_cp(sources, dest, store_sources, store_destinations=None):
    """Run the kosh_command.py 'cp' sub-command in a subprocess.

    :param sources: list of source path strings (joined with spaces)
    :param dest: destination path string
    :param store_sources: store URIs passed via repeated --store flags
    :param store_destinations: optional store URIs for --destination-store
    :return: (stdout, stderr) byte strings of the finished process
    """
    base = "python scripts/kosh_command.py cp --dataset_record_type=blah "
    store_flags = "".join(" --store {}".format(s) for s in store_sources)
    dest_flags = ""
    if store_destinations is not None:
        dest_flags = "".join(" --destination-store {}".format(s)
                             for s in store_destinations)
    tail = " --sources {} --destination {}".format(" ".join(sources), dest)
    cmd = base + store_flags + dest_flags + tail
    print("TESTING:", cmd)
    proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate()
    return stdout, stderr
class KoshTestCp(KoshTest):
    # Prefixes prepended to source/destination paths; empty here (local
    # copies) — presumably set to "user@host:" in a subclass to exercise
    # the remote code paths. TODO confirm subclass usage.
    source_prefix = ""
    dest_prefix = ""
    def file_exist(self, name):
        """Return whether *name* exists, supporting "user@host:/path" names.

        Local paths return a bool; remote paths return an int (0/1) parsed
        from a shell test run over ssh — both behave correctly in a truth
        context, but the types differ.
        """
        if "@" not in name:
            # regular local file
            return os.path.exists(name)
        else:
            # shell test emitting 1/0 so we can parse the result from stdout
            is_file_cmd = "if [ -f {} ]; then echo -e 1 ; else echo -e 0 ; fi ;"
            filename = ":".join(name.split(":")[1:])  # split over : — drop the user@host prefix
            is_file_cmd = is_file_cmd.format(filename)
            # NOTE(review): connects to self.user/self.hostname (from the test
            # base class) rather than the user@host embedded in *name* — confirm
            # this is intended.
            cmd = "ssh {}@{} '{}'".format(self.user,
                                          self.hostname, is_file_cmd)
            is_file_proc = Popen(
                "/usr/bin/bash",
                stdin=PIPE,
                stdout=PIPE,
                stderr=PIPE)
            o, e = is_file_proc.communicate(cmd.encode())
            o = o.decode().split("\n")
            return int(o[0])
    def test_file_to_file(self):
        """cp a single associated file to a new path and check both stores."""
        # kosh cv --stores store1.sql --destination_stores store3.sql --source
        # file1 --destination file2
        rand = str(random.randint(0, 1000000))  # avoid collisions between runs
        store1, db1 = self.connect()
        store2, db2 = self.connect()
        file_src_orig = os.path.abspath(rand + "_file_to_file.py")
        create_file(file_src_orig)
        file_src = self.source_prefix + file_src_orig
        ds1 = store1.create(name="test")
        ds1.associate(file_src_orig, mime_type="py")
        store1.close()
        store2.close()
        dest_name_orig = os.path.abspath(rand + "_file_dest.py")
        dest_name = self.dest_prefix + dest_name_orig
        run_cp([file_src, ], dest_name, [self.source_prefix + db1, ],
               [self.dest_prefix + db2, ])
        # is it in dest store with correct url?
        # in case the store were remote we need to reopen them
        store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
        store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
        ds_store1 = store1.search(name="test")
        self.assertEqual(len(ds_store1), 1)
        ds1 = store1.search(name="test")[0]
        ds_store2 = store2.search(name="test")
        self.assertEqual(len(ds_store2), 1)
        # destination store must point at the copied file ...
        associated = ds_store2[0].search(mime_type="py")[0]
        self.assertEqual(associated.uri, dest_name_orig)
        # ... and the source store must still point at the original
        associated = ds1.search(mime_type="py")[0]
        self.assertEqual(associated.uri, file_src_orig)
        # cleanup file(s)
        os.remove(dest_name_orig)
        os.remove(file_src_orig)
        # cleanup stores
        for db in [db1, db2]:
            os.remove(db)
    def test_files_to_new_directory(self):
        """cp several files (including one in a sub-directory) into a directory."""
        # kosh mv --stores_store1.sql store2.sql--destination_stores store3.sql
        # --source dir1 --destination dir2
        rand = str(random.randint(0, 1000000))  # avoid collisions between runs
        store1, db1 = self.connect()
        store2, db2 = self.connect()
        file_src_orig = [rand + "_f2d/1.py", rand + "_f2d/sub/file2.py"]
        file_src_orig_associate = [os.path.abspath(x) for x in file_src_orig]
        # best-effort reset of the source tree (ignore if absent)
        try:
            os.removedirs(os.path.dirname(file_src_orig[0]))
        except BaseException:
            pass
        try:
            os.makedirs(os.path.dirname(file_src_orig[1]))
        except BaseException:
            pass
        for src in file_src_orig:
            create_file(src)
        file_src = [self.source_prefix + f for f in file_src_orig_associate]
        ds1 = store1.create(name="test")
        ds1.associate(file_src_orig_associate, mime_type="py")
        dest_name_orig = rand + "_f2d_dest"
        # best-effort reset of the destination directory
        try:
            os.removedirs(dest_name_orig)
        except BaseException:
            pass
        try:
            os.makedirs(dest_name_orig)
        except BaseException:
            pass
        dest_name = self.dest_prefix + os.path.abspath(dest_name_orig)
        run_cp(file_src, dest_name, [
            self.source_prefix + db1, ], [self.dest_prefix + db2, ])
        # First let's check files are moved (originals kept, copies created)
        new_paths = []
        for file_src in file_src_orig_associate:
            self.assertTrue(os.path.exists(file_src))
            new_paths.append(
                os.path.abspath(
                    os.path.join(
                        dest_name_orig,
                        os.path.basename(file_src))))
            self.assertTrue(os.path.exists(new_paths[-1]))
        # source store should keep the original URIs
        store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
        ds_store1 = store1.search(name="test")
        self.assertEqual(len(ds_store1), 1)
        ds1 = ds_store1[0]
        associated_uris = ds1.search(mime_type="py")
        for associated in associated_uris:
            self.assertTrue(associated.uri in file_src_orig_associate)
        # destination store should reference the copied paths
        store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
        ds_store2 = store2.search(name="test")
        self.assertEqual(len(ds_store2), 1)
        ds2 = ds_store2[0]
        associated_uris = ds2.search(mime_type="py")
        for associated in associated_uris:
            self.assertTrue(associated.uri in new_paths)
        # Cleanup files
        shutil.rmtree(rand + "_f2d")
        shutil.rmtree(rand + "_f2d_dest")
        # cleanup stores
        for db in [db1, db2]:
            os.remove(db)
    def test_copy_directory_not_existing(self):
        """cp a whole directory to a destination that does not exist yet."""
        # kosh cp --stores_store1.sql store2.sql --destination_stores
        # store3.sql --source dir1 --destination dir2
        rand = str(random.randint(0, 1000000))  # avoid collisions between runs
        store1, db1 = self.connect()
        store2, db2 = self.connect()
        file_src_orig = [rand + "_d2d/1.py", rand + "_d2d/sub/file2.py"]
        file_src_orig_associate = [os.path.abspath(x) for x in file_src_orig]
        orig_dir = os.path.dirname(file_src_orig_associate[0])
        # best-effort reset of the source tree (ignore if absent)
        try:
            os.removedirs(os.path.dirname(file_src_orig[0]))
        except BaseException:
            pass
        try:
            os.makedirs(os.path.dirname(file_src_orig[1]))
        except BaseException:
            pass
        for src in file_src_orig:
            create_file(src)
        ds1 = store1.create(name="test")
        ds1.associate(file_src_orig_associate, mime_type="py")
        dest_name_orig = os.path.abspath(rand + "_d2d_dest")
        # destination intentionally left non-existent (that is the case under test)
        try:
            os.removedirs(dest_name_orig)
        except BaseException:
            pass
        # try:
        #     os.makedirs(dest_name_orig)
        # except:
        #     pass
        dest_name = self.dest_prefix + dest_name_orig
        run_cp([self.source_prefix + orig_dir, ], dest_name,
               [self.source_prefix + db1, ], [self.dest_prefix + db2, ])
        # First let's check files are moved (originals kept)
        for file_src in file_src_orig_associate:
            self.assertTrue(os.path.exists(file_src))
        # copies land under dest/<dirname>/... preserving the tree
        new_paths = []
        new_paths.append(os.path.join(dest_name_orig, rand + "_d2d", "1.py"))
        self.assertTrue(os.path.exists(new_paths[-1]))
        new_paths.append(
            os.path.join(
                dest_name_orig,
                rand + "_d2d",
                "sub",
                "file2.py"))
        self.assertTrue(os.path.exists(new_paths[-1]))
        # source store keeps original URIs
        store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
        ds_store1 = store1.search(name="test")
        self.assertEqual(len(ds_store1), 1)
        ds1 = ds_store1[0]
        associated_uris = ds1.search(mime_type="py")
        for associated in associated_uris:
            self.assertTrue(associated.uri in file_src_orig_associate)
        # destination store references copied paths
        store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
        ds_store2 = store2.search(name="test")
        self.assertEqual(len(ds_store2), 1)
        ds2 = ds_store2[0]
        associated_uris = ds2.search(mime_type="py")
        for associated in associated_uris:
            self.assertTrue(associated.uri in new_paths)
        # Cleanup files
        shutil.rmtree(rand + "_d2d")
        shutil.rmtree(rand + "_d2d_dest")
        # cleanup stores
        for db in [db1, db2]:
            os.remove(db)
def test_move_files_pattern_to_new_directory(self):
# kosh mv --stores store1.sql store2.sql --source *.testme --source
# dir1/testing_it_*.testme --destination dir2
rand = str(random.randint(0, 1000000))
store1, db1 = self.connect()
store2, db2 = self.connect()
file_src_orig = [
rand + "_1.testme",
rand + "_file2.testme",
"dir1/testing_it_1.testme",
"dir1/testing_it_2.testme",
"dir1/i_dont_move.testme"]
file_src_orig_associate = [os.path.abspath(x) for x in file_src_orig]
try:
os.makedirs(os.path.dirname(file_src_orig[2]))
except BaseException:
pass
for src in file_src_orig:
create_file(src)
file_src = [self.source_prefix + f for f in file_src_orig]
ds1 = store1.create(name="test")
ds1.associate(file_src_orig_associate, mime_type="testme")
ds2 = store2.create(name="test")
ds2.associate(file_src_orig_associate[1:-1], mime_type="testme")
dest_name_orig = os.path.abspath(rand + "_pattern_dest")
try:
os.removedirs(dest_name_orig)
except BaseException:
pass
try:
os.makedirs(dest_name_orig)
except BaseException:
pass
dest_name = self.dest_prefix + dest_name_orig
apath = os.path.dirname(file_src_orig_associate[0])
run_cp([self.source_prefix + os.path.join(apath,
"*.testme"),
self.source_prefix + os.path.join(apath,
"dir1/testing_it*testme")],
dest_name,
[self.source_prefix + db1,
],
[self.dest_prefix + db2,
])
new_paths = []
for file_src in file_src_orig_associate[:-1]:
# Test files are moved
self.assertTrue(os.path.exists(file_src))
dest = os.path.abspath(
os.path.join(
dest_name_orig,
os.path.basename(file_src)))
new_paths.append(dest)
self.assertTrue(os.path.exists(dest))
# Test datasets are updated
store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
ds_store1 = store1.search(name="test")
self.assertEqual(len(ds_store1), 1)
ds1 = ds_store1[0]
associated_uris = ds1.search(mime_type="testme")
for associated in associated_uris:
self.assertTrue(associated.uri in file_src_orig_associate)
store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
ds_store2 = store2.search(name="test")
self.assertEqual(len(ds_store2), 1)
ds2 = ds_store2[0]
associated_uris = ds2.search(mime_type="py")
for associated in associated_uris:
self.assertTrue(associated.uri in new_paths)
# Cleanup files
shutil.rmtree("dir1")
shutil.rmtree(rand + "_pattern_dest")
# cleanup stores
for db in [db1, db2]:
os.remove(db)
    # this should be documented in a notebook and not implemented here
    def test_copy_from_a_store_to_another_in_new_dest_dir(self):
        """Placeholder: scenario intentionally left to documentation."""
        # kosh cp --stores source --destination_stores dest --source file1
        # file2, ... --destination dir [--dataset]
        pass
        # NOTE(review): duplicated `pass` below — harmless but redundant
        pass

    # this should be done by cp-ing the files
    # Maybe doc via a pipe of search command
    def test_copy_dataset_from_store_to_another_remote(self):
        """Placeholder: covered by cp-ing files / documented via search pipe."""
        # kosh cp --stores source --destination_stores
        # user@machine:/path/to/kosh/store.sql --datasets dataset1_id
        # dataset2_id --destination user@machine:/path/to/destination_directory
        pass

    # This is done in mv
    def test_reassociate_files(self):
        """Placeholder: behaviour exercised by the mv test-suite."""
        # kosh reassociate --store store1 --source file --destination
        # new_file_path
        pass
def test_dir_and_files_to_dir(self):
rand = str(random.randint(0, 1000000))
store1, db1 = self.connect()
store2, db2 = self.connect()
file_src_orig = [
rand + "_df2d/1.py",
rand + "_df2d/file2.py",
rand + "_f1.py",
rand + "_f2.py"]
try:
os.removedirs(os.path.dirname(file_src_orig[0]))
except BaseException:
pass
try:
os.makedirs(os.path.dirname(file_src_orig[0]))
except BaseException:
pass
for src in file_src_orig:
create_file(src)
file_src_absolute = [os.path.abspath(f) for f in file_src_orig]
file_src = [self.source_prefix + f for f in file_src_absolute]
ds1 = store1.create(name="test")
ds1.associate(file_src_absolute, mime_type="py")
dest_name_orig = os.path.abspath(rand + "_df2d_dest")
try:
os.removedirs(dest_name_orig)
except BaseException:
pass
try:
os.makedirs(dest_name_orig)
except BaseException:
pass
dest_name = self.dest_prefix + dest_name_orig
run_cp([self.source_prefix + os.path.abspath(rand + "_df2d"),
] + [self.source_prefix + f for f in file_src_absolute[2:]],
dest_name,
[self.source_prefix + db1,
],
[self.dest_prefix + db2,
])
new_paths = []
for i, file_src in enumerate(file_src_absolute):
# Test files are moved
self.assertTrue(os.path.exists(file_src))
dest = os.path.abspath(
os.path.join(
dest_name_orig,
file_src_orig[i]))
new_paths.append(dest)
self.assertTrue(os.path.exists(dest))
# Test datasets are updated
store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
ds_store1 = store1.search(name="test")
self.assertEqual(len(ds_store1), 1)
ds1 = ds_store1[0]
associated_uris = ds1.search(mime_type="testme")
for associated in associated_uris:
self.assertTrue(associated.uri in file_src_absolute)
store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
ds_store2 = store2.search(name="test")
self.assertEqual(len(ds_store2), 1)
ds2 = ds_store2[0]
associated_uris = ds2.search(mime_type="py")
for associated in associated_uris:
self.assertTrue(associated.uri in new_paths)
# Cleanup files
shutil.rmtree(rand + "_df2d")
shutil.rmtree(rand + "_df2d_dest")
# cleanup stores
for db in [db1, db2]:
os.remove(db)
def test_file_in_double_dir_to_file(self):
rand = str(random.randint(0, 1000000))
store1, db1 = self.connect()
store2, db2 = self.connect()
file_src_orig = os.path.abspath(
rand + "_a_dir/another_dir/file_to_file.py")
try:
os.remove(file_src_orig)
except BaseException:
pass
try:
os.removedirs(os.path.dirname(file_src_orig))
os.removedirs(os.path.dirname(os.path.dirname(file_src_orig)))
except BaseException:
pass
try:
os.makedirs(os.path.dirname(file_src_orig))
except BaseException:
pass
create_file(file_src_orig)
file_src = self.source_prefix + file_src_orig
ds1 = store1.create(name="test")
ds1.associate(file_src_orig, mime_type="py")
dest_name_orig = os.path.abspath("file_dest.py")
dest_name = self.dest_prefix + dest_name_orig
run_cp([file_src, ], dest_name, [self.source_prefix + db1, ],
[self.dest_prefix + db2, ])
# Test files are moved
self.assertTrue(os.path.exists(file_src_orig))
self.assertTrue(os.path.exists(dest_name_orig))
# Test datasets are updated
store1 = kosh.KoshStore(db_uri=db1, dataset_record_type="blah")
ds_store1 = store1.search(name="test")
self.assertEqual(len(ds_store1), 1)
ds1 = ds_store1[0]
associated_uris = ds1.search(mime_type="testme")
for associated in associated_uris:
self.assertEqual(associated.uri, file_src_orig)
store2 = kosh.KoshStore(db_uri=db2, dataset_record_type="blah")
ds_store2 = store2.search(name="test")
self.assertEqual(len(ds_store2), 1)
ds2 = ds_store2[0]
associated_uris = ds2.search(mime_type="py")
for associated in associated_uris:
self.assertEqual(associated.uri, dest_name_orig)
# Cleanup files
| |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import subprocess
import os
import textwrap
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# This is a homemade environment variable set by the user to reduce the
# compilation time if 'SPHINX_BUILDERNAME' == 'latex'. In that case, the
# 'breathe' and 'exhale' extensions are disabled since the final LaTeX PDF
# does not include the API documentation.
# NOTE: str() turns an unset variable into the string "None", which simply
# never equals "latex" below.
buildername = str(os.getenv('SPHINX_BUILDERNAME'))
# 'read_the_docs_build' is whether we are on readthedocs.org, this line of code
# grabbed from docs.readthedocs.org
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
# this is a hack to disable the library API doc on Read the docs until time
# limit can be extended
if (read_the_docs_build):
    buildername = "latex"
# -- Project information -----------------------------------------------------
project = 'AFF3CT'
copyright = '2021, AFF3CT team'
author = 'AFF3CT team'
# get the AFF3CT version from Git
if (read_the_docs_build):
    # RTD clones are shallow; fetch full history so `git describe` works
    subprocess.call('git fetch --unshallow', shell=True)
label = subprocess.check_output(["git", "describe"]).strip().decode(encoding='UTF-8')
split_label = label.split("-")
# The short X.Y version
version = split_label[0]
# The full version, including alpha/beta/rc tags
release = label
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinxcontrib.bibtex',
    'sphinxcontrib.rsvgconverter',
    'm2r',
]
# the API-documentation extensions are expensive; they are skipped for the
# LaTeX build, whose output does not include the API documentation
if buildername != "latex":
    extensions.append('breathe')
    extensions.append('exhale')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Figures, tables and code-blocks are automatically numbered if they have a caption
numfig = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# Read the Docs injects its own theme, so sphinx_rtd_theme is only imported
# and selected explicitly when building locally.
if not read_the_docs_build:
    import sphinx_rtd_theme
    # The theme to use for HTML and HTML Help pages. See the documentation
    # for a list of builtin themes.
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme-specific look-and-feel tuning; available options are described at
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html#html-theme-options
# TODO : Why the compilation fails the first time ???? -> the second time is good.
html_theme_options = {
    # Helps search engines with duplicated versions of the doc -> TODO
    'canonical_url': '',
    # Add an icon next to external links.
    'style_external_links': False,
    # Show the version number at the top of the sidebar.
    'display_version': True,
    # Toc options
    'navigation_depth': -1,
    'collapse_navigation': True,
    'sticky_navigation': True,
    'includehidden': False,
    'titles_only': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# No custom favicon or logo image is provided for the HTML output.
html_favicon = None
html_logo = None
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'AFF3CTdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
    # Additional stuff for the LaTeX preamble: show up to 10 levels in the
    # PDF table of contents. A raw string is used so that "\s" is not
    # treated as a (deprecated, soon invalid) Python escape sequence.
    'preamble': r'\setcounter{tocdepth}{10}'
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AFF3CT.tex', 'AFF3CT Documentation',
     'AFF3CT team', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'aff3ct', 'AFF3CT Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AFF3CT', 'AFF3CT Documentation',
     author, 'AFF3CT', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
rst_epilog = """
.. |logo_ims| image:: https://www.ims-bordeaux.fr/images/logoimsjoom.png
:alt: logo-ims-bordeaux
:width: 60px
:height: 30px
.. |image_required_argument| image:: /user/simulation/parameters/images/required.svg
:alt: Required
:width: 80px
:height: 25px
.. |image_advanced_argument| image:: /user/simulation/parameters/images/advanced.svg
:alt: Advanced
:width: 80px
:height: 25px
.. |AFF3CT| replace:: :abbr:`AFF3CT (A Fast Forward Error Correction Toolbox!)`
.. |AMS| replace:: :abbr:`AMS (Approximate Min-Star)`
.. |API| replace:: :abbr:`API (Application Programming Interface)`
.. |ARM| replace:: :abbr:`ARM (Advanced RISC (Reduced Instruction Set Computer) Machine)`
.. |ARMv7| replace:: :abbr:`ARMv7 (Advanced RISC (Reduced Instruction Set Computer) Machine Version 7)`
.. |ARMv8| replace:: :abbr:`ARMv8 (Advanced RISC (Reduced Instruction Set Computer) Machine Version 8)`
.. |ARP| replace:: :abbr:`ARP (Almost Regular Permutation)`
.. |ASCII| replace:: :abbr:`ASCII (American Standard Code for Information Interchange)`
.. |A-SCL| replace:: :abbr:`A-SCL (Adaptive Successive Cancellation List)`
.. |FA-SCL| replace:: :abbr:`FA-SCL (Fully Adaptive Successive Cancellation List)`
.. |PA-SCL| replace:: :abbr:`PA-SCL (Partially Adaptive Successive Cancellation List)`
.. |AVX| replace:: :abbr:`AVX (Advanced Vector Extensions)`
.. |AVX2| replace:: :abbr:`AVX2 (Advanced Vector Extensions 2)`
.. |AVX-512| replace:: :abbr:`AVX-512 (Advanced Vector Extensions 512-bit)`
.. |AVX-512F| replace:: :abbr:`AVX-512F (Advanced Vector Extensions 512-bit Foundation)`
.. |AVX-512BW| replace:: :abbr:`AVX-512BW (Advanced Vector Extensions 512-bit Bytes-Words)`
.. |AWGN| replace:: :abbr:`AWGN (Additive White Gaussian Noise)`
.. |AZCW| replace:: :abbr:`AZCW (All Zero Code Word)`
.. |AZCWs| replace:: :abbr:`AZCWs (All Zero Code Words)`
.. |BCH| replace:: :abbr:`BCH (Bose, Ray-Chaudhuri and Hocquenghem)`
.. |BCJR| replace:: :abbr:`BCJR (Bahl, Cocke, Jelinek and Raviv algorithm or Maximum A Posteriori (MAP))`
.. |BEC| replace:: :abbr:`BEC (Binary Erasure Channel)`
.. |BER| replace:: :abbr:`BER (Bit Error Rate)`
.. |BF| replace:: :abbr:`BF (Bit Flipping)`
.. |BFER| replace:: :abbr:`BER/FER (Bit and Frame Error Rate)`
.. |BPSK| replace:: :abbr:`BPSK (Binary Phase-Shift Keying)`
.. |BM| replace:: :abbr:`BM (Berlekamp-Massey)`
.. |BP| replace:: :abbr:`BP (Belief Propagation)`
.. |BP-F| replace:: :abbr:`BP-F (Belief Propagation with Flooding scheduling)`
.. |BP-HL| replace:: :abbr:`BP-HL (Belief Propagation with Horizontal Layered scheduling)`
.. |BP-P| replace:: :abbr:`BP-P (Belief Propagation Peeling)`
.. |BP-VL| replace:: :abbr:`BP-VL (Belief Propagation with Vertical Layered scheduling)`
.. |BPS| replace:: :abbr:`BPS (Bit Per Symbol)`
.. |BSC| replace:: :abbr:`BSC (Binary Symmetric Channel)`
.. |CA| replace:: :abbr:`CA (CRC Aided)`
.. |CCSDS| replace:: :abbr:`CCSDS (Consultative Committee for Space Data Systems)`
.. |CDF| replace:: :abbr:`CDF (Cumulative Distribution Function)`
.. |CISC| replace:: :abbr:`CISC (Complex Instruction Set Computer)`
.. |CN| replace:: :abbr:`CN (Check Node)`
.. |CNs| replace:: :abbr:`CNs (Check Nodes)`
.. |codec| replace:: :abbr:`codec (coder/decoder)`
.. |codecs| replace:: :abbr:`codecs (coders/decoders)`
.. |CP| replace:: :abbr:`CP (Chase-Pyndiah)`
.. |CPM| replace:: :abbr:`CPM (Continuous Phase Modulation)`
.. |CPU| replace:: :abbr:`CPU (Central Process Unit)`
.. |CPUs| replace:: :abbr:`CPUs (Central Process Units)`
.. |CRC| replace:: :abbr:`CRC (Cyclic Redundancy Check)`
.. |CRCs| replace:: :abbr:`CRCs (Cyclic Redundancy Checks)`
.. |CSV| replace:: :abbr:`CSV (Comma-Separated Values)`
.. |DB| replace:: :abbr:`DB (Double Binary)`
.. |DE| replace:: :abbr:`DE (Density Evolution)`
.. |DVB-RCS1| replace:: :abbr:`DVB-RCS1 (Digital Video Broadcasting - Return Channel via Satellite 1)`
.. |DVB-RCS2| replace:: :abbr:`DVB-RCS2 (Digital Video Broadcasting - Return Channel via Satellite 2)`
.. |DVB-S1| replace:: :abbr:`DVB-S1 (Digital Video Broadcasting - Satellite 1)`
.. |DVB-S2| replace:: :abbr:`DVB-S2 (Digital Video Broadcasting - Satellite 2)`
.. |EOF| replace:: :abbr:`EOF | |
import datetime
import logging
import sys
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index, Integer, MetaData, String, Table, TEXT
from sqlalchemy.exc import NoSuchTableError
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import JSONType, MetadataType, TrimmedString
# Shared callable used as the default/onupdate value for timestamp columns.
now = datetime.datetime.utcnow

# Configure this migration's logger to echo everything to stdout so the
# progress of the schema change is visible when the migration runs.
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
# Renamed from "format" so the builtin format() is not shadowed.
log_format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( log_format )
handler.setFormatter( formatter )
log.addHandler( handler )
# Collects every table definition below; bound to the target database when
# the migration is applied.
metadata = MetaData()
# New tables as of changeset 2341:5498ac35eedd
# User groups. Names are unique; rows are soft-deleted via the "deleted"
# flag rather than removed.
Group_table = Table( "galaxy_group", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", String( 255 ), index=True, unique=True ),
    Column( "deleted", Boolean, index=True, default=False ) )
# Many-to-many link: users <-> groups.
UserGroupAssociation_table = Table( "user_group_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "group_id", Integer, ForeignKey( "galaxy_group.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )
# Many-to-many link: users <-> roles.
UserRoleAssociation_table = Table( "user_role_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )
# Many-to-many link: groups <-> roles.
GroupRoleAssociation_table = Table( "group_role_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "group_id", Integer, ForeignKey( "galaxy_group.id" ), index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ) )
# A named role with a free-form description and a "type" discriminator.
# Roles are granted to users/groups via the association tables above.
Role_table = Table( "role", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", String( 255 ), index=True, unique=True ),
    Column( "description", TEXT ),
    Column( "type", String( 40 ), index=True ),
    Column( "deleted", Boolean, index=True, default=False ) )
# Each *Permissions table below grants a role permission to perform
# "action" on one kind of object.
# Role permissions on a dataset.
DatasetPermissions_table = Table( "dataset_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Role permissions on a library.
LibraryPermissions_table = Table( "library_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_id", Integer, ForeignKey( "library.id" ), nullable=True, index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Role permissions on a library folder.
LibraryFolderPermissions_table = Table( "library_folder_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), nullable=True, index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Role permissions on a library dataset.
LibraryDatasetPermissions_table = Table( "library_dataset_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), nullable=True, index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Role permissions on a library dataset <-> dataset association.
LibraryDatasetDatasetAssociationPermissions_table = Table( "library_dataset_dataset_association_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# NOTE(review): index declared separately instead of index=True on the
# column, presumably to control the generated index-name length -- confirm.
Index( "ix_lddap_library_dataset_dataset_association_id", LibraryDatasetDatasetAssociationPermissions_table.c.library_dataset_dataset_association_id )
# Role permissions on a library item info record.
LibraryItemInfoPermissions_table = Table( "library_item_info_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_item_info_id", Integer, ForeignKey( "library_item_info.id" ), nullable=True, index=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Role permissions on a library item info template.
LibraryItemInfoTemplatePermissions_table = Table( "library_item_info_template_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "action", TEXT ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), nullable=True ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# NOTE(review): separate Index call, presumably to keep the generated
# index name short -- confirm.
Index( "ix_liitp_library_item_info_template_id", LibraryItemInfoTemplatePermissions_table.c.library_item_info_template_id )
# Default action/role pair applied for a user.
DefaultUserPermissions_table = Table( "default_user_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "action", TEXT ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# Default action/role pair applied for a history.
DefaultHistoryPermissions_table = Table( "default_history_permissions", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
    Column( "action", TEXT ),
    Column( "role_id", Integer, ForeignKey( "role.id" ), index=True ) )
# A dataset entry inside a library folder. Points at one of its versions
# through library_dataset_dataset_association_id (see inline comment).
LibraryDataset_table = Table( "library_dataset", metadata,
    Column( "id", Integer, primary_key=True ),
    # use_alter breaks the circular FK dependency with the association table.
    Column( "library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk" ), nullable=True, index=True ), # current version of dataset, if null, there is not a current version selected
    Column( "folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
    Column( "order_id", Integer ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", TrimmedString( 255 ), key="_name" ), # when not None/null this will supercede display in library (but not when imported into user's history?)
    Column( "info", TrimmedString( 255 ), key="_info" ), # when not None/null this will supercede display in library (but not when imported into user's history?)
    Column( "deleted", Boolean, index=True, default=False ) )
# One version of a library dataset: links a library_dataset row to a
# dataset row and carries the version's own metadata.
LibraryDatasetDatasetAssociation_table = Table( "library_dataset_dataset_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), index=True ),
    Column( "dataset_id", Integer, ForeignKey( "dataset.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    # use_alter on both copied_from_* FKs: they reference tables involved in
    # circular dependencies with this one.
    Column( "copied_from_history_dataset_association_id", Integer, ForeignKey( "history_dataset_association.id", use_alter=True, name='history_dataset_association_dataset_id_fkey' ), nullable=True ),
    Column( "copied_from_library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id", use_alter=True, name='library_dataset_dataset_association_id_fkey' ), nullable=True ),
    Column( "name", TrimmedString( 255 ) ),
    Column( "info", TrimmedString( 255 ) ),
    Column( "blurb", TrimmedString( 255 ) ),
    Column( "peek", TEXT ),
    Column( "extension", TrimmedString( 64 ) ),
    Column( "metadata", MetadataType(), key="_metadata" ),
    Column( "parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
    Column( "designation", TrimmedString( 255 ) ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "visible", Boolean ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "message", TrimmedString( 255 ) ) )
# A data library; its contents hang off root_folder_id.
Library_table = Table( "library", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "root_folder_id", Integer, ForeignKey( "library_folder.id" ), index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", String( 255 ), index=True ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ),
    Column( "description", TEXT ) )
# A folder inside a library; self-referencing parent_id forms the tree.
LibraryFolder_table = Table( "library_folder", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "parent_id", Integer, ForeignKey( "library_folder.id" ), nullable=True, index=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "name", TEXT ),
    Column( "description", TEXT ),
    Column( "order_id", Integer ),
    Column( "item_count", Integer ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "purged", Boolean, index=True, default=False ),
    Column( "genome_build", TrimmedString( 40 ) ) )
# One field of an info template: name, type (default 'string'), ordering
# and JSON-encoded options.
LibraryItemInfoTemplateElement_table = Table( "library_item_info_template_element", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "optional", Boolean, index=True, default=True ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "name", TEXT ),
    Column( "description", TEXT ),
    Column( "type", TEXT, default='string' ),
    Column( "order_id", Integer ),
    Column( "options", JSONType() ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ) ) )
# NOTE(review): the FK columns below get their indexes via separate Index
# calls, presumably to control the generated index-name length -- confirm.
Index( "ix_liite_library_item_info_template_id", LibraryItemInfoTemplateElement_table.c.library_item_info_template_id )
# A reusable template describing the info fields a library item can carry.
LibraryItemInfoTemplate_table = Table( "library_item_info_template", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "optional", Boolean, index=True, default=True ),
    Column( "deleted", Boolean, index=True, default=False ),
    Column( "name", TEXT ),
    Column( "description", TEXT ),
    Column( "item_count", Integer, default=0 ) )
# Attaches an info template to a library.
LibraryInfoTemplateAssociation_table = Table( "library_info_template_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "library_id", Integer, ForeignKey( "library.id" ), nullable=True, index=True ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ) ) )
Index( "ix_lita_library_item_info_template_id", LibraryInfoTemplateAssociation_table.c.library_item_info_template_id )
# Attaches an info template to a library folder.
LibraryFolderInfoTemplateAssociation_table = Table( "library_folder_info_template_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), nullable=True, index=True ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ) ) )
Index( "ix_lfita_library_item_info_template_id", LibraryFolderInfoTemplateAssociation_table.c.library_item_info_template_id )
# Attaches an info template to a library dataset.
LibraryDatasetInfoTemplateAssociation_table = Table( "library_dataset_info_template_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), nullable=True, index=True ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ) ) )
Index( "ix_ldita_library_item_info_template_id", LibraryDatasetInfoTemplateAssociation_table.c.library_item_info_template_id )
# Attaches an info template to a library dataset <-> dataset association.
LibraryDatasetDatasetInfoTemplateAssociation_table = Table( "library_dataset_dataset_info_template_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "create_time", DateTime, default=now ),
    Column( "update_time", DateTime, default=now, onupdate=now ),
    Column( "library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True ),
    Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ) ) )
Index( "ix_lddita_library_dataset_dataset_association_id", LibraryDatasetDatasetInfoTemplateAssociation_table.c.library_dataset_dataset_association_id )
Index( "ix_lddita_library_item_info_template_id", LibraryDatasetDatasetInfoTemplateAssociation_table.c.library_item_info_template_id )
LibraryItemInfoElement_table = Table( "library_item_info_element", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "contents", JSONType() ),
Column( "library_item_info_id", Integer, ForeignKey( "library_item_info.id" ), index=True ),
Column( "library_item_info_template_element_id", | |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based XLNet Model."""
from absl import logging
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling.layers import transformer_xl
_SEG_ID_CLS = 2
def _create_causal_attention_mask(
    seq_length,
    memory_length,
    dtype=tf.float32,
    same_length=False):
  """Creates a causal attention mask with a single-sided context.

  When applying the attention mask in `MultiHeadRelativeAttention`, the
  attention scores are of shape `[(batch dimensions), S, S + M]`, where:
  - S = sequence length.
  - M = memory length.

  In a simple case where S = 2, M = 1, here is a simple illustration of the
  `attention_scores` matrix, where `a` represents an attention function:

  token_0 [[a(token_0, mem_0) a(token_0, token_0) a(token_0, token_1)],
  token_1  [a(token_1, mem_0) a(token_1, token_0) a(token_1, token_1)]]
             mem_0             token_0             token_1

  For uni-directional attention, we want to mask out values in the attention
  scores that represent a(token_i, token_j) where j > i. We can achieve this by
  concatenating 0s (representing memory positions) with a strictly upper
  triangular matrix of 1s.

  We then flip the matrix values in order to match the representation where
  real values are 1s.

  Args:
    seq_length: int, The length of each sequence.
    memory_length: int, The length of memory blocks.
    dtype: dtype of the mask.
    same_length: bool, whether to use the same attention length for each token.

  Returns:
    A unidirectional attention mask of shape
    `[seq_length, seq_length + memory_length]`. E.g.:

    [[1. 1. 1. 0. 0. 0.]
     [1. 1. 1. 1. 0. 0.]
     [1. 1. 1. 1. 1. 0.]
     [1. 1. 1. 1. 1. 1.]]
  """
  ones_matrix = tf.ones([seq_length, seq_length], dtype=dtype)
  # band_part(x, 0, -1) keeps the upper triangle incl. the diagonal;
  # subtracting the diagonal below leaves the *strictly* upper triangle,
  # i.e. 1s exactly at future positions j > i.
  upper_triangular = tf.linalg.band_part(ones_matrix, 0, -1)
  diagonal = tf.linalg.band_part(ones_matrix, 0, 0)
  # Memory positions are never masked here, hence the zero block.
  padding = tf.zeros([seq_length, memory_length], dtype=dtype)
  causal_attention_mask = tf.concat(
      [padding, upper_triangular - diagonal], 1)
  if same_length:
    # Additionally mask tokens *before* the sliding context window so every
    # token sees the same number of positions.
    lower_triangular = tf.linalg.band_part(ones_matrix, -1, 0)
    strictly_lower_triangular = lower_triangular - diagonal
    causal_attention_mask = tf.concat(
        [causal_attention_mask[:, :seq_length] + strictly_lower_triangular,
         causal_attention_mask[:, seq_length:]], 1)
  # Flip 0s/1s so the result matches the convention shown in the docstring
  # example above.
  return 1 - causal_attention_mask
def _combine_masks(mask1, mask2, dtype, how="and"):
  """Combines two attention masks element-wise.

  Use "and" when intersecting two existing masks.
  Use "or" when flipping a few positions back to "real".

  Args:
    mask1: tf.Tensor, input mask 1
    mask2: tf.Tensor, input mask 2
    dtype: tf.dtype of the returned tensor.
    how: Which logical operation should run ("and"; anything else means "or").

  Returns:
    The combined input masks, cast to `dtype`.
  """
  combine = tf.math.logical_and if how == "and" else tf.math.logical_or
  mask1_bool = tf.cast(mask1, tf.bool)
  mask2_bool = tf.cast(mask2, tf.bool)
  return tf.cast(combine(mask1_bool, mask2_bool), dtype=dtype)
def _compute_attention_mask(
    input_mask,
    permutation_mask,
    attention_type,
    seq_length,
    memory_length,
    batch_size,
    dtype=tf.float32):
  """Combines all input attention masks for XLNet.

  In XLNet modeling, `0` represents tokens that can be attended, and `1`
  represents tokens that cannot be attended.

  For XLNet pre-training and fine tuning, there are a few masks used:
  - Causal attention mask: If the attention type is unidirectional, then all
    tokens after the current position cannot be attended to.
  - Input mask: when generating data, padding is added to a max sequence length
    to make all sequences the same length. This masks out real tokens (`0`)
    from padding tokens (`1`).
  - Permutation mask: during XLNet pretraining, the input sequence is
    factorized into a factorization sequence `z`. During partial prediction,
    `z` is split at a cutting point `c` (an index of the factorization
    sequence) and prediction is only applied to all tokens after `c`.
    Therefore, tokens at factorization positions `i` > `c` can be attended to
    and tokens at factorization positions `i` <= `c` cannot be attended to.

  This function broadcasts and combines all attention masks to produce the
  query attention mask and the content attention mask.

  Args:
    input_mask: Tensor, the input mask related to padding. Input shape:
      `(B, S)`.
    permutation_mask: Tensor, the permutation mask used in partial prediction.
      Input shape: `(B, S, S)`.
    attention_type: str, the attention type. Can be "uni" (directional) or
      "bi" (directional).
    seq_length: int, the length of each sequence.
    memory_length: int the length of memory blocks.
    batch_size: int, the batch size.
    dtype: The dtype of the masks.

  Returns:
    attention_mask, content_attention_mask: The position and context-based
      attention masks and content attention masks, respectively.
  """
  attention_mask = None
  # `1` values mean do not attend to this position.
  if attention_type == "uni":
    causal_attention_mask = _create_causal_attention_mask(
        seq_length=seq_length,
        memory_length=memory_length,
        dtype=dtype)
    # Add singleton batch and head dimensions so it broadcasts against the
    # per-batch data mask below.
    causal_attention_mask = causal_attention_mask[None, None, :, :]
  # `causal_attention_mask`: [1, 1, S, S + M]
  # input_mask: [B, S]
  # permutation_mask: [B, S, S]
  # Merge the padding mask and the permutation mask into one "data" mask;
  # either (or both) may be absent.
  if input_mask is not None and permutation_mask is not None:
    data_mask = _combine_masks(input_mask[:, None, :], permutation_mask, dtype)
  elif input_mask is not None and permutation_mask is None:
    data_mask = input_mask[:, None, :]
  elif input_mask is None and permutation_mask is not None:
    data_mask = permutation_mask
  else:
    data_mask = None
  # data_mask: [B, S, S] or [B, 1, S]
  if data_mask is not None:
    # All positions within state can be attended to.
    state_mask = tf.ones([batch_size, tf.shape(data_mask)[1], memory_length],
                         dtype=dtype)
    # state_mask: [B, 1, M] or [B, S, M]
    data_mask = tf.concat([state_mask, data_mask], 2)
    # data_mask: [B, 1, S + M] or [B, S, S + M]
    if attention_type == "uni":
      attention_mask = _combine_masks(causal_attention_mask,
                                      data_mask[:, None, :, :],
                                      dtype=dtype)
    else:
      attention_mask = data_mask[:, None, :, :]
  if attention_mask is not None:
    # Construct the content attention mask.
    # This ensures that the mask allows the model to attend to positions in
    # content positions (e.g. the content diagonal).
    non_target_mask = tf.concat(
        [tf.zeros([seq_length, memory_length], dtype=dtype),
         tf.eye(seq_length, dtype=dtype)], axis=-1)
    content_attention_mask = _combine_masks(
        attention_mask, non_target_mask, how="or", dtype=dtype)
  else:
    content_attention_mask = None
  return attention_mask, content_attention_mask
def _compute_segment_matrix(
    segment_ids,
    memory_length,
    batch_size,
    use_cls_mask):
  """Computes the segment embedding matrix.

  XLNet introduced segment-based attention for attention calculations. This
  extends the idea of relative encodings in Transformer XL by considering
  whether or not two positions are within the same segment, rather than
  which segments they come from.

  This function generates a segment matrix by broadcasting provided segment IDs
  in two different dimensions and checking where values are equal. This output
  matrix shows `True` whenever two tokens are NOT in the same segment and
  `False` whenever they are.

  Args:
    segment_ids: A Tensor of size `[B, S]` that represents which segment
      each token belongs to.
    memory_length: int, the length of memory blocks.
    batch_size: int, the batch size.
    use_cls_mask: bool, whether or not to introduce cls mask in
      input sequences.

  Returns:
    A boolean Tensor of size `[B, S, S + M]`, where `True` means that two
    tokens are NOT in the same segment, and `False` means they are in the same
    segment.
  """
  if segment_ids is None:
    return None
  # Memory positions are prepended with segment ID 0 so the comparison below
  # covers all S + M attendable positions.
  memory_padding = tf.zeros([batch_size, memory_length],
                            dtype=segment_ids.dtype)
  padded_segment_ids = tf.concat([memory_padding, segment_ids], 1)
  # segment_ids: [B, S]
  # padded_segment_ids: [B, S + M]
  if use_cls_mask:
    # `1` indicates not in the same segment.
    # Target result: [B, S, S + M]
    # segment_ids: [B, S]
    # padded_segment_ids: [B, S + M]
    # Positions carrying the special CLS segment ID are treated as being in
    # the same segment as everything (class_index_matrix ORed in below).
    broadcasted_segment_class_indices = (
        tf.equal(segment_ids,
                 tf.constant([_SEG_ID_CLS]))[:, :, None])
    broadcasted_padded_class_indices = (
        tf.equal(
            padded_segment_ids,
            tf.constant([_SEG_ID_CLS]))[:, None, :])
    class_index_matrix = tf.logical_or(broadcasted_segment_class_indices,
                                       broadcasted_padded_class_indices)
    # NOTE: in this branch the matrix is NOT negated, unlike the `else`
    # branch -- see the TODO below about the legacy mismatch.
    segment_matrix = tf.equal(segment_ids[:, :, None],
                              padded_segment_ids[:, None, :])
    segment_matrix = tf.logical_or(class_index_matrix, segment_matrix)
  else:
    # TODO(allencwang) - address this legacy mismatch from `use_cls_mask`.
    segment_matrix = tf.logical_not(
        tf.equal(segment_ids[:, :, None], padded_segment_ids[:, None, :]))
  return segment_matrix
def _compute_positional_encoding(
attention_type,
position_encoding_layer,
hidden_size,
batch_size,
total_length,
seq_length,
clamp_length,
bi_data,
dtype=tf.float32):
"""Computes the relative position encoding.
Args:
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
position_encoding_layer: An instance of `RelativePositionEncoding`.
hidden_size: int, the hidden size.
batch_size: int, the batch size.
total_length: int, the sequence length added to the memory length.
seq_length: int, the length of each sequence.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
| |
>>> assert os.path.isfile('out.err')
Test groupr and errorr:
>>> out = endf6.get_errorr(verbose=True, groupr=True)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
125 0 0 /
0.005 0. /
0/
groupr
-21 -22 0 -23 /
125 2 0 2 0 1 1 0 /
'sandy runs groupr' /
0.0/
10000000000.0/
3/
0/
0/
errorr
-21 0 -23 33 0 /
125 2 2 0 1 /
0 0.0 /
0 33 1/
stop
Test groupr and errorr for neutron energy grids:
>>> out = endf6.get_errorr(ek_errorr=sandy.energy_grids.CASMO12, ek_groupr=sandy.energy_grids.CASMO12, verbose=True, groupr=True)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
125 0 0 /
0.005 0. /
0/
groupr
-21 -22 0 -23 /
125 1 0 2 0 1 1 0 /
'sandy runs groupr' /
0.0/
10000000000.0/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
3/
0/
0/
errorr
-21 0 -23 33 0 /
125 1 2 0 1 /
0 0.0 /
0 33 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
stop
Test groupr and errorr for neutron and photons energy grids:
>>> out = endf6.get_errorr(ek_groupr=sandy.energy_grids.CASMO12, ek_errorr=sandy.energy_grids.CASMO12, ep=sandy.energy_grids.CASMO12, verbose=True, groupr=True)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
125 0 0 /
0.005 0. /
0/
groupr
-21 -22 0 -23 /
125 1 1 2 0 1 1 0 /
'sandy runs groupr' /
0.0/
10000000000.0/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
3/
0/
0/
errorr
-21 0 -23 33 0 /
125 1 2 0 1 /
0 0.0 /
0 33 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
stop
U-238 test because it contains mubar, xs, chi and nubar:
>>> endf6 = sandy.get_endf6_file('jeff_33','xs', 922380)
>>> out = endf6.get_errorr(ek_errorr=sandy.energy_grids.CASMO12, ek_groupr=sandy.energy_grids.CASMO12, verbose=True, err=1)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
9237 0 0 /
1 0. /
0/
groupr
-21 -22 0 -23 /
9237 1 0 2 0 1 1 0 /
'sandy runs groupr' /
0.0/
10000000000.0/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
3/
3 251 'mubar' /
5/
5 18 'chi' /
0/
0/
errorr
-21 0 -23 31 0 /
9237 1 2 0 1 /
0 0.0 /
0 31 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
errorr
-21 0 -23 33 0 /
9237 1 2 0 1 /
0 0.0 /
0 33 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
errorr
-21 0 -23 35 0 /
9237 1 2 0 1 /
0 0.0 /
0 35 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
errorr
-21 0 -23 34 0 /
9237 1 2 0 1 /
0 0.0 /
0 34 1/
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
stop
Test spectrum:
>>> spect = [1.000000e-5, 2.00000000, 3.000000e-2, 2.00000000, 5.800000e-2, 4.00000000, 3, 1]
>>> out = endf6.get_errorr(spectrum_errorr=spect, ek_errorr=[1.000000e-5, 3.000000e-2, 5.800000e-2, 3], verbose=True, nubar=False, chi=False, mubar=False)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
9237 0 0 /
0.005 0. /
0/
errorr
-21 -22 0 33 0 /
9237 1 1 0 1 /
0 0.0 /
0 33 1/
3 /
1.00000e-05 3.00000e-02 5.80000e-02 3.00000e+00 /
0.00000000 0.00000000 0 0 1 4
4 1
1.000000-5 2.00000000 3.000000-2 2.00000000 5.800000-2 4.00000000
3.00000000 1.00000000
/
stop
>>> spect_g = [1.000000e-5, 1.00000000, 3.000000e-2, 2.00000000, 5.800000e-2, 3.00000000, 3, 2]
>>> out = endf6.get_errorr(spectrum_errorr=spect, spectrum_groupr=spect_g, ek_errorr=[1.000000e-5, 3.000000e-2, 5.800000e-2, 3], ek_groupr=[1.000000e-5, 3.000000e-2, 5.800000e-2, 3], verbose=True, nubar=False, chi=False, mubar=False, groupr=True)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
9237 0 0 /
0.005 0. /
0/
groupr
-21 -22 0 -23 /
9237 1 0 1 0 1 1 0 /
'sandy runs groupr' /
0.0/
10000000000.0/
3 /
1.00000e-05 3.00000e-02 5.80000e-02 3.00000e+00 /
0.00000000 0.00000000 0 0 1 4
4 1
1.000000-5 1.00000000 3.000000-2 2.00000000 5.800000-2 3.00000000
3.00000000 2.00000000
/
3/
0/
0/
errorr
-21 0 -23 33 0 /
9237 1 1 0 1 /
0 0.0 /
0 33 1/
3 /
1.00000e-05 3.00000e-02 5.80000e-02 3.00000e+00 /
0.00000000 0.00000000 0 0 1 4
4 1
1.000000-5 2.00000000 3.000000-2 2.00000000 5.800000-2 4.00000000
3.00000000 1.00000000
/
stop
Test irespr:
out = endf6.get_errorr(spectrum_errorr=spect, ek_errorr=[1.000000e-5, 3.000000e-2, 5.800000e-2, 3], verbose=True, nubar=False, chi=False, mubar=False, irespr=0)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
125 0 0 /
0.005 0. /
0/
errorr
-21 -22 0 33 0 /
125 1 1 0 1 /
0 0.0 /
0 33 0/
3 /
1.00000e-05 3.00000e-02 5.80000e-02 3.00000e+00 /
1.000000-5 2.00000000 3.000000-2 2.00000000 5.800000-2 4.00000000 3.00000000 1.00000000
/
stop
Test for MT:
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> out = endf6.get_errorr(verbose=True, mt=[1, 2], ek_errorr=sandy.energy_grids.CASMO12)
moder
20 -21 /
reconr
-21 -22 /
'sandy runs njoy'/
125 0 0 /
0.005 0. /
0/
errorr
-21 -22 0 33 0 /
125 1 2 0 1 /
0 0.0 /
1 33 1/
2 0 /
1 2 /
12 /
1.00000e-05 3.00000e-02 5.80000e-02 1.40000e-01 2.80000e-01 3.50000e-01 6.25000e-01 4.00000e+00 4.80520e+01 5.53000e+03 8.21000e+05 2.23100e+06 1.00000e+07 /
stop
Keywords `mt` and `groupr` are incompatible
>>> with pytest.raises(sandy.SandyError):
... sandy.get_endf6_file("jeff_33", "xs", 10010).get_errorr(err=1, mt=1, groupr=True)
Test content of output `Errorr` file
>>> out = sandy.get_endf6_file('jeff_33', "xs", 922350).get_errorr(err=1., irespr=0, mubar=False, chi=False)
>>> keys = [(9228, 1, 451), (9228, 3, 456), (9228, 33, 456), (9228, 3, 1), (9228, 3, 2), (9228, 3, 4), (9228, 3, 16), (9228, 3, 17), (9228, 3, 18), (9228, 3, 37), (9228, 3, 102), (9228, 33, 1), (9228, 33, 2), (9228, 33, 4), (9228, 33, 16), (9228, 33, 17), (9228, 33, 18), (9228, 33, 37), (9228, 33, 102)]
>>> for key in keys: assert key in out.data
"""
kwds_njoy = kwargs.copy()
if float(temperature) == 0:
kwds_njoy["broadr"] = False
kwds_njoy["thermr"] = False
kwds_njoy["gaspr"] = False
kwds_njoy["heatr"] = False
kwds_njoy["purr"] = False
kwds_njoy["unresr"] = False
kwds_njoy['keep_pendf'] = False
else:
kwds_njoy["broadr"] = True
kwds_njoy["thermr"] = kwds_njoy.get("thermr", False)
kwds_njoy["gaspr"] = kwds_njoy.get("gaspr", False)
kwds_njoy["heatr"] = kwds_njoy.get("heatr", False)
kwds_njoy["purr"] = kwds_njoy.get("purr", False)
kwds_njoy["unresr"] = kwds_njoy.get("unresr", False)
kwds_njoy['keep_pendf'] = kwds_njoy.get('keep_pendf', False)
cov_info = self.covariance_info(nubar=nubar, xs=xs,
mubar=mubar, chi=chi)
if not np.any(list(cov_info.values())):
return # no covariance found or wanted
kwds_njoy.update(cov_info)
# Mandatory groupr module activation
groupr_ = True if (kwds_njoy["nubar"] or kwds_njoy["chi"] or "ek_groupr" in kwds_njoy or "spectrum_groupr" in kwds_njoy) else groupr
with TemporaryDirectory() as td:
endf6file = os.path.join(td, "endf6_file")
self.to_file(endf6file)
outputs = sandy.njoy.process(
endf6file,
errorr=True,
acer=False,
verbose=verbose,
temperatures=[temperature],
suffixes=[0],
err=err,
groupr=groupr_,
**kwds_njoy,
)[2]
seq = map(sandy.Errorr.from_file, outputs.values())
errorr = reduce(lambda x, y: x.merge(y), seq)
if to_file:
errorr.to_file(to_file)
return errorr
def get_gendf(self,
temperature=293.6,
njoy=None,
to_file=None,
verbose=False,
err=0.005,
nubar=False,
xs=True,
mubar=False,
chi=False,
**kwargs):
"""
Process `Endf6` instance into a Gendf file using NJOY.
Parameters
----------
temperature : `float`, optional, default is `293.6`.
temperature of the cross sections in K.
If not given, stop the processing after RECONR (before BROADR).
njoy : `str`, optional, default is `None`
NJOY executable, if `None` search in the system path.
to_file : `str`, optional, default is `None`
if not `None` write processed GENDF data to file.
The name of the GENDF file is the keyword argument.
verbose : `bool`, optional, default is `False`
flag to print NJOY input file to screen before running the
executable.
broadr : `bool`, optional, default is `True`
option to | |
<reponame>pulumi/pulumi-f5bigip<filename>sdk/python/pulumi_f5bigip/big_iq_as3.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['BigIqAs3Args', 'BigIqAs3']
@pulumi.input_type
class BigIqAs3Args:
    def __init__(__self__, *,
                 as3_json: pulumi.Input[str],
                 bigiq_address: pulumi.Input[str],
                 bigiq_password: pulumi.Input[str],
                 bigiq_user: pulumi.Input[str],
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a BigIqAs3 resource.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_port: The registration key pool to use (NOTE(review): presumably the BIG-IQ port -- upstream doc looks copy-pasted; confirm)
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        pulumi.set(__self__, "as3_json", as3_json)
        pulumi.set(__self__, "bigiq_address", bigiq_address)
        pulumi.set(__self__, "bigiq_password", bigiq_password)
        pulumi.set(__self__, "bigiq_user", bigiq_user)
        if bigiq_login_ref is not None:
            pulumi.set(__self__, "bigiq_login_ref", bigiq_login_ref)
        if bigiq_port is not None:
            pulumi.set(__self__, "bigiq_port", bigiq_port)
        if bigiq_token_auth is not None:
            pulumi.set(__self__, "bigiq_token_auth", bigiq_token_auth)
        if tenant_list is not None:
            pulumi.set(__self__, "tenant_list", tenant_list)
    @property
    @pulumi.getter(name="as3Json")
    def as3_json(self) -> pulumi.Input[str]:
        """
        Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        """
        return pulumi.get(self, "as3_json")
    @as3_json.setter
    def as3_json(self, value: pulumi.Input[str]):
        pulumi.set(self, "as3_json", value)
    @property
    @pulumi.getter(name="bigiqAddress")
    def bigiq_address(self) -> pulumi.Input[str]:
        """
        Address of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_address")
    @bigiq_address.setter
    def bigiq_address(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_address", value)
    @property
    @pulumi.getter(name="bigiqPassword")
    def bigiq_password(self) -> pulumi.Input[str]:
        """
        Password of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_password")
    @bigiq_password.setter
    def bigiq_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_password", value)
    @property
    @pulumi.getter(name="bigiqUser")
    def bigiq_user(self) -> pulumi.Input[str]:
        """
        User name of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_user")
    @bigiq_user.setter
    def bigiq_user(self, value: pulumi.Input[str]):
        pulumi.set(self, "bigiq_user", value)
    @property
    @pulumi.getter(name="bigiqLoginRef")
    def bigiq_login_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Login reference for token authentication (see BIG-IQ REST docs for details)
        """
        return pulumi.get(self, "bigiq_login_ref")
    @bigiq_login_ref.setter
    def bigiq_login_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_login_ref", value)
    @property
    @pulumi.getter(name="bigiqPort")
    def bigiq_port(self) -> Optional[pulumi.Input[str]]:
        """
        The registration key pool to use
        """
        return pulumi.get(self, "bigiq_port")
    @bigiq_port.setter
    def bigiq_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_port", value)
    @property
    @pulumi.getter(name="bigiqTokenAuth")
    def bigiq_token_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable to use an external authentication source (LDAP, TACACS, etc)
        """
        return pulumi.get(self, "bigiq_token_auth")
    @bigiq_token_auth.setter
    def bigiq_token_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bigiq_token_auth", value)
    @property
    @pulumi.getter(name="tenantList")
    def tenant_list(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Tenant
        """
        return pulumi.get(self, "tenant_list")
    @tenant_list.setter
    def tenant_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_list", value)
@pulumi.input_type
class _BigIqAs3State:
    def __init__(__self__, *,
                 as3_json: Optional[pulumi.Input[str]] = None,
                 bigiq_address: Optional[pulumi.Input[str]] = None,
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_password: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 bigiq_user: Optional[pulumi.Input[str]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering BigIqAs3 resources.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_port: The registration key pool to use (NOTE(review): presumably the BIG-IQ port -- upstream doc looks copy-pasted; confirm)
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        if as3_json is not None:
            pulumi.set(__self__, "as3_json", as3_json)
        if bigiq_address is not None:
            pulumi.set(__self__, "bigiq_address", bigiq_address)
        if bigiq_login_ref is not None:
            pulumi.set(__self__, "bigiq_login_ref", bigiq_login_ref)
        if bigiq_password is not None:
            pulumi.set(__self__, "bigiq_password", bigiq_password)
        if bigiq_port is not None:
            pulumi.set(__self__, "bigiq_port", bigiq_port)
        if bigiq_token_auth is not None:
            pulumi.set(__self__, "bigiq_token_auth", bigiq_token_auth)
        if bigiq_user is not None:
            pulumi.set(__self__, "bigiq_user", bigiq_user)
        if tenant_list is not None:
            pulumi.set(__self__, "tenant_list", tenant_list)
    @property
    @pulumi.getter(name="as3Json")
    def as3_json(self) -> Optional[pulumi.Input[str]]:
        """
        Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        """
        return pulumi.get(self, "as3_json")
    @as3_json.setter
    def as3_json(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "as3_json", value)
    @property
    @pulumi.getter(name="bigiqAddress")
    def bigiq_address(self) -> Optional[pulumi.Input[str]]:
        """
        Address of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_address")
    @bigiq_address.setter
    def bigiq_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_address", value)
    @property
    @pulumi.getter(name="bigiqLoginRef")
    def bigiq_login_ref(self) -> Optional[pulumi.Input[str]]:
        """
        Login reference for token authentication (see BIG-IQ REST docs for details)
        """
        return pulumi.get(self, "bigiq_login_ref")
    @bigiq_login_ref.setter
    def bigiq_login_ref(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_login_ref", value)
    @property
    @pulumi.getter(name="bigiqPassword")
    def bigiq_password(self) -> Optional[pulumi.Input[str]]:
        """
        Password of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_password")
    @bigiq_password.setter
    def bigiq_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_password", value)
    @property
    @pulumi.getter(name="bigiqPort")
    def bigiq_port(self) -> Optional[pulumi.Input[str]]:
        """
        The registration key pool to use
        """
        return pulumi.get(self, "bigiq_port")
    @bigiq_port.setter
    def bigiq_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_port", value)
    @property
    @pulumi.getter(name="bigiqTokenAuth")
    def bigiq_token_auth(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable to use an external authentication source (LDAP, TACACS, etc)
        """
        return pulumi.get(self, "bigiq_token_auth")
    @bigiq_token_auth.setter
    def bigiq_token_auth(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "bigiq_token_auth", value)
    @property
    @pulumi.getter(name="bigiqUser")
    def bigiq_user(self) -> Optional[pulumi.Input[str]]:
        """
        User name of the BIG-IQ to which your target BIG-IP is attached
        """
        return pulumi.get(self, "bigiq_user")
    @bigiq_user.setter
    def bigiq_user(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bigiq_user", value)
    @property
    @pulumi.getter(name="tenantList")
    def tenant_list(self) -> Optional[pulumi.Input[str]]:
        """
        Name of Tenant
        """
        return pulumi.get(self, "tenant_list")
    @tenant_list.setter
    def tenant_list(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_list", value)
class BigIqAs3(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 as3_json: Optional[pulumi.Input[str]] = None,
                 bigiq_address: Optional[pulumi.Input[str]] = None,
                 bigiq_login_ref: Optional[pulumi.Input[str]] = None,
                 bigiq_password: Optional[pulumi.Input[str]] = None,
                 bigiq_port: Optional[pulumi.Input[str]] = None,
                 bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
                 bigiq_user: Optional[pulumi.Input[str]] = None,
                 tenant_list: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        `BigIqAs3` provides details about a BIG-IQ AS3 resource.
        This resource is helpful to configure an AS3 declarative JSON on a BIG-IP through BIG-IQ.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        # Example Usage for json file
        exampletask = f5bigip.BigIqAs3("exampletask",
            as3_json=(lambda path: open(path).read())("bigiq_example.json"),
            bigiq_address="xx.xx.xxx.xx",
            bigiq_password="<PASSWORD>",
            bigiq_user="xxxxx")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] as3_json: Path/Filename of Declarative AS3 JSON which is a json file used with builtin ```file``` function
        :param pulumi.Input[str] bigiq_address: Address of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_login_ref: Login reference for token authentication (see BIG-IQ REST docs for details)
        :param pulumi.Input[str] bigiq_password: Password of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] bigiq_port: The registration key pool to use (NOTE(review): presumably the BIG-IQ port -- upstream doc looks copy-pasted; confirm)
        :param pulumi.Input[bool] bigiq_token_auth: Enable to use an external authentication source (LDAP, TACACS, etc)
        :param pulumi.Input[str] bigiq_user: User name of the BIG-IQ to which your target BIG-IP is attached
        :param pulumi.Input[str] tenant_list: Name of Tenant
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BigIqAs3Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        `BigIqAs3` provides details about a BIG-IQ AS3 resource.
        This resource is helpful to configure an AS3 declarative JSON on a BIG-IP through BIG-IQ.
        This overload takes all resource properties bundled in a single
        `BigIqAs3Args` object instead of individual keyword arguments.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_f5bigip as f5bigip
        # Example Usage for json file
        exampletask = f5bigip.BigIqAs3("exampletask",
            as3_json=(lambda path: open(path).read())("bigiq_example.json"),
            bigiq_address="xx.xx.xxx.xx",
            bigiq_password="<PASSWORD>",
            bigiq_user="xxxxx")
        ```
        :param str resource_name: The name of the resource.
        :param BigIqAs3Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(BigIqAs3Args, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
as3_json: Optional[pulumi.Input[str]] = None,
bigiq_address: Optional[pulumi.Input[str]] = None,
bigiq_login_ref: Optional[pulumi.Input[str]] = None,
bigiq_password: Optional[pulumi.Input[str]] = None,
bigiq_port: Optional[pulumi.Input[str]] = None,
bigiq_token_auth: Optional[pulumi.Input[bool]] = None,
bigiq_user: Optional[pulumi.Input[str]] = None,
tenant_list: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts | |
fh = next(values.iteritems())
fh.close()
del values[filename]
def open_files(file_format=None):
    '''Return the open files containing sub-arrays of master data arrays.

    By default all such files are returned, but the selection may be
    restricted to files of a particular format.

    .. seealso:: `cf.close_files`, `cf.close_one_file`,
                 `cf.open_files_threshold_exceeded`

    :Parameters:

        file_format: `str`, optional
            Only return files of the given format. Recognised formats
            are ``'netCDF'`` and ``'PP'``. By default all files are
            returned.

    :Returns:

        `dict`
            If *file_format* is set then return a dictionary of file
            names of the specified format and their open file
            objects. If *file_format* is not set then return a
            dictionary for which each key is a file format whose value
            is the dictionary that would have been returned if the
            *file_format* parameter was set.

    **Examples:**

    >>> cf.open_files()
    {'netCDF': {'file1.nc': <netCDF4.Dataset at 0x187b6d0>}}
    >>> cf.open_files('netCDF')
    {'file1.nc': <netCDF4.Dataset at 0x187b6d0>}
    >>> cf.open_files('PP')
    {}

    '''
    if file_format is not None:
        # Return a copy so callers can't mutate the registry through the
        # returned dictionary.
        if file_format in _file_to_fh:
            return _file_to_fh[file_format].copy()
        return {}
    # Bug fix: the original called dict.iteritems(), which is Python 2
    # only and raises AttributeError on Python 3; dict.items() behaves
    # identically for this purpose (and also works on Python 2).
    return {fmt: values.copy() for fmt, values in _file_to_fh.items()}
def ufunc(name, x, *args, **kwargs):
    '''Apply the in-place method *name* to a copy of *x* and return it.

    The variable must have a `!copy` method and a method called
    *name*. Any optional positional and keyword arguments are passed
    unchanged to the variable's *name* method. The input variable
    itself is left unmodified.

    :Parameters:

        name: `str`
            The name of the method to apply.

        x:
            The input variable.

        args, kwargs:
            Passed through to the *name* method.

    :Returns:

        A new variable with size 1 axes inserted into the data
        array.

    '''
    result = x.copy()
    method = getattr(result, name)
    method(*args, **kwargs)
    return result
def _numpy_allclose(a, b, rtol=None, atol=None):
    '''Return True if two broadcastable arrays have equal values to
    within numerical tolerance, and False otherwise.

    The tolerance values are positive, typically very small numbers. The
    relative difference (``rtol * abs(b)``) and the absolute difference
    ``atol`` are added together to compare against the absolute difference
    between ``a`` and ``b``.

    :Parameters:

        a, b : array_like
            Input arrays to compare.

        atol : float, optional
            The absolute tolerance for all numerical comparisons, By
            default the value returned by the `ATOL` function is used.

        rtol : float, optional
            The relative tolerance for all numerical comparisons, By
            default the value returned by the `RTOL` function is used.

    :Returns:

        `bool`
            Returns True if the arrays are equal, otherwise False.

    **Examples:**

    >>> cf._numpy_allclose([1, 2], [1, 2])
    True
    >>> cf._numpy_allclose(numpy.array([1, 2]), numpy.array([1, 2]))
    True
    >>> cf._numpy_allclose([1, 2], [1, 2, 3])
    False
    >>> cf._numpy_allclose([1, 2], [1, 4])
    False
    >>> a = numpy.ma.array([1])
    >>> b = numpy.ma.array([2])
    >>> a[0] = numpy.ma.masked
    >>> b[0] = numpy.ma.masked
    >>> cf._numpy_allclose(a, b)
    True

    '''
    # NOTE: some numpy FutureWarnings originate from the comparisons below.
    a_masked = _numpy_ma_isMA(a)
    b_masked = _numpy_ma_isMA(b)

    if not a_masked and not b_masked:
        # Neither input is a masked array: plain comparison, falling
        # back to exact equality if allclose can't handle the dtypes.
        try:
            return _x_numpy_allclose(a, b, rtol=rtol, atol=atol)
        except (IndexError, NotImplementedError, TypeError):
            return _numpy_all(a == b)

    if a_masked != b_masked:
        # Exactly one input is masked: considered unequal.
        return False

    if (a.mask != b.mask).any():
        # Both masked but with different masks.
        return False

    try:
        return _numpy_ma_allclose(a, b, rtol=rtol, atol=atol)
    except (IndexError, NotImplementedError, TypeError):
        result = _numpy_ma_all(a == b)
        # A fully-masked comparison result counts as equal.
        return True if result is _numpy_ma_masked else result
def _numpy_isclose(a, b, rtol=None, atol=None):
    '''Return a boolean array where two broadcastable arrays are
    element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers. The
    relative difference (``rtol * abs(b)``) and the absolute difference
    ``atol`` are added together to compare against the absolute difference
    between ``a`` and ``b``.

    :Parameters:

        a, b: array_like
            Input arrays to compare.

        atol: `float`, optional
            The absolute tolerance for all numerical comparisons, By
            default the value returned by the `ATOL` function is used.

        rtol: `float`, optional
            The relative tolerance for all numerical comparisons, By
            default the value returned by the `RTOL` function is used.

    :Returns:

        `numpy.ndarray`

    '''
    try:
        result = _x_numpy_isclose(a, b, rtol=rtol, atol=atol)
    except (IndexError, NotImplementedError, TypeError):
        # Fall back to exact element-wise equality for dtypes that
        # isclose cannot handle.
        result = (a == b)
    return result
def parse_indices(shape, indices, cyclic=False, reverse=False,
envelope=False, mask=False):
'''TODO
:Parameters:
shape: sequence of `ints`
indices: `tuple` (not a `list`!)
:Returns:
`list` [, `dict`]
**Examples:**
>>> cf.parse_indices((5, 8), ([1, 2, 4, 6],))
[array([1, 2, 4, 6]), slice(0, 8, 1)]
>>> cf.parse_indices((5, 8), ([2, 4, 6],))
[slice(2, 7, 2), slice(0, 8, 1)]
'''
parsed_indices = []
roll = {}
flip = []
compressed_indices = []
mask_indices = []
if not isinstance(indices, tuple):
indices = (indices,)
if mask and indices:
arg0 = indices[0]
if isinstance(arg0, str) and arg0 == 'mask':
mask_indices = indices[1]
indices = indices[2:]
#--- End: if
# Initialize the list of parsed indices as the input indices with any
# Ellipsis objects expanded
length = len(indices)
n = len(shape)
ndim = n
for index in indices:
if index is Ellipsis:
m = n-length+1
parsed_indices.extend([slice(None)] * m)
n -= m
else:
parsed_indices.append(index)
n -= 1
length -= 1
len_parsed_indices = len(parsed_indices)
if ndim and len_parsed_indices > ndim:
raise IndexError("Invalid indices {} for array with shape {}".format(
parsed_indices, shape))
if len_parsed_indices < ndim:
parsed_indices.extend([slice(None)]*(ndim-len_parsed_indices))
if not ndim and parsed_indices:
## If data is scalar then allow it to be indexed with an
## equivalent to [0]
#if (len_parsed_indices == 1 and
# parsed_indices[0] in (0,
# -1,
# slice(0, 1),
# slice(-1, None, -1),
# slice(None, None, None))):
# parsed_indices = []
#else:
raise IndexError("Scalar array can only be indexed with () or Ellipsis")
#--- End: if
for i, (index, size) in enumerate(zip(parsed_indices, shape)):
is_slice = False
if isinstance(index, slice):
# --------------------------------------------------------
# Index is a slice
# --------------------------------------------------------
is_slice = True
start = index.start
stop = index.stop
step = index.step
if start is None or stop is None:
step = 0
elif step is None:
step = 1
if step > 0:
if 0 < start < size and 0 <= stop <= start:
# 6:0:1 => -4:0:1
# 6:1:1 => -4:1:1
# 6:3:1 => -4:3:1
# 6:6:1 => -4:6:1
start = size-start
elif -size <= start < 0 and -size <= stop <= start:
# -4:-10:1 => -4:1:1
# -4:-9:1 => -4:1:1
# -4:-7:1 => -4:3:1
# -4:-4:1 => -4:6:1
# -10:-10:1 => -10:0:1
stop += size
elif step < 0:
if -size <= start < 0 and start <= stop < 0:
# -4:-1:-1 => 6:-1:-1
# -4:-2:-1 => 6:-2:-1
# -4:-4:-1 => 6:-4:-1
# -10:-2:-1 => 0:-2:-1
# -10:-10:-1 => 0:-10:-1
start += size
elif 0 <= start < size and start < stop < size:
# 0:6:-1 => 0:-4:-1
# 3:6:-1 => 3:-4:-1
# 3:9:-1 => 3:-1:-1
stop -= size
#--- End: if
if step > 0 and -size <= start < 0 and 0 <= stop <= size+start:
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# -1:0:1 => [9]
# -1:1:1 => [9, 0]
# -1:3:1 => [9, 0, 1, 2]
# -1:9:1 => [9, 0, 1, 2, 3, 4, 5, 6, 7, 8]
# -4:0:1 => [6, 7, 8, 9]
# -4:1:1 => [6, 7, 8, 9, 0]
# -4:3:1 => [6, 7, 8, 9, 0, 1, 2]
# -4:6:1 => [6, 7, 8, 9, 0, 1, 2, 3, 4, 5]
# -9:0:1 => [1, 2, 3, 4, 5, 6, 7, 8, 9]
# -9:1:1 => [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
# -10:0:1 => [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
if cyclic:
index = slice(0, stop-start, step)
roll[i] = -start
else:
index = slice(start, stop, step)
elif step < 0 and 0 <= start < size and start-size <= stop < 0:
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# 0:-4:-1 => [0, 9, 8, 7]
# 6:-1:-1 => [6, 5, 4, 3, 2, 1, 0]
# 6:-2:-1 => [6, 5, 4, 3, 2, 1, 0, 9]
# 6:-4:-1 => [6, 5, 4, 3, 2, 1, 0, 9, 8, 7]
# 0:-2:-1 => [0, 9]
# 0:-10:-1 => [0, 9, | |
"""RDF datasets
Datasets from "A Collection of Benchmark Datasets for
Systematic Evaluations of Machine Learning on
the Semantic Web"
"""
import os
from collections import OrderedDict
import itertools
import rdflib as rdf
import abc
import re
import networkx as nx
import numpy as np
import dgl
import dgl.backend as F
from .utils import download, extract_archive, get_download_dir, _get_dgl_url
__all__ = ['AIFB', 'MUTAG', 'BGS', 'AM']
class Entity:
    """Class for entities

    An RDF entity: an identifier paired with its type (class).

    Parameters
    ----------
    id : str
        ID of this entity
    cls : str
        Type of this entity
    """
    def __init__(self, id, cls):
        self.id = id
        self.cls = cls

    def __str__(self):
        return '{}/{}'.format(self.cls, self.id)

    def __repr__(self):
        # Improvement: a debuggable repr instead of the default
        # '<Entity object at 0x...>'.
        return 'Entity({!r}, {!r})'.format(self.id, self.cls)
class Relation:
    """Class for relations

    An RDF relation (edge) type.

    Parameters
    ----------
    cls : str
        Type of this relation
    """
    def __init__(self, cls):
        self.cls = cls

    def __str__(self):
        return str(self.cls)

    def __repr__(self):
        # Improvement: a debuggable repr instead of the default
        # '<Relation object at 0x...>'.
        return 'Relation({!r})'.format(self.cls)
class RDFGraphDataset:
    """Base graph dataset class from RDF tuples.
    To derive from this, implement the following abstract methods:
    * ``parse_entity``
    * ``parse_relation``
    * ``process_tuple``
    * ``process_idx_file_line``
    * ``predict_category``
    Preprocessed graph and other data will be cached in the download folder
    to speedup data loading.
    The dataset should contain a "trainingSet.tsv" and a "testSet.tsv" file
    for training and testing samples.
    Attributes
    ----------
    graph : dgl.DGLHeteroGraph
        Graph structure
    num_classes : int
        Number of classes to predict
    predict_category : str
        The entity category (node type) that has labels for prediction
    train_idx : Tensor
        Entity IDs for training. All IDs are local IDs w.r.t. to ``predict_category``.
    test_idx : Tensor
        Entity IDs for testing. All IDs are local IDs w.r.t. to ``predict_category``.
    labels : Tensor
        All the labels of the entities in ``predict_category``
    Parameters
    ----------
    url : str or path
        URL to download the raw dataset.
    name : str
        Name of the dataset
    force_reload : bool, optional
        If true, force load and process from raw data. Ignore cached pre-processed data.
    print_every : int, optional
        Log for every X tuples.
    insert_reverse : bool, optional
        If true, add reverse edge and reverse relations to the final graph.
    """
    def __init__(self, url, name,
                 force_reload=False,
                 print_every=10000,
                 insert_reverse=True):
        """Download and extract the dataset, then build the graph either from
        cache or by parsing the raw RDF files."""
        download_dir = get_download_dir()
        zip_file_path = os.path.join(download_dir, '{}.zip'.format(name))
        download(url, path=zip_file_path)
        self._dir = os.path.join(download_dir, name)
        extract_archive(zip_file_path, self._dir)
        self._print_every = print_every
        self._insert_reverse = insert_reverse
        if not force_reload and self.has_cache():
            print('Found cached graph. Load cache ...')
            self.load_cache()
        else:
            raw_tuples = self.load_raw_tuples()
            self.process_raw_tuples(raw_tuples)
        print('#Training samples:', len(self.train_idx))
        print('#Testing samples:', len(self.test_idx))
        print('#Classes:', self.num_classes)
        print('Predict category:', self.predict_category)
    def load_raw_tuples(self):
        """Parse every ``.nt``/``.n3`` file in the data directory.

        Returns
        -------
        iterator
            A chained iterator over all (subject, predicate, object) triples.
        """
        raw_rdf_graphs = []
        for i, filename in enumerate(os.listdir(self._dir)):
            fmt = None
            if filename.endswith('nt'):
                fmt = 'nt'
            elif filename.endswith('n3'):
                fmt = 'n3'
            if fmt is None:
                # not an RDF file (e.g. cache or TSV split file) -- skip
                continue
            g = rdf.Graph()
            print('Parsing file %s ...' % filename)
            g.parse(os.path.join(self._dir, filename), format=fmt)
            raw_rdf_graphs.append(g)
        return itertools.chain(*raw_rdf_graphs)
    def process_raw_tuples(self, raw_tuples):
        """Convert raw RDF triples into a heterograph, then load the
        train/test split and save everything to the cache.

        Parameters
        ----------
        raw_tuples : iterator
            (subject, predicate, object) rdflib triples.
        """
        mg = nx.MultiDiGraph()          # meta graph over entity/relation *types*
        ent_classes = OrderedDict()     # entity class name -> type id
        rel_classes = OrderedDict()     # relation class name -> type id
        entities = OrderedDict()        # entity string -> global node id
        src = []
        dst = []
        ntid = []                       # node type id, one per node
        etid = []                       # edge type id, one per edge
        for i, (sbj, pred, obj) in enumerate(raw_tuples):
            if i % self._print_every == 0:
                print('Processed %d tuples, found %d valid tuples.' % (i, len(src)))
            sbjent = self.parse_entity(sbj)
            rel = self.parse_relation(pred)
            objent = self.parse_entity(obj)
            processed = self.process_tuple((sbj, pred, obj), sbjent, rel, objent)
            if processed is None:
                # ignored
                continue
            # meta graph
            sbjclsid = _get_id(ent_classes, sbjent.cls)
            objclsid = _get_id(ent_classes, objent.cls)
            relclsid = _get_id(rel_classes, rel.cls)
            mg.add_edge(sbjent.cls, objent.cls, key=rel.cls)
            if self._insert_reverse:
                mg.add_edge(objent.cls, sbjent.cls, key='rev-%s' % rel.cls)
            # instance graph
            src_id = _get_id(entities, str(sbjent))
            if len(entities) > len(ntid): # found new entity
                ntid.append(sbjclsid)
            dst_id = _get_id(entities, str(objent))
            if len(entities) > len(ntid): # found new entity
                ntid.append(objclsid)
            src.append(src_id)
            dst.append(dst_id)
            etid.append(relclsid)
        src = np.array(src)
        dst = np.array(dst)
        ntid = np.array(ntid)
        etid = np.array(etid)
        ntypes = list(ent_classes.keys())
        etypes = list(rel_classes.keys())
        # add reverse edge with reverse relation
        if self._insert_reverse:
            print('Adding reverse edges ...')
            newsrc = np.hstack([src, dst])
            newdst = np.hstack([dst, src])
            src = newsrc
            dst = newdst
            # reverse relations get type ids offset by the original type count
            etid = np.hstack([etid, etid + len(etypes)])
            etypes.extend(['rev-%s' % t for t in etypes])
        self.build_graph(mg, src, dst, ntid, etid, ntypes, etypes)
        print('Load training/validation/testing split ...')
        # map global (homogeneous) node ids back to local ids of predict_category
        idmap = F.asnumpy(self.graph.nodes[self.predict_category].data[dgl.NID])
        glb2lcl = {glbid : lclid for lclid, glbid in enumerate(idmap)}
        def findidfn(ent):
            # Resolve an entity string to its local id within predict_category,
            # or None if the entity never appeared in a valid tuple.
            if ent not in entities:
                return None
            else:
                return glb2lcl[entities[ent]]
        self.load_data_split(findidfn)
        self.save_cache(mg, src, dst, ntid, etid, ntypes, etypes)
    def build_graph(self, mg, src, dst, ntid, etid, ntypes, etypes):
        """Build one homogeneous graph and convert it to a heterograph.

        Sets ``self.graph``.
        """
        # create homo graph
        print('Creating one whole graph ...')
        g = dgl.graph((src, dst))
        g.ndata[dgl.NTYPE] = F.tensor(ntid)
        g.edata[dgl.ETYPE] = F.tensor(etid)
        print('Total #nodes:', g.number_of_nodes())
        print('Total #edges:', g.number_of_edges())
        # convert to heterograph
        print('Convert to heterograph ...')
        hg = dgl.to_hetero(g,
                           ntypes,
                           etypes,
                           metagraph=mg)
        print('#Node types:', len(hg.ntypes))
        print('#Canonical edge types:', len(hg.etypes))
        print('#Unique edge type names:', len(set(hg.etypes)))
        self.graph = hg
    def save_cache(self, mg, src, dst, ntid, etid, ntypes, etypes):
        """Persist the processed graph data so later loads can skip parsing."""
        nx.write_gpickle(mg, os.path.join(self._dir, 'cached_mg.gpickle'))
        np.save(os.path.join(self._dir, 'cached_src.npy'), src)
        np.save(os.path.join(self._dir, 'cached_dst.npy'), dst)
        np.save(os.path.join(self._dir, 'cached_ntid.npy'), ntid)
        np.save(os.path.join(self._dir, 'cached_etid.npy'), etid)
        save_strlist(os.path.join(self._dir, 'cached_ntypes.txt'), ntypes)
        save_strlist(os.path.join(self._dir, 'cached_etypes.txt'), etypes)
        np.save(os.path.join(self._dir, 'cached_train_idx.npy'), F.asnumpy(self.train_idx))
        np.save(os.path.join(self._dir, 'cached_test_idx.npy'), F.asnumpy(self.test_idx))
        np.save(os.path.join(self._dir, 'cached_labels.npy'), F.asnumpy(self.labels))
    def has_cache(self):
        """Return True when every cache file written by save_cache exists."""
        return (os.path.exists(os.path.join(self._dir, 'cached_mg.gpickle'))
                and os.path.exists(os.path.join(self._dir, 'cached_src.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_dst.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_ntid.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_etid.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_ntypes.txt'))
                and os.path.exists(os.path.join(self._dir, 'cached_etypes.txt'))
                and os.path.exists(os.path.join(self._dir, 'cached_train_idx.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_test_idx.npy'))
                and os.path.exists(os.path.join(self._dir, 'cached_labels.npy')))
    def load_cache(self):
        """Rebuild graph, split indices and labels from the cache files."""
        mg = nx.read_gpickle(os.path.join(self._dir, 'cached_mg.gpickle'))
        src = np.load(os.path.join(self._dir, 'cached_src.npy'))
        dst = np.load(os.path.join(self._dir, 'cached_dst.npy'))
        ntid = np.load(os.path.join(self._dir, 'cached_ntid.npy'))
        etid = np.load(os.path.join(self._dir, 'cached_etid.npy'))
        ntypes = load_strlist(os.path.join(self._dir, 'cached_ntypes.txt'))
        etypes = load_strlist(os.path.join(self._dir, 'cached_etypes.txt'))
        self.train_idx = F.tensor(np.load(os.path.join(self._dir, 'cached_train_idx.npy')))
        self.test_idx = F.tensor(np.load(os.path.join(self._dir, 'cached_test_idx.npy')))
        labels = np.load(os.path.join(self._dir, 'cached_labels.npy'))
        # labels are assumed to be consecutive integers starting at 0
        self.num_classes = labels.max() + 1
        self.labels = F.tensor(labels)
        self.build_graph(mg, src, dst, ntid, etid, ntypes, etypes)
    def load_data_split(self, ent2id):
        """Load the training/testing split files.

        Parameters
        ----------
        ent2id : callable
            Maps an entity string to its local id (or None when unknown).
        """
        label_dict = {}
        # -1 marks "no label" for entities that are not in either split file
        labels = np.zeros((self.graph.number_of_nodes(self.predict_category),)) - 1
        train_idx = self.parse_idx_file(
            os.path.join(self._dir, 'trainingSet.tsv'),
            ent2id, label_dict, labels)
        test_idx = self.parse_idx_file(
            os.path.join(self._dir, 'testSet.tsv'),
            ent2id, label_dict, labels)
        self.train_idx = F.tensor(train_idx)
        self.test_idx = F.tensor(test_idx)
        self.labels = F.tensor(labels).long()
        self.num_classes = len(label_dict)
    def parse_idx_file(self, filename, ent2id, label_dict, labels):
        """Parse one TSV split file; fill ``labels`` in place and return the
        list of local entity ids found in the file."""
        idx = []
        with open(filename, 'r') as f:
            for i, line in enumerate(f):
                if i == 0:
                    continue  # first line is the header
                sample, label = self.process_idx_file_line(line)
                #person, _, label = line.strip().split('\t')
                ent = self.parse_entity(sample)
                entid = ent2id(str(ent))
                if entid is None:
                    print('Warning: entity "%s" does not have any valid links associated. Ignored.' % str(ent))
                else:
                    idx.append(entid)
                    lblid = _get_id(label_dict, label)
                    labels[entid] = lblid
        return idx
    # NOTE(review): this class neither inherits abc.ABC nor sets
    # metaclass=ABCMeta, so @abc.abstractmethod does not actually prevent
    # instantiation -- the decorators below are documentation only.
    @abc.abstractmethod
    def parse_entity(self, term):
        """Parse one entity from an RDF term.
        Return None if the term does not represent a valid entity and the
        whole tuple should be ignored.
        Parameters
        ----------
        term : rdflib.term.Identifier
            RDF term
        Returns
        -------
        Entity or None
            An entity.
        """
        pass
    @abc.abstractmethod
    def parse_relation(self, term):
        """Parse one relation from an RDF term.
        Return None if the term does not represent a valid relation and the
        whole tuple should be ignored.
        Parameters
        ----------
        term : rdflib.term.Identifier
            RDF term
        Returns
        -------
        Relation or None
            A relation
        """
        pass
    @abc.abstractmethod
    def process_tuple(self, raw_tuple, sbj, rel, obj):
        """Process the tuple.
        Return (Entity, Relation, Entity) tuple for as the final tuple.
        Return None if the tuple should be ignored.
        Parameters
        ----------
        raw_tuple : tuple of rdflib.term.Identifier
            (subject, predicate, object) tuple
        sbj : Entity
            Subject entity
        rel : Relation
            Relation
        obj : Entity
            Object entity
        Returns
        -------
        (Entity, Relation, Entity)
            The final tuple or None if should be ignored
        """
        pass
    @abc.abstractmethod
    def process_idx_file_line(self, line):
        """Process one line of ``trainingSet.tsv`` or ``testSet.tsv``.
        Parameters
        ----------
        line : str
            One line of the file
        Returns
        -------
        (str, str)
            One sample and its label
        """
        pass
    @property
    @abc.abstractmethod
    def predict_category(self):
        """Return the category name that has labels."""
        pass
def _get_id(dict, key):
id = dict.get(key, None)
if id is None:
id = len(dict)
dict[key] = id
return id
def save_strlist(filename, strlist):
    """Write *strlist* to *filename*, one string per line."""
    with open(filename, 'w') as fout:
        fout.writelines(item + '\n' for item in strlist)
def load_strlist(filename):
    """Read *filename* and return a list of its lines, whitespace-stripped."""
    with open(filename, 'r') as fin:
        return [line.strip() for line in fin]
class AIFB(RDFGraphDataset):
    """AIFB dataset.

    Examples
    --------
    >>> dataset = dgl.data.rdf.AIFB()
    >>> print(dataset.graph)
    """

    # These two relations leak the prediction target and are dropped.
    employs = rdf.term.URIRef("http://swrc.ontoware.org/ontology#employs")
    affiliation = rdf.term.URIRef("http://swrc.ontoware.org/ontology#affiliation")
    entity_prefix = 'http://www.aifb.uni-karlsruhe.de/'
    relation_prefix = 'http://swrc.ontoware.org/'

    def __init__(self,
                 force_reload=False,
                 print_every=10000,
                 insert_reverse=True):
        super(AIFB, self).__init__(
            _get_dgl_url('dataset/rdf/aifb-hetero.zip'),
            'aifb-hetero',
            force_reload=force_reload,
            print_every=print_every,
            insert_reverse=insert_reverse)

    def parse_entity(self, term):
        """Parse an RDF term into an Entity, or None when not recognized."""
        if isinstance(term, rdf.Literal):
            return Entity(id=str(term), cls="_Literal")
        if isinstance(term, rdf.BNode):
            return None
        entstr = str(term)
        if not entstr.startswith(self.entity_prefix):
            return None
        # URI layout: http://www.aifb.uni-karlsruhe.de/<cls>/.../<id>
        sp = entstr.split('/')
        return Entity(id=sp[5], cls=sp[3])

    def parse_relation(self, term):
        """Parse an RDF term into a Relation; drop target-leaking relations."""
        if term in (self.employs, self.affiliation):
            return None
        relstr = str(term)
        if relstr.startswith(self.relation_prefix):
            return Relation(cls=relstr.split('/')[3])
        return Relation(cls=relstr.split('/')[-1])
| |
# Given the body keypoint coordinates from op (pose estimation), return a command according to a decision tree
from math import atan2, degrees, sqrt, pi, fabs
from simple_pid import PID
import time
def distance(a, b):
    """Euclidean pixel distance between points *a* and *b*, truncated to int.

    Returns None when either point is missing (its x coordinate is None).
    """
    if a[0] is None or b[0] is None:
        return None
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return int(sqrt(dx ** 2 + dy ** 2))
def angle(A, B, C):
    """Angle A-B-C (vertex at B) in degrees, folded into [0, 180].

    Returns None when any of the three points is missing.
    """
    if A[0] is None or B[0] is None or C[0] is None:
        return None
    raw = degrees(atan2(C[1] - B[1], C[0] - B[0]) -
                  atan2(A[1] - B[1], A[0] - B[0])) % 360
    # Fold reflex angles back into [0, 180].
    return 360 - raw if 180 <= raw < 360 else raw
class Com:
def __init__(self):
# 飞行数据与状态
self.isfly = None
self.flymode = None
self.preflymode = None
self.isfly = None
self.batry = None
self.throwflytimer = None
self.height = None
self.wifi = None
self.state = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # 长度保持和reset一致
self.comd = None
# 遥测姿态数据等
self.anglerroll = 0.0
self.anglerpitch = 0.0
self.velz = 0.0
self.velxy = 0.0
self.posx = 0.0
self.posy = 0.0
self.posz = 0.0
self.pitch = 0.0 # 四元数解算
self.roll = 0.0
self.yew = 0.0
self.visualstate = None
# 是否飞行 电池 飞行模式 动作指令 油门 俯仰 副翼 偏航 备用
self.pose = None # 用于判读手势操作
self.posespeed = 30
# 定义按压摄像头
self.press = None
# 定义屏幕中的定点
self.point = [320, 240] # 固定点
# 初始化pid控制
self.pid_yaw = None
self.pid_pith = None
self.pid_roll = None
self.pid_thro = None
# flag
self.palmflag = None
# 定义各个点,每次循环都会更新
self.nose = None
self.letfhand = None
self.righthand = None
self.letfear = None
self.rightear = None
self.letfshd = None
self.rightshd = None
self.midp = None
self.neck = None
# 作为跟踪锁定点
self.target = None
# 定义距离,每次循环更新距离,lock距离更换模式会改变
self.distance_shd = None
self.distance_midneck = None
self.lock_distance_mn = None # 两种锁定距离方法 #切换模式清零
self.lock_distance_sd = None
# 定义模式判断的距离角度等
self.angleright = None
self.anglerletf = None
self.lethand_rigear = None
self.righand_letear = None
self.rihan_neck = None
self.hand_hand = None
self.lehan_neck = None
# 定义模式时间用于切换判断
self.flymodechange = time.time()
def reset(self): # 每次降落后调用
# 飞行数据与状态
self.isfly = None
self.flymode = 0
self.preflymode = 0
self.isfly = None
self.batry = None
self.throwflytimer = None
self.height = None
self.wifi = None
self.state = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.comd = None
# 是否飞行 电池 飞行模式 动作指令 油门 俯仰 副翼 偏航 备用
self.pose = None # 用于判读手势操作
self.posespeed = 30
# 定义按压摄像头
self.press = None
# 定义屏幕中的定点
self.point = [320, 240] # 固定点
# 初始化pid控制
self.pid_yaw = None
self.pid_pith = None
self.pid_roll = None
self.pid_thro = None
self.palmflag = None
# 定义各个点,每次循环都会更新
self.nose = None
self.letfhand = None
self.righthand = None
self.letfear = None
self.rightear = None
self.letfshd = None
self.rightshd = None
self.midp = None
self.neck = None
# 作为跟踪锁定点
self.target = None
# 定义距离,每次循环更新距离,lock距离更换模式会改变
self.distance_shd = None
self.distance_midneck = None
self.lock_distance_mn = None # 两种锁定距离方法 #切换模式清零
self.lock_distance_sd = None
# 定义模式判断的距离角度等
self.angleright = None
self.anglerletf = None
self.lethand_rigear = None
self.righand_letear = None
self.rihan_neck = None
self.hand_hand = None
self.lehan_neck = None
# 定义模式时间用于切换判断
self.flymodechange = time.time()
def get_data(self, kp): # 很多if else是为了后面的判断以及保证每一帧如果失去某个点则这个点一定为none,后面判断才不会出错
# if前面得i有预判断
# 每个点的坐标提取出来
if kp[0][0]:
self.nose = [kp[0][0], kp[0][1]]
else:
self.nose = None
if kp[7][0]:
self.letfhand = [kp[7][0], kp[7][1]]
else:
self.letfhand = None
if kp[6][0]:
self.lerfhandmid = [kp[6][0], kp[6][1]]
else:
self.lerfhandmid = None
if kp[4][0]:
self.righthand = [kp[4][0], kp[4][1]]
else:
self.righthand = None
if kp[3][0]:
self.righthandmid = [kp[3][0], kp[3][1]]
else:
self.righthandmid = None
if kp[18][0]:
self.letfear = [kp[18][0], kp[18][1]]
else:
self.letfear = None
if kp[17][0]:
self.rightear = [kp[17][0], kp[17][1]]
else:
self.rightear = None
if kp[5][0]:
self.letfshd = [kp[5][0], kp[5][1]]
else:
self.letfshd = None
if kp[2][0]:
self.rightshd = [kp[2][0], kp[2][1]]
else:
self.rightshd = None
if kp[8][0]:
self.midp = [kp[8][0], kp[8][1]]
else:
self.midp = None
if kp[1][0]:
self.neck = [kp[1][0], kp[1][1]]
else:
self.neck = None
# 从listid[10]获取亮度
if kp[10][0] and kp[10][1]:
self.press = kp[10][0]
else:
self.press = None
# 计算肩宽和中心点和脖子的长度用于模拟远近
if self.letfshd and self.rightshd:
self.distance_shd = distance(self.letfshd, self.rightshd)
else:
self.distance_shd = None
if self.midp and self.neck:
self.distance_midneck = distance(self.midp, self.neck)
else:
self.distance_midneck = None
# 定义手臂角度,手的距离,
if self.righthand and self.righthandmid and self.rightshd:
self.angleright = angle(
self.righthand, self.righthandmid, self.rightshd)
else:
self.angleright = None
if self.letfhand and self.lerfhandmid and self.letfshd:
self.anglerletf = angle(
self.letfhand, self.lerfhandmid, self.letfshd)
else:
self.anglerletf = None
if self.letfhand and self.rightear:
self.lethand_rigear = distance(self.letfhand, self.rightear)
else:
self.lethand_rigear = None
if self.righthand and self.letfear:
self.righand_letear = distance(self.righthand, self.letfear)
else:
self.righand_letear = None
if self.righthand and self.neck:
self.rihan_neck = distance(self.righthand, self.neck)
else:
self.rihan_neck = None
if self.letfhand and self.neck:
self.lehan_neck = distance(self.letfhand, self.neck)
else:
self.lehan_neck = None
if self.righthand and self.letfhand:
self.hand_hand = distance(self.righthand, self.letfhand)
else:
self.hand_hand = None
    def check_mode(self, kp):  # Does two things: decide the flight mode from posture, then set self.pose
        """Update ``self.pose`` and ``self.flymode`` from the keypoints *kp*.

        Mode changes are debounced: a switch only happens when more than
        2 seconds have passed since the last change.
        """
        self.get_data(kp)  # point extraction lives in get_data because this method is long
        # Decision logic
        # pose 0: no action
        #      1: forward
        #      2: backward
        #      3: drift left
        #      4: drift right
        # flymode 0: plain tracking, only corrects yaw
        #         1: follow mode, corrects yaw and locks distance
        #         2: parallel follow, corrects roll and locks distance
        #         3: target lost: hold height while rotating to search; land after 15 s
        #         4: land, clear all parameters
        #         5: approach and land on the palm, clear all parameters
        #         6: throw-to-fly
        #         7: take off
        #         8: emergency stop
        if self.preflymode is None:  # first time a target is captured
            self.preflymode = 0
        self.pose = 0  # reset first
        if self.letfshd and self.neck and self.rightshd and self.letfhand and self.lerfhandmid and self.righthand and self.righthandmid:
            # Level 0: only evaluate when all these points exist; otherwise do
            # not switch commands or act at all.
            # Left-hand and right-hand pose commands are mutually exclusive.
            # if self.isfly:  (probably no longer needed)
            if (self.righthand[1] < self.rightshd[1]) and (self.letfhand[1] > self.letfshd[1]) and (self.righthand[0] < self.neck[0]):  # right hand raised: decide left/right drift
                if self.angleright <= 90:
                    self.pose = 4
                elif self.angleright > 90:
                    self.pose = 3
            elif (self.letfhand[1] < self.letfshd[1]) and (self.righthand[1] > self.rightshd[1]) and (self.letfhand[0] > self.neck[0]):  # left hand raised: decide forward/backward
                if self.anglerletf <= 90:
                    self.pose = 1
                elif self.anglerletf > 90:
                    self.pose = 2
            else:
                self.pose = 0
        else:
            self.pose = 0
        if self.nose and self.letfear and self.rightear and self.neck and self.letfshd and self.rightshd and self.letfhand and self.lerfhandmid and self.righthand and self.righthandmid:
            # Level 0: only evaluate fly_mode when all these points exist.
            # Single-hand gestures first.
            # Palm-landing mode 5
            if (self.righthand[1] < self.rightshd[1]) and (self.letfhand[1] > self.letfshd[1]) and (self.righthand[0] > self.nose[0]):  # the hand must also pass the nose
                if self.righand_letear < 50:  # threshold not tuned yet; keep this value for now
                    if self.flymode != 5:  # about to enter the mode
                        if time.time()-self.flymodechange > 2:  # only act when more than 2 s since last change
                            self.flymodechange = time.time()
                            self.flymode = 5
                            self.preflymode = self.flymode
                    else:  # leave the mode
                        if time.time()-self.flymodechange > 2:
                            self.flymodechange = time.time()
                            self.flymode = 0
                            self.preflymode = self.flymode
            # Landing mode 4
            elif (self.righthand[1] > self.rightshd[1]) and (self.letfhand[1] < self.letfshd[1]) and (self.letfhand[0] < self.nose[0]):  # the hand must also pass the nose
                if self.lethand_rigear < 50:  # threshold not tuned yet; keep this value for now
                    if self.flymode != 4:  # about to enter the mode
                        if time.time()-self.flymodechange > 2:  # only act when more than 2 s since last change
                            self.flymodechange = time.time()
                            self.flymode = 4
                            self.preflymode = self.flymode
                    else:  # leave the mode
                        if time.time()-self.flymodechange > 2:
                            self.flymodechange = time.time()
                            self.flymode = 0
                            self.preflymode = self.flymode
            # Two-hand gestures
            # Follow mode 1
            # Hands joined and raised high; threshold still to be tested
            elif (self.righthand[1] < self.nose[1]) and (self.letfhand[1] < self.nose[1]) and (self.hand_hand < 65):
                if self.flymode != 1:
                    if time.time()-self.flymodechange > 2:
                        self.flymodechange = time.time()
                        if self.distance_midneck and self.distance_shd:
                            self.lock_distance_mn = self.distance_midneck
                            self.lock_distance_sd = self.distance_shd
                            self.flymode = 1
                            self.preflymode = self.flymode
                        else:
                            self.flymode = self.preflymode  # failed to enter the mode
                else:
                    if time.time()-self.flymodechange > 2:
                        self.flymodechange = time.time()
                        self.lock_distance_mn = None
                        self.lock_distance_sd = None
                        self.flymode = 0
                        self.preflymode = self.flymode
            # Parallel-follow mode 2
            elif(self.hand_hand < 65) and (self.rihan_neck < 65) and (self.lehan_neck < 65):  # hands joined in front of the chest
                if self.flymode != 2:
                    if time.time()-self.flymodechange > 2:
                        self.flymodechange = time.time()
                        if self.distance_midneck and self.distance_shd:
                            self.lock_distance_mn = self.distance_midneck
                            self.lock_distance_sd = self.distance_shd
                            self.flymode = 2
                            self.preflymode = self.flymode
                        else:
                            self.flymode = self.preflymode  # failed to enter the mode
                else:
                    if time.time()-self.flymodechange > 2:
                        self.flymodechange = time.time()
                        self.lock_distance_mn = None
                        self.lock_distance_sd = None
                        self.flymode = 0
                        self.preflymode = self.flymode
        else:
            if self.preflymode:
                self.flymode = self.preflymode  # no mode switch: keep the current mode
            else:
                self.flymode = 0
                self.preflymode = self.flymode
        # Check whether a tracking target exists (best point wins)
        if self.neck:
            self.target = self.neck
            self.flymode = self.preflymode  # previous mode, so we can return to it after losing the target
        elif self.nose:
            self.target = self.nose
            self.flymode = self.preflymode
        elif self.midp:
            self.target = self.midp
            self.flymode = self.preflymode
        elif self.rightshd:
            self.target = self.rightshd
            self.flymode = self.preflymode
        elif self.letfshd:
            self.target = self.letfshd
            self.flymode = self.preflymode
        else:
            self.target = None
            if (self.flymode == 5) or (self.flymode == 4):
                pass
            else:
                self.flymode = 3  # target lost: start rotating to search
        # When not flying, decide the take-off style
        # print(self.press)
        if self.isfly != 1:
            if self.press == 1:  # throw-to-fly  # what if it is never thrown?
                if self.flymode != 6:
                    if time.time()-self.flymodechange > 2:
                        self.flymodechange = time.time()
                        self.flymode = 6
                # else:  # leave throw-to-fly
                #     if time.time()-self.flymodechange>2:
                #         sefl.flymodechange=time.time()
                #         self.flymode=0
        # print(self.flymode)
def get_comd(self, kp, userc):
comd = [0, | |
import cv2
import sys, datetime
import glob
import dlib
import math
import os
from time import sleep
from shapely.geometry import Polygon
import numpy as np
# Kinect Azure intrinsics
FX = -622.359
CX = 641.666
FY = -620.594
CY = 352.072
FRAME_W, FRAME_H = 1280, 720
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
RED = (0, 0, 255)
YELLOW = (0, 255, 255)
tracked_face_color = BLUE
new_face_color = RED
tracked_landmarks_color = BLUE
new_landmarks_color = RED
DEBUG = True
STATE_NO_FACE = 0
STATE_INIT = 1
STATE_TRACKED = 2
STATE_LOSE_TRACK_MIN = 3
STATE_LOSE_TRACK_MAX = 5
MIN_FACE_AREA=500
TRACK_BOX_WIDTH = 11
LANDMARK_OPENCV=0
LANDMARK_DLIB = 1
LANDMARK_DETECTOR=LANDMARK_DLIB
# DATASET_PATH = '/mnt_d/Programming/0VR/OpenARK/data/avatar-dataset/car_exr/mount-tripod-loop'
DATASET_PATH = '/mnt_d/Programming/0VR/OpenARK/data/avatar-dataset/car_exr/mount-tripod-eye-open-close'
# DATASET_PATH = '/mnt_d/Programming/0VR/OpenARK/data/avatar-dataset/car_exr/mount-tripod-real-road'
# Approximated face points. This can be replaced by real-world 3D face points
# This is using orthographic projection approximation in image coordinates
model_3D_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
(0.0, -40.0, -30.0), # Mouth center
(-35, 55, -40.0), # Left eye center
(35, 55, -40.0), # Right eye center
(-25, -35, -60.0), # Left Mouth corner
(25, -35, -60.0) # Right mouth corner
])
# Set fixed image resize resolution in opencv convension
# resize_size = (640, 360)
resize_size = (960, 480)
FX_SHR = FX * (resize_size[0] / FRAME_W)
CX_SHR = CX * (resize_size[0] / FRAME_W)
FY_SHR = FY * (resize_size[1] / FRAME_H)
CY_SHR = CY * (resize_size[1] / FRAME_H)
# Approximated camera intrinsic parameters
focal_length = resize_size[0]
center = (resize_size[0]/2, resize_size[1]/2)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype = "double"
)
camera_dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
#https://github.com/twairball/face_tracking/blob/master/face_tracking.py
# OpenCV Face Landmark Indexes:
# 30: nose tip; 31: right nose corner; 33: middle between nose corners; 35: left nose corner
# 36: right eye outside corner; 39: right eye inside corner; 42: left eye inside corner: 45: left eye outside corner
# 48: right mouth corner; 54: left mouth corner
def bbox_to_point(bbox):
    """Collapse an (x, y, w, h) box to a 5x5 box centred on it."""
    x, y, w, h = (int(v) for v in bbox)
    return x + (w / 2), y + (h / 2), 5, 5
def dlib_full_obj_to_np(obj_detection):
    """Convert a dlib full_object_detection into a list of (x, y) tuples."""
    points = []
    for part in obj_detection.parts():
        points.append((part.x, part.y))
    return points
def draw_boxes(frame, boxes, color=(0,255,0)):
    """Draw each non-empty (x, y, w, h) box on *frame* as a 1px rectangle."""
    for box in boxes:
        if not box:
            continue  # skip empty placeholder entries
        x, y, w, h = (int(v) for v in box)
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 1)
def draw_points(frame, points,color=(0,0,255)):
    """Draw every (x, y) point on *frame* as a small filled circle."""
    for x, y in points:
        cv2.circle(frame, (int(x), int(y)), 2, color, -1)
def resize_image(image, size_limit=500.0):
    """Shrink *image* so its longest side is at most *size_limit* pixels.

    Images already within the limit are returned unchanged.
    """
    longest = max(image.shape[0], image.shape[1])
    if longest <= size_limit:
        return image
    scale = size_limit / longest
    return cv2.resize(image, None, fx=scale, fy=scale)
#a landmark at coordinate (x, y) should have a bounding box of (x - half_width, y - half_width, half_width * 2, half_width * 2)
def make_bbox_for_landmark(landmark, half_width):
    """Square box of side 2*half_width+1 centred on the (x, y) *landmark*."""
    x = int(landmark[0] - half_width)
    y = int(landmark[1] - half_width)
    side = half_width * 2 + 1
    return (x, y, side, side)
def make_bbox_from_point_list(point_list):
    """Axis-aligned bounding box around *point_list*, padded by 5 px per side.

    Parameters
    ----------
    point_list : sequence of (x, y)
        Points to enclose; must be non-empty.

    Returns
    -------
    tuple
        (x, y, w, h) in integer pixels.

    The original scanned with ``math.inf`` sentinels and a manual loop;
    builtin min/max is clearer and raises a clean ValueError on an empty
    list instead of ``int(inf)`` overflowing.
    """
    xs = [p[0] for p in point_list]
    ys = [p[1] for p in point_list]
    box_min_x, box_max_x = min(xs), max(xs)
    box_min_y, box_max_y = min(ys), max(ys)
    return (int(box_min_x) - 5, int(box_min_y) - 5,
            int(box_max_x - box_min_x) + 10, int(box_max_y - box_min_y) + 10)
def make_feature_bbox_from_landmarks(landmarks, feature_index=0):
    """Bounding boxes for the tracked facial features.

    The 20 kept landmarks are laid out as nose(4), right eye(6),
    left eye(6), mouth(4). ``feature_index`` 0 selects all four features;
    1-4 select nose, right eye, left eye or mouth respectively.
    """
    # (feature id, landmark slice) in output order
    feature_slices = (
        (1, slice(0, 4)),    # nose
        (2, slice(4, 10)),   # right eye
        (3, slice(10, 16)),  # left eye
        (4, slice(16, 20)),  # mouth
    )
    bboxes = []
    for fid, sl in feature_slices:
        if feature_index == 0 or feature_index == fid:
            bboxes.append(make_bbox_from_point_list(landmarks[sl]))
    return bboxes
def avg_dist_between_points(tracked_points, detected_points):
    """Mean Euclidean distance between corresponding points of two lists.

    Parameters
    ----------
    tracked_points, detected_points : sequences of (x, y)
        Must have equal length.

    Raises
    ------
    ValueError
        When the two lists differ in length (was a bare ``Exception``;
        ValueError is a subclass, so existing broad handlers still match).
    """
    if len(tracked_points) != len(detected_points):
        raise ValueError('point lists differ in length: %d != %d'
                         % (len(tracked_points), len(detected_points)))
    total = sum(math.hypot(t[0] - d[0], t[1] - d[1])
                for t, d in zip(tracked_points, detected_points))
    return total / len(tracked_points)
def calc_distance(point_1, point_2):
    """Euclidean distance between two (x, y) points."""
    dx = point_2[0] - point_1[0]  # point_1 comes from the tracker
    dy = point_2[1] - point_1[1]  # point_2 comes from detection
    return math.sqrt(dx ** 2 + dy ** 2)
#return the percentage overlap between 2 face detection bounding boxes, tracked and newly detected
def overlapping_percentage(bbox_a, bbox_b):
    """Overlap area of two boxes as a fraction of the smaller box's area.

    Parameters are (x, y, w, h) with (x, y) the upper-left corner.

    For axis-aligned rectangles the intersection is itself a rectangle, so
    plain arithmetic replaces the original Shapely Polygon construction
    (same result, no geometry-library dependency, far cheaper per call).
    """
    a_x, a_y, a_w, a_h = bbox_a
    b_x, b_y, b_w, b_h = bbox_b
    # Clamp each dimension at 0 so disjoint boxes yield zero area.
    overlap_w = max(0, min(a_x + a_w, b_x + b_w) - max(a_x, b_x))
    overlap_h = max(0, min(a_y + a_h, b_y + b_h) - max(a_y, b_y))
    intersection_area = overlap_w * overlap_h
    min_area = min(a_w * a_h, b_w * b_h)
    return float(intersection_area) / min_area
# take a bounding predicted by opencv and convert it
# to the dlib (top, right, bottom, left)
def bb_to_rect(bb):
    """Convert an OpenCV-style (x, y, w, h) box to a dlib rectangle."""
    left, top = bb[0], bb[1]
    return dlib.rectangle(left, top, left + bb[2], top + bb[3])
# take a bounding predicted by dlib and convert it
# to the format (x, y, w, h) as we would normally do
# with OpenCV
def rect_to_bb(rect):
    """Convert a dlib rectangle to the OpenCV-style (x, y, w, h) tuple."""
    left = rect.left()
    top = rect.top()
    return (left, top, rect.right() - left, rect.bottom() - top)
class FaceDetectorDNN():
    """Face detector backed by OpenCV's Caffe SSD (ResNet-10) model."""

    def __init__(self, modelFile="res10_300x300_ssd_iter_140000_fp16.caffemodel", configFile="deploy.prototxt"):
        self.net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
        self.conf_threshold = .8  # minimum detection confidence

    def detect(self, frame):
        """Return a list of (x, y, w, h) face boxes detected in *frame*."""
        frame_h = frame.shape[0]
        frame_w = frame.shape[1]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123])
        self.net.setInput(blob)
        detections = self.net.forward()
        faces = []
        for i in range(detections.shape[2]):
            if detections[0, 0, i, 2] <= self.conf_threshold:
                continue  # below confidence threshold
            # Coordinates are normalized [0, 1] -- scale back to pixels.
            x1 = int(detections[0, 0, i, 3] * frame_w)
            y1 = int(detections[0, 0, i, 4] * frame_h)
            x2 = int(detections[0, 0, i, 5] * frame_w)
            y2 = int(detections[0, 0, i, 6] * frame_h)
            faces.append((x1, y1, x2 - x1, y2 - y1))
        return faces
class FacemarkDetectorOpenCV():
    """Facial landmark detector using OpenCV's LBF facemark model."""

    def __init__(self):
        self.facemark = cv2.face.createFacemarkLBF()
        self.facemark.loadModel("lbfmodel.yaml")

    def detect(self, frame, facebox):
        """Fit landmarks inside *facebox*; return the 20 tracked points or [].

        *facebox* is a single (x, y, w, h) face box.
        """
        if len(facebox) == 0:
            print("No faces passed into landmark detector")
            return []
        # fit() returns (success, landmark array); fitted[1][0][0] holds
        # the 68 fitted points for the single face.
        fitted = self.facemark.fit(frame, np.asarray([facebox]))
        if not fitted[0]:
            print("No success detecting")
            return []
        # Keep four track-able areas: nose(4), right eye(6), left eye(6), mouth(4)
        keep = [30, 31, 33, 35, 36, 37, 38, 39, 40, 41,
                42, 43, 44, 45, 46, 47, 48, 51, 54, 57]
        self.landmarks = [fitted[1][0][0][i] for i in keep]
        return self.landmarks
class FacemarkDetectorDlib():
    """Facial landmark detector using dlib's 68-point shape predictor."""

    def __init__(self):
        self.predictor = dlib.shape_predictor("./shape_predictor_68_face_landmarks.dat")

    def detect(self, frame, bbox):
        """Predict landmarks inside the (x, y, w, h) *bbox*.

        Returns the 20 tracked points, or [] when *bbox* is empty/falsy.

        The original computed an unused ``delta = bbox[3] - bbox[2]`` on
        every call (dead code left over from a disabled square-box
        adjustment); that computation is removed here.
        """
        if not bbox:
            return []
        # NOTE(review): dlib was trained on square face boxes; shrinking the
        # box height was tried here and disabled -- revisit if landmark
        # quality on tall boxes is poor.
        shape = self.predictor(frame, bb_to_rect(bbox))
        points = dlib_full_obj_to_np(shape)
        # Keep four track-able areas: nose(4), right eye(6), left eye(6), mouth(4)
        keep = [30, 31, 33, 35, 36, 37, 38, 39, 40, 41,
                42, 43, 44, 45, 46, 47, 48, 51, 54, 57]
        return [points[i] for i in keep]
class Tracker():
    """Thin wrapper around an OpenCV KCF object tracker."""

    def __init__(self, frame, bbox):
        # KCF was picked arbitrarily (vs. Boosting and friends).
        self.tracker = cv2.TrackerKCF_create()
        self.tracker.init(frame, bbox)

    def update(self, frame):
        """Advance the tracker on *frame*; return (ok, bbox)."""
        return self.tracker.update(frame)
class Pipeline():
    """Face detection + landmark detection pipeline."""

    def __init__(self):
        self.face_detector = FaceDetectorDNN()
        if LANDMARK_DETECTOR == LANDMARK_OPENCV:
            self.facemark_detector = FacemarkDetectorOpenCV()
        else:
            self.facemark_detector = FacemarkDetectorDlib()
        self.landmark_trackers = []

    def detect_faces(self, frame):
        """Return the list of (x, y, w, h) face boxes found in *frame*."""
        return self.face_detector.detect(frame)

    def detect_landmarks(self, frame, facebox):
        """Detect landmarks within the single face box *facebox*.

        NOTE(review): the early-exit path returns a ``([], False)`` tuple
        while the success path returns a bare landmark list; callers must
        handle both shapes -- consider unifying the return type.
        """
        if len(facebox) != 4:
            # no usable face box
            return [], False
        return self.facemark_detector.detect(frame, facebox)
def facial_orientation(bboxes, landmarks, xyz):
image_points = np.array([
(0, 0), # Nose tip
(0, 0), # Mouth center
(0, 0), # Left eye center
(0, 0), # Right eye center
(0, 0), # Left Mouth corner
(0, 0) # Right mouth corner
], dtype="double")
image_points[0] = [bboxes[0][0]+bboxes[0][2]/2, bboxes[0][1]+bboxes[0][3]/2]
image_points[1] = [bboxes[3][0]+bboxes[3][2]/2, bboxes[3][1]+bboxes[3][3]/2]
image_points[2] = [bboxes[1][0]+bboxes[1][2]/2, bboxes[1][1]+bboxes[1][3]/2]
image_points[3] = [bboxes[2][0]+bboxes[2][2]/2, bboxes[2][1]+bboxes[2][3]/2]
image_points[4] = landmarks[16]
image_points[5] = landmarks[18]
# points_3d = np.zeros((6, 3));
# for i in range(points_3d.shape[0]):
# py = min(int(image_points[i,1]), xyz.shape[0]-1)
# px = min(int(image_points[i,0]), xyz.shape[1]-1)
# points_3d[i, :] = xyz[py, px]
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_3D_points, image_points, camera_matrix, camera_dist_coeffs, | |
"""
The module that offers access to the Paxton Net2 server
"""
import clr
import os
import sys
import Net2Scripting.settings
from Net2Scripting.net2xs.conversions import date_time_to_net, flex_date_time_to_net, \
time_zones_to_py, access_levels_to_py, access_level_detail_to_py
from datetime import datetime
from Net2Scripting.net2base import Net2Base
from Net2Scripting.pylog4net import Log4Net
from threading import RLock
from System import Array
from System.Data import DataSet
from System.Reflection import Assembly
# Minimal required net2 version
MIN_NET2_VERSION = 501
# Paxton assembly
PAXTON_ASSEMBLY = 'Paxton.Net2.OEMClientLibrary'
def readable_min_version(version=None):
    """Format a packed version number (major*100 + minor) as "major.minor".

    Parameters
    ----------
    version : int, optional
        Packed version; defaults to ``MIN_NET2_VERSION`` (backward
        compatible with the original zero-argument call).

    Uses floor division: the original ``/`` became float division under
    Python 3, yielding e.g. "5.0" instead of "5.1" for 501; ``//`` is
    correct on both Python 2 and 3.
    """
    if version is None:
        version = MIN_NET2_VERSION
    major = version // 100
    minor = version - major * 100
    return "%d.%d" % (major, minor)
class Net2XSException(Exception):
    """Raised for Net2 XS specific errors."""
# The code below is required to determine if the already installed
# Net2 version can be used, or that the packaged version is required.
try:
    # Obtain paxton assembly reference
    asm = Assembly.LoadWithPartialName(PAXTON_ASSEMBLY)
    # Found: check the version
    if asm:
        ver = asm.GetName().Version
        # Versions are compared in packed major*100+minor form.
        if ver.Major * 100 + ver.Minor < MIN_NET2_VERSION:
            raise Net2XSException(
                'Only Net2 V%s or higher is supported' %
                readable_min_version())
    # Not found: enable the packaged paxton libs
    else:
        # Add path lib path to search path
        PAXTON_LIB_DIR = os.path.join(Net2Scripting.settings.LIB_DIR, 'paxton')
        if PAXTON_LIB_DIR not in sys.path:
            sys.path.append(PAXTON_LIB_DIR)
    # Load the (installed or packaged) assembly and import the CLR types.
    try:
        Assembly.LoadWithPartialName(PAXTON_ASSEMBLY)
        clr.AddReference(PAXTON_ASSEMBLY)
        from Paxton.Net2.OemClientLibrary import OemClient as OC
        from Paxton.Net2.OemClientLibrary import AccessLevelDetailSet
        from Paxton.Net2.OemClientLibrary import TimezonesSet
        from Paxton.Net2.OemClientLibrary import EventViewEnums
    # NOTE(review): bare 'except:' also swallows SystemExit/KeyboardInterrupt
    # and hides the real import error; consider 'except Exception as e'.
    except:
        raise Net2XSException('Failed to load the library')
# Any failure here is fatal: log it and terminate the process.
except Exception as e:
    Log4Net.get_logger('Net2XS').Fatal('Paxton error: %s' % str(e))
    sys.exit(1)
# end of paxton loading
class Net2XS(Net2Base):
"""Net2 Access class
"""
# Class variables
_logger = Log4Net.get_logger('Net2XS')
_lock = RLock()
    def __init__(self, host='localhost', port=8025):
        """Class constructor

        :param str host: Net2 server host name or address
        :param int port: Net2 server TCP port (default 8025)
        """
        # OemClient instance; created by authenticate()
        self._client = None
        self._host = host
        self._port = port
        # True once AuthenticateUser succeeded
        self._connected = False
        # Optional ACU event callback slot
        self._on_acu_event = None
    def __enter__(self):
        """With enter handler
        """
        return self
    def __exit__(self, type, value, traceback):
        """With exit handler

        Delegates cleanup to dispose(), regardless of exceptions.
        """
        self.dispose()
    def _check_client(self):
        """Check is client connection is valid

        :raises Net2XSException: when there is no authenticated connection
        """
        if not self._client or not self._connected:
            raise Net2XSException('Not connected')
    def authenticate(self, user_id, password):
        """Authenticate to Net2

        Opens a fresh OemClient connection and, on success, registers the
        reconnect/disconnect and ACU event handlers.

        :raises Net2XSException: when authentication fails
        """
        with Net2XS._lock:
            # Save for re authentication
            self._user_id = user_id
            self._password = password
            # Drop any previous connection before opening a new one.
            self.dispose()
            Net2XS._logger.Debug('Connecting to net2 server on %s:%d' %
                                 (self._host, self._port))
            self._client = OC(self._host, self._port)
            methods = self._client.AuthenticateUser(user_id, password)
            if not methods:
                raise Net2XSException('Authentication failed: ' +
                                      self._client.LastErrorMessage)
            else:
                self._connected = True
                # Add disconnect reconnect handlers
                self._client.Net2ServerReconnected += (
                    OC.Net2ServerReconnectedHandler(self._reconnected))
                self._client.Net2ServerDisconnected += (
                    OC.Net2ServerDisconnectedHandler(self._disconnected))
                # Add acu event handler
                self._client.Net2AccessEvent += (
                    OC.Net2AcuEventHandler(self._acu_event))
                Net2XS._logger.Debug('Authenticated')
    @property
    def client_version(self):
        """Client version

        :return: (major, minor) tuple of the loaded Paxton client assembly
        """
        asm = Assembly.GetAssembly(OC)
        ver = asm.GetName().Version
        return (ver.Major, ver.Minor)
    def query_db(self, query):
        """Perform a db query
        Returns a dataset

        NOTE(review): the query string is sent verbatim to the server;
        callers interpolate values themselves (see get_department_name),
        so avoid passing untrusted input.
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.QueryDb(query)
    def are_doors_being_synchronised(self):
        """Return if synchronization is taking place
        Returns a boolean
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.AreDoorsBeingSynchronised()
    def get_doors(self, device_address=-1):
        """Get all doors (ViewDoors)
        Device_address is an optional integer; a negative value (the
        default) returns all doors instead of a single device.
        Returns a dataset
        """
        with Net2XS._lock:
            self._check_client()
            if device_address < 0:
                return self._client.ViewDoors().DoorsDataSource
            else:
                return self._client.ViewDoors(device_address).DoorsDataSource
    def get_door_name(self, device_address):
        """Obtain door name
        Returns a string, or None when the device yields no rows
        """
        with Net2XS._lock:
            self._check_client()
            dataset = self._client.ViewDoors(device_address).DoorsDataSource
            # Guard against an empty/malformed result set.
            if (not dataset or
                    dataset.Tables.Count < 1 or
                    dataset.Tables[0].Rows.Count == 0):
                return None
            return dataset.Tables[0].Rows[0].Name
    def get_departments(self):
        """Get all departments (ViewDepartments)
        Returns a dataset
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.ViewDepartments().DepartmentsDataSource
    def close_door(self, device_address):
        """Close a door
        Returns True on success
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.CloseDoor(device_address)
    def hold_door_open(self, device_address):
        """Hold a door open
        Returns True on success
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.HoldDoorOpen(device_address)
    def control_door(self, device_address, relay, function, door_open_time, led_flash):
        """Control a door
        Relay is 0 or 1, for relay 1 or 2.
        Function 0 Close, 1 Timed open, 2 Hold Open.
        Door_open_time in ms.
        Led_flash see Net2 API.
        Returns True on success.
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.ControlDoorEx(
                device_address, relay, function, door_open_time, led_flash)
    def get_department_name(self, department_id):
        """Get department name by id
        Returns a string or None if department was not found
        """
        # department_id is formatted with %d, so only integers are accepted.
        dataset = self.query_db(
            'select DepartmentName from sdk.Departments'
            ' where DepartmentID=%d' % department_id)
        if (not dataset or
                dataset.Tables.Count < 1 or
                dataset.Tables[0].Rows.Count == 0):
            return None
        return dataset.Tables[0].Rows[0][0]
    def get_users_ex(self, name=None):
        """Get users, from the UsersEx view.
        Name param is optional tuple (first_name, sur_name)
        Returns a dataset

        NOTE(review): the name values are interpolated into the SQL string
        without escaping -- a quote in a name breaks the query (potential
        SQL injection); do not pass untrusted input.
        """
        query = 'select * from sdk.UsersEx'
        if name and len(name) == 2:
            first_name, sur_name = name
            query = ("%s where FirstName='%s' and Surname='%s'" %
                     (query, first_name, sur_name))
        return self.query_db(query)
    def get_user_id_by_name(self, name):
        """Get user id by name
        Name is a (first_name, sur_name) tuple
        Returns the id or -1 if not found.
        """
        dataset = self.get_users(name)
        if (not dataset or
                dataset.Tables.Count < 1 or
                dataset.Tables[0].Rows.Count == 0):
            return -1
        # Only the first match is returned when several users share a name.
        return dataset.Tables[0].Rows[0].get_Item("UserID")
    def get_users(self, name=None):
        """Get users, from the ViewUserRecords call
        Name param is optional tuple (first_name, sur_name)
        Returns a dataset

        NOTE(review): name parts are interpolated unescaped into the
        where-clause; avoid untrusted input.
        """
        with Net2XS._lock:
            self._check_client()
            # Only active users are listed; extra filters are ANDed in.
            wheres = ['Active=1']
            if name and len(name) == 2:
                first_name, sur_name = name
                if first_name:
                    wheres.append("FirstName='%s'" % first_name)
                if sur_name:
                    wheres.append("Surname='%s'" % sur_name)
            return self._client.ViewUserRecords(
                ' and '.join(wheres)).UsersDataSource
    def get_user_record(self, user_id):
        """Get user record, from the ViewUserRecords call
        Returns an IUserView user record or None if not found
        """
        with Net2XS._lock:
            self._check_client()
            # Fetch current user info
            users = self._client.ViewUserRecords(
                'UserID=%d' % user_id).UsersList()
            if users.Count != 1 or not users.ContainsKey(user_id):
                return None
            return users[user_id]
    def get_user_name(self, user_id):
        """Get user name
        Returns a string or None when user is not found
        """
        dataset = self.query_db(
            'select Username from sdk.UsersEx where UserID=%d' % user_id)
        if (not dataset or
                dataset.Tables.Count < 1 or
                dataset.Tables[0].Rows.Count == 0):
            return None
        return dataset.Tables[0].Rows[0][0]
    def add_user(
            self,
            access_level_id=1,
            department_id=0,
            anti_passback_ind=False,
            alarm_user_ind=False,
            first_name=None,
            middle_name=None,
            sur_name=None,
            telephone_no=None,
            telephone_extension=None,
            pin_code=None,
            activation_date=None,
            active=True,
            fax_no=None,
            expiry_date=None,
            custom_fields=None,
            user_picture=None):
        """Add user record
        DateTime fields can be either python or dotnet objects.
        If activation date is None, the current date will be used.
        If expiry date is None, the user entry will not expire.
        Custom_fields is a string array (15) of which the first element is
        ignored.
        Returns True on success.
        """
        # If no user name is given at all: create one (required by Net2)
        if not first_name and not sur_name and not middle_name:
            first_name = "New"
            sur_name = "User"
        with Net2XS._lock:
            self._check_client()
            # Positional call into the OEM API; argument order must match
            # OemClient.AddUserRecord exactly.
            return self._client.AddUserRecord(
                access_level_id,
                department_id,
                anti_passback_ind,
                alarm_user_ind,
                first_name,
                middle_name,
                sur_name,
                telephone_no,
                telephone_extension,
                pin_code,
                None,
                flex_date_time_to_net(activation_date) or self.now_date,
                0,
                0,
                active,
                fax_no,
                flex_date_time_to_net(expiry_date) or self.no_expiration_date,
                custom_fields,
                user_picture)
    def modify_user(
            self,
            user_id,
            access_level_id=None,
            department_id=None,
            anti_passback_ind=None,
            alarm_user_ind=None,
            first_name=None,
            middle_name=None,
            sur_name=None,
            telephone_no=None,
            telephone_extension=None,
            pin_code=None,
            activation_date=None,
            active=None,
            fax_no=None,
            expiry_date=None,
            custom_fields=None,
            user_picture=None,
            delete_image=False):
        """Modify user record
        Fields omitted keep their original value.
        Custom_fields is a string array (15) of which the first element is
        ignored.
        Providing None custom values in the array, leaves the original value
        unchanged.
        Returns True (success) or False (user not found / update failed).
        """
        # Fetch current user info
        uview = self.get_user_record(user_id)
        if not uview:
            return False
        with Net2XS._lock:
            self._check_client()
            # For each None argument, fall back to the stored value so that
            # omitted fields are preserved.
            return self._client.UpdateUserRecord(
                user_id,
                uview.AccessLevelId if access_level_id is None else access_level_id,
                uview.DepartmentId if department_id is None else department_id,
                uview.AntiPassbackUser if anti_passback_ind is None else anti_passback_ind,
                uview.AlarmUser if alarm_user_ind is None else alarm_user_ind,
                first_name,
                middle_name,
                sur_name,
                telephone_no,
                telephone_extension,
                pin_code,
                None,
                flex_date_time_to_net(activation_date) or uview.ActivationDate,
                uview.Active if active is None else active,
                fax_no,
                flex_date_time_to_net(expiry_date) or uview.ExpiryDate,
                custom_fields,
                user_picture,
                delete_image)
    def delete_user(self, user_id):
        """Delete user record
        Permanently purges the user from the Net2 database.
        Returns True on success
        """
        with Net2XS._lock:
            self._check_client()
            return self._client.PurgeUser(user_id)
    def deactivate_user(self, user_id):
        """Deactivate user. Preferred method iso delete.
        Strips access level, department and pin, and marks inactive.
        Returns True on success
        """
        return self.modify_user(
            user_id=user_id,
            access_level_id=0,
            department_id=0,
            pin_code="",
            active=False)
    def modify_user_access_level(self, user_id, access_level_id):
        """Alter user access level
        Returns True on success
        """
        return self.modify_user(
            user_id=user_id,
            access_level_id=access_level_id)
def modify_user_picture(self, user_id, user_picture):
"""Alter user picture. If user_picture is None, remove the picture.
Returns True on success
"""
return self.modify_user(
user_id=user_id,
user_picture=user_picture,
delete_image=True if not user_picture else False)
    def get_area_ids(self, device_address):
        """Get area ids of a device
        Returns a tuple with area id's (in, out), or None when the device
        does not resolve to exactly two gateway rows
        """
        dataset = self.query_db(
            'select b.ToAreaID from sdk.PeripheralNames a'
            ' inner join sdk.AreaGateways b on'
            ' a.PeripheralID=b.PeripheralID and'
            ' a.SerialNumber=%d order by a.SubAddress' %
            (device_address))
        # Exactly two rows expected: one per direction (ordered by SubAddress).
        if (not dataset or
                dataset.Tables.Count < 1 or
                dataset.Tables[0].Rows.Count != 2):
            return None
        return (dataset.Tables[0].Rows[0][0], dataset.Tables[0].Rows[1][0])
    def get_device_addr_info(self):
        """Obtain all relevant device address info
        Returns a dataset
        """
        return self.query_db(
            'select a.SerialNumber, a.SubAddress, a.PeripheralID, b.ToAreaID'
            ' from sdk.PeripheralNames a'
            ' inner join AreaGateways b on a.PeripheralID=b.PeripheralID'
            ' order by a.SerialNumber')
def get_time_slots(self, access_level_id, | |
import re
import torch
import unicodedata
from abc import abstractmethod
from typing import List, Union, Dict, Set, Optional
from .import_utils import (
is_available_kss,
is_available_nltk,
)
# Collapses any whitespace run to a single space (see _normalize_space).
SPACE_NORMALIZER = re.compile(r"\s+")
# Type aliases: single-sample or batched variants of tokenizer I/O.
InputTexts = Union[str, List[str]]
TokenizedOutput = Union[List[str], List[List[str]]]
EncodedOutput = Union[List[int], List[List[int]], torch.Tensor]
PaddedOutput = Union[List[List[int]], torch.Tensor]
DecodedOutput = Union[str, List[str]]
class _BaseTokenizer:
def __init__(
self,
lang: str,
vocab: Dict[str, int],
cls_token: str = "<s>",
sep_token: str = "</s>",
pad_token: str = "<pad>",
unk_token: str = "<unk>",
padding_side: str = "right",
max_seq_length: int = 512,
):
assert padding_side in ["right", "left"]
self.lang = lang
self.vocab = vocab
self.pos_vocab = None
self.id2token = {i: tok for tok, i in vocab.items()}
self.cls_token = cls_token
self.sep_token = sep_token
self.pad_token = pad_token
self.unk_token = unk_token
self.padding_side = padding_side
self.max_seq_length = max_seq_length
self._langtok_style = "basic"
self.sub_tokenizer = {}
@property
def cls_token_id(self) -> int:
return self.vocab[self.cls_token]
@property
def sep_token_id(self) -> int:
return self.vocab[self.sep_token]
@property
def pad_token_id(self) -> int:
return self.vocab[self.pad_token]
@property
def unk_token_id(self) -> int:
return self.vocab[self.unk_token]
@property
def nspecial(self) -> int:
return 4 # cls, sep, pad, unk
@property
def langtok_style(self):
return self._langtok_style
@langtok_style.setter
def langtok_style(self, val: str):
self._langtok_style = val
def _langtok(self, lang: str):
# https://github.com/pytorch/fairseq/blob/master/fairseq/data/multilingual/multilingual_utils.py#L34
langtok = ""
if self.langtok_style == "basic":
langtok = f"[{lang.upper()}]"
elif self.langtok_style == "mbart":
mapping = {"en": "_XX", "ja": "_XX", "ko": "_KR", "zh": "_CN"}
langtok = f"[{lang + mapping[lang]}]"
elif self.langtok_style == "multilingual":
langtok = f"__{lang}__"
return langtok
def _set_sub_tokenizer(self, lang: str, tokenizer_object):
self.sub_tokenizer[lang] = tokenizer_object
def __call__(
self,
text: InputTexts,
text_pair: Optional[InputTexts] = None,
src_lang: Optional[InputTexts] = None,
tgt_lang: Optional[InputTexts] = None,
padding: Union[str, bool] = False,
return_tokens: bool = False,
return_tags: bool = True,
return_tensors: Union[str, bool] = False,
return_attention_mask: bool = True,
add_special_tokens: bool = True,
no_separator: bool = False,
) -> Union[TokenizedOutput, Dict[str, EncodedOutput]]:
return self.encode(
text=text,
text_pair=text_pair,
src_lang=src_lang,
tgt_lang=tgt_lang,
padding=padding,
return_tokens=return_tokens,
return_tags=return_tags,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
add_special_tokens=add_special_tokens,
no_separator=no_separator,
)
def _normalize(self, text: str) -> str:
""" Unicode normalization and whitespace removal (often needed for context) """
text = unicodedata.normalize("NFKC", text)
text = self._normalize_space(text)
return text
@staticmethod
def _normalize_space(text: str) -> str:
return SPACE_NORMALIZER.sub(" ", text).strip()
@abstractmethod
def _tokenize(self, text: str, *args, **kwargs) -> List[str]:
pass
def tokenize(
self,
text: str,
text_pair: Optional[str] = None,
src_lang: Optional[str] = None,
tgt_lang: Optional[str] = None,
return_tags: bool = True,
add_special_tokens: bool = False,
no_separator: bool = False,
) -> List[str]:
"""
If you want to use `src_lang` and `tgt_lang` parameters, plz overrides!
"""
if self.pos_vocab is None:
return_tags = False
tokenized = self._tokenize(text)
if return_tags:
tokenized, tags = tokenized
if add_special_tokens:
tokenized = [self.cls_token] + tokenized + [self.sep_token]
if return_tags:
tags = [self.cls_token] + tags + [self.sep_token]
if text_pair is not None:
tokenized += [self.sep_token] if not no_separator else []
tokenized_pair = self._tokenize(text_pair)
if return_tags:
tags += [self.sep_token] if no_separator else []
tokenized_pair, tags_pair = tokenized_pair
tags += tags_pair
tokenized += tokenized_pair
if add_special_tokens:
tokenized += [self.sep_token]
if return_tags:
tags += [self.sep_token]
if return_tags:
return tokenized, tags
return tokenized
def encode_line(
self,
tokenized: List[str],
add_special_tokens: bool = False,
use_pos_vocab: bool = False,
) -> List[int]:
vocab = self.vocab
if use_pos_vocab and self.pos_vocab is not None:
vocab = self.pos_vocab
encoded = []
for token in tokenized:
encoded.append(vocab.get(token, self.unk_token_id))
if add_special_tokens:
encoded = [self.cls_token_id] + encoded + [self.sep_token_id]
return encoded
def encode(
self,
text: InputTexts,
text_pair: Optional[InputTexts] = None,
src_lang: Optional[InputTexts] = None,
tgt_lang: Optional[InputTexts] = None,
padding: Union[str, bool] = False,
return_tokens: bool = False,
return_tags: bool = True,
return_tensors: Union[str, bool] = False,
return_attention_mask: bool = True,
add_special_tokens: bool = True,
no_separator: bool = False,
) -> Union[TokenizedOutput, Dict[str, EncodedOutput]]:
""" Encode tokens to ids, used for single or batched sentence """
assert isinstance(return_tensors, bool) or return_tensors == "pt"
return_tensors = (return_tensors == "pt") or return_tensors
assert text_pair is None or type(text) == type(text_pair)
if (src_lang is None) ^ (tgt_lang is None):
src_lang = tgt_lang = None
if not hasattr(self, "pos_tagger"):
return_tags = False
if isinstance(text, str):
return self.encode(
text=[text],
text_pair=[text_pair],
src_lang=[src_lang],
tgt_lang=[tgt_lang],
padding=padding,
return_tokens=return_tokens,
return_tags=return_tags,
return_tensors=return_tensors,
return_attention_mask=return_attention_mask,
add_special_tokens=add_special_tokens,
no_separator=no_separator,
)
if text_pair is None:
text_pair = [None] * len(text)
if src_lang is None:
src_lang = [None] * len(text)
if tgt_lang is None:
tgt_lang = [None] * len(text)
assert len(text) == len(text_pair)
assert len(src_lang) == len(tgt_lang)
if len(src_lang) == 1:
src_lang = src_lang * len(text)
tgt_lang = tgt_lang * len(text)
assert len(text) == len(src_lang)
texts, text_pairs = text, text_pair
src_langs, tgt_langs = src_lang, tgt_lang
input_ids = []
segment_labels = []
for text, text_pair, src_lang, tgt_lang in zip(
texts, text_pairs, src_langs, tgt_langs
):
tokenized = self.tokenize(
text=text,
text_pair=text_pair,
src_lang=src_lang,
tgt_lang=tgt_lang,
return_tags=return_tags,
no_separator=no_separator,
add_special_tokens=add_special_tokens,
)
encoded = None
encoded_tags = None
if return_tags:
tokenized, tags = tokenized
if not return_tokens:
encoded = self.encode_line(tokenized=tokenized)
if return_tags:
encoded_tags = self.encode_line(tokenized=tags, use_pos_vocab=True)
input_ids.append(tokenized if return_tokens else encoded)
if return_tags:
segment_labels.append(tags if return_tokens else encoded_tags)
if return_tokens:
input_ids = input_ids if len(texts) > 1 else input_ids[0]
if return_tags:
segment_labels = segment_labels if len(texts) > 1 else segment_labels[0]
return input_ids, segment_labels
return input_ids
attention_mask = None
if return_tensors or padding:
padded = self.pad(
sequences={"input_ids": input_ids},
padding=padding,
return_tensors=return_tensors,
)
input_ids = padded["input_ids"]
attention_mask = padded["attention_mask"]
if return_tags:
segment_labels = self.pad(
sequences={"input_ids": segment_labels},
padding=padding,
return_tensors=return_tensors,
)["input_ids"]
batch_encoding = {"input_ids": input_ids}
if return_attention_mask and attention_mask is not None:
batch_encoding.update({"attention_mask": attention_mask})
if return_tags:
batch_encoding.update({"segment_labels": segment_labels})
return batch_encoding
def decode_line(self, ids: List[int], ignore_symbols: Set[int] = {}) -> str:
sent = []
for _id in ids:
if _id not in ignore_symbols:
sent.append(self.id2token.get(_id, self.unk_token))
return " ".join(sent)
def _recover_original(self, decoded_text: str) -> str:
return decoded_text
def decode(
self,
ids: EncodedOutput,
ignore_symbols: List[int] = [],
recover_original: bool = True,
) -> DecodedOutput:
if isinstance(ids, torch.Tensor):
ids = ids.detach().cpu().tolist()
if isinstance(ids[0], int):
return self.decode(
ids=[ids],
ignore_symbols=ignore_symbols,
recover_original=recover_original,
)
ignore_symbols = set(None or ignore_symbols)
ignore_symbols.update([self.cls_token_id, self.sep_token_id, self.pad_token_id])
list_of_ids = ids
decoded_texts = []
for ids in list_of_ids:
decoded = self.decode_line(ids, ignore_symbols)
if recover_original:
decoded = self._recover_original(decoded)
decoded_texts.append(decoded)
if len(decoded_texts) == 1:
decoded_texts = decoded_texts[0]
return decoded_texts
def pad(
self,
sequences: Dict[str, EncodedOutput],
padding: Union[str, bool] = True,
return_tensors: bool = True,
pad_to_multiple_of: Union[int, bool] = False, # match to hf pad method
) -> Dict[str, PaddedOutput]:
"""Pad batched sequences.
if return_tensors, then return torch.LongTensor object.
"""
input_ids = sequences.get("input_ids")
assert input_ids is not None
if isinstance(input_ids[0], int):
input_ids = [input_ids]
max_length = -1
if padding == "max_length":
max_length = self.max_seq_length
else:
max_length = max(len(ids) for ids in input_ids)
padded = {"input_ids": [], "attention_mask": []}
for ids in input_ids:
seq_len = len(ids)
if self.padding_side == "right":
ids = ids + [self.pad_token_id] * (max_length - seq_len)
attn_mask = [1] * seq_len + [0] * (max_length - seq_len)
else:
ids = [self.pad_token_id] * (max_length - seq_len) + ids
attn_mask = [0] * (max_length - seq_len) + [1] * seq_len
padded["input_ids"].append(ids)
padded["attention_mask"].append(attn_mask)
if return_tensors:
for k, v in padded.items():
padded[k] = torch.LongTensor(v)
return padded
class SentTokenizeMixin:
""" Sentence Tokenization Mixin """
    def _set_sent_tokenizer(self):
        """Lazily import and bind the language-specific sentence splitters.

        Korean ("ko"/"multi") uses kss; English ("en"/"multi") uses nltk
        punkt (downloaded on first use).  Raises ModuleNotFoundError when
        the needed package is missing.
        """
        if self.lang in ["ko", "multi"]:
            if is_available_kss():
                from kss import split_sentences
                self._ko_sent_tokenizer = split_sentences
            else:
                raise ModuleNotFoundError("Please install kss with: `pip install kss`.")
        if self.lang in ["en", "multi"]:
            if is_available_nltk():
                import nltk
                # Fetch the punkt model only if it is not already present.
                try:
                    nltk.data.find("tokenizers/punkt")
                except LookupError:
                    nltk.download("punkt")
                from nltk.tokenize import sent_tokenize
                self._en_sent_tokenizer = sent_tokenize
            else:
                raise ModuleNotFoundError(
                    "Please install nltk with: `pip install nltk`."
                )
def sent_tokenize(
self,
texts: InputTexts,
langs: Optional[InputTexts] = None,
) -> List[List[str]]:
if isinstance(texts, str):
texts = [texts]
if langs is None:
langs = self.lang
elif self.lang != "multi": # F632
raise AttributeError("`langs` parameter is only used for `multi` model.")
if isinstance(langs, str):
langs = [langs] * len(texts)
do_per_sample = False
if len(set(langs)) == 1 and langs[0] == "ko":
# korean sentence splitter can be batched
if not hasattr(self, "_ko_sent_tokenizer"):
raise AttributeError
try:
sentences = self._ko_sent_tokenizer(texts)
except Exception:
do_per_sample = True
else:
do_per_sample = True
if do_per_sample:
sentences = []
for text, lang in zip(texts, langs):
if lang in "ko":
if not hasattr(self, "_ko_sent_tokenizer"):
raise AttributeError
sentences.append(self._ko_sent_tokenizer(text))
elif lang == "en":
if not hasattr(self, "_en_sent_tokenizer"):
raise AttributeError
sentences.append(self._en_sent_tokenizer(text))
else: # lang in ["ja", "zh"]
text = text.replace("。", | |
<gh_stars>1-10
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import gridspec
from scipy.stats import wasserstein_distance
from tqdm import tqdm
class LookoutEquipmentAnalysis:
"""
A class to manage Lookout for Equipment result analysis
Attributes:
model_name (string): the name of the Lookout for Equipment trained model
predicted_ranges (pandas.DataFrame): a Pandas dataframe with the
predicted anomaly ranges listed in chronological order with a Start
and End columns
labelled_ranges (pandas.DataFrame): A Pandas dataframe with the labelled
anomaly ranges listed in chronological order with a Start and End
columns
df_list (list of pandas.DataFrame): A list with each time series into a
dataframe
"""
    def __init__(self, model_name, tags_df):
        """
        Create a new analysis for a Lookout for Equipment model.
        Parameters:
            model_name (string):
                The name of the Lookout for Equipment trained model
            tags_df (pandas.DataFrame):
                A dataframe containing all the signals, indexed by time
        Raises:
            Exception: when the model's training status is FAILED
        """
        # Uses the default boto3 session/region configuration.
        self.client = boto3.client('lookoutequipment')
        self.model_name = model_name
        self.predicted_ranges = None
        self.labelled_ranges = None
        self.ts_normal_training = None
        self.ts_label_evaluation = None
        self.ts_known_anomalies = None
        # One single-column dataframe per signal, keyed by signal name.
        self.df_list = dict()
        for signal in tags_df.columns:
            self.df_list.update({signal: tags_df[[signal]]})
        model_description = self.client.describe_model(ModelName=self.model_name)
        if model_description['Status'] == 'FAILED':
            raise Exception('Model training failed, nothing to analyze.')
        # Extracting time ranges used at training time:
        # (timezone info is dropped so they compare with naive indexes)
        self.training_start = pd.to_datetime(
            model_description['TrainingDataStartTime'].replace(tzinfo=None)
        )
        self.training_end = pd.to_datetime(
            model_description['TrainingDataEndTime'].replace(tzinfo=None)
        )
        self.evaluation_start = pd.to_datetime(
            model_description['EvaluationDataStartTime'].replace(tzinfo=None)
        )
        self.evaluation_end = pd.to_datetime(
            model_description['EvaluationDataEndTime'].replace(tzinfo=None)
        )
def _load_model_response(self):
"""
Use the trained model description to extract labelled and predicted
ranges of anomalies. This method will extract them from the
DescribeModel API from Lookout for Equipment and store them in the
labelled_ranges and predicted_ranges properties.
"""
describe_model_response = self.client.describe_model(
ModelName=self.model_name
)
if self.labelled_ranges is None:
self.labelled_ranges = eval(
describe_model_response['ModelMetrics']
)['labeled_ranges']
if len(self.labelled_ranges) > 0:
self.labelled_ranges = pd.DataFrame(self.labelled_ranges)
self.labelled_ranges['start'] = pd.to_datetime(self.labelled_ranges['start'])
self.labelled_ranges['end'] = pd.to_datetime(self.labelled_ranges['end'])
else:
self.labelled_ranges = pd.DataFrame(columns=['start', 'end'])
self.predicted_ranges = eval(
describe_model_response['ModelMetrics']
)['predicted_ranges']
if len(self.predicted_ranges) > 0:
self.predicted_ranges = pd.DataFrame(self.predicted_ranges)
self.predicted_ranges['start'] = pd.to_datetime(self.predicted_ranges['start'])
self.predicted_ranges['end'] = pd.to_datetime(self.predicted_ranges['end'])
else:
self.predicted_ranges = pd.DataFrame(columns=['start', 'end'])
def set_time_periods(
self,
evaluation_start,
evaluation_end,
training_start,
training_end
):
"""
Set the time period of analysis
Parameters:
evaluation_start (datetime):
Start of the evaluation period
evaluation_end (datetime):
End of the evaluation period
training_start (datetime):
Start of the training period
training_end (datetime):
End of the training period
"""
self.evaluation_start = evaluation_start
self.evaluation_end = evaluation_end
self.training_start = training_start
self.training_end = training_end
def get_predictions(self):
"""
Get the anomaly ranges predicted by the current model
Returns:
pandas.DataFrame:
A Pandas dataframe with the predicted anomaly ranges listed in
chronological order with a Start and End columns
"""
if self.predicted_ranges is None:
self._load_model_response()
return self.predicted_ranges
def get_labels(self, labels_fname=None):
"""
Get the labelled ranges as provided to the model before training
Parameters:
labels_fname (string):
As an option, if you provide a path to a CSV file containing
the label ranges, this method will use this file to load the
labels. If this argument is not provided, it will load the
labels from the trained model Describe API (Default to None)
Returns:
pandas.DataFrame:
A Pandas dataframe with the labelled anomaly ranges listed in
chronological order with a Start and End columns
"""
if labels_fname is not None:
labels_df = pd.read_csv(labels_fname, header=None)
labels_df[0] = pd.to_datetime(labels_df[0])
labels_df[1] = pd.to_datetime(labels_df[1])
labels_df.columns = ['start', 'end']
self.labelled_ranges = labels_df
elif self.labelled_ranges is None:
self._load_model_response()
return self.labelled_ranges
    def _get_time_ranges(self):
        """
        Extract DateTimeIndex with normal values and anomalies from the
        predictions generated by the model.
        Returns:
            pandas.DateTimeIndex:
                Timestamp index for all the values marked as normal during the
                training period
            pandas.DateTimeIndex:
                Timestamp index for all the values predicted as anomalies by
                the model during the evaluation period
            pandas.DateTimeIndex:
                Timestamp index for all the values covered by the labelled
                (initially known) anomaly ranges
        """
        # Extract the first time series (all signals share the same index)
        tag = list(self.df_list.keys())[0]
        tag_df = self.df_list[tag]
        # Initialize the predictions dataframe:
        predictions_df = pd.DataFrame(columns=['Prediction'], index=tag_df.index)
        predictions_df['Prediction'] = 0
        # Loops through the predicted and labelled anomalies
        # ranges and set these predictions to 1 (predicted)
        # or 2 (initially known):
        for index, row in self.predicted_ranges.iterrows():
            predictions_df.loc[row['start']:row['end'], 'Prediction'] = 1
        # Labelled ranges win over predicted ones where they overlap.
        for index, row in self.labelled_ranges.iterrows():
            predictions_df.loc[row['start']:row['end'], 'Prediction'] = 2
        # Limits the analysis range to the evaluation period:
        predictions_df = predictions_df[self.training_start:self.evaluation_end]
        # Build a DateTimeIndex for normal values and anomalies:
        index_normal = predictions_df[predictions_df['Prediction'] == 0].index
        index_anomaly = predictions_df[predictions_df['Prediction'] == 1].index
        index_known = predictions_df[predictions_df['Prediction'] == 2].index
        return index_normal, index_anomaly, index_known
    def compute_histograms(
        self,
        index_normal=None,
        index_anomaly=None,
        num_bins=20
    ):
        """
        This method loops through each signal and computes two distributions of
        the values in the time series: one for all the anomalies found in the
        evaluation period and another one with all the normal values found in the
        same period. It then computes the Wasserstein distance between these two
        histograms and then rank every signals based on this distance. The higher
        the distance, the more different a signal is when comparing anomalous
        and normal periods. This can orient the investigation of a subject
        matter expert towards the sensors and associated components.
        Parameters:
            index_normal (pandas.DateTimeIndex):
                All the normal indices
            index_anomaly (pandas.DateTimeIndex):
                All the indices for anomalies
            num_bins (integer):
                Number of bins to use to build the distributions (default: 20)
        """
        # NOTE(review): when index_normal/index_anomaly ARE provided, they
        # are never assigned to self.ts_* below -- the method then relies on
        # whatever a previous call stored there; confirm this is intended.
        if (index_normal is None) or (index_anomaly is None):
            index_lists = self._get_time_ranges()
            self.ts_normal_training = index_lists[0]
            self.ts_label_evaluation = index_lists[1]
            self.ts_known_anomalies = index_lists[2]
        self.num_bins = num_bins
        # Now we loop on each signal to compute a
        # histogram of each of them in this anomaly range,
        # compute another one in the normal range and
        # compute a distance between these:
        rank = dict()
        for tag, current_tag_df in tqdm(
            self.df_list.items(),
            desc='Computing distributions'
        ):
            try:
                # Get the values for the whole signal, parts
                # marked as anomalies and normal part:
                current_signal_values = current_tag_df[tag]
                current_signal_evaluation = current_tag_df.loc[self.ts_label_evaluation, tag]
                current_signal_training = current_tag_df.loc[self.ts_normal_training, tag]
                # Let's compute a bin width based on the whole range of possible
                # values for this signal (across the normal and anomalous periods).
                # For both normalization and aesthetic reasons, we want the same
                # number of bins across all signals:
                bin_width = (np.max(current_signal_values) - np.min(current_signal_values))/self.num_bins
                bins = np.arange(
                    np.min(current_signal_values),
                    np.max(current_signal_values) + bin_width,
                    bin_width
                )
                # We now use the same bins arrangement for both parts of the signal:
                u = np.histogram(
                    current_signal_training,
                    bins=bins,
                    density=True
                )[0]
                v = np.histogram(
                    current_signal_evaluation,
                    bins=bins,
                    density=True
                )[0]
                # Wasserstein distance is the earth mover distance: it can be
                # used to compute a similarity between two distributions: this
                # metric is only valid when the histograms are normalized (hence
                # the density=True in the computation above):
                d = wasserstein_distance(u, v)
                rank.update({tag: d})
            # NOTE(review): any failure (constant signal -> zero bin width,
            # missing index values, ...) is silently ranked 0.0; the caught
            # exception `e` is unused -- consider logging it.
            except Exception as e:
                rank.update({tag: 0.0})
        # Sort histograms by decreasing Wasserstein distance:
        rank = {k: v for k, v in sorted(rank.items(), key=lambda rank: rank[1], reverse=True)}
        self.rank = rank
def plot_histograms_v2(self, custom_ranking, nb_cols=3, max_plots=12, num_bins=20):
index_lists = self._get_time_ranges()
self.ts_normal_training = index_lists[0]
self.ts_label_evaluation = index_lists[1]
self.ts_known_anomalies = index_lists[2]
self.num_bins = num_bins
# Prepare the figure:
nb_rows = len(self.df_list.keys()) // nb_cols + 1
plt.style.use('Solarize_Light2')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(figsize=(16, int(nb_rows * 3)))
gs = gridspec.GridSpec(nb_rows, nb_cols, hspace=0.5, wspace=0.25)
axes = []
for i in range(max_plots):
axes.append(fig.add_subplot(gs[i]))
# Loops through each signal by decreasing distance order:
| |
tpr
self.register_performance_stats(config, time=duration, TPR=tpr)
return self._true_positive_rate
def run_on_negative(self, config, **kwargs):
    """
    Optionally test the detection strategy on negative client packets, call this
    instead of :meth:`negative_run`.

    :param tuple config: a consistently-formatted tuple containing configurations
        such as window size and threshold for performance indexing in records. It
        should be sufficiently specific to distinguish individual runs of the
        same configuration, as otherwise performance records for the config
        will be overwritten between runs.
    :returns: the false positive rate of this run, or False when no negative
        collection is available or no config was supplied.
    """
    if not self._neg_collection:
        return False

    if not self._packets_loaded:
        self._load_into_memory()

    if config is None:
        return False

    # Sanity-check the shape of the config key used for performance records.
    if isinstance(config, (list, tuple)):
        assert len(config) == len(self.RUN_CONFIG_DESCRIPTION)

    # Clear the falsely-blocked IP set so the block rate below reflects
    # only this execution.
    self._negative_blocked_ips = set()
    fpr = self.negative_run(**kwargs)
    self._false_positive_rate = fpr
    self._false_positive_blocked_rate = \
        float(len(self._negative_blocked_ips)) / self._negative_unique_ips
    self.register_performance_stats(config, FPR=fpr,
                                    ip_block_rate=self._false_positive_blocked_rate)
    return self._false_positive_rate
def run_on_recall(self, **kwargs):
    """
    Wrapper for the optional :meth:`recall_run`, testing a trained classifier
    on positive recall packets. Returns False when no recall collection is
    configured; otherwise records and returns the recall rate.
    """
    if not self._recall_collection:
        return False

    # Lazily load the packet traces on first use.
    if not self._packets_loaded:
        self._load_into_memory()

    recall_rate = self.recall_run(**kwargs)
    self._recall_rate = recall_rate
    return recall_rate
def register_performance_stats(self, config, time=None, TPR=None, FPR=None,
                               ip_block_rate=None):
    """
    Register timed performance metrics for a specific configuration. Call
    after each individual operation cycle of the strategy.

    :param tuple config: a consistently-styled tuple of configurations (e.g.
        window size and threshold) keying the performance record, allowing
        TPR and FPR to be supplied by separate method calls.
    :param float time: if not None, update the execution time of positive run.
    :param float TPR: if not None and within [0, 1], update the true positive
        rate of the record specified by config.
    :param float FPR: if not None and within [0, 1], update the false positive
        rate of the record specified by config.
    :param float ip_block_rate: if not None and within [0, 1], update the rate
        of falsely blocked IPs for the execution specified by config.
    """
    # Start from a pessimistic record so metrics not later amended assume
    # the worst case.
    record = self._time_statistics.setdefault(
        config, {'time': None, 'TPR': 0, 'FPR': 1, 'block_rate': 1})

    # Rates are only accepted when they are valid proportions.
    for key, value in (('TPR', TPR), ('FPR', FPR),
                       ('block_rate', ip_block_rate)):
        if value is not None and 0 <= value <= 1:
            record[key] = value

    if time is not None and time >= 0:
        record['time'] = time
def _score_performance_stats(self):
    """
    Based on the execution time, TPR, and FPR of strategy runs, score the
    effectiveness of this strategy in identifying the input PT.

    :returns: a floating point score between 0 and 100 for this strategy,
        and the config under which this was achieved (``(0, None)`` when no
        run was acceptable).
    """
    # Filter out records yielding unacceptable TPR or FPR values.
    # NOTE(review): `all(x[1])` iterates the *keys* of the stats dict, which
    # are always non-empty strings and hence truthy, so this clause is a
    # no-op -- it was presumably meant to be `all(x[1].values())` to drop
    # records whose 'time' is still None. Confirm intent before changing.
    acceptables = list(filter(lambda x: x[1]['TPR'] >= constants.TPR_BOUNDARY \
        and x[1]['FPR'] <= constants.FPR_BOUNDARY and all(x[1]), self._time_statistics.items()))
    acceptable_runs = [i[1] for i in acceptables]
    acceptable_configs = [i[0] for i in acceptables]
    # If invalid values or no acceptable runs, this strategy scores zero.
    if len(acceptable_runs) < 1:
        return 0, None
    for i in acceptable_runs:
        if not (0 <= i['TPR'] <= 1) or not (0 <= i['FPR'] <= 1):
            return 0, None
    # Penalise runs for their differences from best TPR/FPR and time values.
    # Times are scaled relative to the slowest run so the time penalty is
    # comparable in magnitude to the rate penalties.
    best_tpr = max([i['TPR'] for i in acceptable_runs])
    worst_time = max([i['time'] for i in acceptable_runs])
    scaled_times = [i['time'] / worst_time for i in acceptable_runs]
    best_scaled_time = min(scaled_times)
    # log1p keeps small deviations cheap while still bounding the cost of
    # large ones (inputs are scaled into [0, 100] before the log).
    tpr_penalties = [log1p((best_tpr - i['TPR'])*100) for i in acceptable_runs]
    fpr_penalties = [log1p((max(0, i['FPR'] - constants.FPR_TARGET))*100) for i in acceptable_runs] # Hard target for FPR.
    time_penalties = [log1p((i - best_scaled_time)*100) for i in scaled_times]
    # For IP falsely blocked rate, penalise from zero.
    block_rate_penalties = [log1p(i['block_rate']*100) for i in acceptable_runs]
    # Calculate weighted penalties across all metrics.
    overall_penalties = []
    for i in range(len(tpr_penalties)):
        overall_penalties.append(tpr_penalties[i] * constants.PENALTY_WEIGHTS[0] + \
            fpr_penalties[i] * constants.PENALTY_WEIGHTS[1] + \
            time_penalties[i] * constants.PENALTY_WEIGHTS[2] + \
            block_rate_penalties[i] * constants.PENALTY_WEIGHTS[3])
    # Now find out the minimum penalty required to reach the acceptable
    # TPR and FPR performance, and calculate the scores accordingly.
    # log1p(100) is the largest possible single-metric penalty, so each
    # score is the run's distance from the penalty-free ideal, in percent.
    scores = [(log1p(100) - i) / log1p(100) * 100 for i in overall_penalties]
    # Apply strategy-specific penalisation.
    strategy_penalised_scores = []
    for i, score in enumerate(scores):
        # Clip the penalty proportion to between 0 and 1.
        strategy_penalty = sorted([0, self.config_specific_penalisation(acceptable_configs[i]), 1])[1]
        strategy_penalised_scores.append(score * (1-strategy_penalty))
    best_score = max(strategy_penalised_scores)
    best_config = acceptable_configs[strategy_penalised_scores.index(max(strategy_penalised_scores))]
    return best_score, best_config
def split_pt(self, split_ratio=0.7):
    """
    Gatekeeper method for :meth:`test_validation_split`: guarantees traces
    are in memory (loading them implicitly if necessary) before a split is
    performed. Call this to perform a split; override
    :meth:`test_validation_split` rather than this method.

    :param float split_ratio: the proportion of positive packets used as
        test rather than validation in a split.
    """
    if not self._packets_loaded:
        self._load_into_memory()

    splits = self.test_validation_split(split_ratio)

    # Ignore malformed results from the (overridable) splitter.
    if not isinstance(splits, tuple) or not splits:
        return

    test_packets, validation_packets = splits
    # Only commit the split when it actually produced packets.
    if len(test_packets) > 0 or len(validation_packets) > 0:
        self._pt_test_packets = test_packets
        self._pt_validation_packets = validation_packets
        self._pt_split = True
def debug_print(self, message):
    """
    Print a debug message to the console, prefixed automatically with the
    strategy's debug tag and the current timestamp. No-op when debugging
    is disabled.
    """
    # Idiomatic truthiness test instead of `== False`.
    if not self.__debug_on:
        return
    print("[{}] {} : {}".format(self._DEBUG_PREFIX, datetime.now(), message))
def setup(self, pt_ip_filters=None, negative_ip_filters=None, pt_collection=None,
          negative_collection=None, test_recall=False, recall_ip_filters=None,
          recall_collection=None):
    """
    Set up the analysis strategy with filters and any existing collection names.
    To skip parsing traces again and use existing collections in MongoDB,
    both `pt_collection` and `negative_collection` need to be set to valid names.
    Recall used for evaluation of strategy itself only, not for user's use.

    :param list pt_ip_filters: input IP filters for positive test packets.
    :param list negative_ip_filters: input IP filters for negative test packets.
    :param str pt_collection: set pt_collection to be the name of an existing
        collection in MongoDB to skip parsing again.
    :param str negative_collection: set negative_collection to be the name of an
        existing collection in MongoDB to skip parsing again.
    :param bool test_recall: if True, the strategy will also test the classifier
        on unseen positive recall packets to cross validate.
    :param list recall_ip_filters: input IP filter for recall test packets.
    :param str recall_collection: set recall_collection to be the name of an
        existing collection in MongoDB to skip parsing again.
    """
    # Use None defaults instead of mutable [] defaults (which are shared
    # between calls) and normalise them here.
    if pt_ip_filters is None:
        pt_ip_filters = []
    if negative_ip_filters is None:
        negative_ip_filters = []
    if recall_ip_filters is None:
        recall_ip_filters = []

    self.debug_print("Executing detection strategy: " + self.NAME)
    self.debug_print(self.DESCRIPTION)

    reparsing_positive = True
    # Only re-parse the negative trace when a negative PCAP was supplied.
    reparsing_negative = bool(self.__negative_pcap)

    if pt_collection is not None:
        if self.__reader.select(pt_collection):
            reparsing_positive = False
            self._pt_collection = pt_collection
            self.debug_print("Loading existing PT trace...")
        else:
            self.debug_print("Re-parsing PT PCAP file as {} does not exist in MongoDB...".format(pt_collection))

    if reparsing_positive:
        self.debug_print("- Parsing PT PCAP...")
        if self._parse_PT_packets(pt_ip_filters):
            self.debug_print("Parsed PCAP file according to input positive IP filters.")
        else:
            raise RuntimeError("! Failure to parse positive PCAP files.")

    if negative_collection is not None:
        if self.__reader.select(negative_collection):
            reparsing_negative = False
            self._neg_collection = negative_collection
            self.debug_print("Loading existing negative trace...")
        else:
            self.debug_print("Re-parsing negative trace as {} does not exist in MongoDB...".format(negative_collection))

    if reparsing_negative:
        self.debug_print("- Parsing negative PCAP...")
        if self._parse_negative_packets(negative_ip_filters):
            self.debug_print("Parsed PCAP file according to input negative IP filters.")
        else:
            raise RuntimeError("! Failure to parse negative PCAP file.")

    if test_recall:
        self._test_recall = True
        self.debug_print("This run will test the positive recall of the best classifier.")
        if self.__reader.select(recall_collection):
            self._recall_collection = recall_collection
            self.debug_print("Loading existing recall trace...")
        else:
            self.debug_print("- Attempting to parse recall PCAP as specified recall collection does not exist.")
            if self._parse_recall_packets(recall_ip_filters):
                self.debug_print("Parsed PCAP file according to input recall IP filters.")
            else:
                raise RuntimeError("! Failure to parse recall PCAP file.")

    self._packets_parsed = True
def load(self):
"""
Load parsed or stored packets from their trace collections.
Call this method when it is ready to load traces from memory. Call this
method again after calling :meth:`set_strategic_filter` to set a new
strategic filter, as afterwards traces need to be reloaded based on the new
filter.
"""
self.debug_print("- Setting strategic filter...")
self.set_strategic_filter()
self.debug_print("Strategy filter on traces from MongoDB: {}".format(self._strategic_packet_filter))
self.debug_print("- Loading packets according to the initial strategic filter...")
self._load_into_memory()
self.debug_print("Positive: {} packets, examining {}.".format(self._pt_collection_total, len(self._pt_packets)))
self.debug_print("Negative: {} packets, examining {}.".format(self._neg_collection_total, | |
stat.S_IEXEC)
# spawn the sub-agent
cmdline = './%s' % ls_name
self._log.info ('create services: %s' % cmdline)
ru.sh_callout_bg(cmdline, stdout='services.out', stderr='services.err')
self._log.debug('services started done')
# --------------------------------------------------------------------------
#
def _start_sub_agents(self):
    '''
    For the list of sub_agents, get a launch command and launch that
    agent instance on the respective node.  We pass it to the second
    bootstrap level, there is no need to pass the first one again.
    '''
    # FIXME: reroute to agent daemonizer

    if not self._cfg.get('agents'):
        return

    # Every sub-agent needs its own node from the resource manager.
    assert (len(self._rm.info.agent_node_list) >= len(self._cfg['agents']))

    self._log.debug('start_sub_agents')

    # store the current environment as the sub-agents will use the same
    ru.env_prep(os.environ, script_path='./env/agent.env')

    # the configs are written, and the sub-agents can be started.  To know
    # how to do that we create the agent launch method, have it creating
    # the respective command lines per agent instance, and run via
    # popen.
    #
    # Reserve a full node's worth of threads for each sub-agent.
    threads = self._rm.info.cores_per_node * \
              self._rm.info.threads_per_core

    for idx, sa in enumerate(self._cfg['agents']):

        target = self._cfg['agents'][sa]['target']

        if target not in ['local', 'node']:
            raise ValueError('agent target unknown (%s)' % target)

        if target == 'local':
            # start agent locally
            cmdline = '/bin/sh -l %s/bootstrap_2.sh %s' % (self._pwd, sa)

        else:  # target == 'node':

            node = self._rm.info.agent_node_list[idx]
            # start agent remotely, use launch method
            # NOTE:  there is some implicit assumption that we can use
            #        the 'agent_node' string as 'agent_string:0' and
            #        obtain a well format slot...
            # FIXME: it is actually tricky to translate the agent_node
            #        into a viable 'slots' structure, as that is
            #        usually done by the schedulers.  So we leave that
            #        out for the moment, which will make this unable to
            #        work with a number of launch methods.  Can the
            #        offset computation be moved to the ResourceManager?
            bs_name       = '%s/bootstrap_2.sh' % (self._pwd)
            launch_script = '%s/%s.launch.sh'   % (self._pwd, sa)
            exec_script   = '%s/%s.exec.sh'     % (self._pwd, sa)

            # Describe the sub-agent as a single-process task pinned to
            # the chosen node so a launcher can be selected for it.
            agent_task = {
                'uid'              : sa,
                'task_sandbox_path': self._pwd,
                'description'      : {'cpu_processes' : 1,
                                      'cpu_threads'   : threads,
                                      'gpu_processes' : 0,
                                      'gpu_threads'   : 0,
                                      'executable'    : '/bin/sh',
                                      'arguments'     : [bs_name, sa]},
                'slots': {'ranks'  : [{'node_name' : node['node_name'],
                                       'node_id'   : node['node_id'],
                                       'core_map'  : [[0]],
                                       'gpu_map'   : [],
                                       'lfs'       : 0,
                                       'mem'       : 0}]}
            }

            # find a launcher to use
            launcher = self._rm.find_launcher(agent_task)

            if not launcher:
                raise RuntimeError('no launch method found for sub agent')

            # Launch script: prepare the launcher environment, then run
            # the exec script on the target node.
            tmp  = '#!/bin/sh\n\n'
            cmds = launcher.get_launcher_env()
            for cmd in cmds:
                tmp += '%s || exit 1\n' % cmd

            cmds = launcher.get_launch_cmds(agent_task, exec_script)
            tmp += '%s\nexit $?\n\n' % '\n'.join(cmds)

            with ru.ru_open(launch_script, 'w') as fout:
                fout.write(tmp)

            # Exec script: restore the agent environment and run the
            # second bootstrap level for this sub-agent.
            tmp  = '#!/bin/sh\n\n'
            tmp += '. ./env/agent.env\n'
            tmp += '/bin/sh -l ./bootstrap_2.sh %s\n\n' % sa

            with ru.ru_open(exec_script, 'w') as fout:
                fout.write(tmp)

            # make sure scripts are executable
            # BUGFIX: stat each file right before its own chmod -- the
            # previous code stat'ed both files first, so the launch
            # script was chmod'ed with the exec script's mode bits.
            for script in (launch_script, exec_script):
                st = os.stat(script)
                os.chmod(script, st.st_mode | stat.S_IEXEC)

            # spawn the sub-agent
            cmdline = launch_script

        self._log.info ('create sub-agent %s: %s' % (sa, cmdline))
        ru.sh_callout_bg(cmdline, stdout='%s.out' % sa,
                                  stderr='%s.err' % sa)

        # FIXME: register heartbeats?

    self._log.debug('start_sub_agents done')
# --------------------------------------------------------------------------
#
def _agent_command_cb(self):
if not self._check_commands(): return False
if not self._check_rpc (): return False
if not self._check_state (): return False
return True
# --------------------------------------------------------------------------
#
def _check_commands(self):
    '''
    Pull any commands waiting in this pilot's DB document and handle them:
    pilot-manager heartbeats, pilot cancellation and task cancellation.

    :returns: False once a `cancel_pilot` command was handled (stopping the
        caller's command callback), True otherwise.
    '''
    # Check if there's a command waiting
    # FIXME: this pull should be done by the update worker, and commands
    #        should then be communicated over the command pubsub
    # FIXME: commands go to pmgr, tmgr, session docs
    # FIXME: check if pull/wipe are atomic
    # FIXME: long running commands can time out on hb
    retdoc = self._dbs._c.find_and_modify(
        query ={'uid' : self._pid},
        fields=['cmds'],                    # get new commands
        update={'$set': {'cmds': list()}})  # wipe old commands
    if not retdoc:
        # no document found -- nothing to do
        return True
    for spec in retdoc.get('cmds', []):
        cmd = spec['cmd']
        arg = spec['arg']
        self._log.debug('pilot command: %s: %s', cmd, arg)
        self._prof.prof('cmd', msg="%s : %s" % (cmd, arg), uid=self._pid)
        if cmd == 'heartbeat' and arg['pmgr'] == self._pmgr:
            # heartbeat from our own pilot manager -- acknowledge it
            self._hb.beat(uid=self._pmgr)
        elif cmd == 'cancel_pilot':
            # terminate the whole pilot via the control channel
            self._log.info('cancel pilot cmd')
            self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'terminate',
                                              'arg' : None})
            self._final_cause = 'cancel'
            self.stop()
            return False  # we are done
        elif cmd == 'cancel_tasks':
            # forward the task cancellation request to the components
            self._log.info('cancel_tasks cmd')
            self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'cancel_tasks',
                                              'arg' : arg})
        else:
            self._log.warn('could not interpret cmd "%s" - ignore', cmd)
    return True
# --------------------------------------------------------------------------
#
def _check_rpc(self):
    '''
    check if the DB has any RPC request for this pilot. If so, then forward
    that request as `rpc_req` command on the CONTROL channel, and listen for
    an `rpc_res` command on the same channel, for the same rpc id. Once
    that response is received (from whatever component handled that
    command), send the response back to the database for the callee to pick
    up.

    :returns: always True, so the caller's callback chain continues.
    '''
    # FIXME: implement a timeout, and/or a registry of rpc clients
    # Atomically fetch and clear the pending request on the pilot document.
    retdoc = self._dbs._c.find_and_modify(
        query ={'uid' : self._pid},
        fields=['rpc_req'],
        update={'$set': {'rpc_req': None}})
    if not retdoc:
        # no rpc request found
        return True
    rpc_req = retdoc.get('rpc_req')
    if rpc_req is None:
        # document has no rpc request
        return True
    self._log.debug('rpc req: %s', rpc_req)
    # RPCs are synchronous right now - we send the RPC on the command
    # channel, hope that some component picks it up and replies, and then
    # return that reply. The reply is received via a temporary callback
    # defined here, which will receive all CONTROL messages until the right
    # rpc response comes along.
    def rpc_cb(topic, msg):
        # Closure over `rpc_req`: only react to the response matching the
        # request we forwarded below.
        rpc_id = rpc_req['uid']
        cmd = msg['cmd']
        rpc_res = msg['arg']
        if cmd != 'rpc_res':
            # not an rpc response
            return True
        if rpc_res['uid'] != rpc_id:
            # not the right rpc response
            return True
        # send the response to the DB
        self._dbs._c.update({'type' : 'pilot',
                             'uid' : self._pid},
                            {'$set' : {'rpc_res': rpc_res}})
        # work is done - unregister this temporary cb (rpc_cb)
        return False
    self.register_subscriber(rpc.CONTROL_PUBSUB, rpc_cb)
    # ready to receive and proxy rpc response -- forward rpc request on
    # control channel
    self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'rpc_req',
                                      'arg' : rpc_req})
    return True  # keep cb registered (self._check_rpc)
# --------------------------------------------------------------------------
#
def _check_control(self, _, msg):
'''
Check for commands on the control pubsub, mainly waiting for RPC
requests to handle. We handle two types of RPC requests: `hello` for
testing, and `prepare_env` for environment preparation requests.
'''
cmd = msg['cmd']
arg = msg['arg']
if cmd != 'rpc_req':
# not an rpc request
return True
req = arg['rpc']
if req not in ['hello', 'prepare_env']:
# we don't handle that request
return True
ret = None
rpc_res = {'uid': arg['uid']}
try:
if req == 'hello' :
ret = 'hello %s' % ' '.join(arg['arg'])
elif req == 'prepare_env':
env_name = arg['arg']['env_name']
env_spec = arg['arg']['env_spec']
ret = self._prepare_env(env_name, env_spec)
except Exception as e:
# request failed for some reason - indicate error
rpc_res['err'] = repr(e)
rpc_res['ret'] = None
self._log.exception('control cmd failed')
else:
# request succeeded - respond with return value
rpc_res['err'] = None
rpc_res['ret'] = ret
# publish the response (success or failure)
self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'rpc_res',
'arg': rpc_res})
return True
# --------------------------------------------------------------------------
#
def _check_state(self):
# Make sure that we haven't exceeded the runtime - otherwise terminate.
if self._cfg.runtime:
if time.time() >= self._starttime + (int(self._cfg.runtime) * 60):
self._log.info('runtime limit (%ss).', self._cfg.runtime * 60)
self._final_cause = 'timeout'
self.stop()
return False # we are done
return True
# --------------------------------------------------------------------------
#
def _check_tasks_cb(self):
# Check for tasks waiting for input staging and log pull.
#
# FIXME: Unfortunately, 'find_and_modify' is not bulkable, so we have
# to use 'find'. To avoid finding the same tasks over and over
# again, we update the 'control' field *before* running the next
# find -- so we do it right here.
# This also blocks us from using multiple ingest threads, or from
# doing late binding by task pull :/
task_cursor = self._dbs._c.find({'type' : 'task',
'pilot' : self._pid,
'control' : 'agent_pending'})
if not task_cursor.count():
self._log.info('tasks pulled: 0')
return True
# update the tasks to avoid pulling them again next time.
task_list = list(task_cursor)
task_uids = [task['uid'] for task in task_list]
self._dbs._c.update({'type' : 'task',
'uid' : {'$in' : task_uids}},
{'$set' : {'control' : 'agent'}},
multi=True)
self._log.info("tasks pulled: %4d", len(task_list))
self._prof.prof('get', msg='bulk: %d' % len(task_list), uid=self._pid)
for task in | |
<reponame>Swiffers/puretabix
import gzip
import io
import itertools
import logging
import struct
import zlib
logger = logging.getLogger(__name__)
class TabixIndex:
    def __init__(self, fileobj):
        """
        In-memory representation of a Tabix index. See
        https://samtools.github.io/hts-specs/tabix.pdf for more information.

        Generally these are pretty small files that need to be read entirely,
        thus downloading them locally before processing is recommended
        e.g. io.BytesIO

        :param fileobj: binary file-like object containing the (gzipped)
            .tbi index data.
        """
        self._fileobject = fileobj
        self.magic = None
        self.n_sequences = None
        self.file_format = None
        self.column_sequence = 0  # column for sequence ids, 1-based
        self.column_begin = 0  # column for region start, 1-based
        self.column_end = 0  # column for region end, 1-based
        self.meta = None
        self.headerlines_count = None
        self.names = None
        self.index_bin = {}
        self.index_interval = {}
        # pre-process the file
        self._parse_index()

    def _parse_index(self):
        """Read the whole index: header, sequence names, then a binning
        index and a linear (interval) index per sequence."""
        # the index file is block-gzipped but small enough we can
        # load it into memory and process like a regular gzip file
        with gzip.GzipFile(fileobj=self._fileobject) as f:
            header_pattern = "<4siiiii4sii"
            header = struct.unpack(
                header_pattern, f.read(struct.calcsize(header_pattern))
            )
            self.magic = header[0]
            if self.magic != b"TBI\x01":  # check magic
                raise RuntimeError(f"invalid tabix index magic {self.magic}.")
            # FIX: n_sequences was declared in __init__ but never populated.
            self.n_sequences = header[1]
            self.file_format = header[2]
            # 0 = generic tab-delimited
            # 1 = SAM
            # 2 = VCF
            if self.file_format not in (0, 1, 2):
                raise RuntimeError(f"invalid tabix index format {self.file_format}.")
            # these are 1 based
            # value of 0 states not included in file
            # e.g. VCF has no explicit end column
            self.column_sequence = header[3]  # Column for the sequence name
            self.column_begin = header[4]  # Column for the start of a region
            self.column_end = header[5]  # Column for the end of a region
            # this is the comment marker, usually #
            self.meta = header[6].decode("ascii")
            # number of lines of header at the start of the file
            # this does not include lines marked as comments
            self.headerlines_count = header[7]
            # sequence names are a series of bytes followed by a null byte
            self.names = tuple(
                map(bytes.decode, f.read(header[8]).split(b"\x00")[:-1])
            )  # throw the last empty one away
            if len(self.names) != self.n_sequences:
                raise RuntimeError(
                    f"unexpected number of sequences {self.n_sequences} vs {len(self.names)}"
                )
            # for each sequence
            for name in self.names:
                # each sequence has a bin index and an interval index
                # parse the bin index
                n_bins = struct.unpack("<i", f.read(4))[0]
                bins = {}
                for _ in range(n_bins):
                    # each bin has a key, and a series of chunks
                    bin_key, n_chunks = struct.unpack("<Ii", f.read(8))
                    chunks = list(struct.iter_unpack("<QQ", f.read(16 * n_chunks)))
                    assert bin_key not in bins
                    bins[bin_key] = chunks
                if name in self.index_bin:
                    raise RuntimeError(f"duplicate sequence name {name}")
                self.index_bin[name] = bins
                # parse the interval index
                n_intervals = struct.unpack("<i", f.read(4))[0]
                intervals = [
                    i[0] for i in struct.iter_unpack("<Q", f.read(8 * n_intervals))
                ]
                if name in self.index_interval:
                    raise RuntimeError(f"duplicate sequence name {name}")
                self.index_interval[name] = intervals

    def _lookup_linear(self, sequence_name, start):
        """
        For each tiling 16 kb window keep the virtual file offset of the leftmost
        record (i.e. having the smallest start coordinate) that overlaps the window.

        Given a location, get the smallest start location of all records that
        overlap the 16kb window containing the location, or None when the
        sequence or window is not indexed.
        """
        # linear index is in 16kb intervals
        # 16kb = 16 * (2 ** 10) = 2 ** 14 = 1 << 14
        # throw away the first 14 bits to get index position
        i = start >> 14
        # if this sequence_name isn't valid, say that
        if sequence_name not in self.index_interval:
            return None
        # if it would be beyond the index, say that
        if i >= len(self.index_interval[sequence_name]):
            return None
        # its a valid sequence name and a valid interval window
        return self.index_interval[sequence_name][i]

    def _lookup_bin_chunks(self, sequence_name, start, end):
        """
        Records are assigned to a bin if they entirely fit in the bin.
        So we want all the records in all the bins that overlap with the region
        of interest. These records *might* overlap with the region of interest.
        """
        for chunks_bin_index in reversed(tuple(self.region_to_bins(start, end))):
            if chunks_bin_index in self.index_bin[sequence_name]:
                for chunk in self.index_bin[sequence_name][chunks_bin_index]:
                    yield chunk

    def lookup_virtual(self, sequence_name, start, end):
        """
        Compute the smallest span of virtual file offsets guaranteed to contain
        every record overlapping [start, end] on the given sequence.

        :returns: (virtual_start, virtual_end), both None when the region
            is not covered by the index.
        """
        virtual_start = None
        virtual_end = None
        linear_start = self._lookup_linear(sequence_name, start)
        # if this is not in the linear index, cant return anything
        if not linear_start:
            return None, None
        for chunk_start, chunk_end in self._lookup_bin_chunks(
            sequence_name, start, end
        ):
            if chunk_end <= linear_start:
                # if the chunk finished before this section of the linear starts,
                # skip the chunk; rare, but does happen sometimes
                continue
            # move the chunk start to where the linear start begins
            chunk_start = min(chunk_start, linear_start)
            if virtual_start is None or chunk_start < virtual_start:
                virtual_start = chunk_start
            if virtual_end is None or chunk_end > virtual_end:
                virtual_end = chunk_end
        # either both or neither must be set
        assert (virtual_start is None) == (virtual_end is None)
        return virtual_start, virtual_end

    @staticmethod
    def region_to_bins(begin, end, n_levels=5, min_shift=14):
        """
        generator of keys to bins of records which *may* overlap the given region

        n_levels: int, optional
            cluster level, 5 for tabix
        min_shift: int, optional
            minimum shift, 14 for tabix
        """
        t = 0
        s = min_shift + (n_levels << 1) + n_levels
        for level in range(n_levels + 1):
            b = t + (begin >> s)
            e = t + (end >> s)
            # yield every bin key on this level touched by the region
            # (a dead counter variable `n` was removed here)
            for k in range(b, e + 1):
                yield k
            # offset of the first bin of the next (finer) level
            t += 1 << ((level << 1) + level)
            s -= 3
class TabixIndexedFile:
def __init__(self, fileobj, index_fileobj):
    """
    Bind a block-gzipped data file to its parsed Tabix index.

    :raises ValueError: if fileobj is not a block gzip (BGZF) file.
    """
    self.fileobj = fileobj
    self.index = TabixIndex(index_fileobj)
    # fetch() relies on BGZF virtual offsets, so reject plain files early.
    if not self.is_block_gzip():
        raise ValueError("fileobj must be a block gzipped file-like object")
def is_gzip(self):
    """is bytes_data a valid gzip file?"""
    # gzip magic (0x1f 0x8b) followed by the deflate method byte (8)
    self.fileobj.seek(0)
    magic = struct.unpack("<BBB", self.fileobj.read(3))
    return magic == (31, 139, 8)
def is_block_gzip(self):
    """is bytes_data a valid block gzip file?"""
    if not self.is_gzip():
        return False
    # NOTE assumes there is only one extra header; not sure if this is
    # required by the block gzip spec or not.  The BGZF extra field is
    # identified by the 'BC' subfield tag with length 2.
    self.fileobj.seek(12)
    extra = struct.unpack("<ccH", self.fileobj.read(4))
    return extra == (b"B", b"C", 2)
def fetch(self, name, start, end=None):
"""
Returns a text block of lines that are included in the region of interest
"""
# quick check
if name not in self.index.names:
return ""
# default if only start specified
if not end:
end = start
# use the index to get "virtual" file offsets that include all of the region of interest
virtual_start, virtual_end = self.index.lookup_virtual(name, start, end)
# location not indexed, return empty string
if not virtual_start and not virtual_end:
return ""
# the lower 16 bits store the offset of the byte inside the gzip block
# the rest store the offset of gzip block
block_start = virtual_start >> 16
offset_start = virtual_start & 0xFFFF
block_end = virtual_end >> 16
offset_end = None # this is important only in the last block
value = io.BytesIO()
while block_start <= block_end:
# if this is the last block, then we need the ending offset
if block_start == block_end:
offset_end = virtual_end & 0xFFFF
# read this block
block, block_size = self._read_block(block_start)
# take the content of interest out of the block
if offset_end is None:
# we want everything else in the block
value.write(block[offset_start:])
else:
# we want to stop within this block
value.write(block[offset_start:offset_end])
# move to next block
offset_start = 0
block_start += block_size
# turn the bytes into a list of strings
# TODO ascii vs utf-8?
lines = io.StringIO(value.getvalue().decode("ascii"))
# skip header lines defined in index
if self.index.headerlines_count:
lines = itertools.islice(lines, self.index.headerlines_count, None)
# filter out comments
lines = (line for line in lines if not line.startswith(self.index.meta))
# filter lines of wrong lengths i.e. cut off around chunk boundries
lines = (
line
for line in lines
if len(line.split("\t"))
>= max(
self.index.column_sequence,
self.index.column_begin,
self.index.column_end,
)
)
# filter lines before start
lines = (
line
for line in lines
if int(line.split("\t")[self.index.column_begin - 1]) >= start
)
# filter lines after end
| |
<reponame>akjmicro/pystepseq
#!/usr/bin/python3 -i
# -*- coding: utf-8 -*-
#
# pystepseq.py
#
# Copyright 2013-2019 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# modules needed:
import _thread
import os
from copy import deepcopy
from math import ceil, log
from random import randint, choice
# my modules:
from pystepseq.lib.midi_functions import (
close_port,
note_off,
note_on,
open_port,
pitch_bend,
)
from pystepseq.lib.scales import * # noqa
from pystepseq.lib.pink_noise import pink_noise
class DataSlot:
    """
    Bare attribute container for one saved sequencer configuration.

    Every declared slot starts as None and is filled in when a live
    Pystepseq state is stored into the slot.
    """

    __slots__ = [
        "chn",
        "end",
        "triggers_per_beat",
        "beats_per_measure",
        "scl",
        "scl_min",
        "scl_max",
        "scl_trans",
        "len_list",
        "vol_list",
        "gate_list",
        "note_list",
        "note_noise",
        "note_depth",
        "note_repeat",
        "note_tie",
        "vol_noise",
        "vol_depth",
        "space",
    ]

    def __init__(self):
        # Initialise every declared slot so attribute reads never raise.
        for name in self.__slots__:
            setattr(self, name, None)
class Pystepseq:
"""The Pystepseq object defines a MIDI voice that will be triggered
to sound by a multicast network Tempotrigger object.
"""
# fmt: off
__slots__ = [
"chn", "end", "triggers_per_beat", "beats_per_measure", "_triggers_per_measure",
"scl", "scl_min", "scl_max", "scl_trans", "len_list", "vol_list", "gate_list", "note_list",
"note_noise", "note_depth", "note_repeat", "note_tie",
"vol_noise", "vol_depth", "space",
"_scl", "_note", "_note_index", "_note_length", "_bend", "_old_note",
"_gate", "_gate_cutoff", "_gate_list", "_vol",
"_cycle_idx", "_step", "_trigger_count",
"_MYGROUP", "_MYPORT", "_receiver", "_open_port_exists",
"_data_slots", "_requested_slot", "_current_slot",
"_saveable_attrs", "_runstate",
]
# fmt: on
def __init__(self, chn=0, data_slots=None):
    """
    Create a sequencer voice on MIDI channel *chn*.

    :param chn: MIDI channel number for this voice.
    :param data_slots: optional list of saved slot dicts; when given, the
        first slot holding note data is restored, otherwise random
        sequences are generated.  Defaults to None rather than a mutable
        ``{}`` to avoid the shared mutable-default pitfall; any falsy
        value triggers randomization, preserving the old behaviour.
    """
    # Imported lazily to avoid circular imports at package load time.
    from . import constants
    from .tempotrigger import openmcastsock
    self._saveable_attrs = [x for x in self.__slots__ if not x.startswith("_")]
    self.chn = chn
    self._step = -1
    self.end = 16  # num of note events, distinguished from beats
    self.triggers_per_beat = 24
    self.beats_per_measure = 4  # total number of beats in a measure
    self._triggers_per_measure = self.triggers_per_beat * self.beats_per_measure
    self.scl = "modal"  # noqa
    self.scl_min = 48
    self.scl_max = 72
    self.scl_trans = 0
    self._runstate = 0
    self.len_list = []
    self.vol_list = []
    self.gate_list = []
    self.note_list = []
    self.note_noise = "white"  # can be brown or pink, too
    self.note_depth = 5
    self.note_repeat = 0
    self.note_tie = 0
    self.vol_noise = "white"
    self.vol_depth = 20
    self.space = 0
    # NOTE(review): 172.16.31.10 is not in the IPv4 multicast range
    # (224.0.0.0/4) -- confirm this group address works with openmcastsock.
    self._MYGROUP = "172.16.31.10"
    self._MYPORT = constants.DEFAULT_MULTICAST_PORT
    self._receiver = openmcastsock(self._MYGROUP, self._MYPORT)
    self._open_port_exists = False
    self._data_slots = [DataSlot() for x in range(16)]
    self._requested_slot = 0
    self._current_slot = 0
    # automatic init:
    # on a Mac, the variable is a dummy...
    self.init_scl()
    self.init_midi_port(
        os.environ.get("PYSTEPSEQ_MIDI_PORT", constants.DEFAULT_MIDI_PORT)
    )
    if data_slots:
        self._init_data_slots(data_slots)
    else:
        self.init_random_lists()
def _init_data_slots(self, data_slots):
for i, ds in enumerate(data_slots):
if ds:
local_slot = DataSlot()
for k, v in ds.items():
setattr(local_slot, k, v)
self._data_slots[i] = local_slot
for i, ds in enumerate(self._data_slots):
if ds.note_list:
self.data_slot_recall(i)
self._data_update()
break
if not self.len_list:
print("No data found in loaded sequence; defaulting to randomization...")
self.init_random_lists()
    def init_scl(self):
        """Rebuild the internal MidiScale object from the current public
        scale settings (scl, scl_min, scl_max, scl_trans).

        Must be called whenever any of those attributes change so that
        ``_scl`` stays in sync with them.
        """
        self._scl = MidiScale(self.scl, self.scl_min, self.scl_max, self.scl_trans)
def data_slot_save(self, num):
self._requested_slot, self._current_slot = num, num
data_slot = DataSlot()
for attr in self._saveable_attrs:
val = deepcopy(getattr(self, attr))
setattr(data_slot, attr, val)
self._data_slots[num] = data_slot
def _data_update(self):
data_slot = self._data_slots[self._requested_slot]
for k in data_slot.__slots__:
val = deepcopy(getattr(data_slot, k))
setattr(self, k, val)
self.init_scl()
self._triggers_per_measure = self.triggers_per_beat * self.beats_per_measure
self._current_slot = self._requested_slot
def data_slot_recall(self, num):
# check that the slot has data:
if getattr(self._data_slots[num], "note_list"):
self._requested_slot = num
else:
print(f"slot {num} has no data, defaulting to slot 0...")
self._requested_slot = 0
    def init_midi_port(self, midiport=None):
        """(Re)open the MIDI output port.

        Closes the port previously opened by this object (if any) before
        opening *midiport*, so repeated calls do not leak open ports.

        :param midiport: identifier passed straight through to ``open_port``;
            ``None`` presumably lets the backend choose a default — confirm
            against the open_port implementation.
        """
        if self._open_port_exists:
            close_port()
        open_port(midiport)
        self._open_port_exists = True
# randomize functions:
def _note_white(self, start, finish=None):
"""create white noise shaped note contour"""
if finish is None:
finish = len(self.len_list)
var = self.note_depth
chance_repeat = self.note_repeat
chance_tie = self.note_tie
scale_midpoint = self._scl.size // 2
for blah in range(start, finish):
randnum = scale_midpoint + randint(-var, var)
if randnum > self._scl.size:
randnum = self._scl.size - (randnum - self._scl.size)
if randnum < 0:
randnum = abs(randnum)
if chance_repeat >= randint(1, 100): # for repeat
if chance_tie >= randint(1, 100): # for space
randnum = -1
else:
randnum = self.note_list[(blah - 1) % self.end]
try:
self.note_list[blah] = randnum
except IndexError:
self.note_list.append(randnum)
def _note_brown(self, start, finish=None):
"""create brown noise shaped note contour"""
if finish is None:
finish = len(self.len_list)
var = self.note_depth
chance_repeat = self.note_repeat
chance_tie = self.note_tie
if start == 0 and finish == 1:
start = 1
finish = 2
for blah in range(start, finish + 1):
offset = randint(-var, var)
current = self.note_list[blah - 1]
new = current + offset
if new > self._scl.size:
new = current - offset
if new < 0:
new = abs(new)
if chance_repeat >= randint(1, 100): # for repeat
if chance_tie >= randint(1, 100): # for tie
new = -1
else:
new = self.note_list[(blah - 1) % self.end]
try:
self.note_list[blah] = new
except IndexError:
self.note_list.append(new)
def _note_pink(self, start, finish=None):
"""create pink noise shaped note contour"""
if finish is None:
finish = len(self.len_list)
var = self.note_depth
chance_repeat = self.note_repeat
chance_tie = self.note_tie
scale_midpoint = self._scl.size // 2
finish_pow = int(ceil(log(finish, 2)))
result_list = pink_noise(finish_pow, var)
offset = -1 * (max(result_list) // 2)
for blah in range(start, finish):
randnum = scale_midpoint + (result_list[blah - 1] + offset)
if randnum > self._scl.size:
randnum = self._scl.size - (randnum - self._scl.size)
if randnum < 0:
randnum = abs(randnum)
if chance_repeat >= randint(1, 100): # for repeat
if chance_tie >= randint(1, 100): # for tie
randnum = -1
else:
randnum = self.note_list[(blah - 1) % self.end]
try:
self.note_list[blah] = randnum
except IndexError:
self.note_list.append(randnum)
def _vol_white(self, start, finish=None):
"""create white noise shaped volume contour"""
if finish is None:
finish = len(self.len_list)
var = self.vol_depth
chance = self.space
for blah in range(start, finish):
randnum = 64 + randint(-var, var) # 64 is half of 127
if randnum > 127:
randnum = 127 - (randnum - 127)
if randnum < 0:
randnum = abs(randnum)
if chance >= randint(1, 100): # for space
randnum = 0
try:
self.vol_list[blah] = randnum
except IndexError:
self.vol_list.append(randnum)
def _vol_brown(self, start, finish=None):
"""create brown noise shaped volume contour"""
if finish is None:
finish = len(self.len_list)
var = self.vol_depth
chance = self.space
if start == 0 and finish == 1:
start = 1
finish = 2
for blah in range(start, finish):
offset = randint(-var, var)
current = self.vol_list[blah - 1]
if chance >= randint(1, 100):
new = 0
else:
new = current + offset
if new > 127:
new = current - offset
if new < 0:
new = current - offset
try:
self.vol_list[blah] = new
except IndexError:
self.vol_list.append(blah)
def _vol_pink(self, start, finish=None):
"""create pink noise shaped volume contour"""
if finish is None:
finish = len(self.len_list)
var = self.vol_depth
chance = self.space
result_list = pink_noise(5, var)
offset = -1 * (max(result_list) // 2)
for blah in range(start, finish):
randnum = 64 + (result_list[blah - 1] + offset)
if randnum > 127:
randnum = 127 - (randnum - 127)
if randnum < 0:
randnum = abs(randnum)
if chance >= randint(1, 100): # for space
randnum = 0
try:
self.vol_list[blah] = randnum
except IndexError:
self.vol_list.append(randnum)
def randomize_lengths(self, choice_list=None):
"""randomize lengths"""
# give a sensible default if none is given:
if choice_list is None:
choice_list = [6, 6, 6, 6, 6, 6, 6, 6, 12, 12, 12, 18, 18, 24]
outarr = []
total = 0
# re-calc the measure length:
self._triggers_per_measure = self.triggers_per_beat * self.beats_per_measure
# do this while we are below the beat count:
while total < self._triggers_per_measure:
leftover = self._triggers_per_measure - total
# filter the choices by what won't go over:
choice_list = list(filter(lambda x: x <= leftover, choice_list))
if not choice_list:
pick = leftover
else:
pick = choice(choice_list)
outarr.append(pick)
total += pick
# we now have a replacement rhythm list. Set it!
self.len_list = outarr
# set the endpoint
self.end = len(self.len_list)
def randomize_gates(self, choice_list=None):
"""randomize gate lengths"""
if choice_list is None:
self.gate_list = [100 for x in self.len_list]
else:
self.gate_list = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.