text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""Transform a string with Python-like source code into SymPy expression. """
from __future__ import print_function, division
from .sympy_tokenize import \
generate_tokens, untokenize, TokenError, \
NUMBER, STRING, NAME, OP, ENDMARKER
from keyword import iskeyword
import ast
import re
import unicodedata
import sympy
from sympy.core.compatibility import exec_, StringIO
from sympy.core.basic import Basic
# Matches repeating-decimal literals like "3.4[31]"; groups capture the
# integer part, the non-repeating fractional part, and the repetend.
_re_repeated = re.compile(r"^(\d*)\.(\d*)\[(\d+)\]$")
def _token_splittable(token):
"""
Predicate for whether a token name can be split into multiple tokens.
A token is splittable if it does not contain an underscore character and
it is not the name of a Greek letter. This is used to implicitly convert
expressions like 'xyz' into 'x*y*z'.
"""
if '_' in token:
return False
else:
try:
return not unicodedata.lookup('GREEK SMALL LETTER ' + token)
except KeyError:
pass
if len(token) > 1:
return True
return False
def _token_callable(token, local_dict, global_dict, nextToken=None):
"""
Predicate for whether a token name represents a callable function.
Essentially wraps ``callable``, but looks up the token name in the
locals and globals.
"""
func = local_dict.get(token[1])
if not func:
func = global_dict.get(token[1])
return callable(func) and not isinstance(func, sympy.Symbol)
def _add_factorial_tokens(name, result):
    """Wrap the trailing operand of ``result`` in a call to ``name``.

    Scans ``result`` backwards, tracking parenthesis depth, to find where the
    factorial operand starts, then returns a new token list with
    ``name(`` inserted before it and ``)`` appended after it.
    Raises ``TokenError`` when there is no operand (empty result, or a '!'
    right after an opening parenthesis).
    """
    if result == [] or result[-1][1] == '(':
        raise TokenError()

    beginning = [(NAME, name), (OP, '(')]
    end = [(OP, ')')]

    diff = 0  # parenthesis nesting depth, counted from the right
    length = len(result)
    for index, token in enumerate(result[::-1]):
        toknum, tokval = token
        i = length - index - 1
        if tokval == ')':
            diff += 1
        elif tokval == '(':
            diff -= 1
        if diff == 0:
            if i - 1 >= 0 and result[i - 1][0] == NAME:
                # Operand is a call like f(x): include the function name,
                # e.g. f(x)! -> factorial(f(x))
                return result[:i - 1] + beginning + result[i - 1:] + end
            else:
                return result[:i] + beginning + result[i:] + end

    return result
class AppliedFunction(object):
    """
    A group of tokens representing a function and its arguments.

    `exponent` is for handling the shorthand sin^2, ln^2, etc.
    """
    def __init__(self, function, args, exponent=None):
        self.function = function
        self.args = args
        self.exponent = [] if exponent is None else exponent
        self.items = ['function', 'args', 'exponent']

    def expand(self):
        """Return a list of tokens representing the function"""
        return [self.function] + list(self.args)

    def __getitem__(self, index):
        # Tuple-like access: 0 -> function, 1 -> args, 2 -> exponent.
        return getattr(self, self.items[index])

    def __repr__(self):
        return "AppliedFunction(%s, %s, %s)" % (self.function, self.args,
                                                self.exponent)
class ParenthesisGroup(list):
    """List of tokens representing an expression in parentheses."""
def _flatten(result):
    """Expand every ``AppliedFunction`` entry back into its raw tokens."""
    flattened = []
    for item in result:
        if isinstance(item, AppliedFunction):
            flattened.extend(item.expand())
        else:
            flattened.append(item)
    return flattened
def _group_parentheses(recursor):
    """Return a transformation that groups parenthesized token spans.

    ``recursor`` is the top-level transformation to re-apply to the tokens
    inside each outermost parenthesis pair.
    """
    def _inner(tokens, local_dict, global_dict):
        """Group tokens between parentheses with ParenthesisGroup.

        Also processes those tokens recursively.
        """
        result = []
        stacks = []       # one ParenthesisGroup per currently-open '('
        stacklevel = 0    # current parenthesis nesting depth
        for token in tokens:
            if token[0] == OP:
                if token[1] == '(':
                    stacks.append(ParenthesisGroup([]))
                    stacklevel += 1
                    # No continue: the '(' itself is appended to the new
                    # group by the trailing `if stacklevel` below.
                elif token[1] == ')':
                    # NOTE: an unmatched ')' raises IndexError here.
                    stacks[-1].append(token)
                    stack = stacks.pop()

                    if len(stacks) > 0:
                        # We don't recurse here since the upper-level stack
                        # would reprocess these tokens
                        stacks[-1].extend(stack)
                    else:
                        # Recurse here to handle nested parentheses
                        # Strip off the outer parentheses to avoid an infinite loop
                        inner = stack[1:-1]
                        inner = recursor(inner,
                                         local_dict,
                                         global_dict)
                        parenGroup = [stack[0]] + inner + [stack[-1]]
                        result.append(ParenthesisGroup(parenGroup))
                    stacklevel -= 1
                    continue
            if stacklevel:
                # Inside parentheses: buffer into the innermost open group.
                stacks[-1].append(token)
            else:
                result.append(token)
        return result
    return _inner
def _apply_functions(tokens, local_dict, global_dict):
    """Convert a NAME token + ParenthesisGroup into an AppliedFunction.

    Note that ParenthesisGroups, if not applied to any function, are
    converted back into lists of tokens.
    """
    result = []
    symbol = None  # most recent NAME token, candidate for a function call
    for tok in tokens:
        if tok[0] == NAME:
            symbol = tok
            result.append(tok)
        elif isinstance(tok, ParenthesisGroup):
            if symbol and _token_callable(symbol, local_dict, global_dict):
                # Replace the NAME just appended with the applied function.
                result[-1] = AppliedFunction(symbol, tok)
                symbol = None
            else:
                # Not a function call: dissolve the group into plain tokens.
                result.extend(tok)
        else:
            symbol = None
            result.append(tok)
    return result
def _implicit_multiplication(tokens, local_dict, global_dict):
    """Implicitly adds '*' tokens.

    Cases:

    - Two AppliedFunctions next to each other ("sin(x)cos(x)")
    - AppliedFunction next to an open parenthesis ("sin x (cos x + 1)")
    - A close parenthesis next to an AppliedFunction ("(x+2)sin x")
    - A close parenthesis next to an open parenthesis ("(x+2)(x+3)")
    - AppliedFunction next to an implicitly applied function ("sin(x)cos x")
    """
    result = []
    # Walk consecutive token pairs; insert '*' between tokens that would
    # otherwise be juxtaposed.  Branch order matters: more specific
    # patterns are tested before the generic NAME/NAME cases.
    for tok, nextTok in zip(tokens, tokens[1:]):
        result.append(tok)
        if (isinstance(tok, AppliedFunction) and
                isinstance(nextTok, AppliedFunction)):
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Applied function followed by an open parenthesis
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              isinstance(nextTok, AppliedFunction)):
            # Close parenthesis followed by an applied function
            result.append((OP, '*'))
        elif (tok[0] == OP and tok[1] == ')' and
              nextTok[0] == NAME):
            # Close parenthesis followed by an implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == nextTok[0] == OP
              and tok[1] == ')' and nextTok[1] == '('):
            # Close parenthesis followed by an open parenthesis
            result.append((OP, '*'))
        elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
            # Applied function followed by implicitly applied function
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == OP and nextTok[1] == '('):
            # Constant followed by parenthesis
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              nextTok[0] == NAME and
              not _token_callable(nextTok, local_dict, global_dict)):
            # Constant followed by constant
            result.append((OP, '*'))
        elif (tok[0] == NAME and
              not _token_callable(tok, local_dict, global_dict) and
              (isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
            # Constant followed by (implicitly applied) function
            result.append((OP, '*'))
    if tokens:
        # zip() drops the final token; restore it.
        result.append(tokens[-1])
    return result
def _implicit_application(tokens, local_dict, global_dict):
    """Adds parentheses as needed after functions."""
    result = []
    appendParen = 0  # number of closing parentheses to add
    skip = 0  # number of tokens to delay before adding a ')' (to
              # capture **, ^, etc.)
    exponentSkip = False  # skipping tokens before inserting parentheses to
                          # work with function exponentiation
    for tok, nextTok in zip(tokens, tokens[1:]):
        result.append(tok)
        # Callable name followed by a non-operator token: open the
        # implicit argument list here.
        if (tok[0] == NAME and
                nextTok[0] != OP and
                nextTok[0] != ENDMARKER):
            if _token_callable(tok, local_dict, global_dict, nextTok):
                result.append((OP, '('))
                appendParen += 1
        # name followed by exponent - function exponentiation
        elif (tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**'):
            if _token_callable(tok, local_dict, global_dict):
                exponentSkip = True
        elif exponentSkip:
            # if the last token added was an applied function (i.e. the
            # power of the function exponent) OR a multiplication (as
            # implicit multiplication would have added an extraneous
            # multiplication)
            if (isinstance(tok, AppliedFunction)
                    or (tok[0] == OP and tok[1] == '*')):
                # don't add anything if the next token is a multiplication
                # or if there's already a parenthesis (if parenthesis, still
                # stop skipping tokens)
                if not (nextTok[0] == OP and nextTok[1] == '*'):
                    if not(nextTok[0] == OP and nextTok[1] == '('):
                        result.append((OP, '('))
                        appendParen += 1
                    exponentSkip = False
        elif appendParen:
            # Delay the closing ')' past trailing power/multiplication
            # operators so they stay inside the implicit call.
            if nextTok[0] == OP and nextTok[1] in ('^', '**', '*'):
                skip = 1
                continue
            if skip:
                skip -= 1
                continue
            result.append((OP, ')'))
            appendParen -= 1

    if tokens:
        # zip() drops the final token; restore it.
        result.append(tokens[-1])

    if appendParen:
        # Close any argument lists still open at end of input.
        result.extend([(OP, ')')] * appendParen)
    return result
def function_exponentiation(tokens, local_dict, global_dict):
    """Allows functions to be exponentiated, e.g. ``cos**2(x)``.

    Example:

    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, function_exponentiation)
    >>> transformations = standard_transformations + (function_exponentiation,)
    >>> parse_expr('sin**4(x)', transformations=transformations)
    sin(x)**4
    """
    result = []
    exponent = []  # buffered exponent tokens, re-emitted after the arguments
    consuming_exponent = False
    level = 0      # parenthesis depth of the argument list
    for tok, nextTok in zip(tokens, tokens[1:]):
        if tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**':
            # Callable name raised to a power: start buffering the exponent.
            if _token_callable(tok, local_dict, global_dict):
                consuming_exponent = True
        elif consuming_exponent:
            exponent.append(tok)

            # only want to stop after hitting )
            if tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '(':
                consuming_exponent = False
            # if implicit multiplication was used, we may have )*( instead
            if tok[0] == nextTok[0] == OP and tok[1] == '*' and nextTok[1] == '(':
                consuming_exponent = False
                del exponent[-1]
            continue
        elif exponent and not consuming_exponent:
            # Track the argument-list parentheses; once balanced, emit the
            # buffered exponent right after the closing ')'.
            if tok[0] == OP:
                if tok[1] == '(':
                    level += 1
                elif tok[1] == ')':
                    level -= 1
            if level == 0:
                result.append(tok)
                result.extend(exponent)
                exponent = []
                continue
        result.append(tok)
    if tokens:
        # zip() drops the final token; restore it.
        result.append(tokens[-1])
    if exponent:
        result.extend(exponent)
    return result
def split_symbols_custom(predicate):
    """Creates a transformation that splits symbol names.

    ``predicate`` should return True if the symbol name is to be split.

    For instance, to retain the default behavior but avoid splitting certain
    symbol names, a predicate like this would work:

    >>> from sympy.parsing.sympy_parser import (parse_expr, _token_splittable,
    ... standard_transformations, implicit_multiplication,
    ... split_symbols_custom)
    >>> def can_split(symbol):
    ...     if symbol not in ('list', 'of', 'unsplittable', 'names'):
    ...         return _token_splittable(symbol)
    ...     return False
    ...
    >>> transformation = split_symbols_custom(can_split)
    >>> parse_expr('unsplittable', transformations=standard_transformations +
    ... (transformation, implicit_multiplication))
    unsplittable
    """
    def _split_symbols(tokens, local_dict, global_dict):
        # Runs after auto_symbol, so names appear as Symbol('xyz') token
        # triples; each is rewritten into Symbol('x'), Symbol('y'), ...
        result = []
        split = False           # saw Symbol, next NAME is its quoted argument
        split_previous = False  # skip the ')' of the Symbol call just split
        for tok in tokens:
            if split_previous:
                # throw out closing parenthesis of Symbol that was split
                split_previous = False
                continue
            split_previous = False
            if tok[0] == NAME and tok[1] == 'Symbol':
                split = True
            elif split and tok[0] == NAME:
                # Strip the surrounding quotes of the Symbol argument.
                symbol = tok[1][1:-1]
                if predicate(symbol):
                    for char in symbol:
                        if char in local_dict or char in global_dict:
                            # Get rid of the call to Symbol
                            del result[-2:]
                            result.extend([(NAME, "%s" % char),
                                           (NAME, 'Symbol'), (OP, '(')])
                        else:
                            result.extend([(NAME, "'%s'" % char), (OP, ')'),
                                           (NAME, 'Symbol'), (OP, '(')])
                    # Delete the last two tokens: get rid of the extraneous
                    # Symbol( we just added
                    # Also, set split_previous=True so will skip
                    # the closing parenthesis of the original Symbol
                    del result[-2:]
                    split = False
                    split_previous = True
                    continue
                else:
                    split = False
            result.append(tok)
        return result
    return _split_symbols
#: Splits symbol names for implicit multiplication.
#:
#: Intended to let expressions like ``xyz`` be parsed as ``x*y*z``. Does not
#: split Greek character names, so ``theta`` will *not* become
#: ``t*h*e*t*a``. Generally this should be used with
#: ``implicit_multiplication``.
split_symbols = split_symbols_custom(_token_splittable)
def implicit_multiplication(result, local_dict, global_dict):
    """Makes the multiplication operator optional in most cases.

    Use this before :func:`implicit_application`, otherwise expressions like
    ``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.

    Example:

    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication)
    >>> transformations = standard_transformations + (implicit_multiplication,)
    >>> parse_expr('3 x y', transformations=transformations)
    3*x*y
    """
    # These are interdependent steps, so we don't expose them separately
    pipeline = (_group_parentheses(implicit_multiplication),
                _apply_functions,
                _implicit_multiplication)
    for stage in pipeline:
        result = stage(result, local_dict, global_dict)
    return _flatten(result)
def implicit_application(result, local_dict, global_dict):
    """Makes parentheses optional in some cases for function calls.

    Use this after :func:`implicit_multiplication`, otherwise expressions
    like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than
    ``sin(2*x)``.

    Example:

    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_application)
    >>> transformations = standard_transformations + (implicit_application,)
    >>> parse_expr('cot z + csc z', transformations=transformations)
    cot(z) + csc(z)
    """
    pipeline = (_group_parentheses(implicit_application),
                _apply_functions,
                _implicit_application)
    for stage in pipeline:
        result = stage(result, local_dict, global_dict)
    return _flatten(result)
def implicit_multiplication_application(result, local_dict, global_dict):
    """Allows a slightly relaxed syntax.

    - Parentheses for single-argument method calls are optional.

    - Multiplication is implicit.

    - Symbol names can be split (i.e. spaces are not needed between
      symbols).

    - Functions can be exponentiated.

    Example:

    >>> from sympy.parsing.sympy_parser import (parse_expr,
    ... standard_transformations, implicit_multiplication_application)
    >>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
    ... transformations=(standard_transformations +
    ... (implicit_multiplication_application,)))
    3*x*y*z + 10*sin(x**2)**2 + tan(theta)
    """
    # Apply the individual relaxations in their documented order.
    for transform in (split_symbols, implicit_multiplication,
                      implicit_application, function_exponentiation):
        result = transform(result, local_dict, global_dict)
    return result
def auto_symbol(tokens, local_dict, global_dict):
    """Inserts calls to ``Symbol`` for undefined variables."""
    result = []
    prevTok = (None, None)

    # NOTE: mutates the caller's token list by appending a sentinel so the
    # zip below also visits the final real token.
    tokens.append((None, None))  # so zip traverses all tokens
    for tok, nextTok in zip(tokens, tokens[1:]):
        tokNum, tokVal = tok
        nextTokNum, nextTokVal = nextTok
        if tokNum == NAME:
            name = tokVal
            if (name in ['True', 'False', 'None']
                    or iskeyword(name)
                    or name in local_dict
                    # Don't convert attribute access
                    or (prevTok[0] == OP and prevTok[1] == '.')
                    # Don't convert keyword arguments
                    or (prevTok[0] == OP and prevTok[1] in ('(', ',')
                        and nextTokNum == OP and nextTokVal == '=')):
                result.append((NAME, name))
                continue
            elif name in global_dict:
                # Leave known classes and callables untouched.
                obj = global_dict[name]
                if isinstance(obj, (Basic, type)) or callable(obj):
                    result.append((NAME, name))
                    continue
            # Unknown name: wrap it as Symbol('name').
            result.extend([
                (NAME, 'Symbol'),
                (OP, '('),
                (NAME, repr(str(name))),
                (OP, ')'),
            ])
        else:
            result.append((tokNum, tokVal))
        prevTok = (tokNum, tokVal)
    return result
def lambda_notation(tokens, local_dict, global_dict):
    """Substitutes "lambda" with its Sympy equivalent Lambda().

    However, the conversion doesn't take place if only "lambda"
    is passed because that is a syntax error.
    """
    result = []
    flag = False  # True once the ':' separating args from body was seen
    toknum, tokval = tokens[0]
    tokLen = len(tokens)
    if toknum == NAME and tokval == 'lambda':
        if tokLen == 2:
            # "lambda" followed by a single (end) token: leave unchanged.
            result.extend(tokens)
        elif tokLen > 2:
            # Emit the Lambda(()) skeleton, then insert the bound variables
            # into the inner parentheses and the body tokens before the
            # outermost closing parenthesis.
            result.extend([
                (NAME, 'Lambda'),
                (OP, '('),
                (OP, '('),
                (OP, ')'),
                (OP, ')'),
            ])
            for tokNum, tokVal in tokens[1:]:
                if tokNum == OP and tokVal == ':':
                    tokVal = ','
                    flag = True
                if flag:
                    # Body tokens: insert before the final ')'.
                    result.insert(-1, (tokNum, tokVal))
                else:
                    # Argument tokens: insert before the inner ')'.
                    result.insert(-2, (tokNum, tokVal))
    else:
        result.extend(tokens)
    return result
def factorial_notation(tokens, local_dict, global_dict):
    """Allows standard notation for factorial.

    Rewrites ``x!`` as ``factorial(x)`` and ``x!!`` as ``factorial2(x)``;
    consecutive factorial operators (``x!!!``) raise ``TokenError``.
    """
    result = []
    prev = ''
    for toknum, tokval in tokens:
        if toknum == OP and tokval in ('!', '!!'):
            if prev in ('!', '!!'):
                raise TokenError
            func = 'factorial2' if tokval == '!!' else 'factorial'
            result = _add_factorial_tokens(func, result)
        elif toknum == OP:
            result.append((OP, tokval))
        else:
            result.append((toknum, tokval))
        prev = tokval
    return result
def convert_xor(tokens, local_dict, global_dict):
    """Treats XOR, ``^``, as exponentiation, ``**``."""
    return [(OP, '**') if (toknum == OP and tokval == '^')
            else (toknum, tokval)
            for toknum, tokval in tokens]
def auto_number(tokens, local_dict, global_dict):
    """Converts numeric literals to use SymPy equivalents.

    Complex numbers use ``I``; integer literals use ``Integer``, float
    literals use ``Float``, and repeating decimals use ``Rational``.
    """
    result = []
    # NOTE(review): prevtoken is assigned but never read or updated below.
    prevtoken = ''

    for toknum, tokval in tokens:
        if toknum == NUMBER:
            number = tokval
            postfix = []

            # Imaginary literal: strip the j/J suffix and multiply by I.
            if number.endswith('j') or number.endswith('J'):
                number = number[:-1]
                postfix = [(OP, '*'), (NAME, 'I')]

            if '.' in number or (('e' in number or 'E' in number) and
                    not (number.startswith('0x') or number.startswith('0X'))):
                match = _re_repeated.match(number)

                if match is not None:
                    # Clear repeating decimals, e.g. 3.4[31] -> (3 + 4/10 + 31/990)
                    pre, post, repetend = match.groups()

                    zeros = '0'*len(post)
                    post, repetends = [w.lstrip('0') for w in [post, repetend]]
                    # or else interpreted as octal
                    a = pre or '0'
                    b, c = post or '0', '1' + zeros
                    d, e = repetends, ('9'*len(repetend)) + zeros

                    seq = [
                        (OP, '('),
                        (NAME,
                         'Integer'), (OP, '('), (NUMBER, a), (OP, ')'),
                        (OP, '+'),
                        (NAME, 'Rational'), (OP, '('), (
                            NUMBER, b), (OP, ','), (NUMBER, c), (OP, ')'),
                        (OP, '+'),
                        (NAME, 'Rational'), (OP, '('), (
                            NUMBER, d), (OP, ','), (NUMBER, e), (OP, ')'),
                        (OP, ')'),
                    ]
                else:
                    # Plain float: pass the literal text so Float sees the
                    # exact digits the user typed.
                    seq = [(NAME, 'Float'), (OP, '('),
                           (NUMBER, repr(str(number))), (OP, ')')]
            else:
                seq = [(NAME, 'Integer'), (OP, '('), (
                    NUMBER, number), (OP, ')')]

            result.extend(seq + postfix)
        else:
            result.append((toknum, tokval))

    return result
def rationalize(tokens, local_dict, global_dict):
    """Converts floats into ``Rational``. Run AFTER ``auto_number``."""
    result = []
    float_pending = False
    for toknum, tokval in tokens:
        if toknum == NAME and tokval == 'Float':
            # Swap the constructor name; the number token that follows is
            # re-tagged as a string so Rational parses it exactly.
            float_pending = True
            result.append((toknum, 'Rational'))
        elif float_pending and toknum == NUMBER:
            float_pending = False
            result.append((STRING, tokval))
        else:
            result.append((toknum, tokval))
    return result
#: Standard transformations for :func:`parse_expr`.
#: Inserts calls to :class:`Symbol`, :class:`Integer`, and other SymPy
#: datatypes and allows the use of standard factorial notation (e.g. ``x!``).
standard_transformations = (lambda_notation, auto_symbol, auto_number, factorial_notation)
def stringify_expr(s, local_dict, global_dict, transformations):
    """
    Converts the string ``s`` to Python code, in ``local_dict``

    Generally, ``parse_expr`` should be used.
    """
    tokens = []
    input_code = StringIO(s.strip())
    # Tokenize once; each transformation then rewrites the token stream.
    for toknum, tokval, _, _, _ in generate_tokens(input_code.readline):
        tokens.append((toknum, tokval))

    for transform in transformations:
        tokens = transform(tokens, local_dict, global_dict)

    return untokenize(tokens)
def eval_expr(code, local_dict, global_dict):
    """
    Evaluate Python code generated by ``stringify_expr``.

    Generally, ``parse_expr`` should be used.
    """
    # Security: this eval()s the generated code with the given namespaces;
    # only feed it output of stringify_expr built from trusted input.
    expr = eval(
        code, global_dict, local_dict)  # take local objects in preference
    return expr
def parse_expr(s, local_dict=None, transformations=standard_transformations,
               global_dict=None, evaluate=True):
    """Converts the string ``s`` to a SymPy expression, in ``local_dict``

    Parameters
    ==========

    s : str
        The string to parse.

    local_dict : dict, optional
        A dictionary of local variables to use when parsing.

    global_dict : dict, optional
        A dictionary of global variables. By default, this is initialized
        with ``from sympy import *``; provide this parameter to override
        this behavior (for instance, to parse ``"Q & S"``).

    transformations : tuple, optional
        A tuple of transformation functions used to modify the tokens of the
        parsed expression before evaluation. The default transformations
        convert numeric literals into their SymPy equivalents, convert
        undefined variables into SymPy symbols, and allow the use of standard
        mathematical factorial notation (e.g. ``x!``).

    evaluate : bool, optional
        When False, the order of the arguments will remain as they were in the
        string and automatic simplification that would normally occur is
        suppressed. (see examples)

    Examples
    ========

    >>> from sympy.parsing.sympy_parser import parse_expr
    >>> parse_expr("1/2")
    1/2
    >>> type(_)
    <class 'sympy.core.numbers.Half'>
    >>> from sympy.parsing.sympy_parser import standard_transformations,\\
    ... implicit_multiplication_application
    >>> transformations = (standard_transformations +
    ... (implicit_multiplication_application,))
    >>> parse_expr("2x", transformations=transformations)
    2*x

    When evaluate=False, some automatic simplifications will not occur:

    >>> parse_expr("2**3"), parse_expr("2**3", evaluate=False)
    (8, 2**3)

    In addition the order of the arguments will not be made canonical.
    This feature allows one to tell exactly how the expression was entered:

    >>> a = parse_expr('1 + x', evaluate=False)
    >>> b = parse_expr('x + 1', evaluate=0)
    >>> a == b
    False
    >>> a.args
    (1, x)
    >>> b.args
    (x, 1)

    See Also
    ========

    stringify_expr, eval_expr, standard_transformations,
    implicit_multiplication_application
    """
    if local_dict is None:
        local_dict = {}

    if global_dict is None:
        global_dict = {}
        # Default global namespace: everything SymPy exports.
        exec_('from sympy import *', global_dict)

    # Token-level rewriting: string in, Python source out.
    code = stringify_expr(s, local_dict, global_dict, transformations)

    if not evaluate:
        # AST-level rewriting: operators become Add/Mul/Pow(..., evaluate=False).
        code = compile(evaluateFalse(code), '<string>', 'eval')

    return eval_expr(code, local_dict, global_dict)
def evaluateFalse(s):
    """
    Replaces operators with the SymPy equivalent and sets evaluate=False.
    """
    tree = ast.parse(s)
    tree = EvaluateFalseTransformer().visit(tree)
    # ast.parse yields a Module; eval-mode compilation needs an Expression
    # wrapping the first statement's value.
    expression = ast.Expression(tree.body[0].value)
    return ast.fix_missing_locations(expression)
class EvaluateFalseTransformer(ast.NodeTransformer):
    """AST transformer that rewrites binary operations into calls to the
    corresponding SymPy classes with ``evaluate=False``, preserving the
    expression exactly as entered."""

    # Maps Python BinOp operator types to SymPy class names.  Sub and Div
    # are expressed via Add and Mul with a negated / inverted right operand
    # (see visit_BinOp).
    # NOTE(review): BitXor -> 'Not' looks suspicious ('Xor' would be the
    # natural mapping) — kept as-is; confirm against upstream behavior.
    operators = {
        ast.Add: 'Add',
        ast.Mult: 'Mul',
        ast.Pow: 'Pow',
        ast.Sub: 'Add',
        ast.Div: 'Mul',
        ast.BitOr: 'Or',
        ast.BitAnd: 'And',
        ast.BitXor: 'Not',
    }

    def flatten(self, args, func):
        """Denest nested calls to ``func``, e.g. Add(Add(a, b), c) ->
        [a, b, c], returning the flattened argument list."""
        result = []
        for arg in args:
            if isinstance(arg, ast.Call) and arg.func.id == func:
                result.extend(self.flatten(arg.args, func))
            else:
                result.append(arg)
        return result

    def visit_BinOp(self, node):
        """Rewrite a recognized binary operation into
        ``SymPyClass(left, right, evaluate=False)``; other operators are
        returned unchanged."""
        if node.op.__class__ in self.operators:
            sympy_class = self.operators[node.op.__class__]
            right = self.visit(node.right)

            if isinstance(node.op, ast.Sub):
                # a - b  ->  Add(a, -b)
                right = ast.UnaryOp(op=ast.USub(), operand=right)
            elif isinstance(node.op, ast.Div):
                # a / b  ->  Mul(a, Pow(b, -1, evaluate=False))
                right = ast.Call(
                    func=ast.Name(id='Pow', ctx=ast.Load()),
                    args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
                    keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
                    # NOTE(review): starargs/kwargs are pre-3.5 ast.Call
                    # fields; newer Pythons may reject or ignore them.
                    starargs=None,
                    kwargs=None
                )

            new_node = ast.Call(
                func=ast.Name(id=sympy_class, ctx=ast.Load()),
                args=[self.visit(node.left), right],
                keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
                starargs=None,
                kwargs=None
            )

            if sympy_class in ('Add', 'Mul'):
                # Denest Add or Mul as appropriate
                new_node.args = self.flatten(new_node.args, sympy_class)

            return new_node
        return node
|
AunShiLord/sympy
|
sympy/parsing/sympy_parser.py
|
Python
|
bsd-3-clause
| 29,878
|
[
"VisIt"
] |
966047c34944a6eb571e7f89a34afde38dbabc6f4c90558363dcc8bcec1ea6ce
|
#!/usr/bin/env python
"""
Compute homogenized elastic coefficients for a given microstructure.
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import sys
import six
sys.path.append('.')
import numpy as nm
from sfepy import data_dir
import sfepy.discrete.fem.periodic as per
from sfepy.homogenization.utils import define_box_regions
def define_regions(filename):
    """
    Define various subdomains for a given mesh file.
    """
    dim = 2
    regions = {'Y': 'all'}
    eog = 'cells of group %d'
    if filename.find('osteonT1') >= 0:
        mat_ids = [11, 39, 6, 8, 27, 28, 9, 2, 4, 14, 12, 17, 45, 28, 15]
        regions['Ym'] = ' +c '.join(eog % im for im in mat_ids)
        wx = 0.865
        wy = 0.499

    # The channel region is everything outside the matrix.
    regions['Yc'] = 'r.Y -c r.Ym'
    # Sides and corners.
    regions.update(define_box_regions(2, (wx, wy)))
    return dim, regions
def get_pars(ts, coor, mode=None, term=None, **kwargs):
    """
    Define material parameters: :math:`D_ijkl` (elasticity), in a given region.
    """
    if mode == 'qp':
        dim = coor.shape[1]
        sym = (dim + 1) * dim // 2  # symmetric-storage size of the tensor

        out = {}

        # in 1e+10 [Pa]
        lam = 1.7
        mu = 0.3

        # Isotropic elasticity in symmetric storage:
        # D = lam * o o^T + mu * diag(o + 1).
        o = nm.array([1.] * dim + [0.] * (sym - dim), dtype = nm.float64)
        oot = nm.outer(o, o)
        out['D'] = lam * oot + mu * nm.diag(o + 1.0)

        # Repeat the constant tensor for every quadrature point.
        for key, val in six.iteritems(out):
            out[key] = nm.tile(val, (coor.shape[0], 1, 1))

        # Soften the tensor by a factor of 10 in the channel cells ('Yc').
        channels_cells = term.region.domain.regions['Yc'].cells
        n_cell = term.region.get_n_cells()
        val = out['D'].reshape((n_cell, -1, 3, 3))
        val[channels_cells] *= 1e-1

        return out
##
# Mesh file.
filename_mesh = data_dir + '/meshes/2d/special/osteonT1_11.mesh'

##
# Define regions (subdomains, boundaries) - $Y$, $Y_i$, ...
# depending on a mesh used.
dim, regions = define_regions(filename_mesh)

# Hook functions referenced by name from the problem description items
# below (material parameters and periodicity node matching).
functions = {
    'get_pars' : (lambda ts, coors, **kwargs:
                  get_pars(ts, coors, **kwargs),),
    'match_x_plane' : (per.match_x_plane,),
    'match_y_plane' : (per.match_y_plane,),
    'match_z_plane' : (per.match_z_plane,),
    'match_x_line' : (per.match_x_line,),
    'match_y_line' : (per.match_y_line,),
}
##
# Define fields: 'displacement' in $Y$,
# 'pressure_m' in $Y_m$.
fields = {
    'displacement' : ('real', dim, 'Y', 1),
}

##
# Define corrector variables: unknown displacements: uc, test: vc
# displacement-like variables: Pi, Pi1, Pi2
variables = {
    'uc' : ('unknown field', 'displacement', 0),
    'vc' : ('test field', 'displacement', 'uc'),
    'Pi' : ('parameter field', 'displacement', 'uc'),
    'Pi1' : ('parameter field', 'displacement', None),
    'Pi2' : ('parameter field', 'displacement', None),
}

##
# Periodic boundary conditions.
if dim == 3:
    epbcs = {
        'periodic_x' : (['Left', 'Right'], {'uc.all' : 'uc.all'},
                        'match_x_plane'),
        'periodic_y' : (['Near', 'Far'], {'uc.all' : 'uc.all'},
                        'match_y_plane'),
        'periodic_z' : (['Top', 'Bottom'], {'uc.all' : 'uc.all'},
                        'match_z_plane'),
    }
else:
    # 2D mesh: matching planes degenerate to lines.
    epbcs = {
        'periodic_x' : (['Left', 'Right'], {'uc.all' : 'uc.all'},
                        'match_y_line'),
        'periodic_y' : (['Bottom', 'Top'], {'uc.all' : 'uc.all'},
                        'match_x_line'),
    }

##
# Dirichlet boundary conditions.
ebcs = {
    'fixed_u' : ('Corners', {'uc.all' : 0.0}),
}

##
# Material defining constitutive parameters of the microproblem.
materials = {
    'm' : 'get_pars',
}

##
# Numerical quadratures for volume (i3 - order 3) integral terms.
integrals = {
    'i3' : 3,
}
##
# Homogenized coefficients to compute.
def set_elastic(variables, ir, ic, mode, pis, corrs_rs):
    """Load the (ir, ic) corrector-perturbed displacement into the
    row ('Pi1') or column ('Pi2') parameter variable."""
    target = {'row': 'Pi1', 'col': 'Pi2'}[mode]
    val = pis.states[ir, ic]['uc'] + corrs_rs.states[ir, ic]['uc']
    variables[target].set_data(val)
# The homogenized elastic tensor E, evaluated from the Pi operators and
# the steady-state correctors via the bilinear elasticity form.
coefs = {
    'E' : {
        'requires' : ['pis', 'corrs_rs'],
        'expression' : 'dw_lin_elastic.i3.Y(m.D, Pi1, Pi2)',
        'set_variables' : set_elastic,
    },
}

# Names of the periodic conditions active for the mesh dimension.
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]

requirements = {
    'pis' : {
        'variables' : ['uc'],
    },
    ##
    # Steady state correctors $\bar{\omega}^{rs}$.
    'corrs_rs' : {
        'requires' : ['pis'],
        'save_variables' : ['uc'],
        'ebcs' : ['fixed_u'],
        'epbcs' : all_periodic,
        'equations' : {'eq' : """dw_lin_elastic.i3.Y(m.D, vc, uc)
= - dw_lin_elastic.i3.Y(m.D, vc, Pi)"""},
        'set_variables' : [('Pi', 'pis', 'uc')],
        'save_name' : 'corrs_elastic',
        'is_linear' : True,
    },
}

##
# Solvers.
solvers = {
    'ls' : ('ls.scipy_direct', {}),
    'newton' : ('nls.newton', {
        'i_max' : 1,
        'eps_a' : 1e-8,
        'eps_r' : 1e-2,
    })
}

############################################
# Mini-application below, computing the homogenized elastic coefficients.
helps = {
    'no_pauses' : 'do not make pauses',
}
def main():
    """Interactive walk-through computing the homogenized elastic tensor:
    builds the Pi operators, the steady-state correctors and finally E,
    pausing between stages unless --no-pauses is given."""
    import os
    from sfepy.base.base import spause, output
    from sfepy.base.conf import ProblemConf, get_standard_keywords
    from sfepy.discrete import Problem
    import sfepy.homogenization.coefs_base as cb

    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-n', '--no-pauses',
                        action="store_true", dest='no_pauses',
                        default=False, help=helps['no_pauses'])
    options = parser.parse_args()

    if options.no_pauses:
        # Shadow the imported spause with a non-blocking print.
        def spause(*args):
            output(*args)

    nm.set_printoptions(precision=3)

    spause(r""">>>
First, this file will be read in place of an input
(problem description) file.
Press 'q' to quit the example, press any other key to continue...""")
    required, other = get_standard_keywords()
    required.remove('equations')
    # Use this file as the input file.
    conf = ProblemConf.from_file(__file__, required, other)
    print(list(conf.to_dict().keys()))
    spause(r""">>>
...the read input as a dict (keys only for brevity).
['q'/other key to quit/continue...]""")

    spause(r""">>>
Now the input will be used to create a Problem instance.
['q'/other key to quit/continue...]""")
    problem = Problem.from_conf(conf, init_equations=False)
    # The homogenization mini-apps need the output_dir.
    output_dir = ''
    problem.output_dir = output_dir
    print(problem)
    spause(r""">>>
...the Problem instance.
['q'/other key to quit/continue...]""")

    spause(r""">>>
The homogenized elastic coefficient $E_{ijkl}$ is expressed
using $\Pi$ operators, computed now. In fact, those operators are permuted
coordinates of the mesh nodes.
['q'/other key to quit/continue...]""")
    req = conf.requirements['pis']
    mini_app = cb.ShapeDimDim('pis', problem, req)
    mini_app.setup_output(save_format='vtk',
                          file_per_var=False)
    pis = mini_app()
    print(pis)
    spause(r""">>>
...the $\Pi$ operators.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Next, $E_{ijkl}$ needs so called steady state correctors $\bar{\omega}^{rs}$,
computed now.
['q'/other key to quit/continue...]""")
    req = conf.requirements['corrs_rs']

    save_name = req.get('save_name', '')
    name = os.path.join(output_dir, save_name)

    mini_app = cb.CorrDimDim('steady rs correctors', problem, req)
    mini_app.setup_output(save_format='vtk',
                          file_per_var=False)
    corrs_rs = mini_app(data={'pis': pis})
    print(corrs_rs)
    spause(r""">>>
...the $\bar{\omega}^{rs}$ correctors.
The results are saved in: %s.%s
Try to display them with:
python postproc.py %s.%s
['q'/other key to quit/continue...]""" % (2 * (name, problem.output_format)))

    spause(r""">>>
Then the volume of the domain is needed.
['q'/other key to quit/continue...]""")
    volume = problem.evaluate('d_volume.i3.Y(uc)')
    print(volume)
    spause(r""">>>
...the volume.
['q'/other key to quit/continue...]""")

    spause(r""">>>
Finally, $E_{ijkl}$ can be computed.
['q'/other key to quit/continue...]""")
    mini_app = cb.CoefSymSym('homogenized elastic tensor',
                             problem, conf.coefs['E'])
    c_e = mini_app(volume, data={'pis': pis, 'corrs_rs' : corrs_rs})
    print(r""">>>
The homogenized elastic coefficient $E_{ijkl}$, symmetric storage
with rows, columns in 11, 22, 12 ordering:""")
    print(c_e)

if __name__ == '__main__':
    main()
|
lokik/sfepy
|
examples/homogenization/rs_correctors.py
|
Python
|
bsd-3-clause
| 8,679
|
[
"VTK"
] |
6326b40f9b1b784dc54b17e6b546ab92dd20cf7a71f262caf549c7de2b8b0b46
|
import os, subprocess
#########################################
def generate(params):
    """Create the GROMACS topology (.top) and coordinate (.gro) files.

    Runs pdb2gmx on the input PDB inside the topology working directory.
    The step is skipped when both output files already exist.  Optional
    marker files 'ssbridges' and 'termini' enable the interactive -ss /
    -ter flags; the answers are funneled in via a 'pdb2gmx_in' file.

    params: run-configuration object providing rundirs, run_options,
        physical, command_log and gmx_engine attributes (project type).
    """
    # change to topology directory
    currdir = params.rundirs.top_dir
    os.chdir("/".join([params.run_options.workdir, currdir]))
    pdbname = params.run_options.pdb
    pdbfile = pdbname + ".pdb"
    topfile = pdbname + ".top"
    grofile = pdbname + ".gro"
    if os.path.exists(topfile) and os.path.exists(grofile):
        # print() with a single parenthesized argument behaves the same
        # under Python 2 and 3 (the original bare print statement was
        # Python-2 only).
        print("\t %s and %s found" % (topfile, grofile))
        return
    program = "pdb2gmx"
    #################################
    # create topology for the protein
    if pdbname != "none":
        cmdline_args = "-ignh -ff %s -water %s " % (params.physical.forcefield, params.physical.water)
        cmdline_args += "-f ../%s/%s -o %s -p %s" % (params.rundirs.in_dir, pdbfile, grofile, topfile)
        # if there are salt bridges or termini modifications,
        # create file with input options for pdb2gmx, and indicate so on the command line
        # -ss is interactive but candidates must exist
        if os.path.exists("pdb2gmx_in"):
            os.remove("pdb2gmx_in")
        if os.path.exists("ssbridges"):
            subprocess.call(["bash", "-c", "echo ssbridges > pdb2gmx_in"], stdout=subprocess.PIPE)
            cmdline_args += " -ss"
        # notes on terminal mods:
        # the -ter flag will actually prompt for manual input - we are funneling in the answers
        # the default is NH3+ and COO-, which might be the best choice enabling the hydrogen
        # bonds in the secondary structure
        if os.path.exists("termini"):  # amber forcefield is incompatible with ter
            subprocess.call(["bash", "-c", "echo termini >> pdb2gmx_in"], stdout=subprocess.PIPE)
            cmdline_args += " -ter"
        if os.path.exists("pdb2gmx_in"):
            cmdline_args += " < pdb2gmx_in"
        params.command_log.write("in %s:\n" % (currdir))
        ##########################################
        # here is where the run actually happens:
        params.gmx_engine.run(program, cmdline_args, "generating top and gro files for the peptide", params.command_log)
        # the things in bracket are some false alarm msgs gmx sometimes issues
        params.gmx_engine.check_logs_for_error(program, ["masses will be determined based on residue and atom names"])
        ##########################################
        # Remove the answer file synchronously.  The original spawned
        # "bash -c rm -f" via Popen without waiting, which could race with
        # later work in the same directory; os.remove is direct and safe.
        if os.path.exists("pdb2gmx_in"):
            os.remove("pdb2gmx_in")
    # TODO: create top for ligands
    # merge
    return
|
ivanamihalek/smallmol
|
gmx_lib/gro_and_top.py
|
Python
|
gpl-2.0
| 2,279
|
[
"Amber"
] |
3b2c4cb6c6342967cac1e8b59d24dec5ab3e3581261eb3c46cfb94d15a79ae2b
|
"""Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import, division, print_function
import ast
import _ast
import errno
import itertools
import imp
import marshal
import os
import re
import struct
import sys
import types
import py
from _pytest.assertion import util
# pytest caches rewritten pycs in __pycache__.
# Derive a cache tag distinguishing pytest-rewritten pycs from normal ones:
# prefer the interpreter's own tag (imp.get_tag, Python 3.2+/2.7), else
# synthesize one from the implementation name and version.
if hasattr(imp, "get_tag"):
    PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
    if hasattr(sys, "pypy_version_info"):
        impl = "pypy"
    elif sys.platform == "java":
        impl = "jython"
    else:
        impl = "cpython"
    ver = sys.version_info
    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
    del ver, impl
# ".pyc" normally, ".pyo" when running under -O (no __debug__).
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
# Interpreters other than 2.7 and < 3.2 require *nix newlines for ast.parse.
REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
# ast.Call lost its starargs/kwargs positional parameters in Python 3.5;
# ast_Call papers over the signature difference.
if sys.version_info >= (3,5):
    ast_Call = ast.Call
else:
    ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
    """PEP302 Import hook which rewrites asserts."""
    def __init__(self, config):
        # config: the pytest config object; provides ini options, warning
        # machinery and the shared _assertstate used for tracing.
        self.config = config
        self.fnpats = config.getini("python_files")
        self.session = None
        # name -> (code object, pyc path) staged by find_module for load_module.
        self.modules = {}
        self._rewritten_names = set()
        self._register_with_pkg_resources()
        # Module-name prefixes explicitly marked for rewriting (mark_rewrite).
        self._must_rewrite = set()
    def set_session(self, session):
        """Attach the current test session (used by _should_rewrite)."""
        self.session = session
    def find_module(self, name, path=None):
        """PEP302 finder: locate *name*, rewrite its asserts, cache the pyc.

        Returns self (acting as loader) when the module was rewritten,
        or None to let normal import machinery handle it.
        """
        state = self.config._assertstate
        state.trace("find_module called for: %s" % name)
        names = name.rsplit(".", 1)
        lastname = names[-1]
        pth = None
        if path is not None:
            # Starting with Python 3.3, path is a _NamespacePath(), which
            # causes problems if not converted to list.
            path = list(path)
            if len(path) == 1:
                pth = path[0]
        if pth is None:
            try:
                fd, fn, desc = imp.find_module(lastname, path)
            except ImportError:
                return None
            if fd is not None:
                fd.close()
            tp = desc[2]
            if tp == imp.PY_COMPILED:
                if hasattr(imp, "source_from_cache"):
                    try:
                        fn = imp.source_from_cache(fn)
                    except ValueError:
                        # Python 3 doesn't like orphaned but still-importable
                        # .pyc files.
                        fn = fn[:-1]
                else:
                    fn = fn[:-1]
            elif tp != imp.PY_SOURCE:
                # Don't know what this is.
                return None
        else:
            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
        fn_pypath = py.path.local(fn)
        if not self._should_rewrite(name, fn_pypath, state):
            return None
        self._rewritten_names.add(name)
        # The requested module looks like a test file, so rewrite it. This is
        # the most magical part of the process: load the source, rewrite the
        # asserts, and load the rewritten source. We also cache the rewritten
        # module code in a special pyc. We must be aware of the possibility of
        # concurrent pytest processes rewriting and loading pycs. To avoid
        # tricky race conditions, we maintain the following invariant: The
        # cached pyc is always a complete, valid pyc. Operations on it must be
        # atomic. POSIX's atomic rename comes in handy.
        write = not sys.dont_write_bytecode
        cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
        if write:
            try:
                os.mkdir(cache_dir)
            except OSError:
                e = sys.exc_info()[1].errno
                if e == errno.EEXIST:
                    # Either the __pycache__ directory already exists (the
                    # common case) or it's blocked by a non-dir node. In the
                    # latter case, we'll ignore it in _write_pyc.
                    pass
                elif e in [errno.ENOENT, errno.ENOTDIR]:
                    # One of the path components was not a directory, likely
                    # because we're in a zip file.
                    write = False
                elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
                    state.trace("read only directory: %r" % fn_pypath.dirname)
                    write = False
                else:
                    raise
        cache_name = fn_pypath.basename[:-3] + PYC_TAIL
        pyc = os.path.join(cache_dir, cache_name)
        # Notice that even if we're in a read-only directory, I'm going
        # to check for a cached pyc. This may not be optimal...
        co = _read_pyc(fn_pypath, pyc, state.trace)
        if co is None:
            state.trace("rewriting %r" % (fn,))
            source_stat, co = _rewrite_test(self.config, fn_pypath)
            if co is None:
                # Probably a SyntaxError in the test.
                return None
            if write:
                _make_rewritten_pyc(state, source_stat, pyc, co)
        else:
            state.trace("found cached rewritten pyc for %r" % (fn,))
        self.modules[name] = co, pyc
        return self
    def _should_rewrite(self, name, fn_pypath, state):
        """Decide whether module *name* at *fn_pypath* gets assert rewriting."""
        # always rewrite conftest files
        fn = str(fn_pypath)
        if fn_pypath.basename == 'conftest.py':
            state.trace("rewriting conftest file: %r" % (fn,))
            return True
        if self.session is not None:
            if self.session.isinitpath(fn):
                state.trace("matched test file (was specified on cmdline): %r" %
                            (fn,))
                return True
        # modules not passed explicitly on the command line are only
        # rewritten if they match the naming convention for test files
        for pat in self.fnpats:
            # use fnmatch instead of fn_pypath.fnmatch because the
            # latter might trigger an import to fnmatch.fnmatch
            # internally, which would cause this method to be
            # called recursively
            if fn_pypath.fnmatch(pat):
                state.trace("matched test file %r" % (fn,))
                return True
        for marked in self._must_rewrite:
            if name.startswith(marked):
                state.trace("matched marked file %r (from %r)" % (name, marked))
                return True
        return False
    def mark_rewrite(self, *names):
        """Mark import names as needing to be re-written.
        The named module or package as well as any nested modules will
        be re-written on import.
        """
        already_imported = set(names).intersection(set(sys.modules))
        if already_imported:
            for name in already_imported:
                if name not in self._rewritten_names:
                    self._warn_already_imported(name)
        self._must_rewrite.update(names)
    def _warn_already_imported(self, name):
        # Emitted when mark_rewrite targets a module imported before the
        # hook could intercept it; such modules keep plain asserts.
        self.config.warn(
            'P1',
            'Module already imported so can not be re-written: %s' % name)
    def load_module(self, name):
        """PEP302 loader: execute the rewritten code staged by find_module."""
        # If there is an existing module object named 'fullname' in
        # sys.modules, the loader must use that existing module. (Otherwise,
        # the reload() builtin will not work correctly.)
        if name in sys.modules:
            return sys.modules[name]
        co, pyc = self.modules.pop(name)
        # I wish I could just call imp.load_compiled here, but __file__ has to
        # be set properly. In Python 3.2+, this all would be handled correctly
        # by load_compiled.
        mod = sys.modules[name] = imp.new_module(name)
        try:
            mod.__file__ = co.co_filename
            # Normally, this attribute is 3.2+.
            mod.__cached__ = pyc
            mod.__loader__ = self
            py.builtin.exec_(co, mod.__dict__)
        except:
            if name in sys.modules:
                del sys.modules[name]
            raise
        return sys.modules[name]
    def is_package(self, name):
        """Return True when *name* resolves to a package directory."""
        try:
            fd, fn, desc = imp.find_module(name)
        except ImportError:
            return False
        if fd is not None:
            fd.close()
        tp = desc[2]
        return tp == imp.PKG_DIRECTORY
    @classmethod
    def _register_with_pkg_resources(cls):
        """
        Ensure package resources can be loaded from this loader. May be called
        multiple times, as the operation is idempotent.
        """
        try:
            import pkg_resources
            # access an attribute in case a deferred importer is present
            pkg_resources.__name__
        except ImportError:
            return
        # Since pytest tests are always located in the file system, the
        # DefaultProvider is appropriate.
        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
    def get_data(self, pathname):
        """Optional PEP302 get_data API.
        """
        with open(pathname, 'rb') as f:
            return f.read()
def _write_pyc(state, co, source_stat, pyc):
    """Serialize code object *co* to the cache file *pyc*.

    The layout mirrors CPython's pyc format (magic, mtime, size, marshalled
    code) so imp.load_compiled could in principle read it, although these
    files are never seen by the builtin import machinery.  Returns True on
    success, False when the file could not be opened (failures to write the
    cache are deliberately non-fatal: permissions, __pycache__ being a
    plain file, etc.).
    """
    try:
        fp = open(pyc, "wb")
    except IOError:
        err = sys.exc_info()[1].errno
        state.trace("error writing pyc file at %s: errno=%s" % (pyc, err))
        # Swallow the failure: a missing cache only costs a re-rewrite later.
        return False
    with fp:
        fp.write(imp.get_magic())
        mtime = int(source_stat.mtime)
        size = source_stat.size & 0xFFFFFFFF
        fp.write(struct.pack("<ll", mtime, size))
        marshal.dump(co, fp)
    return True
# Newline byte sequences used when normalizing Windows line endings.
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
# PEP 263 coding-cookie matcher (checked on the first two source lines).
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _rewrite_test(config, fn):
    """Try to read and rewrite *fn* and return the code object."""
    # Returns (stat_result, code_object) on success, (None, None) on any
    # read/decode/parse/compile failure so the real import reports the error.
    state = config._assertstate
    try:
        stat = fn.stat()
        source = fn.read("rb")
    except EnvironmentError:
        return None, None
    if ASCII_IS_DEFAULT_ENCODING:
        # ASCII is the default encoding in Python 2. Without a coding
        # declaration, Python 2 will complain about any bytes in the file
        # outside the ASCII range. Sadly, this behavior does not extend to
        # compile() or ast.parse(), which prefer to interpret the bytes as
        # latin-1. (At least they properly handle explicit coding cookies.) To
        # preserve this error behavior, we could force ast.parse() to use ASCII
        # as the encoding by inserting a coding cookie. Unfortunately, that
        # messes up line numbers. Thus, we have to check ourselves if anything
        # is outside the ASCII range in the case no encoding is explicitly
        # declared. For more context, see issue #269. Yay for Python 3 which
        # gets this right.
        end1 = source.find("\n")
        end2 = source.find("\n", end1 + 1)
        if (not source.startswith(BOM_UTF8) and
            cookie_re.match(source[0:end1]) is None and
            cookie_re.match(source[end1 + 1:end2]) is None):
            if hasattr(state, "_indecode"):
                # encodings imported us again, so don't rewrite.
                return None, None
            # _indecode guards against recursive re-entry via the encodings
            # package while we decode.
            state._indecode = True
            try:
                try:
                    source.decode("ascii")
                except UnicodeDecodeError:
                    # Let it fail in real import.
                    return None, None
            finally:
                del state._indecode
    # On Python versions which are not 2.7 and less than or equal to 3.1, the
    # parser expects *nix newlines.
    if REWRITE_NEWLINES:
        source = source.replace(RN, N) + N
    try:
        tree = ast.parse(source)
    except SyntaxError:
        # Let this pop up again in the real import.
        state.trace("failed to parse: %r" % (fn,))
        return None, None
    rewrite_asserts(tree, fn, config)
    try:
        co = compile(tree, fn.strpath, "exec", dont_inherit=True)
    except SyntaxError:
        # It's possible that this error is from some bug in the
        # assertion rewriting, but I don't know of a fast way to tell.
        state.trace("failed to compile: %r" % (fn,))
        return None, None
    return stat, co
def _make_rewritten_pyc(state, source_stat, pyc, co):
    """Try to dump rewritten code to *pyc*."""
    if not sys.platform.startswith("win"):
        # POSIX: rename is atomic, so dump into a per-process temp file
        # first and atomically swap it into place.  This keeps the cached
        # pyc always complete and valid for concurrent readers.
        proc_pyc = "%s.%s" % (pyc, os.getpid())
        if _write_pyc(state, co, source_stat, proc_pyc):
            os.rename(proc_pyc, pyc)
    else:
        # Windows grants exclusive access to open files and lacks atomic
        # rename, so write straight into the final file.
        _write_pyc(state, co, source_stat, pyc)
def _read_pyc(source, pyc, trace=lambda x: None):
    """Possibly read a pytest pyc containing rewritten code.
    Return rewritten code if successful or None if not.
    """
    try:
        fp = open(pyc, "rb")
    except IOError:
        return None
    with fp:
        try:
            mtime = int(source.mtime())
            size = source.size()
            header = fp.read(12)
        except EnvironmentError as e:
            trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
            return None
        # The 12-byte header is: 4-byte magic, then "<ll" mtime and size.
        # Reject a short, foreign, or stale pyc.
        up_to_date = (len(header) == 12 and
                      header[:4] == imp.get_magic() and
                      struct.unpack("<ll", header[4:]) == (mtime, size))
        if not up_to_date:
            trace('_read_pyc(%s): invalid or out of date pyc' % source)
            return None
        try:
            code = marshal.load(fp)
        except Exception as e:
            trace('_read_pyc(%s): marshal.load error %s' % (source, e))
            return None
        if isinstance(code, types.CodeType):
            return code
        trace('_read_pyc(%s): not a code object' % source)
        return None
def rewrite_asserts(mod, module_path=None, config=None):
    """Rewrite the assert statements in mod."""
    rewriter = AssertionRewriter(module_path, config)
    rewriter.run(mod)
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    util.format_explanation() treats newlines as special characters, so
    they must be escaped here.  Normally format_explanation() handles
    this itself, but a custom __repr__ may already contain the special
    escape sequences ('\n{' and '\n}' are common in JSON reprs).
    """
    obj_repr = py.io.saferepr(obj)
    if py.builtin._istext(obj_repr):
        text_type = py.builtin.text
    else:
        text_type = py.builtin.bytes
    return obj_repr.replace(text_type("\n"), text_type("\\n"))
from _pytest.assertion.util import format_explanation as _format_explanation # noqa
def _format_assertmsg(obj):
    """Format the custom assertion message given.

    For strings, newlines are replaced with '\n~' so that
    util.format_explanation() preserves them instead of escaping them.
    Any other object is first converted with py.io.saferepr().
    """
    # reprlib escapes newlines inside plain strings but not inside a
    # custom .__repr__() output; either way the newline must survive,
    # hence the extra '\\n' -> '\n~' pass on repr'd objects.
    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
        string, from_repr = obj, False
    else:
        string, from_repr = py.io.saferepr(obj), True
    if py.builtin._istext(string):
        text_type = py.builtin.text
    else:
        text_type = py.builtin.bytes
    string = string.replace(text_type("\n"), text_type("\n~"))
    string = string.replace(text_type("%"), text_type("%%"))
    if from_repr:
        string = string.replace(text_type("\\n"), text_type("\n~"))
    return string
def _should_repr_global_name(obj):
    """Return True when a global's repr (not its name) should be shown."""
    # Named things (functions, classes, modules) and callables read better
    # as their name in an assertion explanation.
    if hasattr(obj, "__name__"):
        return False
    return not py.builtin.callable(obj)
def _format_boolop(explanations, is_or):
    """Join sub-explanations of a boolean op, escaping '%' for later
    %-formatting."""
    joiner = " or " if is_or else " and "
    explanation = "(" + joiner.join(explanations) + ")"
    if py.builtin._istext(explanation):
        text_type = py.builtin.text
    else:
        text_type = py.builtin.bytes
    return explanation.replace(text_type('%'), text_type('%%'))
def _call_reprcompare(ops, results, expls, each_obj):
    """Pick the explanation for the first failing step of a (chained)
    comparison, preferring the util._reprcompare hook when installed."""
    # Walk the per-op results until one is falsy (or raises when coerced
    # to bool); i/expl keep the values for the step where we stopped.
    for i, res, expl in zip(range(len(ops)), results, expls):
        try:
            failed = not res
        except Exception:
            failed = True
        if failed:
            break
    reprcompare_hook = util._reprcompare
    if reprcompare_hook is not None:
        custom = reprcompare_hook(ops[i], each_obj[i], each_obj[i + 1])
        if custom is not None:
            return custom
    return expl
# %-format templates for unary operators used in explanations.
unary_map = {
    ast.Not: "not %s",
    ast.Invert: "~%s",
    ast.USub: "-%s",
    ast.UAdd: "+%s"
}
# Source-text symbols for binary and comparison operators.
binop_map = {
    ast.BitOr: "|",
    ast.BitXor: "^",
    ast.BitAnd: "&",
    ast.LShift: "<<",
    ast.RShift: ">>",
    ast.Add: "+",
    ast.Sub: "-",
    ast.Mult: "*",
    ast.Div: "/",
    ast.FloorDiv: "//",
    ast.Mod: "%%", # escaped for string formatting
    ast.Eq: "==",
    ast.NotEq: "!=",
    ast.Lt: "<",
    ast.LtE: "<=",
    ast.Gt: ">",
    ast.GtE: ">=",
    ast.Pow: "**",
    ast.Is: "is",
    ast.IsNot: "is not",
    ast.In: "in",
    ast.NotIn: "not in"
}
# Python 3.5+ compatibility
try:
    binop_map[ast.MatMult] = "@"
except AttributeError:
    pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
    _NameConstant = ast.NameConstant
else:
    def _NameConstant(c):
        # Pre-3.4: None/True/False are plain Name nodes.
        return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
    """Set node location information recursively.

    Stamps *lineno*/*col_offset* onto *node* and every descendant that
    carries those attributes, then returns *node*.
    """
    # Iterative traversal with an explicit worklist (order is irrelevant
    # since every node gets the same location).
    pending = [node]
    while pending:
        current = pending.pop()
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        pending.extend(ast.iter_child_nodes(current))
    return node
class AssertionRewriter(ast.NodeVisitor):
    """Assertion rewriting implementation.
    The main entrypoint is to call .run() with an ast.Module instance,
    this will then find all the assert statements and re-write them to
    provide intermediate values and a detailed assertion error. See
    http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
    for an overview of how this works.
    The entry point here is .run() which will iterate over all the
    statements in an ast.Module and for each ast.Assert statement it
    finds call .visit() with it. Then .visit_Assert() takes over and
    is responsible for creating new ast statements to replace the
    original assert statement: it re-writes the test of an assertion
    to provide intermediate values and replace it with an if statement
    which raises an assertion error with a detailed explanation in
    case the expression is false.
    For this .visit_Assert() uses the visitor pattern to visit all the
    AST nodes of the ast.Assert.test field, each visit call returning
    an AST node and the corresponding explanation string. During this
    state is kept in several instance attributes:
    :statements: All the AST statements which will replace the assert
       statement.
    :variables: This is populated by .variable() with each variable
       used by the statements so that they can all be set to None at
       the end of the statements.
    :variable_counter: Counter to create new unique variables needed
       by statements. Variables are created using .variable() and
       have the form of "@py_assert0".
    :on_failure: The AST statements which will be executed if the
       assertion test fails. This is the code which will construct
       the failure message and raises the AssertionError.
    :explanation_specifiers: A dict filled by .explanation_param()
       with %-formatting placeholders and their corresponding
       expressions to use in the building of an assertion message.
       This is used by .pop_format_context() to build a message.
    :stack: A stack of the explanation_specifiers dicts maintained by
       .push_format_context() and .pop_format_context() which allows
       to build another %-formatted string while already building one.
    This state is reset on every new assert statement visited and used
    by the other visitors.
    """
    def __init__(self, module_path, config):
        # module_path/config are only used to emit the R1 warning for
        # always-true tuple assertions in visit_Assert.
        super(AssertionRewriter, self).__init__()
        self.module_path = module_path
        self.config = config
    def run(self, mod):
        """Find all assert statements in *mod* and rewrite them."""
        if not mod.body:
            # Nothing to do.
            return
        # Insert some special imports at the top of the module but after any
        # docstrings and __future__ imports.
        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
                   ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
        expect_docstring = True
        pos = 0
        lineno = 0
        for item in mod.body:
            if (expect_docstring and isinstance(item, ast.Expr) and
                    isinstance(item.value, ast.Str)):
                doc = item.value.s
                if "PYTEST_DONT_REWRITE" in doc:
                    # The module has disabled assertion rewriting.
                    return
                lineno += len(doc) - 1
                expect_docstring = False
            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
                  item.module != "__future__"):
                lineno = item.lineno
                break
            pos += 1
        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
                   for alias in aliases]
        mod.body[pos:pos] = imports
        # Collect asserts.
        nodes = [mod]
        while nodes:
            node = nodes.pop()
            for name, field in ast.iter_fields(node):
                if isinstance(field, list):
                    new = []
                    for i, child in enumerate(field):
                        if isinstance(child, ast.Assert):
                            # Transform assert.
                            new.extend(self.visit(child))
                        else:
                            new.append(child)
                            if isinstance(child, ast.AST):
                                nodes.append(child)
                    setattr(node, name, new)
                elif (isinstance(field, ast.AST) and
                      # Don't recurse into expressions as they can't contain
                      # asserts.
                      not isinstance(field, ast.expr)):
                    nodes.append(field)
    def variable(self):
        """Get a new variable."""
        # Use a character invalid in python identifiers to avoid clashing.
        name = "@py_assert" + str(next(self.variable_counter))
        self.variables.append(name)
        return name
    def assign(self, expr):
        """Give *expr* a name."""
        name = self.variable()
        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
        return ast.Name(name, ast.Load())
    def display(self, expr):
        """Call py.io.saferepr on the expression."""
        return self.helper("saferepr", expr)
    def helper(self, name, *args):
        """Call a helper in this module."""
        # Resolves to "_<name>" on the "@pytest_ar" alias inserted by run().
        py_name = ast.Name("@pytest_ar", ast.Load())
        attr = ast.Attribute(py_name, "_" + name, ast.Load())
        return ast_Call(attr, list(args), [])
    def builtin(self, name):
        """Return the builtin called *name*."""
        builtin_name = ast.Name("@py_builtins", ast.Load())
        return ast.Attribute(builtin_name, name, ast.Load())
    def explanation_param(self, expr):
        """Return a new named %-formatting placeholder for expr.
        This creates a %-formatting placeholder for expr in the
        current formatting context, e.g. ``%(py0)s``. The placeholder
        and expr are placed in the current format context so that it
        can be used on the next call to .pop_format_context().
        """
        specifier = "py" + str(next(self.variable_counter))
        self.explanation_specifiers[specifier] = expr
        return "%(" + specifier + ")s"
    def push_format_context(self):
        """Create a new formatting context.
        The format context is used for when an explanation wants to
        have a variable value formatted in the assertion message. In
        this case the value required can be added using
        .explanation_param(). Finally .pop_format_context() is used
        to format a string of %-formatted values as added by
        .explanation_param().
        """
        self.explanation_specifiers = {}
        self.stack.append(self.explanation_specifiers)
    def pop_format_context(self, expl_expr):
        """Format the %-formatted string with current format context.
        The expl_expr should be an ast.Str instance constructed from
        the %-placeholders created by .explanation_param(). This will
        add the required code to format said string to .on_failure and
        return the ast.Name instance of the formatted string.
        """
        current = self.stack.pop()
        if self.stack:
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())
    def generic_visit(self, node):
        """Handle expressions we don't have custom code for."""
        assert isinstance(node, ast.expr)
        res = self.assign(node)
        return res, self.explanation_param(self.display(res))
    def visit_Assert(self, assert_):
        """Return the AST statements to replace the ast.Assert instance.
        This re-writes the test of an assertion to provide
        intermediate values and replace it with an if statement which
        raises an assertion error with a detailed explanation in case
        the expression is false.
        """
        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
            fslocation = (self.module_path, assert_.lineno)
            self.config.warn('R1', 'assertion is always true, perhaps '
                             'remove parentheses?', fslocation=fslocation)
        # Reset the per-assert rewriting state (see class docstring).
        self.statements = []
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        if assert_.msg:
            assertmsg = self.helper('format_assertmsg', assert_.msg)
            explanation = "\n>assert " + explanation
        else:
            assertmsg = ast.Str("")
            explanation = "assert " + explanation
        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast_Call(err_name, [fmt], [])
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, _NameConstant(None))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements
    def visit_Name(self, name):
        # Display the repr of the name if it's a local variable or
        # _should_repr_global_name() thinks it's acceptable.
        locs = ast_Call(self.builtin("locals"), [], [])
        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
        dorepr = self.helper("should_repr_global_name", name)
        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
        return name, self.explanation_param(expr)
    def visit_BoolOp(self, boolop):
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast_Call(app, [expl_format], [])
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                cond = res
                if is_or:
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
    def visit_UnaryOp(self, unary):
        pattern = unary_map[unary.op.__class__]
        operand_res, operand_expl = self.visit(unary.operand)
        res = self.assign(ast.UnaryOp(unary.op, operand_res))
        return res, pattern % (operand_expl,)
    def visit_BinOp(self, binop):
        symbol = binop_map[binop.op.__class__]
        left_expr, left_expl = self.visit(binop.left)
        right_expr, right_expl = self.visit(binop.right)
        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
        return res, explanation
    def visit_Call_35(self, call):
        """
        visit `ast.Call` nodes on Python3.5 and after
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        for arg in call.args:
            res, expl = self.visit(arg)
            arg_expls.append(expl)
            new_args.append(res)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            if keyword.arg:
                arg_expls.append(keyword.arg + "=" + expl)
            else: ## **args have `arg` keywords with an .arg of None
                arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl
    def visit_Starred(self, starred):
        # From Python 3.5, a Starred node can appear in a function call
        res, expl = self.visit(starred.value)
        return starred, '*' + expl
    def visit_Call_legacy(self, call):
        """
        visit `ast.Call nodes on 3.4 and below`
        """
        new_func, func_expl = self.visit(call.func)
        arg_expls = []
        new_args = []
        new_kwargs = []
        new_star = new_kwarg = None
        for arg in call.args:
            res, expl = self.visit(arg)
            new_args.append(res)
            arg_expls.append(expl)
        for keyword in call.keywords:
            res, expl = self.visit(keyword.value)
            new_kwargs.append(ast.keyword(keyword.arg, res))
            arg_expls.append(keyword.arg + "=" + expl)
        if call.starargs:
            new_star, expl = self.visit(call.starargs)
            arg_expls.append("*" + expl)
        if call.kwargs:
            new_kwarg, expl = self.visit(call.kwargs)
            arg_expls.append("**" + expl)
        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
        new_call = ast.Call(new_func, new_args, new_kwargs,
                            new_star, new_kwarg)
        res = self.assign(new_call)
        res_expl = self.explanation_param(self.display(res))
        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
        return res, outer_expl
    # ast.Call signature changed on 3.5,
    # conditionally change which methods is named
    # visit_Call depending on Python version
    if sys.version_info >= (3, 5):
        visit_Call = visit_Call_35
    else:
        visit_Call = visit_Call_legacy
    def visit_Attribute(self, attr):
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        value, value_expl = self.visit(attr.value)
        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
        res_expl = self.explanation_param(self.display(res))
        pat = "%s\n{%s = %s.%s\n}"
        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
        return res, expl
    def visit_Compare(self, comp):
        self.push_format_context()
        left_res, left_expl = self.visit(comp.left)
        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
            left_expl = "({0})".format(left_expl)
        res_variables = [self.variable() for i in range(len(comp.ops))]
        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
        expls = []
        syms = []
        results = [left_res]
        for i, op, next_operand in it:
            next_res, next_expl = self.visit(next_operand)
            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
                next_expl = "({0})".format(next_expl)
            results.append(next_res)
            sym = binop_map[op.__class__]
            syms.append(ast.Str(sym))
            expl = "%s %s %s" % (left_expl, sym, next_expl)
            expls.append(ast.Str(expl))
            res_expr = ast.Compare(left_res, [op], [next_res])
            self.statements.append(ast.Assign([store_names[i]], res_expr))
            left_res, left_expl = next_res, next_expl
        # Use pytest.assertion.util._reprcompare if that's available.
        expl_call = self.helper("call_reprcompare",
                                ast.Tuple(syms, ast.Load()),
                                ast.Tuple(load_names, ast.Load()),
                                ast.Tuple(expls, ast.Load()),
                                ast.Tuple(results, ast.Load()))
        if len(comp.ops) > 1:
            res = ast.BoolOp(ast.And(), load_names)
        else:
            res = load_names[0]
        return res, self.explanation_param(self.pop_format_context(expl_call))
|
alexzoo/python
|
selenium_tests/env/lib/python3.6/site-packages/_pytest/assertion/rewrite.py
|
Python
|
apache-2.0
| 36,453
|
[
"VisIt"
] |
a017c738fa852a1f6bff9c3295d42123e1c345693fe0706c93babd6f6f04b974
|
"""The PyRhO package setup script"""
#from __future__ import print_function # Added for Python 2.x support
from setuptools import setup, find_packages # Prefer setuptools over distutils
from codecs import open # To use a consistent encoding
import os
# Download and install setuptools if not installed
#from ez_setup import use_setuptools
#use_setuptools()
#python -m ensurepip --upgrade
#from setuptools import setup
#from distutils import setup
here = os.path.abspath(os.path.dirname(__file__))
home = os.path.expanduser("~")
print(home)
prwd = os.path.join(home, 'pyrho') # pyrho working directory
# TODO: Test changes to package_data and include notebooks and license without MANIFEST
# TODO: Fix this to remove redundant long_description text
# Get the long description from the relevant file
#with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
#with open('DESCRIPTION.rst', encoding='utf-8') as f:
# long_description = f.read()
long_description = """
PyRhO - A Virtual Optogenetics Laboratory
=========================================
A Python module to fit and characterise rhodopsin photocurrents.
Background
----------
Optogenetics has become a key tool for understanding the function of neural circuits and controlling their behaviour. An array of directly light driven opsins have been genetically isolated from several families of organisms, with a wide range of temporal and spectral properties. In order to characterize, understand and apply these rhodopsins, we present an integrated suite of open-source, multi-scale computational tools called PyRhO.
PyRhO enables users to:
(i) characterize new (and existing) rhodopsins by automatically fitting a minimal set of experimental data to three, four or six-state kinetic models,
(ii) simulate these models at the channel, neuron & network levels and
(iii) gain functional insights through model selection and virtual experiments *in silico*.
The module is written in Python with an additional IPython/Jupyter notebook based GUI, allowing models to be fit, simulations to be run and results to be shared through simply interacting with a webpage. The seamless integration of model fitting algorithms with simulation environments for these virtual opsins will enable (neuro)scientists to gain a comprehensive understanding of their behaviour and rapidly identify the most suitable variant for application in a particular biological system. This process may thereby guide not only experimental design and opsin choice but also alterations of the rhodopsin genetic code in a neuro-engineering feed-back loop. In this way, we hope PyRhO will help to significantly improve optogenetics as a tool for transforming biological sciences.
Further Information
-------------------
If you use PyRhO please cite our paper:
Evans, B. D., Jarvis, S., Schultz, S. R. & Nikolic K. (2016) "PyRhO: A Multiscale Optogenetics Simulation Platform", *Frontiers in Neuroinformatics, 10* (8). `doi:10.3389/fninf.2016.00008 <https://dx.doi.org/10.3389/fninf.2016.00008>`_
The PyRhO project website with additional documentation may be found here: `www.imperial.ac.uk/bio-modelling/pyrho <http://www.imperial.ac.uk/a-z-research/bio-modelling/pyrho>`_
Finally, don't forget to follow us on twitter for updates: `@ProjectPyRhO <https://twitter.com/ProjectPyRhO>`_!
"""
setup(
name='PyRhO',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.9.5',
description='Fit and characterise rhodopsin photocurrents',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ProjectPyRhO/PyRhO/',
# download_url='https://github.com/ProjectPyRhO/PyRhO/archive/master.zip',
# download_url='https://github.com/ProjectPyRhO/PyRhO/tarball/' + version,
# Author details
author='Benjamin D. Evans',
author_email='ben.d.evans@gmail.com',
license='BSD',
platforms=['Linux', 'Mac OS X', 'Windows'],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Artificial Life',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
# The license should match "license" above
'License :: OSI Approved :: BSD License',
# Supported Python versions
'Programming Language :: Python',
'Programming Language :: Python :: 3',
# 3.6 EOL: 23/12/21
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: IPython',
'Natural Language :: English',
'Operating System :: OS Independent',
],
#keywords='optogenetics rhodopsin opsin brain neuroscience neuron brian jupyter',
keywords=['optogenetics', 'rhodopsin', 'opsin', 'brain', 'neuroscience',
'neuron', 'brian', 'jupyter'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# package_dir = {'':'.'},
# package_dir = {'pyrho': 'pyrho'}, # Relative to this script
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# ipython is used for latex repr - remove from requirements and have a fallback repr?
install_requires=['numpy>=1.8', 'scipy>=0.15', 'matplotlib>=1.3',
'lmfit>=0.9.3,<1.0.3', 'brian2>=2.0'], # 'ipython>=4.1'
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# 'brian' : ['brian2'],
# 'docs' : ['sphinx>=1.3'],
'extras': ['seaborn>=0.7', 'pandas>=0.17'], # 'cython>=0.23'
# traitlets is a dependency of ipywidgets and can be removed if 4.1 entails traitlets>=4.1
'GUI' : ['jupyter>=1.0', 'notebook>=4.1', 'ipywidgets>=4.1,<5',
'seaborn>=0.7'], # , 'traitlets>=4.1,<5'
'full': ['jupyter>=1.0', 'notebook>=4.1', 'ipywidgets>=4.1,<5',
'seaborn>=0.7', 'pandas>=0.17'], # 'cython>=0.23'
},
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# TODO: Try this without MANIFEST
'NEURON' : ['*.mod', '*.hoc', '*.sh'],
'gui' : ['*.png'],
'datasets': ['*.pkl'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
#data_files=[#(prwd, []),
# (prwd, [os.path.join(prwd, 'gui/*.png'), ])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
|
ProjectPyRhO/PyRhO
|
setup.py
|
Python
|
bsd-3-clause
| 8,243
|
[
"Brian",
"NEURON"
] |
f91c217f77562a1642b33713d9f162835639d80ae3f493202c9aa64d6d80f668
|
from __future__ import absolute_import
from __future__ import print_function
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from six import StringIO
import six.moves.cPickle
from collections import defaultdict
import logging
import os
import time
import six
from six.moves import map
from six.moves import range
log = logging.getLogger("main")
from ete3.tools.phylobuild_lib.master_task import CogSelectorTask
from ete3.tools.phylobuild_lib.errors import DataError
from ete3.tools.phylobuild_lib.utils import (GLOBALS, print_as_table, generate_node_ids,
encode_seqname, md5, pjoin, _mean, _median, _max, _min, _std)
from ete3.tools.phylobuild_lib import db
__all__ = ["BrhCogCreator"]
quote = lambda _x: '"%s"' %_x
class BrhCogCreator(CogSelectorTask):
    """Task that builds Clusters of Orthologous Groups (COGs) from
    precomputed best-reciprocal-hit (BRH) relationships stored in ``db``.
    """

    def __init__(self, target_sp, out_sp, seqtype, conf, confname):
        self.seed = conf[confname]["_seed"]
        self.missing_factor = float(conf[confname]["_species_missing_factor"])
        node_id, clade_id = generate_node_ids(target_sp, out_sp)
        # Initialize task
        CogSelectorTask.__init__(self, node_id, "cog_selector",
                                 "Cog-Selector", None, conf[confname])

        # taskid does not depend on jobs, so I set it manually
        self.cladeid = clade_id
        self.seqtype = seqtype
        self.targets = target_sp
        self.outgroups = out_sp
        self.init()
        self.size = len(target_sp | out_sp)
        self.cog_analysis = None
        self.cogs = None

    def finish(self):
        """Compute the COGs, translate sequence names, order them
        deterministically and store the result.

        Raises:
            DataError: if any sequence id cannot be translated.
        """
        all_species = self.targets | self.outgroups
        cogs, cog_analysis = brh_cogs2(db, all_species,
                                       missing_factor=self.missing_factor,
                                       seed_sp=self.seed)
        self.raw_cogs = cogs
        self.cog_analysis = cog_analysis
        self.cogs = []
        for co in cogs:
            encoded_names = db.translate_names(co)
            if len(encoded_names) != len(co):
                print(set(co) - set(encoded_names.keys()))
                raise DataError("Some sequence ids could not be translated")
            self.cogs.append(list(encoded_names.values()))

        # Sort Cogs according to the md5 hash of its content. Random
        # sorting but kept among runs.
        for cog in self.cogs:
            cog.sort()
        # BUGFIX: list.sort() no longer accepts a comparison function (and
        # cmp() does not exist) in Python 3; sort by the md5 key instead.
        self.cogs.sort(key=lambda x: md5(','.join(x)))
        log.log(28, "%s COGs detected" % len(self.cogs))
        CogSelectorTask.store_data(self, self.cogs, self.cog_analysis)
def brh_cogs(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):
    """It scans all precalculate BRH relationships among the species
    passed as an argument, and detects Clusters of Orthologs
    according to several criteria:

    min_score: the min coverage/overalp value required for a
    blast to be a reliable hit.

    missing_factor: the min percentage of species in which a
    given seq must have orthologs.

    Returns ``(recoded_cogs, analysis_report)``.

    Raises:
        ValueError: if an inconsistent COG is found, or if no COG covers
            every species.
    """
    log.log(26, "Searching BRH orthologs")
    species = set(map(str, species))

    # A COG must contain at least this many species.
    min_species = len(species) - round(missing_factor * len(species))

    if seed_sp == "auto":
        sp_to_test = list(species)
    elif seed_sp == "largest":
        cmd = """SELECT taxid, size FROM species"""
        db.seqcursor.execute(cmd)
        sp2size = {}
        for tax, counter in db.seqcursor.fetchall():
            if tax in species:
                sp2size[tax] = counter
        # BUGFIX: sorted() no longer takes a cmp function in Python 3; a
        # key function gives the same ascending-by-size order.
        sorted_sp = sorted(sp2size.items(), key=lambda item: item[1])
        log.log(24, sorted_sp[:6])
        largest_sp = sorted_sp[-1][0]
        sp_to_test = [largest_sp]
        log.log(28, "Using %s as search seed. Proteome size=%s genes" %
                (largest_sp, sp2size[largest_sp]))
    else:
        sp_to_test = [str(seed_sp)]

    # The following loop tests each possible seed if none is specified.
    log.log(28, "Detecting Clusters of Orthologs groups (COGs)")
    log.log(28, "Min number of species per COG: %d" % min_species)
    cogs_selection = []

    for j, seed in enumerate(sp_to_test):
        log.log(26, "Testing new seed species:%s (%d/%d)", seed, j + 1, len(sp_to_test))
        # NOTE(review): SQL is assembled by string interpolation; values
        # come from the internal species table rather than end users, but
        # parameterized queries would be safer.
        species_side1 = ','.join(map(quote, [s for s in species if str(s) > str(seed)]))
        species_side2 = ','.join(map(quote, [s for s in species if str(s) < str(seed)]))
        pairs1 = []
        pairs2 = []
        # Select all ids with matches in the target species, and
        # return the total number of species covered by each of
        # such ids.
        if species_side1 != "":
            cmd = """SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE
            taxid1="%s" AND taxid2 IN (%s) """ %\
            (seed, species_side1)
            DB.orthocursor.execute(cmd)
            pairs1 = DB.orthocursor.fetchall()

        if species_side2 != "":
            cmd = """SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE
            taxid1 IN (%s) AND taxid2 = "%s" """ %\
            (species_side2, seed)
            DB.orthocursor.execute(cmd)
            pairs2 = DB.orthocursor.fetchall()

        # Group each seed sequence with all of its orthologs: one COG
        # candidate per seed sequence.
        cog_candidates = defaultdict(set)
        for seq1, sp1, seq2, sp2 in pairs1 + pairs2:
            s1 = (sp1, seq1)
            s2 = (sp2, seq2)
            cog_candidates[(sp1, seq1)].update([s1, s2])

        all_cogs = [cand for cand in cog_candidates.values() if
                    len(cand) >= min_species]

        # Consistency check: every COG must hold one sequence per species.
        cog_sizes = [len(cog) for cog in all_cogs]
        cog_spsizes = [len(set(e[0] for e in cog)) for cog in all_cogs]
        if any(size != spsize for size, spsize in zip(cog_sizes, cog_spsizes)):
            raise ValueError("Inconsistent COG found")

        if cog_sizes:
            cogs_selection.append([seed, all_cogs])
        log.log(26, "Found %d COGs" % len(all_cogs))

    def _cog_rank(selection):
        """Ranking key for a [seed, cogs] entry: largest COG, then mean
        COG size, then number of COGs (all maximised)."""
        cogs = selection[1]  # discard seed info
        sizes = [len(cog) for cog in cogs]
        return (_max(sizes), round(_mean(sizes)), len(cogs))

    log.log(26, "Finding best COG selection...")
    # BUGFIX: the original passed a Python 2 cmp-style comparator to
    # list.sort(), which fails on Python 3; this descending key sort
    # applies the same (max, avg, count) criteria.
    cogs_selection.sort(key=_cog_rank, reverse=True)

    lines = []
    for seed, all_cogs in cogs_selection:
        cog_sizes = [len(cog) for cog in all_cogs]
        mx, mn, avg = max(cog_sizes), min(cog_sizes), round(_mean(cog_sizes))
        lines.append([seed, mx, mn, avg, len(all_cogs)])
    analysis_txt = StringIO()
    print_as_table(lines[:25], stdout=analysis_txt,
                   header=["Seed", "largest COG", "smallest COGs", "avg COG size", "total COGs"])
    log.log(28, "Analysis details:\n" + analysis_txt.getvalue())
    best_seed, best_cogs = cogs_selection[0]
    cog_sizes = [len(cog) for cog in best_cogs]

    if max(cog_sizes) < len(species):
        raise ValueError("Current COG selection parameters do not permit to cover all species")

    # Recode (species, seqid) tuples into "species<delim>seqid" strings.
    recoded_cogs = []
    for cog in best_cogs:
        named_cog = ["%s%s%s" % (x[0], GLOBALS["spname_delimiter"], x[1]) for x in cog]
        recoded_cogs.append(named_cog)

    return recoded_cogs, analysis_txt.getvalue()
def brh_cogs2(DB, species, missing_factor=0.0, seed_sp=None, min_score=0):
    """It scans all precalculate BRH relationships among the species
    passed as an argument, and detects Clusters of Orthologs
    according to several criteria:

    min_score: the min coverage/overalp value required for a
    blast to be a reliable hit.

    missing_factor: the min percentage of species in which a
    given seq must have orthologs.

    Unlike :func:`brh_cogs`, the best seed is chosen up front from
    per-seed size statistics (see :func:`get_sorted_seeds`) and the COGs
    are only computed for that seed.

    Returns ``(recoded_cogs, analysis_report)``.
    """
    log.log(26, "Searching BRH orthologs")
    species = set(map(str, species))

    min_species = len(species) - round(missing_factor * len(species))

    if seed_sp == "auto":
        sp_to_test = list(species)
    elif seed_sp == "largest":
        cmd = """SELECT taxid, size FROM species"""
        db.seqcursor.execute(cmd)
        sp2size = {}
        for tax, counter in db.seqcursor.fetchall():
            if tax in species:
                sp2size[tax] = counter
        # BUGFIX: Python 3 sorted() takes key=, not a cmp function.
        sorted_sp = sorted(sp2size.items(), key=lambda item: item[1])
        log.log(24, sorted_sp[:6])
        largest_sp = sorted_sp[-1][0]
        sp_to_test = [largest_sp]
        log.log(28, "Using %s as search seed. Proteome size=%s genes" %
                (largest_sp, sp2size[largest_sp]))
    else:
        sp_to_test = [str(seed_sp)]

    analysis_txt = StringIO()
    if sp_to_test:
        log.log(26, "Finding best COG selection...")
        seed2size = get_sorted_seeds(seed_sp, species, sp_to_test, min_species, DB)
        size_analysis = []
        for seedname, content in six.iteritems(seed2size):
            cog_sizes = [size for seq, size in content]
            mx, avg = _max(cog_sizes), round(_mean(cog_sizes))
            size_analysis.append([seedname, mx, avg, len(content)])
        # BUGFIX: replaced the Python 2 cmp-style comparator with an
        # equivalent descending key sort: largest COG, then avg COG size,
        # then number of COGs.
        size_analysis.sort(key=lambda entry: (entry[1], entry[2], entry[3]),
                           reverse=True)
        seed = size_analysis[0][0]
        print_as_table(size_analysis[:25], stdout=analysis_txt,
                       header=["Seed", "largest COG", "avg COG size", "total COGs"])
        if size_analysis[0][1] < len(species) - 1:
            print(size_analysis[0][1])
            raise ValueError("Current COG selection parameters do not permit to cover all species")
        log.log(28, analysis_txt.getvalue())
    # NOTE(review): if sp_to_test were empty, `seed` would be unbound
    # below; the three branches above always yield at least one seed
    # unless `species` itself is empty.

    log.log(28, "Computing Clusters of Orthologs groups (COGs)")
    log.log(28, "Min number of species per COG: %d" % min_species)
    cogs_selection = []
    log.log(26, "Using seed species:%s", seed)
    species_side1 = ','.join(map(quote, [s for s in species if str(s) > str(seed)]))
    species_side2 = ','.join(map(quote, [s for s in species if str(s) < str(seed)]))
    pairs1 = []
    pairs2 = []
    # Select all ids with matches in the target species, and
    # return the total number of species covered by each of
    # such ids.
    if species_side1 != "":
        cmd = """SELECT seqid1, taxid1, seqid2, taxid2 from ortho_pair WHERE
        taxid1="%s" AND taxid2 IN (%s) """ % (seed, species_side1)
        DB.orthocursor.execute(cmd)
        pairs1 = DB.orthocursor.fetchall()

    if species_side2 != "":
        cmd = """SELECT seqid2, taxid2, seqid1, taxid1 from ortho_pair WHERE
        taxid1 IN (%s) AND taxid2 = "%s" """ % (species_side2, seed)
        DB.orthocursor.execute(cmd)
        pairs2 = DB.orthocursor.fetchall()

    # Group every seed sequence with all its orthologs (COG candidates).
    cog_candidates = defaultdict(set)
    for seq1, sp1, seq2, sp2 in pairs1 + pairs2:
        s1 = (sp1, seq1)
        s2 = (sp2, seq2)
        cog_candidates[(sp1, seq1)].update([s1, s2])
    all_cogs = [cand for cand in cog_candidates.values() if
                len(cand) >= min_species]

    # CHECK CONSISTENCY against the seed pre-selection from seed2size.
    seqs = set()
    for cand in all_cogs:
        seqs.update([b for a, b in cand if a == seed])
    pre_selected_seqs = set([v[0] for v in seed2size[seed]])
    if len(seqs & pre_selected_seqs) != len(set(seed2size[seed])) or \
       len(seqs & pre_selected_seqs) != len(seqs):
        print("old method seqs", len(seqs), "new seqs", len(set(seed2size[seed])), "Common", len(seqs & pre_selected_seqs))
        raise ValueError("ooops")

    # Each COG must contain exactly one sequence per species.
    cog_sizes = [len(cog) for cog in all_cogs]
    cog_spsizes = [len(set(e[0] for e in cog)) for cog in all_cogs]
    if any(size != spsize for size, spsize in zip(cog_sizes, cog_spsizes)):
        raise ValueError("Inconsistent COG found")

    if cog_sizes:
        cogs_selection.append([seed, all_cogs])
    log.log(26, "Found %d COGs" % len(all_cogs))

    # Recode (species, seqid) tuples into "species<delim>seqid" strings.
    recoded_cogs = []
    for cog in all_cogs:
        named_cog = ["%s%s%s" % (x[0], GLOBALS["spname_delimiter"], x[1]) for x in cog]
        recoded_cogs.append(named_cog)

    return recoded_cogs, analysis_txt.getvalue()
def get_sorted_seeds(seed, species, sp_to_test, min_species, DB):
    """Count, for every candidate seed species, how many target species
    each of its sequences has a BRH ortholog in.

    Returns a dict mapping each tested seed to a list of
    ``(seqid, n_species)`` tuples for sequences that cover at least
    ``min_species - 1`` other species.
    """
    seed2count = {}
    species = set(species)
    for j, seed in enumerate(sp_to_test):
        # NOTE(review): the loop variable deliberately shadows the `seed`
        # parameter, which is not otherwise used in this function.
        log.log(26, "Testing SIZE of new seed species:%s (%d/%d)", seed, j + 1, len(sp_to_test))

        cmd = """SELECT seqid1, GROUP_CONCAT(taxid2) FROM ortho_pair WHERE
        taxid1="%s" GROUP BY (seqid1)""" % (seed)
        DB.orthocursor.execute(cmd)
        pairs1 = DB.orthocursor.fetchall()

        cmd = """SELECT seqid2, GROUP_CONCAT(taxid1) FROM ortho_pair WHERE
        taxid2 = "%s" GROUP BY seqid2""" % (seed)
        DB.orthocursor.execute(cmd)
        pairs2 = DB.orthocursor.fetchall()

        # Compute number of species for each seqid representing a cog
        counter = defaultdict(set)
        for seqid, targets in pairs1 + pairs2:
            counter[seqid].update(set(targets.split(",")) & species)

        # Filter out too small COGs
        valid_seqs = [(k, len(v)) for k, v in six.iteritems(counter) if
                      len(v) >= min_species - 1]
        seed2count[seed] = valid_seqs
        log.log(28, "Seed species:%s COGs:%s" % (seed, len(seed2count[seed])))
    return seed2count
def get_best_selection(cogs_selections, species):
    """Rank candidate COG selections and return the best one.

    Selections are ranked (descending) by: number of species represented,
    overall score, worst-species coverage, best-species coverage and
    number of candidate COGs.

    Returns ``(best_selection, report)`` where ``report`` is a StringIO
    holding a human-readable comparison table.
    """
    ALL_SPECIES = set(species)

    max_cogs = _max([len(data[2]) for data in cogs_selections])
    median_cogs = _median([len(data[2]) for data in cogs_selections])

    def _selection_rank(cs):
        """Descending ranking key; see the docstring for criteria order."""
        seed, missing_sp_allowed, candidates, sp2hits = cs
        score, min_cov, max_cov, median_cov, cov_std, cog_cov = get_cog_score(
            candidates, sp2hits, median_cogs, ALL_SPECIES - set([seed]))
        # BUGFIX: the original comparator computed
        # `sp_represented_2 = len(sp2hits_1)` (copy/paste), so the
        # species-represented criterion never discriminated; it also
        # relied on Python 2's cmp(), removed in Python 3.
        return (len(sp2hits), score, min_cov, max_cov, len(candidates))

    # Sort best-first (replaces the cmp-based sort + reverse()).
    cogs_selections.sort(key=_selection_rank, reverse=True)

    header = ['seed',
              'missing sp allowed',
              'spcs covered',
              '#COGs',
              'mean sp coverage)',
              '#COGs for worst sp.',
              '#COGs for best sp.',
              'sp. in COGS(avg)',
              'SCORE' ]
    print_header = True
    best_cog_selection = None
    cog_analysis = StringIO()
    for i, cogs in enumerate(cogs_selections):
        seed, missing_sp_allowed, candidates, sp2hits = cogs
        sp_percent_coverages = [(100 * sp2hits.get(sp, 0)) / float(len(candidates)) for sp in species]
        sp_coverages = [sp2hits.get(sp, 0) for sp in species]
        score, min_cov, max_cov, median_cov, cov_std, cog_cov = get_cog_score(
            candidates, sp2hits, median_cogs, ALL_SPECIES - set([seed]))

        # The list is sorted best-first, so the first row is the winner.
        if best_cog_selection is None:
            best_cog_selection = i
            flag = "*"
        else:
            flag = " "
        data = (candidates,
                flag + "%10s" % seed,
                missing_sp_allowed,
                "%d (%0.1f%%)" % (len(set(sp2hits.keys())) + 1, 100 * float(len(ALL_SPECIES)) / (len(sp2hits) + 1)),
                len(candidates),
                "%0.1f%% +- %0.1f" % (_mean(sp_percent_coverages), _std(sp_percent_coverages)),
                "% 3d (%0.1f%%)" % (min(sp_coverages), 100 * min(sp_coverages) / float(len(candidates))),
                "% 3d (%0.1f%%)" % (max(sp_coverages), 100 * max(sp_coverages) / float(len(candidates))),
                cog_cov,
                score
                )
        if print_header:
            print_as_table([data[1:]], header=header, print_header=True, stdout=cog_analysis)
            print_header = False
        else:
            print_as_table([data[1:]], header=header, print_header=False, stdout=cog_analysis)

    print(cog_analysis.getvalue())
    return cogs_selections[best_cog_selection], cog_analysis
def _analyze_cog_selection(all_cogs):
    """Debug helper: plot per-species COG counts and the COG size
    distribution.

    NOTE(review): `pylab` is not imported anywhere in this module, so
    calling this raises NameError; it appears to be developer-only
    scratch code kept for reference.
    """
    print("total cogs:", len(all_cogs))
    sp2cogcount = {}
    size2cogs = {}
    for cog in all_cogs:
        for seq in cog:
            sp = seq.split(GLOBALS["spname_delimiter"])[0]
            sp2cogcount[sp] = sp2cogcount.setdefault(sp, 0) + 1
            # NOTE(review): this appends the COG once per *sequence*, so
            # each COG is recorded len(cog) times — presumably it was
            # meant to run once per COG; behavior kept as-is.
            size2cogs.setdefault(len(cog), []).append(cog)

    # BUGFIX: Python 3 sorted() takes key=, not a cmp function.
    sorted_spcs = sorted(sp2cogcount.items(), key=lambda item: item[1])
    # Take only first 20 species
    coverages = [s[1] for s in sorted_spcs][:20]
    spnames = [str(s[0]) + s[0] for s in sorted_spcs][:20]

    pylab.subplot(1, 2, 1)
    pylab.bar(list(range(len(coverages))), coverages)
    labels = pylab.xticks(pylab.arange(len(spnames)), spnames)
    pylab.subplots_adjust(bottom=0.35)
    pylab.title(str(len(all_cogs)) + " COGs")
    pylab.setp(labels[1], 'rotation', 90, fontsize=10, horizontalalignment='center')

    pylab.subplot(1, 2, 2)
    # NOTE(review): max(size2cogs.values()) compares *lists* of COGs, not
    # counts — the title probably intends the largest COG size; kept as-is.
    pylab.title("Best COG contains " + str(max(size2cogs.values())) + " species" )
    pylab.bar(list(range(1, 216)), [len(size2cogs.get(s, [])) for s in range(1, 216)])
    pylab.show()
def cog_info(candidates, sp2hits):
    """Return ``(min, max, median)`` per-species coverage of the COGs.

    Coverage of a species is the fraction of candidate COGs in which it
    has a hit.
    """
    sp_coverages = [hits / float(len(candidates)) for hits in sp2hits.values()]
    min_cov = _min(sp_coverages)
    # BUGFIX: was `_min`, which made max_cov a duplicate of min_cov.
    max_cov = _max(sp_coverages)
    median_cov = _median(sp_coverages)
    return min_cov, max_cov, median_cov
def get_cog_score(candidates, sp2hits, max_cogs, all_species):
    """Score a COG selection.

    Returns ``(score, min_cov, max_cov, median_cov, cov_std, cog_cov)``
    where coverage of a species is the fraction of candidate COGs in
    which it has a hit and ``score`` is the minimum of the normalised
    COG count, the mean COG species coverage and the worst coverage.
    """
    # Average COG size normalised by the number of species (+1 for the seed).
    cog_cov = _mean([len(cogs) for cogs in candidates]) / float(len(sp2hits) + 1)
    # Mean number of species present in each COG.
    cog_mean_cov = _mean([len(cogs) / float(len(sp2hits)) for cogs in candidates])

    sp_coverages = [sp2hits.get(sp, 0) / float(len(candidates)) for sp in all_species]

    nfactor = len(candidates) / float(max_cogs)  # number of COGs, relative to the best seed
    min_cov = _min(sp_coverages)  # coverage of the worst species
    # BUGFIX: was `_min`, which made max_cov a duplicate of min_cov.
    max_cov = _max(sp_coverages)
    median_cov = _median(sp_coverages)
    cov_std = _std(sp_coverages)

    score = _min([nfactor, cog_mean_cov, min_cov])
    return score, min_cov, max_cov, median_cov, cov_std, cog_cov
|
fmaguire/ete
|
ete3/tools/phylobuild_lib/task/cog_creator.py
|
Python
|
gpl-3.0
| 22,224
|
[
"BLAST"
] |
1ae7229992e0e8a06b7b7fa05ba87ee570befe99c36e909e5b866aa519c5b7cf
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
"""Remove the given file(s) from the DIRAC File Catalog and from storage.

LFNs are taken from the positional arguments; any argument naming an
existing local file is instead read as a newline-separated list of LFNs.
Removals are issued in chunks of 100 and failures are summarised by reason.
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Remove the given file or a list of files from the File Catalog and from the storage
Usage:
%s <LFN | fileContainingLFNs>
""" % Script.scriptName )
Script.parseCommandLine()
import sys, os
import DIRAC
from DIRAC import gLogger
args = Script.getPositionalArgs()
lfns = []
for inputFileName in args:
  # An argument that names an existing file is treated as a list of LFNs
  # (one per line); anything else is taken to be an LFN itself.
  if os.path.exists( inputFileName ):
    inputFile = open( inputFileName, 'r' )
    string = inputFile.read()
    inputFile.close()
    lfns.extend( [ lfn.strip() for lfn in string.splitlines() ] )
  else:
    lfns.append( inputFileName )
from DIRAC.Core.Utilities.List import sortList, breakListIntoChunks
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
errorReasons = {}
successfullyRemoved = 0
# Remove in chunks of 100 so individual service calls stay bounded.
for lfnList in breakListIntoChunks( lfns, 100 ):
  res = dm.removeFile( lfnList )
  # A failed service call (as opposed to per-file failures) aborts the run.
  if not res['OK']:
    gLogger.error( "Failed to remove data", res['Message'] )
    DIRAC.exit( -2 )
  # Group failed LFNs by their textual failure reason for the summary.
  for lfn, r in res['Value']['Failed'].items():
    reason = str( r )
    if not reason in errorReasons.keys():
      errorReasons[reason] = []
    errorReasons[reason].append( lfn )
  successfullyRemoved += len( res['Value']['Successful'].keys() )
for reason, lfns in errorReasons.items():
  gLogger.notice( "Failed to remove %d files with error: %s" % ( len( lfns ), reason ) )
gLogger.notice( "Successfully removed %d files" % successfullyRemoved )
DIRAC.exit( 0 )
|
sposs/DIRAC
|
DataManagementSystem/scripts/dirac-dms-remove-files.py
|
Python
|
gpl-3.0
| 1,664
|
[
"DIRAC"
] |
758b742b296f9465af9a18e72bc06c344be0e2a6774ebc3c61dba167b7e0ec4f
|
"""Tests for the thumbs module"""
import pytest
from bok_choy.promise import EmptyPromise
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
class ThreeThumbsTest(SeleniumTest):
    """Test the functionalities of the three thumbs test XBlock."""

    def setUp(self):
        """Register the three-thumbs scenario and open the workbench home page."""
        super().setUp()
        scenarios.add_xml_scenario(
            "test_three_file_thumbs", "three file thumbs test",
            """<vertical_demo><filethumbs/><filethumbs/><filethumbs/></vertical_demo>"""
        )
        self.addCleanup(scenarios.remove_scenario, "test_three_file_thumbs")

        # Suzy opens the browser to visit the workbench
        self.browser.get(self.live_server_url)

        # She knows it's the site by the header
        header1 = self.browser.find_element_by_css_selector('h1')
        self.assertEqual(header1.text, 'XBlock scenarios')

    @pytest.mark.flaky(reruns=5, reruns_delay=2)
    def test_three_thumbs_initial_state(self):
        """Up- and down-vote each of the three thumbs blocks and verify counters."""
        # She clicks on the three thumbs at once scenario
        link = self.browser.find_element_by_link_text('three file thumbs test')
        link.click()
        self.wait_for_page_load(link, timeout=10)

        # The header reflects the XBlock
        header1 = self.browser.find_element_by_css_selector('h1')
        self.assertEqual(header1.text, 'XBlock: three file thumbs test')

        # She sees that there are 3 sets of thumbs
        vertical_css = 'div.student_view > div.xblock-v1 > div.vertical'
        # The following will give a NoSuchElementException error
        # if it is not there
        vertical = self.browser.find_element_by_css_selector(vertical_css)

        # Make sure there are three thumbs blocks
        thumb_css = 'div.xblock-v1[data-block-type="filethumbs"]'
        thumbs = vertical.find_elements_by_css_selector(thumb_css)
        self.assertEqual(3, len(thumbs))

        # Make sure they all have 0 for upvote and downvote counts
        up_count_css = 'span.upvote span.count'
        down_count_css = 'span.downvote span.count'
        for thumb in thumbs:
            # pylint: disable=cell-var-from-loop
            # The lambdas below close over the loop variable `thumb`, but
            # each promise is fulfilled within the same iteration, so the
            # late binding is harmless here.
            up_count = thumb.find_element_by_css_selector(up_count_css)
            down_count = thumb.find_element_by_css_selector(down_count_css)
            initial_up = int(up_count.text)
            initial_down = int(down_count.text)

            # upvote: count goes up by one, downvote count is untouched
            thumb.find_element_by_css_selector('span.upvote').click()
            _ = EmptyPromise(
                lambda: int(thumb.find_element_by_css_selector(up_count_css).text) == initial_up + 1,
                "upvote action succeeded"
            ).fulfill()
            self.assertEqual(initial_down, int(thumb.find_element_by_css_selector(down_count_css).text))

            # downvote: count goes up by one, upvote count keeps its new value
            thumb.find_element_by_css_selector('span.downvote').click()
            _ = EmptyPromise(
                lambda: int(thumb.find_element_by_css_selector(down_count_css).text) == initial_down + 1,
                "downvote action succeeded"
            ).fulfill()
            self.assertEqual(initial_up + 1, int(thumb.find_element_by_css_selector(up_count_css).text))
|
edx/xblock-sdk
|
workbench/test/test_filethumbs.py
|
Python
|
apache-2.0
| 3,181
|
[
"VisIt"
] |
778b107ff4f79cebb9c1d8e17d5198beec365fcd43cf35b39932741bba2a3473
|
############################################
# Daniel Dixey
# Datagenic - Interest in Sentiment Analysis
# 28/4/15
############################################
# Import Modules
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
import time
from pattern.web import Twitter
def Harry_Potter_Review():
    """Run NLTK Naive Bayes sentiment analysis over a sample book review.

    Prints each sentence followed by its sentiment label and returns the
    elapsed wall-clock time in seconds.
    """
    # Start Stop Watch
    t1 = time.time()
    # Demo Text
    text = '''
    Harry Potter and the Philosopher's stone is a book about a boy called Harry and when he is a baby something terrible happened to his parents. This very evil wizard called Voldemort killed his mum and dad however he tried to kill Harry but somehow he could not. Therefore Harry had to go to live with his aunt and uncle. Eleven years later he had a letter saying he is invited to go to Hogwarts. Harry travelled there on a scarlet red steam engine. At Hogwarts Harry Ron and Hermione, Harry's friends, were caught out of school and their punishment was to collect unicorn blood from the dark woods. Then Harry and his friends go on a big adventure!\n\n
    I think the book is very exciting! My favourite part is when Harry and his friends go on a very exciting adventure!\n\n
    I think this book is suitable for eight and above. Eleven out of eleven people from Bancffosfelen school said they loved the book! My mark out of ten is nine!\n\n
    '''
    # Process the Text using the NLTK through Textblob
    blob = TextBlob(text)
    # Iterate through each sentence and print its sentiment.
    for sentence in blob.sentences:
        print(sentence)
        # BUGFIX: the original wrote `print('...%s') % (...)`, a Python 2
        # print-statement leftover that applies `%` to print()'s return
        # value (None) and raises TypeError on Python 3.  Format the
        # string before printing instead.
        print('Using Naive Bayes - The Sentence Above is: %s' %
              TextBlob(sentence.string,
                       analyzer=NaiveBayesAnalyzer()).sentiment[0].upper())
    return time.time() - t1
def Pattern_Module_Twitter_Stream():
    """Poll the pattern.web Twitter stream for '#Fail' and print sentiment.

    Performs 10 polling rounds, printing the newest tweet and its NaiveBayes
    sentiment classification, sleeping 3 seconds between rounds to avoid
    being rate-limited.

    Returns
    -------
    float
        Elapsed wall-clock time in seconds.
    """
    # Start Stop Watch
    t1 = time.time()
    # Open the live stream; pattern's Stream object accumulates tweets as a
    # list-like container when update() is called.
    stream = Twitter().stream('#Fail')
    # For 10 Instances
    for _ in range(10):
        # BUG FIX: the original did `value = stream.update(bytes=1024)` and
        # then len(value) / List.append() (append() takes an argument) and
        # print('...%s') % x — all of which raise TypeError.  update() stores
        # new tweets on the stream itself, so inspect the stream instead.
        stream.update(bytes=1024)
        if stream:
            tweet = stream[-1]  # most recently received tweet
            print('Tweet: %s' % tweet.text)
            # Get Sentiment
            print('Sentiment Analysis of Tweet: %s' %
                  TextBlob(str(tweet.text),
                           analyzer=NaiveBayesAnalyzer()).sentiment[0].upper())
        # Wait 3 Seconds between queries - Do not want to get blocked
        time.sleep(3)
    return time.time() - t1
# Script entry point.
if __name__ == "__main__":
    # Execute the Harry Potter Function
    timeF = Harry_Potter_Review()
    # BUG FIX: print('...%.3f') % x applied % to print()'s return value
    # (None), raising TypeError; format inside the call instead.
    print('Time to Complete: %.3f Seconds' % timeF)
    # Connect and Retrieve a Twitter Stream
    timeF1 = Pattern_Module_Twitter_Stream()
    print('Time to Complete: %.3f Seconds' % timeF1)
|
dandxy89/ExperiKeras
|
NLP/Sentiment_Harry_Potter_Review.py
|
Python
|
mit
| 3,129
|
[
"exciting"
] |
95f3746c3f9cce72b18085247181d8a853b51010f766f6d724cd17c58ebf6c68
|
__author__ = 'noe'
import numpy as _np
def _guess_model_type(observations):
    """ Suggests a HMM model type based on the observation data

    Uses simple rules in order to decide which HMM model type makes sense based on observation data.
    If observations consist of arrays/lists of integer numbers (irrespective of whether the python type is
    int or float), our guess is 'discrete'.
    If observations consist of arrays/lists of 1D-floats, our guess is 'gaussian'.
    In any other case, a TypeError is raised because we are not supporting that data type yet.

    Parameters
    ----------
    observations : list of lists or arrays
        observation trajectories

    Returns
    -------
    model_type : str
        One of {'discrete', 'gaussian'}

    """
    from bhmm.util import types as _types
    o1 = _np.array(observations[0])
    # CASE: vector of int? Then we want a discrete HMM
    if _types.is_int_vector(o1):
        return 'discrete'
    # CASE: not int type, but everything is an integral number. Then we also go for discrete.
    # The first trajectory was already converted; check the remaining ones too.
    if _np.allclose(o1, _np.round(o1)):
        isintegral = True
        for i in range(1, len(observations)):
            if not _np.allclose(observations[i], _np.round(observations[i])):
                isintegral = False
                break
        if isintegral:
            return 'discrete'
    # CASE: vector of double? Then we want a gaussian
    if _types.is_float_vector(o1):
        return 'gaussian'
    # None of the above? Then we currently do not support this format!
    # BUG FIX: the two docstring/message defects — the docstring claimed
    # float data guesses 'discrete', and the implicit string concatenation
    # below was missing a space ("versiondoes").
    raise TypeError('Observations is neither sequences of integers nor 1D-sequences of floats. The current version '
                    'does not support your input.')
def _lag_observations(observations, lag):
""" Create new trajectories that are subsampled at lag but shifted
Given a trajectory (s0, s1, s2, s3, s4, ...) and lag 3, this function will generate 3 trajectories
(s0, s3, s6, ...), (s1, s4, s7, ...) and (s2, s5, s8, ...). Use this function in order to parametrize a MLE
at lag times larger than 1 without discarding data. Do not use this function for Bayesian estimators, where
data must be given such that subsequent transitions are uncorrelated.
"""
obsnew = []
for obs in observations:
for shift in range(0, lag):
obsnew.append(obs[shift:][::lag])
return obsnew
def init_hmm(observations, nstates, lag=1, type=None):
    """Use a heuristic scheme to generate an initial model.

    Parameters
    ----------
    observations : list of ndarray((T_i))
        list of arrays of length T_i with observation data
    nstates : int
        The number of states.
    lag : int, optional, default=1
        lag time at which the discrete initial model is parametrized
    type : str, optional, default=None
        Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output
        model type based on the format of observations.

    Examples
    --------
    Generate initial model for a gaussian output model.

    >>> import bhmm
    >>> [model, observations, states] = bhmm.testsystems.generate_synthetic_observations(output_model_type='gaussian')
    >>> initial_model = init_hmm(observations, model.nstates, type='gaussian')

    Generate initial model for a discrete output model.

    >>> import bhmm
    >>> [model, observations, states] = bhmm.testsystems.generate_synthetic_observations(output_model_type='discrete')
    >>> initial_model = init_hmm(observations, model.nstates, type='discrete')

    """
    # Fall back to a data-driven guess when the caller did not fix a type.
    model_type = _guess_model_type(observations) if type is None else type

    if model_type == 'discrete':
        from bhmm.init import discrete
        return discrete.initial_model_discrete(observations, nstates, lag=lag, reversible=True)
    if model_type == 'gaussian':
        from bhmm.init import gaussian
        return gaussian.initial_model_gaussian1d(observations, nstates, reversible=True)
    raise NotImplementedError('output model type '+str(model_type)+' not yet implemented.')
def gaussian_hmm(P, means, sigmas, pi=None, stationary=True, reversible=True):
    """ Initializes a 1D-Gaussian HMM

    Parameters
    ----------
    P : ndarray(nstates,nstates)
        Hidden transition matrix
    means : ndarray(nstates, )
        Means of Gaussian output distributions
    sigmas : ndarray(nstates, )
        Standard deviations of Gaussian output distributions
    pi : ndarray(nstates, )
        Fixed initial (if stationary=False) or fixed stationary distribution (if stationary=True).
    stationary : bool, optional, default=True
        If True: initial distribution is equal to stationary distribution of transition matrix
    reversible : bool, optional, default=True
        If True: transition matrix will fulfill detailed balance constraints.

    """
    from bhmm.hmm.gaussian_hmm import GaussianHMM
    from bhmm.hmm.generic_hmm import HMM as _HMM
    from bhmm.output_models.gaussian import GaussianOutputModel

    # The number of hidden states is dictated by the transition matrix shape.
    n_hidden = _np.array(P).shape[0]
    # Build the 1D-Gaussian emission model, then wrap everything in a generic
    # HMM and specialize it to a GaussianHMM.
    emissions = GaussianOutputModel(n_hidden, means, sigmas)
    generic = _HMM(P, emissions, Pi=pi, stationary=stationary, reversible=reversible)
    return GaussianHMM(generic)
def discrete_hmm(P, pout, pi=None, stationary=True, reversible=True):
    """ Initializes a discrete HMM

    Parameters
    ----------
    P : ndarray(nstates,nstates)
        Hidden transition matrix
    pout : ndarray(nstates,nsymbols)
        Output matrix from hidden states to observable symbols
    pi : ndarray(nstates, )
        Fixed initial (if stationary=False) or fixed stationary distribution (if stationary=True).
    stationary : bool, optional, default=True
        If True: initial distribution is equal to stationary distribution of transition matrix
    reversible : bool, optional, default=True
        If True: transition matrix will fulfill detailed balance constraints.

    """
    from bhmm.hmm.discrete_hmm import DiscreteHMM
    from bhmm.hmm.generic_hmm import HMM as _HMM
    from bhmm.output_models.discrete import DiscreteOutputModel

    # Emission probabilities come directly from the output matrix; wrap the
    # generic HMM and specialize it to a DiscreteHMM.
    emissions = DiscreteOutputModel(pout)
    generic = _HMM(P, emissions, Pi=pi, stationary=stationary, reversible=reversible)
    return DiscreteHMM(generic)
def estimate_hmm(observations, nstates, lag=1, initial_model=None, type=None,
                 reversible=True, stationary=True, p=None, accuracy=1e-3, maxit=1000):
    r""" Estimate maximum-likelihood HMM

    Generic maximum-likelihood estimation of HMMs

    Parameters
    ----------
    observations : list of numpy arrays representing temporal data
        `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i`
    nstates : int
        The number of states in the model.
    lag : int
        the lag time at which observations should be read
    initial_model : HMM, optional, default=None
        If specified, the given initial model will be used to initialize the BHMM.
        Otherwise, a heuristic scheme is used to generate an initial guess.
    type : str, optional, default=None
        Output model type from [None, 'gaussian', 'discrete']. If None, will automatically select an output
        model type based on the format of observations.
    reversible : bool, optional, default=True
        If True, a prior that enforces reversible transition matrices (detailed balance) is used;
        otherwise, a standard non-reversible prior is used.
    stationary : bool, optional, default=True
        If True, the initial distribution of hidden states is self-consistently computed as the stationary
        distribution of the transition matrix. If False, it will be estimated from the starting states.
    p : ndarray (nstates), optional, default=None
        Initial or fixed stationary distribution. If given and stationary=True, transition matrices will be
        estimated with the constraint that they have p as their stationary distribution. If given and
        stationary=False, p is the fixed initial distribution of hidden states.
    accuracy : float
        convergence threshold for EM iteration. When the likelihood does not increase by more than accuracy, the
        iteration is stopped successfully.
    maxit : int
        stopping criterion for EM iteration. When so many iterations are performed without reaching the requested
        accuracy, the iteration is stopped without convergence (a warning is given)

    Return
    ------
    hmm : :class:`HMM <bhmm.hmm.generic_hmm.HMM>`

    """
    # Determine the output model type from the data unless the caller fixed one.
    model_type = _guess_model_type(observations) if type is None else type

    # For lag times beyond one, subsample-and-shift the trajectories so no
    # data is discarded (see _lag_observations).
    data = observations if lag <= 1 else _lag_observations(observations, lag)

    # construct estimator and run EM
    from bhmm.estimators.maximum_likelihood import MaximumLikelihoodEstimator as _MaximumLikelihoodEstimator
    estimator = _MaximumLikelihoodEstimator(data, nstates, initial_model=initial_model, type=model_type,
                                            reversible=reversible, stationary=stationary, p=p,
                                            accuracy=accuracy, maxit=maxit)
    estimator.fit()

    # record the estimation lag on the fitted model before handing it back
    estimator.hmm._lag = lag
    return estimator.hmm
def bayesian_hmm(observations, estimated_hmm, nsample=100, transition_matrix_prior=None,
                 store_hidden=False, call_back=None):
    r""" Bayesian HMM based on sampling the posterior

    Generic maximum-likelihood estimation of HMMs

    Parameters
    ----------
    observations : list of numpy arrays representing temporal data
        `observations[i]` is a 1d numpy array corresponding to the observed trajectory index `i`
    estimated_hmm : HMM
        HMM estimated from estimate_hmm or initialize_hmm
    nsample : int, optional, default=100
        number of Gibbs sampling steps
    transition_matrix_prior : str or ndarray(n,n)
        prior count matrix to be used for transition matrix sampling, or a keyword specifying the prior mode
        |  None (default),  -1 prior is used that ensures consistency between mean and MLE. Can lead to sampling
            disconnected matrices in the low-data regime. If you have disconnectivity problems, consider
            using 'init-connect'
        |  'init-connect',  prior count matrix ensuring the same connectivity as in the initial model. 1 count
            is added to all diagonals. All off-diagonals share one prior count distributed proportional to
            the row of the initial transition matrix.
    store_hidden : bool, optional, default=False
        store hidden trajectories in sampled HMMs
    call_back : function, optional, default=None
        a call back function with no arguments, which if given is being called
        after each computed sample. This is useful for implementing progress bars.

    Return
    ------
    hmm : :class:`SampledHMM <bhmm.hmm.generic_sampled_hmm.SampledHMM>`

    """
    from bhmm.estimators.bayesian_sampling import BayesianHMMSampler as _BHMM
    from bhmm.hmm.generic_sampled_hmm import SampledHMM

    # Seed the Gibbs sampler with the maximum-likelihood model and mirror
    # its reversibility and output-model settings.
    sampler = _BHMM(observations, estimated_hmm.nstates, initial_model=estimated_hmm,
                    reversible=estimated_hmm.is_reversible, transition_matrix_sampling_steps=1000,
                    transition_matrix_prior=transition_matrix_prior,
                    type=estimated_hmm.output_model.model_type)

    # Draw the requested number of posterior samples.
    posterior_samples = sampler.sample(nsamples=nsample, save_hidden_state_trajectory=store_hidden,
                                       call_back=call_back)

    return SampledHMM(estimated_hmm, posterior_samples)
|
marscher/bhmm
|
bhmm/api.py
|
Python
|
lgpl-3.0
| 11,746
|
[
"Gaussian"
] |
261f129e4505d4efa60346cfb076a34ad84192373f4048b44549ab3064796959
|
#!/usr/bin/python
################################################################################
#
# SOAP.py 0.9.7 - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
#
# INCLUDED:
# - General SOAP Parser based on sax.xml (requires Python 2.0)
# - General SOAP Builder
# - SOAP Proxy for RPC client code
# - SOAP Server framework for RPC server code
#
# FEATURES:
# - Handles all of the types in the BDG
# - Handles faults
# - Allows namespace specification
# - Allows SOAPAction specification
# - Homogeneous typed arrays
# - Supports multiple schemas
# - Header support (mustUnderstand and actor)
# - XML attribute support
# - Multi-referencing support (Parser/Builder)
# - Understands SOAP-ENC:root attribute
# - Good interop, passes all client tests for Frontier, SOAP::LITE, SOAPRMI
# - Encodings
# - SSL clients (with OpenSSL configured in to Python)
# - SSL servers (with OpenSSL configured in to Python and M2Crypto installed)
#
# TODO:
# - Timeout on method calls - MCU
# - Arrays (sparse, multidimensional and partial) - BLM
# - Clean up data types - BLM
# - Type coercion system (Builder) - MCU
# - Early WSDL Support - MCU
# - Attachments - BLM
# - setup.py - MCU
# - mod_python example - MCU
# - medusa example - MCU
# - Documentation - JAG
# - Look at performance
#
################################################################################
#
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Additional changes:
# 0.9.7.3 - 4/18/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# added dump_dict as alias for dump_dictionary for Python 2.2 compatibility
# 0.9.7.2 - 4/12/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# fixed logic to unmarshal the value of "null" attributes ("true" or "1"
# means true, others false)
# 0.9.7.1 - 4/11/2002 - Mark Pilgrim (f8dy@diveintomark.org)
# added "dump_str" as alias for "dump_string" for Python 2.2 compatibility
# Between 2.1 and 2.2, type("").__name__ changed from "string" to "str"
################################################################################
import xml.sax
import UserList
import base64
import cgi
import urllib
import exceptions
import copy
import re
import socket
import string
import sys
import time
import SocketServer
from types import *
try: from M2Crypto import SSL
except: pass
ident = '$Id: SOAP.py,v 1.1.1.1 2005/09/29 21:38:51 mikem Exp $'
__version__ = "0.9.7.3"
# Platform hackery

# Check float support
try:
    # good_float == 1 iff this platform's float() accepts the IEEE-754
    # special values "NaN"/"INF"/"-INF"; some Python builds raise here.
    float("NaN")
    float("INF")
    float("-INF")
    good_float = 1
except:
    good_float = 0
################################################################################
# Exceptions
################################################################################
class Error(exceptions.Exception):
    """Base exception for this SOAP module; carries a human-readable message."""
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        return "<Error : %s>" % self.msg
    __repr__ = __str__  # repr intentionally identical to str
class RecursionError(Error):
    """Error subclass used to distinguish recursion failures (raised elsewhere in this file)."""
    pass
class UnknownTypeError(Error):
    """Error subclass used to distinguish unknown-type failures (raised elsewhere in this file)."""
    pass
class HTTPError(Error):
    # indicates an HTTP protocol error
    def __init__(self, code, msg):
        # code: HTTP status code; msg: accompanying message/reason phrase.
        self.code = code
        self.msg = msg
    def __str__(self):
        return "<HTTPError %s %s>" % (self.code, self.msg)
    __repr__ = __str__  # repr intentionally identical to str
##############################################################################
# Namespace Class
################################################################################
def invertDict(dict):
    """Return a new dictionary mapping each value of *dict* back to its key."""
    inverted = {}
    for key in dict.keys():
        inverted[dict[key]] = key
    return inverted
class NS:
    """Well-known XML/SOAP namespace URIs and the prefixes used for them."""
    XML = "http://www.w3.org/XML/1998/namespace"
    ENV = "http://schemas.xmlsoap.org/soap/envelope/"
    ENC = "http://schemas.xmlsoap.org/soap/encoding/"

    # XML Schema datatype namespaces: 1999 and 2000/10 drafts plus 2001 final.
    XSD = "http://www.w3.org/1999/XMLSchema"
    XSD2 = "http://www.w3.org/2000/10/XMLSchema"
    XSD3 = "http://www.w3.org/2001/XMLSchema"

    XSD_L = [XSD, XSD2, XSD3]
    EXSD_L= [ENC, XSD, XSD2, XSD3]

    XSI = "http://www.w3.org/1999/XMLSchema-instance"
    XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
    XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
    XSI_L = [XSI, XSI2, XSI3]

    URN = "http://soapinterop.org/xsd"

    # For generated messages
    XML_T = "xml"
    ENV_T = "SOAP-ENV"
    ENC_T = "SOAP-ENC"
    XSD_T = "xsd"
    XSD2_T= "xsd2"
    XSD3_T= "xsd3"
    XSI_T = "xsi"
    XSI2_T= "xsi2"
    XSI3_T= "xsi3"
    URN_T = "urn"

    # prefix -> URI map, and its value->key inverse.
    NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
             XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
             URN_T: URN}
    NSMAP_R = invertDict(NSMAP)

    # schema-year -> (xsd prefix, xsi prefix), and its inverse.
    STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
             '2001': (XSD3_T, XSI3_T)}
    STMAP_R = invertDict(STMAP)

    def __init__(self):
        # NS is a pure constant holder; constructing an instance is an error.
        raise Error, "Don't instantiate this"
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
    """Configuration settings for SOAP clients/servers.

    Attributes are set freely via __setattr__; a few names ('debug',
    'namespaceStyle', the *Namespace* family) fan out to several related
    settings at once.  'SSLserver'/'SSLclient' are read-only and reflect
    whether SSL support was detected at import time.
    """
    __readonly = ('SSLserver', 'SSLclient')

    def __init__(self, config = None, **kw):
        # Copy from an existing SOAPConfig if given, otherwise install
        # defaults; keyword arguments override either way.
        d = self.__dict__

        if config:
            if not isinstance(config, SOAPConfig):
                raise AttributeError, \
                    "initializer must be SOAPConfig instance"

            s = config.__dict__

            for k, v in s.items():
                if k[0] != '_':
                    d[k] = v
        else:
            # Setting debug also sets returnFaultInfo, dumpFaultInfo,
            # dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
            self.debug = 0

            # Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
            # schemaNamespace, and schemaNamespaceURI
            self.namespaceStyle = '1999'

            self.strictNamespaces = 0
            self.typed = 1
            self.buildWithNamespacePrefix = 1
            self.returnAllAttrs = 0

            # Probe for server-side (M2Crypto SSL) and client-side
            # (socket.ssl) SSL support; write directly into __dict__ to
            # bypass the read-only check in __setattr__.
            try: SSL; d['SSLserver'] = 1
            except: d['SSLserver'] = 0

            try: socket.ssl; d['SSLclient'] = 1
            except: d['SSLclient'] = 0

        for k, v in kw.items():
            if k[0] != '_':
                setattr(self, k, v)

    def __setattr__(self, name, value):
        # Enforce read-only names and expand the linked setting groups.
        if name in self.__readonly:
            raise AttributeError, "readonly configuration setting"

        d = self.__dict__

        if name in ('typesNamespace', 'typesNamespaceURI',
                    'schemaNamespace', 'schemaNamespaceURI'):

            # Accept either a prefix, a URI, or a (prefix, URI) pair and
            # keep the prefix and URI attributes consistent with each other.
            if name[-3:] == 'URI':
                base, uri = name[:-3], 1
            else:
                base, uri = name, 0

            if type(value) == StringType:
                if NS.NSMAP.has_key(value):
                    n = (value, NS.NSMAP[value])
                elif NS.NSMAP_R.has_key(value):
                    n = (NS.NSMAP_R[value], value)
                else:
                    raise AttributeError, "unknown namespace"
            elif type(value) in (ListType, TupleType):
                if uri:
                    n = (value[1], value[0])
                else:
                    n = (value[0], value[1])
            else:
                raise AttributeError, "unknown namespace type"

            d[base], d[base + 'URI'] = n

            # Re-derive namespaceStyle from the (types, schema) pair;
            # blank it out when the combination matches no known style.
            try:
                d['namespaceStyle'] = \
                    NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
            except:
                d['namespaceStyle'] = ''

        elif name == 'namespaceStyle':
            value = str(value)

            if not NS.STMAP.has_key(value):
                raise AttributeError, "unknown namespace style"

            d[name] = value
            n = d['typesNamespace'] = NS.STMAP[value][0]
            d['typesNamespaceURI'] = NS.NSMAP[n]
            n = d['schemaNamespace'] = NS.STMAP[value][1]
            d['schemaNamespaceURI'] = NS.NSMAP[n]

        elif name == 'debug':
            # debug fans out to all of the dump/return flags at once.
            d[name] = \
                d['returnFaultInfo'] = \
                d['dumpFaultInfo'] = \
                d['dumpHeadersIn'] = \
                d['dumpHeadersOut'] = \
                d['dumpSOAPIn'] = \
                d['dumpSOAPOut'] = value

        else:
            d[name] = value
Config = SOAPConfig()
################################################################################
# Types and Wrappers
################################################################################
class anyType:
    """Base class for all SOAP/XSD wrapper types.

    Subclasses override _checkValueSpace (validate/normalize the wrapped
    value) and _marshalData (serialize it); the marshalled type name is the
    class name with the trailing "Type" stripped (see _typeName).
    """
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"

        # name may be a bare name or a (namespace, name) pair.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns, self._name = self._validURIs[0], name
        self._typed = typed        # whether to emit type information
        self._attrs = {}           # XML attributes, keyed by (ns, name)

        self._cache = None         # memoized marshalled form (subclasses)
        self._type = self._typeName()

        self._data = self._checkValueSpace(data)

        if attrs != None:
            self._setAttrs(attrs)

    def __str__(self):
        if self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))

    __repr__ = __str__

    def _checkValueSpace(self, data):
        # Default: accept any value unchanged; subclasses validate here.
        return data

    def _marshalData(self):
        # Default serialization; subclasses override for typed formats.
        return str(self._data)

    def _marshalAttrs(self, ns_map, builder):
        # Render the stored attributes as ' prefix:name="escaped value"'.
        a = ''

        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                 (ns, attr[1], cgi.escape(str(value), 1))

        return a

    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"

        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"

        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"

        return attr

    def _getAttr(self, attr):
        # Return the attribute's value, or None if unset.
        attr = self._fixAttr(attr)

        try:
            return self._attrs[attr]
        except:
            return None

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)

        self._attrs[attr] = str(value)

    def _setAttrs(self, attrs):
        # Accept a flat [key, value, key, value, ...] sequence, a dict,
        # or another anyType instance to copy attributes from.
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])

            return

        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"

        for attr, value in d.items():
            self._setAttr(attr, value)

    def _setMustUnderstand(self, val):
        self._setAttr((NS.ENV, "mustUnderstand"), val)

    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))

    def _setActor(self, val):
        self._setAttr((NS.ENV, "actor"), val)

    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))

    def _typeName(self):
        # Class naming convention: fooType wraps XSD type "foo".
        return self.__class__.__name__[:-4]

    def _validNamespaceURI(self, URI, strict):
        if not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
              "not a valid namespace for type %s" % self._type
class voidType(anyType):
    """Wrapper for an empty/void value; inherits all behavior from anyType."""
    pass
class stringType(anyType):
    """XSD string: requires a non-None str/unicode value."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        return data
class untypedType(stringType):
    """A string marshalled without type information (typed=0)."""
    def __init__(self, data = None, name = None, attrs = None):
        stringType.__init__(self, data, name, 0, attrs)
# Simple XSD string-derived types: they share stringType's value space and
# differ only in the marshalled type name (class name minus "Type").
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    """XSD token: a string with no tabs/newlines, leading/trailing spaces."""
    _validURIs = (NS.XSD2, NS.XSD3)
    # Stored as a pattern string; compiled lazily on first use below.
    __invalidre = '[\n\t]|^ | $| '

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the class-level pattern once, on first validation.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

            if self.__invalidre.search(data):
                raise ValueError, "invalid %s value" % self._type

        return data
class normalizedStringType(anyType):
    """XSD normalizedString: a string with no newline/carriage-return/tab."""
    _validURIs = (NS.XSD3,)
    # Stored as a pattern string; compiled lazily on first use below.
    __invalidre = '[\n\r\t]'

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the class-level pattern once, on first validation.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

            if self.__invalidre.search(data):
                raise ValueError, "invalid %s value" % self._type

        return data
class CDATAType(normalizedStringType):
    """2000/10 schema alias for normalizedString (named CDATA there)."""
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """XSD boolean: stores 0 or 1; marshals as 'false'/'true'."""
    def __int__(self):
        return self._data

    __nonzero__ = __int__  # truth value mirrors the stored 0/1

    def _marshalData(self):
        return ['false', 'true'][self._data]

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Accept the usual textual/numeric spellings of false and true.
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1
        raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
    """XSD decimal: any non-None int/long/float value."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (IntType, LongType, FloatType):
            raise Error, "invalid %s value" % self._type

        return data
class floatType(anyType):
    """XSD float: numeric value bounded by the IEEE single-precision range."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Bounds are +/- the largest finite IEEE-754 single-precision value.
        if type(data) not in (IntType, LongType, FloatType) or \
           data < -3.4028234663852886E+38 or \
           data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value" % self._type

        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class doubleType(anyType):
    """XSD double: numeric value bounded by the IEEE double-precision range."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Bounds approximate +/- the largest finite IEEE-754 double value.
        if type(data) not in (IntType, LongType, FloatType) or \
           data < -1.7976931348623158E+308 or \
           data > 1.7976931348623157E+308:
            raise ValueError, "invalid %s value" % self._type

        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class durationType(anyType):
    """XSD duration: up to six (Y, M, D, H, M, S) components, marshalled
    in ISO-8601 'PnYnMnDTnHnMnS' form."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        # Normalize to a 6-tuple of numbers, validating sign and fraction
        # placement; remembers the first-nonzero and fractional component
        # indices for _marshalData.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        try:
            # A tuple or a scalar is OK, but make them into a list

            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]

            if len(data) > 6:
                raise Exception, "too many values"

            # Now check the types of all the components, and find
            # the first nonzero element along the way.

            f = -1

            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue

                if type(data[i]) not in \
                   (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i

                if data[i] and f == -1:
                    f = i

            # If they're all 0, just use zero seconds.

            if f == -1:
                self._cache = 'PT0S'

                return (0,) * 6

            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.

            d = -1

            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i

            # Pad the list on the left if necessary.

            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data

            # Save index of the first nonzero element and the decimal
            # element for _marshalData.

            self.__firstnonzero = f
            self.__decimal = d

        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) the ISO-8601 duration string, inserting the
        # 'T' separator before the first time-valued component.
        if self._cache == None:
            d = self._data
            t = 0

            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'

            t = 0

            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]

            self._cache = s

        return self._cache
class timeDurationType(durationType):
    """1999/2000-schema name for what the 2001 schema calls 'duration'."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """XSD dateTime: stored as a (Y, M, D, h, m, s) tuple in UTC, marshalled
    as 'YYYY-MM-DDThh:mm:ss[.fff]Z'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        # Accept None (now), an epoch int/long/float, or a 6..9-element
        # date sequence; normalize everything to a 6-tuple.
        try:
            if data == None:
                data = time.time()

            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Keep the fractional seconds that gmtime would drop.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"

                data = list(data[:6])

                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) the ISO-8601 representation; negative years
        # get a leading '-', fractional seconds keep only '.fff'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'

            self._cache = s

        return self._cache
class recurringInstantType(anyType):
    """XSD (1999) recurringInstant: a dateTime where leading components may
    be omitted (None), marshalled with '-' placeholders."""
    _validURIs = (NS.XSD,)

    def _checkValueSpace(self, data):
        # Accept None (now), an epoch number, or a 1..9-element sequence;
        # leading elements may be None to mark them as recurring/omitted.
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])

            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Keep the fractional seconds that gmtime would drop.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"

                data = list(data[:6])

                if len(data) < 6:
                    data += [0] * (6 - len(data))

                # Find the first non-None element; only leftmost elements
                # are allowed to be None.
                f = len(data)

                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                        else:
                            f = i
                            break

                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) the textual form; omitted components render
        # as '-' and any negative component makes the whole value negative.
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''

            if e[0] < 0:
                neg = '-'
                e[0] = abs(e[0])

            if not e[0]:
                e[0] = '--'
            elif e[0] < 100:
                e[0] = '-' + "%02d" % e[0]
            else:
                e[0] = "%04d" % e[0]

            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])

                    e[i] = "%02d" % e[i]

            if d[5]:
                f = abs(d[5] - int(d[5]))

                if f:
                    e[5] += ("%g" % f)[1:]

            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))

            self._cache = s

        return self._cache
class timeInstantType(dateTimeType):
    """1999-schema name for dateTime."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    """2000/10-schema timePeriod; same value space as dateTime."""
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    """XSD time: stored as an (h, m, s) tuple in UTC, marshalled as
    'hh:mm:ss[.fff]Z'."""
    def _checkValueSpace(self, data):
        # Accept None (now), an epoch number, or a 0..3-element (or full
        # 9-element struct_time) sequence; normalize to (h, m, s).
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                # Keep the fractional seconds that gmtime would drop.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception, "too many values"

                # Prepend dummy date fields so cleanDate can validate from
                # index 3 onward, then strip them again.
                data = [None, None, None] + list(data)

                if len(data) < 6:
                    data += [0] * (6 - len(data))

                cleanDate(data, 3)

                data = data[3:]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) 'hh:mm:ss', appending fractional seconds.
        if self._cache == None:
            d = self._data
            s = ''

            s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'

            self._cache = s

        return self._cache
class dateType(anyType):
    """XSD date: stored as a (Y, M, D) tuple, marshalled as 'YYYY-MM-DDZ'."""
    def _checkValueSpace(self, data):
        # Accept None (today), an epoch number, or a 0..3-element (or full
        # 9-element struct_time) sequence; normalize to (Y, M, D).
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"

                data = list(data)

                # Default missing month/day to 1, pad time fields with zeros
                # so cleanDate can validate, then keep only (Y, M, D).
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]

                data += [0, 0, 0]

                cleanDate(data)

                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) 'YYYY-MM-DDZ'; negative years get '-'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class gYearMonthType(anyType):
    """XSD gYearMonth: stored as a (Y, M) tuple, marshalled as 'YYYY-MMZ'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        # Accept None (now), an epoch number, or a 0..2-element (or full
        # 9-element struct_time) sequence; normalize to (Y, M).
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"

                data = list(data)

                # Default missing month to 1, pad day/time fields so
                # cleanDate can validate, then keep only (Y, M).
                if len(data) < 2:
                    data += [1, 1][len(data):]

                data += [1, 0, 0, 0]

                cleanDate(data)

                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        # Build (and memoize) 'YYYY-MMZ'; negative years get '-'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class gYearType(anyType):
    """A single year, marshalled as [-]YYYYZ."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Normalize *data* to a single integral year value."""
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float is accepted only when it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        """Serialize as %04dZ with a leading '-' for negative years."""
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1] / 100
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1] / 100
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
    """gYear under the NS.XSD2/NS.ENC namespaces."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[1:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data = [0] + data + [0, 0, 0]
cleanDate(data, 1)
data = data[1:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d-%02dZ" % self._data
return self._cache
class recurringDateType(gMonthDayType):
    """gMonthDay under the NS.XSD2/NS.ENC namespaces."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """A single month (1-12), marshalled as --MM--Z."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Normalize *data* to a single integral month in 1..12."""
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float is accepted only when it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        """Serialize as --MM--Z, caching the result."""
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    """gMonth under the NS.XSD2/NS.ENC namespaces."""
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """A single day-of-month (1-31), marshalled as ---DDZ."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Normalize *data* to a single integral day in 1..31."""
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # A float is accepted only when it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        """Serialize as ---DDZ, caching the result."""
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    """gDay under the NS.XSD2/NS.ENC namespaces."""
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """Binary data marshalled as a hex string."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Value must be supplied and must be a string.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    """Binary data marshalled in base64."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Value must be supplied and must be a string.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    """base64 binary under the SOAP encoding namespace."""
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """Binary data with a selectable 'encoding' attribute ('base64' or 'hex')."""
    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)
    def _marshalData(self):
        # Serialize using whichever encoding is currently selected.
        if self._cache == None:
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            # Only the un-namespaced 'encoding' attribute with a known
            # value is allowed.
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            # Changing the encoding invalidates any marshalled cache.
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """A URI value, URL-quoted when marshalled."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache
class uriType(anyURIType):
    """anyURI restricted to the NS.XSD namespace."""
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    """anyURI restricted to the NS.XSD2 namespace."""
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    """Abstract NOTATION type; only subclasses may be instantiated."""
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error, "a NOTATION can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """A sequence of string tokens, marshalled space-separated."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # A single string becomes a one-element tuple.
        if type(data) in (StringType, UnicodeType):
            return (data,)
        # Otherwise require a sequence whose elements are all strings.
        if type(data) not in (ListType, TupleType) or \
            filter (lambda x: type(x) not in (StringType, UnicodeType), data):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
    """An arbitrary-precision integer (Python int or long)."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType):
            raise ValueError, "invalid %s value" % self._type
        return data
class nonPositiveIntegerType(anyType):
    """An integer <= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data > 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    """Hyphenated variant valid only under NS.XSD."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        # The schema name contains hyphens, which can't appear in a
        # Python class name, so it is supplied explicitly.
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    """An integer < 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class negative_IntegerType(negativeIntegerType):
    """Hyphenated variant valid only under NS.XSD."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
    """A 64-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -9223372036854775808L or \
            data > 9223372036854775807L:
            raise ValueError, "invalid %s value" % self._type
        return data
class intType(anyType):
    """A 32-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -2147483648L or \
            data > 2147483647:
            raise ValueError, "invalid %s value" % self._type
        return data
class shortType(anyType):
    """A 16-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -32768 or \
            data > 32767:
            raise ValueError, "invalid %s value" % self._type
        return data
class byteType(anyType):
    """An 8-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -128 or \
            data > 127:
            raise ValueError, "invalid %s value" % self._type
        return data
class nonNegativeIntegerType(anyType):
    """An integer >= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    """Hyphenated variant valid only under NS.XSD."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-negative-integer'
class unsignedLongType(anyType):
    """A 64-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 18446744073709551615L:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedIntType(anyType):
    """A 32-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 4294967295L:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedShortType(anyType):
    """A 16-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 65535:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedByteType(anyType):
    """An 8-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 255:
            raise ValueError, "invalid %s value" % self._type
        return data
class positiveIntegerType(anyType):
    """An integer > 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data <= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class positive_IntegerType(positiveIntegerType):
    """Hyphenated variant valid only under NS.XSD."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
    """Abstract base for SOAP compound values (structs and arrays).

    Keeps three parallel views of its children: _aslist (in arrival
    order), _asdict (by name; repeated names collapse to a list), and
    _keyord (the name of each positional slot).  Children are also
    exposed as instance attributes.
    """
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == compoundType:
            raise Error, "a compound can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
        self._aslist = []
        self._asdict = {}
        self._keyord = []
        if type(data) == DictType:
            self.__dict__.update(data)
    def __getitem__(self, item):
        # Integer subscripts index positionally; anything else by name.
        if type(item) == IntType:
            return self._aslist[item]
        return getattr(self, item)
    def __len__(self):
        return len(self._aslist)
    def __nonzero__(self):
        # A compound is always truthy, even when empty.
        return 1
    def _keys(self):
        # Public child names (internal attributes start with '_').
        return filter(lambda x: x[0] != '_', self.__dict__.keys())
    def _addItem(self, name, value, attrs = None):
        # Add a child; repeated names accumulate into a list.
        d = self._asdict
        if d.has_key(name):
            if type(d[name]) != ListType:
                d[name] = [d[name]]
            d[name].append(value)
        else:
            d[name] = value
        self._keyord.append(name)
        self._aslist.append(value)
        self.__dict__[name] = d[name]
    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        # Overwrite a previously-added child in place (used when an
        # href reference is resolved after the fact).
        d = self._asdict
        if subpos == 0 and type(d[name]) != ListType:
            d[name] = value
        else:
            d[name][subpos] = value
        self._keyord[pos] = name
        self._aslist[pos] = value
        self.__dict__[name] = d[name]
    def _getItemAsList(self, name, default = []):
        # Return the named child as a list; *default* when absent.
        try:
            d = self.__dict__[name]
        except:
            return default
        if type(d) == ListType:
            return d
        return [d]
class structType(compoundType):
    """A generic SOAP struct."""
    pass
class headerType(structType):
    """The SOAP-ENV:Header element."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """The SOAP-ENV:Body element."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
self._full = 1
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
    """An arrayType whose element type is fixed at construction."""
    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None):
        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)
        self._type = typed
class faultType(structType, Error):
    """A SOAP Fault: both a marshallable struct and a raisable Error."""
    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        if detail != None:
            self.detail = detail
        structType.__init__(self, None, 0)
    def _setDetail(self, detail = None):
        # Passing None removes any existing detail attribute.
        if detail != None:
            self.detail = detail
        else:
            try: del self.detail
            except AttributeError: pass
    def __repr__(self):
        return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
    __str__ = __repr__
################################################################################
class RefHolder:
    """Placeholder stored where an href points at a not-yet-seen id.

    Records the child name, the owning frame, and the positional slot so
    the real value can be placed once the id is resolved.
    """
    def __init__(self, name, frame):
        self.name = name
        self.parent = frame
        self.pos = len(frame)
        self.subpos = frame.namecounts.get(name, 0)
    def __repr__(self):
        return "<%s %s at %d>" % (self.__class__, self.name, id(self))
################################################################################
# Utility functions
################################################################################
def collapseWhiteSpace(s):
    """Collapse runs of whitespace in *s* to single spaces and strip the ends."""
    # Use a raw string so the pattern doesn't rely on '\s' happening to
    # pass through string-literal escaping unchanged.
    return re.sub(r'\s+', ' ', s).strip()
def decodeHexString(data):
    """Decode a string of hex digits into the corresponding character string.

    Leading and trailing whitespace is ignored.  Raises ValueError for
    any non-hex character (including whitespace between the digits and
    further data) and for an odd number of digits.
    """
    conv = {'0': 0x0, '1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4,
        '5': 0x5, '6': 0x6, '7': 0x7, '8': 0x8, '9': 0x9, 'a': 0xa,
        'b': 0xb, 'c': 0xc, 'd': 0xd, 'e': 0xe, 'f': 0xf, 'A': 0xa,
        'B': 0xb, 'C': 0xc, 'D': 0xd, 'E': 0xe, 'F': 0xf,}
    ws = string.whitespace
    out = ''  # renamed from 'bin', which shadowed the builtin
    i = 0
    # Skip leading whitespace.
    while i < len(data):
        if data[i] not in ws:
            break
        i += 1
    low = 0
    # Consume pairs of hex digits until whitespace or end of string.
    while i < len(data):
        c = data[i]
        if c in ws:
            break
        try:
            c = conv[c]
        except KeyError:
            raise ValueError(
                "invalid hex string character `%s'" % c)
        if low:
            out += chr(high * 16 + c)
            low = 0
        else:
            high = c
            low = 1
        i += 1
    if low:
        raise ValueError("invalid hex string length")
    # Anything left must be whitespace.
    while i < len(data):
        if data[i] not in ws:
            # Bug fix: report the offending character; the original
            # reused the stale loop variable `c` (a whitespace char or
            # converted digit) in this message.
            raise ValueError(
                "invalid hex string character `%s'" % data[i])
        i += 1
    return out
def encodeHexString(data):
    """Return the uppercase two-digit hex encoding of each character in *data*."""
    pairs = []
    for ch in data:
        pairs.append("%02X" % ord(ch))
    return ''.join(pairs)
def leapMonth(year, month):
    """Return true when *month* is February of a Gregorian leap *year*."""
    is_february = (month == 2)
    divisible_by_four = (year % 4 == 0)
    century_rule = (year % 100 != 0 or year % 400 == 0)
    return is_february and divisible_by_four and century_rule
def cleanDate(d, first = 0):
    """Validate and canonicalize the 6-element date list *d* in place.

    Elements are (year, month, day, hours, minutes, seconds); checking
    starts at index *first* (earlier fields are ignored).  Raises
    ValueError/TypeError for out-of-range or wrongly-typed fields.
    """
    ranges = (None, (1, 12), (1, 31), (0, 23), (0, 59), (0, 61))
    months = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    names = ('year', 'month', 'day', 'hours', 'minutes', 'seconds')
    if len(d) != 6:
        raise ValueError, "date must have 6 elements"
    for i in range(first, 6):
        s = d[i]
        if type(s) == FloatType:
            # Only seconds (index 5) may keep a fractional part; other
            # float fields must be integral and are converted in place.
            if i < 5:
                try:
                    s = int(s)
                except OverflowError:
                    # Only the year may overflow int; fall back to long.
                    if i > 0:
                        raise
                    s = long(s)
                if s != d[i]:
                    raise ValueError, "%s must be integral" % names[i]
                d[i] = s
        elif type(s) == LongType:
            try: s = int(s)
            except: pass
        elif type(s) != IntType:
            raise TypeError, "%s isn't a valid type" % names[i]
        if i == first and s < 0:
            # The leading field (normally the year) may be negative.
            continue
        if ranges[i] != None and \
            (s < ranges[i][0] or ranges[i][1] < s):
            raise ValueError, "%s out of range" % names[i]
    if first < 6 and d[5] >= 61:
        raise ValueError, "seconds out of range"
    if first < 2:
        # Check the day against the month length (with leap years when
        # the year field participates).
        leap = first < 1 and leapMonth(d[0], d[1])
        if d[2] > months[d[1]] + leap:
            raise ValueError, "day out of range"
class UnderflowError(exceptions.ArithmeticError):
    """Arithmetic underflow."""
    pass
def debugHeader(title):
    # Print a 72-column banner opening a debug section.
    s = '*** ' + title + ' '
    print s + ('*' * (72 - len(s)))
def debugFooter(title):
    # Close a debug section and flush so output interleaves correctly.
    print '*' * 72
    sys.stdout.flush()
################################################################################
# SOAP Parser
################################################################################
class SOAPParser(xml.sax.handler.ContentHandler):
    class Frame:
        """Parse-stack entry for one element currently being built."""
        def __init__(self, name, kind = None, attrs = {}, rules = {}):
            self.name = name
            self.kind = kind
            self.attrs = attrs
            self.rules = rules
            self.contents = []
            self.names = []
            self.namecounts = {}
            self.subattrs = []
        def append(self, name, data, attrs):
            # Record a completed child element (name, value, attributes).
            self.names.append(name)
            self.contents.append(data)
            self.subattrs.append(attrs)
            if self.namecounts.has_key(name):
                self.namecounts[name] += 1
            else:
                self.namecounts[name] = 1
        def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
            # Replace a child in place (href/id resolution).
            self.contents[pos] = value
            if attrs:
                self.attrs.update(attrs)
        def __len__(self):
            return len(self.contents)
        def __repr__(self):
            return "<%s %s at %d>" % (self.__class__, self.name, id(self))
    def __init__(self, rules = None):
        """Set up parser state; *rules* optionally maps element names to types."""
        xml.sax.handler.ContentHandler.__init__(self)
        self.body = None
        self.header = None
        self.attrs = {}
        self._data = None
        self._next = "E" # Keeping state for message validity
        self._stack = [self.Frame('SOAP')]
        # Make two dictionaries to store the prefix <-> URI mappings, and
        # initialize them with the default
        self._prem = {NS.XML_T: NS.XML}
        self._prem_r = {NS.XML: NS.XML_T}
        self._ids = {}
        self._refs = {}
        self._rules = rules
    def startElementNS(self, name, qname, attrs):
        """Validate message layout for this element and push a new Frame."""
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            name = (None, name[1][1:])
        else:
            name = tuple(name)
        # First some checking of the layout of the message
        if self._next == "E":
            if name[1] != 'Envelope':
                raise Error, "expected `SOAP-ENV:Envelope', got `%s:%s'" % \
                    (self._prem_r[name[0]], name[1])
            if name[0] != NS.ENV:
                raise faultType, ("%s:VersionMismatch" % NS.ENV_T,
                    "Don't understand version `%s' Envelope" % name[0])
            else:
                self._next = "HorB"
        elif self._next == "HorB":
            if name[0] == NS.ENV and name[1] in ("Header", "Body"):
                self._next = None
            else:
                raise Error, \
                    "expected `SOAP-ENV:Header' or `SOAP-ENV:Body', " \
                    "got `%s'" % self._prem_r[name[0]] + ':' + name[1]
        elif self._next == "B":
            if name == (NS.ENV, "Body"):
                self._next = None
            else:
                raise Error, "expected `SOAP-ENV:Body', got `%s'" % \
                    self._prem_r[name[0]] + ':' + name[1]
        elif self._next == "":
            raise Error, "expected nothing, got `%s'" % \
                self._prem_r[name[0]] + ':' + name[1]
        # Pick the conversion rules for this element: the caller-supplied
        # top-level rules for direct children of the envelope, otherwise
        # the parent frame's per-name rules.
        if len(self._stack) == 2:
            rules = self._rules
        else:
            try:
                rules = self._stack[-1].rules[name[1]]
            except:
                rules = None
        if type(rules) not in (NoneType, DictType):
            kind = rules
        else:
            # No explicit type rule; fall back to SOAP-ENC:arrayType.
            kind = attrs.get((NS.ENC, 'arrayType'))
            if kind != None:
                del attrs._attrs[(NS.ENC, 'arrayType')]
                i = kind.find(':')
                if i >= 0:
                    kind = (self._prem[kind[:i]], kind[i + 1:])
                else:
                    kind = None
        self.pushFrame(self.Frame(name[1], kind, attrs._attrs, rules))
        self._data = '' # Start accumulating
    def pushFrame(self, frame):
        # Begin a new nested element.
        self._stack.append(frame)
    def popFrame(self):
        # Finish the innermost element and return its frame.
        return self._stack.pop()
    def endElementNS(self, name, qname):
        """Convert the element just closed into a Python value.

        The while-1 block below is a first-match-wins chain of cases,
        each ending in break: href references, xsi:null/nil, the
        Header/Body/Fault elements, caller-supplied rules, arrays,
        empty structs, simple typed values, and finally a generic
        struct.
        """
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            ns, name = None, name[1][1:]
        else:
            ns, name = tuple(name)
        if self._next == "E":
            raise Error, "didn't get SOAP-ENV:Envelope"
        if self._next in ("HorB", "B"):
            raise Error, "didn't get SOAP-ENV:Body"
        cur = self.popFrame()
        attrs = cur.attrs
        idval = None
        if attrs.has_key((None, 'id')):
            idval = attrs[(None, 'id')]
            if self._ids.has_key(idval):
                raise Error, "duplicate id `%s'" % idval
            del attrs[(None, 'id')]
        root = 1
        if len(self._stack) == 3:
            if attrs.has_key((NS.ENC, 'root')):
                root = int(attrs[(NS.ENC, 'root')])
                # Do some preliminary checks. First, if root="0" is present,
                # the element must have an id. Next, if root="n" is present,
                # n something other than 0 or 1, raise an exception.
                if root == 0:
                    if idval == None:
                        raise Error, "non-root element must have an id"
                elif root != 1:
                    raise Error, "SOAP-ENC:root must be `0' or `1'"
                del attrs[(NS.ENC, 'root')]
        while 1:
            # Case 1: a local href reference -- use (or promise) the
            # referenced value instead of building one.
            href = attrs.get((None, 'href'))
            if href:
                if href[0] != '#':
                    raise Error, "only do local hrefs right now"
                if self._data != None and self._data.strip() != '':
                    raise Error, "hrefs can't have data"
                href = href[1:]
                if self._ids.has_key(href):
                    data = self._ids[href]
                else:
                    # Target not seen yet: record a RefHolder to be
                    # filled in when the id arrives.
                    data = RefHolder(name, self._stack[-1])
                    if self._refs.has_key(href):
                        self._refs[href].append(data)
                    else:
                        self._refs[href] = [data]
                del attrs[(None, 'href')]
                break
            # Extract the declared xsi:type, if any.
            kind = None
            if attrs:
                for i in NS.XSI_L:
                    if attrs.has_key((i, 'type')):
                        kind = attrs[(i, 'type')]
                        del attrs[(i, 'type')]
                if kind != None:
                    i = kind.find(':')
                    if i >= 0:
                        kind = (self._prem[kind[:i]], kind[i + 1:])
                    else:
                        # XXX What to do here? (None, kind) is just going to fail in convertType
                        kind = (None, kind)
            # Case 2: xsi:null / xsi:nil -- the value is None.
            null = 0
            if attrs:
                for i in (NS.XSI, NS.XSI2):
                    if attrs.has_key((i, 'null')):
                        null = attrs[(i, 'null')]
                        del attrs[(i, 'null')]
                if attrs.has_key((NS.XSI3, 'nil')):
                    null = attrs[(NS.XSI3, 'nil')]
                    del attrs[(NS.XSI3, 'nil')]
                #MAP 4/12/2002 - must also support "true"
                #null = int(null)
                null = (str(null).lower() in ['true', '1'])
                if null:
                    if len(cur) or \
                        (self._data != None and self._data.strip() != ''):
                        raise Error, "nils can't have data"
                    data = None
                    break
            # Case 3: the SOAP envelope's own structural elements.
            if len(self._stack) == 2:
                if (ns, name) == (NS.ENV, "Header"):
                    self.header = data = headerType(attrs = attrs)
                    self._next = "B"
                    break
                elif (ns, name) == (NS.ENV, "Body"):
                    self.body = data = bodyType(attrs = attrs)
                    self._next = ""
                    break
            elif len(self._stack) == 3 and self._next == None:
                if (ns, name) == (NS.ENV, "Fault"):
                    data = faultType()
                    self._next = ""
                    break
            # Case 4: a caller-supplied conversion rule for this element.
            if cur.rules != None:
                rule = cur.rules
                if type(rule) in (StringType, UnicodeType):
                    # XXX Need a namespace here
                    rule = (None, rule)
                elif type(rule) == ListType:
                    rule = tuple(rule)
                # XXX What if rule != kind?
                if callable(rule):
                    data = rule(self._data)
                elif type(rule) == DictType:
                    data = structType(name = (ns, name), attrs = attrs)
                else:
                    data = self.convertType(self._data, rule, attrs)
                break
            # Case 5: a SOAP-ENC array (declared via arrayType or type).
            if (kind == None and cur.kind != None) or \
                (kind == (NS.ENC, 'Array')):
                kind = cur.kind
                if kind == None:
                    kind = 'ur-type[%d]' % len(cur)
                else:
                    kind = kind[1]
                # If all children share one name, use it as the element
                # name of the array.
                if len(cur.namecounts) == 1:
                    elemsname = cur.names[0]
                else:
                    elemsname = None
                data = self.startArray((ns, name), kind, attrs, elemsname)
                break
            # Case 6: an empty, untyped top-level element -> empty struct.
            if len(self._stack) == 3 and kind == None and \
                len(cur) == 0 and \
                (self._data == None or self._data.strip() == ''):
                data = structType(name = (ns, name), attrs = attrs)
                break
            # Case 7: a childless element -> a simple typed value.
            if len(cur) == 0 and ns != NS.URN:
                # Nothing's been added to the current frame so it must be a
                # simple type.
                if kind == None:
                    # If the current item's container is an array, it will
                    # have a kind. If so, get the bit before the first [,
                    # which is the type of the array, therefore the type of
                    # the current item.
                    kind = self._stack[-1].kind
                    if kind != None:
                        i = kind[1].find('[')
                        if i >= 0:
                            kind = (kind[0], kind[1][:i])
                    elif ns != None:
                        kind = (ns, name)
                if kind != None:
                    try:
                        data = self.convertType(self._data, kind, attrs)
                    except UnknownTypeError:
                        data = None
                else:
                    data = None
                if data == None:
                    # Fall back to the raw character data.
                    data = self._data or ''
                    if len(attrs) == 0:
                        try: data = str(data)
                        except: pass
                break
            # Case 8 (default): a generic struct.
            data = structType(name = (ns, name), attrs = attrs)
            break
        # Move the frame's accumulated children into the new compound.
        if isinstance(data, compoundType):
            for i in range(len(cur)):
                v = cur.contents[i]
                data._addItem(cur.names[i], v, cur.subattrs[i])
                if isinstance(v, RefHolder):
                    v.parent = data
        if root:
            self._stack[-1].append(name, data, attrs)
        if idval != None:
            # Register the id and resolve any references waiting on it.
            self._ids[idval] = data
            if self._refs.has_key(idval):
                for i in self._refs[idval]:
                    i.parent._placeItem(i.name, data, i.pos, i.subpos, attrs)
                del self._refs[idval]
        self.attrs[id(data)] = attrs
        if isinstance(data, anyType):
            data._setAttrs(attrs)
        self._data = None # Stop accumulating
    def endDocument(self):
        """Fail if any href targets were never defined in the document."""
        if len(self._refs) == 1:
            raise Error, \
                "unresolved reference " + self._refs.keys()[0]
        elif len(self._refs) > 1:
            raise Error, \
                "unresolved references " + ', '.join(self._refs.keys())
    def startPrefixMapping(self, prefix, uri):
        # Track prefix <-> URI mappings in both directions.
        self._prem[prefix] = uri
        self._prem_r[uri] = prefix
    def endPrefixMapping(self, prefix):
        # Drop the mapping; ignore prefixes we never saw.
        try:
            del self._prem_r[self._prem[prefix]]
            del self._prem[prefix]
        except:
            pass
    def characters(self, c):
        # Accumulate character data only while inside an element
        # (self._data is None between elements).
        if self._data != None:
            self._data += c
    # Pattern for SOAP-ENC:arrayType values such as "ns:type[,,][3,4]";
    # startArray() lazily replaces this string with its compiled form.
    arrayre = '^(?:(?P<ns>[^:]*):)?' \
        '(?P<type>[^[]+)' \
        '(?:\[(?P<rank>,*)\])?' \
        '(?:\[(?P<asize>\d+(?:,\d+)*)?\])$'
    def startArray(self, name, kind, attrs, elemsname):
        """Build an arrayType/typedArrayType from an arrayType attribute value."""
        if type(self.arrayre) == StringType:
            self.arrayre = re.compile (self.arrayre)
        # Parse the optional SOAP-ENC:offset attribute ("[n]" form).
        offset = attrs.get((NS.ENC, "offset"))
        if offset != None:
            del attrs[(NS.ENC, "offset")]
            try:
                if offset[0] == '[' and offset[-1] == ']':
                    offset = int(offset[1:-1])
                    if offset < 0:
                        raise Exception
                else:
                    raise Exception
            except:
                raise AttributeError, "invalid Array offset"
        else:
            offset = 0
        try:
            m = self.arrayre.search(kind)
            if m == None:
                raise Exception
            t = m.group('type')
            if t == 'ur-type':
                # Untyped array.
                return arrayType(None, name, attrs, offset, m.group('rank'),
                    m.group('asize'), elemsname)
            elif m.group('ns') != None:
                # Typed array with a namespace prefix on the type.
                return typedArrayType(None, name,
                    (self._prem[m.group('ns')], t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
            else:
                return typedArrayType(None, name, (None, t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
        except:
            raise AttributeError, "invalid Array type `%s'" % kind
# Conversion
    class DATETIMECONSTS:
        """Regular expressions for the lexical forms of the date/time types.

        The component patterns are combined (via %-formatting over
        __allres) into one expression per supported type name, looked up
        by convertDateTime via getattr.
        """
        SIGNre = '(?P<sign>-?)'
        CENTURYre = '(?P<century>\d{2,})'
        YEARre = '(?P<year>\d{2})'
        MONTHre = '(?P<month>\d{2})'
        DAYre = '(?P<day>\d{2})'
        HOURre = '(?P<hour>\d{2})'
        MINUTEre = '(?P<minute>\d{2})'
        SECONDre = '(?P<second>\d{2}(?:\.\d*)?)'
        TIMEZONEre = '(?P<zulu>Z)|(?P<tzsign>[-+])(?P<tzhour>\d{2}):' \
            '(?P<tzminute>\d{2})'
        BOSre = '^\s*'
        EOSre = '\s*$'
        __allres = {'sign': SIGNre, 'century': CENTURYre, 'year': YEARre,
            'month': MONTHre, 'day': DAYre, 'hour': HOURre,
            'minute': MINUTEre, 'second': SECONDre, 'timezone': TIMEZONEre,
            'b': BOSre, 'e': EOSre}
        dateTime = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)sT' \
            '%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % __allres
        timeInstant = dateTime
        timePeriod = dateTime
        time = '%(b)s%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % \
            __allres
        date = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)s' \
            '(%(timezone)s)?%(e)s' % __allres
        century = '%(b)s%(sign)s%(century)s(%(timezone)s)?%(e)s' % __allres
        gYearMonth = '%(b)s%(sign)s%(century)s%(year)s-%(month)s' \
            '(%(timezone)s)?%(e)s' % __allres
        gYear = '%(b)s%(sign)s%(century)s%(year)s(%(timezone)s)?%(e)s' % \
            __allres
        year = gYear
        gMonthDay = '%(b)s--%(month)s-%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDate = gMonthDay
        gDay = '%(b)s---%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDay = gDay
        gMonth = '%(b)s--%(month)s--(%(timezone)s)?%(e)s' % __allres
        month = gMonth
        recurringInstant = '%(b)s%(sign)s(%(century)s|-)(%(year)s|-)-' \
            '(%(month)s|-)-(%(day)s|-)T' \
            '(%(hour)s|-):(%(minute)s|-):(%(second)s|-)' \
            '(%(timezone)s)?%(e)s' % __allres
        duration = '%(b)s%(sign)sP' \
            '((?P<year>\d+)Y)?' \
            '((?P<month>\d+)M)?' \
            '((?P<day>\d+)D)?' \
            '((?P<sep>T)' \
            '((?P<hour>\d+)H)?' \
            '((?P<minute>\d+)M)?' \
            '((?P<second>\d*(?:\.\d*)?)S)?)?%(e)s' % \
            __allres
        timeDuration = duration
        # The extra 31 on the front is:
        # - so the tuple is 1-based
        # - so months[month-1] is December's days if month is 1
        months = (31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    def convertDateTime(self, value, kind):
        """Parse the XML Schema lexical form `value' using the pattern
        named `kind' on DATETIMECONSTS.

        Returns a tuple of numeric fields (century folded into the year,
        sign and timezone offset applied), a single number for one-field
        kinds such as 'century', or None when `kind' names no known
        pattern.  Raises Error for a malformed value.
        """
        def getZoneOffset(d):
            # Timezone offset in minutes from groupdict d.  When no zone
            # matched, d['tzhour'] is None and int() raises TypeError,
            # leaving the offset at 0; 'Z' (zulu) also yields 0.
            zoffs = 0
            try:
                if d['zulu'] == None:
                    zoffs = 60 * int(d['tzhour']) + int(d['tzminute'])
                    if d['tzsign'] != '-':
                        zoffs = -zoffs
            except TypeError:
                pass
            return zoffs
        def applyZoneOffset(months, zoffs, date, minfield, posday = 1):
            # Add `zoffs' minutes to date ([year, month, day, hour,
            # minute, second]) and normalize, rippling carries up through
            # minute -> hour -> day -> month -> year.  Fields below index
            # `minfield' were absent from the value and are zeroed rather
            # than normalized.  posday forces the day to stay positive
            # (used for full dates but not recurringInstant).
            if zoffs == 0 and (minfield > 4 or 0 <= date[5] < 60):
                return date
            if minfield > 5: date[5] = 0
            if minfield > 4: date[4] = 0
            if date[5] < 0:
                date[4] += int(date[5]) / 60
                date[5] %= 60
            date[4] += zoffs
            if minfield > 3 or 0 <= date[4] < 60: return date
            date[3] += date[4] / 60
            date[4] %= 60
            if minfield > 2 or 0 <= date[3] < 24: return date
            date[2] += date[3] / 24
            date[3] %= 24
            if minfield > 1:
                if posday and date[2] <= 0:
                    date[2] += 31 # zoffs is at most 99:59, so the
                                  # day will never be less than -3
                return date
            while 1:
                # The date[1] == 3 (instead of == 2) is because we're
                # going back a month, so we need to know if the previous
                # month is February, so we test if this month is March.
                leap = minfield == 0 and date[1] == 3 and \
                    date[0] % 4 == 0 and \
                    (date[0] % 100 != 0 or date[0] % 400 == 0)
                if 0 < date[2] <= months[date[1]] + leap: break
                date[2] += months[date[1] - 1] + leap
                date[1] -= 1
                if date[1] > 0: break
                date[1] = 12
                if minfield > 0: break
                date[0] -= 1
            return date
        try:
            exp = getattr(self.DATETIMECONSTS, kind)
        except AttributeError:
            return None
        # Compile the pattern on first use and cache the compiled form
        # back onto DATETIMECONSTS for subsequent calls.
        if type(exp) == StringType:
            exp = re.compile(exp)
            setattr (self.DATETIMECONSTS, kind, exp)
        m = exp.search(value)
        try:
            if m == None:
                raise Exception
            d = m.groupdict()
            f = ('century', 'year', 'month', 'day',
                'hour', 'minute', 'second')
            fn = len(f) # Index of first non-None value
            r = []
            if kind in ('duration', 'timeDuration'):
                # A 'T' separator with no time components is malformed.
                if d['sep'] != None and d['hour'] == None and \
                    d['minute'] == None and d['second'] == None:
                    raise Exception
                f = f[1:]
                for i in range(len(f)):
                    s = d[f[i]]
                    if s != None:
                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try: s = int(s)
                            except ValueError: s = long(s)
                        if i < fn: fn = i
                    r.append(s)
                if fn > len(r): # Any non-Nones?
                    raise Exception
                if d['sign'] == '-':
                    r[fn] = -r[fn]
                return tuple(r)
            if kind == 'recurringInstant':
                for i in range(len(f)):
                    s = d[f[i]]
                    if s == None or s == '-':
                        # An omitted ('-') field may not follow a given one.
                        if i > fn:
                            raise Exception
                        s = None
                    else:
                        if i < fn:
                            fn = i
                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try:
                                s = int(s)
                            except ValueError:
                                s = long(s)
                    r.append(s)
                # Fold the century into the year field.
                s = r.pop(0)
                if fn == 0:
                    r[0] += s * 100
                else:
                    fn -= 1
                if fn < len(r) and d['sign'] == '-':
                    r[fn] = -r[fn]
                cleanDate(r, fn)
                return tuple(applyZoneOffset(self.DATETIMECONSTS.months,
                    getZoneOffset(d), r, fn, 0))
            # Remaining kinds share one path; default the date fields.
            r = [0, 0, 1, 1, 0, 0, 0]
            for i in range(len(f)):
                field = f[i]
                s = d.get(field)
                if s != None:
                    if field == 'second':
                        s = float(s)
                    else:
                        try:
                            s = int(s)
                        except ValueError:
                            s = long(s)
                    if i < fn:
                        fn = i
                    r[i] = s
            if fn > len(r): # Any non-Nones?
                raise Exception
            # Fold the century into the year field.
            s = r.pop(0)
            if fn == 0:
                r[0] += s * 100
            else:
                fn -= 1
            if d.get('sign') == '-':
                r[fn] = -r[fn]
            cleanDate(r, fn)
            zoffs = getZoneOffset(d)
            if zoffs:
                r = applyZoneOffset(self.DATETIMECONSTS.months, zoffs, r, fn)
            if kind == 'century':
                return r[0] / 100
            # Return only the fields this kind's pattern actually names.
            s = []
            for i in range(1, len(f)):
                if d.has_key(f[i]):
                    s.append(r[i - 1])
            if len(s) == 1:
                return s[0]
            return tuple(s)
        except Exception, e:
            raise Error, "invalid %s value `%s' - %s" % (kind, value, e)
    # XSD integer type name -> (always-long flag, min, max); None means
    # unbounded on that side.  Consulted by convertType for range checks.
    intlimits = \
    {
        'nonPositiveInteger': (0, None, 0),
        'non-positive-integer': (0, None, 0),
        'negativeInteger': (0, None, -1),
        'negative-integer': (0, None, -1),
        'long': (1, -9223372036854775808L,
            9223372036854775807L),
        'int': (0, -2147483648L, 2147483647),
        'short': (0, -32768, 32767),
        'byte': (0, -128, 127),
        'nonNegativeInteger': (0, 0, None),
        'non-negative-integer': (0, 0, None),
        'positiveInteger': (0, 1, None),
        'positive-integer': (0, 1, None),
        'unsignedLong': (1, 0, 18446744073709551615L),
        'unsignedInt': (0, 0, 4294967295L),
        'unsignedShort': (0, 0, 65535),
        'unsignedByte': (0, 0, 255),
    }
    # XSD float type name -> (smallest positive magnitude, min, max),
    # used by convertType for overflow/underflow detection.
    floatlimits = \
    {
        'float': (7.0064923216240861E-46, -3.4028234663852886E+38,
            3.4028234663852886E+38),
        'double': (2.4703282292062327E-324, -1.7976931348623158E+308,
            1.7976931348623157E+308),
    }
    # Pattern (compiled lazily in convertType) that finds a nonzero digit
    # in a string whose float() parse yielded 0 -- i.e. an underflow.
    zerofloatre = '[1-9]'
    def convertType(self, d, t, attrs):
        """Convert marshalled string `d' to a Python value.

        t     -- (namespace-URI, typename) pair naming the XSD/SOAP type
        attrs -- the element's XML attribute map (presence of attributes
                 forces unicode/long results in a few branches)

        Raises Overflow/Underflow/Value errors for out-of-range values
        and UnknownTypeError when the type isn't recognized.
        """
        dnn = d or ''
        if t[0] in NS.EXSD_L:
            if t[1] == "integer":
                try:
                    d = int(d)
                    if len(attrs):
                        d = long(d)
                except:
                    d = long(d)
                return d
            if self.intlimits.has_key (t[1]):
                # Bounded integer types: parse, then range-check.
                l = self.intlimits[t[1]]
                try: d = int(d)
                except: d = long(d)
                if l[1] != None and d < l[1]:
                    raise UnderflowError, "%s too small" % d
                if l[2] != None and d > l[2]:
                    raise OverflowError, "%s too large" % d
                if l[0] or len(attrs):
                    return long(d)
                return d
            if t[1] == "string":
                if len(attrs):
                    return unicode(dnn)
                try:
                    return str(dnn)
                except:
                    return dnn
            if t[1] == "boolean":
                d = d.strip().lower()
                if d in ('0', 'false'):
                    return 0
                if d in ('1', 'true'):
                    return 1
                raise AttributeError, "invalid boolean value"
            if self.floatlimits.has_key (t[1]):
                l = self.floatlimits[t[1]]
                s = d.strip().lower()
                try:
                    d = float(s)
                except:
                    # Some platforms don't implement the float stuff. This
                    # is close, but NaN won't be > "INF" as required by the
                    # standard.
                    if s in ("nan", "inf"):
                        return 1e300**2
                    if s == "-inf":
                        return -1e300**2
                    raise
                # Only the literal lexical forms may produce the special
                # values; anything else over/underflowed during parsing.
                if str (d) == 'nan':
                    if s != 'nan':
                        raise ValueError, "invalid %s" % t[1]
                elif str (d) == '-inf':
                    if s != '-inf':
                        raise UnderflowError, "%s too small" % t[1]
                elif str (d) == 'inf':
                    if s != 'inf':
                        raise OverflowError, "%s too large" % t[1]
                elif d < 0:
                    if d < l[1]:
                        raise UnderflowError, "%s too small" % t[1]
                elif d > 0:
                    if d < l[0] or d > l[2]:
                        raise OverflowError, "%s too large" % t[1]
                elif d == 0:
                    # A zero parse of a string containing a nonzero digit
                    # means the value underflowed to zero.
                    if type(self.zerofloatre) == StringType:
                        self.zerofloatre = re.compile(self.zerofloatre)
                    if self.zerofloatre.search(s):
                        raise UnderflowError, "invalid %s" % t[1]
                return d
            if t[1] in ("dateTime", "date", "timeInstant", "time"):
                return self.convertDateTime(d, t[1])
            if t[1] == "decimal":
                return float(d)
            if t[1] in ("language", "QName", "NOTATION", "NMTOKEN", "Name",
                "NCName", "ID", "IDREF", "ENTITY"):
                return collapseWhiteSpace(d)
            if t[1] in ("IDREFS", "ENTITIES", "NMTOKENS"):
                d = collapseWhiteSpace(d)
                return d.split()
        if t[0] in NS.XSD_L:
            if t[1] in ("base64", "base64Binary"):
                return base64.decodestring(d)
            if t[1] == "hexBinary":
                return decodeHexString(d)
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("normalizedString", "token"):
                return collapseWhiteSpace(d)
        if t[0] == NS.ENC:
            if t[1] == "base64":
                return base64.decodestring(d)
        if t[0] == NS.XSD:
            if t[1] == "binary":
                # Encoding is carried in the element's 'encoding' attribute.
                try:
                    e = attrs[(None, 'encoding')]
                    if e == 'hex':
                        return decodeHexString(d)
                    elif e == 'base64':
                        return base64.decodestring(d)
                except:
                    pass
                raise Error, "unknown or missing binary encoding"
            if t[1] == "uri":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "recurringInstant":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.ENC):
            if t[1] == "uriReference":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "timePeriod":
                return self.convertDateTime(d, t[1])
            if t[1] in ("century", "year"):
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD, NS.XSD2, NS.ENC):
            if t[1] == "timeDuration":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD3:
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("gYearMonth", "gMonthDay"):
                return self.convertDateTime(d, t[1])
            if t[1] == "gYear":
                return self.convertDateTime(d, t[1])
            if t[1] == "gMonth":
                return self.convertDateTime(d, t[1])
            if t[1] == "gDay":
                return self.convertDateTime(d, t[1])
            if t[1] == "duration":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.XSD3):
            if t[1] == "token":
                return collapseWhiteSpace(d)
            if t[1] == "recurringDate":
                return self.convertDateTime(d, t[1])
            if t[1] == "month":
                return self.convertDateTime(d, t[1])
            if t[1] == "recurringDay":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD2:
            if t[1] == "CDATA":
                return collapseWhiteSpace(d)
        raise UnknownTypeError, "unknown type `%s'" % (t[0] + ':' + t[1])
################################################################################
# call to SOAPParser that keeps all of the info
################################################################################
def _parseSOAP(xml_str, rules = None):
    """Run xml_str through a namespace-aware SAX parse and return the
    SOAPParser content handler holding everything that was parsed."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    handler = SOAPParser(rules = rules)
    sax_parser = xml.sax.make_parser()
    sax_parser.setContentHandler(handler)
    sax_parser.setErrorHandler(xml.sax.handler.ErrorHandler())
    # Namespace processing must be on so the handler sees (uri, name) pairs.
    sax_parser.setFeature(xml.sax.handler.feature_namespaces, 1)
    source = xml.sax.xmlreader.InputSource()
    source.setByteStream(StringIO(xml_str))
    sax_parser.parse(source)
    return handler
################################################################################
# SOAPParser's more public interface
################################################################################
def parseSOAP(xml_str, attrs = 0):
    """Parse a SOAP envelope; return its body, or (body, attrs map) when
    attrs is true."""
    parsed = _parseSOAP(xml_str)
    if not attrs:
        return parsed.body
    return parsed.body, parsed.attrs
def parseSOAPRPC(xml_str, header = 0, body = 0, attrs = 0, rules = None):
    """Parse a SOAP-RPC message and return the first body element
    (typically the call/response struct).

    When any of header/body/attrs is true, returns a tuple starting with
    the payload followed by the requested extra parts, in that order.
    """
    t = _parseSOAP(xml_str, rules = rules)
    p = t.body._aslist[0]
    # Empty string, for RPC this translates into a void
    # (note: type identity, not isinstance, so str/unicode subclasses
    # such as project string types are deliberately excluded).
    if type(p) in (type(''), type(u'')) and p in ('', u''):
        name = "Response"
        # Use the (last) non-private element name as the struct name.
        for k in t.body.__dict__.keys():
            if k[0] != "_":
                name = k
        p = structType(name)
    if header or body or attrs:
        ret = (p,)
        if header : ret += (t.header,)
        if body: ret += (t.body,)
        if attrs: ret += (t.attrs,)
        return ret
    else:
        return p
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
    """Serializes Python data (positional/keyword arguments, an optional
    method call, header and fault objects) into a SOAP envelope string.

    Entry point is build().  Individual Python types are marshalled by
    the dump_* methods, dispatched by type name from dump(); dumper() is
    the generic scalar fallback.  Multi-reference encoding (use_refs) is
    retried automatically when a recursive structure is detected.
    """
    _xml_top = '<?xml version="1.0"?>\n'
    _xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
    _env_top = '%(ENV_T)s:Envelope %(ENV_T)s:encodingStyle="%(ENC)s"' % \
        NS.__dict__
    _env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
    # Namespaces potentially defined in the Envelope tag.
    _env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
        NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
        NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
    def __init__(self, args = (), kw = {}, method = None, namespace = None,
        header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
        use_refs = 0, config = Config):
        """Record the message description; nothing is serialized until
        build() is called."""
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        self.args = args
        self.kw = kw
        self.envelope = envelope
        self.encoding = encoding
        self.method = method
        self.namespace = namespace
        self.header = header
        self.methodattrs= methodattrs
        self.use_refs = use_refs
        self.config = config
        self.out = ''
        self.tcounter = 0
        self.ncounter = 1
        self.icounter = 1
        self.envns = {}
        self.ids = {}
        self.depth = 0
        self.multirefs = []
        self.multis = 0
        # A bodyType argument is dumped as the body itself rather than
        # being wrapped in a generated Body element.
        self.body = not isinstance(args, bodyType)
    def build(self):
        """Serialize everything given to __init__ and return the document
        (an encoded str when an encoding was specified)."""
        ns_map = {}
        # Cache whether typing is on or not
        typed = self.config.typed
        if self.header:
            # Create a header.
            self.dump(self.header, "Header", typed = typed)
            self.header = None # Wipe it out so no one is using it.
        if self.body:
            # Call genns to record that we've used SOAP-ENV.
            self.depth += 1
            body_ns = self.genns(ns_map, NS.ENV)[0]
            self.out += "<%sBody>\n" % body_ns
        if self.method:
            self.depth += 1
            a = ''
            if self.methodattrs:
                for (k, v) in self.methodattrs.items():
                    a += ' %s="%s"' % (k, v)
            if self.namespace: # Use the namespace info handed to us
                methodns, n = self.genns(ns_map, self.namespace)
            else:
                methodns, n = '', ''
            self.out += '<%s%s%s%s%s>\n' % \
                (methodns, self.method, n, a, self.genroot(ns_map))
        try:
            if type(self.args) != TupleType:
                args = (self.args,)
            else:
                args = self.args
            for i in args:
                self.dump(i, typed = typed, ns_map = ns_map)
            for (k, v) in self.kw.items():
                self.dump(v, k, typed = typed, ns_map = ns_map)
        except RecursionError:
            if self.use_refs == 0:
                # restart the whole build with multi-reference encoding on
                b = SOAPBuilder(args = self.args, kw = self.kw,
                    method = self.method, namespace = self.namespace,
                    header = self.header, methodattrs = self.methodattrs,
                    envelope = self.envelope, encoding = self.encoding,
                    use_refs = 1, config = self.config)
                return b.build()
            raise
        if self.method:
            self.out += "</%s%s>\n" % (methodns, self.method)
            self.depth -= 1
        if self.body:
            # dump may add to self.multirefs, but the for loop will keep
            # going until it has used all of self.multirefs, even those
            # entries added while in the loop.
            self.multis = 1
            for obj, tag in self.multirefs:
                self.dump(obj, tag, typed = typed, ns_map = ns_map)
            self.out += "</%sBody>\n" % body_ns
            self.depth -= 1
        if self.envelope:
            e = map (lambda ns: 'xmlns:%s="%s"' % (ns[1], ns[0]),
                self.envns.items())
            self.out = '<' + self._env_top + ' '.join([''] + e) + '>\n' + \
                self.out + \
                self._env_bot
        if self.encoding != None:
            self.out = self._xml_enc_top % self.encoding + self.out
            return self.out.encode(self.encoding)
        return self._xml_top + self.out
    def gentag(self):
        """Return a fresh auto-generated element tag ('v1', 'v2', ...)."""
        self.tcounter += 1
        return "v%d" % self.tcounter
    def genns(self, ns_map, nsURI):
        """Return (prefix-with-colon, xmlns-declaration) for nsURI,
        generating and recording a new prefix on first use; the
        declaration string is empty when the prefix is already known."""
        if nsURI == None:
            return ('', '')
        if type(nsURI) == TupleType: # already a tuple
            if len(nsURI) == 2:
                ns, nsURI = nsURI
            else:
                ns, nsURI = None, nsURI[0]
        else:
            ns = None
        if ns_map.has_key(nsURI):
            return (ns_map[nsURI] + ':', '')
        if self._env_ns.has_key(nsURI):
            # Well-known namespaces get declared on the Envelope tag.
            ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
            return (ns + ':', '')
        if not ns:
            ns = "ns%d" % self.ncounter
            self.ncounter += 1
        ns_map[nsURI] = ns
        if self.config.buildWithNamespacePrefix:
            return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
        else:
            return ('', ' xmlns="%s"' % (nsURI))
    def genroot(self, ns_map):
        """Return the SOAP-ENC root attribute for top-level elements
        (depth 2), '' elsewhere."""
        if self.depth != 2:
            return ''
        ns, n = self.genns(ns_map, NS.ENC)
        return ' %sroot="%d"%s' % (ns, not self.multis, n)
    # checkref checks an element to see if it needs to be encoded as a
    # multi-reference element or not. If it returns None, the element has
    # been handled and the caller can continue with subsequent elements.
    # If it returns a string, the string should be included in the opening
    # tag of the marshaled element.
    def checkref(self, obj, tag, ns_map):
        if self.depth < 2:
            return ''
        if not self.ids.has_key(id(obj)):
            n = self.ids[id(obj)] = self.icounter
            self.icounter = n + 1
            if self.use_refs == 0:
                return ''
            if self.depth == 2:
                return ' id="i%d"' % n
            self.multirefs.append((obj, tag))
        else:
            if self.use_refs == 0:
                raise RecursionError, "Cannot serialize recursive object"
            n = self.ids[id(obj)]
            if self.multis and self.depth == 2:
                return ' id="i%d"' % n
        self.out += '<%s href="#i%d"%s/>\n' % (tag, n, self.genroot(ns_map))
        return None
    # dumpers
    def dump(self, obj, tag = None, typed = 1, ns_map = {}):
        """Marshal obj by dispatching to dump_<typename>, falling back to
        the generic dumper for types without a specific method."""
        ns_map = ns_map.copy()
        self.depth += 1
        if type(tag) not in (NoneType, StringType, UnicodeType):
            raise KeyError, "tag must be a string or None"
        try:
            meth = getattr(self, "dump_" + type(obj).__name__)
            meth(obj, tag, typed, ns_map)
        except AttributeError:
            if type(obj) == LongType:
                obj_type = "integer"
            else:
                obj_type = type(obj).__name__
            self.out += self.dumper(None, obj_type, obj, tag, typed,
                ns_map, self.genroot(ns_map))
        self.depth -= 1
    # generic dumper
    def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
        rootattr = '', id = '',
        xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
        """Return a single marshalled element for a scalar value; the
        `xml' template is filled with tag, xsi:type, id, attrs and data."""
        if nsURI == None:
            nsURI = self.config.typesNamespaceURI
        tag = tag or self.gentag()
        a = n = t = ''
        if typed and obj_type:
            ns, n = self.genns(ns_map, nsURI)
            ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
            t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
        try: a = obj._marshalAttrs(ns_map, self)
        except: pass
        try: data = obj._marshalData()
        except: data = obj
        return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
            "id": id, "attrs": a}
    def dump_float(self, obj, tag, typed = 1, ns_map = {}):
        """Marshal a float, normalizing inf/nan spellings to INF/NaN."""
        # Terrible windows hack
        if not good_float:
            if obj == float(1e300**2):
                obj = "INF"
            elif obj == float(-1e300**2):
                obj = "-INF"
        obj = str(obj)
        if obj in ('inf', '-inf'):
            obj = str(obj).upper()
        elif obj == 'nan':
            obj = 'NaN'
        self.out += self.dumper(None, "float", obj, tag, typed, ns_map,
            self.genroot(ns_map))
    def dump_string(self, obj, tag, typed = 0, ns_map = {}):
        """Marshal a string, XML-escaping the data."""
        tag = tag or self.gentag()
        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return
        try: data = obj._marshalData()
        except: data = obj
        self.out += self.dumper(None, "string", cgi.escape(data), tag,
            typed, ns_map, self.genroot(ns_map), id)
    dump_unicode = dump_string
    dump_str = dump_string # 4/12/2002 - MAP - for Python 2.2
    def dump_None(self, obj, tag, typed = 0, ns_map = {}):
        """Marshal None as an xsi:null element."""
        tag = tag or self.gentag()
        ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
        self.out += '<%s %snull="1"%s/>\n' % (tag, ns, self.genroot(ns_map))
    def dump_list(self, obj, tag, typed = 1, ns_map = {}):
        """Marshal a list/tuple as a SOAP-ENC Array, using a homogeneous
        arrayType when all elements share one type."""
        if type(obj) == InstanceType:
            data = obj.data
        else:
            data = obj
        tag = tag or self.gentag()
        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return
        try:
            sample = data[0]
            empty = 0
        except:
            sample = structType()
            empty = 1
        # First scan list to see if all are the same type
        same_type = 1
        if not empty:
            for i in data[1:]:
                if type(sample) != type(i) or \
                    (type(sample) == InstanceType and \
                        sample.__class__ != i.__class__):
                    same_type = 0
                    break
        ndecl = ''
        if same_type:
            if (isinstance(sample, structType)) or \
                type(sample) == DictType: # force to urn struct
                try:
                    tns = obj._ns or NS.URN
                except:
                    tns = NS.URN
                ns, ndecl = self.genns(ns_map, tns)
                # NOTE(review): 'last' is undefined here, so this always
                # falls into the except and uses "SOAPStruct"; it looks
                # like 'sample._typename' was intended -- confirm before
                # changing.
                try:
                    typename = last._typename
                except:
                    typename = "SOAPStruct"
                t = ns + typename
            elif isinstance(sample, anyType):
                ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ns, ndecl = self.genns(ns_map, ns)
                    t = ns + sample._type
                else:
                    t = 'ur-type'
            else:
                t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                    type(sample).__name__
        else:
            t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
                "ur-type"
        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''
        ens, edecl = self.genns(ns_map, NS.ENC)
        ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
        self.out += \
            '<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %\
            (tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
            self.genroot(ns_map), id, a)
        # Elements of a homogeneous array don't need per-item xsi:type.
        typed = not same_type
        try: elemsname = obj._elemsname
        except: elemsname = "item"
        for i in data:
            self.dump(i, elemsname, typed, ns_map)
        self.out += '</%s>\n' % tag
    dump_tuple = dump_list
    def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
        """Marshal a dict as a struct of its non-private keys."""
        tag = tag or self.gentag()
        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return
        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''
        self.out += '<%s%s%s%s>\n' % \
            (tag, id, a, self.genroot(ns_map))
        for (k, v) in obj.items():
            if k[0] != "_":
                self.dump(v, k, 1, ns_map)
        self.out += '</%s>\n' % tag
    dump_dict = dump_dictionary # 4/18/2002 - MAP - for Python 2.2
    def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
        """Marshal a class instance: arrays, faults, voids, structs and
        anyType subclasses each get their own encoding; other classes
        are dumped as a struct of their non-private attributes."""
        if not tag:
            # If it has a name use it.
            if isinstance(obj, anyType) and obj._name:
                tag = obj._name
            else:
                tag = self.gentag()
        if isinstance(obj, arrayType): # Array
            self.dump_list(obj, tag, typed, ns_map)
            return
        if isinstance(obj, faultType): # Fault
            cns, cdecl = self.genns(ns_map, NS.ENC)
            vns, vdecl = self.genns(ns_map, NS.ENV)
            self.out += '''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring)
            if hasattr(obj, "detail"):
                self.dump(obj.detail, "detail", typed, ns_map)
            self.out += "</%sFault>\n" % vns
            return
        r = self.genroot(ns_map)
        try: a = obj._marshalAttrs(ns_map, self)
        except: a = ''
        if isinstance(obj, voidType): # void
            self.out += "<%s%s%s></%s>\n" % (tag, a, r, tag)
            return
        id = self.checkref(obj, tag, ns_map)
        if id == None:
            return
        if isinstance(obj, structType):
            # Check for namespace
            ndecl = ''
            ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                self.config.strictNamespaces)
            if ns:
                ns, ndecl = self.genns(ns_map, ns)
                tag = ns + tag
            self.out += "<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r)
            # If we have order use it.
            order = 1
            for i in obj._keys():
                if i not in obj._keyord:
                    order = 0
                    break
            if order:
                for i in range(len(obj._keyord)):
                    self.dump(obj._aslist[i], obj._keyord[i], 1, ns_map)
            else:
                # don't have pristine order information, just build it.
                for (k, v) in obj.__dict__.items():
                    if k[0] != "_":
                        self.dump(v, k, 1, ns_map)
            if isinstance(obj, bodyType):
                self.multis = 1
                for v, k in self.multirefs:
                    self.dump(v, k, typed = typed, ns_map = ns_map)
            self.out += '</%s>\n' % tag
        elif isinstance(obj, anyType):
            t = ''
            if typed:
                ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
                    self.config.strictNamespaces)
                if ns:
                    ons, ondecl = self.genns(ns_map, ns)
                    ins, indecl = self.genns(ns_map,
                        self.config.schemaNamespaceURI)
                    t = ' %stype="%s%s"%s%s' % \
                        (ins, ons, obj._type, ondecl, indecl)
            self.out += '<%s%s%s%s%s>%s</%s>\n' % \
                (tag, t, id, a, r, obj._marshalData(), tag)
        else: # Some Class
            self.out += '<%s%s%s>\n' % (tag, id, r)
            for (k, v) in obj.__dict__.items():
                if k[0] != "_":
                    self.dump(v, k, 1, ns_map)
            self.out += '</%s>\n' % tag
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None, header=None,
              methodattrs=None,envelope=1,encoding='UTF-8',config=Config):
    """Convenience wrapper: construct a SOAPBuilder for the given call
    description and return the serialized message it builds."""
    builder = SOAPBuilder(args = args, kw = kw, method = method,
                          namespace = namespace, header = header,
                          methodattrs = methodattrs, envelope = envelope,
                          encoding = encoding, config = config)
    return builder.build()
################################################################################
# RPC
################################################################################
def SOAPUserAgent():
    """Return the User-Agent string sent with outgoing HTTP requests."""
    return "".join(["SOAP.py ", __version__, " (actzero.com)"])
################################################################################
# Client
################################################################################
class SOAPAddress:
    """Parsed SOAP endpoint URL.

    Splits a URL into proto, host and path, defaulting the scheme to
    http, the path to '/', and a bare port number to localhost:<port>.
    Raises IOError for schemes other than http/https and AttributeError
    when https is requested without SSL support in this installation.
    """
    def __init__(self, url, config = Config):
        proto, uri = urllib.splittype(url)
        # apply some defaults
        if uri[0:2] != '//':
            if proto != None:
                uri = proto + ':' + uri
            uri = '//' + uri
            proto = 'http'
        host, path = urllib.splithost(uri)
        # A host that is just a port number means localhost:<port>.
        try:
            int(host)
            host = 'localhost:' + host
        except:
            pass
        if not path:
            path = '/'
        if proto not in ('http', 'https'):
            raise IOError, "unsupported SOAP protocol"
        if proto == 'https' and not config.SSLclient:
            raise AttributeError, \
                "SSL client not supported by this Python installation"
        self.proto = proto
        self.host = host
        self.path = path
    def __str__(self):
        return "%(proto)s://%(host)s%(path)s" % self.__dict__
    __repr__ = __str__
class HTTPTransport:
    """Default client transport: POSTs a SOAP payload over HTTP(S) with
    httplib and returns the raw response body."""
    # Need a Timeout someday?
    def call(self, addr, data, soapaction = '', encoding = None,
        http_proxy = None, config = Config):
        """POST `data' to `addr' (URL string or SOAPAddress) with the
        given SOAPAction and return the response payload.  Raises
        HTTPError unless the status is 200 or 500 (500 is let through
        because it may carry a SOAP fault)."""
        import httplib
        if not isinstance(addr, SOAPAddress):
            addr = SOAPAddress(addr, config)
        # Build a request
        if http_proxy:
            # Proxying: connect to the proxy, request the absolute URL.
            real_addr = http_proxy
            real_path = addr.proto + "://" + addr.host + addr.path
        else:
            real_addr = addr.host
            real_path = addr.path
        if addr.proto == 'https':
            r = httplib.HTTPS(real_addr)
        else:
            r = httplib.HTTP(real_addr)
        r.putrequest("POST", real_path)
        r.putheader("Host", addr.host)
        r.putheader("User-agent", SOAPUserAgent())
        t = 'text/xml';
        if encoding != None:
            t += '; charset="%s"' % encoding
        r.putheader("Content-type", t)
        r.putheader("Content-length", str(len(data)))
        r.putheader("SOAPAction", '"%s"' % soapaction)
        if config.dumpHeadersOut:
            s = 'Outgoing HTTP headers'
            debugHeader(s)
            print "POST %s %s" % (real_path, r._http_vsn_str)
            print "Host:", addr.host
            print "User-agent: SOAP.py " + __version__ + " (actzero.com)"
            print "Content-type:", t
            print "Content-length:", len(data)
            print 'SOAPAction: "%s"' % soapaction
            debugFooter(s)
        r.endheaders()
        if config.dumpSOAPOut:
            s = 'Outgoing SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)
        # send the payload
        r.send(data)
        # read response line
        code, msg, headers = r.getreply()
        if config.dumpHeadersIn:
            s = 'Incoming HTTP headers'
            debugHeader(s)
            if headers.headers:
                print "HTTP/1.? %d %s" % (code, msg)
                print "\n".join(map (lambda x: x.strip(), headers.headers))
            else:
                print "HTTP/0.9 %d %s" % (code, msg)
            debugFooter(s)
        if config.dumpSOAPIn:
            data = r.getfile().read()
            s = 'Incoming SOAP'
            debugHeader(s)
            print data,
            if data[-1] != '\n':
                print
            debugFooter(s)
        if code not in (200, 500):
            raise HTTPError(code, msg)
        # Avoid reading the response twice when it was already dumped.
        if not config.dumpSOAPIn:
            data = r.getfile().read()
        # return response payload
        return data
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
    """Client-side proxy for a SOAP endpoint.

    Attribute access yields callable method objects; calling one
    marshals the arguments, POSTs them via the transport, parses the
    response and (optionally) unwraps a single-valued result.  Faults
    are raised when throw_faults is true.
    """
    def __init__(self, proxy, namespace = None, soapaction = '',
        header = None, methodattrs = None, transport = HTTPTransport,
        encoding = 'UTF-8', throw_faults = 1, unwrap_results = 1,
        http_proxy=None, config = Config):
        # Test the encoding, raising an exception if it's not known
        if encoding != None:
            ''.encode(encoding)
        self.proxy = SOAPAddress(proxy, config)
        self.namespace = namespace
        self.soapaction = soapaction
        self.header = header
        self.methodattrs = methodattrs
        self.transport = transport()
        self.encoding = encoding
        self.throw_faults = throw_faults
        self.unwrap_results = unwrap_results
        self.http_proxy = http_proxy
        self.config = config
    def __call(self, name, args, kw, ns = None, sa = None, hd = None,
        ma = None):
        """Marshal and execute remote method `name'; per-call ns/sa/hd/ma
        (set via __Method directives) override the proxy-wide defaults."""
        ns = ns or self.namespace
        ma = ma or self.methodattrs
        if sa: # Get soapaction
            if type(sa) == TupleType: sa = sa[0]
        else:
            sa = self.soapaction
        if hd: # Get header
            if type(hd) == TupleType:
                hd = hd[0]
        else:
            hd = self.header
        hd = hd or self.header
        if ma: # Get methodattrs
            if type(ma) == TupleType: ma = ma[0]
        else:
            ma = self.methodattrs
        ma = ma or self.methodattrs
        m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
            header = hd, methodattrs = ma, encoding = self.encoding,
            config = self.config)
        r = self.transport.call(self.proxy, m, sa, encoding = self.encoding,
            http_proxy = self.http_proxy,
            config = self.config)
        p, attrs = parseSOAPRPC(r, attrs = 1)
        try:
            throw_struct = self.throw_faults and \
                isinstance (p, faultType)
        except:
            throw_struct = 0
        if throw_struct:
            raise p
        # Bubble a regular result up, if there is only one element in the
        # struct, assume that is the result and return it.
        # Otherwise it will return the struct with all the elements
        # as attributes.
        if self.unwrap_results:
            try:
                count = 0
                for i in p.__dict__.keys():
                    if i[0] != "_": # don't move the private stuff
                        count += 1
                        t = getattr(p, i)
                if count == 1: p = t # Only one piece of data, bubble it up
            except:
                pass
        if self.config.returnAllAttrs:
            return p, attrs
        return p
    def _callWithBody(self, body):
        """Send a pre-built bodyType as the entire request body."""
        return self.__call(None, body, {})
    def __getattr__(self, name): # hook to catch method calls
        return self.__Method(self.__call, name, config = self.config)
    # To handle attribute weirdness
    class __Method:
        # Some magic to bind a SOAP method to an RPC server.
        # Supports "nested" methods (e.g. examples.getStateName) -- concept
        # borrowed from xmlrpc/soaplib -- www.pythonware.com
        # Altered (improved?) to let you inline namespaces on a per call
        # basis ala SOAP::LITE -- www.soaplite.com
        def __init__(self, call, name, ns = None, sa = None, hd = None,
            ma = None, config = Config):
            self.__call = call
            self.__name = name
            self.__ns = ns
            self.__sa = sa
            self.__hd = hd
            self.__ma = ma
            self.__config = config
            # Names starting with "_" are directives (e.g. _ns, _sa) that
            # configure the call rather than naming a remote method.
            if self.__name[0] == "_":
                if self.__name in ["__repr__","__str__"]:
                    self.__call__ = self.__repr__
                else:
                    self.__call__ = self.__f_call
            else:
                self.__call__ = self.__r_call
        def __getattr__(self, name):
            if self.__name[0] == "_":
                # Don't nest method if it is a directive
                return self.__class__(self.__call, name, self.__ns,
                    self.__sa, self.__hd, self.__ma)
            return self.__class__(self.__call, "%s.%s" % (self.__name, name),
                self.__ns, self.__sa, self.__hd, self.__ma)
        def __f_call(self, *args, **kw):
            # Directive call: stash the values, return self for chaining.
            if self.__name == "_ns": self.__ns = args
            elif self.__name == "_sa": self.__sa = args
            elif self.__name == "_hd": self.__hd = args
            elif self.__name == "_ma": self.__ma = args
            return self
        def __r_call(self, *args, **kw):
            # Remote call: delegate to SOAPProxy.__call.
            return self.__call(self.__name, args, kw, self.__ns, self.__sa,
                self.__hd, self.__ma)
        def __repr__(self):
            return "<%s at %d>" % (self.__class__, id(self))
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
    """Wrapper attaching call-convention metadata to a server function.

    keywords -- when true, the server calls func with keyword arguments
                taken from the SOAP request instead of ordered params.
    context  -- when true, the server passes a SOAPContext describing
                the request as the _SOAPContext keyword argument.

    The wrapper is transparent: calling the instance invokes func, and
    __name__ is copied so registration-by-name still works.
    """
    def __init__(self, func, keywords=0, context=0):
        self.func = func
        self.keywords = keywords
        self.context = context
        self.__name__ = func.__name__
    def __call__(self, *args, **kw):
        # Direct call instead of the deprecated apply() builtin (removed
        # in Python 3); semantics are identical.
        return self.func(*args, **kw)
class SOAPContext:
    """Per-request bundle handed to MethodSig-wrapped server functions:
    the parsed header/body/attrs, the raw XML, the connection, the HTTP
    headers and the SOAPAction value, each stored verbatim as an
    attribute of the same name."""
    def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
        soapaction):
        # Record every piece of request state under its parameter name.
        given = locals()
        for field in ('header', 'body', 'attrs', 'xmldata', 'connection',
                      'httpheaders', 'soapaction'):
            setattr(self, field, given[field])
# A class to describe how header messages are handled
class HeaderHandler:
    """Inspects a parsed SOAP Header and raises a MustUnderstand fault
    for any non-private element whose mustUnderstand attribute is set,
    since this server does not process header elements."""
    # Initially fail out if there are any problems.
    def __init__(self, header, attrs):
        for i in header.__dict__.keys():
            if i[0] == "_":
                continue
            d = getattr(header, i)
            # Attribute maps are keyed by the element object's id().
            try:
                fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
            except:
                fault = 0
            if fault:
                raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
                    "Don't understand `%s' header element but "
                    "mustUnderstand attribute is set." % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServer(SocketServer.TCPServer):
import BaseHTTPServer
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def version_string(self):
return '<a href="http://www.actzero.com/solution.html">' + \
'SOAP.py ' + __version__ + '</a> (Python ' + \
sys.version.split()[0] + ')'
def date_time_string(self):
self.__last_date_time_string = \
SOAPServer.BaseHTTPServer.BaseHTTPRequestHandler.\
date_time_string(self)
return self.__last_date_time_string
def do_POST(self):
try:
if self.server.config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
print self.raw_requestline.strip()
print "\n".join(map (lambda x: x.strip(),
self.headers.headers))
debugFooter(s)
data = self.rfile.read(int(self.headers["content-length"]))
if self.server.config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
(r, header, body, attrs) = \
parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist
kw = r._asdict
ns = r._ns
resp = ""
# For fault messages
if ns:
nsmethod = "%s:%s" % (ns, method)
else:
nsmethod = method
try:
# First look for registered functions
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(method):
f = self.server.funcmap[ns][method]
else: # Now look at registered objects
# Check for nested attributes. This works even if
# there are none, because the split will return
# [method]
f = self.server.objmap[ns]
l = method.split(".")
for i in l:
f = getattr(f, i)
except:
resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
"No method %s found" % nsmethod,
"%s %s" % tuple(sys.exc_info()[0:2])),
encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
try:
if header:
x = HeaderHandler(header, attrs)
# If it's wrapped, some special action may be needed
if isinstance(f, MethodSig):
c = None
if f.context: # Build context object
c = SOAPContext(header, body, attrs, data,
self.connection, self.headers,
self.headers["soapaction"])
if f.keywords:
# This is lame, but have to de-unicode
# keywords
strkw = {}
for (k, v) in kw.items():
strkw[str(k)] = v
if c:
strkw["_SOAPContext"] = c
fr = apply(f, (), strkw)
elif c:
fr = apply(f, args, {'_SOAPContext':c})
else:
fr = apply(f, args, {})
else:
fr = apply(f, args, {})
if type(fr) == type(self) and \
isinstance(fr, voidType):
resp = buildSOAP(kw = {'%sResponse' % method: fr},
encoding = self.server.encoding,
config = self.server.config)
else:
resp = buildSOAP(kw =
{'%sResponse' % method: {'Result': fr}},
encoding = self.server.encoding,
config = self.server.config)
except Exception, e:
import traceback
info = sys.exc_info()
if self.server.config.dumpFaultInfo:
s = 'Method %s exception' % nsmethod
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if isinstance(e, faultType):
f = e
else:
f = faultType("%s:Server" % NS.ENV_T,
"Method %s failed." % nsmethod)
if self.server.config.returnFaultInfo:
f._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(f, 'detail'):
f._setDetail("%s %s" % (info[0], info[1]))
resp = buildSOAP(f, encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
status = 200
except faultType, e:
import traceback
info = sys.exc_info()
if self.server.config.dumpFaultInfo:
s = 'Received fault exception'
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if self.server.config.returnFaultInfo:
e._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(e, 'detail'):
e._setDetail("%s %s" % (info[0], info[1]))
resp = buildSOAP(e, encoding = self.server.encoding,
config = self.server.config)
status = 500
except:
# internal error, report as HTTP server error
if self.server.config.dumpFaultInfo:
import traceback
s = 'Internal exception'
debugHeader(s)
traceback.print_exc ()
debugFooter(s)
self.send_response(500)
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, 500, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
debugFooter(s)
else:
# got a valid SOAP response
self.send_response(status)
t = 'text/xml';
if self.server.encoding != None:
t += '; charset="%s"' % self.server.encoding
self.send_header("Content-type", t)
self.send_header("Content-length", str(len(resp)))
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, status, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
print "Content-type:", t
print "Content-length:", len(resp)
debugFooter(s)
if self.server.config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print resp,
if resp[-1] != '\n':
print
debugFooter(s)
self.wfile.write(resp)
self.wfile.flush()
# We should be able to shut down both a regular and an SSL
# connection, but under Python 2.1, calling shutdown on an
# SSL connections drops the output, so this work-around.
# This should be investigated more someday.
if self.server.config.SSLserver and \
isinstance(self.connection, SSL.Connection):
self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
SSL.SSL_RECEIVED_SHUTDOWN)
else:
self.connection.shutdown(1)
def log_message(self, format, *args):
    """Forward request-line logging to the base handler, but only when
    the owning server was created with logging enabled."""
    if not self.server.log:
        return
    SOAPServer.BaseHTTPServer.BaseHTTPRequestHandler.log_message(
        self, format, *args)
def __init__(self, addr = ('localhost', 8000),
    RequestHandler = SOAPRequestHandler, log = 1, encoding = 'UTF-8',
    config = Config, namespace = None, ssl_context = None):
    """Set up the SOAP server socket and its dispatch tables.

    addr        -- (host, port) tuple passed to the TCP server
    RequestHandler -- handler class instantiated per request
    log         -- truthy to enable request logging
    encoding    -- response encoding, validated immediately; None disables
    config      -- configuration object (checked for SSL support)
    namespace   -- default namespace for registered objects/functions
    ssl_context -- optional SSL context; requires config.SSLserver

    Raises LookupError for an unknown encoding and AttributeError when an
    ssl_context is supplied but SSL support is unavailable.
    """
    # Test the encoding, raising an exception if it's not known
    if encoding is not None:
        ''.encode(encoding)
    if ssl_context is not None and not config.SSLserver:
        # Fix: use the call form of raise -- the old ``raise X, msg``
        # statement is Python-2-only syntax
        raise AttributeError(
            "SSL server not supported by this Python installation")
    self.namespace = namespace
    self.objmap = {}      # namespace -> registered object
    self.funcmap = {}     # namespace -> {name: function}
    self.ssl_context = ssl_context
    self.encoding = encoding
    self.config = config
    self.log = log
    # Allow quick restarts without "address already in use" errors
    self.allow_reuse_address = 1
    SocketServer.TCPServer.__init__(self, addr, RequestHandler)
def get_request(self):
    """Accept a connection, wrapping the socket in an SSL layer when an
    ssl_context was configured.

    Returns (socket, client_address); raises socket.error when the SSL
    handshake is rejected.
    """
    sock, addr = SocketServer.TCPServer.get_request(self)
    if self.ssl_context:
        sock = SSL.Connection(self.ssl_context, sock)
        sock._setup_ssl(addr)
        if sock.accept_ssl() != 1:
            # Fix: call form of raise -- ``raise X, msg`` is Python-2-only
            raise socket.error("Couldn't accept SSL connection")
    return sock, addr
def registerObject(self, object, namespace = ''):
    """Expose *object*'s methods under *namespace* (the server default
    when an empty namespace is given)."""
    target_ns = namespace if namespace != '' else self.namespace
    self.objmap[target_ns] = object
def registerFunction(self, function, namespace = '', funcName = None):
    """Register *function* under *namespace* (server default when '')
    using *funcName* (the function's own __name__ when not given).

    Fix: ``dict.has_key`` is Python-2-only; ``setdefault`` is equivalent
    on Python 2 and also works on Python 3.
    """
    if not funcName:
        funcName = function.__name__
    if namespace == '':
        namespace = self.namespace
    self.funcmap.setdefault(namespace, {})[funcName] = function
def registerKWObject(self, object, namespace = ''):
    """Register every public callable attribute of *object* as a
    keyword-style function under *namespace*."""
    if namespace == '':
        namespace = self.namespace
    for attr_name in dir(object.__class__):
        if attr_name[0] == "_":
            continue
        if callable(getattr(object, attr_name)):
            self.registerKWFunction(getattr(object, attr_name), namespace)
# convenience - wraps your func for you.
def registerKWFunction(self, function, namespace = '', funcName = None):
    """Wrap *function* so it receives its arguments as keywords, then
    register it like any other function."""
    wrapped = MethodSig(function, keywords=1)
    self.registerFunction(wrapped, namespace, funcName)
|
mikemintz/neutron
|
modules/SOAP.py
|
Python
|
gpl-2.0
| 130,709
|
[
"Brian"
] |
bbc8c94022f3ed945dd8cc5293b5a8f0e5184f34680c1c62bcb05aa7040d6685
|
import paraBEM
from paraBEM.pan3d import doublet_src_3_0_vsaero, src_3_0_vsaero_v
from paraBEM.vtk_export import VtkWriter
import numpy
from paraBEM.utils import check_path
# Unit square panel in the z=0 plane, corners listed counter-clockwise.
v1 = paraBEM.PanelVector3(-0.5, -0.5, 0)
v2 = paraBEM.PanelVector3(0.5, -0.5, 0)
v3 = paraBEM.PanelVector3(0.5, 0.5, 0)
v4 = paraBEM.PanelVector3(-0.5, 0.5, 0)
p = paraBEM.Panel3([v1, v2, v3, v4])
p.potential = 1.

# Evaluation points: an n x n x n cartesian grid spanning [-1, 1]^3.
n = 50
a = numpy.linspace(-1, 1, n).tolist()
b = [paraBEM.PanelVector3(i, j, k) for i in a for j in a for k in a]

# Panel influence at every grid point.
# NOTE(review): taking element [1] assumes doublet_src_3_0_vsaero returns a
# (doublet, source) potential pair -- confirm against the paraBEM API.
pot = [doublet_src_3_0_vsaero(i, p)[1] for i in b]
vel = [src_3_0_vsaero_v(i, p) for i in b]

# Dump the grid with potential (scalar) and velocity (vector) point data
# into a VTK structured-grid file for visualization.
writer = VtkWriter()
with open(check_path("results/src_panel.vtk"), "w") as _file:
    writer.structed_grid(_file, "duplet", [n, n, n])
    writer.points(_file, b)
    writer.data(_file, pot, name="potential", _type="SCALARS", data_type="POINT_DATA")
    writer.data(_file, vel, name="velocity", _type="VECTORS", data_type="POINT_DATA")
|
looooo/paraBEM
|
examples/vtk/vtk_panel_source.py
|
Python
|
gpl-3.0
| 937
|
[
"VTK"
] |
e2d0d19f53bdd27553997c1090e2fa65efa9daaa3d2c4f349b4358230bce9ceb
|
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import time
import string
import rpm
import sys
try:
# python 2
import xmlrpclib
except ImportError:
# python3
import xmlrpc.client as xmlrpclib
from spacewalk.common.usix import IntType
# common module
from spacewalk.common.usix import raise_with_tb
from spacewalk.common import rhnCache, rhnFlags
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnTranslate import _
# local module
import rhnUser
import rhnSQL
import rhnLib
class NoBaseChannelError(Exception):
    """No base channel exists for the requested release/arch combination."""
    pass


class InvalidServerArchError(Exception):
    """The given server architecture could not be found."""
    pass


class BaseChannelDeniedError(Exception):
    """Subscription to the base channel was denied (insufficient permissions)."""
    pass


class ChannelException(Exception):
    """Base class for channel errors that carry the offending channel id."""

    def __init__(self, channel_id=None, *args, **kwargs):
        Exception.__init__(self, *args, **kwargs)
        # Id of the channel this error refers to (may be None)
        self.channel_id = channel_id
        # Optionally filled in by callers with more channel detail
        self.channel = None


class ModifiedError(ChannelException):
    """The backing database row was modified concurrently (see _save)."""
    pass


class IncompatibilityError(Exception):
    """Channel arch is incompatible with its parent channel's arch."""
    pass


class InvalidDataError(Exception):
    """A supplied value (org, arch, family, parent channel...) is unknown."""
    pass


class ChannelNotFoundError(Exception):
    """Requested channel does not exist."""
    pass


class NoToolsChannel(Exception):
    """No tools channel available (raised by callers of this module)."""
    pass


class NoChildChannels(Exception):
    """No child channels available (raised by callers of this module)."""
    pass


class InvalidChannel(Exception):
    """Channel is not valid for the requested operation."""
    pass
class BaseDatabaseObject:
    """Thin wrapper around an rhnSQL.Row that synthesizes get_<field>() /
    set_<field>() accessors on demand via __getattr__."""

    def __init__(self):
        # Backing database row; created lazily by _new_row()
        self._row = None

    def __getattr__(self, name):
        # Fabricate an accessor callable for any get_*/set_* attribute;
        # everything else is a genuine missing attribute.
        if name.startswith('get_'):
            return rhnLib.CallableObj(name[4:], self._get)
        if name.startswith('set_'):
            return rhnLib.CallableObj(name[4:], self._set)
        raise AttributeError(name)

    def _set(self, name, val):
        # Ensure a row exists before writing a field
        self._new_row()
        self._row[name] = val

    def _get(self, name):
        return self._row[name]

    def _new_row(self):
        # Subclasses must create self._row here
        raise NotImplementedError()

    def save(self, with_updates=1):
        """Persist the row; roll the transaction back on any failure and
        re-raise the original exception."""
        try:
            return self._save(with_updates=with_updates)
        except:
            rhnSQL.rollback()
            raise

    def _save(self, with_updates=1):
        # Translate optimistic-locking conflicts into our ModifiedError,
        # preserving the original traceback
        try:
            self._row.save(with_updates=with_updates)
        except rhnSQL.ModifiedRowError:
            raise_with_tb(ModifiedError(self._row['id']), sys.exc_info()[2])
class BaseChannelObject(BaseDatabaseObject):
    """Common behavior for channel-like DB objects (Channel, ChannelFamily):
    loading by label/id/dict, org lookups, and generic field serialization.

    Subclasses set _table_name, _sequence_name and _generic_fields."""

    # Database table backing the object
    _table_name = None
    # Sequence used to allocate new ids
    _sequence_name = None
    # Field names handled generically by load_from_dict()/as_dict()
    _generic_fields = []

    def load_by_label(self, label):
        """(Re)load this object from the row with the given label."""
        self.__init__()
        self._row = rhnSQL.Row(self._table_name, 'label')
        self._row.load(label)
        return self

    def load_by_id(self, obj_id):
        """(Re)load this object from the row with the given id."""
        self.__init__()
        self._row = rhnSQL.Row(self._table_name, 'id')
        self._row.load(obj_id)
        return self

    def load_from_dict(self, dict):
        """Populate the object from a plain dictionary by calling the
        set_<field>() accessor for each generic field."""
        # Re-init
        self.__init__()
        for f in self._generic_fields:
            method = getattr(self, 'set_' + f)
            method(dict.get(f))
        # Hook for subclass-specific (non-generic) fields
        self._load_rest(dict)
        return self

    def _load_rest(self, dict):
        # Overridden by subclasses that have extra fields to load
        pass

    def exists(self):
        """Return true when the object is backed by a real database row."""
        if not self._row:
            return 0
        return self._row.real

    def get_org_id(self):
        """Return the owning org: the web_contact login when one matches,
        otherwise the raw numeric org_id (None when unset)."""
        org_id = self._row['org_id']
        if org_id is None:
            return None
        row = self._lookup_org_id(org_id)
        if row.real:
            return row['login']
        return org_id

    def set_org_id(self, val):
        """Set org_id from either a numeric id, None, or a login name
        (resolved via web_contact); raises InvalidDataError for an
        unknown login."""
        self._new_row()
        if val is None or isinstance(val, IntType):
            self._row['org_id'] = val
            return
        row = self._lookup_org_by_login(val)
        if not row.real:
            raise InvalidDataError("No such org", val)
        self._row['org_id'] = row['org_id']

    def _lookup_org_id(self, org_id):
        row = rhnSQL.Row('web_contact', 'org_id')
        row.load(org_id)
        return row

    def _lookup_org_by_login(self, login):
        row = rhnSQL.Row('web_contact', 'login')
        row.load(login)
        return row

    def _lookup_channel_family_by_id(self, channel_family_id):
        row = rhnSQL.Row('rhnChannelFamily', 'id')
        row.load(channel_family_id)
        return row

    def _lookup_channel_family_by_label(self, channel_family):
        row = rhnSQL.Row('rhnChannelFamily', 'label')
        row.load(channel_family)
        return row

    def _new_row(self):
        # Lazily create a fresh row keyed by a newly allocated sequence id
        if self._row is None:
            self._row = rhnSQL.Row(self._table_name, 'id')
            channel_id = rhnSQL.Sequence(self._sequence_name).next()
            self._row.create(channel_id)

    def as_dict(self):
        """Serialize the generic fields into a plain dictionary via the
        get_<field>() accessors."""
        ret = {}
        for f in self._generic_fields:
            method = getattr(self, 'get_' + f)
            val = method()
            ret[f] = val
        return ret
# Channel creation


class Channel(BaseChannelObject):
    """An rhnChannel row together with its channel-family memberships and
    its distribution (release -> os) mappings."""

    _table_name = 'rhnChannel'
    _sequence_name = 'rhn_channel_id_seq'
    _generic_fields = ['label', 'name', 'summary', 'description', 'basedir',
        'org_id', 'gpg_key_url', 'gpg_key_id', 'gpg_key_fp', 'end_of_life',
        'channel_families', 'channel_arch', ]

    def __init__(self):
        BaseChannelObject.__init__(self)
        # Ids of the channel families this channel belongs to
        self._channel_families = []
        # release -> os mapping (rhnDistChannelMap rows)
        self._dists = {}
        # Arch id of the parent channel; checked for compatibility at save
        self._parent_channel_arch = None

    def load_by_label(self, label):
        """Load the channel row plus its families and dists."""
        BaseChannelObject.load_by_label(self, label)
        self._load_channel_families()
        self._load_dists()
        return self

    def load_by_id(self, label):
        """Load the channel row (by id) plus its families and dists."""
        BaseChannelObject.load_by_id(self, label)
        self._load_channel_families()
        self._load_dists()
        return self

    def _load_rest(self, dict):
        # Non-generic field: the list of dist mappings
        dists = dict.get('dists')
        if not dists:
            return
        for dist in dists:
            release = dist.get('release')
            os = dist.get('os')
            self._dists[release] = os

    _query_get_db_channel_families = rhnSQL.Statement("""
        select channel_family_id
          from rhnChannelFamilyMembers
         where channel_id = :channel_id
    """)

    def _get_db_channel_families(self, channel_id):
        """Return the family ids currently stored in the DB for channel_id."""
        if channel_id is None:
            return []
        h = rhnSQL.prepare(self._query_get_db_channel_families)
        h.execute(channel_id=channel_id)
        return [x['channel_family_id'] for x in h.fetchall_dict() or []]

    def _load_channel_families(self):
        channel_id = self._row.get('id')
        self._channel_families = self._get_db_channel_families(channel_id)
        return 1

    def _load_dists(self):
        channel_id = self._row.get('id')
        dists = self._get_db_dists(channel_id)
        self.set_dists(dists)

    _query_get_db_dists = rhnSQL.Statement("""
        select os, release
          from rhnDistChannelMap
         where channel_id = :channel_id
           and org_id is null
    """)

    def _get_db_dists(self, channel_id):
        """Return the (os, release) dist rows currently in the DB."""
        if channel_id is None:
            return []
        h = rhnSQL.prepare(self._query_get_db_dists)
        h.execute(channel_id=channel_id)
        return h.fetchall_dict() or []

    # Setters
    def set_channel_arch(self, val):
        """Resolve an arch label (sanitized to channel-*) to its id;
        raises InvalidDataError for an unknown arch."""
        self._new_row()
        arch = self._sanitize_arch(val)
        row = self._lookup_channel_arch(arch)
        if not row.real:
            raise InvalidDataError("No such architecture", arch)
        self._row['channel_arch_id'] = row['id']

    def _sanitize_arch(self, arch):
        # Normalize arch labels to the channel-<arch> form;
        # i386 maps specially to channel-ia32
        if arch == 'i386':
            return 'channel-ia32'
        p = 'channel-'
        if arch[:len(p)] != p:
            return p + arch
        return arch

    def set_parent_channel(self, val):
        """Set (or clear with None) the parent channel by label; remembers
        the parent's arch for the compatibility check in _save."""
        self._new_row()
        if val is None:
            self._row['parent_channel'] = None
            return
        row = self._lookup_channel_by_label(val)
        if not row.real:
            raise InvalidDataError("Invalid parent channel", val)
        self._row['parent_channel'] = row['id']
        self._parent_channel_arch = row['channel_arch_id']

    def set_channel_families(self, val):
        """Replace the family membership list with the given labels."""
        self._new_row()
        self._channel_families = []
        for cf_label in val:
            self.add_channel_family(cf_label)

    def set_end_of_life(self, val):
        """Set end_of_life from a 'YYYY-MM-DD' string (or None to clear)."""
        self._new_row()
        if val is None:
            self._row['end_of_life'] = None
            return
        # Parse the date and convert to a DB timestamp
        t = time.strptime(val, "%Y-%m-%d")
        seconds = time.mktime(t)
        t = rhnSQL.TimestampFromTicks(seconds)
        self._row['end_of_life'] = t

    def add_channel_family(self, name):
        """Append one family membership by label; raises InvalidDataError
        for an unknown family."""
        self._new_row()
        cf = self._lookup_channel_family_by_label(name)
        if not cf.real:
            raise InvalidDataError("Invalid channel family", name)
        self._channel_families.append(cf['id'])

    def add_dist(self, release, os=None):
        # Default os per historical convention
        if os is None:
            os = 'Red Hat Linux'
        self._dists[release] = os

    def set_dists(self, val):
        """Replace the dist map from a list of {'release':..., 'os':...}."""
        self._dists.clear()
        for h in val:
            release = h['release']
            os = h['os']
            self.add_dist(release, os)

    # Getters
    def get_parent_channel(self):
        """Return the parent channel's label, or None."""
        pc_id = self._row['parent_channel']
        if pc_id is None:
            return None
        return self._lookup_channel_by_id(pc_id)['label']

    def get_channel_families(self):
        """Return the labels of the families this channel belongs to."""
        cf_labels = []
        for cf_id in self._channel_families:
            row = self._lookup_channel_family_by_id(cf_id)
            if row.real:
                cf_labels.append(row['label'])
        return cf_labels

    def get_channel_arch(self):
        """Return the channel arch label for the stored arch id."""
        channel_arch_id = self._row['channel_arch_id']
        row = self._lookup_channel_arch_by_id(channel_arch_id)
        assert row.real
        return row['label']

    def get_end_of_life(self):
        """Return end_of_life formatted as 'YYYY-MM-DD HH:MM:SS', or None."""
        date_obj = self._row['end_of_life']
        if date_obj is None:
            return None
        return "%s-%02d-%02d %02d:%02d:%02d" % (
            date_obj.year, date_obj.month, date_obj.day,
            date_obj.hour, date_obj.minute, date_obj.second)

    def get_dists(self):
        """Return the dist map as a list of {'release':..., 'os':...}."""
        ret = []
        for release, os in self._dists.items():
            ret.append({'release': release, 'os': os})
        return ret

    def _lookup_channel_by_id(self, channel_id):
        row = rhnSQL.Row('rhnChannel', 'id')
        row.load(channel_id)
        return row

    def _lookup_channel_by_label(self, channel):
        row = rhnSQL.Row('rhnChannel', 'label')
        row.load(channel)
        return row

    def _lookup_channel_arch(self, channel_arch):
        row = rhnSQL.Row('rhnChannelArch', 'label')
        row.load(channel_arch)
        return row

    def _lookup_channel_arch_by_id(self, channel_arch_id):
        row = rhnSQL.Row('rhnChannelArch', 'id')
        row.load(channel_arch_id)
        return row

    def _save(self, with_updates=1):
        """Save the row after an arch-compatibility check, then sync the
        family memberships and dist rows to the database."""
        if self._parent_channel_arch:
            if not self._compatible_channel_arches(self._parent_channel_arch,
                    self._row['channel_arch_id']):
                raise IncompatibilityError("Incompatible channel arches")
        BaseChannelObject._save(self, with_updates=with_updates)
        # Save channel families now
        self._save_channel_families()
        self._save_dists()

    _query_remove_channel_families = rhnSQL.Statement("""
        delete from rhnChannelFamilyMembers
         where channel_id = :channel_id
           and channel_family_id = :channel_family_id
    """)

    _query_add_channel_families = rhnSQL.Statement("""
        insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
        values (:channel_id, :channel_family_id)
    """)

    def _save_channel_families(self):
        """Diff in-memory family ids against the DB and apply the delta."""
        channel_id = self._row['id']
        db_cfids = self._get_db_channel_families(channel_id)
        # h: family ids present in the DB but not (yet) matched in memory
        h = {}
        for db_cfid in db_cfids:
            h[db_cfid] = None
        to_add = []
        for cfid in self._channel_families:
            if cfid in h:
                # Already in the DB -- keep it
                del h[cfid]
                continue
            to_add.append(cfid)
        # Whatever is left in h exists only in the DB -> remove
        to_delete = list(h.keys())
        if to_delete:
            h = rhnSQL.prepare(self._query_remove_channel_families)
            cids = [channel_id] * len(to_delete)
            h.executemany(channel_id=cids, channel_family_id=to_delete)
        if to_add:
            h = rhnSQL.prepare(self._query_add_channel_families)
            cids = [channel_id] * len(to_add)
            h.executemany(channel_id=cids, channel_family_id=to_add)

    def _save_dists(self):
        """Three-way diff of the in-memory dist map against the DB:
        remove stale rows, update changed os values, insert new ones."""
        channel_id = self._row['id']
        db_dists = self._get_db_dists(channel_id)
        d = self._dists.copy()
        to_add = [[], []]
        to_remove = []
        to_update = [[], []]
        for h in db_dists:
            release = h['release']
            os = h['os']
            if release not in d:
                # In the DB but not in memory -> delete
                to_remove.append(release)
                continue
            # Need to update?
            m_os = d[release]
            if m_os == os:
                # Nothing to do
                del d[release]
                continue
            to_update[0].append(release)
            to_update[1].append(os)
        # Everything else should be added
        for release, os in list(d.items()):
            to_add[0].append(release)
            to_add[1].append(os)
        self._remove_dists(to_remove)
        self._update_dists(to_update[0], to_update[1])
        self._add_dists(to_add[0], to_add[1])

    _query_add_dists = rhnSQL.Statement("""
        insert into rhnDistChannelMap
               (channel_id, channel_arch_id, release, os, org_id)
        values (:channel_id, :channel_arch_id, :release, :os, null)
    """)

    def _add_dists(self, releases, oses):
        self._modify_dists(self._query_add_dists, releases, oses)

    def _modify_dists(self, query, releases, oses):
        """Run *query* once per release; when oses is falsy (the delete
        case) the arch/os bind variables are omitted."""
        if not releases:
            return
        count = len(releases)
        channel_ids = [self._row['id']] * count
        query_args = {'channel_id': channel_ids, 'release': releases}
        if oses:
            channel_arch_ids = [self._row['channel_arch_id']] * count
            query_args.update({'channel_arch_id': channel_arch_ids,
                'os': oses})
        h = rhnSQL.prepare(query)
        h.executemany(**query_args)

    _query_update_dists = rhnSQL.Statement("""
        update rhnDistChannelMap
           set channel_arch_id = :channel_arch_id,
               os = :os
         where channel_id = :channel_id
           and release = :release
           and org_id is null
    """)

    def _update_dists(self, releases, oses):
        self._modify_dists(self._query_update_dists, releases, oses)

    _query_remove_dists = rhnSQL.Statement("""
        delete from rhnDistChannelMap
         where channel_id = :channel_id
           and release = :release
           and org_id is null
    """)

    def _remove_dists(self, releases):
        self._modify_dists(self._query_remove_dists, releases, None)

    def _compatible_channel_arches(self, parent_channel_arch, channel_arch):
        # This could get more complicated later
        return (parent_channel_arch == channel_arch)

    def as_dict(self):
        """Generic fields plus the dist mappings."""
        ret = BaseChannelObject.as_dict(self)
        ret['dists'] = self.get_dists()
        return ret
class ChannelFamily(BaseChannelObject):
    """An rhnChannelFamily row; all fields are handled generically."""
    _table_name = 'rhnChannelFamily'
    _sequence_name = 'rhn_channel_family_id_seq'
    _generic_fields = ['label', 'name', 'product_url']
def _load_by_id(query, item_object, pattern=None):
    """Run an id-selecting *query* (optionally narrowed by a label LIKE
    *pattern*), load each id through *item_object* and return the
    resulting dictionaries."""
    bind = {}
    if pattern:
        query += "and label like :pattern"
        bind['pattern'] = pattern
    cursor = rhnSQL.prepare(query)
    cursor.execute(**bind)
    results = []
    row = cursor.fetchone_dict()
    while row:
        loaded = item_object.load_by_id(row['id'])
        results.append(loaded.as_dict())
        row = cursor.fetchone_dict()
    return results
def list_channel_families(pattern=None):
    """Return dictionaries for every public channel family (org_id null),
    optionally filtered by a label LIKE *pattern*."""
    base_query = """
        select id
          from rhnChannelFamily
         where org_id is null
    """
    return _load_by_id(base_query, ChannelFamily(), pattern)
def list_channels(pattern=None):
    """Return dictionaries for every channel, optionally filtered by a
    label LIKE *pattern* (the 1=1 keeps the appended filter valid SQL)."""
    base_query = """
        select id
          from rhnChannel
         where 1=1
    """
    return _load_by_id(base_query, Channel(), pattern)
# makes sure there are no None values in dictionaries, etc.
def __stringify(object):
    """Recursively replace None with '' and coerce everything else to str.

    Lists, tuples and dicts are rebuilt with their elements (and dict
    keys) stringified; any other value is passed through str().
    """
    if object is None:
        return ''
    # isinstance instead of exact type() comparison: identical for the
    # plain builtin containers this module passes in, and also handles
    # subclasses correctly
    if isinstance(object, list):
        return list(map(__stringify, object))
    # We need to know __stringify converts immutable types into immutable
    # types
    if isinstance(object, tuple):
        return tuple(map(__stringify, object))
    if isinstance(object, dict):
        ret = {}
        for k, v in object.items():
            ret[__stringify(k)] = __stringify(v)
        return ret
    # by default, we just str() it
    return str(object)
# return the channel information
def channel_info(channel):
    """Return the rhnChannel row (plus its arch label) for the channel
    with the given label, stringified (None -> ''); None when no match."""
    log_debug(3, channel)
    # get the channel information
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from
        rhnChannel c,
        rhnChannelArch ca
    where
        c.channel_arch_id = ca.id
        and c.label = :channel
    """)
    h.execute(channel=str(channel))
    ret = h.fetchone_dict()
    return __stringify(ret)
# return information about a base channel for a server_id
def get_base_channel(server_id, none_ok=0):
    """Return the (stringified) base channel the server is subscribed to,
    or None; logs an error unless none_ok is set."""
    log_debug(3, server_id)
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c, rhnChannelArch ca, rhnServerChannel sc
    where sc.server_id = :server_id
        and sc.channel_id = c.id
        and c.channel_arch_id = ca.id
        and c.parent_channel is NULL
    """)
    h.execute(server_id=str(server_id))
    ret = h.fetchone_dict()
    if not ret:
        if not none_ok:
            log_error("Server not subscribed to a base channel!", server_id)
        return None
    return __stringify(ret)
def channels_for_server(server_id):
    """channel info list for all channels accessible by this server.

    list channels a server_id is subscribed to
    We DO NOT want to cache this one because we depend on getting
    accurate information and the caching would only introduce more
    overhead on an otherwise very fast query
    """
    log_debug(3, server_id)
    try:
        server_id = int(server_id)
    except:
        raise_with_tb(rhnFault(8, server_id), sys.exc_info()[2])  # Invalid rhnServer.id
    # XXX: need to return unsubsubcribed channels and a way to indicate
    # they arent already subscribed
    # list all the channels this server is subscribed to. We also want
    # to know if any of those channels has local packages in it... A
    # local package has a org_id set.
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        c.gpg_key_url,
        case s.org_id when c.org_id then 1 else 0 end local_channel,
        TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from
        rhnChannelArch ca,
        rhnChannel c,
        rhnServerChannel sc,
        rhnServer s
    where
        c.id = sc.channel_id
        and sc.server_id = :server_id
        and s.id = :server_id
        and ca.id = c.channel_arch_id
    order by c.parent_channel nulls first
    """)
    h.execute(server_id=str(server_id))
    channels = h.fetchall_dict()
    if not channels:
        # Not fatal here -- callers decide; return an empty list
        log_error("Server not subscribed to any channels", server_id)
        channels = []
    return __stringify(channels)
def getSubscribedChannels(server_id):
    """
    Format the response from channels_for_server in the way that the
    handlers expect.
    """
    formatted = []
    for info in channels_for_server(server_id):
        if 'last_modified' not in info:
            # No last_modified attribute -- probably an empty channel,
            # so ignore it
            continue
        entry = [
            info['label'],
            info['last_modified'],
            # isBaseChannel: "1" only when there is no parent channel
            "0" if info['parent_channel'] else "1",
            # isLocalChannel
            "1" if info['local_channel'] else "0",
        ]
        formatted.append(entry)
    return formatted
def isCustomChannel(channel_id):
    """
    Input: channel_id (from DB Table rhnChannel.id)
    Returns: True if this is a custom channel
             False if this is not a custom channel
    """
    log_debug(3, channel_id)
    h = rhnSQL.prepare("""
    select
        rcf.label
    from
        rhnChannelFamily rcf,
        rhnChannelFamilyMembers rcfm
    where
        rcfm.channel_id = :channel_id
        and rcfm.channel_family_id = rcf.id
        and rcf.org_id is not null
    """)
    h.execute(channel_id=str(channel_id))
    row = h.fetchone()
    # Custom channels live in the per-org private channel family
    if row and row[0].startswith("private-channel-family"):
        log_debug(3, channel_id, "is a custom channel")
        return True
    return False
# Fetch base channel for a given release and arch
def base_channel_for_rel_arch(release, server_arch, org_id=-1,
        user_id=None):
    """Return the base channel row for the release/arch (resolved by the
    rhn_channel.base_channel_for_release_arch DB function), or None.

    Raises BaseChannelDeniedError (DB errno 20263) or
    InvalidServerArchError (DB errno 20244)."""
    log_debug(4, release, server_arch, org_id, user_id)
    query = """
        select ca.label arch,
               c.id,
               c.parent_channel,
               c.org_id,
               c.label,
               c.name,
               c.summary,
               c.description,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
          from rhnChannel c,
               rhnChannelArch ca
         where c.channel_arch_id = ca.id
           and c.id = rhn_channel.base_channel_for_release_arch(
               :release, :server_arch, :org_id, :user_id)
    """
    # Named savepoint so a failed DB-function call can be rolled back
    # without losing the surrounding transaction
    rhnSQL.transaction("base_channel_for_rel_arch")
    h = rhnSQL.prepare(query)
    try:
        h.execute(release=str(release), server_arch=str(server_arch),
                  org_id=org_id, user_id=user_id)
    except rhnSQL.SQLSchemaError:
        e = sys.exc_info()[1]
        rhnSQL.rollback("base_channel_for_rel_arch")
        if e.errno == 20263:
            # Insufficient permissions for subscription
            log_debug(4, 'BaseChannelDeniedError')
            raise_with_tb(BaseChannelDeniedError(), sys.exc_info()[2])
        if e.errno == 20244:
            # Server architecture could not be found
            log_debug(4, 'InvalidServerArchError')
            raise_with_tb(InvalidServerArchError(str(server_arch)), sys.exc_info()[2])
        # Re-raise unknown exceptions
        log_debug(4, 'unkown exception')
        raise
    log_debug(4, 'got past exceptions')
    return h.fetchone_dict()
def base_eus_channel_for_ver_rel_arch(version, release, server_arch,
                                      org_id=-1, user_id=None):
    """
    given a redhat-release version, release, and server arch, return a list
    of dicts containing the details of the channel z streams either match the
    version/release pair, or are greater.
    """
    log_debug(4, version, release, server_arch, org_id, user_id)
    eus_channels_query = """
        select c.id,
               c.label,
               c.name,
               rcm.release,
               c.receiving_updates
          from rhnChannelPermissions cp,
               rhnChannel c,
               rhnServerArch sa,
               rhnServerChannelArchCompat scac,
               rhnReleaseChannelMap rcm
         where rcm.version = :version
           and scac.server_arch_id = sa.id
           and sa.label = :server_arch
           and scac.channel_arch_id = rcm.channel_arch_id
           and rcm.channel_id = c.id
           and cp.channel_id = c.id
           and cp.org_id = :org_id
           and rhn_channel.loose_user_role_check(c.id, :user_id,
               'subscribe') = 1
    """
    eus_channels_prepared = rhnSQL.prepare(eus_channels_query)
    eus_channels_prepared.execute(version=version,
                                  server_arch=server_arch,
                                  user_id=user_id,
                                  org_id=org_id)
    channels = []
    while True:
        channel = eus_channels_prepared.fetchone_dict()
        if channel is None:
            break
        # the release part of redhat-release for rhel 4 is like
        # 6.1 or 7; we just look at the first digit.
        # for rhel 5 and up it's the full release number of rhel, followed by
        # the true release number of the rpm, like 5.0.0.9 (for the 9th
        # version of the redhat-release rpm, for RHEL GA)
        db_release = channel['release']
        if version in ['4AS', '4ES']:
            parts = 1
        else:
            parts = 3
        # Compare only the significant leading release components
        server_rel = '.'.join(release.split('.')[:parts])
        channel_rel = '.'.join(db_release.split('.')[:parts])
        # XXX we're no longer using the is_default column from the db
        # Equal release -> this channel is the default for the server
        if rpm.labelCompare(('0', server_rel, '0'),
                            ('0', channel_rel, '0')) == 0:
            channel['is_default'] = 'Y'
            channels.append(channel)
        # Newer channel release -> offered, but not the default
        if rpm.labelCompare(('0', server_rel, '0'),
                            ('0', channel_rel, '0')) < 0:
            channel['is_default'] = 'N'
            channels.append(channel)
    return channels
def get_channel_for_release_arch(release, server_arch, org_id=None):
    """Return the base channel row mapped (via [Org]DistChannelMap) to the
    given release and normalized server arch, or None.

    With org_id the per-org mapping table is consulted instead of the
    global one."""
    log_debug(3, release, server_arch)
    server_arch = rhnLib.normalize_server_arch(str(server_arch))
    log_debug(3, 'normalized arch as %s' % server_arch)
    if org_id is None:
        # Global (org-independent) dist channel map
        query = """
            select distinct
                   ca.label arch,
                   c.id,
                   c.parent_channel,
                   c.org_id,
                   c.label,
                   c.name,
                   c.summary,
                   c.description,
                   to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
              from rhnDistChannelMap dcm,
                   rhnChannel c,
                   rhnChannelArch ca,
                   rhnServerChannelArchCompat scac,
                   rhnServerArch sa
             where scac.server_arch_id = sa.id
               and sa.label = :server_arch
               and scac.channel_arch_id = dcm.channel_arch_id
               and dcm.release = :release
               and dcm.channel_id = c.id
               and dcm.channel_arch_id = c.channel_arch_id
               and dcm.org_id is null
               and c.parent_channel is null
               and c.org_id is null
               and c.channel_arch_id = ca.id
        """
    else:
        # Per-org dist channel map
        query = """
            select distinct
                   ca.label arch,
                   c.id,
                   c.parent_channel,
                   c.org_id,
                   c.label,
                   c.name,
                   c.summary,
                   c.description,
                   to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
              from rhnOrgDistChannelMap odcm,
                   rhnChannel c,
                   rhnChannelArch ca,
                   rhnServerChannelArchCompat scac,
                   rhnServerArch sa
             where scac.server_arch_id = sa.id
               and sa.label = :server_arch
               and scac.channel_arch_id = odcm.channel_arch_id
               and odcm.release = :release
               and odcm.channel_id = c.id
               and odcm.channel_arch_id = c.channel_arch_id
               and odcm.org_id = :org_id
               and c.parent_channel is null
               and c.org_id is null
               and c.channel_arch_id = ca.id
        """
    h = rhnSQL.prepare(query)
    h.execute(release=str(release), server_arch=server_arch, org_id=org_id)
    row = h.fetchone_dict()
    if not row:
        # No channels for this guy
        log_debug(3, 'No channles for this guy')
        return None
    log_debug(3, 'row is %s' % str(row))
    return row
def applet_channels_for_uuid(uuid):
    """Return the channel rows (plus the server's channels_changed
    timestamp) for every channel subscribed by the server with *uuid*."""
    log_debug(3, uuid)
    query = """
        select distinct
               ca.label arch,
               c.id,
               c.parent_channel,
               c.org_id,
               c.label,
               c.name,
               c.summary,
               c.description,
               to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
               to_char(s.channels_changed, 'YYYYMMDDHH24MISS') server_channels_changed
          from rhnChannelArch ca,
               rhnChannel c,
               rhnServerChannel sc,
               rhnServer s,
               rhnServerUuid su
         where su.uuid = :uuid
           and su.server_id = s.id
           and su.server_id = sc.server_id
           and sc.channel_id = c.id
           and c.channel_arch_id = ca.id
    """
    statement = rhnSQL.prepare(query)
    statement.execute(uuid=uuid)
    return statement.fetchall_dict() or []
# retrieve a list of public channels for a given release and architecture
# we cannot cache this if it involves an org_id
# If a user_id is passed to this function, and all the available base channels
# for this server_arch/release combination are denied by the org admin, this
# function raises BaseChannelDeniedError
def channels_for_release_arch(release, server_arch, org_id=-1, user_id=None):
    """Return the base channel plus all subscribable child channels for
    the given release/arch, stringified.

    Raises NoBaseChannelError when no base channel exists, and propagates
    BaseChannelDeniedError / InvalidServerArchError from
    base_channel_for_rel_arch."""
    if not org_id:
        org_id = -1
    # Fix: str.strip() method instead of string.strip() -- the string
    # module's function forms were removed in Python 3, which this module
    # otherwise supports (see the xmlrpc.client import fallback)
    org_id = str(org_id).strip()
    log_debug(3, release, server_arch, org_id)
    # Can raise BaseChannelDeniedError or InvalidServerArchError
    base_channel = base_channel_for_rel_arch(release, server_arch,
                                             org_id=org_id, user_id=user_id)
    if not base_channel:
        raise NoBaseChannelError()
    # At this point, base_channel is not null
    # We assume here that subchannels are compatible with the base channels,
    # so there would be no need to check for arch compatibility from this
    # point
    h = rhnSQL.prepare("""
    select
        ca.label arch,
        c.id,
        c.parent_channel,
        c.org_id,
        c.label,
        c.name,
        c.summary,
        c.description,
        to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
        -- If user_id is null, then the channel is subscribable
        rhn_channel.loose_user_role_check(c.id, :user_id, 'subscribe')
            subscribable
    from
        rhnChannelPermissions cp,
        rhnOrgDistChannelMap odcm,
        rhnChannel c,
        rhnChannelArch ca
    where
        c.id = odcm.channel_id
        and odcm.os in (
            'Powertools'
        )
        and odcm.for_org_id = :org_id
        and c.channel_arch_id = ca.id
        and cp.channel_id = c.id
        and cp.org_id = :org_id
        and c.parent_channel = :parent_channel
    """)
    h.execute(org_id=org_id,
              parent_channel=base_channel['id'], user_id=user_id)
    channels = [base_channel]
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        # Strip the helper column before returning the row
        subscribable = row['subscribable']
        del row['subscribable']
        if not subscribable:
            # Not allowed to subscribe to this channel
            continue
        channels.append(row)
    return __stringify(channels)
_query_get_source_packages_from_ids = rhnSQL.Statement("""
    select srpm.name
      from rhnChannelPackage cp,
           rhnPackage p,
           rhnSourceRPM srpm
     where cp.channel_id = :channel_id
       and cp.package_id = p.id
       and p.source_rpm_id = srpm.id
""")


def list_packages_source(channel_id):
    """Return [name, version, release, epoch] for every source RPM in the
    channel, with any trailing '.rpm' stripped from the file name."""
    ret = []
    h = rhnSQL.prepare(_query_get_source_packages_from_ids)
    h.execute(channel_id=channel_id)
    results = h.fetchall_dict()
    if results:
        for r in results:
            r = r['name']
            # Fix: str methods instead of string.find/string.replace --
            # the string module's function forms were removed in Python 3
            if ".rpm" in r:
                r = r.replace(".rpm", "")
            new_evr = rhnLib.make_evr(r, source=1)
            new_evr_list = [new_evr['name'], new_evr['version'],
                            new_evr['release'], new_evr['epoch']]
            ret.append(new_evr_list)
    return ret
# the latest packages from the specified channel
_query_all_packages_from_channel_checksum = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size,
ct.label as checksum_type,
c.checksum
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa,
rhnChecksumType ct,
rhnChecksum c
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
and p.checksum_id = c.id
and c.checksum_type_id = ct.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages with checksum info
def list_all_packages_checksum_sql(channel_id):
    """Fetch every package of a channel as
    (name, version, release, epoch, arch, size, checksum_type, checksum)
    tuples; returns [] for an empty channel.
    """
    log_debug(3, channel_id)
    h = rhnSQL.prepare(_query_all_packages_from_channel_checksum)
    h.execute(channel_id=str(channel_id))
    rows = h.fetchall_dict()
    if not rows:
        return []
    # Flatten each stringified row dict into the tuple shape callers expect.
    result = []
    for row in __stringify(rows):
        result.append((row["name"], row["version"], row["release"],
                       row["epoch"], row["arch"], row["package_size"],
                       row['checksum_type'], row['checksum']))
    return result
# This function executes the SQL call for listing latest packages with
# checksum info
def list_packages_checksum_sql(channel_id):
    """Return the latest package per name in a channel, with checksum info.

    Each element is a tuple:
    (name, version, release, epoch, arch, package_size, checksum_type,
     checksum).  Returns [] when the channel has no packages.
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    query = """
        select
            pn.name,
            pevr.version,
            pevr.release,
            pevr.epoch,
            pa.label arch,
            full_channel.package_size,
            full_channel.checksum_type,
            full_channel.checksum
        from
            rhnPackageArch pa,
            ( select
                  p.name_id,
                  max(pe.evr) evr
              from
                  rhnChannelPackage cp,
                  rhnPackage p,
                  rhnPackageEVR pe
              where
                  cp.channel_id = :channel_id
              and cp.package_id = p.id
              and p.evr_id = pe.id
              group by p.name_id
            ) listall,
            ( select distinct
                  p.package_size,
                  p.name_id,
                  p.evr_id,
                  p.package_arch_id,
                  ct.label as checksum_type,
                  c.checksum
              from
                  rhnChannelPackage cp,
                  rhnPackage p,
                  rhnChecksumType ct,
                  rhnChecksum c
              where
                  cp.channel_id = :channel_id
              and cp.package_id = p.id
              and p.checksum_id = c.id
              and c.checksum_type_id = ct.id
            ) full_channel,
            -- Rank the package's arch
            ( select
                  package_arch_id,
                  count(*) rank
              from
                  rhnServerPackageArchCompat
              group by package_arch_id
            ) arch_rank,
            rhnPackageName pn,
            rhnPackageEVR pevr
        where
            pn.id = listall.name_id
            -- link back to the specific package
        and full_channel.name_id = listall.name_id
        and full_channel.evr_id = pevr.id
        and pevr.evr = listall.evr
        and pa.id = full_channel.package_arch_id
        and pa.id = arch_rank.package_arch_id
        order by pn.name, arch_rank.rank desc
    """
    h = rhnSQL.prepare(query)
    h.execute(channel_id=str(channel_id))
    ret = h.fetchall_dict()
    if not ret:
        return []
    # process the results
    ret = [(a["name"], a["version"], a["release"], a["epoch"],
            a["arch"], a["package_size"], a['checksum_type'],
            a['checksum']) for a in __stringify(ret)]
    return ret
# This function executes the SQL call for listing packages
def _list_packages_sql(query, channel_id):
    """Execute a package-listing query bound to :channel_id and return
    (name, version, release, epoch, arch, package_size) tuples ([] if none).
    """
    h = rhnSQL.prepare(query)
    h.execute(channel_id=str(channel_id))
    rows = h.fetchall_dict()
    if not rows:
        return []
    # Convert each stringified row dict to the fixed tuple layout.
    packages = []
    for row in __stringify(rows):
        packages.append((row["name"], row["version"], row["release"],
                         row["epoch"], row["arch"], row["package_size"]))
    return packages
def list_packages_sql(channel_id):
    """Return the latest package (highest EVR) per name in a channel as
    (name, version, release, epoch, arch, package_size) tuples.
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    query = """
        select
            pn.name,
            pevr.version,
            pevr.release,
            pevr.epoch,
            pa.label arch,
            full_channel.package_size
        from
            rhnPackageArch pa,
            ( select
                  p.name_id,
                  max(pe.evr) evr
              from
                  rhnChannelPackage cp,
                  rhnPackage p,
                  rhnPackageEVR pe
              where
                  cp.channel_id = :channel_id
              and cp.package_id = p.id
              and p.evr_id = pe.id
              group by p.name_id
            ) listall,
            ( select distinct
                  p.package_size,
                  p.name_id,
                  p.evr_id,
                  p.package_arch_id
              from
                  rhnChannelPackage cp,
                  rhnPackage p
              where
                  cp.channel_id = :channel_id
              and cp.package_id = p.id
            ) full_channel,
            -- Rank the package's arch
            ( select
                  package_arch_id,
                  count(*) rank
              from
                  rhnServerPackageArchCompat
              group by package_arch_id
            ) arch_rank,
            rhnPackageName pn,
            rhnPackageEVR pevr
        where
            pn.id = listall.name_id
            -- link back to the specific package
        and full_channel.name_id = listall.name_id
        and full_channel.evr_id = pevr.id
        and pevr.evr = listall.evr
        and pa.id = full_channel.package_arch_id
        and pa.id = arch_rank.package_arch_id
        order by pn.name, arch_rank.rank desc
    """
    return _list_packages_sql(query, channel_id)
# the latest packages from the specified channel
_query_latest_packages_from_channel = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages
def list_all_packages_sql(channel_id):
    """Every package of a channel as
    (name, version, release, epoch, arch, package_size) tuples.
    """
    log_debug(3, channel_id)
    return _list_packages_sql(_query_latest_packages_from_channel,
                              channel_id)
# This function executes the SQL call for listing packages with all the
# dep information for each package also
def list_all_packages_complete_sql(channel_id):
    """List every package in a channel together with its full dependency
    metadata.

    Each element of the returned list is a tuple:
    (name, version, release, epoch, arch, package_size, provides,
     requires, conflicts, obsoletes, recommends, suggests, supplements,
     enhances, breaks, predepends)
    where each dependency field is a list of "name [rel] [version]"
    strings.  Returns [] when the channel has no packages.

    Note: this issues one dependency query per package (N+1 pattern);
    acceptable here since results are cached by the caller.
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    h = rhnSQL.prepare(_query_latest_packages_from_channel)
    # This gathers the provides, requires, conflicts, obsoletes info
    g = rhnSQL.prepare("""
        select
            pp.package_id,
            'provides' as capability_type,
            pp.capability_id,
            pp.sense,
            pc.name,
            pc.version
        from
            rhnPackageProvides pp,
            rhnPackageCapability pc
        where
            pp.package_id = :package_id
        and pp.capability_id = pc.id
        union all
        select
            pr.package_id,
            'requires' as capability_type,
            pr.capability_id,
            pr.sense,
            pc.name,
            pc.version
        from
            rhnPackageRequires pr,
            rhnPackageCapability pc
        where
            pr.package_id = :package_id
        and pr.capability_id = pc.id
        union all
        select
            prec.package_id,
            'recommends' as capability_type,
            prec.capability_id,
            prec.sense,
            pc.name,
            pc.version
        from
            rhnPackageRecommends prec,
            rhnPackageCapability pc
        where
            prec.package_id = :package_id
        and prec.capability_id = pc.id
        union all
        select
            sugg.package_id,
            'suggests' as capability_type,
            sugg.capability_id,
            sugg.sense,
            pc.name,
            pc.version
        from
            rhnPackageSuggests sugg,
            rhnPackageCapability pc
        where
            sugg.package_id = :package_id
        and sugg.capability_id = pc.id
        union all
        select
            supp.package_id,
            'supplements' as capability_type,
            supp.capability_id,
            supp.sense,
            pc.name,
            pc.version
        from
            rhnPackageSupplements supp,
            rhnPackageCapability pc
        where
            supp.package_id = :package_id
        and supp.capability_id = pc.id
        union all
        select
            enh.package_id,
            'enhances' as capability_type,
            enh.capability_id,
            enh.sense,
            pc.name,
            pc.version
        from
            rhnPackageEnhances enh,
            rhnPackageCapability pc
        where
            enh.package_id = :package_id
        and enh.capability_id = pc.id
        union all
        select
            pcon.package_id,
            'conflicts' as capability_type,
            pcon.capability_id,
            pcon.sense,
            pc.name,
            pc.version
        from
            rhnPackageConflicts pcon,
            rhnPackageCapability pc
        where
            pcon.package_id = :package_id
        and pcon.capability_id = pc.id
        union all
        select
            po.package_id,
            'obsoletes' as capability_type,
            po.capability_id,
            po.sense,
            pc.name,
            pc.version
        from
            rhnPackageObsoletes po,
            rhnPackageCapability pc
        where
            po.package_id = :package_id
        and po.capability_id = pc.id
        union all
        select
            brks.package_id,
            'breaks' as capability_type,
            brks.capability_id,
            brks.sense,
            pc.name,
            pc.version
        from
            rhnPackageBreaks brks,
            rhnPackageCapability pc
        where
            brks.package_id = :package_id
        and brks.capability_id = pc.id
        union all
        select
            pdep.package_id,
            'predepends' as capability_type,
            pdep.capability_id,
            pdep.sense,
            pc.name,
            pc.version
        from
            rhnPackagePredepends pdep,
            rhnPackageCapability pc
        where
            pdep.package_id = :package_id
        and pdep.capability_id = pc.id
    """)
    h.execute(channel_id=str(channel_id))
    # XXX This query has to order the architectures somehow; the 7.2 up2date
    # client was broken and was selecting the wrong architecture if athlons
    # are passed first. The rank ordering here should make sure that i386
    # kernels appear before athlons.
    ret = h.fetchall_dict()
    if not ret:
        return []
    for pkgi in ret:
        # Seed every dependency bucket so the tuple build below never KeyErrors.
        pkgi['provides'] = []
        pkgi['requires'] = []
        pkgi['conflicts'] = []
        pkgi['obsoletes'] = []
        pkgi['recommends'] = []
        pkgi['suggests'] = []
        pkgi['supplements'] = []
        pkgi['enhances'] = []
        pkgi['breaks'] = []
        pkgi['predepends'] = []
        g.execute(package_id=pkgi["id"])
        deps = g.fetchall_dict() or []
        for item in deps:
            version = item['version'] or ""
            relation = ""
            if version:
                # 'sense' is an RPM-style bitmask: 2 = <, 4 = >, 8 = =
                # (combinations give <=, >=).
                sense = item['sense'] or 0
                if sense & 2:
                    relation = relation + "<"
                if sense & 4:
                    relation = relation + ">"
                if sense & 8:
                    relation = relation + "="
                if relation:
                    relation = " " + relation
                if version:
                    version = " " + version
            dep = item['name'] + relation + version
            pkgi[item['capability_type']].append(dep)
    # process the results
    ret = [(a["name"], a["version"], a["release"], a["epoch"],
            a["arch"], a["package_size"], a['provides'],
            a['requires'], a['conflicts'], a['obsoletes'], a['recommends'], a['suggests'], a['supplements'], a['enhances'], a['breaks'], a['predepends']) for a in __stringify(ret)]
    return ret
def list_packages_path(channel_id):
    """Return the raw result rows containing the filesystem path of each
    package in the given channel ([] when the channel is empty).
    """
    log_debug(3, channel_id)
    # return the latest packages from the specified channel
    h = rhnSQL.prepare("""
    select
        p.path
    from
        rhnPackage p,
        rhnChannelPackage cp
    where
        cp.channel_id = :channel_id
    and cp.package_id = p.id
    """)
    h.execute(channel_id=str(channel_id))
    rows = h.fetchall()
    # Rows are returned as fetched, without stringification.
    return rows if rows else []
# list the latest packages for a channel
def list_packages(channel):
    """Latest package per name in a channel (cached, XMLRPC-encoded)."""
    return _list_packages(channel,
                          cache_prefix="list_packages",
                          function=list_packages_sql)
# list _all_ the packages for a channel
def list_all_packages(channel):
    """Every package in a channel (cached, XMLRPC-encoded)."""
    return _list_packages(channel,
                          cache_prefix="list_all_packages",
                          function=list_all_packages_sql)
# list _all_ the packages for a channel, including checksum info
def list_all_packages_checksum(channel):
    """Every package in a channel, with checksums (cached, XMLRPC-encoded)."""
    return _list_packages(channel,
                          cache_prefix="list_all_packages_checksum",
                          function=list_all_packages_checksum_sql)
# list _all_ the packages for a channel
def list_all_packages_complete(channel):
    """Every package in a channel, with full dependency info
    (cached, XMLRPC-encoded).
    """
    return _list_packages(channel,
                          cache_prefix="list_all_packages_complete",
                          function=list_all_packages_complete_sql)
# Common part of list_packages and list_all_packages*
# cache_prefix is the prefix for the file name we're caching this request as
# function is the generator function
def _list_packages(channel, cache_prefix, function):
    """Common driver for the list_packages* entry points.

    :param channel: channel label
    :param cache_prefix: prefix for the rhnCache entry name
    :param function: generator taking a channel id and returning the
        package tuples to serve
    :return: an XMLRPC-encoded response string (the "XMLRPC-Encoded-Response"
        flag is set so callers do not re-encode it); a plain [] for an
        empty channel (empty channels are not cached)
    :raises rhnFault: 40 when the channel label is unknown
    """
    log_debug(3, channel, cache_prefix)
    # try the caching thing first
    c_info = channel_info(channel)
    if not c_info:  # unknown channel
        raise rhnFault(40, "could not find any data on channel '%s'" % channel)
    # Cache entries are invalidated by the channel's last_modified stamp.
    cache_entry = "%s-%s" % (cache_prefix, channel)
    ret = rhnCache.get(cache_entry, c_info["last_modified"])
    if ret:  # we scored a cache hit
        log_debug(4, "Scored cache hit", channel)
        # Mark the response as being already XMLRPC-encoded
        rhnFlags.set("XMLRPC-Encoded-Response", 1)
        return ret
    ret = function(c_info["id"])
    if not ret:
        # we assume that channels with no packages are very fast to list,
        # so we don't bother caching...
        log_error("No packages found in channel",
                  c_info["id"], c_info["label"])
        return []
    # we need to append the channel label to the list
    ret = list(map(lambda a, c=channel: a + (c,), ret))
    ret = xmlrpclib.dumps((ret, ), methodresponse=1)
    # Mark the response as being already XMLRPC-encoded
    rhnFlags.set("XMLRPC-Encoded-Response", 1)
    # set the cache
    rhnCache.set(cache_entry, ret, c_info["last_modified"])
    return ret
def getChannelInfoForKickstart(kickstart):
    """Look up the channel label and last-modified stamp for the
    kickstartable tree with the given label (None if not found).
    """
    query = """
    select c.label,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c,
         rhnKickstartableTree kt
    where c.id = kt.channel_id
      and kt.label = :kickstart_label
    """
    cursor = rhnSQL.prepare(query)
    cursor.execute(kickstart_label=str(kickstart))
    return cursor.fetchone_dict()
def getChannelInfoForKickstartOrg(kickstart, org_id):
    """Same as getChannelInfoForKickstart, but restricted to trees owned
    by the given org (None if not found).
    """
    query = """
    select c.label,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c,
         rhnKickstartableTree kt
    where c.id = kt.channel_id
      and kt.label = :kickstart_label
      and kt.org_id = :org_id
    """
    cursor = rhnSQL.prepare(query)
    cursor.execute(kickstart_label=str(kickstart), org_id=int(org_id))
    return cursor.fetchone_dict()
def getChannelInfoForKickstartSession(session):
    """Resolve a kickstart session string to its channel label and
    last-modified stamp.

    NOTE(review): the return types are inconsistent — a malformed session
    string yields the tuple (None, None), while the query path returns a
    row dict (or None when no row matches).  Callers apparently must cope
    with both; confirm before unifying.
    """
    # decode the session string: the numeric session id precedes the
    # first 'x' / ':' separator
    try:
        session_id = int(session.split('x')[0].split(':')[0])
    except Exception:
        return None, None
    query = """
    select c.label,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c,
         rhnKickstartableTree kt,
         rhnKickstartSession ks
    where c.id = kt.channel_id
      and kt.id = ks.kstree_id
      and ks.id = :session_id
    """
    h = rhnSQL.prepare(query)
    h.execute(session_id=session_id)
    return h.fetchone_dict()
def getChildChannelInfoForKickstart(kickstart, child):
    """Look up a child channel (by label) of the base channel that hosts
    the given kickstartable tree (None if not found).
    """
    query = """
    select c.label,
           to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
    from rhnChannel c,
         rhnKickstartableTree kt,
         rhnKickstartSession ks,
         rhnChannel c2
    where c2.id = kt.channel_id
      and kt.label = :kickstart_label
      and c.label = :child_label
      and c.parent_channel = c2.id
    """
    cursor = rhnSQL.prepare(query)
    cursor.execute(kickstart_label=str(kickstart), child_label=str(child))
    return cursor.fetchone_dict()
def getChannelInfoForTinyUrl(tinyurl):
    """Resolve an enabled tiny-URL token to its target URL row
    (None if the token is unknown or disabled).
    """
    query = """
    select tu.url
    from rhnTinyUrl tu
    where tu.enabled = 'Y'
      and tu.token = :tinyurl
    """
    cursor = rhnSQL.prepare(query)
    cursor.execute(tinyurl=str(tinyurl))
    return cursor.fetchone_dict()
# list the obsoletes for a channel
def list_obsoletes(channel):
    """List the obsoletes declared by the latest packages of a channel.

    Each result element is
    (name, version, release, epoch, arch, obsolete_name, obsolete_version,
     sense).  Results are cached against the channel's last_modified stamp.

    :raises rhnFault: 40 when the channel label is unknown
    """
    log_debug(3, channel)
    # try the caching thing first
    c_info = channel_info(channel)
    if not c_info:  # unknown channel
        raise rhnFault(40, "could not find any data on channel '%s'" % channel)
    cache_entry = "list_obsoletes-%s" % channel
    ret = rhnCache.get(cache_entry, c_info["last_modified"])
    if ret:  # we scored a cache hit
        log_debug(4, "Scored cache hit", channel)
        return ret
    # Get the obsoleted packages
    h = rhnSQL.prepare("""
    select distinct
        pn.name,
        pe.version, pe.release, pe.epoch,
        pa.label arch,
        pc.name obsolete_name,
        pc.version obsolete_version,
        p_info.sense
    from rhnPackageCapability pc,
         rhnPackageArch pa,
         rhnPackageEVR pe,
         rhnPackageName pn,
         rhnPackage p,
         ( select cp.channel_id,
                  po.package_id, po.capability_id, po.sense
           from rhnPackageObsoletes po,
                rhnChannelPackage cp,
                rhnChannel c
           where 1=1
             and c.label = :channel
             and c.id = cp.channel_id
             and cp.package_id = po.package_id
         ) p_info
    where 1=1
      and p_info.package_id = p.id
      and p.name_id = pn.id
      and p.evr_id = pe.id
      and p.package_arch_id = pa.id
      and p_info.capability_id = pc.id
    """)
    h.execute(channel=str(channel))
    # Group obsoletes by NEVRA; renamed from 'hash' (shadowed the builtin)
    obsoletes_by_nevra = {}
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        row = __stringify(row)
        key = (row['name'], row['version'], row['release'],
               row["epoch"], row['arch'])
        value = key + (row['obsolete_name'], row['obsolete_version'],
                       row['sense'])
        obsoletes_by_nevra.setdefault(key, []).append(value)
    # Now grab a listall and match it against what we got, so only
    # obsoletes belonging to the latest packages are reported
    pkglist = list_packages_sql(c_info["id"])
    result = []
    for pkg in pkglist:
        key = tuple(pkg[:5])
        if key in obsoletes_by_nevra:
            result.extend(obsoletes_by_nevra[key])
    # we can cache this now
    rhnCache.set(cache_entry, result, c_info["last_modified"])
    return result
def __auth_user(server_id, username, password):
    """ Auth if user can add/remove channel from given server """
    log_debug(3, server_id, username)
    # check the username and password for compliance
    user = rhnUser.auth_username_password(username, password)
    # The user's password checks, verify that they have perms on that
    # server.  BUGFIX: the previous query used "select count(*)", which
    # always returns exactly one row, so "if not res" below could never
    # trigger and the permission check was a no-op.  Selecting a row only
    # when a permission entry exists restores the intended behavior.
    h = rhnSQL.prepare("""
    select 1
    from rhnUserServerPerms usp
    where usp.user_id = :user_id
    and usp.server_id = :server_id
    """)
    h.execute(user_id=str(user.getid()), server_id=str(server_id))
    res = h.fetchone_dict()
    if not res:
        # Not allowed to perform administrative tasks on this server
        raise rhnFault(37)
    return 1
# small wrapper around a PL/SQL function
def subscribe_sql(server_id, channel_id, commit=1):
    """Subscribe a server to a channel via the rhn_channel.subscribe_server
    stored procedure.

    :param commit: when truthy, commit the transaction on success
    :return: 1 on success
    :raises rhnFault: 38 when the server already has a base channel
        (schema error 20102, channel_server_one_base)
    :raises rhnException: for any other database error
    """
    log_debug(3, server_id, channel_id, commit)
    subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
    try:
        # don't run the EC yet
        subscribe_channel(server_id, channel_id, 0)
    except rhnSQL.SQLSchemaError:
        e = sys.exc_info()[1]
        if e.errno == 20102:  # channel_server_one_base
            log_error("Channel subscribe failed, "
                      "%s already subscribed to %s (?)" % (server_id, channel_id))
            raise_with_tb(rhnFault(38, "Server already subscribed to %s" % channel_id), sys.exc_info()[2])
        # If we got here, it's an unknown error; ISE (for now)
        log_error("SQLSchemaError", e)
        raise_with_tb(rhnException(e), sys.exc_info()[2])
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        # If we got here, it's an unknown error; ISE (for now)
        log_error("SQLError", e)
        raise_with_tb(rhnException(e), sys.exc_info()[2])
    if commit:
        rhnSQL.commit()
    return 1
_query_parent_channel_subscribed = rhnSQL.Statement("""
select 1
from rhnChannel c
join rhnServerChannel sc on c.parent_channel = sc.channel_id
where sc.server_id = :sid
and c.label = :channel
""")
_query_can_subscribe = rhnSQL.Statement("""
select rhn_channel.user_role_check(:cid, wc.id, 'subscribe') as can_subscribe
from web_contact wc
where wc.login_uc = upper(:username)
""")
# subscribe a server to a channel with authentication
def subscribe_channel(server_id, channel, username, password):
    """Subscribe a server to a child channel after authenticating the user.

    Requires: the channel exists, the server is already subscribed to the
    channel's parent, and the user holds the 'subscribe' role on it.
    """
    log_debug(3, server_id, channel, username)
    # If auth doesn't blow up we're fine
    __auth_user(server_id, username, password)

    # get the channel_id
    lookup = rhnSQL.prepare("select id from rhnChannel where label = :channel")
    lookup.execute(channel=str(channel))
    row = lookup.fetchone_dict()
    if not row:
        log_error("Channel %s does not exist?" % channel)
        raise rhnFault(40, "Channel %s does not exist?" % channel)
    channel_id = row['id']

    # check if server is subscribed to the parent of the given channel
    parent_check = rhnSQL.prepare(_query_parent_channel_subscribed)
    parent_check.execute(sid=server_id, channel=str(channel))
    if not parent_check.fetchone_dict():
        log_error("Parent of channel %s is not subscribed to server" % channel)
        raise rhnFault(32, "Parent of channel %s is not subscribed to server" % channel)

    # check specific channel subscription permissions
    perm_check = rhnSQL.prepare(_query_can_subscribe)
    perm_check.execute(cid=channel_id, username=username)
    perm = perm_check.fetchone_dict()
    if not (perm and perm['can_subscribe']):
        raise rhnFault(71)
    subscribe_sql(server_id, channel_id)
    return 1
# This class is only a convenient encapsulation of a server's attributes:
# server_id, org_id, release, arch, user_id. Sometimes we only pass the
# server_id, and later down the road we have to message "no channel for
# release foo, arch bar", but we don't know the release and arch anymore
class LiteServer:
    """Lightweight value object for a server's identifying attributes
    (id, org_id, release, arch); see the note above about why the full
    attribute set must travel together.
    """
    _attributes = ['id', 'org_id', 'release', 'arch']

    def __init__(self, **kwargs):
        # Initialize attributes from **kwargs (set to None if value is not
        # present)
        for attr in self._attributes:
            setattr(self, attr, kwargs.get(attr))

    def init_from_server(self, server):
        # Populate from a full Server object; returns self for chaining.
        self.id = server.getid()
        self.org_id = server.server['org_id']
        self.release = server.server['release']
        self.arch = server.archname
        return self

    def __repr__(self):
        # Renamed the local from 'dict' — it shadowed the builtin.
        attrs = {}
        for attr in self._attributes:
            attrs[attr] = getattr(self, attr)
        return "<%s instance at %s: attributes=%s>" % (
            self.__class__.__name__, id(self), attrs)
# If raise_exceptions is set, BaseChannelDeniedError, NoBaseChannelError are
# raised
def guess_channels_for_server(server, user_id=None, none_ok=0,
                              raise_exceptions=0):
    """Determine the channels a LiteServer should be subscribed to, based
    on its release/arch/org (filling in any of those that are missing).

    :param none_ok: when set, return [] instead of faulting if no base
        channel is available/allowed
    :param raise_exceptions: when set (and none_ok unset), let
        NoBaseChannelError / BaseChannelDeniedError propagate to the caller
    :raises rhnException: if server is not a LiteServer
    :raises rhnFault: 8 (unknown server), 19 (no matching channels),
        71 (base channel denied)
    """
    log_debug(3, server)
    if not isinstance(server, LiteServer):
        raise rhnException("Server object is not a LiteServer")
    if None in (server.org_id, server.release, server.arch):
        # need to obtain the release and/or arch and/or org_id
        h = rhnSQL.prepare("""
        select s.org_id, s.release, sa.label arch
        from rhnServer s, rhnServerArch sa
        where s.id = :server_id and s.server_arch_id = sa.id
        """)
        h.execute(server_id=server.id)
        ret = h.fetchone_dict()
        if not ret:
            log_error("Could not get the release/arch "
                      "for server %s" % server.id)
            raise rhnFault(8, "Could not find the release/arch "
                           "for server %s" % server.id)
        if server.org_id is None:
            server.org_id = ret["org_id"]
        if server.release is None:
            server.release = ret["release"]
        if server.arch is None:
            server.arch = ret["arch"]
    if raise_exceptions and not none_ok:
        # Let exceptions pass through
        return channels_for_release_arch(server.release, server.arch,
                                         server.org_id, user_id=user_id)
    try:
        return channels_for_release_arch(server.release, server.arch,
                                         server.org_id, user_id=user_id)
    except NoBaseChannelError:
        if none_ok:
            return []
        log_error("No available channels for (server, org)",
                  (server.id, server.org_id), server.release, server.arch)
        msg = _("Your account does not have access to any channels matching "
                "(release='%(release)s', arch='%(arch)s')%(www_activation)s")
        error_strings = {
            'release': server.release,
            'arch': server.arch,
            'www_activation': ''
        }
        if CFG.REFER_TO_WWW:
            error_strings['www_activation'] = _("\nIf you have a "
                "registration number, please register with it first at "
                "http://www.redhat.com/apps/activate/ and then try again.\n\n")
        raise_with_tb(rhnFault(19, msg % error_strings), sys.exc_info()[2])
    except BaseChannelDeniedError:
        if none_ok:
            return []
        # BUGFIX: was "raise raise_with_tb(...)" — raise_with_tb() itself
        # raises, so the outer "raise" (of its return value) was dead code.
        # Also closed the unbalanced paren in the fault message.
        raise_with_tb(rhnFault(71,
            _("Insufficient subscription permissions for release (%s, %s)")
            % (server.release, server.arch)), sys.exc_info()[2])
# Subscribes the server to channels
# can raise BaseChannelDeniedError, NoBaseChannelError
# Only used for new server registrations
def subscribe_server_channels(server, user_id=None, none_ok=0):
    """Subscribe a (newly registered) server to its guessed channels.

    Can raise BaseChannelDeniedError / NoBaseChannelError (propagated via
    guess_channels_for_server with raise_exceptions=1).  The subscribe
    calls run inside the 'subscribe_server_channels' savepoint and are
    not committed here.

    :return: the list of channel rows the server was subscribed to
    """
    s = LiteServer().init_from_server(server)
    # bretm 02/19/2007 -- have to leave none_ok in here for now due to how
    # the code is setup for reg token crap; it'd be very nice to clean up that
    # path to eliminate any chance for a server to be registered and not have base
    # channels, excluding expiration of channel entitlements
    channels = guess_channels_for_server(s, user_id=user_id, none_ok=none_ok,
                                         raise_exceptions=1)
    rhnSQL.transaction('subscribe_server_channels')
    for c in channels:
        subscribe_sql(s.id, c["id"], 0)
    return channels
# small wrapper around a PL/SQL function
def unsubscribe_sql(server_id, channel_id, commit=1):
    """Thin wrapper around the rhn_channel.unsubscribe_server procedure.

    Returns 1 on success, 0 on database error (logged, not raised).
    """
    log_debug(3, server_id, channel_id, commit)
    proc = rhnSQL.Procedure("rhn_channel.unsubscribe_server")
    try:
        # don't run the EC yet
        proc(server_id, channel_id, 0)
    except rhnSQL.SQLError:
        log_error("Channel unsubscribe from %s failed for %s" % (
            channel_id, server_id))
        return 0
    if commit:
        rhnSQL.commit()
    return 1
# unsubscribe a server from a channel
def unsubscribe_channel(server_id, channel, username, password):
    """Unsubscribe a server from a child channel after authenticating the
    user; base channels may not be unsubscribed this way.
    """
    log_debug(3, server_id, channel, username)
    # If auth doesn't blow up we're fine
    __auth_user(server_id, username, password)

    # now get the id of the channel
    lookup = rhnSQL.prepare("""
    select id, parent_channel from rhnChannel where label = :channel
    """)
    lookup.execute(channel=channel)
    row = lookup.fetchone_dict()
    if not row:
        log_error("Asked to unsubscribe server %s from non-existent channel %s" % (
            server_id, channel))
        raise rhnFault(40, "The specified channel '%s' does not exist." % channel)
    if not row["parent_channel"]:
        # Refuse to drop a base channel.
        log_error("Cannot unsubscribe %s from base channel %s" % (
            server_id, channel))
        raise rhnFault(72, "You can not unsubscribe %s from base channel %s." % (
            server_id, channel))

    # check specific channel subscription permissions
    channel_id = row['id']
    perm_check = rhnSQL.prepare(_query_can_subscribe)
    perm_check.execute(cid=channel_id, username=username)
    perm = perm_check.fetchone_dict()
    if not (perm and perm['can_subscribe']):
        raise rhnFault(71)
    return unsubscribe_sql(server_id, channel_id)
# unsubscribe from all channels
def unsubscribe_all_channels(server_id):
    """Unsubscribe a server from every channel it is subscribed to.

    :return: 1 on success
    :raises rhnFault: 36 (after rolling back to the savepoint) if any
        single unsubscribe fails
    """
    log_debug(3, server_id)
    # We need to unsubscribe the children channels before the base ones.
    rhnSQL.transaction("unsub_all_channels")
    # "order by c.parent_channel nulls last" puts child channels (non-NULL
    # parent) first, so bases are removed only after their children.
    h = rhnSQL.prepare("""
    select
        sc.channel_id id
    from
        rhnChannel c,
        rhnServerChannel sc
    where
        sc.server_id = :server_id
    and sc.channel_id = c.id
    order by c.parent_channel nulls last
    """)
    h.execute(server_id=str(server_id))
    while 1:
        c = h.fetchone_dict()
        if not c:
            break
        ret = unsubscribe_sql(server_id, c["id"], 0)
        if not ret:
            rhnSQL.rollback("unsub_all_channels")
            raise rhnFault(36, "Could not unsubscribe server %s "
                           "from existing channels" % (server_id,))
    # finished unsubscribing
    return 1
# Unsubscribe the server from the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def unsubscribe_channels(server_id, channels):
    """Unsubscribe the server from the given channel hashes
    (each needs keys id, label, parent_channel); children first.
    """
    log_debug(4, server_id, channels)
    if not channels:
        # Nothing to do
        return 1
    # We need to unsubscribe the children channels before the base ones.
    rhnSQL.transaction("unsub_channels")
    ordered = ([c for c in channels if c['parent_channel']] +
               [c for c in channels if not c['parent_channel']])
    for chan in ordered:
        if not unsubscribe_sql(server_id, chan["id"], 0):
            rhnSQL.rollback("unsub_channels")
            raise rhnFault(36, "Could not unsubscribe server %s "
                           "from channel %s" % (server_id, chan["label"]))
    # finished unsubscribing
    return 1
# Subscribe the server to the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def subscribe_channels(server_id, channels):
    """Subscribe the server to the given channel hashes
    (each needs keys id, label, parent_channel); base channels first.
    """
    log_debug(4, server_id, channels)
    if not channels:
        # Nothing to do
        return 1
    # We need to subscribe the base channel before the child ones.
    ordered = ([c for c in channels if not c['parent_channel']] +
               [c for c in channels if c['parent_channel']])
    for chan in ordered:
        subscribe_sql(server_id, chan["id"], 0)
    # finished subscribing
    return 1
# check if a server is subscribed to a channel
def is_subscribed(server_id, channel):
    """Return 1 if the server is subscribed to the named channel, else 0."""
    log_debug(3, server_id, channel)
    h = rhnSQL.prepare("""
    select 1 subscribed
    from rhnServerChannel sc, rhnChannel c
    where
        sc.channel_id = c.id
    and c.label = :channel
    and sc.server_id = :server_id
    """)
    h.execute(server_id=str(server_id), channel=str(channel))
    if h.fetchone_dict():
        return 1
    # System not subscribed to channel
    return 0
# Returns 0, "", "" if system does not need any message, or
# (error_code, message_title, message) otherwise
def system_reg_message(server):
    """Compute the post-registration status message for a server.

    :return: (0, "", "") when no message is needed, otherwise
        (error_code, message_title, message)
    """
    server_id = server.server['id']
    # Is this system subscribed to a channel?
    h = rhnSQL.prepare("""
    select sc.channel_id
    from rhnServerChannel sc
    where sc.server_id = :server_id
    """)
    h.execute(server_id=server_id)
    ret = h.fetchone_dict()
    if not ret:
        # System not subscribed to any channel
        #
        return (-1, s_invalid_channel_title,
                s_invalid_channel_message %
                (server.server["release"], server.archname))
    # System does have a base channel; check entitlements
    from rhnServer import server_lib  # having this on top, cause TB due circular imports
    entitlements = server_lib.check_entitlement(server_id)
    if not entitlements:
        # No entitlement
        # We don't have an autoentitle preference for now, so display just one
        # message
        templates = rhnFlags.get('templateOverrides')
        if templates and 'hostname' in templates:
            hostname = templates['hostname']
        else:
            # Default to www
            hostname = "rhn.redhat.com"
        params = {
            'entitlement_url': "https://%s"
            "/rhn/systems/details/Edit.do?sid=%s" %
            (hostname, server_id)
        }
        return -1, no_entitlement_title, no_entitlement_message % params
    return 0, "", ""
def subscribe_to_tools_channel(server_id):
    """
    Subscribes server_id to the RHN Tools channel associated with its base channel, if one exists.
    """
    base = get_base_channel(server_id, none_ok=1)
    if base is None:
        raise NoBaseChannelError("Server %s has no base channel." %
                                 str(server_id))

    lookup_child_channels = rhnSQL.Statement("""
    select id, label, parent_channel
    from rhnChannel
    where parent_channel = :id
    """)
    stmt = rhnSQL.prepare(lookup_child_channels)
    stmt.execute(id=base['id'])
    children = stmt.fetchall_dict()
    if children is None:
        raise NoChildChannels("Base channel id %s has no child channels associated with it." %
                              base['id'])

    # Keep the last child whose label mentions rhn-tools (matches the
    # original scan order).
    tools_channel = None
    for child in children:
        if 'label' in child and 'rhn-tools' in child['label']:
            tools_channel = child

    if tools_channel is None:
        raise NoToolsChannel("Base channel id %s does not have a RHN Tools channel as a child channel." %
                             base['id'])
    # Sanity-check the row shape before handing it to subscribe_channels.
    if 'id' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no id.")
    if 'label' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no label.")
    if 'parent_channel' not in tools_channel:
        raise InvalidChannel("RHN Tools channel has no parent_channel.")
    subscribe_channels(server_id, [tools_channel])
# Various messages that can be reused
#
# bretm 02/07/2007 -- when we have better old-client documentation, probably
# will be safe to get rid of all this crap
# Shown when the release/arch combination maps to no channel at all.
h_invalid_channel_title = _("System Registered but Inactive")
h_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it is not subscribed to a channel. If you have not yet
activated your product for service, please visit our website at:
http://www.redhat.com/apps/activate/
...to activate your product.""")
# Shown when base-channel subscription failed during registration.
s_invalid_channel_title = _("System Registered but Inactive")
s_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it could not be subscribed to a base channel.
Please contact your organization administrator for assistance.
""")
# %(entitlement_url)s is filled in by system_reg_message().
no_autoentitlement_message = _("""
This system has been successfully registered, but is not yet entitled
to service. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
no_entitlement_title = _("System Registered but Inactive")
no_entitlement_message = _("""
This system has been successfully registered, but no service entitlements
were available. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
|
lhellebr/spacewalk
|
backend/server/rhnChannel.py
|
Python
|
gpl-2.0
| 70,178
|
[
"VisIt"
] |
a7f112ac4a539a072744a00185565a30b12a9538aada86b73cd6a67851c51f96
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
from __future__ import print_function
import sys, inspect, re, bisect
from collections import OrderedDict
from os.path import join
import mx
class MxCompatibility500(object):
@staticmethod
def version():
    """The mx version this compatibility class corresponds to."""
    return mx.VersionSpec("5.0.0")
def supportsLicenses(self):
return False
def licenseAttribute(self):
return 'licence'
def licensesAttribute(self):
return 'licences'
def defaultLicenseAttribute(self):
return 'defaultLicence'
def supportedMavenMetadata(self):
return []
def supportsRepositories(self):
return False
def newestInputIsTimeStampFile(self):
    """
    Determines if the 'newestInput' parameter of BuildTask.needsBuild()
    is a TimeStampFile or a simple time stamp (i.e. a float).
    Baseline returns False (plain time stamp).
    """
    return False
def getSuiteOutputRoot(self, suite):
    """Root directory for a suite's build output: the suite dir itself
    at this compatibility level."""
    return suite.dir
def mavenDeployJavadoc(self):
    """Whether Maven deployment includes javadoc artifacts (baseline: no)."""
    return False
def validate_maven_javadoc(self):
    """Whether javadoc is validated during Maven deploy (baseline: no)."""
    return False
def mavenSupportsClassifier(self):
    """Whether Maven artifact classifiers are supported (baseline: no)."""
    return False
def checkstyleVersion(self):
return '6.0'
def checkDependencyJavaCompliance(self):
    """
    Determines if a project must have a higher or equal Java compliance level
    than a project it depends upon.
    Baseline: not enforced.
    """
    return False
def improvedImportMatching(self):
    """Whether the improved import-matching algorithm is used (baseline: no)."""
    return False
def verifySincePresent(self):
    """Extra javadoc-check arguments for @since verification (baseline: none)."""
    return []
def moduleDepsEqualDistDeps(self):
    """
    Determines if the constituents of a module derived from a distribution are
    exactly the same as the constituents of the distribution.
    Baseline: no.
    """
    return False
def useDistsForUnittest(self):
    """
    Determines if Unittest uses jars from distributions for testing.
    Baseline: no.
    """
    return False
def excludeDisableJavaDebuggging(self):
"""
Excludes the misspelled class name.
"""
return False
def makePylintVCInputsAbsolute(self):
"""
Makes pylint input paths discovered by VC absolute.
"""
return False
def disableImportOfTestProjects(self):
"""
Requires that test projects can only be imported by test projects.
"""
return False
def useJobsForMakeByDefault(self):
"""
Uses -j for make by default, can be prevented using `single_job` attribute on the project.
"""
return False
def overwriteProjectAttributes(self):
"""
Attributes from the configuration that are not explicitly handled overwrite values set by the constructor.
"""
return True
def requireJsonifiableSuite(self):
return False
def supportSuiteImportGitBref(self):
return True
def enforceTestDistributions(self):
return False
def deprecateIsTestProject(self):
return False
def filterFindbugsProjectsByJavaCompliance(self):
"""
Should selection of projects to analyze with FindBugs filter
out projects whose Java compliance is greater than 8.
"""
return False
def addVersionSuffixToExplicitVersion(self):
return False
def __str__(self):
return str("MxCompatibility({})".format(self.version()))
def __repr__(self):
return str(self)
def jarsUseJDKDiscriminant(self):
"""
Should `mx.JARDistribution` use the jdk version used for the build as a `Dependency._extra_artifact_discriminant`
to avoid collisions of build artifacts when building with different JAVA_HOME/EXTRA_JAVA_HOMES settings.
"""
return False
def check_package_locations(self):
"""
Should `canonicalizeprojects` check whether the java package declarations and source location match.
"""
return False
def check_checkstyle_config(self):
"""
Should sanity check Checkstyle configuration for a project.
"""
return False
def verify_multirelease_projects(self):
"""
Should multi-release projects be verified (see mx.verifyMultiReleaseProjects).
"""
return False
def spotbugs_version(self):
"""
Which version of findbugs/spotbugs should be used?
"""
return "3.0.0"
def automatic_overlay_distribution_deps(self):
"""
When a distribution depends on a project that has versioned overlays, are the
overlay projects automatically added as dependencies to the distribution?
"""
return False
def supports_disjoint_JavaCompliance_range(self):
"""
Specifies if disjoint JavaCompliance ranges (e.g. "8,13+") are supported.
"""
return False
def maven_deploy_unsupported_is_error(self):
"""
Specifies if trying to deploy a distribution whose type is not supported is an error.
"""
return False
def enhanced_module_usage_info(self):
"""
Returns True if a Java project must specify its use of concealed packages with
a "requiresConcealed" attribute and use of modules other than java.base with
a "requires" attribute.
"""
return False
def get_sigtest_jar(self):
"""
Returns the proper version of the SIGTEST jar used by `mx sigtest`.
"""
return mx.library('SIGTEST_1_2').get_path(resolve=True)
def fix_extracted_dependency_prefix(self):
"""
Returns True if the `./` prefix should be removed from `extracted-dependency` sources of layout distributions.
"""
return False
def is_using_jdk_headers_implicitly(self, project):
"""Returns whether a native project is using JDK headers implicitly.
The use of JDK headers is implied if any build dependency is a Java project with JNI headers.
"""
assert project.isNativeProject()
is_using_jdk_headers = any(d.isJavaProject() and d.include_dirs for d in project.buildDependencies)
if is_using_jdk_headers and project.suite._output_root_includes_config():
project.abort('This project is using JDK headers implicitly. For MX_OUTPUT_ROOT_INCLUDES_CONFIG=true to '
'work, it must set the "use_jdk_headers" attribute explicitly.')
return is_using_jdk_headers
def bench_suite_needs_suite_args(self):
"""
Returns whether extracting the benchmark suite name depends on the `bmSuiteArgs` or not.
"""
return False
def enforce_spec_compliant_exports(self):
"""Returns whether modular multi-release JARs must have spec compliant exports."""
return False
def jmh_dist_benchmark_extracts_add_opens_from_manifest(self):
"""Returns whether jmh benchmarks should extract --add-opens and --add-exports from the manifest file to
place it explicitly on the command line."""
return False
# Each subclass below activates one or more behavior changes starting at the
# mx version returned by its version() method.  getMxCompatibility() selects
# the newest class whose version is <= the requested version, so versions
# must be strictly increasing down this single-inheritance chain (asserted
# in _ensureCompatLoaded).
class MxCompatibility520(MxCompatibility500):
    @staticmethod
    def version():
        return mx.VersionSpec("5.2.0")
    def supportsLicenses(self):
        return True
    def supportedMavenMetadata(self):
        return ['library-coordinates', 'suite-url', 'suite-developer', 'dist-description']
class MxCompatibility521(MxCompatibility520):
    @staticmethod
    def version():
        return mx.VersionSpec("5.2.1")
    def supportsRepositories(self):
        return True
class MxCompatibility522(MxCompatibility521):
    # Switches to the American spellings of the license attributes.
    @staticmethod
    def version():
        return mx.VersionSpec("5.2.2")
    def licenseAttribute(self):
        return 'license'
    def licensesAttribute(self):
        return 'licenses'
    def defaultLicenseAttribute(self):
        return 'defaultLicense'
class MxCompatibility533(MxCompatibility522):
    @staticmethod
    def version():
        return mx.VersionSpec("5.3.3")
    def newestInputIsTimeStampFile(self):
        return True
class MxCompatibility555(MxCompatibility533):
    # Moves suite output into a dedicated 'mxbuild' directory.
    @staticmethod
    def version():
        return mx.VersionSpec("5.5.5")
    def getSuiteOutputRoot(self, suite):
        return join(suite.dir, 'mxbuild')
class MxCompatibility566(MxCompatibility555):
    @staticmethod
    def version():
        return mx.VersionSpec("5.6.6")
    def mavenDeployJavadoc(self):
        return True
class MxCompatibility5616(MxCompatibility566):
    @staticmethod
    def version():
        return mx.VersionSpec("5.6.16")
    def checkstyleVersion(self):
        return '6.15'
class MxCompatibility59(MxCompatibility5616):
    @staticmethod
    def version():
        return mx.VersionSpec("5.9.0")
    def verifySincePresent(self):
        return ['-verifysincepresent']
class MxCompatibility5200(MxCompatibility59):
    @staticmethod
    def version():
        return mx.VersionSpec("5.20.0")
    def checkDependencyJavaCompliance(self):
        return True
    def improvedImportMatching(self):
        return True
class MxCompatibility5344(MxCompatibility5200):
    @staticmethod
    def version():
        return mx.VersionSpec("5.34.4")
    def moduleDepsEqualDistDeps(self):
        return True
class MxCompatibility5590(MxCompatibility5344):
    @staticmethod
    def version():
        return mx.VersionSpec("5.59.0")
    def useDistsForUnittest(self):
        return True
class MxCompatibility5680(MxCompatibility5590):
    @staticmethod
    def version():
        return mx.VersionSpec("5.68.0")
    def excludeDisableJavaDebuggging(self):
        return True
class MxCompatibility51104(MxCompatibility5680):
    @staticmethod
    def version():
        return mx.VersionSpec("5.110.4")
    def makePylintVCInputsAbsolute(self):
        return True
class MxCompatibility51120(MxCompatibility51104):
    # NOTE: class name says 1120 but the version is 5.113.0.
    @staticmethod
    def version():
        return mx.VersionSpec("5.113.0")
    def disableImportOfTestProjects(self):
        return True
class MxCompatibility51150(MxCompatibility51120):
    @staticmethod
    def version():
        return mx.VersionSpec("5.115.0")
    def useJobsForMakeByDefault(self):
        return True
class MxCompatibility51247(MxCompatibility51150):
    @staticmethod
    def version():
        return mx.VersionSpec("5.124.7")
    def overwriteProjectAttributes(self):
        return False
class MxCompatibility51330(MxCompatibility51247):
    @staticmethod
    def version():
        return mx.VersionSpec("5.133.0")
    def requireJsonifiableSuite(self):
        return True
class MxCompatibility51380(MxCompatibility51330):
    @staticmethod
    def version():
        return mx.VersionSpec("5.138.0")
    def supportSuiteImportGitBref(self):
        return False
class MxCompatibility51400(MxCompatibility51380):
    @staticmethod
    def version():
        return mx.VersionSpec("5.140.0")
    def enforceTestDistributions(self):
        return True
    def deprecateIsTestProject(self):
        return True
class MxCompatibility51492(MxCompatibility51400):
    @staticmethod
    def version():
        return mx.VersionSpec("5.149.2")
    def filterFindbugsProjectsByJavaCompliance(self):
        return True
class MxCompatibility51760(MxCompatibility51492):
    @staticmethod
    def version():
        return mx.VersionSpec("5.176.0")
    def addVersionSuffixToExplicitVersion(self):
        return True
class MxCompatibility5181(MxCompatibility51760):
    @staticmethod
    def version():
        return mx.VersionSpec("5.181.0")
    def jarsUseJDKDiscriminant(self):
        return True
class MxCompatibility5194(MxCompatibility5181):
    @staticmethod
    def version():
        return mx.VersionSpec("5.194.0")
    def check_package_locations(self):
        return True
class MxCompatibility51950(MxCompatibility5194):
    @staticmethod
    def version():
        return mx.VersionSpec("5.195.0")
    def mavenSupportsClassifier(self):
        return True
class MxCompatibility51951(MxCompatibility51950):
    @staticmethod
    def version():
        return mx.VersionSpec("5.195.1")
    def check_checkstyle_config(self):
        return True
class MxCompatibility52061(MxCompatibility51951):
    @staticmethod
    def version():
        return mx.VersionSpec("5.206.1")
    def verify_multirelease_projects(self):
        return True
class MxCompatibility52102(MxCompatibility52061):
    @staticmethod
    def version():
        return mx.VersionSpec("5.210.2")
    def spotbugs_version(self):
        return "4.4.2"
class MxCompatibility52230(MxCompatibility52102):
    @staticmethod
    def version():
        return mx.VersionSpec("5.223.0")
    def automatic_overlay_distribution_deps(self):
        return True
    def supports_disjoint_JavaCompliance_range(self):
        return True
class MxCompatibility52290(MxCompatibility52230):
    @staticmethod
    def version():
        return mx.VersionSpec("5.229.0")
    def maven_deploy_unsupported_is_error(self):
        return True
class MxCompatibility52310(MxCompatibility52290):
    @staticmethod
    def version():
        return mx.VersionSpec("5.231.0")
    def enhanced_module_usage_info(self):
        return True
class MxCompatibility52710(MxCompatibility52310):
    @staticmethod
    def version():
        return mx.VersionSpec("5.271.0")
    def validate_maven_javadoc(self):
        return True
class MxCompatibility52791(MxCompatibility52710):
    # Upgrades the sigtest tool jar.
    @staticmethod
    def version():
        return mx.VersionSpec("5.279.1")
    def get_sigtest_jar(self):
        return mx.library('SIGTEST_1_3').get_path(resolve=True)
class MxCompatibility52820(MxCompatibility52791):
    @staticmethod
    def version():
        return mx.VersionSpec("5.282.0")
    def fix_extracted_dependency_prefix(self):
        return True
class MxCompatibility53000(MxCompatibility52820):
    # Implicit JDK header use becomes a hard error instead of a heuristic.
    @staticmethod
    def version():
        return mx.VersionSpec("5.300.0")
    def is_using_jdk_headers_implicitly(self, project):
        assert project.isNativeProject()
        if any(d.isJavaProject() and d.include_dirs for d in project.buildDependencies):
            project.abort('This project is using JDK headers implicitly. Instead, it must set the "use_jdk_headers" '
                          'attribute explicitly.')
        return False
class MxCompatibility53010(MxCompatibility53000):
    @staticmethod
    def version():
        return mx.VersionSpec("5.301.0")
    def bench_suite_needs_suite_args(self):
        return True
class MxCompatibility53169(MxCompatibility53010):
    @staticmethod
    def version():
        return mx.VersionSpec("5.316.9")
    def enforce_spec_compliant_exports(self):
        return True
class MxCompatibility531615(MxCompatibility53169):
    @staticmethod
    def version():
        return mx.VersionSpec("5.316.15")
    def jmh_dist_benchmark_extracts_add_opens_from_manifest(self):
        return True
def minVersion():
    """Return the oldest supported mx version (loads the compat classes first)."""
    _ensureCompatLoaded()
    # _versionsMap is an OrderedDict populated in ascending version order,
    # so its first key is the minimum; next(iter(...)) reads it without
    # materializing the whole key list as list(...)[0] did.
    return next(iter(_versionsMap))
def getMxCompatibility(version):
    """Return the newest compatibility object whose version is <= `version`,
    or None if `version` predates the oldest supported level.
    :rtype: MxCompatibility500"""
    if version < minVersion():  # ensures compat loaded
        return None
    keys = list(_versionsMap.keys())
    # bisect_right returns the insertion point *after* any exact match, so
    # keys[index - 1] is the greatest registered version <= `version`.
    return _versionsMap[keys[bisect.bisect_right(keys, version)-1]]
# Maps mx.VersionSpec -> compatibility instance, kept in ascending version
# order (relied upon by minVersion() and getMxCompatibility()).
_versionsMap = OrderedDict()
def _ensureCompatLoaded():
    # Lazily populates _versionsMap exactly once per process.
    if not _versionsMap:
        def flattenClassTree(tree):
            # inspect.getclasstree yields nested [(cls, bases), [children...]]
            # structures; walk root-to-leaf.  The len==2 assertion enforces
            # that the compat classes form a single inheritance chain.
            root = tree[0][0]
            assert isinstance(root, type), root
            yield root
            if len(tree) > 1:
                assert len(tree) == 2
                rest = tree[1]
                assert isinstance(rest, list), rest
                for c in flattenClassTree(rest):
                    yield c
        classes = []
        # Pick up every MxCompatibility* class defined in this module.
        regex = re.compile(r'^MxCompatibility[0-9a-z]*$')
        for name, clazz in inspect.getmembers(sys.modules[__name__], inspect.isclass):
            m = regex.match(name)
            if m:
                classes.append(clazz)
        previousVersion = None
        for clazz in flattenClassTree(inspect.getclasstree(classes)):
            if clazz == object:
                continue
            # Versions must be strictly increasing down the chain so the
            # OrderedDict ends up sorted.
            assert previousVersion is None or previousVersion < clazz.version()
            previousVersion = clazz.version()
            _versionsMap[previousVersion] = clazz()
|
graalvm/mx
|
mx_compat.py
|
Python
|
gpl-2.0
| 17,513
|
[
"VisIt"
] |
613907b5bd074b3d60a713659248ab6f2ceccfee38c016d67b5cd993744fc4e6
|
# Orca
#
# Copyright 2005-2008 Google Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
"""Dectalk voice definitions using ACSS.
This module encapsulates Dectalk-specific voice definitions. It
maps device-independent ACSS voice definitions into appropriate
Dectalk voice parameter settings.
"""
__id__ = "$Id$"
__author__ = "T. V. Raman"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Google Inc."
__license__ = "LGPL"
import chnames
# Handling of special characters
#
# Emacspeak uses Tcl syntax to communicate with its speech servers. It
# embraces text in curly braces, so that at least {, }, and \ must be quoted
# when sending text to speech server. But individual speech engines have
# their own special characters in addition to those of Tcl. Dectalk
# perceives speech parameters enclosed in square brackets, and Emacspeak
# exploits this to transmit speech settings to Dectalk. Thus we must quote
# [ and ] too.
def makeSpecialCharMap():
    """Return (character, replacement) pairs for characters that are special
    to the Dectalk speech server; each character is replaced by its spoken
    name padded with spaces.
    """
    pairs = []
    for ch in r'{\}[]':
        pairs.append((ch, ' ' + chnames.getCharacterName(ch) + ' '))
    return pairs
# Speech parameters
# Memoization cache: acss.name() -> (open, close) code tuple, see getvoice().
_defined_voices = {}
# Map from ACSS dimensions to Dectalk settings:
# keyed by 'family' and by (familyName, dimension) tuples that the
# _update_map calls below fill in.
_table = {}
#family codes:
# NOTE(review): this table spells the child voice key 'kid', but the
# parameter tables below are registered under ('kit', ...) -- the two
# never match in acss2voice; confirm which spelling is intended.
_table['family'] = {
    'male' : ' :np ',
    'paul' : ':np',
    'man' : ':nh',
    'harry' : ' :nh ',
    'dennis' : ':nd',
    'frank' : ':nf',
    'betty' : ':nb',
    'female' : ' :nb ',
    'ursula' : ':nu',
    'wendy' : ':nw',
    'rita' : ':nr',
    'kid' : ':nk',
    'child' : ' :nk '
}
# average-pitch :
# Average pitch for standard male voice is 122hz --this is mapped to
# a setting of 5.
# Average pitch varies inversely with speaker head size --a child
# has a small head and a higher pitched voice.
# We change parameter head-size in conjunction with average pitch to
# produce a more natural change on the Dectalk.
#male average pitch
def _update_map(table, key, format, settings):
"""Internal function to update acss->synth mapping."""
table[key] = {}
for setting in settings:
_table[key][setting[0]] = format % setting[1:]
_male_ap = [
(0, 96, 115),
(1, 101, 112),
(2, 108, 109),
(3, 112, 106),
(4, 118, 103),
(5, 122, 100),
(6, 128, 98),
(7, 134, 96),
(8, 140, 94),
(9, 147, 91)
]
_update_map(_table, ('male', 'average-pitch'),
" ap %s hs %s ", _male_ap)
_update_map(_table, ('paul', 'average-pitch'),
" ap %s hs %s ", _male_ap)
#Man has a big head --and a lower pitch for the middle setting
_man_ap = [
(0, 50, 125),
(1, 59, 123),
(2, 68, 121),
(3, 77, 120),
(4, 83, 118),
(5, 89, 115),
(6, 95, 112),
(7, 110, 105),
(8, 125, 100),
(9, 140, 95)
]
_update_map(_table, ('man', 'average-pitch'),
" ap %s hs %s ",_man_ap)
_update_map(_table, ('harry', 'average-pitch'),
" ap %s hs %s ",_man_ap)
_female_ap = [
(0, 160, 115),
(1, 170, 112),
(2, 181, 109),
(3, 192, 106),
(4, 200, 103),
(5, 208, 100),
(6, 219, 98),
(7, 225, 96),
(8, 240, 94),
(9, 260, 91)
]
_update_map(_table, ('female', 'average-pitch'),
" ap %s hs %s ",_female_ap)
_update_map(_table, ('betty', 'average-pitch'),
" ap %s hs %s ",_female_ap)
# The default DECtalk values for the pitch of the other voices seem
# to be as follows:
# Frank = 155, Dennis = 110, Ursula = 240, Rita = 106, Wendy = 200
# Kit = Child = 306
# Therefore, follow TV Raman's lead:
_frank_ap = [
(0, 129, 115),
(1, 134, 112),
(2, 141, 109),
(3, 145, 106),
(4, 151, 103),
(5, 155, 100),
(6, 159, 98),
(7, 165, 96),
(8, 171, 94),
(9, 178, 91)
]
_update_map(_table, ('frank', 'average-pitch'),
" ap %s hs %s ", _frank_ap)
_dennis_ap = [
(0, 84, 115),
(1, 89, 112),
(2, 96, 109),
(3, 100, 106),
(4, 106, 103),
(5, 110, 100),
(6, 116, 98),
(7, 122, 96),
(8, 128, 94),
(9, 135, 91)
]
_update_map(_table, ('dennis', 'average-pitch'),
" ap %s hs %s ", _dennis_ap)
_ursula_ap = [
(0, 196, 115),
(1, 206, 112),
(2, 215, 109),
(3, 224, 106),
(4, 232, 103),
(5, 240, 100),
(6, 251, 98),
(7, 265, 96),
(8, 280, 94),
(9, 300, 91)
]
_update_map(_table, ('ursula', 'average-pitch'),
" ap %s hs %s ", _ursula_ap)
_rita_ap = [
(0, 62, 115),
(1, 72, 112),
(2, 81, 109),
(3, 90, 106),
(4, 98, 103),
(5, 106, 100),
(6, 117, 98),
(7, 131, 96),
(8, 146, 94),
(9, 166, 91)
]
_update_map(_table, ('rita', 'average-pitch'),
" ap %s hs %s ", _rita_ap)
# For some reason, Wendy at a high pitch causes the
# synthesizer to click and eventually make a feedback sound!
# It doesn't seem to be the result of the pitch.
# Keeping head size constant for higher pitch seems to eliminate
# the problem.
_wendy_ap = [
(0, 156, 115),
(1, 166, 112),
(2, 175, 109),
(3, 184, 106),
(4, 192, 103),
(5, 200, 100),
(6, 211, 100),
(7, 225, 100),
(8, 240, 100),
(9, 260, 100)
]
_update_map(_table, ('wendy', 'average-pitch'),
" ap %s hs %s ", _wendy_ap)
# Kit/Child can't have the traditional adult head size
# Setting the largest head size is the smallest adult
# female head size.
_child_ap = [
(0, 256, 91),
(1, 266, 89),
(2, 276, 87),
(3, 286, 85),
(4, 296, 83),
(5, 306, 81),
(6, 316, 79),
(7, 326, 77),
(8, 336, 75),
(9, 346, 73)
]
_update_map(_table, ('child', 'average-pitch'),
" ap %s hs %s ", _child_ap)
_update_map(_table, ('kit', 'average-pitch'),
" ap %s hs %s ", _child_ap)
# pitch-range for male:
# Standard pitch range is 100 and is mapped to
# a setting of 5.
# A value of 0 produces a flat monotone voice --maximum value of 250
# produces a highly animated voice.
# Additionally, we also set the assertiveness of the voice so the
# voice is less assertive at lower pitch ranges.
_male_pr = [
(0, 0, 0),
(1, 20, 10),
(2, 40, 20),
(3, 60, 30),
(4, 80, 40, ),
(5, 100, 50, ),
(6, 137, 60),
(7, 174, 70),
(8, 211, 80),
(9, 250, 100),
]
_update_map(_table, ('male', 'pitch-range'),
" pr %s as %s ", _male_pr)
_update_map(_table, ('paul', 'pitch-range'),
" pr %s as %s ", _male_pr)
# For now, assume that standard pitch range is reasonably
# consistent for all male voices with the execption of harry
_update_map(_table, ('frank', 'pitch-range'),
" pr %s as %s ", _male_pr)
_update_map(_table, ('dennis', 'pitch-range'),
" pr %s as %s ", _male_pr)
_man_pr = [
(0, 0, 0),
(1, 16, 20),
(2, 32, 40),
(3, 48, 60),
(4, 64, 80, ),
(5, 80, 100, ),
(6, 137, 100),
(7, 174, 100),
(8, 211, 100),
(9, 250, 100)
]
_update_map(_table, ('man', 'pitch-range'),
" pr %s as %s ", _man_pr)
_update_map(_table, ('harry', 'pitch-range'),
" pr %s as %s ", _man_pr)
_female_pr = [
(0, 0, 0),
(1, 50, 10),
(2, 80, 20),
(3, 100, 25),
(4, 110, 30, ),
(5, 140, 35),
(6, 165, 57),
(7, 190, 75),
(8, 220, 87),
(9, 250, 100)
]
_update_map(_table, ('female', 'pitch-range'),
" pr %s as %s ", _female_pr)
_update_map(_table, ('betty', 'pitch-range'),
" pr %s as %s ", _female_pr)
# For now, assume that standard pitch range is reasonably
# consistent for all female voices, including kit
_update_map(_table, ('ursula', 'pitch-range'),
" pr %s as %s ", _female_pr)
_update_map(_table, ('rita', 'pitch-range'),
" pr %s as %s ", _female_pr)
_update_map(_table, ('wendy', 'pitch-range'),
" pr %s as %s ", _female_pr)
_update_map(_table, ('kit', 'pitch-range'),
" pr %s as %s ", _female_pr)
_update_map(_table, ('child', 'pitch-range'),
" pr %s as %s ", _female_pr)
# Stress:
# On the Dectalk we vary four parameters
# The hat rise which controls the overall shape of the F0 contour
# for sentence level intonation and stress,
# The stress rise that controls the level of stress on stressed
# syllables,
# the baseline fall for paragraph level intonation
# and the quickness --a parameter that controls whether the final
# frequency targets are completely achieved in the phonetic transitions.
_male_stress = [
(0, 0, 0, 0, 0),
(1, 3, 6, 20, 3),
(2, 6, 12, 40, 6),
(3, 9, 18, 60, 9, ),
(4, 12, 24, 80, 14),
(5, 18, 32, 100, 18),
(6, 34, 50, 100, 20),
(7, 48, 65, 100, 35),
(8, 63, 82, 100, 60),
(9, 80, 90, 100, 40)
]
_update_map(_table, ('male', 'stress'),
" hr %s sr %s qu %s bf %s ", _male_stress)
_update_map(_table, ('paul', 'stress'),
" hr %s sr %s qu %s bf %s ", _male_stress)
# For now, grabbing these values for all males but Harry
_update_map(_table, ('frank', 'stress'),
" hr %s sr %s qu %s bf %s ", _male_stress)
_update_map(_table, ('dennis', 'stress'),
" hr %s sr %s qu %s bf %s ", _male_stress)
_man_stress = [
(0, 0, 0, 0, 0),
(1, 4, 6, 2, 2),
(2, 8, 12, 4, 4),
(3, 12, 18, 6, 6),
(4, 16, 24, 8, 8),
(5, 20, 30, 10, 9),
(6, 40, 48, 32, 16),
(7, 60, 66, 54, 22),
(8, 80, 78, 77, 34),
(9, 100, 100, 100, 40)
]
_update_map(_table, ('man', 'stress'),
" hr %s sr %s qu %s bf %s ", _man_stress)
_update_map(_table, ('harry', 'stress'),
" hr %s sr %s qu %s bf %s ", _man_stress)
_female_stress = [
(0, 1, 1, 0, 0),
(1, 3, 4, 11, 0),
(2, 5, 8, 22, 0),
(3, 8, 12, 33, 0),
(4, 11, 16, 44, 0),
(5, 14, 20, 55, 0),
(6, 35, 40, 65, 10),
(7, 56, 80, 75, 20),
(8, 77, 90, 85, 30),
(9, 100, 100, 100, 40)
]
_update_map(_table, ('female', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
_update_map(_table, ('betty', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
# For now, grabbing these values for all females including kit
_update_map(_table, ('ursula', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
_update_map(_table, ('rita', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
_update_map(_table, ('wendy', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
_update_map(_table, ('kit', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
_update_map(_table, ('child', 'stress'),
" hr %s sr %s qu %s bf %s ", _female_stress)
#richness
# Smoothness and richness vary inversely.
# a maximally smooth voice produces a quieter effect
# a rich voice is "bright" in contrast.
_male_richness = [
(0, 0, 100),
(1, 14, 80),
(2, 28, 60),
(3, 42, 40),
(4, 56, 30),
(5, 70, 28),
(6, 60, 24 ),
(7, 70, 16),
(8, 80, 8),
(9, 100, 0)
]
_update_map(_table, ('male', 'richness'),
" ri %s sm %s " ,_male_richness)
_update_map(_table, ('paul', 'richness'),
" ri %s sm %s " ,_male_richness)
# For now, grabbing these values for all males but Harry
_update_map(_table, ('frank', 'richness'),
" ri %s sm %s " ,_male_richness)
_update_map(_table, ('dennis', 'richness'),
" ri %s sm %s " ,_male_richness)
_man_richness = [
(0, 100, 0),
(1, 96, 3),
(2, 93, 6),
(3, 90, 9),
(4, 88, 11),
(5, 86, 12),
(6, 60, 24, ),
(7, 40, 44),
(8, 20, 65),
(9, 0, 70)
]
_update_map(_table, ('man', 'richness'),
" ri %s sm %s " , _man_richness)
_update_map(_table, ('harry', 'richness'),
" ri %s sm %s " , _man_richness)
_female_richness = [
(0, 0, 100),
(1, 8, 76),
(2, 16, 52),
(3, 24,28),
(4, 32, 10),
(5, 40, 4),
(6, 50, 3),
(7, 65, 3),
(8, 80, 2),
(9, 100, 0)
]
_update_map(_table, ('female', 'richness'),
" ri %s sm %s ", _female_richness)
_update_map(_table, ('betty', 'richness'),
" ri %s sm %s ", _female_richness)
# For now, grabbing these values for all females including kit
_update_map(_table, ('ursula', 'richness'),
" ri %s sm %s ", _female_richness)
_update_map(_table, ('rita', 'richness'),
" ri %s sm %s ", _female_richness)
_update_map(_table, ('wendy', 'richness'),
" ri %s sm %s ", _female_richness)
_update_map(_table, ('kit', 'richness'),
" ri %s sm %s ", _female_richness)
_update_map(_table, ('child', 'richness'),
" ri %s sm %s ", _female_richness)
def getrate(r):
    """Map an ACSS rate setting onto a Dectalk :ra value (180 wpm baseline)."""
    baseline = 180
    return int(baseline + r * 4)
def getvolume(v):
    """Map an ACSS gain setting onto a Dectalk volume value (x10 scale)."""
    scaled = v * 10
    return int(scaled)
def getvoicelist():
    """Return the available voice family names (keys of the family table)."""
    return _table['family'].keys()
def getvoice(acss):
    """Return the (open, close) synthesizer code pair for an ACSS setting.

    `open` selects the voice, `close` resets it.  Results are memoized per
    acss.name() so repeated lookups skip recomputation.
    """
    key = acss.name()
    cached = _defined_voices.get(key)
    if cached is None:
        # acss2voice always returns a tuple, so None reliably means "miss".
        cached = acss2voice(acss)
        _defined_voices[key] = cached
    return cached
def acss2voice(acss):
    """Translate an ACSS description into a Dectalk control string.

    Returns an (open, close) pair: `open` combines the family code,
    rate, punctuation mode, volume and any :dv voice-parameter settings;
    `close` resets the synthesizer to the standard Paul voice.
    """
    code = ""
    # Default to the standard male voice when no family is requested.
    familyName = 'male'
    if 'family' in acss:
        familyName = acss['family']['name']
    if familyName in _table['family']:
        code += _table['family'][familyName]
    if 'rate' in acss:
        code += " :ra %s" % getrate(acss['rate'])
    if 'punctuations' in acss:
        code += " :punc %s" % acss['punctuations']
    if 'gain' in acss:
        code += " :volume set %s" % getvolume(acss['gain'])
    voice = ""
    dv = ""
    # Collect per-dimension :dv parameters; dimensions with no table entry
    # for this family are silently skipped (this is why the 'kid'/'kit'
    # spelling mismatch in the tables above goes unnoticed at runtime).
    for d in ['average-pitch', 'pitch-range',
              'richness', 'stress']:
        if d in acss:
            if (familyName, d) in _table:
                voice += _table[(familyName, d)][int(acss[d])]
    if voice:
        dv = " :dv %s" % voice
    if code or voice:
        code = "[%s %s]" % (code, dv)
    return (code, " [:np] ")
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/dectalk.py
|
Python
|
gpl-3.0
| 14,816
|
[
"ORCA"
] |
7b1d28780b981e1b1edbf3c24ebb06115a4009f2cfbb1ba6c7ad534433f870f1
|
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, assert_warns, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from test_continuous_basic import distcont
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
# Consumed by test_all_distributions() below.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
         'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
         'foldcauchy', 'gamma', 'gengamma', 'loggamma',
         'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma',
         'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
         'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
         'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
         'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
         'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
         'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
         'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang', 'tukeylambda',
         'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
         'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
    # The private continuous generator classes (e.g. f_gen) must stay
    # importable from scipy.stats.distributions.
    # https://github.com/scipy/scipy/issues/3802
    _assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
    """KS-test samples of `dist` against its own cdf (cdf='' reuses the
    rvs name); retry once before asserting, since a single random sample
    can dip below `alpha` by chance."""
    D, pval = stats.kstest(dist, '', args=args, N=1000)
    if (pval < alpha):
        D, pval = stats.kstest(dist, '', args=args, N=1000)
        assert_(pval > alpha,
                msg="D = {}; pval = {}; alpha = {}; args = {}".format(
                    D, pval, alpha, args))
# nose test generator
def test_all_distributions():
    # nose-style generator: yields one KS self-consistency check per
    # distribution, drawing shape parameters that satisfy each
    # distribution's constraints.
    for dist in dists:
        distfunc = getattr(stats, dist)
        nargs = distfunc.numargs
        alpha = 0.01
        if dist == 'fatiguelife':
            # stricter level for this distribution (presumably flakier
            # at 0.01 -- kept from the original)
            alpha = 0.001
        if dist == 'trapz':
            # trapz requires sorted shape parameters (c <= d)
            args = tuple(np.sort(np.random.random(nargs)))
        elif dist == 'triang':
            args = tuple(np.random.random(nargs))
        elif dist == 'reciprocal' or dist == 'truncnorm':
            # these require a < b
            vals = np.random.random(nargs)
            vals[1] = vals[0] + 1.0
            args = tuple(vals)
        elif dist == 'vonmises':
            # also exercise fixed low/high concentrations
            yield check_distribution, dist, (10,), alpha
            yield check_distribution, dist, (101,), alpha
            args = tuple(1.0 + np.random.random(nargs))
        else:
            args = tuple(1.0 + np.random.random(nargs))
        yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
    """The vonmises pdf must repeat with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.pdf(x), frozen.pdf(x % period))
def check_vonmises_cdf_periodic(k, l, s, x):
    """The vonmises cdf, taken mod 1, must repeat with period 2*pi*scale."""
    frozen = stats.vonmises(k, loc=l, scale=s)
    period = 2 * numpy.pi * s
    assert_almost_equal(frozen.cdf(x) % 1, frozen.cdf(x % period) % 1)
def test_vonmises_pdf_periodic():
    # nose generator: periodicity checks over several concentrations,
    # evaluation points, and (loc, scale) pairs; yield order matches the
    # original nested-loop form exactly.
    loc_scale_pairs = [(0, 1), (1, 1), (0, 10)]
    for k in [0.1, 1, 101]:
        for x in [0, 1, numpy.pi, 10, 100]:
            for loc, scale in loc_scale_pairs:
                yield check_vonmises_pdf_periodic, k, loc, scale, x
            for loc, scale in loc_scale_pairs:
                yield check_vonmises_cdf_periodic, k, loc, scale, x
def test_vonmises_line_support():
    # The support of vonmises_line is exactly [-pi, pi].
    lower, upper = stats.vonmises_line.a, stats.vonmises_line.b
    assert_equal(lower, -np.pi)
    assert_equal(upper, np.pi)
def test_vonmises_numerical():
    # At very high concentration the cdf at the center must still be 0.5
    # (guards against numerical breakdown of the series evaluation).
    concentrated = stats.vonmises(800)
    assert_almost_equal(concentrated.cdf(0), 0.5)
def test_support():
    """gh-6235: pdf must vanish (and logpdf be -inf) at both endpoints of
    an open support."""
    def check_open_support(rvs, args):
        dist = getattr(stats, rvs)
        assert_almost_equal(dist.pdf(dist.a, *args), 0)
        assert_equal(dist.logpdf(dist.a, *args), -np.inf)
        assert_almost_equal(dist.pdf(dist.b, *args), 0)
        assert_equal(dist.logpdf(dist.b, *args), -np.inf)
    # NOTE: intentionally shadows the module-level `dists` list with the
    # subset of open-support distributions checked here.
    dists = ['alpha', 'arcsine', 'betaprime', 'burr', 'burr12',
             'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
             'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
             'powerlognorm', 'rayleigh', 'wald']
    # Shape parameters come from the shared distcont fixture table.
    dct = dict(distcont)
    for dist in dists:
        args = dct[dist]
        yield check_open_support, dist, args
class TestRandInt(TestCase):
    def test_rvs(self):
        # Draws must respect the half-open support [5, 30), the requested
        # shape, and an integer dtype.
        draws = stats.randint.rvs(5, 30, size=100)
        assert_(numpy.all(draws < 30) & numpy.all(draws >= 5))
        assert_(len(draws) == 100)
        draws = stats.randint.rvs(5, 30, size=(2, 50))
        assert_(numpy.shape(draws) == (2, 50))
        assert_(draws.dtype.char in typecodes['AllInteger'])
        scalar = stats.randint.rvs(15, 46)
        assert_((scalar >= 15) & (scalar < 46))
        assert_(isinstance(scalar, numpy.ScalarType), msg=repr(type(scalar)))
        frozen_draws = stats.randint(15, 46).rvs(3)
        assert_(frozen_draws.dtype.char in typecodes['AllInteger'])
    def test_pdf(self):
        # pmf is uniform 1/(b-a) on [5, 30), zero elsewhere.
        support = numpy.r_[0:36]
        expected = numpy.where((support >= 5) & (support < 30), 1.0/(30-5), 0)
        computed = stats.randint.pmf(support, 5, 30)
        assert_array_almost_equal(computed, expected)
    def test_cdf(self):
        # cdf is a staircase: 0 below 5, linear steps up to 1 at 30.
        grid = numpy.r_[0:36:100j]
        floored = numpy.floor(grid)
        expected = numpy.select([floored >= 30, floored >= 5],
                                [1.0, (floored - 5.0 + 1) / (30 - 5.0)], 0)
        computed = stats.randint.cdf(grid, 5, 30)
        assert_array_almost_equal(computed, expected, decimal=12)
class TestBinom(TestCase):
    def test_rvs(self):
        # Draws must respect support [0, n], requested shape, integer dtype.
        vals = stats.binom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.binom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.binom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for Ticket #1842
        # degenerate cases: pmf must be exactly 1 at k=n when p=1 and at
        # k=0 when p=0
        vals1 = stats.binom.pmf(100, 100, 1)
        vals2 = stats.binom.pmf(0, 100, 0)
        assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
        assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
    def test_entropy(self):
        # Basic entropy tests: compare against the closed-form
        # -sum(p*log(p)) over the support.
        b = stats.binom(2, 0.5)
        expected_p = np.array([0.25, 0.5, 0.25])
        expected_h = -sum(xlogy(expected_p, expected_p))
        h = b.entropy()
        assert_allclose(h, expected_h)
        # Degenerate distributions (p=0 or p=1) carry zero entropy.
        b = stats.binom(2, 0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.binom(2, 1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
    def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_equal(stats.binom(n=2, p=0).mean(), 0)
            assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
    def test_rvs(self):
        # Draws must be 0/1, honor the requested shape, and be integer typed.
        vals = stats.bernoulli.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.bernoulli.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.bernoulli(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_entropy(self):
        # Simple tests of entropy against the closed form
        # -p*log(p) - (1-p)*log(1-p).
        b = stats.bernoulli(0.25)
        expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
        h = b.entropy()
        assert_allclose(h, expected_h)
        # Degenerate cases p=0 and p=1 have zero entropy.
        b = stats.bernoulli(0.0)
        h = b.entropy()
        assert_equal(h, 0.0)
        b = stats.bernoulli(1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
class TestBradford(TestCase):
    """Round-trip regression test for the Bradford distribution (gh-6216)."""
    def test_cdf_ppf(self):
        # ppf must invert cdf across many orders of magnitude near zero.
        shape = 0.1
        pts = np.logspace(-20, -4)
        roundtrip = stats.bradford.ppf(stats.bradford.cdf(pts, shape), shape)
        assert_allclose(pts, roundtrip)
class TestNBinom(TestCase):
    """Tests for the negative binomial distribution (stats.nbinom)."""
    def test_rvs(self):
        # Samples are non-negative integers with the requested shape;
        # a scalar draw is a plain Python int.
        vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.nbinom.rvs(10, 0.75)
        assert_(isinstance(val, int))
        val = stats.nbinom(10, 0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # regression test for ticket 1779: exp(logpmf) must agree with
        # pmf even for large count arguments.
        assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
                        stats.nbinom.pmf(700, 721, 0.52))
        # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
        val = scipy.stats.nbinom.logpmf(0, 1, 1)
        assert_equal(val, 0)
class TestGeom(TestCase):
    """Tests for the geometric distribution (stats.geom)."""
    def test_rvs(self):
        # Draws keep the requested shape and an integer dtype; a scalar
        # draw is a plain Python int.
        vals = stats.geom.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.geom.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.geom(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf(self):
        # pmf(k) = p*(1-p)**(k-1); for p=0.5 this halves at each step.
        vals = stats.geom.pmf([1, 2, 3], 0.5)
        assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
    def test_logpmf(self):
        # regression test for ticket 1793: logpmf agrees with log(pmf)
        vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
        vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
        assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
        # regression test for gh-4028: logpmf(1, 1) must be 0, not nan
        val = stats.geom.logpmf(1, 1)
        assert_equal(val, 0.0)
    def test_cdf_sf(self):
        # cdf and sf are complements of each other.
        vals = stats.geom.cdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.sf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, expected)
        assert_array_almost_equal(vals_sf, 1-expected)
    def test_logcdf_logsf(self):
        # Same complement check in log space (log1p keeps precision).
        vals = stats.geom.logcdf([1, 2, 3], 0.5)
        vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
        expected = array([0.5, 0.75, 0.875])
        assert_array_almost_equal(vals, np.log(expected))
        assert_array_almost_equal(vals_sf, np.log1p(-expected))
    def test_ppf(self):
        # ppf inverts the cdf values checked above.
        vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
        expected = array([1.0, 2.0, 3.0])
        assert_array_almost_equal(vals, expected)
class TestGennorm(TestCase):
    """Check gennorm against its two classical special cases."""
    def test_laplace(self):
        # beta=1 reduces the generalized normal to the Laplace density.
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 1),
                            stats.laplace.pdf(pts))
    def test_norm(self):
        # beta=2 reduces it to a normal with scale 1/sqrt(2).
        pts = [1, 2, 3]
        assert_almost_equal(stats.gennorm.pdf(pts, 2),
                            stats.norm.pdf(pts, scale=2**-.5))
class TestHalfgennorm(TestCase):
    """Check halfgennorm against its special cases and gennorm."""
    def test_expon(self):
        # test against exponential (special case for beta=1)
        points = [1, 2, 3]
        pdf1 = stats.halfgennorm.pdf(points, 1)
        pdf2 = stats.expon.pdf(points)
        assert_almost_equal(pdf1, pdf2)
    def test_halfnorm(self):
        # test against half normal (special case for beta=2, scale 1/sqrt(2))
        points = [1, 2, 3]
        pdf1 = stats.halfgennorm.pdf(points, 2)
        pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
        assert_almost_equal(pdf1, pdf2)
    def test_gennorm(self):
        # test against generalized normal: the half distribution has twice
        # the density on the positive axis for any shared beta.
        points = [1, 2, 3]
        pdf1 = stats.halfgennorm.pdf(points, .497324)
        pdf2 = stats.gennorm.pdf(points, .497324)
        assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(TestCase):
    """Tests for the truncated normal distribution (stats.truncnorm)."""
    def test_ppf_ticket1131(self):
        # ppf at q outside [0, 1] is nan; q=0 and q=1 map to the loc/scale
        # transformed truncation bounds (1 and 5 here).
        vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_isf_ticket1131(self):
        # isf mirrors ppf: same bounds, reversed order.
        vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
                                   loc=[3]*7, scale=2)
        expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
        assert_array_almost_equal(vals, expected)
    def test_gh_2477_small_values(self):
        # Check a case that worked in the original issue.
        low, high = -11, -10
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
        # Check a case that failed in the original issue.
        low, high = 10, 11
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_2477_large_values(self):
        # Check a case that fails because of extreme tailness.
        # (Skipped unconditionally; the code below documents the failing
        # case and runs once the rvs implementation handles far tails.)
        raise SkipTest('truncnorm rvs is known to fail at extreme tails')
        low, high = 100, 101
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
    def test_gh_1489_trac_962_rvs(self):
        # Check the original example.
        low, high = 10, 15
        x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
        assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
    """Tests for the hypergeometric distribution (stats.hypergeom)."""
    def test_rvs(self):
        # Draws from hypergeom(M=20, n=10, N=3) lie in [0, 3], keep the
        # requested shape and an integer dtype.
        vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
        assert_(numpy.all(vals >= 0) &
                numpy.all(vals <= 3))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.hypergeom.rvs(20, 3, 10)
        assert_(isinstance(val, int))
        val = stats.hypergeom(20, 3, 10).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_precision(self):
        # comparison number from mpmath
        M = 2500
        n = 50
        N = 500
        tot = M
        good = n
        hgpmf = stats.hypergeom.pmf(2, tot, good, N)
        assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
    def test_args(self):
        # test correct output for corner cases of arguments
        # see gh-2325
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        # NOTE(review): the next line duplicates the assertion two lines
        # up; it may have been meant as pmf(1, 2, 0, 2) — confirm before
        # changing, the duplicate is harmless as-is.
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
    def test_cdf_above_one(self):
        # for some values of parameters, hypergeom cdf was >1, see gh-2238
        assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
    def test_precision2(self):
        # Test hypergeom precision for large numbers. See #1218.
        # Results compared with those from R.
        oranges = 9.9e4
        pears = 1.1e5
        fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
        quantile = 2e4
        res = []
        for eaten in fruits_eaten:
            res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
                                          eaten))
        expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
                             8.265601e-11, 0.1237904, 1])
        assert_allclose(res, expected, atol=0, rtol=5e-7)
        # Test with array_like first argument
        quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
        res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
        expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
        assert_allclose(res2, expected2, atol=0, rtol=5e-7)
    def test_entropy(self):
        # Simple tests of entropy: a two-point support and a degenerate one.
        hg = stats.hypergeom(4, 1, 1)
        h = hg.entropy()
        expected_p = np.array([0.75, 0.25])
        expected_h = -np.sum(xlogy(expected_p, expected_p))
        assert_allclose(h, expected_h)
        hg = stats.hypergeom(1, 1, 1)
        h = hg.entropy()
        assert_equal(h, 0.0)
    def test_logsf(self):
        # Test logsf for very large numbers. See issue #4982
        # Results compare with those from R (v3.2.0):
        # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
        # -2239.771
        k = 1e4
        M = 1e7
        n = 1e6
        N = 5e4
        result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
class TestLoggamma(TestCase):
    """Tests for the log-gamma distribution (stats.loggamma)."""
    def test_stats(self):
        # The following precomputed values are from the table in section 2.2
        # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
        # Chan (thesis, McMaster University, 1993).
        table = np.array([
            # c,    mean,    var,    skew,    exc. kurt.
            0.5, -1.9635, 4.9348, -1.5351, 4.0000,
            1.0, -0.5772, 1.6449, -1.1395, 2.4000,
            12.0, 2.4427, 0.0869, -0.2946, 0.1735,
            ]).reshape(-1, 5)
        for c, mean, var, skew, kurt in table:
            # Use 'mvsk' so the requested order matches the comparison
            # list below (the original 'msvk' relied on stats() always
            # returning in m, v, s, k order regardless of the string).
            computed = stats.loggamma.stats(c, moments='mvsk')
            assert_array_almost_equal(computed, [mean, var, skew, kurt],
                                      decimal=4)
class TestLogistic(TestCase):
    # gh-6226
    """Round-trip and tail-precision tests for stats.logistic (gh-6226)."""
    def test_cdf_ppf(self):
        # ppf must invert cdf over a wide symmetric range.
        x = np.linspace(-20, 20)
        y = stats.logistic.cdf(x)
        xx = stats.logistic.ppf(y)
        assert_allclose(x, xx)
    def test_sf_isf(self):
        # isf must invert sf over the same range.
        x = np.linspace(-20, 20)
        y = stats.logistic.sf(x)
        xx = stats.logistic.isf(y)
        assert_allclose(x, xx)
    def test_extreme_values(self):
        # p is chosen so that 1 - (1 - p) == p in double precision
        p = 9.992007221626409e-16
        desired = 34.53957599234088
        # Both tail inverses must keep full precision this far out.
        assert_allclose(stats.logistic.ppf(1 - p), desired)
        assert_allclose(stats.logistic.isf(p), desired)
class TestLogser(TestCase):
    """Tests for the log-series distribution (stats.logser)."""
    def test_rvs(self):
        # Support starts at 1; draws keep shape and integer dtype, and a
        # scalar draw is a plain Python int.
        vals = stats.logser.rvs(0.75, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.logser.rvs(0.75)
        assert_(isinstance(val, int))
        val = stats.logser(0.75).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_pmf_small_p(self):
        # pmf must not underflow/lose precision for tiny p.
        m = stats.logser.pmf(4, 1e-20)
        # The expected value was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.mp.dps = 64
        #   >>> k = 4
        #   >>> p = mpmath.mpf('1e-20')
        #   >>> float(-(p**k)/k/mpmath.log(1-p))
        #   2.5e-61
        # It is also clear from noticing that for very small p,
        # log(1-p) is approximately -p, and the formula becomes
        #    p**(k-1) / k
        assert_allclose(m, 2.5e-61)
    def test_mean_small_p(self):
        # The mean must stay accurate as p -> 0 (limit is 1).
        m = stats.logser.mean(1e-8)
        # The expected mean was computed using mpmath:
        #   >>> import mpmath
        #   >>> mpmath.dps = 60
        #   >>> p = mpmath.mpf('1e-8')
        #   >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
        #   1.000000005
        assert_allclose(m, 1.000000005)
class TestPareto(TestCase):
    """Moment tests for stats.pareto.

    The n-th moment of pareto(b) is finite only for b > n, so as b grows
    past 1, 2, 3, 4 the mean, variance, skewness and kurtosis become
    finite in turn; below those thresholds they must be inf/nan.
    """
    def test_stats(self):
        # Check the stats() method with some simple values. Also check
        # that the calculations do not trigger RuntimeWarnings.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            # b <= 1: no finite moments at all.
            m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
            assert_equal(m, np.inf)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 1 < b <= 2: finite mean only.
            m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
            assert_equal(m, 3.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
            assert_equal(m, 2.0)
            assert_equal(v, np.inf)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 2 < b <= 3: finite mean and variance.
            m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
            assert_allclose(m, 2.5 / 1.5)
            assert_allclose(v, 2.5 / (1.5*1.5*0.5))
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
            assert_allclose(m, 1.5)
            assert_allclose(v, 0.75)
            assert_equal(s, np.nan)
            assert_equal(k, np.nan)
            # 3 < b <= 4: skewness becomes finite as well.
            m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
            assert_allclose(m, 3.5 / 2.5)
            assert_allclose(v, 3.5 / (2.5*2.5*1.5))
            assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
            assert_equal(k, np.nan)
            m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
            assert_allclose(m, 4.0 / 3.0)
            assert_allclose(v, 4.0 / 18.0)
            assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
            assert_equal(k, np.nan)
            # b > 4: all four moments finite.
            m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
            assert_allclose(m, 4.5 / 3.5)
            assert_allclose(v, 4.5 / (3.5*3.5*2.5))
            assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
            assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
class TestGenpareto(TestCase):
    """Tests for the generalized Pareto distribution (stats.genpareto)."""
    def test_ab(self):
        # Support bounds depend on the shape c:
        # c >= 0: a, b = [0, inf]
        for c in [1., 0.]:
            c = np.asarray(c)
            # _argcheck updates the class-level a/b as a side effect. (ugh)
            stats.genpareto._argcheck(c)
            assert_equal(stats.genpareto.a, 0.)
            assert_(np.isposinf(stats.genpareto.b))
        # c < 0: a=0, b=1/|c|
        c = np.asarray(-2.)
        stats.genpareto._argcheck(c)
        assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
    def test_c0(self):
        # with c=0, genpareto reduces to the exponential distribution
        rv = stats.genpareto(c=0.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.expon.pdf(x))
        assert_allclose(rv.cdf(x), stats.expon.cdf(x))
        assert_allclose(rv.sf(x), stats.expon.sf(x))
        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.expon.ppf(q))
    def test_cm1(self):
        # with c=-1, genpareto reduces to the uniform distr on [0, 1]
        rv = stats.genpareto(c=-1.)
        x = np.linspace(0, 10., 30)
        assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
        assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
        assert_allclose(rv.sf(x), stats.uniform.sf(x))
        q = np.linspace(0., 1., 10)
        assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
        # logpdf(1., c=-1) should be zero
        assert_allclose(rv.logpdf(1), 0)
    def test_x_inf(self):
        # make sure x=inf is handled gracefully for every sign of c
        rv = stats.genpareto(c=0.1)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
        rv = stats.genpareto(c=0.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
        rv = stats.genpareto(c=-1.)
        assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
        assert_(np.isneginf(rv.logpdf(np.inf)))
    def test_c_continuity(self):
        # pdf and cdf are continuous at the special shapes c=0 and c=-1:
        # perturbing c by +/-1e-14 must not move the values appreciably.
        x = np.linspace(0, 10, 30)
        for c in [0, -1]:
            pdf0 = stats.genpareto.pdf(x, c)
            for dc in [1e-14, -1e-14]:
                pdfc = stats.genpareto.pdf(x, c + dc)
                assert_allclose(pdf0, pdfc, atol=1e-12)
            cdf0 = stats.genpareto.cdf(x, c)
            # BUG FIX: this loop previously used [1e-14, 1e-14], testing
            # the same right-sided perturbation twice and never checking
            # continuity from the left.
            for dc in [1e-14, -1e-14]:
                cdfc = stats.genpareto.cdf(x, c + dc)
                assert_allclose(cdf0, cdfc, atol=1e-12)
    def test_c_continuity_ppf(self):
        # ppf is continuous at c=0, -1 for quantiles spanning both tails.
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            ppf0 = stats.genpareto.ppf(q, c)
            for dc in [1e-14, -1e-14]:
                ppfc = stats.genpareto.ppf(q, c + dc)
                assert_allclose(ppf0, ppfc, atol=1e-12)
    def test_c_continuity_isf(self):
        # Same continuity check for the inverse survival function.
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [0., -1.]:
            isf0 = stats.genpareto.isf(q, c)
            for dc in [1e-14, -1e-14]:
                isfc = stats.genpareto.isf(q, c + dc)
                assert_allclose(isf0, isfc, atol=1e-12)
    def test_cdf_ppf_roundtrip(self):
        # this should pass with machine precision. hat tip @pbrod
        q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
                  np.linspace(0.01, 1, 30, endpoint=False),
                  1. - np.logspace(1e-12, 0.01, base=0.1)]
        for c in [1e-8, -1e-18, 1e-15, -1e-15]:
            assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
                            q, atol=1e-15)
    def test_logsf(self):
        # logsf must not underflow far out in the tail.
        logp = stats.genpareto.logsf(1e10, .01, 0, 1)
        assert_allclose(logp, -1842.0680753952365)
class TestPearson3(TestCase):
    """Tests for the Pearson type III distribution (stats.pearson3)."""
    def test_rvs(self):
        # Draws are floats with the requested shape; scalar draws are
        # plain Python floats.
        vals = stats.pearson3.rvs(0.1, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllFloat'])
        val = stats.pearson3.rvs(0.5)
        assert_(isinstance(val, float))
        val = stats.pearson3(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllFloat'])
        assert_(len(val) == 3)
    def test_pdf(self):
        # Spot-check pdf values for scalar and array skew arguments.
        vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
                        atol=1e-6)
        vals = stats.pearson3.pdf(-3, 0.1)
        assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
        vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
                                        0.39885918, 0.23413173]), atol=1e-6)
    def test_cdf(self):
        # Spot-check cdf values for scalar and array skew arguments.
        vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
        assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
                        atol=1e-6)
        vals = stats.pearson3.cdf(-3, 0.1)
        assert_allclose(vals, [0.00082256], atol=1e-6)
        vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
        assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
                               5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(TestCase):
    """Check kappa4 against the named distributions it reduces to for
    particular (h, k) shape combinations."""
    def test_cdf_genpareto(self):
        # h = 1 and k != 0 is generalized Pareto
        x = [0.0, 0.1, 0.2, 0.5]
        h = 1.0
        for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
                  1.9]:
            vals = stats.kappa4.cdf(x, h, k)
            # shape parameter is opposite what is expected
            vals_comp = stats.genpareto.cdf(x, -k)
            assert_allclose(vals, vals_comp)
    def test_cdf_genextreme(self):
        # h = 0 and k != 0 is generalized extreme value
        x = np.linspace(-5, 5, 10)
        h = 0.0
        k = np.linspace(-3, 3, 10)
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.genextreme.cdf(x, k)
        assert_allclose(vals, vals_comp)
    def test_cdf_expon(self):
        # h = 1 and k = 0 is exponential
        x = np.linspace(0, 10, 10)
        h = 1.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.expon.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_gumbel_r(self):
        # h = 0 and k = 0 is gumbel_r
        x = np.linspace(-5, 5, 10)
        h = 0.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.gumbel_r.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_logistic(self):
        # h = -1 and k = 0 is logistic
        x = np.linspace(-5, 5, 10)
        h = -1.0
        k = 0.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.logistic.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_cdf_uniform(self):
        # h = 1 and k = 1 is uniform
        x = np.linspace(-5, 5, 10)
        h = 1.0
        k = 1.0
        vals = stats.kappa4.cdf(x, h, k)
        vals_comp = stats.uniform.cdf(x)
        assert_allclose(vals, vals_comp)
    def test_integers_ctor(self):
        # regression test for gh-7416: _argcheck fails for integer h and k
        # in numpy 1.12
        stats.kappa4(1, 2)
class TestPoisson(TestCase):
    """Tests for the Poisson distribution (stats.poisson)."""
    def test_pmf_basic(self):
        # Basic case: with mu = log(2), pmf(k) = exp(-mu)*mu**k/k!
        # gives 0.5, ln2/2, ln2**2/4 for k = 0, 1, 2.
        ln2 = np.log(2)
        vals = stats.poisson.pmf([0, 1, 2], ln2)
        expected = [0.5, ln2/2, ln2**2/4]
        assert_allclose(vals, expected)
    def test_mu0(self):
        # Edge case: mu=0 is the point mass at 0.
        vals = stats.poisson.pmf([0, 1, 2], 0)
        expected = [1, 0, 0]
        assert_array_equal(vals, expected)
        interval = stats.poisson.interval(0.95, 0)
        assert_equal(interval, (0, 0))
    def test_rvs(self):
        # Draws are non-negative integers with the requested shape;
        # scalar draws are plain Python ints.
        vals = stats.poisson.rvs(0.5, size=(2, 50))
        assert_(numpy.all(vals >= 0))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.poisson.rvs(0.5)
        assert_(isinstance(val, int))
        val = stats.poisson(0.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_stats(self):
        # mean = var = mu, skew = 1/sqrt(mu), kurt = 1/mu; for mu=0 the
        # skew and kurtosis diverge to inf.
        mu = 16.0
        result = stats.poisson.stats(mu, moments='mvsk')
        assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
        mu = np.array([0.0, 1.0, 2.0])
        result = stats.poisson.stats(mu, moments='mvsk')
        expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
        assert_allclose(result, expected)
class TestZipf(TestCase):
    """Tests for the Zipf distribution (stats.zipf)."""
    def test_rvs(self):
        # Support starts at 1; draws keep shape and integer dtype, and a
        # scalar draw is a plain Python int.
        vals = stats.zipf.rvs(1.5, size=(2, 50))
        assert_(numpy.all(vals >= 1))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.zipf.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.zipf(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
    def test_moments(self):
        # n-th moment is finite iff a > n + 1
        m, v = stats.zipf.stats(a=2.8)
        assert_(np.isfinite(m))
        assert_equal(v, np.inf)
        # a=4.8 gives finite skew but infinite kurtosis.
        s, k = stats.zipf.stats(a=4.8, moments='sk')
        assert_(not np.isfinite([s, k]).all())
class TestDLaplace(TestCase):
    """Tests for the discrete Laplacian distribution (stats.dlaplace)."""
    def test_rvs(self):
        # Draws keep the requested shape and an integer dtype; a scalar
        # draw is a plain Python int.
        vals = stats.dlaplace.rvs(1.5, size=(2, 50))
        assert_(numpy.shape(vals) == (2, 50))
        assert_(vals.dtype.char in typecodes['AllInteger'])
        val = stats.dlaplace.rvs(1.5)
        assert_(isinstance(val, int))
        val = stats.dlaplace(1.5).rvs(3)
        assert_(isinstance(val, numpy.ndarray))
        assert_(val.dtype.char in typecodes['AllInteger'])
        assert_(stats.dlaplace.rvs(0.8) is not None)
    def test_stats(self):
        # compare the explicit formulas w/ direct summation using pmf
        a = 1.
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        # Truncate the symmetric support at +/-N; for a=1 the tail mass
        # beyond 37 is negligible at this tolerance.
        N = 37
        xx = np.arange(-N, N+1)
        pp = dl.pmf(xx)
        m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
        # The distribution is symmetric: mean and skew are exactly 0.
        assert_equal((m, s), (0, 0))
        assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
    def test_stats2(self):
        # Closed-form check at a = log(2).
        a = np.log(2.)
        dl = stats.dlaplace(a)
        m, v, s, k = dl.stats('mvsk')
        assert_equal((m, s), (0., 0.))
        assert_allclose((v, k), (4., 3.25))
class TestInvGamma(TestCase):
    """Tests for the inverse gamma distribution (stats.invgamma)."""
    def test_invgamma_inf_gh_1866(self):
        # invgamma's moments are only finite for a>n
        # specific numbers checked w/ boost 1.54
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
            expected = [0.05461496450, 0.0001723162534, 1.020362676,
                        2.055616582]
            assert_allclose(mvsk, expected)
            # For a below each threshold the corresponding moment is
            # inf (variance) or nan (skew/kurtosis) — without warnings.
            a = [1.1, 3.1, 5.6]
            mvsk = stats.invgamma.stats(a=a, moments='mvsk')
            expected = ([10., 0.476190476, 0.2173913043],  # mmm
                        [np.inf, 0.2061430632, 0.01312749422],  # vvv
                        [np.nan, 41.95235392, 2.919025532],  # sss
                        [np.nan, np.nan, 24.51923076])  # kkk
            for x, y in zip(mvsk, expected):
                assert_almost_equal(x, y)
    def test_cdf_ppf(self):
        # gh-6245: ppf must invert cdf over the lower range.
        x = np.logspace(-2.6, 0)
        y = stats.invgamma.cdf(x, 1)
        xx = stats.invgamma.ppf(y, 1)
        assert_allclose(x, xx)
    def test_sf_isf(self):
        # gh-6245: isf must invert sf far into the upper tail.
        if sys.maxsize > 2**32:
            x = np.logspace(2, 100)
        else:
            # Invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
            x = np.logspace(2, 18)
        y = stats.invgamma.sf(x, 1)
        xx = stats.invgamma.isf(y, 1)
        assert_allclose(x, xx, rtol=1.0)
class TestF(TestCase):
    """Tests for the F distribution (stats.f)."""
    def test_f_moments(self):
        # n-th moment of F distributions is only finite for n < dfd / 2;
        # with dfd=6.5 the kurtosis (n=4) must be infinite/nan.
        m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
        assert_(np.isfinite(m))
        assert_(np.isfinite(v))
        assert_(np.isfinite(s))
        assert_(not np.isfinite(k))
    def test_moments_warnings(self):
        # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
    @dec.knownfailureif(True, 'f stats does not properly broadcast')
    def test_stats_broadcast(self):
        # stats do not fully broadcast just yet
        mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
    """Regression test for #1191: std must accept array shape arguments."""
    expected = [1.29099445, 1.22474487]
    assert_array_almost_equal(stats.t.std([5, 6]), expected)
class TestRvDiscrete(TestCase):
    """Tests for custom discrete distributions built with rv_discrete."""
    def test_rvs(self):
        # Sample frequencies should approximate the supplied pmf within
        # a loose 5% tolerance; scalar draws are plain Python ints.
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))
        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
        x = r.rvs()
        assert_(isinstance(x, int))
    def test_entropy(self):
        # Basic tests of entropy: -sum(p*log(p)), and 0 for a point mass.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)
        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0)
    def test_pmf(self):
        # pmf broadcasts over 2-d input; off-support points give 0.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)
    def test_cdf(self):
        # cdf is a right-continuous step function on the support.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
        expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
        assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.cdf(xx) for xx in x_values],
                        expected, atol=1e-14)
    def test_ppf(self):
        # ppf maps each quantile to the smallest support point whose cdf
        # reaches it.
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))
        q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
        expected = [1, 1, 2, 2, 4, 4]
        assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
        # also check scalar arguments
        assert_allclose([rv.ppf(q) for q in q_values],
                        expected, atol=1e-14)
    def test_cdf_ppf_next(self):
        # copied and special cased from test_discrete_basic: ppf of
        # cdf plus a small epsilon advances to the next support point.
        vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
        rv = stats.rv_discrete(values=vals)
        assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
                           rv.xk[1:])
    def test_expect(self):
        # The default expectation is sum(xk * pk).
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))
        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
    def test_bad_input(self):
        # Mismatched lengths and non-normalized weights must raise.
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
class TestSkewNorm(TestCase):
    """Tests for the skew-normal distribution (stats.skewnorm)."""
    def test_normal(self):
        # When the skewness is 0 the distribution is normal.
        x = np.linspace(-5, 5, 100)
        assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
                                  stats.norm.pdf(x))
    def test_rvs(self):
        # Sampling honours the requested output shape for either sign of a.
        shape = (3, 4, 5)
        x = stats.skewnorm.rvs(a=0.75, size=shape)
        assert_equal(shape, x.shape)
        x = stats.skewnorm.rvs(a=-3, size=shape)
        assert_equal(shape, x.shape)
    def test_moments(self):
        # Compare sample moments of a large draw against the analytic
        # values to only 2 decimals.  Seed the global RNG so this
        # statistical comparison is deterministic rather than flaky.
        np.random.seed(1234)
        X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)
        X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)
        assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
                                  stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk'),
                                  decimal=2)
class TestExpon(TestCase):
    """Tests for the exponential distribution (stats.expon)."""
    def test_zero(self):
        # pdf at the left endpoint of the support equals 1 (standard form).
        assert_equal(stats.expon.pdf(0), 1)
    def test_tail(self):  # Regression test for ticket 807
        # cdf keeps full precision for tiny arguments; isf inverts sf
        # exactly far into the tail.
        assert_equal(stats.expon.cdf(1e-18), 1e-18)
        assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
class TestExponNorm(TestCase):
    """Tests for the exponentially modified normal (stats.exponnorm)."""
    def test_moments(self):
        # Some moment test cases based on non-loc/scaled formula
        def get_moms(lam, sig, mu):
            # See wikipedia for these formulae
            #  where it is listed as an exponentially modified gaussian
            opK2 = 1.0 + 1 / (lam*sig)**2
            exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
            exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
            return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
        # scipy parameterizes by K = 1/(lam*sig); check several
        # (mu, sig, lam) combinations against the closed forms above.
        mu, sig, lam = 0, 1, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -3, 2, 0.1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = 0, 3, 1
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
        mu, sig, lam = -5, 11, 3.5
        K = 1.0 / (lam * sig)
        sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
        assert_almost_equal(sts, get_moms(lam, sig, mu))
    def test_extremes_x(self):
        # Test for extreme values against overflows
        assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
        assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
    """Tests for the generalized exponential distribution (stats.genexpon)."""
    def test_pdf_unity_area(self):
        from scipy.integrate import simps
        # PDF should integrate to one (trapezoid-ish check via Simpson's
        # rule on [0, 10) with step 0.01; 1-decimal tolerance).
        p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_almost_equal(simps(p, dx=0.01), 1, 1)
    def test_cdf_bounds(self):
        # CDF should always lie within [0, 1]
        cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
        assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
    """Tests for the exponential power distribution (stats.exponpow)."""
    def test_tail(self):
        # cdf keeps precision near zero (cdf(x, 2) ~ x**2 for tiny x)
        # and isf exactly inverts sf.
        assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
        assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
                            5)
class TestSkellam(TestCase):
    """Tests for the Skellam distribution against values computed in R."""
    def test_pmf(self):
        # comparison to R
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skpmfR = numpy.array(
                   [4.2254582961926893e-005, 1.1404838449648488e-004,
                    2.8979625801752660e-004, 6.9177078182101231e-004,
                    1.5480716105844708e-003, 3.2412274963433889e-003,
                    6.3373707175123292e-003, 1.1552351566696643e-002,
                    1.9606152375042644e-002, 3.0947164083410337e-002,
                    4.5401737566767360e-002, 6.1894328166820688e-002,
                    7.8424609500170578e-002, 9.2418812533573133e-002,
                    1.0139793148019728e-001, 1.0371927988298846e-001,
                    9.9076583077406091e-002, 8.8546660073089561e-002,
                    7.4187842052486810e-002, 5.8392772862200251e-002,
                    4.3268692953013159e-002, 3.0248159818374226e-002,
                    1.9991434305603021e-002, 1.2516877303301180e-002,
                    7.4389876226229707e-003])
        assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
    def test_cdf(self):
        # comparison to R, only 5 decimals
        k = numpy.arange(-10, 15)
        mu1, mu2 = 10, 5
        skcdfR = numpy.array(
                   [6.4061475386192104e-005, 1.7810985988267694e-004,
                    4.6790611790020336e-004, 1.1596768997212152e-003,
                    2.7077485103056847e-003, 5.9489760066490718e-003,
                    1.2286346724161398e-002, 2.3838698290858034e-002,
                    4.3444850665900668e-002, 7.4392014749310995e-002,
                    1.1979375231607835e-001, 1.8168808048289900e-001,
                    2.6011268998306952e-001, 3.5253150251664261e-001,
                    4.5392943399683988e-001, 5.5764871387982828e-001,
                    6.5672529695723436e-001, 7.4527195703032389e-001,
                    8.1945979908281064e-001, 8.7785257194501087e-001,
                    9.2112126489802404e-001, 9.5136942471639818e-001,
                    9.7136085902200120e-001, 9.8387773632530240e-001,
                    9.9131672394792536e-001])
        assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
    """Tests for the log-normal distribution (stats.lognorm)."""
    def test_pdf(self):
        # Regression test for Ticket #1471: avoid nan with 0/0 situation
        # Also make sure there are no warnings at x=0, cf gh-5202
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
            assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
    def test_logcdf(self):
        # Regression test for gh-5940: sf et al would underflow too early
        # lognorm.sf(x, s) must match norm.sf(log(x)/s) instead of
        # underflowing to 0 (and similarly for logsf).
        x2, mu, sigma = 201.68, 195, 0.149
        assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
                        stats.norm.sf(np.log(x2-mu)/sigma))
        assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
                        stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(TestCase):
    """Tests for the beta distribution (stats.beta)."""
    def test_logpdf(self):
        # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
        logpdf = stats.beta.logpdf(0, 1, 0.5)
        assert_almost_equal(logpdf, -0.69314718056)
        # With a < 1 the density diverges at 0, so logpdf is +inf.
        logpdf = stats.beta.logpdf(0, 0.5, 1)
        assert_almost_equal(logpdf, np.inf)
    def test_logpdf_ticket_1866(self):
        # logpdf must stay accurate for large shape parameters and agree
        # with pdf via exp().
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.beta(alpha, beta)
        assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
class TestBetaPrime(TestCase):
    """Tests for the beta prime distribution (stats.betaprime)."""
    def test_logpdf(self):
        # logpdf stays finite for large shape parameters and agrees with
        # pdf via exp().
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        b = stats.betaprime(alpha, beta)
        assert_(np.isfinite(b.logpdf(x)).all())
        assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
    def test_cdf(self):
        # regression test for gh-4030: Implementation of
        # scipy.stats.betaprime.cdf()
        x = stats.betaprime.cdf(0, 0.2, 0.3)
        assert_equal(x, 0.0)
        alpha, beta = 267, 1472
        x = np.array([0.2, 0.5, 0.6])
        cdfs = stats.betaprime.cdf(x, alpha, beta)
        assert_(np.isfinite(cdfs).all())
        # check the new cdf implementation vs generic one:
        gen_cdf = stats.rv_continuous._cdf_single
        cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
        assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
    """Tests for the gamma distribution (stats.gamma)."""
    def test_pdf(self):
        # Density values cross-checked against R's dgamma.
        assert_almost_equal(stats.gamma.pdf(90, 394, scale=1./5), 0.002312341)
        assert_almost_equal(stats.gamma.pdf(3, 10, scale=1./5), 0.1620358)
    def test_logpdf(self):
        # Regression test for Ticket #1326: logpdf(0, 1) hits the
        # 0*log(0) corner case and must evaluate to 0, not nan.
        assert_almost_equal(stats.gamma.logpdf(0, 1), 0)
class TestChi2(TestCase):
    # regression tests after precision improvements, ticket:1041, not verified
    def test_precision(self):
        # pdf values at large x with large df, pinned to 14 decimals.
        assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
                            decimal=14)
        assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
                            decimal=14)
class TestGumbelL(TestCase):
    """Round-trip tests for the left-skewed Gumbel distribution (gh-6228)."""
    def test_cdf_ppf(self):
        # ppf must invert cdf deep in the left tail.
        pts = np.linspace(-100, -4)
        recovered = stats.gumbel_l.ppf(stats.gumbel_l.cdf(pts))
        assert_allclose(pts, recovered)
    def test_logcdf_logsf(self):
        # exp(logcdf) and -expm1(logsf) are two routes to the same cdf.
        pts = np.linspace(-100, -4)
        via_logcdf = np.exp(stats.gumbel_l.logcdf(pts))
        via_logsf = -special.expm1(stats.gumbel_l.logsf(pts))
        assert_allclose(via_logcdf, via_logsf)
    def test_sf_isf(self):
        # isf must invert sf across the bulk of the distribution.
        pts = np.linspace(-20, 5)
        recovered = stats.gumbel_l.isf(stats.gumbel_l.sf(pts))
        assert_allclose(pts, recovered)
class TestArrayArgument(TestCase):  # test for ticket:992
    """Array-valued loc/scale must broadcast cleanly with size."""
    def test_noexception(self):
        samples = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
                                 size=(10, 5))
        assert_equal(samples.shape, (10, 5))
class TestDocstring(TestCase):
    """Sanity checks on generated distribution docstrings."""
    def test_docstrings(self):
        # See ticket #761: the distribution name must appear in its
        # docstring (guarded because -OO strips docstrings entirely).
        if stats.rayleigh.__doc__ is not None:
            self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
        if stats.bernoulli.__doc__ is not None:
            self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())
    def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail. See #1508.
        stats.rv_continuous()
        stats.rv_discrete()
class TestEntropy(TestCase):
    """Tests for stats.entropy (Shannon entropy / KL divergence)."""

    def test_entropy_positive(self):
        # See ticket #497: KL divergence of a pmf with itself is 0 and
        # against a different pmf it is non-negative.
        pk = [0.5, 0.2, 0.3]
        qk = [0.1, 0.25, 0.65]
        eself = stats.entropy(pk, pk)
        edouble = stats.entropy(pk, qk)
        assert_(0.0 == eself)
        assert_(edouble >= 0.0)

    def test_entropy_base(self):
        # Uniform 16-point pmf has exactly 4 bits of entropy in base 2.
        pk = np.ones(16, float)
        S = stats.entropy(pk, base=2.)
        assert_(abs(S - 4.) < 1.e-5)

        # Changing the log base rescales the result by log(2).
        qk = np.ones(16, float)
        qk[:8] = 2.
        S = stats.entropy(pk, qk)
        S2 = stats.entropy(pk, qk, base=2.)
        assert_(abs(S/S2 - np.log(2.)) < 1.e-5)

    def test_entropy_zero(self):
        # Test for PR-479: a zero-probability entry must not produce nan.
        assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
                            decimal=12)

    def test_entropy_2d(self):
        # 2-D input: the entropy is computed column-wise.
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.1933259, 0.18609809])

    def test_entropy_2d_zero(self):
        # A zero in qk where pk is nonzero gives infinite KL divergence ...
        pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
        qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [np.inf, 0.18609809])

        # ... but a matching zero in pk makes that column finite again.
        pk[0][0] = 0.0
        assert_array_almost_equal(stats.entropy(pk, qk),
                                  [0.17403988, 0.18609809])
def TestArgsreduce():
    # NOTE: despite the class-style name this is a plain test function.
    # It exercises argsreduce's broadcasting of a condition over arguments.
    data = array([1, 3, 2, 1, 2, 3, 3])

    # boolean-array condition: keep only the entries where data > 1,
    # scalar arguments are expanded to match
    kept, expanded = argsreduce(data > 1, data, 2)
    assert_array_equal(kept, [3, 2, 2, 3, 3])
    assert_array_equal(expanded, [2, 2, 2, 2, 2])

    # scalar (true) condition: results collapse to a single element
    kept, expanded = argsreduce(2 > 1, data, 2)
    assert_array_equal(kept, data[0])
    assert_array_equal(expanded, [2])

    # condition true everywhere: the array argument passes through unchanged
    kept, expanded = argsreduce(data > 0, data, 2)
    assert_array_equal(kept, data)
    assert_array_equal(expanded, [2] * numpy.size(data))
class TestFitMethod(object):
    """Tests of the distributions' ``fit`` method, including fits with
    individual parameters held fixed (floc/fscale/f0/f1/f2/fa/fb)."""

    # distributions whose fit is a known failure; checks raise SkipTest
    skip = ['ncf']

    @dec.slow
    def test_fit(self):
        # Nose-style generator test: yields one check per distribution
        # enumerated by test_all_distributions().
        def check(func, dist, args, alpha):
            if dist in self.skip:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                vals = distfunc.fit(res)
                vals2 = distfunc.fit(res, optimizer='powell')
            # Only check the length of the return
            # FIXME: should check the actual results to see if we are 'close'
            #   to what was created --- but what is 'close' enough
            if dist == 'frechet':
                # frechet's arg list does not match its numargs
                assert_(len(vals) == len(args))
                assert_(len(vals2) == len(args))
            else:
                # fit returns shapes + loc + scale
                assert_(len(vals) == 2+len(args))
                assert_(len(vals2) == 2+len(args))

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    @dec.slow
    def test_fix_fit(self):
        # Fits with a single fixed parameter must return the fixed value
        # in the correct slot of the result tuple.
        def check(func, dist, args, alpha):
            # Not sure why 'ncf', and 'beta' are failing
            # frechet has different len(args) than distfunc.numargs
            if dist in self.skip + ['frechet']:
                raise SkipTest("%s fit known to fail" % dist)
            distfunc = getattr(stats, dist)
            with np.errstate(all='ignore'):
                res = distfunc.rvs(*args, **{'size': 200})
                vals = distfunc.fit(res, floc=0)
                vals2 = distfunc.fit(res, fscale=1)
                assert_(len(vals) == 2+len(args))
                assert_(vals[-2] == 0)      # loc slot holds the fixed 0
                assert_(vals2[-1] == 1)     # scale slot holds the fixed 1
                assert_(len(vals2) == 2+len(args))
                if len(args) > 0:
                    vals3 = distfunc.fit(res, f0=args[0])
                    assert_(len(vals3) == 2+len(args))
                    assert_(vals3[0] == args[0])
                if len(args) > 1:
                    vals4 = distfunc.fit(res, f1=args[1])
                    assert_(len(vals4) == 2+len(args))
                    assert_(vals4[1] == args[1])
                if len(args) > 2:
                    vals5 = distfunc.fit(res, f2=args[2])
                    assert_(len(vals5) == 2+len(args))
                    assert_(vals5[2] == args[2])

        for func, dist, args, alpha in test_all_distributions():
            yield check, func, dist, args, alpha

    def test_fix_fit_2args_lognorm(self):
        # Regression test for #1551.
        np.random.seed(12345)
        with np.errstate(all='ignore'):
            x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
            assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
                            [0.25888672, 0, 20], atol=1e-5)

    def test_fix_fit_norm(self):
        # norm.fit with fixed loc or scale has a closed-form solution;
        # check against hand-computed values for x = 1..5.
        x = np.arange(1, 6)

        loc, scale = stats.norm.fit(x)
        assert_almost_equal(loc, 3)
        assert_almost_equal(scale, np.sqrt(2))

        loc, scale = stats.norm.fit(x, floc=2)
        assert_equal(loc, 2)
        assert_equal(scale, np.sqrt(3))

        loc, scale = stats.norm.fit(x, fscale=2)
        assert_almost_equal(loc, 3)
        assert_equal(scale, 2)

    def test_fix_fit_gamma(self):
        x = np.arange(1, 6)
        meanlog = np.log(x).mean()

        # A basic test of gamma.fit with floc=0.
        floc = 0
        a, loc, scale = stats.gamma.fit(x, floc=floc)
        # MLE condition for the shape: log(a) - digamma(a) = log(mean) - meanlog
        s = np.log(x.mean()) - meanlog
        assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # Regression tests for gh-2514.
        # The problem was that if `floc=0` was given, any other fixed
        # parameters were ignored.
        f0 = 1
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        f0 = 2
        floc = 0
        a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
        assert_equal(a, f0)
        assert_equal(loc, floc)
        assert_almost_equal(scale, x.mean()/a, decimal=8)

        # loc and scale fixed.
        floc = 0
        fscale = 2
        a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
        assert_equal(loc, floc)
        assert_equal(scale, fscale)
        # with loc and scale fixed, the shape MLE satisfies
        # digamma(a) = meanlog - log(scale)
        c = meanlog - np.log(fscale)
        assert_almost_equal(special.digamma(a), c)

    def test_fix_fit_beta(self):
        # Test beta.fit when both floc and fscale are given.

        def mlefunc(a, b, x):
            # Zeros of this function are critical points of
            # the maximum likelihood function.
            n = len(x)
            s1 = np.log(x).sum()
            s2 = np.log(1-x).sum()
            psiab = special.psi(a + b)
            func = [s1 - n * (-psiab + special.psi(a)),
                    s2 - n * (-psiab + special.psi(b))]
            return func

        # Basic test with floc and fscale given.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)

        # Basic test with f0, floc and fscale given.
        # This is also a regression test for gh-2514.
        x = np.array([0.125, 0.25, 0.5])
        a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
        assert_equal(a, 2)
        assert_equal(loc, 0)
        assert_equal(scale, 1)
        da, db = mlefunc(a, b, x)
        assert_allclose(db, 0, atol=1e-5)

        # Same floc and fscale values as above, but reverse the data
        # and fix b (f1).
        x2 = 1 - x
        a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
        assert_equal(b2, 2)
        assert_equal(loc2, 0)
        assert_equal(scale2, 1)
        da, db = mlefunc(a2, b2, x2)
        assert_allclose(da, 0, atol=1e-5)
        # a2 of this test should equal b from above.
        assert_almost_equal(a2, b)

        # Check for detection of data out of bounds when floc and fscale
        # are given.
        assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
        y = np.array([0, .5, 1])
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
        assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)

        # Check that attempting to fix all the parameters raises a ValueError.
        assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
                      floc=2, fscale=3)

    def test_fshapes(self):
        # take a beta distribution, with shapes='a, b', and make sure that
        # fa is equivalent to f0, and fb is equivalent to f1
        a, b = 3., 4.
        x = stats.beta.rvs(a, b, size=100, random_state=1234)
        res_1 = stats.beta.fit(x, f0=3.)
        res_2 = stats.beta.fit(x, fa=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        # fix_a is a third spelling of the same fixed parameter
        res_2 = stats.beta.fit(x, fix_a=3.)
        assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)

        res_3 = stats.beta.fit(x, f1=4.)
        res_4 = stats.beta.fit(x, fb=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        res_4 = stats.beta.fit(x, fix_b=4.)
        assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)

        # cannot specify both positional and named args at the same time
        assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)

        # check that attempting to fix all parameters raises a ValueError
        assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
                      floc=2, fscale=3)

        # check that specifying floc, fscale and fshapes works for
        # beta and gamma which override the generic fit method
        res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
        aa, bb, ll, ss = res_5
        assert_equal([aa, ll, ss], [3., 0, 1])

        # gamma distribution
        a = 3.
        data = stats.gamma.rvs(a, size=100)
        aa, ll, ss = stats.gamma.fit(data, fa=a)
        assert_equal(aa, a)

    def test_extra_params(self):
        # unknown parameters should raise rather than be silently ignored
        dist = stats.exponnorm
        data = dist.rvs(K=2, size=100)
        dct = dict(enikibeniki=-101)
        assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(TestCase):
    # Test that a frozen distribution gives the same results as the original
    # object.
    #
    # Only tested for the normal distribution (with loc and scale specified)
    # and for the gamma distribution (with a shape parameter specified).
    def test_norm(self):
        # Every public method of the frozen normal must agree with the
        # unfrozen distribution called with the same loc/scale.
        dist = stats.norm
        frozen = stats.norm(loc=10.0, scale=3.0)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, loc=10.0, scale=3.0)
        assert_equal(result_f, result)

        # support endpoints are mirrored on the frozen object
        assert_equal(frozen.a, dist.a)
        assert_equal(frozen.b, dist.b)

    def test_gamma(self):
        # Same method-by-method comparison, with a shape parameter.
        a = 2.0
        dist = stats.gamma
        frozen = stats.gamma(a)

        result_f = frozen.pdf(20.0)
        result = dist.pdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.cdf(20.0)
        result = dist.cdf(20.0, a)
        assert_equal(result_f, result)

        result_f = frozen.ppf(0.25)
        result = dist.ppf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.isf(0.25)
        result = dist.isf(0.25, a)
        assert_equal(result_f, result)

        result_f = frozen.sf(10.0)
        result = dist.sf(10.0, a)
        assert_equal(result_f, result)

        result_f = frozen.median()
        result = dist.median(a)
        assert_equal(result_f, result)

        result_f = frozen.mean()
        result = dist.mean(a)
        assert_equal(result_f, result)

        result_f = frozen.var()
        result = dist.var(a)
        assert_equal(result_f, result)

        result_f = frozen.std()
        result = dist.std(a)
        assert_equal(result_f, result)

        result_f = frozen.entropy()
        result = dist.entropy(a)
        assert_equal(result_f, result)

        result_f = frozen.moment(2)
        result = dist.moment(2, a)
        assert_equal(result_f, result)

        assert_equal(frozen.a, frozen.dist.a)
        assert_equal(frozen.b, frozen.dist.b)

    def test_regression_ticket_1293(self):
        # Create a frozen distribution.
        frozen = stats.lognorm(1)
        # Call one of its methods that does not take any keyword arguments.
        m1 = frozen.moment(2)
        # Now call a method that takes a keyword argument.
        frozen.stats(moments='mvsk')
        # Call moment(2) again.
        # After calling stats(), the following was raising an exception.
        # So this test passes if the following does not raise an exception.
        m2 = frozen.moment(2)
        # The following should also be true, of course.  But it is not
        # the focus of this test.
        assert_equal(m1, m2)

    def test_ab(self):
        # test that the support of a frozen distribution
        # (i) remains frozen even if it changes for the original one
        # (ii) is actually correct if the shape parameters are such that
        #      the values of [a, b] are not the default [0, inf]
        # take a genpareto as an example where the support
        # depends on the value of the shape parameter:
        # for c > 0: a, b = 0, inf
        # for c < 0: a, b = 0, -1/c
        rv = stats.genpareto(c=-0.1)
        a, b = rv.dist.a, rv.dist.b
        assert_equal([a, b], [0., 10.])
        assert_equal([rv.a, rv.b], [0., 10.])

        stats.genpareto.pdf(0, c=0.1)  # this changes genpareto.b
        assert_equal([rv.dist.a, rv.dist.b], [a, b])
        assert_equal([rv.a, rv.b], [a, b])

        rv1 = stats.genpareto(c=0.1)
        assert_(rv1.dist is not rv.dist)

    def test_rv_frozen_in_namespace(self):
        # Regression test for gh-3522
        assert_(hasattr(stats.distributions, 'rv_frozen'))

    def test_random_state(self):
        # only check that the random_state attribute exists,
        frozen = stats.norm()
        assert_(hasattr(frozen, 'random_state'))

        # ... that it can be set,
        frozen.random_state = 42
        assert_equal(frozen.random_state.get_state(),
                     np.random.RandomState(42).get_state())

        # ... and that .rvs method accepts it as an argument
        rndm = np.random.RandomState(1234)
        frozen.rvs(size=8, random_state=rndm)

    def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            # seed before pickling so the pickled and live objects share
            # the same RNG stream
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)

            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1]))

    def test_expect(self):
        # smoke test the expect method of the frozen distribution
        # only take a gamma w/loc and scale and poisson with loc specified
        def func(x):
            return x

        gm = stats.gamma(a=2, loc=3, scale=4)
        gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
        gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
                                       lb=1, ub=2, conditional=True)
        assert_allclose(gm_val, gamma_val)

        p = stats.poisson(3, loc=4)
        p_val = p.expect(func)
        poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
        assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
    # Test for expect method.
    #
    # Uses normal distribution and beta distribution for finite bounds, and
    # hypergeom for discrete distribution with finite support
    def test_norm(self):
        # E[(X-5)^2] is the variance, E[X] the mean.
        v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
        assert_almost_equal(v, 4, decimal=14)

        m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
        assert_almost_equal(m, 5, decimal=14)

        # integrating the constant 1 between the 5% and 95% quantiles
        # gives the enclosed probability mass
        lb = stats.norm.ppf(0.05, loc=5, scale=2)
        ub = stats.norm.ppf(0.95, loc=5, scale=2)
        prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
        assert_almost_equal(prob90, 0.9, decimal=14)

        # conditional expectation renormalizes by that mass
        prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
                                    conditional=True)
        assert_almost_equal(prob90c, 1., decimal=14)

    def test_beta(self):
        # case with finite support interval
        v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
                              loc=5, scale=2)
        assert_almost_equal(v, 1./18., decimal=13)

        m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
        assert_almost_equal(m, 19/3., decimal=13)

        ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
        lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
        prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                                   scale=2., lb=lb, ub=ub, conditional=False)
        assert_almost_equal(prob90, 0.9, decimal=13)

        prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                    scale=2, lb=lb, ub=ub, conditional=True)
        assert_almost_equal(prob90c, 1., decimal=13)

    def test_hypergeom(self):
        # test case with finite bounds

        # without specifying bounds
        m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
        m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
        assert_almost_equal(m, m_true, decimal=13)

        v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
                                   loc=5.)
        assert_almost_equal(v, v_true, decimal=14)

        # with bounds, bounds equal to shifted support
        v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
                                          args=(20, 10, 8),
                                          loc=5., lb=5, ub=13)
        assert_almost_equal(v_bounds, v_true, decimal=14)

        # drop boundary points
        prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
        prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                             loc=5., lb=6, ub=12)
        assert_almost_equal(prob_bounds, prob_true, decimal=13)

        # conditional
        prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
                                         lb=6, ub=12, conditional=True)
        assert_almost_equal(prob_bc, 1, decimal=14)

        # check simple integral
        prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
                                        lb=0, ub=8)
        assert_almost_equal(prob_b, 1, decimal=13)

    def test_poisson(self):
        # poisson, use lower bound only
        prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
                                           conditional=False)
        prob_b_true = 1-stats.poisson.cdf(2, 2)
        assert_almost_equal(prob_bounds, prob_b_true, decimal=14)

        prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
                                       conditional=True)
        assert_almost_equal(prob_lb, 1, decimal=14)

    def test_genhalflogistic(self):
        # genhalflogistic, changes upper bound of support in _argcheck
        # regression test for gh-2622
        halflog = stats.genhalflogistic
        # check consistency when calling expect twice with the same input
        res1 = halflog.expect(args=(1.5,))
        halflog.expect(args=(0.5,))
        res2 = halflog.expect(args=(1.5,))
        assert_almost_equal(res1, res2, decimal=14)

    def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows
        # check that using i0e fixes it
        assert_(np.isfinite(stats.rice.pdf(999, 0.74)))

        assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
        assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))

    def test_logser(self):
        # test a discrete distribution with infinite support and loc
        p, loc = 0.3, 3
        res_0 = stats.logser.expect(lambda k: k, args=(p,))
        # check against the correct answer (sum of a geom series)
        assert_allclose(res_0,
                        p / (p - 1.) / np.log(1. - p), atol=1e-15)

        # now check it with `loc`
        res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
        assert_allclose(res_l, res_0 + loc, atol=1e-15)

    def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute two first
        # moments and compare to known values (cf skellam.stats)
        p1, p2 = 18, 22
        m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
        m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
        assert_allclose(m1, p1 - p2, atol=1e-12)
        assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)

    def test_randint(self):
        # Use a discrete distribution w/ parameter-dependent support, which
        # is larger than the default chunksize
        lo, hi = 0, 113
        res = stats.randint.expect(lambda x: x, (lo, hi))
        assert_allclose(res,
                        sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)

    def test_zipf(self):
        # Test that there is no infinite loop even if the sum diverges
        assert_warns(RuntimeWarning, stats.zipf.expect,
                     lambda x: x**2, (2,))

    def test_discrete_kwds(self):
        # check that discrete expect accepts keywords to control the summation
        n0 = stats.poisson.expect(lambda x: 1, args=(2,))
        n1 = stats.poisson.expect(lambda x: 1, args=(2,),
                                  maxcount=1001, chunksize=32, tolerance=1e-8)
        assert_almost_equal(n0, n1, decimal=14)

    def test_moment(self):
        # test the .moment() method: compute a higher moment and compare to
        # a known value
        def poiss_moment5(mu):
            return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu

        for mu in [5, 7]:
            m5 = stats.poisson.moment(5, mu)
            assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
# For negative values c and for c=0 results of rv.cdf(0) below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
    def test_rice_zero_b(self):
        # rice distribution should work with b=0, cf gh-2164
        pts = [0.2, 1., 5.]
        for method in (stats.rice.pdf, stats.rice.logpdf,
                       stats.rice.cdf, stats.rice.logcdf):
            assert_(np.isfinite(method(pts, b=0.)).all())

        quantiles = [0.1, 0.1, 0.5, 0.9]
        assert_(np.isfinite(stats.rice.ppf(quantiles, b=0.)).all())

        mvsk = stats.rice.stats(0, moments='mvsk')
        assert_(np.isfinite(mvsk).all())

        # furthermore, pdf is continuous as b\to 0
        # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
        # see e.g. Abramovich & Stegun 9.6.7 & 9.6.10
        small_b = 1e-8
        assert_allclose(stats.rice.pdf(pts, 0), stats.rice.pdf(pts, small_b),
                        atol=small_b, rtol=0)

    def test_rice_rvs(self):
        # a scalar call yields a single variate; a size tuple sets the shape
        sampler = stats.rice.rvs
        assert_equal(sampler(b=3.).size, 1)
        assert_equal(sampler(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
    def test_erlang_runtimewarning(self):
        # erlang should generate a RuntimeWarning if a non-integer
        # shape parameter is used.
        with warnings.catch_warnings():
            # promote RuntimeWarning to an error so assert_raises can see it
            warnings.simplefilter("error", RuntimeWarning)

            # The non-integer shape parameter 1.3 should trigger a
            # RuntimeWarning
            assert_raises(RuntimeWarning,
                          stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)

            # Calling the fit method with `f0` set to an integer should
            # *not* trigger a RuntimeWarning. It should return the same
            # values as gamma.fit(...).
            data = [0.5, 1.0, 2.0, 4.0]
            result_erlang = stats.erlang.fit(data, f0=1)
            result_gamma = stats.gamma.fit(data, f0=1)
            assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestRayleigh(TestCase):
    # gh-6227
    def test_logpdf(self):
        # deep in the tail the log-density must stay finite:
        # logpdf(x) = log(x) - x**2/2, so logpdf(50) = log(50) - 1250
        assert_allclose(stats.rayleigh.logpdf(50), -1246.0879769945718)

    def test_logsf(self):
        # logsf(x) = -x**2/2 exactly, so logsf(50) = -1250
        assert_allclose(stats.rayleigh.logsf(50), -1250)
class TestExponWeib(TestCase):
    def test_pdf_logpdf(self):
        # Regression test for gh-3508.
        point, a, c = 0.1, 1.0, 100.0
        values = [stats.exponweib.pdf(point, a, c),
                  stats.exponweib.logpdf(point, a, c)]
        # Expected values were computed with mpmath.
        assert_allclose(values,
                        [1.0000000000000054e-97, -223.35075402042244])

    def test_a_is_1(self):
        # For issue gh-3508.
        # Check that when a=1, the pdf and logpdf methods of exponweib are the
        # same as those of weibull_min.
        pts = np.logspace(-4, -1, 4)
        c = 100
        assert_allclose(stats.exponweib.pdf(pts, 1, c),
                        stats.weibull_min.pdf(pts, c))
        assert_allclose(stats.exponweib.logpdf(pts, 1, c),
                        stats.weibull_min.logpdf(pts, c))

    def test_a_is_1_c_is_1(self):
        # When a = 1 and c = 1, the distribution is exponential.
        pts = np.logspace(-8, 1, 10)
        assert_allclose(stats.exponweib.pdf(pts, 1, 1),
                        stats.expon.pdf(pts))
        assert_allclose(stats.exponweib.logpdf(pts, 1, 1),
                        stats.expon.logpdf(pts))
class TestWeibull(TestCase):
    def test_logpdf(self):
        # gh-6217: logpdf at the support boundary must be 0, not nan
        y = stats.weibull_min.logpdf(0, 1)
        assert_equal(y, 0)

    def test_with_maxima_distrib(self):
        # Tests for weibull_min and weibull_max.
        # The expected values were computed using the symbolic algebra
        # program 'maxima' with the package 'distrib', which has
        # 'pdf_weibull' and 'cdf_weibull'.  The mapping between the
        # scipy and maxima functions is as follows:
        # -----------------------------------------------------------------
        # scipy                              maxima
        # ---------------------------------  ------------------------------
        # weibull_min.pdf(x, a, scale=b)     pdf_weibull(x, a, b)
        # weibull_min.logpdf(x, a, scale=b)  log(pdf_weibull(x, a, b))
        # weibull_min.cdf(x, a, scale=b)     cdf_weibull(x, a, b)
        # weibull_min.logcdf(x, a, scale=b)  log(cdf_weibull(x, a, b))
        # weibull_min.sf(x, a, scale=b)      1 - cdf_weibull(x, a, b)
        # weibull_min.logsf(x, a, scale=b)   log(1 - cdf_weibull(x, a, b))
        #
        # weibull_max.pdf(x, a, scale=b)     pdf_weibull(-x, a, b)
        # weibull_max.logpdf(x, a, scale=b)  log(pdf_weibull(-x, a, b))
        # weibull_max.cdf(x, a, scale=b)     1 - cdf_weibull(-x, a, b)
        # weibull_max.logcdf(x, a, scale=b)  log(1 - cdf_weibull(-x, a, b))
        # weibull_max.sf(x, a, scale=b)      cdf_weibull(-x, a, b)
        # weibull_max.logsf(x, a, scale=b)   log(cdf_weibull(-x, a, b))
        # -----------------------------------------------------------------
        x = 1.5
        a = 2.0
        b = 3.0

        # weibull_min: with these values (x/b)**a == 0.25
        p = stats.weibull_min.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)

        lp = stats.weibull_min.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))

        c = stats.weibull_min.cdf(x, a, scale=b)
        assert_allclose(c, -special.expm1(-0.25))

        lc = stats.weibull_min.logcdf(x, a, scale=b)
        assert_allclose(lc, np.log(-special.expm1(-0.25)))

        s = stats.weibull_min.sf(x, a, scale=b)
        assert_allclose(s, np.exp(-0.25))

        ls = stats.weibull_min.logsf(x, a, scale=b)
        assert_allclose(ls, -0.25)

        # Also test using a large value x, for which computing the survival
        # function using the CDF would result in 0.
        s = stats.weibull_min.sf(30, 2, scale=3)
        assert_allclose(s, np.exp(-100))

        ls = stats.weibull_min.logsf(30, 2, scale=3)
        assert_allclose(ls, -100)

        # weibull_max: mirror image of weibull_min
        x = -1.5

        p = stats.weibull_max.pdf(x, a, scale=b)
        assert_allclose(p, np.exp(-0.25)/3)

        lp = stats.weibull_max.logpdf(x, a, scale=b)
        assert_allclose(lp, -0.25 - np.log(3))

        c = stats.weibull_max.cdf(x, a, scale=b)
        assert_allclose(c, np.exp(-0.25))

        lc = stats.weibull_max.logcdf(x, a, scale=b)
        assert_allclose(lc, -0.25)

        s = stats.weibull_max.sf(x, a, scale=b)
        assert_allclose(s, -special.expm1(-0.25))

        ls = stats.weibull_max.logsf(x, a, scale=b)
        assert_allclose(ls, np.log(-special.expm1(-0.25)))

        # Also test using a value of x close to 0, for which computing the
        # survival function using the CDF would result in 0.
        s = stats.weibull_max.sf(-1e-9, 2, scale=3)
        assert_allclose(s, -special.expm1(-1/9000000000000000000))

        ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
        assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(TestCase):
    @dec.slow
    def test_rdist_cdf_gh1285(self):
        # check workaround in rdist._cdf for issue gh-1285.
        distfn = stats.rdist
        probs = [0.001, 0.5, 0.999]
        roundtrip = distfn.cdf(distfn.ppf(probs, 541.0), 541.0)
        assert_almost_equal(roundtrip, probs, decimal=5)
class TestTrapz(TestCase):
    """Tests for the trapezoidal distribution (shapes c, d are the relative
    positions of the two corners of the flat top)."""

    def test_reduces_to_triang(self):
        # When both corners coincide at the mode, the trapezoid degenerates
        # to a triangular distribution.
        modes = [0.3, 0.5]
        for mode in modes:
            x = [0, mode, 1]
            assert_almost_equal(stats.trapz.pdf(x, mode, mode),
                                stats.triang.pdf(x, mode))
            assert_almost_equal(stats.trapz.cdf(x, mode, mode),
                                stats.triang.cdf(x, mode))

    def test_reduces_to_uniform(self):
        # With corners at 0 and 1 the trapezoid is the uniform distribution.
        x = np.linspace(0, 1, 10)
        # silence divide-by-zero from the degenerate triangular flanks
        old_err = np.seterr(divide='ignore')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            assert_almost_equal(stats.trapz.pdf(x, 0, 1),
                                stats.uniform.pdf(x))
            assert_almost_equal(stats.trapz.cdf(x, 0, 1),
                                stats.uniform.cdf(x))
        np.seterr(**old_err)

    def test_cases(self):
        # Exact pdf/cdf values at hand-picked points.
        old_err = np.seterr(divide='ignore')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)

            # edge cases
            assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
            assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
            assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8), 1.11111111111111111)
            assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0), 1.11111111111111111)

            # straightforward case
            assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
            assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
            assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)

            assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
            assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
            assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
            assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
            assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
        np.seterr(**old_err)

    def test_trapz_vect(self):
        # test that array-valued shapes and arguments are handled
        c = np.array([0.1, 0.2, 0.3])
        d = np.array([0.5, 0.6])[:, None]
        x = np.array([0.15, 0.25, 0.9])
        v = stats.trapz.pdf(x, c, d)

        # compare the broadcast result against element-by-element scalar calls
        cc, dd, xx = np.broadcast_arrays(c, d, x)
        res = np.empty(xx.size, dtype=xx.dtype)
        ind = np.arange(xx.size)
        for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
            res[i] = stats.trapz.pdf(x1, c1, d1)

        assert_allclose(v, res.reshape(v.shape), atol=1e-15)
def test_540_567():
    # test for nan returned in tickets 540, 567
    cases = [((-1.7624320982,), {}, 0.03899815971089126),
             ((-1.7624320983,), {}, 0.038998159702449846),
             ((1.38629436112,), {'loc': 0.950273420309,
                                 'scale': 0.204423758009},
              0.98353464004309321)]
    for args, kwds, expected in cases:
        assert_almost_equal(stats.norm.cdf(*args, **kwds), expected,
                            decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
    # The following was raising an exception, because _construct_default_doc()
    # did not handle the default keyword extradoc=None.  See ticket #1316.
    stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
    # adjust to avoid nan with 0*log(0)
    # chi2 with 2 degrees of freedom has density exactly 1/2 at the origin
    density_at_zero = stats.chi2.pdf(0.0, 2)
    assert_almost_equal(density_at_zero, 0.5, 14)
def test_regression_tukey_lambda():
    # Make sure that Tukey-Lambda distribution correctly handles
    # non-positive lambdas.
    x = np.linspace(-5.0, 5.0, 101)

    # np.errstate restores the floating-point error state even if an
    # assertion fails, replacing the manual seterr/try/finally bookkeeping
    # (the file already uses np.errstate elsewhere).
    with np.errstate(divide='ignore'):
        # lam <= 0: the pdf must be positive and free of nans everywhere
        for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
            p = stats.tukeylambda.pdf(x, lam)
            assert_((p != 0.0).all())
            assert_(~np.isnan(p).all())

        # mixed-sign lambdas broadcast over rows
        lam = np.array([[-1.0], [0.0], [2.0]])
        p = stats.tukeylambda.pdf(x, lam)

    assert_(~np.isnan(p).all())
    assert_((p[0] != 0.0).all())
    assert_((p[1] != 0.0).all())
    # positive lam has finite support, so its row has both zero and
    # nonzero entries
    assert_((p[2] != 0.0).any())
    assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
    # poisson is discrete, so its docstring must advertise pmf, not the
    # continuous pdf signature.
    assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
    assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
    # nan shape/argument values must propagate to nan results, not raise.
    with np.errstate(invalid='ignore'):
        # continuous: nan degrees of freedom for Student's t
        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pdf', 'logpdf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.t, method)(1, np.nan)))

        # discrete: nan quantile for Bernoulli
        for method in ('logcdf', 'cdf', 'logsf', 'sf', 'pmf', 'logpmf',
                       'ppf', 'isf'):
            assert_(np.isnan(getattr(stats.bernoulli, method)(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
    # lognorm.fit with various fixed parameters must recover the true
    # (shape, loc, scale) used to generate the sample.
    np.random.seed(5678)
    true = np.array([0.25, 0., 0.5])
    x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)

    # np.errstate restores the floating-point error state even if the fit
    # raises, replacing the manual seterr/try/finally bookkeeping (the file
    # already uses np.errstate elsewhere).
    with np.errstate(divide='ignore'):
        params = np.array(stats.lognorm.fit(x, floc=0.))

    assert_almost_equal(params, true, decimal=2)

    params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
    assert_almost_equal(params, true, decimal=2)

    params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
    assert_almost_equal(params, true, decimal=2)

    params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
    assert_almost_equal(params, true, decimal=2)

    # norm.fit with fixed loc: the fitted scale is the RMS deviation
    # about that loc
    np.random.seed(5678)
    loc = 1
    floc = 0.9
    x = stats.norm.rvs(loc, 2., size=100)
    params = np.array(stats.norm.fit(x, floc=floc))
    expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
    assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
    # Check the starting value works for Cauchy distribution fit.
    np.random.seed(654321)
    sample = stats.cauchy.rvs(size=100)
    fitted = stats.cauchy.fit(sample)
    assert_almost_equal(fitted, (0.045, 1.142), decimal=1)
def test_gh_pr_4806():
    """Check starting values for Cauchy distribution fit (gh PR 4806).

    The data are standard-normal draws shifted by a large offset; the fit
    must track the offset instead of getting stuck near zero.
    """
    np.random.seed(1234)
    x = np.random.randn(42)
    for offset in 10000.0, 1222333444.0:
        loc, scale = stats.cauchy.fit(x + offset)
        assert_allclose(loc, offset, atol=1.0)
        assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
    """Variance and kurtosis of the Tukey Lambda distribution (ticket 1545).

    See test_tukeylamdba_stats.py for more tests; the reference values for
    nonzero shapes were computed with mpmath.
    """
    cases = [
        # (shape lam, [mean, variance, skewness, excess kurtosis])
        (0, [0, np.pi**2/3, 0, 1.2]),  # known exact values
        (3.13, [0, 0.0269220858861465102, 0, -0.898062386219224104]),
        (0.14, [0, 2.11029702221450250, 0, -0.02708377353223019456]),
    ]
    for lam, expected in cases:
        mvsk = stats.tukeylambda.stats(lam, moments='mvsk')
        assert_almost_equal(mvsk, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
    """logpmf far in the tail must stay finite (regression for ticket 1436)."""
    logp = stats.poisson.logpmf(1500, 200)
    assert_(np.isfinite(logp))
def test_powerlaw_stats():
    """Test the powerlaw stats function (regression test for ticket 1548).

    For shape parameter a the exact moments are (using E[X**k] = a/(a + k)):

        mean            mu      = a / (a + 1)
        variance        sigma^2 = a / ((a + 2) * (a + 1)**2)
        skewness        gamma_1 = -2 * ((a - 1)/(a + 3)) * sqrt((a + 2)/a)
        excess kurtosis gamma_2 = 6*(a**3 - a**2 - 6*a + 2)
                                  / (a * (a + 3) * (a + 4))

    (See http://en.wikipedia.org/wiki/Skewness and
    http://en.wikipedia.org/wiki/Kurtosis for the generic formulas these
    were derived from.)
    """
    cases = [
        (1.0, (0.5, 1./12, 0.0, -1.2)),
        (2.0, (2./3, 2./36, -0.56568542494924734, -0.6)),
    ]
    for a, exact_mvsk in cases:
        computed = stats.powerlaw.stats(a, moments="mvsk")
        assert_array_almost_equal(computed, exact_mvsk)
def test_powerlaw_edge():
    """logpdf at the left endpoint x = 0 with a = 1 (regression for gh-3986)."""
    logp = stats.powerlaw.logpdf(0, 1)
    assert_equal(logp, 0.0)
def test_exponpow_edge():
    """pdf and logpdf of exponpow at x = 0 (regression test for gh-3982)."""
    # b = 1 reduces to the plain exponential: logpdf(0) == 0.
    assert_equal(stats.exponpow.logpdf(0, 1), 0.0)
    # The x = 0 limit depends on the shape parameter b.
    b = [0.25, 1.0, 1.5]
    assert_equal(stats.exponpow.pdf(0, b), [np.inf, 1.0, 0.0])
    assert_equal(stats.exponpow.logpdf(0, b), [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
    """gengamma pdf at x = 0 (gh-3985) and negative moments (gh-4724)."""
    assert_equal(stats.gengamma.pdf(0, 1, 1), 1.0)
    # With c = 1 (plain gamma), E[X**-2] = 1 / ((a - 1) * (a - 2)).
    for a in (200, 10):
        moment = stats.gengamma._munp(-2, a, 1.)
        assert_almost_equal(moment, 1. / ((a - 1) * (a - 2)))
def test_ksone_fit_freeze():
    """ksone.fit must terminate on this data set (regression test for #1638).

    The call used to hang; the test only checks that fit() returns.
    """
    d = np.array(
        [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
         -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
         0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
         0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
         0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
         0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
         -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
         -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
         -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
         -0.06037974, 0.37670779, -0.21684405])
    # np.errstate restores the FP error state automatically; this replaces
    # the manual np.seterr/try/finally spelling of the same thing.
    with np.errstate(invalid='ignore'):
        with warnings.catch_warnings():
            # fit() emits harmless warnings on this data; silence them.
            warnings.simplefilter('ignore', UserWarning)
            warnings.simplefilter('ignore', RuntimeWarning)
            stats.ksone.fit(d)
def test_norm_logcdf():
    """Test precision of the logcdf of the normal distribution.

    This precision was enhanced in ticket 1614.
    """
    x = -np.asarray(list(range(0, 120, 4)))
    # Values from R
    expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
                -131.69539607, -203.91715537, -292.09872100, -396.25241451,
                -516.38564863, -652.50322759, -804.60844201, -972.70364403,
                -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
                -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
                -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
                -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
                -6277.63751711, -6733.67260303]
    assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
    # also test the complex-valued code path
    assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
    # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
    # (derivative extracted with a complex-step difference)
    deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
    deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
    assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
    """Test levy.cdf, including small arguments, and the ppf round-trip."""
    x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])

    # Expected values were calculated separately with mpmath.
    # E.g.
    # >>> mpmath.mp.dps = 100
    # >>> x = mpmath.mp.mpf('0.01')
    # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
    expected = np.array([0.9747728793699604,
                         0.3173105078629141,
                         0.1572992070502851,
                         0.0015654022580025495,
                         1.523970604832105e-23,
                         1.795832784800726e-219])

    y = stats.levy.cdf(x)
    assert_allclose(y, expected, rtol=1e-10)

    # ppf(expected) should get us back to x.
    xx = stats.levy.ppf(expected)
    assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
    """hypergeom.interval must terminate and be correct (ticket 1802)."""
    # The first two confidence levels used to hang in an endless loop;
    # the third was working already before the fix.
    cases = [(.95, (152.0, 197.0)),
             (.945, (152.0, 197.0)),
             (.94, (153.0, 196.0))]
    for alpha, expected in cases:
        interval = stats.hypergeom.interval(alpha, 187601, 43192, 757)
        assert_equal(interval, expected)
    # Degenerate case where the support collapses to one point (.a == .b).
    assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
    assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
    """Check that a TypeError is raised when too many args are given to a
    method (regression test for ticket 1815).

    Extra positional args beyond the shape parameters map onto loc and
    scale, so combining them with explicit loc/scale keywords must fail.
    """
    x = np.linspace(0.1, 0.7, num=5)
    # gamma has one shape parameter; 3 and 4 positionally are loc/scale.
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
    assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
    assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
    # These should not give errors
    stats.gamma.pdf(x, 2, 3)  # loc=3
    stats.gamma.pdf(x, 2, 3, 4)  # loc=3, scale=4
    stats.gamma.stats(2., 3)
    stats.gamma.stats(2., 3, 4)
    stats.gamma.stats(2., 3, 4, 'mv')
    stats.gamma.rvs(2., 3, 4, 5)
    stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
    # Also for a discrete distribution
    stats.geom.pmf(x, 2, loc=3)  # no error, loc=3
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
    assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
    # And for distributions with 0, 2 and 3 args respectively
    assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
    assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
    assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
    stats.ncf.pdf(x, 3, 4, 5, 6, 1.0)  # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
    """ncx2 cdf from special functions agrees with the integrated pdf
    (Trac #955)."""
    xs = np.arange(20, 25, 0.2)
    via_special = stats.ncx2.cdf(xs, 2, 1.07458615e+02)
    via_quadrature = stats.ncx2._cdfvec(xs, 2, 1.07458615e+02)
    assert_allclose(via_special, via_quadrature, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
    """ncx2.pdf must underflow to 0, not nan, in extreme tails (gh-1577).

    NB: this also checks that nan_to_num is not needed in ncx2.pdf.
    """
    dfs = np.arange(340, 350)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        assert_equal(stats.ncx2.pdf(1, dfs, 2), 0)
        logval = stats.ncx2.logpdf(1, dfs, 2)
    assert_(np.isneginf(logval).all())
def test_foldnorm_zero():
    """foldnorm with shape c = 0 must be usable (regression for gh-2399)."""
    half_normal = stats.foldnorm(0, scale=1)
    # cdf(0) previously evaluated to nan because c == 0 was rejected.
    assert_equal(half_normal.cdf(0), 0)
def test_stats_shapes_argcheck():
    """stats() with vector shape args, some values invalid (gh-2678).

    Invalid shape entries must yield nan in the corresponding output
    positions instead of failing the whole call.
    """
    # 0 is not a legal `a`; its slot must come back as nan.
    mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5)
    mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
    # Prepend nan to the valid-only results to build the expectation.
    mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # -1 is not a legal shape parameter
    mv3 = stats.lognorm.stats([2, 2.4, -1])
    mv2 = stats.lognorm.stats([2, 2.4])
    # Here the invalid entry is last, so nan is appended.
    mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
    assert_equal(mv2_augmented, mv3)
    # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
    # stats method with multiple shape parameters is not properly vectorized
    # anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
    # One shape parameter `a`; the constant pdf makes assertions trivial
    # (pdf(x, a) == 42 for any input).
    def _pdf(self, x, a):
        return 42
class _distr2_gen(stats.rv_continuous):
    # Defines only _cdf; _pdf must then be obtained by numerical
    # differentiation (d/dx (42*a + x) == 1).
    def _cdf(self, x, a):
        return 42 * a + x
class _distr3_gen(stats.rv_continuous):
    # Deliberately broken: _pdf takes two shape parameters (a, b) while
    # _cdf takes one (a); used to verify signature inspection rejects it.
    def _pdf(self, x, a, b):
        return a + b

    def _cdf(self, x, a):
        # Different # of shape params from _pdf, to be able to check that
        # inspection catches the inconsistency.
        return 42 * a + x
class _distr6_gen(stats.rv_continuous):
    # Two shape parameters (both _pdf and _cdf defined, consistent shapes.)
    def _pdf(self, x, a, b):
        return a*x + b

    def _cdf(self, x, a, b):
        return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
    """Construct distributions with an explicit ``shapes`` parameter and
    check both accepted and rejected spellings."""

    def test_correct_shapes(self):
        dummy_distr = _distr_gen(name='dummy', shapes='a')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_wrong_shapes_1(self):
        # keyword `a` does not match the declared shape name `A`
        dummy_distr = _distr_gen(name='dummy', shapes='A')
        assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))

    def test_wrong_shapes_2(self):
        # too few of the declared shapes supplied
        dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
        dct = dict(a=1, b=2, c=3)
        assert_raises(TypeError, dummy_distr.pdf, 1, **dct)

    def test_shapes_string(self):
        # shapes must be a string
        dct = dict(name='dummy', shapes=42)
        assert_raises(TypeError, _distr_gen, **dct)

    def test_shapes_identifiers_1(self):
        # shapes must be a comma-separated list of valid python identifiers
        dct = dict(name='dummy', shapes='(!)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_2(self):
        dct = dict(name='dummy', shapes='4chan')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_3(self):
        dct = dict(name='dummy', shapes='m(fti)')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_identifiers_nodefaults(self):
        dct = dict(name='dummy', shapes='a=2')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_args(self):
        dct = dict(name='dummy', shapes='*args')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_kwargs(self):
        dct = dict(name='dummy', shapes='**kwargs')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_keywords(self):
        # python keywords cannot be used for shape parameters
        dct = dict(name='dummy', shapes='a, b, c, lambda')
        assert_raises(SyntaxError, _distr_gen, **dct)

    def test_shapes_signature(self):
        # test explicit shapes which agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a')
        assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)

    def test_shapes_signature_inconsistent(self):
        # test explicit shapes which do not agree w/ the signature of _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a):
                return stats.norm._pdf(x) * a

        dist = _dist_gen(shapes='a, b')
        assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))

    def test_star_args(self):
        # test _pdf with only starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg

        dist = _dist_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
        assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
        assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))

    def test_star_args_2(self):
        # test _pdf with named & starargs
        # NB: **kwargs of pdf will never reach _pdf
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, offset, *args):
                extra_kwarg = args[0]
                return stats.norm._pdf(x) * extra_kwarg + offset

        dist = _dist_gen(shapes='offset, extra_kwarg')
        assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
                     stats.norm.pdf(0.5)*33 + 111)
        assert_equal(dist.pdf(0.5, 111, 33),
                     stats.norm.pdf(0.5)*33 + 111)

    def test_extra_kwarg(self):
        # **kwargs to _pdf are ignored.
        # this is a limitation of the framework (_pdf(x, *goodargs))
        class _distr_gen(stats.rv_continuous):
            def _pdf(self, x, *args, **kwargs):
                # _pdf should handle *args, **kwargs itself. Here "handling"
                # is ignoring *args and looking for ``extra_kwarg`` and using
                # that.
                extra_kwarg = kwargs.pop('extra_kwarg', 1)
                return stats.norm._pdf(x) * extra_kwarg

        dist = _distr_gen(shapes='extra_kwarg')
        assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))

    def test_shapes_empty_string(self):
        # shapes='' is equivalent to shapes=None
        # Fix: this method was named `shapes_empty_string`, so the test
        # runner never collected or ran it; renamed with the `test_` prefix.
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x):
                return stats.norm.pdf(x)

        dist = _dist_gen(shapes='')
        assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
    # Construct a distribution w/o explicit shapes parameter and test it:
    # the shape names must be inferred by inspecting _pdf/_cdf signatures.

    def test_only__pdf(self):
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.pdf(1, a=1), 42)

    def test_only__cdf(self):
        # _pdf is determined from _cdf by taking numerical derivative
        dummy_distr = _distr2_gen(name='dummy')
        assert_almost_equal(dummy_distr.pdf(1, a=1), 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection(self):
        # check that _pdf signature inspection works correctly, and is used in
        # the class docstring
        dummy_distr = _distr_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 1)
        assert_equal(dummy_distr.shapes, 'a')
        res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    @dec.skipif(DOCSTRINGS_STRIPPED)
    def test_signature_inspection_2args(self):
        # same for 2 shape params and both _pdf and _cdf defined
        dummy_distr = _distr6_gen(name='dummy')
        assert_equal(dummy_distr.numargs, 2)
        assert_equal(dummy_distr.shapes, 'a, b')
        res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
                         dummy_distr.__doc__)
        assert_(len(res) == 1)

    def test_signature_inspection_2args_incorrect_shapes(self):
        # both _pdf and _cdf defined, but shapes are inconsistent: raises
        try:
            _distr3_gen(name='dummy')
        except TypeError:
            pass
        else:
            raise AssertionError('TypeError not raised.')

    def test_defaults_raise(self):
        # default arguments should raise
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a=42):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_starargs_raise(self):
        # without explicit shapes, *args are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, *args):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))

    def test_kwargs_raise(self):
        # without explicit shapes, **kwargs are not allowed
        class _dist_gen(stats.rv_continuous):
            def _pdf(self, x, a, **kwargs):
                return 42
        assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
    """Generated distribution docstrings must not contain formatting
    artifacts."""
    # Regexes that indicate a malformed generated docstring.
    bad_patterns = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if not isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            continue
        for pattern in bad_patterns:
            assert_(re.search(pattern, dist.__doc__) is None)
def test_infinite_input():
    """Limits at x = +inf: survival function -> 0, cdf -> 1."""
    tail = stats.skellam.sf(np.inf, 10, 11)
    assert_almost_equal(tail, 0)
    total = stats.ncx2._cdf(np.inf, 8, 0.1)
    assert_almost_equal(total, 1)
def test_lomax_accuracy():
    """ppf(cdf(x)) round-trips a tiny x (regression test for gh-4033)."""
    tiny = 1e-100
    round_trip = stats.lomax.ppf(stats.lomax.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_gompertz_accuracy():
    """ppf(cdf(x)) round-trips a tiny x (regression test for gh-4031)."""
    tiny = 1e-100
    round_trip = stats.gompertz.ppf(stats.gompertz.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_truncexpon_accuracy():
    """ppf(cdf(x)) round-trips a tiny x (regression test for gh-4035)."""
    tiny = 1e-100
    round_trip = stats.truncexpon.ppf(stats.truncexpon.cdf(tiny, 1), 1)
    assert_allclose(round_trip, tiny)
def test_rayleigh_accuracy():
    """isf(sf(x)) round-trips far in the tail (regression test for gh-4034)."""
    round_trip = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
    assert_almost_equal(round_trip, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
    """Regression test for gh-6219: c == 0 evaluations must not warn."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # Exercise the Gumbel (c == 0) code paths of each method.
        stats.genextreme.cdf(.5, 0)
        stats.genextreme.pdf(.5, 0)
        stats.genextreme.ppf(.5, 0)
        stats.genextreme.logpdf(-np.inf, 0.0)
        number_of_warnings_thrown = len(caught)
    assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
    """genextreme entropy, h = (1 - c)*euler_gamma + log(scale) + 1
    (regression test for gh-5181)."""
    euler_gamma = 0.5772156649015329
    cases = [
        (-1.0, 2*euler_gamma + 1),
        (0, euler_gamma + 1),
        (10, -9*euler_gamma + 1),
        (-10, 11*euler_gamma + 1),
    ]
    for c, expected in cases:
        assert_allclose(stats.genextreme.entropy(c), expected, rtol=1e-14)
    # Non-default scale contributes a log(scale) term.
    assert_allclose(stats.genextreme.entropy(-2.0, scale=10),
                    euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
    # c = 1 gives exactly 1.
    assert_equal(stats.genextreme.entropy(1.0), 1)
def test_genextreme_sf_isf():
    """Tail values of genextreme.sf match mpmath references; isf inverts sf.

    Expected values were computed using mpmath (mpmath.mp.dps = 1000) with
    the Wikipedia formula, whose sign convention for the shape xi is the
    opposite of scipy's shape parameter c:

        t  = (1 + xi*(x - mu)/sigma)**(-1/xi)   for xi != 0
        t  = exp(-(x - mu)/sigma)               for xi == 0
        sf = 1 - exp(-t)
    """
    cases = [
        # (x, scipy shape c, mpmath reference sf)
        (1e8, -0.125, 1.6777205262585625e-57),
        (7.98, 0.125, 1.52587890625e-21),
        (7.98, 0, 0.00034218086528426593),
    ]
    for x, c, reference_sf in cases:
        s = stats.genextreme.sf(x, c)
        assert_allclose(s, reference_sf)
        # The inverse survival function must recover x.
        assert_allclose(stats.genextreme.isf(s, c), x)
def test_burr12_ppf_small_arg():
    """burr12.ppf stays accurate for a tiny probability.

    The expected quantile was computed using mpmath:
    >>> import mpmath
    >>> prob = mpmath.mpf('1e-16')
    >>> c = mpmath.mpf(2)
    >>> d = mpmath.mpf(3)
    >>> float(((1-q)**(-1/d) - 1)**(1/c))
    5.7735026918962575e-09
    """
    prob = 1e-16
    quantile = stats.burr12.ppf(prob, 2, 3)
    assert_allclose(quantile, 5.7735026918962575e-09)
def test_argus_function():
    """Qualitative checks of the argus distribution across shapes/scales.

    There is no usable reference implementation (the RooFit one returns
    unnormalized, unreasonable results), so instead verify that the
    support of argus(chi, scale) behaves as expected.
    """
    for scale in range(1, 10):
        for chi in range(1, 10):
            # Zero density just outside the support (0, scale)...
            assert_equal(stats.argus.pdf(scale + 0.001, chi=chi, scale=scale),
                         0.0)
            assert_equal(stats.argus.pdf(-0.001, chi=chi, scale=scale), 0.0)
            # ...and strictly positive density just inside it.
            assert_(stats.argus.pdf(scale - 0.001, chi=chi, scale=scale) > 0.0)
            assert_(stats.argus.pdf(+0.001, chi=chi, scale=scale) > 0.0)
    for chi in range(1, 10):
        # cdf reaches exactly 1 at the right edge, and cdf + sf == 1 there.
        assert_equal(stats.argus.cdf(1.0, chi=chi), 1.0)
        assert_equal(stats.argus.cdf(1.0, chi=chi),
                     1.0 - stats.argus.sf(1.0, chi=chi))
class TestHistogram(TestCase):
    """Tests for stats.rv_histogram built from np.histogram output."""

    def setUp(self):
        # We have 8 bins
        # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
        # But actually np.histogram will put the last 9 also in the [8,9) bin!
        # Therefore there is a slight difference below for the last bin, from
        # what you might have expected.
        histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
                                  6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
        self.template = stats.rv_histogram(histogram)
        # A second template from a large normal sample, for comparing the
        # histogram distribution against the normal it was drawn from.
        data = stats.norm.rvs(loc=1.0, scale=2.5,size=10000, random_state=123)
        norm_histogram = np.histogram(data, bins=50)
        self.norm_template = stats.rv_histogram(norm_histogram)

    def test_pdf(self):
        """pdf is piecewise constant with bin mass / total counts."""
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
                                 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
                                 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
                                 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
                                 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
        assert_allclose(self.template.pdf(values), pdf_values)

        # Test explicitly the corner cases:
        # As stated above the pdf in the bin [8,9) is greater than
        # one would naively expect because np.histogram putted the 9
        # into the [8,9) bin.
        assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
        assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
        # 9 is outside our defined bins [8,9) hence the pdf is already 0
        # for a continuous distribution this is fine, because a single value
        # does not have a finite probability!
        assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
        assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)

        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.pdf(x),
                        stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_cdf_ppf(self):
        """cdf is the piecewise-linear integral of the pdf; ppf inverts it."""
        values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
                           5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
        cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
                                 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
                                 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
                                 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
                                 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
        assert_allclose(self.template.cdf(values), cdf_values)
        # First three and last two values in cdf_value are not unique
        assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])

        # Test of cdf and ppf are inverse functions
        x = np.linspace(1.0, 9.0, 100)
        assert_allclose(self.template.ppf(self.template.cdf(x)), x)
        x = np.linspace(0.0, 1.0, 100)
        assert_allclose(self.template.cdf(self.template.ppf(x)), x)

        x = np.linspace(-2, 2, 10)
        assert_allclose(self.norm_template.cdf(x),
                        stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)

    def test_rvs(self):
        """Sampled empirical cdf tracks the template's cdf."""
        N = 10000
        sample = self.template.rvs(size=N, random_state=123)
        assert_equal(np.sum(sample < 1.0), 0.0)
        assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
        assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
        assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
        assert_equal(np.sum(sample > 9.0), 0.0)

    def test_munp(self):
        """Raw moments of the histogram of a normal sample match the normal."""
        for n in range(4):
            assert_allclose(self.norm_template._munp(n),
                            stats.norm._munp(n, 1.0, 2.5), rtol=0.05)

    def test_entropy(self):
        """Entropy of the histogram of a normal sample matches the normal."""
        assert_allclose(self.norm_template.entropy(),
                        stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
|
HesselTjeerdsma/Cyber-Physical-Pacman-Game
|
Algor/flask/lib/python2.7/site-packages/scipy/stats/tests/test_distributions.py
|
Python
|
apache-2.0
| 117,215
|
[
"Gaussian"
] |
ceb1c78a046ced8c5f7e11be78d75fb89f7c7b2750671683f646b3b8f0a46c15
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PTransform and descendants.
A PTransform is an object describing (not executing) a computation. The actual
execution semantics for a transform is captured by a runner object. A transform
object always belongs to a pipeline object.
A PTransform derived class needs to define the expand() method that describes
how one or more PValues are created by the transform.
The module defines a few standard transforms: FlatMap (parallel do),
GroupByKey (group by key), etc. Note that the expand() methods for these
classes contain code that will add nodes to the processing graph associated
with a pipeline.
As support for the FlatMap transform, the module also defines a DoFn
class and wrapper class that allows lambda functions to be used as
FlatMap processing functions.
"""
from __future__ import absolute_import
import copy
import itertools
import operator
import os
import sys
import threading
from builtins import hex
from builtins import object
from builtins import zip
from functools import reduce
from google.protobuf import message
from apache_beam import error
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.internal import util
from apache_beam.portability import python_urns
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.display import HasDisplayData
from apache_beam.typehints import typehints
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import WithTypeHints
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.decorators import getfullargspec
from apache_beam.typehints.trivial_inference import instance_to_type
from apache_beam.typehints.typehints import validate_composite_type_param
from apache_beam.utils import proto_utils
__all__ = [
'PTransform',
'ptransform_fn',
'label_from_callable',
]
class _PValueishTransform(object):
"""Visitor for PValueish objects.
A PValueish is a PValue, or list, tuple, dict of PValuesish objects.
This visits a PValueish, contstructing a (possibly mutated) copy.
"""
def visit_nested(self, node, *args):
if isinstance(node, (tuple, list)):
args = [self.visit(x, *args) for x in node]
if isinstance(node, tuple) and hasattr(node.__class__, '_make'):
# namedtuples require unpacked arguments in their constructor
return node.__class__(*args)
else:
return node.__class__(args)
elif isinstance(node, dict):
return node.__class__(
{key: self.visit(value, *args) for (key, value) in node.items()})
else:
return node
class _SetInputPValues(_PValueishTransform):
  """Swaps out PValues by object identity using a replacement map."""
  def visit(self, node, replacements):
    # Keyed by id(): only the exact objects in the map are replaced.
    key = id(node)
    if key in replacements:
      return replacements[key]
    return self.visit_nested(node, replacements)
# Caches to allow for materialization of values when executing a pipeline
# in-process, in eager mode. This cache allows the same _MaterializedResult
# object to be accessed and used despite Runner API round-trip serialization.
# The cache is keyed by (os.getpid(), id(pipeline)); all access is guarded
# by the lock below.
_pipeline_materialization_cache = {}
_pipeline_materialization_lock = threading.Lock()
def _allocate_materialized_pipeline(pipeline):
  """Creates an empty materialization-result cache for ``pipeline``."""
  cache_key = (os.getpid(), id(pipeline))
  with _pipeline_materialization_lock:
    _pipeline_materialization_cache[cache_key] = {}
def _allocate_materialized_result(pipeline):
  """Registers and returns a new _MaterializedResult for ``pipeline``.

  Raises:
    ValueError: if _allocate_materialized_pipeline was not called first.
  """
  cache_key = (os.getpid(), id(pipeline))
  with _pipeline_materialization_lock:
    if cache_key not in _pipeline_materialization_cache:
      raise ValueError('Materialized pipeline is not allocated for result '
                       'cache.')
    results = _pipeline_materialization_cache[cache_key]
    # Result ids are dense per pipeline: the next id is the current count.
    result = _MaterializedResult(id(pipeline), len(results))
    results[result._result_id] = result
  return result
def _get_materialized_result(pipeline_id, result_id):
  """Looks up a previously allocated _MaterializedResult in this process.

  Raises:
    Exception: if the pipeline was not materialized in this process
      (e.g. the unpickling happens in a remote or subprocess runner).
  """
  cache_key = (os.getpid(), pipeline_id)
  with _pipeline_materialization_lock:
    if cache_key not in _pipeline_materialization_cache:
      raise Exception(
          'Materialization in out-of-process and remote runners is not yet '
          'supported.')
    return _pipeline_materialization_cache[cache_key][result_id]
def _release_materialized_pipeline(pipeline):
  """Drops the materialization cache for ``pipeline`` in this process."""
  cache_key = (os.getpid(), id(pipeline))
  with _pipeline_materialization_lock:
    del _pipeline_materialization_cache[cache_key]
class _MaterializedResult(object):
  """Mutable receptacle for the elements of one materialized PValue."""

  def __init__(self, pipeline_id, result_id):
    self._pipeline_id = pipeline_id
    self._result_id = result_id
    # Filled in during pipeline execution by the materialization DoFn.
    self.elements = []

  def __reduce__(self):
    # When unpickled (during Runner API roundtrip serialization), get the
    # _MaterializedResult object from the cache so that values are written
    # to the original _MaterializedResult when run in eager mode.
    return (_get_materialized_result, (self._pipeline_id, self._result_id))
class _MaterializedDoOutputsTuple(pvalue.DoOutputsTuple):
  """Eager-mode view of a DoOutputsTuple backed by materialized results.

  Wraps a deferred DoOutputsTuple and serves, per output tag, the list of
  elements collected in the corresponding _MaterializedResult.
  """

  def __init__(self, deferred, results_by_tag):
    super(_MaterializedDoOutputsTuple, self).__init__(
        None, None, deferred._tags, deferred._main_tag)
    self._deferred = deferred
    self._results_by_tag = results_by_tag

  def __getitem__(self, tag):
    if tag not in self._results_by_tag:
      # Fix: the message previously read "is not a a defined output tag".
      raise KeyError(
          'Tag %r is not a defined output tag of %s.' % (
              tag, self._deferred))
    return self._results_by_tag[tag].elements
class _AddMaterializationTransforms(_PValueishTransform):
  """Attaches a materializing ParDo to every PValue in a pvalueish.

  Used for eager, in-process execution: each PValue's elements are written
  into a _MaterializedResult that can be read back after the run.
  """

  def _materialize_transform(self, pipeline):
    # Returns a (labeled ParDo, result receptacle) pair for `pipeline`.
    result = _allocate_materialized_result(pipeline)

    # Need to define _MaterializeValuesDoFn here to avoid circular
    # dependencies.
    from apache_beam import DoFn
    from apache_beam import ParDo

    class _MaterializeValuesDoFn(DoFn):
      def process(self, element):
        # Captures `result` by closure; appends every processed element.
        result.elements.append(element)

    materialization_label = '_MaterializeValues%d' % result._result_id
    return (materialization_label >> ParDo(_MaterializeValuesDoFn()),
            result)

  def visit(self, node):
    if isinstance(node, pvalue.PValue):
      transform, result = self._materialize_transform(node.pipeline)
      # Applying the transform hooks the materialization into the graph.
      node | transform
      return result
    elif isinstance(node, pvalue.DoOutputsTuple):
      # Materialize each tagged output, main tag included.
      results_by_tag = {}
      for tag in itertools.chain([node._main_tag], node._tags):
        results_by_tag[tag] = self.visit(node[tag])
      return _MaterializedDoOutputsTuple(node, results_by_tag)
    else:
      return self.visit_nested(node)
class _FinalizeMaterialization(_PValueishTransform):
  """Substitutes materialized element lists back into a pvalueish."""
  def visit(self, node):
    if isinstance(node, _MaterializedResult):
      # Unwrap to the raw list of collected elements.
      return node.elements
    if isinstance(node, _MaterializedDoOutputsTuple):
      # Already exposes materialized element lists via __getitem__.
      return node
    return self.visit_nested(node)
class _GetPValues(_PValueishTransform):
  """Accumulates the PValues contained in a pvalueish into a list."""
  def visit(self, node, pvalues):
    if isinstance(node, (pvalue.PValue, pvalue.DoOutputsTuple)):
      pvalues.append(node)
      return
    self.visit_nested(node, pvalues)
def get_nested_pvalues(pvalueish):
  """Returns a list of all PValues nested inside ``pvalueish``."""
  collected = []
  _GetPValues().visit(pvalueish, collected)
  return collected
class _ZipPValues(object):
  """Pairs each PValue in a pvalueish with a value in a parallel out sibling.

  Sibling should have the same nested structure as pvalueish. Leaves in
  sibling are expanded across nested pvalueish lists, tuples, and dicts.
  For example

    ZipPValues().visit({'a': pc1, 'b': (pc2, pc3)},
                       {'a': 'A', 'b': 'B'})

  will return

    [('a', pc1, 'A'), ('b', pc2, 'B'), ('b', pc3, 'B')]
  """

  def visit(self, pvalueish, sibling, pairs=None, context=None):
    # Top-level call: allocate the accumulator, recurse, and return it.
    # Recursive calls pass pairs explicitly and return None.
    if pairs is None:
      pairs = []
      self.visit(pvalueish, sibling, pairs, context)
      return pairs
    elif isinstance(pvalueish, (pvalue.PValue, pvalue.DoOutputsTuple)):
      pairs.append((context, pvalueish, sibling))
    elif isinstance(pvalueish, (list, tuple)):
      self.visit_sequence(pvalueish, sibling, pairs, context)
    elif isinstance(pvalueish, dict):
      self.visit_dict(pvalueish, sibling, pairs, context)

  def visit_sequence(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, (list, tuple)):
      # Pad sibling with Nones so zip never truncates pvalueish.
      for ix, (p, s) in enumerate(zip(
          pvalueish, list(sibling) + [None] * len(pvalueish))):
        self.visit(p, s, pairs, 'position %s' % ix)
    else:
      # Non-sequence sibling: broadcast it across all entries.
      for p in pvalueish:
        self.visit(p, sibling, pairs, context)

  def visit_dict(self, pvalueish, sibling, pairs, context):
    if isinstance(sibling, dict):
      for key, p in pvalueish.items():
        self.visit(p, sibling.get(key), pairs, key)
    else:
      # Non-dict sibling: broadcast it across all values.
      for p in pvalueish.values():
        self.visit(p, sibling, pairs, context)
class PTransform(WithTypeHints, HasDisplayData):
  """A transform object used to modify one or more PCollections.

  Subclasses must define an expand() method that will be used when the transform
  is applied to some arguments. Typical usage pattern will be:

    input | CustomTransform(...)

  The expand() method of the CustomTransform object passed in will be called
  with input as an argument.
  """
  # By default, transforms don't have any side inputs.
  side_inputs = ()

  # Used for nullary transforms.
  pipeline = None

  # Default is unset.
  _user_label = None

  def __init__(self, label=None):
    super(PTransform, self).__init__()
    self.label = label

  @property
  def label(self):
    # Falls back to a class-name-derived label when no user label was set.
    return self._user_label or self.default_label()

  @label.setter
  def label(self, value):
    self._user_label = value

  def default_label(self):
    return self.__class__.__name__

  def with_input_types(self, input_type_hint):
    """Annotates the input type of a :class:`PTransform` with a type-hint.

    Args:
      input_type_hint (type): An instance of an allowed built-in type, a custom
        class, or an instance of a
        :class:`~apache_beam.typehints.typehints.TypeConstraint`.

    Raises:
      ~exceptions.TypeError: If **input_type_hint** is not a valid type-hint.
        See
        :obj:`apache_beam.typehints.typehints.validate_composite_type_param()`
        for further details.

    Returns:
      PTransform: A reference to the instance of this particular
      :class:`PTransform` object. This allows chaining type-hinting related
      methods.
    """
    validate_composite_type_param(input_type_hint,
                                  'Type hints for a PTransform')
    return super(PTransform, self).with_input_types(input_type_hint)

  def with_output_types(self, type_hint):
    """Annotates the output type of a :class:`PTransform` with a type-hint.

    Args:
      type_hint (type): An instance of an allowed built-in type, a custom class,
        or a :class:`~apache_beam.typehints.typehints.TypeConstraint`.

    Raises:
      ~exceptions.TypeError: If **type_hint** is not a valid type-hint. See
        :obj:`~apache_beam.typehints.typehints.validate_composite_type_param()`
        for further details.

    Returns:
      PTransform: A reference to the instance of this particular
      :class:`PTransform` object. This allows chaining type-hinting related
      methods.
    """
    validate_composite_type_param(type_hint, 'Type hints for a PTransform')
    return super(PTransform, self).with_output_types(type_hint)

  def type_check_inputs(self, pvalueish):
    self.type_check_inputs_or_outputs(pvalueish, 'input')

  def infer_output_type(self, unused_input_type):
    # A declared simple output hint wins; otherwise anything goes.
    return self.get_type_hints().simple_output_type(self.label) or typehints.Any

  def type_check_outputs(self, pvalueish):
    self.type_check_inputs_or_outputs(pvalueish, 'output')

  def type_check_inputs_or_outputs(self, pvalueish, input_or_output):
    """Checks declared input/output type hints against actual element types.

    Args:
      pvalueish: The (possibly nested) input or output value(s).
      input_or_output: Either 'input' or 'output'; selects which declared
        hints to check and is used in error messages.

    Raises:
      TypeCheckError: On ambiguous hints or an element-type mismatch.
    """
    hints = getattr(self.get_type_hints(), input_or_output + '_types')
    if not hints:
      return
    arg_hints, kwarg_hints = hints
    if arg_hints and kwarg_hints:
      raise TypeCheckError(
          'PTransform cannot have both positional and keyword type hints '
          'without overriding %s._type_check_%s()' % (
              self.__class__, input_or_output))
    root_hint = (
        arg_hints[0] if len(arg_hints) == 1 else arg_hints or kwarg_hints)
    # Pair each PValue in the structure with its corresponding hint.
    for context, pvalue_, hint in _ZipPValues().visit(pvalueish, root_hint):
      if pvalue_.element_type is None:
        # TODO(robertwb): It's a bug that we ever get here. (typecheck)
        continue
      if hint and not typehints.is_consistent_with(pvalue_.element_type, hint):
        at_context = ' %s %s' % (input_or_output, context) if context else ''
        raise TypeCheckError(
            '%s type hint violation at %s%s: expected %s, got %s' % (
                input_or_output.title(), self.label, at_context, hint,
                pvalue_.element_type))

  def _infer_output_coder(self, input_type=None, input_coder=None):
    """Returns the output coder to use for output of this transform.

    Note: this API is experimental and is subject to change; please do not rely
    on behavior induced by this method.

    The Coder returned here should not be wrapped in a WindowedValueCoder
    wrapper.

    Args:
      input_type: An instance of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint for the input type, or None if not available.
      input_coder: Coder object for encoding input to this PTransform, or None
        if not available.

    Returns:
      Coder object for encoding output of this PTransform or None if unknown.
    """
    # TODO(ccy): further refine this API.
    return None

  def _clone(self, new_label):
    """Clones the current transform instance under a new label."""
    transform = copy.copy(self)
    transform.label = new_label
    return transform

  def expand(self, input_or_inputs):
    # Must be overridden by subclasses; defines the transform's behavior.
    raise NotImplementedError

  def __str__(self):
    return '<%s>' % self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    # Includes label/inputs/side_inputs sections only when present.
    return '%s(PTransform)%s%s%s' % (
        self.__class__.__name__,
        ' label=[%s]' % self.label if (hasattr(self, 'label') and
                                       self.label) else '',
        ' inputs=%s' % str(self.inputs) if (hasattr(self, 'inputs') and
                                            self.inputs) else '',
        ' side_inputs=%s' % str(self.side_inputs) if self.side_inputs else '')

  def _check_pcollection(self, pcoll):
    # Validates that pcoll is a PCollection bound to a pipeline.
    if not isinstance(pcoll, pvalue.PCollection):
      raise error.TransformError('Expecting a PCollection argument.')
    if not pcoll.pipeline:
      raise error.TransformError('PCollection not part of a pipeline.')

  def get_windowing(self, inputs):
    """Returns the window function to be associated with transform's output.

    By default most transforms just return the windowing function associated
    with the input PCollection (or the first input if several).
    """
    # TODO(robertwb): Assert all input WindowFns compatible.
    return inputs[0].windowing

  def __rrshift__(self, label):
    # Supports 'Label' >> transform by wrapping in a labeled proxy.
    return _NamedPTransform(self, label)

  def __or__(self, right):
    """Used to compose PTransforms, e.g., ptransform1 | ptransform2."""
    if isinstance(right, PTransform):
      return _ChainedPTransform(self, right)
    return NotImplemented

  def __ror__(self, left, label=None):
    """Used to apply this PTransform to non-PValues, e.g., a tuple."""
    pvalueish, pvalues = self._extract_input_pvalues(left)
    pipelines = [v.pipeline for v in pvalues if isinstance(v, pvalue.PValue)]
    if pvalues and not pipelines:
      # All inputs are raw values: build an ad-hoc pipeline to run eagerly.
      deferred = False
      # pylint: disable=wrong-import-order, wrong-import-position
      from apache_beam import pipeline
      from apache_beam.options.pipeline_options import PipelineOptions
      # pylint: enable=wrong-import-order, wrong-import-position
      p = pipeline.Pipeline(
          'DirectRunner', PipelineOptions(sys.argv))
    else:
      if not pipelines:
        if self.pipeline is not None:
          p = self.pipeline
        else:
          raise ValueError('"%s" requires a pipeline to be specified '
                           'as there are no deferred inputs.'% self.label)
      else:
        # All deferred inputs must come from the same pipeline.
        p = self.pipeline or pipelines[0]
        for pp in pipelines:
          if p != pp:
            raise ValueError(
                'Mixing value from different pipelines not allowed.')
      deferred = not getattr(p.runner, 'is_eager', False)
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.transforms.core import Create
    # pylint: enable=wrong-import-order, wrong-import-position
    # Wrap each raw (non-PValue, non-None) input in a Create transform so
    # the whole structure consists of deferred values.
    replacements = {id(v): p | 'CreatePInput%s' % ix >> Create(v)
                    for ix, v in enumerate(pvalues)
                    if not isinstance(v, pvalue.PValue) and v is not None}
    pvalueish = _SetInputPValues().visit(pvalueish, replacements)
    self.pipeline = p
    result = p.apply(self, pvalueish, label)
    if deferred:
      return result
    # Eager execution: run the pipeline now and return materialized values.
    _allocate_materialized_pipeline(p)
    materialized_result = _AddMaterializationTransforms().visit(result)
    p.run().wait_until_finish()
    _release_materialized_pipeline(p)
    return _FinalizeMaterialization().visit(materialized_result)

  def _extract_input_pvalues(self, pvalueish):
    """Extract all the pvalues contained in the input pvalueish.

    Returns pvalueish as well as the flat inputs list as the input may have to
    be copied as inspection may be destructive.

    By default, recursively extracts tuple components and dict values.

    Generally only needs to be overriden for multi-input PTransforms.
    """
    # pylint: disable=wrong-import-order
    from apache_beam import pipeline
    # pylint: enable=wrong-import-order
    if isinstance(pvalueish, pipeline.Pipeline):
      pvalueish = pvalue.PBegin(pvalueish)

    def _dict_tuple_leaves(pvalueish):
      # Yields the leaves of arbitrarily nested tuples/dicts.
      if isinstance(pvalueish, tuple):
        for a in pvalueish:
          for p in _dict_tuple_leaves(a):
            yield p
      elif isinstance(pvalueish, dict):
        for a in pvalueish.values():
          for p in _dict_tuple_leaves(a):
            yield p
      else:
        yield pvalueish

    return pvalueish, tuple(_dict_tuple_leaves(pvalueish))

  # Maps urn -> (parameter_type, constructor) for runner API deserialization.
  _known_urns = {}

  @classmethod
  def register_urn(cls, urn, parameter_type, constructor=None):
    """Registers a constructor for deserializing transforms with this urn.

    Usable either as a statement (constructor given) or as a decorator
    (constructor omitted).
    """
    def register(constructor):
      cls._known_urns[urn] = parameter_type, constructor
      return staticmethod(constructor)
    if constructor:
      # Used as a statement.
      register(constructor)
    else:
      # Used as a decorator.
      return register

  def to_runner_api(self, context, has_parts=False):
    """Serializes this transform to a runner API FunctionSpec proto."""
    from apache_beam.portability.api import beam_runner_api_pb2
    urn, typed_param = self.to_runner_api_parameter(context)
    if urn == python_urns.GENERIC_COMPOSITE_TRANSFORM and not has_parts:
      # TODO(BEAM-3812): Remove this fallback.
      urn, typed_param = self.to_runner_api_pickled(context)
    # The payload may be a proto message, a str, or already bytes.
    return beam_runner_api_pb2.FunctionSpec(
        urn=urn,
        payload=typed_param.SerializeToString()
        if isinstance(typed_param, message.Message)
        else typed_param.encode('utf-8') if isinstance(typed_param, str)
        else typed_param)

  @classmethod
  def from_runner_api(cls, proto, context):
    """Reconstructs a transform from a FunctionSpec via its registered urn."""
    if proto is None or not proto.urn:
      return None
    parameter_type, constructor = cls._known_urns[proto.urn]
    return constructor(
        proto_utils.parse_Bytes(proto.payload, parameter_type),
        context)

  def to_runner_api_parameter(self, unused_context):
    # The payload here is just to ease debugging.
    return (python_urns.GENERIC_COMPOSITE_TRANSFORM,
            getattr(self, '_fn_api_payload', str(self)))

  def to_runner_api_pickled(self, unused_context):
    # Fallback serialization: pickle the whole transform object.
    return (python_urns.PICKLED_TRANSFORM,
            pickler.dumps(self))

  def runner_api_requires_keyed_input(self):
    return False
@PTransform.register_urn(python_urns.GENERIC_COMPOSITE_TRANSFORM, None)
def _create_transform(payload, unused_context):
  # Reconstructs a generic composite transform as a bare PTransform whose
  # payload is retained for debugging/round-tripping.
  empty_transform = PTransform()
  empty_transform._fn_api_payload = payload
  return empty_transform
@PTransform.register_urn(python_urns.PICKLED_TRANSFORM, None)
def _unpickle_transform(pickled_bytes, unused_context):
  # Restores a transform serialized via to_runner_api_pickled().
  return pickler.loads(pickled_bytes)
class _ChainedPTransform(PTransform):
  """A composite of several PTransforms applied one after another."""

  def __init__(self, *parts):
    super(_ChainedPTransform, self).__init__(label=self._chain_label(parts))
    self._parts = parts

  def _chain_label(self, parts):
    # The combined label lists each part's label, pipe-separated.
    labels = [part.label for part in parts]
    return '|'.join(labels)

  def __or__(self, right):
    if not isinstance(right, PTransform):
      return NotImplemented
    # Create a flat list rather than a nested tree of composite
    # transforms for better monitoring, etc.
    return _ChainedPTransform(*(self._parts + (right,)))

  def expand(self, pval):
    # Thread the value through each part in order, equivalent to
    # pval | part1 | part2 | ...
    current = pval
    for part in self._parts:
      current = current | part
    return current
class PTransformWithSideInputs(PTransform):
  """A superclass for any :class:`PTransform` (e.g.
  :func:`~apache_beam.transforms.core.FlatMap` or
  :class:`~apache_beam.transforms.core.CombineFn`)
  invoking user code.

  :class:`PTransform` s like :func:`~apache_beam.transforms.core.FlatMap`
  invoke user-supplied code in some kind of package (e.g. a
  :class:`~apache_beam.transforms.core.DoFn`) and optionally provide arguments
  and side inputs to that code. This internal-use-only class contains common
  functionality for :class:`PTransform` s that fit this model.
  """

  def __init__(self, fn, *args, **kwargs):
    if isinstance(fn, type) and issubclass(fn, WithTypeHints):
      # Don't treat Fn class objects as callables.
      raise ValueError('Use %s() not %s.' % (fn.__name__, fn.__name__))
    self.fn = self.make_fn(fn)
    # Now that we figure out the label, initialize the super-class.
    super(PTransformWithSideInputs, self).__init__()

    if (any([isinstance(v, pvalue.PCollection) for v in args]) or
        any([isinstance(v, pvalue.PCollection) for v in kwargs.values()])):
      raise error.SideInputError(
          'PCollection used directly as side input argument. Specify '
          'AsIter(pcollection) or AsSingleton(pcollection) to indicate how the '
          'PCollection is to be used.')
    # Split AsSideInput markers out of args/kwargs into side_inputs.
    self.args, self.kwargs, self.side_inputs = util.remove_objects_from_args(
        args, kwargs, pvalue.AsSideInput)
    self.raw_side_inputs = args, kwargs

    # Prevent name collisions with fns of the form '<function <lambda> at ...>'
    self._cached_fn = self.fn

    # Ensure fn and side inputs are picklable for remote execution.
    self.fn = pickler.loads(pickler.dumps(self.fn))
    self.args = pickler.loads(pickler.dumps(self.args))
    self.kwargs = pickler.loads(pickler.dumps(self.kwargs))

    # For type hints, because loads(dumps(class)) != class.
    self.fn = self._cached_fn

  def with_input_types(
      self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints):
    """Annotates the types of main inputs and side inputs for the PTransform.

    Args:
      input_type_hint: An instance of an allowed built-in type, a custom class,
        or an instance of a typehints.TypeConstraint.
      *side_inputs_arg_hints: A variable length argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.
      **side_input_kwarg_hints: A dictionary argument composed of
        of an allowed built-in type, a custom class, or a
        typehints.TypeConstraint.

    Example of annotating the types of side-inputs::

      FlatMap().with_input_types(int, int, bool)

    Raises:
      :class:`~exceptions.TypeError`: If **type_hint** is not a valid type-hint.
        See
        :func:`~apache_beam.typehints.typehints.validate_composite_type_param`
        for further details.

    Returns:
      :class:`PTransform`: A reference to the instance of this particular
      :class:`PTransform` object. This allows chaining type-hinting related
      methods.
    """
    super(PTransformWithSideInputs, self).with_input_types(input_type_hint)

    for si in side_inputs_arg_hints:
      validate_composite_type_param(si, 'Type hints for a PTransform')
    for si in side_input_kwarg_hints.values():
      validate_composite_type_param(si, 'Type hints for a PTransform')

    self.side_inputs_types = side_inputs_arg_hints
    return WithTypeHints.with_input_types(
        self, input_type_hint, *side_inputs_arg_hints, **side_input_kwarg_hints)

  def type_check_inputs(self, pvalueish):
    """Checks the main input's and side inputs' types against declared hints.

    Raises:
      TypeCheckError: If an actual argument type is inconsistent with its
        declared hint.
    """
    type_hints = self.get_type_hints().input_types
    if type_hints:
      args, kwargs = self.raw_side_inputs

      def element_type(side_input):
        if isinstance(side_input, pvalue.AsSideInput):
          return side_input.element_type
        return instance_to_type(side_input)

      # Bind both the actual (inferred) argument types and the declared
      # hints to the underlying fn's signature, then compare per argument.
      arg_types = [pvalueish.element_type] + [element_type(v) for v in args]
      kwargs_types = {k: element_type(v) for (k, v) in kwargs.items()}
      argspec_fn = self._process_argspec_fn()
      bindings = getcallargs_forhints(argspec_fn, *arg_types, **kwargs_types)
      hints = getcallargs_forhints(argspec_fn, *type_hints[0], **type_hints[1])
      for arg, hint in hints.items():
        if arg.startswith('__unknown__'):
          continue
        if hint is None:
          continue
        if not typehints.is_consistent_with(
            bindings.get(arg, typehints.Any), hint):
          raise TypeCheckError(
              'Type hint violation for \'%s\': requires %s but got %s for %s'
              % (self.label, hint, bindings[arg], arg))

  def _process_argspec_fn(self):
    """Returns an argspec of the function actually consuming the data.
    """
    raise NotImplementedError

  def make_fn(self, fn):
    # TODO(silviuc): Add comment describing that this is meant to be overriden
    # by methods detecting callables and wrapping them in DoFns.
    return fn

  def default_label(self):
    return '%s(%s)' % (self.__class__.__name__, self.fn.default_label())
class _PTransformFnPTransform(PTransform):
  """A class wrapper for a function-based transform."""

  def __init__(self, fn, *args, **kwargs):
    super(_PTransformFnPTransform, self).__init__()
    # Deferred call: expand() invokes fn(pcoll, *args, **kwargs).
    self._fn = fn
    self._args = args
    self._kwargs = kwargs

  def display_data(self):
    res = {'fn': (self._fn.__name__
                  if hasattr(self._fn, '__name__')
                  else self._fn.__class__),
           'args': DisplayDataItem(str(self._args)).drop_if_default('()'),
           'kwargs': DisplayDataItem(str(self._kwargs)).drop_if_default('{}')}
    return res

  def expand(self, pcoll):
    # Since the PTransform will be implemented entirely as a function
    # (once called), we need to pass through any type-hinting information that
    # may have been annotated via the .with_input_types() and
    # .with_output_types() methods.
    kwargs = dict(self._kwargs)
    args = tuple(self._args)

    # TODO(BEAM-5878) Support keyword-only arguments.
    try:
      if 'type_hints' in getfullargspec(self._fn).args:
        args = (self.get_type_hints(),) + args
    except TypeError:
      # Might not be a function.
      pass
    return self._fn(pcoll, *args, **kwargs)

  def default_label(self):
    # Labels include the first positional argument, if any, to distinguish
    # multiple uses of the same wrapped function.
    if self._args:
      return '%s(%s)' % (
          label_from_callable(self._fn), label_from_callable(self._args[0]))
    return label_from_callable(self._fn)
def ptransform_fn(fn):
  """A decorator for a function-based PTransform.

  Experimental; no backwards-compatibility guarantees.

  Args:
    fn: A function implementing a custom PTransform.

  Returns:
    A factory function that, when called, returns a _PTransformFnPTransform
    instance wrapping the function-based PTransform.

  This wrapper provides an alternative, simpler way to define a PTransform.
  The standard method is to subclass from PTransform and override the expand()
  method. An equivalent effect can be obtained by defining a function that
  takes an input PCollection and additional optional arguments and returns a
  resulting PCollection. For example::

    @ptransform_fn
    def CustomMapper(pcoll, mapfn):
      return pcoll | ParDo(mapfn)

  The equivalent approach using PTransform subclassing::

    class CustomMapper(PTransform):

      def __init__(self, mapfn):
        super(CustomMapper, self).__init__()
        self.mapfn = mapfn

      def expand(self, pcoll):
        return pcoll | ParDo(self.mapfn)

  With either method the custom PTransform can be used in pipelines as if
  it were one of the "native" PTransforms::

    result_pcoll = input_pcoll | 'Label' >> CustomMapper(somefn)

  Note that for both solutions the underlying implementation of the pipe
  operator (i.e., `|`) will inject the pcoll argument in its proper place
  (first argument if no label was specified and second argument otherwise).
  """
  # TODO(robertwb): Consider removing staticmethod to allow for self parameter.

  def callable_ptransform_factory(*args, **kwargs):
    return _PTransformFnPTransform(fn, *args, **kwargs)
  return callable_ptransform_factory
def label_from_callable(fn):
  """Derives a human-readable label for ``fn``.

  Preference order: an explicit default_label() method, then the callable's
  __name__ (lambdas are described by source file and line number), and
  finally str(fn) as a last resort.
  """
  if hasattr(fn, 'default_label'):
    return fn.default_label()
  if not hasattr(fn, '__name__'):
    return str(fn)
  if fn.__name__ != '<lambda>':
    return fn.__name__
  code = fn.__code__
  return '<lambda at %s:%s>' % (
      os.path.basename(code.co_filename), code.co_firstlineno)
class _NamedPTransform(PTransform):
  """Wraps a transform with an explicit label, as produced by label >> t."""

  def __init__(self, transform, label):
    super(_NamedPTransform, self).__init__(label)
    # The wrapped transform is applied under this wrapper's label.
    self.transform = transform

  def __ror__(self, pvalueish, _unused=None):
    # Delegate application to the wrapped transform, forcing our label.
    return self.transform.__ror__(pvalueish, self.label)

  def expand(self, pvalue):
    # Application always goes through __ror__; expanding the wrapper itself
    # is a programming error.
    raise RuntimeError("Should never be expanded directly.")
|
mxm/incubator-beam
|
sdks/python/apache_beam/transforms/ptransform.py
|
Python
|
apache-2.0
| 30,477
|
[
"VisIt"
] |
5168fefe95dd2ab68deb881177ded9103b172158573f92a47cec7339e5aa7554
|
"""Decision module.
This module contains the different decision functors used by agents to call actions.
Also contains helper classes used by these.
Designed and developed by Sever Topan.
"""
# Standard.
import os
import sys
import re
import pickle
import random
import re
import copy
import math
# Third party.
from matplotlib import pyplot
import numpy as np
# Local.
from . import utility
from . import callback
class DecisionMutableValue(object):
    """Base class for decision mutable objects.

    A decision mutable value represents a value that a particular decision
    module will try to optimize for. They are used to parameterize functions
    within AdjSim's definition of an 'action'.
    """
    # Marker base class only; concrete subclasses implement value storage,
    # random initialization (_set_value_random) and perturbation
    # (_perturb_locally / _perturb_around_locus).
    pass
class DecisionMutableBool(DecisionMutableValue):
    """Contains a boolean that the decision module will modify.

    A decision mutable bool represents a value that a particular decision
    module will try to optimize for. ``perturbation_scale`` is the
    probability that a perturbation flips the current value.
    """

    DEFAULT_PERTURBATION_SCALE = 0.3

    def __init__(self, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()
        self._perturbation_scale = perturbation_scale
        self._value = None

    @property
    def value(self):
        """bool: Obtain the value."""
        return self._value

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules."""
        self._value = bool(value)

    def _set_value_random(self):
        """Assign a uniformly random boolean value."""
        self._set_value(random.getrandbits(1))

    def _perturb_locally(self):
        """Perturb around the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Flip ``locus`` with probability ``perturbation_scale``."""
        should_flip = random.random() < self._perturbation_scale
        self._set_value(not bool(locus) if should_flip else locus)
class DecisionMutableFloat(DecisionMutableValue):
    """Contains a bounded float that the decision module will modify.

    A decision mutable float represents a value that a particular decision
    module will try to optimize for. The value is restricted to the
    inclusive range [min_val, max_val]. ``perturbation_scale`` is the
    standard deviation used for gaussian perturbation (np.random.normal).
    """

    DEFAULT_PERTURBATION_SCALE = 1.0

    def __init__(self, min_val, max_val, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()
        self._min_val = float(min_val)
        self._max_val = float(max_val)
        self._perturbation_scale = perturbation_scale
        self._value = None

    @property
    def value(self):
        """float: Obtain the value."""
        return self._value

    @property
    def min_val(self):
        """float: Obtain the minimum bound."""
        return self._min_val

    @property
    def max_val(self):
        """float: Obtain the maximum bound."""
        return self._max_val

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules; enforces the bounds."""
        if value < self._min_val or value > self._max_val:
            raise ValueError
        self._value = float(value)

    def _set_value_random(self):
        """Draw a value uniformly from [min_val, max_val]."""
        self._set_value(random.uniform(self.min_val, self.max_val))

    def _perturb_locally(self):
        """Perturb around the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Perturb around the given locus value."""
        self._perturb_value_gaussian(locus)

    def _perturb_value_gaussian(self, locus):
        """Gaussian perturbation around ``locus``, clipped to the bounds."""
        candidate = np.random.normal(locus, self._perturbation_scale)
        self._set_value(np.clip(candidate, self.min_val, self.max_val))
class DecisionMutableInt(DecisionMutableValue):
    """Contains a bounded integer that the decision module will modify.

    A decision mutable integer represents a value that a particular decision
    module will try to optimize for. The value is restricted to the
    inclusive range [min_val, max_val]. ``perturbation_scale`` is the
    standard deviation used for gaussian perturbation (np.random.normal).
    """

    DEFAULT_PERTURBATION_SCALE = 1.0

    def __init__(self, min_val, max_val, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()
        self._min_val = int(min_val)
        self._max_val = int(max_val)
        self._perturbation_scale = perturbation_scale
        self._value = None

    @property
    def value(self):
        """int: Obtain the value."""
        return self._value

    @property
    def min_val(self):
        """int: Obtain the minimum bound."""
        return self._min_val

    @property
    def max_val(self):
        """int: Obtain the maximum bound."""
        return self._max_val

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules; enforces the bounds."""
        if value < self._min_val or value > self._max_val:
            raise ValueError
        self._value = int(value)

    def _set_value_random(self):
        """Draw an integer uniformly from [min_val, max_val]."""
        self._set_value(random.randint(self.min_val, self.max_val))

    def _perturb_locally(self):
        """Perturb around the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Perturb around the given locus value."""
        self._perturb_value_gaussian(locus)

    def _perturb_value_gaussian(self, locus):
        """Gaussian perturbation around ``locus``, rounded and clipped."""
        candidate = round(np.random.normal(locus, self._perturbation_scale))
        self._set_value(np.clip(candidate, self.min_val, self.max_val))
class ArrayConstraint(object):
    """Abstract base class for array constraints."""

    def satisfies(self, value):
        """Return whether ``value`` satisfies the constraint.

        Must be overridden by concrete subclasses.
        """
        raise NotImplementedError
class PositiveSumConstraint(ArrayConstraint):
    """Constrain a decision-mutable array so that all elements sum to a given value.

    All elements will be positive.
    """

    def __init__(self, sum_constraint):
        # None must be rejected before the isclose check, which would
        # otherwise raise a TypeError on a None operand.
        if sum_constraint is None:
            raise ValueError("Sum may not be None.")
        if math.isclose(sum_constraint, 0):
            raise ValueError("Sum may not be 0.")
        self.sum = sum_constraint

    def satisfies(self, value):
        """Return whether the elements of ``value`` sum to the target."""
        total = np.sum(value)
        return math.isclose(total, self.sum)
class RangeConstraint(ArrayConstraint):
    """Constrain a decision-mutable array so that all elements fall in a given range."""

    def __init__(self, min_val, max_val):
        if min_val is None or max_val is None:
            raise ValueError("Bounds may not be None.")
        self.min_val = min_val
        self.max_val = max_val

    def satisfies(self, value):
        """Return whether every element lies in [min_val, max_val]."""
        if not (value >= self.min_val).all():
            return False
        return (value <= self.max_val).all()
class DecisionMutableBoolArray(DecisionMutableValue):
    """Contains an array of booleans that the decision module will modify.

    A decision mutable bool array represents a value that a particular
    decision module will try to optimize for. ``perturbation_scale`` is the
    per-element probability that a perturbation flips that element.
    """

    DEFAULT_PERTURBATION_SCALE = 1.0

    def __init__(self, shape, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()
        if not np.any(shape):
            raise ValueError("Invalid shape.")
        self._shape = tuple(shape)
        self._perturbation_scale = perturbation_scale
        self._value = np.zeros(self._shape, dtype=np.bool_)

    @property
    def value(self):
        """np.array: Obtain the value."""
        return self._value

    @property
    def shape(self):
        """np.array: Obtain the array shape."""
        return self._shape

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules; validates dtype/shape."""
        if not isinstance(value, np.ndarray) or value.dtype != np.bool_:
            raise TypeError
        if value.shape != self._shape:
            raise ValueError
        self._value = value

    def _set_value_random(self):
        """Assign each element True/False with equal probability."""
        self._value = np.random.random(self._shape) > 0.5

    def _perturb_locally(self):
        """Perturb around the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Flip each element of ``locus`` with probability perturbation_scale."""
        flip_mask = np.random.random(self._shape) < self._perturbation_scale
        self._set_value(np.logical_xor(locus, flip_mask))
class DecisionMutableIntArray(DecisionMutableValue):
    """Contains an integer array that the decision module will modify.

    A decision mutable integer array represents a value that a particular decision
    module will try to optimize for.
    This integer must be given viable constraints between which the decision module will try to find an optimal
    value to fulfill its loss function.

    A constraint must be specified. SumConstraint is not supported.

    perturbation_scale relates to the scale of the np.random.normal.
    """

    DEFAULT_PERTURBATION_SCALE = 1.0

    def __init__(self, shape, constraint, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()

        # Error check constraints.
        if not issubclass(type(constraint), ArrayConstraint):
            raise ValueError("Invalid constraint.")
        if not np.any(shape):
            raise ValueError("Invalid shape.")

        self._constraint = copy.copy(constraint)
        self._shape = tuple(shape)
        self._value = np.zeros(self._shape, dtype=np.int_)
        self._perturbation_scale = perturbation_scale

    @property
    def value(self):
        """np.array: Obtain the value."""
        return self._value

    @property
    def constraint(self):
        """ArrayContraint: Obtain a copy of the constraint."""
        return copy.copy(self._constraint)

    @property
    def shape(self):
        """np.array: Obtain the array shape."""
        return self._shape

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules; validates dtype, shape
        and the configured constraint."""
        if not isinstance(value, np.ndarray) or value.dtype != np.int_:
            raise TypeError
        if value.shape != self._shape or not self.constraint.satisfies(value):
            raise ValueError
        self._value = value

    def _set_value_random(self):
        """Private function to assign random array value based on constraints."""
        if isinstance(self._constraint, RangeConstraint):
            self._value = np.random.randint(self._constraint.min_val, self._constraint.max_val, size=self._shape)
        else:
            raise ValueError("Invalid constraint type.")

    def _perturb_locally(self):
        """Private function to assign value based local perturbation to the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Private function to assign value based local perturbation to the given parameter."""
        if isinstance(self._constraint, RangeConstraint):
            self._perturb_value_gaussian_range_constraint(locus)
        else:
            raise ValueError("Invalid constraint type.")

    def _perturb_value_gaussian_range_constraint(self, locus):
        """Private function to assign value based on a gaussian perturbation."""
        # Use np.int_ instead of the deprecated builtin alias np.int (removed
        # in NumPy 1.24); this also matches the np.int_ dtype check enforced
        # by _set_value.
        self._set_value(np.clip(np.random.normal(locus, self._perturbation_scale).astype(np.int_), \
            self._constraint.min_val, self._constraint.max_val))
class DecisionMutableFloatArray(DecisionMutableValue):
    """Contains a float array that the decision module will modify.

    A decision mutable float array represents a value that a particular decision module will try
    to optimize for. It must be given viable constraints between which the decision module will
    try to find an optimal value to fulfill its loss function.

    A constraint must be specified. perturbation_scale relates to the scale (sigma) of
    np.random.normal used for local perturbation.
    """
    DEFAULT_PERTURBATION_SCALE = 1.0

    def __init__(self, shape, constraint, perturbation_scale=DEFAULT_PERTURBATION_SCALE):
        super().__init__()
        # Error check constraints.
        if not issubclass(type(constraint), ArrayConstraint):
            raise ValueError("Invalid constraint.")
        if not np.any(shape):
            raise ValueError("Invalid shape.")
        self._constraint = copy.copy(constraint)
        self._shape = tuple(shape)
        # BUG FIX: np.float_ was removed in NumPy 2.0; np.float64 is its
        # canonical (identical) dtype.
        self._value = np.zeros(self._shape, dtype=np.float64)
        self._perturbation_scale = perturbation_scale

    @property
    def value(self):
        """np.ndarray: Obtain the value."""
        return self._value

    @property
    def constraint(self):
        """ArrayConstraint: Obtain a copy of the constraint."""
        return copy.copy(self._constraint)

    @property
    def shape(self):
        """tuple: Obtain the array shape."""
        return self._shape

    @property
    def perturbation_scale(self):
        """float: Obtain the perturbation scale."""
        return self._perturbation_scale

    def _set_value(self, value):
        """Private setter for use by decision modules.

        Raises:
            TypeError: If value is not a float64 np.ndarray.
            ValueError: If the shape differs or the constraint is not satisfied.
        """
        if not isinstance(value, np.ndarray) or value.dtype != np.float64:
            raise TypeError("Value must be an np.ndarray of dtype float64.")
        if value.shape != self._shape or not self.constraint.satisfies(value):
            raise ValueError("Value must match the configured shape and satisfy the constraint.")
        self._value = value

    def _set_value_random(self):
        """Private function to assign random array value based on constraints."""
        if isinstance(self._constraint, RangeConstraint):
            self._value = np.random.uniform(self._constraint.min_val, self._constraint.max_val, size=self._shape)
        elif isinstance(self._constraint, PositiveSumConstraint):
            # Normalize a uniform draw so the entries sum to the constraint's total.
            temp = np.random.random(size=self._shape)
            self._value = temp/np.sum(temp)*self._constraint.sum
        else:
            raise ValueError("Invalid constraint type.")

    def _perturb_locally(self):
        """Private function to assign value based on a local perturbation of the current value."""
        self._perturb_around_locus(self._value)

    def _perturb_around_locus(self, locus):
        """Private function to assign value based on a local perturbation around the given locus."""
        if isinstance(self._constraint, RangeConstraint):
            self._perturb_value_gaussian_range_constraint(locus)
        elif isinstance(self._constraint, PositiveSumConstraint):
            self._perturb_value_gaussian_sum_constraint(locus)
        else:
            raise ValueError("Invalid constraint type.")

    def _perturb_value_gaussian_range_constraint(self, locus):
        """Private function to assign value based on a gaussian perturbation, clipped into range."""
        self._set_value(np.clip(np.random.normal(locus, self._perturbation_scale),
                                self._constraint.min_val, self._constraint.max_val))

    def _perturb_value_gaussian_sum_constraint(self, locus):
        """Private function to assign value based on a zero-mean gaussian perturbation.

        The perturbation is de-meaned so the array's sum is preserved.
        """
        temp = np.random.normal(np.zeros(self._shape), self._perturbation_scale)
        perturbation = temp - np.sum(temp)/temp.size
        self._set_value(locus + perturbation)
class _DecisionMutablePremise(object):
"""Container for a decision mutable in an action premise iteration.
Attributes:
name (string): The decision mutable attribute name.
value (object): The value of the decision mutable.
"""
def __init__(self, name=None, value=None):
self.name = name
self.value = value
def set(self, target):
getattr(target, self.name)._set_value(self.value)
def __repr__(self):
"""Debug printing."""
return "\n(" + repr(self.name) + " - " + repr(self.value) + ")"
class _ActionPremiseIteration(object):
"""Container for an action premse iteration.
Contains one iteration of an _ActionPremise.
Attributes:
action_name (string): The action name.
decision_mutables (list): The list of _DecisionMutablePremise objects.
"""
def __init__(self, action_name=None, decision_mutables=None):
self.action_name = action_name
self.decision_mutables = [] if decision_mutables is None else decision_mutables
def set_mutables(self, target):
"""Sets the decision mutable attributes in the iteration.
Args:
source (Agent): The target agent to have its decision-mutables set.
"""
try:
for decision_mutable in self.decision_mutables:
decision_mutable.set(target)
except:
raise utility.MissingAttributeException
def call_action(self, simulation, source):
"""Calls the action in the iteration.
Args:
simulation (Simulation): The Simulation.
source (Agent): The source agent.
"""
action = source.actions.get(self.action_name)
if action is None:
raise utility.MissingAttributeException
action(simulation, source)
def __repr__(self):
"""Debug printing."""
return "\n" + repr(self.action_name) + " - " + repr(self.decision_mutables)
class _ActionPremise(object):
"""Container for an action premse.
An action premise is a representation of a series of actions for an agent to call
alongside a series of values to attribute to an agent's decision mutable attributes.
It is basically a snapshot of the computation that an agent can do during a simulation step.
Attributes:
iterations (list): The list of _ActionPremiseIteration objects.
"""
def __init__(self, iterations=None):
self.iterations = [] if iterations is None else iterations
def call(self, simulation, source):
"""Calls the action premise.
Args:
simulation (Simulation): The Simulation.
source (Agent): The source agent.
"""
# Call all iterations.
for iteration in self.iterations:
# Check if complete.
if source.step_complete:
return
# Setup and cast.
iteration.set_mutables(source)
iteration.call_action(simulation, source)
def __repr__(self):
"""Debug printing."""
return repr(self.iterations)
class Decision(object):
    """The base decision class.

    A decision is a functor that performs selective and structured agent
    computation during a simulation step. Subclasses must implement __call__.
    """
    def __call__(self, simulation, source):
        """Perform computation."""
        raise NotImplementedError
class NoCastDecision(object):
    """A decision to not do any computation during a simulation step.

    NOTE(review): unlike the other decision classes in this module, this one
    inherits from object rather than Decision -- confirm whether any
    isinstance(x, Decision) checks should apply to it.
    """
    def __call__(self, simulation, source):
        """Perform no computation."""
        return
class RandomSingleCastDecision(Decision):
    """A decision to cast a single randomly-chosen action during a simulation step.
    """
    def __call__(self, simulation, source):
        """Call a single randomly-selected action.

        All of the agent's decision-mutable values are randomized first.

        Args:
            simulation (Simulation): The Simulation.
            source (Agent): The source agent.

        Raises:
            utility.ActionException: If the chosen action fails.
        """
        # If no actions to choose from, skip.
        if len(source.actions) == 0:
            return
        # Set decision mutable values to random values.
        decision_mutable_names = [d for d in dir(source)
                                  if issubclass(type(getattr(source, d)), DecisionMutableValue)]
        for decision_mutable_name in decision_mutable_names:
            getattr(source, decision_mutable_name)._set_value_random()
        # Randomly execute an action.
        try:
            action = random.choice(list(source.actions.values()))
            action(simulation, source)
        except Exception as error:
            # BUG FIX: the previous bare `except:` discarded the root cause;
            # chain it so the original failure remains visible.
            raise utility.ActionException from error
class RandomRepeatedCastDecision(Decision):
    """A decision to cast multiple randomly-chosen actions during a simulation step.

    Actions are repeatedly cast until the agent's step_complete attribute is set to True.
    """
    def __call__(self, simulation, source):
        """Repeatedly call randomly-selected actions until the step completes.

        The agent's decision-mutable values are re-randomized before each cast.

        Args:
            simulation (Simulation): The Simulation.
            source (Agent): The source agent.

        Raises:
            utility.ActionException: If a chosen action fails.
        """
        # If no actions to choose from, skip.
        if len(source.actions) == 0:
            return
        # Randomly execute an action while the agent has not completed their timestep.
        while not source.step_complete:
            # Set decision mutable values to random values.
            decision_mutable_names = [d for d in dir(source)
                                      if issubclass(type(getattr(source, d)), DecisionMutableValue)]
            for decision_mutable_name in decision_mutable_names:
                getattr(source, decision_mutable_name)._set_value_random()
            try:
                action = random.choice(list(source.actions.values()))
                action(simulation, source)
            except Exception as error:
                # BUG FIX: chain the original failure instead of masking it.
                raise utility.ActionException from error
class FunctionalDecision(Decision):
    """An abstract decision class that uses adjsim's functional step implementation.

    The agent effectively acts as a function: the perception callable produces an
    observation of the environment, which drives the choice of action and the values
    assigned to decision mutable attributes. The loss callable scores the agent's
    performance; decision modules may optimize it (e.g. via reinforcement learning).

    Args:
        perception (callable): The perception callable. Can return any value.
        loss (callable): The loss callable. Must return a float-convertible object.
    """
    def __init__(self, perception, loss):
        # Store the two callables that define the agent-as-a-function contract.
        self.loss = loss
        self.perception = perception

    def __call__(self, simulation, source):
        """Perform computation. Subclasses must override."""
        raise NotImplementedError
class _QLearningHistoryItem(object):
"""Container class for Q-Learning history items."""
def __init__(self, observation, action_premise, loss):
self.observation = observation
self.action_premise = action_premise
self.loss = loss
class _QTableEntry(object):
"""Container class for Q-Table entries."""
def __init__(self, action_premise, loss):
self.action_premise = action_premise
self.loss = loss
def __repr__(self):
return repr(self.loss) + " : " + repr(self.action_premise)
class QLearningDecision(FunctionalDecision):
    """A decision module based on Q-Learning.

    This module employs Q-Learning (https://en.wikipedia.org/wiki/Q-learning), a reinforcement
    learning technique, to incrementally enhance its performance as measured by the provided loss
    function. Over many simulations, the performance of the agent will be increased.

    Args:
        perception (callable): The perception callable. Can return any value.
        loss (callable): The loss callable. Must return a float-convertible object.
        simulation (Simulation): The simulation whose callback suite is used by the decision
            module to register necessary callbacks.
        input_file_name (string): The name of the .pkl file from which to read a previous Q-Table.
            The previous Q-Table will be used as a starting point in the current simulation.
        output_file_name (string): The name of the .pkl file where the new Q-Table will be saved.
        discount_factor (float): The discount factor (gamma) used in the temporal-difference
            calculation of agent loss. Defaults to 0.95.
        nonconformity_probability (float): When an agent finds an existing entry in its Q-Table, it will
            still choose to perform a random action with a probability equivalent to
            nonconformity_probability. Defaults to 0.3.
    """
    # Default Q-Table location: the entry script's name with ".py" replaced by
    # ".qlearning.pkl".  NOTE(review): the pattern should be the raw string
    # r"\.py" -- "\." is an invalid escape sequence (DeprecationWarning on
    # modern Python).
    DEFAULT_IO_FILE_NAME = re.sub("\.py", ".qlearning.pkl", sys.argv[0])
    DEFAULT_DISCOUNT_FACTOR = 0.95
    DEFAULT_NONCONFORMITY_FACTOR = 0.3
    # When True, the Q-Table is dumped to stdout on load and save.
    print_debug = False
    def __init__(self, perception, loss, simulation,
            input_file_name=DEFAULT_IO_FILE_NAME,
            output_file_name=DEFAULT_IO_FILE_NAME,
            discount_factor=DEFAULT_DISCOUNT_FACTOR,
            nonconformity_probability=DEFAULT_NONCONFORMITY_FACTOR):
        super().__init__(perception, loss)
        # Initialize members.
        # q_table maps observation -> _QTableEntry; history_bank maps
        # agent id -> list of _QLearningHistoryItem for the current run.
        self.q_table = {}
        self.history_bank = {}
        self.input_file_name = input_file_name
        self.output_file_name = output_file_name
        self.discount_factor = discount_factor
        self.nonconformity_probability = nonconformity_probability
        self.completion_callback = callback.SingleParameterCallback()
        # Private members.
        self._tentative_history_bank = {}
        self._simulation = simulation
        # Load q_table
        self._load_q_table_from_disk()
        # Register callbacks so losses are finalized when the simulation ends
        # or an agent is removed mid-run.
        self._simulation.callbacks.simulation_complete.register(self._on_simulation_complete)
        self._simulation.callbacks.agent_removed.register(self._on_agent_removal)
    def _load_q_table_from_disk(self):
        """Loads the Q-Table from the file described by input_file_name"""
        # Print ui messages.
        if not self.input_file_name is None and os.path.isfile(self.input_file_name):
            sys.stdout.write("Q Learning training data found, loading from " + self.input_file_name + "...")
            sys.stdout.flush()
        else:
            print("Q Learning training data not found.")
            return
        # Load data.
        # SECURITY NOTE: pickle.load executes arbitrary code from the file;
        # only load Q-Tables from trusted locations.
        # NOTE(review): the file handle from open() is never closed here.
        self.q_table = pickle.load(open(self.input_file_name, "rb"))
        # Ui messages.
        if QLearningDecision.print_debug:
            self.print_q_table()
        sys.stdout.write("done\n")
        sys.stdout.flush()
    def __call__(self, simulation, source):
        """The functor call that executes Q-learning.

        Agents randomly choose actions if they have never encountered a particular
        observation before. Otherwise, they conditionally execute the previously encountered
        action premise (note nonconformity_probability).

        Information regarding randomly chosen abilities is stored in the history bank until
        simulation completion.

        Args:
            simulation (Simulation): The Simulation.
            source (Agent): The source Agent.
        """
        # Obtain previous step's loss for the given agent.
        # Loss is calculated for every antecedent step right before the subsequent action is made by a given agent.
        self._save_loss(source)
        # Observe environment.
        observation = None
        try:
            observation = self.perception(simulation, source)
        except:
            raise utility.PerceptionException
        # Check for a known action premise.
        q_table_entry = self.q_table.get(observation)
        action_premise = q_table_entry.action_premise if not q_table_entry is None else None
        # Cast action.
        # The random action will still be called even if a q_table entry is found.
        # This happens with a probability represented by the nonconformity_probability.
        if action_premise is None:
            action_premise = self._generate_unprecedented_action_premise(source)
        elif random.random() < self.nonconformity_probability:
            action_premise = self._generate_nonconformative_action_premise(source, action_premise)
        else:
            # Call the action premise.
            action_premise.call(simulation, source)
        # Bank history.
        # Loss is left as None here and filled in lazily by _save_loss on the
        # agent's next step (or at removal/simulation completion).
        history_item = _QLearningHistoryItem(observation, action_premise, None)
        agent_history = self.history_bank.get(source.id)
        if agent_history is None:
            self.history_bank[source.id] = [history_item]
        else:
            self.history_bank[source.id].append(history_item)
        # Post-step existence check.
        # If the agent is removed from the simulation during the current step, the on_agent_removed callback
        # has fired before we had a chance to add the latest entry into the history bank. In this case,
        # we recalculate the loss here.
        if not source._exists:
            self._save_loss(source)
    def _generate_unprecedented_action_premise(self, source):
        """Generate, invoke and return an action premise for the case where no known
        prior instance of a given observation has been seen by the QLearning module.
        """
        return self._generate_new_random_action_premise(source)
    def _generate_nonconformative_action_premise(self, source, existing_action_premise):
        """Generate, invoke and return an action premise for the case we invoke non-conformity."""
        return self._generate_new_random_action_premise(source)
    def _generate_new_random_action_premise(self, source):
        """Generate, invoke and return a random new action premise.

        Here we essentially perform a RandomRepeatedCast decision, and return the
        resulting action premise.
        """
        # Prepare action premise.
        action_premise = _ActionPremise()
        # This is essentially a RandomRepeatedCast.
        while not source.step_complete:
            action_premise_iteration = _ActionPremiseIteration()
            # Set decision mutable values to random values, save to action premise.
            decision_mutable_names = [d for d in dir(source) if issubclass(type(getattr(source, d)), DecisionMutableValue)]
            for decision_mutable_name in decision_mutable_names:
                decision_mutable = getattr(source, decision_mutable_name)
                decision_mutable._set_value_random()
                action_premise_iteration.decision_mutables.append(_DecisionMutablePremise(decision_mutable_name, decision_mutable.value))
            # Cast fallback decision, save to action premise.
            # NOTE(review): the bare except converts every failure (including
            # programming errors in the action) into ActionException.
            try:
                identifier, action = random.choice(list(source.actions.items()))
                action(self._simulation, source)
                action_premise_iteration.action_name = identifier
            except:
                raise utility.ActionException
            # Save action premise.
            action_premise.iterations.append(action_premise_iteration)
        return action_premise
    def _update_q_table_from_history_bank(self):
        """Update Q-Table from the history bank.

        Apply temporal difference to the losses stored in the history bank, and store
        action premises that result in losses that are better than previously recorded
        entries in the Q-Table (lower loss is better).
        """
        # Ui.
        sys.stdout.write("Processing Q-Learning data...")
        sys.stdout.flush()
        # Process history.
        num_updated = 0
        for agent_history in self.history_bank.values():
            # Apply temporal difference on banked data.
            # Walk backwards so each step's loss accumulates the discounted
            # loss of all subsequent steps.
            for i in range(len(agent_history) - 2, -1, -1):
                agent_history[i].loss += self.discount_factor*agent_history[i + 1].loss
            # Add entry to q_table if loss is better than existing entry, or if observation has never been seen.
            for history_item in agent_history:
                existing_q_entry = self.q_table.get(history_item.observation)
                if existing_q_entry is None or existing_q_entry.loss > history_item.loss:
                    num_updated += 1
                    self.q_table[history_item.observation] = _QTableEntry(history_item.action_premise, history_item.loss)
        # Trigger callback before history bank is cleared.
        self.completion_callback(self.history_bank)
        # Clear history bank.
        self.history_bank.clear()
        # Ui.
        sys.stdout.write("done - {} entries updated\n".format(num_updated))
        sys.stdout.flush()
    def _save_q_table_to_disk(self):
        """Saves the Q-Table to the file described by output_file_name"""
        # Don't even try to save if None
        if self.output_file_name is None:
            print("No output file name: Q-Learning data not saved.")
            return
        # Ui.
        sys.stdout.write("Saving Q-Learning data...")
        sys.stdout.flush()
        # Rename old .pkl file into a tmp while the new file is being written (crash safety).
        temp_file_name = self.output_file_name + ".tmp"
        if os.path.isfile(self.output_file_name):
            os.rename(self.output_file_name, temp_file_name)
        # write to file
        # NOTE(review): the file handle from open() is never closed explicitly.
        try:
            pickle.dump(self.q_table, open(self.output_file_name, "wb"), pickle.HIGHEST_PROTOCOL)
        except:
            raise Exception("An error occured while writing the Q-Table to the output file.")
        # remove old file
        if os.path.isfile(temp_file_name):
            os.remove(temp_file_name)
        # print messages
        if QLearningDecision.print_debug:
            self.print_q_table()
        # Ui.
        sys.stdout.write("done\n")
        sys.stdout.flush()
    def _on_simulation_complete(self, simulation):
        """The callback that finalizes a simulation.

        Args:
            simulation (Simulation): The Simulation.
        """
        # Update final losses.
        for agent in simulation.agents:
            if agent.decision is self:
                self._save_loss(agent)
        # Save all final losses.
        self._update_q_table_from_history_bank()
        self._save_q_table_to_disk()
    def _save_loss(self, source):
        """Saves the loss into the antecedent history item.

        Args:
            source (Agent): The source agent.
        """
        current_loss = None
        try:
            current_loss = self.loss(self._simulation, source)
        except:
            raise utility.LossException
        entry = self.history_bank.get(source.id)
        if not entry is None: # This occurs during the first timestep.
            if entry[-1].loss is None: # This check enables us to perform the Post-step existence check.
                entry[-1].loss = current_loss
    def _on_agent_removal(self, agent):
        """The callback that saves an agent's loss after it is removed from the simulation.

        Args:
            agent (Agent): The Agent.
        """
        if agent.decision is self:
            self._save_loss(agent)
    def print_q_table(self):
        """Debug printing of the Q-Table"""
        for observation, item in self.q_table.items():
            print("  ", observation, " > ", item)
class PerturbativeQLearningDecision(QLearningDecision):
    """A decision module based on Q-Learning.

    This module employs Q-Learning (https://en.wikipedia.org/wiki/Q-learning), a reinforcement
    learning technique, to incrementally enhance its performance as measured by the provided loss
    function. Over many simulations, the performance of the agent will be increased.

    The difference between this and the canonical QLearning decision module is the non-conformity
    behaviour: non-conforming calls perturb existing action premises instead of generating
    completely new ones.

    Args:
        perception (callable): The perception callable. Can return any value.
        loss (callable): The loss callable. Must return a float-convertible object.
        simulation (Simulation): The simulation whose callback suite is used by the decision
            module to register necessary callbacks.
        input_file_name (string): The name of the .pkl file from which to read a previous Q-Table.
            The previous Q-Table will be used as a starting point in the current simulation.
        output_file_name (string): The name of the .pkl file where the new Q-Table will be saved.
        discount_factor (float): The discount factor (gamma) used in the temporal-difference
            calculation of agent loss. Defaults to 0.95.
        nonconformity_probability (float): When an agent finds an existing entry in its Q-Table, it will
            still choose to perform a random action with a probability equivalent to
            nonconformity_probability. Defaults to 0.3.
        perturbation_config (Config): The perturbation probabilities; a fresh default Config is
            created per instance when omitted.
    """
    class Config(object):
        """Configuration object for Perturbative Q-Learning. There are 3 types of mutually-inclusive
        perturbations that can take place. The values contained within this object represent the
        probability that the given perturbation will take place.

        Args:
            reorder_probability (float): Probability that the existing action sequence will be reordered.
            ignore_proabability (float): Probability that existing actions will be removed from the sequence.
            local_perturbation_probability (float): Probability that local perturbations will be induced
                upon the individual elements of the existing action sequence.
        """
        DEFAULT_REORDER_PROBABILITY = 0.2
        DEFAULT_IGNORE_PROBABILITY = 0.3
        DEFAULT_LOCAL_PERTURBATION_PROBABILITY = 0.7

        # NOTE: the misspelled keyword 'ignore_proabability' is kept for
        # backward compatibility with existing callers.
        def __init__(self, reorder_probability=DEFAULT_REORDER_PROBABILITY,
                     ignore_proabability=DEFAULT_IGNORE_PROBABILITY,
                     local_perturbation_probability=DEFAULT_LOCAL_PERTURBATION_PROBABILITY):
            self._reorder_probability = reorder_probability
            self._ignore_proabability = ignore_proabability
            self._local_perturbation_probability = local_perturbation_probability

        @staticmethod
        def _validate_probability(value):
            """Coerce value to float and check that it lies in [0, 1]."""
            val = float(value)
            if val > 1.0 or val < 0:
                raise ValueError("Value must be between 0 and 1")
            return val

        @property
        def reorder_probability(self):
            """float: obtain the probability that reordering will take place."""
            return self._reorder_probability

        @reorder_probability.setter
        def reorder_probability(self, value):
            self._reorder_probability = self._validate_probability(value)

        @property
        def ignore_probability(self):
            """float: obtain the probability that existing actions will be ignored."""
            return self._ignore_proabability

        @ignore_probability.setter
        def ignore_probability(self, value):
            self._ignore_proabability = self._validate_probability(value)

        @property
        def local_perturbation_probability(self):
            """float: obtain the probability that existing actions will be locally perturbed."""
            return self._local_perturbation_probability

        @local_perturbation_probability.setter
        def local_perturbation_probability(self, value):
            self._local_perturbation_probability = self._validate_probability(value)

    def __init__(self, perception, loss, simulation,
                 input_file_name=QLearningDecision.DEFAULT_IO_FILE_NAME,
                 output_file_name=QLearningDecision.DEFAULT_IO_FILE_NAME,
                 discount_factor=QLearningDecision.DEFAULT_DISCOUNT_FACTOR,
                 nonconformity_probability=QLearningDecision.DEFAULT_NONCONFORMITY_FACTOR,
                 perturbation_config=None):
        super().__init__(perception, loss, simulation, input_file_name, output_file_name,
                         discount_factor, nonconformity_probability)
        # BUG FIX: the default used to be a single Config() instance created at
        # class-definition time and shared (mutably) by every decision object.
        if perturbation_config is None:
            perturbation_config = PerturbativeQLearningDecision.Config()
        self._config = perturbation_config

    @property
    def config(self):
        """Config: obtain the perturbation configuration object."""
        return self._config

    @config.setter
    def config(self, value):
        # BUG FIX: the bare name 'Config' is not visible from method scope
        # (class bodies do not form an enclosing scope for their methods), so
        # the original isinstance check raised NameError; qualify it.
        if not isinstance(value, PerturbativeQLearningDecision.Config):
            raise TypeError("Value must be of type PerturbativeQLearningDecision.Config")
        self._config = value

    def _generate_nonconformative_action_premise(self, source, existing_action_premise):
        """Generate, invoke and return an action premise for the case we invoke non-conformity.

        The existing premise is perturbed (ignore / reorder / local perturbation per the
        Config probabilities), replayed, and then extended with random iterations until
        the agent's step completes.
        """
        # Prepare action premise. Shallow copy is sufficient: iterations is
        # immediately rebound below, so the stored premise is never mutated.
        action_premise = copy.copy(existing_action_premise)
        # Ignore-type perturbation: drop each iteration with ignore_probability.
        ignore_mask = np.random.random((len(action_premise.iterations),)) < self._config.ignore_probability
        action_premise.iterations = [e for i, e in enumerate(action_premise.iterations) if not ignore_mask[i]]
        # Reorder-type perturbation.
        if random.random() < self._config.reorder_probability:
            random.shuffle(action_premise.iterations)
        # Cast existing action premise.
        iteration_index = 0
        while not source.step_complete and iteration_index < len(action_premise.iterations):
            # Set and locally perturb decision mutables.
            action_premise.iterations[iteration_index].set_mutables(source)
            # BUG FIX: local_perturbation_probability was documented and stored
            # but never consulted -- the perturbation previously always ran.
            if random.random() < self._config.local_perturbation_probability:
                decision_mutable_names = [d for d in dir(source)
                                          if issubclass(type(getattr(source, d)), DecisionMutableValue)]
                for decision_mutable_name in decision_mutable_names:
                    getattr(source, decision_mutable_name)._perturb_locally()
            # Invoke action.
            action_premise.iterations[iteration_index].call_action(self._simulation, source)
            iteration_index += 1
        # Call further random actions until the step completes.
        while not source.step_complete:
            action_premise_iteration = _ActionPremiseIteration()
            # Set decision mutable values to random values, save to action premise.
            decision_mutable_names = [d for d in dir(source)
                                      if issubclass(type(getattr(source, d)), DecisionMutableValue)]
            for decision_mutable_name in decision_mutable_names:
                decision_mutable = getattr(source, decision_mutable_name)
                decision_mutable._set_value_random()
                action_premise_iteration.decision_mutables.append(
                    _DecisionMutablePremise(decision_mutable_name, decision_mutable.value))
            # Cast fallback decision, save to action premise.
            try:
                identifier, action = random.choice(list(source.actions.items()))
                action(self._simulation, source)
                action_premise_iteration.action_name = identifier
            except Exception as error:
                # BUG FIX: chain the original failure instead of masking it.
                raise utility.ActionException from error
            # Save action premise.
            action_premise.iterations.append(action_premise_iteration)
        return action_premise
|
SeverTopan/AdjSim
|
adjsim/decision.py
|
Python
|
gpl-3.0
| 44,149
|
[
"Gaussian"
] |
38ca0df276f735477b7ac56546b7bcb3888d99544a2627c336a625da8a61069f
|
#! /usr/bin/env python
"""Locate files using fuzzy matching."""
from __future__ import division
import os.path
import sys
def locate(filename, start):
    """Search the filesystem under start for filename.

    @param filename: The filename to look for.
    @param start: The starting directory.
    @return: A list of matching names, ordered from best to worst.
    """
    results = []
    # BUG FIX: os.path.walk was removed in Python 3; os.walk provides the same
    # traversal. The original callback received directories and files together
    # in one name list, so both are rated here.
    for dirname, dirnames, filenames in os.walk(start):
        visit((results, filename), dirname, dirnames + filenames)
    results.sort(reverse=True)
    # BUG FIX: guard against no entries at all (previously an IndexError).
    if not results:
        return []
    if results[0][0] == 1:
        results = [path for rating, path in results if rating == 1]
    else:
        results = [path for rating, path in results if rating > 0]
    return results
def visit(tools, dirname, names):
    """Examine the contents of a directory, rate them, and add them to results.

    @param tools: A tuple (results, filename) with the results list to store
        into and the target filename.
    @param dirname: The current directory name.
    @param names: The names of the files in the current directory.
    """
    results, target = tools
    # Append a (rating, full path) pair for every entry in the directory.
    results.extend((rate(entry, target), os.path.join(dirname, entry)) for entry in names)
def rate(name, filename):
    """Rate the closeness of two filenames.

    @param name: The name of the first file.
    @param filename: The name of the second file.
    @return: A float between 0.0 and 1.0, with greater values indicating a
        closer match.
    """
    if name == filename:
        return 1
    if filename in name:
        # A substring match scores between 0.5 and 1.0; closer lengths rate higher.
        length_penalty = 0.5 * abs(len(name) - len(filename)) / len(name)
        return 0.5 + (0.5 - length_penalty)
    return 0
def main():
    """Command-line entry point: locate <filename> under <directory>."""
    if len(sys.argv) != 3:
        # BUG FIX: Python 2 print statements converted to the parenthesized
        # form (valid in both 2 and 3); bare exit() replaced with sys.exit().
        print("Usage: ./locate.py <filename> <directory>")
        sys.exit(1)
    filename = sys.argv[1]
    start = sys.argv[2]
    results = locate(filename, start)
    # Show only the five best matches.
    for result in results[:5]:
        print(result)


if __name__ == "__main__":
    main()
|
nickpascucci/AppDesign
|
search/locate.py
|
Python
|
mit
| 2,002
|
[
"VisIt"
] |
393d3f706af21f9ae471b4a7cf952b84b5cc1ab3cce38cb72429c636afa68c51
|
#!/usr/bin/env python
"""
Calculate cloud duration, minimum base, maximum height, mean mass, formation
time, dissipation time, maximum depth, depth evolution and corresponding times
for tracked clouds.
Output is saved in a pkl file as a list of dictionaries, with one dictionary per
cloud id. Dictionary keys are: 'id', 'duration', 'min_height', 'max_height',
'average_mass', 'l_min', 'l_max', 'depth', 'max_depth', 'time'].
Examples
-------
$ python condensed_id_stats.py
"""
from __future__ import division, print_function
import glob
import os
import numpy as np
import numpy.ma as ma
import model_config.cgils_ctl_s6_25m as mc
from netCDF4 import Dataset
try:
import cPickle as pickle
except:
import pickle
def cloud_statistics(file_name):
    """
    Return cloud duration, minimum base, maximum height, mean mass, formation
    time, dissipation time, maximum depth, depth evolution and corresponding
    times for a tracked cloud.

    Parameters
    ----------
    file_name : netCDF file name
        id_profile file for a tracked cloud with dimensions double t(t),
        double z(z).

    Return
    ------
    tuple : id, lifetime, base, top, mass, l_min, l_max, depths, max_depth,
        times
    """
    # Read netCDF dataset
    data = Dataset(file_name)
    # Cloud ID is encoded in the last 8 characters of the file stem.
    cloud_id = int(file_name[-11:-3])
    # Cloud duration (seconds)
    times = data.variables['t'][...]
    lifetime = len(times)*mc.dt
    # Formation time, dissipation time (seconds)
    l_min = times.min()*mc.dt
    l_max = times.max()*mc.dt
    # Minimum base, maximum height, maximum depth, depth evolution (metres).
    # Broadcast z across time and mask it wherever AREA is missing so the
    # per-timestep min/max only see levels where the cloud exists.
    area = ma.masked_invalid(data.variables['AREA'][...])
    z = data.variables['z'][...]
    z = z*np.ones(np.shape(area))
    z = ma.masked_array(z, ma.getmask(area))
    bases = z.min(axis=1)
    tops = z.max(axis=1)
    depths = tops - bases + mc.dz
    max_depth = depths.max()
    base = bases.min()
    top = tops.max()
    # Mean mass (kilograms).  (An unused read of the 'QN' variable was removed.)
    rho = ma.masked_invalid(data.variables['RHO'][...])
    mass = np.mean(np.sum(area*rho*mc.dz, axis=1))
    # Remove missing values so depths and times stay aligned on valid timesteps.
    times = ma.masked_array(times, ma.getmask(depths))
    depths = depths[~depths.mask]
    times = times[~times.mask]
    data.close()
    return cloud_id, lifetime, base, top, mass, l_min, l_max, depths, \
        max_depth, times
if __name__ == '__main__':
    # Create pkl directory to store cloud statistics
    if not os.path.exists('pkl'):
        os.makedirs('pkl')
    # Collect all netCDF files containing condensed profiles
    input_files = os.path.join('../id_profiles/cdf', 'condensed_profile_*.nc')
    file_list = glob.glob(input_files)
    file_list.sort()
    # Calculate and store statistics for all tracked clouds
    keys = ['id', 'duration', 'min_height', 'max_height', 'average_mass',
            'l_min', 'l_max', 'depth', 'max_depth', 'time']
    id_stats = []
    for n, file_name in enumerate(file_list):
        print("Calculating statistics for cloud id %d" % int(file_name[-11:-3]))
        id_stats.append(dict(zip(keys, cloud_statistics(file_name))))
    # Find id of longest lived cloud
    durations = np.array([])
    ids = np.array([])
    for item in id_stats:
        # BUG FIX: previously this stacked item['depth'] (a per-timestep
        # array) against one id per cloud, desynchronizing `durations` from
        # `ids`; the printed label 'max_lifetime' shows the scalar duration
        # was intended.
        durations = np.hstack((durations, item['duration']))
        ids = np.hstack((ids, item['id']))
    idx_max = np.argmax(durations)
    print('max_lifetime:', int(durations[idx_max]), 'id:', int(ids[idx_max]))
    # Save cloud statistics (with-block ensures the file handle is closed).
    with open('pkl/condensed_id_stats.pkl', 'wb') as pkl_file:
        pickle.dump(id_stats, pkl_file)
|
vladpopa/ent_analysis
|
id_stats/condensed_id_stats.py
|
Python
|
mit
| 3,685
|
[
"NetCDF"
] |
7681abb048631fde02a667ee368753f8f8d72bd4d25739e67c526e31623bb287
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Read a MAF from standard input and print the score of each block. It can
optionally recalculate each score using the hox70 matrix, and normalize the
score by the number of columns in the alignment.
TODO: Should be able to read an arbitrary scoring matrix.
usage: %prog [options]
-r, --recalculate: don't use the score from the maf, recalculate (using hox70 matrix)
-l, --lnorm: divide (normalize) score by alignment text length
"""
from __future__ import division
import sys
from bx.cookbook import doc_optparse
from bx.align import maf
from bx.align import score
from optparse import OptionParser
def main():
    """Print the score of each MAF block read from stdin (Python 2 script).

    Scores are optionally recalculated with the hox70 matrix (-r) and
    normalized by alignment text length (-l); empty blocks print "NA".
    """
    # Parse command line arguments
    options, args = doc_optparse.parse( __doc__ )
    try:
        lnorm = bool( options.lnorm )
        recalculate = bool( options.recalculate )
    except:
        doc_optparse.exit()
    # Substitution matrix (hox70-style): match/mismatch scores for A/C/G/T,
    # gap-open 400, gap-extend 30, all other symbols default to 0.
    hox70 = score.build_scoring_scheme( """  A    C    G    T
                                      91 -114  -31 -123
                                    -114  100 -125  -31
                                     -31 -125  100 -114
                                    -123  -31 -114   91 """, 400, 30, default=0 )
    maf_reader = maf.Reader( sys.stdin )
    for m in maf_reader:
        # A zero-length alignment has no meaningful score.
        if m.text_size == 0:
            print "NA"
            continue
        s = m.score
        # Recalculate?
        if recalculate:
            s = hox70.score_alignment( m )
        # Normalize?
        if lnorm:
            s = s / m.text_size
        # Print
        print s

if __name__ == "__main__":
    main()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_print_scores.py
|
Python
|
bsd-3-clause
| 1,601
|
[
"Galaxy"
] |
7e4b2a487185316be85f20c904de80add2ee758ee539439dee0ca8404f823999
|
import matplotlib
matplotlib.use("TkAgg")
import tkinter as tk
import timeit
import io
import sys
import traceback
import math
from math import sqrt
from sympy import ln
from algorithms.chebyshev import chebyshev
from algorithms.cubicsplines import cubicSpline
from algorithms.leastSquares import leastSquares
from algorithms.bezier import bezier
from algorithms.nonlinearleastsquares import nonLinearLeastSquares
from algorithms.differencemethods import differenceMethods
from algorithms.extrapolation import extrapolation
from algorithms.autodiff import autoDiff
from algorithms.trapezoidalsimpson import newtTrapSimp
from algorithms.romberg1 import romberg
from algorithms.adaptive import adaptive
from algorithms.gaussian import gaussian
from algorithms.trapezoidalsimpson import newtonTrapezoidal
from algorithms.trapezoidalsimpson import newtonSimpson
from numpy import sin, cos, tan, log
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# Button labels for the algorithm selector; each entry must match a branch
# in setInput() below.
categories = ['Chebyshev', 'Cubic Splines', 'Bezier', 'Linear Least Squares', 'Nonlinear Least Squares',
              'Difference Methods', 'Extrapolation', 'Automatic Differentiation', 'Newton-Cotes', 'Romberg', 'Adaptive', 'Gaussian']
def callback(tex, input):
    """Execute the text in `input` and show its output and runtime in `tex`.

    `tex` is the output Text widget; `input` is the Entry widget holding the
    expression to run (the parameter name shadows the builtin `input`; kept
    for interface compatibility).
    """
    plt.clf()
    out = io.StringIO()
    sys.stdout = out  # capture anything the executed snippet prints
    tex.delete("1.0", tk.END)
    try:
        w = input.get()
        start = timeit.default_timer()
        # NOTE(security): exec of raw user input — acceptable only because
        # this is a local teaching tool; never expose to untrusted input.
        exec(w)
        stop = timeit.default_timer()
        fig.canvas.draw()
        tex.insert(tk.END, out.getvalue())
        tex.insert(tk.END, 'Runtime: ' + str(stop - start) + ' seconds')
        tex.see(tk.END)  # Scroll if necessary
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tex.insert(tk.END, str(e))
        tex.insert(tk.END, str(traceback.extract_tb(exc_traceback)))
        tex.insert(tk.END, "You have entered an invalid input. Select a function from the left for example input.\n")
    finally:
        # BUGFIX: always restore stdout.  The original only restored it on
        # success, so after one failed exec() all later prints vanished into
        # the dead StringIO buffer.
        sys.stdout = sys.__stdout__
# --- Main window and right-hand pane --------------------------------------
root = tk.Tk()
root.wm_title('Numerical Analysis Project 2.2')
right = tk.Frame()
right.pack(side=tk.RIGHT, expand=1, fill=tk.BOTH)
# hack
# NOTE(review): this Text widget is shadowed by the one created later in
# txt_frm — presumably a leftover placeholder; confirm it can be removed.
tex = tk.Text()
inputframe = tk.Frame(right)
inputframe.pack(side=tk.TOP, padx=(0,8), pady=(8,8), fill=tk.X)
inputlabel = tk.Label(inputframe, text='Input: ')
inputlabel.pack(side=tk.LEFT, padx=(0,4))
# Backing variable for the input Entry; set by the category buttons.
inputText = tk.StringVar()
def setInput(tex, category):
    """Show help text for `category` in `tex` and preload an example call.

    Clears the output widget and the current plot, then writes usage
    documentation for the chosen algorithm and sets `inputText` to a
    runnable example expression.
    """
    tex.delete("1.0", tk.END)
    plt.clf()
    # One branch per entry in `categories`; each sets the example input and
    # inserts the corresponding usage text.
    if category == 'Chebyshev':
        inputText.set('chebyshev(-1, 1, 0.5, math.sin)')
        tex.insert(tk.END, 'chebyshev(a [Number], b [Number], x [Number], f [Function])\n'
                           'a, b = interval [a,b]; x = x value of function to approximate; f = function to approximate\n\n'
                           'Runs the Chebyshev algorithm up to 30 times, increasing degree n until the guess is '
                           'sufficiently close. Outputs the calculated Chebyshev value, the degree of the polynomial '
                           'where the best guess was calculated and the actual value from the function.\n\n'
                           'Example usage: chebyshev(-1, 1, 0.5, math.sin)\n'
                           'Advanced functions can be input as example: lambda x: (math.sin(x) - math.cos(x))')
    elif category == 'Cubic Splines':
        inputText.set('cubicSpline(\'(-1,3), (0,5), (3,1), (4,1), (5,1)\')')
        tex.insert(tk.END, 'cubicSpline(points [String], resolution [Integer - Default 100])\n'
                           'points = string of coordinate points; resolution affects how smooth the plot is\n\n'
                           'Prints the cubic spline functions and displays an interpolated line plot below.\n'
                           'Example usage: cubicSpline(\'(-1,3), (0,5), (3,1), (4,1), (5,1)\')\n'
                           'or cubicSpline(\'(-1,3), (0,5), (3,1), (4,1), (5,1)\', resolution=2) for a '
                           'low resolution graph.')
    elif category == 'Bezier':
        inputText.set('bezier([[1,0,6,2],[1,-1,0,1],[1,1,6,0]])')
        tex.insert(tk.END, 'bezier(points [Array])\n'
                           'points = Series of points in the form: [[1,0,6,2],[1,-1,0,1],[1,1,6,0]]\n\n'
                           'Outputs the Bezier spline\'s knots and control points based on the input coordinates.\n'
                           'Example usage: bezier([[1,0,6,2],[1,-1,0,1],[1,1,6,0]])')
    elif category == 'Linear Least Squares':
        inputText.set('leastSquares([(1.49, 44.6), (3.03, 57.8), (0.57, 49.9), (5.74, 61.3), (3.51, 49.6), '
                      '(3.73, 61.8), (2.98, 49.0), (-0.18, 44.7), (6.23, 59.2), (3.38, 53.9), (2.15, 46.5), '
                      '(2.10, 54.7), (3.93, 50.3), (2.47, 51.2), (-0.41, 45.7)],0,2)')
        tex.insert(tk.END, 'leastSquares(points [Array])\n'
                           'leastSquares(points [Array], n [Integer])\n'
                           'points = Series of points in the form: [[0, 1], [1, 2], [2, 3]] or [(0, 1), (1, 2), (2, 3)]\n'
                           'n = degree (only used when points are provided using parentheses\n\n'
                           'Takes either a series of coordinate points or a series of A and B matrices in bracket form.'
                           'If coordinates are provided, will output least squares fit function and graph.\n'
                           'If an A and B matrix is provided, it will output the coefficient, residual, and rank.\n\n'
                           'Example usage: leastSquares([[1, 1], [1, -1], [1, 1]], [2, 1, 3], 3)')
    elif category == 'Nonlinear Least Squares':
        inputText.set('nonLinearLeastSquares([[-1,0],[1,1/2],[1,-1/2]],[1,1/2,1/2],[0,0])')
        tex.insert(tk.END, 'nonLinearLeastSquares(points [array], radii [array], initial[array])\n'
                           'points = series of points in the form [[-1,0],[1,1/2],[1,-1/2]]\n'
                           'radii = array of the radius lengths of each circle in the form [1,1/2,1/2]\n'
                           'initial = set of points for your initial guess in the form [0,0]\n'
                           'It will take in the above values, ALL OF WHICH ARE REQUIRED, and will output\n'
                           'the best fit points, the standard error, and the root mean squared error RMSE\n'
                           'No graph outputting on this one as it is just a series of points\n'
                           )
    elif category == 'Difference Methods':
        inputText.set('differenceMethods(\'1/x\', 2, [0.1,0.01,0.001])')
        tex.insert(tk.END, 'differenceMethods(func function, x int, h [array])\n'
                           'function: must be entered as a string. CANNOT BE ENTERED OUTSIDE string\n'
                           'x: the point at which you are evaluating your derivative\n'
                           'h: an array of values of initial step sizes. The one on the right is the one evaluated\n'
                           'It will, using Two-point centered-difference and Three-point centered difference calculate\n'
                           'with the given inputs, output the best approximation for both and the error for both.\n'
                           'It also plots a graph\n')
    elif category == 'Extrapolation':
        inputText.set('extrapolation(lambda x: -sin(x),0,2,0.01)')
        tex.insert(tk.END, 'extrapolation(f [Function], xval [Number], n [Number], hval [Number])\n'
                           'f = function to approximate; xval = value to extrapolate from; n = levels of of extrapolation; hval = step size\n\n'
                           'Takes in a function in terms of x along with the xval you wish to eval f\'(xval) at\n.'
                           'You must provide the value for n, which is the number of levels of extrapolation, and \n'
                           'you must provide the initial stepsize h. This will return the most accurate value for \n'
                           'f\'(xval) along with the actual value and the error. You MUST enter a single variable function\n'
                           'Example usage: extrapolation(lambda x: -sin(x),0,2,0.01)')
    elif category == 'Automatic Differentiation':
        inputText.set('autoDiff(lambda x: x**2, \'x\')')
        tex.insert(tk.END, 'autoDiff(f [Function])\n'
                           'f = function to differentiate\n\n'
                           'Takes a function and calculates the derivative via automatic differentiation.\n'
                           'Functions can be input as Pythonic lambda functions or basic sin/cos/tan et cetera.\n'
                           'Supports up to three variables (x, y, z) in space-delimited string form: \'x y z\'\n\n'
                           'Example usage: autoDiff(lambda x: x**2, \'x\')'
                           '\nautoDiff(lambda x, y, z: 2 * x ** 2 + 4 * y ** 3 + 8 * z ** 4, \'x y z\')')
    elif category == 'Newton-Cotes':
        inputText.set('newtTrapSimp(lambda x: x**2, 0, 1, 10)')
        tex.insert(tk.END, 'newtTrapSimp(f [Function], a [Number], b[Number], n[Number])\n'
                           'f = function to approximate; a, b = interval [a,b]; n = # of steps to take\n\n'
                           'Calculates the best guess for the Newton-Cotes Trapezoidal/Newton-Cotes Simpson result value, and plots the '
                           'graph below.\n\n'
                           'Example usage: newtTrapSimp(lambda x: x**2, 0, 1, 10)')
    elif category == 'Romberg':
        inputText.set('romberg(math.sin, 0, 2, 10)')
        tex.insert(tk.END, 'romberg(f [Function], a [Number], b[Number], n[Number])\n'
                           'f = function to approximate; a, b = interval [a,b]; n = # of steps to take\n\n'
                           'Plots the Romberg output and also outputs the associated array.\n\n'
                           'Example usage: romberg(math.sin, 0, 2, 10)\n'
                           'Advanced functions can be input as example: lambda x: (math.sin(x) - math.cos(x))')
    elif category == 'Adaptive':
        inputText.set('adaptive(lambda x: ln(x**2+1), 0, 1, 0.5E-09, 100)')
        tex.insert(tk.END, 'adaptive(f [Function], a [Number], tolerance[Number], steps[Number])\n'
                           'a, b = interval [a,b]; tolerance = guess tolerance; steps = # of steps to take\n\n'
                           'Takes a function, a - b interval, tolerance, and number of steps and outputs the integrated'
                           ' function value, the adaptive error, and the number of iterations necessary to find the '
                           'integrated value. \n'
                           'Example usage: adaptive(lambda x: ln(x**2+1), 0, 1, 0.5E-09, 100)')
    elif category == 'Gaussian':
        inputText.set('gaussian(lambda x: (x**2 * log(x)), 1, 3)')
        tex.insert(tk.END, 'gaussian(f [Function], a [Number], b[Number], y[Number - Default None])\n'
                           'a, b = interval [a,b]; f = function to approximate; y = Gaussian Y value\n\n'
                           'Takes a function, a and b interval, and optionally, an extra Y value.'
                           'Outputs the estimated value, the actual value, and the error.\n'
                           'Example usage: gaussian(lambda x: (x**2 * log(x)), 1, 3)')
    else:
        # Unknown category: should be unreachable while buttons come from
        # the `categories` list above.
        print('Error')
# --- Input entry, matplotlib canvas, and output text widget ---------------
userinput = tk.Entry(inputframe, textvariable=inputText)
userinput.pack(side=tk.LEFT, fill=tk.X, expand=1, padx=(4,4))
# Embed a matplotlib figure in the Tk window for the algorithms that plot.
fig = plt.figure(1)
canvas = FigureCanvasTkAgg(fig, master=right)
plt.ion()
plot_widget = canvas.get_tk_widget()
plot_widget.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)
txt_frm = tk.Frame(right)
txt_frm.pack(side=tk.RIGHT, fill="x", expand=True)
# ensure a consistent GUI size
txt_frm.grid_propagate(False)
# implement stretchability
txt_frm.grid_rowconfigure(0, weight=1)
txt_frm.grid_columnconfigure(0, weight=1)
# Output area used by callback()/setInput(); rebinds the earlier `tex`.
tex = tk.Text(txt_frm, height=12)
tex.pack(fill='x')
executebutton = tk.Button(inputframe, text='Execute', command=lambda: callback(tex, userinput))
executebutton.pack(side=tk.RIGHT, padx=(4, 0))
def close():
    """Destroy the Tk root window and terminate the process."""
    root.destroy()
    # sys.exit instead of the site-module helper exit(): exit() is only
    # injected interactively by `site` and is not guaranteed in scripts.
    sys.exit(0)
# --- Left-hand column of algorithm buttons --------------------------------
bop = tk.Frame(width=200)
bop.pack(side=tk.LEFT, fill='y', pady=(8, 8), padx=(8, 8))
for k in range(0, 12):
    tv = categories[k]
    # tv=tv binds the loop value at definition time (avoids the
    # late-binding-closure pitfall where every button would get the last tv).
    b = tk.Button(bop, text=tv, command=lambda tv=tv: setInput(tex, tv))
    b.pack(fill="x", pady=(2, 2))
tk.Button(bop, text='Exit', command=lambda: close()).pack(side=tk.BOTTOM, fill='x')
# UI hacks
root.protocol("WM_DELETE_WINDOW", close)
root.lift()
# Briefly force the window topmost so it appears above other windows, then
# release so it behaves normally.
root.attributes('-topmost', True)
root.after_idle(root.attributes, '-topmost', False)
def main():
    """Seed the input prompt and run the Tk event loop until exit."""
    inputText.set("Select a button from the left for example input.")
    running = True
    while running:
        try:
            root.mainloop()
            running = False
        # More hacks: Tk occasionally raises a spurious UnicodeDecodeError;
        # swallow it and re-enter the loop.
        except UnicodeDecodeError:
            pass
        except KeyboardInterrupt:
            close()

if __name__ == '__main__':
    main()
|
protocol114/numerical-2.2
|
project.py
|
Python
|
mit
| 13,010
|
[
"Gaussian"
] |
6a143d4d7d9a5fc79d0bc2bbdb2a19af2b4688ba5c0c9c63e1f1c9b0baa60684
|
"""
This implements the value recursion for numerical semirings.
V(v) = \bigoplus_{e \in BS(v)} \omega(e) \bigotimes_{u \in tail(e)} V(u)
We also have an implementation which is robust to the presence of cycles.
:Authors: - Wilker Aziz
"""
from collections import defaultdict
from functools import reduce
from grasp.semiring import SumTimes
import itertools
import logging
def derivation_weight(derivation, semiring=SumTimes, Z=None, omega=lambda e: e.weight):
    """
    Compute the total weight of a derivation (a sequence of edges) in a semiring.

    :param derivation: sequence of edges
    :param semiring: the given semiring (requires divide if Z is given)
    :param Z: the normalisation constant (in the given semiring)
    :param omega: a function over edges
    :return: the (optionally normalised) semiring product of the edge weights
    """
    if not derivation:
        return semiring.one  # or zero?
    edge_weights = [omega(edge) for edge in derivation]
    total = semiring.times.reduce(edge_weights)
    if Z is None:
        return total
    return semiring.divide(total, Z)
def acyclic_value_recursion(forest,
                            topsorted,
                            semiring,
                            omega=lambda e: e.weight,
                            infinity=1):
    """
    Returns items' values in a given semiring.
    This is a single bottom-up pass over an acyclic forest, O(|forest|).

    :param forest: an acyclic hypergraph-like object.
    :param topsorted: nodes in bottom-up topological order.
    :param semiring: must define zero, one, plus and times.
    :param omega: a function that weighs edges/rules (defaults to the edge's
        weight); useful e.g. to convert between semirings.
    :param infinity: unused here (kept for signature parity with the robust
        variant).
    :return: mapping node -> inside value.
    """
    values = defaultdict(None)
    for node in topsorted:  # bottom-up: children are valued before parents
        edges = forest.get(node, set())
        if not edges:
            # Leaf: multiplicative identity for terminals, zero otherwise.
            values[node] = semiring.one if forest.is_terminal(node) else semiring.zero
            continue
        # A nonterminal's inside is the sum over its incoming edges of
        # (edge weight) times (product of the tail nodes' insides).
        edge_values = []
        for edge in edges:
            tail = semiring.times.reduce([values[child] for child in edge.rhs])
            edge_values.append(semiring.times(omega(edge), tail))
        values[node] = semiring.plus.reduce(edge_values)
    return values
def robust_value_recursion(forest,
                           tsort,
                           semiring,
                           omega=lambda e: e.weight,
                           infinity=20):
    """
    Returns items' values in a given semiring.
    This is a bottom-up pass through the forest which runs in O(|forest|).
    Unlike acyclic_value_recursion, it tolerates cycles: loopy buckets are
    valued by fixed-point iteration (approximate_supremum).
    :param forest: a hypergraph-like object.
    :param tsort: a TopSortTable object.
    :param semiring: must define zero, one, sum and times.
    :param omega: a function that weighs edges/rules (defaults to the edge's weight).
        You might want to use this to, for instance, convert between semirings.
    :param infinity: the maximum number of generations in supremum computations.
    :return: mapping node -> value (defaults to semiring.one for unseen nodes).
    """
    I = defaultdict(lambda: semiring.one)
    # we go bottom-up
    for bucket in tsort.iterbuckets(skip=1):  # we skip the terminals
        if len(bucket) == 1 and not tsort.selfdep(next(iter(bucket))):  # non-loopy
            parent = next(iter(bucket))
            incoming = forest.get(parent, set())
            if not incoming:  # a terminal node
                I[parent] = semiring.one if forest.is_terminal(parent) else semiring.zero
                continue
            # the inside of a nonterminal node is a sum over all of its incoming edges (rewrites)
            # for each rewriting rule, we get the product of the RHS nodes' insides times the rule weight
            partials = (reduce(semiring.times, (I[child] for child in rule.rhs), omega(rule)) for rule in incoming)
            I[parent] = reduce(semiring.plus, partials, semiring.zero)
        else:
            # Loopy (or self-dependent) bucket: iterate to a (possibly
            # approximate) supremum and fold the results back into I.
            V = approximate_supremum(forest, omega, I, bucket, semiring, infinity)
            for node, value in V.items():
                I[node] = value
    return I
def approximate_supremum(forest, omega, I, bucket, semiring, infinity=-1):
    """Iterate node values within a loopy `bucket` until they stop changing.

    Children outside the bucket use their already-final value from `I`;
    children inside the bucket use the previous generation's value.
    :param infinity: maximum number of generations; non-positive iterates
        without bound until convergence.
    :return: mapping node -> (approximate) supremum value.
    """
    # TODO: fix the following
    # Interrupting this procedure before convergence leads to an inconsistent approximation
    # where sum(inside[e] for e in BS[v]) != inside[v]
    V = defaultdict(lambda: semiring.zero)  # partial inside values for loopy nodes
    generations = range(infinity) if infinity > 0 else itertools.count()
    for g in generations:  # we iterate to "infinity"
        current = defaultdict(lambda: semiring.zero)  # the current generation
        for node in bucket:
            edges = forest.get(node, set())
            if not edges:
                current[node] = semiring.one if forest.is_terminal(node) else semiring.zero
                continue
            totals = []
            for edge in edges:
                acc = omega(edge)
                for child in edge.rhs:
                    acc = semiring.times(acc, V[child] if child in bucket else I[child])
                totals.append(acc)
            current[node] = reduce(semiring.plus, totals, semiring.zero)
        if V == current:
            # All items unchanged: they have converged to their suprema.
            logging.debug('True supremum in %d iterations', g + 1)
            break
        V = current
    return V
def _robust_value_recursion(forest,
tsort,
semiring,
omega=lambda e: e.weight,
infinity=20):
"""
Returns items' values in a given semiring.
This is a bottom-up pass through the forest which runs in O(|forest|).
:param forest: a hypergraph-like object.
:param tsort: a TopSortTable object.
:param semiring: must define zero, one, sum and times.
:param omega: a function that weighs edges/rules (defaults to the edge's weight).
You might want to use this to, for instance, convert between semirings.
:param infinity: the maximum number of generations in supremum computations.
:return:
"""
I = defaultdict(lambda: semiring.one)
# we go bottom-up
for bucket in tsort.iterbuckets(skip=1): # we skip the terminals
if False: # len(bucket) == 1: # non-loopy
parent = next(iter(bucket))
#logging.info('singleton %s', parent)
incoming = forest.get(parent, set())
if not incoming: # a terminal node
I[parent] = semiring.one if forest.is_terminal(parent) else semiring.zero
continue
# the inside of a nonterminal node is a sum over all of its incoming edges (rewrites)
# for each rewriting rule, we get the product of the RHS nodes' insides times the rule weight
partials = (reduce(semiring.times, (I[child] for child in rule.rhs), omega(rule)) for rule in incoming)
#partiasl = (semiring.times(rule.weight, semiring.times.reduce([I[child] for child in rule.rhs])) for rule in incoming)
I[parent] = reduce(semiring.plus, partials, semiring.zero)
else:
logging.info('|bucket|=%d', len(bucket))
V = defaultdict(lambda: semiring.zero) # this will hold partial inside values for loopy nodes
for g in range(infinity): # we iterate to "infinity"
logging.info('Starting generation %d/%d', g + 1, infinity)
_V = defaultdict(lambda: semiring.zero) # this is the current generation
for parent in bucket:
incoming = forest.get(parent, set())
if not incoming:
value = semiring.one if forest.is_terminal(parent) else semiring.zero
else:
partials = (reduce(semiring.times,
(V[child] if child in bucket else I[child] for child in rule.rhs),
omega(rule))
for rule in incoming)
value = reduce(semiring.plus, partials, semiring.zero)
_V[parent] = value
if V == _V:
# if the values of all items have remained unchanged,
# they have all converged to their supremum values
logging.info('Exact supremum')
break
else:
V = _V
for node, value in V.items():
I[node] = value
return I
def compute_edge_values(forest, semiring, node_values, omega=lambda e: e.weight, normalise=False):
    """
    Return the (optionally normalised) inside weights of the edges in a forest.
    Normalisation happens with respect to an edge's head inside weight.
    @param node_values: inside of nodes
    @param semiring: requires times and divide
    @param omega: a function that weighs edges/rules (serves as a bypass)
    """
    def edge_value(edge):
        # omega(edge) times the product of the tail nodes' inside values,
        # folded left-to-right.
        value = omega(edge)
        for child in edge.rhs:
            value = semiring.times(value, node_values[child])
        return value

    if normalise:
        pairs = ((edge, semiring.divide(edge_value(edge), node_values[edge.lhs]))
                 for edge in forest)
    else:
        pairs = ((edge, edge_value(edge)) for edge in forest)
    return defaultdict(None, pairs)
class LazyEdgeValues(object):
    """
    In some cases, such as in slice sampling, we are unlikely to visit every edge.
    Thus lazily computing edge values might be appropriate.

    Behaves as a read-through cache mapping edges to their (optionally
    normalised) values in the given semiring.
    """

    def __init__(self, semiring,
                 node_values,
                 edge_values=None,
                 omega=lambda e: e.weight,
                 normalise=False):
        """
        :param semiring: a semiring
        :param node_values: the values associated with nodes (in the given semiring)
        :param edge_values: optional mapping of pre-computed edge values used
            to seed the cache (copied; the caller's mapping is not mutated).
            BUGFIX: was a mutable default argument ({}), which Python shares
            across calls; a None sentinel avoids any risk of cross-instance state.
        :param omega: a weight function over edges
        :param normalise: whether to normalise an edge's value by its head node's value.
        """
        self._semiring = semiring
        self._node_values = node_values
        # A plain dict suffices; the original's defaultdict(None) behaves
        # identically since no default factory was provided.
        self._edge_values = dict(edge_values) if edge_values else {}
        self._omega = omega
        # Bind the lookup strategy once instead of branching per access.
        if normalise:
            self._compute = self._normalised
        else:
            self._compute = self._unnormalised

    def _normalised(self, edge):
        """Edge value divided by its head node's value."""
        tail_value = self._semiring.times.reduce(list(self._node_values[s] for s in edge.rhs))
        edge_value = self._semiring.times(self._omega(edge), tail_value)
        return self._semiring.divide(edge_value, self._node_values[edge.lhs])

    def _unnormalised(self, edge):
        """Raw edge value: omega(edge) times the product of tail values."""
        tail_value = self._semiring.times.reduce(list(self._node_values[s] for s in edge.rhs))
        return self._semiring.times(self._omega(edge), tail_value)

    def __getitem__(self, edge):
        """Return the edge's value, computing and caching it on first access."""
        w = self._edge_values.get(edge)
        if w is None:
            w = self._compute(edge)
            self._edge_values[edge] = w
        return w
|
wilkeraziz/grasp
|
grasp/inference/value.py
|
Python
|
apache-2.0
| 11,897
|
[
"VisIt"
] |
f66716673cf9738d4a1ef7ad51baab14b343cdaf15f7c4da5c4cd1eb216f96d7
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-jobexec
# Author : Stuart Paterson
########################################################################
__RCSID__ = "$Id$"
""" The dirac-jobexec script is equipped to execute workflows that
are specified via their XML description. The main client of
this script is the Job Wrapper.
"""
import DIRAC
from DIRAC.Core.Base import Script
# Register workflow parameter switch
Script.registerSwitch( 'p:', 'parameter=', 'Parameters that are passed directly to the workflow' )
Script.parseCommandLine()
from DIRAC.Core.Workflow.Parameter import *
from DIRAC.Core.Workflow.Module import *
from DIRAC.Core.Workflow.Step import *
from DIRAC.Core.Workflow.Workflow import *
from DIRAC.Core.Workflow.WorkflowReader import *
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.AccountingSystem.Client.DataStoreClient import DataStoreClient
from DIRAC.RequestManagementSystem.Client.Request import Request
import DIRAC
import os, os.path, sys, string
# Forcing the current directory to be the first in the PYTHONPATH
sys.path.insert( 0, os.path.realpath( '.' ) )
gLogger.showHeaders( True )
def jobexec( jobxml, wfParameters = None ):
  """Execute the workflow described by the XML file *jobxml*.

  :param jobxml: path to the workflow XML description
  :param wfParameters: optional dict of parameters to set on the workflow.
      BUGFIX: was a mutable default argument ({}); replaced with a None
      sentinel to avoid shared state across calls.
  :return: the result structure of workflow.execute()
  """
  if wfParameters is None:
    wfParameters = {}
  jobfile = os.path.abspath( jobxml )
  if not os.path.exists( jobfile ):
    gLogger.warn( 'Path to specified workflow %s does not exist' % ( jobfile ) )
    sys.exit( 1 )
  workflow = fromXMLFile( jobfile )
  gLogger.debug( workflow )
  code = workflow.createCode()
  gLogger.debug( code )
  jobID = 0
  # 'in' works on Python 2 and 3; dict.has_key() was removed in Python 3
  if 'JOBID' in os.environ:
    jobID = os.environ['JOBID']
  gLogger.info( 'DIRAC JobID %s is running at site %s' % ( jobID, DIRAC.siteName() ) )
  # Attach the standard tools the workflow modules expect to find.
  workflow.addTool( 'JobReport', JobReport( jobID ) )
  workflow.addTool( 'AccountingReport', DataStoreClient() )
  workflow.addTool( 'Request', Request() )
  # Propagate the command line parameters to the workflow if any
  for name, value in wfParameters.items():
    workflow.setValue( name, value )
  result = workflow.execute()
  return result
# --- Script entry: parse arguments, run the workflow, exit with status ----
positionalArgs = Script.getPositionalArgs()
if len( positionalArgs ) != 1:
  gLogger.debug( 'Positional arguments were %s' % ( positionalArgs ) )
  DIRAC.abort( 1, "Must specify the Job XML file description" )
# 'in' works on Python 2 and 3; dict.has_key() was removed in Python 3
if 'JOBID' in os.environ:
  gLogger.info( 'JobID: %s' % ( os.environ['JOBID'] ) )
jobXMLfile = positionalArgs[0]
parList = Script.getUnprocessedSwitches()
parDict = {}
for switch, parameter in parList:
  if switch == "p":
    # Split only on the first '=' so values may themselves contain '='
    # (the original unbounded split raised ValueError in that case).
    name, value = parameter.split( '=', 1 )
    value = value.strip()
    # The comma separated list in curly brackets is interpreted as a list
    if value.startswith( "{" ):
      value = value[1:-1].replace( '"', '' ).replace( " ", '' ).split( ',' )
      value = ';'.join( value )
    parDict[name] = value
# str.join replaces the string-module function removed in Python 3.
gLogger.debug( 'PYTHONPATH:\n%s' % ( '\n'.join( sys.path ) ) )
result = jobexec( jobXMLfile, parDict )
if not result['OK']:
  gLogger.debug( 'Workflow execution finished with errors, exiting' )
  sys.exit( 1 )
else:
  gLogger.debug( 'Workflow execution successful, exiting' )
  sys.exit( 0 )
|
avedaee/DIRAC
|
WorkloadManagementSystem/scripts/dirac-jobexec.py
|
Python
|
gpl-3.0
| 3,212
|
[
"DIRAC"
] |
6502b3ab5118cf24713f92bac5804438ee7c50f709771961929a7eca8cd643a9
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#TODO: To create a beautiful API
import time
import wx
import vtk
from vtk.util import numpy_support
from vtk.wx.wxVTKRenderWindowInteractor import wxVTKRenderWindowInteractor
import constants as const
from reader import dicom_reader
import data.vtk_utils as vtku
import utils
# Preview grid layout: NROWS x NCOLS thumbnails per page.
NROWS = 3
NCOLS = 6
NUM_PREVIEWS = NCOLS*NROWS
# Thumbnail dimensions in pixels.
# NOTE(review): PREVIEW_HEIGTH is a long-standing typo of "HEIGHT"; kept
# because external code may reference the name.
PREVIEW_WIDTH = 70
PREVIEW_HEIGTH = 70
PREVIEW_BACKGROUND = (255, 255, 255) # White
# Translatable label templates used when annotating previews.
STR_SIZE = _("Image size: %d x %d")
STR_SPC = _("Spacing: %.2f")
STR_LOCAL = _("Location: %.2f")
STR_PATIENT = "%s\n%s"
STR_ACQ = _("%s %s\nMade in InVesalius")
# Custom wx event types and their binders.
myEVT_PREVIEW_CLICK = wx.NewEventType()
EVT_PREVIEW_CLICK = wx.PyEventBinder(myEVT_PREVIEW_CLICK, 1)
myEVT_PREVIEW_DBLCLICK = wx.NewEventType()
EVT_PREVIEW_DBLCLICK = wx.PyEventBinder(myEVT_PREVIEW_DBLCLICK, 1)
myEVT_CLICK_SLICE = wx.NewEventType()
# This event occurs when the user select a preview
EVT_CLICK_SLICE = wx.PyEventBinder(myEVT_CLICK_SLICE, 1)
myEVT_CLICK_SERIE = wx.NewEventType()
# This event occurs when the user select a preview
EVT_CLICK_SERIE = wx.PyEventBinder(myEVT_CLICK_SERIE, 1)
myEVT_CLICK = wx.NewEventType()
EVT_CLICK = wx.PyEventBinder(myEVT_CLICK, 1)
class SelectionEvent(wx.PyCommandEvent):
    """Marker command event for selection actions (no extra payload)."""
    pass
class PreviewEvent(wx.PyCommandEvent):
    """Command event carrying the selected preview's id and dicom payload."""
    def __init__(self , evtType, id):
        super(PreviewEvent, self).__init__(evtType, id)
    def GetSelectID(self):
        """Return the id stored by SetSelectedID."""
        return self.SelectedID
    def SetSelectedID(self, id):
        """Store the id of the selected item on the event."""
        self.SelectedID = id
    def GetItemData(self):
        """Return the payload stored by SetItemData."""
        return self.data
    def SetItemData(self, data):
        """Attach an arbitrary payload (e.g. a dicom object) to the event."""
        self.data = data
class SerieEvent(PreviewEvent):
    """PreviewEvent specialised for series-level clicks (same payload API)."""
    def __init__(self , evtType, id):
        super(SerieEvent, self).__init__(evtType, id)
class DicomInfo(object):
    """
    Keep the information and the image used by a preview.
    """
    def __init__(self, id, dicom, title, subtitle):
        self.id = id
        self.dicom = dicom
        self.title = title
        self.subtitle = subtitle
        # Lazily-built wx.Image cache; filled on first access of `preview`.
        self._preview = None
        self.selected = False
    @property
    def preview(self):
        """Return (building and caching on first use) the preview wx.Image."""
        if self._preview:
            return self._preview
        else:
            # Map the dicom's raw values through its window/level to RGB.
            colorer = vtk.vtkImageMapToWindowLevelColors()
            colorer.SetInput(self.dicom.image.imagedata)
            colorer.SetWindow(float(self.dicom.image.window))
            colorer.SetLevel(float(self.dicom.image.level))
            colorer.SetOutputFormatToRGB()
            colorer.Update()
            width, height, z = colorer.GetOutput().GetDimensions()
            # Wrap the vtk scalar buffer as a numpy array, then as a wx image.
            r = colorer.GetOutput().GetPointData().GetScalars()
            ni = numpy_support.vtk_to_numpy(r)
            self.img = wx.ImageFromBuffer(width, height, ni)
            # Mirror(False) flips vertically — vtk and wx differ in row order.
            self._preview = self.img.Mirror(False)
            return self._preview
class DicomPaintPanel(wx.Panel):
    """Panel that paints a single (rescaled) wx.Image as its content."""
    def __init__(self, parent):
        super(DicomPaintPanel, self).__init__(parent)
        self._bind_events()
        self.image = None
        # Last non-degenerate size; fallback when GetSize() reports (0,0).
        self.last_size = (10,10)
    def _bind_events(self):
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def _build_bitmap(self, image):
        """Convert a wx.Image to the wx.Bitmap actually drawn on paint."""
        bmp = wx.BitmapFromImage(image)
        return bmp
    def _image_resize(self, image):
        """Scale `image` to the panel's current (or last valid) size."""
        self.Update()
        self.Layout()
        new_size = self.GetSize()
        # This is necessary due to darwin problem #
        if new_size != (0,0):
            self.last_size = new_size
            return image.Scale(*new_size)
        else:
            return image.Scale(*self.last_size)
    def SetImage(self, image):
        """Set the image to display and repaint."""
        self.image = image
        r_img = self._image_resize(image)
        self.bmp = self._build_bitmap(r_img)
        self.Refresh()
    def OnPaint(self, evt):
        if self.image:
            dc = wx.PaintDC(self)
            dc.Clear()
            dc.DrawBitmap(self.bmp, 0, 0)
    def OnSize(self, evt):
        # Rebuild the bitmap at the new size, then let wx continue layout.
        if self.image:
            self.bmp = self._build_bitmap(self._image_resize(self.image))
        self.Refresh()
        evt.Skip()
class Preview(wx.Panel):
    """
    The little previews.

    A selectable thumbnail: title, subtitle and a DicomPaintPanel image,
    emitting EVT_PREVIEW_CLICK / EVT_PREVIEW_DBLCLICK events.
    """
    def __init__(self, parent):
        super(Preview, self).__init__(parent)
        # Will it be white?
        self.select_on = False
        self.dicom_info = None
        self._init_ui()
        self._bind_events()
    def _init_ui(self):
        """Build the title/subtitle/image layout."""
        self.SetBackgroundColour(PREVIEW_BACKGROUND)
        self.title = wx.StaticText(self, -1, _("Image"))
        self.subtitle = wx.StaticText(self, -1, _("Image"))
        self.image_viewer = DicomPaintPanel(self)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.title, 0,
                       wx.ALIGN_CENTER_HORIZONTAL)
        self.sizer.Add(self.subtitle, 0,
                       wx.ALIGN_CENTER_HORIZONTAL)
        self.sizer.Add(self.image_viewer, 1, wx.ALIGN_CENTRE_HORIZONTAL \
                       | wx.SHAPED | wx.ALL, 5)
        self.sizer.Fit(self)
        self.SetSizer(self.sizer)
        self.Layout()
        self.Update()
        self.Fit()
        self.SetAutoLayout(1)
    def _bind_events(self):
        """Wire hover/click/double-click handlers on the panel and children."""
        self.Bind( wx.EVT_LEFT_DCLICK, self.OnDClick)
        #self.interactor.Bind( wx.EVT_LEFT_DCLICK, self.OnDClick)
        #self.panel.Bind( wx.EVT_LEFT_DCLICK, self.OnDClick)
        #self.title.Bind( wx.EVT_LEFT_DCLICK, self.OnDClick)
        #self.subtitle.Bind( wx.EVT_LEFT_DCLICK, self.OnDClick)
        self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        #self.interactor.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        #self.panel.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        #self.title.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        #self.subtitle.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        #self.interactor.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        #self.panel.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        #self.title.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        #self.subtitle.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnSelect)
        self.title.Bind(wx.EVT_LEFT_DOWN, self.OnSelect)
        self.subtitle.Bind(wx.EVT_LEFT_DOWN, self.OnSelect)
        self.image_viewer.Bind(wx.EVT_LEFT_DOWN, self.OnSelect)
        #self.Bind(wx.EVT_SIZE, self.OnSize)
    def SetDicomToPreview(self, dicom_info):
        """
        Set a dicom to preview.
        """
        self.dicom_info = dicom_info
        self.SetTitle(dicom_info.title)
        self.SetSubtitle(dicom_info.subtitle)
        self.ID = dicom_info.id
        dicom_info.size = self.image_viewer.GetSize()
        image = dicom_info.preview
        self.image_viewer.SetImage(image)
        self.data = dicom_info.id
        self.select_on = dicom_info.selected
        self.Select()
        self.Update()
    def SetTitle(self, title):
        self.title.SetLabel(title)
    def SetSubtitle(self, subtitle):
        self.subtitle.SetLabel(subtitle)
    def OnEnter(self, evt):
        # Hover highlight only when not selected.
        if not self.select_on:
            #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DHILIGHT)
            c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE)
            self.SetBackgroundColour(c)
    def OnLeave(self, evt):
        if not self.select_on:
            c = (PREVIEW_BACKGROUND)
            self.SetBackgroundColour(c)
    def OnSelect(self, evt):
        """Mark this preview selected and broadcast an EVT_PREVIEW_CLICK."""
        self.select_on = True
        self.dicom_info.selected = True
        ##c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT)
        ##c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HOTLIGHT)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT)
        ##c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_GRADIENTACTIVECAPTION)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVEBORDER)
        #*c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DLIGHT)
        #*c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DHILIGHT)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DHIGHLIGHT)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DDKSHADOW)
        #c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DSHADOW)
        #self.SetBackgroundColour(c)
        self.Select()
        # Generating a EVT_PREVIEW_CLICK event
        my_evt = SerieEvent(myEVT_PREVIEW_CLICK, self.GetId())
        my_evt.SetSelectedID(self.dicom_info.id)
        my_evt.SetItemData(self.dicom_info.dicom)
        my_evt.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(my_evt)
    def OnSize(self, evt):
        if self.dicom_info:
            self.SetDicomToPreview(self.dicom_info)
        evt.Skip()
    def Select(self, on=True):
        """Repaint background according to the current selection state."""
        if self.select_on:
            c = wx.SystemSettings_GetColour(wx.SYS_COLOUR_HIGHLIGHT)
        else:
            c = (PREVIEW_BACKGROUND)
        self.SetBackgroundColour(c)
        self.Refresh()
    def OnDClick(self, evt):
        """Broadcast an EVT_PREVIEW_DBLCLICK with this preview's payload."""
        my_evt = SerieEvent(myEVT_PREVIEW_DBLCLICK, self.GetId())
        my_evt.SetSelectedID(self.dicom_info.id)
        my_evt.SetItemData(self.dicom_info.dicom)
        my_evt.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(my_evt)
class DicomPreviewSeries(wx.Panel):
    """A dicom series preview panel: a scrollable NROWS x NCOLS grid of
    Preview widgets, one per dicom series of the current patient."""
    def __init__(self, parent):
        super(DicomPreviewSeries, self).__init__(parent)
        # TODO: 3 pixels between the previews is a good idea?
        # I have to test.
        #self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        #self.SetSizer(self.sizer)
        self.displayed_position = 0       # scroll position, in rows of previews
        self.nhidden_last_display = 0     # trailing previews hidden on the last refresh
        self.selected_dicom = None        # DicomInfo of the selected series, if any
        self.selected_panel = None        # Preview widget of the selected series, if any
        self._init_ui()
    def _init_ui(self):
        """Build the scrollbar and the grid sizer holding the previews."""
        scroll = wx.ScrollBar(self, -1, style=wx.SB_VERTICAL)
        self.scroll = scroll
        self.grid = wx.GridSizer(rows=NROWS, cols=NCOLS, vgap=3, hgap=3)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.AddSizer(self.grid, 1, wx.EXPAND|wx.GROW|wx.ALL, 2)
        background_sizer = wx.BoxSizer(wx.HORIZONTAL)
        background_sizer.AddSizer(sizer, 1, wx.EXPAND|wx.GROW|wx.ALL, 2)
        background_sizer.Add(scroll, 0, wx.EXPAND|wx.GROW)
        self.SetSizer(background_sizer)
        background_sizer.Fit(self)
        self.Layout()
        self.Update()
        self.SetAutoLayout(1)
        self.sizer = background_sizer
        self._Add_Panels_Preview()
        self._bind_events()
    def _Add_Panels_Preview(self):
        """Instantiate the fixed pool of Preview widgets and add them to the grid."""
        self.previews = []
        for i in xrange(NROWS):
            for j in xrange(NCOLS):
                p = Preview(self)
                p.Bind(EVT_PREVIEW_CLICK, self.OnSelect)
                #if (i == j == 0):
                #self._show_shadow(p)
                #p.Hide()
                self.previews.append(p)
                self.grid.Add(p, 1, flag=wx.EXPAND)
    #def _show_shadow(self, preview):
    #    preview.ShowShadow()
    def _bind_events(self):
        # When the user scrolls the window
        self.Bind(wx.EVT_SCROLL, self.OnScroll)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
    def OnSelect(self, evt):
        """Handle a click on a child Preview: update selection and re-emit
        the click as a myEVT_CLICK_SERIE event for the parent to handle."""
        my_evt = SerieEvent(myEVT_CLICK_SERIE, self.GetId())
        my_evt.SetSelectedID(evt.GetSelectID())
        my_evt.SetItemData(evt.GetItemData())
        if self.selected_dicom:
            # Deselect the previously selected preview, unless the same
            # preview was clicked again.
            self.selected_dicom.selected = self.selected_dicom is \
                    evt.GetEventObject().dicom_info
            self.selected_panel.select_on = self.selected_panel is evt.GetEventObject()
            self.selected_panel.Select()
        self.selected_panel = evt.GetEventObject()
        self.selected_dicom = self.selected_panel.dicom_info
        self.GetEventHandler().ProcessEvent(my_evt)
    def SetPatientGroups(self, patient):
        """Fill the panel with one preview entry per dicom group of *patient*."""
        self.files = []
        self.displayed_position = 0
        self.nhidden_last_display = 0
        group_list = patient.GetGroups()
        self.group_list = group_list
        n = 0
        for group in group_list:
            info = DicomInfo((group.dicom.patient.id,
                              group.dicom.acquisition.serie_number),
                             group.dicom,
                             group.title,
                             _("%d Images") %(group.nslices))
            self.files.append(info)
            n+=1
        # Number of preview rows needed: ceil(len(files) / NCOLS) using
        # Python 2 integer division.
        scroll_range = len(self.files)/NCOLS
        if scroll_range * NCOLS < len(self.files):
            scroll_range +=1
        self.scroll.SetScrollbar(0, NROWS, scroll_range, NCOLS)
        self._display_previews()
    def _display_previews(self):
        """Show the window of previews matching the current scroll position."""
        initial = self.displayed_position * NCOLS
        final = initial + NUM_PREVIEWS
        if len(self.files) < final:
            # Not enough files to fill the grid: hide the trailing previews.
            for i in xrange(final-len(self.files)):
                try:
                    self.previews[-i-1].Hide()
                except IndexError:
                    utils.debug("doesn't exist!")
                    pass
            self.nhidden_last_display = final-len(self.files)
        else:
            if self.nhidden_last_display:
                # Previews hidden on a previous refresh must be shown again.
                for i in xrange(self.nhidden_last_display):
                    try:
                        self.previews[-i-1].Show()
                    except IndexError:
                        utils.debug("doesn't exist!")
                        pass
                self.nhidden_last_display = 0
        for f, p in zip(self.files[initial:final], self.previews):
            p.SetDicomToPreview(f)
            if f.selected:
                self.selected_panel = p
        for f, p in zip(self.files[initial:final], self.previews):
            p.Show()
    def OnScroll(self, evt=None):
        """Refresh the visible previews when the scroll position changed.

        Called with an event from the scrollbar, or with no argument when
        the position was changed programmatically (see OnWheel)."""
        if evt:
            if self.displayed_position != evt.GetPosition():
                self.displayed_position = evt.GetPosition()
        else:
            if self.displayed_position != self.scroll.GetThumbPosition():
                self.displayed_position = self.scroll.GetThumbPosition()
        self._display_previews()
    def OnWheel(self, evt):
        """Translate a mouse-wheel notch into a one-row scroll."""
        # GetWheelRotation is +/-GetWheelDelta per notch, so d is +/-1
        # (Python 2 integer division; TODO confirm for high-resolution wheels).
        d = evt.GetWheelDelta() / evt.GetWheelRotation()
        self.scroll.SetThumbPosition(self.scroll.GetThumbPosition() - d)
        self.OnScroll()
class DicomPreviewSlice(wx.Panel):
    """A dicom preview panel: a scrollable NROWS x NCOLS grid of Preview
    widgets, one per slice of the selected dicom series."""
    def __init__(self, parent):
        super(DicomPreviewSlice, self).__init__(parent)
        # TODO: 3 pixels between the previews is a good idea?
        # I have to test.
        self.displayed_position = 0       # scroll position, in rows of previews
        self.nhidden_last_display = 0     # trailing previews hidden on the last refresh
        self.selected_dicom = None        # DicomInfo of the selected slice, if any
        self.selected_panel = None        # Preview widget of the selected slice, if any
        self._init_ui()
    def _init_ui(self):
        """Build the scrollbar and the grid sizer holding the previews."""
        scroll = wx.ScrollBar(self, -1, style=wx.SB_VERTICAL)
        self.scroll = scroll
        self.grid = wx.GridSizer(rows=NROWS, cols=NCOLS, vgap=3, hgap=3)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.AddSizer(self.grid, 1, wx.EXPAND|wx.GROW|wx.ALL, 2)
        background_sizer = wx.BoxSizer(wx.HORIZONTAL)
        background_sizer.AddSizer(sizer, 1, wx.EXPAND|wx.GROW|wx.ALL, 2)
        background_sizer.Add(scroll, 0, wx.EXPAND|wx.GROW)
        self.SetSizer(background_sizer)
        background_sizer.Fit(self)
        self.Layout()
        self.Update()
        self.SetAutoLayout(1)
        self.sizer = background_sizer
        self._Add_Panels_Preview()
        self._bind_events()
    def _Add_Panels_Preview(self):
        """Instantiate the fixed pool of Preview widgets and add them to the grid."""
        self.previews = []
        for i in xrange(NROWS):
            for j in xrange(NCOLS):
                p = Preview(self)
                p.Bind(EVT_PREVIEW_CLICK, self.OnPreviewClick)
                #p.Hide()
                self.previews.append(p)
                self.grid.Add(p, 1, flag=wx.EXPAND)
    def _bind_events(self):
        # When the user scrolls the window
        self.Bind(wx.EVT_SCROLL, self.OnScroll)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheel)
    def SetDicomDirectory(self, directory):
        """Remember *directory* and load the first dicom series found in it."""
        utils.debug("Setting Dicom Directory %s" % directory)
        self.directory = directory
        self.series = dicom_reader.GetSeries(directory)[0]
    def SetPatientGroups(self, patient):
        """Store the patient's dicom groups for later use by SetDicomSerie."""
        self.group_list = patient.GetGroups()
    def SetDicomSerie(self, pos):
        """Display the slices of the pos-th group of the stored group list."""
        self.files = []
        self.displayed_position = 0
        self.nhidden_last_display = 0
        group = self.group_list[pos]
        self.group = group
        #dicom_files = group.GetList()
        dicom_files = group.GetHandSortedList()
        n = 0
        for dicom in dicom_files:
            info = DicomInfo(n, dicom,
                             _("Image %d") % (dicom.image.number),
                             "%.2f" % (dicom.image.position[2]))
            self.files.append(info)
            n+=1
        # Number of preview rows needed: ceil(len(files) / NCOLS) using
        # Python 2 integer division.
        scroll_range = len(self.files)/NCOLS
        if scroll_range * NCOLS < len(self.files):
            scroll_range +=1
        self.scroll.SetScrollbar(0, NROWS, scroll_range, NCOLS)
        self._display_previews()
    def SetDicomGroup(self, group):
        """Display the slices of the given dicom *group* directly."""
        self.files = []
        self.displayed_position = 0
        self.nhidden_last_display = 0
        #dicom_files = group.GetList()
        dicom_files = group.GetHandSortedList()
        n = 0
        for dicom in dicom_files:
            info = DicomInfo(n, dicom,
                             _("Image %d") % (dicom.image.number),
                             "%.2f" % (dicom.image.position[2]),
                             )
            self.files.append(info)
            n+=1
        # Number of preview rows needed: ceil(len(files) / NCOLS) using
        # Python 2 integer division.
        scroll_range = len(self.files)/NCOLS
        if scroll_range * NCOLS < len(self.files):
            scroll_range +=1
        self.scroll.SetScrollbar(0, NROWS, scroll_range, NCOLS)
        self._display_previews()
    def _display_previews(self):
        """Show the window of previews matching the current scroll position."""
        initial = self.displayed_position * NCOLS
        final = initial + NUM_PREVIEWS
        if len(self.files) < final:
            # Not enough files to fill the grid: hide the trailing previews.
            for i in xrange(final-len(self.files)):
                try:
                    self.previews[-i-1].Hide()
                except IndexError:
                    utils.debug("doesn't exist!")
            self.nhidden_last_display = final-len(self.files)
        else:
            if self.nhidden_last_display:
                # Previews hidden on a previous refresh must be shown again.
                for i in xrange(self.nhidden_last_display):
                    try:
                        self.previews[-i-1].Show()
                    except IndexError:
                        utils.debug("doesn't exist!")
                self.nhidden_last_display = 0
        for f, p in zip(self.files[initial:final], self.previews):
            p.SetDicomToPreview(f)
            if f.selected:
                self.selected_panel = p
            #p.interactor.Render()
        for f, p in zip(self.files[initial:final], self.previews):
            p.Show()
    def OnPreviewClick(self, evt):
        """Handle a click on a child Preview: update selection and re-emit
        the click as a myEVT_CLICK_SLICE event for the parent to handle."""
        my_evt = SerieEvent(myEVT_CLICK_SLICE, self.GetId())
        my_evt.SetSelectedID(evt.GetSelectID())
        my_evt.SetItemData(evt.GetItemData())
        if self.selected_dicom:
            # Deselect the previously selected preview, unless the same
            # preview was clicked again.
            self.selected_dicom.selected = self.selected_dicom is \
                    evt.GetEventObject().dicom_info
            self.selected_panel.select_on = self.selected_panel is evt.GetEventObject()
            self.selected_panel.Select()
        self.selected_panel = evt.GetEventObject()
        self.selected_dicom = self.selected_panel.dicom_info
        self.GetEventHandler().ProcessEvent(my_evt)
    def OnScroll(self, evt=None):
        """Refresh the visible previews when the scroll position changed.

        Called with an event from the scrollbar, or with no argument when
        the position was changed programmatically (see OnWheel)."""
        if evt:
            if self.displayed_position != evt.GetPosition():
                self.displayed_position = evt.GetPosition()
        else:
            if self.displayed_position != self.scroll.GetThumbPosition():
                self.displayed_position = self.scroll.GetThumbPosition()
        self._display_previews()
    def OnWheel(self, evt):
        """Translate a mouse-wheel notch into a one-row scroll."""
        # GetWheelRotation is +/-GetWheelDelta per notch, so d is +/-1
        # (Python 2 integer division; TODO confirm for high-resolution wheels).
        d = evt.GetWheelDelta() / evt.GetWheelRotation()
        self.scroll.SetThumbPosition(self.scroll.GetThumbPosition() - d)
        self.OnScroll()
class SingleImagePreview(wx.Panel):
    """Panel that renders one dicom slice at a time through VTK, with a
    slider to pick the slice and an auto-play checkbox."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.__init_gui()
        self.__init_vtk()
        self.__bind_evt_gui()
        self.dicom_list = []      # slices of the current group, hand-sorted
        self.nimages = 1          # number of slices in the current group
        self.current_index = 0    # index of the slice being shown
        self.window_width = const.WINDOW_LEVEL[_("Bone")][0]
        self.window_level = const.WINDOW_LEVEL[_("Bone")][1]
    def __init_vtk(self):
        """Build the VTK pipeline: image actor, overlay texts, renderer and
        the wx render-window interactor embedded in self.panel."""
        actor = vtk.vtkImageActor()
        self.actor = actor
        # Overlay text: image size, top-left corner.
        text_image_size = vtku.Text()
        text_image_size.SetPosition(const.TEXT_POS_LEFT_UP)
        text_image_size.SetValue("")
        text_image_size.SetSize(const.TEXT_SIZE_SMALL)
        self.text_image_size = text_image_size
        # Overlay text: spacing and slice location, bottom-left corner.
        text_image_location = vtku.Text()
        text_image_location.SetVerticalJustificationToBottom()
        text_image_location.SetPosition(const.TEXT_POS_LEFT_DOWN)
        text_image_location.SetValue("")
        text_image_location.SetSize(const.TEXT_SIZE_SMALL)
        self.text_image_location = text_image_location
        # Overlay text: patient id and protocol, top-right corner.
        text_patient = vtku.Text()
        text_patient.SetJustificationToRight()
        text_patient.SetPosition(const.TEXT_POS_RIGHT_UP)
        text_patient.SetValue("")
        text_patient.SetSize(const.TEXT_SIZE_SMALL)
        self.text_patient = text_patient
        # Overlay text: acquisition date/time, bottom-right corner.
        text_acquisition = vtku.Text()
        text_acquisition.SetJustificationToRight()
        text_acquisition.SetVerticalJustificationToBottom()
        text_acquisition.SetPosition(const.TEXT_POS_RIGHT_DOWN)
        text_acquisition.SetValue("")
        text_acquisition.SetSize(const.TEXT_SIZE_SMALL)
        self.text_acquisition = text_acquisition
        renderer = vtk.vtkRenderer()
        renderer.AddActor(actor)
        renderer.AddActor(text_image_size.actor)
        renderer.AddActor(text_image_location.actor)
        renderer.AddActor(text_patient.actor)
        renderer.AddActor(text_acquisition.actor)
        self.renderer = renderer
        style = vtk.vtkInteractorStyleImage()
        interactor = wxVTKRenderWindowInteractor(self.panel, -1,
                            size=wx.Size(340,340))
        interactor.GetRenderWindow().AddRenderer(renderer)
        interactor.SetInteractorStyle(style)
        interactor.Render()
        self.interactor = interactor
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(interactor, 1, wx.GROW|wx.EXPAND)
        sizer.Fit(self.panel)
        self.panel.SetSizer(sizer)
        self.Layout()
        self.Update()
    def __init_gui(self):
        """Build the wx widgets: render panel, slice slider and auto-play box."""
        self.panel = wx.Panel(self, -1)
        slider = wx.Slider(self,
                            id=-1,
                            value=0,
                            minValue=0,
                            maxValue=99,
                            style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS)
        slider.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        slider.SetTickFreq(1, 1)
        self.slider = slider
        checkbox = wx.CheckBox(self, -1, _("Auto-play"))
        self.checkbox = checkbox
        in_sizer = wx.BoxSizer(wx.HORIZONTAL)
        in_sizer.Add(slider, 1, wx.GROW|wx.EXPAND)
        in_sizer.Add(checkbox, 0)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.panel, 20, wx.GROW|wx.EXPAND)
        sizer.Add(in_sizer, 1, wx.GROW|wx.EXPAND)
        sizer.Fit(self)
        self.SetSizer(sizer)
        self.Layout()
        self.Update()
        self.SetAutoLayout(1)
    def __bind_evt_gui(self):
        self.slider.Bind(wx.EVT_SLIDER, self.OnSlider)
        self.checkbox.Bind(wx.EVT_CHECKBOX, self.OnCheckBox)
    def OnSlider(self, evt):
        """Show the slice the user selected with the slider."""
        pos = evt.GetInt()
        self.ShowSlice(pos)
        evt.Skip()
    def OnCheckBox(self, evt):
        """Start (or allow to stop) the auto-play loop."""
        self.ischecked = evt.IsChecked()
        if evt.IsChecked():
            wx.CallAfter(self.OnRun)
        evt.Skip()
    def OnRun(self):
        """Auto-play step: advance one slice and reschedule while checked."""
        pos = self.slider.GetValue()
        pos += 1
        # Wrap back to the first slice after the last one.
        if not (self.nimages- pos):
            pos = 0
        self.slider.SetValue(pos)
        self.ShowSlice(pos)
        time.sleep(0.2)
        if self.ischecked:
            try:
                wx.Yield()
            #TODO: temporary fix necessary in the Windows XP 64 Bits
            #BUG in wxWidgets http://trac.wxwidgets.org/ticket/10896
            except(wx._core.PyAssertionError):
                utils.debug("wx._core.PyAssertionError")
            finally:
                wx.CallAfter(self.OnRun)
    def SetDicomGroup(self, group):
        """Load the slices of *group* and display the first one."""
        self.dicom_list = group.GetHandSortedList()
        self.current_index = 0
        self.nimages = len(self.dicom_list)
        # GUI
        self.slider.SetMax(self.nimages-1)
        self.slider.SetValue(0)
        self.ShowSlice()
    def ShowSlice(self, index = 0):
        """Render the slice at *index*: update overlay texts, apply the
        slice's window/level, and push the image through the VTK pipeline."""
        dicom = self.dicom_list[index]
        # UPDATE GUI
        ## Text related to size
        value = STR_SIZE %(dicom.image.size[0], dicom.image.size[1])
        self.text_image_size.SetValue(value)
        ## Text related to slice position
        value1 = STR_SPC %(dicom.image.spacing[2])
        value2 = STR_LOCAL %(dicom.image.position[2])
        value = "%s\n%s" %(value1, value2)
        self.text_image_location.SetValue(value)
        ## Text related to patient/ acquisiiton data
        value = STR_PATIENT %(dicom.patient.id,\
                              dicom.acquisition.protocol_name)
        self.text_patient.SetValue(value)
        ## Text related to acquisition date and time
        value = STR_ACQ % (dicom.acquisition.date,
                            dicom.acquisition.time)
        self.text_acquisition.SetValue(value)
        # ADJUST CONTRAST
        window_level = dicom.image.level
        window_width = dicom.image.window
        colorer = vtk.vtkImageMapToWindowLevelColors()
        colorer.SetInput(dicom.image.imagedata)
        colorer.SetWindow(float(window_width))
        colorer.SetLevel(float(window_level))
        # PLOT IMAGE INTO VIEWER
        self.actor.SetInput(colorer.GetOutput())
        self.renderer.ResetCamera()
        self.interactor.Render()
        # Setting slider position
        self.slider.SetValue(index)
|
tatiana/invesalius
|
invesalius/gui/dicom_preview_panel.py
|
Python
|
gpl-2.0
| 27,079
|
[
"VTK"
] |
83e90d5a9c7a9eb2dc5367b574afbd30db9367e80683126ede0a3afb81b741b7
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
tech_support_uploader_configuration:
description:
- Techsupportuploaderconfiguration settings for systemconfiguration.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    # The Avi SDK helpers are optional at import time; main() reports a
    # friendly error instead of crashing when they are missing.
    HAS_AVI = False
def main():
    """Ansible entry point for the Avi SystemConfiguration module.

    Builds the argument spec (module options first, then the shared Avi
    connection options), validates the SDK is available, and delegates the
    create/update/delete work to avi_ansible_api.
    """
    module_args = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'admin_auth_configuration': dict(type='dict',),
        'dns_configuration': dict(type='dict',),
        'dns_virtualservice_refs': dict(type='list',),
        'docker_mode': dict(type='bool',),
        'email_configuration': dict(type='dict',),
        'global_tenant_config': dict(type='dict',),
        'linux_configuration': dict(type='dict',),
        'mgmt_ip_access_control': dict(type='dict',),
        'ntp_configuration': dict(type='dict',),
        'portal_configuration': dict(type='dict',),
        'proxy_configuration': dict(type='dict',),
        'snmp_configuration': dict(type='dict',),
        'ssh_ciphers': dict(type='list',),
        'ssh_hmacs': dict(type='list',),
        'tech_support_uploader_configuration': dict(type='dict',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    module_args.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=module_args, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'systemconfiguration',
                           set([]))


if __name__ == '__main__':
    main()
|
RackSec/ansible
|
lib/ansible/modules/network/avi/avi_systemconfiguration.py
|
Python
|
gpl-3.0
| 6,203
|
[
"VisIt"
] |
4c921a39965a20d06cdb5d6c18f750feb939a862d939feba1c1fed5ec1a65dc1
|
import re
from .. import lut, manip
from . import helpers
# Section headers that introduce basis or ECP data, e.g. "$basis" or "$ecp".
section_re = re.compile(r'^\$(basis|ecp|cbas|jbas|jkbas)$')
# An element line: 1-3 letter element symbol followed by a description.
element_re = re.compile(r'^([a-zA-Z]{1,3})\s+(.*)$')
# A shell header: number of primitives and an angular-momentum letter.
shell_re = re.compile(r'^(\d+) +([a-zA-Z])$')
# The ECP info line, e.g. "ncore = 28 lmax = 3" (case-insensitive).
ecp_info_re = re.compile(r'^ncore\s*=\s*(\d+)\s+lmax\s*=\s*(\d+)$', flags=re.IGNORECASE)
# An ECP potential header: an AM letter, optionally "<am>-<max am>".
ecp_pot_am_re = re.compile(r'^([a-z])(-[a-z])?$')
def _parse_electron_lines(basis_lines, bs_data):
    '''Parse the lines of an electron-shell section ($basis, $cbas, ...)
    and add the resulting electron_shells entries to bs_data in place.

    Raises RuntimeError when the '*' fencing around element lines or the
    terminating '*' is malformed.
    '''
    # Strip all lines beginning with $
    basis_lines = helpers.prune_lines(basis_lines, '$')

    # Last line should be *
    # We don't need it
    if basis_lines[-1] != '*':
        raise RuntimeError("Missing terminating * line")
    basis_lines.pop()

    # Partition based on lines beginning with a character
    element_blocks = helpers.partition_lines(basis_lines, element_re.match, before=1, min_size=4)

    # Element lines should be surrounded by *
    # Check all first. the partition_lines above will eat part of the previous
    # element if the * is missing
    for element_lines in element_blocks:
        if element_lines[0] != '*':
            raise RuntimeError("Element line not preceded by *")
        if element_lines[2] != '*':
            raise RuntimeError("Element line not followed by *")

        # Check for any other lines starting with *
        for line in element_lines[3:]:
            if line.startswith('*'):
                raise RuntimeError("Found line starting with * that probably doesn't belong: " + line)

    # Now process them all
    for element_lines in element_blocks:
        element_sym, _ = helpers.parse_line_regex(element_re, element_lines[1], 'Element line')
        element_Z = lut.element_Z_from_sym(element_sym, as_str=True)
        element_data = manip.create_element_data(bs_data, element_Z, 'electron_shells')

        # Partition into shells; each shell starts with an "nprim am" header.
        shell_blocks = helpers.partition_lines(element_lines[3:], shell_re.match, min_size=2)
        for sh_lines in shell_blocks:
            nprim, shell_am = helpers.parse_line_regex(shell_re, sh_lines[0], 'shell nprim, am')
            shell_am = lut.amchar_to_int(shell_am)
            func_type = lut.function_type_from_am(shell_am, 'gto', 'spherical')

            exponents, coefficients = helpers.parse_primitive_matrix(sh_lines[1:], nprim=nprim, ngen=1)

            shell = {
                'function_type': func_type,
                'region': '',
                'angular_momentum': shell_am,
                'exponents': exponents,
                'coefficients': coefficients
            }

            element_data['electron_shells'].append(shell)
def _parse_ecp_potential_lines(element_lines, bs_data):
    '''Parse the ECP potentials of a single element and add them (plus the
    ecp_electrons count) to bs_data in place.

    *element_lines* must be: element line, "ncore = N lmax = L" line, then
    the potential blocks. Raises RuntimeError on AM/lmax inconsistencies.
    '''
    #########################################################
    # This is split out because the turbomole ECP format is
    # almost identical to the genbas ECP format
    #########################################################
    element_sym, _ = helpers.parse_line_regex(element_re, element_lines[0], 'Element line')
    element_Z = lut.element_Z_from_sym(element_sym, as_str=True)

    # We don't need the return value - we will use the one from creating ecp_electrons
    manip.create_element_data(bs_data, element_Z, 'ecp_potentials')

    # 4th line should be ncore and lmax
    n_elec, max_am = helpers.parse_line_regex(ecp_info_re, element_lines[1], 'ECP ncore, lmax')
    element_data = manip.create_element_data(bs_data, element_Z, 'ecp_electrons', key_exist_ok=False, create=int)
    element_data['ecp_electrons'] = n_elec

    # split the remaining lines by lines starting with a character
    ecp_potentials = helpers.partition_lines(element_lines[2:], lambda x: x[0].isalpha(), min_size=2)

    # Keep track of what the max AM we actually found is
    found_max = False

    for pot_lines in ecp_potentials:
        pot_am, pot_base_am = helpers.parse_line_regex(ecp_pot_am_re, pot_lines[0], 'ECP potential am')
        pot_am = lut.amchar_to_int(pot_am)

        if pot_base_am:
            # Headers like "s-f": the part after '-' must match lmax.
            pot_base_am = lut.amchar_to_int(pot_base_am[1:])  # Strip the - from the beginning
            if pot_base_am[0] != max_am:
                raise RuntimeError("Potential does not use max_am of {}. Uses {}".format(max_am, pot_base_am[0]))
        else:
            # A bare AM header is only allowed once, and only for lmax itself.
            if found_max:
                raise RuntimeError("Found multiple potentials with single AM")
            if pot_am[0] != max_am:
                raise RuntimeError("Potential with single AM {} is not the same as lmax = {}".format(
                    pot_am[0], max_am))
            found_max = True

        ecp_data = helpers.parse_ecp_table(pot_lines[1:], order=['coeff', 'r_exp', 'g_exp'])
        ecp_pot = {
            'angular_momentum': pot_am,
            'ecp_type': 'scalar_ecp',
            'r_exponents': ecp_data['r_exp'],
            'gaussian_exponents': ecp_data['g_exp'],
            'coefficients': ecp_data['coeff']
        }

        element_data['ecp_potentials'].append(ecp_pot)
def _parse_ecp_lines(basis_lines, bs_data):
    '''Parse the lines of a $ecp section and add the resulting ECP entries
    to bs_data in place (one element at a time, via
    _parse_ecp_potential_lines).
    '''
    # Drop the $-prefixed section markers.
    lines = helpers.prune_lines(basis_lines, '$')

    # The section must be terminated by a bare '*', which carries no data.
    if lines[-1] != '*':
        raise RuntimeError("Missing terminating * line")
    del lines[-1]

    # Split into per-element blocks, keeping the preceding '*' fence line.
    element_blocks = helpers.partition_lines(lines, element_re.match, before=1)

    # Validate the '*' fencing around every element line up front: when a
    # '*' is missing, partition_lines swallows part of the previous element.
    for block in element_blocks:
        if block[0] != '*':
            raise RuntimeError("Element line not preceded by *")
        if block[2] != '*':
            raise RuntimeError("Element line not followed by *")
        for line in block[3:]:
            if line.startswith('*'):
                raise RuntimeError("Found line starting with * that probably doesn't belong: " + line)

    # Parse each element: keep the element line, drop the two '*' fences.
    for block in element_blocks:
        _parse_ecp_potential_lines(block[1:2] + block[3:], bs_data)
def read_turbomole(basis_lines):
    '''Reads turbomole-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the turbomole format does not store all the fields we
    have, so some fields are left blank
    '''

    basis_lines = helpers.prune_lines(basis_lines, '#')

    # first line must begin with $, last line must be $end
    if basis_lines and not basis_lines[0].startswith('$'):
        raise RuntimeError("First line does not begin with $. Line: " + basis_lines[0])
    if basis_lines and basis_lines[-1] != '$end':
        raise RuntimeError("Last line of basis is not $end. Line: " + basis_lines[-1])

    bs_data = {}

    # Split into basis and ecp
    # Just split based on lines beginning with $
    basis_sections = helpers.partition_lines(basis_lines,
                                             lambda x: x.startswith('$') and x != '$end',
                                             min_blocks=1,
                                             max_blocks=2)

    for s in basis_sections:
        # Skip sections with no payload (every line is a $ marker).  This
        # also covers an empty section, since all() is True for an empty
        # iterable, so no separate length check is needed.
        if all(x.startswith('$') for x in s):
            continue
        if s[0].lower() == '$ecp':
            _parse_ecp_lines(s, bs_data)
        elif section_re.match(s[0]):
            _parse_electron_lines(s, bs_data)
        else:
            raise RuntimeError("Unknown section " + s[0])

    return bs_data
|
MOLSSI-BSE/basis_set_exchange
|
basis_set_exchange/readers/turbomole.py
|
Python
|
bsd-3-clause
| 7,786
|
[
"TURBOMOLE"
] |
0fd69b85f1e80c64dc972f4564b83dbbb36edafd3167d90d6e22b2ac3ae384eb
|
"""
Convergence comparison of different adaptive filtering algorithms (with
different step sizes) in white Gaussian noise.
"""
import numpy as np
import matplotlib.pyplot as plt
import adaptfilt as adf
# Generating inpud and desired signal
N = 3000
coeffs = np.concatenate(([-4, 3.2], np.zeros(20), [0.7], np.zeros(33), [-0.1]))
u = np.random.randn(N)
d = np.convolve(u, coeffs)
# Perform filtering
M = 60 # No. of taps to estimate
mu1 = 0.0008 # Step size 1 in LMS
mu2 = 0.0004 # Step size 1 in LMS
beta1 = 0.08 # Step size 2 in NLMS and AP
beta2 = 0.04 # Step size 2 in NLMS and AP
K = 3 # Projection order 1 in AP
# LMS
y_lms1, e_lms1, w_lms1 = adf.lms(u, d, M, mu1, returnCoeffs=True)
y_lms2, e_lms2, w_lms2 = adf.lms(u, d, M, mu2, returnCoeffs=True)
mswe_lms1 = adf.mswe(w_lms1, coeffs)
mswe_lms2 = adf.mswe(w_lms2, coeffs)
# NLMS
y_nlms1, e_nlms1, w_nlms1 = adf.nlms(u, d, M, beta1, returnCoeffs=True)
y_nlms2, e_nlms2, w_nlms2 = adf.nlms(u, d, M, beta2, returnCoeffs=True)
mswe_nlms1 = adf.mswe(w_nlms1, coeffs)
mswe_nlms2 = adf.mswe(w_nlms2, coeffs)
# AP
y_ap1, e_ap1, w_ap1 = adf.ap(u, d, M, beta1, K, returnCoeffs=True)
y_ap2, e_ap2, w_ap2 = adf.ap(u, d, M, beta2, K, returnCoeffs=True)
mswe_ap1 = adf.mswe(w_ap1, coeffs)
mswe_ap2 = adf.mswe(w_ap2, coeffs)
# Plot results
plt.figure()
plt.title('Convergence comparison of different adaptive filtering algorithms')
plt.plot(mswe_lms1, 'b', label='LMS with stepsize=%.4f' % mu1)
plt.plot(mswe_lms2, 'b--', label='LMS with stepsize=%.4f' % mu2)
plt.plot(mswe_nlms1, 'g', label='NLMS with stepsize=%.2f' % beta1)
plt.plot(mswe_nlms2, 'g--', label='NLMS with stepsize=%.2f' % beta2)
plt.plot(mswe_ap1, 'r', label='AP with stepsize=%.2f' % beta1)
plt.plot(mswe_ap2, 'r--', label='AP with stepsize=%.2f' % beta2)
plt.legend()
plt.grid()
plt.xlabel('Iterations')
plt.ylabel('Mean-squared weight error')
plt.show()
|
Wramberg/adaptfilt
|
examples/convergence.py
|
Python
|
mit
| 1,879
|
[
"ADF",
"Gaussian"
] |
be544ba7ce83cad5ff1d7cf415c8f170d319b6479f2e2fd76b0cb0408cdfb310
|
#!/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Created on Monday March 27
@author: Brian L. Dorney
"""
#Imports
import sys, os
import numpy as np
from Utilities import *
from ROOT import gROOT, Double, TCanvas, TDirectory, TF1, TFile, TGraph2D, TGraphErrors, TH1F, TH2F, TLegend
class AnalysisSuiteGainMap:
__slots__ = ['ADCPKPOS_SECTOR_AVG',
'ADCPKPOS_SECTOR_STDDEV',
'ANA_UNI_GRANULARITY',
'AVGCLUSTSIZE_SECTOR_AVG',
'AVGCLUSTSIZE_SECTOR_STDDEV',
'DEBUG',
'DETECTOR',
'DET_IMON_QC5_RESP_UNI',
'DET_IMON_POINTS',
'FILE_IN',
'FILE_OUT',
'GAIN_CALCULATOR',
'GAIN_LAMBDA',
'GAIN_LAMBDA_ERR',
'GAIN_AVG_POINTS',
'GAIN_STDDEV_POINTS',
'GAIN_MAX_POINTS',
'G2D_MAP_ABS_RESP_UNI',
'G2D_MAP_AVG_CLUST_SIZE_ORIG',
'G2D_MAP_AVG_CLUST_SIZE_NORM',
'G2D_MAP_GAIN_ORIG',
'PD_CALCULATOR',
'PD_AVG_POINTS',
'PD_STDDEV_POINTS',
'PD_MAX_POINTS',
'PD_MIN_POINTS'
]
#def __init__(self, inputfilename="", outputfilename="AnaSuiteGainMapOutput.root", outputfileoption="RECREATE", params_gain=PARAMS_GAIN(), params_det=PARAMS_DET(), params_discharge=PARAMS_PD(), debug=False):
    #def __init__(self, inputfilename="", outputfilename="AnaSuiteGainMapOutput.root", outputfileoption="RECREATE", params_gain=PARAMS_GAIN(), params_det=PARAMS_DET(), params_discharge=PARAMS_PD(), debug=False):
    # NOTE(review): the PARAMS_*() defaults are evaluated once at def time
    # and shared by every call that omits them; reset() mutates DETECTOR,
    # so callers relying on the defaults share state - confirm intended.
    def __init__(self, file_out, inputfilename="", params_gain=PARAMS_GAIN(), params_det=PARAMS_DET(), params_discharge=PARAMS_PD(), debug=False):
        self.ADCPKPOS_SECTOR_AVG = 0. #Average of the fitted cluster ADC PkPos in defined (ieta,iphi) sector
        self.ADCPKPOS_SECTOR_STDDEV = 0. #Std. Dev. of the fitted cluster ADC PkPos in defined (ieta,iphi) sector
        self.ANA_UNI_GRANULARITY = 32
        self.AVGCLUSTSIZE_SECTOR_AVG = 0. #Average of Average Cluster Size distributions in defined (ieta,iphi) sector
        self.AVGCLUSTSIZE_SECTOR_STDDEV = 0. #Std. Dev. of Average Cluster Size distributions in defined (ieta,iphi) sector
        self.DEBUG = debug
        self.DETECTOR = params_det
        self.DET_IMON_QC5_RESP_UNI = params_det.DET_IMON_QC5_RESP_UNI
        self.DET_IMON_POINTS = []
        self.FILE_IN = []
        # Open the input ROOT file read-only when a filename was supplied.
        if len(inputfilename) > 0:
            self.FILE_IN = TFile(str(inputfilename),"READ","",1)
        #if len(outputfilename) == 0:
        #outputfilename = inputfilename.split('/')
        #outputfilename = "AnaSuiteGainMapOutput_" + outputfilename[len(outputFileName)-1]
        #self.FILE_OUT = TFile(str(outputfilename),outputfileoption,"",1)
        self.FILE_OUT = file_out
        self.GAIN_CALCULATOR = params_gain
        self.GAIN_LAMBDA = 1.
        self.GAIN_LAMBDA_ERR = 0.
        self.GAIN_AVG_POINTS = [] #Average Gain over the entire detector
        self.GAIN_STDDEV_POINTS = [] #Std. Dev of Gain over the entire detector
        self.GAIN_MAX_POINTS = [] #Max Gain over the entire detector
        self.GAIN_MIN_POINTS = [] #Min Gain over the entire detector
        self.G2D_MAP_ABS_RESP_UNI = TGraph2D() #Absolute Response Uniformity Map
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG = TGraph2D() #Absolute Avg Cluster Size Map
        self.G2D_MAP_AVG_CLUST_SIZE_NORM = TGraph2D() #Normalized " "
        self.G2D_MAP_GAIN_ORIG = TGraph2D() #Effective Gain Map
        self.PD_CALCULATOR = params_discharge
        self.PD_AVG_POINTS = [] #Avg P_D over entire detector
        self.PD_STDDEV_POINTS = [] #Std. Dev of P_D over entire detector
        self.PD_MAX_POINTS = [] #Max P_D over the entire detector
        self.PD_MIN_POINTS = [] #Min P_D over the entire detector
        return
    def reset(self, debug=False):
        """Return the analysis suite to a pristine state: close files, zero
        scalar results, reset the detector and empty the result lists."""
        #Close TFiles
        self.closeTFiles(debug)

        #Reset Variables
        self.DEBUG = debug

        self.ADCPKPOS_SECTOR_AVG = 0.
        self.ADCPKPOS_SECTOR_STDDEV = 0.

        self.ANA_UNI_GRANULARITY = 32

        self.AVGCLUSTSIZE_SECTOR_AVG = 0.
        self.AVGCLUSTSIZE_SECTOR_STDDEV = 0.

        self.DET_IMON_QC5_RESP_UNI = 0.

        self.GAIN_LAMBDA = 1.
        self.GAIN_LAMBDA_ERR = 0.

        #Reset classes
        self.DETECTOR.reset()
        #self.GAIN_CALCULATOR
        #self.PD_CALCULATOR

        #Clear Lists (in place, so external references see the cleared lists)
        del self.DET_IMON_POINTS[:]
        del self.GAIN_AVG_POINTS[:]
        del self.GAIN_STDDEV_POINTS[:]
        del self.GAIN_MAX_POINTS[:]
        del self.GAIN_MIN_POINTS[:]
        del self.PD_AVG_POINTS[:]
        del self.PD_STDDEV_POINTS[:]
        del self.PD_MAX_POINTS[:]
        del self.PD_MIN_POINTS[:]

        #Clear TObjects?
        #self.G2D_MAP_ABS_RESP_UNI
        #self.G2D_MAP_AVG_CLUST_SIZE_ORIG
        #self.G2D_MAP_AVG_CLUST_SIZE_NORM
        #self.G2D_MAP_GAIN_ORIG

        return
    #Determines the Average & Std. Dev. ADC PkPos in the (DETPOS_IETA, DETPOS_IPHI) sector
    def avgROSectorADCPkPos(self):
        """Compute ADCPKPOS_SECTOR_AVG / ADCPKPOS_SECTOR_STDDEV from the
        fitted cluster ADC peak positions inside the configured
        (DETPOS_IETA, DETPOS_IPHI) readout sector, after outlier rejection."""
        #Load the plot
        #strPlotName = "g_iEta" + str(self.DETECTOR.DETPOS_IETA) + "_clustADC_Fit_PkPos"
        strPlotName = "SectorEta{0}/g_iEta{0}_clustADC_Fit_PkPos".format(self.DETECTOR.DETPOS_IETA)
        #gSector_clustADC_Fit_PkPos = self.FILE_IN.Get( "SectorEta" + str(self.DETECTOR.DETPOS_IETA) + "/" + strPlotName )
        gSector_clustADC_Fit_PkPos = self.FILE_IN.Get(strPlotName)

        #Calculate the iphi sector boundaries
        list_sectBoundary = self.DETECTOR.calcROSectorBoundariesByEta(self.DETECTOR.DETPOS_IETA)

        #Print to user - Section Boundaries
        #if self.DEBUG == True:
            #for i in range(0,len(list_sectBoundary)):
                #print list_sectBoundary[i]

        #Loop over points in the plot, keeping only those inside the sector
        list_clustADC_Fit_PkPos = []
        for i in range(0, gSector_clustADC_Fit_PkPos.GetN() ):
            #Get the i^th point in this plot (ROOT fills fPx/fPy by reference)
            fPx=Double(0.0)
            fPy=Double(0.0)
            gSector_clustADC_Fit_PkPos.GetPoint(i, fPx, fPy)

            #Check if this point is within the defined (ieta,iphi) sector, if so store it for later use
            if list_sectBoundary[self.DETECTOR.DETPOS_IPHI-1] <= fPx and fPx <= list_sectBoundary[self.DETECTOR.DETPOS_IPHI]:
                #Print to user - selected data points
                #if self.DEBUG == True:
                    #print "{0}\t{1}\t{2}".format(i, fPx, fPy)

                #store data point
                list_clustADC_Fit_PkPos.append(fPy)

        #Store this list as a numpy array and then remove all outliers
        array_clustADC_Fit_PkPos = np.array(list_clustADC_Fit_PkPos)
        array_clustADC_Fit_PkPos = rejectOutliers(array_clustADC_Fit_PkPos)

        if self.DEBUG:
            #print "np.mean(list_clustADC_Fit_PkPos) = " + str(np.mean(list_clustADC_Fit_PkPos))
            print "np.mean(list_clustADC_Fit_PkPos) = {0}".format(np.mean(list_clustADC_Fit_PkPos))
            #print "np.mean(array_clustADC_Fit_PkPos) = " + str(np.mean(array_clustADC_Fit_PkPos)) + "\t No Outliers"
            print "np.mean(array_clustADC_Fit_PkPos) = {0}\t No Outliers".format(str(np.mean(array_clustADC_Fit_PkPos)))

        #Calculate Average
        self.ADCPKPOS_SECTOR_AVG = np.mean(array_clustADC_Fit_PkPos) #Average of the fitted cluster ADC PkPos in defined (ieta,iphi) sector
        self.ADCPKPOS_SECTOR_STDDEV = np.std(array_clustADC_Fit_PkPos) #Std Dev of the fitted cluster ADC PkPos in defined (ieta,iphi) sector

        #print "Avg PkPos = " + str(self.ADCPKPOS_SECTOR_AVG) + "+/-" + str(self.ADCPKPOS_SECTOR_STDDEV)
        print "Avg PkPos = {0}+/-{1}".format(self.ADCPKPOS_SECTOR_AVG, self.ADCPKPOS_SECTOR_STDDEV)

        return
#Determine the average of the average cluster sizes within a single readout sector
    def avgROSectorAvgClustSize(self):
        """Compute the mean and std. dev. of the per-slice average cluster
        size within the (DETPOS_IETA, DETPOS_IPHI) readout sector.

        For each x-bin of the clustSize-vs-clustPos histogram that lies in
        the selected iphi sector, projects out the cluster-size distribution
        and takes its mean; outliers are rejected before averaging.  Results
        are stored in AVGCLUSTSIZE_SECTOR_AVG / AVGCLUSTSIZE_SECTOR_STDDEV.
        """
        #Load the plot
        strPlotName = "SectorEta{0}/h_iEta{0}_clustSize_v_clustPos".format(self.DETECTOR.DETPOS_IETA)
        hSector_clustSize_v_clustPos = self.FILE_IN.Get( strPlotName )
        #Calculate the iphi sector boundaries
        list_sectBoundary = self.DETECTOR.calcROSectorBoundariesByEta(self.DETECTOR.DETPOS_IETA)
        #Print to user - Section Boundaries
        if self.DEBUG == True:
            for i in range(0,len(list_sectBoundary)):
                print list_sectBoundary[i]
        #Loop over points in the plot (histogram bins are 1-indexed in ROOT)
        list_avgClustSize = []
        for i in range(1, hSector_clustSize_v_clustPos.GetNbinsX() +1):
            fBinCenter = hSector_clustSize_v_clustPos.GetXaxis().GetBinCenter(i)
            #Check if this point is within the defined (ieta,iphi) sector, if so store it for later use
            if list_sectBoundary[self.DETECTOR.DETPOS_IPHI-1] <= fBinCenter and fBinCenter <= list_sectBoundary[self.DETECTOR.DETPOS_IPHI]:
                #Project out cluster size distribution for *this* slice
                strPlotName = "h_iEta{0}Slice{1}_clustSize".format(self.DETECTOR.DETPOS_IETA,i)
                h_clustSize = hSector_clustSize_v_clustPos.ProjectionY(strPlotName, i, i, "")
                fAvgClustSize = h_clustSize.GetMean()
                #store data point
                list_avgClustSize.append(fAvgClustSize)
                #Print to user - selected data points
                if self.DEBUG == True:
                    print "{0}\t{1}\t{2}".format(i,fBinCenter,fAvgClustSize)
        #Store this list as a numpy array and then remove all outliers
        array_avgClustSize = np.array(list_avgClustSize)
        array_avgClustSize = rejectOutliers(array_avgClustSize)
        if self.DEBUG:
            print "np.mean(list_avgClustSize) = {0}".format(np.mean(list_avgClustSize))
            print "np.mean(array_avgClustSize) = {0}\t No Outliers".format(np.mean(array_avgClustSize))
        #Calculate Average
        self.AVGCLUSTSIZE_SECTOR_AVG = np.mean(array_avgClustSize) #Average of per-slice avg cluster size in defined (ieta,iphi) sector
        self.AVGCLUSTSIZE_SECTOR_STDDEV = np.std(array_avgClustSize) #Std. Dev. of per-slice avg cluster size in defined (ieta,iphi) sector
        print "Avg of Avg Clust Size = {0}+/-{1}".format(self.AVGCLUSTSIZE_SECTOR_AVG,self.AVGCLUSTSIZE_SECTOR_STDDEV)
        return
#alpha(x) = exp([0]*(x-x0) ) where x is hvPt and x0 is self.DET_IMON_QC5_RESP_UNI
def calcAlpha(self, hvPt):
return np.exp(self.GAIN_CALCULATOR.GAIN_CURVE_P0 * (hvPt - self.DETECTOR.DET_IMON_QC5_RESP_UNI) )
#Determines the linear correlation factor lambda which relates Gain to ADC counts
    def calcROSectorLambda(self):
        """Compute lambda = gain / <ADC PkPos>, the linear factor relating
        effective gain to ADC counts in the selected sector, and its
        propagated uncertainty (stored in GAIN_LAMBDA / GAIN_LAMBDA_ERR).
        """
        gain = self.GAIN_CALCULATOR.calcGain(self.DET_IMON_QC5_RESP_UNI)
        gain_err = self.GAIN_CALCULATOR.calcGainErr(self.DET_IMON_QC5_RESP_UNI)
        self.GAIN_LAMBDA = gain / self.ADCPKPOS_SECTOR_AVG
        # Ratio error propagation; the negative cross term corresponds to
        # treating the gain and PkPos uncertainties as fully correlated.
        # NOTE(review): confirm the intended correlation assumption.
        self.GAIN_LAMBDA_ERR = ( 1. / self.ADCPKPOS_SECTOR_AVG ) * np.sqrt( np.square(gain_err) + np.square(self.ADCPKPOS_SECTOR_STDDEV * gain / self.ADCPKPOS_SECTOR_AVG) - 2. * gain_err * self.ADCPKPOS_SECTOR_STDDEV * gain / self.ADCPKPOS_SECTOR_AVG)
        print "lambda = {0}+/-{1}".format(self.GAIN_LAMBDA,self.GAIN_LAMBDA_ERR)
        return
#Determines the gain map from the absolute response uniformity map
    def calcGainMap(self, strDetName):
        """Build the effective-gain map G2D_MAP_GAIN_ORIG from the absolute
        response-uniformity map (gain = GAIN_LAMBDA * ADC PkPos per point),
        append detector-wide gain and discharge-probability summary
        statistics, and write map + canvas to FILE_OUT.

        strDetName -- detector name used in plot/canvas/directory names.
        """
        #Load the absolute response uniformity map
        strPlotName = "Summary/g2D_{0}_ResponseFitPkPos_AllEta".format(strDetName)
        if self.DEBUG:
            print "Attempted to Load:"
            print strPlotName
        self.G2D_MAP_ABS_RESP_UNI = self.FILE_IN.Get( strPlotName )
        #Setup the gain map (same number of points as the response map)
        self.G2D_MAP_GAIN_ORIG.Set( self.G2D_MAP_ABS_RESP_UNI.GetN() )
        self.G2D_MAP_GAIN_ORIG.SetName( "g2D_{0}_EffGain_AllEta_{1}".format(strDetName, int(self.DET_IMON_QC5_RESP_UNI) ) )
        #Get the arrays that make the response uniformity map
        array_fPx = self.G2D_MAP_ABS_RESP_UNI.GetX()
        array_fPy = self.G2D_MAP_ABS_RESP_UNI.GetY()
        array_fPz = self.G2D_MAP_ABS_RESP_UNI.GetZ()
        #Loop Over all Points of self.G2D_MAP_ABS_RESP_UNI
        array_Gain_Vals = np.zeros(self.G2D_MAP_ABS_RESP_UNI.GetN())
        array_PD_Vals = np.zeros(self.G2D_MAP_ABS_RESP_UNI.GetN())
        for i in range(0, self.G2D_MAP_ABS_RESP_UNI.GetN() ):
            #Set the i^th point in self.G2D_MAP_GAIN_ORIG (z scaled by lambda)
            array_Gain_Vals[i] = array_fPz[i] * self.GAIN_LAMBDA
            array_PD_Vals[i] = self.PD_CALCULATOR.calcPD(array_fPz[i] * self.GAIN_LAMBDA)
            self.G2D_MAP_GAIN_ORIG.SetPoint(i, array_fPx[i], array_fPy[i], array_fPz[i] * self.GAIN_LAMBDA)
        #Store Average, Std. Dev., Max, & Min Gain (after outlier rejection)
        array_Gain_Vals = rejectOutliers(array_Gain_Vals)
        self.DET_IMON_POINTS.append(self.DET_IMON_QC5_RESP_UNI)
        self.GAIN_AVG_POINTS.append(np.mean(array_Gain_Vals) )
        self.GAIN_STDDEV_POINTS.append(np.std(array_Gain_Vals) )
        self.GAIN_MAX_POINTS.append(np.max(array_Gain_Vals) )
        self.GAIN_MIN_POINTS.append(np.min(array_Gain_Vals) )
        #Store Average, Std. Dev., Max & Min P_D (after outlier rejection)
        array_PD_Vals = rejectOutliers(array_PD_Vals)
        self.PD_AVG_POINTS.append(np.mean(array_PD_Vals) )
        self.PD_STDDEV_POINTS.append(np.std(array_PD_Vals) )
        self.PD_MAX_POINTS.append(np.max(array_PD_Vals) )
        self.PD_MIN_POINTS.append(np.min(array_PD_Vals) )
        #Draw the effective gain map (log-z color scale)
        canv_Gain_Map_Orig = TCanvas("canv_{0}_EffGain_AllEta_{1}".format(strDetName, int(self.DET_IMON_QC5_RESP_UNI)),"Gain Map - Original {0}".format(self.DET_IMON_QC5_RESP_UNI),600,600)
        canv_Gain_Map_Orig.cd()
        canv_Gain_Map_Orig.cd().SetLogz(1)
        self.G2D_MAP_GAIN_ORIG.Draw("TRI2Z")
        #Write the effective gain map to the output file
        dir_hvOrig = self.FILE_OUT.mkdir( "GainMap_HVPt{0}".format(int(self.DET_IMON_QC5_RESP_UNI)) )
        dir_hvOrig.cd()
        canv_Gain_Map_Orig.Write()
        self.G2D_MAP_GAIN_ORIG.Write()
        return
#Determines the gain map from the absolute response uniformity map for an arbitrary voltage
    def calcGainMapHV(self, strDetName, hvPt):
        """Build effective-gain and discharge-probability maps for an
        arbitrary HV point by scaling the original gain map by alpha(hvPt).

        Appends summary statistics to the point lists, writes both maps and
        their canvases to a new 'GainMap_HVPt<hvPt>' directory of FILE_OUT,
        and returns the new gain TGraph2D.

        strDetName -- detector name used in plot/canvas/directory names.
        hvPt       -- high-voltage (imon) point to evaluate.
        """
        #Create the new TGraph2D - Gain
        g2D_Map_Gain_hvPt = TGraph2D( self.G2D_MAP_GAIN_ORIG.GetN() )
        g2D_Map_Gain_hvPt.SetName( "g2D_{0}_EffGain_AllEta_{1}".format(strDetName, int(hvPt)) )
        #Create the new TGraph2D - Discharge Probability
        g2D_Map_PD_hvPt = TGraph2D( self.G2D_MAP_GAIN_ORIG.GetN() )
        g2D_Map_PD_hvPt.SetName( "g2D_{0}_PD_AllEta_{1}".format(strDetName, int(hvPt)) )
        #Get the arrays that make the original gain map
        array_fPx = self.G2D_MAP_GAIN_ORIG.GetX()
        array_fPy = self.G2D_MAP_GAIN_ORIG.GetY()
        array_fPz = self.G2D_MAP_GAIN_ORIG.GetZ()
        #Calculate alpha (common multiplicative factor for all points)
        alpha = self.calcAlpha(hvPt)
        #Loop Over all Points of self.G2D_MAP_ABS_RESP_UNI
        array_Gain_Vals = np.zeros(self.G2D_MAP_ABS_RESP_UNI.GetN())
        array_PD_Vals = np.zeros(self.G2D_MAP_ABS_RESP_UNI.GetN())
        for i in range(0, self.G2D_MAP_ABS_RESP_UNI.GetN() ):
            #Set the i^th point in the new maps (gain scaled by alpha)
            array_Gain_Vals[i] = array_fPz[i] * alpha
            array_PD_Vals[i] = self.PD_CALCULATOR.calcPD(array_fPz[i] * alpha)
            g2D_Map_Gain_hvPt.SetPoint(i, array_fPx[i], array_fPy[i], array_fPz[i] * alpha)
            g2D_Map_PD_hvPt.SetPoint(i, array_fPx[i], array_fPy[i], self.PD_CALCULATOR.calcPD(array_fPz[i] * alpha) )
        #Store Average, Std. Dev., Max, & Min Gain (after outlier rejection)
        array_Gain_Vals = rejectOutliers(array_Gain_Vals)
        self.DET_IMON_POINTS.append(hvPt)
        self.GAIN_AVG_POINTS.append(np.mean(array_Gain_Vals) )
        self.GAIN_STDDEV_POINTS.append(np.std(array_Gain_Vals) )
        self.GAIN_MAX_POINTS.append(np.max(array_Gain_Vals) )
        self.GAIN_MIN_POINTS.append(np.min(array_Gain_Vals) )
        #Store Average, Std. Dev., Max & Min P_D (after outlier rejection)
        array_PD_Vals = rejectOutliers(array_PD_Vals)
        self.PD_AVG_POINTS.append(np.mean(array_PD_Vals) )
        self.PD_STDDEV_POINTS.append(np.std(array_PD_Vals) )
        self.PD_MAX_POINTS.append(np.max(array_PD_Vals) )
        self.PD_MIN_POINTS.append(np.min(array_PD_Vals) )
        #Draw the effective gain map (log-z color scale)
        canv_Gain_Map_hvPt = TCanvas("canv_{0}_EffGain_AllEta_{1}".format(strDetName, int(hvPt)),"Gain Map - hvPt = {0}".format(hvPt),600,600)
        canv_Gain_Map_hvPt.cd()
        canv_Gain_Map_hvPt.cd().SetLogz(1)
        g2D_Map_Gain_hvPt.Draw("TRI2Z")
        #Draw the discharge probability map (log-z color scale)
        canv_PD_Map_hvPt = TCanvas("canv_{0}_PD_AllEta_{1}".format(strDetName,int(hvPt)),"Discharge Probability Map - hvPt = {0}".format(hvPt),600,600)
        canv_PD_Map_hvPt.cd()
        canv_PD_Map_hvPt.cd().SetLogz(1)
        g2D_Map_PD_hvPt.Draw("TRI2Z")
        #Write both maps to the output file
        dir_hvPt = self.FILE_OUT.mkdir( "GainMap_HVPt{0}".format(int(hvPt)) )
        dir_hvPt.cd()
        canv_Gain_Map_hvPt.Write()
        g2D_Map_Gain_hvPt.Write()
        canv_PD_Map_hvPt.Write()
        g2D_Map_PD_hvPt.Write()
        return g2D_Map_Gain_hvPt
#Determines the average cluster size map for the entire detector
    def calcClusterSizeMap(self, strDetName):
        """Build absolute and normalized average-cluster-size maps for the
        whole detector and write them to the existing 'GainMap_HVPt<...>'
        directory of FILE_OUT.

        The number of points is derived from the detector geometry
        (granularity * eta sectors * readout connectors) rather than from the
        response map, since the fitting stage may drop points there.

        strDetName -- detector name used in plot/canvas names.
        """
        #Create the container which will store the clusterSize
        iNEtaSectors = len(self.DETECTOR.LIST_DET_GEO_PARAMS)
        iNBinNum = self.ANA_UNI_GRANULARITY * iNEtaSectors * self.DETECTOR.LIST_DET_GEO_PARAMS[0].NBCONNECT
        array_shape = (iNBinNum, 3)
        array_clustSize = np.zeros(array_shape)
        #Create the average cluster size map - absolute
        strPlotName = "g2D_{0}_AvgClustSize_AllEta_{1}".format(strDetName,int(self.DET_IMON_QC5_RESP_UNI))
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG.Set( iNBinNum ) #Set number of pts, see comments above
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG.SetName( strPlotName )
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG.SetTitle("")
        #Create the average cluster size map - normalized to the sector average
        strPlotName = "g2D_{0}_AvgClustSizeNormalized_AllEta_{1}".format(strDetName, int(self.DET_IMON_QC5_RESP_UNI))
        self.G2D_MAP_AVG_CLUST_SIZE_NORM.Set( iNBinNum ) #Set number of pts, see comments above
        self.G2D_MAP_AVG_CLUST_SIZE_NORM.SetName( strPlotName )
        self.G2D_MAP_AVG_CLUST_SIZE_NORM.SetTitle("")
        for iEta in range(1, iNEtaSectors+1):
            #Get the Eta Sector
            etaSector = self.DETECTOR.LIST_DET_GEO_PARAMS[iEta-1]
            #Load the cluster size vs cluster position plot for this iEta value
            strPlotName = "SectorEta{0}/h_iEta{0}_clustSize_v_clustPos".format(iEta)
            if self.DEBUG:
                print "Attempted to Load:"
                print strPlotName
            h_clustSize_v_clustPos = self.FILE_IN.Get( strPlotName )
            #Loop over the x-bins of this plot (1-indexed in ROOT)
            for iSlice in range(1, h_clustSize_v_clustPos.GetNbinsX() + 1):
                #Project out cluster size distribution for *this* slice
                strPlotName = "h_iEta{0}Slice{1}_clustSize".format(iEta,iSlice)
                h_clustSize = h_clustSize_v_clustPos.ProjectionY(strPlotName, iSlice, iSlice, "")
                #Store average cluster size, y-position and x-position
                array_clustSize[ (iEta-1) * h_clustSize_v_clustPos.GetNbinsX() + iSlice-1 ] = (h_clustSize_v_clustPos.GetXaxis().GetBinCenter(iSlice), etaSector.SECTPOS, h_clustSize.GetMean() )
                #Set this point in the plot - Absolute
                self.G2D_MAP_AVG_CLUST_SIZE_ORIG.SetPoint( (iEta-1) * h_clustSize_v_clustPos.GetNbinsX() + iSlice-1, h_clustSize_v_clustPos.GetXaxis().GetBinCenter(iSlice), etaSector.SECTPOS, h_clustSize.GetMean() )
                #Set this point in the plot - Normalized
                self.G2D_MAP_AVG_CLUST_SIZE_NORM.SetPoint( (iEta-1) * h_clustSize_v_clustPos.GetNbinsX() + iSlice-1, h_clustSize_v_clustPos.GetXaxis().GetBinCenter(iSlice), etaSector.SECTPOS, h_clustSize.GetMean() / self.AVGCLUSTSIZE_SECTOR_AVG )
        #Print the cluster map to the user if requested
        if self.DEBUG:
            print "Average Cluster Size Map:"
            print array_clustSize
        #Draw the average cluster size map - Absolute
        canv_AvgClustSize_Map_Orig = TCanvas("canv_{0}_AvgClustSize_AllEta_{1}".format(strDetName,int(self.DET_IMON_QC5_RESP_UNI)),"Average Cluster Size Map - Original {0}".format(self.DET_IMON_QC5_RESP_UNI),600,600)
        canv_AvgClustSize_Map_Orig.cd()
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG.Draw("TRI2Z")
        #Draw the average cluster size map - Normalized
        canv_AvgClustSize_Map_Norm = TCanvas("canv_{0}_AvgClustSizeNormalized_AllEta_{1}".format(strDetName, int(self.DETECTOR.DET_IMON_QC5_RESP_UNI)),"Average Cluster Size Map - Normalized {0}".format(self.DETECTOR.DET_IMON_QC5_RESP_UNI),600,600)
        canv_AvgClustSize_Map_Norm.cd()
        self.G2D_MAP_AVG_CLUST_SIZE_NORM.Draw("TRI2Z")
        #Write the average cluster size map to the output file.
        #NOTE(review): the absolute map keys off self.DET_IMON_QC5_RESP_UNI
        #while the normalized map and this directory lookup use
        #self.DETECTOR.DET_IMON_QC5_RESP_UNI -- confirm both stay in sync.
        dir_hvOrig = self.FILE_OUT.GetDirectory( "GainMap_HVPt{0}".format(int(self.DETECTOR.DET_IMON_QC5_RESP_UNI)), False, "GetDirectory" )
        dir_hvOrig.cd()
        canv_AvgClustSize_Map_Orig.Write()
        self.G2D_MAP_AVG_CLUST_SIZE_ORIG.Write()
        canv_AvgClustSize_Map_Norm.Write()
        self.G2D_MAP_AVG_CLUST_SIZE_NORM.Write()
        return
#Closes TFiles
def closeTFiles(self, debug=False):
if self.FILE_IN.IsOpen():
self.FILE_IN.Close()
if self.FILE_OUT.IsOpen():
self.FILE_OUT.Close()
return
#Plot Average Gain Over Entire Detector Area
    def plotGainSummary(self, strDetName):
        """Plot effective gain vs. HV (imon) point over the whole detector:
        average (with std.-dev. error bars), maximum, and minimum curves,
        written to a newly created 'Summary' directory of FILE_OUT.

        strDetName -- detector name used in plot/canvas names.
        """
        #Create the Plot - Average
        gDet_AvgEffGain = TGraphErrors( len(self.GAIN_AVG_POINTS) )
        gDet_AvgEffGain.SetName("g_{0}_EffGainAvg".format(strDetName))
        #Create the Plot - Max Gain
        gDet_MaxEffGain = TGraphErrors( len(self.GAIN_MAX_POINTS) )
        gDet_MaxEffGain.SetName("g_{0}_EffGainMax".format(strDetName))
        #Create the Plot - Min Gain
        gDet_MinEffGain = TGraphErrors( len(self.GAIN_MIN_POINTS) )
        gDet_MinEffGain.SetName("g_{0}_EffGainMin".format(strDetName))
        #Set the points (x = imon point, y = gain statistic)
        for i in range(0, len(self.GAIN_AVG_POINTS) ):
            #Average
            gDet_AvgEffGain.SetPoint(i,self.DET_IMON_POINTS[i],self.GAIN_AVG_POINTS[i])
            gDet_AvgEffGain.SetPointError(i,0,self.GAIN_STDDEV_POINTS[i])
            #Max
            gDet_MaxEffGain.SetPoint(i,self.DET_IMON_POINTS[i],self.GAIN_MAX_POINTS[i])
            #Min
            gDet_MinEffGain.SetPoint(i,self.DET_IMON_POINTS[i],self.GAIN_MIN_POINTS[i])
        #Draw (log-y; max/min drawn as envelope lines over the averages)
        canv_AvgEffGain = TCanvas("canv_{0}_EffGainAvg".format(strDetName),"{0} Average Effective Gain".format(strDetName),600,600)
        canv_AvgEffGain.cd()
        canv_AvgEffGain.cd().SetLogy()
        gDet_AvgEffGain.GetXaxis().SetTitle("HV")
        gDet_AvgEffGain.GetYaxis().SetTitle("#LT Effective Gain #GT")
        gDet_AvgEffGain.GetYaxis().SetRangeUser(1e2,1e6)
        gDet_AvgEffGain.SetMarkerStyle(21)
        gDet_AvgEffGain.Draw("AP")
        gDet_MaxEffGain.Draw("sameL")
        gDet_MinEffGain.Draw("sameL")
        #Write (creates the Summary directory used later by plotPDSummary)
        dir_Summary = self.FILE_OUT.mkdir("Summary")
        dir_Summary.cd()
        canv_AvgEffGain.Write()
        gDet_AvgEffGain.Write()
        gDet_MaxEffGain.Write()
        gDet_MinEffGain.Write()
        return
#Plot Average Gain Over Entire Detector Area
def plotPDSummary(self, strDetName):
#Create the Plot - Average
gDet_AvgPD = TGraphErrors( len(self.PD_AVG_POINTS) )
#gDet_AvgPD.SetName("g_" + strDetName + "_PDAvg")
gDet_AvgPD.SetName("g_{0}_PDAvg".format(strDetName))
#Create the Plot - Max Gain
gDet_MaxPD = TGraphErrors( len(self.PD_MAX_POINTS) )
#gDet_MaxPD.SetName("g_" + strDetName + "_PDMax")
gDet_MaxPD.SetName("g_{0}_PDMax".format(strDetName))
#Create the Plot - Min Gain
gDet_MinPD = TGraphErrors( len(self.PD_MIN_POINTS) )
gDet_MinPD.SetName("g_" + strDetName + "_PDMin")
gDet_MinPD.SetName("g_{0}_PDMin".format(strDetName))
#Set the points
for i in range(0, len(self.PD_AVG_POINTS) ):
#Average
gDet_AvgPD.SetPoint(i,self.GAIN_AVG_POINTS[i],self.PD_AVG_POINTS[i])
gDet_AvgPD.SetPointError(i,self.GAIN_STDDEV_POINTS[i],self.PD_STDDEV_POINTS[i])
#Max
gDet_MaxPD.SetPoint(i,self.GAIN_AVG_POINTS[i],self.PD_MAX_POINTS[i])
#Min
gDet_MinPD.SetPoint(i,self.GAIN_AVG_POINTS[i],self.PD_MIN_POINTS[i])
#Draw
#canv_AvgPD = TCanvas("canv_" + strDetName + "_PDAvg",strDetName + " Discharge Probability",600,600)
canv_AvgPD = TCanvas("canv_{0}_PDAvg".format(strDetName),"{0} Discharge Probability".format(strDetName),600,600)
canv_AvgPD.cd()
canv_AvgPD.cd().SetLogx()
canv_AvgPD.cd().SetLogy()
gDet_AvgPD.GetXaxis().SetTitle("#LT Effective Gain #GT")
gDet_AvgPD.GetYaxis().SetTitle("Discharge Probability P_{D}")
gDet_AvgPD.GetYaxis().SetRangeUser(1e-11,1e-6)
gDet_AvgPD.SetMarkerStyle(21)
gDet_AvgPD.Draw("AP")
gDet_MaxPD.Draw("sameL")
gDet_MinPD.Draw("sameL")
#Write
dir_Summary = self.FILE_OUT.GetDirectory("Summary")
dir_Summary.cd()
canv_AvgPD.Write()
gDet_AvgPD.Write()
gDet_MaxPD.Write()
gDet_MinPD.Write()
return
#Open Input File
def openInputFile(self, inputfilename):
self.FILE_IN = TFile(str(inputfilename),"READ","",1)
return
#Set the detector
def setDetector(self, params_det=PARAMS_DET()):
self.DETECTOR = params_det
return
|
bdorney/CMS_GEM_Analysis_Framework
|
python/AnalysisSuiteGainMap.py
|
Python
|
gpl-3.0
| 30,373
|
[
"Brian"
] |
fbf3e47eb21a43e01efa99a2fa2e5fb075a6ceb6b34c36cb71561e101991a100
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Prefer hashlib (available since Python 2.5); fall back to the deprecated
# md5 module on ancient interpreters.  Either way, md5er is a callable
# returning a new md5 hash object.
try:
  import hashlib
  md5er = hashlib.md5
except ImportError:
  # The old form 'except ImportError, e' bound an unused name and is a
  # syntax error on Python 3; the exception object was never used.
  import md5
  md5er = md5.new
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
# Disabled LINT rules and reason.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
# Whitelist of cpplint categories enforced by this script; each entry is
# passed to cpplint as a '+<rule>' filter after everything is disabled with
# a leading '-' (see CppLintProcessor.ProcessFiles).
ENABLED_LINT_RULES = """
build/class
build/deprecated
build/endif_comment
build/forward_decl
build/include_alpha
build/include_order
build/printf_format
build/storage_class
legal/copyright
readability/boost
readability/braces
readability/casting
readability/constructors
readability/fn_size
readability/function
readability/multiline_comment
readability/multiline_string
readability/streams
readability/todo
readability/utf8
runtime/arrays
runtime/casting
runtime/deprecated_fn
runtime/explicit
runtime/int
runtime/memset
runtime/mutex
runtime/nonconf
runtime/printf
runtime/printf_format
runtime/rtti
runtime/sizeof
runtime/string
runtime/virtual
runtime/vlog
whitespace/blank_line
whitespace/braces
whitespace/comma
whitespace/comments
whitespace/ending_newline
whitespace/indent
whitespace/labels
whitespace/line_length
whitespace/newline
whitespace/operators
whitespace/parens
whitespace/tab
whitespace/todo
""".split()
# TODO(bmeurer): Fix and re-enable readability/check
# Matches the cpplint output lines we keep: 'file:line' / 'file(line)'
# diagnostics plus the final 'Done processing' marker.
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
def CppLintWorker(command):
  """Run one cpplint invocation and return its error count.

  command -- argv list whose last element is the file being linted.
  Returns the number of diagnostic lines cpplint printed, or 1 when cpplint
  produced no parseable output for the file.  Returns None if interrupted
  or if cpplint could not be run at all.
  """
  process = None
  try:
    process = subprocess.Popen(command, stderr=subprocess.PIPE)
    process.wait()
    out_lines = ""
    error_count = -1
    while True:
      out_line = process.stderr.readline()
      if out_line == '' and process.poll() is not None:
        if error_count == -1:
          # cpplint emitted nothing matching LINT_OUTPUT_PATTERN at all.
          print("Failed to process %s" % command.pop())
          return 1
        break
      m = LINT_OUTPUT_PATTERN.match(out_line)
      if m:
        out_lines += out_line
        error_count += 1
    sys.stdout.write(out_lines)
    return error_count
  except KeyboardInterrupt:
    process.kill()
  except Exception:
    # Was a bare 'except:', which also swallowed SystemExit; additionally,
    # process may be unbound if Popen itself raised.
    print('Error running cpplint.py. Please make sure you have depot_tools' +
          ' in your $PATH. Lint check skipped.')
    if process is not None:
      process.kill()
class FileContentsCache(object):
  """Persistent (pickled) map of file name -> md5 digest, used to skip
  re-checking files whose contents have not changed between runs."""

  def __init__(self, sums_file_name):
    self.sums = {}                        # file name -> md5 digest (bytes)
    self.sums_file_name = sums_file_name  # on-disk pickle location

  def Load(self):
    """Populate self.sums from the pickle file.  Any failure (missing or
    corrupt cache) leaves the cache empty, so every file is simply treated
    as changed."""
    try:
      # Binary mode is required for pickle data (and was previously 'r',
      # which also made 'sums_file' unbound in the cleanup path when
      # open() itself raised).
      with open(self.sums_file_name, 'rb') as sums_file:
        self.sums = pickle.load(sums_file)
    except Exception:
      # Cannot read/parse the cache for any reason. Not much we can do.
      pass

  def Save(self):
    """Write self.sums to the pickle file, removing any half-written file
    on failure."""
    try:
      with open(self.sums_file_name, 'wb') as sums_file:
        pickle.dump(self.sums, sums_file)
    except Exception:
      # Failed to write pickle. Try to clean-up behind us.
      try:
        os.unlink(self.sums_file_name)
      except OSError:
        pass

  def FilterUnchangedFiles(self, files):
    """Return the subset of |files| that are new or whose md5 differs from
    the cached digest, updating the cache as a side effect."""
    changed_or_new = []
    for file in files:
      # Read as bytes: md5 is computed over raw contents, platform-neutral.
      with open(file, "rb") as handle:
        file_sum = md5er(handle.read()).digest()
      if not file in self.sums or self.sums[file] != file_sum:
        changed_or_new.append(file)
        self.sums[file] = file_sum
    return changed_or_new

  def RemoveFile(self, file):
    """Drop |file| from the cache so it is re-checked on the next run."""
    if file in self.sums:
      self.sums.pop(file)
class SourceFileProcessor(object):
  """
  Utility class that can run through a directory structure, find all relevant
  files and invoke a custom check on the files.

  Subclasses supply IsRelevant, GetPathsToSearch and ProcessFiles.
  """

  def Run(self, path):
    """Collect every relevant file under |path| and hand the full list to
    ProcessFiles; returns True iff the check passed."""
    all_files = []
    for subdir in self.GetPathsToSearch():
      all_files.extend(self.FindFilesIn(join(path, subdir)))
    return bool(self.ProcessFiles(all_files, path))

  def IgnoreDir(self, name):
    """Directories never descended into: hidden dirs plus bundled
    third-party benchmark/test suites."""
    if name.startswith('.'):
      return True
    return name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
                    'octane', 'sunspider')

  def IgnoreFile(self, name):
    """Hidden files are always skipped."""
    return name.startswith('.')

  def FindFilesIn(self, path):
    """Walk |path| recursively, pruning ignored directories, and return all
    relevant, non-ignored file paths."""
    result = []
    for root, dirs, files in os.walk(path):
      # Prune in place so os.walk does not descend into ignored dirs.
      dirs[:] = [d for d in dirs if not self.IgnoreDir(d)]
      for name in files:
        if not self.IgnoreFile(name) and self.IsRelevant(name):
          result.append(join(root, name))
    return result
class CppLintProcessor(SourceFileProcessor):
  """
  Lint files to check that they follow the google code style.
  """

  def IsRelevant(self, name):
    """Only C++ sources and headers are linted."""
    return name.endswith('.cc') or name.endswith('.h')

  def IgnoreDir(self, name):
    return (super(CppLintProcessor, self).IgnoreDir(name)
              or (name == 'third_party'))

  # Files exempt from lint checking.
  IGNORE_LINT = ['flag-definitions.h']

  def IgnoreFile(self, name):
    return (super(CppLintProcessor, self).IgnoreFile(name)
              or (name in CppLintProcessor.IGNORE_LINT))

  def GetPathsToSearch(self):
    return ['src', 'include', 'samples', join('test', 'cctest'),
            join('test', 'unittests')]

  def GetCpplintScript(self, prio_path):
    """Locate cpplint.py, preferring |prio_path| over $PATH entries;
    returns None when not found."""
    for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
      path = path.strip('"')
      cpplint = os.path.join(path, "cpplint.py")
      if os.path.isfile(cpplint):
        return cpplint
    return None

  def ProcessFiles(self, files, path):
    """Lint the changed files in parallel; returns True iff no lint errors
    were found."""
    good_files_cache = FileContentsCache('.cpplint-cache')
    good_files_cache.Load()
    files = good_files_cache.FilterUnchangedFiles(files)
    if len(files) == 0:
      print('No changes in files detected. Skipping cpplint check.')
      return True

    filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
    cpplint = self.GetCpplintScript(join(path, "tools"))
    if cpplint is None:
      print('Could not find cpplint.py. Make sure '
            'depot_tools is installed and in the path.')
      sys.exit(1)

    # Fix: a dead 'command = [sys.executable, 'cpplint.py', ...]' assignment
    # (immediately overwritten) has been removed.
    command = [sys.executable, cpplint, '--filter', filt]
    # One argv list per file.  The previous code routed this through
    # os.path.join, which only worked by accident (join with a single
    # argument returns it unchanged).
    commands = [command + [file] for file in files]
    count = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(count)
    try:
      results = pool.map_async(CppLintWorker, commands).get(999999)
    except KeyboardInterrupt:
      print("\nCaught KeyboardInterrupt, terminating workers.")
      sys.exit(1)

    # Evict files with errors from the cache so they are re-linted next run.
    for i in range(len(files)):
      if results[i] > 0:
        good_files_cache.RemoveFile(files[i])

    total_errors = sum(results)
    print("Total errors found: %d" % total_errors)
    good_files_cache.Save()
    return total_errors == 0
# Matches the expected V8 copyright line; accepts an optional year range
# (e.g. "2006-2012") as long as the final year is 2000-2019.
COPYRIGHT_HEADER_PATTERN = re.compile(
    r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
  """
  Check that all files include a copyright notice and no trailing whitespaces.

  Also checks for tabs, file-ending newline conventions, and (for .cc files)
  two empty lines between top-level declarations.
  """

  # File extensions subject to the source hygiene checks.
  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
                         '.status', '.gyp', '.gypi']

  # Overwriting the one in the parent class.
  def FindFilesIn(self, path):
    """Prefer 'git ls-files' when |path| is a git checkout: faster, and it
    automatically skips untracked build output.  Falls back to the plain
    directory walk of the base class otherwise."""
    if os.path.exists(path+'/.git'):
      output = subprocess.Popen('git ls-files --full-name',
                                stdout=PIPE, cwd=path, shell=True)
      result = []
      for file in output.stdout.read().split():
        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
          if self.IgnoreDir(dir_part):
            break
        else:
          # Only reached when no path component was an ignored directory.
          if (self.IsRelevant(file) and os.path.exists(file)
              and not self.IgnoreFile(file)):
            result.append(join(path, file))
      if output.wait() == 0:
        return result
    return super(SourceProcessor, self).FindFilesIn(path)

  def IsRelevant(self, name):
    for ext in SourceProcessor.RELEVANT_EXTENSIONS:
      if name.endswith(ext):
        return True
    return False

  def GetPathsToSearch(self):
    # Search the whole workspace (filtering happens in IgnoreDir/IsRelevant).
    return ['.']

  def IgnoreDir(self, name):
    return (super(SourceProcessor, self).IgnoreDir(name) or
            name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))

  # Third-party benchmarks, imported tools and generated sources that are
  # exempt from the copyright-header check.
  IGNORE_COPYRIGHTS = ['box2d.js',
                       'cpplint.py',
                       'copy.js',
                       'corrections.js',
                       'crypto.js',
                       'daemon.py',
                       'earley-boyer.js',
                       'fannkuch.js',
                       'fasta.js',
                       'jsmin.py',
                       'libraries.cc',
                       'libraries-empty.cc',
                       'lua_binarytrees.js',
                       'memops.js',
                       'poppler.js',
                       'primes.js',
                       'raytrace.js',
                       'regexp-pcre.js',
                       'sqlite.js',
                       'sqlite-change-heap.js',
                       'sqlite-pointer-masking.js',
                       'sqlite-safe-heap.js',
                       'gnuplot-4.6.3-emscripten.js',
                       'zlib.js']
  # Files additionally exempt from the tab check.
  IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']

  def EndOfDeclaration(self, line):
    # Heuristic: a bare '}' or '};' line closes a top-level declaration.
    return line == "}" or line == "};"

  def StartOfDeclaration(self, line):
    # Heuristic: a line opening a comment or containing ') {' starts a
    # declaration.
    return line.find("//") == 0 or \
           line.find("/*") == 0 or \
           line.find(") {") != -1

  def ProcessContents(self, name, contents):
    """Run every content check on one file.  Returns False if any check
    fails; diagnostics are printed as side effects."""
    result = True
    base = basename(name)
    if not base in SourceProcessor.IGNORE_TABS:
      if '\t' in contents:
        print "%s contains tabs" % name
        result = False
    if not base in SourceProcessor.IGNORE_COPYRIGHTS:
      if not COPYRIGHT_HEADER_PATTERN.search(contents):
        print "%s is missing a correct copyright header." % name
        result = False
    if ' \n' in contents or contents.endswith(' '):
      # Reconstruct the 1-based line number of every trailing-whitespace
      # occurrence by counting newlines in the chunks between ' \n' hits.
      line = 0
      lines = []
      parts = contents.split(' \n')
      if not contents.endswith(' '):
        # The final chunk holds no occurrence unless the file ends with a
        # space, so drop it from the count.
        parts.pop()
      for part in parts:
        line += part.count('\n') + 1
        lines.append(str(line))
      linenumbers = ', '.join(lines)
      if len(lines) > 1:
        print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
      else:
        print "%s has trailing whitespaces in line %s." % (name, linenumbers)
      result = False
    if not contents.endswith('\n') or contents.endswith('\n\n'):
      print "%s does not end with a single new line." % name
      result = False
    # Check two empty lines between declarations.
    if name.endswith(".cc"):
      line = 0
      lines = []
      parts = contents.split('\n')
      while line < len(parts) - 2:
        if self.EndOfDeclaration(parts[line]):
          if self.StartOfDeclaration(parts[line + 1]):
            # Next declaration starts immediately: zero blank lines.
            lines.append(str(line + 1))
            line += 1
          elif parts[line + 1] == "" and \
               self.StartOfDeclaration(parts[line + 2]):
            # Exactly one blank line separates the declarations.
            lines.append(str(line + 1))
            line += 2
        line += 1
      if len(lines) >= 1:
        linenumbers = ', '.join(lines)
        if len(lines) > 1:
          print "%s does not have two empty lines between declarations " \
                "in lines %s." % (name, linenumbers)
        else:
          print "%s does not have two empty lines between declarations " \
                "in line %s." % (name, linenumbers)
        result = False
    return result

  def ProcessFiles(self, files, path):
    """Check every file; returns True iff all files pass."""
    success = True
    violations = 0
    for file in files:
      try:
        # NOTE(review): if open() raises here, 'handle' is unbound and the
        # finally clause itself fails with NameError.
        handle = open(file)
        contents = handle.read()
        if not self.ProcessContents(file, contents):
          success = False
          violations += 1
      finally:
        handle.close()
    print "Total violating files: %s" % violations
    return success
def CheckRuntimeVsNativesNameClashes(workspace):
  """Run tools/check-name-clashes.py under the current interpreter;
  returns True iff the script exits successfully."""
  script = join(workspace, "tools", "check-name-clashes.py")
  return subprocess.call([sys.executable, script]) == 0
def CheckExternalReferenceRegistration(workspace):
  """Run tools/external-reference-check.py under the current interpreter;
  returns True iff the script exits successfully."""
  script = join(workspace, "tools", "external-reference-check.py")
  return subprocess.call([sys.executable, script]) == 0
def CheckAuthorizedAuthor(input_api, output_api):
  """For non-googler/chromites committers, verify the author's email address is
  in AUTHORS.

  Returns a list of presubmit warnings (empty when the author is authorized
  or no author is attached to the change).
  """
  # TODO(maruel): Add it to input_api?
  import fnmatch

  author = input_api.change.author_email
  if not author:
    input_api.logging.info('No author, skipping AUTHOR check')
    return []
  authors_path = input_api.os_path.join(
      input_api.PresubmitLocalPath(), 'AUTHORS')
  # Read AUTHORS inside a with-block: the original left the file handle
  # open for the garbage collector to close.
  with open(authors_path) as authors_file:
    matches = (input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
               for line in authors_file)
    # Each AUTHORS entry may be a fnmatch pattern (e.g. '*@chromium.org').
    valid_authors = [m.group(1).lower() for m in matches if m]
  if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
    input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
    return [output_api.PresubmitPromptWarning(
        ('%s is not in AUTHORS file. If you are a new contributor, please visit'
         '\n'
         'http://www.chromium.org/developers/contributing-code and read the '
         '"Legal" section\n'
         'If you are a chromite, verify the contributor signed the CLA.') %
        author)]
  return []
def GetOptions():
  """Build the command-line option parser for this presubmit script."""
  parser = optparse.OptionParser()
  parser.add_option('--no-lint',
                    help="Do not run cpplint",
                    default=False,
                    action="store_true")
  return parser
def Main():
  """Entry point: run every source-level presubmit check on the workspace.

  Returns a shell exit code: 0 when every check passed, 1 otherwise.
  """
  # Workspace root is the parent of the directory containing this script.
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  parser = GetOptions()
  (options, args) = parser.parse_args()
  success = True
  print "Running C++ lint check..."
  if not options.no_lint:
    success = CppLintProcessor().Run(workspace) and success
  print "Running copyright header, trailing whitespaces and " \
    "two empty lines between declarations check..."
  success = SourceProcessor().Run(workspace) and success
  success = CheckRuntimeVsNativesNameClashes(workspace) and success
  success = CheckExternalReferenceRegistration(workspace) and success
  # All checks contribute to a single pass/fail verdict.
  if success:
    return 0
  else:
    return 1
# Script entry point: propagate the aggregated check result to the shell.
if __name__ == '__main__':
  sys.exit(Main())
|
guorendong/iridium-browser-ubuntu
|
v8/tools/presubmit.py
|
Python
|
bsd-3-clause
| 15,774
|
[
"VisIt"
] |
76da2814bd8161d8fa821d82d9725c5f67d88c4d75b76d0a096c8a5bb9533ce0
|
from functools import partial
from client.player import Player
from client.updater import fetchClientUpdate
from config import Settings
import fa
from fa.factions import Factions
'''
Created on Dec 1, 2011
@author: thygrrr
'''
from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit
from PyQt4.QtCore import QDataStream
from types import IntType, FloatType, ListType, DictType
from client import ClientState, GAME_PORT_DEFAULT, LOBBY_HOST, \
LOBBY_PORT, LOCAL_REPLAY_PORT
import logging
logger = logging.getLogger(__name__)
import util
import secondaryServer
import json
import sys
import replays
import time
import os
import random
import notificatation_system as ns
FormClass, BaseClass = util.loadUiType("client/client.ui")
class mousePosition(object):
    """Tracks which edge(s) of the parent window the mouse cursor is near.

    Used by the frameless ClientWindow to decide the resize direction and
    the cursor shape while hovering near a window border.
    """

    def __init__(self, parent):
        self.parent = parent
        self.onLeftEdge = False
        self.onRightEdge = False
        self.onTopEdge = False
        self.onBottomEdge = False
        # Also initialize the derived flags here: the original only created
        # them in computeMousePosition(), so isOnEdge() or a corner check
        # before the first hover event raised AttributeError.
        self.onTopLeftEdge = False
        self.onBottomLeftEdge = False
        self.onTopRightEdge = False
        self.onBottomRightEdge = False
        self.onEdges = False
        self.cursorShapeChange = False
        self.warning_buttons = dict()

    def computeMousePosition(self, pos):
        """Recompute every edge flag from *pos* (parent-widget coordinates).

        An edge is "hit" within an 8-pixel margin of the window border.
        """
        self.onLeftEdge = pos.x() < 8
        self.onRightEdge = pos.x() > self.parent.size().width() - 8
        self.onTopEdge = pos.y() < 8
        self.onBottomEdge = pos.y() > self.parent.size().height() - 8
        self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge
        self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge
        self.onTopRightEdge = self.onTopEdge and self.onRightEdge
        self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge
        self.onEdges = (self.onLeftEdge or self.onRightEdge or
                        self.onTopEdge or self.onBottomEdge)

    def resetToFalse(self):
        """Clear all edge flags (used while the window is maximized).

        Now also clears the corner flags and ``onEdges``; the original left
        them stale from the previous computeMousePosition() call.
        """
        self.onLeftEdge = False
        self.onRightEdge = False
        self.onTopEdge = False
        self.onBottomEdge = False
        self.onTopLeftEdge = False
        self.onBottomLeftEdge = False
        self.onTopRightEdge = False
        self.onBottomRightEdge = False
        self.onEdges = False
        self.cursorShapeChange = False

    def isOnEdge(self):
        """Return True if the cursor was last seen on any window edge."""
        return self.onEdges
class ClientWindow(FormClass, BaseClass):
    '''
    This is the main lobby client that manages the FAF-related connection and data,
    in particular players, games, ranking, etc.
    Its UI also houses all the other UIs for the sub-modules.
    '''
    # Shared parentless widget, usable as a parent for transient dialogs.
    topWidget = QtGui.QWidget()
    #These signals are emitted when the client is connected or disconnected from FAF
    connected = QtCore.pyqtSignal()
    disconnected = QtCore.pyqtSignal()
    #This signal is emitted when the client is done resizing
    doneresize = QtCore.pyqtSignal()
    #These signals notify connected modules of game state changes (i.e. reasons why FA is launched)
    viewingReplay = QtCore.pyqtSignal(QtCore.QUrl)
    #Game state controls
    gameEnter = QtCore.pyqtSignal()
    gameExit = QtCore.pyqtSignal()
    #These signals propagate important client state changes to other modules.
    # Each carries the raw message payload (dict/list) received from the server.
    statsInfo = QtCore.pyqtSignal(dict)
    tourneyTypesInfo = QtCore.pyqtSignal(dict)
    tutorialsInfo = QtCore.pyqtSignal(dict)
    tourneyInfo = QtCore.pyqtSignal(dict)
    modInfo = QtCore.pyqtSignal(dict)
    gameInfo = QtCore.pyqtSignal(dict)
    modVaultInfo = QtCore.pyqtSignal(dict)
    coopInfo = QtCore.pyqtSignal(dict)
    avatarList = QtCore.pyqtSignal(list)
    playerAvatarList = QtCore.pyqtSignal(dict)
    usersUpdated = QtCore.pyqtSignal(list)
    localBroadcast = QtCore.pyqtSignal(str, str)
    autoJoin = QtCore.pyqtSignal(list)
    channelsUpdated = QtCore.pyqtSignal(list)
    replayVault = QtCore.pyqtSignal(dict)
    coopLeaderBoard = QtCore.pyqtSignal(dict)
    #These signals are emitted whenever a certain tab is activated
    showReplays = QtCore.pyqtSignal()
    showMaps = QtCore.pyqtSignal()
    showGames = QtCore.pyqtSignal()
    showTourneys = QtCore.pyqtSignal()
    showLadder = QtCore.pyqtSignal()
    showChat = QtCore.pyqtSignal()
    showMods = QtCore.pyqtSignal()
    showCoop = QtCore.pyqtSignal()
    # Emitted when a faf:// game URL should be joined (carries the URL string).
    joinGameFromURL = QtCore.pyqtSignal(str)
    matchmakerInfo = QtCore.pyqtSignal(dict)
    def __init__(self, *args, **kwargs):
        """Build the main window: network socket, local servers, frameless
        window chrome, tab icons, and all signal wiring.

        Setup order matters: the socket and local servers are created before
        the UI so later wiring can reference them.
        """
        BaseClass.__init__(self, *args, **kwargs)
        logger.debug("Client instantiating")
        # Hook to Qt's application management system
        QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup)
        #Init and wire the TCP Network socket to communicate with faforever.com
        # This is the evil stream API.
        self.socket = QtNetwork.QTcpSocket()
        self.socket.readyRead.connect(self.readFromServer)
        self.socket.disconnected.connect(self.disconnectedFromServer)
        self.socket.error.connect(self.socketError)
        self.blockSize = 0  # bytes still expected for the current framed message
        self.uniqueId = None
        self.sendFile = False
        # Indeterminate progress dialog, reused for connect/login/shutdown.
        self.progress = QtGui.QProgressDialog()
        self.progress.setMinimum(0)
        self.progress.setMaximum(0)
        #Tray icon
        self.tray = QtGui.QSystemTrayIcon()
        self.tray.setIcon(util.icon("client/tray_icon.png"))
        self.tray.show()
        self.state = ClientState.NONE
        self.session = None
        #Timer for resize events (debounces resizeEvent into one resized())
        self.resizeTimer = QtCore.QTimer(self)
        self.resizeTimer.timeout.connect(self.resized)
        self.preferedSize = 0
        #Process used to run Forged Alliance (managed in module fa)
        fa.instance.started.connect(self.startedFA)
        fa.instance.finished.connect(self.finishedFA)
        fa.instance.error.connect(self.errorFA)
        self.gameInfo.connect(fa.instance.processGameInfo)
        #Local Replay Server (and relay)
        self.replayServer = fa.replayserver.ReplayServer(self)
        #Local Relay Server
        self.relayServer = fa.relayserver.RelayServer(self)
        #Local proxy servers
        self.proxyServer = fa.proxies.proxies(self)
        #stat server
        self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self)
        #create user interface (main window) and load theme
        self.setupUi(self)
        self.setStyleSheet(util.readstylesheet("client/client.css"))
        self.windowsTitleLabel = QtGui.QLabel(self)
        self.windowsTitleLabel.setText("FA Forever " + util.VERSION_STRING)
        self.windowsTitleLabel.setProperty("titleLabel", True)
        self.setWindowTitle("FA Forever " + util.VERSION_STRING)
        # Frameless window: title bar and min/max/close are hand-built below.
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)
        self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle)
        self.mousePosition = mousePosition(self)
        self.installEventFilter(self)
        self.minimize = QtGui.QToolButton(self)
        self.minimize.setIcon(util.icon("client/minimize-button.png"))
        self.maximize = QtGui.QToolButton(self)
        self.maximize.setIcon(util.icon("client/maximize-button.png"))
        close = QtGui.QToolButton(self)
        close.setIcon(util.icon("client/close-button.png"))
        self.minimize.setMinimumHeight(10)
        close.setMinimumHeight(10)
        self.maximize.setMinimumHeight(10)
        close.setIconSize(QtCore.QSize(22, 22))
        self.minimize.setIconSize(QtCore.QSize(22, 22))
        self.maximize.setIconSize(QtCore.QSize(22, 22))
        close.setProperty("windowControlBtn", True)
        self.maximize.setProperty("windowControlBtn", True)
        self.minimize.setProperty("windowControlBtn", True)
        # Assemble the custom title bar: menu, title, stretch, window buttons.
        self.menu = self.menuBar()
        self.topLayout.addWidget(self.menu)
        self.topLayout.addWidget(self.windowsTitleLabel)
        self.topLayout.addWidget(self.minimize)
        self.topLayout.addWidget(self.maximize)
        self.topLayout.addWidget(close)
        self.topLayout.insertStretch(1, 500)
        self.topLayout.setSpacing(0)
        self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        self.maxNormal = False  # True while the window is maximized
        close.clicked.connect(self.close)
        self.minimize.clicked.connect(self.showSmall)
        self.maximize.clicked.connect(self.showMaxRestore)
        # Frameless move/resize bookkeeping (driven by the event filter).
        self.moving = False
        self.dragging = False
        self.draggingHover = False
        self.offset = None
        self.curSize = None
        sizeGrip = QtGui.QSizeGrip(self)
        self.mainGridLayout.addWidget(sizeGrip, 2, 2)
        #Wire all important signals
        self.mainTabs.currentChanged.connect(self.mainTabChanged)
        self.topTabs.currentChanged.connect(self.vaultTabChanged)
        #Verrry important step!
        self.loadSettingsPrelogin()
        self.players = {}  # Players known to the client, contains the player_info messages sent by the server
        self.urls = {}
        # Handy reference to the Player object representing the logged-in user.
        self.me = None
        # names of the client's friends
        self.friends = set()
        # names of the client's foes
        self.foes = set()
        self.clanlist = set()  # members of clients clan
        self.power = 0  # current user power
        self.id = 0
        self.coloredNicknames = False
        #Initialize the Menu Bar according to settings etc.
        self.initMenus()
        #Load the icons for the tabs
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png"))
        self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png"))
        # Needed so the embedded web views can show plugin content (streams).
        QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
        #for moderator
        self.modMenu = None
    def eventFilter(self, obj, event):
        """Watch hover moves to drive edge-resizing and the cursor shape.

        Always returns False so the event continues normal processing.
        """
        if (event.type() == QtCore.QEvent.HoverMove):
            self.draggingHover = self.dragging
            if self.dragging:
                # An edge drag is in progress: keep resizing with the cursor.
                self.resizeWidget(self.mapToGlobal(event.pos()))
            else:
                # Maximized windows have no resizable edges.
                if self.maxNormal == False:
                    self.mousePosition.computeMousePosition(event.pos())
                else:
                    self.mousePosition.resetToFalse()
                self.updateCursorShape(event.pos())
        return False
def updateCursorShape(self, pos):
if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge:
self.mousePosition.cursorShapeChange = True
self.setCursor(QtCore.Qt.SizeFDiagCursor)
elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge:
self.setCursor(QtCore.Qt.SizeBDiagCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.mousePosition.cursorShapeChange = True
else:
if self.mousePosition.cursorShapeChange == True:
self.unsetCursor()
self.mousePosition.cursorShapeChange = False
def showSmall(self):
self.showMinimized()
def showMaxRestore(self):
if(self.maxNormal):
self.maxNormal = False
if self.curSize:
self.setGeometry(self.curSize)
else:
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
def mouseDoubleClickEvent(self, event):
self.showMaxRestore()
def mouseReleaseEvent(self, event):
self.dragging = False
self.moving = False
if self.rubberBand.isVisible():
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(self.rubberBand.geometry())
self.rubberBand.hide()
#self.showMaxRestore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.mousePosition.isOnEdge() and self.maxNormal == False:
self.dragging = True
return
else :
self.dragging = False
self.moving = True
self.offset = event.pos()
    def mouseMoveEvent(self, event):
        """While dragging an edge, resize; while moving, drag the window and
        preview Aero-snap style targets with the rubber band."""
        if self.dragging and self.draggingHover == False:
            self.resizeWidget(event.globalPos())
        elif self.moving and self.offset != None:
            desktop = QtGui.QDesktopWidget().availableGeometry(self)
            # Snap previews: top of screen = full screen, left/right screen
            # border = corresponding half of the screen.
            if event.globalPos().y() == 0:
                self.rubberBand.setGeometry(desktop)
                self.rubberBand.show()
            elif event.globalPos().x() == 0:
                desktop.setRight(desktop.right() / 2.0)
                self.rubberBand.setGeometry(desktop)
                self.rubberBand.show()
            elif event.globalPos().x() == desktop.right():
                desktop.setRight(desktop.right() / 2.0)
                desktop.moveLeft(desktop.right())
                self.rubberBand.setGeometry(desktop)
                self.rubberBand.show()
            else:
                self.rubberBand.hide()
            # Dragging a maximized window restores it first, then follows
            # the mouse using the grab offset recorded on press.
            if self.maxNormal == True:
                self.showMaxRestore()
            self.move(event.globalPos() - self.offset)
def resizeWidget(self, globalMousePos):
if globalMousePos.y() == 0:
self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
self.rubberBand.show()
else:
self.rubberBand.hide()
origRect = self.frameGeometry()
left, top, right, bottom = origRect.getCoords()
minWidth = self.minimumWidth()
minHeight = self.minimumHeight()
if self.mousePosition.onTopLeftEdge:
left = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomLeftEdge:
left = globalMousePos.x();
bottom = globalMousePos.y();
elif self.mousePosition.onTopRightEdge:
right = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomRightEdge:
right = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onLeftEdge:
left = globalMousePos.x()
elif self.mousePosition.onRightEdge:
right = globalMousePos.x()
elif self.mousePosition.onTopEdge:
top = globalMousePos.y()
elif self.mousePosition.onBottomEdge:
bottom = globalMousePos.y()
newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom))
if newRect.isValid():
if minWidth > newRect.width():
if left != origRect.left() :
newRect.setLeft(origRect.left())
else:
newRect.setRight(origRect.right())
if minHeight > newRect.height() :
if top != origRect.top():
newRect.setTop(origRect.top())
else:
newRect.setBottom(origRect.bottom())
self.setGeometry(newRect)
    def setup(self):
        """Instantiate every sub-module UI after a successful login.

        Imports are local: these modules need a live client instance, so
        importing them at file scope would create circular imports.
        """
        import chat
        import tourneys
        import stats
        import vault
        import games
        import tutorials
        import modvault
        import coop
        from chat._avatarWidget import avatarWidget
        # Initialize chat
        self.chat = chat.Lobby(self)
        #build main window with the now active client
        self.ladder = stats.Stats(self)
        self.games = games.Games(self)
        self.tourneys = tourneys.Tourneys(self)
        self.vault = vault.MapVault(self)
        self.modvault = modvault.ModVault(self)
        self.replays = replays.Replays(self)
        self.tutorials = tutorials.Tutorials(self)
        self.Coop = coop.Coop(self)
        self.notificationSystem = ns.NotificationSystem(self)
        # set menu states
        self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled)
        # Other windows
        self.avatarAdmin = self.avatarSelection = avatarWidget(self, None)
        # warning setup: a hidden bar offering to join a waiting 1v1 opponent.
        self.warning = QtGui.QHBoxLayout()
        self.warnPlayer = QtGui.QLabel(self)
        self.warnPlayer.setText("A player of your skill level is currently searching for a 1v1 game. Click a faction to join them! ")
        self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter)
        self.warnPlayer.setAlignment(QtCore.Qt.AlignVCenter)
        self.warnPlayer.setProperty("warning", True)
        self.warning.addStretch()
        # One join button per faction, each wired to the matching ladder listener.
        def add_warning_button(faction):
            button = QtGui.QToolButton(self)
            button.setMaximumSize(25, 25)
            button.setIcon(util.icon("games/automatch/%s.png" % faction.to_name()))
            button.clicked.connect(self.games.join_ladder_listeners[faction])
            self.warning.addWidget(button)
            return button
        self.warning_buttons = {faction: add_warning_button(faction) for faction in Factions}
        self.warning.addStretch()
        self.mainGridLayout.addLayout(self.warning, 2, 0)
        self.warningHide()
def warningHide(self):
'''
hide the warning bar for matchmaker
'''
self.warnPlayer.hide()
for i in self.warning_buttons.values():
i.hide()
def warningShow(self):
'''
show the warning bar for matchmaker
'''
self.warnPlayer.show()
for i in self.warning_buttons.values():
i.show()
    @QtCore.pyqtSlot()
    def cleanup(self):
        '''
        Perform cleanup before the UI closes.

        Shutdown order matters: close the game process first, then the
        server connections, then local helper servers, chat, tray, and UI.
        '''
        self.state = ClientState.SHUTDOWN
        # Reuse the shared progress dialog as a (cancel-less) shutdown screen.
        self.progress.setWindowTitle("FAF is shutting down")
        self.progress.setMinimum(0)
        self.progress.setMaximum(0)
        self.progress.setValue(0)
        self.progress.setCancelButton(None)
        self.progress.show()
        #Important: If a game is running, offer to terminate it gently
        self.progress.setLabelText("Closing ForgedAllianceForever.exe")
        if fa.instance.running():
            fa.instance.close()
        #Terminate Lobby Server connection
        if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState:
            self.progress.setLabelText("Closing main connection.")
            self.socket.disconnectFromHost()
        # Clear UPnP Mappings...
        if self.useUPnP:
            self.progress.setLabelText("Removing UPnP port mappings")
            fa.upnp.removePortMappings()
        #Terminate local ReplayServer
        if self.replayServer:
            self.progress.setLabelText("Terminating local replay server")
            self.replayServer.close()
            self.replayServer = None
        #Terminate local RelayServer
        if self.relayServer:
            self.progress.setLabelText("Terminating local relay server")
            self.relayServer.close()
            self.relayServer = None
        #Clean up Chat
        if self.chat:
            self.progress.setLabelText("Disconnecting from IRC")
            self.chat.disconnect()
            self.chat = None
        # Get rid of the Tray icon
        if self.tray:
            self.progress.setLabelText("Removing System Tray icon")
            self.tray.deleteLater()
            self.tray = None
        #Terminate UI
        if self.isVisible():
            self.progress.setLabelText("Closing main window")
            self.close()
        self.progress.close()
def closeEvent(self, event):
logger.info("Close Event for Application Main Window")
self.saveWindow()
if fa.instance.running():
if QtGui.QMessageBox.question(self, "Are you sure?", "Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
event.ignore()
return
return QtGui.QMainWindow.closeEvent(self, event)
def resizeEvent(self, size):
self.resizeTimer.start(400)
def resized(self):
self.resizeTimer.stop()
self.doneresize.emit()
    def initMenus(self):
        """Wire every menu action to its handler and build the theme menu."""
        # External link actions (each opens a configured URL in the browser).
        self.actionLinkMumble.triggered.connect(partial(self.open_url, Settings.get("MUMBLE_URL").format(login=self.login)))
        self.actionLink_account_to_Steam.triggered.connect(partial(self.open_url, Settings.get("STEAMLINK_URL")))
        self.actionLinkWebsite.triggered.connect(partial(self.open_url, Settings.get("WEBSITE_URL")))
        self.actionLinkWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
        self.actionLinkForums.triggered.connect(partial(self.open_url, Settings.get("FORUMS_URL")))
        self.actionLinkUnitDB.triggered.connect(partial(self.open_url, Settings.get("UNITDB_URL")))
        # Notification system actions.
        self.actionNsSettings.triggered.connect(lambda : self.notificationSystem.on_showSettings())
        self.actionNsEnabled.triggered.connect(lambda enabled : self.notificationSystem.setNotificationEnabled(enabled))
        self.actionWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
        self.actionReportBug.triggered.connect(partial(self.open_url, Settings.get("TICKET_URL")))
        self.actionShowLogs.triggered.connect(self.linkShowLogs)
        self.actionTechSupport.triggered.connect(partial(self.open_url, Settings.get("SUPPORT_URL")))
        self.actionAbout.triggered.connect(self.linkAbout)
        # Maintenance actions.
        self.actionClearCache.triggered.connect(self.clearCache)
        self.actionClearSettings.triggered.connect(self.clearSettings)
        self.actionClearGameFiles.triggered.connect(self.clearGameFiles)
        self.actionSetGamePath.triggered.connect(self.switchPath)
        self.actionSetGamePort.triggered.connect(self.switchPort)
        self.actionSetMumbleOptions.triggered.connect(self.setMumbleOptions)
        #Toggle-Options (all funnel into updateOptions, which persists them)
        self.actionSetAutoLogin.triggered.connect(self.updateOptions)
        self.actionSetSoundEffects.triggered.connect(self.updateOptions)
        self.actionSetOpenGames.triggered.connect(self.updateOptions)
        self.actionSetJoinsParts.triggered.connect(self.updateOptions)
        self.actionSetLiveReplays.triggered.connect(self.updateOptions)
        self.actionSaveGamelogs.triggered.connect(self.updateOptions)
        self.actionColoredNicknames.triggered.connect(self.updateOptions)
        self.actionActivateMumbleSwitching.triggered.connect(self.saveMumbleSwitching)
        #Init themes as actions.
        themes = util.listThemes()
        for theme in themes:
            action = self.menuTheme.addAction(str(theme))
            action.triggered.connect(self.switchTheme)
            action.theme = theme  # stash the theme on the action for switchTheme()
            action.setCheckable(True)
            if util.getTheme() == theme:
                action.setChecked(True)
        # Nice helper for the developers
        self.menuTheme.addSeparator()
        self.menuTheme.addAction("Reload Stylesheet", lambda: self.setStyleSheet(util.readstylesheet("client/client.css")))
@QtCore.pyqtSlot()
def updateOptions(self):
self.autologin = self.actionSetAutoLogin.isChecked()
self.soundeffects = self.actionSetSoundEffects.isChecked()
self.opengames = self.actionSetOpenGames.isChecked()
self.joinsparts = self.actionSetJoinsParts.isChecked()
self.livereplays = self.actionSetLiveReplays.isChecked()
self.gamelogs = self.actionSaveGamelogs.isChecked()
self.coloredNicknames = self.actionColoredNicknames.isChecked()
self.saveChat()
self.saveCredentials()
@QtCore.pyqtSlot()
def switchTheme(self):
util.setTheme(self.sender().theme, True)
@QtCore.pyqtSlot()
def switchPath(self):
fa.wizards.Wizard(self).exec_()
@QtCore.pyqtSlot()
def switchPort(self):
import loginwizards
loginwizards.gameSettingsWizard(self).exec_()
@QtCore.pyqtSlot()
def setMumbleOptions(self):
import loginwizards
loginwizards.mumbleOptionsWizard(self).exec_()
@QtCore.pyqtSlot()
def clearSettings(self):
result = QtGui.QMessageBox.question(None, "Clear Settings", "Are you sure you wish to clear all settings, login info, etc. used by this program?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if (result == QtGui.QMessageBox.Yes):
util.settings.clear()
util.settings.sync()
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def clearGameFiles(self):
util.clearDirectory(util.BIN_DIR)
util.clearDirectory(util.GAMEDATA_DIR)
@QtCore.pyqtSlot()
def clearCache(self):
changed = util.clearDirectory(util.CACHE_DIR)
if changed:
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def open_url(self, url):
QtGui.QDesktopServices.openUrl(url)
@QtCore.pyqtSlot()
def linkShowLogs(self):
util.showInExplorer(util.LOG_DIR)
@QtCore.pyqtSlot()
def linkAbout(self):
dialog = util.loadUi("client/about.ui")
dialog.exec_()
def saveCredentials(self):
util.settings.beginGroup("user")
util.settings.setValue("user/remember", self.remember) #always remember to remember
if self.remember:
util.settings.setValue("user/login", self.login)
util.settings.setValue("user/password", self.password)
util.settings.setValue("user/autologin", self.autologin) #only autologin if remembering
else:
util.settings.setValue("user/login", None)
util.settings.setValue("user/password", None)
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def clearAutologin(self):
self.autologin = False
self.actionSetAutoLogin.setChecked(False)
util.settings.beginGroup("user")
util.settings.setValue("user/autologin", False)
util.settings.endGroup()
util.settings.sync()
def saveWindow(self):
util.settings.beginGroup("window")
util.settings.setValue("geometry", self.saveGeometry())
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/falogs", self.gamelogs)
util.settings.endGroup()
def savePort(self):
util.settings.beginGroup("ForgedAlliance")
util.settings.setValue("app/gameport", self.gamePort)
util.settings.setValue("app/upnp", self.useUPnP)
util.settings.endGroup()
util.settings.sync()
def saveMumble(self):
util.settings.beginGroup("Mumble")
util.settings.setValue("app/mumble", self.enableMumble)
util.settings.endGroup()
util.settings.sync()
def saveMumbleSwitching(self):
self.activateMumbleSwitching = self.actionActivateMumbleSwitching.isChecked()
util.settings.beginGroup("Mumble")
util.settings.setValue("app/activateMumbleSwitching", self.activateMumbleSwitching)
util.settings.endGroup()
util.settings.sync()
@QtCore.pyqtSlot()
def saveChat(self):
util.settings.beginGroup("chat")
util.settings.setValue("soundeffects", self.soundeffects)
util.settings.setValue("livereplays", self.livereplays)
util.settings.setValue("opengames", self.opengames)
util.settings.setValue("joinsparts", self.joinsparts)
util.settings.setValue("coloredNicknames", self.coloredNicknames)
util.settings.endGroup()
def loadSettingsPrelogin(self):
util.settings.beginGroup("user")
self.login = util.settings.value("user/login")
self.password = util.settings.value("user/password")
self.remember = (util.settings.value("user/remember") == "true")
# This is the new way we do things.
self.autologin = (util.settings.value("user/autologin") == "true")
self.actionSetAutoLogin.setChecked(self.autologin)
util.settings.endGroup()
    def loadSettings(self):
        """Load all post-login settings: geometry, FA options, Mumble, chat.

        First-run Mumble values are stored as the sentinel string
        "firsttime" so the user can be prompted exactly once.
        """
        #Load settings
        util.settings.beginGroup("window")
        geometry = util.settings.value("geometry", None)
        if geometry:
            self.restoreGeometry(geometry)
        util.settings.endGroup()
        util.settings.beginGroup("ForgedAlliance")
        self.gamePort = int(util.settings.value("app/gameport", GAME_PORT_DEFAULT))
        self.useUPnP = (util.settings.value("app/upnp", "false") == "true")
        self.gamelogs = (util.settings.value("app/falogs", "false") == "true")
        self.actionSaveGamelogs.setChecked(self.gamelogs)
        util.settings.endGroup()
        util.settings.beginGroup("Mumble")
        if util.settings.value("app/mumble", "firsttime") == "firsttime":
            # The user has never configured mumble before. Be a little intrusive and ask him if he wants to use it.
            if QtGui.QMessageBox.question(self, "Enable Voice Connector?", "FA Forever can connect with <a href=\"http://mumble.sourceforge.net/\">Mumble</a> to support the automatic setup of voice connections between you and your team mates. Would you like to enable this feature? You can change the setting at any time by going to options -> settings -> Voice", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
                util.settings.setValue("app/mumble", "true")
            else:
                util.settings.setValue("app/mumble", "false")
        if util.settings.value("app/activateMumbleSwitching", "firsttime") == "firsttime":
            # Channel switching defaults to on the first time around.
            util.settings.setValue("app/activateMumbleSwitching", "true")
        self.enableMumble = (util.settings.value("app/mumble", "false") == "true")
        self.activateMumbleSwitching = (util.settings.value("app/activateMumbleSwitching", "false") == "true")
        util.settings.endGroup()
        self.actionActivateMumbleSwitching.setChecked(self.activateMumbleSwitching)
        self.loadChat()
def loadChat(self):
try:
util.settings.beginGroup("chat")
self.soundeffects = (util.settings.value("soundeffects", "true") == "true")
self.opengames = (util.settings.value("opengames", "true") == "true")
self.joinsparts = (util.settings.value("joinsparts", "false") == "true")
self.livereplays = (util.settings.value("livereplays", "true") == "true")
self.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true")
util.settings.endGroup()
self.actionColoredNicknames.setChecked(self.coloredNicknames)
self.actionSetSoundEffects.setChecked(self.soundeffects)
self.actionSetLiveReplays.setChecked(self.livereplays)
self.actionSetOpenGames.setChecked(self.opengames)
self.actionSetJoinsParts.setChecked(self.joinsparts)
except:
pass
    def doConnect(self):
        """Start local helper servers, then block (with a cancellable
        progress dialog) until the lobby TCP connection is established.

        Returns True on success, False when the user cancels, a local
        server fails to listen, or the socket errors out.
        """
        if not self.replayServer.doListen(LOCAL_REPLAY_PORT):
            return False
        if not self.relayServer.doListen():
            return False
        self.progress.setCancelButtonText("Cancel")
        self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
        self.progress.setAutoClose(False)
        self.progress.setAutoReset(False)
        self.progress.setModal(1)
        self.progress.setWindowTitle("Connecting...")
        self.progress.setLabelText("Establishing connection to {}:{}".format(LOBBY_HOST, LOBBY_PORT))
        self.progress.show()
        # Begin connecting.
        self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
        self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
        # Busy-wait on the Qt event loop until connected or cancelled.
        while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
            QtGui.QApplication.processEvents()
        self.state = ClientState.NONE
        self.localIP = str(self.socket.localAddress().toString())
        # #Perform Version Check first
        if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
            self.progress.close()  # in case it was still showing...
            # We either cancelled or had a TCP error, meaning the connection failed..
            if self.progress.wasCanceled():
                logger.warn("doConnect() aborted by user.")
            else:
                logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
            return False
        else:
            return True
    def reconnect(self):
        '''
        Try to reconnect to the server and re-send the login handshake.

        NOTE(review): near-duplicate of doConnect(); consider extracting the
        shared connect loop. Returns True on success, False otherwise.
        '''
        # Detach the disconnect handler while we force-close the old socket,
        # so the intentional disconnect isn't treated as a dropped connection.
        self.socket.disconnected.disconnect(self.disconnectedFromServer)
        self.socket.disconnectFromHost()
        self.socket.disconnected.connect(self.disconnectedFromServer)
        self.progress.setCancelButtonText("Cancel")
        self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
        self.progress.setAutoClose(False)
        self.progress.setAutoReset(False)
        self.progress.setModal(1)
        self.progress.setWindowTitle("Re-connecting...")
        self.progress.setLabelText("Re-establishing connection ...")
        self.progress.show()
        # Begin connecting.
        self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
        self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
        # Busy-wait on the Qt event loop until connected or cancelled.
        while (self.socket.state() != QtNetwork.QAbstractSocket.ConnectedState) and self.progress.isVisible():
            QtGui.QApplication.processEvents()
        self.state = ClientState.NONE
        self.localIP = str(self.socket.localAddress().toString())
        # #Perform Version Check first
        if not self.socket.state() == QtNetwork.QAbstractSocket.ConnectedState:
            self.progress.close()  # in case it was still showing...
            # We either cancelled or had a TCP error, meaning the connection failed..
            if self.progress.wasCanceled():
                logger.warn("doConnect() aborted by user.")
            else:
                logger.error("doConnect() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
            return False
        else:
            # Re-send the login handshake, reusing the existing session id.
            self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP, session=self.session))
            return True
    def waitSession(self):
        """Ask the server for a session id and wait up to ~15 s for it.

        On success also computes the unique machine id and loads the
        post-login settings (needed before optionally starting Mumble).
        Returns True on success, False on timeout/cancel.
        """
        self.progress.setLabelText("Setting up Session...")
        self.send(dict(command="ask_session"))
        start = time.time()
        # Pump the event loop until the session reply arrives or we time out.
        while self.session == None and self.progress.isVisible() :
            QtGui.QApplication.processEvents()
            if time.time() - start > 15 :
                break
        if not self.session :
            if self.progress.wasCanceled():
                logger.warn("waitSession() aborted by user.")
            else :
                logger.error("waitSession() failed with clientstate " + str(self.state) + ", socket errorstring: " + self.socket.errorString())
            QtGui.QMessageBox.critical(self, "Notice from Server", "Unable to get a session : <br> Server under maintenance.<br><br>Please retry in some minutes.")
            return False
        self.uniqueId = util.uniqueID(self.login, self.session)
        self.loadSettings()
        #
        # Voice connector (This isn't supposed to be here, but I need the settings to be loaded before I can determine if we can hook in the mumbleConnector
        #
        if self.enableMumble:
            self.progress.setLabelText("Setting up Mumble...")
            import mumbleconnector
            self.mumbleConnector = mumbleconnector.MumbleConnector(self)
        return True
def doLogin(self):
    """
    Perform the login handshake with the lobby server.

    Shows the login wizard when credentials are missing, sends the "hello"
    command, then pumps the event loop until the server accepts, rejects,
    or the user cancels. Returns True on success, False otherwise.
    Recurses once per rejection after clearing saved credentials.
    """
    # Determine if a login wizard needs to be displayed and do so
    if not self.autologin or not self.password or not self.login:
        import loginwizards
        if not loginwizards.LoginWizard(self).exec_():
            return False;
    self.progress.setLabelText("Logging in...")
    self.progress.reset()
    self.progress.show()
    # Strip whitespace the user may have typed around the login name.
    self.login = self.login.strip()
    logger.info("Attempting to login as: " + str(self.login))
    self.state = ClientState.NONE
    if not self.uniqueId :
        # uniqueId generation depends on MSVC runtime DLLs; point the user at them.
        QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Unable to login", "It seems that you miss some important DLL.<br>Please install :<br><a href =\"http://www.microsoft.com/download/en/confirmation.aspx?id=8328\">http://www.microsoft.com/download/en/confirmation.aspx?id=8328</a> and <a href = \"http://www.microsoft.com/en-us/download/details.aspx?id=17851\">http://www.microsoft.com/en-us/download/details.aspx?id=17851</a><br><br>You probably have to restart your computer after installing them.<br><br>Please visit this link in case of problems : <a href=\"http://forums.faforever.com/forums/viewforum.php?f=3\">http://forums.faforever.com/forums/viewforum.php?f=3</a>", QtGui.QMessageBox.Close)
        return False
    else:
        self.send(dict(command="hello", version=0, login=self.login, password=self.password, unique_id=self.uniqueId, local_ip=self.localIP))
    # Pump the event loop until a server response changes self.state
    # or the user cancels the progress dialog.
    while (not self.state) and self.progress.isVisible():
        QtGui.QApplication.processEvents()
    if self.progress.wasCanceled():
        logger.warn("Login aborted by user.")
        return False
    self.progress.close()
    if self.state == ClientState.OUTDATED :
        logger.warn("Client is OUTDATED.")
    elif self.state == ClientState.ACCEPTED:
        logger.info("Login accepted.")
        # update what's new page
        self.whatNewsView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login, pwdhash=self.password)))
        # live streams
        self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/?page_id=974"))
        util.crash.CRASH_REPORT_USER = self.login
        if self.useUPnP:
            fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")
        # success: save login data (if requested) and carry on
        self.actionSetAutoLogin.setChecked(self.autologin)
        self.updateOptions()
        self.progress.close()
        self.connected.emit()
        return True
    elif self.state == ClientState.REJECTED:
        logger.warning("Login rejected.")
        # Workaround: stale saved window geometry breaks the next run, so drop it.
        util.settings.beginGroup("window")
        util.settings.remove("geometry")
        util.settings.endGroup()
        self.clearAutologin()
        return self.doLogin() #Just try to login again, slightly hackish but I can get away with it here, I guess.
    else:
        # A more profound error has occurred (cancellation or disconnection)
        return False
def isFriend(self, name):
    """Tell other modules whether the given user is on the friends list."""
    is_listed = name in self.friends
    return is_listed
def isFoe(self, name):
    """Tell other modules whether the given user is on the foes list."""
    is_listed = name in self.foes
    return is_listed
def isPlayer(self, name):
    """Tell other modules whether the given user is a known online player
    (the local user always counts)."""
    return name == self.login or name in self.players
#Color table used by the following method
# CAVEAT: This will break if the theme is loaded after the client package is imported
# Class-level lookup tables loaded from the theme at import time:
# colors: status name ("self", "friend", ...) -> color string
# randomcolors: palette used for per-user deterministic "random" colors
colors = json.loads(util.readfile("client/colors.json"))
randomcolors = json.loads(util.readfile("client/randomcolors.json"))
def getUserColor(self, name):
    """
    Resolve the display color for *name* based on their relationship to
    the local user (self > friend > foe > clan > random/player > default).
    """
    if name == self.login:
        category = "self"
    elif name in self.friends:
        category = "friend"
    elif name in self.foes:
        category = "foe"
    elif name in self.clanlist:
        category = "clan"
    elif self.coloredNicknames:
        # Per-user deterministic color overrides the plain player color.
        return self.getRandomColor(name)
    elif name in self.players:
        category = "player"
    else:
        category = "default"
    return self.getColor(category)
def getRandomColor(self, name):
    """Pick a deterministic per-user color from the random palette."""
    # Seeding with the name makes the choice stable across calls/sessions.
    random.seed(name)
    chosen = random.choice(self.randomcolors)
    return chosen
def getColor(self, name):
    """Look up a named theme color, falling back to the theme default."""
    try:
        return self.colors[name]
    except KeyError:
        return self.colors["default"]
@QtCore.pyqtSlot()
def startedFA(self):
    '''
    Slot hooked up to fa.instance when the process has launched.
    It will notify other modules through the signal gameEnter().
    '''
    logger.info("FA has launched in an attached process.")
    self.gameEnter.emit()
@QtCore.pyqtSlot(int)
def finishedFA(self, exit_code):
    '''
    Slot hooked up to fa.instance when the process has ended.
    It will notify other modules through the signal gameExit().
    '''
    # Non-zero exit codes are logged at warning level.
    log = logger.info if not exit_code else logger.warn
    log("FA has finished with exit code: " + str(exit_code))
    self.gameExit.emit()
@QtCore.pyqtSlot(int)
def errorFA(self, error_code):
    '''
    Slot hooked up to fa.instance when the process has failed to start.

    error_code 0: failed to start (dialog shown); 1: crashed/killed after
    starting (logged only); anything else: generic failure dialog.
    '''
    if error_code == 0:
        logger.error("FA has failed to start")
        QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.")
    elif error_code == 1:
        logger.error("FA has crashed or killed after starting")
    else:
        text = "FA has failed to start with error code: " + str(error_code)
        logger.error(text)
        QtGui.QMessageBox.critical(self, "Error from FA", text)
    self.gameExit.emit()
@QtCore.pyqtSlot(int)
def mainTabChanged(self, index):
    '''
    The main visible tab (module) of the client's UI has changed.
    In this case, other modules may want to load some data or cease
    particularly CPU-intensive interactive functionality.
    LATER: This can be rewritten as a simple Signal that each module can then individually connect to.
    '''
    new_tab = self.mainTabs.widget(index)
    # Map each tab widget to the signal that announces its activation.
    notifications = (
        (self.gamesTab, self.showGames),
        (self.chatTab, self.showChat),
        (self.replaysTab, self.showReplays),
        (self.ladderTab, self.showLadder),
        (self.tourneyTab, self.showTourneys),
        (self.coopTab, self.showCoop),
    )
    for tab, signal in notifications:
        if new_tab is tab:
            signal.emit()
@QtCore.pyqtSlot(int)
def vaultTabChanged(self, index):
    """Emit the refresh signal for whichever vault tab was just selected."""
    new_tab = self.topTabs.widget(index)
    for tab, signal in ((self.mapsTab, self.showMaps), (self.modsTab, self.showMods)):
        if new_tab is tab:
            signal.emit()
def joinGameFromURL(self, url):
    '''
    Tries to join the game at the given URL
    '''
    logger.debug("joinGameFromURL: " + url.toString())
    if fa.instance.available():
        add_mods = []
        try:
            # The "mods" query item is a JSON-encoded list of sim mod uids.
            modstr = url.queryItemValue("mods")
            add_mods = json.loads(modstr) # should be a list
        except:
            # Best effort: a missing/bad mods list just means no extra mods.
            logger.info("Couldn't load urlquery value 'mods'")
        # Verify the game install, then the requested mod/map/sim mods,
        # before asking the server to join.
        if fa.check.game(self):
            if fa.check.check(url.queryItemValue("mod"), url.queryItemValue("map"), sim_mods=add_mods):
                self.send(dict(command="game_join", uid=int(url.queryItemValue("uid")), gameport=self.gamePort))
def writeToServer(self, action, *args, **kw):
    '''
    Writes data to the deprecated stream API. Do not use.

    Frame layout: UInt32 byte-length placeholder, then the action string,
    login and session, then each positional argument serialized according
    to its Python type. The length prefix is back-patched at the end.
    NOTE(review): IntType/FloatType/ListType/DictType/basestring are
    Python 2 names (types module) — presumably imported at file top.
    '''
    logger.debug("Client: " + action)
    block = QtCore.QByteArray()
    out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
    out.setVersion(QtCore.QDataStream.Qt_4_2)
    out.writeUInt32(0)  # placeholder for the frame length, patched below
    out.writeQString(action)
    out.writeQString(self.login or "")
    out.writeQString(self.session or "")
    for arg in args :
        if type(arg) is IntType:
            out.writeInt(arg)
        elif isinstance(arg, basestring):
            out.writeQString(arg)
        elif type(arg) is FloatType:
            out.writeFloat(arg)
        elif type(arg) is ListType:
            out.writeQVariantList(arg)
        elif type(arg) is DictType:
            # Dictionaries travel as JSON strings.
            out.writeQString(json.dumps(arg))
        elif type(arg) is QtCore.QFile :
            arg.open(QtCore.QIODevice.ReadOnly)
            fileDatas = QtCore.QByteArray(arg.readAll())
            out.writeInt(fileDatas.size())
            out.writeRawData(fileDatas)
            # This may take a while. We display the progress bar so the user get a feedback
            self.sendFile = True
            self.progress.setLabelText("Sending file to server")
            self.progress.setCancelButton(None)
            self.progress.setWindowFlags(QtCore.Qt.CustomizeWindowHint | QtCore.Qt.WindowTitleHint)
            self.progress.setAutoClose(True)
            self.progress.setMinimum(0)
            self.progress.setMaximum(100)
            self.progress.setModal(1)
            self.progress.setWindowTitle("Uploading in progress")
            self.progress.show()
            arg.close()
        else:
            # Unknown types degrade to their string representation.
            logger.warn("Uninterpreted Data Type: " + str(type(arg)) + " sent as str: " + str(arg))
            out.writeQString(str(arg))
    # Back-patch the real payload length (frame size minus the 4-byte prefix).
    out.device().seek(0)
    out.writeUInt32(block.size() - 4)
    self.bytesToSend = block.size() - 4
    self.socket.write(block)
def serverTimeout(self):
    """Ping the server on the first timeout; abort the socket if it stays silent."""
    if self.timeout:
        # Already pinged once without an answer: give up on this connection.
        self.socket.abort()
    else:
        logger.info("Connection timeout - Checking if server is alive.")
        self.writeToServer("PING")
        self.timeout += 1
@QtCore.pyqtSlot()
def readFromServer(self):
    """
    Drain complete length-prefixed messages from the lobby socket.

    Wire format: a UInt32 byte length followed by a QString payload.
    self.blockSize carries partial-read state between invocations so a
    frame split across TCP reads is resumed on the next readyRead.
    """
    ins = QtCore.QDataStream(self.socket)
    ins.setVersion(QtCore.QDataStream.Qt_4_2)
    while ins.atEnd() == False :
        if self.blockSize == 0:
            if self.socket.bytesAvailable() < 4:
                # Not even the length prefix has arrived yet.
                return
            self.blockSize = ins.readUInt32()
        if self.socket.bytesAvailable() < self.blockSize:
            # Wait until the whole payload is buffered.
            return
        action = ins.readQString()
        logger.info("Server: '%s'" % action)
        if action == "PING":
            self.writeToServer("PONG")
            self.blockSize = 0
            # NOTE(review): returning here defers any further queued messages
            # to the next readyRead signal — confirm that is intended.
            return
        try:
            self.dispatch(json.loads(action))
        except:
            logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info())
        self.blockSize = 0
@QtCore.pyqtSlot()
def disconnectedFromServer(self):
    """
    Handle loss of the lobby connection: notify the user (if logged in),
    clear the online player lists, and disable the online-only tabs.
    """
    logger.warn("Disconnected from lobby server.")
    if self.state == ClientState.ACCEPTED:
        QtGui.QMessageBox.warning(QtGui.QApplication.activeWindow(), "Disconnected from FAF", "The lobby lost the connection to the FAF server.<br/><b>You might still be able to chat.<br/>To play, try reconnecting a little later!</b>", QtGui.QMessageBox.Close)
    #Clear the online users lists
    oldplayers = self.players.keys()
    self.players = {}
    self.urls = {}
    self.usersUpdated.emit(oldplayers)
    self.disconnected.emit()
    # Fall back to the first tab and grey out everything that needs a server.
    self.mainTabs.setCurrentIndex(0)
    for i in range(2, self.mainTabs.count()):
        self.mainTabs.setTabEnabled(i, False)
        self.mainTabs.setTabText(i, "offline")
    self.state = ClientState.DROPPED
@QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError)
def socketError(self, error):
    """Log TCP errors; show a dialog only when a login was in progress."""
    logger.error("TCP Socket Error: " + self.socket.errorString())
    if self.state > ClientState.NONE:   # Positive client states deserve user notification.
        QtGui.QMessageBox.critical(None, "TCP Error", "A TCP Connection Error has occurred:<br/><br/><b>" + self.socket.errorString() + "</b>", QtGui.QMessageBox.Close)
    self.progress.cancel()
@QtCore.pyqtSlot()
def forwardLocalBroadcast(self, source, message):
    # Relay a locally generated broadcast to all listeners.
    # NOTE(review): the pyqtSlot() decorator declares no parameters while the
    # method takes two — confirm signal connections still deliver both args.
    self.localBroadcast.emit(source, message)
def manage_power(self):
    ''' update the interface accordingly to the power of the user'''
    # power >= 1 marks moderators/admins: lazily create the Administration
    # menu and expose the avatar manager action.
    if self.power >= 1 :
        if self.modMenu == None :
            self.modMenu = self.menu.addMenu("Administration")
        # NOTE(review): this action is added on every call — repeated calls
        # presumably duplicate the menu entry; confirm callers invoke once.
        actionAvatar = QtGui.QAction("Avatar manager", self.modMenu)
        actionAvatar.triggered.connect(self.avatarManager)
        self.modMenu.addAction(actionAvatar)
def requestAvatars(self, personal):
    """Ask the server for avatars: the user's own list, or (admin) all of them."""
    if personal:
        request = dict(command="avatar", action="list_avatar")
    else:
        request = dict(command="admin", action="requestavatars")
    self.send(request)
def joinChannel(self, username, channel):
    '''Join users to a channel'''
    user_id = self.players[username].id
    self.send(dict(command="admin", action="join_channel", user_ids=[user_id], channel=channel))
def closeFA(self, username):
    '''Close FA remotly'''
    target_id = self.players[username].id
    self.send(dict(command="admin", action="closeFA", user_id=target_id))
def closeLobby(self, username):
    '''Close lobby remotly'''
    target_id = self.players[username].id
    self.send(dict(command="admin", action="closelobby", user_id=target_id))
def addFriend(self, friend_name):
    '''Adding a new friend by user'''
    self.friends.add(friend_name)
    friend_id = self.players[friend_name].id
    self.send(dict(command="social_add", friend=friend_id))
    self.usersUpdated.emit([friend_name])
def addFoe(self, foe_name):
    '''Adding a new foe by user'''
    self.foes.add(foe_name)
    foe_id = self.players[foe_name].id
    self.send(dict(command="social_add", foe=foe_id))
    self.usersUpdated.emit([foe_name])
def remFriend(self, friend_name):
    '''Removal of a friend by user'''
    self.friends.remove(friend_name)
    friend_id = self.players[friend_name].id
    self.send(dict(command="social_remove", friend=friend_id))
    self.usersUpdated.emit([friend_name])
def remFoe(self, foe_name):
    '''Removal of a foe by user'''
    self.foes.remove(foe_name)
    foe_id = self.players[foe_name].id
    self.send(dict(command="social_remove", foe=foe_id))
    self.usersUpdated.emit([foe_name])
#
# JSON Protocol v2 Implementation below here
#
def send(self, message):
    """Serialize *message* as JSON and ship it over the stream API."""
    payload = json.dumps(message)
    logger.info("Outgoing JSON Message: " + payload)
    self.writeToServer(payload)
def dispatch(self, message):
    '''
    A fairly pythonic way to process received strings as JSON messages.
    Routes a decoded message to its handle_<command> method.
    '''
    if "command" not in message:
        logger.debug("No command in message.")
        return
    handler = getattr(self, "handle_" + message['command'], None)
    if handler is None:
        logger.error("Unknown JSON command: %s" % message['command'])
        raise ValueError
    handler(message)
def handle_stats(self, message):
    # Forward statistics payloads to interested modules.
    self.statsInfo.emit(message)
def handle_session(self, message):
    # Remember the session id handed out by the server (kept as a string);
    # waitSession() polls self.session for this.
    self.session = str(message["session"])
def handle_update(self, message):
    """Server demands a client update: mark the client OUTDATED and fetch it."""
    # Clear saved window geometry first; stale values broke restarts with Qt.
    util.settings.beginGroup("window")
    util.settings.remove("geometry")
    util.settings.endGroup()
    logger.warn("Server says that Updating is needed.")
    self.progress.close()
    self.state = ClientState.OUTDATED
    fetchClientUpdate(message["update"])
def handle_welcome(self, message):
    # Login succeeded: record our id/login and unblock doLogin()'s wait loop.
    self.id = message["id"]
    self.login = message["login"]
    logger.debug("Login success")
    self.state = ClientState.ACCEPTED
def handle_registration_response(self, message):
    """Process the account-creation result; surface failures as a notice."""
    if message["result"] == "SUCCESS":
        self.state = ClientState.CREATED
    else:
        self.state = ClientState.REJECTED
        self.handle_notice({"style": "notice", "text": message["error"]})
def handle_game_launch(self, message):
    """
    Launch Forged Alliance for a game the server has set up.

    Builds the FA command line (rating, clan and country arguments),
    ensures the map and sim mods are present, optionally writes an
    options.lua file, sets up UPnP, then starts FA via fa.run().
    """
    logger.info("Handling game_launch via JSON " + str(message))
    silent = False
    if 'args' in message:
        arguments = message['args']
    else:
        arguments = []

    # Do some special things depending of the reason of the game launch.
    # HACK: Ideally, this comes from the server, too. LATER: search_ranked message
    if message["featured_mod"] == "ladder1v1":
        arguments.append('/' + self.games.race)
        # Player 1v1 rating
        arguments.append('/mean')
        arguments.append(str(self.players[self.login]["ladder_rating_mean"]))
        arguments.append('/deviation')
        arguments.append(str(self.players[self.login]["ladder_rating_deviation"]))
        # Launch the auto lobby
        self.relayServer.init_mode = 1
    else:
        # Player global rating
        arguments.append('/mean')
        arguments.append(str(self.players[self.login]["rating_mean"]))
        arguments.append('/deviation')
        arguments.append(str(self.players[self.login]["rating_deviation"]))
        if self.me.country is not None:
            arguments.append('/country ')
            arguments.append(self.me.country)
        # Launch the normal lobby
        self.relayServer.init_mode = 0

    if self.me.clan is not None:
        arguments.append('/clan')
        arguments.append(self.me.clan)

    # Ensure we have the map
    if "mapname" in message:
        fa.check.map(message['mapname'], force=True, silent=silent)

    if "sim_mods" in message:
        fa.mods.checkMods(message['sim_mods'])

    # Writing a file for options
    if "options" in message:
        filename = os.path.join(util.CACHE_DIR, "options.lua")
        options = QtCore.QFile(filename)
        options.open(QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Text)
        numOpt = 0
        options.write("Options = { ")
        lenopt = len(message['options'])
        for option in message['options']:
            if option == True:
                options.write("'1'")
            else:
                options.write("'0'")
            numOpt = numOpt + 1
            if lenopt != numOpt:
                options.write(", ")
        options.write(" }")
        options.close()

    # Experimental UPnP Mapper - mappings are removed on app exit
    if self.useUPnP:
        fa.upnp.createPortMapping(self.localIP, self.gamePort, "UDP")

    # BUGFIX: this used to reference the undefined names `modkey` and
    # `game_info`, raising NameError before FA could ever start.
    info = dict(uid=message['uid'], recorder=self.login, featured_mod=message["featured_mod"], game_time=time.time())
    fa.run(info, self.relayServer.serverPort(), arguments)
def handle_coop_info(self, message):
    # Forward co-op mission info to the coop tab.
    self.coopInfo.emit(message)
def handle_tournament_types_info(self, message):
    # Forward the list of tournament types to the tourney tab.
    self.tourneyTypesInfo.emit(message)
def handle_tournament_info(self, message):
    # Forward a single tournament's details.
    self.tourneyInfo.emit(message)
def handle_tutorials_info(self, message):
    # Forward tutorial listings.
    self.tutorialsInfo.emit(message)
def handle_mod_info(self, message):
    # Forward featured-mod info.
    self.modInfo.emit(message)
def handle_game_info(self, message):
    # Forward hosted-game announcements to the games tab.
    self.gameInfo.emit(message)
def handle_modvault_list_info(self, message):
    """Unpack a batched mod-vault listing into individual info events."""
    for entry in message["modList"]:
        self.handle_modvault_info(entry)
def handle_modvault_info(self, message):
    # Forward a single mod-vault entry.
    self.modVaultInfo.emit(message)
def handle_replay_vault(self, message):
    # Forward replay-vault search results.
    self.replayVault.emit(message)
def handle_coop_leaderboard(self, message):
    # Forward co-op leaderboard data.
    self.coopLeaderBoard.emit(message)
def handle_matchmaker_info(self, message):
    """Matchmaker updates: forward actions, or toggle the 1v1 potential warning."""
    if "action" in message:
        self.matchmakerInfo.emit(message)
    elif "potential" in message:
        toggle = self.warningShow if message["potential"] else self.warningHide
        toggle()
def handle_avatar(self, message):
    """Forward the user's own avatar list, when present."""
    if "avatarlist" in message:
        self.avatarList.emit(message["avatarlist"])
def handle_admin(self, message):
    """Dispatch admin responses: an avatar list, or another player's avatars."""
    if "avatarlist" in message:
        self.avatarList.emit(message["avatarlist"])
    elif "player_avatar_list" in message:
        self.playerAvatarList.emit(message)
def handle_social(self, message):
    """Apply the server's social payload: friends, foes, channels, autojoin, power."""
    # Friends and foes are handled identically: replace the set and refresh
    # every known user's presentation.
    for group in ("friends", "foes"):
        if group in message:
            setattr(self, group, set(message[group]))
            self.usersUpdated.emit(self.players.keys())
    if "channels" in message:
        # Channels arriving means startup is done; re-enable notifications.
        self.notificationSystem.disabledStartup = False
        self.channelsUpdated.emit(message["channels"])
    if "autojoin" in message:
        self.autoJoin.emit(message["autojoin"])
    if "power" in message:
        self.power = message["power"]
        self.manage_power()
def handle_player_info(self, message):
    """
    Ingest the server's player list: build Player objects, remember which
    one is the local user, and track clan mates in self.clanlist.
    """
    players = message["players"]

    # Firstly, find yourself. Things get easier once "me" is assigned.
    for player in players:
        if player["login"] == self.login:
            self.me = Player(player)

    # BUGFIX/robustness: self.me may still be unset when the local user is
    # absent from this update (previously an AttributeError), and a clan of
    # None used to "match" every other clanless player.
    me = getattr(self, "me", None)
    my_clan = me.clan if me is not None else None
    for player in players:
        name = player["login"]
        new_player = Player(player)
        self.players[name] = new_player
        self.usersUpdated.emit([name])
        if my_clan is not None and new_player.clan == my_clan:
            self.clanlist.add(name)
def avatarManager(self):
    """Open the avatar manager after requesting the global avatar list."""
    self.requestAvatars(0)
    self.avatarSelection.show()
def handle_notice(self, message):
    """
    Present a server notice; the "style" field selects the treatment:
    error (critical dialog; before login it marks the login REJECTED),
    warning, scores (tray balloon + chat broadcast), kill (terminate FA),
    kick (shut the client down), anything else (information dialog).
    """
    if "text" in message:
        if message["style"] == "error" :
            if self.state != ClientState.NONE :
                QtGui.QMessageBox.critical(self, "Error from Server", message["text"])
            else :
                # An error before any state change means the login itself failed.
                QtGui.QMessageBox.critical(self, "Login Failed", message["text"])
                self.state = ClientState.REJECTED
        elif message["style"] == "warning":
            QtGui.QMessageBox.warning(self, "Warning from Server", message["text"])
        elif message["style"] == "scores":
            self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500)
            self.localBroadcast.emit("Scores", message["text"])
        else:
            QtGui.QMessageBox.information(self, "Notice from Server", message["text"])
    # kill/kick act regardless of whether a "text" was shown above.
    if message["style"] == "kill":
        logger.info("Server has killed your Forged Alliance Process.")
        fa.instance.kill()
    if message["style"] == "kick":
        logger.info("Server has kicked you from the Lobby.")
        self.cleanup()
|
Sheeo/client
|
src/client/_clientwindow.py
|
Python
|
gpl-3.0
| 60,694
|
[
"VisIt"
] |
f242d3e17a255ef32bab2ab945047f523b1af2d8769a14cd5f1fa81f3ac82690
|
#-------------------------------------------------------------------------------
# Name: MODIS-Link-Retrieve
# Purpose: Retrieve latest NetCDF chlorophyll file from NASA MODIS - Different URLS than other script in this Repo
#
# Author: JFry
#
# Created: 12/10/2015
# Edited: 11/30/2016
# Copyright: (c) john6807 2016
# Licence: Apache License
# Version 2.0, January 2004
# http://www.apache.org/licenses/
#-------------------------------------------------------------------------------
import datetime, time, sys, os, arcpy
scriptPath = sys.path[0]

# local folder where wanting to download data
downloadfolder = sys.path[0] + ("\\Data\\")
if not os.path.exists(downloadfolder):
    os.makedirs(downloadfolder)

##Base Urls where data resides (ERDDAP file server)
baseurl = "http://coastwatch.pfeg.noaa.gov/erddap/files/"
sstmid = "erdMWsstd1day/"        # daily sea-surface temperature dataset
chlmid = "erdMWchla1day/"        # daily chlorophyll-a dataset
producturlChl = "_chla.nc"
producturlSST = "_sstd.nc"

# Calculate local time of yesterday - depending on where you are, the data may not yet be available for the current day
# NOTE(review): timedelta(2) actually goes two days back, not one —
# presumably a safety margin for data latency; confirm intent.
yesterdaytime = str(datetime.datetime.now()- datetime.timedelta(2))
print (yesterdaytime)
#Today's date
#todaytime = str(datetime.datetime.now())
#print (todaytime)

# Reformat the timestamp as YYYYDDD (year + day-of-year), as used in MODIS filenames.
formattime = time.strftime('%Y%j',time.strptime(yesterdaytime,"%Y-%m-%d %H:%M:%S.%f"))
print (formattime)

##format string request: local destination paths
downloadlocationChl = str(os.path.join(downloadfolder) + "MW" + formattime + "_" + formattime + producturlChl)
downloadlocationSST = str(os.path.join(downloadfolder) + "MW" + formattime + "_" + formattime + producturlSST)
print ("Chlorophyll Location " + downloadlocationChl)
print ("SST Location " + downloadlocationSST)

#Format string request for Cholorphyll
dlMODISdataChl = str(baseurl + chlmid + "MW" + formattime + "_" + formattime + producturlChl)
print ("Downloading Chl " + dlMODISdataChl)
#Format string request for Sea Surface Temperature
dlMODISdataSST= str(baseurl + sstmid + "MW" + formattime + "_" + formattime + producturlSST)
print ("Downloading SST " + dlMODISdataSST)

# Download with the urllib flavor that matches the running interpreter.
if sys.version_info[0]== 3:
    import urllib.request
    # from .DOYtoTime import getdate
    #What Python
    sysver = sys.version
    print (sysver)
    urllib.request.urlretrieve(dlMODISdataChl, downloadlocationChl)
    urllib.request.urlretrieve(dlMODISdataSST, downloadlocationSST)
    print ("Downloaded MODIS Data")
else:
    if sys.version_info[0]== 2:
        import urllib
        # from DOYtoTime import getdate
        sysver = sys.version
        print (sysver)
        urllib.urlretrieve(dlMODISdataChl, downloadlocationChl)
        urllib.urlretrieve(dlMODISdataSST, downloadlocationSST)
        print ("Downloaded MODIS Data")

# Hand the download folder back to the calling ArcGIS tool.
arcpy.SetParameter(0,downloadfolder)
quit()
|
jfrygeo/FishSuitabilityTemplate
|
Process-MODIS-Data-py.py
|
Python
|
apache-2.0
| 2,743
|
[
"NetCDF"
] |
40d8360fff195abb8572f511c4c52a8e015bcadf8bcdc19d55be52005100fb09
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from rapidtide.filter import NoncausalFilter
from rapidtide.helper_classes import FrequencyTracker
from rapidtide.io import writevec
from rapidtide.util import valtoindex
def spectralfilterprops(thefilter, debug=False):
    """
    Summarize a measured filter transfer function.

    Parameters
    ----------
    thefilter : dict with keys "filter" (object exposing getfreqs()),
        "frequencies" (1D array) and "transferfunc" (1D array).
    debug : bool — print target vs actual band-edge frequencies.

    Returns
    -------
    dict with "passbandripple", "lowerstopmean"/"lowerstopmax" and
    "upperstopmean"/"upperstopmax" (stopband levels relative to the
    passband mean; zero when the stopband is too narrow to measure).
    """
    lowerstop, lowerpass, upperpass, upperstop = thefilter["filter"].getfreqs()
    # Convert the band-edge frequencies into indices on the measured axis.
    lowerstopindex = valtoindex(thefilter["frequencies"], lowerstop)
    lowerpassindex = valtoindex(thefilter["frequencies"], lowerpass)
    upperpassindex = valtoindex(thefilter["frequencies"], upperpass)
    upperstopindex = np.min(
        [
            valtoindex(thefilter["frequencies"], upperstop),
            len(thefilter["frequencies"]) - 1,
        ]
    )
    if debug:
        print("target freqs:", lowerstop, lowerpass, upperpass, upperstop)
        print(
            "actual freqs:",
            thefilter["frequencies"][lowerstopindex],
            thefilter["frequencies"][lowerpassindex],
            thefilter["frequencies"][upperpassindex],
            thefilter["frequencies"][upperstopindex],
        )
    response = {}

    passbandmean = np.mean(thefilter["transferfunc"][lowerpassindex:upperpassindex])
    passbandmax = np.max(thefilter["transferfunc"][lowerpassindex:upperpassindex])
    passbandmin = np.min(thefilter["transferfunc"][lowerpassindex:upperpassindex])

    response["passbandripple"] = (passbandmax - passbandmin) / passbandmean

    # Only measure a stopband when at least a few bins lie in it.
    if lowerstopindex > 2:
        response["lowerstopmean"] = (
            np.mean(thefilter["transferfunc"][0:lowerstopindex]) / passbandmean
        )
        response["lowerstopmax"] = (
            np.max(np.abs(thefilter["transferfunc"][0:lowerstopindex])) / passbandmean
        )
    else:
        response["lowerstopmean"] = 0.0
        response["lowerstopmax"] = 0.0

    if len(thefilter["transferfunc"]) - upperstopindex > 2:
        response["upperstopmean"] = (
            np.mean(thefilter["transferfunc"][upperstopindex:-1]) / passbandmean
        )
        response["upperstopmax"] = (
            np.max(np.abs(thefilter["transferfunc"][upperstopindex:-1])) / passbandmean
        )
    else:
        response["upperstopmean"] = 0.0
        response["upperstopmax"] = 0.0
    return response
def makewaves(sampletime=0.50, tclengthinsecs=300.0, display=False):
    """
    Construct a set of synthetic test waveforms for filter evaluation.

    Parameters
    ----------
    sampletime : float — sample spacing in seconds.
    tclengthinsecs : float — total timecourse duration in seconds.
    display : bool — if True, plot each waveform as it is created.

    Returns
    -------
    list of dicts, each with keys "name", "timeaxis" and "waveform".
    """
    tclen = int(tclengthinsecs // sampletime)
    lowestfreq = 1.0 / (sampletime * tclen)
    nyquist = 0.5 / sampletime
    print(
        "    sampletime=",
        sampletime,
        ", timecourse length=",
        tclengthinsecs,
        "s, possible frequency range:",
        lowestfreq,
        nyquist,
    )
    timeaxis = np.arange(0.0, 1.0 * tclen) * sampletime

    # construct some test waveforms
    testwaves = []
    testwaves.append(
        {
            "name": "descending chirp",
            "timeaxis": 1.0 * timeaxis,
            "waveform": sp.signal.chirp(
                timeaxis, f0=0.3, f1=0.1, t1=timeaxis[-1], method="linear"
            ),
        }
    )
    if display:
        plt.figure()
        plt.plot(testwaves[-1]["timeaxis"], testwaves[-1]["waveform"])
        plt.legend([testwaves[-1]["name"]])
        plt.show()

    testwaves.append(
        {
            "name": "sinusoidal modulated",
            "timeaxis": 1.0 * timeaxis,
            "waveform": np.cos(timeaxis),
        }
    )
    if display:
        plt.figure()
        plt.plot(testwaves[-1]["timeaxis"], testwaves[-1]["waveform"])
        plt.legend([testwaves[-1]["name"]])
        plt.show()

    # Build a stepped-frequency waveform: constant phase increments per
    # segment, integrated with cumsum to get the instantaneous phase.
    scratch = np.ones(len(timeaxis), dtype=float)
    freqs = [0.1, 0.12, 0.15, 0.2]
    seglen = int(len(scratch) // len(freqs))
    print("seglen:", seglen)
    for i in range(len(freqs)):
        scratch[i * seglen : i * seglen + seglen] = sampletime * 2.0 * np.pi * freqs[i]
    print(scratch)
    # BUGFIX: this diagnostic plot used to be drawn unconditionally, popping
    # up a blocking figure even when display=False; it now honors the flag.
    if display:
        plt.figure()
        plt.plot(np.cumsum(scratch))
        plt.show()
    testwaves.append(
        {
            "name": "stepped freq",
            "timeaxis": 1.0 * timeaxis,
            "waveform": np.cos(np.cumsum(scratch)),
        }
    )
    if display:
        plt.figure()
        plt.plot(testwaves[-1]["timeaxis"], testwaves[-1]["waveform"])
        plt.legend([testwaves[-1]["name"]])
        plt.show()
    # writevec(testwaves[-1]['waveform'], 'stepped.txt')
    return testwaves
def eval_filterprops(sampletime=0.50, tclengthinsecs=300.0, numruns=100, display=False):
    """
    Empirically measure NoncausalFilter transfer functions with white noise,
    assert passband/stopband quality, then exercise end effects on a set of
    test waveforms.

    Parameters
    ----------
    sampletime : float — sample spacing in seconds.
    tclengthinsecs : float — timecourse duration in seconds.
    numruns : int — number of noise realizations averaged per filter.
    display : bool — show transfer-function and waveform plots.
    """
    tclen = int(tclengthinsecs // sampletime)
    print("Testing transfer function:")
    lowestfreq = 1.0 / (sampletime * tclen)
    nyquist = 0.5 / sampletime
    print(
        "    sampletime=",
        sampletime,
        ", timecourse length=",
        tclengthinsecs,
        "s, possible frequency range:",
        lowestfreq,
        nyquist,
    )
    timeaxis = np.arange(0.0, 1.0 * tclen) * sampletime
    overall = np.random.normal(size=tclen)
    nperseg = np.min([tclen, 256])
    f, dummy = sp.signal.welch(overall, fs=1.0 / sampletime, nperseg=nperseg)

    allfilters = []
    # construct all the filters; only bands that fit under Nyquist are tested
    for filtertype in ["lfo", "resp", "cardiac"]:
        testfilter = NoncausalFilter(filtertype=filtertype)
        lstest, lptest, uptest, ustest = testfilter.getfreqs()
        if lptest < nyquist:
            allfilters.append(
                {
                    "name": filtertype + " brickwall",
                    "filter": NoncausalFilter(filtertype=filtertype, transferfunc="brickwall"),
                }
            )
            allfilters.append(
                {
                    "name": filtertype + " trapezoidal",
                    "filter": NoncausalFilter(filtertype=filtertype, transferfunc="trapezoidal"),
                }
            )
            allfilters.append(
                {
                    "name": filtertype + " gaussian",
                    "filter": NoncausalFilter(filtertype=filtertype, transferfunc="gaussian"),
                }
            )

    # calculate the transfer functions for the filters by averaging the
    # filtered/unfiltered PSD ratio over numruns noise realizations
    for index in range(0, len(allfilters)):
        psd_raw = 0.0 * dummy
        psd_filt = 0.0 * dummy
        for i in range(0, numruns):
            inputsig = np.random.normal(size=tclen)
            outputsig = allfilters[index]["filter"].apply(1.0 / sampletime, inputsig)
            f, raw = sp.signal.welch(inputsig, fs=1.0 / sampletime, nperseg=nperseg)
            f, filt = sp.signal.welch(outputsig, fs=1.0 / sampletime, nperseg=nperseg)
            psd_raw += raw
            psd_filt += filt
        allfilters[index]["frequencies"] = f
        allfilters[index]["transferfunc"] = psd_filt / psd_raw

    # show transfer functions
    if display:
        legend = []
        plt.figure()
        plt.ylim([-1.1, 1.1 * len(allfilters)])
        offset = 0.0
        for thefilter in allfilters:
            plt.plot(thefilter["frequencies"], thefilter["transferfunc"] + offset)
            legend.append(thefilter["name"])
            offset += 1.1
        plt.legend(legend)
        plt.show()

    # test transfer function responses
    for thefilter in allfilters:
        response = spectralfilterprops(thefilter)
        print("    Evaluating", thefilter["name"], "transfer function")
        print("\tpassbandripple:", response["passbandripple"])
        print("\tlowerstopmax:", response["lowerstopmax"])
        print("\tlowerstopmean:", response["lowerstopmean"])
        print("\tupperstopmax:", response["upperstopmax"])
        print("\tupperstopmean:", response["upperstopmean"])
        assert response["passbandripple"] < 0.45
        assert response["lowerstopmax"] < 1e4
        assert response["lowerstopmean"] < 1e4
        assert response["upperstopmax"] < 1e4
        assert response["upperstopmean"] < 1e4

    # BUGFIX: testwaves was used below without ever being defined, raising
    # NameError; generate the standard waveforms first.
    testwaves = makewaves(sampletime=sampletime, tclengthinsecs=tclengthinsecs, display=display)

    # add a block regressor to probe filter end effects
    scratch = timeaxis * 0.0
    scratch[int(tclen / 5) : int(2 * tclen / 5)] = 1.0
    scratch[int(3 * tclen / 5) : int(4 * tclen / 5)] = 1.0
    testwaves.append(
        {
            "name": "block regressor",
            "timeaxis": 1.0 * timeaxis,
            "waveform": 1.0 * scratch,
        }
    )

    # show the end effects waveforms
    if display:
        legend = []
        plt.figure()
        plt.ylim([-2.2, 2.2 * len(testwaves)])
        offset = 0.0
        for thewave in testwaves:
            for thefilter in allfilters:
                plt.plot(
                    thewave["timeaxis"],
                    offset + thefilter["filter"].apply(1.0 / sampletime, thewave["waveform"]),
                )
                legend.append(thewave["name"] + ": " + thefilter["name"])
                offset += 1.1
            # plt.plot(thewave['timeaxis'], thewave['waveform'] + offset)
            # legend.append(thewave['name'])
            # offset += 2.2
        plt.legend(legend)
        plt.show()
    assert True
def test_filterprops(display=False):
    """Run eval_filterprops over several sampling regimes."""
    cases = (
        (0.72, 300.0, 100),
        (2.0, 300.0, 100),
        (0.1, 1000.0, 10),
    )
    for st, length, runs in cases:
        eval_filterprops(sampletime=st, tclengthinsecs=length, numruns=runs, display=display)
def main():
    # Standalone entry point: build and display the test waveforms.
    makewaves(display=True)
if __name__ == "__main__":
    main()
|
bbfrederick/rapidtide
|
rapidtide/disabledtests/xtest_spectrogram.py
|
Python
|
apache-2.0
| 9,837
|
[
"Gaussian"
] |
e695c578542a3b91da6842a4ec332e98a0983be5d41445306a37f7363c42d1a8
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import difflib
import glob
import json
import mmap
import os
import re
import sys
# Command-line interface: files to check, the repo root, the directory of
# boilerplate reference headers, and a verbosity flag.
parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')
# Default root is two levels above this script's directory.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")
args = parser.parse_args()
# Diagnostics go to stderr when verbose, otherwise are discarded.
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
    """Load the boilerplate reference headers, keyed by file extension."""
    refs = {}
    pattern = os.path.join(args.boilerplate_dir, "boilerplate.*.txt")
    for path in glob.glob(pattern):
        # boilerplate.<ext>.txt -> <ext>
        extension = os.path.basename(path).split(".")[1]
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()
    return refs
def file_passes(filename, refs, regexs):
    """Return True if *filename* starts with the expected license boilerplate.

    refs:   dict of extension (or basename) -> reference header lines.
    regexs: pattern table from get_regexs().
    Diagnostics are written to the module-level ``verbose_out`` stream.
    """
    try:
        # with-statement closes the handle even if read() raises; the
        # original leaked the descriptor on a read error.
        with open(filename, 'r') as f:
            data = f.read()
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False
    basename = os.path.basename(filename)
    extension = file_extension(filename)
    # Extensionless files (e.g. "Makefile") are looked up by basename.
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]
    # remove build tags from the top of Go files
    if extension == "go":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell files
    if extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)
    data = data.splitlines()
    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False
    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]
    # A literal "YEAR" left in the header means the placeholder was never
    # replaced with a real copyright year.
    p = regexs["year"]
    for d in data:
        if p.search(d):
            print('File %s is missing the year' % filename, file=verbose_out)
            return False
    # Normalize the first actual year (2014-2017) to "YEAR" so the header
    # can be compared against the reference verbatim.
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
        if found != 0:
            break
    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False
    return True
def file_extension(filename):
    """Return the lowercased final extension of *filename*, without the dot."""
    _, ext = os.path.splitext(filename)
    return ext.split(".")[-1].lower()
# Path fragments (substring match) never subject to the boilerplate check:
# vendored code, build output, VCS metadata and generated files.
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
                "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                "pkg/generated/bindata.go"]
def normalize_files(files):
    """Drop paths under skipped_dirs; absolutize the rest against args.rootdir."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """Collect the candidate files to check.

    Uses the explicit CLI file list when one was given, otherwise walks
    ``args.rootdir``; either way the result is filtered to files whose
    extension or basename appears in *extensions*.
    """
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            # (mutating ``dirs`` in place is how os.walk supports pruning)
            for d in skipped_dirs:
                if d in dirs:
                    dirs.remove(d)
            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)
    files = normalize_files(files)
    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles
def get_regexs():
    """Build the regex table used by file_passes()."""
    return {
        # "YEAR" is the placeholder in the boilerplate; a checked file
        # must not still contain it.
        "year": re.compile('YEAR'),
        # Real copyright years are normalized to "YEAR" before comparing;
        # company holder names can be anything.
        "date": re.compile('(2014|2015|2016|2017)'),
        # Strip "// +build ...\n\n" constraints from the top of Go files.
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # Strip "#!..." shebang lines from the top of shell files.
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
    }
def main():
    """Print each file whose header fails the boilerplate check; return 0."""
    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys())
    # Lazily evaluated so the per-file diagnostics stay interleaved with
    # the printed failures, exactly as in a plain loop.
    failures = (name for name in filenames
                if not file_passes(name, refs, regexs))
    for name in failures:
        print(name, file=sys.stdout)
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
mdshuai/service-catalog
|
vendor/k8s.io/kubernetes/hack/boilerplate/boilerplate.py
|
Python
|
apache-2.0
| 6,214
|
[
"VisIt"
] |
d21ad7ff546143653bd381d09f7c655415d10374442ed75470db916915be0159
|
from ase import Atoms
from gpaw import GPAW, PW, FermiDirac
# Setup up bulk NiO in an antiferromagnetic configuration:
a = 4.19  # cubic lattice constant (Angstrom)
b = a / 2**0.5  # in-plane spacing of the doubled tetragonal cell
m = 2.0  # initial magnetic moment on the Ni sites (Bohr magnetons)
# Two formula units: opposite initial moments on the two Ni atoms set up the
# antiferromagnetic configuration; the O sites start non-magnetic.
atoms = Atoms('Ni2O2',
              pbc=True,
              cell=(b, b, a),
              positions=[(0, 0, 0),
                         (b / 2, b / 2, a / 2),
                         (0, 0, a / 2),
                         (b / 2, b / 2, 0)],
              magmoms=[m, -m, 0, 0])
k = 2  # number of k-points per direction (coarse, tutorial-quality sampling)
atoms.calc = GPAW(mode=PW(400),  # plane-wave cutoff of 400 eV
                  occupations=FermiDirac(width=0.05),
                  setups={'Ni': ':d,6.0'},  # U=6 eV for Ni d orbitals
                  txt='nio.txt',
                  kpts=(k, k, k),
                  xc='PBE')
e = atoms.get_potential_energy()  # triggers the self-consistent calculation
|
robwarm/gpaw-symm
|
doc/tutorials/hubbardu/nio.py
|
Python
|
gpl-3.0
| 770
|
[
"ASE",
"GPAW"
] |
7a770d3600268e782ca49414dcb0f66859d4b643b29ffb99cb1f901ee5f31d23
|
# -*- coding: utf-8 -*-
from __future__ import print_function
"""CLI scripts."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import argparse
import os
import os.path as op
import glob
import json
from ..ext.six import string_types
from .format_manager import convert, format_manager
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
def _flatten(l):
return [item for sublist in l for item in sublist]
def _ensure_list(l):
    """Wrap a string in a list, pass lists through, reject anything else."""
    if isinstance(l, string_types):
        return [l]
    if isinstance(l, list):
        return l
    raise RuntimeError("This should be a string or a list: "
                       "{0:s}.".format(str(l)))
def _to_skip(dirname):
out = op.basename(dirname).startswith(('.', '_', '/'))
return out
def _expand_dirs_to_files(files_or_dirs, recursive=False):
    """Expand directories (optionally recursively) and glob patterns into a
    flat list of file paths.  Hidden/private directories are skipped."""
    files = []
    files_or_dirs = _ensure_list(files_or_dirs)
    for file_or_dir in files_or_dirs:
        file_or_dir = op.realpath(file_or_dir)
        if op.isdir(file_or_dir):
            # Skip dirnames starting with '.'
            if _to_skip(file_or_dir):
                continue
            # Recursively visit the directories and add the files.
            if recursive:
                files.extend(_expand_dirs_to_files([op.join(file_or_dir, file)
                                                    for file in os.listdir(file_or_dir)],
                                                   recursive=recursive))
            else:
                files.extend([op.join(file_or_dir, file)
                              for file in os.listdir(file_or_dir)])
        elif '*' in file_or_dir:
            # Glob patterns are expanded relative to the current directory.
            files.extend(glob.glob(file_or_dir))
        else:
            files.append(file_or_dir)
    return files
def _common_root(files):
files = [op.realpath(file) for file in files]
root = op.commonprefix(files)
if not op.exists(root):
root = op.dirname(root)
if root:
assert op.exists(root)
assert op.isdir(root), root
return root
def _construct_tree(path):
if not op.exists(path):
try:
os.makedirs(op.dirname(path))
except OSError:
pass
def _file_has_extension(file, extensions):
if not isinstance(extensions, list):
extensions = [extensions]
return any(file.endswith(extension) for extension in extensions)
def _filter_files_by_extension(files, extensions):
    """Keep only the files that match *extensions*."""
    kept = []
    for file in files:
        if _file_has_extension(file, extensions):
            kept.append(file)
    return kept
def _load_file(file, from_):
    """Read *file* with the reader registered for format *from_*."""
    return format_manager().load(file, name=from_)
def _save_file(file, to, contents, overwrite=False):
    """Write *contents* to *file* with the writer registered for format *to*."""
    format_manager().save(file, contents, name=to, overwrite=overwrite)
#------------------------------------------------------------------------------
# Conversion functions
#------------------------------------------------------------------------------
def _converted_filename(file, from_, to):
    """Replace *file*'s extension with the one registered for format *to*.

    *from_* is accepted for signature symmetry but is not used.
    """
    base, _old_ext = op.splitext(file)
    return base + format_manager().file_extension(to)
def convert_files(files_or_dirs,
                  overwrite=None,
                  from_=None,
                  to=None,
                  from_kwargs=None,
                  to_kwargs=None,
                  output_folder=None,
                  recursive=False,
                  simulate=False,
                  extension=None,
                  ):
    """Convert every matching file from format *from_* to format *to*.

    Directories and glob patterns in *files_or_dirs* are expanded first;
    only files carrying the *from_* format's extension are converted.
    With *output_folder*, the input tree structure (relative to the files'
    common root) is reproduced under it.  *simulate* prints what would be
    written without saving anything.
    """
    # Find all files.
    files = _expand_dirs_to_files(files_or_dirs, recursive=recursive)
    # Filter by from extension.
    from_extension = format_manager().file_extension(from_)
    files = _filter_files_by_extension(files, from_extension)
    # Get the common root of all files.
    if output_folder:
        output_folder = op.realpath(output_folder)
        # NOTE(review): raises IndexError when no file matched — confirm
        # whether an explicit error message would be preferable.
        root = _common_root(files) if len(files) > 1 else op.dirname(files[0])
    # Convert all files.
    for file in files:
        print("Converting {0:s}...".format(file), end=' ')
        converted = convert(file, from_, to,
                            from_kwargs=from_kwargs, to_kwargs=to_kwargs)
        file_to = _converted_filename(file, from_, to)
        if extension:
            # Caller-supplied extension overrides the format's default.
            file_to = op.splitext(file_to)[0] + '.' + extension
        print("done.")
        # Compute the output path.
        if output_folder:
            # Path relative to the common root.
            rel_file = op.relpath(file_to, root)
            # Reconstruct the internal folder structure within the output
            # folder.
            file_to = op.join(output_folder, rel_file)
            # Create the subfolders if necessary.
            _construct_tree(file_to)
        print(" Saving to {0:s}...".format(file_to), end=' ')
        if simulate:
            print("skipped (simulation).")
        else:
            _save_file(file_to, to, converted, overwrite=overwrite)
            print('done.')
def main():
    """CLI entry point: parse arguments and run convert_files()."""
    desc = 'Convert files across formats supported by ipymd.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('files_or_dirs', nargs='+',
                        help=('list of files or directories to convert'))
    formats = ', '.join(format_manager().formats)
    parser.add_argument('--from', dest='from_', required=True,
                        help='one of {0:s}'.format(formats))
    parser.add_argument('--to', dest='to', required=True,
                        help='one of {0:s}'.format(formats))
    parser.add_argument('--output', dest='output',
                        help='output folder')
    parser.add_argument('--extension', dest='extension',
                        help='output file extension')
    parser.add_argument('--overwrite', dest='overwrite', action='store_true',
                        help=('overwrite target file if it exists '
                              '(false by default)'))
    # Parse the CLI arguments.
    args = parser.parse_args()
    convert_files(args.files_or_dirs,
                  overwrite=args.overwrite,
                  from_=args.from_,
                  to=args.to,
                  extension=args.extension,
                  output_folder=args.output,
                  )
if __name__ == '__main__':
    main()
|
rossant/ipymd
|
ipymd/core/scripts.py
|
Python
|
bsd-3-clause
| 6,417
|
[
"VisIt"
] |
5b6ec1c9cc0abac873db0c929d593917b810496ad630a66026fa9ee443cfc727
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.aextras.about import AAbout
from agui.aextras.icon import AIcon
from agui.aextras.message import AMessage
from agui.aextras.popup import APopup
from agui.aextras.sound import ASound
from agui.aextras.timeout import ATimeout
|
bhdouglass/agui
|
agui/aextras/__init__.py
|
Python
|
gpl-3.0
| 997
|
[
"Brian"
] |
7a2ee2e7b6d96b34b801436d922eb3a85f13fc7f14c1653be5ed2e930a80a4b3
|
#!/usr/bin/env python3
from LoLIM.stationTimings.autoCorrelator3_fromLOC import save_EventByLoc
## these lines are anachronistic and should be fixed at some point
from LoLIM import utilities
utilities.default_raw_data_loc = "/exp_app2/appexp1/public/raw_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"
if __name__=="__main__":
    # Per-station timing calibration offsets relative to CS002.
    # NOTE(review): presumably in seconds, produced by an earlier
    # calibration run — confirm before reuse.
    station_delays = {
        'CS002': 0.0,
        'CS003': 1.40436380151e-06 ,
        'CS004': 4.31343360778e-07 ,
        'CS005': -2.18883924536e-07 ,
        'CS006': 4.33532992523e-07 ,
        'CS007': 3.99644095007e-07 ,
        'CS011': -5.85451477265e-07 ,
        'CS013': -1.81434735154e-06 ,
        'CS017': -8.4398374875e-06 ,
        'CS021': 9.23663075135e-07 ,
        'CS030': -2.74255354078e-06,
        'CS032': -1.57305580305e-06,
        'CS101': -8.17154277682e-06,
        'CS103': -2.85194082718e-05,
        'RS208': 6.97951240511e-06 ,
        'CS301': -7.15482701536e-07 ,
        'CS302': -5.35024064624e-06 ,
        'RS306': 7.04283154727e-06,
        'RS307': 6.96315727897e-06 ,
        'RS310': 7.04140267551e-06,
        'CS401': -9.5064990747e-07 ,
        'RS406': 6.96866309712e-06,
        'RS409': 7.02251772331e-06,
        'CS501': -9.61256584076e-06 ,
        'RS503': 6.93934919654e-06 ,
        'RS508': 6.98208245779e-06 ,
        'RS509': 7.01900854365e-06,
        }
    station_delays['CS002'] = 0.0 ## need to add reference station
    # XYZT: source position and time handed to the event extractor.
    # NOTE(review): presumably metres / seconds in the local LOFAR frame —
    # confirm against save_EventByLoc's documentation.
    save_EventByLoc(timeID = "D20170929T202255.000Z",
                    XYZT = [-16794.30127223 , 9498.38995127 , 3297.47036309, 1.2642410207364205 ],
                    station_timing_delays = station_delays,
                    pulse_index = 24,
                    output_folder = "callibrator_fromLOC_intOut4",
                    pulse_width=50,
                    min_ant_amp=5,
                    upsample_factor = 4,
                    polarization_flips="polarization_flips.txt",
                    bad_antennas="bad_antennas.txt",
                    additional_antenna_delays = "ant_delays.txt")
|
Bhare8972/LOFAR-LIM
|
LIM_scripts/stationTimings/examples/save_pulseByLoc.py
|
Python
|
mit
| 1,923
|
[
"Brian"
] |
121934297c8eaa27a00653ddb6adcc55c6e5a1a1dfec33432a17e8295bf58f2e
|
# author: brian dillmann
# for rscs
from context import AnalogInput
import unittest
import RPi.GPIO as GPIO
class test_analog_input(unittest.TestCase):
    """Exercises AnalogInput construction and reads.

    Requires a Raspberry Pi with the RPi.GPIO library; BCM pin numbering
    is used throughout.
    """

    def setUp(self):
        self.out_pin = 2
        GPIO.setmode(GPIO.BCM)
        # Silence the "channel already in use" warnings emitted when
        # setmode() is called repeatedly across tests.
        GPIO.setwarnings(False)

    def test_init(self):
        # A non-integer location must be rejected.
        with self.assertRaises(TypeError):
            analog_input = AnalogInput('i', 'a')
        with self.assertRaises(ValueError):
            # An out-of-range pin number must be rejected even though the
            # argument type is legal.
            analog_input = AnalogInput('i', 500)

    def test_works(self):
        analog_input = AnalogInput('i', 18)
        x = analog_input.read()
        # sanity check
        # Fix: the original used the Python-2-only statement "print x",
        # which is a syntax error on Python 3; the function form works on
        # both interpreters.
        print(x)
        self.assertNotEqual(0, x)
# Fix: the guard previously compared __name__ against 'main', which is never
# its value, so running this file directly executed no tests at all.
if __name__ == '__main__':
    unittest.main()
|
dillmann/rscs
|
test/devicetests/analog_input_test.py
|
Python
|
mit
| 714
|
[
"Brian"
] |
91cb159713634626f8720996e486cced340351b339ca5d4aa7ff0349a3779dc6
|
import logging
import logging.handlers
import sys
from octopus.lib.mail import send_mail
class TlsSMTPHandler(logging.Handler):
    """Logging handler that emails formatted records via octopus' send_mail.

    Despite the name, the SMTP/TLS transport details are delegated to
    send_mail(); mailhost/mailport/username/password are stored but not
    used directly by emit().  Python 2 only (uses ``basestring``).
    """

    def __init__(self, mailhost, mailport, fromaddr, toaddrs, subject, credentials):
        super(TlsSMTPHandler, self).__init__()
        self.mailhost = mailhost
        self.mailport = mailport
        # credentials must be an ('username', 'password') tuple.
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            raise ValueError("credentials must be a tuple: ('username', 'password')")
        self.fromaddr = fromaddr
        # Accept a single address or a list of addresses.
        if isinstance(toaddrs, basestring):
            self.toaddrs = [toaddrs]
        else:
            self.toaddrs = toaddrs
        if subject and isinstance(subject, basestring):
            self.subject = subject
        else:
            raise ValueError("subject can't be blank and must be a string")

    def emit(self, record):
        """
        Emit a record.
        Format the record and send it to the specified addressees.
        """
        try:
            msg = self.format(record)
            send_mail(to=self.toaddrs, subject=self.subject, template_name="emails/error_report.txt", error_report=msg)
        except Exception:
            # Delegate to the standard logging error hook rather than
            # letting a mail failure propagate into the application.
            self.handleError(record)
def setup_error_logging(app, email_subject, stdout_logging_level=logging.ERROR, email_logging_level=logging.ERROR):
    """Attach email and stderr error handlers to a Flask app's logger.

    Email reporting is enabled only when mail settings are configured and
    not suppressed; stderr output is always added (for supervisord capture).
    No-op when app.debug is True.
    """
    # Custom logging WILL BE IGNORED by Flask if app.debug == True -
    # even if you remove the condition below.
    if app.debug:
        return

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Recipients: explicit list, falling back to the admin address.
    send_to = app.config.get('ERROR_LOGGING_ADDRESSES', [app.config.get('ADMIN_EMAIL')])
    if send_to and not app.config.get('SUPPRESS_ERROR_EMAILS'):
        if 'MAIL_SERVER' in app.config and 'MAIL_PORT' in app.config and 'MAIL_USERNAME' in app.config and 'MAIL_PASSWORD' in app.config:
            import platform
            hostname = platform.uname()[1]
            mail_handler = TlsSMTPHandler(
                app.config['MAIL_SERVER'],
                app.config['MAIL_PORT'],
                'server-error@' + hostname,
                send_to,
                email_subject,
                credentials=(app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
            )
            mail_handler.setLevel(email_logging_level)
            mail_handler.setFormatter(formatter)
            app.logger.addHandler(mail_handler)

    # send errors to stderr, supervisord will capture them in the app's
    # error log
    send_errors_to_supervisor = logging.StreamHandler(sys.stderr)
    send_errors_to_supervisor.setLevel(stdout_logging_level)
    send_errors_to_supervisor.setFormatter(formatter)
    app.logger.addHandler(send_errors_to_supervisor)
|
JiscPER/magnificent-octopus
|
octopus/lib/error_handler.py
|
Python
|
apache-2.0
| 2,775
|
[
"Octopus"
] |
4ed5fe7e1d1012602cf137fe63911386a299a44e792c27369cc7934cc963f06e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import os
import unittest2 as unittest
import numpy as np
from pymatgen import Structure
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit import ETSF_Reader
try:
import netCDF4
except ImportError:
netCDF4 = None
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', "abinit")
def ref_file(filename):
    """Absolute path of *filename* inside the abinit test-files directory."""
    path = os.path.join(_test_dir, filename)
    return path
class ETSF_Reader_TestCase(PymatgenTest):
    """Reads a reference Si2 GSR netCDF file and checks dims/variables."""

    def setUp(self):
        # Map each known formula to its reference GSR file path.
        formulas = ["Si2",]
        self.GSR_paths = d = {}
        for formula in formulas:
            d[formula] = ref_file(formula + "_GSR.nc")

    @unittest.skipIf(netCDF4 is None, "Requires Netcdf4")
    def test_read_Si2(self):
        path = self.GSR_paths["Si2"]

        # Expected values taken from the reference Si2_GSR.nc file.
        ref_dims = {
            "number_of_states": np.reshape([15, 15], (1,2)),
        }
        ref_int_values = {
            "space_group": 227,
            "number_of_states": np.reshape([15, 15], (1,2)),
        }
        ref_float_values = {
            "etotal": -8.85911566912484,
            "primitive_vectors": np.reshape([0, 5.125, 5.125, 5.125, 0, 5.125,
                                             5.125, 5.125, 0], (3,3)),
        }

        with ETSF_Reader(path) as data:
            self.assertEqual(data.ngroups, 1)
            print(data.read_varnames())
            # Test dimensions.
            for dimname, int_ref in ref_dims.items():
                value = data.read_dimvalue(dimname)
                self.assert_equal(value, int_ref)
            # Test int variables
            for varname, int_ref in ref_int_values.items():
                value = data.read_value(varname)
                print(varname, value)
                self.assert_equal(value, int_ref)
            # Test float variables
            for varname, float_ref in ref_float_values.items():
                value = data.read_value(varname)
                print(varname, value)
                self.assert_almost_equal(value, float_ref)
            #assert 0
            # Reading non-existent variables or dims should raise
            # a subclass of NetcdReaderError
            with self.assertRaises(data.Error):
                data.read_value("foobar")
            with self.assertRaises(data.Error):
                data.read_dimvalue("foobar")
            # Unless default is given
            assert data.read_value("foobar", default=None) is None
            data.print_tree()
            for group in data.walk_tree():
                print("group: " + str(group))
            # Initialize pymatgen structure from GSR.
            structure = data.read_structure()
            self.assertTrue(isinstance(structure, Structure))
            # Read ixc.
            # TODO: Upgrade GSR file.
            #xc = data.read_abinit_xcfunc()
            #assert xc == "LDA"
|
aykol/pymatgen
|
pymatgen/io/abinit/tests/test_netcdf.py
|
Python
|
mit
| 3,021
|
[
"ABINIT",
"pymatgen"
] |
bc56630c698fa5b22745367f678a57decba38b22318a5f40d10483f81e78c653
|
"""`swordfish` is a Python tool to study the information yield of counting experiments.
Motivation
----------
With `swordfish` you can quickly and accurately forecast experimental
sensitivities without all the fuss with time-intensive Monte Carlos, mock data
generation and likelihood maximization.
With `swordfish` you can
- Calculate the expected upper limit or discovery reach of an instrument.
- Derive expected confidence contours for parameter reconstruction.
- Visualize confidence contours as well as the underlying information metric field.
- Calculate the *information flux*, an effective signal-to-noise ratio that
accounts for background systematics and component degeneracies.
- Calculate the Euclideanized signal which approximately maps the signal to
a new vector which can be used to calculate the Euclidean distance between points
A large range of experiments in particle physics and astronomy are
statistically described by a Poisson point process. The `swordfish` module
implements at its core a rather general version of a Poisson point process with
background uncertainties described by a Gaussian random field, and provides
easy access to its information geometrical properties. Based on this
information, a number of common and less common tasks can be performed.
Get started
-----------
Most of the functionality of `swordfish` is demonstrated in two jupyter
notebooks.
- [Equivalent counts method and Fisher Information Flux](https://github.com/cweniger/swordfish/tree/master/docs/jupyter/Examples_I.ipynb)
- [Confidence contours, streamline visualisation, and Euclideanized signal](https://github.com/cweniger/swordfish/tree/master/docs/jupyter/Examples_II.ipynb)
In addition we provide two physics examples from direct and indirect detection
- [CTA](https://github.com/cweniger/swordfish/blob/master/Examples/swordfish_ID.ipynb)
- [Xenon-1T](https://github.com/cweniger/swordfish/blob/master/Examples/swordfish_DD.ipynb)
Documentation
-------------
A full documentation of `swordfish` can be found on
[github.io](https://cweniger.github.io/swordfish). For extensive details about
Fisher forecasting with Poisson likelihoods, the effective counts method, the
definition of information flux and the treatment of background systematics see
[http://arxiv.org/abs/1704.05458](http://arxiv.org/abs/1704.05458) and
[http://arxiv.org/abs/1712.xxxxx](http://arxiv.org/abs/1712.xxxxx).
Installation
------------
`swordfish` has been tested with Python 2.7.13 and the packages
- `numpy 1.13.1`
- `scipy 0.19.0`
- `matplotlib 2.0.0`
Let us know if you run into problems.
`swordfish` can be installed by invoking
git clone https://github.com/cweniger/swordfish
cd swordfish
python setup.py install
Citation
--------
If you use the package, please cite one or both of the papers
[http://arxiv.org/abs/1712.xxxxx](http://arxiv.org/abs/1712.xxxxx)
and
[http://arxiv.org/abs/1704.05458](http://arxiv.org/abs/1704.05458).
"""
from swordfish.core import *
from swordfish.metricplot import *
from swordfish.Utils import *
# from swordfish.BkgComponent import *
__all__ = ["Swordfish", "Utils", "metricplot", "BkgComponent"]
|
cweniger/swordfish
|
swordfish/__init__.py
|
Python
|
mit
| 3,174
|
[
"Gaussian"
] |
c8059b7c2703d51e2f9c10fccabe3abc3aac029b56b40fe89eae5941ff85f99a
|
# Orca
#
# Copyright (C) 2013-2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013-2014 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import time
import orca.debug as debug
import orca.orca as orca
import orca.orca_state as orca_state
import orca.scripts.default as default
from .script_utilities import Utilities
class Script(default.Script):
    """Orca script tweaks for GAIL (GTK) applications.

    Works around GAIL/GTK quirks: typeahead search fields, menu-bar menus
    whose window activation is not reported, and unreliable focus events.
    """

    def __init__(self, app):
        default.Script.__init__(self, app)

    def getUtilities(self):
        # GAIL-specific utilities (typeahead detection, etc.).
        return Utilities(self)

    def locusOfFocusChanged(self, event, oldFocus, newFocus):
        """Handles changes of focus of interest to the script."""

        # When focus lands in an open menu-bar menu, GAIL may not have
        # reported the window activation; record it here so window context
        # stays correct.
        if self.utilities.isInOpenMenuBarMenu(newFocus):
            window = self.utilities.topLevelObject(newFocus)
            windowChanged = window and orca_state.activeWindow != window
            if windowChanged:
                orca_state.activeWindow = window
                self.windowActivateTime = time.time()

        super().locusOfFocusChanged(event, oldFocus, newFocus)

    def onActiveDescendantChanged(self, event):
        """Callback for object:active-descendant-changed accessibility events."""

        if not self.utilities.isTypeahead(orca_state.locusOfFocus):
            super().onActiveDescendantChanged(event)
            return

        # While in a typeahead search box, present the changed descendant
        # directly instead of the default handling.
        msg = "GAIL: locusOfFocus believed to be typeahead. Presenting change."
        debug.println(debug.LEVEL_INFO, msg, True)
        self.presentObject(event.any_data, interrupt=True)

    def onFocus(self, event):
        """Callback for focus: accessibility events."""

        # NOTE: This event type is deprecated and Orca should no longer use it.
        # This callback remains just to handle bugs in applications and toolkits
        # that fail to reliably emit object:state-changed:focused events.

        if self.utilities.isLayoutOnly(event.source):
            return

        if self.utilities.isTypeahead(orca_state.locusOfFocus) \
           and "Table" in pyatspi.listInterfaces(event.source) \
           and not event.source.getState().contains(pyatspi.STATE_FOCUSED):
            return

        ancestor = pyatspi.findAncestor(orca_state.locusOfFocus, lambda x: x == event.source)
        if not ancestor:
            orca.setLocusOfFocus(event, event.source)
            return

        # Focus reported on an ancestor table of the current focus is noise.
        if ancestor and "Table" in pyatspi.listInterfaces(ancestor):
            return

        # Ignore focus on a top-level menu when we are already inside it.
        isMenu = lambda x: x and x.getRole() == pyatspi.ROLE_MENU
        if isMenu(ancestor) and not pyatspi.findAncestor(ancestor, isMenu):
            return

        orca.setLocusOfFocus(event, event.source)

    def onSelectionChanged(self, event):
        """Callback for object:selection-changed accessibility events."""

        isFocused = event.source.getState().contains(pyatspi.STATE_FOCUSED)
        role = event.source.getRole()
        # Selection changes driven by typeahead search: present the newly
        # selected (non-layout) children ourselves.
        if not isFocused and self.utilities.isTypeahead(orca_state.locusOfFocus):
            msg = "GAIL: locusOfFocus believed to be typeahead. Presenting change."
            debug.println(debug.LEVEL_INFO, msg, True)
            selectedChildren = self.utilities.selectedChildren(event.source)
            for child in selectedChildren:
                if not self.utilities.isLayoutOnly(child):
                    self.presentObject(child)
            return

        # Multi-select layered panes (e.g. desktop icons) are too noisy.
        if role == pyatspi.ROLE_LAYERED_PANE \
           and self.utilities.selectedChildCount(event.source) > 1:
            return

        super().onSelectionChanged(event)

    def onTextSelectionChanged(self, event):
        """Callback for object:text-selection-changed accessibility events."""

        obj = event.source
        # Only present selection changes in the object that has focus.
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return

        default.Script.onTextSelectionChanged(self, event)
|
GNOME/orca
|
src/orca/scripts/toolkits/GAIL/script.py
|
Python
|
lgpl-2.1
| 4,622
|
[
"ORCA"
] |
fd9c1b3efae23b2407556db981dacb27c98246cced26926a91198300bb28afed
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
# Optional CLI argument: the driver directory; paths below default to
# being relative to the current working directory.
DriverPath = ''
InsertPath = '/../../../'  # NOTE(review): appears unused in this script — confirm
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
    """Log which file is currently being auto-documented."""
    message = 'Auto-documenting %s file %s' % (category, pyfile)
    print(message)
def extract_xyz(pyfile):
    """Parse an EFP fragment file into XYZ-style geometry strings.

    Returns (one_line_xyz, multi_line_xyz, comment): the first uses literal
    "\\n" separators (ChemDoodle.readXYZ input), the second real newlines.
    Coordinates are converted from Bohr to Angstroms; the comment is
    everything between the first line and the COORDINATES block.
    """
    b2a = 0.52917720859  # Bohr -> Angstrom
    re_coord_header = re.compile(r"\s*COORDINATES\s+\(BOHR\)\s*")
    re_atom = re.compile(r"^\s*A\d*([A-Z]{1,2})\d*")

    with open(pyfile, 'r') as handle:
        contents = handle.readlines()

    comment = ''
    atoms = []
    past_comment = False
    for idx, line in enumerate(contents):
        # The COORDINATES header terminates the comment section (the
        # header line itself is excluded from the comment).
        if re_coord_header.search(line):
            past_comment = True
        if idx > 0 and not past_comment:
            comment += ' ' + line
        if 'STOP' in line:
            break
        match = re_atom.search(line)
        if match:
            parts = line.split()
            atoms.append([match.group(1),
                          float(parts[1]) * b2a,
                          float(parts[2]) * b2a,
                          float(parts[3]) * b2a])

    text1line = str(len(atoms)) + r"""\ncomment\n"""
    textMline = ''
    for atom in atoms:
        text1line += r"""%-6s %12.6f %12.6f %12.6f\n""" % (atom[0], atom[1], atom[2], atom[3])
        textMline += """ %-6s %12.6f %12.6f %12.6f\n""" % (atom[0], atom[1], atom[2], atom[3])
    return text1line, textMline, comment
chemdoodle = r"""
.. raw:: html
<meta http-equiv="X-UA-Compatible" content="chrome=1">
<link rel="stylesheet" href="http://hub.chemdoodle.com/cwc/latest/ChemDoodleWeb.css" type="text/css">
<script type="text/javascript" src="http://hub.chemdoodle.com/cwc/latest/ChemDoodleWeb-libs.js"></script>
<script type="text/javascript" src="http://hub.chemdoodle.com/cwc/latest/ChemDoodleWeb.js"></script>
<script>
ChemDoodle.default_atoms_useJMOLColors = true;
ChemDoodle.default_atoms_circles_2D = true;
ChemDoodle.default_atoms_circleDiameter_2D = 0.7;
ChemDoodle.default_atoms_circleBorderWidth_2D = 0.05;
ChemDoodle.default_bonds_width_2D = 0.10;
ChemDoodle.default_shapes_lineWidth_2D = 0.1;
ChemDoodle.default_shapes_arrowLength_2D = 0.07;
</script>
"""
def canvas(fragname, molxyz):
    """Return an rST raw-html snippet embedding a rotatable ChemDoodle canvas.

    molxyz:   XYZ geometry string with literal "\\n" separators, as produced
              by extract_xyz()[0] (input to ChemDoodle.readXYZ).
    fragname: fragment basename; the canvas element id becomes "<fragname>_l".
    """
    text = r"""
.. raw:: html

    <center>
    <script>
        // Molecule from XYZ file
        var mol = ChemDoodle.readXYZ('%s')

        // the Canvas
        var qq = new ChemDoodle.TransformCanvas('%s_l', 400, 300, true);
        qq.loadContent([mol]);
        qq.specs.scale = 25;
        qq.repaint();
    </script>
    <center>
    <div style="font-size:12px;">
        <strong>rotate</strong>: click+drag<br>
        <strong>translate</strong>: alt+click+drag<br>
        <strong>zoom</strong>: scroll
    </div>
    Visualization by <a href="http://web.chemdoodle.com" target="_blank">ChemDoodle Web</a>
    </center>

""" % (molxyz, fragname)
    return text
# Available fragments in psi4/share/psi4/efpfrag
# Generate one rST section per .efp fragment file: a ChemDoodle canvas,
# the fragment's comment block, and the full geometry in Angstroms.
fdriver = open('source/autodoc_available_efpfrag.rst', 'w')
fdriver.write('\n\n')
fdriver.write(chemdoodle)
fdriver.write('\n\n')

for pyfile in glob.glob(DriverPath + '../../psi4/share/psi4/efpfrag/*.efp'):
    filename = os.path.split(pyfile)[1]
    basename = os.path.splitext(filename)[0]
    div = '=' * len(basename)

    # The empty exclusion list is a placeholder for fragments to skip.
    if basename not in []:
        pts('efp fragment', basename)

        fdriver.write(':srcefpfrag:`%s`\n%s\n\n' % (basename, '"' * (14 + len(basename))))
        molstr, molMstr, comment = extract_xyz(pyfile)
        fdriver.write(canvas(basename, molstr))
        fdriver.write('\n\nComment ::\n\n%s\n\n' % (comment))
        fdriver.write('\n\nFull Geometry in Angstroms ::\n\n%s\n\n' % (molMstr))
        fdriver.write('----\n')
    fdriver.write('\n')
fdriver.close()
#// create a synthetic arrow
#//var origin = mol.getCenter()
#//var ptX = new ChemDoodle.structures.Point(0.0, 0.1);
#//var axisX = new ChemDoodle.structures.d2.Line(origin, ptX);
#//axisX.arrowType = ChemDoodle.structures.d2.Line.ARROW_SYNTHETIC;
|
susilehtola/psi4
|
doc/sphinxman/document_efpfrag.py
|
Python
|
lgpl-3.0
| 5,033
|
[
"Psi4"
] |
c8e817613b5799a5aee9bea073c03237b2689a77699219d4a59409d6146d853a
|
""" ProxyProvider implementation for the proxy generation using local (DIRAC) CA credentials
This class is a simple, limited CA, its main purpose is to generate a simple proxy for DIRAC users
who do not have any certificate register on the fly.
Required parameters in the DIRAC configuration for its implementation:
.. literalinclude:: /dirac.cfg
:start-after: ## DIRACCA type:
:end-before: ##
:dedent: 2
:caption: /Resources/ProxyProviders section
Also, as an additional feature, this class can read properties from a simple openssl CA configuration file.
To do this, just set the path to an existing configuration file as a CAConfigFile parameter. In this case,
the distinguished names order in the created proxy will be the same as in the configuration file policy block.
The Proxy provider supports the following distinguished names
(https://www.cryptosys.net/pki/manpki/pki_distnames.html)::
SN(surname)
GN(givenName)
C(countryName)
CN(commonName)
L(localityName)
Email(emailAddress)
O(organizationName)
OU(organizationUnitName)
SP,ST(stateOrProvinceName)
SERIALNUMBER(serialNumber)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import re
import time
import random
import datetime
import collections
from M2Crypto import m2, util, X509, ASN1, EVP, RSA
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.Resources.ProxyProvider.ProxyProvider import ProxyProvider
class DIRACCAProxyProvider(ProxyProvider):
  """Proxy provider that acts as a simple, limited CA using local (DIRAC) CA credentials.

  Generates a user certificate signed by the configured CA and wraps it into an
  RFC proxy. The DN policy (supplied/optional/match fields, DN order, value
  length limits) can come either from the provider parameters or from an
  openssl-style CA configuration file (``CAConfigFile`` parameter).
  """

  def __init__(self, parameters=None):
    """ Constructor

    :param dict parameters: provider parameters
    """
    super(DIRACCAProxyProvider, self).__init__(parameters)
    self.log = gLogger.getSubLogger(__name__)
    # Initialize defaults; may be overwritten by setParameters()/__parseCACFG()
    self.maxDict = {}  # nid -> list of maximum value lengths (strings, from CA config)
    self.minDict = {}  # nid -> list of minimum value lengths (strings, from CA config)
    self.bits = 2048  # RSA key size
    self.algoritm = 'sha256'  # signing digest (attribute name kept for backward compatibility)
    self.match = []  # nids whose values must match the CA subject
    self.supplied = ['CN']  # fields that must be present in the user DN
    self.optional = ['C', 'O', 'OU', 'emailAddress']  # fields that may be present
    self.dnList = ['C', 'O', 'OU', 'CN', 'emailAddress']  # DN field order
    # Distinguished names: map field name/alias -> OpenSSL nid
    self.fields2nid = X509.X509_Name.nid.copy()
    self.fields2nid['DC'] = -1  # add DN that is not listed in X509.X509_Name
    self.fields2nid['domainComponent'] = -1  # add DN description that is not listed in X509.X509_Name
    self.fields2nid['organizationalUnitName'] = 18  # add 'OU' description
    self.fields2nid['countryName'] = 14  # add 'C' description
    self.fields2nid['SERIALNUMBER'] = 105  # add 'SERIALNUMBER' distinguished name
    self.nid2fields = {}  # nid -> list of distinguished name aliases
    # Build the reverse mapping for the standard fields
    for field, nid in self.fields2nid.items():
      self.nid2fields.setdefault(nid, []).append(field)
    self.dnInfoDictCA = {}  # parsed CA subject DN, filled in setParameters()

  def setParameters(self, parameters):
    """ Set new parameters

    :param dict parameters: provider parameters

    :return: S_OK()/S_ERROR()
    """
    # Comma-separated strings become lists for the policy/DN parameters
    for k, v in parameters.items():
      if not isinstance(v, list) and k in ['Match', 'Supplied', 'Optional', 'DNOrder'] + list(self.fields2nid):
        parameters[k] = v.replace(', ', ',').split(',')
    self.parameters = parameters
    # If CA configuration file exist, it takes precedence for the policy
    if parameters.get('CAConfigFile'):
      self.__parseCACFG()
    if 'Bits' in parameters:
      self.bits = int(parameters['Bits'])
    if 'Algoritm' in parameters:
      self.algoritm = parameters['Algoritm']
    if 'Match' in parameters:
      self.match = [self.fields2nid[f] for f in parameters['Match']]
    if 'Supplied' in parameters:
      self.supplied = [self.fields2nid[f] for f in parameters['Supplied']]
    if 'Optional' in parameters:
      self.optional = [self.fields2nid[f] for f in parameters['Optional']]
    allFields = self.optional + self.supplied + self.match
    if 'DNOrder' in parameters:
      self.dnList = []
      # NOTE(review): any/any only verifies that at least one configured field
      # appears in DNOrder although the message says "all" -- confirm intent.
      if not any([any([f in parameters['DNOrder'] for f in self.nid2fields[n]]) for n in allFields]):
        return S_ERROR('DNOrder must contain all configured fields.')
      for field in parameters['DNOrder']:
        if self.fields2nid[field] in allFields:
          self.dnList.append(field)
    # Set defaults for distinguished names
    self.nid2defField = {}
    for field, value in list(self.parameters.items()):
      if field in self.fields2nid and self.fields2nid[field] in allFields:
        self.parameters[self.fields2nid[field]] = value
        self.nid2defField[self.fields2nid[field]] = field
    # Read CA certificate subject (needed to validate 'match' fields)
    chain = X509Chain()
    result = chain.loadChainFromFile(self.parameters['CertFile'])
    if result['OK']:
      result = chain.getCredentials()
    if result['OK']:
      result = self.__parseDN(result['Value']['subject'])
    if not result['OK']:
      return result
    self.dnInfoDictCA = result['Value']
    return S_OK()

  def checkStatus(self, userDN):
    """ Read ready to work status of proxy provider

    Validates the user DN against the configured policy (field presence,
    order, match-with-CA values and value lengths) and, as a side effect,
    fills the X509 name object prepared by the caller (getProxy/generateDN).

    :param str userDN: user DN

    :return: S_OK()/S_ERROR()
    """
    # BUGFIX: message typo 'Ckecking' corrected
    self.log.debug('Checking work status of', self.parameters['ProviderName'])
    result = self.__parseDN(userDN)
    if not result['OK']:
      return result
    dnInfoDict = result['Value']
    # Convert user DN fields to nids so that field aliases compare equal
    try:
      userNIDs = [self.fields2nid[f.split('=')[0]] for f in userDN.lstrip('/').split('/')]
    except (ValueError, KeyError) as e:
      return S_ERROR('Unknown DN field in used DN: %s' % e)
    nidOrder = [self.fields2nid[f] for f in self.dnList]
    for index, nid in enumerate(userNIDs):
      if nid not in nidOrder:
        return S_ERROR('"%s" field not found in order.' %
                       self.nid2defField.get(nid, min(self.nid2fields[nid], key=len)))
      if index > nidOrder.index(nid):
        return S_ERROR('Bad DNs order')
      # Fields configured before this one must not appear after it
      for i in range(nidOrder.index(nid) - 1):
        try:
          if userNIDs.index(nidOrder[i]) > index:
            return S_ERROR('Bad DNs order')
        except (ValueError, KeyError):
          continue
      # Fields configured after this one must not appear before it
      for i in range(nidOrder.index(nid) + 1, len(nidOrder)):
        try:
          if userNIDs.index(nidOrder[i]) < index:
            return S_ERROR('Bad DNs order')
        except (ValueError, KeyError):
          continue
    # All 'supplied' fields are mandatory
    for nid in self.supplied:
      if nid not in [self.fields2nid[f] for f in dnInfoDict]:
        return S_ERROR('Current DN is invalid, "%s" field must be set.' %
                       self.nid2defField.get(nid, min(self.nid2fields[nid], key=len)))
    for field, values in dnInfoDict.items():
      nid = self.fields2nid[field]
      err = 'Current DN is invalid, "%s" field' % field
      if nid not in self.supplied + self.match + self.optional:
        return S_ERROR('%s is not found for current CA.' % err)
      # 'match' fields must carry exactly the same values as the CA subject
      if nid in self.match and not self.dnInfoDictCA[field] == values:
        # BUGFIX: was '.joing', an AttributeError when building this message
        return S_ERROR('%s must be /%s=%s.' % (err, field,
                                               ('/%s=' % field).join(self.dnInfoDictCA[field])))
      if nid in self.maxDict:
        rangeMax = list(range(min(len(values), len(self.maxDict[nid]))))
        # BUGFIX: limits are read from the CA config as strings, cast to int for comparison
        if any([len(values[i]) > int(self.maxDict[nid][i]) for i in rangeMax]):
          return S_ERROR('%s values must be less than %s.' % (err, ', '.join(self.maxDict[nid])))
      if nid in self.minDict:
        rangeMin = list(range(min(len(values), len(self.minDict[nid]))))
        if any([len(values[i]) < int(self.minDict[nid][i]) for i in rangeMin]):
          return S_ERROR('%s values must be more than %s.' % (err, ', '.join(self.minDict[nid])))
      # Fill the X509 name object prepared by the caller
      result = self.__fillX509Name(field, values)
      if not result['OK']:
        return result
    return S_OK()

  def getProxy(self, userDN):
    """ Generate user proxy

    :param str userDN: user DN

    :return: S_OK(str)/S_ERROR() -- contain a proxy string
    """
    self.__X509Name = X509.X509_Name()
    # checkStatus() validates the DN and fills self.__X509Name
    result = self.checkStatus(userDN)
    if result['OK']:
      result = self.__createCertM2Crypto()
      if result['OK']:
        certStr, keyStr = result['Value']
        chain = X509Chain()
        result = chain.loadChainFromString(certStr)
        if result['OK']:
          result = chain.loadKeyFromString(keyStr)
          if result['OK']:
            # One year proxy lifetime, RFC-compliant proxy
            result = chain.generateProxyToString(365 * 24 * 3600, rfc=True)
    return result

  def generateDN(self, **kwargs):
    """ Get DN of the user certificate that will be created

    :param dict kwargs: user description dictionary with possible fields:
           - FullName or CN
           - Email or emailAddress

    :return: S_OK(str)/S_ERROR() -- contain DN
    """
    if kwargs.get('FullName'):
      kwargs['CN'] = [kwargs['FullName']]
    if kwargs.get('Email'):
      kwargs['emailAddress'] = [kwargs['Email']]
    self.__X509Name = X509.X509_Name()
    self.log.info('Creating distinguished names chain')
    for nid in self.supplied:
      if nid not in [self.fields2nid[f] for f in self.dnList]:
        return S_ERROR('DNs order list does not contain supplied DN "%s"' %
                       self.nid2defField.get(nid, min(self.nid2fields[nid], key=len)))
    for field in self.dnList:
      values = []
      nid = self.fields2nid[field]
      # NOTE(review): the inner loops reuse the name 'field', so the alias
      # passed to __fillX509Name below may be the last alias of the nid rather
      # than the dnList entry; the nid-based setter path is unaffected.
      if nid in self.match:
        # 'match' fields are copied from the CA subject
        for field in self.nid2fields[nid]:
          if field in self.dnInfoDictCA:
            values = self.dnInfoDictCA[field]
        if not values:
          return S_ERROR('Not found "%s" match DN in CA' % field)
      for field in self.nid2fields[nid]:
        if kwargs.get(field):
          values = kwargs[field] if isinstance(kwargs[field], list) else [kwargs[field]]
      if not values and nid in self.supplied:
        # Search default value
        if nid not in self.nid2defField:
          return S_ERROR('No values set for "%s" DN' % min(self.nid2fields[nid], key=len))
        values = self.parameters[nid]
      result = self.__fillX509Name(field, values)
      if not result['OK']:
        return result
    # WARN: this logic does not support a list of distinguished name elements
    resDN = m2.x509_name_oneline(self.__X509Name.x509_name)  # pylint: disable=no-member
    result = self.checkStatus(resDN)
    if not result['OK']:
      return result
    return S_OK(resDN)

  def __parseCACFG(self):
    """ Parse CA configuration file pointed to by the 'CAConfigFile' parameter.

    Fills the policy lists (supplied/optional/match), the DN order, key size,
    digest algorithm, CA certificate/key locations and the per-field
    default/min/max settings.
    """
    block = ''
    self.cfg = {}
    self.supplied, self.optional, self.match, self.dnList = [], [], [], []
    with open(self.parameters['CAConfigFile'], "r") as caCFG:
      for line in caCFG:
        # Ignore comments
        line = re.sub(r'#.*', '', line)
        # Section header, e.g. "[ req ]"
        if re.findall(r"\[([A-Za-z0-9_]+)\]", line.replace(' ', '')):
          block = ''.join(re.findall(r"\[([A-Za-z0-9_]+)\]", line.replace(' ', '')))
          if block not in self.cfg:
            self.cfg[block] = {}
        if not block:
          continue
        if len(re.findall('=', line)) == 1:
          field, val = line.split('=')
          field = field.strip()
          # Substitute "$variable" references with already parsed values
          variables = re.findall(r'[$]([A-Za-z0-9_]+)', val)
          for v in variables:
            for b in self.cfg:
              if v in self.cfg[b]:
                val = val.replace('$' + v, self.cfg[b][v])
          # Remember the DN order as found in the policy section
          if 'default_ca' in self.cfg.get('ca', {}):
            if 'policy' in self.cfg.get(self.cfg['ca']['default_ca'], {}):
              if block == self.cfg[self.cfg['ca']['default_ca']]['policy']:
                self.dnList.append(field)
          self.cfg[block][field] = val.strip()
    self.bits = int(self.cfg['req'].get('default_bits') or self.bits)
    self.algoritm = self.cfg[self.cfg['ca']['default_ca']].get('default_md') or self.algoritm
    if not self.parameters.get('CertFile'):
      self.parameters['CertFile'] = self.cfg[self.cfg['ca']['default_ca']]['certificate']
      self.parameters['KeyFile'] = self.cfg[self.cfg['ca']['default_ca']]['private_key']
    # Read distinguished names policy.
    # BUGFIX: 'distinguished_name' in the [req] section holds the *name* of the
    # section with the field settings; look the section up in self.cfg instead
    # of indexing the name string itself.
    dnSection = self.cfg.get(self.cfg['req'].get('distinguished_name'), {})
    for k, v in self.cfg[self.cfg[self.cfg['ca']['default_ca']]['policy']].items():
      nid = self.fields2nid[k]
      self.parameters[nid], self.minDict[nid], self.maxDict[nid] = [], [], []
      # openssl allows numbered variants like "0.CN", "1.CN", ...
      for key in ['%s.%s' % (i, k) for i in range(0, 5)] + [k]:
        if key + '_default' in dnSection:
          self.parameters[nid].append(dnSection[key + '_default'])
        if key + '_min' in dnSection:
          self.minDict[nid].append(dnSection[key + '_min'])
        if key + '_max' in dnSection:
          self.maxDict[nid].append(dnSection[key + '_max'])
      if v == 'supplied':
        self.supplied.append(nid)
      elif v == 'optional':
        self.optional.append(nid)
      elif v == 'match':
        self.match.append(nid)

  def __parseDN(self, dn):
    """ Parse a DN string into an ordered mapping of its fields

    :param str dn: DN, e.g. "/C=FR/O=DIRAC/CN=user"

    :return: S_OK(OrderedDict)/S_ERROR() -- field name -> list of values in DN order
    """
    dnInfoDict = collections.OrderedDict()
    for f, v in [f.split('=') for f in dn.lstrip('/').split('/')]:
      if not v:
        return S_ERROR('No value set for "%s"' % f)
      dnInfoDict.setdefault(f, []).append(v)
    return S_OK(dnInfoDict)

  def __fillX509Name(self, field, values):
    """ Fill x509_Name object by M2Crypto

    :param str field: DN field name
    :param list values: values of field, order important

    :return: S_OK()/S_ERROR()
    """
    for value in values:
      # Try the nid-based setter first; fall back to adding the entry by its
      # textual field name when the nid setter fails
      if value and m2.x509_name_set_by_nid(self.__X509Name.x509_name,  # pylint: disable=no-member
                                           self.fields2nid[field], value.encode()) == 0:
        if not self.__X509Name.add_entry_by_txt(field=field, type=ASN1.MBSTRING_ASC,
                                                entry=value, len=-1, loc=-1, set=0) == 1:
          return S_ERROR('Cannot set "%s" field.' % field)
    return S_OK()

  def __createCertM2Crypto(self):
    """ Create new certificate for user

    :return: S_OK(tuple)/S_ERROR() -- tuple contain certificate and public key as strings
    """
    # Create public key
    userPubKey = EVP.PKey()
    userPubKey.assign_rsa(RSA.gen_key(self.bits, 65537, util.quiet_genparam_callback))
    # Create certificate
    userCert = X509.X509()
    userCert.set_pubkey(userPubKey)
    userCert.set_version(2)  # X509 v3
    userCert.set_subject(self.__X509Name)
    userCert.set_serial_number(int(random.random() * 10 ** 10))
    # Add extensions: a non-CA, client-auth-only certificate
    userCert.add_ext(X509.new_extension('basicConstraints', 'CA:' + str(False).upper()))
    userCert.add_ext(X509.new_extension('extendedKeyUsage', 'clientAuth', critical=1))
    # Set lifetime
    validityTime = datetime.timedelta(days=400)
    notBefore = ASN1.ASN1_UTCTIME()
    notBefore.set_time(int(time.time()))
    notAfter = ASN1.ASN1_UTCTIME()
    notAfter.set_time(int(time.time()) + int(validityTime.total_seconds()))
    userCert.set_not_before(notBefore)
    userCert.set_not_after(notAfter)
    # Add subject from CA
    with open(self.parameters['CertFile']) as cf:
      caCertStr = cf.read()
    caCert = X509.load_cert_string(caCertStr)
    userCert.set_issuer(caCert.get_subject())
    # Use CA key
    with open(self.parameters['KeyFile'], "rb") as cf:
      caKeyStr = cf.read()
    pkey = EVP.PKey()
    pkey.assign_rsa(RSA.load_key_string(caKeyStr, callback=util.no_passphrase_callback))
    # Sign
    userCert.sign(pkey, self.algoritm)
    userCertStr = userCert.as_pem()
    userPubKeyStr = userPubKey.as_pem(cipher=None, callback=util.no_passphrase_callback)
    return S_OK((userCertStr, userPubKeyStr))

  def _forceGenerateProxyForDN(self, dn, time, group=None):
    """ An additional helper method for creating a proxy without any substantial validation,
        it can be used for a specific case(such as testing) where just need to generate a proxy
        with specific DN on the fly.

    :param str dn: requested proxy DN
    :param int time: expired time in a seconds
    :param str group: if need to add DIRAC group

    :return: S_OK(tuple)/S_ERROR() -- contain proxy as chain and as string
    """
    self.__X509Name = X509.X509_Name()
    result = self.__parseDN(dn)
    if not result['OK']:
      return result
    dnInfoDict = result['Value']
    # No policy checks here: copy the DN fields verbatim
    for field, values in dnInfoDict.items():
      result = self.__fillX509Name(field, values)
      if not result['OK']:
        return result
    result = self.__createCertM2Crypto()
    if result['OK']:
      certStr, keyStr = result['Value']
      chain = X509Chain()
      if chain.loadChainFromString(certStr)['OK'] and chain.loadKeyFromString(keyStr)['OK']:
        result = chain.generateProxyToString(time, rfc=True, diracGroup=group)
    if not result['OK']:
      return result
    chain = X509Chain()
    chain.loadProxyFromString(result['Value'])
    return S_OK((chain, result['Value']))
|
yujikato/DIRAC
|
src/DIRAC/Resources/ProxyProvider/DIRACCAProxyProvider.py
|
Python
|
gpl-3.0
| 16,966
|
[
"DIRAC"
] |
e74a6ca0455409a14f221e02205e78112cc3b7b6bc9d67d73398b9dc573a0e82
|
import numpy as np
from scipy.optimize import leastsq
import pylab as pl
def get_orbitals(calc):
    """Return the LCAO orbitals of *calc* on the 3D grid (via lcao_to_grid)."""
    from gpaw.lfc import BasisFunctions
    # One list of basis functions per atom
    basis_per_atom = [setup.phit_j for setup in calc.wfs.setups]
    bfs = BasisFunctions(calc.wfs.gd, basis_per_atom, calc.wfs.kpt_comm, cut=True)
    bfs.set_positions(calc.atoms.get_scaled_positions())
    n_bands = calc.get_number_of_bands()
    orb_MG = calc.wfs.gd.zeros(n_bands)
    # Identity coefficients: one grid function per basis function
    bfs.lcao_to_grid(np.identity(n_bands), orb_MG, q=-1)
    return orb_MG
def find_peaks(x, y, threshold=None):
    """ Find local maxima of a sampled curve.

    A point is a peak when it is strictly greater than both of its
    neighbours and lies inside the threshold window.

    Usage:
        threshold = (xmin, xmax, ymin, ymax)

    Parameters:

    x, y: ndarray
        One-dimensional samples of the curve, equal length
    threshold: None, scalar or tuple
        Window (xmin, xmax, ymin, ymax); missing trailing entries are
        filled from the data range, a bare scalar is taken as xmin

    Returns:

    peakarray: ndarray
        Array of shape (npeak, 2) whose rows are (x_peak, y_peak)
    """
    assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray)
    assert x.ndim == 1 and y.ndim == 1
    assert x.shape[0] == y.shape[0]
    if threshold is None:
        threshold = (x.min(), x.max(), y.min(), y.max())
    if not isinstance(threshold, tuple):
        threshold = (threshold, )
    # Pad missing window entries with the data range
    if len(threshold) == 1:
        threshold += (x.max(), y.min(), y.max())
    elif len(threshold) == 2:
        threshold += (y.min(), y.max())
    elif len(threshold) == 3:
        threshold += (y.max(),)
    xmin, xmax, ymin, ymax = threshold[:4]
    peaks = []
    for i in range(1, x.shape[0] - 1):
        in_window = (xmin <= x[i] <= xmax) and (ymin <= y[i] <= ymax)
        if in_window and y[i] > y[i - 1] and y[i] > y[i + 1]:
            peaks.append((x[i], y[i]))
    # Always return a float array of shape (npeak, 2), even when empty
    return np.array(peaks, dtype=float).reshape(len(peaks), 2)
def lorz_fit(x, y, npeak=1, initpara=None):
    """ Fit a curve with one or two Lorentzian functions.

    Note: currently only valid for one and two lorentzian

    The lorentzian function is defined as::

                         A w
        lorz =  --------------------- + y0
                 (x-x0)**2 + w**2

    where A is the peak amplitude, w is the width, (x0,y0) the peak position

    Parameters:

    x, y: ndarray
        Input data for analyze
    npeak: int
        Number of Lorentzian components (1 or 2)
    initpara: ndarray or None
        Initial guess [A, x0, y0, w] (repeated per peak); a built-in
        default guess is used when None

    Returns:

    yfit: ndarray
        Fitted curve evaluated on x
    p: ndarray
        Optimized parameters [A, x0, y0, w] (repeated per peak)
    """

    def lorz(x, p, npeak):
        # Evaluate the (sum of the) Lorentzian component(s)
        if npeak == 1:
            return p[0] * p[3] / ((x - p[1]) ** 2 + p[3] ** 2) + p[2]
        if npeak == 2:
            return (p[0] * p[3] / ((x - p[1]) ** 2 + p[3] ** 2) + p[2]
                    + p[4] * p[7] / ((x - p[5]) ** 2 + p[7] ** 2) + p[6])
        raise ValueError('Larger than 2 peaks not supported yet!')

    def residual(p, x, y):
        return y - lorz(x, p, npeak)

    if initpara is None:
        # BUGFIX: the original assigned to initpara[i] with initpara=None and
        # an undefined index i, raising before the fit could start.
        if npeak == 1:
            initpara = np.array([1., 0., 0., 0.1])
        elif npeak == 2:
            initpara = np.array([1., 0., 0., 0.1,
                                 3., 0., 0., 0.1])
        else:
            raise ValueError('Larger than 2 peaks not supported yet!')
    p0 = initpara
    popt = leastsq(residual, p0, args=(x, y), maxfev=2000)[0]
    return lorz(x, popt, npeak), popt
def plot_setfont():
    """Apply a uniform 18pt LaTeX font configuration to matplotlib."""
    size = 18
    params = {key: size for key in ('axes.labelsize',
                                    'text.fontsize',
                                    'legend.fontsize',
                                    'xtick.labelsize',
                                    'ytick.labelsize')}
    params['text.usetex'] = True
    # 'figure.figsize': fig_size}
    pl.rcParams.update(params)
def plot_setticks(x=True, y=True):
    """Enable minor ticks at 1/5 of the major spacing on the requested axes."""

    def _five_minors_per_major(axis):
        # Auto-place majors, then put a minor tick every fifth of their spacing
        axis.set_major_locator(pl.AutoLocator())
        majors = axis.get_majorticklocs()
        step = (majors[-1] - majors[0]) / (len(majors) - 1) / 5.
        axis.set_minor_locator(pl.MultipleLocator(step))

    pl.minorticks_on()
    ax = pl.gca()
    if x:
        _five_minors_per_major(ax.xaxis)
    else:
        pl.minorticks_off()
    if y:
        _five_minors_per_major(ax.yaxis)
    else:
        pl.minorticks_off()
|
qsnake/gpaw
|
gpaw/response/tool.py
|
Python
|
gpl-3.0
| 4,198
|
[
"GPAW"
] |
8a367588e95b7fea3753e0bafb0fdfed25ba67d2b3355948614c52762a1e31d8
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Open Source implementation of Bayesian RNN on Penn Treebank.
Please see https://arxiv.org/pdf/1704.02798.pdf, section 7.1.
Download the Penn Treebank (PTB) dataset from:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Usage: python ./brnn_ptb.py --data_path=<path_to_dataset>
Above, <path_to_dataset> is the path to the 'data' subdirectory within the
directory resulting from unpacking the .tgz file whose link is given above.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
# Dependency imports
import numpy as np
import sonnet as snt
from sonnet.examples import ptb_reader
import sonnet.python.custom_getters.bayes_by_backprop as bbb
import tensorflow as tf
# Short aliases for the TF utilities used throughout this script.
nest = tf.contrib.framework.nest
FLAGS = tf.flags.FLAGS
# Data settings.
tf.flags.DEFINE_string("data_path", "/tmp/ptb_data/data", "path to PTB data.")
# Deep LSTM settings.
tf.flags.DEFINE_integer("embedding_size", 650, "embedding size.")
tf.flags.DEFINE_integer("hidden_size", 650, "network layer size")
tf.flags.DEFINE_integer("n_layers", 2, "number of layers")
# Training settings.
tf.flags.DEFINE_integer("num_training_epochs", 70, "number of training epochs")
tf.flags.DEFINE_integer("batch_size", 20, "SGD minibatch size")
tf.flags.DEFINE_integer("unroll_steps", 35, "Truncated BPTT unroll length.")
tf.flags.DEFINE_integer("high_lr_epochs", 20, "Number of epochs with lr_start.")
tf.flags.DEFINE_float("lr_start", 1.0, "SGD learning rate initializer")
tf.flags.DEFINE_float("lr_decay", 0.9, "Polynomical decay power.")
# BBB (Bayes by Backprop) settings.
tf.flags.DEFINE_float("prior_pi", 0.25, "Determines the prior mixture weights.")
tf.flags.DEFINE_float("prior_sigma1", np.exp(-1.0), "Prior component 1 stddev.")
tf.flags.DEFINE_float("prior_sigma2", np.exp(-7.0), "Prior component 2 stddev.")
# Logging settings.
tf.flags.DEFINE_integer("print_every_batches", 500, "Sample every x batches.")
tf.flags.DEFINE_string("logbasedir", "/tmp/bayesian_rnn", "directory for logs")
tf.flags.DEFINE_string("logsubdir", "run1", "subdirectory for this experiment.")
tf.flags.DEFINE_string(
    "mode", "train_test",
    "What mode to run in. Options: ['train_only', 'test_only', 'train_test']")
tf.logging.set_verbosity(tf.logging.INFO)
# Cache for the raw PTB arrays and vocab, filled lazily by _get_raw_data().
_LOADED = {}
# Observation/target tensor pair produced by the PTB input pipeline.
DataOps = collections.namedtuple("DataOps", "sparse_obs sparse_target")
def _run_session_with_no_hooks(sess, *args, **kwargs):
"""Only runs of the training op should contribute to speed measurement."""
return sess._tf_sess().run(*args, **kwargs) # pylint: disable=protected-access
def _get_raw_data(subset):
  """Return (data, vocab) for `subset`, loading and caching PTB on first use."""
  cached = _LOADED.get(subset)
  if cached is None:
    # First request: read all three splits once and memoize them.
    train_data, valid_data, test_data, vocab = ptb_reader.ptb_raw_data(
        FLAGS.data_path)
    _LOADED["train"] = np.array(train_data)
    _LOADED["valid"] = np.array(valid_data)
    _LOADED["test"] = np.array(test_data)
    _LOADED["vocab"] = vocab
    return _LOADED[subset], vocab
  return cached, _LOADED["vocab"]
class PTB(object):
  """Wraps the PTB reader of the TensorFlow tutorial."""

  def __init__(self, subset, seq_len, batch_size, name="PTB"):
    self.raw_data, self.word2id = _get_raw_data(subset)
    # Inverse vocabulary: token id -> word.
    self.id2word = {idx: word for word, idx in self.word2id.items()}
    self.seq_len = seq_len
    self.batch_size = batch_size
    self.name = name

  def to_string(self, idx_seq, join_token=" "):
    """Decode a sequence of token ids into a single string."""
    words = [self.id2word[idx] for idx in idx_seq]
    return join_token.join(words)

  def to_string_tensor(self, time_major_idx_seq_batch):
    """Decode the first batch element as a tf.string tensor."""
    first_sequence = time_major_idx_seq_batch[:, 0]
    return tf.py_func(lambda seq: self.to_string(seq), [first_sequence],
                      tf.string)

  def __call__(self):
    """Build the input pipeline; returns time-major observation/target ops."""
    obs_bm, target_bm = ptb_reader.ptb_producer(
        self.raw_data, self.batch_size, self.seq_len, name=self.name)
    return DataOps(sparse_obs=tf.transpose(obs_bm, [1, 0]),
                   sparse_target=tf.transpose(target_bm, [1, 0]))

  @property
  def num_batches(self):
    tokens_per_batch = self.seq_len * self.batch_size
    return np.prod(self.raw_data.shape) // tokens_per_batch

  @property
  def vocab_size(self):
    return len(self.word2id)
class GlobalNormClippingOptimizer(tf.train.Optimizer):
  """Optimizer that clips gradients by global norm."""

  def __init__(self,
               opt,
               clip_norm,
               use_locking=False,
               name="GlobalNormClippingOptimizer"):
    super(GlobalNormClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt              # wrapped optimizer
    self._clip_norm = clip_norm  # global-norm threshold; np.inf disables clipping

  def compute_gradients(self, *args, **kwargs):
    """Delegate gradient computation to the wrapped optimizer."""
    return self._opt.compute_gradients(*args, **kwargs)

  def apply_gradients(self, grads_and_vars, *args, **kwargs):
    """Clip gradients by global norm before applying them."""
    if self._clip_norm != np.inf:
      grads, variables = zip(*grads_and_vars)
      clipped, _ = tf.clip_by_global_norm(grads, self._clip_norm)
      grads_and_vars = zip(clipped, variables)
    return self._opt.apply_gradients(grads_and_vars, *args, **kwargs)
class CustomScaleMixture(object):
  """A mixture of two zero-mean Gaussians, used as a weight prior."""

  def __init__(self, pi, sigma1, sigma2):
    # Store everything as float32 to match the TF graph dtype.
    self.mu = np.float32(0.0)
    self.pi = np.float32(pi)
    self.sigma1 = np.float32(sigma1)
    self.sigma2 = np.float32(sigma2)

  def log_prob(self, x):
    """Return the summed log-density of `x` under the scale mixture."""
    component1 = tf.contrib.distributions.Normal(self.mu, self.sigma1)
    component2 = tf.contrib.distributions.Normal(self.mu, self.sigma2)
    log_mix1 = tf.reduce_sum(component1.log_prob(x), -1) + tf.log(self.pi)
    log_mix2 = tf.reduce_sum(component2.log_prob(x), -1) + tf.log(
        np.float32(1.0 - self.pi))
    stacked = tf.stack([log_mix1, log_mix2])
    # log-sum-exp over the two components, then sum the remaining dims.
    return tf.reduce_sum(tf.reduce_logsumexp(stacked, [0]))
def custom_scale_mixture_prior_builder(getter, name, *args, **kwargs):
  """A builder for the gaussian scale-mixture prior of Fortunato et al.

  Please see https://arxiv.org/abs/1704.02798, section 7.1

  Args:
    getter: The `getter` passed to a `custom_getter`. Please see the
      documentation for `tf.get_variable`.
    name: The `name` argument passed to `tf.get_variable`.
    *args: Positional arguments forwarded by `tf.get_variable`.
    **kwargs: Keyword arguments forwarded by `tf.get_variable`.

  Returns:
    An instance of `tf.contrib.distributions.Distribution` representing the
    prior distribution over the variable in question.
  """
  # None of the forwarded `get_variable` arguments are needed for this prior.
  del getter, name, args, kwargs
  return CustomScaleMixture(
      FLAGS.prior_pi, FLAGS.prior_sigma1, FLAGS.prior_sigma2)
def lstm_posterior_builder(getter, name, *args, **kwargs):
  """A builder for a particular diagonal gaussian posterior (LSTM weights).

  Args:
    getter: The `getter` passed to a `custom_getter`. Please see the
      documentation for `tf.get_variable`.
    name: The `name` argument passed to `tf.get_variable`.
    *args: Positional arguments forwarded by `tf.get_variable`.
    **kwargs: Keyword arguments forwarded by `tf.get_variable`.

  Returns:
    An instance of `tf.contrib.distributions.Distribution` representing the
    posterior distribution over the variable in question.
  """
  del args
  parameter_shapes = tf.contrib.distributions.Normal.param_static_shapes(
      kwargs["shape"])

  # The standard deviation of the scale mixture prior.
  prior_variance = (FLAGS.prior_pi * np.square(FLAGS.prior_sigma1)
                    + (1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2))
  prior_stddev = np.sqrt(prior_variance)

  posterior_loc = getter(
      "{}/posterior_loc".format(name),
      shape=parameter_shapes["loc"],
      initializer=kwargs.get("initializer"),
      dtype=tf.float32)
  # Initialize the raw scale so softplus(scale) starts between 1/4 and 1/2
  # of the prior stddev.
  posterior_scale = getter(
      "{}/posterior_scale".format(name),
      initializer=tf.random_uniform(
          minval=np.log(np.exp(prior_stddev / 4.0) - 1.0),
          maxval=np.log(np.exp(prior_stddev / 2.0) - 1.0),
          dtype=tf.float32,
          shape=parameter_shapes["scale"]))
  return tf.contrib.distributions.Normal(
      loc=posterior_loc,
      scale=tf.nn.softplus(posterior_scale) + 1e-5,
      name="{}/posterior_dist".format(name))
def non_lstm_posterior_builder(getter, name, *args, **kwargs):
  """A builder for a particular diagonal gaussian posterior (non-LSTM weights).

  Args:
    getter: The `getter` passed to a `custom_getter`. Please see the
      documentation for `tf.get_variable`.
    name: The `name` argument passed to `tf.get_variable`.
    *args: Positional arguments forwarded by `tf.get_variable`.
    **kwargs: Keyword arguments forwarded by `tf.get_variable`.

  Returns:
    An instance of `tf.contrib.distributions.Distribution` representing the
    posterior distribution over the variable in question.
  """
  del args
  parameter_shapes = tf.contrib.distributions.Normal.param_static_shapes(
      kwargs["shape"])

  # The standard deviation of the scale mixture prior.
  prior_variance = (FLAGS.prior_pi * np.square(FLAGS.prior_sigma1)
                    + (1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2))
  prior_stddev = np.sqrt(prior_variance)

  posterior_loc = getter(
      "{}/posterior_loc".format(name),
      shape=parameter_shapes["loc"],
      initializer=kwargs.get("initializer"),
      dtype=tf.float32)
  # Initialize the raw scale so softplus(scale) starts between 1/2 and the
  # full prior stddev (wider than the LSTM builder).
  posterior_scale = getter(
      "{}/posterior_scale".format(name),
      initializer=tf.random_uniform(
          minval=np.log(np.exp(prior_stddev / 2.0) - 1.0),
          maxval=np.log(np.exp(prior_stddev / 1.0) - 1.0),
          dtype=tf.float32,
          shape=parameter_shapes["scale"]))
  return tf.contrib.distributions.Normal(
      loc=posterior_loc,
      scale=tf.nn.softplus(posterior_scale) + 1e-5,
      name="{}/posterior_dist".format(name))
def build_modules(is_training, vocab_size):
  """Construct the modules used in the graph.

  Args:
    is_training: Python bool; posteriors are sampled when True, their mean
      is used when False.
    vocab_size: Size of the input/output vocabulary.

  Returns:
    A tuple (embed_layer, rnn_core, output_linear).
  """
  # Construct the custom getter which implements Bayes by Backprop.
  if is_training:
    estimator_mode = tf.constant(bbb.EstimatorModes.sample)
  else:
    estimator_mode = tf.constant(bbb.EstimatorModes.mean)
  lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
      posterior_builder=lstm_posterior_builder,
      prior_builder=custom_scale_mixture_prior_builder,
      kl_builder=bbb.stochastic_kl_builder,
      sampling_mode_tensor=estimator_mode)
  non_lstm_bbb_custom_getter = bbb.bayes_by_backprop_getter(
      posterior_builder=non_lstm_posterior_builder,
      prior_builder=custom_scale_mixture_prior_builder,
      kl_builder=bbb.stochastic_kl_builder,
      sampling_mode_tensor=estimator_mode)
  embed_layer = snt.Embed(
      vocab_size=vocab_size,
      embed_dim=FLAGS.embedding_size,
      custom_getter=non_lstm_bbb_custom_getter,
      name="input_embedding")
  # BUGFIX: `xrange` is Python 2 only; `range` behaves identically here and
  # keeps the file runnable on Python 3.
  cores = [snt.LSTM(FLAGS.hidden_size,
                    custom_getter=lstm_bbb_custom_getter,
                    forget_bias=0.0,
                    name="lstm_layer_{}".format(i))
           for i in range(FLAGS.n_layers)]
  rnn_core = snt.DeepRNN(
      cores,
      skip_connections=False,
      name="deep_lstm_core")
  # Do BBB on weights but not biases of output layer.
  output_linear = snt.Linear(
      vocab_size, custom_getter={"w": non_lstm_bbb_custom_getter})
  return embed_layer, rnn_core, output_linear
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
  """This is the core model logic.

  Unrolls a Bayesian RNN over the given sequence.

  Args:
    data_ops: A `sequence_data.SequenceDataOps` namedtuple.
    embed_layer: A `snt.Embed` instance.
    rnn_core: A `snt.RNNCore` instance.
    output_linear: A `snt.Linear` instance.
    name_prefix: A string to use to prefix local variable names.

  Returns:
    A tuple (output_logits, assign_zero_rnn_state):
      output_logits: 3D time-major tensor with the model's logits for a
        sequence of predictions, shape `[time_steps, batch_size, vocab_size]`.
      assign_zero_rnn_state: op that resets the persistent RNN state to zeros.
  """
  # Embed the input index sequence.
  embedded_input_seq = snt.BatchApply(
      embed_layer, name="input_embed_seq")(data_ops.sparse_obs)
  # Construct variables for holding the RNN state. Local variables are
  # initialized from the core's zero state so the state persists across runs.
  initial_rnn_state = nest.map_structure(
      lambda t: tf.get_local_variable(  # pylint: disable=g-long-lambda
          "{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
      rnn_core.initial_state(FLAGS.batch_size))
  assign_zero_rnn_state = nest.map_structure(
      lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
  assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))
  # Unroll the RNN core over the sequence.
  rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
      cell=rnn_core,
      inputs=embedded_input_seq,
      initial_state=initial_rnn_state,
      time_major=True)
  # Persist the RNN state for the next unroll: the identity below only
  # yields outputs after the state variables have been updated.
  update_rnn_state = nest.map_structure(
      tf.assign, initial_rnn_state, rnn_final_state)
  with tf.control_dependencies(nest.flatten(update_rnn_state)):
    rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
  output_logits = snt.BatchApply(
      output_linear, name="output_embed_seq")(rnn_output_seq)
  return output_logits, assign_zero_rnn_state
def build_loss(model_logits, sparse_targets):
  """Compute the log loss given predictions and targets."""
  time_major_shape = [FLAGS.unroll_steps, FLAGS.batch_size]
  flat_batch_shape = [FLAGS.unroll_steps * FLAGS.batch_size, -1]
  # Cross-entropy per token, computed on the flattened (time*batch) axis.
  per_token_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(model_logits, flat_batch_shape),
      labels=tf.reshape(sparse_targets, flat_batch_shape[:-1]))
  per_token_xent = tf.reshape(per_token_xent, time_major_shape)
  # Sum over the sequence, then average over the batch.
  sequence_neg_log_prob = tf.reduce_sum(per_token_xent, axis=0)
  return tf.reduce_mean(sequence_neg_log_prob, axis=0)
def train(logdir):
  """Run a network on the PTB training set, checkpointing the weights.

  Builds the training and validation graphs, then for each epoch first
  evaluates word-level perplexity on the validation set and then runs a
  full training epoch, writing scalar/text summaries as it goes.  The
  global step and weights are checkpointed to `logdir` so interrupted
  runs resume at the correct epoch/step.

  Args:
    logdir: Directory for checkpoints and TensorBoard summaries.
  """
  ptb_train = PTB(
      name="ptb_train",
      subset="train",
      seq_len=FLAGS.unroll_steps,
      batch_size=FLAGS.batch_size)
  # Connect to training set.
  data_ops = ptb_train()
  embed_layer, rnn_core, output_linear = build_modules(
      is_training=True, vocab_size=ptb_train.vocab_size)
  prediction_logits, zero_state_op = build_logits(
      data_ops, embed_layer, rnn_core, output_linear, name_prefix="train")
  data_loss = build_loss(prediction_logits, data_ops.sparse_target)
  # Add the KL cost, scaled down so the single per-model KL term is
  # amortized over every element of the dataset.
  total_kl_cost = bbb.get_total_kl_cost()
  num_dataset_elements = FLAGS.batch_size * ptb_train.num_batches
  scaled_kl_cost = total_kl_cost / num_dataset_elements
  total_loss = tf.add(scaled_kl_cost, data_loss)
  # Optimize as usual.
  global_step = tf.get_variable(
      "num_weight_updates",
      initializer=tf.constant(0, dtype=tf.int32, shape=()),
      collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])
  learning_rate = tf.get_variable(
      "lr", initializer=tf.constant(FLAGS.lr_start, shape=(), dtype=tf.float32))
  # The decay only happens when this op is explicitly fetched (done at the
  # end of an epoch, once past FLAGS.high_lr_epochs).
  learning_rate_update = learning_rate.assign(learning_rate * FLAGS.lr_decay)
  optimizer = tf.train.GradientDescentOptimizer(
      learning_rate=learning_rate)
  optimizer = GlobalNormClippingOptimizer(optimizer, clip_norm=5.0)
  # Fetching global_step_and_train performs one SGD update as a side effect.
  with tf.control_dependencies([optimizer.minimize(total_loss)]):
    global_step_and_train = global_step.assign_add(1)
  # Connect to valid set.
  ptb_valid = PTB(
      name="ptb_valid",
      subset="valid",
      seq_len=FLAGS.unroll_steps,
      batch_size=FLAGS.batch_size)
  valid_data_ops = ptb_valid()
  valid_logits, zero_valid_state = build_logits(
      valid_data_ops, embed_layer, rnn_core, output_linear, name_prefix="valid")
  valid_loss = build_loss(valid_logits, valid_data_ops.sparse_target)
  # Compute metrics for the sake of monitoring training.
  predictions = tf.cast(
      tf.argmax(prediction_logits, axis=-1), tf.int32, name="pred")
  correct_prediction_mask = tf.cast(
      tf.equal(predictions, data_ops.sparse_target), tf.int32)
  accuracy = tf.reduce_mean(
      tf.cast(correct_prediction_mask, tf.float32), name="acc")
  error_rate = tf.subtract(1.0, accuracy, name="err")
  label_probs = tf.nn.softmax(prediction_logits, dim=-1)
  # Mean entropy of the predictive distribution (1e-12 avoids log(0)).
  predictive_entropy = tf.reduce_mean(
      label_probs * tf.log(label_probs + 1e-12) * -1.0)
  # Create tf.summary ops.
  log_ops_to_run = {
      "scalar": collections.OrderedDict([
          ("task_loss", data_loss),
          ("train_err_rate", error_rate),
          ("pred_entropy", predictive_entropy),
          ("learning_rate", learning_rate),
          ("elbo_loss", total_loss),
          ("kl_cost", total_kl_cost),
          ("scaled_kl_cost", scaled_kl_cost),
      ]),
      "text": collections.OrderedDict([
          ("labels", ptb_train.to_string_tensor(data_ops.sparse_target)),
          ("predictions", ptb_train.to_string_tensor(predictions))
      ])
  }
  for name, tensor in log_ops_to_run["scalar"].items():
    tf.summary.scalar(os.path.join("train", name), tensor)
  # The remaining logic runs the training loop and logging.
  summary_writer = tf.summary.FileWriterCache.get(logdir=logdir)
  tf.logging.info(
      "Beginning training for {} epochs, each with {} batches.".format(
          FLAGS.num_training_epochs, ptb_train.num_batches))
  with tf.train.MonitoredTrainingSession(
      is_chief=True, checkpoint_dir=logdir, save_summaries_secs=10) as sess:
    # Recover the epoch/step position from the restored global step.
    num_updates_v = _run_session_with_no_hooks(sess, global_step)
    epoch_idx_start, step_idx_start = divmod(
        num_updates_v, ptb_train.num_batches)
    tf.logging.info("On start, epoch: {}\t step: {}".format(
        epoch_idx_start, step_idx_start))
    for epoch_idx in xrange(epoch_idx_start, FLAGS.num_training_epochs):
      tf.logging.info("Beginning Epoch {}/{}".format(
          epoch_idx, FLAGS.num_training_epochs))
      tf.logging.info(
          ("Beginning by evaluating on the validation set, which has "
           "{} batches.".format(ptb_valid.num_batches)))
      valid_cost = 0
      valid_steps = 0
      # Reset the validation RNN state before each full validation pass.
      _run_session_with_no_hooks(sess, zero_valid_state)
      for _ in xrange(ptb_valid.num_batches):
        valid_cost_v, num_updates_v = _run_session_with_no_hooks(
            sess, [valid_loss, global_step])
        valid_cost += valid_cost_v
        valid_steps += FLAGS.unroll_steps
      tf.logging.info("Validation set perplexity: {}".format(
          np.exp(valid_cost / valid_steps)))
      summary = tf.summary.Summary()
      summary.value.add(
          tag="valid/word_level_perplexity",
          simple_value=np.exp(valid_cost / valid_steps))
      summary_writer.add_summary(summary, num_updates_v)
      # Run a training epoch.
      epoch_cost = 0
      epoch_steps = 0
      for batch_idx in xrange(step_idx_start, ptb_train.num_batches):
        # Fetching global_step_and_train also applies one weight update.
        scalars_res, num_updates_v = sess.run(
            [log_ops_to_run["scalar"], global_step_and_train])
        epoch_cost += scalars_res["task_loss"]
        epoch_steps += FLAGS.unroll_steps
        if (batch_idx - 1) % FLAGS.print_every_batches == 0:
          summary = tf.summary.Summary()
          summary.value.add(
              tag="train/word_level_perplexity",
              simple_value=np.exp(epoch_cost / epoch_steps))
          summary_writer.add_summary(summary, num_updates_v)
          scalars_res, strings_res = _run_session_with_no_hooks(
              sess, [log_ops_to_run["scalar"], log_ops_to_run["text"]])
          tf.logging.info("Num weight updates: {}".format(num_updates_v))
          # NOTE: Python 2 only — dict.items() returns lists here so `+`
          # concatenates; this line would fail on Python 3 views.
          for name, result in scalars_res.items() + strings_res.items():
            tf.logging.info("{}: {}".format(name, result))
      word_level_perplexity = np.exp(epoch_cost / epoch_steps)
      tf.logging.info(
          "Train Perplexity after Epoch {}: {}".format(
              epoch_idx, word_level_perplexity))
      # Zero the training RNN state between epochs, and decay the learning
      # rate once past the initial high-learning-rate epochs.
      end_of_epoch_fetches = [zero_state_op]
      if epoch_idx >= FLAGS.high_lr_epochs:
        end_of_epoch_fetches.append(learning_rate_update)
      _run_session_with_no_hooks(sess, end_of_epoch_fetches)
  tf.logging.info("Done training. Thanks for your time.")
def test(logdir):
  """Run a network on the PTB test set, restoring from the latest checkpoint.

  Args:
    logdir: Directory containing the checkpoints written by `train`.
  """
  # Recreate the global-step variable so its restored value can be reported.
  global_step = tf.get_variable(
      "num_weight_updates",
      initializer=tf.constant(0, dtype=tf.int32, shape=()),
      collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])
  ptb_test = PTB(
      name="ptb_test",
      subset="test",
      seq_len=FLAGS.unroll_steps,
      batch_size=FLAGS.batch_size)
  # Connect to test set.
  data_ops = ptb_test()
  # The variables in these modules will be restored from the checkpoint.
  embed_layer, rnn_core, output_linear = build_modules(
      is_training=False, vocab_size=ptb_test.vocab_size)
  prediction_logits, _ = build_logits(
      data_ops, embed_layer, rnn_core, output_linear, name_prefix="test")
  avg_nats_per_sequence = build_loss(prediction_logits, data_ops.sparse_target)
  dataset_cost = 0
  dataset_iters = 0
  with tf.train.SingularMonitoredSession(checkpoint_dir=logdir) as sess:
    tf.logging.info("Running on test set in {} batches.".format(
        ptb_test.num_batches))
    tf.logging.info("The model has trained for {} steps.".format(
        _run_session_with_no_hooks(sess, global_step)))
    for _ in range(ptb_test.num_batches):
      dataset_cost += _run_session_with_no_hooks(sess, avg_nats_per_sequence)
      dataset_iters += FLAGS.unroll_steps
    # Perplexity = exp(average nats per word) over the whole test set.
    tf.logging.info("Final test set perplexity: {}.".format(
        np.exp(dataset_cost / dataset_iters)))
def main(unused_argv):
  """Dispatch to training and/or testing as selected by FLAGS.mode."""
  logdir = os.path.join(FLAGS.logbasedir, FLAGS.logsubdir)
  tf.logging.info("Log Directory: {}".format(logdir))
  mode = FLAGS.mode
  if mode == "train_test":
    tf.logging.info("Beginning a training phase of {} epochs.".format(
        FLAGS.num_training_epochs))
    train(logdir)
    tf.logging.info("Beginning testing phase.")
    # Enter new default graph so that we can read variables from checkpoint
    # without getting hit by name uniquification of sonnet variables.
    with tf.Graph().as_default():
      test(logdir)
  elif mode == "train_only":
    train(logdir)
  elif mode == "test_only":
    test(logdir)
  else:
    raise ValueError("Invalid mode {}. Please choose one of {}.".format(
        FLAGS.mode, "['train_only', 'test_only', 'train_test']"))
if __name__ == "__main__":
  tf.app.run()  # Parses flags, then invokes main().
|
rakshit-agrawal/sonnet
|
sonnet/examples/brnn_ptb.py
|
Python
|
apache-2.0
| 22,701
|
[
"Gaussian"
] |
b41f573a3c1f7bfc5c68b575193ff9924c33a995f141d8f682709c1777e739ba
|
"""
Usage: python_visitor_gui.py
This script shows how one can implement visitors
in pure python and inject them into OpenGM solver.
( not all OpenGM solvers support this kind of
code injection )
"""
import opengm
import numpy
import matplotlib
from matplotlib import pyplot as plt
shape=[100,100]
numLabels=10
unaries=numpy.random.rand(shape[0], shape[1],numLabels)
potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4)
gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts)
inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5))
class PyCallback(object):
    """Pure-Python visitor injected into the OpenGM inference loop.

    `begin`/`end` are called once around inference; `visit` is called
    during inference and redraws the current labeling.
    NOTE: uses Python 2 print statements.
    """
    def __init__(self,shape,numLabels):
        self.shape=shape
        self.numLabels=numLabels
        # One random color per label; interactive mode lets plt.draw()
        # refresh the window while inference is still running.
        self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3))
        matplotlib.interactive(True)
    def begin(self,inference):
        # Called once before inference starts.
        print "begin of inference"
    def end(self,inference):
        # Called once after inference finishes.
        print "end of inference"
    def visit(self,inference):
        # Called periodically during inference: report the current energy
        # and show the current labeling as an image.
        gm=inference.gm()
        labelVector=inference.arg()
        print "energy ",gm.evaluate(labelVector)
        labelVector=labelVector.reshape(self.shape)
        plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest")
        plt.draw()
# Run inference, invoking the visitor callbacks every iteration, then
# fetch the resulting (approximate) argmin labeling.
callback=PyCallback(shape,numLabels)
visitor=inf.pythonVisitor(callback,visitNth=1)
inf.infer(visitor)
argmin=inf.arg()
|
joergkappes/opengm
|
src/interfaces/python/examples/python_visitor_gui.py
|
Python
|
mit
| 1,377
|
[
"VisIt"
] |
2d93d0b9918e864f3b0792e3a669d1b24a193e378db08d031348a1d53b318973
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
############################################################################
#
# Copyright (C) 2008-2015
# Christian Kohlöffel
# Vinzenz Schulz
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
from __future__ import absolute_import
from __future__ import division
from core.point import Point
from core.arcgeo import ArcGeo
from core.linegeo import LineGeo
from dxfimport.classes import PointsClass, ContourClass
class GeoentPolyline:
    """DXF import entity for a 2D POLYLINE, stored as LineGeo/ArcGeo pieces."""

    def __init__(self, Nr=0, caller=None):
        self.Typ = 'Polyline'
        self.Nr = Nr
        self.Layer_Nr = 0
        self.geo = []     # ordered LineGeo / ArcGeo segments
        self.length = 0   # accumulated length of all segments
        # Read the geometry from the caller's DXF line pairs.
        self.Read(caller)

    def __str__(self):
        # Human-readable summary of this polyline entity.
        string = "\nTyp: Polyline" +\
                 "\nNr: %i" % self.Nr +\
                 "\nLayer Nr: %i" % self.Layer_Nr +\
                 "\nNr. of Lines: %i" % len(self.geo) +\
                 "\nlength: %0.3f" % self.length
        return string

    def reverse(self):
        """Reverse the direction of the polyline and of every segment in it."""
        self.geo.reverse()
        for geo in self.geo:
            geo.reverse()

    def App_Cont_or_Calc_IntPts(self, cont, points, i, tol, warning):
        """Append a closed contour, or record open endpoints for later joining.

        Args:
            cont: list of finished ContourClass objects (appended to).
            points: list of PointsClass endpoint records (appended to).
            i: index of this entity in the caller's geometry list.
            tol: tolerance for zero-length and closed-contour checks.
            warning: warning flag, passed through unchanged.
        """
        if abs(self.length) < tol:
            # Too short to be meaningful geometry; silently skip.
            pass
        elif self.geo[0].Ps.within_tol(self.geo[-1].Pe, tol):
            # Start meets end within tolerance: a closed polyline.  Normalize
            # it and add it directly as a finished contour.
            self.analyse_and_opt()
            cont.append(ContourClass(len(cont), 1, [[i, 0]], self.length))
        else:
            # Open polyline: record its endpoints so contours can be
            # assembled from matching points later.
            points.append(PointsClass(point_nr=len(points), geo_nr=i,
                                      Layer_Nr=self.Layer_Nr,
                                      be=self.geo[0].Ps,
                                      en=self.geo[-1].Pe, be_cp=[], en_cp=[]))
        return warning
#        if abs(self.length)>tol:
#            points.append(PointsClass(point_nr=len(points),geo_nr=i,\
#                                      Layer_Nr=self.Layer_Nr,\
#                                      be=self.geo[0].Ps,
#                                      en=self.geo[-1].Pe,be_cp=[],en_cp=[]))
#        else:
#            showwarning("Short Polyline Elemente", ("Length of Line geometrie too short!"\
#                        "\nLenght must be greater than tolerance."\
#                        "\nSkipping Line Geometrie"))

    def analyse_and_opt(self):
        """Normalize a closed polyline: fix its orientation and rotate it so
        the start point closest to the lower-left reference comes first."""
        summe = 0
        # Reference point toward which the start point should lie
        # (lower left).
        Popt = Point(-1e3, -1e6)
        # Signed-area (shoelace / Gauss) sum over the closed polygon; the
        # polyline is reversed when the sum is positive.
        # NOTE(review): original comments label a positive sum as CW —
        # verify the convention against Point/ArcGeo orientation handling.
        for Line in self.geo:
            summe += Line.Ps.x * Line.Pe.y - Line.Pe.x * Line.Ps.y
        if summe > 0.0:
            self.reverse()
        # Find the segment whose start point is closest to Popt.
        # (Must be a separate loop, after the possible reversal.)
        min_distance = self.geo[0].Ps.distance(Popt)
        min_geo_nr = 0
        for geo_nr in range(1, len(self.geo)):
            if self.geo[geo_nr].Ps.distance(Popt) < min_distance:
                min_distance = self.geo[geo_nr].Ps.distance(Popt)
                min_geo_nr = geo_nr
        # Rotate the contour so the chosen start point comes first.
        self.geo = self.geo[min_geo_nr:len(self.geo)] + self.geo[0:min_geo_nr]

    def Read(self, caller):
        """Parse one POLYLINE entity (up to its SEQEND) from caller.line_pairs,
        building LineGeo/ArcGeo segments and advancing caller.start."""
        # Short name for the DXF line-pair accessor.
        lp = caller.line_pairs
        # End of this entity: just past the matching SEQEND marker.
        e = lp.index_both(0, "SEQEND", caller.start + 1) + 1
        # Layer name (group code 8).
        s = lp.index_code(8, caller.start + 1)
        self.Layer_Nr = caller.Get_Layer_Nr(lp.line_pair[s].value)
        # Ps=None marks that no previous vertex exists yet.
        Ps = None
        # Polyline flag (group code 70); 1 means the polyline is closed.
        s_temp = lp.index_code(70, s + 1, e)
        if s_temp is None:
            PolyLineFlag = 0
        else:
            PolyLineFlag = int(lp.line_pair[s_temp].value)
            s = s_temp
        while 1:
            # Advance to the next VERTEX; stop when none remain before SEQEND.
            s = lp.index_both(0, "VERTEX", s + 1, e)
            if s == None:
                break
            # X value (group code 10)
            s = lp.index_code(10, s + 1, e)
            x = float(lp.line_pair[s].value)
            # Y value (group code 20)
            s = lp.index_code(20, s + 1, e)
            y = float(lp.line_pair[s].value)
            Pe = Point(x, y)
            # Bulge (group code 42), searched only up to the next VERTEX so a
            # later vertex's bulge is not picked up by mistake.
            bulge = 0
            e_vertex = lp.index_both(0, "VERTEX", s + 1, e)
            if e_vertex is None:
                e_vertex = e
            s_temp = lp.index_code(42, s + 1, e_vertex)
            if s_temp is not None:
                bulge = float(lp.line_pair[s_temp].value)
                s = s_temp
            # Vertex flag (bit-coded); default is 0; 1 = Closed; 128 = Plinegen
            s_temp = lp.index_code(70, s + 1, e_vertex)
            if s_temp is None:
                VertexFlag = 0
            else:
                VertexFlag = int(lp.line_pair[s_temp].value)
                s = s_temp
            # Append a geometry for every ordinary vertex.
            # NOTE(review): flag 16 appears to mark spline-frame control
            # points, which are skipped here — confirm against the DXF spec.
            if VertexFlag != 16:
                if Ps is not None:
                    if next_bulge == 0:
                        self.geo.append(LineGeo(Ps=Ps, Pe=Pe))
                    else:
                        self.geo.append(self.bulge2arc(Ps, Pe, next_bulge))
                    # Accumulate the length of the segment just added.
                    self.length += self.geo[-1].length
                # A bulge read at a vertex applies to the segment that starts
                # at that vertex, i.e. to the *next* segment built.
                next_bulge = bulge
                Ps = Pe
        # It is a closed polyline: add a final segment back to the start.
        if PolyLineFlag == 1:
            if next_bulge == 0:
                self.geo.append(LineGeo(Ps=Ps, Pe=self.geo[0].Ps))
            else:
                self.geo.append(self.bulge2arc(Ps, self.geo[0].Ps, next_bulge))
            self.length += self.geo[-1].length
        # Hand the caller the start index for the next entity.
        caller.start = e

    def get_start_end_points(self, direction=0):
        """Return (point, angle) of the start (direction=0) or end (otherwise)
        of the polyline, delegating to the first/last segment."""
        if not direction:
            punkt, angle = self.geo[0].get_start_end_points(direction)
        else:
            punkt, angle = self.geo[-1].get_start_end_points(direction)
        return punkt, angle

    def bulge2arc(self, Ps, Pe, bulge):
        """Convert a DXF bulge value between Ps and Pe into an ArcGeo.

        The sign of the bulge selects the arc direction; a negative bulge
        is built reversed so the arc still runs from Ps to Pe.
        """
        c = (1 / bulge - bulge) / 2
        # Calculation of the center (Micke's formula).
        O = Point((Ps.x + Pe.x - (Pe.y - Ps.y) * c) / 2,
                  (Ps.y + Pe.y + (Pe.x - Ps.x) * c) / 2)
        # Radius = distance between the center and Ps.
        r = O.distance(Ps)
        # (Check: O.distance(Pe) yields the same radius.)
        # r=O.distance(Pe)
        if bulge > 0:
            return ArcGeo(Ps=Ps, Pe=Pe, O=O, r=r)
        else:
            arc = ArcGeo(Ps=Pe, Pe=Ps, O=O, r=r)
            arc.reverse()
            return arc
|
hehongyu1995/Dxf2GCode
|
dxfimport/geoent_polyline.py
|
Python
|
gpl-3.0
| 9,035
|
[
"Gaussian"
] |
5ee3f3f3ec9b7c3085bde62770ded6318286a629b886becde9d0e2fb17b28aa0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
from keystoneauth1 import session
import mock
import requests
from testtools import matchers
from openstack import exceptions
from openstack import format
from openstack import resource
from openstack.tests.unit import base
from openstack import utils
# Shared fixture values used throughout the test cases below.
fake_parent = 'robert'
fake_name = 'rey'
fake_id = 99
fake_attr1 = 'lana'
fake_attr2 = 'del'
fake_resource = 'fake'
fake_resources = 'fakes'
# Path arguments and the URL template they are substituted into.
fake_arguments = {'parent_name': fake_parent}
fake_base_path = '/fakes/%(parent_name)s/data'
fake_path = '/fakes/rey/data'
# Canonical server-side representation of a FakeResource.
fake_data = {'id': fake_id,
             'enabled': True,
             'name': fake_name,
             'parent': fake_parent,
             'attr1': fake_attr1,
             'attr2': fake_attr2,
             'status': None}
# A full response body, keyed by the resource key.
fake_body = {fake_resource: fake_data}
class FakeParent(resource.Resource):
    """Minimal parent resource whose identifier is its name."""
    id_attribute = "name"

    name = resource.prop('name')
class FakeResource(resource.Resource):
    """Fake resource with every operation enabled, used to exercise CRUD."""
    resource_key = fake_resource
    resources_key = fake_resources
    base_path = fake_base_path
    allow_create = allow_retrieve = allow_update = True
    allow_delete = allow_list = allow_head = True

    enabled = resource.prop('enabled', type=format.BoolStr)
    name = resource.prop('name')
    parent = resource.prop('parent_name')
    first = resource.prop('attr1')
    second = resource.prop('attr2')
    third = resource.prop('attr3', alias='attr_three')
    status = resource.prop('status')
class FakeResourceNoKeys(FakeResource):
    """FakeResource variant without resource/resources body keys."""
    resource_key = None
    resources_key = None
class PropTests(base.TestCase):
    """Tests for the behaviour of resource.prop descriptors."""

    def test_with_alias_and_type(self):
        class Test(resource.Resource):
            attr = resource.prop("attr1", alias="attr2", type=bool)

        t = Test(attrs={"attr2": 500})
        # Don't test with assertTrue because 500 evaluates to True.
        # Need to test that bool(500) happened and attr2 *is* True.
        self.assertIs(t.attr, True)

    def test_defaults(self):
        new_default = "new_default"

        class Test(resource.Resource):
            attr1 = resource.prop("attr1")
            attr2 = resource.prop("attr2", default=new_default)

        t = Test()
        self.assertIsNone(t.attr1)
        self.assertEqual(new_default, t.attr2)
        # When the default value is passed in, it is left untouched.
        # Check that attr2 is literally the same object we set as default.
        t.attr2 = new_default
        self.assertIs(new_default, t.attr2)

        not_default = 'not default'
        t2 = Test({'attr2': not_default})
        self.assertEqual(not_default, t2.attr2)
        # Assert that if the default is passed in, it overrides the previously
        # set value (bug #1425996)
        t2.attr2 = new_default
        self.assertEqual(new_default, t2.attr2)

    def test_get_without_instance(self):
        # Accessing a prop via the class (no instance) yields None.
        self.assertIsNone(FakeResource.name)

    def test_set_ValueError(self):
        # A value the prop's type cannot convert raises ValueError.
        class Test(resource.Resource):
            attr = resource.prop("attr", type=int)

        t = Test()

        def should_raise():
            t.attr = "this is not an int"

        self.assertThat(should_raise, matchers.raises(ValueError))

    def test_set_TypeError(self):
        # A prop type whose constructor takes no argument raises TypeError.
        class Type(object):
            def __init__(self):
                pass

        class Test(resource.Resource):
            attr = resource.prop("attr", type=Type)

        t = Test()

        def should_raise():
            t.attr = "this type takes no args"

        self.assertThat(should_raise, matchers.raises(TypeError))

    def test_resource_type(self):
        # Props typed with a Resource subclass wrap assigned ids in
        # instances of that subclass, and accept instances directly.
        class FakestResource(resource.Resource):
            shortstop = resource.prop("shortstop", type=FakeResource)
            third_base = resource.prop("third_base", type=FakeResource)

        sot = FakestResource()
        id1 = "Ernie Banks"
        id2 = "Ron Santo"
        # Assigning plain ids produces FakeResource instances.
        sot.shortstop = id1
        sot.third_base = id2
        resource1 = FakeResource.new(id=id1)
        self.assertEqual(resource1, sot.shortstop)
        self.assertEqual(id1, sot.shortstop.id)
        self.assertEqual(FakeResource, type(sot.shortstop))
        resource2 = FakeResource.new(id=id2)
        self.assertEqual(resource2, sot.third_base)
        self.assertEqual(id2, sot.third_base.id)
        self.assertEqual(FakeResource, type(sot.third_base))
        # Assigning resource instances keeps them as-is.
        sot2 = FakestResource()
        sot2.shortstop = resource1
        sot2.third_base = resource2
        self.assertEqual(resource1, sot2.shortstop)
        self.assertEqual(id1, sot2.shortstop.id)
        self.assertEqual(FakeResource, type(sot2.shortstop))
        self.assertEqual(resource2, sot2.third_base)
        self.assertEqual(id2, sot2.third_base.id)
        self.assertEqual(FakeResource, type(sot2.third_base))
        # Constructing from a body of plain ids wraps them as well.
        body = {
            "shortstop": id1,
            "third_base": id2
        }
        sot3 = FakestResource(body)
        self.assertEqual(FakeResource({"id": id1}), sot3.shortstop)
        self.assertEqual(FakeResource({"id": id2}), sot3.third_base)

    def test_set_alias_same_name(self):
        # An alias equal to the attribute name still stores under the
        # prop's primary name.
        class Test(resource.Resource):
            attr = resource.prop("something", alias="attr")

        val = "hey"
        args = {"something": val}
        sot = Test(args)
        self.assertEqual(val, sot._attrs["something"])
        self.assertEqual(val, sot.attr)

    def test_property_is_none(self):
        # A None value bypasses type conversion and stays None.
        class Test(resource.Resource):
            attr = resource.prop("something", type=dict)

        args = {"something": None}
        sot = Test(args)
        self.assertIsNone(sot._attrs["something"])
        self.assertIsNone(sot.attr)
class HeaderTests(base.TestCase):
    """Tests for resource.header properties and header handling on CRUD."""

    class Test(resource.Resource):
        base_path = "/ramones"
        service = "punk"
        allow_create = True
        allow_update = True
        # Header-backed properties: attribute access maps to HTTP headers.
        hey = resource.header("vocals")
        ho = resource.header("guitar")
        letsgo = resource.header("bass")

    def test_get(self):
        # Present headers are returned; absent ones read as None.
        val = "joey"
        args = {"vocals": val}
        sot = HeaderTests.Test({'headers': args})
        self.assertEqual(val, sot.hey)
        self.assertIsNone(sot.ho)
        self.assertIsNone(sot.letsgo)

    def test_set_new(self):
        # Setting a header that was absent marks the resource dirty.
        args = {"vocals": "joey", "bass": "deedee"}
        sot = HeaderTests.Test({'headers': args})
        sot._reset_dirty()
        sot.ho = "johnny"
        self.assertEqual("johnny", sot.ho)
        self.assertTrue(sot.is_dirty)

    def test_set_old(self):
        # Overwriting an existing header also marks the resource dirty.
        args = {"vocals": "joey", "bass": "deedee"}
        sot = HeaderTests.Test({'headers': args})
        sot._reset_dirty()
        sot.letsgo = "cj"
        self.assertEqual("cj", sot.letsgo)
        self.assertTrue(sot.is_dirty)

    def test_set_brand_new(self):
        # Setting a header on an empty header dict stores it and dirties.
        sot = HeaderTests.Test({'headers': {}})
        sot._reset_dirty()
        sot.ho = "johnny"
        self.assertEqual("johnny", sot.ho)
        self.assertTrue(sot.is_dirty)
        self.assertEqual({'headers': {"guitar": "johnny"}}, sot)

    def test_1428342(self):
        # Regression test (bug 1428342): header access must work when the
        # header container is a case-insensitive dict.
        sot = HeaderTests.Test({'headers':
                                requests.structures.CaseInsensitiveDict()})
        self.assertIsNone(sot.hey)

    def test_create_update_headers(self):
        # Dirty headers must be sent on both create (POST) and update (PUT).
        sot = HeaderTests.Test()
        sot._reset_dirty()
        sot.ho = "johnny"
        sot.letsgo = "deedee"
        response = mock.Mock()
        response_body = {'id': 1}
        response.json = mock.Mock(return_value=response_body)
        response.headers = None
        sess = mock.Mock()
        sess.post = mock.Mock(return_value=response)
        sess.put = mock.Mock(return_value=response)
        sot.create(sess)
        headers = {'guitar': 'johnny', 'bass': 'deedee'}
        sess.post.assert_called_with(HeaderTests.Test.base_path,
                                     endpoint_filter=HeaderTests.Test.service,
                                     headers=headers,
                                     json={})
        sot['id'] = 1
        sot.letsgo = "cj"
        headers = {'guitar': 'johnny', 'bass': 'cj'}
        sot.update(sess)
        sess.put.assert_called_with('ramones/1',
                                    endpoint_filter=HeaderTests.Test.service,
                                    headers=headers,
                                    json={})
class ResourceTests(base.TestCase):
def setUp(self):
super(ResourceTests, self).setUp()
self.session = mock.Mock(spec=session.Session)
self.session.get_filter = mock.Mock(return_value={})
def assertCalledURL(self, method, url):
# call_args gives a tuple of *args and tuple of **kwargs.
# Check that the first arg in *args (the URL) has our url.
self.assertEqual(method.call_args[0][0], url)
def test_empty_id(self):
resp = mock.Mock()
resp.json = mock.Mock(return_value=fake_body)
self.session.get.return_value = resp
obj = FakeResource.new(**fake_arguments)
self.assertEqual(obj, obj.get(self.session))
self.assertEqual(fake_id, obj.id)
self.assertEqual(fake_name, obj['name'])
self.assertEqual(fake_attr1, obj['attr1'])
self.assertEqual(fake_attr2, obj['attr2'])
self.assertEqual(fake_name, obj.name)
self.assertEqual(fake_attr1, obj.first)
self.assertEqual(fake_attr2, obj.second)
def test_not_allowed(self):
class Nope(resource.Resource):
allow_create = allow_retrieve = allow_update = False
allow_delete = allow_list = allow_head = False
nope = Nope()
def cant_create():
nope.create_by_id(1, 2)
def cant_retrieve():
nope.get_data_by_id(1, 2)
def cant_update():
nope.update_by_id(1, 2, 3)
def cant_delete():
nope.delete_by_id(1, 2)
def cant_list():
for i in nope.list(1):
pass
def cant_head():
nope.head_data_by_id(1, 2)
self.assertThat(cant_create,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_retrieve,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_update,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_delete,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_list,
matchers.raises(exceptions.MethodNotSupported))
self.assertThat(cant_head,
matchers.raises(exceptions.MethodNotSupported))
def _test_create_by_id(self, key, response_value, response_body,
attrs, json_body, response_headers=None):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.Mock()
response.json = mock.Mock(return_value=response_body)
response.headers = response_headers
expected_resp = response_value.copy()
if response_headers:
expected_resp.update({'headers': response_headers})
sess = mock.Mock()
sess.put = mock.Mock(return_value=response)
sess.post = mock.Mock(return_value=response)
resp = FakeResource2.create_by_id(sess, attrs)
self.assertEqual(expected_resp, resp)
sess.post.assert_called_with(FakeResource2.base_path,
endpoint_filter=FakeResource2.service,
json=json_body)
r_id = "my_id"
resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id)
self.assertEqual(response_value, resp)
sess.put.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
endpoint_filter=FakeResource2.service,
json=json_body)
path_args = {"parent_name": "my_name"}
resp = FakeResource2.create_by_id(sess, attrs, path_args=path_args)
self.assertEqual(response_value, resp)
sess.post.assert_called_with(FakeResource2.base_path % path_args,
endpoint_filter=FakeResource2.service,
json=json_body)
resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id,
path_args=path_args)
self.assertEqual(response_value, resp)
sess.put.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
endpoint_filter=FakeResource2.service,
json=json_body)
def test_create_without_resource_key(self):
key = None
response_value = {"a": 1, "b": 2, "c": 3}
response_body = response_value
attrs = response_value
json_body = attrs
self._test_create_by_id(key, response_value, response_body,
attrs, json_body)
def test_create_with_response_headers(self):
key = None
response_value = {"a": 1, "b": 2, "c": 3}
response_body = response_value
response_headers = {'location': 'foo'}
attrs = response_value.copy()
json_body = attrs
self._test_create_by_id(key, response_value, response_body,
attrs, json_body,
response_headers=response_headers)
def test_create_with_resource_key(self):
key = "my_key"
response_value = {"a": 1, "b": 2, "c": 3}
response_body = {key: response_value}
attrs = response_body
json_body = {key: attrs}
self._test_create_by_id(key, response_value, response_body,
attrs, json_body)
def _test_get_data_by_id(self, key, response_value, response_body):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.Mock()
response.json = mock.Mock(return_value=response_body)
sess = mock.Mock()
sess.get = mock.Mock(return_value=response)
r_id = "my_id"
resp = FakeResource2.get_data_by_id(sess, resource_id=r_id)
self.assertEqual(response_value, resp)
sess.get.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
endpoint_filter=FakeResource2.service)
path_args = {"parent_name": "my_name"}
resp = FakeResource2.get_data_by_id(sess, resource_id=r_id,
path_args=path_args)
self.assertEqual(response_value, resp)
sess.get.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
endpoint_filter=FakeResource2.service)
def test_get_data_without_resource_key(self):
key = None
response_value = {"a": 1, "b": 2, "c": 3}
response_body = response_value
self._test_get_data_by_id(key, response_value, response_body)
def test_get_data_with_resource_key(self):
key = "my_key"
response_value = {"a": 1, "b": 2, "c": 3}
response_body = {key: response_value}
self._test_get_data_by_id(key, response_value, response_body)
def _test_head_data_by_id(self, key, response_value):
class FakeResource2(FakeResource):
resource_key = key
service = "my_service"
response = mock.Mock()
response.headers = response_value
sess = mock.Mock()
sess.head = mock.Mock(return_value=response)
r_id = "my_id"
resp = FakeResource2.head_data_by_id(sess, resource_id=r_id)
self.assertEqual({'headers': response_value}, resp)
headers = {'Accept': ''}
sess.head.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
endpoint_filter=FakeResource2.service,
headers=headers)
path_args = {"parent_name": "my_name"}
resp = FakeResource2.head_data_by_id(sess, resource_id=r_id,
path_args=path_args)
self.assertEqual({'headers': response_value}, resp)
headers = {'Accept': ''}
sess.head.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
endpoint_filter=FakeResource2.service,
headers=headers)
def test_head_data_without_resource_key(self):
key = None
response_value = {"key1": "value1", "key2": "value2"}
self._test_head_data_by_id(key, response_value)
def test_head_data_with_resource_key(self):
key = "my_key"
response_value = {"key1": "value1", "key2": "value2"}
self._test_head_data_by_id(key, response_value)
def _test_update_by_id(self, key, response_value, response_body,
attrs, json_body, response_headers=None):
class FakeResource2(FakeResource):
patch_update = True
resource_key = key
service = "my_service"
response = mock.Mock()
response.json = mock.Mock(return_value=response_body)
response.headers = response_headers
expected_resp = response_value.copy()
if response_headers:
expected_resp.update({'headers': response_headers})
sess = mock.Mock()
sess.patch = mock.Mock(return_value=response)
r_id = "my_id"
resp = FakeResource2.update_by_id(sess, r_id, attrs)
self.assertEqual(expected_resp, resp)
sess.patch.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
endpoint_filter=FakeResource2.service,
json=json_body)
path_args = {"parent_name": "my_name"}
resp = FakeResource2.update_by_id(sess, r_id, attrs,
path_args=path_args)
self.assertEqual(expected_resp, resp)
sess.patch.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
endpoint_filter=FakeResource2.service,
json=json_body)
def test_update_without_resource_key(self):
key = None
response_value = {"a": 1, "b": 2, "c": 3}
response_body = response_value
attrs = response_value
json_body = attrs
self._test_update_by_id(key, response_value, response_body,
attrs, json_body)
def test_update_with_resource_key(self):
key = "my_key"
response_value = {"a": 1, "b": 2, "c": 3}
response_body = {key: response_value}
attrs = response_value
json_body = {key: attrs}
self._test_update_by_id(key, response_value, response_body,
attrs, json_body)
def test_update_with_response_headers(self):
key = "my_key"
response_value = {"a": 1, "b": 2, "c": 3}
response_body = {key: response_value}
response_headers = {'location': 'foo'}
attrs = response_value.copy()
json_body = {key: attrs}
self._test_update_by_id(key, response_value, response_body,
attrs, json_body,
response_headers=response_headers)
def test_delete_by_id(self):
class FakeResource2(FakeResource):
service = "my_service"
sess = mock.Mock()
sess.delete = mock.Mock(return_value=None)
r_id = "my_id"
resp = FakeResource2.delete_by_id(sess, r_id)
self.assertIsNone(resp)
headers = {'Accept': ''}
sess.delete.assert_called_with(
utils.urljoin(FakeResource2.base_path, r_id),
endpoint_filter=FakeResource2.service,
headers=headers)
path_args = {"parent_name": "my_name"}
resp = FakeResource2.delete_by_id(sess, r_id, path_args=path_args)
self.assertIsNone(resp)
headers = {'Accept': ''}
sess.delete.assert_called_with(
utils.urljoin(FakeResource2.base_path % path_args, r_id),
endpoint_filter=FakeResource2.service,
headers=headers)
    def test_create(self):
        """create() POSTs only the dirty attrs and refreshes all attributes
        (including response headers) from the response body."""
        resp = mock.Mock()
        resp.json = mock.Mock(return_value=fake_body)
        resp.headers = {'location': 'foo'}
        self.session.post = mock.Mock(return_value=resp)
        # Create resource with subset of attributes in order to
        # verify create refreshes all attributes from response.
        obj = FakeResource.new(parent_name=fake_parent,
                               name=fake_name,
                               enabled=True,
                               attr1=fake_attr1)
        self.assertEqual(obj, obj.create(self.session))
        self.assertFalse(obj.is_dirty)

        # Only the four attributes set above should have been sent.
        last_req = self.session.post.call_args[1]["json"][
            FakeResource.resource_key]
        self.assertEqual(4, len(last_req))
        self.assertTrue(last_req['enabled'])
        self.assertEqual(fake_parent, last_req['parent_name'])
        self.assertEqual(fake_name, last_req['name'])
        self.assertEqual(fake_attr1, last_req['attr1'])

        # After create, both mapping and prop access expose the full body.
        self.assertTrue(obj['enabled'])
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_parent, obj['parent_name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertIsNone(obj['status'])

        self.assertTrue(obj.enabled)
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_parent, obj.parent_name)
        self.assertEqual(fake_parent, obj.parent)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr1, obj.attr1)
        self.assertEqual(fake_attr2, obj.second)
        self.assertEqual(fake_attr2, obj.attr2)
        self.assertIsNone(obj.status)
        # header prop is populated from the response headers.
        self.assertEqual('foo', obj.location)

    def test_get(self):
        """get() builds the id URL and refreshes all attributes from the
        response body; header props are NOT populated by a plain get."""
        resp = mock.Mock()
        resp.json = mock.Mock(return_value=fake_body)
        resp.headers = {'location': 'foo'}
        self.session.get = mock.Mock(return_value=resp)
        # Create resource with subset of attributes in order to
        # verify get refreshes all attributes from response.
        obj = FakeResource.from_id(str(fake_id))
        obj['parent_name'] = fake_parent

        self.assertEqual(obj, obj.get(self.session))
        # Check that the proper URL is being built.
        self.assertCalledURL(self.session.get,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])

        self.assertTrue(obj['enabled'])
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_parent, obj['parent_name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertIsNone(obj['status'])

        self.assertTrue(obj.enabled)
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_parent, obj.parent_name)
        self.assertEqual(fake_parent, obj.parent)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr1, obj.attr1)
        self.assertEqual(fake_attr2, obj.second)
        self.assertEqual(fake_attr2, obj.attr2)
        self.assertIsNone(obj.status)
        # Unlike create(), get() without include_headers leaves this unset.
        self.assertIsNone(obj.location)
    def test_get_by_id(self):
        """Class-level get_by_id fetches by URL and maps body attributes."""
        resp = mock.Mock()
        resp.json = mock.Mock(return_value=fake_body)
        self.session.get = mock.Mock(return_value=resp)

        obj = FakeResource.get_by_id(self.session, fake_id,
                                     path_args=fake_arguments)

        # Check that the proper URL is being built.
        self.assertCalledURL(self.session.get,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr2, obj.second)

    def test_get_by_id_with_headers(self):
        """get_by_id(include_headers=True) also maps response headers into
        the 'headers' attr and any resource.header props."""
        header1 = "fake-value1"
        header2 = "fake-value2"
        headers = {"header1": header1,
                   "header2": header2}
        resp = mock.Mock(headers=headers)
        resp.json = mock.Mock(return_value=fake_body)
        self.session.get = mock.Mock(return_value=resp)

        class FakeResource2(FakeResource):
            header1 = resource.header("header1")
            header2 = resource.header("header2")

        obj = FakeResource2.get_by_id(self.session, fake_id,
                                      path_args=fake_arguments,
                                      include_headers=True)

        self.assertCalledURL(self.session.get,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertEqual(header1, obj['headers']['header1'])
        self.assertEqual(header2, obj['headers']['header2'])
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr2, obj.second)
        self.assertEqual(header1, obj.header1)
        self.assertEqual(header2, obj.header2)

    def test_head_by_id(self):
        """head_by_id populates attributes solely from response headers."""
        class FakeResource2(FakeResource):
            header1 = resource.header("header1")
            header2 = resource.header("header2")

        resp = mock.Mock(headers={"header1": "one", "header2": "two"})
        self.session.head = mock.Mock(return_value=resp)

        obj = FakeResource2.head_by_id(self.session, fake_id,
                                       path_args=fake_arguments)

        self.assertCalledURL(self.session.head,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])
        self.assertEqual('one', obj['headers']['header1'])
        self.assertEqual('two', obj['headers']['header2'])
        self.assertEqual('one', obj.header1)
        self.assertEqual('two', obj.header2)
    def test_patch_update(self):
        """update() with patch_update=True issues PATCH with only the dirty
        attrs, then refreshes all attributes from the response."""
        class FakeResourcePatch(FakeResource):
            patch_update = True

        resp = mock.Mock()
        resp.json = mock.Mock(return_value=fake_body)
        resp.headers = {'location': 'foo'}
        self.session.patch = mock.Mock(return_value=resp)
        # Create resource with subset of attributes in order to
        # verify update refreshes all attributes from response.
        obj = FakeResourcePatch.new(id=fake_id, parent_name=fake_parent,
                                    name=fake_name, attr1=fake_attr1)
        self.assertTrue(obj.is_dirty)
        self.assertEqual(obj, obj.update(self.session))
        self.assertFalse(obj.is_dirty)
        self.assertCalledURL(self.session.patch,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])

        # Only the three non-id dirty attrs should be in the request body.
        last_req = self.session.patch.call_args[1]["json"][
            FakeResource.resource_key]
        self.assertEqual(3, len(last_req))
        self.assertEqual(fake_parent, last_req['parent_name'])
        self.assertEqual(fake_name, last_req['name'])
        self.assertEqual(fake_attr1, last_req['attr1'])

        self.assertTrue(obj['enabled'])
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_parent, obj['parent_name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertIsNone(obj['status'])

        self.assertTrue(obj.enabled)
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_parent, obj.parent_name)
        self.assertEqual(fake_parent, obj.parent)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr1, obj.attr1)
        self.assertEqual(fake_attr2, obj.second)
        self.assertEqual(fake_attr2, obj.attr2)
        self.assertIsNone(obj.status)
        self.assertEqual('foo', obj.location)

    def test_put_update(self):
        """update() with patch_update=False (the default) issues PUT;
        otherwise mirrors test_patch_update."""
        class FakeResourcePut(FakeResource):
            # This is False by default, but explicit for this test.
            patch_update = False

        resp = mock.Mock()
        resp.json = mock.Mock(return_value=fake_body)
        resp.headers = {'location': 'foo'}
        self.session.put = mock.Mock(return_value=resp)
        # Create resource with subset of attributes in order to
        # verify update refreshes all attributes from response.
        obj = FakeResourcePut.new(id=fake_id, parent_name=fake_parent,
                                  name=fake_name, attr1=fake_attr1)
        self.assertTrue(obj.is_dirty)
        self.assertEqual(obj, obj.update(self.session))
        self.assertFalse(obj.is_dirty)
        self.assertCalledURL(self.session.put,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])

        last_req = self.session.put.call_args[1]["json"][
            FakeResource.resource_key]
        self.assertEqual(3, len(last_req))
        self.assertEqual(fake_parent, last_req['parent_name'])
        self.assertEqual(fake_name, last_req['name'])
        self.assertEqual(fake_attr1, last_req['attr1'])

        self.assertTrue(obj['enabled'])
        self.assertEqual(fake_name, obj['name'])
        self.assertEqual(fake_parent, obj['parent_name'])
        self.assertEqual(fake_attr1, obj['attr1'])
        self.assertEqual(fake_attr2, obj['attr2'])
        self.assertIsNone(obj['status'])

        self.assertTrue(obj.enabled)
        self.assertEqual(fake_id, obj.id)
        self.assertEqual(fake_name, obj.name)
        self.assertEqual(fake_parent, obj.parent_name)
        self.assertEqual(fake_parent, obj.parent)
        self.assertEqual(fake_attr1, obj.first)
        self.assertEqual(fake_attr1, obj.attr1)
        self.assertEqual(fake_attr2, obj.second)
        self.assertEqual(fake_attr2, obj.attr2)
        self.assertIsNone(obj.status)
        self.assertEqual('foo', obj.location)
    def test_update_early_exit(self):
        """update() returns None without touching the session when clean."""
        obj = FakeResource()
        obj._dirty = []  # Bail out early if there's nothing to update.
        # "session" is a plain string: proves no session method is called.
        self.assertIsNone(obj.update("session"))

    def test_update_no_id_attribute(self):
        """update() tolerates a response body missing the id_attribute."""
        obj = FakeResource.existing(id=1, attr="value1",
                                    parent_name=fake_parent)
        obj.first = "value2"  # Make it dirty
        obj.update_by_id = mock.Mock(return_value=dict())
        # If no id_attribute is returned in the update response, make sure
        # we handle the resulting KeyError.
        self.assertEqual(obj, obj.update("session"))

    def test_delete(self):
        """delete() issues DELETE on the instance's id URL."""
        obj = FakeResource({"id": fake_id, "parent_name": fake_parent})
        obj.delete(self.session)
        self.assertCalledURL(self.session.delete,
                             os.path.join(fake_base_path % fake_arguments,
                                          str(fake_id))[1:])
    def _test_list(self, resource_class):
        """Shared driver: list() with pagination against a two-page mock
        (one full page, then an empty sentinel page)."""
        # Local copy of the expected ids; results[-1]['id'] feeds the
        # expected 'marker' below.
        results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
        for i in range(len(results)):
            results[i]['id'] = fake_id + i
        if resource_class.resources_key is not None:
            body = {resource_class.resources_key:
                    self._get_expected_results()}
            sentinel = {resource_class.resources_key: []}
        else:
            body = self._get_expected_results()
            sentinel = []

        resp1 = mock.Mock()
        resp1.json = mock.Mock(return_value=body)
        resp2 = mock.Mock()
        resp2.json = mock.Mock(return_value=sentinel)
        self.session.get.side_effect = [resp1, resp2]

        objs = list(resource_class.list(self.session, path_args=fake_arguments,
                                        paginated=True))

        # Second request should carry the derived limit and last-seen marker.
        params = {'limit': 3, 'marker': results[-1]['id']}
        self.assertEqual(params, self.session.get.call_args[1]['params'])
        self.assertEqual(3, len(objs))
        for obj in objs:
            self.assertIn(obj.id, range(fake_id, fake_id + 3))
            self.assertEqual(fake_name, obj['name'])
            self.assertEqual(fake_name, obj.name)
            self.assertIsInstance(obj, FakeResource)

    def _get_expected_results(self):
        """Three copies of fake_data with sequential ids from fake_id."""
        results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
        for i in range(len(results)):
            results[i]['id'] = fake_id + i
        return results

    def test_list_keyed_resource(self):
        self._test_list(FakeResource)

    def test_list_non_keyed_resource(self):
        self._test_list(FakeResourceNoKeys)

    def _test_list_call_count(self, paginated):
        # Test that we've only made one call to receive all data
        results = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
        resp = mock.Mock()
        resp.json = mock.Mock(return_value={fake_resources: results})
        attrs = {"get.return_value": resp}
        session = mock.Mock(**attrs)

        # limit exceeds the page size, so one request suffices.
        list(FakeResource.list(session, params={'limit': len(results) + 1},
                               path_args=fake_arguments,
                               paginated=paginated))

        # Ensure we only made one call to complete this.
        self.assertEqual(1, session.get.call_count)

    def test_list_bail_out(self):
        # When we get less data than limit, make sure we made one call
        self._test_list_call_count(True)

    def test_list_nonpaginated(self):
        # When we call with paginated=False, make sure we made one call
        self._test_list_call_count(False)
    def test_determine_limit(self):
        """With no explicit limit, list() infers the page size from the
        first full page and keeps paging until a short page arrives."""
        full_page = [fake_data.copy(), fake_data.copy(), fake_data.copy()]
        last_page = [fake_data.copy()]

        session = mock.Mock()
        session.get = mock.Mock()
        full_response = mock.Mock()
        response_body = {FakeResource.resources_key: full_page}
        full_response.json = mock.Mock(return_value=response_body)
        last_response = mock.Mock()
        response_body = {FakeResource.resources_key: last_page}
        last_response.json = mock.Mock(return_value=response_body)
        pages = [full_response, full_response, last_response]
        session.get.side_effect = pages

        # Don't specify a limit. Resource.list will determine the limit
        # is 3 based on the first `full_page`.
        results = list(FakeResource.list(session, path_args=fake_arguments,
                                         paginated=True))

        self.assertEqual(session.get.call_count, len(pages))
        self.assertEqual(len(full_page + full_page + last_page), len(results))

    def test_empty_list(self):
        """An immediately-empty page yields no results and one request."""
        page = []

        session = mock.Mock()
        session.get = mock.Mock()
        full_response = mock.Mock()
        response_body = {FakeResource.resources_key: page}
        full_response.json = mock.Mock(return_value=response_body)
        pages = [full_response]
        session.get.side_effect = pages

        results = list(FakeResource.list(session, path_args=fake_arguments,
                                         paginated=True))

        self.assertEqual(session.get.call_count, len(pages))
        self.assertEqual(len(page), len(results))
    def test_attrs_name(self):
        """Unset prop reads as None; deleting it must not raise."""
        obj = FakeResource()

        self.assertIsNone(obj.name)
        del obj.name

    def test_to_dict(self):
        """to_dict() returns a plain dict of attrs, excluding 'headers'."""
        kwargs = {
            'enabled': True,
            'name': 'FOO',
            'parent': 'dad',
            'attr1': 'BAR',
            'attr2': ['ZOO', 'BAZ'],
            'status': 'Active',
            'headers': {
                'key': 'value'
            }
        }
        obj = FakeResource(kwargs)
        res = obj.to_dict()
        self.assertIsInstance(res, dict)
        self.assertTrue(res['enabled'])
        self.assertEqual('FOO', res['name'])
        self.assertEqual('dad', res['parent'])
        self.assertEqual('BAR', res['attr1'])
        self.assertEqual(['ZOO', 'BAZ'], res['attr2'])
        self.assertEqual('Active', res['status'])
        self.assertNotIn('headers', res)

    def test_composite_attr_happy(self):
        """A prop with an alias resolves via its primary attr name."""
        obj = FakeResource.existing(**{'attr3': '3'})

        try:
            self.assertEqual('3', obj.third)
        except AttributeError:
            self.fail("third was not found as expected")

    def test_composite_attr_fallback(self):
        """The same prop resolves via its fallback attr name."""
        obj = FakeResource.existing(**{'attr_three': '3'})

        try:
            self.assertEqual('3', obj.third)
        except AttributeError:
            self.fail("third was not found in fallback as expected")

    def test_id_del(self):
        """`del obj.id` removes the configured id_attribute from _attrs."""
        class Test(resource.Resource):
            id_attribute = "my_id"

        attrs = {"my_id": 100}
        t = Test(attrs=attrs)

        self.assertEqual(attrs["my_id"], t.id)
        del t.id
        self.assertTrue(Test.id_attribute not in t._attrs)

    def test_from_name_with_name(self):
        """from_name(str) builds a resource with that name."""
        name = "Ernie Banks"
        obj = FakeResource.from_name(name)
        self.assertEqual(name, obj.name)

    def test_from_id_with_name(self):
        """from_id(str) builds a resource with that id."""
        name = "Sandy Koufax"
        obj = FakeResource.from_id(name)
        self.assertEqual(name, obj.id)

    def test_from_id_with_object(self):
        """from_id(resource) passes the instance straight through."""
        name = "Mickey Mantle"
        obj = FakeResource.new(name=name)
        new_obj = FakeResource.from_id(obj)
        self.assertIs(new_obj, obj)
        self.assertEqual(obj.name, new_obj.name)

    def test_from_id_with_bad_value(self):
        """from_id rejects values that are neither str nor Resource."""
        def should_raise():
            FakeResource.from_id(3.14)
        self.assertThat(should_raise, matchers.raises(ValueError))
def test_dirty_list(self):
class Test(resource.Resource):
attr = resource.prop("attr")
# Check if dirty after setting by prop
sot1 = Test()
self.assertFalse(sot1.is_dirty)
sot1.attr = 1
self.assertTrue(sot1.is_dirty)
# Check if dirty after setting by mapping
sot2 = Test()
sot2["attr"] = 1
self.assertTrue(sot1.is_dirty)
# Check if dirty after creation
sot3 = Test({"attr": 1})
self.assertTrue(sot3.is_dirty)
    def test_update_attrs(self):
        """update_attrs accepts a positional mapping and/or kwargs, stores
        non-prop keys verbatim, applies prop type conversion, and marks
        the resource dirty."""
        class Test(resource.Resource):
            moe = resource.prop("the-attr")
            larry = resource.prop("the-attr2")
            curly = resource.prop("the-attr3", type=int)
            shemp = resource.prop("the-attr4")

        value1 = "one"
        value2 = "two"
        value3 = "3"
        value4 = "fore"
        value5 = "fiver"

        sot = Test({"the-attr": value1})

        # Positional mapping: props and unknown ("notprop") keys both land.
        sot.update_attrs({"the-attr2": value2, "notprop": value4})
        self.assertTrue(sot.is_dirty)
        self.assertEqual(value1, sot.moe)
        self.assertEqual(value1, sot["the-attr"])
        self.assertEqual(value2, sot.larry)
        self.assertEqual(value4, sot.notprop)

        # kwargs by prop name: the type=int conversion is applied.
        sot._reset_dirty()
        sot.update_attrs(curly=value3)
        self.assertTrue(sot.is_dirty)
        self.assertEqual(int, type(sot.curly))
        self.assertEqual(int(value3), sot.curly)

        # kwargs by underlying attr name also resolve to the prop.
        sot._reset_dirty()
        sot.update_attrs(**{"the-attr4": value5})
        self.assertTrue(sot.is_dirty)
        self.assertEqual(value5, sot.shemp)

    def test_get_id(self):
        """get_id accepts either a raw id or a Resource instance."""
        class Test(resource.Resource):
            pass

        ID = "an id"
        res = Test({"id": ID})

        self.assertEqual(ID, resource.Resource.get_id(ID))
        self.assertEqual(ID, resource.Resource.get_id(res))

    def test_convert_ids(self):
        """convert_ids flattens Resource-valued attrs to their ids and
        passes everything else (including None) through unchanged."""
        class TestResourceFoo(resource.Resource):
            pass

        class TestResourceBar(resource.Resource):
            pass

        resfoo = TestResourceFoo({'id': 'FAKEFOO'})
        resbar = TestResourceBar({'id': 'FAKEBAR'})

        self.assertIsNone(resource.Resource.convert_ids(None))

        attrs = {
            'key1': 'value1'
        }
        self.assertEqual(attrs, resource.Resource.convert_ids(attrs))

        attrs = {
            'foo': resfoo,
            'bar': resbar,
            'other': 'whatever',
        }
        res = resource.Resource.convert_ids(attrs)
        self.assertEqual('FAKEFOO', res['foo'])
        self.assertEqual('FAKEBAR', res['bar'])
        self.assertEqual('whatever', res['other'])
    def test_repr(self):
        """repr() round-trips: eval'ing it rebuilds an equivalent object.
        (eval on our own repr output is safe here — test-only input.)"""
        fr = FakeResource()
        fr._loaded = False
        fr.first = "hey"
        fr.second = "hi"
        fr.third = "nah"

        the_repr = repr(fr)

        # Strip the module prefix so the class name resolves in this scope.
        the_repr = the_repr.replace('openstack.tests.unit.test_resource.', '')
        result = eval(the_repr)

        self.assertEqual(fr._loaded, result._loaded)
        self.assertEqual(fr.first, result.first)
        self.assertEqual(fr.second, result.second)
        self.assertEqual(fr.third, result.third)

    def test_id_attribute(self):
        """obj.id follows whatever attribute id_attribute names."""
        faker = FakeResource(fake_data)
        self.assertEqual(fake_id, faker.id)
        faker.id_attribute = 'name'
        self.assertEqual(fake_name, faker.id)
        faker.id_attribute = 'attr1'
        self.assertEqual(fake_attr1, faker.id)
        faker.id_attribute = 'attr2'
        self.assertEqual(fake_attr2, faker.id)
        faker.id_attribute = 'id'
        self.assertEqual(fake_id, faker.id)

    def test_name_attribute(self):
        """obj.name reads/writes through the configured name_attribute."""
        class Person_ES(resource.Resource):
            name_attribute = "nombre"
            nombre = resource.prop('nombre')

        name = "Brian"
        args = {'nombre': name}

        person = Person_ES(args)
        self.assertEqual(name, person.nombre)
        self.assertEqual(name, person.name)

        new_name = "Julien"
        person.name = new_name
        self.assertEqual(new_name, person.nombre)
        self.assertEqual(new_name, person.name)

    def test_boolstr_prop(self):
        """A boolean-string prop coerces reads and rejects invalid writes."""
        faker = FakeResource(fake_data)
        self.assertTrue(faker.enabled)
        self.assertTrue(faker['enabled'])

        faker._attrs['enabled'] = False
        self.assertFalse(faker.enabled)
        self.assertFalse(faker['enabled'])

        # should fail fast
        def set_invalid():
            faker.enabled = 'INVALID'
        self.assertRaises(ValueError, set_invalid)
class ResourceMapping(base.TestCase):
    """Verify Resource's mapping protocol (__getitem__, __setitem__,
    __delitem__, __len__, __iter__) and JSON serialization of
    Resource-typed attribute values."""

    def test__getitem(self):
        value = 10

        class Test(resource.Resource):
            attr = resource.prop("attr")

        t = Test(attrs={"attr": value})
        self.assertEqual(value, t["attr"])

    def test__setitem__existing_item_changed(self):
        class Test(resource.Resource):
            pass

        t = Test()
        key = "attr"
        value = 1
        t[key] = value
        self.assertEqual(value, t._attrs[key])
        self.assertTrue(key in t._dirty)

    def test__setitem__existing_item_unchanged(self):
        class Test(resource.Resource):
            pass

        key = "attr"
        value = 1
        t = Test(attrs={key: value})
        t._reset_dirty()  # Clear dirty list so this checks as unchanged.
        t[key] = value
        self.assertEqual(value, t._attrs[key])
        self.assertTrue(key not in t._dirty)

    def test__setitem__new_item(self):
        class Test(resource.Resource):
            pass

        t = Test()
        key = "attr"
        value = 1
        t[key] = value
        self.assertEqual(value, t._attrs[key])
        self.assertTrue(key in t._dirty)

    def test__delitem__(self):
        class Test(resource.Resource):
            pass

        key = "attr"
        value = 1
        t = Test(attrs={key: value})
        del t[key]
        self.assertTrue(key not in t._attrs)
        self.assertTrue(key in t._dirty)

    def test__len__(self):
        class Test(resource.Resource):
            pass

        attrs = {"a": 1, "b": 2, "c": 3}
        t = Test(attrs=attrs)
        self.assertEqual(len(attrs.keys()), len(t))

    def test__iter__(self):
        class Test(resource.Resource):
            pass

        attrs = {"a": 1, "b": 2, "c": 3}
        t = Test(attrs=attrs)
        for attr in t:
            self.assertEqual(attrs[attr], t[attr])

    def _test_resource_serialization(self, session_method, resource_method):
        """Drive create_by_id/update_by_id through a fake transport whose
        handler fails the test if the json payload isn't serializable."""
        attr_type = resource.Resource

        class Test(resource.Resource):
            allow_create = True
            attr = resource.prop("attr", type=attr_type)

        the_id = 123
        sot = Test()
        sot.attr = resource.Resource({"id": the_id})
        self.assertEqual(attr_type, type(sot.attr))

        def fake_call(*args, **kwargs):
            attrs = kwargs["json"]
            try:
                json.dumps(attrs)
            except TypeError as e:
                self.fail("Unable to serialize _attrs: %s" % e)
            resp = mock.Mock()
            resp.json = mock.Mock(return_value=attrs)
            return resp

        session = mock.Mock()
        setattr(session, session_method, mock.Mock(side_effect=fake_call))

        # BUG FIX: the original called session.create_by_id(...) /
        # session.update_by_id(...) on the Mock session. Mock swallows
        # unknown attribute calls, so fake_call — and the whole
        # serialization check — never executed. Invoke the resource
        # classmethods so the request actually goes through the session.
        if resource_method == "create_by_id":
            Test.create_by_id(session, sot._attrs)
        elif resource_method == "update_by_id":
            Test.update_by_id(session, None, sot._attrs)

    def test_create_serializes_resource_types(self):
        self._test_resource_serialization("post", "create_by_id")

    def test_update_serializes_resource_types(self):
        self._test_resource_serialization("patch", "update_by_id")
class FakeResponse(object):
    """Minimal stand-in for an HTTP response object.

    Stores a canned payload and hands it back from ``json()``, which is
    the only part of the real response interface these tests exercise.
    """

    def __init__(self, response):
        # The payload to be returned verbatim by json().
        self.body = response

    def json(self):
        """Return the canned payload, mimicking ``Response.json()``."""
        return self.body
class TestFind(base.TestCase):
    """Exercise Resource.find(): lookup by id, fallback to lookup by name,
    duplicate detection, and the ignore_missing behaviors."""

    NAME = 'matrix'
    ID = 'Fishburne'
    PROP = 'attribute2'

    def setUp(self):
        super(TestFind, self).setUp()
        self.mock_session = mock.Mock()
        self.mock_get = mock.Mock()
        self.mock_session.get = self.mock_get
        self.matrix = {'id': self.ID, 'name': self.NAME, 'prop': self.PROP}

    def test_name(self):
        # First GET (id lookup) 404s; second (name query) returns one match.
        self.mock_get.side_effect = [
            exceptions.NotFoundException(),
            FakeResponse({FakeResource.resources_key: [self.matrix]})
        ]

        result = FakeResource.find(self.mock_session, self.NAME,
                                   path_args=fake_arguments)

        self.assertEqual(self.NAME, result.name)
        self.assertEqual(self.PROP, result.prop)

    def test_id(self):
        self.mock_get.side_effect = [
            FakeResponse({FakeResource.resource_key: self.matrix})
        ]

        result = FakeResource.find(self.mock_session, self.ID,
                                   path_args=fake_arguments)

        self.assertEqual(self.ID, result.id)
        self.assertEqual(self.PROP, result.prop)

        path = "fakes/" + fake_parent + "/data/" + self.ID
        self.mock_get.assert_any_call(path, endpoint_filter=None)

    def test_id_no_retrieve(self):
        self.mock_get.side_effect = [
            FakeResponse({FakeResource.resources_key: [self.matrix]})
        ]

        class NoRetrieveResource(FakeResource):
            allow_retrieve = False

        result = NoRetrieveResource.find(self.mock_session, self.ID,
                                         path_args=fake_arguments)

        self.assertEqual(self.ID, result.id)
        self.assertEqual(self.PROP, result.prop)

    def test_dups(self):
        dupe = self.matrix.copy()
        dupe['id'] = 'different'
        self.mock_get.side_effect = [
            # Raise a 404 first so we get out of the ID search and into name.
            exceptions.NotFoundException(),
            FakeResponse({FakeResource.resources_key: [self.matrix, dupe]})
        ]

        self.assertRaises(exceptions.DuplicateResource, FakeResource.find,
                          self.mock_session, self.NAME)

    def test_id_attribute_find(self):
        floater = {'ip_address': "127.0.0.1", 'prop': self.PROP}
        self.mock_get.side_effect = [
            FakeResponse({FakeResource.resource_key: floater})
        ]

        # Guarantee the class attribute is restored even if an assertion
        # below fails, so later tests aren't polluted. (The original also
        # assigned id_attribute twice in a row; the duplicate is removed.)
        self.addCleanup(setattr, FakeResource, 'id_attribute',
                        FakeResource.id_attribute)
        FakeResource.id_attribute = 'ip_address'

        result = FakeResource.find(self.mock_session, "127.0.0.1",
                                   path_args=fake_arguments)

        self.assertEqual("127.0.0.1", result.id)
        self.assertEqual(self.PROP, result.prop)
        FakeResource.id_attribute = 'id'

        p = {'ip_address': "127.0.0.1"}
        path = fake_path + "?limit=2"
        # NOTE(review): Mock has no `called_once_with`; this line is a
        # no-op attribute access, not an assertion. It should become
        # assert_called_once_with, but the expected path/params need
        # confirming against Resource.find before making it real.
        self.mock_get.called_once_with(path, params=p, endpoint_filter=None)

    def test_nada(self):
        self.mock_get.side_effect = [
            exceptions.NotFoundException(),
            FakeResponse({FakeResource.resources_key: []})
        ]

        # No match and ignore_missing defaulting to True -> None.
        self.assertIsNone(FakeResource.find(self.mock_session, self.NAME))

    def test_no_name(self):
        self.mock_get.side_effect = [
            exceptions.NotFoundException(),
            FakeResponse({FakeResource.resources_key: [self.matrix]})
        ]
        # Restore name_attribute afterwards; the original leaked this
        # mutation into every subsequent test using FakeResource.
        self.addCleanup(setattr, FakeResource, 'name_attribute',
                        FakeResource.name_attribute)
        FakeResource.name_attribute = None

        # Without a name_attribute there is no name query to fall back to.
        self.assertIsNone(FakeResource.find(self.mock_session, self.NAME))

    def test_nada_not_ignored(self):
        self.mock_get.side_effect = [
            exceptions.NotFoundException(),
            FakeResponse({FakeResource.resources_key: []})
        ]

        self.assertRaises(exceptions.ResourceNotFound, FakeResource.find,
                          self.mock_session, self.NAME, ignore_missing=False)
class TestWaitForStatus(base.TestCase):
    """Exercise resource.wait_for_status polling behavior."""

    def __init__(self, *args, **kwargs):
        # NOTE: fixtures are built once in __init__ rather than setUp;
        # they are read-only FakeResponse objects, so sharing is safe.
        super(TestWaitForStatus, self).__init__(*args, **kwargs)
        self.build = FakeResponse(self.body_with_status(fake_body, 'BUILD'))
        self.active = FakeResponse(self.body_with_status(fake_body, 'ACTIVE'))
        self.error = FakeResponse(self.body_with_status(fake_body, 'ERROR'))

    def setUp(self):
        super(TestWaitForStatus, self).setUp()
        self.sess = mock.Mock()

    def body_with_status(self, body, status):
        """Return a deep copy of `body` with its status field replaced."""
        body_copy = copy.deepcopy(body)
        body_copy[fake_resource]['status'] = status
        return body_copy

    def test_wait_for_status_nothing(self):
        """Already at the desired status: no GETs are issued."""
        self.sess.get = mock.Mock()
        sot = FakeResource.new(**fake_data)
        sot.status = 'ACTIVE'

        self.assertEqual(sot, resource.wait_for_status(
            self.sess, sot, 'ACTIVE', [], 1, 2))
        self.assertEqual([], self.sess.get.call_args_list)

    def test_wait_for_status(self):
        """Polls until the resource transitions BUILD -> ACTIVE."""
        self.sess.get = mock.Mock()
        self.sess.get.side_effect = [self.build, self.active]
        sot = FakeResource.new(**fake_data)

        self.assertEqual(sot, resource.wait_for_status(
            self.sess, sot, 'ACTIVE', [], 1, 2))

    def test_wait_for_status_timeout(self):
        """Never leaving BUILD within the wait window raises a timeout."""
        self.sess.get = mock.Mock()
        self.sess.get.side_effect = [self.build, self.build]
        sot = FakeResource.new(**fake_data)

        self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_status,
                          self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)

    def test_wait_for_status_failures(self):
        """Hitting a listed failure status raises ResourceFailure."""
        self.sess.get = mock.Mock()
        self.sess.get.side_effect = [self.build, self.error]
        sot = FakeResource.new(**fake_data)

        self.assertRaises(exceptions.ResourceFailure, resource.wait_for_status,
                          self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)

    def test_wait_for_status_no_status(self):
        """A resource without a status attribute can't be waited on."""
        class FakeResourceNoStatus(resource.Resource):
            allow_retrieve = True

        sot = FakeResourceNoStatus.new(id=123)

        self.assertRaises(AttributeError, resource.wait_for_status,
                          self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2)
class TestWaitForDelete(base.TestCase):
    """Exercise resource.wait_for_delete polling behavior."""

    def test_wait_for_delete(self):
        """Polls get() until a NotFoundException signals deletion."""
        sess = mock.Mock()
        sot = FakeResource.new(**fake_data)
        sot.get = mock.Mock()
        sot.get.side_effect = [
            sot,
            exceptions.NotFoundException()]

        self.assertEqual(sot, resource.wait_for_delete(sess, sot, 1, 2))

    def test_wait_for_delete_fail(self):
        """Resource that never disappears raises ResourceTimeout."""
        sess = mock.Mock()
        sot = FakeResource.new(**fake_data)
        sot.get = mock.Mock(return_value=sot)

        self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_delete,
                          sess, sot, 1, 2)
|
briancurtin/python-openstacksdk
|
openstack/tests/unit/test_resource.py
|
Python
|
apache-2.0
| 53,273
|
[
"Brian",
"MOE"
] |
5327e979410258ef6393aef25bb3d9e7b863b2d79b1a0e870ee33e24e63cbc2d
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import dask.array as da
import sympy
from hyperspy.component import _get_scaling_factor
from hyperspy._components.expression import Expression
from hyperspy.misc.utils import is_binned # remove in v2.0
from distutils.version import LooseVersion
sqrt2pi = np.sqrt(2 * np.pi)
def _estimate_skewnormal_parameters(signal, x1, x2, only_current):
    """Estimate skew normal parameters (x0, height, scale, shape) from the
    first three sample moments of ``signal`` within the window [x1, x2].

    Follows the method of moments of Lin, Lee and Yen (2007). Handles both
    eager numpy data and lazy dask arrays, and both the current-position
    spectrum (``only_current=True``) and the full navigation stack.
    """
    axis = signal.axes_manager.signal_axes[0]
    i1, i2 = axis.value_range_to_indices(x1, x2)
    X = axis.axis[i1:i2]
    if only_current is True:
        # Single spectrum: 1D data, moments summed over axis 0.
        data = signal()[i1:i2]
        X_shape = (len(X),)
        i = 0
        x0_shape = (1,)
    else:
        # Full dataset: slice the signal axis, broadcast X against the
        # navigation axes and keep a broadcastable shape for per-position
        # scalars (x0_shape has size 1 on the signal axis).
        i = axis.index_in_array
        data_gi = [slice(None), ] * len(signal.data.shape)
        data_gi[axis.index_in_array] = slice(i1, i2)
        data = signal.data[tuple(data_gi)]
        X_shape = [1, ] * len(signal.data.shape)
        X_shape[axis.index_in_array] = data.shape[i]
        x0_shape = list(data.shape)
        x0_shape[i] = 1

    # Moment-based estimators: m1..m3 are the first three (central)
    # data-weighted moments; a1, b1 are the constants from the paper.
    a1 = np.sqrt(2 / np.pi)
    b1 = (4 / np.pi - 1) * a1
    m1 = np.sum(X.reshape(X_shape) * data, i) / np.sum(data, i)
    m2 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 2 * data, i)
                / np.sum(data, i))
    m3 = np.abs(np.sum((X.reshape(X_shape) - m1.reshape(x0_shape)) ** 3 * data, i)
                / np.sum(data, i))

    x0 = m1 - a1 * (m3 / b1) ** (1 / 3)
    scale = np.sqrt(m2 + a1 ** 2 * (m3 / b1) ** (2 / 3))
    delta = np.sqrt(1 / (a1**2 + m2 * (b1 / m3) ** (2 / 3)))
    shape = delta / np.sqrt(1 - delta**2)

    iheight = np.argmin(np.abs(X.reshape(X_shape) - x0.reshape(x0_shape)), i)
    # height is the value of the function at x0, which has to be computed
    # differently for dask array (lazy) and depending on the dimension
    if isinstance(data, da.Array):
        x0, iheight, scale, shape = da.compute(x0, iheight, scale, shape)
        if only_current is True or signal.axes_manager.navigation_dimension == 0:
            height = data.vindex[iheight].compute()
        elif signal.axes_manager.navigation_dimension == 1:
            height = data.vindex[np.arange(signal.axes_manager.navigation_size),
                                 iheight].compute()
        else:
            # vindex fancy-indexes one height per navigation position.
            height = data.vindex[(*np.indices(signal.axes_manager.navigation_shape),
                                  iheight)].compute()
    else:
        if only_current is True or signal.axes_manager.navigation_dimension == 0:
            height = data[iheight]
        elif signal.axes_manager.navigation_dimension == 1:
            height = data[np.arange(signal.axes_manager.navigation_size),
                          iheight]
        else:
            height = data[(*np.indices(signal.axes_manager.navigation_shape),
                           iheight)]

    return x0, height, scale, shape
class SkewNormal(Expression):
    r"""Skew normal distribution component.

    | Asymmetric peak shape based on a normal distribution.
    | For definition see
      https://en.wikipedia.org/wiki/Skew_normal_distribution
    | See also http://azzalini.stat.unipd.it/SN/
    |

    .. math::

        f(x) &= 2 A \phi(x) \Phi(x) \\
        \phi(x) &= \frac{1}{\sqrt{2\pi}}\mathrm{exp}{\left[
                   -\frac{t(x)^2}{2}\right]} \\
        \Phi(x) &= \frac{1}{2}\left[1 + \mathrm{erf}\left(\frac{
                   \alpha~t(x)}{\sqrt{2}}\right)\right] \\
        t(x) &= \frac{x-x_0}{\omega}

    ============== =============
    Variable        Parameter
    ============== =============
    :math:`x_0`     x0
    :math:`A`       A
    :math:`\omega`  scale
    :math:`\alpha`  shape
    ============== =============

    Parameters
    -----------
    x0 : float
        Location of the peak position (not maximum, which is given by
        the `mode` property).
    A : float
        Height parameter of the peak.
    scale : float
        Width (sigma) parameter.
    shape: float
        Skewness (asymmetry) parameter. For shape=0, the normal
        distribution (Gaussian) is obtained. The distribution is
        right skewed (longer tail to the right) if shape>0 and is
        left skewed if shape<0.

    The properties `mean` (position), `variance`, `skewness` and `mode`
    (=position of maximum) are defined for convenience.
    """

    def __init__(self, x0=0., A=1., scale=1., shape=0.,
                 module=['numpy', 'scipy'], **kwargs):
        # erf is only available in sympy >= 1.3 for the expression below.
        if LooseVersion(sympy.__version__) < LooseVersion("1.3"):
            raise ImportError("The `SkewNormal` component requires "
                              "SymPy >= 1.3")
        # We use `_shape` internally because `shape` is already taken in sympy
        # https://github.com/sympy/sympy/pull/20791
        super().__init__(
            expression="2 * A * normpdf * normcdf;\
                normpdf = exp(- t ** 2 / 2) / sqrt(2 * pi);\
                normcdf = (1 + erf(_shape * t / sqrt(2))) / 2;\
                t = (x - x0) / scale",
            name="SkewNormal",
            x0=x0,
            A=A,
            scale=scale,
            shape=shape,
            module=module,
            autodoc=False,
            rename_pars={"_shape": "shape"},
            **kwargs,
        )

        # Boundaries: height and width must be non-negative.
        self.A.bmin = 0.
        self.scale.bmin = 0

        self.isbackground = False
        self.convolved = True

    def estimate_parameters(self, signal, x1, x2, only_current=False):
        """Estimate the skew normal distribution by calculating the momenta.

        Parameters
        ----------
        signal : Signal1D instance
        x1 : float
            Defines the left limit of the spectral range to use for the
            estimation.
        x2 : float
            Defines the right limit of the spectral range to use for the
            estimation.
        only_current : bool
            If False estimates the parameters for the full dataset.

        Returns
        -------
        bool
            True when the estimation has been performed.

        Notes
        -----
        Adapted from Lin, Lee and Yen, Statistica Sinica 17, 909-927 (2007)
        https://www.jstor.org/stable/24307705

        Examples
        --------

        >>> g = hs.model.components1D.SkewNormal()
        >>> x = np.arange(-10, 10, 0.01)
        >>> data = np.zeros((32, 32, 2000))
        >>> data[:] = g.function(x).reshape((1, 1, 2000))
        >>> s = hs.signals.Signal1D(data)
        >>> s.axes_manager._axes[-1].offset = -10
        >>> s.axes_manager._axes[-1].scale = 0.01
        >>> g.estimate_parameters(s, -10, 10, False)
        """
        super()._estimate_parameters(signal)
        axis = signal.axes_manager.signal_axes[0]
        x0, height, scale, shape = _estimate_skewnormal_parameters(
            signal, x1, x2, only_current
        )
        scaling_factor = _get_scaling_factor(signal, axis, x0)

        if only_current is True:
            # Set scalar values on the current-position parameters only.
            self.x0.value = x0
            # height estimates the peak amplitude; A is normalized by
            # sqrt(2*pi) in the expression, hence the factor here.
            self.A.value = height * sqrt2pi
            self.scale.value = scale
            self.shape.value = shape
            if is_binned(signal):
            # in v2 replace by
            #if axis.is_binned:
                self.A.value /= scaling_factor
            return True
        else:
            # Write per-navigation-position maps and mark them as set.
            if self.A.map is None:
                self._create_arrays()
            self.A.map['values'][:] = height * sqrt2pi
            if is_binned(signal):
            # in v2 replace by
            #if axis.is_binned:
                self.A.map['values'] /= scaling_factor
            self.A.map['is_set'][:] = True
            self.x0.map['values'][:] = x0
            self.x0.map['is_set'][:] = True
            self.scale.map['values'][:] = scale
            self.scale.map['is_set'][:] = True
            self.shape.map['values'][:] = shape
            self.shape.map['is_set'][:] = True
            self.fetch_stored_values()
            return True

    @property
    def mean(self):
        """Mean (expected value) of the distribution."""
        delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
        return self.x0.value + self.scale.value * delta * np.sqrt(2 / np.pi)

    @property
    def variance(self):
        """Variance of the distribution."""
        delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
        return self.scale.value**2 * (1 - 2 * delta**2 / np.pi)

    @property
    def skewness(self):
        """Skewness (third standardized moment) of the distribution."""
        delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
        return (4 - np.pi)/2 * (delta * np.sqrt(2/np.pi))**3 / (1 -
                2 * delta**2 / np.pi)**(3/2)

    @property
    def mode(self):
        """Position of the maximum of the distribution (numerical
        approximation for shape != 0; exact x0 for shape == 0)."""
        delta = self.shape.value / np.sqrt(1 + self.shape.value**2)
        muz = np.sqrt(2 / np.pi) * delta
        sigmaz = np.sqrt(1 - muz**2)
        if self.shape.value == 0:
            return self.x0.value
        else:
            m0 = muz - self.skewness * sigmaz / 2 - np.sign(self.shape.value) \
                / 2 * np.exp(- 2 * np.pi / np.abs(self.shape.value))
            return self.x0.value + self.scale.value * m0
|
thomasaarholt/hyperspy
|
hyperspy/_components/skew_normal.py
|
Python
|
gpl-3.0
| 9,640
|
[
"Gaussian"
] |
26e78c34beb21d400baa83277c597768319b0666415a025379f85f79f20cb6b6
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
# Files already observed to import or define _; shared mutable state used by
# check_explicit_underscore_import so each file is only flagged once.
UNDERSCORE_IMPORT_FILES = []

# --- Precompiled regexes used by the per-line checks below ---
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
vi_header_re = re.compile(r"^#\s+vim?:.+")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
    r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
    r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor:"),
                 re.compile("^\.\.\s+moduleauthor::"))
# --- assertEqual / assertTrue misuse detectors ---
asse_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    "(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    "(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
    r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
    r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
asse_equal_end_with_none_re = re.compile(
    r"assertEqual\(.*?,\s+None\)$")
asse_equal_start_with_none_re = re.compile(
    r"assertEqual\(None,")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
#                 asse_true_false_with_in_or_not_in regex checks
#                 assertTrue/False(A in B) cases where B argument has no spaces
#                 asse_true_false_with_in_or_not_in_spaces regex checks cases
#                 where B argument has spaces and starts/ends with [, ', ".
#                 For example: [1, 2, 3], "some string", 'another string'.
#                 We have to separate these regexes to escape a false positives
#                 results. B argument should have spaces only if it starts
#                 with [, ", '. Otherwise checking of string
#                 "assertFalse(A in B and C in D)" will be false positives.
#                 In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
    r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
    r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
    r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
# --- Log-translation policy detectors (N321, N328-N330) ---
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
log_translation_info = re.compile(
    r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
    r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
    r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
    r"(.)*LOG\.(audit|error|info|critical|exception)"
    "\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
    r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
# --- oslo namespace-package import detectors (N333) ---
# TODO(dims): When other oslo libraries switch over non-namespace'd
# imports, we need to add them to the regexp below.
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.]"
    r"(concurrency|config|context|db|i18n|"
    r"log|messaging|middleware|rootwrap|"
    r"serialization|utils|vmware)")
oslo_namespace_imports_2 = re.compile(r"from[\s]*oslo[\s]*import[\s]*"
    r"(concurrency|config|context|db|i18n|"
    r"log|messaging|middleware|rootwrap|"
    r"serialization|utils|vmware)")
oslo_namespace_imports_3 = re.compile(r"import[\s]*oslo\."
    r"(concurrency|config|context|db|i18n|"
    r"log|messaging|middleware|rootwrap|"
    r"serialization|utils|vmware)")
class BaseASTChecker(ast.NodeVisitor):
    """Simple framework for writing AST-based hacking checks.

    Subclasses implement ``visit_*`` methods like any other AST visitor and
    call ``self.add_error(offending_node)`` when a problem is found; the
    error location is taken from the node itself.  Each subclass must also
    define a ``CHECK_DESC`` class attribute holding the human readable
    error message.
    """

    def __init__(self, tree, filename):
        """Created automatically by pep8.

        :param tree: an AST tree
        :param filename: name of the file being analyzed
                         (ignored by our checks)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Entry point called automatically by pep8; returns the errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error located at *node* in pep8's error format."""
        self._errors.append((node.lineno, node.col_offset,
                             message or self.CHECK_DESC, self.__class__))

    def _check_call_names(self, call_node, names):
        """Return True iff *call_node* directly calls one of *names*."""
        if isinstance(call_node, ast.Call) and \
                isinstance(call_node.func, ast.Name):
            return call_node.func.id in names
        return False
def import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt

    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.

    N307
    """
    in_virt_tree = "nova/virt" in filename and not filename.endswith("fake.py")
    if in_virt_tree and logical_line.startswith("from nova import db"):
        yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
    """Public db api methods may not accept a session argument (N309)."""
    if "db/api.py" not in filename:
        return
    if session_check.match(logical_line):
        yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
    """Enforce timeutils.utcnow() over the datetime equivalents (N310)."""
    # tools are OK to use the standard datetime module
    if "/tools/" in filename:
        return
    template = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
    for func in ('now', 'utcnow'):
        pos = logical_line.find('datetime.%s' % func)
        if pos != -1:
            yield (pos, template % func)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "firewall",
"disk", "api", "imagecache", "cpu", "hardware"]:
return None
return driver
def import_no_virt_driver_import_deps(physical_line, filename):
    """Check virt drivers' modules aren't imported by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not access those drivers. Any code that
    is to be shared should be refactored into a common
    module

    N311
    """
    importing_driver = _get_virt_name(virt_file_re, filename)
    imported_driver = _get_virt_name(virt_import_re, physical_line)
    if (importing_driver is not None and imported_driver is not None
            and importing_driver != imported_driver):
        return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
    """Check virt drivers' config vars aren't used by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not use their config vars. Any config vars
    that are to be shared should be moved into a common module

    N312
    """
    using_driver = _get_virt_name(virt_file_re, filename)
    owning_driver = _get_virt_name(virt_config_re, physical_line)
    if (using_driver is not None and owning_driver is not None
            and using_driver != owning_driver):
        return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
    """Require config option help strings to start capitalized (N313)."""
    if not cfg_re.match(logical_line):
        return
    for idx, token in enumerate(tokens):
        if token[1] == "help":
            # tokens[idx + 2] holds the help string literal; index 1 of its
            # text is the first character inside the opening quote.
            txt = tokens[idx + 2][1]
            if len(txt) > 1 and txt[1].islower():
                yield (0, "N313: capitalize help string")
def no_vi_headers(physical_line, line_number, lines):
    """Check for vi editor configuration in source files.

    By default vi modelines can only appear in the first or
    last 5 lines of a source file.

    N314
    """
    # NOTE(gilliard): line_number is 1-indexed
    near_file_edge = line_number <= 5 or line_number > len(lines) - 5
    if near_file_edge and vi_header_re.match(physical_line):
        return 0, "N314: Don't put vi configuration in source files"
def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    N316
    """
    found = asse_trueinst_re.match(logical_line)
    if found:
        yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences

    N317
    """
    found = asse_equal_type_re.match(logical_line)
    if found:
        yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def assert_equal_none(logical_line):
    """Check for assertEqual(A, None) or assertEqual(None, A) sentences

    N318
    """
    matched = (asse_equal_start_with_none_re.search(logical_line) or
               asse_equal_end_with_none_re.search(logical_line))
    if matched:
        yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
                  "sentences not allowed")
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.
    * Use filename so we can start enforcing this in specific folders instead
      of needing to do so all at once.

    N319
    """
    translated_debug_prefix = "LOG.debug(_("
    if logical_line.startswith(translated_debug_prefix):
        yield (0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
    """Check for 'from nova.i18n import _'

    N337
    """
    if 'nova/tests/' not in filename:
        return
    if import_translation_for_log_or_exception.match(logical_line):
        yield (0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
    """Check for setting CONF.* attributes directly in tests

    The value can leak out of tests affecting how subsequent tests run.
    Using self.flags(option=value) is the preferred method to temporarily
    set config options in tests.

    N320
    """
    if 'nova/tests/' not in filename:
        return
    if conf_attribute_set_re.match(logical_line):
        yield (0, "N320: Setting CONF.* attributes directly in tests is "
                  "forbidden. Use self.flags(option=value) instead")
def validate_log_translations(logical_line, physical_line, filename):
    """Require translated log messages (N321, N328, N329, N330)."""
    # Translations are not required in the test directory
    # and the Xen utilities
    if ("nova/tests" in filename or
            "plugins/xenserver/xenapi/etc/xapi.d" in filename):
        return
    if pep8.noqa(physical_line):
        return
    # Order matters: the checks run most-specific first, mirroring the
    # original sequential tests.
    level_checks = (
        (log_translation_info,
         "N328: LOG.info messages require translations `_LI()`!"),
        (log_translation_exception,
         "N329: LOG.exception messages require translations `_LE()`!"),
        (log_translation_LW,
         "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"),
        (log_translation,
         "N321: Log messages require translations!"),
    )
    for pattern, msg in level_checks:
        if pattern.match(logical_line):
            yield (0, msg)
def no_mutable_default_args(logical_line):
    """Check for mutable ({} or []) default arguments (N322)."""
    if mutable_default_args.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function

    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function.  We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.

    N323
    """
    if filename in UNDERSCORE_IMPORT_FILES:
        # Already known to import (or define) _; nothing left to check.
        return
    if (underscore_import_check.match(logical_line) or
            custom_underscore_check.match(logical_line)):
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif (translated_log.match(logical_line) or
            string_translation.match(logical_line)):
        yield (0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
    """Prefer jsonutils over the stdlib json module (N324)."""
    # the code below that path is not meant to be executed from neutron
    # tree where jsonutils module is present, so don't enforce its usage
    # for this subdirectory; tools are OK to use the standard json module.
    if "plugins/xenserver" in filename or "/tools/" in filename:
        return
    if "json." not in logical_line:
        return
    msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        pos = logical_line.find('json.%s' % func)
        if pos != -1:
            yield (pos, msg % {'fun': func[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
                                filename):
    """The api_version decorator must be the outermost decorator (N332)."""
    stacked_below_other_decorator = (
        blank_before == 0 and
        re.match(api_version_re, logical_line) and
        re.match(decorator_re, previous_logical))
    if stacked_below_other_decorator:
        yield (0, "N332: the api_version decorator must be the first decorator"
                  " on a method.")
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.

    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """
    CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')
    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # Stack of names bound by enclosing `except ... as name` handlers;
        # visit_Call only flags str()/unicode() applied to these names.
        self.name = []
        # Call nodes already reported, so each is flagged at most once.
        self.already_checked = []
    def visit_TryExcept(self, node):
        # NOTE(review): ast.TryExcept (and handler.name being a node with
        # an .id attribute) exists only on Python 2; on Python 3 this
        # visitor method is never dispatched — confirm the intended
        # interpreter support before modifying.
        for handler in node.handlers:
            if handler.name:
                self.name.append(handler.name.id)
                super(CheckForStrUnicodeExc, self).generic_visit(node)
                # Pop the handler's name once its subtree has been visited.
                self.name = self.name[:-1]
            else:
                super(CheckForStrUnicodeExc, self).generic_visit(node)
    def visit_Call(self, node):
        # Flag str(e)/unicode(e) where e names an in-scope handled exception.
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                if isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.

    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """
    CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')
    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        """Flag additions whose left or right operand is a translation."""
        if isinstance(node.op, ast.Add):
            # Only the first matching operand is reported, mirroring the
            # original if/elif behaviour.
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, blank_before, filename):
    """Require oslo_* module imports over the oslo namespace package (N333)."""
    if re.match(oslo_namespace_imports, logical_line):
        yield (0, ("N333: '%s' must be used instead of '%s'.") % (
            logical_line.replace('oslo.', 'oslo_'),
            logical_line))
    for pattern in (oslo_namespace_imports_2, oslo_namespace_imports_3):
        found = re.match(pattern, logical_line)
        if found:
            yield (0, ("N333: 'module %s should not be imported "
                       "from oslo namespace.") % found.group(1))
def assert_true_or_false_with_in(logical_line):
    """Check for assertTrue/False(A in B), assertTrue/False(A not in B),
    assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
    sentences.

    N334
    """
    matched = (asse_true_false_with_in_or_not_in.search(logical_line) or
               asse_true_false_with_in_or_not_in_spaces.search(logical_line))
    if matched:
        yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
                  "assertTrue/False(A in/not in B) when checking collection "
                  "contents.")
def assert_raises_regexp(logical_line):
    """Check for usage of deprecated assertRaisesRegexp

    N335
    """
    if asse_raises_regexp.search(logical_line):
        yield (0, "N335: assertRaisesRegex must be used instead "
                  "of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
    """Check for dict() constructors fed a sequence of key-value pairs (N336)."""
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, "N336: Must use a dict comprehension instead of a dict "
                  "constructor with a sequence of key-value pairs.")
def assert_equal_in(logical_line):
    """Check for assertEqual(A in B, True), assertEqual(True, A in B),
    assertEqual(A in B, False) or assertEqual(False, A in B) sentences

    N338
    """
    matched = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
               asse_equal_in_end_with_true_or_false_re.search(logical_line))
    if matched:
        yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
                  "assertEqual(A in B, True/False) when checking collection "
                  "contents.")
def factory(register):
    """Register every Nova hacking check with pep8's *register* hook.

    The registration order matches the original one-call-per-check list.
    """
    all_checks = (
        import_no_db_in_virt,
        no_db_session_in_public_api,
        use_timeutils_utcnow,
        import_no_virt_driver_import_deps,
        import_no_virt_driver_config_deps,
        capital_cfg_help,
        no_vi_headers,
        no_import_translation_in_tests,
        assert_true_instance,
        assert_equal_type,
        assert_equal_none,
        assert_raises_regexp,
        no_translate_debug_logs,
        no_setting_conf_directly_in_tests,
        validate_log_translations,
        no_mutable_default_args,
        check_explicit_underscore_import,
        use_jsonutils,
        check_api_version_decorator,
        CheckForStrUnicodeExc,
        CheckForTransAdd,
        check_oslo_namespace_imports,
        assert_true_or_false_with_in,
        dict_constructor_with_list_copy,
        assert_equal_in,
    )
    for check in all_checks:
        register(check)
|
petrutlucian94/nova
|
nova/hacking/checks.py
|
Python
|
apache-2.0
| 21,668
|
[
"VisIt"
] |
0ef54a6b9c8946f8a5de0e20d1b40db8d02486fe6ada23d325d2f9536fa493e1
|
""" Plot the inference results
"""
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # To enable saving remotely
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Get a red-white-black cmap
# Segment data for a red -> white -> black colormap: at 0.0 the RGB triple
# is (1, 0, 0) (red), at 0.5 it is (1, 1, 1) (white) and at 1.0 it is
# (0, 0, 0) (black).
cdict = {'red': ((0.0, 1.0, 1.0),
                 (0.5, 1.0, 1.0),
                 (1.0, 0.0, 0.0)),
         'green': ((0.0, 0.0, 0.0),
                   (0.5, 1.0, 1.0),
                   (1.0, 0.0, 0.0)),
         'blue': ((0.0, 0.0, 0.0),
                  (0.5, 1.0, 1.0),
                  (1.0, 0.0, 0.0))}
# Shared diverging colormap used by the connectivity/impulse plots below.
cmap = LinearSegmentedColormap('my_colormap',cdict,256)
from pyglm.utils.avg_dicts import average_list_of_dicts, std_list_of_dicts
# NOTE(review): cPickle exists only on Python 2; this module appears to
# target a Python 2 / legacy-matplotlib environment.
import cPickle
def plot_connectivity_matrix(s_smpls, s_true=None):
    """Plot the posterior-mean connectivity matrix.

    :param s_smpls: list of state-sample dicts; each must provide
        ['net']['weights']['W'] and ['net']['graph']['A'].
    :param s_true: optional ground-truth state dict with the same keys;
        when given it is shown in a side-by-side subplot.
    """
    # Compute the inferred connectivity matrix: average of W*A over samples.
    W_inf = np.zeros_like(s_smpls[0]['net']['weights']['W'])
    for smpl in s_smpls:
        W_inf += smpl['net']['weights']['W'] * smpl['net']['graph']['A']
    W_inf /= len(s_smpls)
    true_given = s_true is not None
    if true_given:
        plt.subplot(1,2,1)
        W_true = s_true['net']['weights']['W'] * s_true['net']['graph']['A']
        # Shared color scale so the two panels are directly comparable.
        W_max = np.amax(np.maximum(np.abs(W_true),np.abs(W_inf)))
    else:
        W_max = np.amax(np.abs(W_inf))
    # Upsample each matrix entry to a px_per_node x px_per_node pixel block.
    px_per_node = 10
    if true_given:
        plt.imshow(np.kron(W_true,np.ones((px_per_node,px_per_node))),
                   vmin=-W_max,vmax=W_max,
                   extent=[0,1,0,1],
                   interpolation='nearest',
                   cmap=cmap)
        plt.colorbar()
        plt.title('True Network')
        plt.subplot(1,2,2)
    # Plot the inferred network
    plt.imshow(np.kron(W_inf,np.ones((px_per_node,px_per_node))),
               vmin=-W_max,vmax=W_max,
               extent=[0,1,0,1],
               interpolation='nearest',
               cmap=cmap)
    plt.colorbar()
    plt.title('Inferred Network')
def plot_spatiotemporal_tuning_curves(s_mean, s_std=None, s_true=None, color=None):
    """Plot spatial and temporal stimulus tuning curves.

    Plots one row of subplots per tuning curve r: the spatial component
    (image for 3-d curves, line for 2-d) followed by the temporal component.

    :param s_mean: state dict; only acts when
        s_mean['latent'] contains 'sharedtuningcurve_provider'.
    :param s_std: optional state dict of standard deviations; +/- 2 std
        envelopes are drawn for the line plots.
    :param s_true: optional ground-truth state dict, overlaid in black.
    :param color: matplotlib color for the inferred curves.
    """
    if 'sharedtuningcurve_provider' in s_mean['latent']:
        tc_mean = s_mean['latent']['sharedtuningcurve_provider']
        if s_std is not None:
            tc_std = s_std['latent']['sharedtuningcurve_provider']
        # Get the stimulus responses
        tc_x = tc_mean['stim_response_x']
        tc_t = tc_mean['stim_response_t']
        # import pdb; pdb.set_trace()
        # R tuning curves are indexed by the trailing axis of both arrays.
        R = tc_x.shape[-1]
        assert tc_t.shape[-1] == R
        # Plot each tuning curve
        if s_true is not None:
            true_tc_x = s_true['latent']['sharedtuningcurve_provider']['stim_response_x']
            true_tc_t = s_true['latent']['sharedtuningcurve_provider']['stim_response_t']
            true_R = true_tc_x.shape[-1]
            # A third column is needed to show the true spatial image.
            if tc_x.ndim == 3:
                ncols = 3
            else:
                ncols = 2
        else:
            ncols = 2
        for r in range(R):
            # Plot the spatial component of the stimulus response
            plt.subplot(R,ncols,r*ncols+1)
            if tc_x.ndim == 3:
                px_per_node = 10
                stim_x_max = np.amax(np.abs(tc_x[:,:,r]))
                plt.imshow(np.kron(tc_x[:,:,r],np.ones((px_per_node,px_per_node))),
                           vmin=-stim_x_max,vmax=stim_x_max,
                           extent=[0,1,0,1],
                           cmap='RdGy',
                           interpolation='nearest')
                plt.colorbar()
                plt.title('$f_{%d}(\\Delta x)$' % r)
                if s_true is not None and r < true_R:
                    plt.subplot(R,ncols,r*ncols+2)
                    px_per_node = 10
                    stim_x_max = np.amax(np.abs(true_tc_x[:,:,r]))
                    plt.imshow(np.kron(true_tc_x[:,:,r],np.ones((px_per_node,px_per_node))),
                               vmin=-stim_x_max,vmax=stim_x_max,
                               extent=[0,1,0,1],
                               cmap='RdGy',
                               interpolation='nearest')
                    plt.colorbar()
                    plt.title('True $f_{%d}(\\Delta x)$' % r)
            elif tc_x.ndim == 2:
                plt.plot(tc_x[:,r], color=color, linestyle='-')
                # Plot the true tuning curve
                if s_true is not None and r < true_R:
                    plt.plot(true_tc_x[:,r], color='k', linestyle='-')
                # If standard deviation is given, plot that as well
                if s_std is not None:
                    stim_x_std = tc_std['stim_response_x'][:,r]
                    plt.plot(tc_x[:,r] + 2*stim_x_std, color=color, linestyle='--')
                    plt.plot(tc_x [:,r]- 2*stim_x_std, color=color, linestyle='--')
                plt.title('$f_{%d}(\\Delta x)$' % r)
            else:
                raise Exception('Invalid TC dimension.')
            # Temporal component always occupies the last column of row r.
            plt.subplot(R,ncols,(r+1)*ncols)
            plt.plot(tc_t[:,r], color=color)
            # Plot the true tuning curve
            if s_true is not None and r < true_R:
                plt.plot(true_tc_t[:,r], color='k', linestyle='-')
            if s_std is not None:
                stim_t_std = tc_std['stim_response_t'][:,r]
                plt.plot(tc_t[:,r] + 2*stim_t_std, color=color, linestyle='--')
                plt.plot(tc_t[:,r] - 2*stim_t_std, color=color, linestyle='--')
            plt.title('$f_{%d}(\\Delta t)$' % r)
def plot_stim_response(s_glm, s_glm_std=None, color=None):
    """Plot the background stimulus response of a single GLM state.

    Handles both separable responses ('stim_response_x'/'stim_response_t'
    under s_glm['bkgd']) and a single combined 'stim_response'.  When
    s_glm_std is given, +/- 2 std envelopes are drawn for line plots.

    NOTE(review): plt.hold was removed in matplotlib 3.0 — this code
    targets a legacy matplotlib; confirm before upgrading.
    """
    if 'stim_response_t' in s_glm['bkgd'].keys() and \
       'stim_response_x' in s_glm['bkgd'].keys():
        # Get the stimulus responses
        stim_x = s_glm['bkgd']['stim_response_x']
        stim_t = s_glm['bkgd']['stim_response_t']
        # Plot the spatial component of the stimulus response
        plt.subplot(1,2,1)
        if stim_x.ndim >= 2:
            # 2-d spatial response: render as an upsampled image.
            px_per_node = 10
            stim_x_max = np.amax(np.abs(stim_x))
            plt.imshow(np.kron(stim_x,np.ones((px_per_node,px_per_node))),
                       vmin=-stim_x_max,vmax=stim_x_max,
                       extent=[0,1,0,1],
                       interpolation='nearest')
            plt.colorbar()
        else:
            plt.plot(stim_x, color=color, linestyle='-')
            plt.hold(True)
            # If standard deviation is given, plot that as well
            if s_glm_std is not None:
                stim_x_std = s_glm_std['bkgd']['stim_response_x']
                plt.plot(stim_x + 2*stim_x_std, color=color, linestyle='--')
                plt.plot(stim_x - 2*stim_x_std, color=color, linestyle='--')
        plt.subplot(1,2,2)
        plt.plot(stim_t, color=color)
        plt.hold(True)
        if s_glm_std is not None:
            stim_t_std = s_glm_std['bkgd']['stim_response_t']
            plt.plot(stim_t + 2*stim_t_std, color=color, linestyle='--')
            plt.plot(stim_t - 2*stim_t_std, color=color, linestyle='--')
    elif 'stim_response' in s_glm['bkgd'].keys():
        # Non-separable case: a single temporal stimulus response.
        stim_t = s_glm['bkgd']['stim_response']
        plt.plot(stim_t, color=color, linestyle='-')
        plt.hold(True)
        if s_glm_std is not None:
            stim_t_std = s_glm_std['bkgd']['stim_response']
            plt.plot(stim_t + 2*stim_t_std, color=color, linestyle='--')
            plt.plot(stim_t - 2*stim_t_std, color=color, linestyle='--')
def plot_imp_responses(s_inf, s_std=None, fig=None, color=None, use_bgcolor=False, linestyle='-', dt=0.001):
    """ Plot the impulse responses plus or minus two standard devs

    Creates an N x N grid of subplots (presynaptic neuron = row,
    postsynaptic neuron = column) of weighted impulse responses, with a
    shared colorbar encoding the time-integrated weight of each response.

    :param s_inf: inferred state dict with ['net'] and ['glms'] entries.
    :param s_std: optional state dict of standard deviations.
    :param fig: existing figure to draw into; a new one is made if None.
    :param use_bgcolor: shade each subplot by its integrated weight.
    :param dt: time-bin width used for the trapezoidal weight integral.
    :returns: the matplotlib figure.

    NOTE(review): Axes.hold and Axes.set_axis_bgcolor were removed in
    matplotlib 3.0 — this function requires a legacy matplotlib.
    """
    # Get the weights of the impulse responses
    W_inf = s_inf['net']['weights']['W'] * s_inf['net']['graph']['A']
    N = W_inf.shape[0]
    s_imps = []
    s_imps_std = []
    for n_post in np.arange(N):
        s_imp_row = []
        s_imp_std_row = []
        s_imp_n = s_inf['glms'][n_post]['imp']['impulse']
        if s_std is not None:
            s_imp_std_n = s_std['glms'][n_post]['imp']['impulse']
        for n_pre in np.arange(N):
            # Scale each impulse response by its (masked) network weight.
            w = W_inf[n_pre,n_post]
            s_imp_row.append(w*s_imp_n[n_pre,:])
            if s_std is not None:
                s_imp_std_row.append(w*s_imp_std_n[n_pre,:])
        s_imps.append(s_imp_row)
        s_imps_std.append(s_imp_std_row)
    s_imps = np.array(s_imps)
    s_imps_std = np.array(s_imps_std)
    # Transpose so that pre is row index
    s_imps = np.transpose(s_imps, [1,0,2])
    if s_std is not None:
        s_imps_std = np.transpose(s_imps_std, [1,0,2])
    else:
        s_imps_std = np.zeros_like(s_imps)
    # Common y-limit covering every mean +/- 2 std curve.
    imp_max = np.amax(np.abs(s_imps+2*s_imps_std))
    t_imp = dt*np.arange(s_imps.shape[2])
    W_imp = np.trapz(s_imps,t_imp, axis=2)
    W_imp_max = np.amax(W_imp)
    # Create a figure if necessary
    if fig is None:
        fig = plt.figure()
    for n_pre in np.arange(N):
        for n_post in np.arange(N):
            ax = fig.add_subplot(N,N,n_pre*N+n_post + 1)
            if use_bgcolor:
                # Set background color based on weight of impulse
                bkgd_color = cmap((W_imp[n_pre,n_post] -(-W_imp_max))/(2*W_imp_max))
                # Set it slightly transparent
                tcolor = list(bkgd_color)
                tcolor[3] = 0.75
                tcolor = tuple(tcolor)
                ax.set_axis_bgcolor(tcolor)
            # Plot the inferred impulse response
            ax.hold(True)
            ax.plot(np.squeeze(s_imps[n_pre,n_post,:]),color=color, linestyle=linestyle)
            # Plot plus or minus 2 stds
            if s_std is not None:
                ax.plot(np.squeeze(s_imps[n_pre,n_post,:] +
                                   2*s_imps_std[n_pre,n_post,:]),
                        color=color,
                        linestyle='--')
                ax.plot(np.squeeze(s_imps[n_pre,n_post,:] -
                                   2*s_imps_std[n_pre,n_post,:]),
                        color=color,
                        linestyle='--')
            # Zero reference line.
            ax.plot(np.zeros_like(np.squeeze(s_imps[n_pre,n_post,:])),
                    color='k', linestyle=':')
            # Set labels
            #if not (n_pre == N-1 and n_post == 0):
            if True:
                ax.set_xlabel("")
                ax.set_xticks([])
                ax.set_yticks([])
                ax.set_ylabel("")
            ax.set_ylim(-imp_max,imp_max)
    # Add a colorbar
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.1, 0.05, 0.8])
    # Rather than using the colorbar method, directly
    # instantiate a colorbar
    from matplotlib.colorbar import ColorbarBase
    cbar_ticks = np.array([-0.9*W_imp_max, 0.0, 0.9*W_imp_max]).round(2)
    if np.allclose(cbar_ticks, 0.0):
        cbar_ticks = None
    cbar = ColorbarBase(cbar_ax, cmap=cmap,
                        values=np.linspace(-W_imp_max, W_imp_max, 500),
                        boundaries=np.linspace(-W_imp_max, W_imp_max, 500),
                        ticks=cbar_ticks)
    return fig
def plot_imp_responses_fast(s_inf, s_std=None, fig=None, color=None, use_bgcolor=False, linestyle='-'):
    """ Plot the impulse responses plus or minus two standard devs

    In this case we use a single axes rather than multiple subplots since
    matplotlib is, unfortunately, ridiculously slow when it comes to large
    numbers of subplots

    Each (pre, post) pair is drawn at a manually computed offset inside one
    big frameless axes, with an optional background Rectangle standing in
    for the per-subplot background color of plot_imp_responses.

    NOTE(review): Axes.hold was removed in matplotlib 3.0 — this function
    requires a legacy matplotlib.
    """
    # Get the weights of the impulse responses
    W_inf = s_inf['net']['weights']['W'] * s_inf['net']['graph']['A']
    N = W_inf.shape[0]
    s_imps = []
    s_imps_std = []
    for n_post in np.arange(N):
        s_imp_row = []
        s_imp_std_row = []
        s_imp_n = s_inf['glms'][n_post]['imp']['impulse']
        if s_std is not None:
            s_imp_std_n = s_std['glms'][n_post]['imp']['impulse']
        for n_pre in np.arange(N):
            # Scale each impulse response by its (masked) network weight.
            w = W_inf[n_pre,n_post]
            s_imp_row.append(w*s_imp_n[n_pre,:])
            if s_std is not None:
                s_imp_std_row.append(w*s_imp_std_n[n_pre,:])
        s_imps.append(s_imp_row)
        s_imps_std.append(s_imp_std_row)
    s_imps = np.array(s_imps)
    s_imps_std = np.array(s_imps_std)
    # Transpose so that pre is row index
    s_imps = np.transpose(s_imps, [1,0,2])
    if s_std is not None:
        s_imps_std = np.transpose(s_imps_std, [1,0,2])
    else:
        s_imps_std = np.zeros_like(s_imps)
    imp_max = np.amax(np.abs(s_imps+2*s_imps_std))
    # NOTE: unlike plot_imp_responses, weights are plain sums here rather
    # than a trapezoidal integral over time.
    W_imp = np.sum(s_imps,2)
    W_imp_max = np.amax(W_imp)
    # Create a figure if necessary
    if fig is None:
        fig = plt.figure()
    # Get subplot sizes
    x_sz = s_imps.shape[2] # Number of time bins per impulse response
    y_sz = 2*W_imp_max # Amplitude of impulse responses
    x_buf = .05*x_sz # x buffer on either side of figure
    y_buf = .05*y_sz # y buffer on top and bottom of figure
    ax = plt.subplot(111)
    plt.setp(ax, 'frame_on', False)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.hold(True)
    from matplotlib.patches import Rectangle
    for n_pre in np.arange(N):
        for n_post in np.arange(N):
            x_foff = (x_sz + 2*x_buf) * n_post # Offset of this subfigure
            y_foff = (y_sz + 2*y_buf) * n_pre
            x_aoff = x_foff + x_buf # Offset of this axis
            y_aoff = y_foff + y_buf
            if use_bgcolor:
                # Add a semitransparent patch for the background
                # Set background color based on weight of impulse
                bkgd_color = cmap((W_imp[n_pre,n_post] -(-W_imp_max))/(2*W_imp_max))
                # Set it slightly transparent
                tcolor = list(bkgd_color)
                tcolor[3] = 0.75
                tcolor = tuple(tcolor)
                #ax.set_axis_bgcolor(tcolor)
                ax.add_patch(Rectangle((x_foff,y_foff+y_sz+2*y_buf), # Lower left coordinate
                                       x_sz+2*x_buf, # width
                                       y_sz+2*y_buf, # height
                                       alpha=0.75,
                                       color=tcolor,
                                       fill=True))
            # Plot the inferred impulse response
            ax.plot(x_aoff + np.arange(x_sz),
                    y_aoff + np.squeeze(s_imps[n_pre,n_post,:]),
                    color=color, linestyle=linestyle)
            # Plot plus or minus 2 stds
            if s_std is not None:
                ax.plot(x_aoff + np.arange(x_sz),
                        y_aoff + np.squeeze(s_imps[n_pre,n_post,:] +
                                            2*s_imps_std[n_pre,n_post,:]),
                        color=color,
                        linestyle='--')
                ax.plot(x_aoff + np.arange(x_sz),
                        y_aoff + np.squeeze(s_imps[n_pre,n_post,:] -
                                            2*s_imps_std[n_pre,n_post,:]),
                        color=color,
                        linestyle='--')
            # Zero reference line for this sub-panel.
            ax.plot(x_aoff + np.arange(x_sz),
                    y_aoff + np.zeros_like(np.squeeze(s_imps[n_pre,n_post,:])),
                    color='k', linestyle=':')
    return fig
def plot_firing_rate(s_glm, s_glm_std=None, color=None, tt=None, T_lim=None, plot_currents=True):
    """Plot the firing rate trace (s_glm['lam']) over time.

    :param s_glm: state dict containing 'lam' and, when plot_currents is
        True, 'I_bias', 'I_bkgd' and 'I_net'.
    :param s_glm_std: optional std-dev state dict; a shaded +/- 2 std
        polygon is drawn around the rate.
    :param tt: time axis; defaults to sample indices.
    :param T_lim: slice selecting the plotted time window; defaults to all.

    NOTE(review): plt.hold was removed in matplotlib 3.0 — this code
    targets a legacy matplotlib.
    """
    if tt is None:
        tt = np.arange(np.size(s_glm['lam']))
    if T_lim is None:
        T_lim = slice(0,np.size(s_glm['lam']))
    plt.plot(tt[T_lim], s_glm['lam'][T_lim],
             color=color)
    plt.hold(True)
    if plot_currents:
        # Plot constituent currents
        gray = np.array([0.5, 0.5, 0.5])
        if np.isscalar(s_glm['I_bias']):
            # Scalar bias: draw it as a constant line over the window.
            plt.plot(tt[T_lim], s_glm['I_bias']*np.ones_like(tt[T_lim]), color=gray, linestyle='--')
        else:
            plt.plot(tt[T_lim], s_glm['I_bias'][T_lim], color=gray, linestyle='--')
        plt.plot(tt[T_lim], s_glm['I_bkgd'][T_lim], color=gray, linestyle=':')
        plt.plot(tt[T_lim], s_glm['I_net'][T_lim], color=gray, linestyle='-.')
    if s_glm_std is not None:
        # Make a shaded patch for the error bars
        from matplotlib.patches import Polygon
        verts = list(zip(tt[T_lim], s_glm['lam'][T_lim] + 2*s_glm_std['lam'][T_lim])) + \
                list(zip(tt[T_lim][::-1],s_glm['lam'][T_lim][::-1] - 2*s_glm_std['lam'][T_lim][::-1]))
        poly = Polygon(verts, facecolor=color, edgecolor=color, alpha=0.5)
        plt.gca().add_patch(poly)
    #     plt.plot(s_glm['lam'] + 2*s_glm_std['lam'],
    #              color=color,
    #              linestyle='--')
    #     plt.plot(s_glm['lam'] - 2*s_glm_std['lam'],
    #              color=color,
    #              linestyle='--')
def plot_ks(s_glm, S, dt, s_glm_std=None, color=None):
    """
    Plot a Kolmogorov-Smirnov goodness-of-fit test for the time-rescaled
    inter-spike intervals and return whether the test passes.
    """
    # Time-rescaling: integrate the firing rate and take ISIs of the
    # rescaled spike times.
    cum_rate = dt * np.cumsum(s_glm['lam'])
    rescaled_isi = np.diff(cum_rate[S])
    # For a Poisson process the rescaled ISIs are Exp(1) distributed, so
    # their CDF values should be uniform on [0, 1].
    z = np.sort(1 - np.exp(-rescaled_isi))
    N = len(z)
    ez = (np.arange(1, N + 1) - .5) / N
    # Identity line plus the approximate 95% confidence band +-1.36/sqrt(N).
    ci = 1.36 / np.sqrt(N)
    plt.plot(ez, ez, 'k')
    plt.hold(True)
    plt.plot(ez, ez + ci, '--k')
    plt.plot(ez, ez - ci, '--k')
    # The empirical statistic.
    plt.plot(z, ez, '-b')
    plt.ylim([0, 1])
    plt.xlim([0, 1])
    # The test passes iff the statistic stays inside the band everywhere.
    return np.all(np.abs(z - ez) < ci)
def plot_basis(s_glm, color='k'):
    """Plot the impulse-response basis functions, if the model has any."""
    imp = s_glm['glms'][0]['imp']
    if 'basis' in imp:
        plt.plot(imp['basis'], color=color)
def plot_log_prob(s_inf, key='logp', s_true=None, color='r'):
    """
    Plot the trace of a log-probability statistic over inference iterations.
    With a single sample a bar chart is drawn instead of a line.
    """
    trace = np.array([smpl[key] for smpl in s_inf])
    multi = len(trace) > 1
    if multi:
        plt.plot(trace, color=color)
        plt.xlabel('Iteration')
    else:
        plt.bar(0, trace[0], color=color)
    if s_true is not None:
        plt.hold(True)
        # The true value is drawn as a constant reference in black.
        true_trace = s_true[key] * np.ones_like(trace)
        if multi:
            plt.plot(true_trace, color='k')
        else:
            plt.bar(1, true_trace[0], color='k')
    plt.ylabel('Log probability')
def plot_log_lkhd(s_inf, s_true=None, color='k'):
    """Plot the log-likelihood trace (thin wrapper around plot_log_prob)."""
    plot_log_prob(s_inf, s_true=s_true, color=color, key='ll')
    plt.ylabel('Log likelihood')
def plot_locations(s_inf, name='location_provider', color='k'):
    """
    Plot a histogram of the inferred locations for each neuron.

    s_inf : list of inferred state dicts; each must carry
            s['latent'][name]['L'] with per-neuron locations.
    """
    if name not in s_inf[0]['latent']:
        return
    # Stack the sampled locations: shape (N_smpls, N neurons, D dims).
    locs = np.array([s['latent'][name]['L'] for s in s_inf])
    [N_smpls, N, D] = locs.shape
    # Lay the per-neuron subplots out in rows of at most 5.
    maxrow = 5
    N_rows = np.ceil(N/float(maxrow))
    for n in range(N):
        plt.subplot(N_rows, maxrow, n+1, aspect=1.0)
        plt.title('N: %d' % n)
        if N_smpls == 1:
            # A single sample: draw the location directly rather than a
            # histogram.
            if D == 1:
                plt.plot([locs[0,n,0], locs[0,n,0]],
                         [0,2], color=color, lw=2)
            elif D == 2:
                plt.plot(locs[0,n,1], locs[0,n,0], 's',
                         color=color, markerfacecolor=color)
                # TODO: Fix the limits!
                # NOTE(review): the 10x10 grid limits below look hard-coded
                # for a specific dataset -- confirm against the caller.
                plt.xlim((-0.5, 9.5))
                plt.ylim((9.5, -0.5))
            else:
                raise Exception("Only plotting locs of dim <= 2")
        else:
            # Plot a histogram of samples
            if D == 1:
                plt.hist(locs[:,n,0], bins=20, normed=True, color=color)
            elif D == 2:
                plt.hist2d(locs[:,n,1], locs[:,n,0], bins=np.arange(-0.5,10),
                           cmap='Reds', alpha=0.5, normed=True)
                plt.xlim((-0.5, 9.5))
                plt.ylim((9.5, -0.5))
                plt.colorbar()
            else:
                raise Exception("Only plotting locs of dim <= 2")
def plot_latent_types(s_inf, s_true=None, name='sharedtuningcurve_provider'):
    """
    Plot the inferred latent type assignments ('Y') for each neuron as a
    colored scatter over samples, optionally alongside the true types.
    """
    if name not in s_inf[0]['latent']:
        return
    # Stack the latent type assignments over samples: shape (N_smpls, N).
    Ys = np.array([s['latent'][name]['Y'] for s in s_inf])
    [N_smpls, N] = Ys.shape
    # Use a common color scale across inferred and true assignments.
    vmin = np.amin(Ys)
    vmax = np.amax(Ys)
    if s_true is not None:
        Ytrue = s_true['latent'][name]['Y']
        vmin = np.minimum(vmin, np.amin(Ytrue))
        vmax = np.maximum(vmax, np.amax(Ytrue))
    # Pad the limits slightly so extreme values remain visible.
    vmin -= 0.1
    vmax += 0.1
    for n in range(N):
        plt.scatter(np.arange(N_smpls), n*np.ones(N_smpls),
                    c=Ys[:,n], s=100, cmap='jet', vmin=vmin, vmax=vmax)
    if s_true is not None:
        # True types are drawn as stars in a column to the right of samples.
        plt.scatter((N_smpls+10)*np.ones(N), np.arange(N), c=Ytrue, s=100,
                    cmap='jet', marker='*', vmin=vmin, vmax=vmax)
    plt.colorbar()
def plot_results(population,
                 x_inf,
                 popn_true=None,
                 x_true=None,
                 resdir=None,
                 do_plot_connectivity=True,
                 do_plot_stim_resp=True,
                 do_plot_imp_responses=True,
                 do_plot_firing_rates=True,
                 do_plot_ks=True,
                 do_plot_logpr=True):
    """ Plot the inferred stimulus tuning curves and impulse responses.

    population : fitted population model (provides eval_state, N, glm).
    x_inf      : inferred parameters -- a single setting or a list of samples.
    popn_true, x_true : optional ground-truth model/parameters.
    resdir     : output directory for PDFs (defaults to '.').
    do_plot_* flags toggle the individual figure groups.
    """
    if not resdir:
        resdir = '.'
    # Ground truth is plotted only when both true parameters and the true
    # population model are supplied.
    true_given = x_true is not None and popn_true is not None
    # Make sure we have a list of x's
    if not isinstance(x_inf, list):
        x_inf = [x_inf]
    # Evaluate the state for each of the parameter settings
    N_samples = len(x_inf)
    s_inf = []
    for x in x_inf:
        s_inf.append(population.eval_state(x))
    s_true = None
    if true_given:
        s_true = popn_true.eval_state(x_true)
    # Average the inferred states
    s_avg = average_list_of_dicts(s_inf)
    s_std = std_list_of_dicts(s_inf, s_avg)
    N = population.N
    # TODO Fix the averaging of W and A
    # E[W] * E[A] != E[W*A]
    # Plot the inferred connectivity matrix
    if do_plot_connectivity:
        print "Plotting connectivity matrix"
        f = plt.figure()
        plot_connectivity_matrix(s_inf, s_true)
        f.savefig(os.path.join(resdir,'conn.pdf'))
        plt.close(f)
    # Plot inferred neuron locations when a location provider is present.
    if 'location_provider' in s_inf[0]['latent']:
        f = plt.figure()
        plot_locations(s_inf, color='r')
        if true_given:
            plot_locations([s_true], color='k')
        f.savefig(os.path.join(resdir, 'locations.pdf'))
        plt.close(f)
    # Plot shared tuning curves
    if 'sharedtuningcurve_provider' in s_inf[0]['latent']:
        print "Plotting shared tuning curves"
        # NOTE(review): each loop iteration saves to the same
        # 'tuning_curves.pdf', so only the last figure survives -- confirm
        # whether a per-neuron filename was intended.
        for n in range(N):
            f = plt.figure()
            if true_given:
                plot_spatiotemporal_tuning_curves(
                    s_avg,
                    s_true=s_true,
                    s_std=s_std,
                    color='r')
            else:
                plot_spatiotemporal_tuning_curves(
                    s_avg,
                    s_std=s_std,
                    color='k')
            f.savefig(os.path.join(resdir,'tuning_curves.pdf'))
            plt.close(f)
        print "Plotting types"
        f = plt.figure()
        plot_latent_types(s_inf, s_true)
        f.savefig(os.path.join(resdir, 'latent_types.pdf'))
        plt.close(f)
    # Plot stimulus response functions
    if do_plot_stim_resp:
        print "Plotting stimulus response functions"
        for n in range(N):
            f = plt.figure()
            plot_stim_response(s_avg['glms'][n],
                               s_glm_std=s_std['glms'][n],
                               color='r')
            if true_given:
                plot_stim_response(s_true['glms'][n],
                                   color='k')
            f.savefig(os.path.join(resdir,'stim_resp_%d.pdf' % n))
            plt.close(f)
    # Plot the impulse responses
    if do_plot_imp_responses:
        print "Plotting impulse response functions"
        f = plt.figure()
        plot_imp_responses(s_avg,
                           s_std,
                           fig=f,
                           color='r',
                           use_bgcolor=True)
        if true_given:
            # Overlay the true impulse responses as dashed black lines.
            plot_imp_responses(s_true,
                               fig=f,
                               color='k',
                               linestyle='--',
                               use_bgcolor=False)
        f.savefig(os.path.join(resdir,'imp_resp.pdf'))
        plt.close(f)
    # Plot the impulse response basis
    if do_plot_imp_responses:
        f = plt.figure()
        plot_basis(s_avg)
        f.savefig(os.path.join(resdir,'imp_basis.pdf'))
        plt.close(f)
    # Plot the firing rates
    if do_plot_firing_rates:
        print "Plotting firing rates"
        # Only the first 2000 time bins are plotted.
        T_lim = slice(0,2000)
        for n in range(N):
            f = plt.figure()
            plot_firing_rate(s_avg['glms'][n],
                             s_std['glms'][n],
                             color='r',
                             T_lim=T_lim)
            if true_given:
                plot_firing_rate(s_true['glms'][n], color='k', T_lim=T_lim)
            # Plot the spike times
            St = np.nonzero(population.glm.S.get_value()[T_lim,n])[0]
            plt.plot(St,s_avg['glms'][n]['lam'][T_lim][St],'ko')
            plt.title('Firing rate %d' % n)
            f.savefig(os.path.join(resdir,'firing_rate_%d.pdf' % n))
            plt.close(f)
    if do_plot_ks:
        print "Plotting KS test results"
        for n in range(N):
            f = plt.figure()
            St = np.nonzero(population.glm.S.get_value()[:,n])[0]
            plot_ks(s_avg['glms'][n], St, population.glm.dt.get_value())
            f.savefig(os.path.join(resdir, 'ks_%d.pdf' %n))
            plt.close(f)
    if do_plot_logpr:
        print "Plotting log probability and log likelihood trace"
        f = plt.figure()
        plot_log_prob(s_inf, s_true=s_true, color='r')
        f.savefig(os.path.join(resdir, 'log_prob.pdf'))
        plt.close(f)
        f = plt.figure()
        plot_log_lkhd(s_inf, s_true=s_true, color='r')
        f.savefig(os.path.join(resdir, 'log_lkhd.pdf'))
        plt.close(f)
        if 'logprior' in s_inf[0]:
            f = plt.figure()
            plot_log_prob(s_inf, key='logprior', s_true=s_true, color='r')
            plt.ylabel('Log prior')
            f.savefig(os.path.join(resdir, 'log_prior.pdf'))
            plt.close(f)
        # NOTE(review): 'predll' is looked up in x_inf (the raw parameters),
        # not s_inf like the other statistics -- confirm this is intentional.
        if 'predll' in x_inf[0]:
            f = plt.figure()
            plot_log_prob(x_inf, key='predll', s_true=x_true, color='r')
            plt.ylabel('Pred. Log Likelihood')
            f.savefig(os.path.join(resdir, 'pred_ll.pdf'))
            plt.close(f)
    print "Plots can be found in directory: %s" % resdir
def parse_cmd_line_args():
    """
    Parse command line parameters.

    Returns the (options, args) pair produced by OptionParser; raises an
    Exception when the results file does not exist.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-d", "--dataFile", dest="dataFile",
                      help="Data file to load")
    parser.add_option("-r", "--resultsFile", dest="resultsFile", default='.',
                      help="Results file to plot.")
    options, args = parser.parse_args()
    # Check if specified files exist
    results_missing = (options.resultsFile is None
                       or not os.path.exists(options.resultsFile))
    if results_missing:
        raise Exception("Invalid results file: %s" % options.resultsFile)
    return (options, args)
if __name__ == "__main__":
    # Script entry point: load an inference result file and plot only the
    # impulse responses (all other figure groups are disabled below).
    from test.synth_harness import initialize_test_harness
    options, popn, data, popn_true, x_true = initialize_test_harness()
    # Load the results
    with open(options.x0_file, 'r') as f:
        print "Loading results from: %s" % options.x0_file
        x = cPickle.load(f)
    # If x is a list of samples, only keep the last (burned-in) fraction
    if isinstance(x, list):
        smpl_frac = 0.5
        x = x[-1*int(smpl_frac*len(x)):]
    print "Plotting results"
    plot_results(popn,
                 x,
                 popn_true=popn_true,
                 x_true=x_true,
                 resdir=options.resultsDir,
                 do_plot_connectivity=False,
                 do_plot_stim_resp=False,
                 do_plot_imp_responses=True,
                 do_plot_firing_rates=False,
                 do_plot_ks=False,
                 do_plot_logpr=False)
|
slinderman/theano_pyglm
|
pyglm/plotting/plot_results.py
|
Python
|
mit
| 28,517
|
[
"NEURON"
] |
96f2b1ba192fbc731ebd881708f0843e1e5a184c019e140374c2f715ed11c12c
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd.lb
from tests_common import single_component_maxwell
"""
Check the Lattice Boltzmann thermostat with respect to the particle velocity
distribution.
"""
KT = 0.25
AGRID = 2.5
VISC = 2.7
DENS = 1.7
TIME_STEP = 0.05
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': VISC,
'tau': TIME_STEP,
'kT': KT,
'seed': 123}
class LBThermostatCommon:

    """Base class of the test that holds the test logic."""

    # Subclasses assign the concrete LB fluid (CPU or GPU) in setUp().
    lbf = None
    # NOTE: the system is created once at class-definition time and shared
    # by all subclasses.
    system = espressomd.System(box_l=[10.0, 10.0, 10.0])
    system.time_step = TIME_STEP
    system.cell_system.skin = 0.4 * AGRID

    def prepare(self):
        """Add the LB fluid, 100 random particles and the LB thermostat."""
        self.system.set_random_state_PRNG()
        self.system.actors.clear()
        self.system.actors.add(self.lbf)
        self.system.part.add(
            pos=np.random.random((100, 3)) * self.system.box_l)
        self.system.thermostat.set_lb(LB_fluid=self.lbf, seed=5, gamma=2.0)

    def test_velocity_distribution(self):
        """Check particle velocities against the Maxwell distribution at KT."""
        self.prepare()
        # Warm up before sampling.
        self.system.integrator.run(20)
        N = len(self.system.part)
        loops = 250
        v_stored = np.zeros((N * loops, 3))
        for i in range(loops):
            self.system.integrator.run(6)
            v_stored[i * N:(i + 1) * N, :] = self.system.part[:].v
        minmax = 5
        n_bins = 7
        error_tol = 0.01
        # Compare the per-component velocity histogram against the expected
        # single-component Maxwell distribution, bin by bin.
        for i in range(3):
            hist = np.histogram(v_stored[:, i], range=(-minmax, minmax),
                                bins=n_bins, density=False)
            data = hist[0] / float(v_stored.shape[0])
            bins = hist[1]
            for j in range(n_bins):
                found = data[j]
                expected = single_component_maxwell(bins[j], bins[j + 1], KT)
                self.assertLessEqual(abs(found - expected), error_tol)
class LBCPUThermostat(ut.TestCase, LBThermostatCommon):

    """Test for the CPU implementation of the LB."""

    def setUp(self):
        # Instantiate the CPU LB fluid used by the shared test logic.
        self.lbf = espressomd.lb.LBFluid(**LB_PARAMS)
@utx.skipIfMissingGPU()
class LBGPUThermostat(ut.TestCase, LBThermostatCommon):

    """Test for the GPU implementation of the LB."""

    def setUp(self):
        # Instantiate the GPU LB fluid used by the shared test logic.
        self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMS)
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    ut.main()
|
mkuron/espresso
|
testsuite/python/lb_thermostat.py
|
Python
|
gpl-3.0
| 3,044
|
[
"ESPResSo"
] |
5e61f6219546f477beaa7caad2e72bc0a9c8efffb00afc330f983f4dabb6d986
|
#!/usr/bin/env python
try:
# python 2
import xmlrpclib
except:
# python 3
import xmlrpc.client as xmlrpclib
import time
import tempfile
import os.path
import sys
import pyBigWig
from deeptools.utilities import mungeChromosome
from deeptoolsintervals import GTF
import datetime
def isDeepBlue(fname):
    """
    Returns true if the file ends in .wig, .wiggle, or .bedgraph, since these
    indicate a file on the deepBlue server.

    URLs (http/ftp) are never treated as deepBlue samples. As a fallback for
    ENCODE sample IDs, any name that does not exist on disk is assumed to be
    a deepBlue sample.
    """
    # endswith/startswith accept a tuple of alternatives -- one call instead
    # of a chain of tests.
    if fname.endswith((".wig", ".wiggle", ".bedgraph")):
        return True
    if fname.startswith(("http", "ftp")):
        return False
    # For ENCODE samples, the "Name" is just the ENCODE sample ID, so as a
    # fallback check for files that aren't there.
    if not os.path.exists(fname):
        return True
    return False
def mergeRegions(regions):
    """
    Given a list of [(chrom, start, end), ...], merge all overlapping regions.

    This returns a dict, where values are sorted lists of [start, end].
    An empty input yields an empty dict (the original implementation emitted
    a bogus {None: [[None, None]]} entry in that case).
    """
    out = dict()
    last = [None, None, None]
    for reg in sorted(regions):
        if reg[0] == last[0] and reg[1] <= last[2]:
            # Overlaps (or abuts within) the pending region; extend it.
            if reg[2] > last[2]:
                last[2] = reg[2]
            continue
        # Flush the pending region before starting a new one.
        if last[0] is not None:
            out.setdefault(last[0], []).append([last[1], last[2]])
        last = [reg[0], reg[1], reg[2]]
    # Flush the final pending region, if any input regions were seen.
    if last[0] is not None:
        out.setdefault(last[0], []).append([last[1], last[2]])
    return out
def makeTiles(db, args):
    """
    Given a deepBlue object, return a list of regions that will be queried.

    Each chromosome is tiled with bins of width ``args.binSize`` separated by
    ``args.distanceBetweenBins`` bases; the final bin is truncated at the
    chromosome end.
    """
    out = []
    for (k, v) in db.chromsTuple:
        start = 0
        while start < v:
            end = min(start + args.binSize, v)
            out.append([k, start, end])
            # Advance from the end of this bin. The original
            # "start += end + args.distanceBetweenBins" accumulated the bin
            # end into start, producing ever-growing gaps between bins.
            start = end + args.distanceBetweenBins
    return out
def makeChromTiles(db):
    """
    Make a region for each chromosome: one [name, 0, length] entry per
    chromosome in the deepBlue object.
    """
    return [[name, 0, length] for (name, length) in db.chromsTuple]
def makeRegions(BED, args):
    """
    Given a list of BED/GTF files, make a list of regions.

    These are vaguely extended as appropriate. For simplicity, the maximum of
    --beforeRegionStartLength and --afterRegionStartLength are tacked on to
    each end and transcripts are used for GTF files.
    """
    tree = GTF(BED, transcriptID=args.transcriptID,
               transcript_id_designator=args.transcript_id_designator)
    # The before/after stuff is specific to computeMatrix
    if "beforeRegionStartLength" in args:
        pad = max(args.beforeRegionStartLength, args.afterRegionStartLength)
    else:
        pad = 0
    regions = []
    for chrom in tree.chroms:
        # bigWig files use 32 bit coordinates
        for reg in tree.findOverlaps(chrom, 0, 4294967295):
            regions.append([chrom, max(0, reg[0] - pad), reg[1] + pad])
    del tree
    return regions
def preloadWrapper(foo):
    """
    This is a wrapper around the preload function for multiprocessing
    """
    # foo is a tuple: (sample, ?, args, regions). foo[1] is not used here --
    # presumably reserved by the caller; verify against the call site.
    args = foo[2]
    regs = foo[3]
    res = deepBlue(foo[0], url=args.deepBlueURL, userKey=args.userKey)
    return res.preload(regs, tmpDir=args.deepBlueTempDir)
class deepBlue(object):
    """
    Handle to a single sample hosted on a deepBlue server, accessed over
    XML-RPC. See __init__ for a usage example.
    """

    def __init__(self, sample, url="http://deepblue.mpi-inf.mpg.de/xmlrpc", userKey="anonymous_key"):
        """
        Connect to the requested deepblue server with the given user key and request the specifed sample from it.

        >>> sample = "S002R5H1.ERX300721.H3K4me3.bwa.GRCh38.20150528.bedgraph"
        >>> db = deepBlue(sample) # doctest: +SKIP
        >>> assert(db.chroms("chr1") == 248956422) # doctest: +SKIP
        """
        self.sample = sample
        self.url = url
        self.userKey = userKey
        self.server = xmlrpclib.Server(url, allow_none=True)
        self.info = None
        self.experimentID = None
        self.genome = None
        self.chromsDict = None
        self.chromsTuple = None

        # Set self.experimentID
        experimentID = self.getEID()
        if not experimentID:
            raise RuntimeError("The requested sample({}) has no associated experiment! If you did not intend to use samples on deepBlue, then it appears either you misspelled a file name or (if you're using BAM files for input) one of your BAM files is lacking a valid index.".format(sample))

        # Set self.info
        (status, resp) = self.server.info(self.experimentID, userKey)
        if status != "okay":
            raise RuntimeError("Received the following error while fetching information about '{}': {}".format(resp, sample))
        self.info = resp[0]

        # Set self.genome
        genome = self.getGenome()
        if not genome:
            raise RuntimeError("Unable to determine an appropriate genome for '{}'".format(sample))

        # Set self.chroms
        chroms = self.getChroms()
        if not chroms:
            raise RuntimeError("Unable to determine chromosome names/sizes for '{}'".format(sample))

    def getEID(self):
        """
        Given a sample name, return its associated experiment ID (or None on error).

        self.experimentID is then the internal ID (e.g., e52525)
        """
        (status, resps) = self.server.search(self.sample, "experiments", self.userKey)
        if status != "okay":
            raise RuntimeError("Received an error ({}) while searching for the experiment associated with '{}'".format(resps, self.sample))
        # The search may return multiple hits; only an exact name match counts.
        for resp in resps:
            if resp[1] == self.sample:
                self.experimentID = resp[0]
                return resp[0]
        return None

    def getGenome(self):
        """
        Determines and sets the genome assigned to a given sample. On error, this raises a runtime exception.

        self.genome is then the internal genome ID.
        """
        if "genome" in self.info.keys():
            self.genome = self.info["genome"]
        return self.genome

    def getChroms(self):
        """
        Determines and sets the chromosome names/sizes for a given sample. On error, this raises a runtime exception.

        self.chroms is then a dictionary of chromosome:length pairs
        """
        (status, resp) = self.server.chromosomes(self.genome, self.userKey)
        if status != "okay":
            raise RuntimeError("Received an error while fetching chromosome information for '{}': {}".format(self.sample, resp))
        # Keep both a dict (for lookups) and an ordered tuple list (for
        # writing the bigWig header in preload()).
        self.chromsDict = {k: v for k, v in resp}
        self.chromsTuple = [(k, v) for k, v in resp]
        return resp

    def chroms(self, chrom=None):
        """
        Like the chroms() function in pyBigWig, returns either chromsDict (chrom is None) or the length of a given chromosome
        """
        if chrom is None:
            return self.chromsDict
        elif chrom in self.chromsDict:
            return self.chromsDict[chrom]
        return None

    def close(self):
        # Nothing to release; present for API symmetry with pyBigWig files.
        pass

    def preload(self, regions, tmpDir=None):
        """
        Given a sample and a set of regions, write a bigWig file containing the underlying signal.

        This function returns the file name, which needs to be deleted by the calling function at some point.

        This sends queries one chromosome at a time, due to memory limits on deepBlue
        """
        startTime = datetime.datetime.now()
        regions2 = mergeRegions(regions)

        # Make a temporary file
        f = tempfile.NamedTemporaryFile(delete=False, dir=tmpDir)
        fname = f.name
        f.close()

        # Start with the bigWig file
        bw = pyBigWig.open(fname, "w")
        bw.addHeader(self.chromsTuple, maxZooms=0)  # This won't work in IGV!

        # Make a string out of everything in a resonable order
        for k, v in self.chromsTuple:
            # Munge chromosome names as appropriate
            chrom = mungeChromosome(k, regions2.keys())
            if not chrom:
                continue
            if chrom not in regions2 or len(regions2) == 0:
                continue
            # One BED-like line per merged region on this chromosome.
            regionsStr = "\n".join(["{}\t{}\t{}".format(k, reg[0], reg[1]) for reg in regions2[chrom]])
            regionsStr += "\n"

            # Send the regions
            (status, regionsID) = self.server.input_regions(self.genome, regionsStr, self.userKey)
            if status != "okay":
                raise RuntimeError("Received the following error while sending regions for '{}': {}".format(regionsID, self.sample))

            # Get the experiment information
            (status, queryID) = self.server.select_experiments(self.sample, k, None, None, self.userKey)
            if status != "okay":
                raise RuntimeError("Received the following error while running select_experiments on file '{}': {}".format(self.sample, queryID))
            if not queryID:
                raise RuntimeError("Somehow, we received None as a query ID (file '{}')".format(self.sample))

            # Intersect
            (status, intersectID) = self.server.intersection(queryID, regionsID, self.userKey)
            if status != "okay":
                raise RuntimeError("Received the following error while running intersection on file '{}': {}".format(self.sample, intersectID))
            if not intersectID:
                raise RuntimeError("Somehow, we received None as an intersect ID (file '{}')".format(self.sample))

            # Query the regions
            (status, reqID) = self.server.get_regions(intersectID, "START,END,VALUE", self.userKey)
            if status != "okay":
                raise RuntimeError("Received the following error while fetching regions in file '{}': {}".format(self.sample, reqID))

            # Wait for the server to process the data
            (status, info) = self.server.info(reqID, self.userKey)
            request_status = info[0]["state"]
            while request_status != "done" and request_status != "failed":
                time.sleep(0.1)
                (status, info) = self.server.info(reqID, self.userKey)
                request_status = info[0]["state"]

            # Get the actual data
            (status, resp) = self.server.get_request_data(reqID, self.userKey)
            if status != "okay":
                raise RuntimeError("Received the following error while fetching data in file '{}': {}".format(self.sample, resp))

            # Parse the tab-separated START/END/VALUE rows into bigWig
            # entries. NOTE(review): both start and end are shifted by -1
            # here -- presumably converting 1-based inclusive server
            # coordinates; confirm against the deepBlue API.
            for intervals in resp.split("\n"):
                interval = intervals.split("\t")
                if interval[0] == '':
                    continue
                bw.addEntries([k], [int(interval[0]) - 1], ends=[int(interval[1]) - 1], values=[float(interval[2])])
        bw.close()
        sys.stderr.write("{} done (took {})\n".format(self.sample, datetime.datetime.now() - startTime))
        sys.stderr.flush()

        return fname
|
fidelram/deepTools
|
deeptools/deepBlue.py
|
Python
|
gpl-3.0
| 10,950
|
[
"BWA"
] |
21ec2848d56c6ae9b98772665273000ca764d776adf024db554f498b1c1dc9f7
|
from io import StringIO
from django.contrib.messages import get_messages
from django.core.management import call_command
from django.test import TestCase
from django.urls import reverse
from core.models import (
User, Batch, Section, Election, UserType, VoterProfile
)
class LoginViewTest(TestCase):
    """
    Tests the login view.

    This view should only accept POST requests from anonymous users. GET
    requests and logged in users will be redirected to `/`. After logging in,
    users will be redirected to `/`.

    View URL: `/auth/login`
    """
    # NOTE: the deprecated assertEquals alias (removed in Python 3.12) has
    # been replaced with assertEqual throughout this class.
    @classmethod
    def setUpTestData(cls):
        # Set up the test user.
        _election = Election.objects.create(name='Election')
        _batch = Batch.objects.create(year=2020, election=_election)
        _section = Section.objects.create(section_name='Emerald')
        _user = User.objects.create(
            username='juan',
            type=UserType.VOTER
        )
        _user.set_password('pepito')
        _user.save()
        _admin = User.objects.create(
            username='admin',
            type=UserType.ADMIN
        )
        _admin.set_password('root')
        _admin.save()
        VoterProfile.objects.create(
            user=_user,
            batch=_batch,
            section=_section
        )
        # A voter without a VoterProfile, used to test the incomplete-account
        # rejection path.
        _batch2 = Batch.objects.create(year=1, election=_election)
        _section2 = Section.objects.create(section_name='Section 2')
        _no_voter_profile_voter = User.objects.create(
            username='votra',
            first_name='Fritz La',
            last_name='Von Fritz',
            type=UserType.VOTER
        )
        _no_voter_profile_voter.set_password('votra')
        _no_voter_profile_voter.save()

    def test_get_requests_redirected_to_index(self):
        response = self.client.get(reverse('auth-login'), follow=True)
        self.assertRedirects(response, reverse('index'))

    def test_logged_users_redirected_to_index(self):
        self.client.login(username='juan', password='pepito')
        self.test_get_requests_redirected_to_index()

    def test_successful_login(self):
        self.client.post(
            reverse('auth-login'),
            { 'username': 'juan', 'password': 'pepito' },
            follow=True
        )
        response = self.client.get(reverse('index'))
        # Well, yeah, sure. We can test if the user gets properly logged in
        # by checking if the response gives it the correct subview, i.e. the
        # voting or voted subview. To make things simpler (Note: KISS), let's
        # just check if the user in the response is not anonymous and has the
        # username of the user we logged in. The latter also tests and makes
        # sure that we log in the correct user.
        response_user = response.context['user']
        self.assertTrue(response_user.is_authenticated)
        self.assertEqual(response_user.username, 'juan')

    def test_wrong_username_password_combination_login(self):
        response = self.client.post(
            reverse('auth-login'),
            { 'username': 'juan', 'password': 'wrong password' },
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Wrong username/password combination.'
        )
        self.assertRedirects(response, reverse('index'))

    def test_post_login_no_username(self):
        response = self.client.post(
            reverse('auth-login'),
            { 'password': 'pepito' },
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Invalid data submitted for authentication.'
        )
        self.assertRedirects(response, reverse('index'))

    def test_post_login_no_password(self):
        response = self.client.post(
            reverse('auth-login'),
            { 'username': 'juan' },
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Invalid data submitted for authentication.'
        )
        self.assertRedirects(response, reverse('index'))

    def test_post_successful_login_with_next_url(self):
        response = self.client.post(
            reverse('auth-login'),
            {
                'username': 'admin',
                'password': 'root',
                'next': reverse('results')
            },
            follow=True
        )
        response_user = response.context['user']
        self.assertTrue(response_user.is_authenticated)
        self.assertEqual(response_user.username, 'admin')
        self.assertRedirects(response, reverse('results'))

    def test_post_successful_login_with_blank_next_url(self):
        response = self.client.post(
            reverse('auth-login'),
            {
                'username': 'juan',
                'password': 'pepito',
                'next': ''
            },
            follow=True
        )
        response_user = response.context['user']
        self.assertTrue(response_user.is_authenticated)
        self.assertEqual(response_user.username, 'juan')

    def test_post_unsuccessful_login_with_next_url(self):
        response = self.client.post(
            reverse('auth-login'),
            {
                'username': 'admin',
                'password': 'wrong password',
                'next': reverse('results')
            },
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Wrong username/password combination.'
        )
        # The next URL must be preserved across the failed login redirect.
        self.assertRedirects(
            response,
            '{}?next={}'.format(reverse('index'), reverse('results'))
        )

    def test_post_deny_no_voter_profile_voters(self):
        response = self.client.post(
            reverse('auth-login'),
            {
                'username': 'votra',
                'password': 'votra'
            },
            follow=True
        )
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Voter account is incompletely configured. Please contact '
            'the system administrator.'
        )
        self.assertRedirects(response, reverse('index'))
class LogoutViewTest(TestCase):
    """
    Tests the logout view.

    This view should only accept POST requests from logged in users. GET
    requests and anonymous users will be immediately redirected to `/`. After
    logging out, users will be redirected to `/`.

    Logout views must not accept GET requests because:
        1) It can be abused and have the user unknowingly get logged out of
           their session, which can be done by setting an image tag's src to
           the logout URL of a website, for example [1].
        2) Browsers pre-fetch websites that they think you will visit next.
           This pre-fetching may cause you to logout from the website you're
           currently visiting [2].

    References:
        [1] https://stackoverflow.com/a/3522013/1116098
        [2] https://stackoverflow.com/a/14587231/1116098

    View URL: `/auth/logout`
    """
    @classmethod
    def setUpTestData(cls):
        # Set up the test user.
        _election = Election.objects.create(name='Election')
        _batch = Batch.objects.create(year=2020, election=_election)
        _section = Section.objects.create(section_name='Emerald')
        _user = User.objects.create(
            username='juan',
            type=UserType.VOTER
        )
        _user.set_password('pepito')
        _user.save()
        VoterProfile.objects.create(
            user=_user,
            batch=_batch,
            section=_section
        )

    def test_anonymous_users_get_request_redirected_to_index(self):
        response = self.client.get(reverse('auth-logout'), follow=True)
        self.assertRedirects(response, reverse('index'))

    def test_logged_in_users_get_request_redirected_to_index(self):
        self.client.login(username='juan', password='pepito')
        response = self.client.get(reverse('auth-logout'), follow=True)
        self.assertRedirects(response, reverse('index'))

    def test_successful_logout(self):
        self.client.login(username='juan', password='pepito')
        response = self.client.post(reverse('auth-logout'), follow=True)
        messages = list(get_messages(response.wsgi_request))
        self.assertEqual(len(messages), 1)
        self.assertEqual(
            str(messages[0]),
            'Logged out successfully.'
        )
        self.assertEqual(response.status_code, 200)
        # Make sure the user has been logged out.
        response = self.client.get(reverse('index'), follow=True)
        response_user = response.context['user']
        self.assertTrue(response_user.is_anonymous)
|
seanballais/botos
|
tests/test_auth_views.py
|
Python
|
gpl-3.0
| 9,194
|
[
"VisIt"
] |
b120bca9646fe666ace6c6358ebd84c007b59f8bfd83e98fddef0c261774bae2
|
import sys
from copy import copy
import numpy as nm
from sfepy.base.base import (complex_types, dict_from_keys_init,
assert_, is_derived_class,
insert_static_method, output, get_default,
get_default_attr, Struct, basestr)
from sfepy.base.ioutils \
import skip_read_line, read_token, read_array, read_list, pt
import os.path as op
supported_formats = {
'.mesh' : 'medit',
'.vtk' : 'vtk',
'.node' : 'tetgen',
'.txt' : 'comsol',
'.h5' : 'hdf5',
# Order is important, avs_ucd does not guess -> it is the default.
'.inp' : ('abaqus', 'ansys_cdb', 'avs_ucd'),
'.dat' : 'ansys_cdb',
'.hmascii' : 'hmascii',
'.mesh3d' : 'mesh3d',
'.bdf' : 'nastran',
'.neu' : 'gambit',
'.med' : 'med',
'.cdb' : 'ansys_cdb',
}
# Map mesh formats to read and write capabilities.
# 'r' ... read mesh
# 'w' ... write mesh
# 'rn' ... read nodes for boundary conditions
# 'wn' ... write nodes for boundary conditions
supported_capabilities = {
'medit' : ['r', 'w'],
'vtk' : ['r', 'w'],
'tetgen' : ['r'],
'comsol' : ['r', 'w'],
'hdf5' : ['r', 'w'],
'abaqus' : ['r'],
'avs_ucd' : ['r'],
'hmascii' : ['r'],
'mesh3d' : ['r'],
'nastran' : ['r', 'w'],
'gambit' : ['r', 'rn'],
'med' : ['r'],
'ansys_cdb' : ['r'],
}
supported_cell_types = {
'medit' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'vtk' : ['line2', 'tri3', 'quad4', 'tetra4', 'hexa8'],
'tetgen' : ['tetra4'],
'comsol' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'hdf5' : ['user'],
'abaqus' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'avs_ucd' : ['tetra4', 'hexa8'],
'hmascii' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'mesh3d' : ['tetra4', 'hexa8'],
'nastran' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'gambit' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'med' : ['tri3', 'quad4', 'tetra4', 'hexa8'],
'ansys_cdb' : ['tetra4', 'hexa8'],
'function' : ['user'],
}
def output_writable_meshes():
    """Print the names of all mesh formats that support writing."""
    output('Supported writable mesh formats are:')
    # dict.items() iterates fine on both Python 2 and 3, unlike the original
    # iteritems(), which raises AttributeError under Python 3.
    for key, val in supported_capabilities.items():
        if 'w' in val:
            output(key)
def split_conns_mat_ids(conns_in):
    """
    Split connectivities (columns except the last ones in `conns_in`) from
    cell groups (the last columns of `conns_in`).
    """
    # Normalize each connectivity to an int32 array, then slice off the
    # trailing material-id column.
    arrays = [nm.asarray(conn, dtype=nm.int32) for conn in conns_in]
    conns = [arr[:, :-1] for arr in arrays]
    mat_ids = [arr[:, -1] for arr in arrays]
    return conns, mat_ids
def convert_complex_output(out_in):
    """
    Convert complex values in the output dictionary `out_in` to pairs of
    real and imaginary parts.

    Each complex-valued entry `key` is replaced by 'real(key)' and
    'imag(key)' entries; non-complex entries are passed through unchanged.
    """
    out = {}
    # dict.items() works on both Python 2 and 3, unlike the original
    # iteritems(), which fails under Python 3.
    for key, val in out_in.items():
        if val.data.dtype in complex_types:
            rval = copy(val)
            rval.data = val.data.real
            out['real(%s)' % key] = rval

            ival = copy(val)
            ival.data = val.data.imag
            out['imag(%s)' % key] = ival
        else:
            out[key] = val
    return out
class MeshIO(Struct):
    """
    The abstract class for importing and exporting meshes.

    Read the docstring of the Mesh() class. Basically all you need to do is to
    implement the read() method::

        def read(self, mesh, **kwargs):
            nodes = ...
            ngroups = ...
            conns = ...
            mat_ids = ...
            descs = ...
            mesh._set_io_data(nodes, ngroups, conns, mat_ids, descs)
            return mesh

    See the Mesh class' docstring how the nodes, ngroups, conns, mat_ids and
    descs should look like. You just need to read them from your specific
    format from disk.

    To write a mesh to disk, just implement the write() method and use the
    information from the mesh instance (e.g. nodes, conns, mat_ids and descs)
    to construct your specific format.

    The methods read_dimension(), read_bounding_box() should be implemented in
    subclasses, as it is often possible to get that kind of information without
    reading the whole mesh file.

    Optionally, subclasses can implement read_data() to read also computation
    results. This concerns mainly the subclasses with implemented write()
    supporting the 'out' kwarg.

    The default implementation of read_last_step() just returns 0. It should be
    reimplemented in subclasses capable of storing several steps.
    """
    # Format name (e.g. 'medit'); None marks the abstract base class.
    format = None
    # Error message raised by the abstract method stubs below.
    call_msg = 'called an abstract MeshIO instance!'

    def __init__(self, filename, **kwargs):
        Struct.__init__(self, filename=filename, **kwargs)
        self.set_float_format()

    def get_filename_trunk(self):
        # NOTE(review): `file` here is the Python 2 built-in file type; this
        # detects an open file object passed in place of a path string.
        if isinstance(self.filename, file):
            trunk = 'from_descriptor'
        else:
            trunk = op.splitext(self.filename)[0]

        return trunk

    def read_dimension(self, ret_fd=False):
        raise ValueError(MeshIO.call_msg)

    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        raise ValueError(MeshIO.call_msg)

    def read_last_step(self):
        """The default implementation: just return 0 as the last step."""
        return 0

    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.

        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].

        Notes
        -----
        The default implementation returns empty arrays.
        """
        aux = nm.array([0.0], dtype=nm.float64)
        return aux.astype(nm.int32), aux, aux

    def read(self, mesh, omit_facets=False, **kwargs):
        raise ValueError(MeshIO.call_msg)

    def write(self, filename, mesh, **kwargs):
        raise ValueError(MeshIO.call_msg)

    def read_data(self, step, filename=None):
        raise ValueError(MeshIO.call_msg)

    def set_float_format(self, format=None):
        # Default to scientific notation unless the caller supplies a format.
        self.float_format = get_default(format, '%e')

    def get_vector_format(self, dim):
        # One float format per vector component, space-separated.
        return ' '.join([self.float_format] * dim)
class UserMeshIO(MeshIO):
    """
    Special MeshIO subclass that enables reading and writing a mesh using a
    user-supplied function.

    The callable is invoked as ``function(mesh, mode='read')`` or
    ``function(mesh, mode='write')``.
    """
    format = 'function'
    def __init__(self, filename, **kwargs):
        # Here, `filename` is in fact the user-supplied callable.
        assert_(hasattr(filename, '__call__'))
        self.function = filename
        name = 'function:%s' % self.function.__name__
        MeshIO.__init__(self, filename=name, **kwargs)
    def get_filename_trunk(self):
        return self.filename
    def read(self, mesh, *args, **kwargs):
        new_mesh = self.function(mesh, mode='read')
        if new_mesh is not None:
            mesh = new_mesh
            self.filename = mesh.name
        return mesh
    def write(self, filename, mesh, *args, **kwargs):
        self.function(mesh, mode='write')
class MeditMeshIO(MeshIO):
    """Reader/writer for the Medit '.mesh' ASCII format."""
    format = 'medit'
    def read_dimension(self, ret_fd=False):
        # Scan for the 'Dimension' keyword; the value may be on the same
        # line or on the following one.
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Dimension':
                if len(line) == 2:
                    dim = int(line[1])
                else:
                    dim = int(fd.readline())
                break
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim
    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        # NOTE(review): this fd is immediately shadowed by the one returned
        # from read_dimension() and is never closed - looks like a leak.
        fd = open(self.filename, 'r')
        dim, fd = self.read_dimension(ret_fd=True)
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'Vertices':
                num = int(read_token(fd))
                nod = read_array(fd, num, dim + 1, nm.float64)
                break
        # Row 0: per-coordinate minima, row 1: maxima.
        bbox = nm.vstack((nm.amin(nod[:,:dim], 0),
                          nm.amax(nod[:,:dim], 0)))
        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox
    def read(self, mesh, omit_facets=False, **kwargs):
        dim, fd = self.read_dimension(ret_fd=True)
        conns_in = []
        descs = []
        def _read_cells(dimension, size, has_id=True):
            # Read `size` vertex indices per cell (+ optional trailing cell
            # id column) and convert 1-based Medit indices to 0-based.
            num = int(read_token(fd))
            data = read_array(fd, num, size + 1 * has_id, nm.int32)
            if omit_facets and (dimension < dim): return
            data[:, :-1] -= 1
            conns_in.append(data)
            descs.append('%i_%i' % (dimension, size))
        while 1:
            line = skip_read_line(fd).split()
            if not line:
                break
            ls = line[0]
            if (ls == 'Vertices'):
                num = int(read_token(fd))
                nod = read_array(fd, num, dim + 1, nm.float64)
            elif (ls == 'Corners'):
                _read_cells(1, 1, False)
            elif (ls == 'Edges'):
                _read_cells(1, 2)
            elif (ls == 'Tetrahedra'):
                _read_cells(3, 4)
            elif (ls == 'Hexahedra'):
                _read_cells(3, 8)
            elif (ls == 'Triangles'):
                _read_cells(2, 3)
            elif (ls == 'Quadrilaterals'):
                _read_cells(2, 4)
            elif ls == 'End':
                break
            elif line[0] == '#':
                continue
            else:
                output('skipping unknown entity: %s' % line)
                continue
        fd.close()
        # Detect wedges and pyramides -> separate groups.
        # Degenerate hexahedra encode wedges (node 4 == node 5) and pyramids
        # (nodes 4 == 5 == 6); split them out into their own groups.
        if ('3_8' in descs):
            ic = descs.index('3_8')
            conn_in = conns_in.pop(ic)
            flag = nm.zeros((conn_in.shape[0],), nm.int32)
            for ii, el in enumerate(conn_in):
                if (el[4] == el[5]):
                    if (el[5] == el[6]):
                        flag[ii] = 2
                    else:
                        flag[ii] = 1
            conn = []
            desc = []
            ib = nm.where(flag == 0)[0]
            if (len(ib) > 0):
                conn.append(conn_in[ib])
                desc.append('3_8')
            iw = nm.where(flag == 1)[0]
            if (len(iw) > 0):
                ar = nm.array([0,1,2,3,4,6], nm.int32)
                conn.append(conn_in[iw[:, None], ar])
                desc.append('3_6')
            ip = nm.where(flag == 2)[0]
            if (len(ip) > 0):
                ar = nm.array([0,1,2,3,4], nm.int32)
                conn.append(conn_in[ip[:, None], ar])
                desc.append('3_5')
            conns_in[ic:ic] = conn
            del(descs[ic])
            descs[ic:ic] = desc
        conns, mat_ids = split_conns_mat_ids(conns_in)
        # The last column of the vertex array holds the node group.
        mesh._set_io_data(nod[:,:-1], nod[:,-1], conns, mat_ids, descs)
        return mesh
    def write(self, filename, mesh, out=None, **kwargs):
        fd = open(filename, 'w')
        coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
        n_nod, dim = coors.shape
        fd.write("MeshVersionFormatted 1\nDimension %d\n" % dim)
        fd.write("Vertices\n%d\n" % n_nod)
        # Each vertex line: coordinates followed by the node group id.
        format = self.get_vector_format(dim) + ' %d\n'
        for ii in range(n_nod):
            nn = tuple(coors[ii]) + (ngroups[ii],)
            fd.write(format % tuple(nn))
        # Cell connectivities are written 1-based, followed by the cell id
        # (except for corners, which carry no id).
        for ig, conn in enumerate(conns):
            ids = mat_ids[ig]
            if (desc[ig] == "1_1"):
                fd.write("Corners\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d\n"
                             % nn[0])
            elif (desc[ig] == "1_2"):
                fd.write("Edges\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d %d %d\n"
                             % (nn[0], nn[1], ids[ii]))
            elif (desc[ig] == "2_4"):
                fd.write("Quadrilaterals\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d %d %d %d %d\n"
                             % (nn[0], nn[1], nn[2], nn[3], ids[ii]))
            elif (desc[ig] == "2_3"):
                fd.write("Triangles\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d %d %d %d\n" % (nn[0], nn[1], nn[2], ids[ii]))
            elif (desc[ig] == "3_4"):
                fd.write("Tetrahedra\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d %d %d %d %d\n"
                             % (nn[0], nn[1], nn[2], nn[3], ids[ii]))
            elif (desc[ig] == "3_8"):
                fd.write("Hexahedra\n%d\n" % conn.shape[0])
                for ii in range(conn.shape[0]):
                    nn = conn[ii] + 1
                    fd.write("%d %d %d %d %d %d %d %d %d\n"
                             % (nn[0], nn[1], nn[2], nn[3], nn[4], nn[5],
                                nn[6], nn[7], ids[ii]))
            else:
                raise ValueError('unknown element type! (%s)' % desc[ig])
        fd.close()
        if out is not None:
            # Writing output data is not supported by this format.
            for key, val in out.iteritems():
                raise NotImplementedError
# The first header character is deliberately 'x' (an invalid VTK header);
# VTKMeshIO.write() rewrites it to '#' only after the whole file has been
# written, marking the write as finished.
vtk_header = r"""x vtk DataFile Version 2.0
step %d time %e normalized time %e, generated by %s
ASCII
DATASET UNSTRUCTURED_GRID
"""
# SfePy element description -> VTK cell type id.
vtk_cell_types = {'1_1' : 1, '1_2' : 3, '2_2' : 3, '3_2' : 3,
                  '2_3' : 5, '2_4' : 9, '3_4' : 10, '3_8' : 12}
# VTK cell type id -> topological dimension.
vtk_dims = {1 : 1, 3 : 1, 5 : 2, 9 : 2, 10 : 3, 12 : 3}
# (VTK cell type id, space dimension) -> SfePy element description.
vtk_inverse_cell_types = {(3, 2) : '1_2', (5, 2) : '2_3',
                          (8, 2) : '2_4', (9, 2) : '2_4',
                          (3, 3) : '1_2', (10, 3) : '3_4',
                          (11, 3) : '3_8', (12, 3) : '3_8' }
# Node-order permutations for VTK pixel (8) and voxel (11) cells.
vtk_remap = {8 : nm.array([0, 1, 3, 2], dtype=nm.int32),
             11 : nm.array([0, 1, 3, 2, 4, 5, 7, 6], dtype=nm.int32)}
vtk_remap_keys = vtk_remap.keys()
class VTKMeshIO(MeshIO):
    """Reader/writer for the legacy ASCII VTK format (UNSTRUCTURED_GRID)."""
    format = 'vtk'
    def read_coors(self, ret_fd=False):
        # Scan for the POINTS section and read all node coordinates (VTK
        # points always have three components).
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINTS':
                n_nod = int(line[1])
                coors = read_array(fd, n_nod, 3, nm.float64)
                break
        if ret_fd:
            return coors, fd
        else:
            fd.close()
            return coors
    def get_dimension(self, coors):
        # The mesh is considered 2D when all z coordinates are (nearly) equal.
        dz = nm.diff(coors[:,2])
        if nm.allclose(dz, 0.0):
            dim = 2
        else:
            dim = 3
        return dim
    def read_dimension(self, ret_fd=False):
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim
    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        coors, fd = self.read_coors(ret_fd=True)
        dim = self.get_dimension(coors)
        # Row 0: per-coordinate minima, row 1: maxima.
        bbox = nm.vstack((nm.amin(coors[:,:dim], 0),
                          nm.amax(coors[:,:dim], 0)))
        if ret_dim:
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox
    def read(self, mesh, **kwargs):
        # A state machine driven by `mode`/`mode_status` parses the sections
        # of the legacy VTK file in order: header -> points -> cells ->
        # cell types -> cell/point data (mat_id, node_groups).
        fd = open(self.filename, 'r')
        mode = 'header'
        mode_status = 0
        coors = conns = mat_id = node_grps = None
        finished = 0
        while 1:
            line = skip_read_line(fd)
            if not line:
                break
            if mode == 'header':
                if mode_status == 0:
                    if line.strip() == 'ASCII':
                        mode_status = 1
                elif mode_status == 1:
                    if line.strip() == 'DATASET UNSTRUCTURED_GRID':
                        mode_status = 0
                        mode = 'points'
            elif mode == 'points':
                line = line.split()
                if line[0] == 'POINTS':
                    n_nod = int(line[1])
                    coors = read_array(fd, n_nod, 3, nm.float64)
                    mode = 'cells'
            elif mode == 'cells':
                line = line.split()
                if line[0] == 'CELLS':
                    n_el, n_val = map(int, line[1:3])
                    raw_conn = read_list(fd, n_val, int)
                    mode = 'cell_types'
            elif mode == 'cell_types':
                line = line.split()
                if line[0] == 'CELL_TYPES':
                    assert_(int(line[1]) == n_el)
                    cell_types = read_array(fd, n_el, 1, nm.int32)
                    mode = 'cp_data'
            elif mode == 'cp_data':
                line = line.split()
                if line[0] == 'CELL_DATA':
                    assert_(int(line[1]) == n_el)
                    mode_status = 1
                    mode = 'mat_id'
                elif line[0] == 'POINT_DATA':
                    assert_(int(line[1]) == n_nod)
                    mode_status = 1
                    mode = 'node_groups'
            elif mode == 'mat_id':
                if mode_status == 1:
                    if 'SCALARS mat_id int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        mat_id = read_list(fd, n_el, int)
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1
            elif mode == 'node_groups':
                if mode_status == 1:
                    if 'SCALARS node_groups int' in line.strip():
                        mode_status = 2
                elif mode_status == 2:
                    if line.strip() == 'LOOKUP_TABLE default':
                        node_grps = read_list(fd, n_nod, int)
                        mode_status = 0
                        mode = 'cp_data'
                        finished += 1
            elif finished >= 2:
                break
        fd.close()
        # Fall back to zero mat_ids / node groups when absent; reshape
        # multi-value-per-line lists to one value per entity.
        if mat_id is None:
            mat_id = [[0]] * n_el
        else:
            if len(mat_id) < n_el:
                mat_id = [[ii] for jj in mat_id for ii in jj]
        if node_grps is None:
            node_grps = [0] * n_nod
        else:
            if len(node_grps) < n_nod:
                node_grps = [ii for jj in node_grps for ii in jj]
        dim = self.get_dimension(coors)
        if dim == 2:
            coors = coors[:,:2]
        coors = nm.ascontiguousarray(coors)
        cell_types = cell_types.squeeze()
        # Group cells by (VTK cell type, dimension); cell types without an
        # SfePy counterpart are silently skipped.
        dconns = {}
        for iel, row in enumerate(raw_conn):
            ct = cell_types[iel]
            key = (ct, dim)
            if key not in vtk_inverse_cell_types:
                continue
            ct = vtk_inverse_cell_types[key]
            dconns.setdefault(key, []).append(row[1:] + mat_id[iel])
        descs = []
        conns = []
        mat_ids = []
        for key, conn in dconns.iteritems():
            ct = key[0]
            sct = vtk_inverse_cell_types[key]
            descs.append(sct)
            aux = nm.array(conn, dtype=nm.int32)
            aconn = aux[:, :-1]
            if ct in vtk_remap_keys: # Remap pixels and voxels.
                aconn[:] = aconn[:, vtk_remap[ct]]
            conns.append(aconn)
            mat_ids.append(aux[:, -1])
        mesh._set_io_data(coors, node_grps, conns, mat_ids, descs)
        return mesh
    def write(self, filename, mesh, out=None, ts=None, **kwargs):
        def _reshape_tensors(data, dim, sym, nc):
            # Expand symmetric/full tensor storage into the full 3x3
            # row-major layout the VTK TENSORS section expects.
            if dim == 3:
                if nc == sym:
                    aux = data[:, [0,3,4,3,1,5,4,5,2]]
                elif nc == (dim * dim):
                    aux = data[:, [0,3,4,6,1,5,7,8,2]]
                else:
                    aux = data.reshape((data.shape[0], dim*dim))
            else:
                zz = nm.zeros((data.shape[0], 1), dtype=nm.float64)
                if nc == sym:
                    aux = nm.c_[data[:,[0,2]], zz, data[:,[2,1]],
                                zz, zz, zz, zz]
                elif nc == (dim * dim):
                    aux = nm.c_[data[:,[0,2]], zz, data[:,[3,1]],
                                zz, zz, zz, zz]
                else:
                    aux = nm.c_[data[:,0,[0,1]], zz, data[:,1,[0,1]],
                                zz, zz, zz, zz]
            return aux
        def _write_tensors(data):
            format = self.get_vector_format(3)
            format = '\n'.join([format] * 3) + '\n\n'
            # NOTE(review): iterates the enclosing `aux`, not the `data`
            # argument - this works only because all callers pass `aux` in.
            for row in aux:
                fd.write(format % tuple(row))
        if ts is None:
            step, time, nt = 0, 0.0, 0.0
        else:
            step, time, nt = ts.step, ts.time, ts.nt
        coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
        fd = open(filename, 'w')
        fd.write(vtk_header % (step, time, nt, op.basename(sys.argv[0])))
        n_nod, dim = coors.shape
        sym = dim * (dim + 1) / 2
        fd.write('\nPOINTS %d float\n' % n_nod)
        aux = coors
        if dim < 3:
            # Pad 2D coordinates with zero z components.
            aux = nm.hstack((aux, nm.zeros((aux.shape[0], 3 - dim),
                                           dtype=aux.dtype)))
        format = self.get_vector_format(3) + '\n'
        for row in aux:
            fd.write(format % tuple(row))
        n_el = mesh.n_el
        n_els, n_e_ps = nm.array([conn.shape for conn in conns]).T
        # Each CELLS row stores the node count plus the node indices.
        total_size = nm.dot(n_els, n_e_ps + 1)
        fd.write('\nCELLS %d %d\n' % (n_el, total_size))
        ct = []
        for ig, conn in enumerate(conns):
            nn = n_e_ps[ig] + 1
            ct += [vtk_cell_types[descs[ig]]] * n_els[ig]
            format = ' '.join(['%d'] * nn + ['\n'])
            for row in conn:
                fd.write(format % ((nn-1,) + tuple(row)))
        fd.write('\nCELL_TYPES %d\n' % n_el)
        fd.write(''.join(['%d\n' % ii for ii in ct]))
        fd.write('\nPOINT_DATA %d\n' % n_nod)
        # node groups
        fd.write('\nSCALARS node_groups int 1\nLOOKUP_TABLE default\n')
        fd.write(''.join(['%d\n' % ii for ii in ngroups]))
        if out is not None:
            point_keys = [key for key, val in out.iteritems()
                          if val.mode == 'vertex']
        else:
            point_keys = {}
        # Vertex data: scalars, vectors or tensors according to the number
        # of components.
        for key in point_keys:
            val = out[key]
            nr, nc = val.data.shape
            if nc == 1:
                fd.write('\nSCALARS %s float %d\n' % (key, nc))
                fd.write('LOOKUP_TABLE default\n')
                format = self.float_format + '\n'
                for row in val.data:
                    fd.write(format % row)
            elif nc == dim:
                fd.write('\nVECTORS %s float\n' % key)
                if dim == 2:
                    aux = nm.hstack((val.data,
                                     nm.zeros((nr, 1), dtype=nm.float64)))
                else:
                    aux = val.data
                format = self.get_vector_format(3) + '\n'
                for row in aux:
                    fd.write(format % tuple(row))
            elif (nc == sym) or (nc == (dim * dim)):
                fd.write('\nTENSORS %s float\n' % key)
                aux = _reshape_tensors(val.data, dim, sym, nc)
                _write_tensors(aux)
            else:
                raise NotImplementedError, nc
        if out is not None:
            cell_keys = [key for key, val in out.iteritems()
                         if val.mode == 'cell']
        else:
            cell_keys = {}
        fd.write('\nCELL_DATA %d\n' % n_el)
        # cells - mat_id
        fd.write('SCALARS mat_id int 1\nLOOKUP_TABLE default\n')
        aux = nm.hstack(mat_ids).tolist()
        fd.write(''.join(['%d\n' % ii for ii in aux]))
        # Cell data: shape (n_el, 1, nr, nc) decides scalar/vector/tensor.
        for key in cell_keys:
            val = out[key]
            ne, aux, nr, nc = val.data.shape
            if (nr == 1) and (nc == 1):
                fd.write('\nSCALARS %s float %d\n' % (key, nc))
                fd.write('LOOKUP_TABLE default\n')
                format = self.float_format + '\n'
                aux = val.data.squeeze()
                if len(aux.shape) == 0:
                    fd.write(format % aux)
                else:
                    for row in aux:
                        fd.write(format % row)
            elif (nr == dim) and (nc == 1):
                fd.write('\nVECTORS %s float\n' % key)
                if dim == 2:
                    aux = nm.hstack((val.data.squeeze(),
                                     nm.zeros((ne, 1), dtype=nm.float64)))
                else:
                    aux = val.data
                format = self.get_vector_format(3) + '\n'
                for row in aux:
                    fd.write(format % tuple(row.squeeze()))
            elif (((nr == sym) or (nr == (dim * dim))) and (nc == 1)) \
                     or ((nr == dim) and (nc == dim)):
                fd.write('\nTENSORS %s float\n' % key)
                data = val.data.squeeze()
                aux = _reshape_tensors(data, dim, sym, nr)
                _write_tensors(aux)
            else:
                raise NotImplementedError, (nr, nc)
        fd.close()
        # Mark the write finished.
        # Turn the leading 'x' of vtk_header into '#', so the file becomes
        # a valid VTK file only once completely written.
        fd = open(filename, 'r+')
        fd.write('#')
        fd.close()
    def read_data(self, step, filename=None):
        """Point data only!"""
        # NOTE(review): `filename` is resolved here but self.filename is
        # opened below - the argument is effectively ignored; verify intent.
        filename = get_default(filename, self.filename)
        out = {}
        fd = open(self.filename, 'r')
        while 1:
            line = skip_read_line(fd, no_eof=True).split()
            if line[0] == 'POINT_DATA':
                break
        n_nod = int(line[1])
        while 1:
            line = skip_read_line(fd)
            if not line:
                break
            line = line.split()
            if line[0] == 'SCALARS':
                name, dtype, nc = line[1:]
                assert_(int(nc) == 1)
                fd.readline() # skip lookup table line
                data = nm.zeros((n_nod,), dtype=nm.float64)
                ii = 0
                while ii < n_nod:
                    data[ii] = float(fd.readline())
                    ii += 1
                out[name] = Struct(name=name, mode='vertex', data=data,
                                   dofs=None)
            elif line[0] == 'VECTORS':
                name, dtype = line[1:]
                data = []
                ii = 0
                while ii < n_nod:
                    data.append([float(val) for val in fd.readline().split()])
                    ii += 1
                out[name] = Struct(name=name, mode='vertex',
                                   data=nm.array(data, dtype=nm.float64),
                                   dofs=None)
            elif line[0] == 'CELL_DATA':
                break
            line = fd.readline()
        fd.close()
        return out
class TetgenMeshIO(MeshIO):
    """
    Reader for meshes generated by TetGen/Triangle: node coordinates come
    from '<trunk>.node', elements and region ids from '<trunk>.ele'.
    """
    format = "tetgen"
    def read(self, mesh, **kwargs):
        """Read the mesh from the '.node'/'.ele' file pair sharing the
        trunk of self.filename."""
        import os
        fname = os.path.splitext(self.filename)[0]
        nodes = self.getnodes(fname+".node")
        etype, elements, regions = self.getele(fname+".ele")
        descs = []
        conns = []
        mat_ids = []
        # TetGen indices are 1-based - convert to 0-based.
        elements = nm.array(elements, dtype=nm.int32) - 1
        # items() works on both Python 2 and 3, unlike iteritems().
        for key, value in regions.items():
            descs.append(etype)
            mat_ids.append(nm.ones_like(value) * key)
            conns.append(elements[nm.array(value)-1].copy())
        mesh._set_io_data(nodes, None, conns, mat_ids, descs)
        return mesh
    @staticmethod
    def getnodes(fnods):
        """
        Reads t.1.nodes, returns a list of nodes.
        Example:
        >>> self.getnodes("t.1.node")
        [(0.0, 0.0, 0.0), (4.0, 0.0, 0.0), (0.0, 4.0, 0.0), (-4.0, 0.0, 0.0),
        (0.0, 0.0, 4.0), (0.0, -4.0, 0.0), (0.0, -0.0, -4.0), (-2.0, 0.0,
        -2.0), (-2.0, 2.0, 0.0), (0.0, 2.0, -2.0), (0.0, -2.0, -2.0), (2.0,
        0.0, -2.0), (2.0, 2.0, 0.0), ... ]
        """
        f = open(fnods)
        try:
            l = [int(x) for x in f.readline().split()]
            npoints, dim, nattrib, nbound = l
            # 2D nodes get a zero z coordinate appended.
            if dim == 2:
                ndapp = [0.0]
            else:
                ndapp = []
            nodes = []
            for line in f:
                if line[0] == "#": continue
                l = [float(x) for x in line.split()]
                l = l[:(dim + 1)]
                assert_(int(l[0]) == len(nodes)+1)
                l = l[1:]
                nodes.append(tuple(l + ndapp))
        finally:
            # Close the file even on a malformed input (was leaked before).
            f.close()
        assert_(npoints == len(nodes))
        return nodes
    @staticmethod
    def getele(fele):
        """
        Reads t.1.ele, returns a list of elements.
        Example:
        >>> elements, regions = self.getele("t.1.ele")
        >>> elements
        [(20, 154, 122, 258), (86, 186, 134, 238), (15, 309, 170, 310), (146,
        229, 145, 285), (206, 207, 125, 211), (99, 193, 39, 194), (185, 197,
        158, 225), (53, 76, 74, 6), (19, 138, 129, 313), (23, 60, 47, 96),
        (119, 321, 1, 329), (188, 296, 122, 322), (30, 255, 177, 256), ...]
        >>> regions
        {100: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
        19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
        37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
        55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 7, ...],
        ...}
        """
        # open() instead of the Python 2-only file() builtin.
        f = open(fele)
        try:
            l = [int(x) for x in f.readline().split()]
            ntetra,nnod,nattrib = l
            #we have either linear or quadratic tetrahedra:
            elem = None
            if nnod in [4,10]:
                elem = '3_4'
                linear = (nnod == 4)
            if nnod in [3, 7]:
                elem = '2_3'
                linear = (nnod == 3)
            if elem is None or not linear:
                raise ValueError("Only linear triangle and tetrahedra reader"
                                 " is implemented")
            els = []
            regions = {}
            for line in f:
                if line[0] == "#": continue
                l = [int(x) for x in line.split()]
                if elem == '2_3':
                    assert_((len(l) - 1 - nattrib) == 3)
                    els.append((l[1],l[2],l[3]))
                if elem == '3_4':
                    assert_((len(l) - 1 - nattrib) == 4)
                    els.append((l[1],l[2],l[3],l[4]))
                if nattrib == 1:
                    regionnum = l[-1]
                else:
                    regionnum = 1
                if regionnum == 0:
                    msg = "see %s, element # %d\n"%(fele,l[0])
                    msg += "there are elements not belonging to any physical entity"
                    raise ValueError(msg)
                # `in` instead of the Python 2-only dict.has_key().
                if regionnum in regions:
                    regions[regionnum].append(l[0])
                else:
                    regions[regionnum]=[l[0]]
                assert_(l[0] == len(els))
        finally:
            # Close the file even on a malformed input (was leaked before).
            f.close()
        return elem, els, regions
    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
    def read_dimension(self):
        # TetGen only supports 3D mesh
        return 3
    def read_bounding_box(self):
        raise NotImplementedError
class ComsolMeshIO(MeshIO):
    """Reader/writer for the COMSOL mphtxt-style text format."""
    format = 'comsol'
    def _read_commented_int(self):
        # Integers are stored as '<value> # comment' lines.
        return int(skip_read_line(self.fd).split('#')[0])
    def _skip_comment(self):
        read_token(self.fd)
        self.fd.readline()
    def read(self, mesh, **kwargs):
        # Sequential parser: header -> point coordinates -> per-type cell
        # blocks with domain (material) ids.
        self.fd = fd = open(self.filename, 'r')
        mode = 'header'
        coors = conns = None
        while 1:
            if mode == 'header':
                line = skip_read_line(fd)
                n_tags = self._read_commented_int()
                for ii in xrange(n_tags):
                    skip_read_line(fd)
                n_types = self._read_commented_int()
                for ii in xrange(n_types):
                    skip_read_line(fd)
                skip_read_line(fd)
                assert_(skip_read_line(fd).split()[1] == 'Mesh')
                skip_read_line(fd)
                dim = self._read_commented_int()
                assert_((dim == 2) or (dim == 3))
                n_nod = self._read_commented_int()
                i0 = self._read_commented_int()
                mode = 'points'
            elif mode == 'points':
                self._skip_comment()
                coors = read_array(fd, n_nod, dim, nm.float64)
                mode = 'cells'
            elif mode == 'cells':
                n_types = self._read_commented_int()
                conns = []
                descs = []
                mat_ids = []
                for it in xrange(n_types):
                    t_name = skip_read_line(fd).split()[1]
                    n_ep = self._read_commented_int()
                    n_el = self._read_commented_int()
                    self._skip_comment()
                    aux = read_array(fd, n_el, n_ep, nm.int32)
                    if t_name == 'tri':
                        conns.append(aux)
                        descs.append('2_3')
                        is_conn = True
                    elif t_name == 'quad':
                        # Rearrange element node order to match SfePy.
                        aux = aux[:,(0,1,3,2)]
                        conns.append(aux)
                        descs.append('2_4')
                        is_conn = True
                    elif t_name == 'hex':
                        # Rearrange element node order to match SfePy.
                        aux = aux[:,(0,1,3,2,4,5,7,6)]
                        conns.append(aux)
                        descs.append('3_8')
                        is_conn = True
                    elif t_name == 'tet':
                        conns.append(aux)
                        descs.append('3_4')
                        is_conn = True
                    else:
                        # Unsupported type: its data are skipped below.
                        is_conn = False
                    # Skip parameters.
                    n_pv = self._read_commented_int()
                    n_par = self._read_commented_int()
                    for ii in xrange(n_par):
                        skip_read_line(fd)
                    n_domain = self._read_commented_int()
                    assert_(n_domain == n_el)
                    if is_conn:
                        self._skip_comment()
                        mat_id = read_array(fd, n_domain, 1, nm.int32)
                        mat_ids.append(mat_id)
                    else:
                        for ii in xrange(n_domain):
                            skip_read_line(fd)
                    # Skip up/down pairs.
                    n_ud = self._read_commented_int()
                    for ii in xrange(n_ud):
                        skip_read_line(fd)
                break
        fd.close()
        self.fd = None
        mesh._set_io_data(coors, None, conns, mat_ids, descs)
        return mesh
    def write(self, filename, mesh, out=None, **kwargs):
        def write_elements(fd, ig, conn, mat_ids, type_name,
                           npe, format, norder, nm_params):
            # Write one element type block: connectivity (reordered via
            # `norder` to COMSOL node order), parameters and domain ids.
            fd.write("# Type #%d\n\n" % ig)
            fd.write("%s # type name\n\n\n" % type_name)
            fd.write("%d # number of nodes per element\n" % npe)
            fd.write("%d # number of elements\n" % conn.shape[0])
            fd.write("# Elements\n")
            for ii in range(conn.shape[0]):
                nn = conn[ii] # Zero based
                fd.write(format % tuple(nn[norder]))
            fd.write("\n%d # number of parameter values per element\n"
                     % nm_params)
            # Top level always 0?
            fd.write("0 # number of parameters\n")
            fd.write("# Parameters\n\n")
            fd.write("%d # number of domains\n"
                     % sum([mi.shape[0] for mi in mat_ids]))
            fd.write("# Domains\n")
            for mi in mat_ids:
                # Domains in comsol have to be > 0
                if (mi <= 0).any():
                    mi += mi.min() + 1
                for dom in mi:
                    fd.write("%d\n" % abs(dom))
            fd.write("\n0 # number of up/down pairs\n")
            fd.write("# Up/down\n")
        fd = open(filename, 'w')
        coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()
        n_nod, dim = coors.shape
        # Header
        fd.write("# Created by SfePy\n\n\n")
        fd.write("# Major & minor version\n")
        fd.write("0 1\n")
        fd.write("1 # number of tags\n")
        fd.write("# Tags\n")
        fd.write("2 m1\n")
        fd.write("1 # number of types\n")
        fd.write("# Types\n")
        fd.write("3 obj\n\n")
        # Record
        fd.write("# --------- Object 0 ----------\n\n")
        fd.write("0 0 1\n") # version unused serializable
        fd.write("4 Mesh # class\n")
        fd.write("1 # version\n")
        fd.write("%d # sdim\n" % dim)
        fd.write("%d # number of mesh points\n" % n_nod)
        fd.write("0 # lowest mesh point index\n\n") # Always zero in SfePy
        fd.write("# Mesh point coordinates\n")
        format = self.get_vector_format(dim) + '\n'
        for ii in range(n_nod):
            nn = tuple(coors[ii])
            fd.write(format % tuple(nn))
        fd.write("\n%d # number of element types\n\n\n" % len(conns))
        for ig, conn in enumerate(conns):
            if (desc[ig] == "2_4"):
                write_elements(fd, ig, conn, mat_ids,
                               "4 quad", 4, "%d %d %d %d\n", [0, 1, 3, 2], 8)
            elif (desc[ig] == "2_3"):
                # TODO: Verify number of parameters for tri element
                write_elements(fd, ig, conn, mat_ids,
                               "3 tri", 3, "%d %d %d\n", [0, 1, 2], 4)
            elif (desc[ig] == "3_4"):
                # TODO: Verify number of parameters for tet element
                write_elements(fd, ig, conn, mat_ids,
                               "3 tet", 4, "%d %d %d %d\n", [0, 1, 2, 3], 16)
            elif (desc[ig] == "3_8"):
                write_elements(fd, ig, conn, mat_ids,
                               "3 hex", 8, "%d %d %d %d %d %d %d %d\n",
                               [0, 1, 3, 2, 4, 5, 7, 6], 24)
            else:
                raise ValueError('unknown element type! (%s)' % desc[ig])
        fd.close()
        if out is not None:
            # Writing output data is not supported by this format.
            for key, val in out.iteritems():
                raise NotImplementedError
class HDF5MeshIO(MeshIO):
    """
    Reader/writer using a custom HDF5 layout (via pytables), capable of
    storing the mesh plus output data of several time steps.
    """
    format = "hdf5"
    import string
    # Translation table mapping every non-identifier character to '_';
    # used to sanitize output data keys into valid HDF5 group names.
    # NOTE(review): string.letters and string.maketrans are Python 2 only.
    _all = ''.join(map(chr, range(256)))
    _letters = string.letters + string.digits + '_'
    _rubbish = ''.join([ch for ch in set(_all) - set(_letters)])
    _tr = string.maketrans(_rubbish, '_' * len(_rubbish))
    def read_dimension(self, ret_fd=False):
        fd = pt.openFile(self.filename, mode="r")
        dim = fd.root.mesh.coors.shape[1]
        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim
    def read_bounding_box(self, ret_fd=False, ret_dim=False):
        fd = pt.openFile(self.filename, mode="r")
        mesh_group = fd.root.mesh
        coors = mesh_group.coors.read()
        # Row 0: per-coordinate minima, row 1: maxima.
        bbox = nm.vstack((nm.amin(coors, 0),
                          nm.amax(coors, 0)))
        if ret_dim:
            dim = coors.shape[1]
            if ret_fd:
                return bbox, dim, fd
            else:
                fd.close()
                return bbox, dim
        else:
            if ret_fd:
                return bbox, fd
            else:
                fd.close()
                return bbox
    def read(self, mesh, **kwargs):
        # Layout: /mesh holds coors, ngroups and one groupN per
        # connectivity, plus optional node_sets.
        fd = pt.openFile(self.filename, mode="r")
        mesh_group = fd.root.mesh
        mesh.name = mesh_group.name.read()
        coors = mesh_group.coors.read()
        ngroups = mesh_group.ngroups.read()
        n_gr = mesh_group.n_gr.read()
        conns = []
        descs = []
        mat_ids = []
        for ig in xrange(n_gr):
            gr_name = 'group%d' % ig
            group = mesh_group._f_getChild(gr_name)
            conns.append(group.conn.read())
            mat_ids.append(group.mat_id.read())
            descs.append(group.desc.read())
        nodal_bcs = {}
        try:
            # Older files may lack the node_sets group.
            node_sets_groups = mesh_group.node_sets
        except:
            pass
        else:
            for group in node_sets_groups:
                key = group.key.read()
                nods = group.nods.read()
                nodal_bcs[key] = nods
        fd.close()
        mesh._set_io_data(coors, ngroups, conns, mat_ids, descs,
                          nodal_bcs=nodal_bcs)
        return mesh
    def write(self, filename, mesh, out=None, ts=None, **kwargs):
        from time import asctime
        if pt is None:
            raise ValueError('pytables not imported!')
        step = get_default_attr(ts, 'step', 0)
        if step == 0:
            # A new file.
            fd = pt.openFile(filename, mode="w",
                             title="SfePy output file")
            mesh_group = fd.createGroup('/', 'mesh', 'mesh')
            coors, ngroups, conns, mat_ids, descs = mesh._get_io_data()
            fd.createArray(mesh_group, 'name', mesh.name, 'name')
            fd.createArray(mesh_group, 'coors', coors, 'coors')
            fd.createArray(mesh_group, 'ngroups', ngroups, 'ngroups')
            fd.createArray(mesh_group, 'n_gr', len(conns), 'n_gr')
            for ig, conn in enumerate(conns):
                conn_group = fd.createGroup(mesh_group, 'group%d' % ig,
                                            'connectivity group')
                fd.createArray(conn_group, 'conn', conn, 'connectivity')
                fd.createArray(conn_group, 'mat_id', mat_ids[ig],
                               'material id')
                fd.createArray(conn_group, 'desc', descs[ig],
                               'element Type')
            node_sets_groups = fd.createGroup(mesh_group, 'node_sets',
                                              'node sets groups')
            ii = 0
            for key, nods in mesh.nodal_bcs.iteritems():
                group = fd.createGroup(node_sets_groups, 'group%d' % ii,
                                       'node sets group')
                fd.createArray(group, 'key', key, 'key')
                fd.createArray(group, 'nods', nods, 'nods')
                ii += 1
            if ts is not None:
                ts_group = fd.createGroup('/', 'ts', 'time stepper')
                fd.createArray(ts_group, 't0', ts.t0, 'initial time')
                fd.createArray(ts_group, 't1', ts.t1, 'final time' )
                fd.createArray(ts_group, 'dt', ts.dt, 'time step')
                fd.createArray(ts_group, 'n_step', ts.n_step, 'n_step')
            tstat_group = fd.createGroup('/', 'tstat', 'global time statistics')
            fd.createArray(tstat_group, 'created', asctime(),
                           'file creation time')
            # Placeholder; overwritten below once the step data are saved.
            fd.createArray(tstat_group, 'finished', '.' * 24,
                           'file closing time')
            fd.createArray(fd.root, 'last_step', nm.array([0], dtype=nm.int32),
                           'last saved step')
            fd.close()
        if out is not None:
            if ts is None:
                step, time, nt = 0, 0.0, 0.0
            else:
                step, time, nt = ts.step, ts.time, ts.nt
            # Existing file.
            fd = pt.openFile(filename, mode="r+")
            step_group = fd.createGroup('/', 'step%d' % step, 'time step data')
            ts_group = fd.createGroup(step_group, 'ts', 'time stepper')
            fd.createArray(ts_group, 'step', step, 'step')
            fd.createArray(ts_group, 't', time, 'time')
            fd.createArray(ts_group, 'nt', nt, 'normalized time')
            name_dict = {}
            for key, val in out.iteritems():
                shape = val.get('shape', val.data.shape)
                dofs = val.get('dofs', None)
                if dofs is None:
                    dofs = [''] * nm.squeeze(shape)[-1]
                var_name = val.get('var_name', '')
                name = val.get('name', 'output_data')
                # Sanitize the key into a valid HDF5 group name.
                group_name = '__' + key.translate(self._tr)
                data_group = fd.createGroup(step_group, group_name,
                                            '%s data' % key)
                fd.createArray(data_group, 'data', val.data, 'data')
                fd.createArray(data_group, 'mode', val.mode, 'mode')
                fd.createArray(data_group, 'dofs', dofs, 'dofs')
                fd.createArray(data_group, 'shape', shape, 'shape')
                fd.createArray(data_group, 'name', name, 'object name')
                fd.createArray(data_group, 'var_name',
                               var_name, 'object parent name')
                fd.createArray(data_group, 'dname', key, 'data name')
                if val.mode == 'full':
                    fd.createArray(data_group, 'field_name', val.field_name,
                                   'field name')
                name_dict[key] = group_name
            step_group._v_attrs.name_dict = name_dict
            fd.root.last_step[0] = step
            # Refresh the 'finished' time stamp.
            fd.removeNode(fd.root.tstat.finished)
            fd.createArray(fd.root.tstat, 'finished', asctime(),
                           'file closing time')
            fd.close()
    def read_last_step(self, filename=None):
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        last_step = fd.root.last_step[0]
        fd.close()
        return last_step
    def read_time_stepper(self, filename=None):
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        try:
            ts_group = fd.root.ts
            out = (ts_group.t0.read(), ts_group.t1.read(),
                   ts_group.dt.read(), ts_group.n_step.read())
        except:
            # NOTE(review): bare except - any failure is reported as a
            # missing time stepper.
            raise ValueError('no time stepper found!')
        finally:
            fd.close()
        return out
    def read_times(self, filename=None):
        """
        Read true time step data from individual time steps.
        Returns
        -------
        steps : array
            The time steps.
        times : array
            The times of the time steps.
        nts : array
            The normalized times of the time steps, in [0, 1].
        """
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode='r')
        steps = sorted(int(name[4:]) for name in fd.root._v_groups.keys()
                       if name.startswith('step'))
        times = []
        nts = []
        for step in steps:
            ts_group = fd.getNode(fd.root, 'step%d/ts' % step)
            times.append(ts_group.t.read())
            nts.append(ts_group.nt.read())
        fd.close()
        steps = nm.asarray(steps, dtype=nm.int32)
        times = nm.asarray(times, dtype=nm.float64)
        nts = nm.asarray(nts, dtype=nm.float64)
        return steps, times, nts
    def _get_step_group(self, step, filename=None):
        # Return (open file, step group) or (None, None) when missing.
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        gr_name = 'step%d' % step
        try:
            step_group = fd.getNode(fd.root, gr_name)
        except:
            output('step %d data not found - premature end of file?' % step)
            fd.close()
            return None, None
        return fd, step_group
    def read_data(self, step, filename=None):
        fd, step_group = self._get_step_group(step, filename=filename)
        if fd is None: return None
        out = {}
        for data_group in step_group:
            try:
                key = data_group.dname.read()
            except pt.exceptions.NoSuchNodeError:
                # Skip non-data groups (e.g. the 'ts' group).
                continue
            name = data_group.name.read()
            mode = data_group.mode.read()
            data = data_group.data.read()
            dofs = tuple(data_group.dofs.read())
            try:
                shape = tuple(data_group.shape.read())
            except pt.exceptions.NoSuchNodeError:
                shape = data.shape
            if mode == 'full':
                field_name = data_group.field_name.read()
            else:
                field_name = None
            out[key] = Struct(name=name, mode=mode, data=data,
                              dofs=dofs, shape=shape, field_name=field_name)
            if out[key].dofs == (-1,):
                out[key].dofs = None
        fd.close()
        return out
    def read_data_header(self, dname, step=0, filename=None):
        fd, step_group = self._get_step_group(step, filename=filename)
        if fd is None: return None
        groups = step_group._v_groups
        for name, data_group in groups.iteritems():
            try:
                key = data_group.dname.read()
            except pt.exceptions.NoSuchNodeError:
                continue
            if key == dname:
                mode = data_group.mode.read()
                fd.close()
                return mode, name
        fd.close()
        raise KeyError('non-existent data: %s' % dname)
    def read_time_history(self, node_name, indx, filename=None):
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        th = dict_from_keys_init(indx, list)
        for step in xrange(fd.root.last_step[0] + 1):
            gr_name = 'step%d' % step
            step_group = fd.getNode(fd.root, gr_name)
            data = step_group._f_getChild(node_name).data
            for ii in indx:
                th[ii].append(nm.array(data[ii]))
        fd.close()
        for key, val in th.iteritems():
            aux = nm.array(val)
            if aux.ndim == 4: # cell data.
                aux = aux[:,0,:,0]
            th[key] = aux
        return th
    def read_variables_time_history(self, var_names, ts, filename=None):
        filename = get_default(filename, self.filename)
        fd = pt.openFile(filename, mode="r")
        assert_((fd.root.last_step[0] + 1) == ts.n_step)
        ths = dict_from_keys_init(var_names, list)
        arr = nm.asarray
        for step in xrange(ts.n_step):
            gr_name = 'step%d' % step
            step_group = fd.getNode(fd.root, gr_name)
            name_dict = step_group._v_attrs.name_dict
            for var_name in var_names:
                data = step_group._f_getChild(name_dict[var_name]).data
                ths[var_name].append(arr(data.read()))
        fd.close()
        return ths
class MEDMeshIO(MeshIO):
    """Reader for the Salome MED format (HDF5-based, accessed via pytables)."""
    format = "med"
    def read(self, mesh, **kwargs):
        fd = pt.openFile(self.filename, mode="r")
        mesh_root = fd.root.ENS_MAA
        #TODO: Loop through multiple meshes?
        mesh_group = mesh_root._f_getChild(mesh_root._v_groups.keys()[0])
        # Descend one level when the node group is nested deeper.
        if not ('NOE' in mesh_group._v_groups.keys()):
            mesh_group = mesh_group._f_getChild(mesh_group._v_groups.keys()[0])
        mesh.name = mesh_group._v_name
        aux_coors = mesh_group.NOE.COO.read()
        n_nodes = mesh_group.NOE.COO.getAttr('NBR')
        # Unflatten the node coordinate array
        # (stored component-by-component).
        dim = aux_coors.shape[0] / n_nodes
        coors = nm.zeros((n_nodes,dim), dtype=nm.float64)
        for ii in range(dim):
            coors[:,ii] = aux_coors[n_nodes*ii:n_nodes*(ii+1)]
        ngroups = mesh_group.NOE.FAM.read()
        assert_((ngroups >= 0).all())
        # Dict to map MED element names to SfePy descs
        #NOTE: The commented lines are elements which
        # produce KeyError in SfePy
        med_descs = {
                      'TE4' : '3_4',
                      #'T10' : '3_10',
                      #'PY5' : '3_5',
                      #'P13' : '3_13',
                      'HE8' : '3_8',
                      #'H20' : '3_20',
                      #'PE6' : '3_6',
                      #'P15' : '3_15',
                      #TODO: Polyhedrons (POE) - need special handling
                      'TR3' : '2_3',
                      #'TR6' : '2_6',
                      'QU4' : '2_4',
                      #'QU8' : '2_8',
                      #TODO: Polygons (POG) - need special handling
                      #'SE2' : '1_2',
                      #'SE3' : '1_3',
                    }
        conns = []
        descs = []
        mat_ids = []
        for md, desc in med_descs.iteritems():
            # Only read element types matching the mesh dimension.
            if int(desc[0]) != dim: continue
            try:
                group = mesh_group.MAI._f_getChild(md)
                aux_conn = group.NOD.read()
                n_conns = group.NOD.getAttr('NBR')
                # (0 based indexing in numpy vs. 1 based in MED)
                nne = aux_conn.shape[0] / n_conns
                conn = nm.zeros((n_conns,nne), dtype=nm.int32)
                for ii in range(nne):
                    conn[:,ii] = aux_conn[n_conns*ii:n_conns*(ii+1)] - 1
                conns.append(conn)
                mat_id = group.FAM.read()
                assert_((mat_id <= 0).all())
                mat_id = nm.abs(mat_id)
                mat_ids.append(mat_id)
                descs.append(med_descs[md])
            except pt.exceptions.NoSuchNodeError:
                # This element type is not present in the file.
                pass
        fd.close()
        mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
        return mesh
class Mesh3DMeshIO(MeshIO):
    """Reader of the simple sectioned text "mesh3d" format."""
    format = "mesh3d"

    def read(self, mesh, **kwargs):
        # Read all sections in their fixed order; only tetrahedra and
        # hexahedra are passed to the mesh (prisms, tris, quads are read but
        # unused).
        f = open(self.filename)
        # read the whole file:
        vertices = self._read_section(f, integer=False)
        tetras = self._read_section(f)
        hexes = self._read_section(f)
        prisms = self._read_section(f)
        tris = self._read_section(f)
        quads = self._read_section(f)

        # substract 1 from all elements, because we count from 0:
        conns = []
        mat_ids = []
        descs = []
        if len(tetras) > 0:
            conns.append(tetras - 1)
            mat_ids.append([0]*len(tetras))
            descs.append("3_4")
        if len(hexes) > 0:
            conns.append(hexes - 1)
            mat_ids.append([0]*len(hexes))
            descs.append("3_8")

        mesh._set_io_data(vertices, None, conns, mat_ids, descs)
        return mesh

    def read_dimension(self):
        # The mesh3d format is always three-dimensional.
        return 3

    def _read_line(self, f):
        """
        Reads one non empty line (if it's a comment, it skips it).
        """
        l = f.readline().strip()
        while l == "" or l[0] == "#": # comment or an empty line
            l = f.readline().strip()
        return l

    def _read_section(self, f, integer=True):
        """
        Reads one section from the mesh3d file.

        integer ... if True, all numbers are passed to int(), otherwise to
        float(), before returning

        Some examples how a section can look like:

        2
        1 2 5 4 7 8 11 10
        2 3 6 5 8 9 12 11

        or

        5
        1 2 3 4 1
        1 2 6 5 1
        2 3 7 6 1
        3 4 8 7 1
        4 1 5 8 1

        or

        0
        """
        if integer:
            dtype=int
        else:
            dtype=float
        # First line of a section holds the row count.
        l = self._read_line(f)
        N = int(l)
        rows = []
        for i in range(N):
            l = self._read_line(f)
            row = nm.fromstring(l, sep=" ", dtype=dtype)
            rows.append(row)
        return nm.array(rows)
def mesh_from_groups(mesh, ids, coors, ngroups,
                     tris, mat_tris, quads, mat_quads,
                     tetras, mat_tetras, hexas, mat_hexas, remap=None):
    """
    Fill `mesh` from per-element-type connectivities given in terms of the
    original (possibly non-contiguous) vertex ids `ids`.

    If `remap` is None, a lookup array mapping original ids to 0-based
    contiguous indices is built here; otherwise the supplied one is used.
    Empty element groups are dropped.  Returns the filled `mesh`.
    """
    ids = nm.asarray(ids, dtype=nm.int32)
    coors = nm.asarray(coors, dtype=nm.float64)

    if remap is None:
        n_nod = coors.shape[0]
        # remap[original id] -> new contiguous index.
        remap = nm.zeros((ids.max()+1,), dtype=nm.int32)
        remap[ids] = nm.arange(n_nod, dtype=nm.int32)

    tris = remap[nm.array(tris, dtype=nm.int32)]
    quads = remap[nm.array(quads, dtype=nm.int32)]
    tetras = remap[nm.array(tetras, dtype=nm.int32)]
    hexas = remap[nm.array(hexas, dtype=nm.int32)]

    conns = [tris, quads, tetras, hexas]
    mat_ids = [nm.array(ar, dtype=nm.int32)
               for ar in [mat_tris, mat_quads, mat_tetras, mat_hexas]]
    descs = ['2_3', '2_4', '3_4', '3_8']

    # Remove empty groups.
    conns, mat_ids, descs = zip(*[(conns[ig], mat_ids[ig], descs[ig])
                                  for ig in xrange(4)
                                  if conns[ig].shape[0] > 0])

    mesh._set_io_data(coors, ngroups, conns, mat_ids, descs)
    return mesh
class AVSUCDMeshIO(MeshIO):
    """Reader of AVS UCD ascii meshes (tetrahedra and hexahedra only)."""
    format = 'avs_ucd'

    @staticmethod
    def guess(filename):
        # No cheap signature to test - accept any file.
        return True

    def read(self, mesh, **kwargs):
        fd = open(self.filename, 'r')

        # Skip all comments.
        while 1:
            line = fd.readline()
            if line and (line[0] != '#'):
                break

        header = [int(ii) for ii in line.split()]
        n_nod, n_el = header[0:2]

        ids = nm.zeros((n_nod,), dtype=nm.int32)
        dim = 3
        coors = nm.zeros((n_nod, dim), dtype=nm.float64)
        for ii in xrange(n_nod):
            # Node record: id x y z.
            line = fd.readline().split()
            ids[ii] = int(line[0])
            coors[ii] = [float(coor) for coor in line[1:]]

        mat_tetras = []
        tetras = []
        mat_hexas = []
        hexas = []
        for ii in xrange(n_el):
            # Cell record: id material type nodes...; other types ignored.
            line = fd.readline().split()
            if line[2] == 'tet':
                mat_tetras.append(int(line[1]))
                tetras.append([int(ic) for ic in line[3:]])
            elif line[2] == 'hex':
                mat_hexas.append(int(line[1]))
                hexas.append([int(ic) for ic in line[3:]])
        fd.close()

        mesh = mesh_from_groups(mesh, ids, coors, None,
                                [], [], [], [],
                                tetras, mat_tetras, hexas, mat_hexas)
        return mesh

    def read_dimension(self):
        # UCD meshes read here are always 3D.
        return 3

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class HypermeshAsciiMeshIO(MeshIO):
    """Reader of Hypermesh ascii (.hmascii) files."""
    format = 'hmascii'

    def read(self, mesh, **kwargs):
        fd = open(self.filename, 'r')

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []
        quads = []
        mat_quads = []
        trias = []
        mat_trias = []

        # The current component id is used as the material id of all
        # elements until the next '*component' line.
        mat_id = 0
        for line in fd:
            if line and (line[0] == '*'):
                if line[1:10] == 'component':
                    line = line.strip()[11:-1].split(',')
                    mat_id = int(line[0])
                if line[1:5] == 'node':
                    line = line.strip()[6:-1].split(',')
                    ids.append(int(line[0]))
                    coors.append([float(coor) for coor in line[1:4]])

                elif line[1:7] == 'tetra4':
                    line = line.strip()[8:-1].split(',')
                    mat_tetras.append(mat_id)
                    tetras.append([int(ic) for ic in line[2:6]])

                elif line[1:6] == 'hexa8':
                    line = line.strip()[7:-1].split(',')
                    mat_hexas.append(mat_id)
                    hexas.append([int(ic) for ic in line[2:10]])

                elif line[1:6] == 'quad4':
                    line = line.strip()[7:-1].split(',')
                    mat_quads.append(mat_id)
                    quads.append([int(ic) for ic in line[2:6]])

                elif line[1:6] == 'tria3':
                    line = line.strip()[7:-1].split(',')
                    mat_trias.append(mat_id)
                    trias.append([int(ic) for ic in line[2:5]])
        fd.close()

        mesh = mesh_from_groups(mesh, ids, coors, None,
                                trias, mat_trias, quads, mat_quads,
                                tetras, mat_tetras, hexas, mat_hexas)

        return mesh

    def read_dimension(self):
        # Hypermesh files read here are always 3D.
        return 3

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class AbaqusMeshIO(MeshIO):
    """Reader of a subset of the Abaqus input (.inp) format."""
    format = 'abaqus'

    @staticmethod
    def guess(filename):
        # Recognize a file by a '*node' keyword within the first 100 lines.
        ok = False
        fd = open(filename, 'r')
        for ii in xrange(100):
            try:
                line = fd.readline().strip().split(',')
            except:
                break
            if line[0].lower() == '*node':
                ok = True
                break
        fd.close()

        return ok

    def read(self, mesh, **kwargs):
        # Parse nodes, elements (C3D4, C3D8, CPS*/CPE* families) and node
        # sets into `mesh`.  Node sets become node groups numbered from 1.
        fd = open(self.filename, 'r')

        ids = []
        coors = []
        tetras = []
        mat_tetras = []
        hexas = []
        mat_hexas = []
        tris = []
        mat_tris = []
        quads = []
        mat_quads = []
        nsets = {}
        ing = 1
        dim = 0

        line = fd.readline().split(',')
        while 1:
            if not line[0]: break

            token = line[0].strip().lower()
            if token == '*node':
                while 1:
                    line = fd.readline().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    if dim == 0:
                        # Infer the dimension from the first node record.
                        dim = len(line) - 1
                    ids.append(int(line[0]))
                    if dim == 2:
                        coors.append([float(coor) for coor in line[1:3]])
                    else:
                        coors.append([float(coor) for coor in line[1:4]])

            elif token == '*element':

                if line[1].find('C3D8') >= 0:
                    while 1:
                        line = fd.readline().split(',')
                        if (not line[0]) or (line[0][0] == '*'): break
                        mat_hexas.append(0)
                        hexas.append([int(ic) for ic in line[1:9]])

                elif line[1].find('C3D4') >= 0:
                    while 1:
                        line = fd.readline().split(',')
                        if (not line[0]) or (line[0][0] == '*'): break
                        mat_tetras.append(0)
                        tetras.append([int(ic) for ic in line[1:5]])

                elif line[1].find('CPS') >= 0 or line[1].find('CPE') >= 0:
                    if line[1].find('4') >= 0:
                        while 1:
                            line = fd.readline().split(',')
                            if (not line[0]) or (line[0][0] == '*'): break
                            mat_quads.append(0)
                            quads.append([int(ic) for ic in line[1:5]])
                    elif line[1].find('3') >= 0:
                        while 1:
                            line = fd.readline().split(',')
                            if (not line[0]) or (line[0][0] == '*'): break
                            mat_tris.append(0)
                            tris.append([int(ic) for ic in line[1:4]])
                    else:
                        raise ValueError('unknown element type! (%s)' % line[1])
                else:
                    raise ValueError('unknown element type! (%s)' % line[1])

            elif token == '*nset':

                if line[-1].strip().lower() == 'generate':
                    # Generated node sets are not supported - skip the line.
                    line = fd.readline()
                    continue

                while 1:
                    line = fd.readline().strip().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    if not line[-1]: line = line[:-1]
                    aux = [int(ic) for ic in line]
                    nsets.setdefault(ing, []).extend(aux)
                ing += 1

            else:
                line = fd.readline().split(',')

        fd.close()

        ngroups = nm.zeros((len(coors),), dtype=nm.int32)
        for ing, ii in nsets.iteritems():
            # Node ids in the file are 1-based.
            ngroups[nm.array(ii)-1] = ing

        mesh = mesh_from_groups(mesh, ids, coors, ngroups,
                                tris, mat_tris, quads, mat_quads,
                                tetras, mat_tetras, hexas, mat_hexas)

        return mesh

    def read_dimension(self):
        # Infer the dimension from the length of node records.
        # NOTE(review): `dim` is unbound if no '*node' section exists, and a
        # non-'*node' keyword after the node section is never consumed (the
        # outer loop would spin) - verify against real inputs.
        fd = open(self.filename, 'r')
        line = fd.readline().split(',')
        while 1:
            if not line[0]: break

            token = line[0].strip().lower()
            if token == '*node':
                while 1:
                    line = fd.readline().split(',')
                    if (not line[0]) or (line[0][0] == '*'): break
                    dim = len(line) - 1
        fd.close()
        return dim

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError
class BDFMeshIO(MeshIO):
    """Reader/writer of NASTRAN bulk data (.bdf) meshes."""
    format = 'nastran'

    def read_dimension(self, ret_fd=False):
        """
        Return 3 if any CHEXA/CTETRA cards are present, 2 otherwise.
        If `ret_fd` is True, return also the open file object.
        """
        fd = open(self.filename, 'r')
        el3d = 0
        while 1:
            try:
                line = fd.readline()
            except:
                output("reading " + fd.name + " failed!")
                raise

            if len(line) == 1: continue
            if line[0] == '$': continue
            aux = line.split()

            if aux[0] == 'CHEXA':
                el3d += 1
            elif aux[0] == 'CTETRA':
                el3d += 1

        if el3d > 0:
            dim = 3
        else:
            dim = 2

        if ret_fd:
            return dim, fd
        else:
            fd.close()
            return dim

    def read(self, mesh, **kwargs):
        """
        Read nodes (GRID/GRID* cards), elements (CHEXA, CTETRA, CQUAD4,
        CTRIA3) and SPC node groups into `mesh`.
        """
        def mfloat(s):
            # Handle Nastran's exponent-less float notation, where e.g.
            # '1.23-4' means 1.23e-4.
            if len(s) > 3:
                if s[-3] == '-':
                    return float(s[:-3]+'e'+s[-3:])
            return float(s)

        import string

        fd = open(self.filename, 'r')

        el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
        nod = []
        cmd = ''
        dim = 2

        conns_in = []
        descs = []
        node_grp = None

        while 1:
            try:
                line = fd.readline()
            except EOFError:
                break
            except:
                output("reading " + fd.name + " failed!")
                raise

            if (len(line) == 0): break
            if len(line) < 4: continue
            if line[0] == '$': continue

            row = line.strip().split()
            if row[0] == 'GRID':
                # Fixed-width single-line node card: last 24 chars hold the
                # three 8-char coordinate fields.
                cs = line.strip()[-24:]
                aux = [ cs[0:8], cs[8:16], cs[16:24] ]
                nod.append([mfloat(ii) for ii in aux]);
            elif row[0] == 'GRID*':
                # Long-format node card: continued on the next line (GRIDX).
                aux = row[1:4];
                cmd = 'GRIDX';
            elif row[0] == 'CHEXA':
                # Hexahedron card: continued on the next line (CHEXAX).
                aux = [int(ii)-1 for ii in row[3:9]]
                aux2 = int(row[2])
                aux3 = row[9]
                cmd ='CHEXAX'
            elif row[0] == 'CTETRA':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append(int(row[2]))
                el['3_4'].append(aux)
                dim = 3
            elif row[0] == 'CQUAD4':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append(int(row[2]))
                el['2_4'].append(aux)
            elif row[0] == 'CTRIA3':
                aux = [int(ii)-1 for ii in row[3:]]
                aux.append(int(row[2]))
                el['2_3'].append(aux)
            elif cmd == 'GRIDX':
                # Continuation of a GRID* card.
                cmd = ''
                aux2 = row[1]
                if aux2[-1] == '0':
                    aux2 = aux2[:-1]
                aux3 = aux[1:]
                aux3.append(aux2)
                nod.append([float(ii) for ii in aux3]);
            elif cmd == 'CHEXAX':
                # Continuation of a CHEXA card.
                cmd = ''
                aux4 = row[0]
                aux5 = string.find(aux4, aux3)
                aux.append(int(aux4[(aux5+len(aux3)):])-1)
                aux.extend([int(ii)-1 for ii in row[1:]])
                aux.append(aux2)
                el['3_8'].append(aux)
                dim = 3
            elif row[0] == 'SPC' or row[0] == 'SPC*':
                # Single point constraints define node groups.
                if node_grp is None:
                    node_grp = [0] * len(nod)

                node_grp[int(row[2]) - 1] = int(row[1])

        for elem in el.keys():
            if len(el[elem]) > 0:
                conns_in.append(el[elem])
                descs.append(elem)

        fd.close()

        nod = nm.array(nod, nm.float64)
        if dim == 2:
            nod = nod[:,:2].copy()

        conns, mat_ids = split_conns_mat_ids(conns_in)
        mesh._set_io_data(nod, node_grp, conns, mat_ids, descs)

        return mesh

    @staticmethod
    def format_str(str, idx, n=8):
        # Format integers into fixed-width (n-char) fields, inserting a
        # continuation marker after the 8th field.
        # NOTE(review): the parameter `str` shadows the builtin.
        out = ''
        for ii, istr in enumerate(str):
            aux = '%d' % istr
            out += aux + ' ' * (n - len(aux))
            if ii == 7:
                out += '+%07d\n+%07d' % (idx, idx)
        return out

    def write(self, filename, mesh, out=None, **kwargs):
        """Write `mesh` as a NASTRAN bulk data file."""
        fd = open(filename, 'w')

        coors, ngroups, conns, mat_ids, desc = mesh._get_io_data()

        n_nod, dim = coors.shape

        fd.write("$NASTRAN Bulk Data File created by SfePy\n")
        fd.write("$\nBEGIN BULK\n")
        fd.write("$\n$ ELEMENT CONNECTIVITY\n$\n")

        iel = 0
        mats = {}
        for ig, conn in enumerate(conns):
            ids = mat_ids[ig]
            for ii in range(conn.shape[0]):
                iel += 1
                nn = conn[ii] + 1
                mat = ids[ii]
                if mat in mats:
                    mats[mat] += 1
                else:
                    mats[mat] = 0
                if (desc[ig] == "2_4"):
                    fd.write("CQUAD4  %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2], nn[3]],
                                             iel))
                elif (desc[ig] == "2_3"):
                    fd.write("CTRIA3  %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2]], iel))
                elif (desc[ig] == "3_4"):
                    fd.write("CTETRA  %s\n" %\
                             self.format_str([ii + 1, mat,
                                              nn[0], nn[1], nn[2], nn[3]],
                                             iel))
                elif (desc[ig] == "3_8"):
                    fd.write("CHEXA   %s\n" %\
                             self.format_str([ii + 1, mat, nn[0], nn[1], nn[2],
                                              nn[3], nn[4], nn[5], nn[6],
                                              nn[7]], iel))
                else:
                    raise ValueError('unknown element type! (%s)' % desc[ig])

        fd.write("$\n$ NODAL COORDINATES\n$\n")
        format = 'GRID* %s % 08E % 08E\n'
        if coors.shape[1] == 3:
            format += '* % 08E0 \n'
        else:
            # 2D meshes get a constant zero third coordinate.
            format += '* % 08E0 \n' % 0.0
        for ii in range(n_nod):
            sii = str(ii + 1)
            fd.write(format % ((sii + ' ' * (8 - len(sii)),)
                               + tuple(coors[ii])))

        fd.write("$\n$ GEOMETRY\n$\n1 ")
        fd.write("0.000000E+00 0.000000E+00\n")
        fd.write("* 0.000000E+00 0.000000E+00\n* \n")

        fd.write("$\n$ MATERIALS\n$\n")
        matkeys = mats.keys()
        matkeys.sort()
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : Isotropic\n" % imat)
            aux = str(imat)
            fd.write("MAT1* %s " % (aux + ' ' * (8 - len(aux))))
            fd.write("0.000000E+00 0.000000E+00\n")
            fd.write("* 0.000000E+00 0.000000E+00\n")

        fd.write("$\n$ GEOMETRY\n$\n")
        for ii, imat in enumerate(matkeys):
            fd.write("$ material%d : solid%d\n" % (imat, imat))
            fd.write("PSOLID* %s\n" % self.format_str([ii + 1, imat], 0, 16))
            fd.write("* \n")
        fd.write("ENDDATA\n")
        fd.close()
class NEUMeshIO(MeshIO):
format = 'gambit'
def read_dimension(self, ret_fd=False):
fd = open(self.filename, 'r')
row = fd.readline().split()
while 1:
if not row: break
if len(row) == 0: continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = row[0], row[1], int(row[4])
break;
if ret_fd:
return dim, fd
else:
fd.close()
return dim
def read(self, mesh, **kwargs):
el = {'3_8' : [], '3_4' : [], '2_4' : [], '2_3' : []}
nod = []
conns_in = []
descs = []
group_ids = []
group_n_els = []
groups = []
nodal_bcs = {}
fd = open(self.filename, 'r')
row = fd.readline().split()
while 1:
if not row: break
if len(row) == 0: continue
if (row[0] == 'NUMNP'):
row = fd.readline().split()
n_nod, n_el, dim = row[0], row[1], int(row[4])
elif (row[0] == 'NODAL'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
nod.append(row[1:])
row = fd.readline().split()
elif (row[0] == 'ELEMENTS/CELLS'):
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
elid = [row[0]]
gtype = int(row[1])
if gtype == 6:
el['3_4'].append(row[3:]+elid)
elif gtype == 4:
rr = row[3:]
if (len(rr) < 8):
rr.extend(fd.readline().split())
el['3_8'].append(rr+elid)
elif gtype == 3:
el['2_3'].append(row[3:]+elid)
elif gtype == 2:
el['2_4'].append(row[3:]+elid)
row = fd.readline().split()
elif (row[0] == 'GROUP:'):
group_ids.append(row[1])
g_n_el = int(row[3])
group_n_els.append(g_n_el)
name = fd.readline().strip()
els = []
row = fd.readline().split()
row = fd.readline().split()
while not(row[0] == 'ENDOFSECTION'):
els.extend(row)
row = fd.readline().split()
if g_n_el != len(els):
msg = 'wrong number of group elements! (%d == %d)'\
% (n_el, len(els))
raise ValueError(msg)
groups.append(els)
elif (row[0] == 'BOUNDARY'):
row = fd.readline().split()
key = row[0]
num = int(row[2])
inod = read_array(fd, num, None, nm.int32) - 1
nodal_bcs[key] = inod.squeeze()
row = fd.readline().split()
assert_(row[0] == 'ENDOFSECTION')
else:
row = fd.readline().split()
fd.close()
if int(n_el) != sum(group_n_els):
print 'wrong total number of group elements! (%d == %d)'\
% (int(n_el), len(group_n_els))
mat_ids = [None] * int(n_el)
for ii, els in enumerate(groups):
for iel in els:
mat_ids[int(iel) - 1] = group_ids[ii]
for elem in el.keys():
if len(el[elem]) > 0:
for iel in el[elem]:
for ii in range(len(iel)):
iel[ii] = int(iel[ii]) - 1
iel[-1] = mat_ids[iel[-1]]
conns_in.append(el[elem])
descs.append(elem)
nod = nm.array(nod, nm.float64)
conns, mat_ids = split_conns_mat_ids(conns_in)
mesh._set_io_data(nod, None, conns, mat_ids, descs, nodal_bcs=nodal_bcs)
return mesh
def write(self, filename, mesh, out=None, **kwargs):
raise NotImplementedError
class ANSYSCDBMeshIO(MeshIO):
    """Reader of ANSYS CDB files (nblock/eblock/cmblock records)."""
    format = 'ansys_cdb'

    @staticmethod
    def guess(filename):
        # Recognize the file by an 'nblock' keyword in the first 1000 lines.
        fd = open(filename, 'r')

        for ii in xrange(1000):
            row = fd.readline()
            if not row: break
            if len(row) == 0: continue

            row = row.split(',')
            kw = row[0].lower()

            if (kw == 'nblock'):
                ok = True
                break

        else:
            ok = False

        fd.close()

        return ok

    @staticmethod
    def make_format(format):
        # Translate a Fortran-like field list (e.g. '3i9,6e21.13e3') into
        # (start, end) column slices and per-field type letters ('i'/'e').
        idx = [];
        dtype = [];
        start = 0;

        for iform in format:
            ret = iform.partition('i')
            if not ret[1]:
                ret = iform.partition('e')
            if not ret[1]:
                raise ValueError
            aux = ret[2].partition('.')
            step = int(aux[0])
            for j in range(int(ret[0])):
                idx.append((start, start+step))
                start += step
                dtype.append(ret[1])

        return idx, dtype

    def write(self, filename, mesh, out=None, **kwargs):
        raise NotImplementedError

    def read_bounding_box(self):
        raise NotImplementedError

    def read_dimension(self, ret_fd=False):
        # CDB meshes read here are always 3D.
        return 3

    def read(self, mesh, **kwargs):
        # Parse fixed-width node (nblock), element (eblock) and node set
        # (cmblock) records; linear and quadratic tetra/hexa are supported,
        # quadratic cells are reduced to their corner nodes.
        ids = []
        coors = []
        tetras = []
        hexas = []
        qtetras = []
        qhexas = []
        nodal_bcs = {}

        fd = open(self.filename, 'r')

        while True:
            row = fd.readline()
            if not row: break
            if len(row) == 0: continue

            row = row.split(',')
            kw = row[0].lower()

            if (kw == 'nblock'):
                # Solid keyword -> 3, otherwise 1 is the starting coors index.
                ic = 3 if len(row) == 3 else 1
                fmt = fd.readline()
                fmt = fmt.strip()[1:-1].split(',')
                idx, dtype = self.make_format(fmt)
                ii0, ii1 = idx[0]
                while True:
                    row = fd.readline()
                    if (row[0] == '!') or (row[:2] == '-1'):
                        break

                    line = [float(row[i0:i1]) for i0, i1 in idx[ic:]]

                    ids.append(int(row[ii0:ii1]))
                    coors.append(line)

            elif (kw == 'eblock'):
                if (len(row) <= 2) or row[2] != 'solid': # no solid keyword
                    continue

                fmt = fd.readline()
                fmt = [fmt.strip()[1:-1]]
                idx, dtype = self.make_format(fmt)

                imi0, imi1 = idx[0] # Material id.
                inn0, inn1 = idx[8] # Number of nodes in line.
                ien0, ien1 = idx[10] # Element number.
                ic0 = 11
                while True:
                    row = fd.readline()
                    if (row[0] == '!') or (row[:2] == '-1'):
                        break

                    line = [int(row[imi0:imi1])]
                    n_nod = int(row[inn0:inn1])
                    line.extend(int(row[i0:i1])
                                for i0, i1 in idx[ic0 : ic0 + n_nod])
                    if n_nod == 4:
                        tetras.append(line)

                    elif n_nod == 8:
                        hexas.append(line)

                    elif n_nod == 10:
                        # Quadratic tetra: remaining nodes on the next line.
                        row = fd.readline()
                        line.extend(int(row[i0:i1])
                                    for i0, i1 in idx[:2])
                        qtetras.append(line)

                    elif n_nod == 20:
                        # Quadratic hexa: remaining nodes on the next line.
                        row = fd.readline()
                        line.extend(int(row[i0:i1])
                                    for i0, i1 in idx[:12])
                        qhexas.append(line)

                    else:
                        raise ValueError('unsupported element type! (%d nodes)'
                                         % n_nod)

            elif kw == 'cmblock':
                if row[2].lower() != 'node': # Only node sets support.
                    continue

                n_nod = int(row[3])
                fd.readline() # Format line not needed.

                nods = read_array(fd, n_nod, 1, nm.int32)
                nodal_bcs[row[1].strip()] = nods.ravel()

        fd.close()

        coors = nm.array(coors, dtype=nm.float64)

        tetras = nm.array(tetras, dtype=nm.int32)
        if len(tetras):
            # First column is the material id.
            mat_ids_tetras = tetras[:, 0]
            tetras = tetras[:, 1:]

        else:
            mat_ids_tetras = nm.array([])

        hexas = nm.array(hexas, dtype=nm.int32)
        if len(hexas):
            mat_ids_hexas = hexas[:, 0]
            hexas = hexas[:, 1:]

        else:
            mat_ids_hexas = nm.array([])

        if len(qtetras):
            # Keep only the corner nodes of quadratic tetrahedra.
            qtetras = nm.array(qtetras, dtype=nm.int32)
            tetras.shape = (max(0, tetras.shape[0]), 4)
            tetras = nm.r_[tetras, qtetras[:, 1:5]]
            mat_ids_tetras = nm.r_[mat_ids_tetras, qtetras[:, 0]]

        if len(qhexas):
            # Keep only the corner nodes of quadratic hexahedra.
            qhexas = nm.array(qhexas, dtype=nm.int32)
            hexas.shape = (max(0, hexas.shape[0]), 8)
            hexas = nm.r_[hexas, qhexas[:, 1:9]]
            mat_ids_hexas = nm.r_[mat_ids_hexas, qhexas[:, 0]]

        if len(qtetras) or len(qhexas):
            # Mid-side nodes became unused - renumber to referenced nodes
            # only.
            ii = nm.union1d(tetras.ravel(), hexas.ravel())
            n_nod = len(ii)

            remap = nm.zeros((ii.max()+1,), dtype=nm.int32)
            remap[ii] = nm.arange(n_nod, dtype=nm.int32)

            ic = nm.searchsorted(ids, ii)
            coors = coors[ic]

        else:
            n_nod = coors.shape[0]
            remap = nm.zeros((nm.array(ids).max() + 1,), dtype=nm.int32)
            remap[ids] = nm.arange(n_nod, dtype=nm.int32)

        ngroups = nm.zeros(len(coors), dtype=nm.int32)

        mesh = mesh_from_groups(mesh, ids, coors, ngroups,
                                [], [], [], [],
                                tetras, mat_ids_tetras,
                                hexas, mat_ids_hexas, remap=remap)

        mesh.nodal_bcs = {}
        for key, nods in nodal_bcs.iteritems():
            mesh.nodal_bcs[key] = remap[nods]

        return mesh
def guess_format(filename, ext, formats, io_table):
    """
    Guess the format of filename, candidates are in formats.

    Probe each candidate format's ``guess()`` in order and return the first
    one that accepts the file; raise NotImplementedError when none does.
    """
    matched = False
    for format in formats:
        output('guessing %s' % format)

        try:
            matched = io_table[format].guess(filename)
        except AttributeError:
            # Formats without a guess() method cannot be probed.
            pass

        if matched:
            break

    else:
        raise NotImplementedError('cannot guess format of a *%s file!' % ext)

    return format
# Build the format-name -> MeshIO-subclass table from all classes defined
# above; non-class entries raise TypeError in is_derived_class and are
# skipped.
var_dict = vars().items()
io_table = {}

for key, var in var_dict:
    try:
        if is_derived_class(var, MeshIO):
            io_table[var.format] = var
    except TypeError:
        pass
del var_dict
def any_from_filename(filename, prefix_dir=None):
    """
    Create a MeshIO instance according to the kind of `filename`.

    Parameters
    ----------
    filename : str, function or MeshIO subclass instance
        The name of the mesh file. It can be also a user-supplied function
        accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh
        instance and `mode` is one of 'read','write', or a MeshIO subclass
        instance.
    prefix_dir : str
        The directory name to prepend to `filename`.

    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the kind of `filename`.
    """
    if not isinstance(filename, basestr):
        if isinstance(filename, MeshIO):
            # Already an IO instance - pass it through.
            return filename

        else:
            # Wrap a user-supplied function.
            return UserMeshIO(filename)

    ext = op.splitext(filename)[1].lower()
    try:
        format = supported_formats[ext]
    except KeyError:
        raise ValueError('unsupported mesh file suffix! (%s)' % ext)

    if isinstance(format, tuple):
        # Ambiguous suffix - probe the candidate formats.
        format = guess_format(filename, ext, format, io_table)

    if prefix_dir is not None:
        filename = op.normpath(op.join(prefix_dir, filename))

    return io_table[format](filename)

# Attach as a static method of MeshIO and drop the module-level name.
insert_static_method(MeshIO, any_from_filename)
del any_from_filename
def for_format(filename, format=None, writable=False, prefix_dir=None):
    """
    Create a MeshIO instance for file `filename` with forced `format`.

    Parameters
    ----------
    filename : str
        The name of the mesh file.
    format : str
        One of supported formats. If None,
        :func:`MeshIO.any_from_filename()` is called instead.
    writable : bool
        If True, verify that the mesh format is writable.
    prefix_dir : str
        The directory name to prepend to `filename`.

    Returns
    -------
    io : MeshIO subclass instance
        The MeshIO subclass instance corresponding to the `format`.
    """
    ext = op.splitext(filename)[1].lower()
    try:
        _format = supported_formats[ext]
    except KeyError:
        _format = None

    format = get_default(format, _format)

    if format is None:
        io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)

    else:
        if not isinstance(format, basestr):
            # A tuple here means several formats share the suffix.
            raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))

        if format not in io_table:
            raise ValueError('unknown output mesh format! (%s)' % format)

        if writable and ('w' not in supported_capabilities[format]):
            output_writable_meshes()
            msg = 'write support not implemented for output mesh format "%s",' \
                  ' see above!' % format
            raise ValueError(msg)

        if prefix_dir is not None:
            filename = op.normpath(op.join(prefix_dir, filename))

        io = io_table[format](filename)

    return io

# Attach as a static method of MeshIO and drop the module-level name.
insert_static_method(MeshIO, for_format)
del for_format
|
RexFuzzle/sfepy
|
sfepy/discrete/fem/meshio.py
|
Python
|
bsd-3-clause
| 86,536
|
[
"VTK"
] |
a63a9606d4438fe59c1787039ddb15982e09f9236925458dddaa07a11143a8f1
|
#!/usr/bin/python
import sys
from ctypes import *
from functions import *
from math import sqrt
import numpy as np
import multiprocessing
import pyfits
import signal
# Load the library for the CFitsio routines
# NOTE(review): absolute, machine-specific path - consider making it
# configurable via an environment variable.
overseer = CDLL("/home/steven/Projects/galaxy_icd/libraries/liboverseer.so.0")
def main(sample):
naxes = (c_long*2)(500,500)
base='/home/steven/Projects/image_fields/CANDELS/GOODS_S/'
print "loading images...rms1",
f = pyfits.open(base+"fields/gs_acs_I_matched_rms.fits")
rms1_data=f[0].data
print "...rms2",
f = pyfits.open(base+"fields/gs_wfc3_H_rms.fits")
rms2_data=f[0].data
print "...image1",
f = pyfits.open(base+"fields/gs_acs_I_matched.fits")
image1_data=f[0].data
print "...image2",
f = pyfits.open(base+"fields/gs_wfc3_H.fits")
image2_data=f[0].data
print "..segmap",
f = pyfits.open(base+"fields/gs_segmap_h.fits")
segmap_data=f[0].data
print "...done"
del f
cat_data = np.loadtxt(base+"catalogs/gsd4e_sx_h_110509a_b_comb_4tfit.cat")
active = np.loadtxt(sample)
# Get the number of processors available
num_processes = multiprocessing.cpu_count()
threads = []
print "+++ Number of galaxies to process: %s" % (len(active[:,0]))
# run until all the threads are done, and there is no data left
done = False
a = 0
while not done:
if( len(threads) < num_processes-1):
#galaxy_num = int(active[a][0])
galaxy_num = int(active[a][3])
x_coor = int(cat_data[galaxy_num-1][1])
y_coor = int(cat_data[galaxy_num-1][2])
# Load the images into 1D arrays
rms1 = rms1_data[y_coor-naxes[0]/2:y_coor+naxes[0]/2,
x_coor-naxes[1]/2:x_coor+naxes[1]/2]
rms2 = rms2_data[y_coor-naxes[0]/2:y_coor+naxes[0]/2,
x_coor-naxes[1]/2:x_coor+naxes[1]/2]
image1 = image1_data[y_coor-naxes[0]/2:y_coor+naxes[0]/2,
x_coor-naxes[1]/2:x_coor+naxes[1]/2]
image2 = image2_data[y_coor-naxes[0]/2:y_coor+naxes[0]/2,
x_coor-naxes[1]/2:x_coor+naxes[1]/2]
segmap = segmap_data[y_coor-naxes[0]/2:y_coor+naxes[0]/2,
x_coor-naxes[1]/2:x_coor+naxes[1]/2]
f1 = open("galaxy"+str(galaxy_num).zfill(5)+"_gsd_IH.txt","wt")
p = multiprocessing.Process(target=work,
args=[naxes,rms1,rms2,image1,image2,segmap,x_coor,y_coor,galaxy_num,f1])
p.start()
print p, p.is_alive()
threads.append(p)
a+=1
else:
for thread in threads:
if not thread.is_alive():
threads.remove(thread)
if a == len(active[:,0]):
done = True
def work(naxes,rms1,rms2,image1,image2,segmap,x_coor,y_coor,galaxy_num,f1):
    """
    Compute the ICD (internal color dispersion) of one galaxy and write one
    result line to `f1`.  Runs inside a worker process started by main().
    """
    # Declare arrays for the images to be put into
    rms1_c = (c_float*(naxes[0]*naxes[1]))(-1.0)
    rms2_c = (c_float*(naxes[0]*naxes[1]))(-1.0)
    image1_c = (c_float*(naxes[0]*naxes[1]))(-1.0)
    image2_c = (c_float*(naxes[0]*naxes[1]))(-1.0)
    segmap_c = (c_float*(naxes[0]*naxes[1]))(-1.0)

    # Flatten the numpy stamps into the ctypes buffers.
    rms1 = convert(rms1,rms1_c,naxes,'I')
    rms2 = convert(rms2,rms2_c,naxes,'H')
    image1 = convert(image1,image1_c,naxes,'I')
    image2 = convert(image2,image2_c,naxes,'H')
    segmap = convert(segmap,segmap_c,naxes)

    del rms1_c,rms2_c,image1_c,image2_c,segmap_c

    # The stamp is centred, so the galaxy sits at its middle pixel.
    x_coor = naxes[0]/2
    y_coor = naxes[1]/2

    # Abort the C computation if it takes longer than 30 s.
    signal.alarm(30)
    pr = overseer.calc_pr(image2,segmap,c_float(galaxy_num),x_coor,y_coor,naxes)
    if not pr:
        f1.close()
        print "ERROR!!"
        return 0
    pr_map = make_pr_map(image2,segmap,galaxy_num,int(x_coor),int(y_coor),pr,naxes)

    #alpha,beta = calc_scale_factors(galaxy_num,pr_map,image1,image2,rms2)
    alpha,beta = calc_scale_factors3(pr_map, image1, image2, rms2)

    flux = background = 0.0
    # Sentinel "very large" initial weights, lowered to the minimum below.
    g1w = g2w = 10E8
    for i in range(len(pr_map)):
        try:
            if (g1w >= calc_weight(rms1[pr_map[i]])):
                g1w = calc_weight(rms1[pr_map[i]])
            if (g2w >= calc_weight(rms2[pr_map[i]])):
                g2w = calc_weight(rms2[pr_map[i]])
        except:
            # NOTE(review): bare except hides real errors - narrow it to the
            # exception calc_weight/indexing can actually raise.
            pass
        flux+=image1[pr_map[i]]
        background +=rms1[pr_map[i]]**2

    ston = flux/sqrt(background)
    size = int((sqrt(len(pr_map))/2.0)+1.0)

    # Median over 9 background realizations to stabilize the estimate.
    icd = []
    err = []
    for i in range(9):
        back1=back2=0
        while (0 == back1):
            back1 = get_background_pr(segmap,image1,rms1,size,g1w,naxes)
        while (0 == back2):
            back2 = get_background_pr(segmap,image2,rms2,size,g2w,naxes)
        ICD = calc_icd_pr(alpha, beta, galaxy_num, pr_map, image1, image2,
            back1, back2)
        ERR = icd_error_pr(alpha, beta, pr_map, galaxy_num, image2, back1,
            back2)
        icd.append(ICD)
        err.append(ERR)

    # Cancel the timeout alarm.
    signal.alarm(0)

    ICD = np.median(np.asarray(icd))
    ERR = np.median(np.asarray(err))
    print galaxy_num,ICD,ERR

    f1.writelines(str(galaxy_num)+" ")
    f1.writelines(str(ICD)+" ")
    f1.writelines(str(ERR)+" ")
    f1.writelines(str(ston)+" ")
    f1.writelines(str(pr)+" ")
    f1.writelines(str(alpha)+"\n")
    f1.close()
# --- END OF MAIN FUNCTION --- #
if __name__ == "__main__":
    # Entry point: the first CLI argument is the sample catalog file name.
    sys.exit(main(sys.argv[1]))
|
boada/ICD
|
parallel_main_gsd.py
|
Python
|
mit
| 5,357
|
[
"Galaxy"
] |
d44f2bfbb466dfbf4e23a48cf44c35f1857f6ba3a08121af5aaab2416284f94f
|
import types
from moose import *
# Names of the MOOSE random number generator classes.
# NOTE(review): neither list is referenced elsewhere in this file - verify
# they are used by importers before removing.
rng = ["Binomial", "Exponential", "Gamma", "Normal", "Poisson" ]
mooseClasses = []
class MooseClasses:
    """
    Regression helper: loads MOOSE class names from 'classes.txt' and checks
    that each class can be instantiated through PyMOOSE.
    """
    def __init__(self):
        self._classList = []
        class_file = open("classes.txt", "r")
        for class_name in class_file:
            class_name = class_name.strip()
            if len(class_name) > 0:
                # SECURITY: eval() on file contents executes arbitrary code -
                # acceptable only because classes.txt is a trusted local
                # fixture; getattr on the moose module would be safer.
                classObj = eval(class_name)
                self._classList.append(classObj)

    def classList(self):
        """Returns a list of MOOSE classes available in PyMOOSE"""
        return self._classList

    def testCreation(self):
        """Try to create each class in list"""
        result = True
        container = Neutral("/testContainer")
        for mooseClass in self._classList:
            print "Create", mooseClass.__name__,
            entity = mooseClass(mooseClass.__name__,container)
            if entity.id.good():
                print "- OK"
            else:
                print "- Failed"
                result = False
        return result
if __name__ == "__main__":
    # Run the creation test directly when executed as a script.
    test = MooseClasses()
    test.testCreation()
|
BhallaLab/moose-thalamocortical
|
pymoose/tests/regression/TestClassCreation.py
|
Python
|
lgpl-2.1
| 932
|
[
"MOOSE"
] |
7b1fe0408e9bbe8e6187012fa40d6c9e0d8344ccd7de6cdad9dcb17734ffa90d
|
# -*- coding: utf-8 -*-
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
"""
The :mod:`sklearn.gaussian_process` module implements Gaussian Process
based regression and classification.
"""
from ._gpr import GaussianProcessRegressor
from ._gpc import GaussianProcessClassifier
from . import kernels
__all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"]
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/__init__.py
|
Python
|
bsd-3-clause
| 530
|
[
"Gaussian"
] |
93362b7d7abc387bc358268a0eb92c519e550d6456b1d120d43c58cb7b2bffab
|
import numpy as np
import cPickle, gzip
from sklearn.manifold import TSNE
def convert_to_Y(y):
    """Return a one-hot (n_samples, n_classes) indicator matrix for the
    integer label vector `y` (labels assumed in 0..n_classes-1)."""
    n_samples = y.shape[0]
    n_classes = np.unique(y).size
    indicator = np.zeros((n_samples, n_classes))
    indicator[np.arange(n_samples), y] = 1
    return indicator
def double_swiss_roll(n_samples = 250, var = 1.):
    '''Generate four 2-D Gaussian clusters rolled up into a 3-D swiss roll.

    details about the generating procedure can be found at
    http://people.cs.uchicago.edu/~dinoj/manifold/swissroll.html

    Returns (X, Y, y): coordinates of shape (4*n_samples, 3), the one-hot
    label matrix and the integer label vector.
    '''
    # generate gaussian clusters
    means = 2.5*np.array([[+1,+1], [-1,-1], [+1,-1], [-1,+1]], dtype=float) + 10
    A = np.vstack(tuple([np.random.multivariate_normal(mean, var*np.eye(2), n_samples)\
        for mean in means]))
    # range instead of xrange: identical here and works on Python 3 too.
    y = np.vstack(tuple([i*np.ones((n_samples,1),dtype=int) for i in range(means.shape[0])]))[:,0]
    # roll up the gaussian clusters
    a1, a2 = A[:,0], A[:,1]
    X = np.column_stack((a1*np.cos(a1), a2, a1*np.sin(a1)))
    Y = convert_to_Y(y)
    return X, Y, y
def mnist(fid = 'data/mnist.pkl.gz'):
    """
    Load the pickled MNIST training set from `fid` and return
    (X, Y, y): raw features, one-hot labels, integer labels.
    """
    f = gzip.open(fid, 'rb')
    train_set, valid_set, test_set = cPickle.load(f)
    f.close()
    X, y = train_set
    Y = convert_to_Y(y)
    #model = TSNE(n_components=2, random_state=0)
    #X = model.fit_transform(X)
    return X, Y, y
def load_table(filename):
    """Load a whitespace-separated table whose first column is the class
    label; labels are shifted to start at 0.  Returns (X, Y, y)."""
    table = np.loadtxt(filename)
    labels = np.int32(table[:, 0])
    labels = labels - min(labels)
    features = table[:, 1:]
    one_hot = convert_to_Y(labels)
    return features, one_hot, labels
def usps(fid = 'data/zip.train'):
    """Load the USPS digits table (label in the first column)."""
    return load_table(fid)
def letter(fid = 'data/letter.scale'):
    """Load the letter recognition table (label in the first column)."""
    return load_table(fid)
|
quark0/AnchorClouds
|
manifold_generator.py
|
Python
|
mit
| 1,459
|
[
"Gaussian"
] |
21804cb600577b82f93132a549d3dd23881702c43c260fe700fb6a184a19fb48
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
import math
import numpy
class USANSSimulation(PythonAlgorithm):
def category(self):
    """Return the Mantid algorithm category."""
    return "SANS"
def seeAlso(self):
    """Return the list of related algorithms."""
    return [ "USANSReduction" ]
def name(self):
    """Return the algorithm name."""
    return "USANSSimulation"
def summary(self):
return "Simulate a USANS workspace"
def PyInit(self):
self.declareProperty("TwoTheta", 0.01, "Scattering angle in degrees")
self.declareProperty(FloatArrayProperty("WavelengthPeaks", values=[0.72, 0.9, 1.2, 1.8, 3.6],
direction=Direction.Input), "Wavelength peaks out of the monochromator")
self.declareProperty("CountTime", 1000.0, "Fake count time")
# Model parameters
self.declareProperty("EmptyRun", False, "If True, the run is considered an empty run")
self.declareProperty("SphereRadius", 60.0, "Radius for the sphere model (Angstrom)")
self.declareProperty("Background", 0.0, "Background")
self.declareProperty("SigmaPeak", 0.01, "Width of the wavelength peaks")
self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "", Direction.Output), "Output workspace")
self.declareProperty(MatrixWorkspaceProperty("MonitorWorkspace", "", Direction.Output), "Output monitor workspace")
#pylint: disable=too-many-locals
def PyExec(self):
workspace = self.getPropertyValue("OutputWorkspace")
out_ws = CreateSimulationWorkspace(Instrument="USANS",
BinParams="0,50,32000",
UnitX="TOF",
OutputWorkspace=workspace)
out_ws.setYUnitLabel("1/cm")
data_x = out_ws.dataX(0)
mon_ws_name = self.getPropertyValue("MonitorWorkspace")
mon_ws = CreateWorkspace(dataX=data_x, dataY=numpy.zeros(len(data_x)-1),
UnitX="TOF", OutputWorkspace=mon_ws_name)
mon_y = mon_ws.dataY(0)
mon_e = mon_ws.dataE(0)
# Number of pixels for the main detector
n_pixels = int(out_ws.getNumberHistograms()/2)
# Clean up the workspace
for j in range(n_pixels):
data_y = out_ws.dataY(j)
for i in range(len(data_y)):
data_y[i] = 0.0
# Fill monitor workspace with fake beam profile
count_time = self.getProperty("CountTime").value
for i in range(len(data_x)-1):
wl_i = 0.0039560/30.0*(data_x[i]+data_x[i+1])/2.0
mon_y[i] = count_time*math.exp(-wl_i)
mon_e[i] = math.sqrt(mon_y[i])
# Add analyzer theta value and monochromator angle theta_b in logs
two_theta = self.getProperty("TwoTheta").value
is_empty_run = self.getProperty("EmptyRun").value
if is_empty_run:
two_theta = 0.0
theta_b = 70.0
theta = theta_b + two_theta
out_ws.getRun().addProperty("AnalyzerTheta", theta, 'degree', True)
out_ws.getRun().addProperty("two_theta", two_theta, 'degree', True)
out_ws.getRun().addProperty("MonochromatorTheta", theta_b, 'degree', True)
out_ws.getRun().addProperty("run_title", "Simulated USANS", True)
out_ws.getRun().addProperty("run_number", "1234", True)
# List of wavelength peaks, and width of the peaks
wl_peaks = self.getProperty("WavelengthPeaks").value
sigma = self.getProperty("SigmaPeak").value
for wl in wl_peaks:
q = 6.28*math.sin(two_theta)/wl
Logger("USANS").notice( "wl = %g; Q = %g" % (wl, q))
for i in range(len(data_x)-1):
wl_i = 0.0039560/30.0*(data_x[i]+data_x[i+1])/2.0
# Scale the I(q) by a Gaussian to simulate the wavelength peaks selected by the monochromator
flux = 1.0e6/(sigma*math.sqrt(2.0*math.pi))*math.exp(-(wl_i-wl)*(wl_i-wl)/(2.0*sigma*sigma))
# Multiply by beam profile
flux *= mon_y[i]
# Account for transmission
if not is_empty_run:
flux *= math.exp(-wl_i/2.0)
# Transmission detector
for j in range(n_pixels, 2*n_pixels):
det_pos = out_ws.getInstrument().getDetector(j).getPos()
r = math.sqrt(det_pos.Y()*det_pos.Y()+det_pos.X()*det_pos.X())
sigma = 0.01
scale = math.exp(-r*r/(2.0*sigma*sigma))
data_y = out_ws.dataY(j)
data_y[i] += int(scale*flux)
data_e = out_ws.dataE(j)
data_e[i] = math.sqrt(data_e[i]*data_e[i]+scale*scale*flux*flux)
# If we have an empty run, there's no need to fill the main detector
if is_empty_run:
continue
# Compute I(q) and store the results
q_i = q*wl/wl_i
i_q = self._sphere_model(q_i, scale=flux)
for j in range(n_pixels):
det_pos = out_ws.getInstrument().getDetector(j).getPos()
r = math.sqrt(det_pos.Y()*det_pos.Y()+det_pos.X()*det_pos.X())
sigma = 0.01
scale = math.exp(-r*r/(2.0*sigma*sigma))
data_y = out_ws.dataY(j)
data_y[i] += int(i_q*scale)
data_e = out_ws.dataE(j)
data_e[i] = math.sqrt(data_e[i]*data_e[i]+i_q*i_q*scale*scale)
self.setProperty("OutputWorkspace", out_ws)
self.setProperty("MonitorWorkspace", mon_ws)
def _sphere_model(self, q, scale):
"""
Return I(q) for a sphere model
@param q: q-value
@param scale: normalization factor to give I(q)
"""
radius = self.getProperty("SphereRadius").value
bck = self.getProperty("Background").value
qr = q*radius
bes = 3.0*(math.sin(qr)-qr*math.cos(qr))/(qr*qr*qr) if not qr == 0.0 else 1.0
vol = 4.0*math.pi/3.0*radius*radius*radius
f2 = vol*bes*bes*1.0e-6
return scale*f2+bck
#############################################################################################
# Register the algorithm with Mantid's factory so it is available by name.
AlgorithmFactory.subscribe(USANSSimulation())
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/USANSSimulation.py
|
Python
|
gpl-3.0
| 6,752
|
[
"Gaussian"
] |
ba75c76310c1e14518c959ccd0acd39d7eb9df2272bd1a7edbc6ba42468c40fe
|
from coyote_framework.testing.coyote_test import CoyoteTest
from example.example_app.config.example_config import ExampleConfig
from coyote_framework.drivers.coyote_driverfactory import driver_context
from example.example_app.page_objects.example_home_page import ExampleHomePage
__author__ = 'matt'
class TestClickButton_ShowText(CoyoteTest):
    """
    This test demonstrates how the webdriverwrapper waits can be used.
    First, we visit a page w/ a hidden div. We try to confirm that we can see
    the text, but it's not yet visible, so this fails. Then, we click a button
    which makes the text visible, and then successfully confirm we can see the text.
    """

    def setUp(self):
        """Load the example app configuration before each test."""
        super(TestClickButton_ShowText, self).setUp()
        self.config = ExampleConfig()

    def test_main(self):
        """Visit the page, fail to see the hidden text, reveal it, see it."""
        with driver_context() as driver:
            # Visit the test page
            test = self.config.get('web_hostname')
            driver.visit(test)

            # Initialize the page object
            hp = ExampleHomePage(driver_wrapper=driver)
            driver.assertion.assert_true(hp.is_page_loaded())

            # Demonstrate a failure of confirm_text_visible: we can't see it,
            # because it's currently set to display: none;
            # Catch Exception rather than a bare `except:`, which would also
            # swallow KeyboardInterrupt/SystemExit.
            try:
                hp.confirm_text_visible(timeout=5)
            except Exception:
                print("Yep, can't see anything")

            # Click the button to expose the text, then try to read it again
            # This call succeeds.
            hp.click_see_me_button()
            hp.confirm_text_visible(timeout=5)
|
Shapeways/coyote_framework
|
example/example_tests/TestClickButton_ShowText.py
|
Python
|
mit
| 1,605
|
[
"VisIt"
] |
5715f9539506c2b87deeb5aa0f07fdd9e752e12a346f6733076b1c65248a9343
|
# This file is part of BHMM (Bayesian Hidden Markov Models).
#
# Copyright (c) 2016 Frank Noe (Freie Universitaet Berlin)
# and John D. Chodera (Memorial Sloan-Kettering Cancer Center, New York)
#
# BHMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from six.moves import range
import numpy as np
from bhmm.output_models.impl_c import discrete as dc
from bhmm.output_models import OutputModel
from bhmm.util import config
class DiscreteOutputModel(OutputModel):
    """
    HMM output probability model using discrete symbols. This is the "standard" HMM that is classically used in the
    literature
    """

    def __init__(self, B, prior=None, ignore_outliers=False):
        """
        Create a discrete output model.

        Parameters
        ----------
        B : ndarray((N, M), dtype=float)
            output probability matrix using N hidden states and M observable symbols.
            This matrix needs to be row-stochastic.
        prior : None or broadcastable to ndarray((N, M), dtype=float)
            Prior for the initial distribution of the HMM.
            Currently implements the Dirichlet prior that is conjugate to the
            Dirichlet distribution of :math:`b_i`. :math:`b_i` is sampled from:
            .. math:
                b_i \sim \prod_j b_{ij}_i^{a_{ij} + n_{ij} - 1}
            where :math:`n_{ij}` are the number of times symbol :math:`j` has
            been observed when the hidden trajectory was in state :math:`i`
            and :math:`a_{ij}` is the prior count.
            The default prior=None corresponds to :math:`a_{ij} = 0`.
            This option ensures coincidence between sample mean an MLE.

        Examples
        --------

        Create an observation model.

        >>> import numpy as np
        >>> B = np.array([[0.5, 0.5], [0.1, 0.9]])
        >>> output_model = DiscreteOutputModel(B)

        """
        self._output_probabilities = np.array(B, dtype=config.dtype)
        nstates, self._nsymbols = self._output_probabilities.shape[0], self._output_probabilities.shape[1]
        # superclass constructor
        OutputModel.__init__(self, nstates, ignore_outliers=ignore_outliers)
        # test if row-stochastic
        assert np.allclose(self._output_probabilities.sum(axis=1), np.ones(self.nstates)), 'B is no stochastic matrix'
        # set prior matrix
        if prior is None:
            prior = np.zeros((nstates, self._nsymbols))
        else:
            prior = np.zeros((nstates, self._nsymbols)) + prior  # will fail if not broadcastable
        self.prior = prior

    def __repr__(self):
        r""" String representation of this output model

        >>> import numpy as np
        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
        >>> print(repr(output_model))
        DiscreteOutputModel(array([[ 0.5,  0.5],
               [ 0.1,  0.9]]))

        """
        return "DiscreteOutputModel(%s)" % repr(self._output_probabilities)

    def __str__(self):
        r""" Human-readable string representation of this output model

        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
        >>> print(str(output_model))
        --------------------------------------------------------------------------------
        DiscreteOutputModel
        nstates: 2
        nsymbols: 2
        B[0] = [ 0.5  0.5]
        B[1] = [ 0.1  0.9]
        --------------------------------------------------------------------------------

        """
        output = "--------------------------------------------------------------------------------\n"
        output += "DiscreteOutputModel\n"
        output += "nstates: %d\n" % self.nstates
        output += "nsymbols: %d\n" % self._nsymbols
        for i in range(self.nstates):
            output += "B["+str(i)+"] = %s\n" % str(self._output_probabilities[i])
        output += "--------------------------------------------------------------------------------"
        return output

    @property
    def model_type(self):
        r""" Model type. Returns 'discrete' """
        return 'discrete'

    @property
    def output_probabilities(self):
        r""" Row-stochastic (n,m) output probability matrix from n hidden states to m symbols. """
        return self._output_probabilities

    @property
    def nsymbols(self):
        r""" Number of symbols, or observable output states """
        return self._nsymbols

    def sub_output_model(self, states):
        r""" Output model restricted to the given subset of hidden states. """
        return DiscreteOutputModel(self._output_probabilities[states])

    def p_obs(self, obs, out=None):
        """
        Returns the output probabilities for an entire trajectory and all hidden states

        Parameters
        ----------
        obs : ndarray((T), dtype=int)
            a discrete trajectory of length T
        out : ndarray, optional
            pre-allocated output array; must have at least T rows.

        Return
        ------
        p_o : ndarray (T,N)
            the probability of generating the symbol at time point t from any of the N hidden states

        """
        if out is None:
            out = self._output_probabilities[:, obs].T
            # out /= np.sum(out, axis=1)[:,None]
            return self._handle_outliers(out)
        else:
            if obs.shape[0] == out.shape[0]:
                np.copyto(out, self._output_probabilities[:, obs].T)
            elif obs.shape[0] < out.shape[0]:
                out[:obs.shape[0], :] = self._output_probabilities[:, obs].T
            else:
                raise ValueError('output array out is too small: '+str(out.shape[0])+' < '+str(obs.shape[0]))
            # out /= np.sum(out, axis=1)[:,None]
            return self._handle_outliers(out)

    def estimate(self, observations, weights):
        """
        Maximum likelihood estimation of output model given the observations and weights

        Parameters
        ----------
        observations : [ ndarray(T_k) ] with K elements
            A list of K observation trajectories, each having length T_k
        weights : [ ndarray(T_k, N) ] with K elements
            A list of K weight matrices, each having length T_k and containing the probability of any of the states in
            the given time step

        Examples
        --------

        Generate an observation model and samples from each state.

        >>> import numpy as np
        >>> ntrajectories = 3
        >>> nobs = 1000
        >>> B = np.array([[0.5,0.5],[0.1,0.9]])
        >>> output_model = DiscreteOutputModel(B)

        >>> from scipy import stats
        >>> nobs = 1000
        >>> obs = np.empty(nobs, dtype = object)
        >>> weights = np.empty(nobs, dtype = object)

        >>> gens = [stats.rv_discrete(values=(range(len(B[i])), B[i])) for i in range(B.shape[0])]
        >>> obs = [gens[i].rvs(size=nobs) for i in range(B.shape[0])]
        >>> weights = [np.zeros((nobs, B.shape[1])) for i in range(B.shape[0])]
        >>> for i in range(B.shape[0]): weights[i][:, i] = 1.0

        Update the observation model parameters my a maximum-likelihood fit.

        >>> output_model.estimate(obs, weights)

        """
        # sizes
        N, M = self._output_probabilities.shape
        K = len(observations)
        # initialize output probability matrix
        self._output_probabilities = np.zeros((N, M))
        # update output probability matrix (numerator)
        if self.__impl__ == self.__IMPL_C__:
            for k in range(K):
                dc.update_pout(observations[k], weights[k], self._output_probabilities, dtype=config.dtype)
        elif self.__impl__ == self.__IMPL_PYTHON__:
            for k in range(K):
                for o in range(M):
                    times = np.where(observations[k] == o)[0]
                    self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0)
        else:
            raise RuntimeError('Implementation '+str(self.__impl__)+' not available')
        # normalize
        self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]

    def sample(self, observations_by_state):
        """
        Sample a new set of distribution parameters given a sample of observations from the given state.
        The internal parameters are updated.

        Parameters
        ----------
        observations :  [ numpy.array with shape (N_k,) ] with nstates elements
            observations[k] are all observations associated with hidden state k

        Examples
        --------

        initialize output model

        >>> B = np.array([[0.5, 0.5], [0.1, 0.9]])
        >>> output_model = DiscreteOutputModel(B)

        sample given observation

        >>> obs = [[0, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1]]
        >>> output_model.sample(obs)

        """
        from numpy.random import dirichlet
        N, M = self._output_probabilities.shape  # nstates, nsymbols
        for i, obs_by_state in enumerate(observations_by_state):
            # count symbols found in data
            count = np.bincount(obs_by_state, minlength=M).astype(float)
            # add prior counts, then sample from the Dirichlet distribution
            count += self.prior[i]
            positive = count > 0
            # symbols with zero total count cannot be sampled from a
            # Dirichlet, so their output probabilities are left as they are.
            self._output_probabilities[i, positive] = dirichlet(count[positive])

    def generate_observation_from_state(self, state_index):
        """
        Generate a single synthetic observation data from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.

        Returns
        -------
        observation : int
            A single observation from the given state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))

        Generate sample from each state.

        >>> observation = output_model.generate_observation_from_state(0)

        """
        # generate random generator (note that this is inefficient - better use one of the next functions
        import scipy.stats
        gen = scipy.stats.rv_discrete(values=(range(len(self._output_probabilities[state_index])),
                                              self._output_probabilities[state_index]))
        # BUGFIX: the sample was previously computed but never returned,
        # so this method always returned None.
        return gen.rvs(size=1)[0]

    def generate_observations_from_state(self, state_index, nobs):
        """
        Generate synthetic observation data from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.
        nobs : int
            The number of observations to generate.

        Returns
        -------
        observations : numpy.array of shape(nobs,) with type dtype
            A sample of `nobs` observations from the specified state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))

        Generate sample from each state.

        >>> observations = [output_model.generate_observations_from_state(state_index, nobs=100) for state_index in range(output_model.nstates)]

        """
        import scipy.stats
        gen = scipy.stats.rv_discrete(values=(range(self._nsymbols), self._output_probabilities[state_index]))
        # BUGFIX: the samples were previously computed but never returned,
        # so this method always returned None.
        return gen.rvs(size=nobs)

    def generate_observation_trajectory(self, s_t, dtype=None):
        """
        Generate synthetic observation data from a given state sequence.

        Parameters
        ----------
        s_t : numpy.array with shape (T,) of int type
            s_t[t] is the hidden state sampled at time t
        dtype : numpy.dtype, optional, default=None
            The datatype to return the resulting observations in. If None, will select int32.

        Returns
        -------
        o_t : numpy.array with shape (T,) of type dtype
            o_t[t] is the observation associated with state s_t[t]

        Examples
        --------

        Generate an observation model and synthetic state trajectory.

        >>> nobs = 1000
        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))
        >>> s_t = np.random.randint(0, output_model.nstates, size=[nobs])

        Generate a synthetic trajectory

        >>> o_t = output_model.generate_observation_trajectory(s_t)

        """
        if dtype is None:
            dtype = np.int32

        # Determine number of samples to generate.
        T = s_t.shape[0]
        nsymbols = self._output_probabilities.shape[1]

        # Validate the state sequence before sampling from it.
        if (s_t.max() >= self.nstates) or (s_t.min() < 0):
            msg = ''
            msg += 's_t = %s\n' % s_t
            msg += 's_t.min() = %d, s_t.max() = %d\n' % (s_t.min(), s_t.max())
            msg += 's_t.argmax = %d\n' % s_t.argmax()
            msg += 'self.nstates = %d\n' % self.nstates
            msg += 's_t is out of bounds.\n'
            raise Exception(msg)

        # generate random generators
        # import scipy.stats
        # gens = [scipy.stats.rv_discrete(values=(range(len(self.B[state_index])), self.B[state_index]))
        #         for state_index in range(self.B.shape[0])]
        # o_t = np.zeros([T], dtype=dtype)
        # for t in range(T):
        #     s = s_t[t]
        #     o_t[t] = gens[s].rvs(size=1)
        # return o_t

        o_t = np.zeros([T], dtype=dtype)
        for t in range(T):
            s = s_t[t]
            o_t[t] = np.random.choice(nsymbols, p=self._output_probabilities[s, :])
        return o_t
|
bhmm/bhmm
|
bhmm/output_models/discrete.py
|
Python
|
lgpl-3.0
| 14,141
|
[
"Gaussian"
] |
90352bc61e7a07d6193b7a0d182dd38e312d7088d7688c541089dd7088ee7b5a
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# Root URL configuration: static pages, admin, user management, allauth,
# plus media file serving during development.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),

    # User management
    url(r'^users/', include('teamhealth_io.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        # Prepend so the toolbar's URLs take precedence over catch-alls.
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
|
ochronus/teamhealth.io
|
config/urls.py
|
Python
|
mit
| 1,553
|
[
"VisIt"
] |
3eda8c4eb236021c0cfe4d6987f9b9d602b5cf0a15c860e5fd36e9bf81d05300
|
#! /usr/bin/env python
#This script check all pdb files using pdb2gmx
# check_structures_gromacs.py <PDB path> <GROMACS programs path>
# Example: python check_structures_gromacs.py /home/faccioli/Execute/1VII/ /home/faccioli/Programs/gmx-4.6.5/no_mpi/bin/
import sys
import os
from subprocess import Popen, PIPE
import shutil
#Log file of structures NOT accepted by pdb2gmx
log_file_structures_not_accepted_by_pdb2gmx='structures_not_accepted_by_pdb2gmx.log'
def get_files_pdb(mypath):
    """Recursively collect the full paths of all '.pdb' files under *mypath*.

    Parameters:
        mypath: root directory to walk.
    Returns:
        list of absolute/relative paths (joined from the walk root) of every
        file whose name ends in '.pdb'.
    """
    # Renamed the accumulator: the original `only_tar_file` was a misleading
    # leftover from a tar-file variant of this helper.
    pdb_files = []
    for root, dirs, files in os.walk(mypath):
        for name in files:
            if name.endswith(".pdb"):
                pdb_files.append(os.path.join(root, name))
    return pdb_files
def get_all_directories(mypath):
    """Return the names (not paths) of every directory below *mypath*."""
    found = []
    for _root, dirs, _files in os.walk(mypath):
        found.extend(dirs)
    return found
def delete_check_files():
    """Remove pdb2gmx's temporary outputs from the current directory, if any."""
    for leftover in ('check.top', 'posre.itp'):
        if os.path.isfile(leftover):
            os.remove(leftover)
def rename_atoms_structue_by_pdb2gmx(pdbfile, gmx_path, forcefield='amber99sb-ildn'):
    """Run GROMACS pdb2gmx on *pdbfile*, overwriting it in place.

    pdbfile:    path to the PDB structure to process (used as -f and -o).
    gmx_path:   directory containing the pdb2gmx executable.
    forcefield: force field name passed via -ff.

    The topology ('check.top') and restraint ('posre.itp') side products are
    deleted before and after the run.  pdb2gmx's output is captured and
    discarded, so failures are silent here.
    """
    delete_check_files()
    program = os.path.join(gmx_path, "pdb2gmx")
    process = Popen([program, '-f', pdbfile, '-o', pdbfile, '-p', 'check.top', '-water', 'none', '-ff', forcefield], stdout=PIPE, stderr=PIPE)
    # Wait for pdb2gmx to finish; stdout/stderr are consumed to avoid deadlock.
    stdout, stderr = process.communicate()
    delete_check_files()
def main():
    """Run pdb2gmx over every PDB file found under the directory in argv[1].

    Usage: check_structures_gromacs.py <PDB path> <GROMACS programs path>
    """
    # Avoid GROMACS backup files
    os.environ["GMX_MAXBACKUP"] = "-1"
    # path of pdb files
    path_pdb = sys.argv[1]
    # GROMACS program path
    gmx_path = sys.argv[2]
    # Remove log file, if exists
    if os.path.isfile(log_file_structures_not_accepted_by_pdb2gmx):
        os.remove(log_file_structures_not_accepted_by_pdb2gmx)
    # Get pdb files in ALL subdirectory
    my_pdb_files = get_files_pdb(path_pdb)
    for pdb_file in my_pdb_files:
        # Python 2 print statement; this script targets Python 2.
        print pdb_file
        rename_atoms_structue_by_pdb2gmx(pdb_file, gmx_path)
if __name__ == "__main__":
    # Guard the entry point so the module can be imported (e.g. for testing)
    # without immediately running the script.
    main()
|
rodrigofaccioli/2pg_cartesian
|
scripts/checking/rename_atoms.py
|
Python
|
apache-2.0
| 1,895
|
[
"Gromacs"
] |
14ecbd285c9ba02a565ae4ca530d75cb00c69dd1d6f3e719c6e28541c4a22efa
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
from functools import partial
from collections import namedtuple
from time import sleep
from platform import python_implementation
from powerline.segments import shell, tmux, pdb, i3wm
from powerline.lib.vcs import get_fallback_create_watcher
from powerline.lib.unicode import out_u
import tests.modules.vim as vim_module
from tests.modules.lib import (Args, urllib_read, replace_attr, new_module,
replace_module_module, replace_env, Pl)
from tests.modules import TestCase, SkipTest
def get_dummy_guess(**kwargs):
    """Build a fake VCS ``guess`` function for the segment tests.

    The returned callable mimics powerline's vcs.guess(path, create_watcher):
    it returns an Args object whose branch() is the basename of *path*.
    A 'directory' attribute defaults to the path itself unless supplied
    explicitly via **kwargs.
    """
    if 'directory' in kwargs:
        def guess(path, create_watcher):
            return Args(branch=lambda: out_u(os.path.basename(path)), **kwargs)
    else:
        def guess(path, create_watcher):
            return Args(branch=lambda: out_u(os.path.basename(path)), directory=path, **kwargs)
    return guess
class TestShell(TestCase):
def test_last_status(self):
    """shell.last_status: renders a failing exit code (or signal name) with
    the exit_fail group, and returns None for success (0) or no status."""
    pl = Pl()
    segment_info = {'args': Args(last_exit_code=10)}
    self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
        {'contents': '10', 'highlight_groups': ['exit_fail']}
    ])
    # Exit code 0 and a missing exit code both yield no segment.
    segment_info['args'].last_exit_code = 0
    self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
    segment_info['args'].last_exit_code = None
    self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), None)
    # String statuses (signal names) are passed through as failures.
    segment_info['args'].last_exit_code = 'sigsegv'
    self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
        {'contents': 'sigsegv', 'highlight_groups': ['exit_fail']}
    ])
    segment_info['args'].last_exit_code = 'sigsegv+core'
    self.assertEqual(shell.last_status(pl=pl, segment_info=segment_info), [
        {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail']}
    ])
def test_last_pipe_status(self):
    """shell.last_pipe_status: one segment per pipeline member when any
    member failed; None when the whole pipeline succeeded; falls back to
    last_exit_code when the pipe status list is empty."""
    pl = Pl()
    segment_info = {'args': Args(last_pipe_status=[], last_exit_code=0)}
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
    # All-zero pipelines (any length) render nothing.
    segment_info['args'].last_pipe_status = [0, 0, 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
    segment_info['args'].last_pipe_status = [0, 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
    segment_info['args'].last_pipe_status = [0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), None)
    # A failure anywhere in the pipeline renders every member's status.
    segment_info['args'].last_pipe_status = [0, 2, 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': '2', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
    ])
    segment_info['args'].last_pipe_status = [2, 0, 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '2', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
    ])
    segment_info['args'].last_pipe_status = [0, 0, 2]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': '2', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
    ])
    segment_info['args'].last_pipe_status = [2]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '2', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
    ])
    # Signal names count as failures, same as in last_status.
    segment_info['args'].last_pipe_status = [0, 'sigsegv', 'sigsegv+core']
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': 'sigsegv', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
        {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True}
    ])
    segment_info['args'].last_pipe_status = [0, 'sigsegv', 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': 'sigsegv', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True}
    ])
    segment_info['args'].last_pipe_status = [0, 'sigsegv+core', 0]
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True},
        {'contents': 'sigsegv+core', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
        {'contents': '0', 'highlight_groups': ['exit_success'], 'draw_inner_divider': True}
    ])
    # Empty pipe status list: falls back to last_exit_code.
    segment_info['args'].last_pipe_status = []
    segment_info['args'].last_exit_code = 5
    self.assertEqual(shell.last_pipe_status(pl=pl, segment_info=segment_info), [
        {'contents': '5', 'highlight_groups': ['exit_fail'], 'draw_inner_divider': True},
    ])
def test_jobnum(self):
    """shell.jobnum: hides a zero job count unless show_zero=True; non-zero
    counts are always rendered as a plain string."""
    pl = Pl()
    segment_info = {'args': Args(jobnum=0)}
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), None)
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), None)
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '0')
    segment_info = {'args': Args(jobnum=1)}
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info), '1')
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=False), '1')
    self.assertEqual(shell.jobnum(pl=pl, segment_info=segment_info, show_zero=True), '1')
def test_continuation(self):
    """shell.continuation: renders the shell's continuation (PS2) parser
    state; exercises omit_cmdsubst, right_align and renames options."""
    pl = Pl()
    # No parser state: a single empty auto-width segment.
    self.assertEqual(shell.continuation(pl=pl, segment_info={}), [{
        'contents': '',
        'width': 'auto',
        'highlight_groups': ['continuation:current', 'continuation'],
    }])
    segment_info = {'parser_state': 'if cmdsubst'}
    # 'cmdsubst' is omitted by default; the last remaining word is current.
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
        {
            'contents': 'if',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'l',
        },
    ])
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, right_align=True), [
        {
            'contents': 'if',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'r',
        },
    ])
    # omit_cmdsubst=False keeps 'cmdsubst' as the current (last) segment.
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False), [
        {
            'contents': 'if',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation'],
        },
        {
            'contents': 'cmdsubst',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'l',
        },
    ])
    # With right_align the auto-width spacer moves to the first segment.
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=False, right_align=True), [
        {
            'contents': 'if',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation'],
            'width': 'auto',
            'align': 'r',
        },
        {
            'contents': 'cmdsubst',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
        },
    ])
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True), [
        {
            'contents': 'if',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'r',
        },
    ])
    # renames maps a parser-state word to replacement text...
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': 'IF'}), [
        {
            'contents': 'IF',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'r',
        },
    ])
    # ...and a None rename drops the word entirely.
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info, omit_cmdsubst=True, right_align=True, renames={'if': None}), [
        {
            'contents': '',
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'r',
        },
    ])
    # Repeated words each get their own segment; only the last is current.
    segment_info = {'parser_state': 'then then then cmdsubst'}
    self.assertEqual(shell.continuation(pl=pl, segment_info=segment_info), [
        {
            'contents': 'then',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation'],
        },
        {
            'contents': 'then',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation'],
        },
        {
            'contents': 'then',
            'draw_inner_divider': True,
            'highlight_groups': ['continuation:current', 'continuation'],
            'width': 'auto',
            'align': 'l',
        },
    ])
def test_cwd(self):
    """Test the shell ``cwd`` segment against a fake ``os`` module.

    Covers: plain path splitting, home-directory collapsing,
    ``shortened_path`` overrides, ``dir_limit_depth`` with the various
    ``ellipsis`` settings, ``use_path_separator`` mode, short and root
    paths, and error propagation from ``getcwd``.
    """
    # Minimal fake os module exposing only what the segment uses.
    new_os = new_module('os', path=os.path, sep='/')
    pl = Pl()
    # One-element list so the closure below can be retargeted per case.
    cwd = [None]
    def getcwd():
        wd = cwd[0]
        if isinstance(wd, Exception):
            # Storing an exception simulates os.getcwd() failing
            # (e.g. the current directory was deleted).
            raise wd
        else:
            return wd
    segment_info = {'getcwd': getcwd, 'home': None}
    with replace_attr(shell, 'os', new_os):
        cwd[0] = '/abc/def/ghi/foo/bar'
        # No home set: one segment per path component, root first,
        # with the last component highlighted as the current folder.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
            {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        # Home prefix collapses to '~'.
        segment_info['home'] = '/abc/def/ghi'
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
            {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        # A shell-provided shortened_path takes precedence over getcwd…
        segment_info.update(shortened_path='~foo/ghi')
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info), [
            {'contents': '~foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        # …unless explicitly disabled.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_shortened_path=False), [
            {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        segment_info.pop('shortened_path')
        # dir_limit_depth=3 fits the '~'-collapsed path exactly: no ellipsis.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
            {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        # With shorten_home=False the full 5-component path is truncated instead.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
            {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        # Default, custom and suppressed ellipsis.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
            {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
            {'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        # use_path_separator appends '/' to non-final segments and
        # disables the inner divider.
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
            {'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
            {'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        # dir_shorten_len truncates intermediate components ('foo' -> 'fo').
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
            {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
            {'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
            {'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
            {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
        ])
        # Short absolute path outside home.
        cwd[0] = '/etc'
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
            {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
            {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
            {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
            {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        # Filesystem root yields a single highlighted segment.
        cwd[0] = '/'
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
            {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
            {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
        ])
        # OSError with errno == ENOENT (2) is handled as "[not found]"…
        ose = OSError()
        ose.errno = 2
        cwd[0] = ose
        self.assertEqual(shell.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
            {'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_groups': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
        ])
        # …but any other OSError (errno unset) or exception type propagates.
        cwd[0] = OSError()
        self.assertRaises(OSError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
        cwd[0] = ValueError()
        self.assertRaises(ValueError, shell.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
class TestTmux(TestCase):
    """Tests for the tmux ``attached_clients`` segment."""

    def test_attached_clients(self):
        """Count attached clients from faked ``tmux`` command output."""
        # Canned replies keyed by tmux sub-command; anything else maps
        # to None, exactly like the original if/elif chain did.
        canned_output = {
            'list-panes': 'session_name\n',
            'list-clients': (
                '/dev/pts/2: 0 [191x51 xterm-256color] (utf8)\n'
                '/dev/pts/3: 0 [191x51 xterm-256color] (utf8)'
            ),
        }
        def fake_get_tmux_output(pl, cmd, *args):
            return canned_output.get(cmd)
        pl = Pl()
        with replace_attr(tmux, 'get_tmux_output', fake_get_tmux_output):
            # Two clients are listed; with minimum=3 the segment hides itself.
            self.assertEqual(tmux.attached_clients(pl=pl), '2')
            self.assertEqual(tmux.attached_clients(pl=pl, minimum=3), None)
class TestCommon(TestCase):
    """Base class for common-segment test cases.

    Subclasses set ``module_name``; the corresponding
    ``powerline.segments.common.<module_name>`` module is imported once
    per class and exposed as ``cls.module``.
    """

    @classmethod
    def setUpClass(cls):
        # str() calls keep the name a native str on Python 2 as well.
        segment_module = str(cls.module_name)
        # __import__ returns the top-level 'powerline' package, so walk
        # down the attribute chain to reach the leaf module.
        top = __import__(str('powerline.segments.common.{0}'.format(segment_module)))
        cls.module = getattr(top.segments.common, segment_module)
class TestNet(TestCommon):
    """Tests for ``powerline.segments.common.net``."""
    module_name = 'net'

    def test_hostname(self):
        """``hostname`` segment: domain stripping and SSH-only display."""
        pl = Pl()
        with replace_env('SSH_CLIENT', '192.168.0.12 40921 22') as segment_info:
            # Inside an SSH session the hostname is always shown.
            with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'):
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc')
            with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'):
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), 'abc.mydomain')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), 'abc')
            # Without SSH_CLIENT, only_if_ssh=True must suppress the segment.
            segment_info['environ'].pop('SSH_CLIENT')
            with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc'):
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True), None)
            with replace_module_module(self.module, 'socket', gethostname=lambda: 'abc.mydomain'):
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info), 'abc.mydomain')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, exclude_domain=True), 'abc')
                self.assertEqual(self.module.hostname(pl=pl, segment_info=segment_info, only_if_ssh=True, exclude_domain=True), None)

    def test_external_ip(self):
        """``external_ip`` segment with a stubbed ``urllib_read``."""
        pl = Pl()
        with replace_attr(self.module, 'urllib_read', urllib_read):
            self.assertEqual(self.module.external_ip(pl=pl), [{'contents': '127.0.0.1', 'divider_highlight_group': 'background:divider'}])

    def test_internal_ip(self):
        """``internal_ip`` segment: interface selection and IPv4/IPv6 lookup."""
        try:
            import netifaces
        except ImportError:
            raise SkipTest('netifaces module is not available')
        pl = Pl()
        # Fake per-interface address tables as netifaces.ifaddresses()
        # would return them.
        addr = {
            'enp2s0': {
                netifaces.AF_INET: [{'addr': '192.168.100.200'}],
                netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777%enp2s0'}]
            },
            'lo': {
                netifaces.AF_INET: [{'addr': '127.0.0.1'}],
                netifaces.AF_INET6: [{'addr': '::1'}]
            },
            'teredo': {
                # IPv6-only interface: IPv4 queries against it yield None.
                netifaces.AF_INET6: [{'addr': 'feff::5446:5eff:fe5a:7777'}]
            },
        }
        interfaces = ['lo', 'enp2s0', 'teredo']
        with replace_module_module(
            self.module, 'netifaces',
            interfaces=(lambda: interfaces),
            ifaddresses=(lambda interface: addr[interface]),
            AF_INET=netifaces.AF_INET,
            AF_INET6=netifaces.AF_INET6,
        ):
            # Default / 'auto' prefers the non-loopback interface.
            self.assertEqual(self.module.internal_ip(pl=pl), '192.168.100.200')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='auto'), '192.168.100.200')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='lo'), '127.0.0.1')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo'), None)
            self.assertEqual(self.module.internal_ip(pl=pl, ipv=4), '192.168.100.200')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='auto', ipv=4), '192.168.100.200')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='lo', ipv=4), '127.0.0.1')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo', ipv=4), None)
            self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='auto', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='lo', ipv=6), '::1')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='teredo', ipv=6), 'feff::5446:5eff:fe5a:7777')
            # Mutate the shared list the lambda closes over: drop 'enp2s0'…
            interfaces[1:2] = ()
            self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), 'feff::5446:5eff:fe5a:7777')
            # …then 'teredo', falling back to loopback…
            interfaces[1:2] = ()
            self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), '::1')
            # …then no interfaces at all.
            interfaces[:] = ()
            self.assertEqual(self.module.internal_ip(pl=pl, ipv=6), None)
        gateways = {
            'default': {
                netifaces.AF_INET: ('192.168.100.1', 'enp2s0'),
                netifaces.AF_INET6: ('feff::5446:5eff:fe5a:0001', 'enp2s0')
            }
        }
        with replace_module_module(
            self.module, 'netifaces',
            interfaces=(lambda: interfaces),
            ifaddresses=(lambda interface: addr[interface]),
            gateways=(lambda: gateways),
            AF_INET=netifaces.AF_INET,
            AF_INET6=netifaces.AF_INET6,
        ):
            # default gateway has specified address family
            self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=4), '192.168.100.200')
            self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=6), 'feff::5446:5eff:fe5a:7777%enp2s0')
            # default gateway doesn't have specified address family
            gateways['default'] = {}
            self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=4), None)
            self.assertEqual(self.module.internal_ip(pl=pl, interface='default_gateway', ipv=6), None)

    def test_network_load(self):
        """``network_load`` threaded segment: formatting and gradients.

        The segment samples byte counters on a background thread, so the
        test polls its internal state before asserting.
        """
        # Initial counter source: always None (no data yet).
        def gb(interface):
            return None
        # Indirection cell so the counter source can be swapped mid-test.
        f = [gb]
        def _get_bytes(interface):
            return f[0](interface)
        pl = Pl()
        with replace_attr(self.module, '_get_bytes', _get_bytes):
            self.module.network_load.startup(pl=pl)
            try:
                self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
                sleep(self.module.network_load.interval)
                self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
                # Wait for the background thread to record a 'prev' sample.
                while 'prev' not in self.module.network_load.interfaces.get('eth0', {}):
                    sleep(0.1)
                self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), None)
                # Now feed monotonically growing counters: +1200 B recv,
                # +2400 B sent per sample -> 1 KiB/s down, 2 KiB/s up.
                l = [0, 0]
                def gb2(interface):
                    l[0] += 1200
                    l[1] += 2400
                    return tuple(l)
                f[0] = gb2
                # Wait until a non-None sample has been recorded.
                while not self.module.network_load.interfaces.get('eth0', {}).get('prev', (None, None))[1]:
                    sleep(0.1)
                self.assertEqual(self.module.network_load(pl=pl, interface='eth0'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'DL 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'UL 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
                ])
                # Custom recv/sent format strings.
                self.assertEqual(self.module.network_load(pl=pl, interface='eth0', recv_format='r {value}', sent_format='s {value}'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
                ])
                # Custom unit suffix.
                self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', suffix='bps', interface='eth0'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 Kibps', 'highlight_groups': ['network_load_recv', 'network_load']},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 's 2 Kibps', 'highlight_groups': ['network_load_sent', 'network_load']},
                ])
                # SI (decimal) prefixes instead of binary.
                self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', si_prefix=True, interface='eth0'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 kB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 's 2 kB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
                ])
                # recv_max=0 pegs the recv gradient at 100.
                self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', recv_max=0, interface='eth0'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv_gradient', 'network_load_gradient', 'network_load_recv', 'network_load'], 'gradient_level': 100},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent', 'network_load']},
                ])
                # Fuzzy matcher: timing jitter makes the gradient level only
                # approximately 50 (2400 B/s of a 4800 B/s maximum).
                class ApproxEqual(object):
                    def __eq__(self, i):
                        return abs(i - 50.0) < 1
                self.assertEqual(self.module.network_load(pl=pl, recv_format='r {value}', sent_format='s {value}', sent_max=4800, interface='eth0'), [
                    {'divider_highlight_group': 'network_load:divider', 'contents': 'r 1 KiB/s', 'highlight_groups': ['network_load_recv', 'network_load']},
                    {'divider_highlight_group': 'network_load:divider', 'contents': 's 2 KiB/s', 'highlight_groups': ['network_load_sent_gradient', 'network_load_gradient', 'network_load_sent', 'network_load'], 'gradient_level': ApproxEqual()},
                ])
            finally:
                # Always stop the sampler thread, even if an assertion failed.
                self.module.network_load.shutdown()
class TestEnv(TestCommon):
    """Tests for ``powerline.segments.common.env``."""
    module_name = 'env'

    def test_user(self):
        """``user`` segment: username sources, hiding and superuser group."""
        new_os = new_module('os', getpid=lambda: 1)
        # Fake psutil.Process returning a fixed user@domain name.
        class Process(object):
            def __init__(self, pid):
                pass
            def username(self):
                return 'def@DOMAIN.COM'
            # Class bodies can read the enclosing function's scope, so
            # self here is test_user's self.  Mirror the real psutil API:
            # if its Process.username is a property, make ours one too.
            if hasattr(self.module, 'psutil') and not callable(self.module.psutil.Process.username):
                username = property(username)
        struct_passwd = namedtuple('struct_passwd', ('pw_name',))
        new_psutil = new_module('psutil', Process=Process)
        new_pwd = new_module('pwd', getpwuid=lambda uid: struct_passwd(pw_name='def@DOMAIN.COM'))
        new_getpass = new_module('getpass', getuser=lambda: 'def@DOMAIN.COM')
        pl = Pl()
        with replace_attr(self.module, 'pwd', new_pwd):
            with replace_attr(self.module, 'getpass', new_getpass):
                with replace_attr(self.module, 'os', new_os):
                    with replace_attr(self.module, 'psutil', new_psutil):
                        # Non-root user (euid 5).
                        with replace_attr(self.module, '_geteuid', lambda: 5):
                            self.assertEqual(self.module.user(pl=pl), [
                                {'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
                            ])
                            # hide_user only hides an exact match.
                            self.assertEqual(self.module.user(pl=pl, hide_user='abc'), [
                                {'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
                            ])
                            self.assertEqual(self.module.user(pl=pl, hide_domain=False), [
                                {'contents': 'def@DOMAIN.COM', 'highlight_groups': ['user']}
                            ])
                            self.assertEqual(self.module.user(pl=pl, hide_user='def@DOMAIN.COM'), None)
                            # hide_domain strips everything after '@'.
                            self.assertEqual(self.module.user(pl=pl, hide_domain=True), [
                                {'contents': 'def', 'highlight_groups': ['user']}
                            ])
                        # Root (euid 0) adds the superuser highlight group.
                        with replace_attr(self.module, '_geteuid', lambda: 0):
                            self.assertEqual(self.module.user(pl=pl), [
                                {'contents': 'def', 'highlight_groups': ['superuser', 'user']}
                            ])

    def test_cwd(self):
        """``cwd`` segment of the env module.

        Same scenarios as the shell variant above, minus the
        ``shortened_path`` cases (not supported here).
        """
        new_os = new_module('os', path=os.path, sep='/')
        pl = Pl()
        # One-element list so the closure below can be retargeted per case.
        cwd = [None]
        def getcwd():
            wd = cwd[0]
            if isinstance(wd, Exception):
                # Simulates os.getcwd() failing.
                raise wd
            else:
                return wd
        segment_info = {'getcwd': getcwd, 'home': None}
        with replace_attr(self.module, 'os', new_os):
            cwd[0] = '/abc/def/ghi/foo/bar'
            # No home set: one segment per component.
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'abc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'def', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # Home prefix collapses to '~'.
            segment_info['home'] = '/abc/def/ghi'
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # Depth limiting with/without home shortening.
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=3, shorten_home=False), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'ghi', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'foo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # Default, custom and suppressed ellipsis.
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1), [
                {'contents': '...', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis='---'), [
                {'contents': '---', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # use_path_separator appends '/' and disables inner dividers.
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True), [
                {'contents': '.../', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis='---'), [
                {'contents': '---/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=1, use_path_separator=True, ellipsis=None), [
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # dir_shorten_len truncates intermediate components.
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '~', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'fo', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2, use_path_separator=True), [
                {'contents': '~/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'fo/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'bar', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']}
            ])
            # Short absolute path outside home.
            cwd[0] = '/etc'
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False},
                {'contents': 'etc', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # Filesystem root.
            cwd[0] = '/'
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=False), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': True, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, use_path_separator=True), [
                {'contents': '/', 'divider_highlight_group': 'cwd:divider', 'draw_inner_divider': False, 'highlight_groups': ['cwd:current_folder', 'cwd']},
            ])
            # errno == ENOENT (2) -> "[not found]"; other errors propagate.
            ose = OSError()
            ose.errno = 2
            cwd[0] = ose
            self.assertEqual(self.module.cwd(pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2), [
                {'contents': '[not found]', 'divider_highlight_group': 'cwd:divider', 'highlight_groups': ['cwd:current_folder', 'cwd'], 'draw_inner_divider': True}
            ])
            cwd[0] = OSError()
            self.assertRaises(OSError, self.module.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)
            cwd[0] = ValueError()
            self.assertRaises(ValueError, self.module.cwd, pl=pl, segment_info=segment_info, dir_limit_depth=2, dir_shorten_len=2)

    def test_virtualenv(self):
        """``virtualenv`` segment: VIRTUAL_ENV vs CONDA_DEFAULT_ENV precedence."""
        pl = Pl()
        # Only VIRTUAL_ENV set: last path component is reported.
        with replace_env('VIRTUAL_ENV', '/abc/def/ghi') as segment_info:
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
            segment_info['environ'].pop('VIRTUAL_ENV')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
        # Only CONDA_DEFAULT_ENV set: used verbatim.
        with replace_env('CONDA_DEFAULT_ENV', 'foo') as segment_info:
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'foo')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), 'foo')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
            segment_info['environ'].pop('CONDA_DEFAULT_ENV')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
        # Both set: VIRTUAL_ENV wins unless ignore_venv is given.
        with replace_env('CONDA_DEFAULT_ENV', 'foo', environ={'VIRTUAL_ENV': '/sbc/def/ghi'}) as segment_info:
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), 'foo')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)
            segment_info['environ'].pop('CONDA_DEFAULT_ENV')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_conda=True), 'ghi')
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True), None)
            self.assertEqual(self.module.virtualenv(pl=pl, segment_info=segment_info, ignore_venv=True, ignore_conda=True), None)

    def test_environment(self):
        """``environment`` segment: echo a variable, None when unset."""
        pl = Pl()
        variable = 'FOO'
        value = 'bar'
        with replace_env(variable, value) as segment_info:
            self.assertEqual(self.module.environment(pl=pl, segment_info=segment_info, variable=variable), value)
            segment_info['environ'].pop(variable)
            self.assertEqual(self.module.environment(pl=pl, segment_info=segment_info, variable=variable), None)
class TestVcs(TestCommon):
    """Tests for ``powerline.segments.common.vcs``."""
    module_name = 'vcs'

    def test_branch(self):
        """``branch`` segment: status colouring and ignore_statuses filtering."""
        pl = Pl()
        create_watcher = get_fallback_create_watcher()
        segment_info = {'getcwd': os.getcwd}
        # Bind the fixed arguments once; each call below only varies options.
        branch = partial(self.module.branch, pl=pl, create_watcher=create_watcher)
        # Clean tree (status None).
        with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: None, directory='/tmp/tests')):
            with replace_attr(self.module, 'tree_status', lambda repo, pl: None):
                self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
                    'highlight_groups': ['branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
                self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
                    'contents': 'tests',
                    'highlight_groups': ['branch_clean', 'branch'],
                    'divider_highlight_group': None
                }])
        # Dirty tree (status 'D ').
        with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: 'D ', directory='/tmp/tests')):
            with replace_attr(self.module, 'tree_status', lambda repo, pl: 'D '):
                self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
                    'highlight_groups': ['branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
                self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
                    'contents': 'tests',
                    'highlight_groups': ['branch_dirty', 'branch'],
                    'divider_highlight_group': None
                }])
                # Without status_colors the dirty state does not change output.
                self.assertEqual(branch(segment_info=segment_info, status_colors=False), [{
                    'highlight_groups': ['branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
        # No repository detected at all: segment yields nothing.
        with replace_attr(self.module, 'guess', lambda path, create_watcher: None):
            self.assertEqual(branch(segment_info=segment_info, status_colors=False), None)
        # Untracked-only status 'U' combined with ignore_statuses.
        with replace_attr(self.module, 'guess', get_dummy_guess(status=lambda: 'U')):
            with replace_attr(self.module, 'tree_status', lambda repo, pl: 'U'):
                self.assertEqual(branch(segment_info=segment_info, status_colors=False, ignore_statuses=['U']), [{
                    'highlight_groups': ['branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
                # Ignoring a different status string leaves the tree dirty.
                self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['DU']), [{
                    'highlight_groups': ['branch_dirty', 'branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
                self.assertEqual(branch(segment_info=segment_info, status_colors=True), [{
                    'highlight_groups': ['branch_dirty', 'branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])
                # Ignoring the actual status makes the branch count as clean.
                self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['U']), [{
                    'highlight_groups': ['branch_clean', 'branch'],
                    'contents': 'tests',
                    'divider_highlight_group': None
                }])

    def test_stash(self):
        """``stash`` segment: hidden at zero, otherwise shows the count."""
        pl = Pl()
        create_watcher = get_fallback_create_watcher()
        stash = partial(self.module.stash, pl=pl, create_watcher=create_watcher, segment_info={'getcwd': os.getcwd})
        # Helper: fake a repository reporting n stashed change sets.
        def forge_stash(n):
            return replace_attr(self.module, 'guess', get_dummy_guess(stash=lambda: n, directory='/tmp/tests'))
        with forge_stash(0):
            self.assertEqual(stash(), None)
        with forge_stash(1):
            self.assertEqual(stash(), [{
                'highlight_groups': ['stash'],
                'contents': '1',
                'divider_highlight_group': None
            }])
        with forge_stash(2):
            self.assertEqual(stash(), [{
                'highlight_groups': ['stash'],
                'contents': '2',
                'divider_highlight_group': None
            }])
class TestTime(TestCommon):
    """Tests for ``powerline.segments.common.time``."""
    module_name = 'time'

    def test_date(self):
        """``date`` segment: format pass-through, istime groups, unicode formats."""
        pl = Pl()
        # Fake datetime whose strftime echoes the format string, so the
        # assertions can pin the format actually requested.
        with replace_attr(self.module, 'datetime', Args(now=lambda: Args(strftime=lambda fmt: fmt))):
            self.assertEqual(self.module.date(pl=pl), [{'contents': '%Y-%m-%d', 'highlight_groups': ['date'], 'divider_highlight_group': None}])
            # istime=True adds the 'time' group and time divider.
            self.assertEqual(self.module.date(pl=pl, format='%H:%M', istime=True), [{'contents': '%H:%M', 'highlight_groups': ['time', 'date'], 'divider_highlight_group': 'time:divider'}])
            unicode_date = self.module.date(pl=pl, format='\u231a', istime=True)
            expected_unicode_date = [{'contents': '\u231a', 'highlight_groups': ['time', 'date'], 'divider_highlight_group': 'time:divider'}]
            # PyPy3 strftime cannot handle non-ASCII formats; skip rather
            # than fail there (see linked upstream issue).
            if python_implementation() == 'PyPy' and sys.version_info >= (3,):
                if unicode_date != expected_unicode_date:
                    raise SkipTest('Dates do not match, see https://bitbucket.org/pypy/pypy/issues/2161/pypy3-strftime-does-not-accept-unicode')
            self.assertEqual(unicode_date, expected_unicode_date)

    def test_fuzzy_time(self):
        """``fuzzy_time`` segment: wording at selected clock positions."""
        # Mutable fake "now"; hour/minute are tweaked between assertions.
        time = Args(hour=0, minute=45)
        pl = Pl()
        with replace_attr(self.module, 'datetime', Args(now=lambda: time)):
            self.assertEqual(self.module.fuzzy_time(pl=pl), 'quarter to one')
            time.hour = 23
            time.minute = 59
            self.assertEqual(self.module.fuzzy_time(pl=pl), 'round about midnight')
            time.minute = 33
            self.assertEqual(self.module.fuzzy_time(pl=pl), 'twenty-five to twelve')
            time.minute = 60
            self.assertEqual(self.module.fuzzy_time(pl=pl), 'twelve o\'clock')
            # unicode_text=False keeps plain ASCII hyphen/apostrophe…
            time.minute = 33
            self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=False), 'twenty-five to twelve')
            time.minute = 60
            self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=False), 'twelve o\'clock')
            # …while unicode_text=True switches to U+2010 hyphen and
            # U+2019 right single quote.
            time.minute = 33
            self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=True), 'twenty‐five to twelve')
            time.minute = 60
            self.assertEqual(self.module.fuzzy_time(pl=pl, unicode_text=True), 'twelve o’clock')
class TestSys(TestCommon):
	"""Tests for system-information segments: uptime, load average, CPU usage."""
	module_name = 'sys'
	def test_uptime(self):
		"""Uptime formatting and the shorten_len unit-count option."""
		pl = Pl()
		# 259200 s is exactly three days: zero-valued smaller units are dropped.
		with replace_attr(self.module, '_get_uptime', lambda: 259200):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '3d', 'divider_highlight_group': 'background:divider'}])
		# 93784 s = 1d 2h 3m 4s; the default keeps three units, shorten_len=4 keeps all.
		with replace_attr(self.module, '_get_uptime', lambda: 93784):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '1d 2h 3m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=4), [{'contents': '1d 2h 3m 4s', 'divider_highlight_group': 'background:divider'}])
		# 65536 s = 18h 12m 16s; shorten_len trims trailing units.
		with replace_attr(self.module, '_get_uptime', lambda: 65536):
			self.assertEqual(self.module.uptime(pl=pl), [{'contents': '18h 12m 16s', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=2), [{'contents': '18h 12m', 'divider_highlight_group': 'background:divider'}])
			self.assertEqual(self.module.uptime(pl=pl, shorten_len=1), [{'contents': '18h', 'divider_highlight_group': 'background:divider'}])
		# When uptime cannot be determined the segment returns nothing.
		def _get_uptime():
			raise NotImplementedError
		with replace_attr(self.module, '_get_uptime', _get_uptime):
			self.assertEqual(self.module.uptime(pl=pl), None)
	def test_system_load(self):
		"""Load-average segment with fake 1/5/15-minute averages and 2 CPUs."""
		pl = Pl()
		with replace_module_module(self.module, 'os', getloadavg=lambda: (7.5, 3.5, 1.5)):
			with replace_attr(self.module, '_cpu_count', lambda: 2):
				# NOTE(review): gradient levels appear to derive from the load
				# relative to the CPU count and the thresholds — confirm in segment code.
				self.assertEqual(self.module.system_load(pl=pl), [
					{'contents': '7.5 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '3.5 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0},
					{'contents': '1.5', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 0}
				])
				# Custom format and lowered thresholds shift the gradient levels.
				self.assertEqual(self.module.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1), [
					{'contents': '8 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '4 ', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
					{'contents': '2', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 75.0}
				])
				# short=True shows only the 1-minute average.
				self.assertEqual(self.module.system_load(pl=pl, short=True), [
					{'contents': '7.5', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
				])
				self.assertEqual(self.module.system_load(pl=pl, format='{avg:.0f}', threshold_good=0, threshold_bad=1, short=True), [
					{'contents': '8', 'highlight_groups': ['system_load_gradient', 'system_load'], 'divider_highlight_group': 'background:divider', 'gradient_level': 100},
				])
	def test_cpu_load_percent(self):
		"""CPU usage segment backed by a mocked psutil.cpu_percent."""
		# Skip (rather than fail) when the optional psutil dependency is absent.
		try:
			__import__('psutil')
		except ImportError as e:
			raise SkipTest('Failed to import psutil: {0}'.format(e))
		pl = Pl()
		with replace_module_module(self.module, 'psutil', cpu_percent=lambda **kwargs: 52.3):
			# Default format renders an integer percentage; gradient keeps the float.
			self.assertEqual(self.module.cpu_load_percent(pl=pl), [{
				'contents': '52%',
				'gradient_level': 52.3,
				'highlight_groups': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
			self.assertEqual(self.module.cpu_load_percent(pl=pl, format='{0:.1f}%'), [{
				'contents': '52.3%',
				'gradient_level': 52.3,
				'highlight_groups': ['cpu_load_percent_gradient', 'cpu_load_percent'],
			}])
class TestWthr(TestCommon):
	"""Tests for the wthr (weather) segment module."""
	module_name = 'wthr'
	def test_weather(self):
		"""Weather segment: units, gradient ranges, icons and location queries."""
		pl = Pl()
		# urllib_read is a canned-response fixture standing in for real HTTP.
		with replace_attr(self.module, 'urllib_read', urllib_read):
			# NOTE(review): 62.857… == (14 + 30) / 70 * 100, which suggests the
			# default gradient spans -30..40 °C — confirm against the segment code.
			self.assertEqual(self.module.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			# Custom gradient range: 14 °C inside 0..100 maps linearly to 14.0.
			self.assertEqual(self.module.weather(pl=pl, temp_coldest=0, temp_hottest=100), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 14.0}
			])
			# Temperature above the configured range clamps the gradient at 100.
			self.assertEqual(self.module.weather(pl=pl, temp_coldest=-100, temp_hottest=-50), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 100}
			])
			# Icons may be overridden per condition name; both names of the
			# reported condition ('blustery' and 'windy') are honoured.
			self.assertEqual(self.module.weather(pl=pl, icons={'blustery': 'o'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'o '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, icons={'windy': 'x'}), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'x '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			# Unit conversion: 14 °C is 57 °F / 287 K; gradient level is unchanged.
			self.assertEqual(self.module.weather(pl=pl, unit='F'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '57°F', 'gradient_level': 62.857142857142854}
			])
			self.assertEqual(self.module.weather(pl=pl, unit='K'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '287K', 'gradient_level': 62.857142857142854}
			])
			# Custom temperature format string.
			self.assertEqual(self.module.weather(pl=pl, temp_format='{temp:.1e}C'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '1.4e+01C', 'gradient_level': 62.857142857142854}
			])
		# Exercise the startup/shutdown lifecycle with explicit location queries.
		with replace_attr(self.module, 'urllib_read', urllib_read):
			self.module.weather.startup(pl=pl, location_query='Meppen,06,DE')
			self.assertEqual(self.module.weather(pl=pl), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_blustery', 'weather_condition_windy', 'weather_conditions', 'weather'], 'contents': 'WINDY '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '14°C', 'gradient_level': 62.857142857142854}
			])
			# A per-call location query overrides the startup one.
			self.assertEqual(self.module.weather(pl=pl, location_query='Moscow,RU'), [
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_condition_fair_night', 'weather_condition_night', 'weather_conditions', 'weather'], 'contents': 'NIGHT '},
				{'divider_highlight_group': 'background:divider', 'highlight_groups': ['weather_temp_gradient', 'weather_temp', 'weather'], 'contents': '9°C', 'gradient_level': 55.714285714285715}
			])
			self.module.weather.shutdown()
class TestI3WM(TestCase):
	"""Tests for i3 window manager segments: workspaces, workspace, mode, scratchpad."""
	@staticmethod
	def get_workspaces():
		# Fresh iterator over a fixed set of four workspaces spread across
		# three outputs with varied focused/urgent/visible combinations.
		return iter([
			{'name': '1: w1', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': False},
			{'name': '2: w2', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': True},
			{'name': '3: w3', 'output': 'HDMI1', 'focused': False, 'urgent': True, 'visible': True},
			{'name': '4: w4', 'output': 'DVI01', 'focused': True, 'urgent': True, 'visible': True},
		])
	def test_workspaces(self):
		"""Workspaces list: state highlight groups, only_show/strip/output filters."""
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Args(get_workspaces=self.get_workspaces)):
			segment_info = {}
			# Default: every workspace, with groups matching its state flags.
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# only_show=None behaves like the default (no filtering).
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=None), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# only_show filters to workspaces having at least one listed state.
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['focused', 'urgent']), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible']), [
				{'contents': '2: w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# strip=3 drops the leading 'N: ' prefix from each workspace name.
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
				{'contents': 'w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
				{'contents': 'w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# output= restricts the list to workspaces on that output.
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['focused', 'urgent'], output='DVI01'), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], output='HDMI1'), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3, output='LVDS1'), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
			])
			# segment_info['output'] is a fallback: an explicit output= wins,
			# and it is used when no explicit output is given.
			segment_info['output'] = 'LVDS1'
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], output='HDMI1'), [
				{'contents': '3: w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspaces(pl=pl, segment_info=segment_info, only_show=['visible'], strip=3), [
				{'contents': 'w2', 'highlight_groups': ['w_visible', 'workspace']},
			])
	def test_workspace(self):
		"""Single-workspace segment: lookup by name, fallback to focused/segment_info."""
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Args(get_workspaces=self.get_workspaces)):
			segment_info = {}
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='1: w1'), [
				{'contents': '1: w1', 'highlight_groups': ['workspace']},
			])
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='3: w3', strip=True), [
				{'contents': 'w3', 'highlight_groups': ['w_urgent', 'w_visible', 'workspace']},
			])
			# Unknown workspace name: segment is suppressed.
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='9: w9'), None)
			# Without a name the focused workspace is shown.
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			# segment_info['workspace'] is a fallback: an explicit workspace= wins.
			segment_info['workspace'] = next(self.get_workspaces())
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, workspace='4: w4'), [
				{'contents': '4: w4', 'highlight_groups': ['w_focused', 'w_urgent', 'w_visible', 'workspace']},
			])
			self.assertEqual(i3wm.workspace(pl=pl, segment_info=segment_info, strip=True), [
				{'contents': 'w1', 'highlight_groups': ['workspace']},
			])
	def test_mode(self):
		"""Binding-mode segment: hidden in default mode unless renamed via names."""
		pl = Pl()
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'default'}), None)
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'test'}), 'test')
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'default'}, names={'default': 'test'}), 'test')
		self.assertEqual(i3wm.mode(pl=pl, segment_info={'mode': 'test'}, names={'default': 'test', 'test': 't'}), 't')
	def test_scratchpad(self):
		"""Scratchpad segment for a mix of fresh/changed, urgent and focused windows."""
		# Conn mimics an i3 tree: six scratchpad-related windows with different
		# scratchpad_state/urgent/focused/workspace combinations.
		class Conn(object):
			def get_tree(self):
				return self
			def descendents(self):
				nodes_unfocused = [Args(focused = False)]
				nodes_focused = [Args(focused = True)]
				workspace_scratch = lambda: Args(name='__i3_scratch')
				workspace_noscratch = lambda: Args(name='2: www')
				return [
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_unfocused),
					Args(scratchpad_state='changed', urgent=True, workspace=workspace_noscratch, nodes=nodes_focused),
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_unfocused),
					Args(scratchpad_state=None, urgent=False, workspace=workspace_noscratch, nodes=nodes_unfocused),
					Args(scratchpad_state='fresh', urgent=False, workspace=workspace_scratch, nodes=nodes_focused),
					Args(scratchpad_state=None, urgent=True, workspace=workspace_noscratch, nodes=nodes_unfocused),
				]
		pl = Pl()
		with replace_attr(i3wm, 'get_i3_connection', lambda: Conn()):
			# Default icons: 'O' for windows on the scratchpad, 'X' otherwise.
			self.assertEqual(i3wm.scratchpad(pl=pl), [
				{'contents': 'O', 'highlight_groups': ['scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:focused', 'scratchpad:visible', 'scratchpad']},
				{'contents': 'O', 'highlight_groups': ['scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:visible', 'scratchpad']},
				{'contents': 'O', 'highlight_groups': ['scratchpad:focused', 'scratchpad']},
				{'contents': 'X', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:visible', 'scratchpad']},
			])
			# Icons are overridable per scratchpad state.
			self.assertEqual(i3wm.scratchpad(pl=pl, icons={'changed': '-', 'fresh': 'o'}), [
				{'contents': 'o', 'highlight_groups': ['scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:focused', 'scratchpad:visible', 'scratchpad']},
				{'contents': 'o', 'highlight_groups': ['scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:visible', 'scratchpad']},
				{'contents': 'o', 'highlight_groups': ['scratchpad:focused', 'scratchpad']},
				{'contents': '-', 'highlight_groups': ['scratchpad:urgent', 'scratchpad:visible', 'scratchpad']},
			])
class TestMail(TestCommon):
	"""Tests for the mail segment module."""
	module_name = 'mail'
	def test_email_imap_alert(self):
		"""Placeholder: the IMAP alert segment is not covered yet."""
		# TODO: exercise email_imap_alert against a mocked IMAP server.
class TestPlayers(TestCommon):
	"""Tests for the media-players segment module."""
	module_name = 'players'
	def test_now_playing(self):
		"""Placeholder: the now_playing segment is not covered yet."""
		# TODO: exercise now_playing against mocked player backends.
class TestBat(TestCommon):
	"""Tests for the bat (battery) segment module."""
	module_name = 'bat'
	@staticmethod
	def _fake_battery_status(ac_powered):
		"""Return a ``_get_battery_status`` replacement reporting 86% capacity.

		``ac_powered`` is passed through as the second tuple element (whether
		the machine currently runs on AC power).  Factored out here because
		the same stub used to be duplicated verbatim in three tests.
		"""
		def _get_battery_status(pl):
			return 86, ac_powered
		return _get_battery_status
	def test_battery(self):
		"""Battery segment: plain, custom-format and gamified renderings."""
		pl = Pl()
		with replace_attr(self.module, '_get_battery_status', self._fake_battery_status(False)):
			# Default format: percentage with gradient level 100 - capacity.
			self.assertEqual(self.module.battery(pl=pl), [{
				'contents': ' 86%',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			self.assertEqual(self.module.battery(pl=pl, format='{capacity:.2f}'), [{
				'contents': '0.86',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# steps only matters for gamified output; plain format is unchanged.
			self.assertEqual(self.module.battery(pl=pl, steps=7), [{
				'contents': ' 86%',
				'highlight_groups': ['battery_gradient', 'battery'],
				'gradient_level': 14,
			}])
			# Gamified: AC-state icon + full hearts + empty hearts.
			self.assertEqual(self.module.battery(pl=pl, gamify=True), [
				{
					'contents': ' ',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_offline', 'battery_ac_state', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': 'OOOO',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': 'O',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
			# Custom heart glyphs; steps may even be given as a string.
			self.assertEqual(self.module.battery(pl=pl, gamify=True, full_heart='+', empty_heart='-', steps='10'), [
				{
					'contents': ' ',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_offline', 'battery_ac_state', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': '++++++++',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_full', 'battery_gradient', 'battery'],
					'gradient_level': 0
				},
				{
					'contents': '--',
					'draw_inner_divider': False,
					'highlight_groups': ['battery_empty', 'battery_gradient', 'battery'],
					'gradient_level': 100
				}
			])
	def test_battery_with_ac_online(self):
		"""The online= string is prepended when running on AC power."""
		pl = Pl()
		with replace_attr(self.module, '_get_battery_status', self._fake_battery_status(True)):
			self.assertEqual(self.module.battery(pl=pl, online='C', offline=' '), [
				{
					'contents': 'C 86%',
					'highlight_groups': ['battery_gradient', 'battery'],
					'gradient_level': 14,
				}])
	def test_battery_with_ac_offline(self):
		"""The offline= string is prepended when discharging."""
		pl = Pl()
		with replace_attr(self.module, '_get_battery_status', self._fake_battery_status(False)):
			self.assertEqual(self.module.battery(pl=pl, online='C', offline=' '), [
				{
					'contents': ' 86%',
					'highlight_groups': ['battery_gradient', 'battery'],
					'gradient_level': 14,
				}])
class TestVim(TestCase):
def test_mode(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'NORMAL')
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'i': 'INS'}), 'NORMAL')
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'n': 'NORM'}), 'NORM')
with vim_module._with('mode', 'i') as segment_info:
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
with vim_module._with('mode', 'i\0') as segment_info:
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'INSERT')
with vim_module._with('mode', chr(ord('V') - 0x40)) as segment_info:
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info), 'V-BLCK')
self.assertEqual(self.vim.mode(pl=pl, segment_info=segment_info, override={'^V': 'VBLK'}), 'VBLK')
	def test_visual_range(self):
		"""Visual-range segment for block/line/character visual and select modes.

		The cursor is pinned at (0, 0) while ``vpos`` (the other end of the
		visual selection) is moved around.
		"""
		pl = Pl()
		vr = partial(self.vim.visual_range, pl=pl)
		vim_module.current.window.cursor = [0, 0]
		try:
			# Not in a visual mode: the segment is empty.
			with vim_module._with('mode', 'i') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '')
			# Blockwise visual ('^V'): width x height of the selected block.
			with vim_module._with('mode', '^V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			# Blockwise select ('^S') behaves like blockwise visual.
			with vim_module._with('mode', '^S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), '1 x 1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), '5 x 4')
			# Linewise visual ('V'): number of selected lines.
			with vim_module._with('mode', 'V') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			# Linewise select ('S') behaves like linewise visual.
			with vim_module._with('mode', 'S') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'L:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			# Charwise visual ('v'): single-line selections count columns;
			# multi-line ones fall back to the line count ('L:...').
			with vim_module._with('mode', 'v') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
			# Charwise select ('s') behaves like charwise visual.
			with vim_module._with('mode', 's') as segment_info:
				self.assertEqual(vr(segment_info=segment_info), 'C:1')
				with vim_module._with('vpos', line=5, col=5, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
				with vim_module._with('vpos', line=5, col=4, off=0):
					self.assertEqual(vr(segment_info=segment_info), 'L:5')
		finally:
			vim_module._close(1)
def test_modified_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), None)
segment_info['buffer'][0] = 'abc'
try:
self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), '+')
self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info, text='-'), '-')
finally:
vim_module._bw(segment_info['bufnr'])
def test_paste_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info), None)
with vim_module._with('options', paste=1):
self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info), 'PASTE')
self.assertEqual(self.vim.paste_indicator(pl=pl, segment_info=segment_info, text='P'), 'P')
def test_readonly_indicator(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info), None)
with vim_module._with('bufoptions', readonly=1):
self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info), 'RO')
self.assertEqual(self.vim.readonly_indicator(pl=pl, segment_info=segment_info, text='L'), 'L')
def test_file_scheme(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), None)
with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), None)
with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
self.assertEqual(self.vim.file_scheme(pl=pl, segment_info=segment_info), 'zipfile')
	def test_file_directory(self):
		"""Directory part of the buffer name: ~ substitution, byte paths, schemes."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), None)
		# Pin HOME so ~ substitution is predictable; restored on exit.
		with replace_env('HOME', '/home/foo', os.environ):
			with vim_module._with('buffer', '/tmp/’’/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/’’/')
			# Undecodable bytes in the path are rendered as <xx> escapes.
			with vim_module._with('buffer', b'/tmp/\xFF\xFF/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/<ff><ff>/')
			with vim_module._with('buffer', '/tmp/abc') as segment_info:
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/')
				# With HOME as a prefix the directory collapses to ~/.
				os.environ['HOME'] = '/tmp'
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '~/')
			with vim_module._with('buffer', 'zipfile:/tmp/abc.zip::abc/abc.vim') as segment_info:
				# remove_scheme controls the 'zipfile:' prefix; stripped by default.
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
				# ~ substitution is not applied to scheme'd paths.
				os.environ['HOME'] = '/tmp'
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=False), 'zipfile:/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info, remove_scheme=True), '/tmp/abc.zip::abc/')
				self.assertEqual(self.vim.file_directory(pl=pl, segment_info=segment_info), '/tmp/abc.zip::abc/')
def test_file_name(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), None)
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True), [
{'contents': '[No file]', 'highlight_groups': ['file_name_no_file', 'file_name']}
])
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info, display_no_file=True, no_file_text='X'), [
{'contents': 'X', 'highlight_groups': ['file_name_no_file', 'file_name']}
])
with vim_module._with('buffer', '/tmp/abc') as segment_info:
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), 'abc')
with vim_module._with('buffer', '/tmp/’’') as segment_info:
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), '’’')
with vim_module._with('buffer', b'/tmp/\xFF\xFF') as segment_info:
self.assertEqual(self.vim.file_name(pl=pl, segment_info=segment_info), '<ff><ff>')
def test_file_size(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.file_size(pl=pl, segment_info=segment_info), '0 B')
with vim_module._with(
'buffer',
os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'empty')
) as segment_info:
self.assertEqual(self.vim.file_size(pl=pl, segment_info=segment_info), '0 B')
def test_file_opts(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.file_format(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'unix'}
])
self.assertEqual(self.vim.file_encoding(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'utf-8'}
])
self.assertEqual(self.vim.file_type(pl=pl, segment_info=segment_info), None)
with vim_module._with('bufoptions', filetype='python'):
self.assertEqual(self.vim.file_type(pl=pl, segment_info=segment_info), [
{'divider_highlight_group': 'background:divider', 'contents': 'python'}
])
def test_window_title(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.window_title(pl=pl, segment_info=segment_info), None)
with vim_module._with('wvars', quickfix_title='Abc'):
self.assertEqual(self.vim.window_title(pl=pl, segment_info=segment_info), 'Abc')
def test_line_percent(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
segment_info['buffer'][0:-1] = [str(i) for i in range(100)]
try:
self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info), '1')
vim_module._set_cursor(50, 0)
self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info), '50')
self.assertEqual(self.vim.line_percent(pl=pl, segment_info=segment_info, gradient=True), [
{'contents': '50', 'highlight_groups': ['line_percent_gradient', 'line_percent'], 'gradient_level': 50 * 100.0 / 101}
])
finally:
vim_module._bw(segment_info['bufnr'])
def test_line_count(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
try:
self.assertEqual(self.vim.line_count(pl=pl, segment_info=segment_info), '100')
vim_module._set_cursor(50, 0)
self.assertEqual(self.vim.line_count(pl=pl, segment_info=segment_info), '100')
finally:
vim_module._bw(segment_info['bufnr'])
	def test_position(self):
		"""Position segment: percentage plus Top/Bottom/All labels and gradient."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		try:
			segment_info['buffer'][0:-1] = [str(i) for i in range(99)]
			vim_module._set_cursor(49, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info), '50%')
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': '50%', 'highlight_groups': ['position_gradient', 'position'], 'gradient_level': 50.0}
			])
			# At the very top the 'top' label replaces the percentage.
			vim_module._set_cursor(0, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info), 'Top')
			# Near the end the (here localized) 'bottom' label is used.
			vim_module._set_cursor(97, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Final')
			# Buffer fits entirely in the window: the 'all' label is used.
			segment_info['buffer'][0:-1] = [str(i) for i in range(2)]
			vim_module._set_cursor(0, 0)
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, position_strings={'top': 'Comienzo', 'bottom': 'Final', 'all': 'Todo'}), 'Todo')
			self.assertEqual(self.vim.position(pl=pl, segment_info=segment_info, gradient=True), [
				{'contents': 'All', 'highlight_groups': ['position_gradient', 'position'], 'gradient_level': 0.0}
			])
		finally:
			vim_module._bw(segment_info['bufnr'])
def test_cursor_current(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.line_current(pl=pl, segment_info=segment_info), '1')
self.assertEqual(self.vim.col_current(pl=pl, segment_info=segment_info), '1')
self.assertEqual(self.vim.virtcol_current(pl=pl, segment_info=segment_info), [{
'highlight_groups': ['virtcol_current_gradient', 'virtcol_current', 'col_current'], 'contents': '1', 'gradient_level': 100.0 / 80,
}])
self.assertEqual(self.vim.virtcol_current(pl=pl, segment_info=segment_info, gradient=False), [{
'highlight_groups': ['virtcol_current', 'col_current'], 'contents': '1',
}])
def test_modified_buffers(self):
pl = Pl()
self.assertEqual(self.vim.modified_buffers(pl=pl), None)
	def test_branch(self):
		"""Branch segment for vim buffers: status colors and ignore_statuses."""
		pl = Pl()
		create_watcher = get_fallback_create_watcher()
		branch = partial(self.vim.branch, pl=pl, create_watcher=create_watcher)
		with vim_module._with('buffer', '/foo') as segment_info:
			# Clean repository: branch_clean group when status colors are on.
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: None)):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: None):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_clean', 'branch'], 'contents': 'foo'}
					])
			# Dirty repository — NOTE(review): 'DU' looks like dirty+untracked
			# status flags; confirm against the VCS module.
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: 'DU')):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: 'DU'):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
			# ignore_statuses: an ignored status counts as clean; a non-matching
			# ignore list leaves the repository dirty.
			with replace_attr(self.vcs, 'guess', get_dummy_guess(status=lambda: 'U')):
				with replace_attr(self.vcs, 'tree_status', lambda repo, pl: 'U'):
					self.assertEqual(branch(segment_info=segment_info, status_colors=False, ignore_statuses=['U']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['DU']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_dirty', 'branch'], 'contents': 'foo'}
					])
					self.assertEqual(branch(segment_info=segment_info, status_colors=True, ignore_statuses=['U']), [
						{'divider_highlight_group': 'branch:divider', 'highlight_groups': ['branch_clean', 'branch'], 'contents': 'foo'}
					])
def test_stash(self):
pl = Pl()
create_watcher = get_fallback_create_watcher()
with vim_module._with('buffer', '/foo') as segment_info:
stash = partial(self.vim.stash, pl=pl, create_watcher=create_watcher, segment_info=segment_info)
def forge_stash(n):
return replace_attr(self.vcs, 'guess', get_dummy_guess(stash=lambda: n))
with forge_stash(0):
self.assertEqual(stash(), None)
with forge_stash(1):
self.assertEqual(stash(), [{
'divider_highlight_group': 'stash:divider',
'highlight_groups': ['stash'],
'contents': '1'
}])
with forge_stash(2):
self.assertEqual(stash(), [{
'divider_highlight_group': 'stash:divider',
'highlight_groups': ['stash'],
'contents': '2'
}])
def test_file_vcs_status(self):
pl = Pl()
create_watcher = get_fallback_create_watcher()
file_vcs_status = partial(self.vim.file_vcs_status, pl=pl, create_watcher=create_watcher)
with vim_module._with('buffer', '/foo') as segment_info:
with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
self.assertEqual(file_vcs_status(segment_info=segment_info), [
{'highlight_groups': ['file_vcs_status_M', 'file_vcs_status'], 'contents': 'M'}
])
with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: None)):
self.assertEqual(file_vcs_status(segment_info=segment_info), None)
with vim_module._with('buffer', '/bar') as segment_info:
with vim_module._with('bufoptions', buftype='nofile'):
with replace_attr(self.vim, 'guess', get_dummy_guess(status=lambda file: 'M')):
self.assertEqual(file_vcs_status(segment_info=segment_info), None)
def test_trailing_whitespace(self):
pl = Pl()
with vim_module._with('buffer', 'tws') as segment_info:
trailing_whitespace = partial(self.vim.trailing_whitespace, pl=pl, segment_info=segment_info)
self.assertEqual(trailing_whitespace(), None)
self.assertEqual(trailing_whitespace(), None)
vim_module.current.buffer[0] = ' '
self.assertEqual(trailing_whitespace(), [{
'highlight_groups': ['trailing_whitespace', 'warning'],
'contents': '1',
}])
self.assertEqual(trailing_whitespace(), [{
'highlight_groups': ['trailing_whitespace', 'warning'],
'contents': '1',
}])
vim_module.current.buffer[0] = ''
self.assertEqual(trailing_whitespace(), None)
self.assertEqual(trailing_whitespace(), None)
def test_tabnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.tabnr(pl=pl, segment_info=segment_info, show_current=True), '1')
self.assertEqual(self.vim.tabnr(pl=pl, segment_info=segment_info, show_current=False), None)
def test_tab(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.tab(pl=pl, segment_info=segment_info), [{
'contents': None,
'literal_contents': (0, '%1T'),
}])
self.assertEqual(self.vim.tab(pl=pl, segment_info=segment_info, end=True), [{
'contents': None,
'literal_contents': (0, '%T'),
}])
def test_bufnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.bufnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['bufnr']))
self.assertEqual(self.vim.bufnr(pl=pl, segment_info=segment_info, show_current=False), None)
def test_winnr(self):
pl = Pl()
segment_info = vim_module._get_segment_info()
self.assertEqual(self.vim.winnr(pl=pl, segment_info=segment_info, show_current=True), str(segment_info['winnr']))
self.assertEqual(self.vim.winnr(pl=pl, segment_info=segment_info, show_current=False), None)
	def test_segment_info(self):
		# NOTE(review): despite its name this method exercises
		# tab_modified_indicator: the tab-level flag must be on when ANY
		# buffer shown in the tab is modified, not just the current one.
		pl = Pl()
		with vim_module._with('tabpage'):
			with vim_module._with('buffer', '1') as segment_info:
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				vim_module.current.buffer[0] = ' '
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_groups': ['tab_modified_indicator', 'modified_indicator'],
				}])
				# Undoing the edit clears the modified flag again.
				vim_module._undo()
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				old_buffer = vim_module.current.buffer
				vim_module._new('2')
				segment_info = vim_module._get_segment_info()
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), None)
				# Modify the *background* buffer: the plain per-buffer
				# indicator stays off while the tab-level one turns on.
				old_buffer[0] = ' '
				self.assertEqual(self.vim.modified_indicator(pl=pl, segment_info=segment_info), None)
				self.assertEqual(self.vim.tab_modified_indicator(pl=pl, segment_info=segment_info), [{
					'contents': '+',
					'highlight_groups': ['tab_modified_indicator', 'modified_indicator'],
				}])
	def test_csv_col_current(self):
		"""CSV segment: column number (and optionally name) under the cursor."""
		pl = Pl()
		segment_info = vim_module._get_segment_info()
		def csv_col_current(**kwargs):
			# Drop the segment's cache so every call re-examines the buffer.
			self.vim.csv_cache and self.vim.csv_cache.clear()
			return self.vim.csv_col_current(pl=pl, segment_info=segment_info, **kwargs)
		buffer = segment_info['buffer']
		try:
			# No segment outside csv filetype, and none for an empty csv buffer.
			self.assertEqual(csv_col_current(), None)
			buffer.options['filetype'] = 'csv'
			self.assertEqual(csv_col_current(), None)
			buffer[:] = ['1;2;3', '4;5;6']
			vim_module._set_cursor(1, 1)
			self.assertEqual(csv_col_current(), [{
				'contents': '1', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			vim_module._set_cursor(2, 3)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			# Without a header row display_name falls back to the column number.
			vim_module._set_cursor(2, 3)
			self.assertEqual(csv_col_current(display_name=True), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (2)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			# With a header row the column name is taken from it.
			buffer[:0] = ['Foo;Bar;Baz']
			vim_module._set_cursor(2, 3)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Bar)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			if sys.version_info < (2, 7):
				raise SkipTest('csv module in Python-2.6 does not handle multiline csv files well')
			# Quoted multi-line fields must still resolve to the right column.
			buffer[len(buffer):] = ['1;"bc', 'def', 'ghi', 'jkl";3']
			vim_module._set_cursor(5, 1)
			self.assertEqual(csv_col_current(), [{
				'contents': '2', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Bar)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			vim_module._set_cursor(7, 6)
			self.assertEqual(csv_col_current(), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (Baz)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			# name_format controls the rendering of the name sub-segment;
			# display_name=False suppresses it regardless of name_format.
			self.assertEqual(csv_col_current(name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (B)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=True, name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}, {
				'contents': ' (B)', 'highlight_groups': ['csv:column_name', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=False, name_format=' ({column_name:.1})'), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}])
			self.assertEqual(csv_col_current(display_name=False), [{
				'contents': '3', 'highlight_groups': ['csv:column_number', 'csv']
			}])
		finally:
			vim_module._bw(segment_info['bufnr'])
@classmethod
def setUpClass(cls):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'vim_sys_path')))
from powerline.segments import vim
cls.vim = vim
from powerline.segments.common import vcs
cls.vcs = vcs
@classmethod
def tearDownClass(cls):
sys.path.pop(0)
class TestPDB(TestCase):
	"""Tests for the PDB (Python debugger) prompt segments."""
	def test_current_line(self):
		pl = Pl()
		segment_info = {'curframe': Args(f_lineno=10)}
		self.assertEqual(pdb.current_line(pl=pl, segment_info=segment_info), '10')
	def test_current_file(self):
		pl = Pl()
		segment_info = {'curframe': Args(f_code=Args(co_filename='/tmp/abc.py'))}
		def cf(**kwargs):
			return pdb.current_file(pl=pl, segment_info=segment_info, **kwargs)
		self.assertEqual(cf(), 'abc.py')
		self.assertEqual(cf(basename=True), 'abc.py')
		self.assertEqual(cf(basename=False), '/tmp/abc.py')
	def test_current_code_name(self):
		pl = Pl()
		segment_info = {'curframe': Args(f_code=Args(co_name='<module>'))}
		self.assertEqual(pdb.current_code_name(pl=pl, segment_info=segment_info), '<module>')
	def test_current_context(self):
		pl = Pl()
		segment_info = {'curframe': Args(f_code=Args(co_name='<module>', co_filename='/tmp/abc.py'))}
		self.assertEqual(pdb.current_context(pl=pl, segment_info=segment_info), 'abc.py')
	def test_stack_depth(self):
		# Depth is relative to initial_stack_length unless full_stack is set.
		pl = Pl()
		segment_info = {'pdb': Args(stack=[1, 2, 3]), 'initial_stack_length': 1}
		def sd(**kwargs):
			return pdb.stack_depth(pl=pl, segment_info=segment_info, **kwargs)
		self.assertEqual(sd(), '2')
		self.assertEqual(sd(full_stack=False), '2')
		self.assertEqual(sd(full_stack=True), '3')
old_cwd = None
def setUpModule():
	"""Remember the working directory and chdir to the repository root."""
	global old_cwd
	global __file__
	old_cwd = os.getcwd()
	__file__ = os.path.abspath(__file__)
	repo_root = os.path.dirname(os.path.dirname(__file__))
	os.chdir(repo_root)
def tearDownModule():
	"""Restore the working directory changed by setUpModule."""
	os.chdir(old_cwd)
# Allow running this test file directly; delegates to the shared test runner.
if __name__ == '__main__':
	from tests.modules import main
	main()
|
codeprimate/arid
|
powerline/tests/test_python/test_segments.py
|
Python
|
bsd-2-clause
| 86,678
|
[
"FEFF"
] |
b0ea4ee0e3fcffc07ca2ad4074d9d47ecfa5a3b3303af05db94a8fb05074881a
|
#!/usr/bin/env python
"""Remove the output files of one or more transformations.

Usage: dirac-transformation-remove-output transID [transID] [transID]
"""
# parseCommandLine() must run before any other DIRAC import.
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()

import sys

if len( sys.argv ) < 2:
  # print() works under both Python 2 and 3 (the original used the
  # Python-2-only print statement).
  print( 'Usage: dirac-transformation-remove-output transID [transID] [transID]' )
  sys.exit()
else:
  transIDs = [int( arg ) for arg in sys.argv[1:]]

from DIRAC.TransformationSystem.Agent.TransformationCleaningAgent import TransformationCleaningAgent
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC import gLogger
import DIRAC

agent = TransformationCleaningAgent( 'Transformation/TransformationCleaningAgent',
                                     'Transformation/TransformationCleaningAgent',
                                     'dirac-transformation-remove-output' )
agent.initialize()

client = TransformationClient()
for transID in transIDs:
  res = client.getTransformationParameters( transID, ['Status'] )
  if not res['OK']:
    gLogger.error( "Failed to determine transformation status" )
    gLogger.error( res['Message'] )
    continue
  status = res['Value']
  # Output may only be removed while the transformation is in one of these states.
  if status not in ( 'RemovingFiles', 'RemovingOutput', 'ValidatingInput', 'Active' ):
    gLogger.error( "The transformation is in %s status and the outputs can not be removed" % status )
    continue
  agent.removeTransformationOutput( transID )
|
avedaee/DIRAC
|
TransformationSystem/scripts/dirac-transformation-remove-output.py
|
Python
|
gpl-3.0
| 1,370
|
[
"DIRAC"
] |
a8730044b55f46199186b1fe204fce9eca34f7f272583ca64f5c028d2c41443d
|
#Author: Kwasi Mensah (kmensah@andrew.cmu.edu)
#Date: 7/25/2005
import direct.directbase.DirectStart
from panda3d.core import Filename,Buffer,Shader
from panda3d.core import PandaNode,NodePath
from panda3d.core import ColorBlendAttrib
from panda3d.core import AmbientLight,DirectionalLight
from panda3d.core import TextNode,Point3,Vec4
from direct.showbase.DirectObject import DirectObject
from direct.gui.OnscreenText import OnscreenText
from direct.actor.Actor import Actor
import sys,os
# Function to put instructions on the screen.
def addInstructions(pos, msg):
    """Create one left-aligned on-screen instruction line at height `pos`."""
    return OnscreenText(
        text=msg, style=1, fg=(1, 1, 1, 1),
        pos=(-1.3, pos), align=TextNode.ALeft, scale=.05)
# Function to put title on the screen.
def addTitle(text):
    """Create the demo title text in the lower-right corner of the screen."""
    return OnscreenText(
        text=text, style=1, fg=(1, 1, 1, 1),
        pos=(1.28, -0.95), align=TextNode.ARight, scale=.07)
#This function is responsible for setting up the two blur filters.
#It just makes a temp Buffer, puts a screen aligned card, and then sets
#the appropiate shader to do all the work. Gaussian blurs are decomposable
#into a two-pass algorithm which is faster than the equivalent one-pass
#algorithm, so we do it in two passes. The full explanation (for math buffs)
#can be found in the article above
def makeFilterBuffer(srcbuffer, name, sort, prog):
    """Render srcbuffer's texture through a shader into a new offscreen buffer.

    Creates a 512x512 texture buffer drawn at the given sort order, puts a
    screen-aligned card textured with srcbuffer into a private 2D scene, and
    applies the shader program `prog` to that card.
    """
    filterBuffer = base.win.makeTextureBuffer(name, 512, 512)
    filterBuffer.setSort(sort)
    filterBuffer.setClearColor(Vec4(1, 0, 0, 1))
    filterCamera = base.makeCamera2d(filterBuffer)
    filterScene = NodePath("new Scene")
    filterCamera.node().setScene(filterScene)
    card = srcbuffer.getTextureCard()
    card.reparentTo(filterScene)
    card.setShader(loader.loadShader(prog))
    return filterBuffer
class GlowDemo(DirectObject):
    """Glow-filter demo: a glow pass is blurred in X then Y and composited
    additively over the main window."""
    def __init__(self):
        base.disableMouse()
        base.setBackgroundColor(0,0,0)
        camera.setPos(0,-50,0)
        # Check video card capabilities.
        if (base.win.getGsg().getSupportsBasicShaders() == 0):
            addTitle("Glow Filter: Video driver reports that shaders are not supported.")
            return
        # Post the instructions
        self.title = addTitle("Panda3D: Tutorial - Glow Filter")
        self.inst1 = addInstructions(0.95,"ESC: Quit")
        self.inst2 = addInstructions(0.90,"Space: Toggle Glow Filter On/Off")
        self.inst3 = addInstructions(0.85,"Enter: Toggle Running/Spinning")
        self.inst4 = addInstructions(0.80,"V: View the render-to-texture results")
        # create the shader that will determine what parts of the scene will glow
        glowShader=loader.loadShader("glowShader.sha")
        # load our model
        self.tron=Actor()
        self.tron.loadModel("models/tron")
        self.tron.loadAnims({"running":"models/tron_anim"})
        self.tron.reparentTo(render)
        self.interval = self.tron.hprInterval(60,Point3(360,0,0))
        self.interval.loop()
        self.isRunning=False
        # put some lighting on the tron model
        dlight = DirectionalLight('dlight')
        alight = AmbientLight('alight')
        dlnp = render.attachNewNode(dlight)
        alnp = render.attachNewNode(alight)
        dlight.setColor(Vec4(1.0, 0.7, 0.2, 1))
        alight.setColor(Vec4(0.2, 0.2, 0.2, 1))
        dlnp.setHpr(0, -60, 0)
        render.setLight(dlnp)
        render.setLight(alnp)
        # create the glow buffer. This buffer renders like a normal scene,
        # except that only the glowing materials should show up nonblack.
        glowBuffer=base.win.makeTextureBuffer("Glow scene", 512, 512)
        glowBuffer.setSort(-3)
        glowBuffer.setClearColor(Vec4(0,0,0,1))
        # We have to attach a camera to the glow buffer. The glow camera
        # must have the same frustum as the main camera. As long as the aspect
        # ratios match, the rest will take care of itself.
        glowCamera=base.makeCamera(glowBuffer, lens=base.cam.node().getLens())
        # Tell the glow camera to use the glow shader
        tempnode = NodePath(PandaNode("temp node"))
        tempnode.setShader(glowShader)
        glowCamera.node().setInitialState(tempnode.getState())
        # set up the pipeline: from glow scene to blur x to blur y to main window.
        blurXBuffer=makeFilterBuffer(glowBuffer, "Blur X", -2, "XBlurShader.sha")
        blurYBuffer=makeFilterBuffer(blurXBuffer, "Blur Y", -1, "YBlurShader.sha")
        self.finalcard = blurYBuffer.getTextureCard()
        self.finalcard.reparentTo(render2d)
        # MAdd blending adds the blurred glow on top of the normal render.
        self.finalcard.setAttrib(ColorBlendAttrib.make(ColorBlendAttrib.MAdd))
        # Panda contains a built-in viewer that lets you view the results of
        # your render-to-texture operations. This code configures the viewer.
        self.accept("v", base.bufferViewer.toggleEnable)
        self.accept("V", base.bufferViewer.toggleEnable)
        base.bufferViewer.setPosition("llcorner")
        base.bufferViewer.setLayout("hline")
        base.bufferViewer.setCardSize(0.652,0)
        # event handling
        self.accept("space", self.toggleGlow)
        self.accept("enter", self.toggleDisplay)
        self.accept("escape", sys.exit, [0])
        self.glowOn=True;
    def toggleGlow(self):
        """Show/hide the additive glow card over the 2D scene graph."""
        if self.glowOn:
            self.finalcard.reparentTo(hidden)
        else:
            self.finalcard.reparentTo(render2d)
        self.glowOn= not(self.glowOn)
    def toggleDisplay(self):
        """Switch between the spinning pose and the running animation."""
        self.isRunning=not(self.isRunning)
        if not(self.isRunning):
            camera.setPos(0,-50,0)
            self.tron.stop("running")
            self.tron.pose("running", 0)
            self.interval.loop()
        else:
            camera.setPos(0,-170,3)
            self.interval.finish()
            self.tron.setHpr(0,0,0)
            self.tron.loop("running")
# Instantiate the demo and enter Panda3D's main loop.
t=GlowDemo()
run()
|
toontownfunserver/Panda3D-1.9.0
|
samples/Glow-Filter/Tut-Glow-Advanced.py
|
Python
|
bsd-3-clause
| 5,837
|
[
"Gaussian"
] |
2a826265f19d8eb688b5b04b40b4a0309da98ccbaa05e7d7886d9bc4c28fbc05
|
# coding: utf-8
# In[1]:
import numpy as np
import networkx as nx
import som_functions as som
import math
import sklearn.metrics as met
import sklearn.manifold as man
from time import time
# In[48]:
##GHSOM algorithm
#G: graph
#lam: lambda -- the number of epochs to train before assessing error
#eta: learning rate
#sigma: initial neighbourhood range
#e_0: error of previous layer
#e_sg: error must reduce by this much for growth to stop
#e_en: error must be greater than this to expand neuron
#layer: layer of som
#n: desired initial number of neurons
#m: desired number of neurons in the next layer
def ghsom(G, lam, w, eta, sigma, e_0, e_sg, e_en, layer, n, m):
    """Recursively grow a hierarchy of self-organising maps over graph G.

    Returns (network, MQE): the trained SOM for this layer — each neuron
    carries its assigned nodes 'ls', error 'e' and child SOM 'n' — plus the
    mean quantization error after child expansion.
    """
    # embed the graph and cache the embedding on its nodes
    X = dsd_embedding(G)
    set_embedding(G, X)
    print('embedded graph')
    # number of nodes in G
    num_nodes = nx.number_of_nodes(G)
    # number of training patterns to visit per epoch (capped at 100)
    N = min(num_nodes, 100)
    # create som for this neuron
    network = som.initialise_network(X, n, w)
    ## initial training phase: train for lam epochs, then assign nodes
    network = som.train_network(X, network, lam, eta, sigma, N)
    network = som.assign_nodes(G, X, network, layer)
    # calculate mean network error
    network, MQE = som.update_errors(network)
    ## som growth phase: add neurons until MQE drops below e_sg * e_0
    while MQE > e_sg * e_0:
        # expand at the neuron with the greatest error, then retrain
        error_unit = som.identify_error_unit(network)
        network = som.expand_network(network, error_unit)
        network = som.train_network(X, network, lam, eta, sigma, N)
        network = som.assign_nodes(G, X, network, layer)
        network, MQE = som.update_errors(network)
        print('ghsom has expanded som',layer,'error',MQE)
    print('ghsom has terminated expansion',layer)
    print('error',MQE)
    # recalculate error after neuron expansion
    MQE = 0
    ## neuron expansion phase: recurse into neurons whose error is too high
    for i in network.nodes():
        ls = network.node[i]['ls']
        e = network.node[i]['e']
        # e_0 == math.inf marks the root call: always expand, seeding e_0
        if e > e_en * e_0 or e_0 == math.inf:
            if e_0 == math.inf:
                e_0 = e
            # subgraph of this neuron's assigned nodes
            H = G.subgraph(ls)
            # recursively build a layer+1 som for the subgraph; the child's
            # MQE replaces this neuron's own error
            n, e = ghsom(H, lam, w, eta, sigma, e_0, e_sg, e_en, layer + 1, m, m)
            network.node[i]['e'] = e
            network.node[i]['n'] = n
            print('ghsom has built new layer',layer+1)
        # increase overall network error
        MQE += e
    # mean MQE
    MQE /= nx.number_of_nodes(network)
    return network, MQE
# In[3]:
##visualise network
def visualise(G, network, neurons_in_each_layer, layer):
    """Recursively draw the graph partition and the SOM at every layer."""
    # one random RGB colour per neuron in this layer's network
    palette = np.random.rand(len(network), 3)
    som.visualise_graph(G, palette, neurons_in_each_layer, layer)
    som.visualise_network(network, palette, layer)
    for neuron in network.nodes():
        assigned = network.node[neuron]['ls']
        child_som = network.node[neuron]['n']
        if len(child_som) > 0:
            visualise(G.subgraph(assigned), child_som, neurons_in_each_layer, layer + 1)
# In[4]:
##function to recursively label nodes in graph
def label_graph(G, network, layer, neuron_count):
    """Recursively write 'community<layer>' labels onto the nodes of G.

    Each neuron of `network` is one community at this layer; `neuron_count`
    holds the next free community id per layer and is mutated in place.
    Returns the labelled graph.

    Fixes: the original computed two unused locals (`num_neurons` and a
    subgraph `H` that the recursive call never used) — both removed.
    """
    for neuron in network.nodes():
        members = network.node[neuron]['ls']
        for node in members:
            G.node[node]['community' + str(layer)] = neuron_count[layer]
        child_som = network.node[neuron]['n']
        if len(child_som) > 0:
            # Recurse on the full graph; child neurons reference their own
            # member nodes through their 'ls' lists.
            G = label_graph(G, child_som, layer + 1, neuron_count)
        neuron_count[layer] += 1
    return G
# In[5]:
##function to read in attributes from file and return a dictionary
def read_attributes(filepath):
    """Read a whitespace-separated ``key value`` file into a dict of strings."""
    with open(filepath) as handle:
        # each line holds exactly one key/value pair
        return dict(line.split() for line in handle)
# In[6]:
##function to generate benchmark graph
def benchmark_graph(edge_path, c_path):
    """Load a benchmark graph and attach its ground-truth communities.

    The edge list comes from `edge_path`; per-node community labels read
    from `c_path` are stored as the 'firstlevelcommunity' node attribute.
    """
    graph = nx.read_edgelist(edge_path)
    communities = read_attributes(c_path)
    nx.set_node_attributes(graph, 'firstlevelcommunity', communities)
    return graph
# In[7]:
##function to generate benchmark graph
def benchmark_hierarchical_graph(edge_path, c1_path, c2_path):
    """Load a two-level benchmark graph with both layers of ground truth.

    Stores the labels from `c1_path`/`c2_path` as the node attributes
    'firstlevelcommunity' and 'secondlevelcommunity' respectively.
    """
    graph = nx.read_edgelist(edge_path)
    for attribute, path in (('firstlevelcommunity', c1_path),
                            ('secondlevelcommunity', c2_path)):
        nx.set_node_attributes(graph, attribute, read_attributes(path))
    return graph
# In[8]:
##function to calculate community detection error given a generated benchmark graph
def mutual_information(G, labels):
    """Score predicted communities against ground truth with normalised MI.

    `labels` lists the ground-truth attribute names top-down; predictions
    'community1', 'community2', ... are matched against them in reverse
    order. Returns one score per layer (0 where no prediction exists).
    """
    num_layers = len(labels)
    scores = [0] * num_layers
    for layer in range(num_layers):
        truth = nx.get_node_attributes(G, labels[num_layers - layer - 1])
        predicted = nx.get_node_attributes(G, 'community' + str(layer + 1))
        labels_true = [value for key, value in truth.items()]
        labels_pred = [value for key, value in predicted.items()]
        if len(labels_pred) == 0:
            continue
        scores[layer] = met.normalized_mutual_info_score(labels_true, labels_pred)
    return scores
# In[9]:
##function to compute G = (I - P + W)^-1
#N: normalised laplacian
#d: list of degrees
#v: eigenvector corresponding to smallest eigenvalue
def compute_gr(N, B, d, v):
    """Compute Gr, a Green's-matrix-style solve of N against the columns of B.

    Each column b of B is split into its component along the eigenvector v
    (b2) and the remainder (b1); N x1 = b1 is solved in the least-squares
    sense, and sqrt(degree) scaling is applied to x1 + b2.

    N -- normalised Laplacian (n, n)
    B -- right-hand sides, one per column (n, n)
    d -- node degrees (n,)
    v -- eigenvector of the smallest eigenvalue of N (n,)

    Fixes: the original solved one column at a time, re-factorizing N on
    every iteration (n least-squares solves instead of one), and relied on
    the deprecated default `rcond` of np.linalg.lstsq, which raises a
    FutureWarning on modern NumPy.
    """
    # component of every column of B along v, as one outer product
    b2 = np.outer(v, v @ B)
    # remainder orthogonal to v
    b1 = B - b2
    # single least-squares solve for all right-hand sides at once;
    # rcond=None selects NumPy's current machine-precision cutoff
    x1 = np.linalg.lstsq(N, b1, rcond=None)[0]
    return np.diag(d ** 0.5) @ (x1 + b2)
# In[10]:
##function to compute DSD matrix using AMG method
#N: laplacian matrix
#d: list of degrees
#v: smallest eigenvector
def dsd(N, d, v):
    """Compute the Diffusion State Distance (DSD) matrix for a graph.

    N -- normalised Laplacian, d -- node degrees, v -- eigenvector of the
    smallest eigenvalue. Returns a symmetric (n, n) matrix whose (i, j)
    entry is the L1 distance between the Green's-matrix responses of
    nodes i and j.

    Fixes: the original bound a local variable named `dsd`, shadowing this
    very function inside its own body; it also computed
    `np.diag(d ** -0.5) @ np.identity(n)`, a no-op matrix product.
    """
    n = len(N)
    distances = np.zeros((n, n))
    # B = D^(-1/2); multiplying by the identity added nothing
    B = np.diag(d ** -0.5)
    G = compute_gr(N, B, d, v)
    print('computed greens matrix')
    # pairwise L1 distances; fill both triangles since the metric is symmetric
    for i in range(n):
        for j in range(i, n):
            dist = np.linalg.norm(np.transpose(B[:, i] - B[:, j]) @ G, ord=1)
            distances[i, j] = dist
            distances[j, i] = dist
    return distances
# In[11]:
##function to return all connected components
def connected_components(G):
    """Split G into connected-component subgraphs via the normalised Laplacian.

    The multiplicity of the (near-)zero eigenvalue equals the number of
    components, and each such eigenvector is supported on exactly one
    component, so its nonzero entries identify that component's nodes.
    """
    n = nx.number_of_nodes(G)
    A = nx.adjacency_matrix(G).toarray()
    deg = A.sum(axis=1)
    scale = np.diag(deg ** -0.5)
    laplacian = np.identity(n) - scale @ A @ scale
    eigvals, eigvecs = np.linalg.eigh(laplacian)
    # ascending eigenvalue order
    order = eigvals.argsort()
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    num_components = len(eigvals[eigvals < 1e-12])
    print('number of connected components', num_components)
    subgraphs = []
    for component in range(num_components):
        member_idx = eigvecs[:, component].nonzero()[0]
        subgraphs.append(G.subgraph([G.nodes()[i] for i in member_idx]))
    return subgraphs
# In[41]:
##dsd embedding
def dsd_embedding(G):
    """Embed G's nodes in Euclidean space: DSD metric followed by classical
    MDS, keeping enough dimensions to retain ~95% of the variance."""
    # number of nodes in the graph
    n = nx.number_of_nodes(G)
    # adjacency matrix
    A = nx.adjacency_matrix(G).toarray()
    # list of degrees
    deg = np.sum(A, axis=1)
    # normalised graph laplacian: I - D^(-1/2) A D^(-1/2)
    N = np.identity(n) - np.diag(deg ** -0.5) @ A @ np.diag(deg ** -0.5)
    print('constructed normalised laplacian')
    # eigen decompose normalised laplacian
    l, U = np.linalg.eigh(N)
    # sort eigenvalues (and eigenvectors) into ascending order
    idx = l.argsort()
    l = l[idx]
    U = U[:,idx]
    # compute dsd matrix as metric (uses the smallest-eigenvalue eigenvector)
    D = dsd(N, deg, U[:,0])
    print('computed dsd matrix')
    # classical MDS: double-centre the squared distances
    C = np.identity(n) - np.ones((n, n)) / n
    # similarity matrix
    K = - 1/2 * C @ D ** 2 @ C
    # eigen decompose K
    l, U = np.linalg.eigh(K)
    # sort eigenvalues (and eigenvectors) into descending order this time
    idx = l.argsort()[::-1]
    l = l[idx]
    U = U[:,idx]
    # sum of all eigen values
    s = sum(l)
    # shrink k while the leading eigenvalues still explain > 95% of variance
    k = len(l)
    var = 1
    while var > 0.95:
        var = sum(l[:k]) / s
        k -= 1
    # loop overshoots by one; restore the last k that satisfied the threshold
    k += 1
    print('reduced dimension of data',k)
    # position matrix: scale eigenvectors by sqrt of their eigenvalues
    X = U[:,:k] @ np.diag(l[:k] ** 0.5)
    return X
# In[13]:
##save embedding to graph
def set_embedding(G, X):
    """Store row i of X as the 'embedding' attribute of the i-th node of G."""
    for index, node in enumerate(G.nodes()):
        G.node[node]['embedding'] = X[index]
# In[14]:
##get embedding from graph
def get_embedding(G):
    """Collect the nodes' 'embedding' attributes into one numpy array.

    Fix: the original computed an unused local (`num_nodes`) — removed.
    """
    embeddings = nx.get_node_attributes(G, 'embedding')
    return np.array([value for key, value in embeddings.items()])
# In[37]:
# generate graph
# G = nx.karate_club_graph()
# G = nx.read_gml('football.gml')
# G = nx.read_gml('netscience.gml')
# G = nx.read_gml('dolphins.gml')
# G = nx.read_gml('karate.gml')
# G = benchmark_graph('bin_network.dat', 'community.dat')
# G = benchmark_hierarchical_graph('network.dat',
# 'community_first_level.dat',
# 'community_second_level.dat')
# G = benchmark_hierarchical_graph('literature_network.dat',
# 'literature_community_first_level.dat',
# 'literature_community_second_level.dat')
# Load the 32-node hierarchical benchmark (the commented alternatives above
# are other datasets the author experimented with).
G = benchmark_hierarchical_graph('literature_network_32.dat',
                                 'literature_community_first_level_32.dat',
                                 'literature_community_second_level_32.dat')
print('loaded G')
# ground-truth attribute names, top level first
labels = ['firstlevelcommunity','secondlevelcommunity']
# divide graph into connected components
connected_graphs = connected_components(G)
print('calculated number of connected components')
## start time
start = time()
# number of epochs to train == lambda
lam = 1000
# weights are initialised based on a uniform random distribution between +- this value
w = 1e-4
# initial learning rate
eta_0 = 1e-3
# initial neighbourhood size
sigma_0 = 1
# stop growth of current layer
e_sg = 0.8
# error must be greater than this times previous error for expansion
e_en = 0.9
# layer of GHSOM
layer = 0;
# desired number of initial neurons
n = 1
# desired number of initial neurons for all other layers
m = 2
for H in connected_graphs:
    # run ghsom algorithm on this component
    network, MQE = ghsom(H, lam, w, eta_0, sigma_0, math.inf, e_sg, e_en, layer, n, m)
    print('ghsom algorithm has terminated')
    print('mean network error:',MQE)
    # label graph
    # NOTE(review): np.int was removed in NumPy >= 1.24 — confirm the pinned version
    neurons = np.zeros(50, dtype=np.int)
    G = label_graph(G, network, layer, neurons)
## visualise
# NOTE(review): when G has several components, `network` below refers only
# to the last component's SOM
neurons = np.zeros(50, dtype=np.int)
visualise(G, network, neurons, 0)
## calculate error
mi_score = mutual_information(G, labels)
print('normalised mutual information score',mi_score)
## time taken
print('time taken',(time() - start))
# In[80]:
## draw the flattened SOM hierarchy (build_connected_som is defined below)
import matplotlib.pyplot as plt
N = nx.Graph();
N, num_nodes = build_connected_som(N, network, 0)
nx.draw_networkx(N)
plt.show()
# In[43]:
def build_connected_som(N, network, num):
    """Flatten a hierarchical SOM into the single graph N for drawing.

    Node `num` is the parent handle: every neuron of `network` receives the
    next free integer id, is linked to `num`, keeps its lateral edges to
    earlier siblings, and recursively adds its child SOM (if any).
    Returns (N, last id used).
    """
    next_id = num
    neurons = network.nodes()
    for index, neuron in enumerate(neurons):
        next_id += 1
        # link this neuron to its parent
        N.add_edge(num, next_id)
        # replicate lateral connections to previously added siblings
        for prev in range(index):
            if network.has_edge(neuron, neurons[prev]):
                N.add_edge(next_id, num + prev + 1)
        child_som = network.node[neuron]['n']
        if len(child_som) > 0:
            N, next_id = build_connected_som(N, child_som, next_id)
    return N, next_id
|
DavidMcDonald1993/ghsom
|
GHSOM.py
|
Python
|
gpl-2.0
| 15,030
|
[
"NEURON",
"VisIt"
] |
d43b8a9bbe9fe9e3207ef72765f704c8cc4289a6dd32669e549927b7225573f6
|
# -*- coding: utf-8 -*-
#
# balancedneuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Balanced neuron example
-----------------------
This script simulates a neuron driven by an excitatory and an
inhibitory population of neurons firing Poisson spike trains. The aim
is to find a firing rate for the inhibitory population that will make
the neuron fire at the same rate as the excitatory population.
Optimization is performed using the ``bisection`` method from Scipy,
simulating the network repeatedly.
This example is also shown in the article Eppler et al. (2009)
**PyNEST: A convenient interface to the NEST simulator**,
*Front. Neuroinform.* http://dx.doi.org/10.3389/neuro.11.012.2008
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting. Scipy should be imported before nest.
'''
from scipy.optimize import bisect
import nest
import nest.voltage_trace
'''
Additionally, we set the verbosity using `set_verbosity` to
suppress info messages.
'''
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
'''
Second, the simulation parameters are assigned to variables.
'''
t_sim = 25000.0  # how long we simulate
n_ex = 16000     # size of the excitatory population
n_in = 4000      # size of the inhibitory population
r_ex = 5.0       # mean rate of the excitatory population
r_in = 20.5      # initial rate of the inhibitory population
epsc = 45.0      # peak amplitude of excitatory synaptic currents
ipsc = -45.0     # peak amplitude of inhibitory synaptic currents
d = 1.0          # synaptic delay (NOTE(review): unused — Connect below hard-codes 'delay': 1.0)
lower = 15.0     # lower bound of the search interval
upper = 25.0     # upper bound of the search interval
prec = 0.01      # how close need the excitatory rates be
'''
Third, the nodes are created using `Create`. We store the returned
handles in variables for later reference.
'''
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
spikedetector = nest.Create("spike_detector")
'''
Fourth, the excitatory `poisson_generator` (``noise[0]``) and the
`voltmeter` are configured using `SetStatus`, which expects a list of
node handles and a list of parameter dictionaries. The rate of the
inhibitory Poisson generator is set later. Note that we need not set
parameters for the neuron and the spike detector, since they have
satisfactory defaults.
'''
nest.SetStatus(noise, [{"rate": n_ex * r_ex}, {"rate": n_in * r_in}])
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
'''
Fifth, the `iaf_psc_alpha` is connected to the `spike_detector` and the
`voltmeter`, as are the two Poisson generators to the neuron. The
command `Connect` has different variants. Plain `Connect` just takes
the handles of pre- and post-synaptic nodes and uses the default
values for weight and delay. It can also be called with a list of
weights, as in the connection of the noise below.
Note that the connection direction for the `voltmeter` is
reversed compared to the `spike_detector`, because it observes the
neuron instead of receiving events from it. Thus, `Connect` reflects
the direction of signal flow in the simulation kernel rather than the
physical process of inserting an electrode into the neuron. The latter
semantics is presently not available in NEST.
'''
nest.Connect(neuron, spikedetector)
nest.Connect(voltmeter, neuron)
nest.Connect(noise, neuron, syn_spec={'weight': [[epsc, ipsc]], 'delay': 1.0})
'''
To determine the optimal rate of the neurons in the inhibitory
population, the network is simulated several times for different
values of the inhibitory rate while measuring the rate of the target
neuron. This is done by calling `Simulate` until the rate of the
target neuron matches the rate of the neurons in the excitatory
population with a certain accuracy. The algorithm is implemented in
two steps:
First, the function ``output_rate`` is defined to measure the firing
rate of the target neuron for a given rate of the inhibitory neurons.
'''
def output_rate(guess):
    """Simulate the network for one trial and return the firing rate (Hz)
    of the target neuron given an inhibitory per-neuron rate *guess*."""
    print("Inhibitory rate estimate: %5.2f Hz" % guess)
    # Scale the single-generator rate by the size of the inhibitory population.
    total_rate = float(abs(n_in * guess))
    nest.SetStatus([noise[1]], "rate", total_rate)
    # Reset the spike counter so each trial starts from zero events.
    nest.SetStatus(spikedetector, "n_events", 0)
    nest.Simulate(t_sim)
    n_events = nest.GetStatus(spikedetector, "n_events")[0]
    # Convert the event count over t_sim (ms) into a rate in Hz.
    measured = n_events * 1000.0 / t_sim
    print(" -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (measured, r_ex))
    return measured
'''
The function takes the firing rate of the inhibitory neurons as an
argument. It scales the rate with the size of the inhibitory
population and configures the inhibitory Poisson generator
(``noise[1]``) accordingly. Then, the spike counter of the
`spike_detector` is reset to zero. The network is simulated using
`Simulate`, which takes the desired simulation time in milliseconds
and advances the network state by this amount of time. During
simulation, the `spike_detector` counts the spikes of the target
neuron and the total number is read out at the end of the simulation
period. The return value of ``output_rate()`` is the firing rate of
the target neuron in Hz.
Second, the scipy function ``bisect`` is used to determine the optimal
firing rate of the neurons of the inhibitory population.
'''
# Root-find the inhibitory rate at which the target neuron fires at r_ex.
# NOTE(review): `bisect`, `lower`, `upper` and `prec` are defined earlier
# in the script, outside this excerpt.
in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
'''
The function ``bisect`` takes four arguments: first a function whose
zero crossing is to be determined. Here, the firing rate of the target
neuron should equal the firing rate of the neurons of the excitatory
population. Thus we define an anonymous function (using ``lambda``)
that returns the difference between the actual rate of the target
neuron and the rate of the excitatory Poisson generator, given a rate
for the inhibitory neurons. The next two arguments are the lower and
upper bound of the interval in which to search for the zero
crossing. The fourth argument of ``bisect`` is the desired relative
precision of the zero crossing.
Finally, we plot the target neuron's membrane potential as a function
of time.
'''
# Plot the recorded membrane potential trace.
nest.voltage_trace.from_device(voltmeter)
|
mschmidt87/nest-simulator
|
pynest/examples/balancedneuron.py
|
Python
|
gpl-2.0
| 6,751
|
[
"NEURON"
] |
c46108ebda7a26d2c41b9a765aeef647f205aadc035c5ac398010f544856e372
|
# Name: Maud Ottenheijm
# Student nr: 10641785
# This file contains code for writing csv to json format. Takes input from InternetUsersPer100.csv, outputs in
# data.json. Part of D3 Datamaps assignment week 5, Data Processing.
# bounds for range
# Upper bounds (exclusive) of the five value buckets used to colour the map.
range_1 = 2000
range_2 = 4000
range_3 = 6000
range_4 = 8000
range_5 = 10000
# Human-readable fill-key labels for each bucket; the last label covers
# all values greater than or equal to range_5.
range_name1 = "<" + str(range_1)
range_name2 = "<" + str(range_2)
range_name3 = "<" + str(range_3)
range_name4 = "<" + str(range_4)
range_name5 = "<" + str(range_5)
range_name6 = ">=" + str(range_5)
# open readfile
import csv
country_codes = [
["af", "AFG", "Afghanistan"],
["ax", "ALA", "Aland Islands"],
["al", "ALB", "Albania"],
["dz", "DZA", "Algeria"],
["as", "ASM", "American Samoa"],
["ad", "AND", "Andorra"],
["ao", "AGO", "Angola"],
["ai", "AIA", "Anguilla"],
["aq", "ATA", "Antarctica"],
["ag", "ATG", "Antigua and Barbuda"],
["ar", "ARG", "Argentina"],
["am", "ARM", "Armenia"],
["aw", "ABW", "Aruba"],
["au", "AUS", "Australia"],
["at", "AUT", "Austria"],
["az", "AZE", "Azerbaijan"],
["bs", "BHS", "Bahamas"],
["bh", "BHR", "Bahrain"],
["bd", "BGD", "Bangladesh"],
["bb", "BRB", "Barbados"],
["by", "BLR", "Belarus"],
["be", "BEL", "Belgium"],
["bz", "BLZ", "Belize"],
["bj", "BEN", "Benin"],
["bm", "BMU", "Bermuda"],
["bt", "BTN", "Bhutan"],
["bo", "BOL", "Bolivia, Plurinational State of"],
["bq", "BES", "Bonaire, Sint Eustatius and Saba"],
["ba", "BIH", "Bosnia and Herzegovina"],
["bw", "BWA", "Botswana"],
["bv", "BVT", "Bouvet Island"],
["br", "BRA", "Brazil"],
["io", "IOT", "British Indian Ocean Territory"],
["bn", "BRN", "Brunei Darussalam"],
["bg", "BGR", "Bulgaria"],
["bf", "BFA", "Burkina Faso"],
["bi", "BDI", "Burundi"],
["kh", "KHM", "Cambodia"],
["cm", "CMR", "Cameroon"],
["ca", "CAN", "Canada"],
["cv", "CPV", "Cape Verde"],
["ky", "CYM", "Cayman Islands"],
["cf", "CAF", "Central African Republic"],
["td", "TCD", "Chad"],
["cl", "CHL", "Chile"],
["cn", "CHN", "China"],
["cx", "CXR", "Christmas Island"],
["cc", "CCK", "Cocos (Keeling) Islands"],
["co", "COL", "Colombia"],
["km", "COM", "Comoros"],
["cg", "COG", "Congo"],
["cd", "COD", "Congo, the Democratic Republic of the"],
["ck", "COK", "Cook Islands"],
["cr", "CRI", "Costa Rica"],
["ci", "CIV", "Cote d'Ivoire"],
["hr", "HRV", "Croatia"],
["cu", "CUB", "Cuba"],
["cw", "CUW", "Curacao"],
["cy", "CYP", "Cyprus"],
["cz", "CZE", "Czech Republic"],
["dk", "DNK", "Denmark"],
["dj", "DJI", "Djibouti"],
["dm", "DMA", "Dominica"],
["do", "DOM", "Dominican Republic"],
["ec", "ECU", "Ecuador"],
["eg", "EGY", "Egypt"],
["sv", "SLV", "El Salvador"],
["gq", "GNQ", "Equatorial Guinea"],
["er", "ERI", "Eritrea"],
["ee", "EST", "Estonia"],
["et", "ETH", "Ethiopia"],
["fk", "FLK", "Falkland Islands (Malvinas)"],
["fo", "FRO", "Faroe Islands"],
["fj", "FJI", "Fiji"],
["fi", "FIN", "Finland"],
["fr", "FRA", "France"],
["gf", "GUF", "French Guiana"],
["pf", "PYF", "French Polynesia"],
["tf", "ATF", "French Southern Territories"],
["ga", "GAB", "Gabon"],
["gm", "GMB", "Gambia"],
["ge", "GEO", "Georgia"],
["de", "DEU", "Germany"],
["gh", "GHA", "Ghana"],
["gi", "GIB", "Gibraltar"],
["gr", "GRC", "Greece"],
["gl", "GRL", "Greenland"],
["gd", "GRD", "Grenada"],
["gp", "GLP", "Guadeloupe"],
["gu", "GUM", "Guam"],
["gt", "GTM", "Guatemala"],
["gg", "GGY", "Guernsey"],
["gn", "GIN", "Guinea"],
["gw", "GNB", "Guinea-Bissau"],
["gy", "GUY", "Guyana"],
["ht", "HTI", "Haiti"],
["hm", "HMD", "Heard Island and McDonald Islands"],
["va", "VAT", "Holy See (Vatican City State)"],
["hn", "HND", "Honduras"],
["hk", "HKG", "Hong Kong"],
["hu", "HUN", "Hungary"],
["is", "ISL", "Iceland"],
["in", "IND", "India"],
["id", "IDN", "Indonesia"],
["ir", "IRN", "Iran, Islamic Republic of"],
["iq", "IRQ", "Iraq"],
["ie", "IRL", "Ireland"],
["im", "IMN", "Isle of Man"],
["il", "ISR", "Israel"],
["it", "ITA", "Italy"],
["jm", "JAM", "Jamaica"],
["jp", "JPN", "Japan"],
["je", "JEY", "Jersey"],
["jo", "JOR", "Jordan"],
["kz", "KAZ", "Kazakhstan"],
["ke", "KEN", "Kenya"],
["ki", "KIR", "Kiribati"],
["kp", "PRK", "Korea, Democratic People's Republic of"],
["kr", "KOR", "Korea, Republic of"],
["kw", "KWT", "Kuwait"],
["kg", "KGZ", "Kyrgyzstan"],
["la", "LAO", "Lao People's Democratic Republic"],
["lv", "LVA", "Latvia"],
["lb", "LBN", "Lebanon"],
["ls", "LSO", "Lesotho"],
["lr", "LBR", "Liberia"],
["ly", "LBY", "Libya"],
["li", "LIE", "Liechtenstein"],
["lt", "LTU", "Lithuania"],
["lu", "LUX", "Luxembourg"],
["mo", "MAC", "Macao"],
["mk", "MKD", "Macedonia, the former Yugoslav Republic of"],
["mg", "MDG", "Madagascar"],
["mw", "MWI", "Malawi"],
["my", "MYS", "Malaysia"],
["mv", "MDV", "Maldives"],
["ml", "MLI", "Mali"],
["mt", "MLT", "Malta"],
["mh", "MHL", "Marshall Islands"],
["mq", "MTQ", "Martinique"],
["mr", "MRT", "Mauritania"],
["mu", "MUS", "Mauritius"],
["yt", "MYT", "Mayotte"],
["mx", "MEX", "Mexico"],
["fm", "FSM", "Micronesia, Federated States of"],
["md", "MDA", "Moldova, Republic of"],
["mc", "MCO", "Monaco"],
["mn", "MNG", "Mongolia"],
["me", "MNE", "Montenegro"],
["ms", "MSR", "Montserrat"],
["ma", "MAR", "Morocco"],
["mz", "MOZ", "Mozambique"],
["mm", "MMR", "Myanmar"],
["na", "NAM", "Namibia"],
["nr", "NRU", "Nauru"],
["np", "NPL", "Nepal"],
["nl", "NLD", "Netherlands"],
["nc", "NCL", "New Caledonia"],
["nz", "NZL", "New Zealand"],
["ni", "NIC", "Nicaragua"],
["ne", "NER", "Niger"],
["ng", "NGA", "Nigeria"],
["nu", "NIU", "Niue"],
["nf", "NFK", "Norfolk Island"],
["mp", "MNP", "Northern Mariana Islands"],
["no", "NOR", "Norway"],
["om", "OMN", "Oman"],
["pk", "PAK", "Pakistan"],
["pw", "PLW", "Palau"],
["ps", "PSE", "Palestine, State of"],
["pa", "PAN", "Panama"],
["pg", "PNG", "Papua New Guinea"],
["py", "PRY", "Paraguay"],
["pe", "PER", "Peru"],
["ph", "PHL", "Philippines"],
["pn", "PCN", "Pitcairn"],
["pl", "POL", "Poland"],
["pt", "PRT", "Portugal"],
["pr", "PRI", "Puerto Rico"],
["qa", "QAT", "Qatar"],
["re", "REU", "Reunion"],
["ro", "ROU", "Romania"],
["ru", "RUS", "Russian Federation"],
["rw", "RWA", "Rwanda"],
["bl", "BLM", "Saint Barthelemy"],
["sh", "SHN", "Saint Helena, Ascension and Tristan da Cunha"],
["kn", "KNA", "Saint Kitts and Nevis"],
["lc", "LCA", "Saint Lucia"],
["mf", "MAF", "Saint Martin (French part)"],
["pm", "SPM", "Saint Pierre and Miquelon"],
["vc", "VCT", "Saint Vincent and the Grenadines"],
["ws", "WSM", "Samoa"],
["sm", "SMR", "San Marino"],
["st", "STP", "Sao Tome and Principe"],
["sa", "SAU", "Saudi Arabia"],
["sn", "SEN", "Senegal"],
["rs", "SRB", "Serbia"],
["sc", "SYC", "Seychelles"],
["sl", "SLE", "Sierra Leone"],
["sg", "SGP", "Singapore"],
["sx", "SXM", "Sint Maarten (Dutch part)"],
["sk", "SVK", "Slovakia"],
["si", "SVN", "Slovenia"],
["sb", "SLB", "Solomon Islands"],
["so", "SOM", "Somalia"],
["za", "ZAF", "South Africa"],
["gs", "SGS", "South Georgia and the South Sandwich Islands"],
["ss", "SSD", "South Sudan"],
["es", "ESP", "Spain"],
["lk", "LKA", "Sri Lanka"],
["sd", "SDN", "Sudan"],
["sr", "SUR", "Suriname"],
["sj", "SJM", "Svalbard and Jan Mayen"],
["sz", "SWZ", "Swaziland"],
["se", "SWE", "Sweden"],
["ch", "CHE", "Switzerland"],
["sy", "SYR", "Syrian Arab Republic"],
["tw", "TWN", "Taiwan, Province of China"],
["tj", "TJK", "Tajikistan"],
["tz", "TZA", "Tanzania, United Republic of"],
["th", "THA", "Thailand"],
["tl", "TLS", "Timor-Leste"],
["tg", "TGO", "Togo"],
["tk", "TKL", "Tokelau"],
["to", "TON", "Tonga"],
["tt", "TTO", "Trinidad and Tobago"],
["tn", "TUN", "Tunisia"],
["tr", "TUR", "Turkey"],
["tm", "TKM", "Turkmenistan"],
["tc", "TCA", "Turks and Caicos Islands"],
["tv", "TUV", "Tuvalu"],
["ug", "UGA", "Uganda"],
["ua", "UKR", "Ukraine"],
["ae", "ARE", "United Arab Emirates"],
["gb", "GBR", "United Kingdom"],
["us", "USA", "United States"],
["um", "UMI", "United States Minor Outlying Islands"],
["uy", "URY", "Uruguay"],
["uz", "UZB", "Uzbekistan"],
["vu", "VUT", "Vanuatu"],
["ve", "VEN", "Venezuela, Bolivarian Republic of"],
["vn", "VNM", "Viet Nam"],
["vg", "VGB", "Virgin Islands, British"],
["vi", "VIR", "Virgin Islands, U.S."],
["wf", "WLF", "Wallis and Futuna"],
["eh", "ESH", "Western Sahara"],
["ye", "YEM", "Yemen"],
["zm", "ZMB", "Zambia"],
["zw", "ZWE", "Zimbabwe"] ]
# start output string
# Renamed from ``json`` to avoid shadowing the stdlib module name.
json_out = "data: {"
# Loop through each row in the CSV, classify the value into a range bucket
# and append one map entry per year-2000 observation to the output string.
#
# BUG FIXES vs. the original: csv.reader has no ``header`` keyword and
# ``true`` was undefined (DictReader consumes the header instead); rows are
# mappings, not objects, so ``row['TIME']`` replaces ``count.TIME``;
# ``country_codes`` was indexed with a list element instead of using the
# element directly; CSV values are strings and must be converted before
# numeric comparison; the file must be opened in text mode for csv; and the
# original never actually appended anything to the output string.
with open('SCN_DS_31052016075604234.csv', 'r', newline='') as infile:
    reader = csv.DictReader(infile)
    for row in reader:
        # Only export observations for the year 2000.
        if row.get('TIME') != "2000":
            continue
        # Map the long country name onto its ISO alpha-3 code.
        country = "Unknown"
        for code in country_codes:
            if row.get('LOCATION') == code[2]:
                country = code[1]
                break
        # NOTE(review): assumes the value column is named 'Value' as in
        # OECD exports -- TODO confirm against the actual CSV header.
        try:
            value = float(row.get('Value') or 0)
        except ValueError:
            value = 0.0
        # Classify the value into one of the predefined range buckets.
        if value < range_1:
            fill_key = range_name1
        elif value < range_2:
            fill_key = range_name2
        elif value < range_3:
            fill_key = range_name3
        elif value < range_4:
            fill_key = range_name4
        elif value < range_5:
            fill_key = range_name5
        else:
            fill_key = range_name6
        # NOTE(review): entry format follows the D3 Datamaps convention
        # ({code: {fillKey, value}}); verify against the consuming page.
        json_out += '"%s": {"fillKey": "%s", "value": %s},' % (
            country, fill_key, value)
# end output string: drop the trailing comma and close the object.
json_out = json_out[0:-1] + "}"
# write output into outputfile (context manager guarantees the file closes).
with open('processed_data.json', 'w') as outputFile:
    outputFile.write(json_out)
|
MaudOtten/programmeerproject
|
WomenInScience/data/trial_data_files/csvreader.py
|
Python
|
unlicense
| 9,175
|
[
"BWA"
] |
4f957b7e516b056c4b58801ee186101e414419e65624b3190cda37401939c27a
|
"""Package initializer: re-export the core turbulence API and load the
model submodules so ``turbulence.vonkarman`` / ``turbulence.gaussian``
are importable attributes of the package."""
from .turbulence import *
# Relative imports keep the package self-contained; the original used
# absolute ``import turbulence.xxx``, which binds the same attributes but
# breaks if the package is renamed or vendored.
from . import vonkarman
from . import gaussian
|
antiface/turbulence
|
turbulence/__init__.py
|
Python
|
bsd-2-clause
| 111
|
[
"Gaussian"
] |
103f395aa56db1e89b5e0699204c3682368b4bb22934b49484f6c39ae68ffdff
|
# Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" Methods specific for Active Directory. """
import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class adUserMixin(object):
    """Active Directory behaviour for user objects.

    NOTE(review): every method takes ``(cls, self)`` -- this is the tldap
    "methods" convention where the mixin's classmethods are invoked with
    the actual model instance passed explicitly; confirm against the
    tldap.methods dispatcher before restructuring.
    """

    @classmethod
    def __str__(cls, self):
        # Prefer the display name, falling back to the common name.
        return six.u("ADU:%s") % (self.displayName or self.cn)

    @classmethod
    def setup_from_master(cls, self, master):
        # Copy the AD account name from the master object, if it has one.
        self.sAMAccountName = getattr(master, "sAMAccountName", None)

    @classmethod
    def pre_add(cls, self):
        # objectSid is assigned by AD itself; it must not be preset.
        assert self.objectSid is None
        if self.userAccountControl is None:
            # 512 = NORMAL_ACCOUNT in the userAccountControl bit field.
            self.userAccountControl = 512
        if self.sAMAccountName is None:
            self.sAMAccountName = self.uid
        # we can't set the primary group on initial creation, set this later
        self.tmp_primary_group = self.primary_group.get_obj()
        self.primary_group = None

    @classmethod
    def post_add(cls, self):
        # AD sets this automagically
        # 513 = "Domain Users", the default primary group id in AD.
        using = self._alias
        self._db_values[using]["primaryGroupID"] = [513, ]
        # set our desired primary group
        self.secondary_groups.add(self.tmp_primary_group)
        self.primary_group = self.tmp_primary_group
        self.save()

    @classmethod
    def is_locked(cls, self):
        # Bit 0x2 of userAccountControl is ACCOUNTDISABLE.
        return self.userAccountControl & 0x2

    @classmethod
    def lock(cls, self):
        self.userAccountControl = self.userAccountControl | 0x2

    @classmethod
    def unlock(cls, self):
        # Clear the 0x2 (disable) bit while leaving all other flags intact.
        self.userAccountControl = self.userAccountControl & 0xFFFFFFFD

    @classmethod
    def change_password(cls, self, password):
        # AD uses unicodePwd (quoted) instead of the generic userPassword.
        # NOTE(review): AD normally expects unicodePwd as a UTF-16-LE encoded
        # quoted string; presumably the tldap field layer handles the
        # encoding -- confirm before relying on this.
        self.userPassword = None
        self.unicodePwd = '"' + password + '"'
        self.force_replace.add('unicodePwd')
@python_2_unicode_compatible
class adGroupMixin(object):
    """Active Directory behaviour for group objects.

    NOTE(review): uses the same tldap ``(cls, self)`` classmethod calling
    convention as the user mixin in this module.
    """

    @classmethod
    def __str__(cls, self):
        # Prefer the display name, falling back to the common name.
        return six.u("%s") % (self.displayName or self.cn)

    @classmethod
    def pre_save(cls, self):
        # Default the display name to the common name when unset.
        if self.displayName is None:
            self.displayName = self.cn
|
brianmay/python-tldap-debian
|
tldap/methods/ad.py
|
Python
|
gpl-3.0
| 2,688
|
[
"Brian"
] |
c179fcc519fbb564a617e315baee5a1e5082bd5ab5ae1bf1e463f5b1c7b7e7b1
|
# mako/_ast_util.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
# Lookup tables mapping _ast operator node classes to their source-code
# spelling; consumed by SourceGenerator when emitting expressions.
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}
BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}
CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}
UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}
# Convenience union of all four operator tables.
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Compile *expr* to an abstract syntax tree and return its root node."""
    flags = PyCF_ONLY_AST
    return compile(expr, filename, mode, flags)
def to_source(node, indent_with=' ' * 4):
    """Render an AST back into Python source code.

    Useful for debugging, especially with custom ASTs not produced by the
    parser itself. The generated source may be evaluable even when the tree
    is not directly compilable, because the AST carries extra data that is
    dropped during conversion. Each indentation level is rendered as
    *indent_with* (four spaces by default, per PEP 8).
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def dump(node):
    """Return a verbose string representation of *node* for debugging."""
    def _format(value):
        if isinstance(value, AST):
            pairs = ', '.join('%s=%s' % (name, _format(child))
                              for name, child in iter_fields(value))
            return '%s(%s)' % (value.__class__.__name__, pairs)
        if isinstance(value, list):
            return '[%s]' % ', '.join(_format(item) for item in value)
        return repr(value)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
def copy_location(new_node, old_node):
    """Copy ``lineno`` and ``col_offset`` from *old_node* to *new_node*
    where both nodes support the attribute, then return *new_node*."""
    for attr in ('lineno', 'col_offset'):
        if attr not in old_node._attributes:
            continue
        if attr not in new_node._attributes:
            continue
        if hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """Recursively fill in missing ``lineno``/``col_offset`` attributes.

    The compiler aborts on nodes lacking location information; this helper
    copies the parent's line number and column offset down to any child
    that is missing them. Unlike `copy_location` it works recursively and
    never overwrites locations that are already present. Returns *node*.
    """
    def _propagate(current, lineno, col_offset):
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                # Existing location becomes the default for descendants.
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)
    _propagate(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes. This is useful to "move code" to a different location in a
    file.
    """
    # BUG FIX: the original iterated ``zip((node,), walk(node))``, which
    # yields a single *tuple* (not a node) and then stops, so no line number
    # was ever updated. ``walk`` already yields the root node first, so
    # iterating it directly visits every node exactly once.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
    """Yield ``(name, value)`` pairs for every field present on *node*."""
    # CPython 2.5 compat: some nodes carry no ``_fields`` at all.
    for name in getattr(node, '_fields', None) or ():
        try:
            yield name, getattr(node, name)
        except AttributeError:
            # Declared but unset fields are simply skipped.
            continue
def get_fields(node):
    """Return the existing fields of *node* as a dict (cf. `iter_fields`)."""
    return {name: value for name, value in iter_fields(node)}
def iter_child_nodes(node):
    """Yield every direct AST child of *node* (fields and list elements)."""
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_child_nodes(node):
    """Return the direct AST children of *node* as a list
    (cf. `iter_child_nodes`)."""
    return [child for child in iter_child_nodes(node)]
def get_compile_mode(node):
    """
    Get the mode for `compile` of a given node. If the node is not a `mod`
    node (`Expression`, `Module` etc.) a `TypeError` is thrown.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # BUG FIX: the original default was 'expr', which is not a valid
    # compile() mode -- plain modules must be compiled with 'exec'.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """Return the docstring of *node*, or None when it has none.

    Raises `TypeError` for node types that cannot carry a docstring.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    first = node.body[0] if node.body else None
    if isinstance(first, Str):
        return first.s
    return None
def walk(node):
    """Yield *node* and every descendant in breadth-first order.

    Useful when nodes are to be modified in place and neither context nor
    ordering matters.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Walk an abstract syntax tree, dispatching each node to a visitor
    method and forwarding that method's return value from `visit`.

    Dispatch is by name: a node of class ``TryFinally`` goes to
    ``visit_TryFinally``. Override `get_visitor` to change this scheme.
    When no matching method exists, `generic_visit` is used, which simply
    descends into the children.

    This class is read-only with respect to the tree; to replace or remove
    nodes during traversal use `NodeTransformer` instead.
    """

    def get_visitor(self, node):
        """Return the visitor method for *node*'s type, or None when the
        generic fallback should be used."""
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node):
        """Dispatch *node* to its visitor, falling back to `generic_visit`."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node)
        return visitor(node)

    def generic_visit(self, node):
        """Visit every AST child of *node*; used when no specific visitor
        method exists."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.
    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.
    Here an example transformer that rewrites all `foo` to `data['foo']`::
        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)
    Keep in mind that if the node you're operating on has child nodes
    you must either transform the child nodes yourself or call the generic
    visit function for the node first.
    Nodes that were part of a collection of statements (that applies to
    all statement nodes) may also return a list of nodes rather than just
    a single node.
    Usually you use the transformer like this::
        node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        # Rewrite each field in place, using visitor return values to
        # replace (non-None), remove (None) or splice (list) nodes.
        for field, old_value in iter_fields(node):
            # Re-fetch defensively; an earlier iteration may have mutated
            # the node, and a missing field yields None here.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # Visitor returned None: drop the node.
                            continue
                        elif not isinstance(value, AST):
                            # Visitor returned a list: splice it in.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # In-place slice assignment keeps external references valid.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
"""
This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with):
self.result = []
self.indent_with = indent_with
self.indentation = 0
self.new_lines = 0
def write(self, x):
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(x)
def newline(self, n=1):
self.new_lines = max(self.new_lines, n)
def body(self, statements):
self.new_line = True
self.indentation += 1
for stmt in statements:
self.visit(stmt)
self.indentation -= 1
def body_or_else(self, node):
self.body(node.body)
if node.orelse:
self.newline()
self.write('else:')
self.body(node.orelse)
def signature(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
padding = [None] * (len(node.args) - len(node.defaults))
for arg, default in zip(node.args, padding + node.defaults):
write_comma()
self.visit(arg)
if default is not None:
self.write('=')
self.visit(default)
if node.vararg is not None:
write_comma()
self.write('*' + node.vararg)
if node.kwarg is not None:
write_comma()
self.write('**' + node.kwarg)
def decorators(self, node):
for decorator in node.decorator_list:
self.newline()
self.write('@')
self.visit(decorator)
# Statements
def visit_Assign(self, node):
self.newline()
for idx, target in enumerate(node.targets):
if idx:
self.write(', ')
self.visit(target)
self.write(' = ')
self.visit(node.value)
def visit_AugAssign(self, node):
self.newline()
self.visit(node.target)
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
self.visit(node.value)
def visit_ImportFrom(self, node):
self.newline()
self.write('from %s%s import ' % ('.' * node.level, node.module))
for idx, item in enumerate(node.names):
if idx:
self.write(', ')
self.write(item)
def visit_Import(self, node):
self.newline()
for item in node.names:
self.write('import ')
self.visit(item)
def visit_Expr(self, node):
self.newline()
self.generic_visit(node)
def visit_FunctionDef(self, node):
self.newline(n=2)
self.decorators(node)
self.newline()
self.write('def %s(' % node.name)
self.signature(node.args)
self.write('):')
self.body(node.body)
def visit_ClassDef(self, node):
have_args = []
def paren_or_comma():
if have_args:
self.write(', ')
else:
have_args.append(True)
self.write('(')
self.newline(n=3)
self.decorators(node)
self.newline()
self.write('class %s' % node.name)
for base in node.bases:
paren_or_comma()
self.visit(base)
# XXX: the if here is used to keep this module compatible
# with python 2.6.
if hasattr(node, 'keywords'):
for keyword in node.keywords:
paren_or_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
paren_or_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
paren_or_comma()
self.write('**')
self.visit(node.kwargs)
self.write(have_args and '):' or ':')
self.body(node.body)
def visit_If(self, node):
self.newline()
self.write('if ')
self.visit(node.test)
self.write(':')
self.body(node.body)
while True:
else_ = node.orelse
if len(else_) == 1 and isinstance(else_[0], If):
node = else_[0]
self.newline()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.body(node.body)
else:
self.newline()
self.write('else:')
self.body(else_)
break
def visit_For(self, node):
self.newline()
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.body_or_else(node)
def visit_While(self, node):
self.newline()
self.write('while ')
self.visit(node.test)
self.write(':')
self.body_or_else(node)
def visit_With(self, node):
self.newline()
self.write('with ')
self.visit(node.context_expr)
if node.optional_vars is not None:
self.write(' as ')
self.visit(node.optional_vars)
self.write(':')
self.body(node.body)
def visit_Pass(self, node):
self.newline()
self.write('pass')
def visit_Print(self, node):
# XXX: python 2.6 only
self.newline()
self.write('print ')
want_comma = False
if node.dest is not None:
self.write(' >> ')
self.visit(node.dest)
want_comma = True
for value in node.values:
if want_comma:
self.write(', ')
self.visit(value)
want_comma = True
if not node.nl:
self.write(',')
def visit_Delete(self, node):
self.newline()
self.write('del ')
for idx, target in enumerate(node):
if idx:
self.write(', ')
self.visit(target)
def visit_TryExcept(self, node):
self.newline()
self.write('try:')
self.body(node.body)
for handler in node.handlers:
self.visit(handler)
def visit_TryFinally(self, node):
self.newline()
self.write('try:')
self.body(node.body)
self.newline()
self.write('finally:')
self.body(node.finalbody)
def visit_Global(self, node):
self.newline()
self.write('global ' + ', '.join(node.names))
def visit_Nonlocal(self, node):
self.newline()
self.write('nonlocal ' + ', '.join(node.names))
def visit_Return(self, node):
self.newline()
self.write('return ')
self.visit(node.value)
def visit_Break(self, node):
self.newline()
self.write('break')
def visit_Continue(self, node):
self.newline()
self.write('continue')
def visit_Raise(self, node):
# XXX: Python 2.6 / 3.0 compatibility
self.newline()
self.write('raise')
if hasattr(node, 'exc') and node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
elif hasattr(node, 'type') and node.type is not None:
self.visit(node.type)
if node.inst is not None:
self.write(', ')
self.visit(node.inst)
if node.tback is not None:
self.write(', ')
self.visit(node.tback)
# Expressions
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.' + node.attr)
def visit_Call(self, node):
want_comma = []
def write_comma():
if want_comma:
self.write(', ')
else:
want_comma.append(True)
self.visit(node.func)
self.write('(')
for arg in node.args:
write_comma()
self.visit(arg)
for keyword in node.keywords:
write_comma()
self.write(keyword.arg + '=')
self.visit(keyword.value)
if node.starargs is not None:
write_comma()
self.write('*')
self.visit(node.starargs)
if node.kwargs is not None:
write_comma()
self.write('**')
self.visit(node.kwargs)
self.write(')')
def visit_Name(self, node):
self.write(node.id)
def visit_Str(self, node):
self.write(repr(node.s))
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_Num(self, node):
self.write(repr(node.n))
def visit_Tuple(self, node):
self.write('(')
idx = -1
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(idx and ')' or ',)')
def sequence_visit(left, right):
def visit(self, node):
self.write(left)
for idx, item in enumerate(node.elts):
if idx:
self.write(', ')
self.visit(item)
self.write(right)
return visit
visit_List = sequence_visit('[', ']')
visit_Set = sequence_visit('{', '}')
del sequence_visit
def visit_Dict(self, node):
self.write('{')
for idx, (key, value) in enumerate(list(zip(node.keys, node.values))):
if idx:
self.write(', ')
self.visit(key)
self.write(': ')
self.visit(value)
self.write('}')
def visit_BinOp(self, node):
self.write('(')
self.visit(node.left)
self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
self.visit(node.right)
self.write(')')
def visit_BoolOp(self, node):
self.write('(')
for idx, value in enumerate(node.values):
if idx:
self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
self.visit(value)
self.write(')')
def visit_Compare(self, node):
self.write('(')
self.visit(node.left)
for op, right in zip(node.ops, node.comparators):
self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
self.visit(right)
self.write(')')
def visit_UnaryOp(self, node):
self.write('(')
op = UNARYOP_SYMBOLS[type(node.op)]
self.write(op)
if op == 'not':
self.write(' ')
self.visit(node.operand)
self.write(')')
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Slice(self, node):
if node.lower is not None:
self.visit(node.lower)
self.write(':')
if node.upper is not None:
self.visit(node.upper)
if node.step is not None:
self.write(':')
if not (isinstance(node.step, Name) and node.step.id == 'None'):
self.visit(node.step)
def visit_ExtSlice(self, node):
    """Emit a multi-dimensional slice: 'a:b, c:d, ...'."""
    # BUGFIX: the original iterated `node.dims` directly while unpacking
    # two values, which tuple-unpacks each slice node and fails; enumerate
    # supplies the index used to place the comma separators.
    for idx, item in enumerate(node.dims):
        if idx:
            self.write(', ')
        self.visit(item)
def visit_Yield(self, node):
    """Emit a yield expression, tolerating a bare 'yield'."""
    self.write('yield')
    # A value-less 'yield' has node.value is None; the original
    # unconditionally visited it and would crash on None.
    if node.value is not None:
        self.write(' ')
        self.visit(node.value)
def visit_Lambda(self, node):
    # 'lambda <args>: <body>' -- signature() renders the argument list.
    self.write('lambda ')
    self.signature(node.args)
    self.write(': ')
    self.visit(node.body)
def visit_Ellipsis(self, node):
    # The '...' literal is spelled out by name.
    self.write('Ellipsis')
def generator_visit(left, right):
    # Factory for comprehension visitors: delimiter, element expression,
    # then each ' for ... in ... [if ...]' clause (see visit_comprehension).
    def visit(self, node):
        self.write(left)
        self.visit(node.elt)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write(right)
    return visit
visit_ListComp = generator_visit('[', ']')
visit_GeneratorExp = generator_visit('(', ')')
visit_SetComp = generator_visit('{', '}')
del generator_visit  # keep the factory itself out of the class namespace
def visit_DictComp(self, node):
    # '{key: value for ... in ... [if ...]}'
    self.write('{')
    self.visit(node.key)
    self.write(': ')
    self.visit(node.value)
    for comprehension in node.generators:
        self.visit(comprehension)
    self.write('}')
def visit_IfExp(self, node):
    # Conditional expression: '<body> if <test> else <orelse>'.
    # NOTE(review): emitted without surrounding parentheses; nesting
    # relies on the parent visitor adding its own -- confirm callers.
    self.visit(node.body)
    self.write(' if ')
    self.visit(node.test)
    self.write(' else ')
    self.visit(node.orelse)
def visit_Starred(self, node):
    # '*expr' unpacking target/argument.
    self.write('*')
    self.visit(node.value)
def visit_Repr(self, node):
    # XXX: python 2.6 only
    # Backtick repr syntax: `expr`.
    self.write('`')
    self.visit(node.value)
    self.write('`')
# Helper Nodes
def visit_alias(self, node):
    # import/from aliases: 'name' or 'name as asname'.
    self.write(node.name)
    if node.asname is not None:
        self.write(' as ' + node.asname)
def visit_comprehension(self, node):
    """Emit one ' for <target> in <iter>' clause plus any ' if <cond>' filters."""
    self.write(' for ')
    self.visit(node.target)
    self.write(' in ')
    self.visit(node.iter)
    # Iterating an empty node.ifs is a no-op, so the original
    # 'if node.ifs:' guard was redundant and has been dropped.
    for if_ in node.ifs:
        self.write(' if ')
        self.visit(if_)
def visit_excepthandler(self, node):
    # 'except [Type [as name]]:' followed by the handler body.
    self.newline()
    self.write('except')
    if node.type is not None:
        self.write(' ')
        self.visit(node.type)
    if node.name is not None:
        self.write(' as ')
        # NOTE(review): on Python 3 ASTs node.name is a plain string, so
        # self.visit() would fail here; this path presumably expects a
        # Python 2 Name node -- confirm supported AST versions.
        self.visit(node.name)
    self.write(':')
    self.body(node.body)
|
OndinaHQ/Tracker
|
mako/_ast_util.py
|
Python
|
gpl-3.0
| 25,621
|
[
"VisIt"
] |
e910312eb10898b134d4987de59e52cdb2710a8db4dad7e7cd5bf67800b53fa0
|
"""
climatology3Spark.py
Compute a multi-epoch (multi-day) climatology from daily SST Level-3 grids.
Simple code to be run on Spark cluster, or using multi-core parallelism on single machine.
"""
import sys, os, urlparse, urllib, re, time
import numpy as N
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as M
from variables import getVariables, close
from cache import retrieveFile, CachePath
from split import fixedSplit, splitByNDays
from netCDF4 import Dataset, default_fillvals
from plotlib import imageMap, makeMovie
from spatialFilter import spatialFilter
from gaussInterp import gaussInterp # calls into Fortran version gaussInterp_f.so
#from gaussInterp_slow import gaussInterp_slow as gaussInterp # pure python, slow debuggable version
VERBOSE = 1  # nonzero enables progress/diagnostic prints to stderr

# Possible execution modes
ExecutionModes = ['multicore', 'spark']

# SST L3m 4.6km Metadata
# SST values are scaled integers in degrees Celsius, lon/lat is 8640 x 4320
# Variable = 'sst', Mask = 'qual_sst', Coordinates = ['lon', 'lat']

# Generate algorithmic name for N-day Climatology product
SSTClimatologyTemplate = 'SST.L3.Global.Clim.%(period)s.%(date)s.%(version)s.nc'  #??

# Simple mask and average functions to get us started, then add gaussian interpolation.
# MODIS L3 SST product, qual_sst is [-1, 2] - =0 is best data, can add =1 for better coverage
#def qcMask(var, mask): return N.ma.array(var, mask=N.ma.make_mask(mask == 0))
def qcMask(var, mask):
    """Return *var* as a masked array with every pixel whose quality flag
    in *mask* is nonzero masked out (flag 0 == best-quality data)."""
    good = (mask == 0)
    return N.ma.masked_where(~good, var)
#def qcMask(var, mask): return N.ma.masked_where(mask < 0, var)
def splitModisSst(seq, n):
    # Partition the granule URL sequence into N-day chunks, keying on the
    # day-of-year captured by the regex from the MODIS '<DOY>.L3m' filename.
    for chunk in splitByNDays(seq, n, re.compile(r'(...).L3m')):
        yield chunk
# Map of averaging strategy name -> implementation.
# NOTE(review): `mean` is referenced here but never defined or imported in
# this module, so importing the module as-is raises NameError -- confirm
# where the pixel-mean function is meant to come from.
AveragingFunctions = {'pixelMean': mean, 'gaussInterp': gaussInterp, 'spatialFilter': spatialFilter}

# Parameter dictionaries passed as `averagingConfig` to the averagers.
PixelMeanConfig = {'name': 'pixelMean'}
GaussInterpConfig = {'name': 'gaussInterp',
                     'latGrid': None, 'lonGrid': None,  # None means use input lat/lon grid
                     'wlat': 3, 'wlon': 3,
                     'slat': 0.15, 'slon': 0.15, 'stime': 1,
                     'vfactor': -0.6931, 'missingValue': default_fillvals['f4']}
GaussInterpConfig1a = {'name': 'gaussInterp',
                       'latGrid': None, 'lonGrid': None,  # None means use input lat/lon grid
                       'wlat': 0.30, 'wlon': 0.30,
                       'slat': 0.15, 'slon': 0.15, 'stime': 1,
                       'vfactor': -0.6931, 'missingValue': default_fillvals['f4']}
GaussInterpConfig1b = {'name': 'gaussInterp',
                       'latGrid': None, 'lonGrid': None,  # None means use input lat/lon grid
                       'wlat': 0.08, 'wlon': 0.08,
                       'slat': 0.15, 'slon': 0.15, 'stime': 1,
                       'vfactor': -0.6931, 'missingValue': default_fillvals['f4']}
GaussInterpConfig2 = {'name': 'gaussInterp',
                      'latGrid': (89.5, -89.5, -0.25), 'lonGrid': (-180., 179., 0.25),
                      'wlat': 2., 'wlon': 2.,
                      'slat': 0.15, 'slon': 0.15, 'stime': 1,
                      'vfactor': -0.6931, 'missingValue': default_fillvals['f4']}
# 3x3 convolution kernels for the spatialFilter averager.
FilterGaussian = [[1, 2, 1], [2, 4, 2], [1, 2, 1]]  # divide by 16
FilterLowPass = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]   # divide by 9
SpatialFilterConfig1 = {'name': 'spatialFilter', 'normalization': 16.,
                        'spatialFilter': N.array(FilterGaussian, dtype=N.int32),
                        'missingValue': default_fillvals['f4']}
SpatialFilterConfig2 = {'name': 'spatialFilter', 'normalization': 9.,
                        'spatialFilter': N.array(FilterLowPass, dtype=N.int32),
                        'missingValue': default_fillvals['f4']}
# Directory to cache retrieved files in
# NOTE(review): this rebinds the CachePath name imported from `cache` above.
CachePath = '~/cache'
def climByAveragingPeriods(urls,          # list of (daily) granule URLs for a long time period (e.g. a year)
              nEpochs,                    # compute a climatology for every N epochs (days) by 'averaging'
              nWindow,                    # number of epochs in window needed for averaging
              variable,                   # name of primary variable in file
              mask,                       # name of mask variable
              coordinates,                # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
              splitFn=splitModisSst,      # split function to use to partition the input URL list
              maskFn=qcMask,              # mask function to compute mask from mask variable
              averager='pixelMean',       # averaging function to use, one of ['pixelMean', 'gaussInterp', 'spatialFilter']
              averagingConfig={},         # dict of parameters to control the averaging function (e.g. gaussInterp)
              optimization='fortran',     # optimization mode (fortran or cython)
              mode='multicore',           # Map across time periods of N-days for concurrent work, executed by:
                                          # 'multicore' using pysparkling or 'spark' using Spark/Mesos cluster
              numNodes=1,                 # number of cluster nodes to use
              nWorkers=4,                 # number of parallel workers per node
              averagingFunctions=AveragingFunctions,  # dict of possible averaging functions
              legalModes=ExecutionModes,  # list of possible execution modes
              cachePath=CachePath         # directory to cache retrieved files in
             ):
    '''Compute a climatology every N days by applying a mask and averaging function.
    Writes the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
    ***Assumption: This routine assumes that the N grids will fit in memory.***
    '''
    # NOTE(review): `nNeighbors` is not a parameter and is not defined in
    # this scope, so the gaussInterp branch raises NameError -- it looks
    # like it should be passed in by the caller (main() passes it
    # positionally; see the note there). TODO confirm.
    if averagingConfig['name'] == 'gaussInterp':
        averagingConfig['wlat'] = nNeighbors
        averagingConfig['wlon'] = nNeighbors
    try:
        averageFn = averagingFunctions[averager]
    except:
        print >>sys.stderr, 'climatology: Error, Averaging function must be one of: %s' % str(averagingFunctions)
        sys.exit(1)
    # Partition the URL list into N-epoch chunks, one climatology per chunk.
    urlSplits = [s for s in splitFn(urls, nEpochs)]
    # NOTE(review): both execution branches are stubs (`pass`); the
    # climsContoured closure below is never invoked and `results` is never
    # assigned, so the final return raises NameError -- this function
    # appears to be unfinished. TODO confirm before use.
    if mode == 'multicore':
        pass
    elif mode == 'spark':
        pass

    def climsContoured(urls, plot=None, fillValue=default_fillvals['f4'], format='NETCDF4', cachePath=cachePath):
        # Worker: average one chunk of granules, write the NetCDF product,
        # and optionally plot it.
        n = len(urls)
        if VERBOSE: print >>sys.stderr, urls
        # NOTE(review): `climByAveraging` is not defined in this module
        # (only accumulateClim is) -- presumably renamed; confirm.
        var = climByAveraging(urls, variable, mask, coordinates, maskFn, averageFn, averagingConfig, optimization, cachePath)
        fn = os.path.split(urls[0])[1]
        inFile = os.path.join(cachePath, fn)
        method = averagingConfig['name']
        fn = os.path.splitext(fn)[0]
        day = fn[5:8]  # KLUDGE: day-of-year pulled from the filename
        nDays = int(var['time'][0])
        if 'wlat' in averagingConfig:
            wlat = averagingConfig['wlat']
        else:
            wlat = 1
        if int(wlat) == wlat:
            outFile = 'A%s.L3m_%dday_clim_sst_4km_%s_%dnbrs.nc' % (day, nDays, method, int(wlat))  # mark each file with first day in period
        else:
            outFile = 'A%s.L3m_%dday_clim_sst_4km_%s_%4.2fnbrs.nc' % (day, nDays, method, wlat)  # mark each file with first day in period
        outFile = writeOutNetcdfVars(var, variable, mask, coordinates, inFile, outFile, fillValue, format)
        if plot == 'contour':
            figFile = contourMap(var, variable, coordinates, n, outFile)
        elif plot == 'histogram':
#            figFile = histogram(var, variable, n, outFile)
            figFile = None
        else:
            figFile = None
        return (outFile, figFile)
    return results
def accumulateClim(urls,                  # list of granule URLs for a time period
         variable,                        # name of primary variable in file
         mask,                            # name of mask variable
         coordinates,                     # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
         maskFn=qcMask,                   # mask function to compute mask from mask variable
         averageFn=mean,                  # averaging function to use
                                          # NOTE(review): `mean` is undefined in this module -- see AveragingFunctions
         averagingConfig={},              # parameters to control averaging function (e.g. gaussInterp)
         optimization='fortran',          # optimization mode (fortran or cython)
         cachePath=CachePath
        ):
    '''Compute a climatology over N arrays by applying a mask and averaging function.
    Returns the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
    ***Assumption: This routine assumes that the N grids will fit in memory.***
    '''
    print >>sys.stderr, 'accumulateClim: Doing %s ...' % averagingConfig['name']
    varList = [variable, mask]
    for i, url in enumerate(urls):
        try:
            path = retrieveFile(url, cachePath)
            fn = os.path.split(path)[1]
            vtime = int(fn[5:8])  # KLUDGE: extract DOY from filename
        except:
            print >>sys.stderr, 'climByAveraging: Error, continuing without file %s' % url
            continue
        if path is None: continue
        try:
            print >>sys.stderr, 'Reading file %s ...' % path
            var, fh = getVariables(path, varList, arrayOnly=True, order='F', set_auto_mask=False)  # return dict of variable objects by name
        except:
            print >>sys.stderr, 'climByAveraging: Error, cannot read file %s' % path
            continue
        if i == 0:
            # First readable granule: size the accumulators from its grid.
            dtype = var[variable].dtype
            if 'int' in dtype.name: dtype = N.float32  # accumulate scaled-int data in float
            shape = var[variable].shape
            vsum = N.ma.empty(shape, dtype, order='F')
            vcount = N.ma.empty(shape, dtype, order='F')
            emptyVar = N.array(N.ma.masked_all(var[variable].shape, dtype), order='F')  # totally masked variable array for missing or bad file reads
            print >>sys.stderr, 'Read coordinates ...'
            var, fh = getVariables(path, coordinates, var, arrayOnly=True, order='F')  # read coordinate arrays and add to dict
        var[variable] = maskFn(var[variable], var[mask])  # apply quality mask variable to get numpy MA, turned off masking done by netCDF4 library
        # Echo variable range for sanity check
        vals = var[variable].compressed()
        print >>sys.stderr, 'Variable Range: min, max:', vals.min(), vals.max()
        if averagingConfig['name'] == 'pixelMean':
            vsum += var[variable]  # update accumulators
            # NOTE(review): `~var[mask]` is bitwise NOT of the raw quality
            # flags, not a boolean count of valid pixels -- verify intent.
            vcount += ~var[mask]
        elif averagingConfig['name'] == 'gaussInterp':
            # NOTE(review): `accum` is not defined anywhere in this module;
            # this branch raises NameError as written -- TODO confirm.
            var[variable] = accum
            c = averagingConfig
            latGrid = c['latGrid']; lonGrid = c['lonGrid']
            if latGrid is not None and lonGrid is not None:
                # Explicit output grid requested in the config.
                outlat = N.arange(latGrid[0], latGrid[1]+latGrid[2], latGrid[2], dtype=N.float32, order='F')
                outlon = N.arange(lonGrid[0], lonGrid[1]+lonGrid[2], lonGrid[2], dtype=N.float32, order='F')
            else:
                # Default: interpolate onto the input lat/lon grid.
                outlat = N.array(var[coordinates[1]], dtype=N.float32, order='F')
                outlon = N.array(var[coordinates[2]], dtype=N.float32, order='F')
            varNames = [variable] + coordinates
            start = time.time()
            avg, weight, status = \
                gaussInterp(var, varNames, outlat, outlon, c['wlat'], c['wlon'],
                            c['slat'], c['slon'], c['stime'], c['vfactor'], c['missingValue'],
                            VERBOSE, optimization)
            end = time.time()
            vcount = weight.astype(N.float32)
            vsum = avg
            print >>sys.stderr, 'gaussInterp execution time:', (end - start)
        elif averagingConfig['name'] == 'spatialFilter':
            # NOTE(review): same undefined `accum` problem as above.
            var[variable] = accum
            c = averagingConfig
            varNames = [variable] + coordinates
            start = time.time()
            avg, vcount, status = \
                spatialFilter(var, varNames, c['spatialFilter'], c['normalization'],
                              c['missingValue'], VERBOSE, optimization)
            vsum = avg
            end = time.time()
            print >>sys.stderr, 'spatialFilter execution time:', (end - start)
        close(fh)  # release the granule's file handle before the next one
    return (vcount, vsum)
def writeOutNetcdfVars(var, variable, mask, coordinates, inFile, outFile, fillValue=None, format='NETCDF4'):
    '''Construct output bundle of arrays with NetCDF dimensions and attributes.
    Output variables and attributes will have same names as the input file.
    '''
    # Output arrays are expected under 'out'-prefixed keys in `var`.
    din = Dataset(inFile, 'r')
    dout = Dataset(outFile, 'w', format=format)
    print >>sys.stderr, 'Writing %s ...' % outFile
    # Transfer global attributes from input file
    for a in din.ncattrs():
        dout.setncattr(a, din.getncattr(a))
    # Add dimensions and variables, copying data
    coordDim = [dout.createDimension(coord, var['out'+coord].shape[0]) for coord in coordinates]  # here output lon, lat, etc.
    for coord in coordinates:
        v = dout.createVariable(coord, var['out'+coord].dtype, (coord,))
        v[:] = var['out'+coord][:]
    primaryVar = dout.createVariable(variable, var['out'+variable].dtype, coordinates, fill_value=fillValue)
    primaryVar[:] = var['out'+variable][:]  # transfer array
    maskVar = dout.createVariable(mask, 'i1', coordinates)
    maskVar[:] = var['out'+mask].astype('i1')[:]
    # Transfer variable attributes from input file
    # NOTE(review): .iteritems() is Python 2 only -- this whole module uses
    # the Python 2 print statement, so that is consistent.
    for k,v in dout.variables.iteritems():
        for a in din.variables[k].ncattrs():
            # Skip packing attributes: the output data is already unpacked.
            if a == 'scale_factor' or a == 'add_offset' or a == '_FillValue': continue
            v.setncattr(a, din.variables[k].getncattr(a))
        if k == variable:
            try:
#                if fillValue == None: fillValue = din.variables[k].getncattr('_FillValue')  # total kludge
                if fillValue == None: fillValue = default_fillvals['f4']
#                print >>sys.stderr, default_fillvals
#                v.setncattr('_FillValue', fillValue)  # set proper _FillValue for climatology array
                v.setncattr('missing_value', fillValue)
                print >>sys.stderr, 'Setting missing_value for primary variable %s to %f' % (variable, fillValue)
            except:
                print >>sys.stderr, 'writeOutNetcdfVars: Warning, for variable %s no fill value specified or derivable from inputs.' % variable
    din.close()
    dout.close()
    return outFile
def contourMap(var, variable, coordinates, n, outFile):
    # Render the averaged grid as a fixed-scale global map PNG.
    # NOTE(review): the suffix says '_hist' even though this writes a
    # contour map -- probably copied from histogram(); confirm.
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    # TODO: Downscale variable array (SST) before contouring, matplotlib is TOO slow on large arrays
    vals = var[variable][:]
    # Fixed color scale, write file, turn off auto borders, set title, reverse lat direction so monotonically increasing??
    imageMap(var[coordinates[1]][:], var[coordinates[0]][:], var[variable][:],
             vmin=-2., vmax=45., outFile=figFile, autoBorders=False,
             title='%s %d-day Mean from %s' % (variable.upper(), n, outFile))
    print >>sys.stderr, 'Writing contour plot to %s' % figFile
    return figFile
def histogram(vals, variable, n, outFile):
    # Plot a histogram of the (already flattened) SST values and save a PNG
    # next to the NetCDF output file.
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    M.clf()
#    M.hist(vals, 47, (-2., 45.))
    M.hist(vals, 94)
    M.xlim(-5, 45)
    M.xlabel('SST in degrees Celsius')
    M.ylim(0, 300000)
    M.ylabel('Count')
    M.title('Histogram of %s %d-day Mean from %s' % (variable.upper(), n, outFile))
    M.show()
    print >>sys.stderr, 'Writing histogram plot to %s' % figFile
    M.savefig(figFile)
    return figFile
def dailyFile2date(path, offset=1):
    '''Convert YYYYDOY string in filename to date.'''
    fn = os.path.split(path)[1]
    # NOTE(review): year/doy are computed but unused; the function actually
    # returns fn[5:15] with dots turned into slashes, which only matches
    # the docstring for one particular filename layout -- confirm intent.
    year = int(fn[offset:offset+4])
    doy = int(fn[offset+5:offset+8])
    return fn[5:15].replace('.', '/')
def formatRegion(r):
    """Format lat/lon region specifier as string suitable for file name."""
    if isinstance(r, str):
        return r
    # Numeric bounds: turn minus signs into 'm' so the result is path-safe.
    parts = tuple(str(bound).replace('-', 'm') for bound in r)
    return 'region-%s-%sby%s-%s' % parts
def formatGrid(r):
    """Format lat/lon grid resolution specifier as string suitable for file name."""
    if isinstance(r, str):
        return r
    # Two-element resolution: '<dlat>by<dlon>'.
    return '%sby%s' % (r[0], r[1])
def main(args):
    # CLI driver: parse positional arguments, then dispatch on the chosen
    # averaging strategy. Usage (see command examples at end of file):
    #   nEpochs nWindow nNeighbors averager optimization mode nWorkers urlFile
    nEpochs = int(args[0])
    nWindow = int(args[1])
    nNeighbors = int(args[2])
    averager = args[3]
    optimization = args[4]
    mode = args[5]
    nWorkers = int(args[6])
    urlFile = args[7]
    urls = [s.strip() for s in open(urlFile, 'r')]
    # NOTE(review): climByAveragingPeriods has no `nNeighbors` parameter, so
    # these positional calls shift every later argument by one (nNeighbors
    # lands in `variable`, 'sst' in `mask`, ...) -- the callee and these
    # call sites disagree; confirm which signature is current.
    if averager == 'gaussInterp':
        results = climByAveragingPeriods(urls, nEpochs, nWindow, nNeighbors, 'sst', 'qual_sst', ['lat', 'lon'],
                                         averager=averager, optimization=optimization, averagingConfig=GaussInterpConfig,
                                         mode=mode, nWorkers=nWorkers)
    elif averager == 'spatialFilter':
        results = climByAveragingPeriods(urls, nEpochs, nWindow, nNeighbors, 'sst', 'qual_sst', ['lat', 'lon'],
                                         averager=averager, optimization=optimization, averagingConfig=SpatialFilterConfig1,
                                         mode=mode, nWorkers=nWorkers)
    elif averager == 'pixelMean':
        results = climByAveragingPeriods(urls, nEpochs, nWindow, nNeighbors, 'sst', 'qual_sst', ['lat', 'lon'],
                                         averager=averager, optimization=optimization, averagingConfig=PixelMeanConfig,
                                         mode=mode, nWorkers=nWorkers)
    else:
        print >>sys.stderr, 'climatology2: Error, averager must be one of', AveragingFunctions.keys()
        sys.exit(1)
    # If plots were produced, stitch them into a movie.
    if results[0][1] is not None:
        makeMovie([r[1] for r in results], 'clim.mpg')
    return results
if __name__ == '__main__':
    # CLI entry point: echo the results list returned by main().
    print main(sys.argv[1:])

# Old Tests:
# python climatology2.py 5 5 0 pixelMean fortran sequential 1 urls_sst_10days.txt
# python climatology2.py 5 5 3 gaussInterp fortran sequential 1 urls_sst_10days.txt
# python climatology2.py 5 5 0 pixelMean fortran sequential 1 urls_sst_40days.txt
# python climatology2.py 5 5 0 pixelMean fortran multicore 8 urls_sst_40days.txt
# python climatology2.py 5 5 3 gaussInterp fortran multicore 8 urls_sst_40days.txt

# Old Production:
# python climatology2.py 5 5 0 pixelMean fortran multicore 16 urls_sst_2015.txt >& log &
# python climatology2.py 10 10 0 pixelMean fortran multicore 16 urls_sst_2015.txt >& log &
# python climatology2.py 5 5 3 gaussInterp fortran multicore 16 urls_sst_2015.txt >& log &

# Tests:
# python climatology2.py 5 5 0 pixelMean fortran sequential 1 urls_sst_daynight_5days_sorted.txt
# python climatology2.py 5 5 0 pixelMean fortran multicore 4 urls_sst_daynight_5days_sorted.txt
# python climatology2.py 5 5 3 gaussInterp fortran sequential 1 urls_sst_daynight_5days_sorted.txt
# python climatology2.py 5 5 3 gaussInterp fortran multicore 4 urls_sst_daynight_5days_sorted.txt
# python climatology2.py 5 7 1 spatialFilter fortran sequential 1 urls_sst_daynight_5days_sorted.txt

# Test number of neighbors needed:
# python climatology2.py 5 7 3 gaussInterp fortran multicore 4 urls_sst_daynight_20days_sorted.txt

# Production:
# python climatology2.py 5 7 0 pixelMean fortran multicore 4 urls_sst_daynight_2003_2015_sorted.txt
# python climatology2.py 5 7 3 gaussInterp fortran sequential 1 urls_sst_daynight_2003_2015_sorted.txt
# python climatology2.py 5 7 3 gaussInterp fortran multicore 4 urls_sst_daynight_2003_2015_sorted.txt
# python climatology2.py 5 7 1 spatialFilter fortran multicore 4 urls_sst_daynight_2003_2015_sorted.txt
|
dataplumber/nexus
|
climatology/clim/climatology3Spark.py
|
Python
|
apache-2.0
| 19,840
|
[
"Gaussian",
"NetCDF"
] |
095b50c0467c27d5ceea5cc6466dd4a8a83f872352c5642cc0cfd975bf42a792
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2019 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
"""
Generating Densities from Trajectories --- :mod:`pmda.density`
===============================================================
This module contains parallel versions of analysis tasks in
:mod:`MDAnalysis.analysis.density`.
.. autoclass:: DensityAnalysis
:members:
:inherited-members:
See Also
--------
MDAnalysis.analysis.density.density_from_Universe
"""
from __future__ import absolute_import
import numpy as np
from MDAnalysis.lib.util import fixedwidth_bins
from MDAnalysis.analysis import density as serial_density
try:
# MDAnalysis < 2.0.0
from MDAnalysis.analysis.density import _set_user_grid as set_user_grid
except ImportError:
# MDAnalysis >= 2.0.0
# pylint: disable=protected-access
set_user_grid = serial_density.DensityAnalysis._set_user_grid
from .parallel import ParallelAnalysisBase
class DensityAnalysis(ParallelAnalysisBase):
    r"""Parallel density analysis.

    The trajectory is read, frame by frame, and the atoms selected with
    `atomselection` are histogrammed on a grid with spacing `delta`.

    Parameters
    ----------
    atomgroup : AtomGroup
        Group of atoms (such as all the water oxygen atoms) being analyzed.
        If this is an updating AtomGroup then you need to set
        'atomselection' and also 'updating=True'.
    atomselection : str (optional)
        Selection string (MDAnalysis syntax) for the species to be analyzed
        ["name OH2"]
    delta : float (optional)
        Bin size for the density grid in ångström (same in x,y,z).
        Slice the trajectory as "trajectory[start:stop:step]"; default
        is to read the whole trajectory.
    metadata : dict (optional)
        `dict` of additional data to be saved with the object; the meta
        data are passed through as they are.
    padding : float (optional)
        Increase histogram dimensions by padding (on top of initial box
        size) in ångström. Padding is ignored when setting a user defined
        grid.
    updating : bool (optional)
        Should the selection of atoms be updated for every step? ["False"]
        - "True": atom selection is updated for each frame, can be slow
        - "False": atoms are only selected at the beginning
    parameters : dict (optional)
        `dict` with some special parameters for :class:`Density` (see docs)
    gridcenter : numpy ndarray, float32 (optional)
        3 element numpy array detailing the x, y and z coordinates of the
        center of a user defined grid box in ångström.
    xdim : float (optional)
        User defined x dimension box edge in ångström; ignored if
        gridcenter is "None".
    ydim : float (optional)
        User defined y dimension box edge in ångström; ignored if
        gridcenter is "None".
    zdim : float (optional)
        User defined z dimension box edge in ångström; ignored if
        gridcenter is "None".

    See Also
    --------
    MDAnalysis.analysis.density.density_from_Universe

    Notes
    -----
    By default, the `atomselection` is static, i.e., atoms are only selected
    once at the beginning. If you want *dynamically changing selections*
    (such as "name OW and around 4.0 (protein and not name H*)", i.e., the
    water oxygen atoms that are within 4 Å of the protein heavy atoms) then
    set ``updating=True``.

    For more details about density calculations, refer to [Awtrey2019]_.

    Examples
    --------
    A common use case is to analyze the solvent density around a protein of
    interest. The density is calculated with :class:`DensityAnalysis` in the
    fixed coordinate system of the simulation unit cell. It is therefore
    necessary to orient and fix the protein with respect to the box coordinate
    system. In practice this means centering and superimposing the protein,
    frame by frame, on a reference structure and translating and rotating all
    other components of the simulation with the protein. In this way, the
    solvent will appear in the reference frame of the protein.

    An input trajectory must

    1. have been centered on the protein of interest;
    2. have all molecules made whole that have been broken across periodic
       boundaries [#pbc]_;
    3. have the solvent molecules remapped so that they are closest to the
       solute (this is important when using triclinic unit cells such as
       a dodecahedron or a truncated octahedron) [#pbc]_;
    4. have a fixed frame of reference; for instance, by superimposing a
       protein on a reference structure so that one can study the solvent
       density around it [#fit]_.

    To generate the density of water molecules around a protein (assuming that
    the trajectory is already appropriately treated for periodic boundary
    artifacts and is suitably superimposed to provide a fixed reference frame)
    [#testraj]_, first create the :class:`DensityAnalysis` object by
    supplying an AtomGroup, then use the :meth:`run` method::

        from pmda.density import DensityAnalysis
        U = Universe(TPR, XTC)
        ow = U.select_atoms("name OW")
        D = DensityAnalysis(ow, delta=1.0)
        D.run()
        D.convert_density('TIP4P')

    The positions of all water oxygens are histogrammed on a grid with spacing
    *delta* = 1 Å. Initially the density is measured in inverse cubic
    angstroms. With the :meth:`Density.convert_density` method,
    the units of measurement are changed. In the example we are now measuring
    the density relative to the literature value of the TIP4P water model at
    ambient conditions (see the values in :data:`MDAnalysis.units.water` for
    details). In particular, the density is stored as a NumPy array in
    :attr:`grid`, which can be processed in any manner. Results are available
    through the :attr:`density` attribute, which has the :attr:`grid`
    attribute that contains the histogrammed density data.

    :attr:`DensityAnalysis.density` is a :class:`gridData.core.Grid` object.
    In particular, its contents can be `exported to different formats
    <https://www.mdanalysis.org/GridDataFormats/gridData/formats.html>`_.
    For example, to `write a DX file
    <https://www.mdanalysis.org/GridDataFormats/gridData/basic.html#writing-out-data>`_
    ``water.dx`` that can be read with VMD, PyMOL, or Chimera::

        D.density.export("water.dx", type="double")

    Basic use for creating a water density (just using the water oxygen
    atoms "OW")::

        D = DensityAnalysis(universe.atoms, atomselection='name OW')

    If you are only interested in water within a certain region, e.g., within
    a vicinity around a binding site, you can use a selection that updates
    every step by setting the `updating` keyword argument::

        atomselection = 'name OW and around 5 (resid 156 157 305)'
        D_site = DensityAnalysis(universe.atoms, atomselection=atomselection,
                                 updating=True)

    If you are interested in explicitly setting a grid box of a given edge
    size and origin, you can use the gridcenter and x/y/zdim arguments.
    For example to plot the density of waters within 5 Å of a ligand (in this
    case the ligand has been assigned the residue name "LIG") in a cubic grid
    with 20 Å edges which is centered on the center of mass (COM) of the
    ligand::

        # Create a selection based on the ligand
        ligand_selection = universe.select_atoms("resname LIG")

        # Extract the COM of the ligand
        ligand_COM = ligand_selection.center_of_mass()

        # Create a density of waters on a cubic grid centered on the ligand COM
        # In this case, we update the atom selection as shown above.
        D_water = DensityAnalysis(universe.atoms, delta=1.0,
                                  atomselection='name OW around 5 resname LIG',
                                  updating=True,
                                  gridcenter=ligand_COM,
                                  xdim=20, ydim=20, zdim=20)

    (It should be noted that the `padding` keyword is not used when a user
    defined grid is assigned).

    .. rubric:: Footnotes

    .. [#pbc] Making molecules whole can be accomplished with the
              :meth:`MDAnalysis.core.groups.AtomGroup.wrap` of
              :attr:`Universe.atoms` (use ``compound="fragments"``).
              When using, for instance, the Gromacs_ command `gmx trjconv`_::

                  gmx trjconv -pbc mol -center -ur compact

              one can make the molecules whole ``-pbc whole``, center it on a
              group (``-center``), and also pack all molecules in a compact
              unitcell representation, which can be useful for density
              generation.

    .. [#fit] Superposition can be performed with
              :class:`MDAnalysis.analysis.align.AlignTraj`.
              The Gromacs_ command `gmx trjconv`_::

                  gmx trjconv -fit rot+trans

              will also accomplish such a superposition. Note that the fitting
              has to be done in a *separate* step from the treatment of the
              periodic boundaries [#pbc]_.

    .. [#testraj] Note that the trajectory in the example (`XTC`) is *not*
                  properly made whole and fitted to a reference structure;
                  these steps were omitted to clearly show the steps necessary
                  for the actual density calculation.

    .. Links
    .. -----
    .. _OpenDX: http://www.opendx.org/
    .. _VMD: http://www.ks.uiuc.edu/Research/vmd/
    .. _Chimera: https://www.cgl.ucsf.edu/chimera/
    .. _PyMOL: http://www.pymol.org/
    .. _Gromacs: http://www.gromacs.org
    .. _`gmx trjconv`: http://manual.gromacs.org/programs/gmx-trjconv.html

    .. versionadded:: 0.3.0
    """
    def __init__(self, atomgroup, delta=1.0, atomselection=None,
                 metadata=None, padding=2.0, updating=False,
                 parameters=None, gridcenter=None, xdim=None, ydim=None,
                 zdim=None):
        u = atomgroup.universe
        super(DensityAnalysis, self).__init__(u, (atomgroup, ))
        self._atomgroup = atomgroup
        self._delta = delta
        self._atomselection = atomselection
        self._metadata = metadata
        self._padding = padding
        self._updating = updating
        self._parameters = parameters
        self._gridcenter = gridcenter
        self._xdim = xdim
        self._ydim = ydim
        self._zdim = zdim
        self._trajectory = u.trajectory
        # An updating selection needs the selection string; a static one
        # must not receive one (it would silently be ignored otherwise).
        if updating and atomselection is None:
            raise ValueError("updating=True requires a atomselection string")
        elif not updating and atomselection is not None:
            raise ValueError("""With updating=False, the atomselection='{}' is
                             not used and should be None""".format(atomselection))

    def _prepare(self):
        # Determine the histogram extent from the first frame's coordinates
        # (or the user-supplied grid), then allocate an all-zero grid.
        coord = self.current_coordinates(self._atomgroup, self._atomselection,
                                         self._updating)
        if self._gridcenter is not None:
            # Generate a copy of smin/smax from coords to later check if the
            # defined box might be too small for the selection
            smin = np.min(coord, axis=0)
            smax = np.max(coord, axis=0)
            # Overwrite smin/smax with user defined values
            smin, smax = set_user_grid(
                self._gridcenter, self._xdim,
                self._ydim, self._zdim, smin, smax)
        else:
            # Make the box bigger to avoid as much as possible 'outlier'. This
            # is important if the sites are defined at a high density: in this
            # case the bulk regions don't have to be close to 1 * n0 but can
            # be less. It's much more difficult to deal with outliers. The
            # ideal solution would use images: implement 'looking across the
            # periodic boundaries' but that gets complicated when the box
            # rotates due to RMS fitting.
            smin = np.min(coord, axis=0) - self._padding
            smax = np.max(coord, axis=0) + self._padding
        BINS = fixedwidth_bins(self._delta, smin, smax)
        arange = np.transpose(np.vstack((BINS['min'], BINS['max'])))
        bins = BINS['Nbins']
        # create empty grid with the right dimensions (and get the edges)
        # BUGFIX: the `normed=False` keyword was removed in NumPy >= 1.24;
        # omitting it is equivalent (density scaling stays off by default).
        grid, edges = np.histogramdd(np.zeros((1, 3)), bins=bins,
                                     range=arange)
        grid *= 0.0
        self._grid = grid
        self._edges = edges
        self._arange = arange
        self._bins = bins

    def _single_frame(self, ts, atomgroups):
        # Histogram of this frame's selected coordinates on the shared grid
        # layout; raw counts only (see BUGFIX note in _prepare re: normed).
        coord = self.current_coordinates(atomgroups[0], self._atomselection,
                                         self._updating)
        result = np.histogramdd(coord, bins=self._bins, range=self._arange)
        return result[0]

    def _conclude(self):
        # Average the per-block count grids over all frames, then wrap the
        # result in a Density object (converted to a true density in-place).
        self._grid = self._results[:].sum(axis=0)
        self._grid /= float(self.n_frames)
        metadata = self._metadata if self._metadata is not None else {}
        metadata['psf'] = self._atomgroup.universe.filename
        metadata['dcd'] = self._trajectory.filename
        metadata['atomselection'] = self._atomselection
        metadata['n_frames'] = self.n_frames
        metadata['totaltime'] = self._atomgroup.universe.trajectory.totaltime
        metadata['dt'] = self._trajectory.dt
        metadata['time_unit'] = "ps"
        parameters = self._parameters if self._parameters is not None else {}
        parameters['isDensity'] = False  # must override
        density = serial_density.Density(
            grid=self._grid, edges=self._edges,
            units={'length': "Angstrom"},
            parameters=parameters,
            metadata=metadata)
        density.make_density()
        self.density = density

    @staticmethod
    def _reduce(res, result_single_frame):
        """ 'accumulate' action for a time series"""
        if isinstance(res, list) and len(res) == 0:
            res = result_single_frame
        else:
            res += result_single_frame
        return res

    @staticmethod
    def current_coordinates(atomgroup, atomselection, updating):
        """Retrieves the current coordinates of all atoms in the chosen atom
        selection.
        Note: currently required to allow for updating selections"""
        ag = atomgroup if not updating else atomgroup.select_atoms(
            atomselection)
        return ag.positions
|
MDAnalysis/pmda
|
pmda/density.py
|
Python
|
gpl-2.0
| 14,957
|
[
"Gromacs",
"MDAnalysis",
"PyMOL",
"VMD"
] |
95a89543b3b098d81d3dae3de4773f08942d00435081abb814146e83fc506ab7
|
# Smoke-test / demo script for the pyedda bindings: exercises the
# DistributionModeler plus the univariate Histogram, Gaussian and GMM
# distribution types, printing their statistics for visual inspection.
#make print in python 2, 3 compatible
from __future__ import print_function
import numpy as np
import pyedda as edda

#Distribution modeler
# Model slot 0 as a 2-component GMM and slot 1 as a 32-bin histogram.
dm = edda.DistributionModeler(2)
dummy_data = np.random.rand(100)
dm.computeGMM(dummy_data, 2, 0)
dm.computeHistogram(dummy_data, 32, 1)
dm.printDistr()
r1, r2, r3, r4 = dm.getDistr(1)
print(r1)
print(r2)
print(r3)
print(r4)
print()

#Univariate histogram
print("//////////Univariate Histogram///////")
dummy_data = np.random.rand(100)
hist = edda.Histogram(dummy_data, 10)
print("hist.getMean():", hist.getMean())
# Copy-constructor round trip: the copy should report the same moments.
histCopy = edda.Histogram(hist)
print("histCopy.getMean():", histCopy.getMean())
print("hist.getVar():", hist.getVar())
print("hist.getPdf(0.5):", hist.getPdf(0.5))
print("hist.getCdf(1.0):", hist.getCdf(1.0))
print("hist.getSample():", hist.getSample())
print("Output histogram:")
hist.output()
print("hist.getBins():", hist.getBins())
print("hist.getMaxValue():", hist.getMaxValue())
print("hist.getMinValue():", hist.getMinValue())
print("hist.getBinValue(3):", hist.getBinValue(3))
print()

#Univariate Gaussian
print("//////////Univariate Gaussian///////")
dummy_data = np.random.rand(100)
# Gaussian(mean, variance) per the accessor calls below.
gaussian = edda.Gaussian(100, 20)
print("gaussian.getMean():", gaussian.getMean())
print("gaussian.getVar():", gaussian.getVar())
print("gaussian.getPdf(105):", gaussian.getPdf(105))
print("gaussian.getSample():", gaussian.getSample())
print("gaussian.getCdf(105):", gaussian.getCdf(105))
print("gaussian.getCdfPrecise():", gaussian.getCdfPrecise(105))
print("Output gaussian:")
gaussian.output()
print()

#Univariate GMM
print("//////////Univariate GMM///////")
gmm = edda.GMM(4)
# Each row is one component: [weight, mean, variance].
models = np.array([[0.2, 10, 2], [0.5, 15, 4], [0.1, 8, 7], [0.2, 6, 9]])
gmm = edda.GMM(models)
print("gmm.getNumComponents():", gmm.getNumComponents())
print("gmm.getMean():", gmm.getMean())
print("gmm.getVar():", gmm.getVar())
print("gmm.getPdf(10):", gmm.getPdf(10))
print("gmm.getSample():", gmm.getSample())
print("gaussian.getCdf(10):", gmm.getCdf(10))
print("Output GMM:")
gmm.output()
print("Model and output a gmm by trainig samples:")
dummy_data = np.random.rand(100)
modelGmm = edda.GMM(dummy_data, 3)
modelGmm.output()
print()
|
subhashis/edda
|
pyEdda/test.py
|
Python
|
mit
| 2,233
|
[
"Gaussian"
] |
72eefa5037f94680cff4ad5a62bc33c6348ce31e6d072169833147207f50edd1
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import os
from io import StringIO
import MDAnalysis as mda
import numpy as np
import pytest
from MDAnalysisTests import make_Universe
from MDAnalysisTests.coordinates.base import _SingleFrameReader
from MDAnalysisTests.coordinates.reference import (RefAdKSmall,
RefAdK)
from MDAnalysisTests.datafiles import (PDB, PDB_small, PDB_multiframe,
PDB_full,
XPDB_small, PSF, DCD, CONECT, CRD,
INC_PDB, PDB_xlserial, ALIGN, ENT,
PDB_cm, PDB_cm_gz, PDB_cm_bz2,
PDB_mc, PDB_mc_gz, PDB_mc_bz2,
PDB_CRYOEM_BOX, MMTF_NOCRYST,
PDB_HOLE, mol2_molecule)
from numpy.testing import (assert_equal,
assert_array_almost_equal,
assert_almost_equal)
IGNORE_NO_INFORMATION_WARNING = 'ignore:Found no information for attr:UserWarning'
@pytest.fixture
def dummy_universe_without_elements():
    """A tiny 5-atom Universe carrying names/resnames and a cubic box, but
    deliberately no element information."""
    atom_names = ['C1', 'O2', 'N3', 'S4', 'NA']
    universe = make_Universe(size=(len(atom_names), 1, 1), trajectory=True)
    universe.add_TopologyAttr('resnames', ['RES'])
    universe.add_TopologyAttr('names', atom_names)
    universe.dimensions = [42, 42, 42, 90, 90, 90]
    return universe
class TestPDBReader(_SingleFrameReader):
    """Single-frame PDB reading checked against the small AdK reference."""
    __test__ = True

    def setUp(self):
        # can lead to race conditions when testing in parallel
        self.universe = mda.Universe(RefAdKSmall.filename)
        # 3 decimals in PDB spec
        # http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
        self.prec = 3

    def test_uses_PDBReader(self):
        from MDAnalysis.coordinates.PDB import PDBReader
        traj = self.universe.trajectory
        assert isinstance(traj, PDBReader), "failed to choose PDBReader"

    def test_dimensions(self):
        box = self.universe.trajectory.ts.dimensions
        assert_almost_equal(
            box, RefAdKSmall.ref_unitcell, self.prec,
            "PDBReader failed to get unitcell dimensions from CRYST1")

    def test_ENT(self):
        from MDAnalysis.coordinates.PDB import PDBReader
        self.universe = mda.Universe(ENT)
        traj = self.universe.trajectory
        assert isinstance(traj, PDBReader), "failed to choose PDBReader"
class TestPDBMetadata(object):
    """Check that HEADER, TITLE, COMPND and REMARK records of PDB_full are
    parsed and exposed by the reader.

    The class attributes below are the expected reference values for the
    4E43 entry; only the first ``nmax_remarks`` REMARK lines are compared
    verbatim.
    """
    header = 'HYDROLASE 11-MAR-12 4E43'
    title = ['HIV PROTEASE (PR) DIMER WITH ACETATE IN EXO SITE AND PEPTIDE '
             'IN ACTIVE', '2 SITE']
    compnd = ['MOL_ID: 1;',
              '2 MOLECULE: PROTEASE;',
              '3 CHAIN: A, B;',
              '4 ENGINEERED: YES;',
              '5 MUTATION: YES;',
              '6 MOL_ID: 2;',
              '7 MOLECULE: RANDOM PEPTIDE;',
              '8 CHAIN: C;',
              '9 ENGINEERED: YES;',
              '10 OTHER_DETAILS: UNKNOWN IMPURITY', ]
    num_remarks = 333
    # only first 5 remarks for comparison
    nmax_remarks = 5
    remarks = [
        '2',
        '2 RESOLUTION. 1.54 ANGSTROMS.',
        '3',
        '3 REFINEMENT.',
        '3 PROGRAM : REFMAC 5.5.0110',
    ]
    @staticmethod
    @pytest.fixture(scope='class')
    def universe():
        # Shared read-only universe; class scope avoids re-parsing per test.
        return mda.Universe(PDB_full)
    def test_HEADER(self, universe):
        # HEADER record is exposed as a single string on the reader.
        assert_equal(universe.trajectory.header,
                     self.header,
                     err_msg="HEADER record not correctly parsed")
    def test_TITLE(self, universe):
        # TITLE may span several lines; compare count first, then each line.
        try:
            title = universe.trajectory.title
        except AttributeError:
            raise AssertionError("Reader does not have a 'title' attribute.")
        assert_equal(len(title),
                     len(self.title),
                     err_msg="TITLE does not contain same number of lines")
        for lineno, (parsed, reference) in enumerate(zip(title, self.title),
                                                     start=1):
            assert_equal(parsed,
                         reference,
                         err_msg="TITLE line {0} do not match".format(lineno))
    def test_COMPND(self, universe):
        # COMPND continuation lines must be preserved in order.
        try:
            compound = universe.trajectory.compound
        except AttributeError:
            raise AssertionError(
                "Reader does not have a 'compound' attribute.")
        assert_equal(len(compound),
                     len(self.compnd),
                     err_msg="COMPND does not contain same number of lines")
        for lineno, (parsed, reference) in enumerate(zip(compound,
                                                         self.compnd),
                                                     start=1):
            assert_equal(parsed,
                         reference,
                         err_msg="COMPND line {0} do not match".format(lineno))
    def test_REMARK(self, universe):
        # Full REMARK count must match; content is spot-checked on the
        # first nmax_remarks entries only.
        try:
            remarks = universe.trajectory.remarks
        except AttributeError:
            raise AssertionError("Reader does not have a 'remarks' attribute.")
        assert_equal(len(remarks),
                     self.num_remarks,
                     err_msg="REMARK does not contain same number of lines")
        # only look at the first 5 entries
        for lineno, (parsed, reference) in enumerate(
                zip(remarks[:self.nmax_remarks],
                    self.remarks[:self.nmax_remarks]),
                start=1):
            assert_equal(parsed,
                         reference,
                         err_msg="REMARK line {0} do not match".format(lineno))
class TestExtendedPDBReader(_SingleFrameReader):
    """Extended (XPDB) format reading, including 5-digit residue numbers."""
    __test__ = True

    def setUp(self):
        self.universe = mda.Universe(PDB_small,
                                     topology_format="XPDB",
                                     format="XPDB")
        # 3 decimals in PDB spec
        # http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
        self.prec = 3

    def test_long_resSeq(self):
        # five-digit resids only exist in the extended PDB format
        self.universe = mda.Universe(XPDB_small, topology_format="XPDB")
        selection = self.universe.select_atoms(
            'resid 1 or resid 10 or resid 100 or resid 1000 or resid 10000')
        assert_equal(selection[4].resid, 10000,
                     "can't read a five digit resid")
class TestPDBWriter(object):
    """Single-frame PDB writing: attribute defaults for missing topology
    information, coordinate range checks, multi-frame HEADER/TITLE/MODEL
    handling, record types, and the ``reindex`` keyword.

    Fix: ``test_no_reindex_bonds`` previously raised the non-existent name
    ``AssertError`` (a NameError at runtime) when no CONECT record was
    found; it now raises ``AssertionError`` with a corrected message.
    """
    # 3 decimals in PDB spec
    # http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
    prec = 3
    ext = ".pdb"
    @pytest.fixture
    def universe(self):
        return mda.Universe(PSF, PDB_small)
    @pytest.fixture
    def universe2(self):
        return mda.Universe(PSF, DCD)
    @pytest.fixture
    def universe3(self):
        return mda.Universe(PDB)
    @pytest.fixture
    def universe4(self):
        return mda.Universe(PDB_HOLE)
    @pytest.fixture
    def universe5(self):
        return mda.Universe(mol2_molecule)
    @pytest.fixture(params=[
        [PDB_CRYOEM_BOX, None],
        [MMTF_NOCRYST, None]
    ])
    def universe_and_expected_dims(self, request):
        """
        File with meaningless CRYST1 record and expected dimensions.
        """
        filein = request.param[0]
        expected_dims = request.param[1]
        return mda.Universe(filein), expected_dims
    @pytest.fixture
    def outfile(self, tmpdir):
        return str(tmpdir.mkdir("PDBWriter").join('primitive-pdb-writer' + self.ext))
    @pytest.fixture
    def u_no_ids(self):
        # The test universe does not have atom ids, but it has everything
        # else the PDB writer expects to avoid issuing warnings.
        universe = make_Universe(
            [
                'names', 'resids', 'resnames', 'altLocs',
                'segids', 'occupancies', 'tempfactors',
            ],
            trajectory=True,
        )
        universe.add_TopologyAttr('icodes', [' '] * len(universe.residues))
        universe.add_TopologyAttr('record_types', ['ATOM'] * len(universe.atoms))
        universe.dimensions = [10, 10, 10, 90, 90, 90]
        return universe
    @pytest.fixture
    def u_no_resnames(self):
        return make_Universe(['names', 'resids'], trajectory=True)
    @pytest.fixture
    def u_no_resids(self):
        return make_Universe(['names', 'resnames'], trajectory=True)
    @pytest.fixture
    def u_no_names(self):
        return make_Universe(['resids', 'resnames'], trajectory=True)
    def test_writer(self, universe, outfile):
        """Test writing from a single frame PDB file to a PDB file."""
        universe.atoms.write(outfile)
        u = mda.Universe(PSF, outfile)
        assert_almost_equal(u.atoms.positions,
                            universe.atoms.positions, self.prec,
                            err_msg="Writing PDB file with PDBWriter "
                            "does not reproduce original coordinates")
    def test_writer_no_resnames(self, u_no_resnames, outfile):
        u_no_resnames.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.array(['UNK'] * u_no_resnames.atoms.n_atoms)
        assert_equal(u.atoms.resnames, expected)
    def test_writer_no_resids(self, u_no_resids, outfile):
        u_no_resids.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.ones((25,))
        assert_equal(u.residues.resids, expected)
    def test_writer_no_atom_names(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.array(['X'] * u_no_names.atoms.n_atoms)
        assert_equal(u.atoms.names, expected)
    def test_writer_no_altlocs(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.array([''] * u_no_names.atoms.n_atoms)
        assert_equal(u.atoms.altLocs, expected)
    def test_writer_no_icodes(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.array([''] * u_no_names.atoms.n_atoms)
        assert_equal(u.atoms.icodes, expected)
    def test_writer_no_segids(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.array(['X'] * u_no_names.atoms.n_atoms)
        assert_equal([atom.segid for atom in u.atoms], expected)
    def test_writer_no_occupancies(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.ones(u_no_names.atoms.n_atoms)
        assert_equal(u.atoms.occupancies, expected)
    def test_writer_no_tempfactors(self, u_no_names, outfile):
        u_no_names.atoms.write(outfile)
        u = mda.Universe(outfile)
        expected = np.zeros(u_no_names.atoms.n_atoms)
        assert_equal(u.atoms.tempfactors, expected)
    def test_write_single_frame_Writer(self, universe2, outfile):
        """Test writing a single frame from a DCD trajectory to a PDB using
        MDAnalysis.Writer (Issue 105)"""
        u = universe2
        u.trajectory[50]
        with mda.Writer(outfile) as W:
            W.write(u.select_atoms('all'))
        u2 = mda.Universe(outfile)
        assert_equal(u2.trajectory.n_frames,
                     1,
                     err_msg="The number of frames should be 1.")
    def test_write_single_frame_AtomGroup(self, universe2, outfile):
        """Test writing a single frame from a DCD trajectory to a PDB using
        AtomGroup.write() (Issue 105)"""
        u = universe2
        u.trajectory[50]
        u.atoms.write(outfile)
        u2 = mda.Universe(PSF, outfile)
        assert_equal(u2.trajectory.n_frames,
                     1,
                     err_msg="Output PDB should only contain a single frame")
        assert_almost_equal(u2.atoms.positions, u.atoms.positions,
                            self.prec, err_msg="Written coordinates do not "
                            "agree with original coordinates from frame %d" %
                            u.trajectory.frame)
    def test_write_nodims(self, universe_and_expected_dims, outfile):
        """
        Test :code:`PDBWriter` for universe without cell dimensions.
        Notes
        -----
        Test fix for Issue #2679.
        """
        u, expected_dims = universe_and_expected_dims
        # See Issue #2698
        if expected_dims is None:
            assert u.dimensions is None
        else:
            assert np.allclose(u.dimensions, expected_dims)
        expected_msg = "Unit cell dimensions not found. CRYST1 record set to unitary values."
        with pytest.warns(UserWarning, match=expected_msg):
            u.atoms.write(outfile)
        with pytest.warns(UserWarning, match="Unit cell dimensions will be set to None."):
            uout = mda.Universe(outfile)
        assert uout.dimensions is None, "Problem with default box."
        assert_equal(
            uout.trajectory.n_frames, 1,
            err_msg="Output PDB should only contain a single frame"
        )
        assert_almost_equal(
            u.atoms.positions, uout.atoms.positions,
            self.prec,
            err_msg="Written coordinates do not "
            "agree with original coordinates from frame %d" %
            u.trajectory.frame
        )
    def test_check_coordinate_limits_min(self, universe, outfile):
        """Test that illegal PDB coordinates (x <= -999.9995 A) are caught
        with ValueError (Issue 57)"""
        # modify coordinates (universe needs to be a per-function fixture)
        u = universe
        u.atoms[2000].position = [0, -999.9995, 22.8]
        with pytest.raises(ValueError):
            u.atoms.write(outfile)
    def test_check_coordinate_limits_max(self, universe, outfile):
        """Test that illegal PDB coordinates (x > 9999.9995 A) are caught
        with ValueError (Issue 57)"""
        # modify coordinates (universe needs to be a per-function fixture)
        u = universe
        # OB: 9999.99951 is not caught by '<=' ?!?
        u.atoms[1000].position = [90.889, 9999.9996, 12.2]
        with pytest.raises(ValueError):
            u.atoms.write(outfile)
    def test_check_HEADER_TITLE_multiframe(self, universe2, outfile):
        """Check whether HEADER and TITLE are written just once in a multi-
        frame PDB file (Issue 741)"""
        u = universe2
        protein = u.select_atoms("protein and name CA")
        with mda.Writer(outfile, multiframe=True) as pdb:
            for ts in u.trajectory[:5]:
                pdb.write(protein)
        with open(outfile) as f:
            got_header = 0
            got_title = 0
            for line in f:
                if line.startswith('HEADER'):
                    got_header += 1
                    assert got_header <= 1, "There should be only one HEADER."
                elif line.startswith('TITLE'):
                    got_title += 1
                    assert got_title <= 1, "There should be only one TITLE."
    @pytest.mark.parametrize("startframe,maxframes",
                             [(0, 12), (9997, 12)])
    def test_check_MODEL_multiframe(self, universe2, outfile, startframe, maxframes):
        """Check whether MODEL number is in the right column (Issue #1950)"""
        u = universe2
        protein = u.select_atoms("protein and name CA")
        with mda.Writer(outfile, multiframe=True, start=startframe) as pdb:
            for ts in u.trajectory[:maxframes]:
                pdb.write(protein)
        def get_MODEL_lines(filename):
            with open(filename) as pdb:
                for line in pdb:
                    if line.startswith("MODEL"):
                        yield line
        MODEL_lines = list(get_MODEL_lines(outfile))
        assert len(MODEL_lines) == maxframes
        for model, line in enumerate(MODEL_lines, start=startframe+1):
            # test that only the right-most 4 digits are stored (rest must be space)
            # line[10:14] == '9999' or ' 1'
            # test appearance with white space
            assert line[5:14] == "{0:>9d}".format(int(str(model)[-4:]))
            # test number (only last 4 digits)
            assert int(line[10:14]) == model % 10000
    @pytest.mark.parametrize("bad_chainid",
                             ['@', '', 'AA'])
    def test_chainid_validated(self, universe3, outfile, bad_chainid):
        """
        Check that an atom's chainID is set to 'X' if the chainID
        does not confirm to standards (issue #2224)
        """
        default_id = 'X'
        u = universe3
        u.atoms.chainIDs = bad_chainid
        u.atoms.write(outfile)
        u_pdb = mda.Universe(outfile)
        assert_equal(u_pdb.segments.chainIDs[0][0], default_id)
    def test_stringio_outofrange(self, universe3):
        """
        Check that when StringIO is used, the correct out-of-range error for
        coordinates is raised (instead of failing trying to remove StringIO
        as a file).
        """
        u = universe3
        u.atoms.translate([-9999, -9999, -9999])
        outstring = StringIO()
        errmsg = "PDB files must have coordinate values between"
        with pytest.raises(ValueError, match=errmsg):
            with mda.coordinates.PDB.PDBWriter(outstring) as writer:
                writer.write(u.atoms)
    def test_hetatm_written(self, universe4, tmpdir, outfile):
        """
        Checks that HETATM record types are written.
        """
        u = universe4
        u_hetatms = u.select_atoms("resname ETA and record_type HETATM")
        assert_equal(len(u_hetatms), 8)
        u.atoms.write(outfile)
        written = mda.Universe(outfile)
        written_atoms = written.select_atoms("resname ETA and "
                                             "record_type HETATM")
        assert len(u_hetatms) == len(written_atoms), \
            "mismatched HETATM number"
        assert_almost_equal(u_hetatms.atoms.positions,
                            written_atoms.atoms.positions)
    def test_default_atom_record_type_written(self, universe5, tmpdir,
                                              outfile):
        """
        Checks that ATOM record types are written when there is no
        record_type attribute.
        """
        u = universe5
        expected_msg = ("Found no information for attr: "
                        "'record_types' Using default value of 'ATOM'")
        with pytest.warns(UserWarning, match=expected_msg):
            u.atoms.write(outfile)
        written = mda.Universe(outfile)
        assert len(u.atoms) == len(written.atoms), \
            "mismatched number of atoms"
        atms = written.select_atoms("record_type ATOM")
        assert len(atms.atoms) == len(u.atoms), \
            "mismatched ATOM number"
        hetatms = written.select_atoms("record_type HETATM")
        assert len(hetatms.atoms) == 0, "mismatched HETATM number"
    def test_abnormal_record_type(self, universe5, tmpdir, outfile):
        """
        Checks whether KeyError is raised when record type is
        neither ATOM or HETATM.
        """
        u = universe5
        u.add_TopologyAttr('record_type', ['ABNORM']*len(u.atoms))
        expected_msg = ("Found ABNORM for the record type, but only "
                        "allowed types are ATOM or HETATM")
        with pytest.raises(ValueError, match=expected_msg):
            u.atoms.write(outfile)
    @pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
    def test_no_reindex(self, universe, outfile):
        """
        When setting the `reindex` keyword to False, the atom are
        not reindexed.
        """
        universe.atoms.ids = universe.atoms.ids + 23
        universe.atoms.write(outfile, reindex=False)
        read_universe = mda.Universe(outfile)
        assert np.all(read_universe.atoms.ids == universe.atoms.ids)
    @pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
    def test_no_reindex_bonds(self, universe, outfile):
        """
        When setting the `reindex` keyword to False, the connect
        record match the non-reindexed atoms.
        """
        universe.atoms.ids = universe.atoms.ids + 23
        universe.atoms.write(outfile, reindex=False, bonds='all')
        with open(outfile) as infile:
            for line in infile:
                if line.startswith('CONECT'):
                    assert line.strip() == "CONECT 23 24 25 26 27"
                    break
            else:
                # BUGFIX: this used to raise the undefined name
                # ``AssertError`` (a NameError) which hid the real failure.
                raise AssertionError('No CONECT record found in the output.')
    @pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
    def test_reindex(self, universe, outfile):
        """
        When setting the `reindex` keyword to True, the atom are
        reindexed.
        """
        universe.atoms.ids = universe.atoms.ids + 23
        universe.atoms.write(outfile, reindex=True)
        read_universe = mda.Universe(outfile)
        # AG.ids is 1-based, while AG.indices is 0-based, hence the +1
        assert np.all(read_universe.atoms.ids == universe.atoms.indices + 1)
    def test_no_reindex_missing_ids(self, u_no_ids, outfile):
        """
        When setting `reindex` to False, if there is no AG.ids,
        then an exception is raised.
        """
        # Making sure AG.ids is indeed missing
        assert not hasattr(u_no_ids.atoms, 'ids')
        with pytest.raises(mda.exceptions.NoDataError):
            u_no_ids.atoms.write(outfile, reindex=False)
class TestMultiPDBReader(object):
    """Multi-model PDB reading: frame count, iteration and seeking, and
    CONECT bond record handling (PDB_multiframe and CONECT test files)."""
    @staticmethod
    @pytest.fixture(scope='class')
    def multiverse():
        # 24-model multiframe PDB; bonds guessed from distances.
        return mda.Universe(PDB_multiframe, guess_bonds=True)
    @staticmethod
    @pytest.fixture(scope='class')
    def conect():
        # File with explicit CONECT records plus guessed bonds.
        return mda.Universe(CONECT, guess_bonds=True)
    def test_n_frames(self, multiverse):
        assert_equal(multiverse.trajectory.n_frames, 24,
                     "Wrong number of frames read from PDB muliple model file")
    def test_n_atoms_frame(self, multiverse):
        # The atom count must be identical in every model.
        u = multiverse
        desired = 392
        for frame in u.trajectory:
            assert_equal(len(u.atoms), desired, err_msg="The number of atoms "
                         "in the Universe (%d) does not" " match the number "
                         "of atoms in the test case (%d) at frame %d" % (
                len(u.atoms), desired, u.trajectory.frame))
    def test_rewind(self, multiverse):
        # Seek forward then rewind back to frame 0.
        u = multiverse
        u.trajectory[11]
        assert_equal(u.trajectory.ts.frame, 11,
                     "Failed to forward to 11th frame (frame index 11)")
        u.trajectory.rewind()
        assert_equal(u.trajectory.ts.frame, 0,
                     "Failed to rewind to 0th frame (frame index 0)")
    def test_iteration(self, multiverse):
        # A second full iteration must restart from the beginning.
        u = multiverse
        frames = []
        for frame in u.trajectory:
            pass
        # should rewind after previous test
        # problem was: the iterator is NoneType and next() cannot be called
        for ts in u.trajectory:
            frames.append(ts)
        assert_equal(
            len(frames), u.trajectory.n_frames,
            "iterated number of frames %d is not the expected number %d; "
            "trajectory iterator fails to rewind" %
            (len(frames), u.trajectory.n_frames))
    def test_slice_iteration(self, multiverse):
        # Extended slice over the trajectory must match numpy slicing.
        u = multiverse
        frames = []
        for ts in u.trajectory[4:-2:4]:
            frames.append(ts.frame)
        assert_equal(np.array(frames),
                     np.arange(u.trajectory.n_frames)[4:-2:4],
                     err_msg="slicing did not produce the expected frames")
    def test_conect_bonds_conect(self, tmpdir, conect):
        # Round trip: bonds written via CONECT must be recovered on re-read.
        assert_equal(len(conect.atoms), 1890)
        assert_equal(len(conect.bonds), 1922)
        outfile = str(tmpdir.join('test-pdb-hbonds.pdb'))
        conect.atoms.write(outfile, bonds="conect")
        u1 = mda.Universe(outfile, guess_bonds=True)
        assert_equal(len(u1.atoms), 1890)
        assert_equal(len(u1.bonds), 1922)
    def test_numconnections(self, multiverse):
        u = multiverse
        # the bond list is sorted - so swaps in input pdb sequence should not
        # be a problem
        # Reference CONECT table (1-based serials) for the explicit bonds.
        desired = [[48, 365],
                   [99, 166],
                   [166, 99],
                   [249, 387],
                   [313, 331],
                   [331, 313, 332, 340],
                   [332, 331, 333, 338, 341],
                   [333, 332, 334, 342, 343],
                   [334, 333, 335, 344, 345],
                   [335, 334, 336, 337],
                   [336, 335],
                   [337, 335, 346, 347, 348], [338, 332, 339, 349],
                   [339, 338],
                   [340, 331],
                   [341, 332],
                   [342, 333],
                   [343, 333],
                   [344, 334],
                   [345, 334],
                   [346, 337],
                   [347, 337],
                   [348, 337],
                   [349, 338],
                   [365, 48],
                   [387, 249]]
        def helper(atoms, bonds):
            """
            Convert a bunch of atoms and bonds into a list of CONECT records
            """
            # Build an adjacency mapping (0-based indices), then emit sorted
            # 1-based records to match PDB serial numbering.
            con = {}
            for bond in bonds:
                a1, a2 = bond[0].index, bond[1].index
                if a1 not in con:
                    con[a1] = []
                if a2 not in con:
                    con[a2] = []
                con[a2].append(a1)
                con[a1].append(a2)
            atoms = sorted([a.index for a in atoms])
            conect = [([a, ] + sorted(con[a])) for a in atoms if a in con]
            conect = [[a + 1 for a in c] for c in conect]
            return conect
        conect = helper(u.atoms, [b for b in u.bonds if not b.is_guessed])
        assert_equal(conect, desired, err_msg="The bond list does not match "
                     "the test reference; len(actual) is %d, len(desired) "
                     "is %d" % (len(u._topology.bonds.values), len(desired)))
def test_conect_bonds_all(tmpdir):
    """Writing with ``bonds="all"`` must emit CONECT entries for every bond,
    including previously guessed ones."""
    original = mda.Universe(CONECT, guess_bonds=True)
    assert_equal(len(original.atoms), 1890)
    assert_equal(len(original.bonds), 1922)
    outfile = os.path.join(str(tmpdir), 'pdb-connect-bonds.pdb')
    original.atoms.write(outfile, bonds="all")
    reread = mda.Universe(outfile, guess_bonds=True)
    assert_equal(len(reread.atoms), 1890)
    explicit_bonds = [b for b in reread.bonds if not b.is_guessed]
    assert_equal(len(explicit_bonds), 1922)
    # assert_equal(len([b for b in conect.bonds if not b.is_guessed]), 1922)
def test_write_bonds_partial(tmpdir):
    """Bonds among a written subset of atoms must survive the round trip."""
    u = mda.Universe(CONECT)
    # grab all atoms with bonds
    bonded = (u.atoms.bonds.atom1 + u.atoms.bonds.atom2).unique
    outfile = os.path.join(str(tmpdir), 'test.pdb')
    bonded.write(outfile)
    reread = mda.Universe(outfile)
    assert len(reread.atoms.bonds) > 0
    # each re-read atom must carry the same number of bonds as its original
    for ref_atom, new_atom in zip(bonded, reread.atoms):
        assert len(ref_atom.bonds) == len(new_atom.bonds)
class TestMultiPDBWriter(object):
    """Multi-frame PDB writing: frame selection via write()/write_all_timesteps
    and de-duplication of CONECT records across frames."""
    # 3 decimals in PDB spec
    # http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
    prec = 3
    @staticmethod
    @pytest.fixture
    def universe():
        return mda.Universe(PSF, PDB_small)
    @staticmethod
    @pytest.fixture
    def multiverse():
        return mda.Universe(PDB_multiframe)
    @staticmethod
    @pytest.fixture
    def universe2():
        return mda.Universe(PSF, DCD)
    @staticmethod
    @pytest.fixture
    def outfile(tmpdir):
        return os.path.join(str(tmpdir), 'multiwriter-test-1.pdb')
    def test_write_atomselection(self, multiverse, outfile):
        """Test if multiframe writer can write selected frames for an
        atomselection."""
        u = multiverse
        group = u.select_atoms('name CA', 'name C')
        desired_group = 56
        desired_frames = 6
        pdb = mda.Writer(outfile, multiframe=True, start=12, step=2)
        for ts in u.trajectory[-6:]:
            pdb.write(group)
        pdb.close()
        u2 = mda.Universe(outfile)
        assert_equal(len(u2.atoms), desired_group,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d atoms, it should contain %d" % (
                         len(u2.atoms), desired_group))
        # NOTE(review): the message below interpolates len(u.trajectory)
        # (the 24-frame source universe) rather than len(u2.trajectory);
        # the assertion itself checks u2 — confirm the message's intent.
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d frames, it should have %d" % (
                         len(u.trajectory), desired_frames))
    def test_write_all_timesteps(self, multiverse, outfile):
        """
        Test write_all_timesteps() of the multiframe writer (selected frames
        for an atomselection)
        """
        u = multiverse
        group = u.select_atoms('name CA', 'name C')
        desired_group = 56
        desired_frames = 6
        with mda.Writer(outfile, multiframe=True, start=12, step=2) as W:
            W.write_all_timesteps(group)
        u2 = mda.Universe(outfile)
        assert_equal(len(u2.atoms), desired_group,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d atoms, it should contain %d" % (
                         len(u2.atoms), desired_group))
        # NOTE(review): same message/assertion mismatch as above
        # (len(u.trajectory) vs len(u2.trajectory)).
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                     "AtomGroup contains %d frames, it should have %d" % (
                         len(u.trajectory), desired_frames))
        # CONECT records must be written once per file, not once per frame.
        with open(outfile, "r") as f:
            lines = f.read()
            assert lines.count("CONECT") == 2  # Expected two CONECT records
    def test_write_loop(self, multiverse, outfile):
        """
        Test write() in a loop with the multiframe writer (selected frames
        for an atomselection)
        """
        u = multiverse
        group = u.select_atoms('name CA', 'name C')
        desired_group = 56
        desired_frames = 6
        with mda.Writer(outfile, multiframe=True) as W:
            for ts in u.trajectory[12::2]:
                W.write(group)
        u2 = mda.Universe(outfile)
        assert_equal(len(u2.atoms), desired_group,
                     err_msg="MultiPDBWriter trajectory written for an "
                             f"AtomGroup contains {len(u2.atoms)} atoms, "
                             f"it should contain {desired_group}")
        assert_equal(len(u2.trajectory), desired_frames,
                     err_msg="MultiPDBWriter trajectory written for an "
                             f"AtomGroup contains {len(u.trajectory)} "
                             f"frames, it should have {desired_frames}")
        with open(outfile, "r") as f:
            lines = f.read()
            # Expected only two CONECT records
            assert lines.count("CONECT") == 2
    def test_write_atoms(self, universe2, outfile):
        # Writing whole universes on two successive frames yields two models.
        u = universe2
        with mda.Writer(outfile, multiframe=True) as W:
            # 2 frames expected
            for ts in u.trajectory[-2:]:
                W.write(u.atoms)
        u0 = mda.Universe(outfile)
        assert_equal(u0.trajectory.n_frames,
                     2,
                     err_msg="The number of frames should be 2.")
class TestPDBReaderBig(RefAdK):
    """Reading a large PDB file against the RefAdK reference values.

    Fix: ``test_selection`` was defined twice with identical bodies; the
    second definition silently shadowed the first, so the duplicate has
    been removed.
    """
    prec = 6
    @staticmethod
    @pytest.fixture(scope='class')
    def universe():
        return mda.Universe(PDB)
    def test_load_pdb(self, universe):
        U = universe
        assert_equal(len(U.atoms), self.ref_n_atoms,
                     "load Universe from big PDB")
        assert_equal(U.atoms.select_atoms('resid 150 and name HA2').atoms[0],
                     U.atoms[self.ref_E151HA2_index], "Atom selections")
    def test_selection(self, universe):
        na = universe.select_atoms('resname NA+')
        assert_equal(len(na), self.ref_Na_sel_size,
                     "Atom selection of last atoms in file")
    def test_n_atoms(self, universe):
        assert_equal(universe.trajectory.n_atoms, self.ref_n_atoms,
                     "wrong number of atoms")
    def test_n_frames(self, universe):
        assert_equal(universe.trajectory.n_frames, 1,
                     "wrong number of frames")
    def test_time(self, universe):
        assert_equal(universe.trajectory.time, 0.0,
                     "wrong time of the frame")
    def test_frame(self, universe):
        assert_equal(universe.trajectory.frame, 0, "wrong frame number")
    def test_dt(self, universe):
        """testing that accessing universe.trajectory.dt returns the default
        of 1.0 ps"""
        assert_equal(universe.trajectory.dt, 1.0)
    def test_coordinates(self, universe):
        A10CA = universe.select_atoms('name CA')[10]
        assert_almost_equal(A10CA.position,
                            self.ref_coordinates['A10CA'],
                            self.prec,
                            err_msg="wrong coordinates for A10:CA")
    def test_distances(self, universe):
        NTERM = universe.select_atoms('name N')[0]
        CTERM = universe.select_atoms('name C')[-1]
        d = mda.lib.mdamath.norm(NTERM.position - CTERM.position)
        assert_almost_equal(d, self.ref_distances['endtoend'], self.prec,
                            err_msg="wrong distance between M1:N and G214:C")
    def test_unitcell(self, universe):
        assert_array_almost_equal(
            universe.dimensions,
            self.ref_unitcell,
            self.prec,
            err_msg="unit cell dimensions (rhombic dodecahedron), issue 60")
    def test_volume(self, universe):
        assert_almost_equal(
            universe.coord.volume,
            self.ref_volume,
            0,
            err_msg="wrong volume for unitcell (rhombic dodecahedron)")
    def test_n_residues(self, universe):
        # Should have first 10000 residues, then another 1302
        assert len(universe.residues) == 10000 + 1302
    def test_first_residue(self, universe):
        # First residue is a MET, shouldn't be smushed together
        # with a water
        assert len(universe.residues[0].atoms) == 19
class TestIncompletePDB(object):
    """Tests for Issue #396
    Reads an incomplete (but still intelligible) PDB file
    """
    @staticmethod
    @pytest.fixture(scope='class')
    def u():
        return mda.Universe(INC_PDB)
    def test_natoms(self, u):
        assert_equal(len(u.atoms), 3)
    def test_coords(self, u):
        # Reference positions read straight from the ATOM records.
        assert_array_almost_equal(u.atoms.positions,
                                  np.array([[111.2519989, 98.3730011,
                                             98.18699646],
                                            [111.20300293, 101.74199677,
                                             96.43000031], [107.60700226,
                                                            102.96800232,
                                                            96.31600189]],
                                           dtype=np.float32))
    def test_dims(self, u):
        # CRYST1 record still parses despite the truncated file.
        assert_array_almost_equal(u.dimensions,
                                  np.array([216.48899841, 216.48899841,
                                            216.48899841, 90., 90., 90.],
                                           dtype=np.float32))
    def test_names(self, u):
        assert all(u.atoms.names == 'CA')
    def test_residues(self, u):
        assert_equal(len(u.residues), 3)
    def test_resnames(self, u):
        assert_equal(len(u.atoms.resnames), 3)
        assert 'VAL' in u.atoms.resnames
        assert 'LYS' in u.atoms.resnames
        assert 'PHE' in u.atoms.resnames
    def test_reading_trajectory(self, u):
        # The incomplete file still contains two readable models.
        counter = 0
        for ts in u.trajectory:
            counter += 1
        assert counter == 2
class TestPDBXLSerial(object):
    """For Issue #446"""

    @staticmethod
    @pytest.fixture(scope='class')
    def u():
        return mda.Universe(PDB_xlserial)

    def test_load(self, u):
        # Check that universe loads ok, should be 4 atoms
        assert len(u.atoms) == 4

    def test_serials(self, u):
        # serial numbers straddling the 5-digit limit must be read correctly
        expected = [99998, 99999, 100000, 100001]
        assert [atom.id for atom in u.atoms] == expected
class TestPSF_CRDReader(_SingleFrameReader):
    """Single-frame reading of a CHARMM CRD coordinate file on top of a PSF
    topology."""
    __test__ = True
    def setUp(self):
        self.universe = mda.Universe(PSF, CRD)
        self.prec = 5  # precision in CRD (at least we are writing %9.5f)
class TestPSF_PDBReader(TestPDBReader):
    """Same checks as TestPDBReader but with coordinates loaded on top of a
    PSF topology."""
    def setUp(self):
        self.universe = mda.Universe(PSF, PDB_small)
        # 3 decimals in PDB spec
        # http://www.wwpdb.org/documentation/format32/sect9.html#ATOM
        self.prec = 3
    def test_uses_PDBReader(self):
        # The PDB coordinate reader must still be selected with a PSF topology.
        from MDAnalysis.coordinates.PDB import PDBReader
        assert isinstance(self.universe.trajectory, PDBReader), "failed to choose PDBReader"
def test_write_occupancies(tmpdir):
    """Tests for Issue #620 Modify occupancies, write out the file and check"""
    universe = mda.Universe(PDB_small)
    universe.atoms.occupancies = 0.12
    target = str(tmpdir.join('occ.pdb'))
    universe.atoms.write(target)
    reread = mda.Universe(target)
    assert_array_almost_equal(reread.atoms.occupancies, 0.12)
class TestWriterAlignments(object):
    """Column alignment of atom names and types in written ATOM records."""
    @pytest.fixture(scope='class')
    def writtenstuff(self, tmpdir_factory):
        # Write the ALIGN test universe once and hand the raw lines to the
        # assertions below.
        u = mda.Universe(ALIGN)
        outfile = str(tmpdir_factory.mktemp('pdb').join('nucl.pdb'))
        u.atoms.write(outfile)
        with open(outfile) as fh:
            return fh.readlines()
    def test_atomname_alignment(self, writtenstuff):
        # Our PDBWriter adds some stuff up top, so line 1 happens at [9]
        refs = ("ATOM 1 H5T",
                "ATOM 2 CA ",
                "ATOM 3 CA ",
                "ATOM 4 H5''",)
        for written, reference in zip(writtenstuff[9:], refs):
            assert_equal(written[:16], reference)
    def test_atomtype_alignment(self, writtenstuff):
        # Full reference line including occupancy/tempfactor/segid columns.
        result_line = ("ATOM 1 H5T GUA X 1 7.974 6.430 9.561"
                       " 1.00 0.00 RNAA \n")
        assert_equal(writtenstuff[9], result_line)
@pytest.mark.parametrize('atom, refname', ((mda.coordinates.PDB.Pair('ASP', 'CA'), ' CA '),  # Regular protein carbon alpha
                                           (mda.coordinates.PDB.Pair('GLU', 'OE1'), ' OE1'),
                                           (mda.coordinates.PDB.Pair('MSE', 'SE'), 'SE  '),  # Selenium like in 4D3L
                                           (mda.coordinates.PDB.Pair('CA', 'CA'), 'CA  '),  # Calcium like in 4D3L
                                           (mda.coordinates.PDB.Pair('HDD', 'FE'), 'FE  '),  # Iron from a heme like in 1GGE
                                           (mda.coordinates.PDB.Pair('PLC', 'P'), ' P  '),  # Lipid phosphorus (1EIN)
                                           ))
def test_deduce_PDB_atom_name(atom, refname):
    # The Pair named tuple is used to mock atoms as we only need them to have a
    # ``resname`` and a ``name`` attribute.
    # NOTE(review): refname values are 4-character PDB atom-name fields; the
    # trailing/leading padding was reconstructed from the PDB spec because the
    # whitespace was lost in this copy -- confirm against the original file.
    dummy_file = StringIO()
    name = (mda.coordinates.PDB.PDBWriter(dummy_file, n_atoms=1)
            ._deduce_PDB_atom_name(atom.name, atom.resname))
    assert_equal(name, refname)
@pytest.mark.parametrize('pdbfile', [PDB_cm, PDB_cm_bz2, PDB_cm_gz,
                                     PDB_mc, PDB_mc_bz2, PDB_mc_gz])
class TestCrystModelOrder(object):
    """Check offset based reading of pdb files

    Checks
    - len
    - seeking around

    # tests that cryst can precede or follow model header
    # allow frames to follow either of these formats:

    # Case 1 (PDB_mc)
    # MODEL
    # ...
    # ENDMDL
    # CRYST

    # Case 2 (PDB_cm)
    # CRYST
    # MODEL
    # ...
    # ENDMDL
    """
    # Per-frame reference values: box x-length and first-atom x-coordinate.
    boxsize = [80, 70, 60]
    position = [10, 20, 30]

    def test_len(self, pdbfile):
        u = mda.Universe(pdbfile)
        assert len(u.trajectory) == 3

    def test_order(self, pdbfile):
        u = mda.Universe(pdbfile)
        # Frame i must carry boxsize[i] and position[i] regardless of whether
        # CRYST precedes or follows the MODEL record.
        for ts, refbox, refpos in zip(
                u.trajectory, self.boxsize, self.position):
            assert_almost_equal(u.dimensions[0], refbox)
            assert_almost_equal(u.atoms[0].position[0], refpos)

    def test_seekaround(self, pdbfile):
        u = mda.Universe(pdbfile)
        # Random access in a non-monotonic order must still land on the
        # correct frame data.
        for frame in [2, 0, 2, 1]:
            u.trajectory[frame]
            assert_almost_equal(u.dimensions[0], self.boxsize[frame])
            assert_almost_equal(u.atoms[0].position[0], self.position[frame])

    def test_rewind(self, pdbfile):
        u = mda.Universe(pdbfile)
        u.trajectory[2]
        u.trajectory.rewind()
        assert_almost_equal(u.dimensions[0], self.boxsize[0])
        assert_almost_equal(u.atoms[0].position[0], self.position[0])
def test_standalone_pdb():
    """PDBReader must work even when the n_atoms kwarg is not supplied."""
    reader = mda.coordinates.PDB.PDBReader(PDB_cm)
    assert reader.n_atoms == 4
def test_write_pdb_zero_atoms(tmpdir):
    """Writing an empty AtomGroup must raise IndexError (issue 1083)."""
    universe = make_Universe(trajectory=True)
    with tmpdir.as_cwd():
        empty_group = universe.atoms[:0]
        with mda.Writer('out.pdb', empty_group.n_atoms) as writer:
            with pytest.raises(IndexError):
                writer.write(empty_group)
def test_atom_not_match(tmpdir):
    # issue 1998
    outfile = str(tmpdir.mkdir("PDBReader").join('test_atom_not_match' + ".pdb"))
    u = mda.Universe(PSF, DCD)
    # select two groups of atoms
    protein = u.select_atoms("protein and name CA")
    atoms = u.select_atoms(
        'resid 1 or resid 10 or resid 100 or resid 1000 or resid 10000')
    with mda.Writer(outfile, multiframe=True, n_atoms=10) as pdb:
        # write these two groups of atoms to pdb
        # Then the n_atoms will not match
        pdb.write(protein)
        pdb.write(atoms)
    reader = mda.coordinates.PDB.PDBReader(outfile)
    # Reading a frame whose atom count disagrees with the first frame must
    # raise a descriptive ValueError.
    with pytest.raises(ValueError) as excinfo:
        reader._read_frame(1)
    assert 'Inconsistency in file' in str(excinfo.value)
def test_partially_missing_cryst():
    # issue 2252
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(INC_PDB, 'r') as fh:
        raw = fh.readlines()
    # mangle the cryst lines so that only box angles are left
    # this mimics '6edu' from PDB
    raw = [line if not line.startswith('CRYST')
           else line[:6] + ' ' * 28 + line[34:]
           for line in raw]
    with pytest.warns(UserWarning):
        u = mda.Universe(StringIO('\n'.join(raw)), format='PDB')
    assert len(u.atoms) == 3
    assert len(u.trajectory) == 2
    assert u.dimensions is None
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_write_no_atoms_elements(dummy_universe_without_elements):
    """
    If no element symbols are provided, the PDB writer guesses.
    """
    destination = StringIO()
    with mda.coordinates.PDB.PDBWriter(destination) as writer:
        writer.write(dummy_universe_without_elements.atoms)
    content = destination.getvalue()
    # Columns 77-78 of an ATOM record hold the element symbol.
    element_symbols = [
        line[76:78].strip()
        for line in content.splitlines()
        if line[:6] == 'ATOM  '
    ]
    # The dummy universe carries no element information, so the written
    # element fields come out empty for all five atoms.
    expectation = ['', '', '', '', '']
    assert element_symbols == expectation
@pytest.mark.filterwarnings(IGNORE_NO_INFORMATION_WARNING)
def test_write_atom_elements(dummy_universe_without_elements):
    """
    If element symbols are provided, they are used when writing the file.

    See `Issue 2423 <https://github.com/MDAnalysis/mdanalysis/issues/2423>`_.
    """
    elems = ['S', 'O', '', 'C', 'Na']
    expectation = ['S', 'O', '', 'C', 'NA']
    dummy_universe_with_elements = dummy_universe_without_elements
    dummy_universe_with_elements.add_TopologyAttr('elements', elems)
    destination = StringIO()
    with mda.coordinates.PDB.PDBWriter(destination) as writer:
        writer.write(dummy_universe_with_elements.atoms)
    # Element symbols live in columns 77-78 of each ATOM record.
    written_symbols = [
        record[76:78].strip()
        for record in destination.getvalue().splitlines()
        if record[:6] == 'ATOM  '
    ]
    assert written_symbols == expectation
def test_elements_roundtrip(tmpdir):
    """
    Roundtrip test for PDB elements reading/writing.
    """
    source = mda.Universe(CONECT)
    expected_elements = source.atoms.elements
    outfile = os.path.join(str(tmpdir), 'elements.pdb')
    with mda.coordinates.PDB.PDBWriter(outfile) as writer:
        writer.write(source.atoms)
    roundtripped = mda.Universe(outfile).atoms.elements
    assert_equal(expected_elements, roundtripped)
def test_cryst_meaningless_warning():
    # issue 2599
    # A cryo-EM placeholder CRYST1 record must trigger a warning.
    # FIXME: This message might change with Issue #2698
    with pytest.warns(UserWarning, match="Unit cell dimensions will be set to None."):
        mda.Universe(PDB_CRYOEM_BOX)
def test_cryst_meaningless_select():
    # issue 2599
    # Distance-based selections must still work (without PBC) when the
    # unit cell is meaningless; the empty result is the expected answer.
    u = mda.Universe(PDB_CRYOEM_BOX)
    cur_sele = u.select_atoms('around 0.1 (resid 4 and name CA and segid A)')
    assert cur_sele.n_atoms == 0
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/coordinates/test_pdb.py
|
Python
|
gpl-2.0
| 46,473
|
[
"MDAnalysis"
] |
65655960665b75d4fa765ad3160d8e1b93cc0cd2c5ec324f44a7be35f436c73d
|
from django.core.urlresolvers import reverse
from django.utils.html import strip_tags
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.common.exceptions import NoSuchElementException
import django.test
import re, os, time
import tao.datasets as datasets
#from tao.models import DataSetProperty, BandPassFilter, Simulation
from tao.settings import MODULE_INDICES
from tao.tests.helper import TaoModelsCleanUpMixin
def interact(local=None):
    """
    drop into an interactive shell - can be helpful for debugging
    call like interact(local=locals())
    """
    # BUG FIX: the previous default ``local=locals()`` was evaluated once at
    # import time, so calling interact() with no argument exposed this
    # module's namespace instead of the caller's.  Default to the caller's
    # frame locals instead; passing local=locals() still works as before.
    import code
    if local is None:
        import inspect
        local = inspect.currentframe().f_back.f_locals
    code.interact(local=local)
def visit(client, view_name, *args, **kwargs):
    # Resolve ``view_name`` via Django's URL reverser and GET the resulting
    # URL, following redirects.
    # NOTE(review): ``kwargs`` is accepted but never forwarded to ``reverse``
    # -- confirm whether any caller relies on keyword URL arguments.
    return client.get(reverse(view_name, args=args), follow=True)
class LiveServerTest(object):
    """Base class for Selenium-driven live-server tests.

    Provides a Firefox WebDriver configured for silent downloads plus a large
    collection of selector builders, assertion helpers and form-interaction
    helpers used by the concrete test classes.

    NOTE(review): indentation of this file was flattened in transit and has
    been reconstructed; statement nesting (e.g. loop-level waits) should be
    verified against the original.  Several helpers call assertion methods
    (``assertFalse``, ``assertIsNone``) that are not defined in this class --
    presumably provided by a unittest.TestCase mixin; verify.
    """

    fixtures = ['rules.json']
    # Directory the Firefox profile is told to save downloads into.
    DOWNLOAD_DIRECTORY = '/tmp/work/downloads'
    ## List all ajax enabled pages that have initialization code and must wait
    AJAX_WAIT = ['mock_galaxy_factory', 'view_job']
    # Index of the summary tab (one past the last wizard module).
    SUMMARY_INDEX = str(len(MODULE_INDICES)+1)
    # Output formats offered by the UI: form value, display text, extension.
    OUTPUT_FORMATS = [
        {'value':'csv', 'text':'CSV (Text)', 'extension':'csv'},
        {'value':'hdf5', 'text':'HDF5', 'extension':'hdf5'},
        {'value': 'fits', 'text': 'FITS', 'extension': 'fits'},
        {'value': 'votable', 'text': 'VOTable', 'extension': 'xml'}
    ]

    def wait(self, secs=1):
        # Fixed sleep used to let the browser settle after an action.
        time.sleep(secs * 1.0)

    def setUp(self):
        from selenium.webdriver.firefox.webdriver import FirefoxProfile
        fp = FirefoxProfile()
        # folderList=2 -> download into the custom directory set below.
        fp.set_preference("browser.download.folderList", 2)
        fp.set_preference("browser.download.dir", self.DOWNLOAD_DIRECTORY)
        fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/html, application/zip, text/plain, application/force-download, application/x-tar")
        self.selenium = WebDriver(firefox_profile=fp)
        self.selenium.implicitly_wait(1)  # wait one second before failing to find
        # create the download dir
        if not os.path.exists(self.DOWNLOAD_DIRECTORY):
            os.makedirs(self.DOWNLOAD_DIRECTORY)

    def tearDown(self):
        self.selenium.quit()
        # remove the download dir
        for root, dirs, files in os.walk(self.DOWNLOAD_DIRECTORY, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))

    # --- selector builders for the wizard forms -------------------------

    def lc_id(self, bare_field):
        # CSS id selector for a light-cone form field.
        return '#id_light_cone-%s' % bare_field

    def lc_2select(self, bare_field):
        return 'id_light_cone-output_properties_%s' % bare_field

    def rf_id(self, bare_field):
        # CSS id selector for a record-filter form field.
        return '#id_record_filter-%s' % bare_field

    def sed(self, bare_field):
        return 'id_sed-%s' % bare_field

    def mi_id(self, bare_field):
        return 'id_mock_image-%s' % bare_field

    def sed_id(self, bare_field):
        return '#%s' % self.sed(bare_field)

    def sed_2select(self, bare_field):
        return 'id_sed-band_pass_filters_%s' % bare_field

    def job_select(self, bare_field):
        return 'id-job_%s' % bare_field

    def job_id(self, bare_field):
        return '#%s' % self.job_select(bare_field)

    # --- DOM navigation helpers -----------------------------------------

    def get_parent_element(self, element):
        return self.selenium.execute_script('return arguments[0].parentNode;', element)

    def get_element_css_classes(self, element):
        # Returns [] when the element carries no ``class`` attribute.
        list = []
        found = element.get_attribute('class')
        if found is not None: list = found.split()
        return list

    def get_closest_by_class(self, element, css_class):
        # Walk up the DOM until an ancestor carrying ``css_class`` is found.
        # NOTE(review): loops forever if no ancestor matches.
        while css_class not in self.get_element_css_classes(element):
            element = self.get_parent_element(element)
        return element

    def get_summary_selector(self, form_name, field_name):
        return 'div.summary_%s .%s' % (form_name, field_name)

    def get_summary_field(self, form_name, field_name):
        summary_selector = self.get_summary_selector(form_name, field_name)
        return self.selenium.find_element_by_css_selector(summary_selector)

    def get_summary_field_text(self, form_name, field_name):
        return self.get_summary_field(form_name, field_name).text

    def get_info_field(self, section, field):
        elem = self.selenium.find_element_by_css_selector("div.%(section)s-info .%(field)s" % {'section': section, 'field': field})
        return elem.text

    def find_element_by_css_selector(self, selector):
        # Retry wrapper around Selenium's lookup: poll up to 3 times with a
        # 1 s pause before letting the exception propagate.
        retries = 3
        while retries > 0:
            try:
                elem = self.selenium.find_element_by_css_selector(selector)
                return elem
            except NoSuchElementException:
                retries -= 1
                self.wait(1)
        # If it hasn't been found by now, try one more time and let the exception through
        return self.selenium.find_element_by_css_selector(selector)

    def find_element_by_id(self, elem_id):
        # Same retry strategy as find_element_by_css_selector, by element id.
        retries = 3
        while retries > 0:
            try:
                elem = self.selenium.find_element_by_id(elem_id)
                return elem
            except NoSuchElementException:
                retries -= 1
                self.wait(1)
        # If it hasn't been found by now, try one more time and let the exception through
        return self.selenium.find_element_by_id(elem_id)

    def assert_email_body_contains(self, email, text):
        pattern = re.escape(text)
        matches = re.search(pattern, email.body)
        self.assertTrue(matches, "Email does not contain " + text)

    def get_page_source(self):
        # Page source can be blocked by a pending JS alert; keep accepting
        # alerts until the source becomes readable.
        try:
            return self.selenium.page_source
        except:
            while True:
                self.wait(0.2)
                try:
                    self.selenium.switch_to_alert().accept()
                except:
                    return self.selenium.page_source

    # --- minimal assertion primitives -----------------------------------

    def assertTrue(self, value, msg):
        # NOTE(review): unlike unittest, ``msg`` is mandatory here, yet some
        # helpers below call assertTrue with a single argument -- those calls
        # would raise TypeError; confirm which definition is actually used.
        if not value:
            raise AssertionError(msg)
        return

    def assertEqual(self, vala, valb):
        if vala != valb:
            msg = 'FAIL: "{0}" != "{1}"'.format(vala, valb)
            raise AssertionError(msg)
        return

    def assert_page_has_content(self, string):
        page_source = self.get_page_source()
        pattern = re.escape(string)
        self.assertTrue((string in page_source) or re.search(pattern, page_source), "page source did not contain %s" % pattern)

    def assert_page_does_not_contain(self, string):
        page_source = self.get_page_source()
        pattern = re.escape(string)
        self.assertFalse(re.search(pattern, page_source), "page source contained %s" % pattern)

    def assert_element_text_equals(self, selector, expected_value):
        text = self.find_visible_element(selector).text.strip()
        self.assertEqual(expected_value.strip(), text.strip())

    def assert_element_value_equals(self, selector, expected_value):
        text = self.find_visible_element(selector).get_attribute('value')
        self.assertEqual(expected_value.strip(), text.strip())

    def assert_selector_texts_equals_expected_values(self, selector_value):
        # selector_value is a dict of selectors to expected text values
        for selector, expected_value in selector_value.items():
            self.assert_element_text_equals(selector, unicode(expected_value))

    def assert_attribute_equals(self, attribute, selector_values):
        # selector_values is a dict of selectors to attribute values
        for selector, expected_value in selector_values.items():
            element = self.find_visible_element(selector)
            actual_value = element.get_attribute(attribute)
            self.assertEqual(expected_value, actual_value)

    def assert_is_checked(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertEqual('true', field.get_attribute('checked'))

    def assert_is_unchecked(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertIsNone(field.get_attribute('checked'))

    def assert_is_enabled(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertIsNone(field.get_attribute('disabled'))

    def assert_is_disabled(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertEqual('true', field.get_attribute('disabled'))

    def assert_are_displayed(self, name):
        # NOTE(review): a non-empty list is always truthy -- this likely
        # intended all(...); also assertTrue is called without its required
        # msg argument.  Flagged, not changed.
        fields = self.selenium.find_elements_by_name(name)
        self.assertTrue([field.is_displayed() for field in fields])

    def assert_are_displayed_by_class_name(self, name):
        # NOTE(review): same truthy-list concern as assert_are_displayed.
        fields = self.selenium.find_elements_by_class_name(name)
        self.assertTrue([field.is_displayed() for field in fields])

    def assert_are_not_displayed(self, name):
        fields = self.selenium.find_elements_by_name(name)
        self.assertFalse(all([field.is_displayed() for field in fields]))

    def assert_is_displayed(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertTrue(field.is_displayed())

    def assert_not_displayed(self, selector):
        field = self.selenium.find_element_by_css_selector(selector)
        self.assertFalse(field.is_displayed())

    def assert_not_in_page(self, selector):
        "Assert that the supplied selector is not part of the page content"
        elements = self.selenium.find_elements_by_css_selector(selector)
        self.assertTrue(len(elements) == 0)

    def assert_on_page(self, url_name, ignore_query_string=False):
        # Poll for up to ~30 s for the browser to land on the expected URL,
        # then assert one final time so the failure propagates.
        retries = 30
        while retries > 0:
            try:
                self._assert_on_page(url_name, ignore_query_string)
                return
            except AssertionError:
                retries -= 1
                print "assert_on_page: retry"
                self.wait(1)
        self._assert_on_page(url_name, ignore_query_string)

    def _assert_on_page(self, url_name, ignore_query_string=False):
        if not ignore_query_string:
            self.assertEqual(self.selenium.current_url, self.get_full_url(url_name))
        else:
            # Compare only the part of the URL before the query string.
            split_url = self.selenium.current_url.split('?')
            url = split_url[0]
            self.assertEqual(url, self.get_full_url(url_name))

    def assert_multi_selected_text_equals(self, id_of_select, expected):
        # Multiset comparison: every expected entry must be present and no
        # extra option may remain afterwards.
        actual = self.get_multi_selected_option_text(id_of_select)
        remaining = []
        for value in expected:
            if value not in actual:
                remaining.append(value)
            else:
                actual.remove(value)
        self.assertTrue(not actual and not remaining)

    def assert_summary_field_correctly_shown(self, expected_value, form_name, field_name):
        value_displayed = self.get_summary_field_text(form_name, field_name)
        self.assertEqual(expected_value, strip_tags(value_displayed))

    # --- form interaction helpers ---------------------------------------

    def fill_in_fields(self, field_data, id_wrap=None, clear=False):
        # field_data maps selector -> value.  <select> elements are driven
        # via self.select; text inputs receive keystrokes (optionally
        # cleared first).
        for selector, text_to_input in field_data.items():
            if id_wrap:
                selector = id_wrap(selector)
            elem = self.selenium.find_element_by_css_selector(selector)
            if elem.tag_name == 'select':
                self.select(selector, str(text_to_input))
            else:
                if clear:
                    elem.clear()
                elem.send_keys(str(text_to_input))
            self.wait(0.5)

    def clear(self, selector):
        elem = self.selenium.find_element_by_css_selector(selector)
        elem.clear()

    def click(self, elem_id):
        elem = self.find_element_by_id(elem_id)
        elem.click()
        self.wait(0.5)

    def click_by_css(self, element_css):
        elem = self.selenium.find_element_by_css_selector(element_css)
        elem.click()
        self.wait(0.5)

    def click_by_class_name(self, class_name):
        elem = self.selenium.find_element_by_class_name(class_name)
        elem.click()
        self.wait(0.5)

    def login(self, username, password):
        self.visit('accounts/login')
        username_input = self.selenium.find_element_by_id('id_username')
        password_input = self.selenium.find_element_by_id('id_password')
        submit_button = self.selenium.find_element_by_tag_name('button')  # TODO make this more specific
        username_input.send_keys(username)
        password_input.send_keys(password)
        submit_button.submit()

    def visit(self, url_name, *args, **kwargs):
        """ self.visit(name_of_url_as_defined_in_your_urlconf) """
        self.selenium.get(self.get_full_url(url_name, *args, **kwargs))
        # Ajax-heavy pages need extra settling time plus a check that
        # catalogue.js finished initialising.
        if url_name in LiveServerTest.AJAX_WAIT:
            self.wait(2)
            self.assertTrue(self.selenium.execute_script('return (window.catalogue !== undefined ? catalogue._loaded : true)'),
                            'catalogue.js loading error')

    def get_actual_filter_options(self):
        option_selector = '%s option' % self.rf_id('filter')
        return [x.get_attribute('value').encode('ascii') for x in self.selenium.find_elements_by_css_selector(option_selector)]

    def get_expected_filter_options(self, data_set):
        # Expected option values: 'D-<id>' for normal dataset properties and
        # a 'B-<id>_apparent'/'B-<id>_absolute' pair per band-pass filter.
        def gen_bp_pairs(objs):
            for obj in objs:
                yield ('B-' + str(obj.id) + '_apparent')
                yield ('B-' + str(obj.id) + '_absolute')
        normal_parameters = datasets.filter_choices(data_set.simulation.id, data_set.galaxy_model.id)
        bandpass_parameters = datasets.band_pass_filters_objects()
        return ['D-' + str(x.id) for x in normal_parameters] + [pair for pair in gen_bp_pairs(bandpass_parameters)]

    def get_actual_snapshot_options(self):
        option_selector = '%s option' % self.lc_id('snapshot')
        return [x.get_attribute("innerHTML") for x in self.selenium.find_elements_by_css_selector(option_selector)]

    def get_expected_snapshot_options(self, snapshots):
        # Redshifts are rendered with 5 significant digits in the UI.
        return [str("%.5g" % snapshot.redshift) for snapshot in snapshots]

    def get_full_url(self, url_name, *args, **kwargs):
        # NOTE(review): args/kwargs are accepted but ignored and url_name is
        # appended verbatim (not reversed) -- confirm this is intended.
        return "%s%s" % (self.job_params.BASE_URL, url_name)

    def get_selected_option_text(self, id_of_select):
        # Returns the text of the LAST selected option (raises AttributeError
        # on None if nothing is selected).
        select = self.selenium.find_element_by_css_selector(id_of_select)
        options = select.find_elements_by_css_selector('option')
        selected_option = None
        for option in options:
            if option.get_attribute('selected'):
                selected_option = option
        return selected_option.text

    def get_multi_selected_option_text(self, id_of_select):
        # NOTE(review): despite the name, this returns the text of ALL
        # options, not only the selected ones -- verify callers expect that.
        select = self.selenium.find_element_by_css_selector(id_of_select)
        options = select.find_elements_by_css_selector('option')
        return [option.text for option in options]

    def get_selector_value(self, selector):
        return self.selenium.find_element_by_css_selector(selector).get_attribute('value')

    def select(self, selector, value):
        from selenium.webdriver.support.ui import Select
        elem = self.selenium.find_element_by_css_selector(selector)
        select = Select(elem)
        select.select_by_visible_text(value)

    def find_visible_elements(self, css_selector):
        elements = self.selenium.find_elements_by_css_selector(css_selector)
        return [elem for elem in elements if elem.is_displayed()]

    def find_visible_element(self, css_selector):
        # Exactly one visible match is required; anything else is an error.
        elements = self.find_visible_elements(css_selector)
        num_elements = len(elements)
        if num_elements != 1:
            raise Exception("Found %s elements for selector %s" % (num_elements, css_selector))
        return elements[0]

    def select_dark_matter_simulation(self, simulation):
        self.select(self.lc_id('dark_matter_simulation'), simulation.name)
        self.wait(0.5)

    def select_galaxy_model(self, galaxy_model):
        self.select(self.lc_id('galaxy_model'), galaxy_model.name)
        self.wait(0.5)

    def select_stellar_model(self, stellar_model):
        self.select(self.sed_id('single_stellar_population_model'), stellar_model.label)
        self.wait(0.5)

    def select_record_filter(self, filter, extension=None):
        # Build the display label the UI uses for the filter and select it.
        # NOTE(review): DataSetProperty and BandPassFilter are referenced here
        # but their import at the top of the file is commented out -- verify
        # these names are in scope at call time.
        text = ''
        if isinstance(filter, DataSetProperty):
            units_str = ''
            if filter.units is not None and len(filter.units) > 0:
                units_str = ' (' + filter.units + ')'
            text = filter.label + units_str
        elif isinstance(filter, BandPassFilter):
            text = filter.label
            if extension is not None:
                text += ' (' + extension.capitalize() + ')'
        else:
            raise TypeError("Unknown filter type")
        self.select(self.rf_id('filter'), text)

    #a function to make a list of list of text inside the table
    def table_as_text_rows(self, selector):
        table = self.selenium.find_element_by_css_selector(selector)
        rows = table.find_elements_by_css_selector('tr')
        cells = [[cell.text for cell in row.find_elements_by_css_selector('th, td')] for row in rows]
        return cells

    def submit_support_form(self):
        submit_button = self.selenium.find_element_by_css_selector('button[type="submit"]')
        submit_button.submit()
class DeploymentTester(LiveServerTest):
    """LiveServerTest with helpers specific to the mock-galaxy-factory form."""

    def submit_mgf_form(self, description=''):
        # Open the summary/submit tab, set the job description and submit.
        self.click('tao-tabs-summary_submit')
        #self.click('id-job_description')
        self.fill_in_fields({'#job_description': description},
                            clear=True)
        submit_button = self.selenium.find_element_by_css_selector('#mgf-form #form_submit')
        submit_button.click()

    def assert_cant_submit_mgf_form(self):
        # The submit button must be absent and the error box present.
        # NOTE(review): self.fail is not defined in this hierarchy as shown --
        # presumably supplied by a TestCase mixin; verify.
        self.click('tao-tabs-summary_submit')
        try:
            submit_button = self.selenium.find_element_by_css_selector('#mgf-form #form_submit')
            self.fail('Submit button present')
        except NoSuchElementException:
            pass
        self.selenium.find_element_by_css_selector('#mgf-form #form_errors')

    def assert_errors_on_field(self, what, field_id):
        # NOTE(review): assertEquals is not defined here (base defines
        # assertEqual) -- verify a TestCase mixin provides it.
        field_elem = self.selenium.find_element_by_css_selector(field_id)
        div_container = self.get_closest_by_class(field_elem, 'control-group')
        self.assertEquals(what, 'error' in self.get_element_css_classes(div_container))

    def assert_required_on_field(self, what, field_id):
        # The field's <label> must carry an 'error' class.
        field_elem = self.selenium.find_element_by_css_selector(field_id)
        div_container = self.get_closest_by_class(field_elem, 'control-group')
        label = div_container.find_element_by_css_selector('label')
        self.assertTrue(label.get_attribute('class').find('error') != -1, '%s label is not in error' % (field_id,))

    def upload_params_file(self, fname):
        # Type the path into the file input to stage an upload.
        self.find_element_by_id('id_job_type-params_file').send_keys(fname)
|
IntersectAustralia/asvo-tao
|
web/tao/deployment_tests/ithelper.py
|
Python
|
gpl-3.0
| 18,904
|
[
"VisIt"
] |
b9dc2533a15ff4ab7a98abcd3655b12257bb4f2d72d9a45a1780e8661ac3e5d4
|
# -*- coding: utf-8 -*-
import os
import pygame
import random
import classes.board
import classes.extras as ex
import classes.game_driver as gd
import classes.level_controller as lc
import classes.drw.splash
import classes.drw.fraction
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
    # Level counts for this game id / user age group, as a 2-item sequence
    # consumed by the level controller below.
    self.lvlc = mainloop.xml_conn.get_level_count(mainloop.m.game_dbid, mainloop.config.user_age_group)
    self.level = lc.Level(self, mainloop, self.lvlc[0], self.lvlc[1])
    # Initialise the base board game with a 13 x 11 grid.
    gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 13, 11)
def create_game_objects(self, level=1):
self.board.draw_grid = False
self.show_info_btn = False
h = random.randrange(0, 255, 5)
if self.mainloop.scheme is not None:
if self.mainloop.scheme.dark:
self.scheme_dir = "black"
color = (0, 0, 0)
else:
self.scheme_dir = "white"
color = (255, 255, 255)
else:
self.scheme_dir = "white"
color = (255, 255, 255)
self.color = color
"""lvl_data = [term_len_min, term_len_max, term_count_min, term_count_max, term_completed_count, semi_completed_count, shuffled]"""
lvl_data = self.mainloop.xml_conn.get_level_data(self.mainloop.m.game_dbid, self.mainloop.config.user_age_group,
self.level.lvl)
self.chapters = self.mainloop.xml_conn.get_chapters(self.mainloop.m.game_dbid,
self.mainloop.config.user_age_group)
self.term_len = random.randint(lvl_data[0], lvl_data[1])
self.term_count = random.randint(lvl_data[2], lvl_data[3])
term_completed_count = lvl_data[4]
if lvl_data[5] > 1:
term_semi_completed_count = max(2, random.randint(2, lvl_data[5]))
else:
term_semi_completed_count = lvl_data[5]
self.task_len = self.term_len * self.term_count
self.term = self.generate_term(self.term_len)
self.task = self.generate_task(self.term, self.term_len, self.term_count, term_completed_count,
term_semi_completed_count, shuffled=lvl_data[6])
if self.mainloop.m.game_variant < 2:
# make the backgrounds different for each letter or number
unit_clrs = []
unit_clrs_fg = []
font_clrs = []
for i in range(self.term_len):
if self.level.lvl < 3:
h = random.randrange(0, 100, 5)
gap = i * (155//self.term_len)
else:
gap = 0
unit_clrs.append(ex.hsv_to_rgb(h + gap, self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v))
unit_clrs_fg.append(ex.hsv_to_rgb(h + gap, self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v))
font_clrs.append(ex.hsv_to_rgb(h + gap, self.mainloop.cl.font_color_s, self.mainloop.cl.font_color_v))
if self.mainloop.m.game_variant == 0:
if random.randint(0, 1) == 0:
self.choices = self.lang.alphabet_uc[:]
else:
self.choices = self.lang.alphabet_lc[:]
elif self.mainloop.m.game_variant == 1:
self.choices = [str(x) for x in range(0, 9)]
elif self.mainloop.m.game_variant == 2:
self.choices = [x for x in range(2, 20)]
elif self.mainloop.m.game_variant == 3:
self.initiate_images()
self.choices = [x for x in range(len(self.imgs))]
elif self.mainloop.m.game_variant == 5:
self.initiate_shapes()
self.choices = [x for x in range(len(self.imgs))]
random.shuffle(self.color_ind)
if self.level.lvl < 3:
self.mixed_colours = True
else:
self.mixed_colours = False
elif self.mainloop.m.game_variant == 4:
self.func_number = random.randint(0, 3)
#create fractions
self.fractions = []
full = False
while not full:
a = random.randint(1, 4)
b = random.randint(a+1, 6)
l = [a, b]
if l not in self.fractions:
self.fractions.append(l)
if len(self.fractions) >= self.term_len:
full = True
clrs1 = []
clrs2 = []
hues = []
for i in range(self.term_len):
if self.level.lvl < 3:
h = random.randrange(0, 100, 5)
gap = i * (155 // self.term_len)
else:
gap = 0
clrs1.append(ex.hsv_to_rgb(h + gap, 150, 230))
clrs2.append(ex.hsv_to_rgb(h + gap, 255, 140))
hues.append(h + gap)
self.choices = [x for x in range(2, 20)]
random.shuffle(self.choices)
self.term_values = self.choices[0:self.term_len]
data = [self.task_len, 4]
self.data = data
self.vis_buttons = [0, 1, 1, 1, 1, 0, 1, 1, 1]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.board.set_animation_constraints(0, data[0], 0, data[1])
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
self.unit_mouse_over = None
self.units = []
self.board.board_bg.initcolor = color
self.board.board_bg.color = color
self.board.board_bg.update_me = True
self.left_offset = (self.data[0] - len(self.task)) // 2
self.positions = [x for x in range(self.left_offset, len(self.task)+self.left_offset)]
self.solution_grid = [0 for x in range(data[0])]
random.shuffle(self.positions)
p_ind = 0
if self.mainloop.scheme is None:
dc_img_src = os.path.join('unit_bg', "universal_sq_dc.png")
else:
dc_img_src = None
bg_img_src = os.path.join('unit_bg', "universal_sq_bg.png")
bg_door_img_src = os.path.join('unit_bg', "universal_sq_door.png")
if self.mainloop.m.game_variant == 5:
fg_tint_color = (30, 30, 30)
dc_tint_color = ex.hsv_to_rgb(160, self.mainloop.cl.door_bg_tint_s, self.mainloop.cl.door_bg_tint_v)
if self.mainloop.m.game_variant == 3:
fg_tint_color = (30, 30, 30)
if self.mainloop.scheme is not None and self.mainloop.scheme.dark:
bg_door_img_src = os.path.join('unit_bg', "img_decor_bb.png")
dc_tint_color = None
else:
bg_door_img_src = os.path.join('unit_bg', "img_decor_w.png")
if self.mainloop.m.game_variant == 4:
bg_img_src = os.path.join('unit_bg', "universal_sq_bg.png")
if self.mainloop.m.game_variant == 2:
bg_img_src = os.path.join('unit_bg', "universal_sq_bg.png")
fg_img_src = "splash_mask.png"
hue_choice = [[255, 255, 255], [2, 2, 2], [140, 140, 140], [255, 0, 0], [255, 138, 0], [255, 255, 0],
[181, 219, 3], [0, 160, 0], [41, 131, 82], [0, 130, 133], [0, 0, 255], [0, 0, 132],
[132, 0, 132], [255, 0, 255], [74, 0, 132], [255, 20, 138], [132, 0, 0], [140, 69, 16],
[0, 255, 255], [0, 255, 0]]
font_color = [ex.hsv_to_rgb(h, self.mainloop.cl.font_color_s, self.mainloop.cl.font_color_v), ]
for i in range(len(self.task)):
self.solution_grid[self.left_offset + i] = 1
if self.task[i] == "?":
#add placeholder and the items to add
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt=None,
fg_img_src=None, bg_img_src=None, dc_img_src=bg_door_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=None,
bg_tint_color=None, fg_tint_color=None, txt_align=(0, 0),
font_type=10, multi_color=False, alpha=True, immobilized=True, mode=2,
dc_tint_color=dc_tint_color)
if self.mainloop.m.game_variant < 2:
v = self.term_values[int(self.term[i % self.term_len])]
else:
v = ""
if self.mainloop.m.game_variant == 3:
img = "%s.jpg" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('art4apps', self.category, img)
self.board.add_universal_unit(grid_x=self.positions[p_ind], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=img_src, bg_img_src=img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=None, dc_tint_color=None,
fg_tint_color=(50, 50, 50), txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
self.board.ships[-1].set_blit_mask(os.path.join('unit_bg', 'img_mask.png'))
elif self.mainloop.m.game_variant == 5:
img = "%s.png" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('shapes', img)
if self.mixed_colours:
bg_tint_color = self.shape_colors[self.color_ind[int(self.term[i % self.term_len])]]
fg_tint_color = self.shape_colors_fg[self.color_ind[int(self.term[i % self.term_len])]]
else:
bg_tint_color = self.shape_colors[self.color_ind[0]]
fg_tint_color = self.shape_colors_fg[self.color_ind[0]]
self.board.add_universal_unit(grid_x=self.positions[p_ind], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=img_src, bg_img_src=img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=None,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
elif self.mainloop.m.game_variant == 2:
color_index = self.term_values[int(self.term[i % self.term_len])]
dc_tint_colorx = hue_choice[color_index]
if color_index < 3:
if self.mainloop.scheme is not None and self.mainloop.scheme.dark:
bg_tint_color = (90, 90, 90)
fg_tint_color = (60, 60, 60)
else:
bg_tint_color = (200, 200, 200)
fg_tint_color = (160, 160, 160)
else:
bg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)
fg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)
self.board.add_universal_unit(grid_x=self.positions[p_ind], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=fg_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=dc_tint_colorx,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
elif self.mainloop.m.game_variant == 4:
bg_color = ex.hsv_to_rgb(hues[int(self.term[i % self.term_len])], self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)
fg_color = ex.hsv_to_rgb(hues[int(self.term[i % self.term_len])], self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)
self.board.add_universal_unit(grid_x=self.positions[p_ind], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_color, dc_tint_color=None,
fg_tint_color=fg_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
fraction = classes.drw.fraction.Fraction(1, self.board.scale, clrs1[int(self.term[i % self.term_len])], clrs2[int(self.term[i % self.term_len])], self.fractions[int(self.term[i % self.term_len])], self.func_number)
self.board.ships[-1].manual_painting_layer = 1
self.board.ships[-1].init_m_painting()
self.board.ships[-1].manual_painting = fraction.get_canvas().copy()
self.board.ships[-1].update_me = True
elif self.mainloop.m.game_variant < 2:
self.board.add_universal_unit(grid_x=self.positions[p_ind], grid_y=2, grid_w=1, grid_h=1, txt=v,
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=dc_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=[font_clrs[int(self.term[i % self.term_len])],],
bg_tint_color=unit_clrs[int(self.term[i % self.term_len])], fg_tint_color=unit_clrs_fg[int(self.term[i % self.term_len])],
txt_align=(0, 0), font_type=0, multi_color=False,
alpha=True, immobilized=False, fg_as_hover=True)
self.units.append(self.board.ships[-1])
self.board.ships[-1].pattern_value = self.term[i % self.term_len]
self.board.ships[-1].highlight = False
self.board.ships[-1].readable = False
self.board.ships[-1].checkable = True
self.board.ships[-1].init_check_images()
p_ind += 1
else:
#add pre-entered part of a pattern
if self.mainloop.m.game_variant < 2:
v = self.term_values[int(self.task[i])]
else:
v = ""
if self.mainloop.m.game_variant == 3:
img = "%s.jpg" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('art4apps', self.category, img)
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt="",
fg_img_src=None, bg_img_src=img_src, dc_img_src=bg_door_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=None, dc_tint_color=dc_tint_color,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=True, fg_as_hover=False)
elif self.mainloop.m.game_variant == 5:
img = "%s.png" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('shapes', img)
if self.mixed_colours:
bg_tint_color = self.shape_colors[self.color_ind[int(self.term[i % self.term_len])]]
else:
bg_tint_color = self.shape_colors[self.color_ind[0]]
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt="",
fg_img_src=None, bg_img_src=img_src, dc_img_src=bg_door_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=dc_tint_color,
fg_tint_color=None, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=True, fg_as_hover=False)
elif self.mainloop.m.game_variant == 2:
color_index = self.term_values[int(self.term[i % self.term_len])]
dc_tint_colorx = hue_choice[color_index]
if color_index < 3:
if self.mainloop.scheme is not None and self.mainloop.scheme.dark:
bg_tint_color = (90, 90, 90)
fg_tint_color = (60, 60, 60)
else:
bg_tint_color = (200, 200, 200)
fg_tint_color = (160, 160, 160)
else:
bg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)
fg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt="",
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=fg_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=dc_tint_colorx,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
elif self.mainloop.m.game_variant == 4:
bg_color = ex.hsv_to_rgb(hues[int(self.term[i % self.term_len])], self.mainloop.cl.bg_color_s,
self.mainloop.cl.bg_color_v)
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt="",
fg_img_src=None, bg_img_src=bg_img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_color, dc_tint_color=None,
fg_tint_color=None, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=True, fg_as_hover=False)
fraction = classes.drw.fraction.Fraction(1, self.board.scale, clrs1[int(self.term[i % self.term_len])], clrs2[int(self.term[i % self.term_len])], self.fractions[int(self.term[i % self.term_len])], self.func_number)
self.board.ships[-1].manual_painting_layer = 0
self.board.ships[-1].init_m_painting()
self.board.ships[-1].manual_painting = fraction.get_canvas().copy()
self.board.ships[-1].update_me = True
elif self.mainloop.m.game_variant < 2:
self.board.add_universal_unit(grid_x=self.left_offset + i, grid_y=0, grid_w=1, grid_h=1, txt=v,
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=dc_img_src,
bg_color=(0, 0, 0, 0), border_color=None,
font_color=[font_clrs[int(self.term[i % self.term_len])], ],
bg_tint_color=unit_clrs[int(self.term[i % self.term_len])],
fg_tint_color=unit_clrs_fg[int(self.term[i % self.term_len])],
txt_align=(0, 0), font_type=0, multi_color=False,
alpha=True, immobilized=False, fg_as_hover=True)
if self.mainloop.m.game_variant == 2 or self.mainloop.m.game_variant == 5:
self.board.ships[-1].outline = False
self.board.ships[-1].pattern_value = self.term[i % self.term_len]
self.board.ships[-1].immobilize()
self.board.ships[-1].highlight = False
self.board.ships[-1].readable = False
self.board.ships[-1].checkable = True
self.board.ships[-1].init_check_images()
#add noise
for i in range(p_ind, len(self.positions)):
if self.mainloop.m.game_variant < 2:
v = self.term_values[int(self.term[i % self.term_len])]
else:
v = ""
if self.mainloop.m.game_variant == 3:
img = "%s.jpg" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('art4apps', self.category, img)
self.board.add_universal_unit(grid_x=self.positions[i], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=img_src, bg_img_src=img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=None, dc_tint_color=None,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
self.board.ships[-1].set_blit_mask(os.path.join('unit_bg', 'img_mask.png'))
elif self.mainloop.m.game_variant == 5:
img = "%s.png" % self.imgs[self.term_values[int(self.term[i % self.term_len])]]
img_src = os.path.join('shapes', img)
if self.mixed_colours:
bg_tint_color = self.shape_colors[self.color_ind[int(self.term[i % self.term_len])]]
fg_tint_color = self.shape_colors_fg[self.color_ind[int(self.term[i % self.term_len])]]
else:
bg_tint_color = self.shape_colors[self.color_ind[0]]
fg_tint_color = self.shape_colors_fg[self.color_ind[0]]
self.board.add_universal_unit(grid_x=self.positions[i], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=img_src, bg_img_src=img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=None,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
elif self.mainloop.m.game_variant == 2:
color_index = self.term_values[int(self.term[i % self.term_len])]
dc_tint_colorx = hue_choice[color_index]
if color_index < 3:
if self.mainloop.scheme is not None and self.mainloop.scheme.dark:
bg_tint_color = (90, 90, 90)
fg_tint_color = (60, 60, 60)
else:
bg_tint_color = (200, 200, 200)
fg_tint_color = (160, 160, 160)
else:
bg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.bg_color_s, self.mainloop.cl.bg_color_v)
fg_tint_color = ex.hsv_to_rgb(
ex.rgb_to_hsv(dc_tint_colorx[0], dc_tint_colorx[1], dc_tint_colorx[2])[0],
self.mainloop.cl.fg_hover_s, self.mainloop.cl.fg_hover_v)
self.board.add_universal_unit(grid_x=self.positions[i], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=fg_img_src,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_tint_color, dc_tint_color=dc_tint_colorx,
fg_tint_color=fg_tint_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
elif self.mainloop.m.game_variant == 4:
bg_color = ex.hsv_to_rgb(hues[int(self.term[i % self.term_len])], self.mainloop.cl.bg_color_s,
self.mainloop.cl.bg_color_v)
fg_color = ex.hsv_to_rgb(hues[int(self.term[i % self.term_len])], self.mainloop.cl.fg_hover_s,
self.mainloop.cl.fg_hover_v)
self.board.add_universal_unit(grid_x=self.positions[i], grid_y=2, grid_w=1, grid_h=1, txt="",
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=None,
bg_color=(0, 0, 0, 0), border_color=None, font_color=font_color,
bg_tint_color=bg_color, dc_tint_color=None,
fg_tint_color=fg_color, txt_align=(0, 0), font_type=0,
multi_color=False, alpha=True, immobilized=False, fg_as_hover=True)
fraction = classes.drw.fraction.Fraction(1, self.board.scale, clrs1[int(self.term[i % self.term_len])],
clrs2[int(self.term[i % self.term_len])],
self.fractions[int(self.term[i % self.term_len])],
self.func_number)
self.board.ships[-1].manual_painting_layer = 1
self.board.ships[-1].init_m_painting()
self.board.ships[-1].manual_painting = fraction.get_canvas().copy()
self.board.ships[-1].update_me = True
elif self.mainloop.m.game_variant < 2:
self.board.add_universal_unit(grid_x=self.positions[i], grid_y=2, grid_w=1, grid_h=1, txt=v,
fg_img_src=bg_img_src, bg_img_src=bg_img_src, dc_img_src=dc_img_src,
bg_color=(0, 0, 0, 0), border_color=None,
font_color=[font_clrs[int(self.term[i % self.term_len])], ],
bg_tint_color=unit_clrs[int(self.term[i % self.term_len])],
fg_tint_color=unit_clrs_fg[int(self.term[i % self.term_len])],
txt_align=(0, 0), font_type=0, multi_color=False,
alpha=True, immobilized=False, fg_as_hover=True)
if self.mainloop.m.game_variant == 2 or self.mainloop.m.game_variant == 5:
self.board.ships[-1].outline = False
self.units.append(self.board.ships[-1])
self.board.ships[-1].pattern_value = self.term[i % self.term_len]
self.board.ships[-1].highlight = False
self.board.ships[-1].readable = False
self.board.ships[-1].checkable = True
self.board.ships[-1].init_check_images()
for each in self.board.units:
self.board.all_sprites_list.move_to_front(each)
def show_info_dialog(self):
self.mainloop.dialog.show_dialog(3, self.lang.d["Complete the pattern"])
def handle(self, event):
gd.BoardGame.handle(self, event)
if event.type == pygame.MOUSEBUTTONUP:
for each in self.board.units:
if each.is_door is True:
self.board.all_sprites_list.move_to_front(each)
self.check_result()
if event.type == pygame.MOUSEMOTION or event.type == pygame.MOUSEBUTTONUP:
self.default_hover(event)
def start_game(self, gameid):
self.mainloop.m.start_hidden_game(gameid)
def update(self, game):
game.fill(self.color)
gd.BoardGame.update(self, game)
def auto_check(self):
for each in self.board.ships:
each.update_me = True
if each.checkable and (each.grid_y == 0):
if each.pattern_value == self.term[(self.left_offset + each.grid_x) % self.term_len]:
each.set_display_check(True)
else:
each.set_display_check(False)
else:
each.set_display_check(None)
self.check_for_pattern()
def check_for_pattern(self):
pattern = [" " for i in range(self.task_len)]
for each in self.board.ships:
each.update_me = True
if each.checkable and (each.grid_y == 0):
pattern[each.grid_x - self.left_offset] = each.pattern_value
if self.has_pattern(pattern):
self.level.next_board()
for each in self.board.ships:
each.update_me = True
if each.checkable and (each.grid_y == 0):
each.set_display_check(True)
def auto_check_reset(self):
for each in self.board.ships:
each.update_me = True
each.set_display_check(None)
def check_result(self):
if self.board.grid[0] == self.solution_grid:
self.auto_check()
else:
self.auto_check_reset()
def generate_term(self, s_len):
found = False
while not found:
sequence = ""
for i in range(s_len):
sequence += str(random.randint(0, s_len-1))
if i > 0 and not found:
if sequence[i-1] != sequence[i]:
found = True
if found:
# if len is even and halves are the same then regenerate to avoid having half len term, ie 1212 1212
if s_len > 3 and s_len % 2 == 0:
if sequence[0:s_len//2] == sequence[s_len//2:]:
found = False
continue
return sequence
def generate_task(self, term, term_len, term_count, term_completed_count, term_semi_completed_count, shuffled=False):
s = []
indexes = [x for x in range(term_len)]
for i in range(term_completed_count):
s.append(term)
for i in range(term_semi_completed_count):
count2show = random.randint(1, term_len-1)
# make sure all numbers are displayed by the last semi-shown sequence
if i == term_semi_completed_count-1:
li = len(indexes)
if count2show < li:
count2show = li
ts = ["?" for each in range(term_len)]
if count2show <= len(indexes):
for j in range(count2show):
p = random.randint(0, len(indexes)-1)
ts[indexes[p]] = term[indexes[p]]
del indexes[p]
else:
li = len(indexes)
if li > 0:
for j in range(li):
p = random.randint(0, len(indexes) - 1)
ts[indexes[p]] = term[indexes[p]]
del indexes[p]
for j in range(count2show-li):
p = random.randint(0, term_len-1)
ts[p] = term[p]
s.append("".join(ts))
for i in range(term_count-(term_completed_count+term_semi_completed_count)):
s.append("?" * term_len)
if shuffled:
random.shuffle(s)
return "".join(s)
else:
return "".join(s)
def has_pattern(self, sequence):
l = len(sequence)
# pattern needs to repeat at least 2 times and needs to have at least 2 different items
has_pattern = False
for i in range(2, l//2 + 1):
if l % i == 0: # is the "i" one of the factors?
has_pattern = True
# check if all parts are equal
for j in range(i, l, i):
if sequence[j:j+i] != sequence[j-i:j]:
has_pattern = False
if has_pattern:
return has_pattern
return has_pattern
def initiate_shapes(self):
self.shape_colors = []
self.shape_colors_fg = []
for h in range(0, 255, 15):
s = random.randint(200, 235)
v = random.randint(150, 235)
self.shape_colors.append(ex.hsv_to_rgb(h, s, v))
self.shape_colors_fg.append(ex.hsv_to_rgb(h, s - 40, v + 20))
self.color_ind = [x for x in range(len(self.shape_colors))]
self.imgs = ["s%d" % x for x in range(1, 18)]
def initiate_images(self):
gv = random.randint(0, 15)
if gv == 0:
self.category = "animals"
self.imgs = ['panda', 'pug', 'koala', 'gorilla', 'kitten', 'rabbit', 'baby_rabbit', 'chimp', 'puppy', 'cat', 'dog']
elif gv == 12:
self.category = "animals" # farm
self.imgs = ['cow', 'pony', 'pig', 'donkey', 'sheep', 'buffalo', 'bull', 'goat', 'horse', 'ram', 'ox']
elif gv == 13:
self.category = "animals" # large predators
self.imgs = ['wolf', 'panther', 'tiger', 'fox', 'leopard', 'bear', 'lion_cub', 'jaguar', 'hyena', 'lion']
elif gv == 14:
self.category = "animals" #
self.imgs = ['fawn', 'llama', 'moose', 'zebra', 'camel', 'antelope', 'anteater', 'lama', 'deer', 'hippopotamus', 'kangaroo', 'elk', 'rhinoceros', 'elephant', 'giraffe']
elif gv == 15:
self.category = "animals" # rodents
self.imgs = ['mouse', 'hamster', 'bat', 'hedgehog', 'guinea_pig', 'squirrel', 'sloth', 'rat', 'otter', 'mole', 'gopher', 'beaver', 'skunk', 'lemur', 'opossum', ]
elif gv == 1:
self.category = "animals" # birds
self.imgs = ['turkey', 'magpie', 'vulture', 'bird', 'crow', 'parakeet', 'hummingbird', 'chick', 'hen', 'shrike', 'penguin', 'ostrich', 'pigeon', 'flamingo', 'sparrow', 'dove', 'eagle', 'owl', 'goose', 'pelican', 'duck', 'peacock', 'parrot', 'jay', 'rooster', 'blackbird', 'swan', 'chicken']
elif gv == 2:
self.category = "animals" # bugs
self.imgs = ['ladybug', 'spider', 'mosquito', 'slug', 'caterpillar', 'scorpion', 'bee', 'snail', 'beetle', 'dragonfly', 'ant']
elif gv == 3:
self.category = "animals" # water animals
self.imgs = ['shrimp', 'seal', 'lobster', 'crab', 'clam', 'squid', 'starfish', 'piranha', 'dolphin', 'whale', 'jellyfish', 'shark', 'ray', 'oyster']
elif gv == 4:
self.category = "animals" # reptiles and amphibians
self.imgs = ['frog', 'turtle', 'iguana', 'snake', 'chameleon', 'viper', 'cobra', 'salamander', 'toad', 'lizard', 'alligator']
elif gv == 5:
self.category = "sport"
self.imgs = ['judo', 'pool', 'ride', 'stretch', 'walk', 'run', 'swim', 'hop', 'hike', 'boxing', 'hockey', 'throw', 'skate', 'win', 'squat', 'ski', 'golf', 'stand', 'tennis', 'jump', 'rowing', 'jog', 'rope']
elif gv == 6:
self.category = "construction"
self.imgs = ['lighthouse', 'circus', 'temple', 'well', 'street', 'castle', 'store', 'school', 'farm', 'bridge', 'dam', 'pyramid', 'barn', 'mill', 'cabin', 'shed', 'garage', 'mosque', 'hospital', 'tent', 'house', 'bank', 'hut']
elif gv == 7:
self.category = "nature"
self.imgs = ['land', 'canyon', 'sea', 'shore', 'mountain', 'pond', 'cave', 'island', 'forest', 'desert', 'iceberg']
elif gv == 8:
self.category = "jobs"
self.imgs = ['clown', 'engineer', 'priest', 'vet', 'judge', 'chef', 'athlete', 'librarian', 'juggler', 'police', 'plumber', 'queen', 'farmer', 'magic', 'knight', 'doctor', 'bricklayer', 'cleaner', 'teacher', 'hunter', 'soldier', 'musician', 'fisherman', 'princess', 'fireman', 'nun', 'pirate', 'cowboy', 'electrician', 'nurse', 'king', 'president', 'office', 'carpenter', 'worker', 'mechanic', 'actor', 'cook', 'student', 'butcher', 'accountant', 'prince', 'pope', 'sailor', 'boxer', 'ballet', 'astronaut', 'painter', 'anaesthesiologist', 'scientist']
elif gv == 9:
self.category = "clothes_n_accessories"
self.imgs = ['gloves', 'hat', 'jacket', 'overalls', 'pullover', 'sandals', 'shirt', 'shoe', 'shoes', 'shorts', 'slippers', 'sneaker', 'sweatshirt', 'trousers', 'vest']
elif gv == 10:
self.category = "fruit_n_veg"
self.imgs = ['carrot', 'blackberries', 'celery', 'turnip', 'cacao', 'peach', 'melon', 'grapefruit', 'broccoli', 'grapes', 'spinach', 'fig', 'radish', 'tomato', 'kiwi', 'asparagus', 'olives', 'cucumbers', 'beans', 'strawberry', 'peppers', 'raspberry', 'apricot', 'potatoes', 'peas', 'cabbage', 'cherries', 'squash', 'blueberries', 'pear', 'orange', 'pumpkin', 'avocado', 'garlic', 'onion', 'apple', 'lime', 'cauliflower', 'mango', 'lettuce', 'lemon', 'aubergine', 'artichokes', 'plums', 'leek', 'bananas', 'papaya']
elif gv == 11:
self.category = "transport"
self.imgs = ['taxi', 'car', 'bike', 'raft', 'bus', 'boat', 'truck', 'sleigh', 'carpet', 'motorcycle', 'train', 'ship', 'van', 'canoe', 'rocket', 'sledge', 'bicycle']
|
imiolek-ireneusz/eduActiv8
|
game_boards/game025.py
|
Python
|
gpl-3.0
| 39,742
|
[
"Elk",
"Jaguar",
"MOOSE"
] |
d308a8584569ff4542c0662ec19acbdb9a831990e5265ab42b5a1f884bcb4ae2
|
import ast
from astmonkey import visitors, transformers
def parseprint(code, filename="<string>", mode="exec", **kwargs):
    """Parse some code from a string and pretty-print its AST.

    :param code: source text to parse
    :param filename: name reported in SyntaxError messages
        (BUGFIX: previously accepted but never passed to ast.parse)
    :param mode: compile mode, e.g. "exec" or "eval"
    :param kwargs: extra keyword arguments forwarded to :func:`ast.dump`
    """
    node = ast.parse(code, filename=filename, mode=mode)
    print(ast.dump(node, **kwargs))
source = """
l = k = [1,2,3]
i = a = 0
while i < 3:
a = a+1
"""
parseprint(source)
node = ast.parse(source)
node = transformers.ParentNodeTransformer().visit(node)
visitor = visitors.GraphNodeVisitor()
visitor.visit(node)
visitor.graph.write_png('graph.png')
|
librallu/RICM4Projet
|
parser/test.py
|
Python
|
gpl-3.0
| 532
|
[
"VisIt"
] |
7f9000ff27075551b7a3c79fac796b19d775710040f27504e7bad92ba8fc4ff8
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, getopt, re, os
try:
from splinter import Browser
except:
print "Please install Splinter: http://splinter.readthedocs.org/en/latest/install.html"
sys.exit();
import getpass
from splinter.request_handler.status_code import HttpResponseError
def main(argv):
email = None
txtopt = None
profile = None
self = None
socks = None
socksPort = None
try:
opts, args = getopt.getopt(argv, "ho:m:p:s:S:P:",["port=","socks=","self=","profile=","output=","mail=","help"])
except:
print "Use --help for help"
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print 'Usage %s options \n' % (os.path.basename(__file__))
print ' -h, --help This help'
print ' -m, --mail Your facebook login email'
print ' -o, --output Your output file name'
print ' -p, --profile Profile to capture friends(name after facebook.com/)'
print ' -s, --self Your profile name(name after facebook.com/)'
print ' -S, --socks Socks Proxy Address for Tor use'
print ' -P, --port Port Socks for Tor use'
sys.exit()
elif opt in ("-o","--output"):
txtopt = arg
elif opt in ("-m","--mail"):
email = arg
elif opt in ("-p","--profile"):
profile = arg
elif opt in ("-s","--self"):
self = arg
elif opt in ("-S","--socks"):
socks = arg
elif opt in ("-P","--port"):
socksPort = arg
if not email or not txtopt or not self:
print 'Use --help for help'
sys.exit()
password = getpass.getpass()
if socks and socksProt:
proxy_settings = {
'network.proxy.type':1,
'network.proxy.socks': socks,
'network.proxy.socks_port': socksPort
}
browser = Browser('firefox',profile_preferences=proxy_settings)
else:
browser = Browser()
# with Browser() as browser:
browser.visit('https://m.facebook.com/')
browser.fill("email",email);
browser.fill("pass",password);
browser.find_by_name("login").click()
if browser.is_element_present_by_css('.login_error_box'):
print 'The email and password didn\'t work.'
sys.exit()
try:
fileopt = open(txtopt, 'a')
except:
sys.exit('Unable to open file %s' % txtopt)
if not profile:
browser.find_link_by_text("Profile").click()
print 'Accessing profile at %s\n' % browser.url
browser.find_link_by_text("Friends").click()
print 'Accessing friends at %s\n' % browser.url
else:
url = 'https://m.facebook.com/%s/friends?refid=17' % profile
print 'Accessing profile friends at %s\n' % url
browser.visit(url)
friends = browser.find_by_css('a')
notList = ["/a/mobile/friends/add_friend.php","language.php","/help/","/settings/","/pages/","/bugnub/","/policies/","/logout","/home","/friends","/messages/","/notifications.php","/buddylist.php","/menu/","/photo.php","/mbasic/","%s"%profile,"%s"%self]
for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
while browser.is_element_present_by_css("#m_more_friends"):
browser.find_by_css('#m_more_friends a').first.click()
friends = browser.find_by_css('a')
for friend in friends:
if all([x not in friend['href'] for x in notList ]):
fileopt.write('%s\n' % friend['href'])
print '%s' % friend.value
if __name__ == "__main__":
    # Allow a clean Ctrl+C exit without a traceback.
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        sys.stdout.write('\nQuit by keyboard interrupt sequence!')
|
skladsec/facebookFriendSaver
|
facebookFriendSaver.py
|
Python
|
isc
| 3,504
|
[
"VisIt"
] |
0073cbdd70e2422dde6e3d83598736c082a25399fd4373b0a492273d4a2e2464
|
""" This is the guy that actually modifies the content of the CS
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import zlib
import difflib
import six
from diraccfg import CFG
from DIRAC.Core.Utilities import List, Time
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = "$Id$"
class Modificator(object):
  """In-memory editor for the Configuration Service contents.

  Keeps a local ``CFG`` tree, tags every modification with the committer's
  identity (stored as a comment line starting with ``@@-``), and uses an RPC
  client to the configuration server for load / diff / merge / commit
  operations.
  """

  def __init__(self, rpcClient=False, commiterId="unknown"):
    # comment lines starting with this tag record who last changed an entry
    self.commiterTag = "@@-"
    self.commiterId = commiterId
    self.cfgData = CFG()
    self.rpcClient = None
    if rpcClient:
      self.setRPCClient(rpcClient)

  def loadCredentials(self):
    """Set the committer id from the active proxy as 'user@group - timestamp'."""
    retVal = getProxyInfo()
    if retVal['OK']:
      credDict = retVal['Value']
      self.commiterId = "%s@%s - %s" % (credDict['username'],
                                        credDict['group'],
                                        Time.dateTime().strftime("%Y-%m-%d %H:%M:%S"))
      return retVal
    return retVal

  def setRPCClient(self, rpcClient):
    """Set the RPC client used to talk to the configuration server."""
    self.rpcClient = rpcClient

  def loadFromRemote(self):
    """Replace the local CFG with the server's current (zlib-compressed) data."""
    retVal = self.rpcClient.getCompressedData()
    if retVal['OK']:
      self.cfgData = CFG()
      data = retVal['Value']
      if six.PY3 and isinstance(data, str):
        # on Python 3 the transport may deliver bytes disguised as str;
        # round-trip through surrogateescape to recover the raw bytes
        data = data.encode(errors="surrogateescape")
      self.cfgData.loadFromBuffer(zlib.decompress(data).decode())
    return retVal

  def getCFG(self):
    """Return the underlying CFG object."""
    return self.cfgData

  def getSections(self, sectionPath):
    """List the sub-sections of *sectionPath*."""
    return gConfigurationData.getSectionsFromCFG(sectionPath, self.cfgData)

  def getComment(self, sectionPath):
    """Return the comment attached to *sectionPath*."""
    return gConfigurationData.getCommentFromCFG(sectionPath, self.cfgData)

  def getOptions(self, sectionPath):
    """List the option names defined directly under *sectionPath*."""
    return gConfigurationData.getOptionsFromCFG(sectionPath, self.cfgData)

  def getOptionsDict(self, sectionPath):
    """Gives the options of a CS section in a Python dict with values as
    lists"""
    opts = self.getOptions(sectionPath)
    pathDict = dict((o, self.getValue("%s/%s" % (sectionPath, o))) for o in opts)
    return pathDict

  def getDictRootedAt(self, relpath="", root=""):
    """Gives the configuration rooted at path in a Python dict. The
    result is a Python dictionary that reflects the structure of the
    config file."""
    def getDictRootedAt(path):
      # recursive worker: options become leaves, sections become sub-dicts
      retval = {}
      opts = self.getOptionsDict(path)
      secs = self.getSections(path)
      for k in opts:
        retval[k] = opts[k]
      for i in secs:
        retval[i] = getDictRootedAt(path + "/" + i)
      return retval
    return getDictRootedAt(root + "/" + relpath)

  def getValue(self, optionPath):
    """Return the value of the option at *optionPath*."""
    return gConfigurationData.extractOptionFromCFG(optionPath, self.cfgData)

  def sortAlphabetically(self, path, ascending=True):
    """Sort the entries of section *path* alphabetically, recording the committer."""
    cfg = self.__getParentCFG(path, parentLevel=0)
    if cfg:
      if cfg.sortAlphabetically(ascending):
        self.__setCommiter(path)

  def __getParentCFG(self, path, parentLevel=1):
    # Walk down to the CFG node *parentLevel* levels above the last element
    # of *path*; returns False when an intermediate section is missing.
    sectionList = List.fromChar(path, "/")
    cfg = self.cfgData
    try:
      if parentLevel > 0:
        sectionList = sectionList[:-parentLevel]
      for section in sectionList:
        cfg = cfg[section]
      return cfg
    except Exception:
      return False

  def __setCommiter(self, entryPath, cfg=False):
    # Replace any previous committer tag line in the entry's comment with
    # the current committer id.
    if not cfg:
      cfg = self.__getParentCFG(entryPath)
    entry = List.fromChar(entryPath, "/")[-1]
    comment = cfg.getComment(entry)
    filteredComment = [line.strip() for line in comment.split("\n") if line.find(self.commiterTag) != 0]
    filteredComment.append("%s%s" % (self.commiterTag, self.commiterId))
    cfg.setComment(entry, "\n".join(filteredComment))

  def setOptionValue(self, optionPath, value):
    """Set the option at *optionPath*, creating parent sections as needed."""
    levelList = [level.strip() for level in optionPath.split("/") if level.strip() != ""]
    parentPath = "/%s" % "/".join(levelList[:-1])
    optionName = List.fromChar(optionPath, "/")[-1]
    self.createSection(parentPath)
    cfg = self.__getParentCFG(optionPath)
    if not cfg:
      return
    cfg.setOption(optionName, value)
    self.__setCommiter(optionPath, cfg)

  def createSection(self, sectionPath):
    """Create every missing section along *sectionPath*; True if any was created."""
    levelList = [level.strip() for level in sectionPath.split("/") if level.strip() != ""]
    currentPath = ""
    cfg = self.cfgData
    createdSection = False
    for section in levelList:
      currentPath += "/%s" % section
      if section not in cfg.listSections():
        cfg.createNewSection(section)
        self.__setCommiter(currentPath)
        createdSection = True
      cfg = cfg[section]
    return createdSection

  def setComment(self, entryPath, value):
    """Set the comment of *entryPath*; returns True on success."""
    cfg = self.__getParentCFG(entryPath)
    entry = List.fromChar(entryPath, "/")[-1]
    if cfg.setComment(entry, value):
      self.__setCommiter(entryPath)
      return True
    return False

  def existsSection(self, sectionPath):
    """Return True when *sectionPath* exists as a section."""
    sectionList = List.fromChar(sectionPath, "/")
    cfg = self.cfgData
    try:
      for section in sectionList[:-1]:
        cfg = cfg[section]
      return len(sectionList) == 0 or sectionList[-1] in cfg.listSections()
    except Exception:
      return False

  def existsOption(self, optionPath):
    """Return True when *optionPath* exists as an option."""
    sectionList = List.fromChar(optionPath, "/")
    cfg = self.cfgData
    try:
      for section in sectionList[:-1]:
        cfg = cfg[section]
      return sectionList[-1] in cfg.listOptions()
    except Exception:
      return False

  def renameKey(self, path, newName):
    """Rename the last element of *path* to *newName*; True on success."""
    parentCfg = self.cfgData.getRecursive(path, -1)
    if not parentCfg:
      return False
    pathList = List.fromChar(path, "/")
    oldName = pathList[-1]
    if parentCfg['value'].renameKey(oldName, newName):
      pathList[-1] = newName
      self.__setCommiter("/%s" % "/".join(pathList))
      return True
    else:
      return False

  def copyKey(self, originalKeyPath, newKey):
    """Duplicate the key at *originalKeyPath* under the name *newKey*."""
    parentCfg = self.cfgData.getRecursive(originalKeyPath, -1)
    if not parentCfg:
      return False
    pathList = List.fromChar(originalKeyPath, "/")
    originalKey = pathList[-1]
    if parentCfg['value'].copyKey(originalKey, newKey):
      self.__setCommiter("/%s/%s" % ("/".join(pathList[:-1]), newKey))
      return True
    return False

  def removeOption(self, optionPath):
    """Delete the option at *optionPath*; False when it does not exist."""
    if not self.existsOption(optionPath):
      return False
    cfg = self.__getParentCFG(optionPath)
    optionName = List.fromChar(optionPath, "/")[-1]
    return cfg.deleteKey(optionName)

  def removeSection(self, sectionPath):
    """Delete the section at *sectionPath*; False when it does not exist."""
    if not self.existsSection(sectionPath):
      return False
    cfg = self.__getParentCFG(sectionPath)
    sectionName = List.fromChar(sectionPath, "/")[-1]
    return cfg.deleteKey(sectionName)

  def loadFromBuffer(self, data):
    """Replace the local CFG with one parsed from the string *data*."""
    self.cfgData = CFG()
    self.cfgData.loadFromBuffer(data)

  def loadFromFile(self, filename):
    """Replace the local CFG with the contents of *filename*."""
    self.cfgData = CFG()
    self.mergeFromFile(filename)

  def dumpToFile(self, filename):
    """Write the current CFG to *filename* as text."""
    with open(filename, "wt") as fd:
      fd.write(str(self.cfgData))

  def mergeFromFile(self, filename):
    """Merge the CFG stored in *filename* on top of the local data."""
    cfg = CFG()
    cfg.loadFromFile(filename)
    self.cfgData = self.cfgData.mergeWith(cfg)

  def mergeFromCFG(self, cfg):
    """Merge *cfg* on top of the local data."""
    self.cfgData = self.cfgData.mergeWith(cfg)

  def mergeSectionFromCFG(self, sectionPath, cfg):
    """Merge *cfg* into the section at *sectionPath*; True on success."""
    parentDict = self.cfgData.getRecursive(sectionPath, -1)
    parentCFG = parentDict['value']
    secName = [lev.strip() for lev in sectionPath.split("/") if lev.strip()][-1]
    secCFG = parentCFG[secName]
    if not secCFG:
      return False
    mergedCFG = secCFG.mergeWith(cfg)
    # recreate the section so the merged content replaces the old one
    parentCFG.deleteKey(secName)
    parentCFG.createNewSection(secName, parentDict['comment'], mergedCFG)
    self.__setCommiter(sectionPath)
    return True

  def __str__(self):
    return str(self.cfgData)

  def commit(self):
    """Push the local configuration to the server (zlib-compressed)."""
    compressedData = zlib.compress(str(self.cfgData).encode(), 9)
    return self.rpcClient.commitNewData(compressedData)

  def getHistory(self, limit=0):
    """Return the server's commit history, or an empty list on failure."""
    retVal = self.rpcClient.getCommitHistory(limit)
    if retVal['OK']:
      return retVal['Value']
    return []

  def showCurrentDiff(self):
    """Return an ndiff between the server's data and the local data."""
    retVal = self.rpcClient.getCompressedData()
    if retVal['OK']:
      data = retVal['Value']
      if six.PY3 and isinstance(data, str):
        data = data.encode(errors="surrogateescape")
      remoteData = zlib.decompress(data).decode().splitlines()
      localData = str(self.cfgData).splitlines()
      return difflib.ndiff(remoteData, localData)
    return []

  def getVersionDiff(self, fromDate, toDate):
    """Return an ndiff between two server-side configuration versions."""
    retVal = self.rpcClient.getVersionContents([fromDate, toDate])
    if retVal['OK']:
      fromData = retVal['Value'][0]
      if six.PY3 and isinstance(fromData, str):
        fromData = fromData.encode(errors="surrogateescape")
      fromData = zlib.decompress(fromData).decode()
      toData = retVal['Value'][1]
      if six.PY3 and isinstance(toData, str):
        toData = toData.encode(errors="surrogateescape")
      toData = zlib.decompress(toData).decode()
      return difflib.ndiff(fromData.split("\n"), toData.split("\n"))
    return []

  def mergeWithServer(self):
    """Merge the local modifications on top of the server's current data."""
    retVal = self.rpcClient.getCompressedData()
    if retVal['OK']:
      remoteCFG = CFG()
      data = retVal['Value']
      if six.PY3 and isinstance(data, str):
        data = data.encode(errors="surrogateescape")
      remoteCFG.loadFromBuffer(zlib.decompress(data).decode())
      serverVersion = gConfigurationData.getVersion(remoteCFG)
      self.cfgData = remoteCFG.mergeWith(self.cfgData)
      # keep the server's version stamp so a later commit is accepted
      gConfigurationData.setVersion(serverVersion, self.cfgData)
    return retVal

  def rollbackToVersion(self, version):
    """Ask the server to roll back to *version*."""
    return self.rpcClient.rollbackToVersion(version)

  def updateGConfigurationData(self):
    """Install the local CFG as the process-wide remote configuration."""
    gConfigurationData.setRemoteCFG(self.cfgData)
|
yujikato/DIRAC
|
src/DIRAC/ConfigurationSystem/private/Modificator.py
|
Python
|
gpl-3.0
| 9,707
|
[
"DIRAC"
] |
199e01fcfa13aee71ae753657a388eb7c74fca7a474d032d286556a9ef1d763c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
# $HeadURL$
# File : dirac-distribution
# Author : Adria Casajus
########################################################################
"""
Create tarballs for a given DIRAC release
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities import List, File, Distribution, Platform, Subprocess, CFG
import sys, os, re, urllib2, tempfile, getpass, imp
try:
import hashlib as md5
except ImportError:
import md5
# Helper object shared by the tarball creation / upload steps.
globalDistribution = Distribution.Distribution()
# Shell command used to publish the generated tarballs for each project;
# %OUTLOCATION% is substituted with the local directory holding the tarballs.
g_uploadCmd = {
  'DIRAC' : "( cd %OUTLOCATION% ; tar -cf - *.tar.gz *.md5 *.cfg *.pdf *.html ) | ssh lhcbprod@lxplus.cern.ch 'cd /afs/cern.ch/lhcb/distribution/DIRAC3/installSource && tar -xvf - && ls *.tar.gz > tars.list'",
  'LHCb' : "( cd %OUTLOCATION% ; tar -cf - *.tar.gz *.md5 *.cfg *.pdf *.html ) | ssh lhcbprod@lxplus.cern.ch 'cd /afs/cern.ch/lhcb/distribution/LHCbDirac_project && tar -xvf - && ls *.tar.gz > tars.list'",
  'ILC' : "( cd %OUTLOCATION% ; tar -cf - *.tar.gz *.md5 *.cfg *.pdf *.html ) | ssh lhcbprod@lxplus.cern.ch 'cd /afs/cern.ch/lhcb/distribution/DIRAC3/tars && tar -xvf - && ls *.tar.gz > tars.list'",
}
###
# Load release manager from dirac-install
##
diracInstallLocation = os.path.join( os.path.dirname( __file__ ), "dirac-install" )
if not os.path.isfile( diracInstallLocation ):
diracInstallLocation = os.path.join( os.path.dirname( __file__ ), "dirac-install.py" )
try:
diFile = open( diracInstallLocation, "r" )
DiracInstall = imp.load_module( "DiracInstall", diFile, diracInstallLocation, ( "", "r", imp.PY_SOURCE ) )
diFile.close()
except Exception, excp:
raise
gLogger.fatal( "Cannot find dirac-install! Aborting (%s)" % str( excp ) )
sys.exit( 1 )
##END OF LOAD
class Params:
  """Container for the command line options of the distribution script.

  Every ``set*`` method is a DIRAC Script switch callback: it receives the
  raw option value and returns S_OK()/S_ERROR().
  """

  def __init__( self ):
    self.releasesToBuild = []          #releases requested with -r
    self.projectName = 'DIRAC'         #project the releases belong to
    self.debug = False
    self.externalsBuildType = [ 'client' ]
    self.ignoreExternals = False
    self.forceExternals = False
    self.ignorePackages = False
    self.relcfg = False                #optional explicit releases.cfg path
    self.externalsPython = '26'
    self.destination = ""              #output dir; temp dir when empty
    self.externalsLocation = ""
    self.makeJobs = 1
    self.globalDefaults = ""
    self.forcedLocations = {}          #moduleName -> forced source URL

  def setReleases( self, optionValue ):
    self.releasesToBuild = List.fromChar( optionValue )
    return S_OK()

  def setProject( self, optionValue ):
    self.projectName = optionValue
    return S_OK()

  def setDebug( self, optionValue ):
    self.debug = True
    return S_OK()

  def setExternalsBuildType( self, optionValue ):
    self.externalsBuildType = List.fromChar( optionValue )
    return S_OK()

  def setForceExternals( self, optionValue ):
    self.forceExternals = True
    return S_OK()

  def setIgnoreExternals( self, optionValue ):
    self.ignoreExternals = True
    return S_OK()

  def setDestination( self, optionValue ):
    self.destination = optionValue
    return S_OK()

  def setPythonVersion( self, optionValue ):
    self.externalsPython = optionValue
    return S_OK()

  def setIgnorePackages( self, optionValue ):
    self.ignorePackages = True
    return S_OK()

  def setExternalsLocation( self, optionValue ):
    self.externalsLocation = optionValue
    return S_OK()

  def setMakeJobs( self, optionValue ):
    self.makeJobs = max( 1, int( optionValue ) )
    return S_OK()

  def setReleasesCFG( self, optionValue ):
    self.relcfg = optionValue
    return S_OK()

  def setGlobalDefaults( self, value ):
    self.globalDefaults = value
    return S_OK()

  def overWriteLocation( self, value ):
    """Force the source location of one module. Expects '<moduleName>:<url>'."""
    locSplit = value.split( ":" )
    if len( locSplit ) < 2:
      return S_ERROR( "Invalid location. It has to have format <moduleName>:<url> instead of %s" % value )
    modName = locSplit[0]
    location = ":".join( locSplit[1:] )
    gLogger.notice( "Forcing location of %s to %s" % ( modName, location ) )
    self.forcedLocations[ modName ] = location
    return S_OK()

  def registerSwitches( self ):
    """Register all switches with the DIRAC Script machinery.

    Callbacks are bound through ``self`` instead of the module-level
    ``cliParams`` global so the class no longer depends on script globals.
    'destination' and 'pythonVersion' now carry the trailing '=' marking a
    value-taking long option, consistent with every other ':' short switch.
    """
    Script.registerSwitch( "r:", "releases=", "releases to build (mandatory, comma separated)", self.setReleases )
    Script.registerSwitch( "l:", "project=", "Project to build the release for (DIRAC by default)", self.setProject )
    Script.registerSwitch( "D:", "destination=", "Destination where to build the tar files", self.setDestination )
    Script.registerSwitch( "i:", "pythonVersion=", "Python version to use (25/26)", self.setPythonVersion )
    Script.registerSwitch( "P", "ignorePackages", "Do not make tars of python packages", self.setIgnorePackages )
    Script.registerSwitch( "C:", "relcfg=", "Use <file> as the releases.cfg", self.setReleasesCFG )
    Script.registerSwitch( "b", "buildExternals", "Force externals compilation even if already compiled", self.setForceExternals )
    Script.registerSwitch( "B", "ignoreExternals", "Skip externals compilation", self.setIgnoreExternals )
    Script.registerSwitch( "t:", "buildType=", "External type to build (client/server)", self.setExternalsBuildType )
    Script.registerSwitch( "x:", "externalsLocation=", "Use externals location instead of downloading them", self.setExternalsLocation )
    Script.registerSwitch( "j:", "makeJobs=", "Make jobs (default is 1)", self.setMakeJobs )
    Script.registerSwitch( 'M:', 'defaultsURL=', 'Where to retrieve the global defaults from', self.setGlobalDefaults )
    Script.registerSwitch( 'O:', 'overwriteLocation=', 'Force location of modules from where to make the release. Format <moduleName>:<url>', self.overWriteLocation )
    Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                         '\nUsage:',
                                         '  %s [option|cfgfile] ...\n' % Script.scriptName ] ) )
class DistributionMaker:
  """Drives tarball creation for a set of releases of a DIRAC project.

  Creates the per-module tarballs, optionally compiles and tars the
  externals, and writes the release-<project>-<version>.cfg/.md5 files.
  All state is reached through ``self``: the original implementation read
  the module-level ``cliParams``/``distMaker`` globals in several methods,
  which broke encapsulation and would fail for any second instance.
  """

  def __init__( self, cliParams ):
    self.cliParams = cliParams
    self.relConf = DiracInstall.ReleaseConfig( projectName = cliParams.projectName,
                                               globalDefaultsURL = cliParams.globalDefaults )
    self.relConf.setDebugCB( gLogger.info )
    self.relConf.loadProjectDefaults()

  def isOK( self ):
    """Validate the CLI options and ensure the destination directory exists."""
    if not self.cliParams.releasesToBuild:
      gLogger.error( "Missing releases to build!" )
      Script.showHelp()
      return False
    if not self.cliParams.destination:
      self.cliParams.destination = tempfile.mkdtemp( 'DiracDist' )
    else:
      try:
        os.makedirs( self.cliParams.destination )
      except OSError:
        #Most likely the directory already exists; tarball creation will
        #fail loudly later if the destination is really unusable
        pass
    gLogger.notice( "Will generate tarballs in %s" % self.cliParams.destination )
    return True

  def loadReleases( self ):
    """Load the releases.cfg covering the requested releases."""
    gLogger.notice( "Loading releases.cfg" )
    return self.relConf.loadProjectRelease( self.cliParams.releasesToBuild, releaseMode = True, relLocation = self.cliParams.relcfg )

  def createModuleTarballs( self ):
    """Create the module tarballs for every requested release."""
    for version in self.cliParams.releasesToBuild:
      result = self.__createReleaseTarballs( version )
      if not result[ 'OK' ]:
        return result
    return S_OK()

  def __createReleaseTarballs( self, releaseVersion ):
    """Create one tarball per module belonging to releaseVersion by shelling
    out to dirac-create-distribution-tarball."""
    result = self.relConf.getModulesForRelease( releaseVersion )
    if not result[ 'OK' ]:
      return result
    modsToTar = result[ 'Value' ]
    for modName in modsToTar:
      modVersion = modsToTar[ modName ]
      dctArgs = [ '-A' ] #Leave a copy of the release notes outside the tarballs
      #Version
      dctArgs.append( "-n '%s'" % modName )
      dctArgs.append( "-v '%s'" % modVersion )
      gLogger.notice( "Creating tar for %s version %s" % ( modName, modVersion ) )
      #Source
      if modName in self.cliParams.forcedLocations:
        location = self.cliParams.forcedLocations[ modName ]
        gLogger.notice( "Source is forced to %s" % location )
        dctArgs.append( "-u '%s'" % location )
      else:
        result = self.relConf.getModSource( releaseVersion, modName )
        if not result[ 'OK' ]:
          return result
        modSrcTuple = result[ 'Value' ]
        if modSrcTuple[0]:
          logMsgVCS = modSrcTuple[0]
          dctArgs.append( "-z '%s'" % modSrcTuple[0] )
        else:
          logMsgVCS = "autodiscover"
        dctArgs.append( "-u '%s'" % modSrcTuple[1] )
        gLogger.notice( "Sources will be retrieved from %s (%s)" % ( modSrcTuple[1], logMsgVCS ) )
      #Tar destination
      dctArgs.append( "-D '%s'" % self.cliParams.destination )
      if self.cliParams.debug:
        dctArgs.append( "-dd" )
      #Script location discovery
      scriptName = os.path.join( os.path.dirname( __file__ ), "dirac-create-distribution-tarball" )
      if not os.path.isfile( scriptName ):
        scriptName = os.path.join( os.path.dirname( __file__ ), "dirac-create-distribution-tarball.py" )
      cmd = "'%s' %s" % ( scriptName, " ".join( dctArgs ) )
      gLogger.verbose( "Executing %s" % cmd )
      if os.system( cmd ) != 0:
        return S_ERROR( "Failed creating tarball for module %s. Aborting" % modName )
      gLogger.notice( "Tarball for %s version %s created" % ( modName, modVersion ) )
    return S_OK()

  def getAvailableExternals( self ):
    """Return (type, version, platform, python) tuples of the externals
    already published on the install source, [] on network problems."""
    packagesURL = "http://lhcbproject.web.cern.ch/lhcbproject/dist/DIRAC3/installSource/tars.list"
    try:
      remoteFile = urllib2.urlopen( packagesURL )
    except urllib2.URLError:
      gLogger.exception()
      return []
    remoteData = remoteFile.read()
    remoteFile.close()
    #Raw string so the '\.' escapes are taken verbatim (same pattern value,
    #no reliance on unknown escapes being preserved in a plain literal)
    versionRE = re.compile( r"Externals-([a-zA-Z]*)-([a-zA-Z0-9]*(?:-pre[0-9]+)*)-(.*)-(python[0-9]+)\.tar\.gz" )
    availableExternals = []
    for line in remoteData.split( "\n" ):
      res = versionRE.search( line )
      if res:
        availableExternals.append( res.groups() )
    return availableExternals

  def createExternalsTarballs( self ):
    """Compile and tar externals once per distinct requested release."""
    extDone = []
    for releaseVersion in self.cliParams.releasesToBuild:
      if releaseVersion in extDone:
        continue
      if not self.tarExternals( releaseVersion ):
        return False
      extDone.append( releaseVersion )
    return True

  def tarExternals( self, releaseVersion ):
    """Compile (unless already published) and tar the externals of one release
    for every requested build type. Exits the process on compile failures."""
    externalsVersion = self.relConf.getExtenalsVersion( releaseVersion )
    platform = Platform.getPlatformString()
    availableExternals = self.getAvailableExternals()
    if not externalsVersion:
      gLogger.notice( "Externals is not defined for release %s" % releaseVersion )
      return False
    for externalType in self.cliParams.externalsBuildType:
      requestedExternals = ( externalType, externalsVersion, platform, 'python%s' % self.cliParams.externalsPython )
      requestedExternalsString = "-".join( list( requestedExternals ) )
      gLogger.notice( "Trying to compile %s externals..." % requestedExternalsString )
      if not self.cliParams.forceExternals and requestedExternals in availableExternals:
        gLogger.notice( "Externals %s is already compiled, skipping..." % ( requestedExternalsString ) )
        continue
      compileScript = os.path.join( os.path.dirname( __file__ ), "dirac-compile-externals" )
      if not os.path.isfile( compileScript ):
        compileScript = os.path.join( os.path.dirname( __file__ ), "dirac-compile-externals.py" )
      compileTarget = os.path.join( self.cliParams.destination, platform )
      cmdArgs = []
      cmdArgs.append( "-D '%s'" % compileTarget )
      cmdArgs.append( "-t '%s'" % externalType )
      cmdArgs.append( "-v '%s'" % externalsVersion )
      cmdArgs.append( "-i '%s'" % self.cliParams.externalsPython )
      if self.cliParams.externalsLocation:
        cmdArgs.append( "-e '%s'" % self.cliParams.externalsLocation )
      if self.cliParams.makeJobs:
        cmdArgs.append( "-j '%s'" % self.cliParams.makeJobs )
      compileCmd = "%s %s" % ( compileScript, " ".join( cmdArgs ) )
      gLogger.info( compileCmd )
      if os.system( compileCmd ):
        gLogger.error( "Error while compiling externals!" )
        sys.exit( 1 )
      tarfilePath = os.path.join( self.cliParams.destination, "Externals-%s.tar.gz" % ( requestedExternalsString ) )
      result = Distribution.createTarball( tarfilePath,
                                           compileTarget,
                                           os.path.join( self.cliParams.destination, "mysql" ) )
      if not result[ 'OK' ]:
        #S_ERROR stores its text under 'Message', not 'Error'
        gLogger.error( "Could not generate tarball for package %s" % requestedExternalsString, result[ 'Message' ] )
        sys.exit( 1 )
      os.system( "rm -rf '%s'" % compileTarget )
    return True

  def doTheMagic( self ):
    """Run the whole pipeline: validate, load releases, create module and
    externals tarballs and write the release-*.cfg/.md5 files.
    Returns True on success."""
    if not self.isOK():
      gLogger.fatal( "There was an error with the release description" )
      return False
    result = self.loadReleases()
    if not result[ 'OK' ]:
      gLogger.fatal( "There was an error when loading the release.cfg file: %s" % result[ 'Message' ] )
      return False
    #Module tars
    if self.cliParams.ignorePackages:
      gLogger.notice( "Skipping creating module tarballs" )
    else:
      result = self.createModuleTarballs()
      if not result[ 'OK' ]:
        gLogger.fatal( "There was a problem when creating the module tarballs: %s" % result[ 'Message' ] )
        return False
    #Externals are only built for the DIRAC project itself
    if self.cliParams.ignoreExternals or self.cliParams.projectName != "DIRAC":
      gLogger.notice( "Skipping creating externals tarball" )
    else:
      if not self.createExternalsTarballs():
        gLogger.fatal( "There was a problem when creating the Externals tarballs" )
        return False
    #Write the releases files
    for relVersion in self.cliParams.releasesToBuild:
      projectCFG = self.relConf.getReleaseCFG( self.cliParams.projectName, relVersion )
      projectCFGData = projectCFG.toString() + "\n"
      try:
        relFile = open( os.path.join( self.cliParams.destination, "release-%s-%s.cfg" % ( self.cliParams.projectName, relVersion ) ), "w" )
        relFile.write( projectCFGData )
        relFile.close()
      except Exception as exc:
        gLogger.fatal( "Could not write the release info: %s" % str( exc ) )
        return False
      try:
        relFile = open( os.path.join( self.cliParams.destination, "release-%s-%s.md5" % ( self.cliParams.projectName, relVersion ) ), "w" )
        relFile.write( md5.md5( projectCFGData ).hexdigest() )
        relFile.close()
      except Exception as exc:
        gLogger.fatal( "Could not write the release info: %s" % str( exc ) )
        return False
      #Check deps
      if 'DIRAC' != self.cliParams.projectName:
        deps = self.relConf.getReleaseDependencies( self.cliParams.projectName, relVersion )
        if 'DIRAC' not in deps:
          gLogger.notice( "Release %s doesn't depend on DIRAC. Check it's what you really want" % relVersion )
        else:
          gLogger.notice( "Release %s depends on DIRAC %s" % ( relVersion, deps[ 'DIRAC'] ) )
    return True

  def getUploadCmd( self ):
    """Return the command the operator should run to publish the generated
    files (or a plain file list if no upload command is known)."""
    result = self.relConf.getUploadCommand()
    upCmd = False
    if not result['OK']:
      #No command in the release config: fall back to the hardcoded table
      if self.cliParams.projectName in g_uploadCmd:
        upCmd = g_uploadCmd[ self.cliParams.projectName ]
    else:
      upCmd = result[ 'Value' ]
    filesToCopy = []
    for fileName in os.listdir( self.cliParams.destination ):
      for ext in ( ".tar.gz", ".md5", ".cfg", ".html", ".pdf" ):
        if fileName.find( ext ) == len( fileName ) - len( ext ):
          filesToCopy.append( os.path.join( self.cliParams.destination, fileName ) )
    outFiles = " ".join( filesToCopy )
    outFileNames = " ".join( [ os.path.basename( filePath ) for filePath in filesToCopy ] )
    if not upCmd:
      return "Upload to your installation source:\n'%s'\n" % "' '".join( filesToCopy )
    for inRep, outRep in ( ( "%OUTLOCATION%", self.cliParams.destination ),
                           ( "%OUTFILES%", outFiles ),
                           ( "%OUTFILENAMES%", outFileNames ) ):
      upCmd = upCmd.replace( inRep, outRep )
    return upCmd
if __name__ == "__main__":
  cliParams = Params()
  #Run disconnected from the Configuration Service: this script only builds
  #tarballs locally and needs no real setup
  Script.disableCS()
  Script.addDefaultOptionValue( "/DIRAC/Setup", "Dummy" )
  cliParams.registerSwitches()
  Script.parseCommandLine( ignoreErrors = False )
  #Honour -d/--debug passed through the standard Script machinery
  if Script.localCfg.getDebugMode():
    cliParams.debug = True
  #Build module + externals tarballs and the release-*.cfg/.md5 files
  distMaker = DistributionMaker( cliParams )
  if not distMaker.doTheMagic():
    sys.exit( 1 )
  gLogger.notice( "Everything seems ok. Tarballs generated in %s" % cliParams.destination )
  #Tell the operator how to publish the generated files
  upCmd = distMaker.getUploadCmd()
  gLogger.always( upCmd )
|
avedaee/DIRAC
|
Core/scripts/dirac-distribution.py
|
Python
|
gpl-3.0
| 16,448
|
[
"DIRAC"
] |
42628edaa05380ee5dfa53d0545e3391962503c142e3a596e55213c8bd311afc
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 9 15:52:38 2016
@author: nebula
"""
import os
import swc2vtk
from tqdm import tqdm

# Cell morphologies (SWC files) to render as VTK time series.
cellname_list = [
    '200000',
    '300000',
    '301000',
]

stoptime = 1000.0   # total simulated time covered by the data files
datastep = 0.25     # interval between consecutive data files
nstep = int(stoptime / datastep)


def _convert_cell(cellname):
    """Write one VTK file per timestep for the given cell."""
    vtkfile_base = os.path.join('/data/vtk_data/arase_simulation_5571105', 'vtk', cellname + '_%d.vtk')
    swcfilename = os.path.join('swc', cellname + '.swc')
    datafile_base = os.path.join('/data/vtk_data/arase_simulation_5571105', 'data', cellname, cellname + 't%.6f.dat')
    generator = swc2vtk.VtkGenerator()
    generator.set_draw_mode(3)
    generator.add_swc(swcfilename)
    for step in tqdm(range(1, nstep), desc='Generating VTK'):
        generator.clear_datafile()
        generator.add_datafile(datafile_base % (step * datastep))
        generator.write_vtk(vtkfile_base % step, datatitle='simulation')


for cellname in cellname_list:
    _convert_cell(cellname)
|
DaisukeMiyamoto/swc2vtk
|
examples/test_simulation.py
|
Python
|
apache-2.0
| 926
|
[
"VTK"
] |
e94fa624bdc9c26b617870a3261abccb766c0a7c0d8546084eed9a26c87fd202
|
####################################################################
# This example illustrates modelling of presynaptic plasticity involving
# Ca influx to 3 boutons, each with a pool of presnaptic vesicles, and
# reactions leading to a buildup of Ca and depletion of vesicles during
# synaptic release. The neurotransmitter is coupled to receptors to give a
# complex, stochastic postsynaptic response to a burst of input.
####################################################################
import moose
import rdesigneur as rd
# Stimulus timing parameters
freq = 20.0 # Hz
settleTime = 0.2 # seconds
numPulses = 8
# NOTE(review): burst length uses freq+1, not freq — presumably intentional;
# confirm against the intended pulse count.
stimEnd = settleTime + numPulses/(freq+1)
stimAmpl = 2.0e-2
runtime = 0.8
## This string sets up a burst of input activity
# (baseline + gated, frequency-modulated exponential-of-sine burst between
# settleTime and stimEnd; evaluated by the rdesigneur stimulus machinery)
gluStimStr = "0.08e-3 + {}*(t>{} && t<{}) * exp( 50 * (sin(t*2*3.14159265 * {}) -1) )".format(stimAmpl, settleTime, stimEnd, freq )
# Build the model: electrical compartments + stochastic (GSSA) chemistry
rdes = rd.rdesigneur(
    elecDt = 50e-6,
    chemDt = 0.001,
    chemPlotDt = 0.001,
    turnOffElec = False,
    useGssa = True,
    # cellProto syntax: ['ballAndStick', 'name', somaDia, somaLength, dendDia, dendLength, numDendSeg]
    # 6x6 micron soma, 1.5x60 micron dendrite, 1 segment dendrite.
    cellProto = [['ballAndStick', 'soma', 6e-6, 6e-6, 1.5e-6, 20e-6, 1]],
    chanProto = [['make_glu()', 'glu']],
    chanDistrib = [['glu', 'dend#', 'Gbar', '0.1']],
    chemProto = [['chem/echem.g', 'chem']],
    chemDistrib = [ # Put presynaptic bouton compartments next to the dend
        # Args: chem_model, elec_compts, mesh_type, spatial_distrib, r, sdev, spacing
        ['kinetics', 'dend#', 'presyn_dend', '1', 0.26e-6, 0, 5e-6 ],
    ],
    adaptorList = [ # map released neurotransmitter to cognate receptor
        # Want et al JACS 2019, 141,44 estimate 8K glu per vesicle.
        ['kinetics/glu/glu', 'n', 'glu', 'activation', 0.0, 8.0e3 ],
    ],
    stimList = [ # deliver the stimuli
        ['dend#', '1', 'kinetics/glu/Ca_ext', 'conc', gluStimStr ],
    ],
    plotList = [ # Lots of plots.
        ['#', '1', 'kinetics/glu/Ca', 'conc', 'Ca in presyn bouton'],
        ['#', '1', 'kinetics/glu/RR_pool', 'n', 'RR_pool in presyn bouton'],
        ['#', '1', 'kinetics/glu/Ca_ext', 'conc', 'Input to bouton'],
        ['#', '1', 'kinetics/glu/glu', 'n', '# of glu vesicles released'],
        ['soma', '1', '.', 'Vm', 'Membrane potential'],
    ],
    moogList = [
        ['#', '1', '.', 'Vm', 'Membrane potential', -65.0, -55.0],
        ['#', '1', 'kinetics/glu/Ca', 'conc', 'Ca conc', 0.0, 1.0]
    ]
)
moose.seed( 1234 ) # Random number seed. Response details change with this.
rdes.buildModel() # Assemble the model from prototypes.
moose.reinit()
# Run the simulation and show the 3-D Moogli display
rdes.displayMoogli( 0.001, runtime, rotation = 0.00, mergeDisplays=True )
|
BhallaLab/moose-examples
|
tutorials/Rdesigneur/ex11.1_presyn_dend.py
|
Python
|
gpl-2.0
| 2,708
|
[
"MOOSE"
] |
d2d0cb5f5c4e1fd348fb2f0954d056379700c0754ee3f2d3eeb144c0fdbe5b39
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import shutil
import subprocess
import time
import yaml
from charms.leadership import leader_get, leader_set
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Raw string avoids the invalid '\.' escape-sequence warning; the pattern
# value is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
# Well-known kubeconfig locations written/read by this charm.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
# Env var name for GCP credentials (presumably consumed by GCP tooling;
# its use is outside this chunk).
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
# Snaps delivered as charm resources; checksummed to detect upgrades.
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
# Make snap-installed binaries reachable from hook execution.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
# Unit-local key/value store, persisted across hook invocations.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    '''Handle a charm upgrade: migrate old state flags and stored checksums,
    check whether the snaps need refreshing, and clear derived states so the
    configure/start pipeline reruns against the new revision.'''
    # migrate to new flags
    if is_state('kubernetes-worker.restarted-for-cloud'):
        remove_state('kubernetes-worker.restarted-for-cloud')
        set_state('kubernetes-worker.cloud.ready')
    if is_state('kubernetes-worker.cloud-request-sent'):
        # minor change, just for consistency
        remove_state('kubernetes-worker.cloud-request-sent')
        set_state('kubernetes-worker.cloud.request-sent')
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
    # Order matters: services are cleaned up before checksums are migrated
    # and compared, so a resource change marks the upgrade as needed.
    cleanup_pre_snap_services()
    migrate_resource_checksums()
    check_resources_for_upgrade_needed()
    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')
    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
    # Drop derived states so configuration and services are rebuilt.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def get_resource_checksum_db_key(resource):
    ''' Build the unit-db key under which a resource's checksum is stored. '''
    return 'kubernetes-worker.resource-checksums.{}'.format(resource)
def calculate_resource_checksum(resource):
    ''' Return the md5 hex digest of the attached resource's content.

    When no resource file is attached (resource_get returns a falsy path)
    the digest of zero bytes is returned, matching the "0-byte resource"
    convention used by migrate_resource_checksums.
    '''
    md5 = hashlib.md5()
    path = hookenv.resource_get(resource)
    if path:
        # Use a context manager so the file handle is closed deterministically
        # instead of relying on garbage collection.
        with open(path, 'rb') as f:
            md5.update(f.read())
    return md5.hexdigest()
def migrate_resource_checksums():
    ''' Move per-resource checksums from the legacy storage schema to the
    new per-resource keys, filling in a zero-byte digest when nothing is
    attached. '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(resource)
        if db.get(new_key):
            continue  # already migrated
        path = hookenv.resource_get(resource)
        if path:
            # old key from charms.reactive.helpers.any_file_changed
            old_key = 'reactive.files_changed.' + path
            db.set(new_key, db.get(old_key))
        else:
            # No resource is attached. Previously, this meant no checksum
            # would be calculated and stored. But now we calculate it as if
            # it is a 0-byte resource, so let's go ahead and do that.
            db.set(new_key, hashlib.md5().hexdigest())
def check_resources_for_upgrade_needed():
    ''' Compare each snap resource's current checksum against the stored
    one and flag an upgrade when any of them differ. '''
    hookenv.status_set('maintenance', 'Checking resources')
    for resource in snap_resources:
        stored = db.get(get_resource_checksum_db_key(resource))
        if calculate_resource_checksum(resource) != stored:
            set_upgrade_needed()
def calculate_and_store_resource_checksums():
    ''' Record the current checksum of every snap resource in the unit db. '''
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(resource),
               calculate_resource_checksum(resource))
def set_upgrade_needed():
    ''' Mark the snaps as needing an upgrade, and auto-approve it unless the
    operator requires manual upgrades (first install is always approved). '''
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    first_install = config.previous('channel') is None
    if first_install or not config.get('require-manual-upgrade'):
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    ''' Remove states, services and files left behind by the pre-snap
    (binary + systemd based) installation of the worker components. '''
    # remove old states
    remove_state('kubernetes-worker.components.installed')
    # disable old services
    services = ['kubelet', 'kube-proxy']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        service_stop(service)
    # cleanup old files
    files = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    # Loop variable renamed from `file`, which shadowed the builtin.
    for path in files:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when('config.changed.channel')
def channel_changed():
    ''' The snap channel option changed; schedule a snap upgrade. '''
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    ''' Install (or refresh) the worker snaps from the configured channel,
    record their resource checksums and reset the upgrade states. '''
    channel = hookenv.config('channel')
    # Same snaps, same order as before: kubectl, kubelet, kube-proxy.
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    calculate_and_store_resource_checksums()
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    ''' Tear the unit down: unregister the node from the cluster (best
    effort) and stop the worker services. '''
    if os.path.isfile(kubeconfig_path):
        try:
            kubectl('delete', 'node', get_node_name())
        except CalledProcessError:
            # Best effort: the master may already be gone.
            hookenv.log('Failed to unregister node.')
    service_stop('snap.kubelet.daemon')
    service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')
    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return
    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return
    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return
    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)
    # Install each unpacked plugin binary into its target location.
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)
    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    # `kubelet --version` prints something like b'Kubernetes v1.x.y\n';
    # keep only the part after ' v'.
    version_output = check_output(['kubelet', '--version'])
    hookenv.application_version_set(version_output.split(b' v')[-1].rstrip())
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
    ''' Publish the leader's snapd refresh timer so every cluster member
    (present and future) refreshes near the same time. '''
    # layer-snap guarantees this key exists once 'snap.refresh.set' is set.
    refresh_timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
    # data_changed is True on the first pass and afterwards only when the
    # timer value actually changed, so leader data is written sparingly.
    if data_changed('worker_snapd_refresh', refresh_timer):
        hookenv.log('setting snapd_refresh timer to: {}'.format(refresh_timer))
        leader_set({'snapd_refresh': refresh_timer})
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
    ''' Follow the leader: apply the snapd refresh.timer it published. '''
    # Gated on 'snap.refresh.set' because layer-snap always configures some
    # core refresh.timer first; here we overwrite it with the leader's value
    # so the whole cluster refreshes together.
    refresh_timer = leader_get('snapd_refresh')
    hookenv.log('setting snapd_refresh timer to: {}'.format(refresh_timer))
    snap.set_refresh_timer(refresh_timer)
@hookenv.atexit
def charm_status():
    '''Update the status message with the current status of kubelet.

    Runs at the end of every hook. The checks below are ordered by
    precedence: the first matching condition sets the status and returns.
    '''
    vsphere_joined = is_state('endpoint.vsphere.joined')
    azure_joined = is_state('endpoint.azure.joined')
    cloud_blocked = is_state('kubernetes-worker.cloud.blocked')
    if vsphere_joined and cloud_blocked:
        hookenv.status_set('blocked',
                           'vSphere integration requires K8s 1.12 or greater')
        return
    if azure_joined and cloud_blocked:
        hookenv.status_set('blocked',
                           'Azure integration requires K8s 1.11 or greater')
        return
    if is_state('kubernetes-worker.cloud.pending'):
        hookenv.status_set('waiting', 'Waiting for cloud integration')
        return
    if not is_state('kube-control.dns.available'):
        # During deployment the worker has to start kubelet without cluster dns
        # configured. If this is the first unit online in a service pool
        # waiting to self host the dns pod, and configure itself to query the
        # dns service declared in the kube-system namespace
        hookenv.status_set('waiting', 'Waiting for cluster DNS.')
        return
    if is_state('kubernetes-worker.snaps.upgrade-specified'):
        hookenv.status_set('waiting', 'Upgrade pending')
        return
    if is_state('kubernetes-worker.snaps.upgrade-needed'):
        hookenv.status_set('blocked',
                           'Needs manual upgrade, run the upgrade action')
        return
    if is_state('kubernetes-worker.snaps.installed'):
        update_kubelet_status()
        return
    else:
        pass  # will have been set by snap layer or other handler
def update_kubelet_status():
    ''' Report 'active' when both worker daemons are running, otherwise a
    'waiting' status naming the services that have not started yet. '''
    failing_services = [svc for svc in ('kubelet', 'kube-proxy')
                        if not _systemctl_is_active('snap.{}.daemon'.format(svc))]
    if failing_services:
        msg = 'Waiting for {} to start.'.format(','.join(failing_services))
        hookenv.status_set('waiting', msg)
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
def get_ingress_address(relation):
    ''' Return the ingress address for the given relation, falling back to
    the unit's private address on juju versions without network spaces. '''
    try:
        network_info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        network_info = []
    if not network_info or 'ingress-addresses' not in network_info:
        # if they don't have ingress-addresses they are running a juju that
        # doesn't support spaces, so just return the private address
        return hookenv.unit_get('private-address')
    # just grab the first one for now, maybe be more robust here?
    return network_info['ingress-addresses'][0]
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    '''Request a server certificate for this unit from the tls layer.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # SANs the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        get_ingress_address(kube_control),
        gethostname(),
    ]
    # Path-safe certificate name derived from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    ''' Watch for configuration changes and signal if we need to restart the
    worker services '''
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']
    # NOTE(review): `or` short-circuits and data_changed() records the new
    # value as a side effect, so when an earlier check returns True the later
    # hashes are only refreshed on a subsequent hook run. Presumably fine
    # since any True triggers a restart anyway -- confirm before restyling.
    if (data_changed('kube-api-servers', servers) or
        data_changed('kube-dns', dns) or
        data_changed('cluster-cidr', cluster_cidr)):
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
@when_not('kubernetes-worker.cloud.pending',
          'kubernetes-worker.cloud.blocked')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.

    Only runs once certificates, auth, DNS and CNI are all available and a
    restart has been requested; bails out early while the CNI cidr is
    still unset.
    '''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']
    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return
    # Record the current credentials so later hooks can detect changes.
    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)
    # Write configs, then (re)start services -- order matters here.
    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Publish worker configuration on the CNI relation. This tells the CNI
    subordinate that it is attached to a worker so it can respond
    accordingly. '''
    cni.set_config(kubeconfig_path=kubeconfig_path, is_master=False)
@when('config.changed.ingress')
def toggle_ingress_state():
    ''' The ingress option is a toggle: whenever it changes, drop the
    ingress.available state so the daemon set is re-rendered or removed. '''
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    '''React to a change in the software defined network on this container:
    bounce the kubernetes services and refresh the reported status.'''
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    ''' If configuration has ingress daemon set enabled, launch the ingress
    load balancer and default http backend. Otherwise attempt deletion. '''
    config = hookenv.config()

    # If ingress is enabled, launch the ingress controller
    if config.get('ingress'):
        launch_default_ingress_controller()
    else:
        # Deletion is best-effort: kubectl_manifest swallows failures, and
        # the ports are closed regardless of the outcome.
        hookenv.log('Deleting the http backend and ingress.')
        kubectl_manifest('delete',
                         '/root/cdk/addons/default-http-backend.yaml')
        kubectl_manifest('delete',
                         '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
    '''Schedule a re-application of node labels whenever the labels config
    option changes.'''
    set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node, removing any previously-applied labels the user has dropped. '''
    # Get the user's configured labels.
    config = hookenv.config()
    user_labels = {}
    # split() (not split(' ')) tolerates repeated whitespace and an empty
    # value, and partition() tolerates '=' inside a label value — matching
    # how parse_extra_args() handles key=value tokens.
    for item in config.get('labels').split():
        if '=' in item:
            key, _, val = item.partition('=')
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))

    # Collect the current label state.
    current_labels = db.get('current_labels') or {}

    # Remove any labels that the user has removed from the config.
    for key in list(current_labels.keys()):
        if key not in user_labels:
            try:
                remove_label(key)
                del current_labels[key]
                # Persist after each change so a failure part-way through
                # leaves the recorded state accurate.
                db.set('current_labels', current_labels)
            except ApplyNodeLabelFailed as e:
                # Leave label-config-required set so we retry next hook.
                hookenv.log(str(e))
                return

    # Add any new labels.
    for key, val in user_labels.items():
        try:
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)
        except ApplyNodeLabelFailed as e:
            hookenv.log(str(e))
            return

    # Set the juju-application label.
    try:
        set_label('juju-application', hookenv.service_name())
    except ApplyNodeLabelFailed as e:
        hookenv.log(str(e))
        return

    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args',
          'config.changed.kubelet-extra-config')
def config_changed_requires_restart():
    '''Changes to any of the extra-args/extra-config options only take
    effect after a service restart, so request one.'''
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Set a flag to handle new docker login options.

    If docker daemon options have also changed, set a flag to ensure the
    daemon is restarted prior to running docker login.
    """
    config = hookenv.config()

    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')

    # run_docker_login() picks this up once any docker restart has finished.
    set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
    """Login to a docker registry with configured credentials.

    Logs out of any registries that were removed from the config, logs in to
    every configured registry, then requests a service restart so the new
    credentials take effect.
    """
    config = hookenv.config()

    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)

    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): passing the password via -p exposes it in the process
        # list; docker's --password-stdin would avoid that — confirm the
        # minimum supported docker version before changing.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg reports the primary package architecture for this machine; the
    # output carries a trailing newline and comes back as bytes.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.

    Writes kubeconfig files for the ubuntu user, root, kubelet and
    kube-proxy, all pointing at the given api server and authenticated with
    the corresponding tokens from creds.
    '''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)
    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')
    # Create kubernetes configuration for kubelet, and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    '''Parse a space-separated "key=value flag ..." config option into a
    dict; bare tokens (no '=') map to the string 'true'.'''
    args = {}
    for token in hookenv.config().get(config_key, '').split():
        key, sep, value = token.partition('=')
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    '''Apply configuration to a kubernetes snap service via `snap set`.

    Options set on a previous run but absent now are reset to 'null' so
    stale settings do not linger; base_args are applied next, and the
    operator's extra args override everything else.
    '''
    db = unitdata.kv()

    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}

    extra_args = parse_extra_args(extra_args_key)

    args = {}
    for arg in prev_args:
        # remove previous args by setting to null
        args[arg] = 'null'
    for k, v in base_args.items():
        args[k] = v
    for k, v in extra_args.items():
        args[k] = v

    cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
    check_call(cmd)

    # Remember what we set so the next run can null out removed options.
    db.set(prev_args_key, args)
def merge_kubelet_extra_config(config, extra_config):
    ''' Recursively fold the contents of extra_config into config.

    Nested dictionaries are merged key by key; any other value simply
    overwrites the corresponding entry. This is destructive: config is
    mutated in place.
    '''
    for key, value in extra_config.items():
        if isinstance(value, dict):
            # Descend into nested mappings, creating the target as needed.
            merge_kubelet_extra_config(config.setdefault(key, {}), value)
        else:
            config[key] = value
def configure_kubelet(dns, ingress_ip):
    '''Assemble kubelet snap options (plus, on k8s >= 1.10, the
    KubeletConfiguration file) and apply them via `snap set`.'''
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    kubelet_opts = {}
    kubelet_opts['require-kubeconfig'] = 'true'
    kubelet_opts['kubeconfig'] = kubeconfig_path
    kubelet_opts['network-plugin'] = 'cni'
    kubelet_opts['v'] = '0'
    kubelet_opts['logtostderr'] = 'true'
    kubelet_opts['node-ip'] = ingress_ip
    kubelet_opts['allow-privileged'] = set_privileged()

    if is_state('endpoint.aws.ready'):
        kubelet_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'gce'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'openstack'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.vsphere.joined'):
        # vsphere just needs to be joined on the worker (vs 'ready')
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'vsphere'
        # NB: vsphere maps node product-id to its uuid (no config file needed).
        uuid_file = '/sys/class/dmi/id/product_uuid'
        with open(uuid_file, 'r') as f:
            uuid = f.read().strip()
        kubelet_opts['provider-id'] = 'vsphere://{}'.format(uuid)
    elif is_state('endpoint.azure.ready'):
        azure = endpoint_from_flag('endpoint.azure.ready')
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'azure'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
        kubelet_opts['provider-id'] = azure.vm_id

    if get_version('kubelet') >= (1, 10):
        # Put together the KubeletConfiguration data
        kubelet_config = {
            'apiVersion': 'kubelet.config.k8s.io/v1beta1',
            'kind': 'KubeletConfiguration',
            'address': '0.0.0.0',
            'authentication': {
                'anonymous': {
                    'enabled': False
                },
                'x509': {
                    'clientCAFile': ca_cert_path
                }
            },
            'clusterDomain': dns['domain'],
            'failSwapOn': False,
            'port': 10250,
            'tlsCertFile': server_cert_path,
            'tlsPrivateKeyFile': server_key_path
        }
        if dns['enable-kube-dns']:
            kubelet_config['clusterDNS'] = [dns['sdn-ip']]
        if is_state('kubernetes-worker.gpu.enabled'):
            kubelet_config['featureGates'] = {
                'DevicePlugins': True
            }

        # Add kubelet-extra-config. This needs to happen last so that it
        # overrides any config provided by the charm.
        kubelet_extra_config = hookenv.config('kubelet-extra-config')
        # safe_load instead of load: the value is operator-supplied and only
        # plain YAML mappings are meaningful here; load() without a Loader
        # can construct arbitrary python objects and is deprecated.
        kubelet_extra_config = yaml.safe_load(kubelet_extra_config)
        merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)

        # Render the file and configure Kubelet to use it
        os.makedirs('/root/cdk/kubelet', exist_ok=True)
        with open('/root/cdk/kubelet/config.yaml', 'w') as f:
            f.write('# Generated by kubernetes-worker charm, do not edit\n')
            yaml.dump(kubelet_config, f)
        kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
    else:
        # NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
        # this whole block and the parent if statement.
        kubelet_opts['address'] = '0.0.0.0'
        kubelet_opts['anonymous-auth'] = 'false'
        kubelet_opts['client-ca-file'] = ca_cert_path
        kubelet_opts['cluster-domain'] = dns['domain']
        kubelet_opts['fail-swap-on'] = 'false'
        kubelet_opts['port'] = '10250'
        kubelet_opts['tls-cert-file'] = server_cert_path
        kubelet_opts['tls-private-key-file'] = server_key_path
        if dns['enable-kube-dns']:
            kubelet_opts['cluster-dns'] = dns['sdn-ip']
        if is_state('kubernetes-worker.gpu.enabled'):
            kubelet_opts['feature-gates'] = 'DevicePlugins=true'

    if get_version('kubelet') >= (1, 11):
        kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'

    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    '''Assemble kube-proxy snap options and apply them via `snap set`.'''
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()

    # Conntrack tuning is not possible inside an lxc container; detect one
    # with virt-what and disable it there.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Raises ValueError when no credential is supplied, or when both token and
    password are supplied.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # NOTE(review): commands are assembled as strings and shlex.split; values
    # containing whitespace would break — presumed safe for charm-managed
    # paths and tokens.
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.ingress-ssl-chain-completion',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # An empty or "auto" image config selects an arch-appropriate default.
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
       context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.5"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.5"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-amd64:1.5"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Best-effort: log, close the ports, retry on a later hook run.
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1',  # noqa
                  'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1',  # noqa
                  's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1',  # noqa
                  'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1',  # noqa
                  }
        context['ingress_image'] = images.get(context['arch'], images['amd64'])
    # DaemonSet moved API groups over the 1.9 boundary.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    '''Restart worker services.'''
    hookenv.log('Restarting kubelet and kube-proxy.')
    # kube-proxy first, then kubelet, matching the original ordering.
    for svc in ('kube-proxy', 'kubelet'):
        service_restart('snap.{}.daemon'.format(svc))
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship as a list of https URLs, one per unit.'''
    return ['https://{0}:{1}'.format(unit['hostname'], unit['port'])
            for service in kube_api.services()
            for unit in service['hosts']]
def kubectl(*args):
    ''' Run a kubectl cli command with a config file. Returns stdout and throws
    an error if the command fails. '''
    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path]
    command.extend(args)
    hookenv.log('Executing {}'.format(command))
    return check_output(command)
def kubectl_success(*args):
    ''' Runs kubectl with the given args. Returns True if successful, False if
    not. '''
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    ''' Wrap the kubectl creation command when using filepath resources
    :param operation - one of get, create, delete, replace
    :param manifest - filepath to the manifest
    '''
    if operation == 'delete':
        # Deletions are a special case: --now removes the requested
        # resources immediately.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # Guard against an error re-creating the same manifest: if the
        # definition already exists, assume creation succeeded earlier.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any special case.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    '''Perform the one-time nagios/NRPE setup, then delegate to the regular
    update handler.'''
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    '''(Re)write the nagios checks for the worker's systemd services.'''
    # Systemd units whose liveness nagios should monitor.
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')

    # The current nrpe-external-master interface doesn't handle much logic,
    # so lean on the charm-helpers nrpe code for now.
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    '''Remove the nagios checks when the nrpe relation goes away.'''
    remove_state('nrpe-external-master.initial-config')

    # List of systemd services for which the checks will be removed
    services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')

    # The current nrpe-external-master interface doesn't handle a lot of logic,
    # use the charm-helpers code for now.
    hostname = nrpe.get_nagios_hostname()
    nrpe_setup = nrpe.NRPE(hostname=hostname)

    for service in services:
        nrpe_setup.remove_check(shortname=service)
def set_privileged():
    """Return 'true' if privileged containers are needed.

    This is when a) the user requested them, or
               b) the user does not care ('auto') and GPUs are available in
                  a pre-1.9 era.
    """
    privileged = hookenv.config('allow-privileged').lower()
    # Pre-1.9 GPU support requires privileged containers.
    gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
                            get_version('kubelet') < (1, 9))

    if privileged == 'auto':
        privileged = 'true' if gpu_needs_privileged else 'false'

    if privileged == 'false' and gpu_needs_privileged:
        # The operator explicitly disabled privileged mode, so GPU support
        # cannot continue on this kubernetes version.
        disable_gpu()
        remove_state('kubernetes-worker.gpu.enabled')
        # No need to restart kubernetes (set the restart-needed state)
        # because set-privileged is already in the restart path

    return privileged
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to a changed 'allow-privileged' config value by scheduling a
    restart, which re-evaluates set_privileged() on the way."""
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node.

    Requires kubelet >= 1.9; verifies the NVIDIA driver responds before
    labelling the node and scheduling a restart.
    """
    if get_version('kubelet') < (1, 9):
        # Fixed typo in the operator-facing message ("suppport").
        hookenv.status_set(
            'active',
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    set_label('gpu', 'true')
    set_label('cuda', 'true')

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """CUDA support went away, most likely because the docker layer switched
    to a non-nvidia docker; roll back GPU mode and request a restart."""
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node by clearing its gpu/cuda labels."""
    hookenv.log('Disabling gpu mode')

    # Remove node labels (same order as they were applied).
    for label in ('gpu', 'cuda'):
        remove_label(label)
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Tell kubernetes-master this worker currently has GPU support."""
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Tell kubernetes-master this worker has no GPU support."""
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """ Request kubelet node authorization with a well formed kubelet user.
    This also implies that we are requesting kube-proxy auth. """

    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the hostname
    # even if it will only be used when we enable RBAC
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    # Only act on credentials that were issued for this exact node user.
    if creds and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        if data_changed('kube-control.creds', creds):
            set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll
    be missing.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older juju controllers don't implement goal-state; fall back to
        # treating the relation as simply not added yet.
        goal_state = {}

    if 'kube-control' in goal_state.get('relations', {}):
        # The relation is planned but the master isn't up yet.
        hookenv.status_set(
            'waiting',
            'Waiting for kubernetes-master to become ready')
    else:
        hookenv.status_set(
            'blocked',
            'Relate {}:kube-control kubernetes-master:kube-control'.format(
                hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """ Fix iptables FORWARD policy for Docker >=1.13
    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    # -w 300: wait up to 300s for the xtables lock instead of failing.
    check_call(['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT'])
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
    '''Return the name this node registers with the kubernetes api server.

    AWS nodes register with their fqdn; every other case uses the short
    hostname.
    '''
    kubelet_extra_args = parse_extra_args('kubelet-extra-args')
    cloud_provider = kubelet_extra_args.get('cloud-provider', '')
    # An active cloud endpoint overrides any operator-supplied extra arg.
    if is_state('endpoint.aws.ready'):
        cloud_provider = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_provider = 'gce'
    elif is_state('endpoint.openstack.ready'):
        cloud_provider = 'openstack'
    elif is_state('endpoint.vsphere.ready'):
        cloud_provider = 'vsphere'
    elif is_state('endpoint.azure.ready'):
        cloud_provider = 'azure'
    if cloud_provider == 'aws':
        return getfqdn().lower()
    else:
        return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
    '''Raised when a kubectl node-label operation still fails after the
    retry window in persistent_call() is exhausted.'''
    pass
def persistent_call(cmd, retry_message):
    '''Run cmd until it exits 0, retrying once per second for up to three
    minutes. Returns True on success, False once the deadline passes.'''
    stop_at = time.time() + 180
    while time.time() < stop_at:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    '''Apply label=value to this node via kubectl, retrying on failure.

    Raises ApplyNodeLabelFailed when the label cannot be applied within the
    retry window.
    '''
    nodename = get_node_name()
    cmd = ('kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
           .format(kubeconfig_path, nodename, label, value)).split()
    retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
def remove_label(label):
    '''Remove a label from this node via kubectl (the trailing '-' form),
    retrying on failure.

    Raises ApplyNodeLabelFailed when removal cannot complete within the
    retry window.
    '''
    nodename = get_node_name()
    cmd = ('kubectl --kubeconfig={0} label node {1} {2}-'
           .format(kubeconfig_path, nodename, label)).split()
    retry = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined',
          'endpoint.openstack.joined',
          'endpoint.vsphere.joined',
          'endpoint.azure.joined')
@when_not('kubernetes-worker.cloud.ready')
def set_cloud_pending():
    '''Mark a cloud integration as pending, or blocked when the installed
    kubernetes is too old for the joined cloud.'''
    k8s_version = get_version('kubelet')
    # vsphere integration needs k8s >= 1.12; azure needs >= 1.11.
    blocked = ((is_state('endpoint.vsphere.joined') and
                k8s_version < (1, 12)) or
               (is_state('endpoint.azure.joined') and
                k8s_version < (1, 11)))
    if blocked:
        set_state('kubernetes-worker.cloud.blocked')
    else:
        remove_state('kubernetes-worker.cloud.blocked')
    set_state('kubernetes-worker.cloud.pending')
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined',
          'endpoint.azure.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud.request-sent')
def request_integration():
    '''Ask the joined cloud integrator to tag this instance with the cluster
    tag and enable the storage/inspection features kubernetes needs.'''
    hookenv.status_set('maintenance', 'requesting cloud integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    if is_state('endpoint.aws.joined'):
        # AWS uses the kubernetes.io/cluster/<tag> ownership convention on
        # the instance, its security group and its subnet.
        cloud = endpoint_from_flag('endpoint.aws.joined')
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
        })
        cloud.enable_object_storage_management()
    elif is_state('endpoint.azure.joined'):
        cloud = endpoint_from_flag('endpoint.azure.joined')
        cloud.tag_instance({
            'k8s-io-cluster-name': cluster_tag,
        })
        cloud.enable_object_storage_management()
        cloud.enable_instance_inspection()
        cloud.enable_dns_management()
    set_state('kubernetes-worker.cloud.request-sent')
    hookenv.status_set('waiting', 'Waiting for cloud integration')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined',
           'endpoint.openstack.joined',
           'endpoint.vsphere.joined',
           'endpoint.azure.joined')
def clear_cloud_flags():
    '''No cloud endpoint remains joined; drop every cloud-integration
    state.'''
    for flag in ('kubernetes-worker.cloud.pending',
                 'kubernetes-worker.cloud.request-sent',
                 'kubernetes-worker.cloud.blocked',
                 'kubernetes-worker.cloud.ready'):
        remove_state(flag)
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready',
          'endpoint.openstack.ready',
          'endpoint.vsphere.ready',
          'endpoint.azure.ready')
@when_not('kubernetes-worker.cloud.blocked',
          'kubernetes-worker.cloud.ready')
def cloud_ready():
    '''Write any cloud-specific snap config and mark the cloud integration
    complete, forcing a restart so kubelet picks it up.'''
    remove_state('kubernetes-worker.cloud.pending')
    # aws and vsphere need no config file; the rest get one written here.
    writers = {
        'endpoint.gcp.ready': _write_gcp_snap_config,
        'endpoint.openstack.ready': _write_openstack_snap_config,
        'endpoint.azure.ready': _write_azure_snap_config,
    }
    for flag, writer in writers.items():
        if is_state(flag):
            writer('kubelet')
            break
    set_state('kubernetes-worker.cloud.ready')
    set_state('kubernetes-worker.restart-needed')  # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
    '''Write GCP credentials, cloud-config and daemon environment for the
    given snap.'''
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Credentials are sensitive; restrict permissions before writing.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')

    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    # Append the creds env var only once (idempotent across hook runs).
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
    '''Write the openstack cloud-config file for the given snap.'''
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag('endpoint.openstack.ready')

    lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
    ]
    _cloud_config_path(component).write_text('\n'.join(lines))
def _write_azure_snap_config(component):
    '''Write the azure cloud-config (JSON) file for the given snap.'''
    azure = endpoint_from_flag('endpoint.azure.ready')
    cloud_config = {
        'useInstanceMetadata': True,
        'useManagedIdentityExtension': True,
        'subscriptionId': azure.subscription_id,
        'resourceGroup': azure.resource_group,
        'location': azure.resource_group_location,
        'vnetName': azure.vnet_name,
        'vnetResourceGroup': azure.vnet_resource_group,
        'subnetName': azure.subnet_name,
        'securityGroupName': azure.security_group_name,
    }
    _cloud_config_path(component).write_text(json.dumps(cloud_config))
def get_first_mount(mount_relation):
    '''Return the first nfs mount dict from the mount relation, or None.

    The mount layer may expose several relations, each carrying a list of
    mounts; the nfs charm only supports a single mount today, so the first
    nfs-typed entry wins.
    '''
    for mount in mount_relation.mounts() or []:
        mounts = mount.get('mounts')
        # Guard against relations that have not populated any mounts yet;
        # the original indexed [0] unconditionally and could IndexError.
        if mounts and mounts[0]['fstype'] == 'nfs':
            return mounts[0]
    return None
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the nfs-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs '''
    mount_data = get_first_mount(mount)
    if mount_data:
        # Only the fields the provisioner manifest consumes participate in
        # the change detection.
        nfs_relation_data = {
            'options': mount_data['options'],
            'host': mount_data['hostname'],
            'mountpoint': mount_data['mountpoint'],
            'fstype': mount_data['fstype']
        }

        # Re-execute the rendering if the data has changed.
        if data_changed('nfs-config', nfs_relation_data):
            hookenv.log('reconfiguring nfs')
            remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        # The relation has no usable nfs mount yet; wait for the next hook.
        return

    addon_path = '/root/cdk/addons/{}'
    # Render the NFS deployment
    manifest = addon_path.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Best-effort: log and retry on a later update-status pass.
        hookenv.log(e)
        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
        return

    set_state('nfs.configured')
|
justinsb/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 55,860
|
[
"CDK"
] |
22b9530b90773d81f796ce58a8a1e1c50c219323b749c3d486a01766b1eb4743
|
#!/usr/bin/python
# wpa-shoot
#WPAShoot Adalah Tools Untuk Audit Keamanan Jaringan Wireless Yang Berbasis aircrack-ng suite
#Jika Anda Ingin Mengedit Source Code Tolong Sertakan Nama Code Writernya.
#
#Version: 1.0 Beta
#Code Writer: HardGhost
#Web Page : Hardghost-sec.blogspot.com
#
#Jika Terjadi Error Pada File WPAShot.py Silahkan Eksekusi File bernama wpashot2.py.
#
#Required Tools:
#-Aircrack-ng
#-Leafpad
#-macchanger
#-crunch
#-xterm
#
#Copyright HardGhostID
import os.path
import os
import subprocess
import time
from termcolor import colored
os.system('clear')  # start with a clean terminal before the dependency checks
def check():
print "Checking required tools:"
path = '/usr/bin/aircrack-ng'
if os.path.exists(path):
time.sleep(0.5)
print "aircrack-ng",colored(' [OK]','green')
time.sleep(0.1)
else:
print "Aircrack-ng",colored ("[NOT INSTALLED]",'red')," Please Install aircrack-ng"
exit()
def check2():
path = '/usr/bin/leafpad'
if os.path.exists(path):
print "Leafpad", colored('[OK]','green')
time.sleep(0.1)
else:
print "Leafpad", colored("[NOT INSTALLED]",'red'), "Please Install Leafpad"
exit()
def check3():
path = '/usr/bin/crunch'
if os.path.exists(path):
print "Crunch",colored('[OK]','green')
else:
print "Crunch", colored("[NOT INSATLLED]",'red'), "Please Install Crunch"
exit()
def check4():
path = '/usr/bin/macchanger'
if os.path.exists(path):
print "macchanger",colored("[OK]","green")
else:
print "macchanger",colored("[NOT INSTALLED]",'red'),"Please Install Macchanger"
exit()
def check5():
path = '/usr/bin/xterm'
if os.path.exists(path):
print "xterm",colored("[OK]","green")
else:
print "xterm",colored("[NOT INSTALLED]",'red'),"Please Install xterm"
def check6():
path = '/usr/bin/hashcat'
if os.path.exists(path):
print "Hashcat",colored("[OK]","green")
else:
print "Hashcat",colored("[NOT INSTALLED]",'red'),"Please Install Hashcat"
exit()
# Run every dependency check before showing the menu; a missing tool
# aborts the script (see the individual check*() bodies for details).
check()
check2()
check3()
check4()
check5()
check6()
time.sleep(1.5)
os.system("clear")
def opening():
    """Animated splash screen with the WPAShot banner.

    NOTE(review): not called anywhere in this script — menu() draws its
    own copy of the banner instead.
    """
    os.system("clear")
    print colored(" __ ______ _",'red')," _ _"
    time.sleep(0.1)
    print colored(" \ \ / / _ \ / \ ",'red')," ___| |__ ___ | |"
    time.sleep(0.1)
    print colored(" \ \ /\ / /| |_) / _ \ ",'red')," / __| '_ \ / _ \| __|"
    time.sleep(0.1)
    print colored(" \ V V / | __/ ___ \ ",'red')," \__ \ | | | (_) | |"
    time.sleep(0.1)
    print colored(" \_/\_/ |_| /_/ \_\ ",'red'),"|___/_| |_|\___/ \__|"
    print
    time.sleep(1)
    print' Version 1.0 Beta'
    time.sleep(1)
    print ' Bug Report hardghostalkori@gmail.com'
    time.sleep(3)
def menu() :
    """Clear the screen and draw the banner plus the numbered main menu.

    The option numbers printed here are read and dispatched by pilih().
    """
    os.system("clear")
    print colored(" __ ______ _",'red')," _ _"
    print colored(" \ \ / / _ \ / \ ",'red')," ___| |__ ___ | |"
    print colored(" \ \ /\ / /| |_) / _ \ ",'red')," / __| '_ \ / _ \| __|"
    print colored(" \ V V / | __/ ___ \ ",'red')," \__ \ | | | (_) | |"
    print colored(" \_/\_/ |_| /_/ \_\ ",'red'),"|___/_| |_|\___/ \__|"
    print
    print colored(" Version 1.0 Beta",'blue')
    print colored(" visit: hardghost-sec.blogspot.com",'cyan')
    print colored(" Bug Report hardghostalkori@gmail.com",'blue')
    print
    print colored(" 1.Activate Mode Monitor",'green')
    print colored(" 2.Check Monitor Interfaces",'green')
    print colored(" 3.Test Wireless Adapter Packet Injection",'green')
    print colored(" 4.Scan Wireless Network",'green')
    print colored(" 5.Capture Packet Specific by Target Acces Point",'green')
    print colored(" 6.Capture Handshake or WPA Encryption",'green')
    print colored(" 7.Crack WPA Encrytion!",'green')
    print colored(" 8.Crack WPA Encryption! [HASHCAT]",'green')
    print colored(" 9.Restart Service NetworkManager And Networking",'green')
    print colored(" ----EXTRAS----",'yellow')
    print colored(" 99.Create Wordlist [Crunch]",'red')
    print colored(" 88.Change Mac Address",'red')
    print colored(" 77.Create Hashcat Capture File",'red')
    print colored(" 00.Exit",'red')
def pilih():
    """Read a menu choice from the user and dispatch to the matching action.

    Fixes:
    - The dispatch table disagreed with menu(): the menu advertises
      8 = "Crack WPA Encryption! [HASHCAT]" and 9 = "Restart Service ...",
      but the code mapped 8 to restart() and never called hash() at all.
      Now 8 -> hash() and 9 -> restart(), matching the menu text.
    - Python 2's input() eval()s whatever the user types, which allows
      arbitrary code execution; replaced with raw_input() + int().
    """
    print
    time.sleep(0.1)
    try:
        pilih = int(raw_input("WPAShot> "))
    except ValueError:
        # Non-numeric input: show the same "not on the menu" message
        # instead of crashing the main loop.
        print ("Pilihan Tidak Ada Di Menu!")
        os.system("clear")
        return
    os.system("clear")
    if pilih == 1:
        monitor()
    elif pilih == 2:
        interface()
    elif pilih == 3:
        inject()
    elif pilih == 4:
        scan()
    elif pilih == 5:
        capture()
    elif pilih == 6:
        handshake()
    elif pilih == 7:
        wpa()
    elif pilih == 8:
        # Menu option 8: crack the WPA handshake with hashcat.
        hash()
    elif pilih == 9:
        # Menu option 9: restart networking services.
        restart()
    elif pilih == 00:
        exit()
    elif pilih == 99:
        extra()
    elif pilih == 88:
        mac()
    elif pilih == 77:
        hashcat_capture()
    else:
        print ("Pilihan Tidak Ada Di Menu!")
        os.system("clear")
def interface():
    # Show airmon-ng's interface listing in a held xterm window so the
    # output stays on screen after the command finishes.
    pid =subprocess.Popen(args=["xterm","-hold","-e","airmon-ng"])
def restart():
    # Restore normal networking after an audit session.
    print ("Restarting Service Networking...")
    os.system ("service networking restart")
    print ("Restarting Service Network Manager...")
    os.system("service NetworkManager restart")
    # NOTE(review): only stops an interface named mon0; interfaces created
    # as wlan0mon by newer aircrack-ng versions are not torn down here.
    print ("Removing Wireless Monitor Interface...")
    os.system("airmon-ng stop mon0")
def monitor():
    # Kill processes that interfere with monitor mode, then enable it.
    os.system("xterm -e airmon-ng check kill")
    # NOTE(review): assumes the wireless card is wlan0 — TODO confirm.
    os.system("xterm -e airmon-ng start wlan0")
    os.system("clear")
    menu()
def inject():
    # Test whether the wireless adapter supports packet injection.
    print "Berfungsi Untuk Testing Adapter Apakah Support Injection Atau Tidak"
    print
    interface=raw_input ("input your wireless monitor interface mon0/wlan0mon: ")
    # Run airodump-ng alongside so aireplay-ng has APs to test against.
    pid2 = subprocess.Popen(args=["xterm","-e","airodump-ng",interface])
    time.sleep(1)
    pid = subprocess.Popen(args=["xterm","-hold","-e","aireplay-ng --test "+interface])
    os.system("clear")
    menu()
def scan():
    # Scan nearby wifi networks; leafpad is opened so the user can note
    # the BSSID/channel of the chosen target.
    print "Berfungsi Untuk Scan Jaringan Wifi Di sekitar"
    print
    interface=raw_input("input your wireless monitor interface mon0/wlan0mon: ")
    pid = subprocess.Popen(args=["xterm","-e","airodump-ng "+interface])
    subprocess.Popen(args=["leafpad"])
    os.system("clear")
    menu()
    # Printed after the menu redraw, so this hint appears below the menu.
    print "type ctrl+c when done"
def capture():
    # Capture traffic from one specific access point into a dump file.
    print "Berfungsi Untuk Memonitor Acces Point Yang Target Nya sudah di tentukan"
    print
    interface =raw_input ("input your wireless monitor interface mon0/wlan0mon: ")
    channel=raw_input("Target Channel: ")
    bssid=raw_input("BSSID Target:")
    write=raw_input ("Masukan Nama Hasil Capture:")
    # airodump-ng locked to the target's channel/BSSID, writing to `write`.
    pid = subprocess.Popen(args=["xterm","-hold","-e","airodump-ng", "-c" ,channel, "--bssid" ,bssid ,"-w" ,write, interface])
    menu()
def handshake():
    # Deauthenticate a client so it reconnects, letting the airodump-ng
    # session started from capture() record the WPA handshake.
    print "Berfungsi Untuk Memutuskan Client Dan Menangkap Informasi User Ke Acces Point Termasuk Enskripsi WPAnya"
    print
    interface=raw_input("input wireless monitor wlan0mon/mon0: ")
    bssid=raw_input("BSSID Target: ")
    client=raw_input("input your client target: ")
    packet=raw_input("JUmlah Paket untuk deauth: ")
    os.system("xterm -e aireplay-ng --deauth %s -a %s -c %s --ignore-negative-one %s"%(packet,bssid,client,interface))
def wpa():
    # Dictionary-attack the captured handshake with aircrack-ng.
    print "Untuk Proses Cracking nya"
    wordlist=raw_input("Path Your Wordlist: ")
    capture=raw_input("Your Capture File: ")
    os.system("xterm -hold -e aircrack-ng -w %s %s"%(wordlist,capture))
    menu()
def extra():
    # Generate a wordlist with crunch; WPA passphrases are at least 8 chars.
    minimum = raw_input("Insert Minimum Wordlist *WPA Minimum 8 : ")
    maximum = raw_input("Insert Maximum Wordlist: ")
    os.system("clear")
    # The two lines below show example charset names from charset.lst.
    print "lalpha-numeric"
    print "lalpha"
    charset = raw_input("Insert Charset Type:")
    output = raw_input ("Insert Your Output Name: ")
    # Output is written under /root/ with the user-supplied file name.
    pid = subprocess.Popen(args=["xterm","-hold","-e","crunch" ,minimum, maximum, "-f", "/usr/share/crunch/charset.lst" ,charset,"-o","/root/"+output])
def mac():
    # Spoof the MAC address of an interface (taken down first, as
    # macchanger requires).
    interface = raw_input("Input Your Monitor Interfaces: ")
    subprocess.Popen(args=["ifconfig" ,interface,"down"])
    mac = raw_input("Input Your New Mac Address: ")
    subprocess.Popen(args=["macchanger","-m" ,mac,interface])
def hashcat_capture():
    # Convert an airodump capture with aircrack-ng's -J option —
    # presumably into a hashcat-readable format; verify against the
    # installed aircrack-ng version.
    file = raw_input("Insert Your Capture File: ")
    subprocess.Popen(args=["xterm","-hold","-e","aircrack-ng","-J" ,file])
def hash():
    # Run hashcat (mode 2500, straight attack) on the capture file —
    # presumably the WPA/WPA2 mode; confirm against hashcat's docs.
    # NOTE(review): this shadows the builtin hash(); a rename would be
    # cleaner but would change the function's public name.
    file = raw_input("Insert Your Capture File: ")
    wordlist = raw_input("insert your wordlist path: ")
    subprocess.Popen(args=["xterm","-hold","-e","hashcat","-m","2500","-a","0",file,wordlist])
# Main UI loop: redraw the menu, then block on the user's choice.
# Fix: the original tested "while menu:", which is always true only because
# a function object is truthy — "while True" is the same behavior with the
# intent stated directly.
while True:
    menu()
    pilih()
|
hardghost07/wpa-shoot
|
WPAShot.py
|
Python
|
gpl-3.0
| 7,892
|
[
"VisIt"
] |
3d9e7f7373d9b2f3d30b3e6c784716b3556383eaf08bf3f299117b7c3f5f0c63
|
from frappe import _
def get_data():
    """Return the CRM module's desktop configuration for the frappe desk UI.

    Each top-level dict describes one card (label, icon, items); each item
    points at a doctype, page, report, or help link that the desk renders.
    """
    return [
        # Core CRM documents
        {
            "label": _("Documents"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Lead",
                    "description": _("Database of potential customers."),
                },
                {
                    "type": "doctype",
                    "name": "Customer",
                    "description": _("Customer database."),
                },
                {
                    "type": "doctype",
                    "name": "Opportunity",
                    "description": _("Potential opportunities for selling."),
                },
                {
                    "type": "doctype",
                    "name": "Contact",
                    "description": _("All Contacts."),
                },
                {
                    "type": "doctype",
                    "name": "Newsletter",
                    "description": _("Newsletters to contacts, leads."),
                },
                {
                    "type": "doctype",
                    "name": "Communication",
                    "description": _("Record of all communications of type email, phone, chat, visit, etc."),
                },
            ]
        },
        # SMS tooling
        {
            "label": _("Tools"),
            "icon": "icon-wrench",
            "items": [
                {
                    "type": "doctype",
                    "name": "SMS Center",
                    "description":_("Send mass SMS to your contacts"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Log",
                    "description":_("Logs for maintaining sms delivery status"),
                }
            ]
        },
        # Configuration: campaigns, trees, mailing lists, SMS gateway
        {
            "label": _("Setup"),
            "icon": "icon-cog",
            "items": [
                {
                    "type": "doctype",
                    "name": "Campaign",
                    "description": _("Sales campaigns."),
                },
                {
                    "type": "page",
                    "label": _("Customer Group"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Customer Group",
                    "description": _("Manage Customer Group Tree."),
                    "doctype": "Customer Group",
                },
                {
                    "type": "page",
                    "label": _("Territory"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Territory",
                    "description": _("Manage Territory Tree."),
                    "doctype": "Territory",
                },
                {
                    "type": "page",
                    "label": _("Sales Person"),
                    "name": "Sales Browser",
                    "icon": "icon-sitemap",
                    "link": "Sales Browser/Sales Person",
                    "description": _("Manage Sales Person Tree."),
                    "doctype": "Sales Person",
                },
                {
                    "type": "doctype",
                    "name": "Newsletter List",
                    "description": _("Newsletter Mailing List"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Settings",
                    "description": _("Setup SMS gateway settings")
                },
            ]
        },
        # Dashboards
        {
            "label": _("Main Reports"),
            "icon": "icon-table",
            "items": [
                {
                    "type": "page",
                    "name": "sales-funnel",
                    "label": _("Sales Funnel"),
                    "icon": "icon-bar-chart",
                },
            ]
        },
        # Query reports
        {
            "label": _("Standard Reports"),
            "icon": "icon-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Details",
                    "doctype": "Lead"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customer Addresses and Contacts",
                    "doctype": "Contact"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customers Not Buying Since Long Time",
                    "doctype": "Sales Order"
                },
            ]
        },
        # Help videos
        {
            "label": _("Help"),
            "items": [
                {
                    "type": "help",
                    "label": _("Lead to Quotation"),
                    "youtube_id": "TxYX4r4JAKA"
                },
            ]
        },
    ]
|
sheafferusa/erpnext
|
erpnext/config/crm.py
|
Python
|
agpl-3.0
| 3,225
|
[
"VisIt"
] |
a6804967360ad3b99ed6207adcbb2f6cdbe5b932bc907e4c78d42769586a3fd7
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from pymatgen.analysis.magnetism.analyzer import *
|
fraricci/pymatgen
|
pymatgen/analysis/magnetism/__init__.py
|
Python
|
mit
| 161
|
[
"pymatgen"
] |
e5544e10b55dd2ec03ac8fd6b08b17c958fac15b0c030b89ad2b035e2aba1313
|
import statsmodels.api as sm
import mdtraj as md
import simtk.unit as u
# Compare a benzene-in-water trajectory at two temperatures (1600 K vs 300 K).
# NOTE(review): this looks like an exported interactive (pylab) session —
# `np` and `plot` are used below but never imported in this file; running it
# as a plain script raises NameError. Add `import numpy as np` and a pyplot
# import (or run under ipython --pylab) — TODO confirm.
code = "benzene"
ff_name = "amber99sbildn"
water_name = 'tip3p'
which_forcefield = "%s.xml" % ff_name
which_water = '%s.xml' % water_name
out_pdb_filename = "./water/box.pdb"
temperature = 1600.0 * u.kelvin
dcd_filename = "./water/%s_%s_%s_%s.dcd" % (code, ff_name, water_name, temperature)
t1 = md.load(dcd_filename, top=out_pdb_filename)
np.diff(t1.xyz[:, 0], 0).std(0)  # result unused — presumably inspected interactively
temperature = 300.0 * u.kelvin
dcd_filename = "./water/%s_%s_%s_%s.dcd" % (code, ff_name, water_name, temperature)
t0 = md.load(dcd_filename, top=out_pdb_filename)
np.diff(t0.xyz[:, 0], 0).std(0)  # result unused — presumably inspected interactively
# Distance between atoms 0 and 15 along each trajectory, plotted together.
d0 = md.compute_distances(t0, np.array([[0, 15]]))[:, 0]
d1 = md.compute_distances(t1, np.array([[0, 15]]))[:, 0]
plot(d0, label="300")
plot(d1, label="hot")
|
kyleabeauchamp/T4Binding
|
code/benzene_box_analyze.py
|
Python
|
gpl-2.0
| 808
|
[
"MDTraj"
] |
a12cc54ac8c8ef1c1d3ac80fb68cdbc4fa6123f909a7e5fc7a28420c25f9d6c8
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from event import Event
from file import Location
from mongodb import ObjectJSON
from task import Task, DummyTask
class Scheduler(object):
"""
Class to handle task execution on a resource
Notes
-----
In RP this would correspond to a Pilot with a UnitManager
Attributes
----------
project : `Project`
a back reference to the project that uses this scheduler
tasks : dict uid : `Task`
dict that references all running task by the associated CU.uid
wrapper : `Task`
a wrapping task that contains additional commands to be executed
around each task running on that scheduler. It usually contains
adding certain paths, etc.
"""
def __init__(self, resource, queue=None, runtime=240, cores=1):
"""
Parameters
----------
resource : `Resource`
a `Resource` where this scheduler works on
queue : str
the name of the queue to be used for pilot creation
runtime : int
max runtime in minutes for the created pilot
cores
number of used cores to be used in the created pilot
"""
self.resource = resource
self.queue = queue
self.runtime = runtime
self.cores = cores
self.project = None
self.tasks = dict()
self.auto_submit_dependencies = True
self._generator_list = []
self._events = []
self._stop_signal = False
self._shutting_down = False
self._finished = False
self.wrapper = DummyTask()
self._folder_name = None
self.simplifier = ObjectJSON()
self._state_cb = None
self.state = 'booting'
@property
def staging_area_location(self):
"""
Return the path to the staging area used by this scheduler
"""
return 'sandbox:///' + self.folder_name + '/staging_area'
@property
def generators(self):
"""
Return the generators of the attached project
Returns
-------
list of `TaskGenerator`
"""
if self.project:
return self.project.generators
else:
return []
@property
def folder_name(self):
return self._folder_name
def get_path(self, f):
"""
Get the schedulers representation of the path in `Location` object
Parameters
----------
f : `Location`
the location object
Returns
-------
str
a real file path
"""
return self.replace_prefix(f.url)
# def in_staging_area(self, url):
# pass
def unroll_staging_path(self, location):
"""
Convert a staging location into an adaptiveMD location
Parameters
----------
location : `Location`
the location to the changed
"""
if location.drive == 'staging':
location.location = self.staging_area_location + location.path
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
fail = True
if exc_type is None:
pass
elif issubclass(exc_type, (KeyboardInterrupt, SystemExit)):
# self.report.warn('exit requested\n')
pass
elif issubclass(exc_type, Exception):
# self.report.error('caught exception: %s\n' % exc_type)
fail = False
self.exit()
return fail
def enter(self, project=None):
"""
Call a preparations to use a scheduler
Parameters
----------
project : `Project`
the project the worker should execute for
"""
if project is not None:
self.project = project
def __call__(self, submission):
return self.submit(submission)
@property
def is_idle(self):
"""
Check whether the scheduler is idle
"""
return len(self.tasks) == 0
def exit(self):
"""
Shut down the scheduler
"""
self.shut_down(False)
def stage_generators(self):
"""
Prepare files and folder for all generators
"""
pass
def stage_in(self, staging):
pass
def flatten_location(self, obj):
if isinstance(obj, Location):
return self.replace_prefix(obj.url)
elif isinstance(obj, list):
return map(self.flatten_location, obj)
elif isinstance(obj, dict):
return {
self.flatten_location(key): self.flatten_location(value)
for key, value in obj.iteritems()
}
elif isinstance(obj, tuple):
return tuple(map(self.flatten_location, obj))
else:
return obj
def remove_task(self, task):
pass
def _to_tasks(self, submission):
if isinstance(submission, (tuple, list)):
return sum(map(self._to_tasks, submission), [])
elif isinstance(submission, Task):
if submission in self.tasks.values() or submission.is_done():
return []
if submission.ready:
return [submission]
else:
if self.auto_submit_dependencies:
return self._to_tasks(submission.dependencies)
else:
return []
# else:
# for cls, gen in self.file_generators.items():
# if isinstance(submission, cls):
# return self._to_tasks(gen(submission))
#
# return []
return []
def _to_events(self, submission):
if isinstance(submission, (tuple, list)):
return sum(map(self._to_events, submission), [])
elif isinstance(submission, Event):
return [submission]
else:
return []
def submit(self, submission):
"""
Submit a task in form of an event, a task or an task-like object
Parameters
----------
submission : (list of) [`Task` or object or `Event`]
Returns
-------
list of `Task`
the list of tasks actually executed after looking at all objects
"""
return self._to_tasks(submission)
def add_event(self, event):
if isinstance(event, (tuple, list)):
map(self._events.append, event)
else:
self._events.append(event)
self.trigger()
return event
def trigger(self):
"""
Trigger a check of state changes that leads to task execution
"""
# delegate to project level
self.project.trigger()
def shut_down(self, wait_to_finish=True):
"""
Do a controlled shutdown. Cancel all units and wait until they finish.
Parameters
----------
wait_to_finish : bool
if True default the function will block until all tasks report
finish
"""
if not self._finished:
self._finished = True
def on(self, condition):
"""
Shortcut for creation and appending of a new Event
Parameters
----------
condition : `Condition`
Returns
-------
`Event`
"""
ev = Event(condition)
self._events.append(ev)
return ev
def wait(self):
"""
Wait until no more units are running and hence no more state changes
"""
pass
def cancel_events(self):
"""
Remove all pending events and stop them from further task execution
"""
for ev in self._events:
ev.cancel()
self._events = []
def replace_prefix(self, path):
"""
Interprete adaptive paths and replace prefixes with real os paths
Parameters
----------
path : str
the path with an adaptiveMD prefix
Returns
-------
str
the path without any adaptiveMD prefixes
"""
path = path.replace('staging://', '../staging_area')
# the rp sandbox://
path = path.replace('sandbox://', '../..')
# the main remote shared FS
path = path.replace('shared://', '../../..')
path = path.replace('worker://', '')
path = path.replace('file://', '')
# the specific project folder://
path = path.replace(
'project://', '../../projects/' + self.project.name)
return path
def change_state(self, new_state):
print 'changed state to', new_state
self.state = new_state
if self._state_cb is not None:
self._state_cb(self)
@property
def is_idle(self):
return len(self.tasks) == 0 and self.state == 'running'
|
thempel/adaptivemd
|
adaptivemd/scheduler.py
|
Python
|
lgpl-2.1
| 9,942
|
[
"MDTraj"
] |
28f1e335e45d270a381ebf27f2841db3f5397d64ffd86fea35d0e86a12950423
|
#!/usr/bin/env python
"""
This script prepares/copies the required settings files and directories to the tombo scratch
filesystem and creates a job script before submitting it to the job que
Author: Tom Close (tclose@oist.jp)
Created: 20/8/2013
"""
#Name of the script for the output directory and submitted mpi job
SCRIPT_NAME = 'neurofitter'
# Required imports
import argparse
import os.path
import neurofitter.tombo
# Arguments to the script
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--np', type=int, default=256,
                    help="The the number of processes to use for the simulation "
                    "(default: %(default)s)")
parser.add_argument('--que_name', type=str, default='short',
                    help="The the que to submit the job to(default: %(default)s)")
parser.add_argument('--output_dir', default=None, type=str,
                    help="The parent directory in which the output directory will be created "
                    "(defaults to $HOME/Output)")
parser.add_argument('--max_memory', type=str, default='3g',
                    help="The maximum memory allocated to run the network (when tested the neuron "
                    "version required 1~1.5Gb and the NEST version ~500Mb so 2G is set as the "
                    "safe default")
parser.add_argument('--virtual_memory', type=str, default='2g',
                    help="The average memory usage required by the program, decides when the "
                    "scheduler is able to run the job")
parser.add_argument('--name', type=str, default=None,
                    help="Saves a file within the output directory with the name 'name' for easy "
                    "renaming of the output directory after it is copied to its final "
                    "destination, via the command 'mv <output_dir> `cat <output_dir>/name`'")
args = parser.parse_args()
# Create work directory and get path for output directory
# NOTE(review): `required_dirs` is used here but never defined in this
# script — this raises NameError at run time; TODO confirm where it is
# meant to come from.
work_dir, output_dir = neurofitter.tombo.create_work_dir(SCRIPT_NAME, args.output_dir,
                                                         required_dirs=required_dirs)
# NOTE(review): the format() call passes script_name but the template only
# uses {work_dir}; script_name is silently ignored.
cmd_line = ("time mpirun mpineurofitter {work_dir}/settings.xml".format(script_name=SCRIPT_NAME,
                                                                        work_dir=work_dir))
# Submit job to que
# NOTE(review): `args.dry_run` and `args.keep_build` have no matching
# parser.add_argument() above, and `copy_to_output` is never defined —
# each of these fails at run time; TODO confirm the missing definitions.
if not args.dry_run:
    neurofitter.tombo.submit_job(SCRIPT_NAME, cmd_line, args.np, work_dir, output_dir,
                                 copy_to_output=copy_to_output, que_name=args.que_name,
                                 strip_build_from_copy=(not args.keep_build), name=args.name,
                                 max_memory=args.max_memory, virtual_memory=args.virtual_memory)
|
wvangeit/NeuroFitter
|
scripts/neurofitter.py
|
Python
|
gpl-2.0
| 2,760
|
[
"NEURON"
] |
2a1cfc48c44f8ceced76ee5d6bce40e297dae8b1934136de97d14f18248415c3
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
    r"""Compute the Bayesian Information Criterion (BIC).

    .. math::
        \mathrm{BIC} = k \ln(n) - 2L,

    where :math:`n` is the sample size, :math:`k` the number of free
    parameters, and :math:`L` the log-likelihood of the model evaluated at
    its maximum-likelihood estimate. When comparing fits, the model with
    the lowest BIC is favored; a BIC difference above roughly 10 is very
    strong evidence against the model with the higher BIC, while a
    difference below 2 is only weak evidence.

    Parameters
    ----------
    log_likelihood : float
        Logarithm of the likelihood function of the model evaluated at the
        point of maxima (with respect to the parameter space).
    n_params : int
        Number of free parameters of the model, i.e., dimension of the
        parameter space.
    n_samples : int
        Number of observations.

    Returns
    -------
    bic : float
        Bayesian Information Criterion.

    Examples
    --------
    >>> from astropy.stats.info_theory import bayesian_info_criterion
    >>> bic_g = bayesian_info_criterion(-176.4, 2, 100)
    >>> bic_t = bayesian_info_criterion(-173.0, 3, 100)
    >>> bic_g - bic_t  # doctest: +FLOAT_CMP
    2.1948298140119391

    References
    ----------
    .. [1] Wikipedia. Bayesian Information Criterion.
       <https://en.wikipedia.org/wiki/Bayesian_information_criterion>
    .. [2] Liddle, A. R. Information Criteria for Astrophysical Model
       Selection. 2008. <http://arxiv.org/pdf/astro-ph/0701113v2.pdf>
    """
    # Complexity penalty k*ln(n) minus twice the goodness-of-fit term.
    complexity_penalty = n_params * np.log(n_samples)
    return complexity_penalty - 2.0 * log_likelihood
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
    r"""Compute the BIC assuming Gaussian-distributed observations.

    For a least-squares fit the BIC reduces to

    .. math::
        \mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)

    where :math:`n` is the sample size, :math:`k` the number of free
    parameters and :math:`\mathrm{SSR}` the sum of squared residuals
    between model and data. This applies, for instance, when model
    parameters are estimated with the least squares statistic.

    Parameters
    ----------
    ssr : float
        Sum of squared residuals (SSR) between model and data.
    n_params : int
        Number of free parameters of the model, i.e., dimension of the
        parameter space.
    n_samples : int
        Number of observations.

    Returns
    -------
    bic : float

    References
    ----------
    .. [1] Wikipedia. Bayesian Information Criterion.
       <https://en.wikipedia.org/wiki/Bayesian_information_criterion>
    .. [2] Origin Lab. Comparing Two Fitting Functions.
       <http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
    """
    # The Gaussian log-likelihood at the least-squares optimum is
    # -n/2 * ln(SSR/n) (up to an additive constant); feeding that into the
    # generic formula k*ln(n) - 2L gives n*ln(SSR/n) + k*ln(n).
    gaussian_log_likelihood = -0.5 * n_samples * np.log(ssr / n_samples)
    return n_params * np.log(n_samples) - 2.0 * gaussian_log_likelihood
def akaike_info_criterion(log_likelihood, n_params, n_samples):
    r"""Compute the Akaike Information Criterion (AIC).

    Like the BIC, the AIC measures relative fitting quality for model
    selection; the model with the lowest AIC is favored. For large samples

    .. math::
        \mathrm{AIC} = 2(k - L)

    where :math:`k` is the number of free parameters and :math:`L` the log
    likelihood at the maximum-likelihood estimate. When the sample is not
    "large enough" (here: fewer than 40 observations per parameter) the
    small-sample correction is added:

    .. math::
        \mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}

    Rule of thumb for :math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i -
    \mathrm{AIC}_{min}`: below 2, substantial support for model *i*;
    between 3 and 7, considerably less support; above 10, essentially none.

    Parameters
    ----------
    log_likelihood : float
        Logarithm of the likelihood function of the model evaluated at the
        point of maxima (with respect to the parameter space).
    n_params : int
        Number of free parameters of the model, i.e., dimension of the
        parameter space.
    n_samples : int
        Number of observations.

    Returns
    -------
    aic : float
        Akaike Information Criterion.

    Examples
    --------
    >>> aic1 = akaike_info_criterion(-3.54, 6, 121)
    >>> aic2 = akaike_info_criterion(-4.17, 5, 121)
    >>> aic1 - aic2  # doctest: +FLOAT_CMP
    0.9551029748283746

    References
    ----------
    .. [1] Wikipedia. Akaike Information Criterion.
       <https://en.wikipedia.org/wiki/Akaike_information_criterion>
    .. [2] Liddle, A. R. Information Criteria for Astrophysical Model
       Selection. 2008. <http://arxiv.org/pdf/astro-ph/0701113v2.pdf>
    """
    # Fewer than 40 samples per parameter: apply the small-sample
    # (second-order) correction term.
    if n_samples / float(n_params) < 40.0:
        aic = (2.0 * (n_params - log_likelihood) +
               2.0 * n_params * (n_params + 1.0) /
               (n_samples - n_params - 1.0))
    else:
        aic = 2.0 * (n_params - log_likelihood)
    return aic
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
    r"""
    Compute the Akaike Information Criterion assuming that the observations
    are Gaussian distributed.

    In this case

    .. math::

        \mathrm{AIC} = n \ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k

    with a small-sample correction term :math:`2k(k+1)/(n-k-1)` added
    automatically when :math:`n/k < 40`, where :math:`n` is the sample size,
    :math:`k` the number of free parameters, and :math:`\mathrm{SSR}` the sum
    of squared residuals between model and data.  This form applies, for
    instance, when model parameters were estimated by least squares.

    Parameters
    ----------
    ssr : float
        Sum of squared residuals (SSR) between model and data.
    n_params : int
        Number of free parameters of the model, i.e., the dimension of the
        parameter space.
    n_samples : int
        Number of observations.

    Returns
    -------
    aic : float
        Akaike Information Criterion.

    References
    ----------
    .. [1] Akaike Information Criteria
       <http://avesbiodiv.mncn.csic.es/estadistica/ejemploaic.pdf>
    .. [2] Hu, S. Akaike Information Criterion.
       <http://www4.ncsu.edu/~shu3/Presentation/AIC.pdf>
    .. [3] Origin Lab. Comparing Two Fitting Functions.
       <http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
    """
    # For Gaussian residuals the maximized log-likelihood reduces (up to an
    # additive constant) to -n/2 * ln(SSR/n), so delegate to the general
    # AIC routine, which also applies the small-sample correction.
    log_likelihood = -0.5 * n_samples * np.log(ssr / n_samples)
    return akaike_info_criterion(log_likelihood, n_params, n_samples)
|
joergdietrich/astropy
|
astropy/stats/info_theory.py
|
Python
|
bsd-3-clause
| 14,954
|
[
"Gaussian"
] |
03689bc911ceb90446b43da32e6721927fd346d983e47757f4f5078467b29286
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import re
import textwrap
import marshal
from setuptools.extern import six
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.extension import Library
from setuptools import Command
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_path, get_python_version

    def _get_purelib():
        # Platform-independent site-packages location per sysconfig.
        return get_path("purelib")
except ImportError:
    # Older interpreters lack ``sysconfig``; fall back to distutils.
    from distutils.sysconfig import get_python_lib, get_python_version

    def _get_purelib():
        # get_python_lib(False) -> purelib (non-platform-specific) dir.
        return get_python_lib(False)
def strip_module(filename):
    """Return *filename* with its extension and any trailing 'module' removed.

    Used to derive the stub-loader name for a compiled extension, e.g.
    ``foomodule.so`` -> ``foo``.
    """
    base = os.path.splitext(filename)[0] if '.' in filename else filename
    if base.endswith('module'):
        base = base[:-len('module')]
    return base
def sorted_walk(dir):
    """Walk *dir* like os.walk but in a reproducible (sorted) order.

    Sorting ``dirs`` and ``files`` in place keeps the traversal independent
    of filesystem readdir order while still letting callers prune ``dirs``
    to skip subtrees.
    """
    walker = os.walk(dir)
    for base, dirs, files in walker:
        # In-place sort: os.walk reuses ``dirs`` to decide what to descend
        # into, so a new list would break pruning by callers.
        dirs.sort()
        files.sort()
        yield (base, dirs, files)
def write_stub(resource, pyfile):
    """Write a stub loader module at *pyfile* for the C extension *resource*.

    The stub lets a zipped egg load a native extension: at import time it
    resolves the real shared-object path via pkg_resources and loads it
    with ``imp.load_dynamic``.
    """
    stub = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip() % (resource,)
    with open(pyfile, 'w') as f:
        f.write(stub)
class bdist_egg(Command):
    """setuptools command that builds a binary ``.egg`` distribution.

    Installs the project into a temporary tree, generates EGG-INFO
    metadata, stub loaders for C extensions, and zip-safety flags, then
    zips the tree into ``dist/<name>-<version>[-pyX.Y[-plat]].egg``.
    """

    description = "create an \"egg\" distribution"

    # (long-name, short-name, help) option triples in distutils format.
    user_options = [
        ('bdist-dir=', 'b',
         "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
         "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
         "keep the pseudo-installation tree around after " +
         "creating the distribution archive"),
        ('dist-dir=', 'd',
         "directory to put final built distributions in"),
        ('skip-build', None,
         "skip rebuilding everything (for testing/debugging)"),
    ]

    # Options that take no argument (presence == true).
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options(self):
        # Everything starts unset; finalize_options fills in defaults.
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        # Resolve defaults from the egg_info and bdist commands.
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                # Embed the platform tag only for eggs with compiled code.
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')

    def do_install_data(self):
        """Run install_data into the egg tree, relocating absolute paths."""
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files, []
        for item in old:
            if isinstance(item, tuple) and len(item) == 2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized == site_packages or normalized.startswith(
                        site_packages + os.sep
                    ):
                        # Rewrite site-packages targets as egg-relative.
                        item = realpath[len(site_packages) + 1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s", self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Always restore the original data_files list.
            self.distribution.data_files = old

    def get_outputs(self):
        """Return the files this command produces (just the egg)."""
        return [self.egg_output]

    def call_command(self, cmdname, **kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        # Point every install-directory option at the egg build tree.
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname, self.bdist_dir)
        kw.setdefault('skip_build', self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        """Build the egg: install into a temp tree, add metadata, zip it."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s", self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        # Generate a .py stub loader next to each compiled extension.
        for (p, ext_name) in enumerate(ext_outputs):
            filename, ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
                                  '.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s", ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            # native_libs.txt uses forward slashes regardless of platform.
            ext_outputs[p] = ext_name.replace(os.sep, '/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root, 'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s", script_dir)
            self.call_command('install_scripts', install_dir=script_dir,
                              no_ep=1)
        self.copy_metadata_to(egg_info)
        # Record (or clear) the list of native libraries in the egg.
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s", native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s", native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                     dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_egg', get_python_version(), self.egg_output))

    def zap_pyfiles(self):
        """Remove .py sources from the build tree, keeping only bytecode."""
        log.info("Removing .py files from temporary directory")
        for base, dirs, files in walk_egg(self.bdist_dir):
            for name in files:
                path = os.path.join(base, name)
                if name.endswith('.py'):
                    log.debug("Deleting %s", path)
                    os.unlink(path)
                if base.endswith('__pycache__'):
                    path_old = path
                    # Strip the interpreter magic tag: name.<magic>.pyc -> name.pyc
                    pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
                    m = re.match(pattern, name)
                    # NOTE(review): assumes every file in __pycache__ matches
                    # the pattern; a non-matching name would make ``m`` None.
                    path_new = os.path.join(
                        base, os.pardir, m.group('name') + '.pyc')
                    log.info(
                        "Renaming file from [%s] to [%s]"
                        % (path_old, path_new))
                    try:
                        os.remove(path_new)
                    except OSError:
                        pass
                    os.rename(path_old, path_new)

    def zip_safe(self):
        """Return the project's zip-safe flag, analyzing contents if unset."""
        safe = getattr(self.distribution, 'zip_safe', None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def gen_header(self):
        """Return the zipfile open mode ('w' or 'a' after a shell header).

        For an "eggsecutable" entry point, a shell script header is written
        first and the zip data is appended ('a') so the egg can be executed
        directly.
        """
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
        if ep is None:
            return 'w'  # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info, '')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        # Map each visited directory to its egg-relative prefix.
        paths = {self.bdist_dir: ''}
        for base, dirs, files in sorted_walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base] + filename)
            for filename in dirs:
                paths[os.path.join(base, filename)] = (paths[base] +
                                                       filename + '/')
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext, Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
# File extensions that identify native (compiled) code inside an egg.
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the EGG-INFO metadata dir."""
    walker = sorted_walk(egg_dir)
    top = next(walker)
    # Prune the metadata directory from the top-level entry so the
    # traversal never descends into EGG-INFO.
    if 'EGG-INFO' in top[1]:
        top[1].remove('EGG-INFO')
    yield top
    for entry in walker:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Decide whether the unpacked egg at *egg_dir* is zip-safe.

    An explicit flag file in EGG-INFO wins; otherwise every compiled
    module is scanned for zip-unsafe constructs.
    """
    # check for existing flag in EGG-INFO
    for flag, fn in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith(('.py', '.pyw')):
                continue
            if name.endswith(('.pyc', '.pyo')):
                # always scan, even if we already know we're not safe,
                # so every problem module gets logged.
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Create/remove the zip-safety marker file(s) under *egg_dir*.

    Exactly one marker ('zip-safe' or 'not-zip-safe') remains when *safe*
    is a boolean; both are removed when *safe* is None.
    """
    for flag, fn in safety_flags.items():
        path = os.path.join(egg_dir, fn)
        matches = safe is not None and bool(safe) == flag
        if os.path.exists(path):
            # Drop a stale marker that no longer reflects ``safe``.
            if not matches:
                os.unlink(path)
        elif matches:
            with open(path, 'wt') as f:
                f.write('\n')
# Map zip-safety verdict -> marker filename written into EGG-INFO.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Reads the compiled module at ``base/name``, decodes its marshalled
    code object, and flags references to ``__file__``/``__path__`` or to
    inspect functions that need real source files on disk.  Returns True
    when the module looks zip-safe.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module stub (.pyc of a generated .py)
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # Header size before the marshalled code object varies by bytecode format.
    if sys.version_info < (3, 3):
        skip = 8  # skip magic & date
    elif sys.version_info < (3, 7):
        skip = 12  # skip magic & date & file size
    else:
        skip = 16  # skip magic & reserved? & date & file size
    # ``with`` guarantees the handle is closed even if marshal.load raises.
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUG FIX: the original list was missing a comma between 'getfile'
        # and 'getsourcelines', silently concatenating them into the bogus
        # name 'getfilegetsourcelines' so neither was ever detected.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, CodeType):
            # Recurse into nested functions/classes/comprehensions.
            for sym in iter_symbols(const):
                yield sym
        elif isinstance(const, six.string_types):
            yield const
def can_scan():
    """Return True when compiled bytecode can be analyzed on this platform."""
    if sys.platform.startswith('java') or sys.platform == 'cli':
        # Jython/IronPython bytecode is not CPython marshal format, so the
        # zip-safety scan cannot run there.
        log.warn("Unable to analyze compiled code on this platform.")
        log.warn("Please ask the author to include a 'zip_safe'"
                 " setting (either True or False) in the package's setup.py")
        return None
    # CPython, PyPy, etc.
    return True
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
# (consumed by bdist_egg.call_command via kw.setdefault).
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses either the "zipfile"
    Python module (if available) or the InfoZIP "zip" utility (if installed
    and found on the default search path).  If neither tool is available,
    raises DistutilsExecError.  Returns the name of the output zip file.
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # Add every regular file, archived relative to base_dir.
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if not os.path.isfile(path):
                continue
            p = path[len(base_dir) + 1:]
            if not dry_run:
                z.write(path, p)
            log.debug("adding '%s'", p)

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if dry_run:
        # Walk the tree for logging purposes only; nothing is written.
        for dirname, dirs, files in sorted_walk(base_dir):
            visit(None, dirname, files)
    else:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in sorted_walk(base_dir):
            visit(z, dirname, files)
        z.close()
    return zip_filename
|
ARL-UTEP-OC/emubox
|
workshop-creator/python27-64bit-gtk3/Lib/site-packages/setuptools/command/bdist_egg.py
|
Python
|
gpl-2.0
| 18,185
|
[
"VisIt"
] |
450f61f019925695ca250493de2fdbfec9b4f7767e6825db7cc04433622b23e4
|
# Copyright (c) 2014, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import GPy
import numpy as np
from GPy.util import datasets
try:
import matplotlib.pyplot as plt
except:
pass
def student_t_approx(optimize=True, plot=True):
    """
    Example of regressing with a student t likelihood using Laplace

    Fits four GPs to a noisy sine with a handful of corrupted points:
    Gaussian likelihood on clean and corrupt data (m1, m2) and Student-t
    likelihood via Laplace approximation on the same (m3, m4).
    """
    real_std = 0.1
    # Start a function, any function
    X = np.linspace(0.0, np.pi*2, 100)[:, None]
    Y = np.sin(X) + np.random.randn(*X.shape)*real_std
    Y = Y/Y.max()
    Yc = Y.copy()
    # Dense grid of the true (noise-free) function for plotting.
    X_full = np.linspace(0.0, np.pi*2, 500)[:, None]
    Y_full = np.sin(X_full)
    Y_full = Y_full/Y_full.max()
    # Slightly noisy data: corrupt a small run of points by +1.
    Yc[75:80] += 1
    # Very noisy data
    #Yc[10] += 100
    #Yc[25] += 10
    #Yc[23] += 10
    #Yc[26] += 1000
    #Yc[24] += 10
    #Yc = Yc/Yc.max()
    # Add student t random noise to datapoints
    deg_free = 1
    print "Real noise: ", real_std
    initial_var_guess = 0.5
    edited_real_sd = initial_var_guess
    # Kernel object: RBF + white noise, one fresh copy per model.
    kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel3 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel4 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    # Gaussian GP model on clean data
    m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
    # optimize
    m1['.*white'].constrain_fixed(1e-5)
    m1.randomize()
    # Gaussian GP model on corrupt data
    m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
    m2['.*white'].constrain_fixed(1e-5)
    m2.randomize()
    # Student t GP model on clean data
    t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
    m3['.*t_scale2'].constrain_bounded(1e-6, 10.)
    m3['.*white'].constrain_fixed(1e-5)
    m3.randomize()
    # Student t GP model on corrupt data
    t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
    m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
    m4['.*white'].constrain_fixed(1e-5)
    m4.randomize()
    print m4
    debug=True
    if debug:
        # Debug path: optimize and plot only the corrupt Student-t model.
        m4.optimize(messages=1)
        import pylab as pb
        pb.plot(m4.X, m4.inference_method.f_hat)
        pb.plot(m4.X, m4.Y, 'rx')
        m4.plot()
        print m4
        return m4
    # NOTE(review): ``debug`` is hard-coded True above, so everything below
    # is unreachable as written — confirm whether it should default to False.
    if optimize:
        optimizer='scg'
        print "Clean Gaussian"
        m1.optimize(optimizer, messages=1)
        print "Corrupt Gaussian"
        m2.optimize(optimizer, messages=1)
        print "Clean student t"
        m3.optimize(optimizer, messages=1)
        print "Corrupt student t"
        m4.optimize(optimizer, messages=1)
    if plot:
        # Figure 1: Gaussian likelihood on clean vs corrupt data.
        plt.figure(1)
        plt.suptitle('Gaussian likelihood')
        ax = plt.subplot(211)
        m1.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Gaussian clean')
        ax = plt.subplot(212)
        m2.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Gaussian corrupt')
        # Figure 2: Student-t likelihood on clean vs corrupt data.
        plt.figure(2)
        plt.suptitle('Student-t likelihood')
        ax = plt.subplot(211)
        m3.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Student-t rasm clean')
        ax = plt.subplot(212)
        m4.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Student-t rasm corrupt')
    return m1, m2, m3, m4
def boston_example(optimize=True, plot=True):
    """Cross-validated comparison of Gaussian vs Student-t GP regression
    on the Boston housing dataset (RMSE and predictive density per fold).
    """
    raise NotImplementedError("Needs updating")
    # NOTE(review): the raise above makes everything below unreachable;
    # kept as-is pending the update the message refers to.
    import sklearn
    from sklearn.cross_validation import KFold
    optimizer='bfgs'
    messages=0
    data = datasets.boston_housing()
    degrees_freedoms = [3, 5, 8, 10]
    X = data['X'].copy()
    Y = data['Y'].copy()
    # Standardize features and targets.
    X = X-X.mean(axis=0)
    X = X/X.std(axis=0)
    Y = Y-Y.mean()
    Y = Y/Y.std()
    num_folds = 10
    kf = KFold(len(Y), n_folds=num_folds, indices=True)
    num_models = len(degrees_freedoms) + 3 #3 for baseline, gaussian, gaussian laplace approx
    score_folds = np.zeros((num_models, num_folds))
    pred_density = score_folds.copy()

    def rmse(Y, Ystar):
        # Root mean squared error between targets and predictions.
        return np.sqrt(np.mean((Y-Ystar)**2))

    for n, (train, test) in enumerate(kf):
        X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
        print "Fold {}".format(n)
        noise = 1e-1 #np.exp(-2)
        rbf_len = 0.5
        data_axis_plot = 4
        kernelstu = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
        kernelgp = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
        # Baseline: predict the training mean (row 0 of the score table).
        score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
        # Gaussian GP (row 1)
        print "Gauss GP"
        mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
        mgp.constrain_fixed('.*white', 1e-5)
        mgp['.*len'] = rbf_len
        mgp['.*noise'] = noise
        print mgp
        if optimize:
            mgp.optimize(optimizer=optimizer, messages=messages)
        Y_test_pred = mgp.predict(X_test)
        score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
        pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
        print mgp
        print pred_density
        # Gaussian likelihood fitted via the Laplace approximation (row 2) —
        # sanity check that Laplace matches exact Gaussian inference.
        print "Gaussian Laplace GP"
        N, D = Y_train.shape
        g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D)
        g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
        mg = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=g_likelihood)
        mg.constrain_positive('noise_variance')
        mg.constrain_fixed('.*white', 1e-5)
        mg['rbf_len'] = rbf_len
        mg['noise'] = noise
        print mg
        if optimize:
            mg.optimize(optimizer=optimizer, messages=messages)
        Y_test_pred = mg.predict(X_test)
        score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
        pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
        print pred_density
        print mg
        # Student-t models, one per degrees-of-freedom value (rows 3+).
        for stu_num, df in enumerate(degrees_freedoms):
            # Student T
            print "Student-T GP {}df".format(df)
            t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise)
            stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
            mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
            mstu_t.constrain_fixed('.*white', 1e-5)
            mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000)
            mstu_t['rbf_len'] = rbf_len
            mstu_t['.*t_scale2'] = noise
            print mstu_t
            if optimize:
                mstu_t.optimize(optimizer=optimizer, messages=messages)
            Y_test_pred = mstu_t.predict(X_test)
            score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0])
            pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test))
            print pred_density
            print mstu_t
        if plot:
            # NOTE(review): all three scatter plots reuse the last
            # Y_test_pred (the final Student-t model's predictions) despite
            # the differing titles — confirm intent.
            plt.figure()
            plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
            plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
            plt.title('GP gauss')
            plt.figure()
            plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
            plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
            plt.title('Lap gauss')
            plt.figure()
            plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
            plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
            plt.title('Stu t {}df'.format(df))
    print "Average scores: {}".format(np.mean(score_folds, 1))
    print "Average pred density: {}".format(np.mean(pred_density, 1))
    if plot:
        # Plotting
        stu_t_legends = ['Student T, df={}'.format(df) for df in degrees_freedoms]
        legends = ['Baseline', 'Gaussian', 'Laplace Approx Gaussian'] + stu_t_legends
        # Plot boxplots for RMSE density
        fig = plt.figure()
        ax=fig.add_subplot(111)
        plt.title('RMSE')
        bp = ax.boxplot(score_folds.T, notch=0, sym='+', vert=1, whis=1.5)
        plt.setp(bp['boxes'], color='black')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='red', marker='+')
        xtickNames = plt.setp(ax, xticklabels=legends)
        plt.setp(xtickNames, rotation=45, fontsize=8)
        ax.set_ylabel('RMSE')
        ax.set_xlabel('Distribution')
        # Make grid and put it below boxes
        ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                      alpha=0.5)
        ax.set_axisbelow(True)
        # Plot boxplots for predictive density (baseline row has none).
        fig = plt.figure()
        ax=fig.add_subplot(111)
        plt.title('Predictive density')
        bp = ax.boxplot(pred_density[1:,:].T, notch=0, sym='+', vert=1, whis=1.5)
        plt.setp(bp['boxes'], color='black')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='red', marker='+')
        xtickNames = plt.setp(ax, xticklabels=legends[1:])
        plt.setp(xtickNames, rotation=45, fontsize=8)
        ax.set_ylabel('Mean Log probability P(Y*|Y)')
        ax.set_xlabel('Distribution')
        # Make grid and put it below boxes
        ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                      alpha=0.5)
        ax.set_axisbelow(True)
    return mstu_t
#def precipitation_example():
#import sklearn
#from sklearn.cross_validation import KFold
#data = datasets.boston_housing()
#X = data['X'].copy()
#Y = data['Y'].copy()
#X = X-X.mean(axis=0)
#X = X/X.std(axis=0)
#Y = Y-Y.mean()
#Y = Y/Y.std()
#import ipdb; ipdb.set_trace() # XXX BREAKPOINT
#num_folds = 10
#kf = KFold(len(Y), n_folds=num_folds, indices=True)
#score_folds = np.zeros((4, num_folds))
#def rmse(Y, Ystar):
#return np.sqrt(np.mean((Y-Ystar)**2))
##for train, test in kf:
#for n, (train, test) in enumerate(kf):
#X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
#print "Fold {}".format(n)
|
strongh/GPy
|
GPy/examples/non_gaussian.py
|
Python
|
bsd-3-clause
| 10,661
|
[
"Gaussian"
] |
227fad96f93110bd5d805f4df1d4e18e592e52ce743c9590139f2fd01440722d
|
# -*- coding: utf-8 -*-
import os, datetime
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render_to_response, redirect, HttpResponse, Http404
from django.template import RequestContext
from django.contrib.auth import authenticate, logout, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.generic.base import View
from django.views.decorators.csrf import csrf_exempt
from patient.models import PatientInformation, Guardian, MedicalHistory, PastMedicalHistory, PresentMedicalHistory, \
FamilyMedicalHistory, MenstrualHistory, ObstetricHistory, GynaecologicalHistory, ImmunizationHistory, \
Routinecheckup, LaboratoryTest, UltrasoundScanning, AdditionalPatientInformation, Prescription, PreviousObstetricHistory, \
PreviousSurgery, UltrasoundScanningImage
from patient.forms import AddPatientForm, PatientInformationForm, AdditionalPatientInformationForm, GuardianForm, \
MedicalHistoryForm, PastMedicalHistoryForm, PresentMedicalHistoryForm, FamilyMedicalHistoryForm, \
MenstrualHistoryForm, ObstetricHistoryForm, GynaecologicalHistoryForm, ImmunizationHistoryForm, \
RoutinecheckupForm, LaboratoryTestForm, UltrasoundScanningForm, PrescriptionForm, PreviousObstetricHistoryForm, \
PreviousSurgeryForm
from admission.models import PatientVisit, HospitalAdmission
from admission.forms import PatientVisitForm, PatientAdmissionForm
def get_patient(patient_id):
    """Return the PatientInformation with the given id, or raise Http404.

    Any lookup failure (missing row via DoesNotExist, malformed id via
    ValueError) is mapped to a 404 so views can treat a bad id uniformly.
    """
    try:
        return PatientInformation.objects.get(id=patient_id)
    except Exception:
        # Narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt; Exception still covers every
        # lookup failure the original intended to convert to a 404.
        raise Http404
@login_required
def patient_home(request):
    """
    Show patient list. Not used
    """
    context = {'selected_page': 'home'}
    return render_to_response('patient/patient_home.html', context,
                              RequestContext(request))
@login_required
def patient_visit(request, patient_id):
    """
    Display patient visit info
    """
    patient = get_patient(patient_id)
    # The one-to-one reverse accessor raises when no visit exists yet.
    visit = None
    try:
        visit = patient.patientvisit
    except:
        pass
    admissions = HospitalAdmission.objects.filter(patient=patient)
    return render_to_response(
        'patient/patient_visit.html',
        {
            'patient': patient,
            'visit': visit,
            'admissions': admissions,
        },
        RequestContext(request))
@login_required
def patient_visit_add(request, patient_id):
    """
    Add or edit the patient's visit record.

    GET renders a form bound to the existing visit (when one exists) or a
    blank form.  POST validates, attaches the patient, saves, and redirects
    back to the visit page.
    """
    patient = get_patient(patient_id)

    def _build_form(*args):
        # Bind to the existing one-to-one visit so the form edits it;
        # fall back to a fresh form when no visit exists yet.  Narrowed
        # from a bare ``except:`` (which would swallow SystemExit too).
        try:
            return PatientVisitForm(*args, instance=patient.patientvisit)
        except Exception:
            return PatientVisitForm(*args)

    form = _build_form()
    if request.method == "POST":
        form = _build_form(request.POST)
        if form.is_valid():
            form.save(commit=False)
            form.instance.patient = patient
            form.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('patient_visit', args=[patient.id]))
    context = {
        'patient': patient,
        'form': form,
        'selected_tab': 'patient-visit',
        'selected_page': 'patient',
    }
    return render_to_response('patient/patient_visit_add.html', context,
                              RequestContext(request))
@login_required
def patient_admission_add(request, patient_id, admission_id=None):
    """
    Add patient admission
    """
    patient = get_patient(patient_id)

    def make_form(*args):
        # When an admission id was supplied, edit that admission;
        # otherwise the form creates a new one.
        if admission_id:
            instance = HospitalAdmission.objects.get(id=admission_id)
            return PatientAdmissionForm(*args, instance=instance)
        return PatientAdmissionForm(*args)

    form = make_form()
    if request.method == "POST":
        form = make_form(request.POST)
        if form.is_valid():
            form.save(commit=False)
            form.instance.patient = patient
            form.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('patient_visit', args=[patient.id]))
    context = {
        'patient': patient,
        'form': form,
        'selected_tab': 'patient-visit',
        'selected_page': 'patient',
        'admission_id': admission_id,
    }
    return render_to_response('patient/patient_admission_add.html', context,
                              RequestContext(request))
@login_required
def basic_patient_information(request, patient_id):
    """
    Basic Patient Information
    """
    patient = get_patient(patient_id)
    if request.method == "POST":
        # Rebind the form to the submitted data and persist on success.
        form = PatientInformationForm(request.POST, instance=patient)
        if form.is_valid():
            form.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        try:
            form = PatientInformationForm(instance=patient)
        except:
            form = PatientInformationForm()
    context = {
        'patient': patient,
        'form': form,
    }
    return render_to_response('patient/basic_patient_information.html', context,
                              RequestContext(request))
@login_required
def additional_details(request, patient_id):
    """
    Additional patient details

    Renders and processes two independent forms on one page: additional
    patient information (prefix 'f1') and guardian details (prefix 'f2').
    Each form edits the latest existing record for the patient when one
    exists, otherwise creates a new one.  On POST, each form is saved only
    if it actually changed; an unchanged form is re-fetched unbound so
    stale POST data is not redisplayed.
    """
    patient = get_patient(patient_id)
    # Unbound forms for the initial GET render (latest record if any).
    try:
        add_info = AdditionalPatientInformation.objects.filter(patient=patient).latest('id')
        form = AdditionalPatientInformationForm(prefix='f1', instance=add_info)
    except:
        form = AdditionalPatientInformationForm(prefix='f1')
    try:
        guardian = Guardian.objects.filter(patient=patient).latest('id')
        form2 = GuardianForm(prefix='f2', instance=guardian)
    except:
        form2 = GuardianForm(prefix='f2')
    if request.method == "POST":
        # Rebind both forms to the submitted data.
        try:
            add_info = AdditionalPatientInformation.objects.filter(patient=patient).latest('id')
            form = AdditionalPatientInformationForm(request.POST, prefix='f1', instance=add_info)
        except:
            form = AdditionalPatientInformationForm(request.POST, prefix='f1')
        try:
            guardian = Guardian.objects.filter(patient=patient).latest('id')
            form2 = GuardianForm(request.POST, prefix='f2', instance=guardian)
        except:
            form2 = GuardianForm(request.POST, prefix='f2')
        # Track whether at least one of the two forms was saved.
        save_msg = None
        if form.has_changed():
            if form.is_valid():
                form.save(commit=False)
                form.instance.patient = patient
                form.save()
                save_msg = True
        else:
            # Unchanged: replace the bound form with a fresh unbound one.
            try:
                add_info = AdditionalPatientInformation.objects.filter(patient=patient).latest('id')
                form = AdditionalPatientInformationForm(prefix='f1', instance=add_info)
            except:
                form = AdditionalPatientInformationForm(prefix='f1')
        if form2.has_changed():
            if form2.is_valid():
                form2.save(commit=False)
                form2.instance.patient = patient
                form2.save()
                save_msg = True
        else:
            try:
                guardian = Guardian.objects.filter(patient=patient).latest('id')
                form2 = GuardianForm(prefix='f2', instance=guardian)
            except:
                form2 = GuardianForm(prefix='f2')
        if save_msg:
            messages.success(request, _("Successfully save."), fail_silently=True)
    context = {
        'patient': patient,
        'form': form,
        'form2': form2,
    }
    return render_to_response('patient/additional_details.html', context, RequestContext(request))
@login_required
def medical_history(request, patient_id):
    """
    Family medical history.

    GET shows the most recent FamilyMedicalHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    """
    patient = get_patient(patient_id)
    try:
        history = FamilyMedicalHistory.objects.filter(patient=patient).latest('id')
    except FamilyMedicalHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = FamilyMedicalHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = FamilyMedicalHistoryForm(instance=history)
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'fmh',
    }
    return render_to_response('patient/medical_history.html', context, RequestContext(request))
@login_required
def past_medical_history(request, patient_id):
    """
    Past medical history.

    GET shows the most recent PastMedicalHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    """
    patient = get_patient(patient_id)
    try:
        history = PastMedicalHistory.objects.filter(patient=patient).latest('id')
    except PastMedicalHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = PastMedicalHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = PastMedicalHistoryForm(instance=history)
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'pah',
    }
    return render_to_response('patient/past_medical_history.html', context, RequestContext(request))
@login_required
def menstrual_history(request, patient_id):
    """
    Menstrual history.

    GET shows the most recent MenstrualHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    """
    patient = get_patient(patient_id)
    try:
        history = MenstrualHistory.objects.filter(patient=patient).latest('id')
    except MenstrualHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = MenstrualHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = MenstrualHistoryForm(instance=history)
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'meh',
    }
    return render_to_response('patient/menstrual_history.html', context, RequestContext(request))
@login_required
def obstetric_history(request, patient_id):
    """
    Obstetric history.

    GET shows the most recent ObstetricHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    The list of previous obstetric entries is always shown alongside.
    """
    patient = get_patient(patient_id)
    try:
        history = ObstetricHistory.objects.filter(patient=patient).latest('id')
    except ObstetricHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = ObstetricHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = ObstetricHistoryForm(instance=history)
    prev_obs = PreviousObstetricHistory.objects.filter(patient=patient).order_by('dob')
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'obh',
        'prev_obs': prev_obs,
    }
    return render_to_response('patient/obstetric_history.html', context, RequestContext(request))
@login_required
def obstetric_add(request, patient_id):
    """
    Add a previous-obstetric-history entry for the patient.

    On a successful save the user is redirected back to the obstetric
    history page.
    """
    patient = get_patient(patient_id)
    if request.method == "POST":
        form = PreviousObstetricHistoryForm(request.POST)
        if form.is_valid():
            # Keep the result of save(commit=False) instead of discarding it,
            # and attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('obstetric_history', args=[patient.id]))
    else:
        form = PreviousObstetricHistoryForm()
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'obh',
    }
    return render_to_response('patient/obstetric_add.html', context, RequestContext(request))
@login_required
def present_medical_history(request, patient_id):
    """
    Present medical history.

    GET shows the most recent PresentMedicalHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    """
    patient = get_patient(patient_id)
    try:
        history = PresentMedicalHistory.objects.filter(patient=patient).latest('id')
    except PresentMedicalHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = PresentMedicalHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = PresentMedicalHistoryForm(instance=history)
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'prh',
    }
    return render_to_response('patient/present_medical_history.html', context, RequestContext(request))
@login_required
def gynaecological_history(request, patient_id):
    """
    Gynaecological history.

    Edits two forms on one page: the latest GynaecologicalHistory record
    and the latest PreviousSurgery record for the patient.  Both forms
    must validate before either record is saved.
    """
    patient = get_patient(patient_id)
    try:
        gyn_history = GynaecologicalHistory.objects.filter(patient=patient).latest('id')
    except GynaecologicalHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        gyn_history = None
    try:
        surgery = PreviousSurgery.objects.filter(patient=patient).latest('id')
    except PreviousSurgery.DoesNotExist:
        surgery = None
    if request.method == "POST":
        form = GynaecologicalHistoryForm(request.POST, prefix='f1', instance=gyn_history)
        form2 = PreviousSurgeryForm(request.POST, prefix='f2', instance=surgery)
        if form.is_valid() and form2.is_valid():
            # Attach the owning patient before each record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            record2 = form2.save(commit=False)
            record2.patient = patient
            record2.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = GynaecologicalHistoryForm(prefix='f1', instance=gyn_history)
        form2 = PreviousSurgeryForm(prefix='f2', instance=surgery)
    context = {
        'patient': patient,
        'form': form,
        'form2': form2,
        'medical_tab': 'gyh',
    }
    return render_to_response('patient/gynaecological_history.html', context, RequestContext(request))
@login_required
def immunization_history(request, patient_id):
    """
    Immunization history.

    GET shows the most recent ImmunizationHistory record for the patient
    (a blank form when none exists); POST creates or updates that record.
    """
    patient = get_patient(patient_id)
    try:
        history = ImmunizationHistory.objects.filter(patient=patient).latest('id')
    except ImmunizationHistory.DoesNotExist:
        # Narrowed from a bare except: only the no-record case is expected.
        history = None
    if request.method == "POST":
        form = ImmunizationHistoryForm(request.POST, instance=history)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
    else:
        form = ImmunizationHistoryForm(instance=history)
    context = {
        'patient': patient,
        'form': form,
        'medical_tab': 'imh',
    }
    return render_to_response('patient/immunization_history.html', context, RequestContext(request))
@login_required
def routine_checkup(request, patient_id):
    """List every routine checkup recorded for the given patient."""
    patient = get_patient(patient_id)
    checkup_list = Routinecheckup.objects.filter(patient=patient)
    return render_to_response(
        'patient/routine_checkup.html',
        {'patient': patient, 'routines': checkup_list},
        RequestContext(request),
    )
@login_required
def routine_checkup_add(request, patient_id, routine_id=None):
    """
    Create a routine checkup, or edit an existing one when ``routine_id``
    is supplied.  On a successful save the user is redirected to the
    routine checkup list.
    """
    patient = get_patient(patient_id)
    if routine_id:
        try:
            routine = Routinecheckup.objects.get(id=routine_id)
        except Routinecheckup.DoesNotExist:
            # An unknown checkup id is a client error, not a server crash.
            raise Http404
    else:
        routine = None
    if request.method == "POST":
        form = RoutinecheckupForm(request.POST, instance=routine)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('routine_checkup', args=[patient.id]))
    else:
        form = RoutinecheckupForm(instance=routine)
    context = {
        'patient': patient,
        'form': form,
        'routine_id': routine_id,
    }
    return render_to_response('patient/routine_checkup_add.html', context, RequestContext(request))
@login_required
def laboratory_test(request, patient_id):
    """List every laboratory test recorded for the given patient."""
    patient = get_patient(patient_id)
    test_list = LaboratoryTest.objects.filter(patient=patient)
    return render_to_response(
        'patient/laboratory_test.html',
        {'patient': patient, 'labs': test_list},
        RequestContext(request),
    )
@login_required
def laboratory_test_add(request, patient_id, lab_id=None):
    """
    Create a laboratory test, or edit an existing one when ``lab_id`` is
    supplied.  On a successful save the user is redirected to the
    laboratory test list.
    """
    patient = get_patient(patient_id)
    if lab_id:
        try:
            lab = LaboratoryTest.objects.get(id=lab_id)
        except LaboratoryTest.DoesNotExist:
            # An unknown test id is a client error, not a server crash.
            raise Http404
    else:
        lab = None
    if request.method == "POST":
        form = LaboratoryTestForm(request.POST, instance=lab)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('laboratory_test', args=[patient.id]))
    else:
        form = LaboratoryTestForm(instance=lab)
    context = {
        'patient': patient,
        'form': form,
        'lab_id': lab_id,
    }
    return render_to_response('patient/laboratory_test_add.html', context, RequestContext(request))
@login_required
def ultrasound_scanning(request, patient_id):
    """List every ultrasound scan recorded for the given patient."""
    patient = get_patient(patient_id)
    scan_list = UltrasoundScanning.objects.filter(patient=patient)
    return render_to_response(
        'patient/ultrasound_scanning.html',
        {'patient': patient, 'scans': scan_list},
        RequestContext(request),
    )
@login_required
def ultrasound_scanning_add(request, patient_id, us_id=None):
    """
    Create an ultrasound scan, or edit an existing one when ``us_id`` is
    supplied.

    An image uploaded beforehand via ``upload_image`` can be linked to the
    scan by POSTing its id in ``temp_img``.  On a successful save the user
    is redirected to the scan list.
    """
    patient = get_patient(patient_id)
    if us_id:
        try:
            scan = UltrasoundScanning.objects.get(id=us_id)
        except UltrasoundScanning.DoesNotExist:
            # An unknown scan id is a client error, not a server crash.
            raise Http404
    else:
        scan = None
    if request.method == "POST":
        form = UltrasoundScanningForm(request.POST, instance=scan)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            scan = record
            # Link a previously uploaded temporary image, if any.
            img_id = request.POST.get('temp_img', None)
            if img_id:
                try:
                    img = UltrasoundScanningImage.objects.get(id=img_id)
                except (UltrasoundScanningImage.DoesNotExist, ValueError):
                    # A stale or malformed image id should not abort the save.
                    img = None
                if img is not None:
                    img.us = scan
                    img.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('ultrasound_scanning', args=[patient.id]))
    else:
        form = UltrasoundScanningForm(instance=scan)
    image = scan.get_image_url() if scan else ''
    context = {
        'patient': patient,
        'form': form,
        'us_id': us_id,
        'image': image,
        'scan': scan,
    }
    return render_to_response('patient/ultrasound_scanning_add.html', context, RequestContext(request))
@csrf_exempt
def upload_image(request):
    """
    AJAX endpoint: store an uploaded ultrasound image.

    Optionally links the image to an existing scan when a ``scan`` id is
    POSTed.  Returns the new image id, or an empty response for non-POST
    requests.
    """
    if request.method == 'POST':
        img = UltrasoundScanningImage(image=request.FILES.get('file_upload'))
        # The original called int() on the raw value and crashed with a
        # TypeError whenever 'scan' was absent from the POST data.
        scan_id = request.POST.get('scan')
        if scan_id:
            try:
                img.us = UltrasoundScanning.objects.get(id=int(scan_id))
            except (ValueError, UltrasoundScanning.DoesNotExist):
                # Malformed or unknown scan id: keep the image unlinked.
                pass
        img.save()
        return HttpResponse(img.id)
    return HttpResponse('')
@login_required
def ultrasound_scanning_img(request, img_id):
    """
    Return an ``<img>`` HTML fragment for the requested ultrasound image,
    or an empty response when the image does not exist or has no file.
    """
    image = ''
    try:
        img = UltrasoundScanningImage.objects.get(id=img_id)
        image = '<img src="%s" alt="Ultrasound Scan Image" class="ultrasound-img" />' % img.image.url
    except (UltrasoundScanningImage.DoesNotExist, ValueError):
        # Narrowed from a bare except: missing record or missing file
        # (FieldFile.url raises ValueError) are the expected failures.
        pass
    return HttpResponse(image)
@login_required
def prescription(request, patient_id):
    """List every prescription recorded for the given patient."""
    patient = get_patient(patient_id)
    prescription_list = Prescription.objects.filter(patient=patient)
    return render_to_response(
        'patient/prescription.html',
        {'patient': patient, 'prescriptions': prescription_list},
        RequestContext(request),
    )
@login_required
def prescription_add(request, patient_id, prs_id=None):
    """
    Create a prescription, or edit an existing one when ``prs_id`` is
    supplied.  On a successful save the user is redirected to the
    prescription list.
    """
    patient = get_patient(patient_id)
    if prs_id:
        try:
            prs = Prescription.objects.get(id=prs_id)
        except Prescription.DoesNotExist:
            # An unknown prescription id is a client error, not a crash.
            raise Http404
    else:
        prs = None
    if request.method == "POST":
        form = PrescriptionForm(request.POST, instance=prs)
        if form.is_valid():
            # Attach the owning patient before the record is written.
            record = form.save(commit=False)
            record.patient = patient
            record.save()
            messages.success(request, _("Successfully save."), fail_silently=True)
            return redirect(reverse('prescription', args=[patient.id]))
    else:
        form = PrescriptionForm(instance=prs)
    context = {
        'patient': patient,
        'form': form,
        'prs_id': prs_id,
    }
    return render_to_response('patient/prescription_add.html', context, RequestContext(request))
@login_required
def patient_info(request, patient_id, current_tab=1):
    """
    Display patient info.

    Remembers the patient id in the session so related views can pick it
    up.  Raises Http404 for an unknown or malformed patient id.
    """
    try:
        patient = PatientInformation.objects.get(id=patient_id)
    except (PatientInformation.DoesNotExist, ValueError):
        # Narrowed from a bare except: only a bad id should 404.
        raise Http404
    # Outside the try so session-backend errors are not masked as 404.
    request.session['patient_id'] = patient.id
    context = {
        'selected_page': 'patient',
        'patient': patient,
        'current_tab': current_tab,
    }
    return render_to_response('patient/patient_info.html', context, RequestContext(request))
@login_required
def patient_add(request):
    """
    Add new patient.

    On a valid POST the patient is created, remembered in the session, and
    the user is sent to the patient-info page.
    """
    if request.method != "POST":
        form = AddPatientForm()
    else:
        form = AddPatientForm(request.POST)
        if form.is_valid():
            new_patient = form.save()
            request.session['patient_id'] = new_patient.id
            return redirect(reverse('patient_info', args=[new_patient.id]))
    context = {
        'selected_page': 'patient',
        'form': form,
    }
    return render_to_response('patient/patient_add.html', context, RequestContext(request))
|
aazhbd/medical_info01
|
patient/views.py
|
Python
|
bsd-3-clause
| 24,670
|
[
"VisIt"
] |
014c81b4c43701d2ca71469758b2cb930e24880008631e5fde8694b975ad5649
|
#!/usr/bin/env python
# Cluster-abundance sampling example for NumCosmo: build a cosmological
# model, sample a mock cluster number-count catalog, and dump it to disk.
try:
    import gi
    # Pin the introspected API versions before the first import of the
    # repositories below.
    gi.require_version('NumCosmo', '1.0')
    gi.require_version('NumCosmoMath', '1.0')
except:
    # Deliberate best-effort: when gi is unavailable the imports below will
    # fail with a clearer error anyway.
    pass
from math import *
import matplotlib.pyplot as plt
from gi.repository import GObject
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
#
# Initializing the library objects, this must be called before
# any other library function.
#
Ncm.cfg_init ()
#
# New homogeneous and isotropic cosmological model NcHICosmoDEXcdm
# (dark energy with constant equation of state w).
#
cosmo = Nc.HICosmo.new_from_name (Nc.HICosmo, "NcHICosmoDEXcdm")
#
# New homogeneous and isotropic reionization object.
#
reion = Nc.HIReionCamb.new ()
#
# New homogeneous and isotropic primordial power-spectrum object.
#
prim = Nc.HIPrimPowerLaw.new ()
#
# Adding submodels to the main cosmological model.
#
cosmo.add_submodel (reion)
cosmo.add_submodel (prim)
#
# New cosmological distance object optimized to perform calculations
# up to redshift 2.0.
#
dist = Nc.Distance.new (2.0)
#
# New transfer function 'NcTransferFuncEH' using the Eisenstein & Hu
# fitting formula.
#
tf = Nc.TransferFunc.new_from_name ("NcTransferFuncEH")
#
# New linear matter power spectrum object based on the EH transfer
# function, with the required wavenumber range set explicitly.
#
psml = Nc.PowspecMLTransfer.new (tf)
psml.require_kmin (1.0e-3)
psml.require_kmax (1.0e3)
#
# Apply a tophat filter to the psml object, set best output interval.
#
psf = Ncm.PowspecFilter.new (psml, Ncm.PowspecFilterType.TOPHAT)
psf.set_best_lnr0 ()
#
# New multiplicity function (Bocquet et al. fit, DM-only simulations,
# Delta = 200 relative to the critical density).
#
#mulf = Nc.MultiplicityFuncTinker.new ()
mulf = Nc.MultiplicityFuncBocquet.new ()
mulf.set_mdef (Nc.MultiplicityFuncMassDef.CRITICAL)
mulf.set_Delta (200.0)
mulf.set_sim (Nc.MultiplicityFuncBocquetSim.DM)
#
# New mass function object using the objects defined above.
#
mf = Nc.HaloMassFunction.new (dist, psf, mulf)
#
# New Cluster Mass object using a log-normal mass-observable distribution
# restricted to the observed-mass range below.
#
lnMobs_min = log (1.0e14)
lnMobs_max = log (1.0e16)
cluster_m = Nc.ClusterMass.new_from_name ("NcClusterMassLnnormal{'lnMobs-min':<%20.15e>, 'lnMobs-max':<%20.15e>}" % (lnMobs_min, lnMobs_max))
#
# New Cluster Redshift object using a global gaussian photo-z
# distribution (zero bias, sigma0 = 0.03).
#
z_min = 0.0
z_max = 0.7
cluster_z = Nc.ClusterRedshift.new_from_name ("NcClusterPhotozGaussGlobal{'pz-min':<%20.15e>, 'pz-max':<%20.15e>, 'z-bias':<0.0>, 'sigma0':<0.03>}" % (z_min, z_max))
#
# New Cluster abundance object that uses all objects above.
#
cad = Nc.ClusterAbundance.new (mf, None)
#
# New NcmData object for number count calculations.
#
ncdata = Nc.DataClusterNCount.new (cad)
#
# Creating a new Modelset holding the cosmological model plus the
# redshift and mass-observable distribution models.
#
mset = Ncm.MSet.new_array ([cosmo, cluster_z, cluster_m])
#
# Setting values for the cosmological model, those not set stay in the
# default values. Remember to use the _orig_ version to set the original
# parameters when a reparametrization is used.
#
cosmo.props.H0 = 70.0
cosmo.props.Omegab = 0.05
cosmo.props.Omegac = 0.25
cosmo.props.Omegax = 0.70
cosmo.props.Tgamma0 = 2.72
cosmo.props.w = -1.0
#
# Setting values for the mass distribution model.
#
cluster_m.props.bias = 0.0
cluster_m.props.sigma = 0.2
#
# Printing the parameters used.
#
mset.pretty_log ()
#
# Creates a new random number generator from a pool named "example_ca_sampling"
# it implicitly creates this pool.
#
rng = Ncm.RNG.pool_get ("example_ca_sampling");
#
# Since ncdata is currently empty, run init_from_sampling
# using the objects above and a survey area of 270 deg^2
# (converted to steradians below).
#
ncdata.init_from_sampling (mset, 270 * (pi / 180.0)**2, rng)
#
# Save to a fits file.
#
ncdata.catalog_save ("ca_data.fits", True)
#
# Generate another sample by resampling from mset.
#
ncdata.resample (mset, rng)
#
# Checking if it has the mass truth table, if so gets it.
#
has_lnM_true = ncdata.has_lnM_true ()
print ("# Has mass truth table = ", has_lnM_true)
lnM_true = None
if ncdata.has_lnM_true ():
    lnM_true = ncdata.get_lnM_true ()
#
# Checking if it has the redshift truth table, if so gets it.
#
has_z_true = ncdata.has_z_true ()
print ("# Has redshift truth table = ", has_z_true)
z_true = None
if ncdata.has_z_true ():
    z_true = ncdata.get_z_true ()
#
# Gets the mass observables and its parameters.
#
lnM_obs = ncdata.get_lnM_obs ()
lnM_obs_params = ncdata.get_lnM_obs_params ()
#
# Gets the redshift observables.
#
z_obs = ncdata.get_z_obs ()
z_obs_params = ncdata.get_z_obs_params ()
#
# Write the catalog to a plain-text file: one row per cluster with the
# optional truth values followed by the observables and their parameters.
#
nobjects = ncdata.get_len ()
print ("# There are ", nobjects, " objects in the catalog (%d, %d)" % (lnM_obs.col_len (), z_obs.col_len ()))
f = open ('ca_data.dat', 'w')
for i in range (0, nobjects):
    row = "%d " % (i)
    if has_lnM_true:
        row += "%f " % (lnM_true.get (i))
    if has_z_true:
        row += "%f " % (z_true.get (i))
    for j in range (0, lnM_obs.row_len ()):
        row += "%f " % (lnM_obs.get (i, j))
    for j in range (0, z_obs.row_len ()):
        row += "%f " % (z_obs.get (i, j))
    if lnM_obs_params:
        for j in range (0, lnM_obs_params.row_len ()):
            row += "%f " % (lnM_obs_params.get (i, j))
    if z_obs_params:
        for j in range (0, z_obs_params.row_len ()):
            row += "%f " % (z_obs_params.get (i, j))
    f.write (row)
    f.write ("\n")
f.close ()
|
NumCosmo/NumCosmo
|
examples/example_ca_sampling.py
|
Python
|
gpl-3.0
| 5,324
|
[
"Gaussian"
] |
a31786018575aa73cc795d76a315a2e4ff3cfa8c223cc4c94a5ca471abbab315
|
"""
Single page performance tests for LMS.
"""
from bok_choy.web_app_test import with_cache
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.login import LoginPage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
from common.test.acceptance.tests.helpers import UniqueCourseTest, load_data_str
from nose.plugins.attrib import attr
@attr(har_mode='explicit')
class LmsPerformanceTest(UniqueCourseTest):
    """
    Base class to capture LMS performance with HTTP Archives.

    Each test visits a single LMS page and records a HAR file for it via
    the test harness's ``har_capturer`` (HAR capture is explicit, per the
    ``har_mode`` attribute above).
    """
    # Fixed credentials for the auto-auth user created in setUp.
    username = 'test_student'
    email = 'student101@example.com'

    def setUp(self):
        """
        Install a course fixture with sections, problems, updates and
        HTML blocks, then log in as the test student enrolled in it.
        """
        super(LmsPerformanceTest, self).setUp()
        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_update(CourseUpdateDesc(date='January 29, 2014', content='Test course update1'))
        course_fix.add_update(CourseUpdateDesc(date='January 30, 2014', content='Test course update2'))
        course_fix.add_update(CourseUpdateDesc(date='January 31, 2014', content='Test course update3'))
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
                    XBlockFixtureDesc('html', 'Test HTML', data="<html>Html child text</html>"),
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('html', 'Html Child', data="<html>Html child text</html>")
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 3').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3')
                )
            )
        ).install()
        # Create the user and enroll them in the course in one request.
        AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()

    def _make_har_file(self, page):
        """
        Visit *page* and record a HAR file named after the page class and
        the course number.
        """
        har_name = '{page}_{course}'.format(page=type(page).__name__, course=self.course_info['number'])
        # Capture must start before the visit so the page load is recorded.
        self.har_capturer.add_page(self.browser, har_name)
        page.visit()
        self.har_capturer.save_har(self.browser, har_name)

    @with_cache
    def test_visit_coursware(self):
        """
        Produce a HAR for loading the Courseware page.
        """
        # NOTE(review): method name misspells "courseware" — kept as-is
        # since renaming would change the recorded test id.
        courseware_page = CoursewarePage(self.browser, self.course_id)
        self._make_har_file(courseware_page)

    @with_cache
    def test_visit_dashboard(self):
        """
        Produce a HAR for loading the Dashboard page.
        """
        dashboard_page = DashboardPage(self.browser)
        self._make_har_file(dashboard_page)

    @with_cache
    def test_visit_course_info(self):
        """
        Produce a HAR for loading the Course Info page.
        """
        course_info_page = CourseInfoPage(self.browser, self.course_id)
        self._make_har_file(course_info_page)

    @with_cache
    def test_visit_login_page(self):
        """
        Produce a HAR for loading the Login page.
        """
        login_page = LoginPage(self.browser)
        # Logout previously logged in user to be able to see Login page.
        LogoutPage(self.browser).visit()
        self._make_har_file(login_page)

    @with_cache
    def test_visit_progress_page(self):
        """
        Produce a HAR for loading the Progress page.
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        self._make_har_file(progress_page)
|
louyihua/edx-platform
|
common/test/acceptance/performance/test_lms_performance.py
|
Python
|
agpl-3.0
| 4,552
|
[
"VisIt"
] |
325bdab47dd413994a4f7a06d7128a0440c1c0c51ae41a7ce8993c545d483afd
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.