repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
YangSongzhou/django
|
tests/utils_tests/test_jslex.py
|
153
|
9837
|
# -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
LEX_CASES = [
# ids
("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),
# numbers
("123 1.234 0.123e-3 0 1E+40 1e1 .123", [
"dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40",
"dnum 1e1", "dnum .123",
]),
("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
("0xa123ghi", ["hnum 0xa123", "id ghi"]),
# keywords
("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
("true true_enough", ["reserved true", "id true_enough"]),
# strings
(''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
(r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """, [
r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""",
r'''string "'"''', r"""string '\''""", r'''string "\""'''
]),
(r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
# comments
("a//b", ["id a", "linecomment //b"]),
("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
# punctuation
("a+++b", ["id a", "punct ++", "punct +", "id b"]),
# regex
(r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
(r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
(r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
(r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
# next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
"punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
# Various "illegal" regexes that are valid according to the std.
(r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
# Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409 # NOQA
(r"""/\[/""", [r"""regex /\[/"""]),
(r"""/[i]/""", [r"""regex /[i]/"""]),
(r"""/[\]]/""", [r"""regex /[\]]/"""]),
(r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
(r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
(r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
(r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;"
]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;",
"id str", "punct =", """string '"'""", "punct ;",
]),
(r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this",
"punct .", "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"',
"punct )",
"punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
r'string "\")"', "punct ;"]),
]
def make_function(input, toks):
    # Build a test method asserting that lexing ``input`` produces exactly
    # the token list ``toks`` (whitespace tokens are filtered out).
    def test_func(self):
        lexer = JsLexer()
        result = ["%s %s" % (name, tok) for name, tok in lexer.lex(input) if name != 'ws']
        self.assertListEqual(result, toks)
    return test_func

# Attach one generated test method per LEX_CASES entry, so each lexer
# case reports as its own test.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
    # Test methods are attached dynamically below, one per GETTEXT_CASES pair.
    pass


def make_function(js, c):
    # Build a test method asserting prepare_js_for_gettext(js) == c.
    def test_func(self):
        self.assertMultiLineEqual(prepare_js_for_gettext(js), c)
    return test_func

for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
|
bsd-3-clause
|
ovnicraft/edx-platform
|
common/lib/chem/chem/chemcalc.py
|
162
|
14775
|
from __future__ import division
from fractions import Fraction
from pyparsing import (Literal, StringEnd, OneOrMore, ParseException)
import nltk
from nltk.tree import Tree
# Recognised reaction arrows.  Order matters when splitting an equation:
# '<->' must be tried before '->' (see split_on_arrow).
ARROWS = ('<->', '->')

## Defines a simple pyparsing tokenizer for chemical equations
elements = ['Ac', 'Ag', 'Al', 'Am', 'Ar', 'As', 'At', 'Au', 'B', 'Ba', 'Be',
            'Bh', 'Bi', 'Bk', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cf', 'Cl', 'Cm',
            'Cn', 'Co', 'Cr', 'Cs', 'Cu', 'Db', 'Ds', 'Dy', 'Er', 'Es', 'Eu',
            'F', 'Fe', 'Fl', 'Fm', 'Fr', 'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf',
            'Hg', 'Ho', 'Hs', 'I', 'In', 'Ir', 'K', 'Kr', 'La', 'Li', 'Lr',
            'Lu', 'Lv', 'Md', 'Mg', 'Mn', 'Mo', 'Mt', 'N', 'Na', 'Nb', 'Nd',
            'Ne', 'Ni', 'No', 'Np', 'O', 'Os', 'P', 'Pa', 'Pb', 'Pd', 'Pm',
            'Po', 'Pr', 'Pt', 'Pu', 'Ra', 'Rb', 'Re', 'Rf', 'Rg', 'Rh', 'Rn',
            'Ru', 'S', 'Sb', 'Sc', 'Se', 'Sg', 'Si', 'Sm', 'Sn', 'Sr', 'Ta',
            'Tb', 'Tc', 'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'Uuo', 'Uup',
            'Uus', 'Uut', 'V', 'W', 'Xe', 'Y', 'Yb', 'Zn', 'Zr']
# NOTE(review): Python 2 semantics -- map() must return a list for the
# ``elements + digits + ...`` concatenation below, and reduce() is used as
# a builtin; this module is not Python 3 compatible as written.
digits = map(str, range(10))
symbols = list("[](){}^+-/")
phases = ["(s)", "(l)", "(g)", "(aq)"]
# One big alternation over every terminal token ('^' is pyparsing's Or).
tokens = reduce(lambda a, b: a ^ b, map(Literal, elements + digits + symbols + phases))
tokenizer = OneOrMore(tokens) + StringEnd()
def _orjoin(l):
return "'" + "' | '".join(l) + "'"
## Defines an NLTK parser for tokenized expressions
# Context-free grammar for one side of a chemical equation: one or more
# '+'-separated multimolecules, each an optional (possibly fractional)
# leading count followed by a molecule with an optional phase annotation.
# Ion charges are written with '^', e.g. "Na^+" or "SO4^2-".
grammar = """
S -> multimolecule | multimolecule '+' S
multimolecule -> count molecule | molecule
count -> number | number '/' number
molecule -> unphased | unphased phase
unphased -> group | paren_group_round | paren_group_square
element -> """ + _orjoin(elements) + """
digit -> """ + _orjoin(digits) + """
phase -> """ + _orjoin(phases) + """
number -> digit | digit number
group -> suffixed | suffixed group
paren_group_round -> '(' group ')'
paren_group_square -> '[' group ']'
plus_minus -> '+' | '-'
number_suffix -> number
ion_suffix -> '^' number plus_minus | '^' plus_minus
suffix -> number_suffix | number_suffix ion_suffix | ion_suffix
unsuffixed -> element | paren_group_round | paren_group_square
suffixed -> unsuffixed | unsuffixed suffix
"""
# nltk.parse_cfg is the pre-NLTK-3 API for building a CFG from a string.
parser = nltk.ChartParser(nltk.parse_cfg(grammar))
def _clean_parse_tree(tree):
    ''' The parse tree contains a lot of redundant
    nodes. E.g. paren_groups have groups as children, etc. This will
    clean up the tree.
    '''
    def unparse_number(n):
        ''' Go from a number parse tree to a number '''
        # Concatenate digit leaves; the grammar right-recurses on ``number``.
        if len(n) == 1:
            rv = n[0][0]
        else:
            rv = n[0][0] + unparse_number(n[1])
        return rv

    def null_tag(n):
        ''' Remove a tag '''
        return n[0]

    def ion_suffix(n):
        '''1. "if" part handles special case
        2. "else" part is general behaviour '''
        if n[1:][0].node == 'number' and n[1:][0][0][0] == '1':
            # if suffix is explicitly 1, like ^1-
            # strip 1, leave only sign: ^-
            return nltk.tree.Tree(n.node, n[2:])
        else:
            return nltk.tree.Tree(n.node, n[1:])

    # Per-tag rewrite rules applied at this node before recursing.
    dispatch = {'number': lambda x: nltk.tree.Tree("number", [unparse_number(x)]),
                'unphased': null_tag,
                'unsuffixed': null_tag,
                'number_suffix': lambda x: nltk.tree.Tree('number_suffix', [unparse_number(x[0])]),
                # keep a multi-child 'suffixed' node, otherwise unwrap it
                'suffixed': lambda x: len(x) > 1 and x or x[0],
                'ion_suffix': ion_suffix,
                # drop the literal bracket tokens, keep the inner group
                'paren_group_square': lambda x: nltk.tree.Tree(x.node, x[1]),
                'paren_group_round': lambda x: nltk.tree.Tree(x.node, x[1])}

    if isinstance(tree, str):
        # Leaf token: nothing to clean.
        return tree

    old_node = None
    ## This loop means that if a node is processed, and returns a child,
    ## the child will be processed.
    while tree.node in dispatch and tree.node != old_node:
        old_node = tree.node
        tree = dispatch[tree.node](tree)

    # Recurse into (possibly rewritten) children.
    children = []
    for child in tree:
        child = _clean_parse_tree(child)
        children.append(child)

    tree = nltk.tree.Tree(tree.node, children)
    return tree
def _merge_children(tree, tags):
    ''' nltk, by documentation, cannot do arbitrary length
    groups. Instead of:
    (group 1 2 3 4)
    It has to handle this recursively:
    (group 1 (group 2 (group 3 (group 4))))
    We do the cleanup of converting from the latter to the former.
    '''
    if tree is None:
        # There was a problem--shouldn't have empty trees (NOTE: see this with input e.g. 'H2O(', or 'Xe+').
        # Haven't grokked the code to tell if this is indeed the right thing to do.
        raise ParseException("Shouldn't have empty trees")

    if isinstance(tree, str):
        # Leaf token: nothing to merge.
        return tree

    merged_children = []
    done = False
    #print '00000', tree
    ## Merge current tag
    # Repeatedly splice children whose tag equals this node's tag (when that
    # tag is in ``tags``) directly into this node, until a full pass makes
    # no change.
    while not done:
        done = True
        for child in tree:
            if isinstance(child, nltk.tree.Tree) and child.node == tree.node and tree.node in tags:
                merged_children = merged_children + list(child)
                done = False
            else:
                merged_children = merged_children + [child]
        tree = nltk.tree.Tree(tree.node, merged_children)
        merged_children = []
    #print '======',tree

    # And recurse
    children = []
    for child in tree:
        children.append(_merge_children(child, tags))
    #return tree
    return nltk.tree.Tree(tree.node, children)
def _render_to_html(tree):
    ''' Renders a cleaned tree to HTML '''
    def molecule_count(tree, children):
        # If an integer, return that integer
        if len(tree) == 1:
            return tree[0][0]
        # If a fraction, return the fraction
        if len(tree) == 3:
            return " <sup>{num}</sup>⁄<sub>{den}</sub> ".format(num=tree[0][0], den=tree[2][0])
        return "Error"

    def subscript(tree, children):
        return "<sub>{sub}</sub>".format(sub=children)

    def superscript(tree, children):
        return "<sup>{sup}</sup>".format(sup=children)

    def round_brackets(tree, children):
        return "({insider})".format(insider=children)

    def square_brackets(tree, children):
        return "[{insider}]".format(insider=children)

    # node tag -> HTML renderer; tags not listed simply concatenate children.
    dispatch = {'count': molecule_count,
                'number_suffix': subscript,
                'ion_suffix': superscript,
                'paren_group_round': round_brackets,
                'paren_group_square': square_brackets}

    if isinstance(tree, str):
        # Leaf token: emit verbatim.
        return tree
    else:
        children = "".join(map(_render_to_html, tree))
        if tree.node in dispatch:
            return dispatch[tree.node](tree, children)
        else:
            # Unhandled tags: strip spaces introduced by count rendering.
            return children.replace(' ', '')
def render_to_html(eq):
    '''
    Render a chemical equation string to html.

    Renders each molecule separately, and returns invalid input wrapped in a <span>.
    '''
    # Pretty unicode replacements for the textual arrows.  Unknown arrow
    # strings (none today) pass through unchanged, so adding new arrow
    # types cannot explode here.
    pretty_arrows = {'->': u'\u2192', '<->': u'\u2194'}

    def render_expression(ex):
        """Render one side of the equation; invalid input becomes an error span."""
        try:
            return _render_to_html(_get_final_tree(ex))
        except ParseException:
            return '<span class="inline-error inline">{0}</span>'.format(ex)

    left, arrow, right = split_on_arrow(eq)
    if arrow == '':
        # only one side, no arrow
        body = render_expression(left)
    else:
        body = render_expression(left) + pretty_arrows.get(arrow, arrow) + render_expression(right)
    return u'<span class="math">{0}</span>'.format(body)
def _get_final_tree(s):
    '''
    Return final tree after merge and clean.

    Raises pyparsing.ParseException if s is invalid.
    '''
    # Tokenize with pyparsing, parse with the NLTK chart parser, then
    # flatten the grammar's recursive S/group nesting and strip the
    # redundant wrapper nodes.
    tokenized = tokenizer.parseString(s)
    parsed = parser.parse(tokenized)
    merged = _merge_children(parsed, {'S', 'group'})
    final = _clean_parse_tree(merged)
    return final
def _check_equality(tuple1, tuple2):
''' return True if tuples of multimolecules are equal '''
list1 = list(tuple1)
list2 = list(tuple2)
# Hypo: trees where are levels count+molecule vs just molecule
# cannot be sorted properly (tested on test_complex_additivity)
# But without factors and phases sorting seems to work.
# Also for lists of multimolecules without factors and phases
# sorting seems to work fine.
list1.sort()
list2.sort()
return list1 == list2
def compare_chemical_expression(s1, s2, ignore_state=False):
    '''Return True when the two chemical expressions are exactly equal.

    Equality means dividing one by the other yields a factor of exactly 1;
    see divide_chemical_expression.
    '''
    ratio = divide_chemical_expression(s1, s2, ignore_state)
    return ratio == 1
def divide_chemical_expression(s1, s2, ignore_state=False):
    '''Compare two chemical expressions for equivalence up to a multiplicative factor:

    - If they are not the same chemicals, returns False.
    - If they are the same, "divide" s1 by s2 to returns a factor x such that s1 / s2 == x as a Fraction object.
    - if ignore_state is True, ignores phases when doing the comparison.

    Examples:
        divide_chemical_expression("H2O", "3H2O") -> Fraction(1,3)
        divide_chemical_expression("3H2O", "H2O") -> 3  # actually Fraction(3, 1), but compares == to 3.
        divide_chemical_expression("2H2O(s) + 2CO2", "H2O(s)+CO2") -> 2
        divide_chemical_expression("H2O(s) + CO2", "3H2O(s)+2CO2") -> False

    Implementation sketch:
        - extract factors and phases to standalone lists,
        - compare expressions without factors and phases,
        - divide lists of factors for each other and check
          for equality of every element in list,
        - return result of factor division
    '''
    # parsed final trees
    treedic = {}
    treedic['1'] = _get_final_tree(s1)
    treedic['2'] = _get_final_tree(s2)

    # strip phases and factors
    # collect factors in list
    for i in ('1', '2'):
        treedic[i + ' cleaned_mm_list'] = []
        treedic[i + ' factors'] = []
        treedic[i + ' phases'] = []
        for el in treedic[i].subtrees(filter=lambda t: t.node == 'multimolecule'):
            count_subtree = [t for t in el.subtrees() if t.node == 'count']
            group_subtree = [t for t in el.subtrees() if t.node == 'group']
            phase_subtree = [t for t in el.subtrees() if t.node == 'phase']
            if count_subtree:
                if len(count_subtree[0]) > 1:
                    # Fractional coefficient such as "1/2H2O".  Build an
                    # exact Fraction from the two integers: the previous
                    # float division made e.g. 1/3 an inexact binary float,
                    # so the returned value was a Fraction with a huge
                    # denominator instead of the documented Fraction(1, 3).
                    treedic[i + ' factors'].append(
                        Fraction(int(count_subtree[0][0][0]),
                                 int(count_subtree[0][2][0])))
                else:
                    treedic[i + ' factors'].append(
                        Fraction(int(count_subtree[0][0][0])))
            else:
                # implicit coefficient of 1
                treedic[i + ' factors'].append(Fraction(1))
            if phase_subtree:
                treedic[i + ' phases'].append(phase_subtree[0][0])
            else:
                treedic[i + ' phases'].append(' ')
            treedic[i + ' cleaned_mm_list'].append(
                Tree('multimolecule', [Tree('molecule', group_subtree)]))

    # order of factors and phases must mirror the order of multimolecules,
    # use 'decorate, sort, undecorate' pattern
    treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'] = zip(
        *sorted(zip(treedic['1 cleaned_mm_list'], treedic['1 factors'], treedic['1 phases'])))
    treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'] = zip(
        *sorted(zip(treedic['2 cleaned_mm_list'], treedic['2 factors'], treedic['2 phases'])))

    # check if expressions are correct without factors
    if not _check_equality(treedic['1 cleaned_mm_list'], treedic['2 cleaned_mm_list']):
        return False

    # phases are ruled by ignore_state flag
    if not ignore_state:  # phases matter
        if treedic['1 phases'] != treedic['2 phases']:
            return False

    # all pairwise ratios must equal the first one, exactly
    ratio = treedic['1 factors'][0] / treedic['2 factors'][0]
    if any(x / y != ratio
           for (x, y) in zip(treedic['1 factors'], treedic['2 factors'])):
        # factors are not proportional
        return False
    # return the exact ratio (a Fraction)
    return ratio
def split_on_arrow(eq):
    """
    Split a string on an arrow. Returns left, arrow, right. If there is no arrow, returns the
    entire eq in left, and '' in arrow and right.

    Return left, arrow, right.
    """
    # order matters -- need to try <-> first
    for candidate in ARROWS:
        before, found, after = eq.partition(candidate)
        if found:
            return before, found, after
    # no arrow present
    return eq, '', ''
def chemical_equations_equal(eq1, eq2, exact=False):
    """
    Check whether two chemical equations are the same. (equations have arrows)

    If exact is False, then they are considered equal if they differ by a
    constant factor.

    arrows matter: -> and <-> are different.

    e.g.
    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 -> H2O2') -> True
    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + 2H2 -> H2O2') -> False
    chemical_equations_equal('H2 + O2 -> H2O2', 'O2 + H2 <-> H2O2') -> False
    chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2') -> True
    chemical_equations_equal('H2 + O2 -> H2O2', '2 H2 + 2 O2 -> 2 H2O2', exact=True) -> False

    If there's a syntax error, we return False.
    """
    left1, arrow1, right1 = split_on_arrow(eq1)
    left2, arrow2, right2 = split_on_arrow(eq2)
    # Both inputs must actually be equations.
    # TODO: may want to be able to give student helpful feedback about why things didn't work.
    if arrow1 == '' or arrow2 == '':
        return False
    if arrow1 != arrow2:
        # arrows don't match
        return False
    try:
        ratio_left = divide_chemical_expression(left1, left2)
        if not ratio_left:
            # left sides don't match
            return False
        ratio_right = divide_chemical_expression(right1, right2)
        if not ratio_right:
            # right sides don't match
            return False
        if ratio_left != ratio_right:
            # factors don't match (molecule counts to add up)
            return False
        # exact mode additionally demands identical coefficients (factor 1)
        return not (exact and ratio_left != 1)
    except ParseException:
        # Don't want external users to have to deal with parsing exceptions. Just return False.
        return False
|
agpl-3.0
|
tempbottle/pulsar
|
pulsar/async/futures.py
|
5
|
11508
|
from collections import Mapping
from inspect import isgeneratorfunction
from functools import wraps, partial
from asyncio import Future, CancelledError, TimeoutError, async, sleep
from .consts import MAX_ASYNC_WHILE
from .access import get_event_loop, LOGGER, isfuture, is_async
__all__ = ['maybe_async',
'run_in_loop',
'add_errback',
'add_callback',
'task_callback',
'multi_async',
'as_coroutine',
'task',
'async',
'async_while',
'chain_future',
'future_result_exc',
'AsyncObject']
def chain_future(future, callback=None, errback=None, next=None):
    '''Chain a :class:`~asyncio.Future` to an existing ``future``.

    This function `chain` the ``next`` future to an existing ``future``.
    When the input ``future`` receive a result the optional
    ``callback`` is executed and its result set as the results of ``next``.
    If an exception occurs the optional ``errback`` is executed.

    :param future: the original :class:`~asyncio.Future` (can be a coroutine)
    :param callback: optional callback to execute on the result of ``future``
    :param errback: optional callback to execute on the exception of ``future``
    :param next: optional :class:`~asyncio.Future` to chain.
        If not provided a new future is created
    :return: the future ``next``
    '''
    future = async(future)
    if next is None:
        next = Future(loop=future._loop)

    def _callback(fut):
        try:
            try:
                result = future.result()
            except Exception as exc:
                if errback:
                    # delegate error handling to errback; its return value
                    # becomes the result of ``next``
                    result = errback(exc)
                    exc = None
                else:
                    raise
            else:
                if callback:
                    result = callback(result)
        except Exception as exc:
            # either the original error (no errback) or a failure raised
            # inside callback/errback: propagate to ``next``
            next.set_exception(exc)
        else:
            if isinstance(result, Future):
                # the result is itself asynchronous: keep chaining
                chain_future(result, next=next)
            else:
                next.set_result(result)

    future.add_done_callback(_callback)
    return next
def as_exception(future):
    """Map a *done* future to the exception it carries.

    Returns the stored exception for a failed future, a fresh
    ``CancelledError`` for a cancelled one, and ``None`` on success.
    """
    if future._exception:
        return future.exception()
    if future.cancelled():
        return CancelledError()
    return None
def add_errback(future, callback, loop=None):
'''Add a ``callback`` to a ``future`` executed only if an exception
or cancellation has occurred.'''
def _error_back(fut):
if fut._exception:
callback(fut.exception())
elif fut.cancelled():
callback(CancelledError())
future = async(future, loop=None)
future.add_done_callback(_error_back)
return future
def add_callback(future, callback, loop=None):
'''Add a ``callback`` to ``future`` executed only if an exception
has not occurred.'''
def _call_back(fut):
if not (fut._exception or fut.cancelled()):
callback(fut.result())
future = async(future, loop=None)
future.add_done_callback(_call_back)
return future
def future_result_exc(future):
    '''Return a two elements tuple containing the future result and exception.

    The :class:`.Future` must be ``done``.  Exactly one element of the
    pair is meaningful: ``(None, exc)`` for cancelled or failed futures,
    ``(result, None)`` for successful ones.
    '''
    if future.cancelled():
        exc = CancelledError()
    elif future._exception:
        exc = future.exception()
    else:
        return future.result(), None
    return None, exc
def task_callback(callback):
    # Adapt ``callback`` for use as a done-callback: it is invoked with the
    # future's result (re-raising any stored exception) and its return value
    # is scheduled as a task on the future's loop.
    @wraps(callback)
    def _task_callback(fut):
        return async(callback(fut.result()), fut._loop)
    return _task_callback
def maybe_async(value, loop=None):
    '''Handle a possible asynchronous ``value``.

    Return an :ref:`asynchronous instance <tutorials-coroutine>`
    only if ``value`` is a generator, a :class:`.Future`.

    :parameter value: the value to convert to an asynchronous instance
        if it needs to.
    :parameter loop: optional :class:`.EventLoop`.
    :return: a :class:`.Future` or a synchronous ``value``.
    '''
    try:
        return async(value, loop=loop)
    except TypeError:
        # ``async`` raises TypeError for plain synchronous values:
        # hand them back unchanged.
        return value
def as_coroutine(value):
    # Coroutine helper: wait for ``value`` when it is asynchronous,
    # otherwise return it unchanged.  Must be driven with ``yield from``.
    if is_async(value):
        value = yield from value
    return value
def task(function):
    '''Thread-safe decorator to run a ``function`` in an event loop.

    :param function: a callable which can return coroutines,
        :class:`.asyncio.Future` or synchronous data. Can be a method of
        an :ref:`async object <async-object>`, in which case the loop
        is given by the object ``_loop`` attribute.
    :return: a :class:`~asyncio.Future`
    '''
    if isgeneratorfunction(function):
        # already a coroutine (generator) function: use as-is
        wrapper = function
    else:
        # wrap a plain callable in a generator so its return value can be
        # waited on.  NOTE(review): assumes a truthy result is awaitable --
        # a truthy plain value would break the ``yield from``; confirm.
        def wrapper(*args, **kw):
            res = function(*args, **kw)
            if res:
                res = yield from res
            return res

    @wraps(function)
    def _(*args, **kwargs):
        # when decorating a method of an async object, run on its loop
        loop = getattr(args[0], '_loop', None) if args else None
        coro = wrapper(*args, **kwargs)
        return async(coro, loop=loop)

    return _
def run_in_loop(_loop, callable, *args, **kwargs):
    '''Run ``callable`` in the event ``loop`` thread, thread safe.

    :param _loop: The event loop where ``callable`` is run
    :return: a :class:`~asyncio.Future`
    '''
    waiter = Future(loop=_loop)

    def _():
        # executed inside the loop's own thread
        try:
            result = callable(*args, **kwargs)
        except Exception as exc:
            waiter.set_exception(exc)
        else:
            try:
                future = async(result, loop=_loop)
            except TypeError:
                # synchronous result: resolve the waiter immediately
                waiter.set_result(result)
            else:
                # asynchronous result: resolve the waiter when it completes
                chain_future(future, next=waiter)

    _loop.call_soon_threadsafe(_)
    return waiter
def async_while(timeout, while_clause, *args):
    '''The asynchronous equivalent of ``while while_clause(*args):``

    Use this function within a :ref:`coroutine <coroutine>` when you need
    to wait ``while_clause`` to be satisfied.

    :parameter timeout: a timeout in seconds after which this function stop.
    :parameter while_clause: while clause callable.
    :parameter args: optional arguments to pass to the ``while_clause``
        callable.
    :return: A :class:`.Future`.
    '''
    loop = get_event_loop()
    start = loop.time()
    di = 0.1      # increment added to the polling interval on each pass
    interval = 0
    result = while_clause(*args)
    while result:
        # back off gradually, capped at MAX_ASYNC_WHILE seconds per sleep
        interval = min(interval + di, MAX_ASYNC_WHILE)
        try:
            yield from sleep(interval, loop=loop)
        except TimeoutError:
            pass
        if timeout and loop.time() - start >= timeout:
            # give up: return the last (truthy) clause value
            break
        result = while_clause(*args)
    return result
# ############################################################## Bench
class Bench:
    '''Execute a given number of asynchronous requests and wait for results.
    '''
    start = None
    '''The :meth:`~asyncio.BaseEventLoop.time` when the execution starts'''
    finish = None
    '''The :meth:`~asyncio.BaseEventLoop.time` when the execution finishes'''
    result = ()
    '''Tuple of results'''

    def __init__(self, times, loop=None):
        self._loop = loop or get_event_loop()
        self.times = times

    @property
    def taken(self):
        '''The total time taken for execution
        '''
        if self.finish:
            return self.finish - self.start

    def __call__(self, func, *args, **kwargs):
        # Fire ``times`` invocations of ``func`` and gather them into one
        # multi future; the chained future resolves to ``self`` once all
        # invocations are done (see _done).
        self.start = self._loop.time()
        data = (func(*args, **kwargs) for t in range(self.times))
        self.result = multi_async(data, loop=self._loop)
        return chain_future(self.result, callback=self._done)

    def _done(self, result):
        # callback of the multi future: record timing and final results
        self.finish = self._loop.time()
        self.result = tuple(result)
        return self
# ############################################################## AsyncObject
class AsyncObject(object):
    '''Interface for :ref:`async objects <async-object>`

    .. attribute:: _loop

        The :ref:`event loop <asyncio-event-loop>` associated with this object

    .. attribute:: _logger

        Optional logger instance, used by the :attr:`logger` attribute
    '''
    _logger = None
    _loop = None

    @property
    def logger(self):
        '''The logger for this object.

        It is either the :attr:`_logger` or the logger of the :attr:`_loop`
        '''
        return self._logger or getattr(self._loop, 'logger', LOGGER)

    def timeit(self, method, times, *args, **kwargs):
        '''Useful utility for benchmarking an asynchronous ``method``.

        :param method: the name of the ``method`` to execute
        :param times: number of times to execute the ``method``
        :param args: positional arguments to pass to the ``method``
        :param kwargs: key-valued arguments to pass to the ``method``
        :return: a :class:`~asyncio.Future` which results in a :class:`Bench`
            object if successful

        The usage is simple::

            >>> b = self.timeit('asyncmethod', 100)
        '''
        bench = Bench(times, loop=self._loop)
        return bench(getattr(self, method), *args, **kwargs)
# ############################################################## MultiFuture
class MultiFuture(Future):
    '''Handle several futures at once. Thread safe.

    Resolves with a container (list or the input's mapping type) holding
    the results of all child futures, in input order/keys.
    '''
    def __init__(self, data=None, loop=None, type=None, raise_on_error=True):
        super().__init__(loop=loop)
        self._futures = {}    # key -> child future still pending
        self._failures = []   # exceptions collected when raise_on_error is False
        self._raise_on_error = raise_on_error
        if data is not None:
            # Preserve the container type of the results when the input is
            # a mapping; any other iterable becomes an indexed list.
            type = type or data.__class__
            if issubclass(type, Mapping):
                data = data.items()
            else:
                type = list
                data = enumerate(data)
        else:
            type = list
            data = ()
        self._stream = type()
        for key, value in data:
            value = self._get_set_item(key, maybe_async(value, loop))
            if isfuture(value):
                self._futures[key] = value
                value.add_done_callback(partial(self._future_done, key))
            elif self.done():
                # an early error with raise_on_error already resolved us
                break
        self._check()

    @property
    def failures(self):
        return self._failures

    #    INTERNALS
    def _check(self):
        # Resolve this future once no child futures remain pending.
        if not self._futures and not self.done():
            self.set_result(self._stream)

    def _future_done(self, key, future, inthread=False):
        # called by future when future is done
        # thread safe
        if inthread or future._loop is self._loop:
            self._futures.pop(key, None)
            if not self.done():
                self._get_set_item(key, future)
                self._check()
        else:
            # hop onto our own loop before touching shared state
            self._loop.call_soon_threadsafe(
                self._future_done, key, future, True)

    def _get_set_item(self, key, value):
        # Store ``value`` at ``key`` in the result stream; a done future is
        # replaced by its result (or its exception, depending on
        # raise_on_error).  Returns the stored value (a pending future is
        # stored as-is).
        if isfuture(value):
            if value.done():
                exc = as_exception(value)
                if exc:
                    if self._raise_on_error:
                        # first failure aborts the whole multi future
                        self._futures.clear()
                        self.set_exception(exc)
                        return
                    else:
                        self._failures.append(exc)
                        value = exc
                else:
                    value = value._result
        stream = self._stream
        if isinstance(stream, list) and key == len(stream):
            stream.append(value)
        else:
            stream[key] = value
        return value


# Backward compatibility
multi_async = MultiFuture
|
bsd-3-clause
|
fishcorn/pylearn2
|
pylearn2/sandbox/lisa_rl/bandit/average_agent.py
|
44
|
2251
|
__author__ = "Ian Goodfellow"
import numpy as np
from theano import function
from theano import tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.sandbox.lisa_rl.bandit.agent import Agent
from pylearn2.utils import sharedX
class AverageAgent(Agent):
    """
    A simple n-armed bandit playing agent that always plays the
    arm with the highest estimated reward. The estimated reward is just
    based on the average of all observations from that arm. If an arm
    has not been tried, the estimated reward is given by init_reward_estimate.

    Parameters
    ----------
    init_reward_estimate : float
        Reward assumed for an arm before it has ever been pulled.
    num_arms : int
        Number of arms of the bandit.
    """

    def __init__(self, init_reward_estimate, num_arms):
        # Store constructor arguments as attributes (pylearn2 idiom),
        # then drop the spurious ``self`` entry created by locals().
        self.__dict__.update(locals())
        del self.self
        # Running average of observed rewards per arm, initialised to
        # init_reward_estimate for untried arms.
        self.estimated_rewards = sharedX(np.zeros((num_arms,)) \
            + self.init_reward_estimate)
        # Number of pulls observed per arm.
        self.observation_counts = sharedX(np.zeros((num_arms,)))

    def get_decide_func(self):
        """
        Returns a theano function that decides what action to take.

        Since this is a bandit playing agent, there is no input.
        """
        # Cast is for compatibility with default bit depth of T.iscalar
        # (wtf, theano?)
        return function([], T.cast(T.argmax(self.estimated_rewards), 'int32'))

    def get_learn_func(self):
        """
        Returns a theano function that takes an action and a reward,
        and updates the agent based on this experience.
        """
        a = T.iscalar()  # index of the arm that was pulled
        r = T.scalar()   # reward received
        old_estimated_reward = self.estimated_rewards[a]
        old_observation_count = self.observation_counts[a]
        observation_count = old_observation_count + 1.
        # Incremental running-mean update: new = old + (r - old) / n
        delta = r - old_estimated_reward
        new_estimated_reward = old_estimated_reward + delta / observation_count
        new_estimated_rewards = T.set_subtensor(self.estimated_rewards[a],
                                                new_estimated_reward)
        new_observation_counts = T.set_subtensor(self.observation_counts[a], observation_count)
        updates = OrderedDict([
            (self.estimated_rewards, new_estimated_rewards),
            (self.observation_counts, new_observation_counts)
        ])
        rval = function([a, r], updates=updates)
        return rval
|
bsd-3-clause
|
Erethon/synnefo
|
snf-cyclades-app/synnefo/logic/tests/rapi_pool_tests.py
|
9
|
2959
|
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.test import TestCase
from synnefo.logic import rapi_pool
from mock import patch
@patch('synnefo.logic.rapi_pool.GanetiRapiClient', spec=True)
class GanetiRapiPoolTest(TestCase):
    """Tests for the Ganeti RAPI client pool (synnefo.logic.rapi_pool).

    GanetiRapiClient is patched for every test, so ``rclient`` is the
    mocked class object and no real RAPI connections are made.
    """
    def test_new_client(self, rclient):
        # A fresh (id, hash) request constructs a client and registers a
        # pool under that hash.
        cl = rapi_pool.get_rapi_client(1, 'amxixa', 'cluster0', '5080', 'user',
                                       'pass')
        rclient.assert_called_once_with("cluster0", "5080", "user", "pass")
        self.assertTrue('amxixa' in rapi_pool._pools)
        self.assertTrue(cl._pool is rapi_pool._pools[rapi_pool._hashes[1]])
    def test_invalid_get(self, rclient):
        # A missing cluster name or port must raise ValueError.
        self.assertRaises(ValueError, rapi_pool.get_rapi_client, 1, 'amxixa',
                          None, '5080', 'user', 'pass')
        self.assertRaises(ValueError, rapi_pool.get_rapi_client, 1, 'amxixa',
                          'Foo', None, 'user', 'pass')
    def test_get_from_pool(self, rclient):
        # A client returned to the pool is reused instead of re-constructed.
        cl = rapi_pool.get_rapi_client(1, 'dummyhash', 'cluster1', '5080',
                                       'user', 'pass')
        rclient.assert_called_once_with("cluster1", "5080", "user", "pass")
        rapi_pool.put_rapi_client(cl)
        rclient.reset_mock()
        cl2 = rapi_pool.get_rapi_client(1, 'dummyhash', 'cluster1', '5080',
                                        'user', 'pass')
        self.assertTrue(cl is cl2)
        self.assertFalse(rclient.mock_calls)
    def test_changed_credentials(self, rclient):
        # A new hash for the same cluster rebuilds the client and drops
        # the pool registered under the old hash.
        cl = rapi_pool.get_rapi_client(1, 'dummyhash2', 'cluster2', '5080',
                                       'user', 'pass')
        rclient.assert_called_once_with("cluster2", "5080", "user", "pass")
        rapi_pool.put_rapi_client(cl)
        rclient.reset_mock()
        rapi_pool.get_rapi_client(1, 'dummyhash3', 'cluster2', '5080',
                                  'user', 'new_pass')
        rclient.assert_called_once_with("cluster2", "5080", "user", "new_pass")
        self.assertFalse('dummyhash2' in rapi_pool._pools)
    def test_no_pool(self, rclient):
        # Returning a client whose pool reference was cleared must not
        # re-register it in any pool.
        cl = rapi_pool.get_rapi_client(1, 'dummyhash2', 'cluster2', '5080',
                                       'user', 'pass')
        cl._pool = None
        rapi_pool.put_rapi_client(cl)
        self.assertTrue(cl not in rapi_pool._pools.values())
|
gpl-3.0
|
ojengwa/oh-mainline
|
vendor/packages/python-openid/openid/yadis/xri.py
|
167
|
4383
|
# -*- test-case-name: openid.test.test_xri -*-
"""Utility functions for handling XRIs.
@see: XRI Syntax v2.0 at the U{OASIS XRI Technical Committee<http://www.oasis-open.org/committees/tc_home.php?wg_abbrev=xri>}
"""
import re
# Characters that may begin an XRI authority segment; '(' introduces a
# cross-reference.
XRI_AUTHORITIES = ['!', '=', '@', '+', '$', '(']
try:
    # Probe for a "wide" Python 2 build: unichr() raises ValueError for
    # code points above the BMP on narrow (UTF-16) builds.
    unichr(0x10000)
except ValueError:
    # narrow python build
    # Only the BMP portions of the RFC 3987 ucschar/iprivate ranges can
    # be expressed as single characters here.
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
    ]
    IPRIVATE = [
        (0xE000, 0xF8FF),
    ]
else:
    # Wide build: full ucschar / iprivate code point ranges (RFC 3987).
    UCSCHAR = [
        (0xA0, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFEF),
        (0x10000, 0x1FFFD),
        (0x20000, 0x2FFFD),
        (0x30000, 0x3FFFD),
        (0x40000, 0x4FFFD),
        (0x50000, 0x5FFFD),
        (0x60000, 0x6FFFD),
        (0x70000, 0x7FFFD),
        (0x80000, 0x8FFFD),
        (0x90000, 0x9FFFD),
        (0xA0000, 0xAFFFD),
        (0xB0000, 0xBFFFD),
        (0xC0000, 0xCFFFD),
        (0xD0000, 0xDFFFD),
        (0xE1000, 0xEFFFD),
    ]
    IPRIVATE = [
        (0xE000, 0xF8FF),
        (0xF0000, 0xFFFFD),
        (0x100000, 0x10FFFD),
    ]
# Matches any single character that must be percent-escaped when mapping
# an IRI to a URI.  NOTE: Python 2 only — uses unichr and a
# tuple-unpacking lambda (SyntaxError on Python 3).
_escapeme_re = re.compile('[%s]' % (''.join(
    map(lambda (m, n): u'%s-%s' % (unichr(m), unichr(n)),
        UCSCHAR + IPRIVATE)),))
def identifierScheme(identifier):
    """Classify an identifier as an XRI or a URI.
    @returns: C{"XRI"} or C{"URI"}
    """
    # Either an explicit xri:// scheme or a leading XRI authority
    # character marks the identifier as an XRI.
    if identifier.startswith('xri://'):
        return "XRI"
    if identifier and identifier[0] in XRI_AUTHORITIES:
        return "XRI"
    return "URI"
def toIRINormal(xri):
    """Return the IRI-normal form of an XRI.

    Ensures the 'xri://' scheme prefix is present, then escapes the
    characters that are not allowed in an IRI.
    """
    prefixed = xri
    if not prefixed.startswith('xri://'):
        prefixed = 'xri://' + prefixed
    return escapeForIRI(prefixed)
_xref_re = re.compile('\((.*?)\)')
def _escape_xref(xref_match):
"""Escape things that need to be escaped if they're in a cross-reference.
"""
xref = xref_match.group()
xref = xref.replace('/', '%2F')
xref = xref.replace('?', '%3F')
xref = xref.replace('#', '%23')
return xref
def escapeForIRI(xri):
"""Escape things that need to be escaped when transforming to an IRI."""
xri = xri.replace('%', '%25')
xri = _xref_re.sub(_escape_xref, xri)
return xri
def toURINormal(xri):
    """Return the URI-normal form of an XRI."""
    # IRI-normalize first, then percent-escape the non-ASCII characters.
    normalized = toIRINormal(xri)
    return iriToURI(normalized)
def _percentEscapeUnicode(char_match):
c = char_match.group()
return ''.join(['%%%X' % (ord(octet),) for octet in c.encode('utf-8')])
def iriToURI(iri):
"""Transform an IRI to a URI by escaping unicode."""
# According to RFC 3987, section 3.1, "Mapping of IRIs to URIs"
return _escapeme_re.sub(_percentEscapeUnicode, iri)
def providerIsAuthoritative(providerID, canonicalID):
    """Is this provider ID authoritative for this XRI?
    @returntype: bool
    """
    # The provider is authoritative when it equals everything before the
    # last '!' segment separator of the canonical ID.  (As before, this
    # raises ValueError when canonicalID contains no '!'.)
    last_separator = canonicalID.rindex('!')
    return canonicalID[:last_separator] == providerID
def rootAuthority(xri):
    """Return the root authority for an XRI.
    Example::
        rootAuthority("xri://@example") == "xri://@"
    @type xri: unicode
    @returntype: unicode
    """
    if xri.startswith('xri://'):
        xri = xri[6:]
    # The authority is everything before the first '/'.
    authority = xri.split('/', 1)[0]
    if authority[0] == '(':
        # Cross-reference.
        # XXX: This is incorrect if someone nests cross-references so there
        # is another close-paren in there.  Hopefully nobody does that
        # before we have a real xriparse function.  Hopefully nobody does
        # that *ever*.
        root = authority[:authority.index(')') + 1]
    elif authority[0] in XRI_AUTHORITIES:
        # Other XRI reference.
        root = authority[0]
    else:
        # IRI reference.  XXX: Can IRI authorities have segments?
        # The root is the text before the first '!' or '*' delimiter.
        # (Replaces a reduce()/map()/lambda pipeline — reduce is not a
        # builtin on Python 3 — with an equivalent explicit loop.)
        segments = []
        for segment in authority.split('!'):
            segments.extend(segment.split('*'))
        root = segments[0]
    return XRI(root)
def XRI(xri):
    """Canonicalize an XRI string for comparison.

    Ideally this would do full normalization and provide comparison
    operators as per XRI Syntax; for now it only ensures the 'xri://'
    scheme prefix is present.
    @param xri: an xri string
    @type xri: unicode
    """
    if xri.startswith('xri://'):
        return xri
    return 'xri://' + xri
|
agpl-3.0
|
russel1237/scikit-learn
|
sklearn/gaussian_process/correlation_models.py
|
230
|
7630
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
    """
    Absolute exponential autocorrelation model.
    (Ornstein-Uhlenbeck stochastic process)::
                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * |d_i| )
                                        i = 1
    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) containing the values of the
        autocorrelation model.

    Raises
    ------
    ValueError
        If theta has neither 1 nor n_features elements.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    theta = np.asarray(theta, dtype=float)
    d = np.abs(np.asarray(d, dtype=float))
    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        n_features = 1
    if theta.size == 1:
        # Isotropic: one length scale shared by every feature.
        return np.exp(- theta[0] * np.sum(d, axis=1))
    elif theta.size != n_features:
        raise ValueError("Length of theta must be 1 or %s" % n_features)
    else:
        # Anisotropic: one length scale per feature.
        return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
    """
    Squared exponential correlation model (Radial Basis Function).
    (Infinitely differentiable stochastic process, very smooth)::
                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * (d_i)^2 )
                                        i = 1
    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) containing the values of the
        autocorrelation model.

    Raises
    ------
    ValueError
        If theta has neither 1 nor n_features elements.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    theta = np.asarray(theta, dtype=float)
    d = np.asarray(d, dtype=float)
    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        n_features = 1
    if theta.size == 1:
        # Isotropic: one length scale shared by every feature.
        return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
    elif theta.size != n_features:
        raise ValueError("Length of theta must be 1 or %s" % n_features)
    else:
        # Anisotropic: one length scale per feature.
        return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
    """
    Generalized exponential correlation model.
    (Useful when one does not know the smoothness of the function to be
    predicted.)::
                                          n
        theta, d --> r(theta, d) = exp(  sum  - theta_i * |d_i|^p )
                                        i = 1
    Parameters
    ----------
    theta : array_like
        An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
        autocorrelation parameter(s) (theta, p).
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.

    Raises
    ------
    ValueError
        If theta has neither 2 nor n_features + 1 elements.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    theta = np.asarray(theta, dtype=float)
    d = np.asarray(d, dtype=float)
    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        n_features = 1
    lth = theta.size
    if n_features > 1 and lth == 2:
        # Isotropic (theta, p): broadcast theta to every feature.  The
        # original left theta 1-D here, so the 2-D indexing below raised
        # IndexError; reshape to a row vector like the branch below does.
        theta = np.hstack([np.repeat(theta[0], n_features),
                           theta[1]]).reshape(1, n_features + 1)
    elif lth != n_features + 1:
        # ValueError (a subclass of the former bare Exception) for
        # consistency with the other correlation models.
        raise ValueError("Length of theta must be 2 or %s" % (n_features + 1))
    else:
        theta = theta.reshape(1, lth)
    # Last theta entry is the exponent p; the rest are the length scales.
    td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
    r = np.exp(- np.sum(td, 1))
    return r
def pure_nugget(theta, d):
    """
    Spatial independence correlation model (pure nugget).
    (Useful when one wants to solve an ordinary least squares problem!)::
                                           n
        theta, d --> r(theta, d) = 1 if   sum |d_i| == 0
                                         i = 1
                                   0 otherwise
    Parameters
    ----------
    theta : array_like
        None.
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.  theta is unused but converted for
    # interface consistency with the other models.
    theta = np.asarray(theta, dtype=float)
    d = np.asarray(d, dtype=float)
    n_eval = d.shape[0]
    r = np.zeros(n_eval)
    # 1 exactly where every componentwise distance is zero.
    r[np.all(d == 0., axis=1)] = 1.
    return r
def cubic(theta, d):
    """
    Cubic correlation model::
        theta, d --> r(theta, d) =
          n
         prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) ,  i = 1,...,m
        j = 1
    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.

    Raises
    ------
    ValueError
        If theta has neither 1 nor n_features elements.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    theta = np.asarray(theta, dtype=float)
    d = np.asarray(d, dtype=float)
    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        n_features = 1
    lth = theta.size
    if lth == 1:
        td = np.abs(d) * theta
    elif lth != n_features:
        # ValueError (a subclass of the former bare Exception) for
        # consistency with the other correlation models.
        raise ValueError("Length of theta must be 1 or " + str(n_features))
    else:
        td = np.abs(d) * theta.reshape(1, n_features)
    # Clip scaled distances at 1 so the polynomial stays in [0, 1] and
    # the correlation has compact support.
    td[td > 1.] = 1.
    ss = 1. - td ** 2. * (3. - 2. * td)
    r = np.prod(ss, 1)
    return r
def linear(theta, d):
    """
    Linear correlation model::
        theta, d --> r(theta, d) =
              n
            prod max(0, 1 - theta_j*d_ij) ,  i = 1,...,m
            j = 1
    Parameters
    ----------
    theta : array_like
        An array with shape 1 (isotropic) or n (anisotropic) giving the
        autocorrelation parameter(s).
    d : array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
    Returns
    -------
    r : array_like
        An array with shape (n_eval, ) with the values of the autocorrelation
        model.

    Raises
    ------
    ValueError
        If theta has neither 1 nor n_features elements.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24; use float directly.
    theta = np.asarray(theta, dtype=float)
    d = np.asarray(d, dtype=float)
    if d.ndim > 1:
        n_features = d.shape[1]
    else:
        n_features = 1
    lth = theta.size
    if lth == 1:
        td = np.abs(d) * theta
    elif lth != n_features:
        # ValueError (a subclass of the former bare Exception) for
        # consistency with the other correlation models.
        raise ValueError("Length of theta must be 1 or %s" % n_features)
    else:
        td = np.abs(d) * theta.reshape(1, n_features)
    # Clip scaled distances at 1 so the correlation never goes negative.
    td[td > 1.] = 1.
    ss = 1. - td
    r = np.prod(ss, 1)
    return r
|
bsd-3-clause
|
lintzc/gpdb
|
src/test/tinc/ext/suds/metrics.py
|
211
|
2004
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{metrics} module defines classes and other resources
designed for collecting and reporting performance metrics.
"""
import time
from logging import getLogger
from suds import *
from math import modf
log = getLogger(__name__)
class Timer:
    """A simple wall-clock stopwatch used for reporting durations.

    A value of 0 for ``started``/``stopped`` means the corresponding
    event has not happened (yet).
    """
    def __init__(self):
        self.started = 0
        self.stopped = 0
    def start(self):
        """Start (or restart) the timer; returns self for chaining."""
        self.started = time.time()
        self.stopped = 0
        return self
    def stop(self):
        """Stop the timer if it is running; returns self for chaining."""
        if self.started > 0:
            self.stopped = time.time()
        return self
    def duration(self):
        """Elapsed seconds between start and stop."""
        return self.stopped - self.started
    def __str__(self):
        # Guard clauses for the not-started and still-running states.
        if self.started == 0:
            return 'not-running'
        if self.started > 0 and self.stopped == 0:
            return 'started: %d (running)' % self.started
        elapsed = self.duration()
        if elapsed < 1:
            return '%d (ms)' % (elapsed * 1000)
        if elapsed < 60:
            frac, whole = modf(elapsed)
            return '%d.%.3d (seconds)' % (whole, frac * 1000)
        frac, whole = modf(elapsed / 60)
        return '%d.%.3d (minutes)' % (whole, frac * 1000)
|
apache-2.0
|
topic2k/EventGhost
|
lib27/site-packages/requests/packages/chardet/cp949prober.py
|
2801
|
1782
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
    """Charset prober for the CP949 (extended EUC-KR) Korean encoding."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validates CP949 byte sequences.
        self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
        #       not different.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Canonical name reported to callers when this prober wins.
        return "CP949"
|
gpl-2.0
|
jumpjam/django-storages
|
storages/tests/s3boto.py
|
13
|
11381
|
import mock
import datetime
import urlparse
from django.test import TestCase
from django.core.files.base import ContentFile
from boto.s3.key import Key
from storages.backends import s3boto
# Test cases exported from this module; S3BotoStorageFileTests is kept
# commented out (see the disabled class at the bottom of the file).
__all__ = (
    'ParseTsExtendedCase',
    'SafeJoinTest',
    'S3BotoStorageTests',
    #'S3BotoStorageFileTests',
)
class ParseTsExtendedCase(TestCase):
    """Tests for s3boto.parse_ts_extended timestamp parsing."""
    def test_normal(self):
        # An RFC 1123 date string parses to the expected naive datetime.
        value = s3boto.parse_ts_extended("Wed, 13 Mar 2013 12:45:49 GMT")
        # assertEquals is a deprecated alias of assertEqual (removed in
        # Python 3.12).
        self.assertEqual(value, datetime.datetime(2013, 3, 13, 12, 45, 49))
class S3BotoTestCase(TestCase):
    """Base test case that builds an S3BotoStorage with a mocked
    S3 connection, so no network access happens in tests.
    """
    @mock.patch('storages.backends.s3boto.S3Connection')
    def setUp(self, S3Connection):
        self.storage = s3boto.S3BotoStorage()
        # Replace the lazily-created connection with a MagicMock so
        # tests can inspect calls and stub return values.
        self.storage._connection = mock.MagicMock()
class SafeJoinTest(TestCase):
    """Tests for s3boto.safe_join path joining and traversal protection.

    assertEquals (a deprecated alias removed in Python 3.12) is replaced
    with assertEqual throughout.
    """
    def test_normal(self):
        path = s3boto.safe_join("", "path/to/somewhere", "other", "path/to/somewhere")
        self.assertEqual(path, "path/to/somewhere/other/path/to/somewhere")
    def test_with_dot(self):
        path = s3boto.safe_join("", "path/./somewhere/../other", "..",
                                ".", "to/./somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_base_url(self):
        path = s3boto.safe_join("base_url", "path/to/somewhere")
        self.assertEqual(path, "base_url/path/to/somewhere")
    def test_base_url_with_slash(self):
        path = s3boto.safe_join("base_url/", "path/to/somewhere")
        self.assertEqual(path, "base_url/path/to/somewhere")
    def test_suspicious_operation(self):
        # Escaping above the base must raise ValueError.
        self.assertRaises(ValueError,
            s3boto.safe_join, "base", "../../../../../../../etc/passwd")
    def test_trailing_slash(self):
        """
        Test safe_join with paths that end with a trailing slash.
        """
        path = s3boto.safe_join("base_url/", "path/to/somewhere/")
        self.assertEqual(path, "base_url/path/to/somewhere/")
    def test_trailing_slash_multi(self):
        """
        Test safe_join with multiple paths that end with a trailing slash.
        """
        path = s3boto.safe_join("base_url/", "path/to/" "somewhere/")
        self.assertEqual(path, "base_url/path/to/somewhere/")
class S3BotoStorageTests(S3BotoTestCase):
    """Tests for S3BotoStorage against a mocked S3 connection.

    Fixes in this revision: the class defined ``test_storage_url`` twice,
    so the second definition silently shadowed the first and the
    custom-domain URL test never ran — the second is renamed to
    ``test_storage_url_signed``.  Deprecated ``assertEquals`` (removed in
    Python 3.12) is replaced with ``assertEqual``.
    """
    def test_clean_name(self):
        """
        Test the base case of _clean_name
        """
        path = self.storage._clean_name("path/to/somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_clean_name_normalize(self):
        """
        Test the normalization of _clean_name
        """
        path = self.storage._clean_name("path/to/../somewhere")
        self.assertEqual(path, "path/somewhere")
    def test_clean_name_trailing_slash(self):
        """
        Test the _clean_name when the path has a trailing slash
        """
        path = self.storage._clean_name("path/to/somewhere/")
        self.assertEqual(path, "path/to/somewhere/")
    def test_clean_name_windows(self):
        """
        Test the _clean_name when the path uses Windows-style separators
        """
        path = self.storage._clean_name("path\\to\\somewhere")
        self.assertEqual(path, "path/to/somewhere")
    def test_storage_url(self):
        """
        Test URL generation with a custom domain.
        """
        self.storage.custom_domain = 'example.com'
        # We expect no leading slashes in the path,
        # and trailing slashes should be preserved.
        self.assertEqual(self.storage.url(''), 'https://example.com/')
        self.assertEqual(self.storage.url('path'), 'https://example.com/path')
        self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
        self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
        self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
    def test_storage_save(self):
        """
        Test saving a file
        """
        name = 'test_storage_save.txt'
        content = ContentFile('new content')
        self.storage.save(name, content)
        self.storage.bucket.get_key.assert_called_once_with(name)
        key = self.storage.bucket.get_key.return_value
        key.set_metadata.assert_called_with('Content-Type', 'text/plain')
        key.set_contents_from_file.assert_called_with(
            content,
            headers={'Content-Type': 'text/plain'},
            policy=self.storage.default_acl,
            reduced_redundancy=self.storage.reduced_redundancy,
            rewind=True
        )
    def test_storage_save_gzip(self):
        """
        Test saving a file with gzip enabled.
        """
        if not s3boto.S3BotoStorage.gzip:  # Gzip not available.
            return
        name = 'test_storage_save.css'
        content = ContentFile("I should be gzip'd")
        self.storage.save(name, content)
        key = self.storage.bucket.get_key.return_value
        key.set_metadata.assert_called_with('Content-Type', 'text/css')
        key.set_contents_from_file.assert_called_with(
            content,
            headers={'Content-Type': 'text/css', 'Content-Encoding': 'gzip'},
            policy=self.storage.default_acl,
            reduced_redundancy=self.storage.reduced_redundancy,
            rewind=True,
        )
    def test_compress_content_len(self):
        """
        Test that file returned by _compress_content() is readable.
        """
        if not s3boto.S3BotoStorage.gzip:  # Gzip not available.
            return
        content = ContentFile("I should be gzip'd")
        content = self.storage._compress_content(content)
        self.assertTrue(len(content.read()) > 0)
    def test_storage_open_write(self):
        """
        Test opening a file in write mode
        """
        name = 'test_open_for_writing.txt'
        content = 'new content'
        # Set the ACL header used when creating/writing data.
        self.storage.bucket.connection.provider.acl_header = 'x-amz-acl'
        # Set the mocked key's bucket
        self.storage.bucket.get_key.return_value.bucket = self.storage.bucket
        # Set the name of the mock object
        self.storage.bucket.get_key.return_value.name = name
        file = self.storage.open(name, 'w')
        self.storage.bucket.get_key.assert_called_with(name)
        file.write(content)
        self.storage.bucket.initiate_multipart_upload.assert_called_with(
            name,
            headers={
                'Content-Type': 'text/plain',
                'x-amz-acl': 'public-read',
            },
            reduced_redundancy=self.storage.reduced_redundancy,
        )
        # Save the internal file before closing
        _file = file.file
        file.close()
        file._multipart.upload_part_from_file.assert_called_with(
            _file, 1, headers=self.storage.headers,
        )
        file._multipart.complete_upload.assert_called_once()
    def test_storage_exists(self):
        key = self.storage.bucket.new_key.return_value
        key.exists.return_value = True
        self.assertTrue(self.storage.exists("file.txt"))
    def test_storage_exists_false(self):
        key = self.storage.bucket.new_key.return_value
        key.exists.return_value = False
        self.assertFalse(self.storage.exists("file.txt"))
    def test_storage_delete(self):
        self.storage.delete("path/to/file.txt")
        self.storage.bucket.delete_key.assert_called_with("path/to/file.txt")
    def test_storage_listdir_base(self):
        file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
        self.storage.bucket.list.return_value = []
        for p in file_names:
            key = mock.MagicMock(spec=Key)
            key.name = p
            self.storage.bucket.list.return_value.append(key)
        dirs, files = self.storage.listdir("")
        self.assertEqual(len(dirs), 2)
        for directory in ["some", "other"]:
            self.assertTrue(directory in dirs,
                            """ "%s" not in directory list "%s".""" % (
                                directory, dirs))
        self.assertEqual(len(files), 2)
        for filename in ["2.txt", "4.txt"]:
            self.assertTrue(filename in files,
                            """ "%s" not in file list "%s".""" % (
                                filename, files))
    def test_storage_listdir_subdir(self):
        file_names = ["some/path/1.txt", "some/2.txt"]
        self.storage.bucket.list.return_value = []
        for p in file_names:
            key = mock.MagicMock(spec=Key)
            key.name = p
            self.storage.bucket.list.return_value.append(key)
        dirs, files = self.storage.listdir("some/")
        self.assertEqual(len(dirs), 1)
        self.assertTrue('path' in dirs,
                        """ "path" not in directory list "%s".""" % (dirs,))
        self.assertEqual(len(files), 1)
        self.assertTrue('2.txt' in files,
                        """ "2.txt" not in files list "%s".""" % (files,))
    def test_storage_size(self):
        key = self.storage.bucket.get_key.return_value
        key.size = 4098
        name = 'file.txt'
        self.assertEqual(self.storage.size(name), key.size)
    def test_storage_url_signed(self):
        # Was a second ``def test_storage_url`` that shadowed the
        # custom-domain test above; renamed so both run.
        name = 'test_storage_size.txt'
        url = 'http://aws.amazon.com/%s' % name
        self.storage.connection.generate_url.return_value = url
        self.assertEqual(self.storage.url(name), url)
        self.storage.connection.generate_url.assert_called_with(
            self.storage.querystring_expire,
            method='GET',
            bucket=self.storage.bucket.name,
            key=name,
            query_auth=self.storage.querystring_auth,
            force_http=not self.storage.secure_urls,
            headers=None,
            response_headers=None,
        )
    def test_generated_url_is_encoded(self):
        self.storage.custom_domain = "mock.cloudfront.net"
        filename = "whacky & filename.mp4"
        url = self.storage.url(filename)
        parsed_url = urlparse.urlparse(url)
        self.assertEqual(parsed_url.path,
                         "/whacky%20%26%20filename.mp4")
#class S3BotoStorageFileTests(S3BotoTestCase):
# def test_multipart_upload(self):
# nparts = 2
# name = self.prefix_path("test_multipart_upload.txt")
# mode = 'w'
# f = s3boto.S3BotoStorageFile(name, mode, self.storage)
# content_length = 1024 * 1024# 1 MB
# content = 'a' * content_length
#
# bytes = 0
# target = f._write_buffer_size * nparts
# while bytes < target:
# f.write(content)
# bytes += content_length
#
# # make the buffer roll over so f._write_counter
# # is incremented
# f.write("finished")
#
# # verify upload was multipart and correctly partitioned
# self.assertEqual(f._write_counter, nparts)
#
# # complete the upload
# f.close()
#
# # verify that the remaining buffered bytes were
# # uploaded when the file was closed.
# self.assertEqual(f._write_counter, nparts+1)
|
bsd-3-clause
|
OpenUpgrade-dev/OpenUpgrade
|
addons/hr_payroll/wizard/hr_payroll_contribution_register_report.py
|
337
|
2074
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import fields, osv
class payslip_lines_contribution_register(osv.osv_memory):
    """Transient wizard that asks for a date range and launches the
    'payslip lines by contribution register' report for the registers
    selected in the client.
    """
    _name = 'payslip.lines.contribution.register'
    _description = 'PaySlip Lines by Contribution Registers'
    # Report period selected by the user.
    _columns = {
        'date_from': fields.date('Date From', required=True),
        'date_to': fields.date('Date To', required=True),
    }
    # Defaults: first through last day of the current month.
    _defaults = {
        'date_from': lambda *a: time.strftime('%Y-%m-01'),
        'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
    }
    def print_report(self, cr, uid, ids, context=None):
        """Return the report action for the selected contribution
        registers (context['active_ids']), passing the wizard's date
        range as the report form data.
        """
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'hr.contribution.register',
            'form': self.read(cr, uid, ids, context=context)[0]
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'hr_payroll.report_contributionregister', data=datas, context=context
        )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
daniell/django-notification
|
notification/lockfile.py
|
39
|
15067
|
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = FileLock('somefile')
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print 'somefile', 'is locked already.'
... except LockFailed:
... print 'somefile', 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = FileLock('somefile')
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
"""
from __future__ import division
import sys
import socket
import os
import threading
import time
import errno
# Work with PEP8 and non-PEP8 versions of threading module.
try:
    threading.current_thread
except AttributeError:
    # Pre-2.6 Python: alias the camelCase API to the PEP8 name used below.
    threading.current_thread = threading.currentThread
try:
    # python 2.6 has threading.current_thread so we need to do this separately.
    threading.Thread.get_name
except AttributeError:
    threading.Thread.get_name = threading.Thread.getName
# NOTE(review): SQLiteFileLock is exported here but not visible in this
# chunk — presumably defined later in the file; verify.
__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
           'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock']
# --- Exception hierarchy --------------------------------------------------
# Error
#  +-- LockError    (raised while acquiring)
#  |     +-- LockTimeout, AlreadyLocked, LockFailed
#  +-- UnlockError  (raised while releasing)
#        +-- NotLocked, NotMyLock
class Error(Exception):
    """
    Base class for all exceptions raised by this module.
    >>> try:
    ...   raise Error
    ... except Exception:
    ...   pass
    """
class LockError(Error):
    """
    Base class for errors arising from attempts to acquire the lock.
    >>> try:
    ...   raise LockError
    ... except Error:
    ...   pass
    """
class LockTimeout(LockError):
    """Raised when lock creation fails within a user-defined period of time.
    >>> try:
    ...   raise LockTimeout
    ... except LockError:
    ...   pass
    """
class AlreadyLocked(LockError):
    """Raised when some other thread/process already holds the lock.
    >>> try:
    ...   raise AlreadyLocked
    ... except LockError:
    ...   pass
    """
class LockFailed(LockError):
    """Raised when lock file creation fails for some other reason.
    >>> try:
    ...   raise LockFailed
    ... except LockError:
    ...   pass
    """
class UnlockError(Error):
    """
    Base class for errors arising from attempts to release the lock.
    >>> try:
    ...   raise UnlockError
    ... except Error:
    ...   pass
    """
class NotLocked(UnlockError):
    """Raised when an attempt is made to unlock an unlocked file.
    >>> try:
    ...   raise NotLocked
    ... except UnlockError:
    ...   pass
    """
class NotMyLock(UnlockError):
    """Raised when an attempt is made to unlock a file someone else locked.
    >>> try:
    ...   raise NotMyLock
    ... except UnlockError:
    ...   pass
    """
class LockBase:
    """Base class for platform-specific lock classes.

    Computes the shared lock-file name and a per-host/thread/pid unique
    name; subclasses implement the actual locking primitives.  The
    abstract methods previously did ``raise NotImplemented(...)`` —
    NotImplemented is a comparison sentinel, not an exception, so calling
    it raised a confusing TypeError; they now raise NotImplementedError.
    """
    def __init__(self, path, threaded=True):
        """
        >>> lock = LockBase('somefile')
        >>> lock = LockBase('somefile', threaded=False)
        """
        self.path = path
        self.lock_file = os.path.abspath(path) + ".lock"
        self.hostname = socket.gethostname()
        self.pid = os.getpid()
        if threaded:
            tname = "%s-" % threading.current_thread().get_name()
        else:
            tname = ""
        dirname = os.path.dirname(self.lock_file)
        # Unique per host/thread/pid so concurrent lockers can be
        # distinguished from each other.
        self.unique_name = os.path.join(dirname,
                                        "%s.%s%s" % (self.hostname,
                                                     tname,
                                                     self.pid))
    def acquire(self, timeout=None):
        """
        Acquire the lock.
        * If timeout is omitted (or None), wait forever trying to lock the
          file.
        * If timeout > 0, try to acquire the lock for that many seconds.  If
          the lock period expires and the file is still locked, raise
          LockTimeout.
        * If timeout <= 0, raise AlreadyLocked immediately if the file is
          already locked.
        """
        raise NotImplementedError("implement in subclass")
    def release(self):
        """
        Release the lock.
        If the file is not locked, raise NotLocked.
        """
        raise NotImplementedError("implement in subclass")
    def is_locked(self):
        """
        Tell whether or not the file is locked.
        """
        raise NotImplementedError("implement in subclass")
    def i_am_locking(self):
        """
        Return True if this object is locking the file.
        """
        raise NotImplementedError("implement in subclass")
    def break_lock(self):
        """
        Remove a lock.  Useful if a locking thread failed to unlock.
        """
        raise NotImplementedError("implement in subclass")
    def __enter__(self):
        """
        Context manager support.
        """
        self.acquire()
        return self
    def __exit__(self, *_exc):
        """
        Context manager support.
        """
        self.release()
class LinkFileLock(LockBase):
    """Lock access to a file using atomic property of link(2)."""
    def acquire(self, timeout=None):
        # Create the per-host/thread/pid unique file that we will try to
        # hard-link to the shared lock file name.
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        os.unlink(self.unique_name)
                        # timeout > 0 means we waited and timed out;
                        # timeout <= 0 means "fail immediately if locked".
                        if timeout > 0:
                            raise LockTimeout
                        else:
                            raise AlreadyLocked
                    # Poll at timeout/10 seconds, or 10 Hz when waiting
                    # forever (timeout is None).
                    time.sleep(timeout is not None and timeout/10 or 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return
    def release(self):
        # Must hold the lock (and it must be ours) to release it.
        if not self.is_locked():
            raise NotLocked
        elif not os.path.exists(self.unique_name):
            raise NotMyLock
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)
    def is_locked(self):
        # Anyone's lock: the shared lock file exists.
        return os.path.exists(self.lock_file)
    def i_am_locking(self):
        # Our lock: shared file exists, our unique file exists, and they
        # are hard-linked together (link count == 2).
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)
    def break_lock(self):
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
class MkdirFileLock(LockBase):
    """Lock file by creating a directory."""

    def __init__(self, path, threaded=True):
        """
        >>> lock = MkdirFileLock('somefile')
        >>> lock = MkdirFileLock('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded)
        if threaded:
            # Include the thread id so threads of one process don't collide.
            tname = "%x-" % thread.get_ident()
        else:
            tname = ""
        # Lock file itself is a directory. Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     tname,
                                                     self.pid))

    def acquire(self, timeout=None):
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        # Poll interval: a tenth of the timeout, or 0.1s when waiting forever.
        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)

        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed
            else:
                # mkdir succeeded, so we own the lock; drop our marker file
                # inside the directory.
                open(self.unique_name, "wb").close()
                return

    def release(self):
        if not self.is_locked():
            raise NotLocked
        elif not os.path.exists(self.unique_name):
            # Locked, but the marker inside isn't ours.
            raise NotMyLock
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)

    def is_locked(self):
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        return (self.is_locked() and
                os.path.exists(self.unique_name))

    def break_lock(self):
        # Remove every marker file, then the lock directory itself.
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
class SQLiteFileLock(LockBase):
    "Demonstration of using same SQL-based locking."

    # Import-time side effect: reserve a scratch database path shared by
    # every SQLiteFileLock instance in this process.
    import tempfile
    _fd, testdb = tempfile.mkstemp()
    os.close(_fd)
    os.unlink(testdb)
    del _fd, tempfile

    def __init__(self, path, threaded=True):
        LockBase.__init__(self, path, threaded)
        # Python 2 idiom: hand unicode strings to the DB-API layer.
        self.lock_file = unicode(self.lock_file)
        self.unique_name = unicode(self.unique_name)

        import sqlite3
        self.connection = sqlite3.connect(SQLiteFileLock.testdb)

        c = self.connection.cursor()
        try:
            c.execute("create table locks"
                      "("
                      " lock_file varchar(32),"
                      " unique_name varchar(32)"
                      ")")
        except sqlite3.OperationalError:
            # Table already exists -- another lock created it first.
            pass
        else:
            self.connection.commit()
            # First creator also arranges cleanup of the scratch DB at exit.
            import atexit
            atexit.register(os.unlink, SQLiteFileLock.testdb)

    def acquire(self, timeout=None):
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        # Poll interval: a tenth of the timeout (0 -> spin for fail-fast).
        if timeout is None:
            wait = 0.1
        elif timeout <= 0:
            wait = 0
        else:
            wait = timeout / 10

        cursor = self.connection.cursor()

        while True:
            if not self.is_locked():
                # Not locked. Try to lock it.
                cursor.execute("insert into locks"
                               " (lock_file, unique_name)"
                               " values"
                               " (?, ?)",
                               (self.lock_file, self.unique_name))
                self.connection.commit()

                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               " where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) > 1:
                    # Nope. Someone else got there. Remove our lock.
                    cursor.execute("delete from locks"
                                   " where unique_name = ?",
                                   (self.unique_name,))
                    self.connection.commit()
                else:
                    # Yup. We're done, so go home.
                    return
            else:
                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               " where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) == 1:
                    # We're the locker, so go home.
                    return

            # Maybe we should wait a bit longer.
            if timeout is not None and time.time() > end_time:
                if timeout > 0:
                    # No more waiting.
                    raise LockTimeout
                else:
                    # Someone else has the lock and we are impatient..
                    raise AlreadyLocked

            # Well, okay. We'll give it a bit longer.
            time.sleep(wait)

    def release(self):
        if not self.is_locked():
            raise NotLocked
        if not self.i_am_locking():
            raise NotMyLock((self._who_is_locking(), self.unique_name))
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       " where unique_name = ?",
                       (self.unique_name,))
        self.connection.commit()

    def _who_is_locking(self):
        # Return the unique_name of whichever holder owns our lock_file row.
        cursor = self.connection.cursor()
        cursor.execute("select unique_name from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        return cursor.fetchone()[0]

    def is_locked(self):
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        rows = cursor.fetchall()
        # `not not` coerces the row list to a plain bool.
        return not not rows

    def i_am_locking(self):
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       " where lock_file = ?"
                       " and unique_name = ?",
                       (self.lock_file, self.unique_name))
        return not not cursor.fetchall()

    def break_lock(self):
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       " where lock_file = ?",
                       (self.lock_file,))
        self.connection.commit()
# Pick the hard-link implementation when the platform supports link(2);
# otherwise fall back to the mkdir-based lock.
FileLock = LinkFileLock if hasattr(os, "link") else MkdirFileLock
|
mit
|
bob-the-hamster/commandergenius
|
project/jni/python/src/Lib/test/test_os.py
|
48
|
23971
|
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import unittest
import warnings
import sys
from test import test_support
# Silence the RuntimeWarnings from os.tempnam/os.tmpnam, which the tests
# below exercise deliberately.
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
    """Tests for basic os file-descriptor helpers (access/closerange/rename)."""

    def setUp(self):
        if os.path.exists(test_support.TESTFN):
            os.unlink(test_support.TESTFN)
    # Cleanup is identical to setup: just remove the scratch file.
    tearDown = setUp

    def test_access(self):
        f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(f)
        self.assert_(os.access(test_support.TESTFN, os.W_OK))

    def test_closerange(self):
        first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        # We must allocate two consecutive file descriptors, otherwise
        # it will mess up other file descriptors (perhaps even the three
        # standard ones).
        second = os.dup(first)
        try:
            retries = 0
            while second != first + 1:
                os.close(first)
                retries += 1
                if retries > 10:
                    # XXX test skipped
                    print >> sys.stderr, (
                        "couldn't allocate two consecutive fds, "
                        "skipping test_closerange")
                    return
                first, second = second, os.dup(second)
        finally:
            os.close(second)
        # close a fd that is open, and one that isn't
        os.closerange(first, first + 2)
        self.assertRaises(OSError, os.write, first, "a")

    def test_rename(self):
        # The refcount of the path argument must be unchanged by a failed
        # rename (guards against a reference leak in the C implementation).
        path = unicode(test_support.TESTFN)
        old = sys.getrefcount(path)
        self.assertRaises(TypeError, os.rename, path, 0)
        new = sys.getrefcount(path)
        self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
    """Tests for the legacy os.tempnam/os.tmpfile/os.tmpnam helpers."""

    def setUp(self):
        # Track created files so tearDown can remove them.
        self.files = []
        os.mkdir(test_support.TESTFN)

    def tearDown(self):
        for name in self.files:
            os.unlink(name)
        os.rmdir(test_support.TESTFN)

    def check_tempfile(self, name):
        # make sure it doesn't already exist:
        self.failIf(os.path.exists(name),
                    "file already exists for temporary file")
        # make sure we can create the file
        open(name, "w")
        self.files.append(name)

    def test_tempnam(self):
        if not hasattr(os, "tempnam"):
            return
        warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
                                r"test_os$")
        self.check_tempfile(os.tempnam())
        # With an explicit directory argument...
        name = os.tempnam(test_support.TESTFN)
        self.check_tempfile(name)
        # ...and with a prefix, which must show up in the basename.
        name = os.tempnam(test_support.TESTFN, "pfx")
        self.assert_(os.path.basename(name)[:3] == "pfx")
        self.check_tempfile(name)

    def test_tmpfile(self):
        if not hasattr(os, "tmpfile"):
            return
        # As with test_tmpnam() below, the Windows implementation of tmpfile()
        # attempts to create a file in the root directory of the current drive.
        # On Vista and Server 2008, this test will always fail for normal users
        # as writing to the root directory requires elevated privileges. With
        # XP and below, the semantics of tmpfile() are the same, but the user
        # running the test is more likely to have administrative privileges on
        # their account already. If that's the case, then os.tmpfile() should
        # work. In order to make this test as useful as possible, rather than
        # trying to detect Windows versions or whether or not the user has the
        # right permissions, just try and create a file in the root directory
        # and see if it raises a 'Permission denied' OSError. If it does, then
        # test that a subsequent call to os.tmpfile() raises the same error. If
        # it doesn't, assume we're on XP or below and the user running the test
        # has administrative privileges, and proceed with the test as normal.
        if sys.platform == 'win32':
            name = '\\python_test_os_test_tmpfile.txt'
            if os.path.exists(name):
                os.remove(name)
            try:
                fp = open(name, 'w')
            except IOError, first:
                # open() failed, assert tmpfile() fails in the same way.
                # Although open() raises an IOError and os.tmpfile() raises an
                # OSError(), 'args' will be (13, 'Permission denied') in both
                # cases.
                try:
                    fp = os.tmpfile()
                except OSError, second:
                    self.assertEqual(first.args, second.args)
                else:
                    self.fail("expected os.tmpfile() to raise OSError")
                return
            else:
                # open() worked, therefore, tmpfile() should work. Close our
                # dummy file and proceed with the test as normal.
                fp.close()
                os.remove(name)
        # Round-trip a small payload through the anonymous temp file.
        fp = os.tmpfile()
        fp.write("foobar")
        fp.seek(0,0)
        s = fp.read()
        fp.close()
        self.assert_(s == "foobar")

    def test_tmpnam(self):
        import sys
        if not hasattr(os, "tmpnam"):
            return
        warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
                                r"test_os$")
        name = os.tmpnam()
        if sys.platform in ("win32",):
            # The Windows tmpnam() seems useless. From the MS docs:
            #
            # The character string that tmpnam creates consists of
            # the path prefix, defined by the entry P_tmpdir in the
            # file STDIO.H, followed by a sequence consisting of the
            # digit characters '0' through '9'; the numerical value
            # of this string is in the range 1 - 65,535. Changing the
            # definitions of L_tmpnam or P_tmpdir in STDIO.H does not
            # change the operation of tmpnam.
            #
            # The really bizarre part is that, at least under MSVC6,
            # P_tmpdir is "\\". That is, the path returned refers to
            # the root of the current drive. That's a terrible place to
            # put temp files, and, depending on privileges, the user
            # may not even be able to open a file in the root directory.
            self.failIf(os.path.exists(name),
                        "file already exists for temporary file")
        else:
            self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
    """Tests attributes on return values from the os.*stat* family."""

    def setUp(self):
        os.mkdir(test_support.TESTFN)
        # A 3-byte file; the size is asserted below.
        self.fname = os.path.join(test_support.TESTFN, "f1")
        f = open(self.fname, 'wb')
        f.write("ABC")
        f.close()

    def tearDown(self):
        os.unlink(self.fname)
        os.rmdir(test_support.TESTFN)

    def test_stat_attributes(self):
        if not hasattr(os, "stat"):
            return
        import stat
        result = os.stat(self.fname)
        # Make sure direct access works
        self.assertEquals(result[stat.ST_SIZE], 3)
        self.assertEquals(result.st_size, 3)
        import sys
        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                if name.endswith("TIME"):
                    # Indexed access returns truncated times; compare in int.
                    def trunc(x): return int(x)
                else:
                    def trunc(x): return x
                self.assertEquals(trunc(getattr(result, attr)),
                                  result[getattr(stat, name)])
                self.assert_(attr in members)
        # Out-of-range index must raise.
        try:
            result[200]
            self.fail("No exception thrown")
        except IndexError:
            pass
        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception thrown")
        except TypeError:
            pass
        try:
            result.st_rdev = 1
            self.fail("No exception thrown")
        except (AttributeError, TypeError):
            pass
        try:
            result.parrot = 1
            self.fail("No exception thrown")
        except AttributeError:
            pass
        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception thrown")
        except TypeError:
            pass
        # Use the constructor with a too-long tuple.
        # NOTE(review): unlike the too-short case there is no self.fail()
        # here -- extra members are apparently tolerated; confirm intent.
        try:
            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    def test_statvfs_attributes(self):
        if not hasattr(os, "statvfs"):
            return
        try:
            result = os.statvfs(self.fname)
        except OSError, e:
            # On AtheOS, glibc always returns ENOSYS
            import errno
            if e.errno == errno.ENOSYS:
                return
        # Make sure direct access works
        self.assertEquals(result.f_bfree, result[3])
        # Make sure all the attributes are there.
        members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
                   'ffree', 'favail', 'flag', 'namemax')
        for value, member in enumerate(members):
            self.assertEquals(getattr(result, 'f_' + member), result[value])
        # Make sure that assignment really fails
        try:
            result.f_bfree = 1
            self.fail("No exception thrown")
        except TypeError:
            pass
        try:
            result.parrot = 1
            self.fail("No exception thrown")
        except AttributeError:
            pass
        # Use the constructor with a too-short tuple.
        try:
            result2 = os.statvfs_result((10,))
            self.fail("No exception thrown")
        except TypeError:
            pass
        # Use the constructor with a too-long tuple (see note above).
        try:
            result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    def test_utime_dir(self):
        delta = 1000000
        st = os.stat(test_support.TESTFN)
        # round to int, because some systems may support sub-second
        # time stamps in stat, but not in utime.
        os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
        st2 = os.stat(test_support.TESTFN)
        self.assertEquals(st2.st_mtime, int(st.st_mtime-delta))

    # Restrict test to Win32, since there is no guarantee other
    # systems support centiseconds
    if sys.platform == 'win32':
        # Helper evaluated at class-creation time (not a test method).
        def get_file_system(path):
            root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
            import ctypes
            kernel32 = ctypes.windll.kernel32
            buf = ctypes.create_string_buffer("", 100)
            if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
                return buf.value
        if get_file_system(test_support.TESTFN) == "NTFS":
            def test_1565150(self):
                # NTFS stores sub-second mtimes; a .25s value must round-trip.
                t1 = 1159195039.25
                os.utime(self.fname, (t1, t1))
                self.assertEquals(os.stat(self.fname).st_mtime, t1)

        def test_1686475(self):
            # Verify that an open file can be stat'ed
            try:
                os.stat(r"c:\pagefile.sys")
            except WindowsError, e:
                if e.errno == 2: # file does not exist; cannot run test
                    return
                self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
    """check that os.environ object conform to mapping protocol"""
    type2test = None

    def _reference(self):
        # Small fixed mapping used by the inherited protocol tests.
        return {"KEY1": "VALUE1", "KEY2": "VALUE2", "KEY3": "VALUE3"}

    def _empty_mapping(self):
        # os.environ itself serves as the "empty mapping" once cleared.
        os.environ.clear()
        return os.environ

    def setUp(self):
        # Snapshot the real environment so tearDown can restore it.
        self.__saved = dict(os.environ)
        os.environ.clear()

    def tearDown(self):
        os.environ.clear()
        os.environ.update(self.__saved)

    # Bug 1110478
    def test_update2(self):
        if not os.path.exists("/bin/sh"):
            return
        os.environ.update(HELLO="World")
        value = os.popen("/bin/sh -c 'echo $HELLO'").read().strip()
        self.assertEquals(value, "World")
class WalkTests(unittest.TestCase):
    """Tests for os.walk()."""

    def test_traversal(self):
        import os
        from os.path import join
        # Build:
        # TESTFN/
        # TEST1/ a file kid and two directory kids
        # tmp1
        # SUB1/ a file kid and a directory kid
        # tmp2
        # SUB11/ no kids
        # SUB2/ a file kid and a dirsymlink kid
        # tmp3
        # link/ a symlink to TESTFN.2
        # TEST2/
        # tmp4 a lone file
        walk_path = join(test_support.TESTFN, "TEST1")
        sub1_path = join(walk_path, "SUB1")
        sub11_path = join(sub1_path, "SUB11")
        sub2_path = join(walk_path, "SUB2")
        tmp1_path = join(walk_path, "tmp1")
        tmp2_path = join(sub1_path, "tmp2")
        tmp3_path = join(sub2_path, "tmp3")
        link_path = join(sub2_path, "link")
        t2_path = join(test_support.TESTFN, "TEST2")
        tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")
        # Create stuff.
        os.makedirs(sub11_path)
        os.makedirs(sub2_path)
        os.makedirs(t2_path)
        for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
            f = file(path, "w")
            f.write("I'm " + path + " and proud of it. Blame test_os.\n")
            f.close()
        if hasattr(os, "symlink"):
            os.symlink(os.path.abspath(t2_path), link_path)
            sub2_tree = (sub2_path, ["link"], ["tmp3"])
        else:
            # No symlink support: SUB2 has no directory kid.
            sub2_tree = (sub2_path, [], ["tmp3"])

        # Walk top-down.
        all = list(os.walk(walk_path))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped: TESTFN, SUB1, SUB11, SUB2
        # flipped: TESTFN, SUB2, SUB1, SUB11
        flipped = all[0][1][0] != "SUB1"
        all[0][1].sort()
        self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 + flipped], (sub11_path, [], []))
        self.assertEqual(all[3 - 2 * flipped], sub2_tree)

        # Prune the search.
        all = []
        for root, dirs, files in os.walk(walk_path):
            all.append((root, dirs, files))
            # Don't descend into SUB1.
            if 'SUB1' in dirs:
                # Note that this also mutates the dirs we appended to all!
                dirs.remove('SUB1')
        self.assertEqual(len(all), 2)
        self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
        self.assertEqual(all[1], sub2_tree)

        # Walk bottom-up.
        all = list(os.walk(walk_path, topdown=False))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped: SUB11, SUB1, SUB2, TESTFN
        # flipped: SUB2, SUB11, SUB1, TESTFN
        flipped = all[3][1][0] != "SUB1"
        all[3][1].sort()
        self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[flipped], (sub11_path, [], []))
        self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 - 2 * flipped], sub2_tree)

        if hasattr(os, "symlink"):
            # Walk, following symlinks.
            for root, dirs, files in os.walk(walk_path, followlinks=True):
                if root == link_path:
                    self.assertEqual(dirs, [])
                    self.assertEqual(files, ["tmp4"])
                    break
            else:
                self.fail("Didn't follow symlink with followlinks=True")

    def tearDown(self):
        # Tear everything down. This is a decent use for bottom-up on
        # Windows, which doesn't have a recursive delete command. The
        # (not so) subtlety is that rmdir will fail unless the dir's
        # kids are removed first, so bottom up is essential.
        for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                dirname = os.path.join(root, name)
                if not os.path.islink(dirname):
                    os.rmdir(dirname)
                else:
                    os.remove(dirname)
        os.rmdir(test_support.TESTFN)
class MakedirTests(unittest.TestCase):
    """Exercise os.makedirs/os.removedirs on nested paths."""

    def setUp(self):
        os.mkdir(test_support.TESTFN)

    def test_makedir(self):
        base = test_support.TESTFN
        # A brand-new chain of directories must be created in one call...
        path = os.path.join(base, 'dir1', 'dir2', 'dir3')
        os.makedirs(path)  # Should work
        # ...and extending an already-existing chain must work too.
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
        os.makedirs(path)
        # Try paths with a '.' in them
        self.failUnlessRaises(OSError, os.makedirs, os.curdir)
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
        os.makedirs(path)
        path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
                            'dir5', 'dir6')
        os.makedirs(path)

    def tearDown(self):
        path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
                            'dir4', 'dir5', 'dir6')
        # If the tests failed, the bottom-most directory ('../dir6')
        # may not have been created, so we look for the outermost directory
        # that exists.
        while not os.path.exists(path) and path != test_support.TESTFN:
            path = os.path.dirname(path)
        os.removedirs(path)
class DevNullTests(unittest.TestCase):
    """Check that os.devnull swallows writes and always reads back empty."""

    def test_devnull(self):
        # Use open() plus context managers instead of the Python-2-only
        # file() builtin, so the handles are closed deterministically.
        with open(os.devnull, 'w') as f:
            f.write('hello')
        with open(os.devnull, 'r') as f:
            self.assertEqual(f.read(), '')
class URandomTests (unittest.TestCase):
    """Sanity-check os.urandom(): result length must match the request."""
    def test_urandom(self):
        try:
            self.assertEqual(len(os.urandom(1)), 1)
            self.assertEqual(len(os.urandom(10)), 10)
            self.assertEqual(len(os.urandom(100)), 100)
            self.assertEqual(len(os.urandom(1000)), 1000)
            # see http://bugs.python.org/issue3708
            # NOTE(review): the float arguments below rely on implicit
            # truncation; Python 3 rejects non-int sizes with TypeError --
            # confirm the intended interpreter target.
            self.assertEqual(len(os.urandom(0.9)), 0)
            self.assertEqual(len(os.urandom(1.1)), 1)
            self.assertEqual(len(os.urandom(2.0)), 2)
        except NotImplementedError:
            # Platforms without an entropy source skip the test silently.
            pass
class Win32ErrorTests(unittest.TestCase):
    """Each os.* call on a bad path must raise WindowsError.

    The original test_mkdir and test_chmod bodies called the wrong os
    function (copy/paste slips); they now exercise what their names say.
    """
    def test_rename(self):
        self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")

    def test_remove(self):
        self.assertRaises(WindowsError, os.remove, test_support.TESTFN)

    def test_chdir(self):
        self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)

    def test_mkdir(self):
        # Was os.chdir. os.mkdir fails when the target already exists, so
        # create a plain file at TESTFN first, then expect the error.
        f = open(test_support.TESTFN, "w")
        try:
            self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
        finally:
            f.close()
            os.unlink(test_support.TESTFN)

    def test_utime(self):
        self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)

    def test_access(self):
        # NOTE(review): os.access() reports False rather than raising on a
        # missing path, so the historical os.utime probe is kept here --
        # confirm whether this test should be dropped instead.
        self.assertRaises(WindowsError, os.utime, test_support.TESTFN, 0)

    def test_chmod(self):
        # Was os.utime; test_chmod must exercise os.chmod.
        self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
    """os functions given a known-bad file descriptor must raise OSError."""
    # Functions taking only the fd; one test method is generated for each.
    singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
               "fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    #singles.append("close")
    #We omit close because it doesn'r raise an exception on some platforms
    def get_single(f):
        # Factory executed at class-creation time; 'f' is the os function
        # name captured per generated method.
        def helper(self):
            if hasattr(os, f):
                self.check(getattr(os, f))
        return helper
    # Inject one test_<name> method per entry in 'singles'.
    for f in singles:
        locals()["test_"+f] = get_single(f)

    def check(self, f, *args):
        # Common funnel: calling f with a bad fd (plus extra args) must raise.
        self.assertRaises(OSError, f, test_support.make_bad_fd(), *args)

    def test_isatty(self):
        # isatty is special: it returns False instead of raising.
        if hasattr(os, "isatty"):
            self.assertEqual(os.isatty(test_support.make_bad_fd()), False)

    def test_closerange(self):
        # closerange ignores errors on the individual fds and returns None.
        if hasattr(os, "closerange"):
            fd = test_support.make_bad_fd()
            self.assertEqual(os.closerange(fd, fd + 10), None)

    def test_dup2(self):
        if hasattr(os, "dup2"):
            self.check(os.dup2, 20)

    def test_fchmod(self):
        if hasattr(os, "fchmod"):
            self.check(os.fchmod, 0)

    def test_fchown(self):
        if hasattr(os, "fchown"):
            self.check(os.fchown, -1, -1)

    def test_fpathconf(self):
        if hasattr(os, "fpathconf"):
            self.check(os.fpathconf, "PC_NAME_MAX")

    #this is a weird one, it raises IOError unlike the others
    def test_ftruncate(self):
        if hasattr(os, "ftruncate"):
            self.assertRaises(IOError, os.ftruncate, test_support.make_bad_fd(),
                              0)

    def test_lseek(self):
        if hasattr(os, "lseek"):
            self.check(os.lseek, 0, 0)

    def test_read(self):
        if hasattr(os, "read"):
            self.check(os.read, 1)

    def test_tcsetpgrpt(self):
        if hasattr(os, "tcsetpgrp"):
            self.check(os.tcsetpgrp, 0)

    def test_write(self):
        if hasattr(os, "write"):
            self.check(os.write, " ")
# Provide platform-appropriate versions of both suites so test_main() can
# reference them unconditionally: off Windows, the real Win32ErrorTests
# defined above is shadowed by an empty class; on Windows the uid/gid suite
# is empty instead.
if sys.platform != 'win32':
    class Win32ErrorTests(unittest.TestCase):
        # Placeholder: Windows error behavior is not testable here.
        pass

    class PosixUidGidTests(unittest.TestCase):
        # Each setter: non-root callers must get os.error; oversized ids
        # must overflow regardless of privilege.
        if hasattr(os, 'setuid'):
            def test_setuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setuid, 0)
                self.assertRaises(OverflowError, os.setuid, 1<<32)

        if hasattr(os, 'setgid'):
            def test_setgid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setgid, 0)
                self.assertRaises(OverflowError, os.setgid, 1<<32)

        if hasattr(os, 'seteuid'):
            def test_seteuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.seteuid, 0)
                self.assertRaises(OverflowError, os.seteuid, 1<<32)

        if hasattr(os, 'setegid'):
            def test_setegid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setegid, 0)
                self.assertRaises(OverflowError, os.setegid, 1<<32)

        if hasattr(os, 'setreuid'):
            def test_setreuid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setreuid, 0, 0)
                self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
                self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)

        if hasattr(os, 'setregid'):
            def test_setregid(self):
                if os.getuid() != 0:
                    self.assertRaises(os.error, os.setregid, 0, 0)
                self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
                self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
else:
    class PosixUidGidTests(unittest.TestCase):
        # Placeholder on Windows: no POSIX uid/gid calls available.
        pass
def test_main():
    """Run every test class in this module through test_support."""
    suites = (
        FileTests,
        TemporaryFileTests,
        StatAttributeTests,
        EnvironTests,
        WalkTests,
        MakedirTests,
        DevNullTests,
        URandomTests,
        Win32ErrorTests,
        TestInvalidFD,
        PosixUidGidTests,
    )
    test_support.run_unittest(*suites)
# Standard entry point: run the whole suite when executed as a script.
if __name__ == "__main__":
    test_main()
|
lgpl-2.1
|
ashengwang/node-gyp
|
gyp/pylib/gyp/generator/dump_dependency_json.py
|
899
|
2768
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
# Capabilities this generator advertises to the gyp core.
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False

# This generator only dumps the dependency graph, so the usual path/name
# variables just need harmless placeholder values.
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  # Seed default_variables from the generator flags, then fill in 'OS'
  # from the detected flavor.
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))
  flavor = gyp.common.GetFlavor(params)
  if flavor =='win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    # NOTE(review): the two assignments above bind local names that are
    # never read -- presumably intended as module globals; confirm upstream.
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  adjust = flags.get('adjust_static_libraries', False)
  if adjust:
    # Flip the module-level switch that gyp's input stage inspects.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
  """Write dump.json mapping every reachable target to its dependencies."""
  # Map of target -> list of targets it depends on.
  edges = {}
  # Queue of targets to visit.
  targets_to_visit = target_list[:]
  while targets_to_visit:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []
    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  # 'with' guarantees the file is flushed and closed even on error (the
  # original left the handle management manual and used the py2-only print
  # statement; the parenthesized form behaves identically on py2 and py3).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  print('Wrote json to %s.' % filename)
|
mit
|
crobinso/virt-manager
|
virtinst/cloner.py
|
2
|
19014
|
#
# Copyright 2013, 2015 Red Hat, Inc.
# Copyright(c) FUJITSU Limited 2007.
#
# Cloning a virtual machine module.
#
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import re
import os
import libvirt
from . import generatename
from . import progress
from . import xmlutil
from .guest import Guest
from .devices import DeviceInterface
from .devices import DeviceDisk
from .logger import log
from .devices import DeviceChannel
def _replace_vm(conn, name):
    """
    Remove the existing VM with the same name if requested
    """
    try:
        vm = conn.lookupByName(name)
    except libvirt.libvirtError:
        # No VM by that name exists, so there is nothing to replace.
        return

    try:
        log.debug("Explicitly replacing guest '%s'", name)
        running = vm.ID() != -1
        if running:
            log.debug("Destroying guest '%s'", name)
            vm.destroy()

        log.debug("Undefining guest '%s'", name)
        vm.undefine()
    except libvirt.libvirtError as e:  # pragma: no cover
        fields = {"vm": name, "error": str(e)}
        msg = _("Could not remove old vm '%(vm)s': %(error)s") % fields
        raise RuntimeError(msg) from None
def _generate_clone_name(conn, basename):
    """
    If the orig name is "foo-clone", we don't want the clone to be
    "foo-clone-clone", we want "foo-clone1"
    """
    start_num = 1
    force_num = False
    suffix_match = re.search(r"-clone[1-9]*$", basename)
    if suffix_match:
        # Strip an existing "-cloneN" suffix and continue its numbering.
        force_num = True
        digits = re.search("[1-9]+$", suffix_match.group())
        if digits:
            start_num = int(str(digits.group())) + 1
        basename = basename[:suffix_match.start()]

    def name_taken(candidate):
        return generatename.check_libvirt_collision(
            conn.lookupByName, candidate)

    return generatename.generate_name(basename + "-clone", name_taken,
            sep="", start_num=start_num, force_num=force_num)
def _generate_clone_disk_path(conn, origname, newname, origpath):
    """
    Generate desired cloned disk path name, derived from the
    original path, original VM name, and proposed new VM name
    """
    if origpath is None:
        return None

    # Try to split the suffix off the existing disk name. Ex.
    # foobar.img -> foobar-clone.img
    #
    # If the suffix is greater than 7 characters, assume it isn't
    # a file extension and is part of the disk name, at which point
    # just stick '-clone' on the end.
    path = origpath
    suffix = ""
    if "." in origpath:
        stem, ext = origpath.rsplit(".", 1)
        if len(ext) <= 7:
            path = stem
            suffix = "." + ext

    dirname = os.path.dirname(path)
    basename = os.path.basename(path)
    clonebase = basename + "-clone"
    if origname and basename == origname:
        # The disk is named after the VM, so name the copy after the new VM.
        clonebase = newname
    clonebase = os.path.join(dirname, clonebase)

    def path_taken(candidate):
        return DeviceDisk.path_definitely_exists(conn, candidate)

    return generatename.generate_name(clonebase, path_taken, suffix=suffix)
def _lookup_vm(conn, name):
    """Resolve a domain by name, translating libvirt's error to ValueError."""
    try:
        return conn.lookupByName(name)
    except libvirt.libvirtError:
        msg = _("Domain '%s' was not found.") % str(name)
        raise ValueError(msg) from None
def _build_clone_vol_install(orig_disk, new_disk):
    # We set a stub size for initial creation
    # set_input_vol will overwrite it
    stub_size = .000001
    vol_install = DeviceDisk.build_vol_install(
        orig_disk.conn, os.path.basename(new_disk.get_source_path()),
        new_disk.get_parent_pool(), stub_size, False)
    vol_install.set_input_vol(orig_disk.get_vol_object())
    return vol_install
def _build_clone_disk(orig_disk, clonepath, allow_create, sparse):
    """
    Build the DeviceDisk the clone VM will use, modeled on orig_disk.

    :param orig_disk: source VM's DeviceDisk
    :param clonepath: target path for the new disk; falsy -> empty cdrom
    :param allow_create: False means the path must already exist (preserve)
    :param sparse: when False, new volume allocation is set to full capacity
    """
    conn = orig_disk.conn
    device = DeviceDisk.DEVICE_DISK
    if not clonepath:
        # No path given: model the clone as an empty cdrom device.
        device = DeviceDisk.DEVICE_CDROM

    new_disk = DeviceDisk(conn)
    new_disk.set_source_path(clonepath)
    new_disk.device = device

    if not allow_create:
        # Preserve mode: only validate the existing destination.
        new_disk.validate()
        return new_disk

    if new_disk.get_vol_object():
        # Special case: non remote cloning of a guest using
        # managed block devices: fall back to local cloning if
        # we have permissions to do so. This validation check
        # caused a few bug reports in a short period of time,
        # so must be a common case.
        if (conn.is_remote() or
            new_disk.type != new_disk.TYPE_BLOCK or
            not orig_disk.get_source_path() or
            not os.access(orig_disk.get_source_path(), os.R_OK) or
            not new_disk.get_source_path() or
            not os.access(new_disk.get_source_path(), os.W_OK)):
            raise RuntimeError(
                _("Clone onto existing storage volume is not "
                  "currently supported: '%s'") % new_disk.get_source_path())

    if (orig_disk.get_vol_object() and
        new_disk.wants_storage_creation()):
        # Clone via libvirt managed-storage volume creation.
        vol_install = _build_clone_vol_install(orig_disk, new_disk)
        if not sparse:
            vol_install.allocation = vol_install.capacity
        new_disk.set_vol_install(vol_install)
    elif orig_disk.get_source_path():
        # Plain local file-to-file copy.
        new_disk.set_local_disk_to_clone(orig_disk, sparse)

    new_disk.validate()
    return new_disk
def _get_cloneable_msg(disk):
    """
    If the disk storage is not cloneable, return a string explaining why
    """
    if disk.wants_storage_creation():
        # Source media doesn't exist, so there is nothing to copy from.
        return _("Disk path '%s' does not exist.") % disk.get_source_path()

    if disk.type == "network":
        proto = disk.source.protocol
        if proto not in ["rbd"]:
            return _("Disk network type '%s' is not cloneable.") % proto

        disk.set_backend_for_existing_path()
        if not disk.get_vol_object():
            return _("Cloning disk network type '%s' requires "
                     "managed storage.") % proto
        else:
            # This case, rbd with managed storage, is implementable. It
            # requires open coding a bunch of work in cloner, or reworking
            # other disk code to add unique URIs for rbd volumes and pools
            return _("Cloning rbd volumes is not yet supported.")
def _get_shareable_msg(disk):
    """Return why the disk XML may be reused unchanged by the clone,
    or None when the storage must actually be copied."""
    if disk.is_empty():
        return _("No storage to clone.")
    if disk.read_only:
        return _("Read Only")
    if disk.shareable or disk.transient_shareBacking:
        return _("Marked as shareable")
    return None
class _CloneDiskInfo:
    """
    Class that tracks some additional information about how we want
    to default handle each disk of the source VM
    For any source disk there's 3 main scenarios:
    * clone: Copy contents from src to dst. If dst path doesn't
    exist we attempt to create it. If it exists we overwrite it
    * preserve: Destination path is an existing, and no copying is performed.
    * share: Original disk XML is used unchanged for the new disk
    """
    _ACTION_SHARE = 1
    _ACTION_CLONE = 2
    _ACTION_PRESERVE = 3

    def __init__(self, srcdisk):
        # Work on a parsed copy of the source disk's XML, not the original.
        self.disk = DeviceDisk(srcdisk.conn, parsexml=srcdisk.get_xml())
        self.disk.set_backend_for_existing_path()
        self.new_disk = None
        self._share_msg = _get_shareable_msg(self.disk)
        # -1 is a "not yet computed" sentinel; see get_cloneable_msg().
        self._cloneable_msg = -1
        self._newpath_msg = None
        self._action = None
        self.set_clone_requested()
        if self.get_share_msg():
            # Shareable disks default to sharing instead of copying.
            self.set_share_requested()

    def is_clone_requested(self):
        return self._action in [self._ACTION_CLONE]
    def is_share_requested(self):
        return self._action in [self._ACTION_SHARE]
    def is_preserve_requested(self):
        return self._action in [self._ACTION_PRESERVE]

    def _set_action(self, action):
        if action != self._action:
            self._action = action
    def set_clone_requested(self):
        self._set_action(self._ACTION_CLONE)
    def set_share_requested(self):
        self._set_action(self._ACTION_SHARE)
    def set_preserve_requested(self):
        self._set_action(self._ACTION_PRESERVE)

    def set_new_path(self, path, sparse):
        allow_create = not self.is_preserve_requested()
        if allow_create:
            msg = self.get_cloneable_msg()
            if msg:
                # Storage isn't cloneable: leave new_disk unset.
                return
        try:
            self.new_disk = Cloner.build_clone_disk(
                self.disk, path, allow_create, sparse)
        except Exception as e:
            # Remember the failure message for get_newpath_msg()/raise_error().
            log.debug("Error setting clone path.", exc_info=True)
            err = (_("Could not use path '%(path)s' for cloning: %(error)s") %
                   {"path": path, "error": str(e)})
            self._newpath_msg = err

    def get_share_msg(self):
        return self._share_msg
    def get_cloneable_msg(self):
        # Computed lazily on first request, then cached.
        if self._cloneable_msg == -1:
            self._cloneable_msg = _get_cloneable_msg(self.disk)
        return self._cloneable_msg
    def get_newpath_msg(self):
        return self._newpath_msg

    def raise_error(self):
        if self.is_clone_requested() and self.get_cloneable_msg():
            msg = self.get_cloneable_msg()
            # NOTE(review): the '%' substitution happens inside _() here,
            # unlike the other messages -- likely unintended for translation.
            err = _("Could not determine original disk information: %s" % msg)
            raise ValueError(err)
        if self.is_share_requested():
            return
        if self.get_newpath_msg():
            msg = self.get_newpath_msg()
            raise ValueError(msg)
class Cloner(object):
    """
    Drives cloning of an existing (shutoff) VM: builds new guest XML,
    tracks per-disk clone decisions via _CloneDiskInfo, and performs
    the storage copy + domain define in start_duplicate().
    """
    @staticmethod
    def generate_clone_name(conn, basename):
        # Thin wrapper over the module-level helper
        return _generate_clone_name(conn, basename)
    @staticmethod
    def generate_clone_disk_path(conn, origname, newname, origpath):
        # Thin wrapper over the module-level helper
        return _generate_clone_disk_path(conn, origname, newname, origpath)
    @staticmethod
    def build_clone_disk(orig_disk, clonepath, allow_create, sparse):
        # Thin wrapper over the module-level helper
        return _build_clone_disk(orig_disk, clonepath, allow_create, sparse)
    def __init__(self, conn, src_name=None, src_xml=None):
        self.conn = conn
        self._src_guest = None
        self._new_guest = None
        self._diskinfos = []
        self._nvram_diskinfo = None
        # Parses the source VM and populates the fields above
        self._init_src(src_name, src_xml)
        self._new_nvram_path = None
        self._sparse = True
        self._replace = False
        self._reflink = False
    #################
    # Init routines #
    #################
    def _init_src(self, src_name, src_xml):
        """
        Set up the source VM info we are cloning, from passed in VM name
        or full XML
        """
        if not src_xml:
            dom = _lookup_vm(self.conn, src_name)
            status = dom.info()[0]
            # Cloning a running VM would copy inconsistent disk state
            if status not in [libvirt.VIR_DOMAIN_SHUTOFF]:
                raise RuntimeError(_("Domain to clone must be shutoff."))
            # SECURE flag includes data (e.g. passwords) stripped by default
            flags = libvirt.VIR_DOMAIN_XML_SECURE
            src_xml = dom.XMLDesc(flags)
        log.debug("Original XML:\n%s", src_xml)
        self._src_guest = Guest(self.conn, parsexml=src_xml)
        self._new_guest = Guest(self.conn, parsexml=src_xml)
        self._init_new_guest()
        # Collect disk info for every disk to determine if we will
        # default to cloning or not
        for disk in self._src_guest.devices.disk:
            self._diskinfos.append(_CloneDiskInfo(disk))
        for diskinfo in [d for d in self._diskinfos if d.is_clone_requested()]:
            disk = diskinfo.disk
            log.debug("Wants cloning: size=%s path=%s",
                    disk.get_size(), disk.get_source_path())
        # nvram is tracked separately from regular disks; it lives in
        # <os> XML, not in <devices>
        if self._src_guest.os.nvram:
            old_nvram = DeviceDisk(self.conn)
            old_nvram.set_source_path(self._new_guest.os.nvram)
            self._nvram_diskinfo = _CloneDiskInfo(old_nvram)
    def _init_new_guest(self):
        """
        Perform the series of unconditional new VM changes we always make
        """
        self._new_guest.id = None
        self._new_guest.title = None
        # Clear then regenerate, so the clone never shares the source UUID
        self._new_guest.uuid = None
        self._new_guest.uuid = Guest.generate_uuid(self.conn)
        for dev in self._new_guest.devices.graphics:
            if dev.port and dev.port != -1:
                log.warning(_("Setting the graphics device port to autoport, "
                               "in order to avoid conflicting."))
                dev.port = -1
        for iface in self._new_guest.devices.interface:
            # New MAC per clone; target_dev is regenerated by libvirt
            iface.target_dev = None
            iface.macaddr = DeviceInterface.generate_mac(self.conn)
        # For guest agent channel, remove a path to generate a new one with
        # new guest name
        for channel in self._new_guest.devices.channel:
            if (channel.type == DeviceChannel.TYPE_UNIX and
                channel.target_name and channel.source.path and
                channel.target_name in channel.source.path):
                channel.source.path = None
        new_name = Cloner.generate_clone_name(self.conn, self.src_name)
        log.debug("Auto-generated clone name '%s'", new_name)
        self.set_clone_name(new_name)
    ##############
    # Properties #
    ##############
    @property
    def src_name(self):
        """
        The name of the original VM we are cloning
        """
        return self._src_guest.name
    @property
    def new_guest(self):
        """
        The Guest instance of the new XML we will create
        """
        return self._new_guest
    @property
    def nvram_diskinfo(self):
        # _CloneDiskInfo for the nvram file, or None
        return self._nvram_diskinfo
    def set_clone_name(self, name):
        self._new_guest.name = name
    def set_clone_uuid(self, uuid):
        """
        Override the new VMs generated UUId
        """
        self._new_guest.uuid = uuid
    def set_replace(self, val):
        """
        If True, don't check for clone name collision, simply undefine
        any conflicting guest.
        """
        self._replace = bool(val)
    def set_reflink(self, reflink):
        """
        If true, use COW lightweight copy
        """
        self._reflink = reflink
    def set_sparse(self, flg):
        """
        If True, attempt sparse allocation during cloning
        """
        self._sparse = flg
    def get_diskinfos(self):
        """
        Return the list of _CloneDiskInfo instances
        """
        return self._diskinfos[:]
    def get_nonshare_diskinfos(self):
        """
        Return a list of _CloneDiskInfo that are tagged for cloning
        """
        return [di for di in self.get_diskinfos() if
                not di.is_share_requested()]
    def set_nvram_path(self, val):
        """
        If the VM needs to have nvram content cloned, this overrides the
        destination path
        """
        self._new_nvram_path = val
    ######################
    # Functional methods #
    ######################
    def _prepare_nvram(self):
        # Decide whether nvram content is copied or left for libvirt to create
        if not self._nvram_diskinfo:
            return
        new_nvram_path = self._new_nvram_path
        if new_nvram_path is None:
            # Default: sibling of the old nvram file, named after the clone
            nvram_dir = os.path.dirname(self._new_guest.os.nvram)
            new_nvram_path = os.path.join(
                    nvram_dir, "%s_VARS.fd" % self._new_guest.name)
        diskinfo = self._nvram_diskinfo
        new_nvram = DeviceDisk(self.conn)
        new_nvram.set_source_path(new_nvram_path)
        old_nvram = DeviceDisk(self.conn)
        old_nvram.set_source_path(diskinfo.disk.get_source_path())
        if (diskinfo.is_clone_requested() and
            new_nvram.wants_storage_creation() and
            diskinfo.disk.get_vol_object()):
            # We only run validation if there's some existing nvram we
            # can copy. It's valid for nvram to not exist at VM define
            # time, libvirt will create it for us
            diskinfo.set_new_path(new_nvram_path, self._sparse)
            diskinfo.raise_error()
            diskinfo.new_disk.get_vol_install().reflink = self._reflink
        else:
            # There's no action to perform for this case, so drop it
            self._nvram_diskinfo = None
        self._new_guest.os.nvram = new_nvram.get_source_path()
    def prepare(self):
        """
        Validate and set up all parameters needed for the new (clone) VM
        """
        try:
            Guest.validate_name(self.conn, self._new_guest.name,
                                check_collision=not self._replace,
                                validate=False)
        except ValueError as e:
            msg = _("Invalid name for new guest: %s") % e
            raise ValueError(msg) from None
        for diskinfo in self.get_nonshare_diskinfos():
            orig_disk = diskinfo.disk
            if not diskinfo.new_disk:
                # User didn't set a path, generate one
                newpath = Cloner.generate_clone_disk_path(
                        self.conn, self.src_name,
                        self.new_guest.name,
                        orig_disk.get_source_path())
                diskinfo.set_new_path(newpath, self._sparse)
                if not diskinfo.new_disk:
                    # We hit an error, clients will raise it later
                    continue
            new_disk = diskinfo.new_disk
            assert new_disk
            log.debug("Cloning srcpath=%s dstpath=%s",
                orig_disk.get_source_path(), new_disk.get_source_path())
            if self._reflink:
                vol_install = new_disk.get_vol_install()
                vol_install.reflink = self._reflink
            # Find the matching disk in the clone XML by target (e.g. vda)
            for disk in self._new_guest.devices.disk:
                if disk.target == orig_disk.target:
                    xmldisk = disk
            # Change the XML
            xmldisk.set_source_path(None)
            xmldisk.type = new_disk.type
            xmldisk.driver_name = orig_disk.driver_name
            xmldisk.driver_type = orig_disk.driver_type
            xmldisk.set_source_path(new_disk.get_source_path())
        self._prepare_nvram()
        # Save altered clone xml
        diff = xmlutil.diff(self._src_guest.get_xml(),
                self._new_guest.get_xml())
        log.debug("Clone guest xml diff:\n%s", diff)
    def start_duplicate(self, meter=None):
        """
        Actually perform the duplication: cloning disks if needed and defining
        the new clone xml.
        """
        log.debug("Starting duplicate.")
        meter = progress.ensure_meter(meter)
        dom = None
        try:
            # Replace orig VM if required
            if self._replace:
                _replace_vm(self.conn, self._new_guest.name)
            # Define domain early to catch any xml errors before duping storage
            dom = self.conn.defineXML(self._new_guest.get_xml())
            diskinfos = self.get_diskinfos()
            if self._nvram_diskinfo:
                diskinfos.append(self._nvram_diskinfo)
            for diskinfo in diskinfos:
                if not diskinfo.is_clone_requested():
                    continue
                # Performs the actual (potentially long) data copy
                diskinfo.new_disk.build_storage(meter)
        except Exception as e:
            log.debug("Duplicate failed: %s", str(e))
            # Roll back the domain definition so we don't leave a
            # half-cloned VM behind
            if dom:
                dom.undefine()
            raise
        log.debug("Duplicating finished.")
|
gpl-2.0
|
aronnem/IMProToo
|
examples/batch_makeQuicklooks.py
|
1
|
6396
|
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011,2012 Maximilian Maahn, IGMK (mmaahn@meteo.uni-koeln.de)
make quicklooks from IMProToo NetCDF files.
use: python batch_makeQuicklooks.py pathIn pathOut site
requires:
numpy, matplotlib, netcdf4-python or python-netcdf
'''
import sys
import numpy as np
import glob
import calendar
import datetime
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc,ticker
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import random
import string
from copy import deepcopy
import IMProToo
from IMProToo.tools import *
try:
import netCDF4 as nc
pyNc = True
except:
import Scientific.IO.NetCDF as nc
pyNc = False
tmpDir="/tmp/"  # scratch directory used when uncompressing .gz input files
skipExisting = True  # if True, dates with an existing quicklook image are skipped
def unix2timestamp(unix):
    """Format a unix time (seconds since epoch, UTC) as a YYYYMMDD string."""
    day = datetime.datetime.utcfromtimestamp(unix)
    return day.strftime("%Y%m%d")
def timestamp2unix(timestamp):
    """Convert a YYYYMMDD string to unix seconds at 00:00:00 UTC of that day."""
    day_start = datetime.datetime(
        year=int(timestamp[0:4]),
        month=int(timestamp[4:6]),
        day=int(timestamp[6:8]),
        hour=0, minute=0, second=0)
    return calendar.timegm(day_start.timetuple())
def quicklook(site,ncFile,imgFile,imgTitle):
    """
    Makes Quicklooks of MRR data

    Plots 5 stacked panels (Ze, W, spectral width, mean noise, quality
    flags) for one day of IMProToo output and saves them to imgFile.

    @parameter site (str): code for the site where the data was recorded (usually 3 letter)
    @parameter ncFile (str): netcdf file name incl. path, usually "path/mrr_site_yyyymmdd.nc"
    @parameter imgFile (str): image file name, incl. path, extensions determines file format (e.g. png, eps, pdf ...)
    @parameter imgTitle (str): plot title
    """
    print "##### " + imgTitle + "######"
    tmpFile = False
    # gzipped input is uncompressed to a uniquely-named temp file first
    if ncFile.split(".")[-1]=="gz":
        tmpFile = True
        gzFile = deepcopy(ncFile)
        ncFile = tmpDir+"/maxLibs_netcdf_"+''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))+".tmp.nc"
        print 'uncompressing', gzFile, "->",ncFile
        os.system("zcat "+gzFile+">"+ncFile)
    else:
        print 'opening', ncFile
    # netCDF4 vs Scientific.IO backend chosen at import time (pyNc flag)
    if pyNc: ncData = nc.Dataset(ncFile,'r')
    else: ncData = nc.NetCDFFile(ncFile,'r')
    timestampsNew = ncData.variables["time"][:]
    HNew = ncData.variables["height"][:]
    ZeNew = ncData.variables["Ze"][:]
    noiseAveNew = ncData.variables["etaNoiseAve"][:]
    noiseStdNew = ncData.variables["etaNoiseStd"][:]
    spectralWidthNew = ncData.variables["spectralWidth"][:]
    WNew = ncData.variables["W"][:]
    qualityNew = ncData.variables["quality"][:]
    ncData.close()
    if (tmpFile):
        # remove the uncompressed temp copy again
        os.system("rm -f "+ncFile)
    # x axis always spans the full UTC day of the first timestamp
    date = unix2timestamp(timestampsNew[0])
    starttime = timestamp2unix(date)
    endtime = starttime+60*60*24
    # -9999 marks missing heights so contourf doesn't choke on NaN
    HNew[np.isnan(HNew)] = -9999
    ylim = [np.min(HNew[HNew!=-9999]),np.max(HNew)]
    xlim = [starttime,endtime]
    # broadcast the 1D time vector to the 2D shape of the data fields
    timestampsNew = oneD2twoD(timestampsNew,ZeNew.shape[1],1)
    fig=plt.figure(figsize=(10, 13))
    # panel 1: effective reflectivity Ze
    sp1 = fig.add_subplot(511)
    sp1.set_title(imgTitle)
    levels = np.arange(-15,40,0.1)
    plotCF = sp1.contourf(timestampsNew,HNew, ZeNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
    cbZe=plt.colorbar(plotCF)
    cbZe.set_label('MRR Ze [dBz]')
    sp1.set_ylim(ylim)
    sp1.set_xlim(xlim)
    # horizontal guide lines at range gates 2 and 29 (edges of trusted gates)
    sp1.axhline(HNew[-1,2])
    sp1.axhline(HNew[-1,29])
    # panel 2: Doppler velocity W
    sp2 = fig.add_subplot(512)
    levels = np.arange(-10,18,0.1)
    plotCF = sp2.contourf(timestampsNew,HNew, WNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
    cbZe=plt.colorbar(plotCF)
    cbZe.set_label('MRR W [m/s]')
    sp2.set_ylim(ylim)
    sp2.set_xlim(xlim)
    sp2.axhline(HNew[-1,2])
    sp2.axhline(HNew[-1,29])
    # panel 3: spectral width
    sp3 = fig.add_subplot(513)
    levels = np.arange(0,1.5,0.1)
    plotCF = sp3.contourf(timestampsNew,HNew, spectralWidthNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
    cbZe=plt.colorbar(plotCF)
    cbZe.set_label('spectralWidth [m/s]')
    sp3.set_ylim(ylim)
    sp3.set_xlim(xlim)
    sp3.axhline(HNew[-1,2])
    sp3.axhline(HNew[-1,29])
    # panel 4: mean spectral noise
    sp4 = fig.add_subplot(514)
    levels = np.arange(1e-10,1e-8,2e-10)
    plotCF = sp4.contourf(timestampsNew,HNew, noiseAveNew, levels,cmap=plt.get_cmap("spectral"), extend="both")#
    cbZe=plt.colorbar(plotCF)
    cbZe.set_label('mean spectral noise [1/m]')
    sp4.set_ylim(ylim)
    sp4.set_xlim(xlim)
    sp4.axhline(HNew[-1,2])
    sp4.axhline(HNew[-1,29])
    #import pdb;pdb.set_trace()
    # panel 5: quality bit field, so contour levels are powers of two
    sp5 = fig.add_subplot(515)
    levels = np.arange(20)
    for i in levels:
        levels[i] = 2**i
    plotCF = sp5.contourf(timestampsNew,HNew, qualityNew, levels,cmap=plt.get_cmap("spectral"), norm = matplotlib.colors.LogNorm())#
    cbZe=plt.colorbar(plotCF)
    cbZe.set_label('quality array')
    sp5.set_ylim(ylim)
    sp5.set_xlim(xlim)
    sp5.axhline(HNew[-1,2])
    sp5.axhline(HNew[-1,29])
    # 2-hourly ticks; only the bottom panel gets labels
    #sp1.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
    sp1.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
    sp1.set_xticklabels([])
    #sp2.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
    sp2.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
    sp2.set_xticklabels([])
    #sp3.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
    sp3.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
    sp3.set_xticklabels([])
    #sp4.set_xlim(np.min(timestampsNew),np.max(timestampsNew))
    sp4.set_xticks(np.arange(sp1.get_xlim()[0],sp1.get_xlim()[1],7200))
    sp4.set_xticklabels([])
    #pdb.set_trace()
    #sp5.set_xlim(np.min(timestampsNew)-60,np.max(timestampsNew))
    sp5.set_xticks(np.arange(sp5.get_xlim()[0],sp5.get_xlim()[1]+7200,7200))
    niceDates = list()
    for timestamp in np.arange(sp5.get_xlim()[0],sp5.get_xlim()[1]+7200,7200):
        niceDates.append(str(datetime.datetime.utcfromtimestamp(timestamp).strftime("%H:%M")))
    sp5.set_xticklabels(niceDates)
    plt.subplots_adjust(hspace=0.02,left=0.085,right=0.78)
    plt.savefig(imgFile)
    print(imgFile)
    plt.close()
    return
# Command line entry point: batch_makeQuicklooks.py pathIn pathOut site
if len(sys.argv) < 4:
    print 'use: batch_makeQuicklooks.py pathIn pathOut site'
    sys.exit()
pathIn = sys.argv[1]
pathOut = sys.argv[2]
site = sys.argv[3]
# create the output directory, ignoring "already exists"
try: os.mkdir(pathOut)
except OSError: pass
for ncFile in np.sort(glob.glob(pathIn+"/*")):
    #import pdb;pdb.set_trace()
    # file names are expected to end in _YYYYMMDD.<ext>
    date = ncFile.split("_")[-1].split(".")[0]
    print date, ncFile
    imgFile = pathOut + "/mrr_improtoo_"+IMProToo.__version__+'_'+site+"_"+date+".png"
    imgTitle = site + " " + date + " IMProToo " + IMProToo.__version__
    if skipExisting and os.path.isfile(imgFile):
        print "Quicklook aready exists, skipping: ", date, ncFile, imgFile
        continue
    quicklook(site,ncFile,imgFile,imgTitle)
|
gpl-3.0
|
sherazkasi/SabreSoftware
|
Lib/site-packages/numpy/core/code_generators/numpy_api.py
|
54
|
13599
|
"""Here we define the exported functions, types, etc... which need to be
exported through a global C pointer.
Each dictionary contains name -> index pair.
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
needs to be updated.
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
exception, so it should hopefully not get unnoticed).
"""
# Exported global variables: name -> slot index in the PyArray_API array.
# Do NOT renumber: each index is part of the ABI.
multiarray_global_vars = {
    'XXX6': 6,
}
# C type of each exported global above (used when generating the headers)
multiarray_global_vars_types = {
    'XXX6': 'int',
}
# Exported scalar bool singletons (see the #define comments further below)
multiarray_scalar_bool_values = {
    '_PyArrayScalar_BoolValues': 8
}
# Exported type objects: name -> slot index in the PyArray_API array.
# Do NOT renumber: each index is part of the ABI.
multiarray_types_api = {
    'PyArray_Type': 1,
    'PyArrayDescr_Type': 2,
    'PyArrayFlags_Type': 3,
    'PyArrayIter_Type': 4,
    'PyArrayMultiIter_Type': 5,
    'PyBoolArrType_Type': 7,
    'PyGenericArrType_Type': 9,
    'PyNumberArrType_Type': 10,
    'PyIntegerArrType_Type': 11,
    'PySignedIntegerArrType_Type': 12,
    'PyUnsignedIntegerArrType_Type': 13,
    'PyInexactArrType_Type': 14,
    'PyFloatingArrType_Type': 15,
    'PyComplexFloatingArrType_Type': 16,
    'PyFlexibleArrType_Type': 17,
    'PyCharacterArrType_Type': 18,
    'PyByteArrType_Type': 19,
    'PyShortArrType_Type': 20,
    'PyIntArrType_Type': 21,
    'PyLongArrType_Type': 22,
    'PyLongLongArrType_Type': 23,
    'PyUByteArrType_Type': 24,
    'PyUShortArrType_Type': 25,
    'PyUIntArrType_Type': 26,
    'PyULongArrType_Type': 27,
    'PyULongLongArrType_Type': 28,
    'PyFloatArrType_Type': 29,
    'PyDoubleArrType_Type': 30,
    'PyLongDoubleArrType_Type': 31,
    'PyCFloatArrType_Type': 32,
    'PyCDoubleArrType_Type': 33,
    'PyCLongDoubleArrType_Type': 34,
    'PyObjectArrType_Type': 35,
    'PyStringArrType_Type': 36,
    'PyUnicodeArrType_Type': 37,
    'PyVoidArrType_Type': 38,
    'PyTimeIntegerArrType_Type': 39,
    'PyDatetimeArrType_Type': 40,
    'PyTimedeltaArrType_Type': 41,
}
#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])
#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7])
#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8])
# Exported C functions: name -> slot index in the PyArray_API array.
# Indices 1-41 are used by the type objects above, so functions resume at 42.
# Do NOT renumber: each index is part of the ABI.
multiarray_funcs_api = {
    'PyArray_GetNDArrayCVersion': 0,
    'PyArray_SetNumericOps': 42,
    'PyArray_GetNumericOps': 43,
    'PyArray_INCREF': 44,
    'PyArray_XDECREF': 45,
    'PyArray_SetStringFunction': 46,
    'PyArray_DescrFromType': 47,
    'PyArray_TypeObjectFromType': 48,
    'PyArray_Zero': 49,
    'PyArray_One': 50,
    'PyArray_CastToType': 51,
    'PyArray_CastTo': 52,
    'PyArray_CastAnyTo': 53,
    'PyArray_CanCastSafely': 54,
    'PyArray_CanCastTo': 55,
    'PyArray_ObjectType': 56,
    'PyArray_DescrFromObject': 57,
    'PyArray_ConvertToCommonType': 58,
    'PyArray_DescrFromScalar': 59,
    'PyArray_DescrFromTypeObject': 60,
    'PyArray_Size': 61,
    'PyArray_Scalar': 62,
    'PyArray_FromScalar': 63,
    'PyArray_ScalarAsCtype': 64,
    'PyArray_CastScalarToCtype': 65,
    'PyArray_CastScalarDirect': 66,
    'PyArray_ScalarFromObject': 67,
    'PyArray_GetCastFunc': 68,
    'PyArray_FromDims': 69,
    'PyArray_FromDimsAndDataAndDescr': 70,
    'PyArray_FromAny': 71,
    'PyArray_EnsureArray': 72,
    'PyArray_EnsureAnyArray': 73,
    'PyArray_FromFile': 74,
    'PyArray_FromString': 75,
    'PyArray_FromBuffer': 76,
    'PyArray_FromIter': 77,
    'PyArray_Return': 78,
    'PyArray_GetField': 79,
    'PyArray_SetField': 80,
    'PyArray_Byteswap': 81,
    'PyArray_Resize': 82,
    'PyArray_MoveInto': 83,
    'PyArray_CopyInto': 84,
    'PyArray_CopyAnyInto': 85,
    'PyArray_CopyObject': 86,
    'PyArray_NewCopy': 87,
    'PyArray_ToList': 88,
    'PyArray_ToString': 89,
    'PyArray_ToFile': 90,
    'PyArray_Dump': 91,
    'PyArray_Dumps': 92,
    'PyArray_ValidType': 93,
    'PyArray_UpdateFlags': 94,
    'PyArray_New': 95,
    'PyArray_NewFromDescr': 96,
    'PyArray_DescrNew': 97,
    'PyArray_DescrNewFromType': 98,
    'PyArray_GetPriority': 99,
    'PyArray_IterNew': 100,
    'PyArray_MultiIterNew': 101,
    'PyArray_PyIntAsInt': 102,
    'PyArray_PyIntAsIntp': 103,
    'PyArray_Broadcast': 104,
    'PyArray_FillObjectArray': 105,
    'PyArray_FillWithScalar': 106,
    'PyArray_CheckStrides': 107,
    'PyArray_DescrNewByteorder': 108,
    'PyArray_IterAllButAxis': 109,
    'PyArray_CheckFromAny': 110,
    'PyArray_FromArray': 111,
    'PyArray_FromInterface': 112,
    'PyArray_FromStructInterface': 113,
    'PyArray_FromArrayAttr': 114,
    'PyArray_ScalarKind': 115,
    'PyArray_CanCoerceScalar': 116,
    'PyArray_NewFlagsObject': 117,
    'PyArray_CanCastScalar': 118,
    'PyArray_CompareUCS4': 119,
    'PyArray_RemoveSmallest': 120,
    'PyArray_ElementStrides': 121,
    'PyArray_Item_INCREF': 122,
    'PyArray_Item_XDECREF': 123,
    'PyArray_FieldNames': 124,
    'PyArray_Transpose': 125,
    'PyArray_TakeFrom': 126,
    'PyArray_PutTo': 127,
    'PyArray_PutMask': 128,
    'PyArray_Repeat': 129,
    'PyArray_Choose': 130,
    'PyArray_Sort': 131,
    'PyArray_ArgSort': 132,
    'PyArray_SearchSorted': 133,
    'PyArray_ArgMax': 134,
    'PyArray_ArgMin': 135,
    'PyArray_Reshape': 136,
    'PyArray_Newshape': 137,
    'PyArray_Squeeze': 138,
    'PyArray_View': 139,
    'PyArray_SwapAxes': 140,
    'PyArray_Max': 141,
    'PyArray_Min': 142,
    'PyArray_Ptp': 143,
    'PyArray_Mean': 144,
    'PyArray_Trace': 145,
    'PyArray_Diagonal': 146,
    'PyArray_Clip': 147,
    'PyArray_Conjugate': 148,
    'PyArray_Nonzero': 149,
    'PyArray_Std': 150,
    'PyArray_Sum': 151,
    'PyArray_CumSum': 152,
    'PyArray_Prod': 153,
    'PyArray_CumProd': 154,
    'PyArray_All': 155,
    'PyArray_Any': 156,
    'PyArray_Compress': 157,
    'PyArray_Flatten': 158,
    'PyArray_Ravel': 159,
    'PyArray_MultiplyList': 160,
    'PyArray_MultiplyIntList': 161,
    'PyArray_GetPtr': 162,
    'PyArray_CompareLists': 163,
    'PyArray_AsCArray': 164,
    'PyArray_As1D': 165,
    'PyArray_As2D': 166,
    'PyArray_Free': 167,
    'PyArray_Converter': 168,
    'PyArray_IntpFromSequence': 169,
    'PyArray_Concatenate': 170,
    'PyArray_InnerProduct': 171,
    'PyArray_MatrixProduct': 172,
    'PyArray_CopyAndTranspose': 173,
    'PyArray_Correlate': 174,
    'PyArray_TypestrConvert': 175,
    'PyArray_DescrConverter': 176,
    'PyArray_DescrConverter2': 177,
    'PyArray_IntpConverter': 178,
    'PyArray_BufferConverter': 179,
    'PyArray_AxisConverter': 180,
    'PyArray_BoolConverter': 181,
    'PyArray_ByteorderConverter': 182,
    'PyArray_OrderConverter': 183,
    'PyArray_EquivTypes': 184,
    'PyArray_Zeros': 185,
    'PyArray_Empty': 186,
    'PyArray_Where': 187,
    'PyArray_Arange': 188,
    'PyArray_ArangeObj': 189,
    'PyArray_SortkindConverter': 190,
    'PyArray_LexSort': 191,
    'PyArray_Round': 192,
    'PyArray_EquivTypenums': 193,
    'PyArray_RegisterDataType': 194,
    'PyArray_RegisterCastFunc': 195,
    'PyArray_RegisterCanCast': 196,
    'PyArray_InitArrFuncs': 197,
    'PyArray_IntTupleFromIntp': 198,
    'PyArray_TypeNumFromName': 199,
    'PyArray_ClipmodeConverter': 200,
    'PyArray_OutputConverter': 201,
    'PyArray_BroadcastToShape': 202,
    '_PyArray_SigintHandler': 203,
    '_PyArray_GetSigintBuf': 204,
    'PyArray_DescrAlignConverter': 205,
    'PyArray_DescrAlignConverter2': 206,
    'PyArray_SearchsideConverter': 207,
    'PyArray_CheckAxis': 208,
    'PyArray_OverflowMultiplyList': 209,
    'PyArray_CompareString': 210,
    'PyArray_MultiIterFromObjects': 211,
    'PyArray_GetEndianness': 212,
    'PyArray_GetNDArrayCFeatureVersion': 213,
    'PyArray_Correlate2': 214,
    'PyArray_NeighborhoodIterNew': 215,
    'PyArray_SetDatetimeParseFunction': 216,
    'PyArray_DatetimeToDatetimeStruct': 217,
    'PyArray_TimedeltaToTimedeltaStruct': 218,
    'PyArray_DatetimeStructToDatetime': 219,
    'PyArray_TimedeltaStructToTimedelta': 220,
}
# Exported ufunc type objects: name -> slot index in the PyUFunc_API array.
# Do NOT renumber: each index is part of the ABI.
ufunc_types_api = {
    'PyUFunc_Type': 0
}
# Exported ufunc C functions: name -> slot index in the PyUFunc_API array.
ufunc_funcs_api = {
    'PyUFunc_FromFuncAndData': 1,
    'PyUFunc_RegisterLoopForType': 2,
    'PyUFunc_GenericFunction': 3,
    'PyUFunc_f_f_As_d_d': 4,
    'PyUFunc_d_d': 5,
    'PyUFunc_f_f': 6,
    'PyUFunc_g_g': 7,
    'PyUFunc_F_F_As_D_D': 8,
    'PyUFunc_F_F': 9,
    'PyUFunc_D_D': 10,
    'PyUFunc_G_G': 11,
    'PyUFunc_O_O': 12,
    'PyUFunc_ff_f_As_dd_d': 13,
    'PyUFunc_ff_f': 14,
    'PyUFunc_dd_d': 15,
    'PyUFunc_gg_g': 16,
    'PyUFunc_FF_F_As_DD_D': 17,
    'PyUFunc_DD_D': 18,
    'PyUFunc_FF_F': 19,
    'PyUFunc_GG_G': 20,
    'PyUFunc_OO_O': 21,
    'PyUFunc_O_O_method': 22,
    'PyUFunc_OO_O_method': 23,
    'PyUFunc_On_Om': 24,
    'PyUFunc_GetPyValues': 25,
    'PyUFunc_checkfperr': 26,
    'PyUFunc_clearfperr': 27,
    'PyUFunc_getfperr': 28,
    'PyUFunc_handlefperr': 29,
    'PyUFunc_ReplaceLoopBySignature': 30,
    'PyUFunc_FromFuncAndDataAndSignature': 31,
    'PyUFunc_SetUsesArraysAsData': 32,
}
# List of all the dicts which define the C API
# XXX: DO NOT CHANGE THE ORDER OF TUPLES BELOW !
# The generators iterate these in order to lay out the API tables.
multiarray_api = (
    multiarray_global_vars,
    multiarray_global_vars_types,
    multiarray_scalar_bool_values,
    multiarray_types_api,
    multiarray_funcs_api,
)
ufunc_api = (
    ufunc_funcs_api,
    ufunc_types_api
)
# Concatenation of both API table groups, in the canonical order
full_api = multiarray_api + ufunc_api
|
gpl-3.0
|
woggle/mesos-old
|
third_party/boto-2.0b2/boto/cloudfront/distribution.py
|
4
|
19098
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.s3.acl import ACL
class DistributionConfig:
    """
    Configuration of a CloudFront distribution: origin, CNAMEs,
    logging, trusted signers, etc. Can serialize itself to the
    CloudFront XML wire format and repopulate itself from a SAX parse.
    """
    def __init__(self, connection=None, origin='', enabled=False,
                 caller_reference='', cnames=None, comment='',
                 origin_access_identity=None, trusted_signers=None,
                 default_root_object=None):
        self.connection = connection
        self.origin = origin
        self.enabled = enabled
        # caller_reference must be unique per create request; generate
        # one if the caller didn't supply it
        if caller_reference:
            self.caller_reference = caller_reference
        else:
            self.caller_reference = str(uuid.uuid4())
        self.cnames = []
        if cnames:
            self.cnames = cnames
        self.comment = comment
        self.origin_access_identity = origin_access_identity
        self.trusted_signers = trusted_signers
        self.logging = None
        self.default_root_object = default_root_object
    def get_oai_value(self):
        """Return the origin access identity as its URI string form."""
        if isinstance(self.origin_access_identity, OriginAccessIdentity):
            return self.origin_access_identity.uri()
        else:
            # assumed to already be a plain string identity
            return self.origin_access_identity
    def to_xml(self):
        """Serialize this config to the CloudFront DistributionConfig XML."""
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
        s += '  <Origin>%s</Origin>\n' % self.origin
        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
        for cname in self.cnames:
            s += '  <CNAME>%s</CNAME>\n' % cname
        if self.comment:
            s += '  <Comment>%s</Comment>\n' % self.comment
        s += '  <Enabled>'
        if self.enabled:
            s += 'true'
        else:
            s += 'false'
        s += '</Enabled>\n'
        if self.origin_access_identity:
            val = self.get_oai_value()
            s += '<OriginAccessIdentity>%s</OriginAccessIdentity>\n' % val
        if self.trusted_signers:
            s += '<TrustedSigners>\n'
            for signer in self.trusted_signers:
                if signer == 'Self':
                    s += '  <Self></Self>\n'
                else:
                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
            s += '</TrustedSigners>\n'
        if self.logging:
            s += '<Logging>\n'
            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
            s += '</Logging>\n'
        if self.default_root_object:
            dro = self.default_root_object
            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
        s += '</DistributionConfig>\n'
        return s
    def startElement(self, name, attrs, connection):
        # SAX hook: return a child handler object for container elements
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        elif name == 'Logging':
            self.logging = LoggingInfo()
            return self.logging
        else:
            return None
    def endElement(self, name, value, connection):
        # SAX hook: store the text value of a leaf element
        if name == 'CNAME':
            self.cnames.append(value)
        elif name == 'Origin':
            self.origin = value
        elif name == 'Comment':
            self.comment = value
        elif name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'CallerReference':
            self.caller_reference = value
        elif name == 'OriginAccessIdentity':
            self.origin_access_identity = value
        elif name == 'DefaultRootObject':
            self.default_root_object = value
        else:
            # unrecognized elements are stored verbatim as attributes
            setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
    """
    Configuration for a streaming (RTMP) distribution. Reuses the
    regular DistributionConfig fields but serializes to the streaming
    XML schema and does not parse any container sub-elements.
    """
    def __init__(self, connection=None, origin='', enabled=False,
                 caller_reference='', cnames=None, comment=''):
        DistributionConfig.__init__(self, connection, origin,
                                    enabled, caller_reference,
                                    cnames, comment)
    def to_xml(self):
        """Serialize this config to the StreamingDistributionConfig XML."""
        pieces = ['<?xml version="1.0" encoding="UTF-8"?>\n']
        pieces.append('<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-12-01/">\n')
        pieces.append('  <Origin>%s</Origin>\n' % self.origin)
        pieces.append('  <CallerReference>%s</CallerReference>\n' % self.caller_reference)
        for cname in self.cnames:
            pieces.append('  <CNAME>%s</CNAME>\n' % cname)
        if self.comment:
            pieces.append('  <Comment>%s</Comment>\n' % self.comment)
        pieces.append('  <Enabled>%s</Enabled>\n' % ('true' if self.enabled else 'false'))
        pieces.append('</StreamingDistributionConfig>\n')
        return ''.join(pieces)
    def startElement(self, name, attrs, connection):
        # Streaming configs have no container sub-elements to hand off
        pass
class DistributionSummary:
    """
    Lightweight summary entry as returned by the CloudFront
    list-distributions call; get_distribution() fetches the full object.
    """
    def __init__(self, connection=None, domain_name='', id='',
                 last_modified_time=None, status='', origin='',
                 cname='', comment='', enabled=False):
        self.connection = connection
        self.domain_name = domain_name
        self.id = id
        self.last_modified_time = last_modified_time
        self.status = status
        self.origin = origin
        self.enabled = enabled
        self.cnames = []
        if cname:
            self.cnames.append(cname)
        self.comment = comment
        self.trusted_signers = None
        self.etag = None
        # set to True by endElement when parsing a streaming summary
        self.streaming = False
    def startElement(self, name, attrs, connection):
        # SAX hook: hand TrustedSigners off to its own handler object
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
            return self.trusted_signers
        return None
    def endElement(self, name, value, connection):
        # SAX hook: store the text value of a leaf element
        if name == 'Id':
            self.id = value
        elif name == 'Status':
            self.status = value
        elif name == 'LastModifiedTime':
            self.last_modified_time = value
        elif name == 'DomainName':
            self.domain_name = value
        elif name == 'Origin':
            self.origin = value
        elif name == 'CNAME':
            self.cnames.append(value)
        elif name == 'Comment':
            self.comment = value
        elif name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'StreamingDistributionSummary':
            self.streaming = True
        else:
            # unrecognized elements are stored verbatim as attributes
            setattr(self, name, value)
    def get_distribution(self):
        """Fetch the full Distribution this summary describes."""
        return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
    # Same summary fields as DistributionSummary; only the expansion
    # call differs.
    def get_distribution(self):
        """Fetch the full StreamingDistribution this summary describes."""
        return self.connection.get_streaming_distribution_info(self.id)
class Distribution:
    def __init__(self, connection=None, config=None, domain_name='',
                 id='', last_modified_time=None, status=''):
        self.connection = connection
        # DistributionConfig instance (populated by startElement on parse)
        self.config = config
        self.domain_name = domain_name
        self.id = id
        self.last_modified_time = last_modified_time
        self.status = status
        self.active_signers = None
        # etag is required by CloudFront for conditional config updates
        self.etag = None
        # lazily-resolved origin bucket cache
        self._bucket = None
        self._object_class = Object
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None,
origin_access_identity=None,
trusted_signers=None,
default_root_object=None):
"""
Update the configuration of the Distribution.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
:type origin_access_identity: :class:`boto.cloudfront.identity.OriginAccessIdentity`
:param origin_access_identity: The CloudFront origin access identity
associated with the distribution. This
must be provided if you want the
distribution to serve private content.
:type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigner`
:param trusted_signers: The AWS users who are authorized to sign
URL's for private content in this Distribution.
:type default_root_object: str
:param default_root_object: An option field that specifies a default
root object for the distribution (e.g. index.html)
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.origin_access_identity,
self.config.trusted_signers,
self.config.default_root_object)
if enabled != None:
new_config.enabled = enabled
if cnames != None:
new_config.cnames = cnames
if comment != None:
new_config.comment = comment
if origin_access_identity != None:
new_config.origin_access_identity = origin_access_identity
if trusted_signers:
new_config.trusted_signers = trusted_signers
if default_root_object:
new_config.default_root_object = default_root_object
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if not self._bucket:
bucket_name = self.config.origin.split('.')[0]
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
:param enabled: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if self.config.origin_access_identity:
id = self.config.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin_access_identity:
self.set_permissions(object, replace)
return object
class StreamingDistribution(Distribution):
    """A CloudFront streaming (RTMP) distribution.

    Behaves like Distribution but parses/writes the streaming variants
    of the config objects and uses StreamingObject for bucket keys.
    """
    def __init__(self, connection=None, config=None, domain_name='',
                 id='', last_modified_time=None, status=''):
        Distribution.__init__(self, connection, config, domain_name,
                              id, last_modified_time, status)
        self._object_class = StreamingObject
    def startElement(self, name, attrs, connection):
        """SAX hook: delegate the streaming config element to its parser."""
        if name == 'StreamingDistributionConfig':
            self.config = StreamingDistributionConfig()
            return self.config
        else:
            return None
    def update(self, enabled=None, cnames=None, comment=None):
        """
        Update the configuration of the StreamingDistribution.

        Only the parameters given a value are changed; everything else
        is carried over from the current configuration.

        :type enabled: bool
        :param enabled: Whether the Distribution is active or not.

        :type cnames: list of str
        :param cnames: The DNS CNAME's associated with this
                       Distribution.  Maximum of 10 values.

        :type comment: str or unicode
        :param comment: The comment associated with the Distribution.
        """
        new_config = StreamingDistributionConfig(self.connection,
                                                 self.config.origin,
                                                 self.config.enabled,
                                                 self.config.caller_reference,
                                                 self.config.cnames,
                                                 self.config.comment)
        # Identity tests (is not None) replace the non-idiomatic '!= None'.
        if enabled is not None:
            new_config.enabled = enabled
        if cnames is not None:
            new_config.cnames = cnames
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_streaming_distribution_config(self.id,
                                                                      self.etag,
                                                                      new_config)
        self.config = new_config
    def delete(self):
        """Delete this streaming distribution (origin content is kept)."""
        self.connection.delete_streaming_distribution(self.id, self.etag)
|
apache-2.0
|
xkollar/spacewalk
|
backend/server/repomd/view.py
|
10
|
14048
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import re
# XML declaration emitted at the top of every generated metadata document.
XML_ENCODING = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
class RepoView(object):
    """Renders the top-level ``repomd.xml`` index of a yum repository.

    primary/filelists/other/updateinfo are dicts carrying
    'gzip_checksum', 'open_checksum' and 'timestamp' entries for the
    corresponding metadata file; groups may be None when the channel
    has no comps data.  Derives from object for consistency with the
    other view classes in this module.
    """
    def __init__(self, primary, filelists, other, updateinfo, groups, fileobj,
                 checksum_type):
        self.primary = primary
        self.filelists = filelists
        self.other = other
        self.updateinfo = updateinfo
        self.groups = groups
        self.fileobj = fileobj
        # Historical repomd files label SHA-1 digests as plain "sha".
        if checksum_type == 'sha1':
            self.checksum_type = 'sha'
        else:
            self.checksum_type = checksum_type
    def _get_data(self, data_type, data_obj):
        """Render one <data> entry for a gzipped metadata file."""
        output = []
        output.append("  <data type=\"%s\">" % (data_type))
        output.append("    <location href=\"repodata/%s.xml.gz\"/>"
            % (data_type))
        output.append("    <checksum type=\"%s\">%s</checksum>"
            % (self.checksum_type, data_obj['gzip_checksum']))
        output.append("    <timestamp>%d</timestamp>" % (data_obj['timestamp']))
        output.append("    <open-checksum type=\"%s\">%s</open-checksum>"
            % (self.checksum_type, data_obj['open_checksum']))
        output.append("  </data>")
        return output
    def _get_comps_data(self):
        """Render the <data> entry for the uncompressed comps file, if any."""
        output = []
        if self.groups:
            output.append("  <data type=\"group\">")
            output.append("    <location href=\"repodata/comps.xml\"/>")
            output.append("    <checksum type=\"%s\">%s</checksum>"
                % (self.checksum_type, self.groups['open_checksum']))
            output.append("    <timestamp>%d</timestamp>"
                % (self.groups['timestamp']))
            output.append("  </data>")
        return output
    def write_repomd(self):
        """Write the complete repomd.xml document to self.fileobj."""
        output = []
        output.append(XML_ENCODING)
        output.append("<repomd xmlns=\"http://linux.duke.edu/metadata/repo\">")
        output.extend(self._get_data('primary', self.primary))
        output.extend(self._get_data('filelists', self.filelists))
        output.extend(self._get_data('other', self.other))
        output.extend(self._get_data('updateinfo', self.updateinfo))
        output.extend(self._get_comps_data())
        output.append("</repomd>")
        self.fileobj.write('\n'.join(output))
class PrimaryView(object):
    """Renders the ``primary.xml`` metadata document for a channel.

    Streams one <package> element per package, including dependency
    sections (provides/requires/...) and a filtered file list.
    """
    def __init__(self, channel, fileobj):
        self.channel = channel
        self.fileobj = fileobj
    def _get_deps(self, deps):
        """Render a list of dependency dicts as <rpm:entry> lines.

        Versioned deps (dep['flag'] set) get flags/epoch/version and an
        optional release attribute; unversioned deps get the name only.
        """
        output = []
        for dep in deps:
            if dep['flag']:
                # The backslash continues the string literal itself, so the
                # attribute list stays on one logical output line.
                line = "        <rpm:entry name=\"%s\" flags=\"%s\" \
epoch=\"%s\" ver=\"%s\" " % (dep['name'], dep['flag'],
                    dep['epoch'], dep['version'])
                if dep['release']:
                    line += "rel=\"%s\" " % dep['release']
                line += "/>"
                output.append(line)
            else:
                output.append("        <rpm:entry name=\"%s\" />"
                    % (text_filter(dep['name'])))
        return output
    def _get_files(self, files):
        """Render the subset of package files yum clients search for.

        Only binaries, /etc config files and the sendmail path are kept,
        matching what yum expects in primary.xml (the full list goes to
        filelists.xml).
        """
        # NOTE(review): '\.sendmail' matches a literal dot
        # ('/usr/lib.sendmail'); this looks like it should be
        # '\/sendmail' ('/usr/lib/sendmail') -- verify upstream.
        filere = re.compile('.*bin\/.*|^\/etc\/.*|^\/usr\/lib\.sendmail$')
        output = []
        for pkg_file in files:
            if filere.match(pkg_file):
                output.append("    <file>%s</file>"
                    % (text_filter(pkg_file)))
        return output
    def _get_package(self, package):
        """Render one complete <package> element as a list of lines."""
        output = []
        output.append("  <package type=\"rpm\">")
        output.append("    <name>%s</name>" % (package.name))
        output.append("    <arch>%s</arch>" % (package.arch))
        output.append("    <version epoch=\"%s\" ver=\"%s\" rel=\"%s\" />"
            % (package.epoch, package.version, package.release))
        output.append("    <checksum type=\"%s\" pkgid=\"YES\">%s</checksum>"
            % (package.checksum_type, package.checksum))
        output.append("    <summary>%s</summary>"
            % (text_filter(package.summary)))
        output.append("    <description>%s</description>"
            % (text_filter(package.description)))
        # Packager and url are intentionally left empty.
        output.append("    <packager></packager>")
        output.append("    <url></url>")
        # Both time attributes are filled from build_time.
        output.append("    <time file=\"%d\" build=\"%d\" />"
            % (package.build_time, package.build_time))
        output.append("    <size package=\"%d\" installed=\"%d\" "
            "archive=\"%d\" />"
            % (package.package_size, package.installed_size,
            package.payload_size))
        output.append("    <location href=\"getPackage/%s\" />"
            % (package.filename))
        output.append("    <format>")
        output.append("      <rpm:license>%s</rpm:license>"
            % (text_filter(package.copyright)))
        output.append("      <rpm:vendor>%s</rpm:vendor>"
            % (text_filter(package.vendor)))
        output.append("      <rpm:group>%s</rpm:group>"
            % (text_filter(package.package_group)))
        output.append("      <rpm:buildhost>%s</rpm:buildhost>"
            % (text_filter(package.build_host)))
        output.append("      <rpm:sourcerpm>%s</rpm:sourcerpm>"
            % (text_filter(package.source_rpm)))
        output.append("      <rpm:header-range start=\"%d\" end=\"%d\" />"
            % (package.header_start, package.header_end))
        output.append("      <rpm:provides>")
        output.extend(self._get_deps(package.provides))
        output.append("      </rpm:provides>")
        output.append("      <rpm:requires>")
        output.extend(self._get_deps(package.requires))
        output.append("      </rpm:requires>")
        output.append("      <rpm:recommends>")
        output.extend(self._get_deps(package.recommends))
        output.append("      </rpm:recommends>")
        output.append("      <rpm:suggests>")
        output.extend(self._get_deps(package.suggests))
        output.append("      </rpm:suggests>")
        output.append("      <rpm:supplements>")
        output.extend(self._get_deps(package.supplements))
        output.append("      </rpm:supplements>")
        output.append("      <rpm:enhances>")
        output.extend(self._get_deps(package.enhances))
        output.append("      </rpm:enhances>")
        output.append("      <rpm:conflicts>")
        output.extend(self._get_deps(package.conflicts))
        output.append("      </rpm:conflicts>")
        output.append("      <rpm:obsoletes>")
        output.extend(self._get_deps(package.obsoletes))
        output.append("      </rpm:obsoletes>")
        output.extend(self._get_files(package.files))
        output.append("    </format>")
        output.append("  </package>")
        return output
    def write_start(self):
        """Write the XML prolog and the opening <metadata> element."""
        output = XML_ENCODING + "\n" + \
            "<metadata xmlns=\"http://linux.duke.edu/metadata/common\" " + \
            "xmlns:rpm=\"http://linux.duke.edu/metadata/rpm\" " + \
            "packages=\"%d\">" % self.channel.num_packages
        self.fileobj.write(output)
    def write_package(self, package):
        """Serialize a single package entry to the output file."""
        self.fileobj.write('\n'.join(self._get_package(package)))
    def write_end(self):
        """Close the <metadata> document."""
        self.fileobj.write("</metadata>")
class FilelistsView(object):
    """Renders the ``filelists.xml`` metadata document for a channel."""
    def __init__(self, channel, fileobj):
        self.channel = channel
        self.fileobj = fileobj
    def _get_package(self, package):
        """Render one <package> element with its full file list."""
        header = [
            "  <package pkgid=\"%s\" name=\"%s\" arch=\"%s\">"
            % (package.checksum, package.name, package.arch),
            "    <version epoch=\"%s\" ver=\"%s\" rel=\"%s\" />"
            % (package.epoch, package.version, package.release),
        ]
        files = ["    <file>%s</file>" % (text_filter(file_name))
                 for file_name in package.files]
        return header + files + ["  </package>"]
    def write_start(self):
        """Write the XML prolog and the opening <filelists> element."""
        parts = [
            XML_ENCODING,
            "\n",
            "<filelists xmlns=\"http://linux.duke.edu/metadata/filelists\" ",
            "packages=\"%d\">" % self.channel.num_packages,
        ]
        self.fileobj.write("".join(parts))
    def write_package(self, package):
        """Serialize a single package entry to the output file."""
        self.fileobj.write('\n'.join(self._get_package(package)))
    def write_end(self):
        """Close the <filelists> document."""
        self.fileobj.write("</filelists>")
class OtherView(object):
    """Renders the ``other.xml`` (changelog) metadata document."""
    def __init__(self, channel, fileobj):
        self.channel = channel
        self.fileobj = fileobj
    def _get_package(self, package):
        """Render one <package> element with its changelog entries."""
        lines = ["  <package pkgid=\"%s\" name=\"%s\" arch=\"%s\">"
            % (package.checksum, package.name, package.arch)]
        lines.append("    <version epoch=\"%s\" ver=\"%s\" rel=\"%s\" />"
            % (package.epoch, package.version, package.release))
        for entry in package.changelog:
            # Author goes into an attribute, so it needs attribute escaping.
            lines.append("    <changelog author=\"%s\" date=\"%d\">"
                % (text_filter_attribute(entry['author']),
                entry['date']))
            lines.append("    " + text_filter(entry['text']))
            lines.append("    </changelog>")
        lines.append("  </package>")
        return lines
    def write_start(self):
        """Write the XML prolog and the opening <otherdata> element."""
        parts = [
            XML_ENCODING,
            "\n",
            "<otherdata xmlns=\"http://linux.duke.edu/metadata/other\" ",
            "packages=\"%d\">" % self.channel.num_packages,
        ]
        self.fileobj.write("".join(parts))
    def write_package(self, package):
        """Serialize a single package entry to the output file."""
        self.fileobj.write('\n'.join(self._get_package(package)))
    def write_end(self):
        """Close the <otherdata> document."""
        self.fileobj.write("</otherdata>")
class UpdateinfoView(object):
    """Renders the ``updateinfo.xml`` (errata) metadata document."""
    def __init__(self, channel, fileobj):
        self.channel = channel
        self.fileobj = fileobj
    def _get_references(self, erratum):
        """Render CVE and Bugzilla references for one erratum."""
        output = []
        output.append("    <references>")
        ref_string = "      <reference href=\"%s%s\" id=\"%s\" type=\"%s\">"
        for cve_ref in erratum.cve_references:
            output.append(ref_string
                % ("http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=",
                cve_ref, cve_ref, "cve"))
            output.append("      </reference>")
        for bz_ref in erratum.bz_references:
            output.append(ref_string
                % ("http://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=",
                bz_ref['bug_id'], bz_ref['bug_id'], "bugzilla"))
            output.append("        " + text_filter(bz_ref['summary']))
            output.append("      </reference>")
        output.append("    </references>")
        return output
    def _get_packages(self, erratum):
        """Render the package collection for one erratum."""
        output = []
        output.append("    <pkglist>")
        output.append("      <collection short=\"%s\">"
            % text_filter_attribute(self.channel.label))
        output.append("        <name>%s</name>"
            % text_filter(self.channel.name))
        for package in erratum.packages:
            output.append("        <package name=\"%s\" version=\"%s\" "
                "release=\"%s\" epoch=\"%s\" arch=\"%s\" src=\"%s\">"
                % (package.name, package.version, package.release,
                package.epoch, package.arch, text_filter(package.source_rpm)))
            output.append("          <filename>%s</filename>"
                % text_filter(package.filename))
            output.append("          <sum type=\"%s\">%s</sum>"
                % (package.checksum_type, package.checksum))
            output.append("        </package>")
        output.append("      </collection>")
        output.append("    </pkglist>")
        return output
    def _get_erratum(self, erratum):
        """Render one complete <update> element as a list of lines."""
        output = []
        output.append("  <update from=\"security@redhat.com\" " +
            "status=\"final\" type=\"%s\" version=\"%s\">"
            % (erratum.advisory_type, erratum.version))
        output.append("    <id>%s</id>" % erratum.readable_id)
        output.append("    <title>%s</title>" % text_filter(erratum.title))
        output.append("    <issued date=\"%s\"/>" % erratum.issued)
        output.append("    <updated date=\"%s\"/>" % erratum.updated)
        # Bug fix: the format string was "%s\n\n\%s" -- the stray
        # backslash before the second %s injected a literal '\' into
        # every description.
        output.append("    <description>%s</description>"
            % text_filter("%s\n\n%s" % (erratum.synopsis, erratum.description)))
        output.extend(self._get_references(erratum))
        output.extend(self._get_packages(erratum))
        output.append("  </update>")
        return output
    def write_updateinfo(self):
        """Write the complete updateinfo.xml document to self.fileobj."""
        output = XML_ENCODING + "\n" + "<updates>\n"
        self.fileobj.write(output)
        for erratum in self.channel.errata:
            self.fileobj.write('\n'.join(self._get_erratum(erratum)))
        self.fileobj.write("\n</updates>")
class CompsView(object):
    """Provides read access to a channel's comps (package groups) file."""
    def __init__(self, comps):
        self.comps = comps
    def get_file(self):
        """Open and return the comps file object; the caller must close it."""
        return open(self.comps.filename)
def text_filter(text):
    """Escape XML-special characters (&, <, >) in element text.

    The ampersand must be replaced first so the entities produced for
    '<' and '>' are not themselves re-escaped.  As checked in, the
    replacements were identity no-ops ('&' -> '&', '<' -> '<', ...),
    which left the generated XML malformed; the entity references were
    evidently lost to HTML-entity decoding and are restored here.
    """
    # do & first
    s = text.replace('&', '&amp;')
    s = s.replace('<', '&lt;')
    s = s.replace('>', '&gt;')
    return s
def text_filter_attribute(text):
    """Escape text for use inside a double-quoted XML attribute value."""
    s = text_filter(text)
    s = s.replace('"', '&quot;')
    return s
|
gpl-2.0
|
hopeall/odoo
|
addons/l10n_et/__openerp__.py
|
387
|
1968
|
#-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Ethiopia - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
Base Module for Ethiopian Localization
======================================
This is the latest Ethiopian OpenERP localization and consists of:
- Chart of Accounts
- VAT tax structure
- Withholding tax structure
- Regional State listings
""",
'author':'Michael Telahun Makonnen <mmakonnen@gmail.com>',
'website':'http://miketelahun.wordpress.com',
'depends': [
'base_vat',
'account_chart',
],
'init_xml': [
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account.chart.template.csv',
'data/account.tax.template.csv',
'data/res.country.state.csv',
],
'data': [
'l10n_et_wizard.xml',
],
'test': [
],
'demo': [
],
'installable': True,
'active': False,
}
|
agpl-3.0
|
zerebubuth/mapnik
|
scons/scons-local-2.5.0/SCons/Job.py
|
3
|
16096
|
"""SCons.Job
This module defines the Serial and Parallel classes that execute tasks to
complete a build. The Jobs class provides a higher level interface to start,
stop, and wait on jobs.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Job.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.compat
import os
import signal
import SCons.Errors
# The default stack size (in kilobytes) of the threads used to execute
# jobs in parallel.
#
# We use a stack size of 256 kilobytes. The default on some platforms
# is too large and prevents us from creating enough threads to fully
# parallelized the build. For example, the default stack size on linux
# is 8 MBytes.
# Stack size (KB) requested explicitly by the user; None means "use
# default_stack_size" (see the rationale in the comment block above).
explicit_stack_size = None
# Fallback worker-thread stack size in kilobytes.
default_stack_size = 256
# Error string attached to BuildErrors raised when a signal stops the build.
interrupt_msg = 'Build interrupted.'
class InterruptState(object):
    """A shared, settable flag recording that the build was interrupted.

    One instance is handed both to the signal handler (which calls
    set()) and to the job runners (which poll it by calling the
    instance itself).
    """
    def __init__(self):
        self.interrupted = False
    def __call__(self):
        """Report whether set() has been called."""
        return self.interrupted
    def set(self):
        """Mark the build as interrupted."""
        self.interrupted = True
class Jobs(object):
    """An instance of this class initializes N jobs, and provides
    methods for starting, stopping, and waiting on all N jobs.
    """
    def __init__(self, num, taskmaster):
        """
        Create 'num' jobs using the given taskmaster.

        If 'num' is 1 or less, then a serial job will be used,
        otherwise a parallel job with 'num' worker threads will
        be used.

        The 'num_jobs' attribute will be set to the actual number of jobs
        allocated.  If more than one job is requested but the Parallel
        class can't do it, it gets reset to 1.  Wrapping interfaces that
        care should check the value of 'num_jobs' after initialization.
        """
        self.job = None
        if num > 1:
            stack_size = explicit_stack_size
            if stack_size is None:
                stack_size = default_stack_size
            try:
                self.job = Parallel(taskmaster, num, stack_size)
                self.num_jobs = num
            except NameError:
                # Parallel only exists when the threading modules imported
                # successfully (see the try/except around them below);
                # fall back to a serial build.
                pass
        if self.job is None:
            self.job = Serial(taskmaster)
            self.num_jobs = 1
    def run(self, postfunc=lambda: None):
        """Run the jobs.

        postfunc() will be invoked after the jobs has run. It will be
        invoked even if the jobs are interrupted by a keyboard
        interrupt (well, in fact by a signal such as either SIGINT,
        SIGTERM or SIGHUP). The execution of postfunc() is protected
        against keyboard interrupts and is guaranteed to run to
        completion."""
        self._setup_sig_handler()
        try:
            self.job.start()
        finally:
            # postfunc() runs before the handlers are restored, so it
            # cannot itself be aborted by one of the handled signals.
            postfunc()
            self._reset_sig_handler()
    def were_interrupted(self):
        """Returns whether the jobs were interrupted by a signal."""
        return self.job.interrupted()
    def _setup_sig_handler(self):
        """Setup an interrupt handler so that SCons can shutdown cleanly in
        various conditions:

          a) SIGINT: Keyboard interrupt
          b) SIGTERM: kill or system shutdown
          c) SIGHUP: Controlling shell exiting

        We handle all of these cases by stopping the taskmaster. It
        turns out that it's very difficult to stop the build process
        by throwing asynchronously an exception such as
        KeyboardInterrupt. For example, the python Condition
        variables (threading.Condition) and queues do not seem to be
        asynchronous-exception-safe. It would require adding a whole
        bunch of try/finally block and except KeyboardInterrupt all
        over the place.

        Note also that we have to be careful to handle the case when
        SCons forks before executing another process. In that case, we
        want the child to exit immediately.
        """
        # parentpid is bound at definition time (default-argument
        # binding) so a forked child can recognize itself below.
        def handler(signum, stack, self=self, parentpid=os.getpid()):
            if os.getpid() == parentpid:
                self.job.taskmaster.stop()
                self.job.interrupted.set()
            else:
                # Forked child: bail out immediately without cleanup.
                os._exit(2)
        self.old_sigint = signal.signal(signal.SIGINT, handler)
        self.old_sigterm = signal.signal(signal.SIGTERM, handler)
        try:
            self.old_sighup = signal.signal(signal.SIGHUP, handler)
        except AttributeError:
            # SIGHUP doesn't exist on this platform (e.g. Windows).
            pass
    def _reset_sig_handler(self):
        """Restore the signal handlers to their previous state (before the
         call to _setup_sig_handler()."""
        signal.signal(signal.SIGINT, self.old_sigint)
        signal.signal(signal.SIGTERM, self.old_sigterm)
        try:
            signal.signal(signal.SIGHUP, self.old_sighup)
        except AttributeError:
            # SIGHUP was never installed above on this platform.
            pass
class Serial(object):
    """This class is used to execute tasks in series, and is more efficient
    than Parallel, but is only appropriate for non-parallel builds. Only
    one instance of this class should be in existence at a time.

    This class is not thread safe.
    """
    def __init__(self, taskmaster):
        """Create a new serial job given a taskmaster.

        The taskmaster's next_task() method should return the next task
        that needs to be executed, or None if there are no more tasks. The
        taskmaster's executed() method will be called for each task when it
        is successfully executed, or failed() will be called if it failed to
        execute (e.g. execute() raised an exception)."""
        self.taskmaster = taskmaster
        self.interrupted = InterruptState()
    def start(self):
        """Start the job. This will begin pulling tasks from the taskmaster
        and executing them, and return when there are no more tasks. If a task
        fails to execute (i.e. execute() raises an exception), then the job will
        stop."""
        while True:
            task = self.taskmaster.next_task()
            if task is None:
                break
            try:
                task.prepare()
                if task.needs_execute():
                    task.execute()
            # Deliberately bare: must also catch KeyboardInterrupt /
            # SystemExit so a signal-aborted task is recorded as failed.
            except:
                if self.interrupted():
                    # Convert the interruption into a BuildError and
                    # capture it via exception_set() for later reporting.
                    try:
                        raise SCons.Errors.BuildError(
                            task.targets[0], errstr=interrupt_msg)
                    except:
                        task.exception_set()
                else:
                    task.exception_set()
                # Let the failed() callback function arrange for the
                # build to stop if that's appropriate.
                task.failed()
            else:
                task.executed()
            # postprocess() runs for both the success and failure paths.
            task.postprocess()
        self.taskmaster.cleanup()
# Trap import failure so that everything in the Job module but the
# Parallel class (and its dependent classes) will work if the interpreter
# doesn't support threads.
try:
import queue
import threading
except ImportError:
pass
else:
    class Worker(threading.Thread):
        """A worker thread waits on a task to be posted to its request queue,
        dequeues the task, executes it, and posts a tuple including the task
        and a boolean indicating whether the task executed successfully. """
        def __init__(self, requestQueue, resultsQueue, interrupted):
            threading.Thread.__init__(self)
            # Daemon thread: don't keep the interpreter alive on exit.
            self.setDaemon(1)
            self.requestQueue = requestQueue
            self.resultsQueue = resultsQueue
            self.interrupted = interrupted
            # The thread starts running immediately upon construction.
            self.start()
        def run(self):
            """Main worker loop: execute tasks until the sentinel arrives."""
            while True:
                task = self.requestQueue.get()
                if task is None:
                    # The "None" value is used as a sentinel by
                    # ThreadPool.cleanup().  This indicates that there
                    # are no more tasks, so we should quit.
                    break
                try:
                    if self.interrupted():
                        raise SCons.Errors.BuildError(
                            task.targets[0], errstr=interrupt_msg)
                    task.execute()
                # Deliberately bare: any failure (including interrupts)
                # is recorded on the task and reported as not-ok.
                except:
                    task.exception_set()
                    ok = False
                else:
                    ok = True
                self.resultsQueue.put((task, ok))
    class ThreadPool(object):
        """This class is responsible for spawning and managing worker threads."""
        def __init__(self, num, stack_size, interrupted):
            """Create the request and reply queues, and 'num' worker threads.

            One must specify the stack size of the worker threads. The
            stack size is specified in kilobytes.
            """
            self.requestQueue = queue.Queue(0)
            self.resultsQueue = queue.Queue(0)
            try:
                prev_size = threading.stack_size(stack_size*1024)
            except AttributeError, e:
                # Only print a warning if the stack size has been
                # explicitly set.
                # NOTE(review): SCons.Warnings is referenced here but is
                # not among this module's visible imports -- presumably
                # made available via the SCons package; verify.
                if not explicit_stack_size is None:
                    msg = "Setting stack size is unsupported by this version of Python:\n    " + \
                        e.args[0]
                    SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
            except ValueError, e:
                msg = "Setting stack size failed:\n    " + str(e)
                SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
            # Create worker threads
            self.workers = []
            for _ in range(num):
                worker = Worker(self.requestQueue, self.resultsQueue, interrupted)
                self.workers.append(worker)
            # Restore the process-wide default stack size if we changed it
            # above (prev_size only exists when stack_size() succeeded).
            if 'prev_size' in locals():
                threading.stack_size(prev_size)
        def put(self, task):
            """Put task into request queue."""
            self.requestQueue.put(task)
        def get(self):
            """Remove and return a result tuple from the results queue."""
            return self.resultsQueue.get()
        def preparation_failed(self, task):
            # Report a task that failed before dispatch as a not-ok result.
            self.resultsQueue.put((task, False))
        def cleanup(self):
            """
            Shuts down the thread pool, giving each worker thread a
            chance to shut down gracefully.
            """
            # For each worker thread, put a sentinel "None" value
            # on the requestQueue (indicating that there's no work
            # to be done) so that each worker thread will get one and
            # terminate gracefully.
            for _ in self.workers:
                self.requestQueue.put(None)
            # Wait for all of the workers to terminate.
            #
            # If we don't do this, later Python versions (2.4, 2.5) often
            # seem to raise exceptions during shutdown.  This happens
            # in requestQueue.get(), as an assertion failure that
            # requestQueue.not_full is notified while not acquired,
            # seemingly because the main thread has shut down (or is
            # in the process of doing so) while the workers are still
            # trying to pull sentinels off the requestQueue.
            #
            # Normally these terminations should happen fairly quickly,
            # but we'll stick a one-second timeout on here just in case
            # someone gets hung.
            for worker in self.workers:
                worker.join(1.0)
            self.workers = []
    class Parallel(object):
        """This class is used to execute tasks in parallel, and is somewhat
        less efficient than Serial, but is appropriate for parallel builds.

        This class is thread safe.
        """
        def __init__(self, taskmaster, num, stack_size):
            """Create a new parallel job given a taskmaster.

            The taskmaster's next_task() method should return the next
            task that needs to be executed, or None if there are no more
            tasks. The taskmaster's executed() method will be called
            for each task when it is successfully executed, or failed()
            will be called if the task failed to execute (i.e. execute()
            raised an exception).

            Note: calls to taskmaster are serialized, but calls to
            execute() on distinct tasks are not serialized, because
            that is the whole point of parallel jobs: they can execute
            multiple tasks simultaneously. """
            self.taskmaster = taskmaster
            self.interrupted = InterruptState()
            self.tp = ThreadPool(num, stack_size, self.interrupted)
            self.maxjobs = num
        def start(self):
            """Start the job. This will begin pulling tasks from the
            taskmaster and executing them, and return when there are no
            more tasks. If a task fails to execute (i.e. execute() raises
            an exception), then the job will stop."""
            # 'jobs' counts tasks currently dispatched to the thread pool.
            jobs = 0
            while True:
                # Start up as many available tasks as we're
                # allowed to.
                while jobs < self.maxjobs:
                    task = self.taskmaster.next_task()
                    if task is None:
                        break
                    try:
                        # prepare task for execution
                        task.prepare()
                    # Deliberately bare: record any preparation failure
                    # (including interrupts) on the task itself.
                    except:
                        task.exception_set()
                        task.failed()
                        task.postprocess()
                    else:
                        if task.needs_execute():
                            # dispatch task
                            self.tp.put(task)
                            jobs = jobs + 1
                        else:
                            task.executed()
                            task.postprocess()
                # Taskmaster exhausted and nothing outstanding: we're done.
                if not task and not jobs: break
                # Let any/all completed tasks finish up before we go
                # back and put the next batch of tasks on the queue.
                while True:
                    # Blocks until at least one dispatched task completes.
                    task, ok = self.tp.get()
                    jobs = jobs - 1
                    if ok:
                        task.executed()
                    else:
                        if self.interrupted():
                            # Convert the interruption into a BuildError
                            # recorded via exception_set() for reporting.
                            try:
                                raise SCons.Errors.BuildError(
                                    task.targets[0], errstr=interrupt_msg)
                            except:
                                task.exception_set()
                        # Let the failed() callback function arrange
                        # for the build to stop if that's appropriate.
                        task.failed()
                    task.postprocess()
                    # Drain only what has already finished, then refill.
                    if self.tp.resultsQueue.empty():
                        break
            self.tp.cleanup()
            self.taskmaster.cleanup()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
lgpl-2.1
|
jakirkham/lazyflow
|
lazyflow/utility/io/tiledVolume.py
|
2
|
18511
|
import os
import numpy
import vigra
from functools import partial
from StringIO import StringIO
## Instead of importing requests and PIL here,
## use late imports (below) so people who don't use TiledVolume don't have to have them
# New dependency: requests is way more convenient than urllib or httplib
#import requests
# Use PIL instead of vigra since it allows us to open images in-memory
#from PIL import Image
from lazyflow.utility.timer import Timer
from lazyflow.utility.jsonConfig import JsonConfigParser, AutoEval, FormattedField
from lazyflow.roi import getIntersectingBlocks, getBlockBounds, roiToSlice, getIntersection
from lazyflow.request import Request, RequestPool
import logging
logger = logging.getLogger(__name__)
class TiledVolume(object):
    """
    Given a directory of image tiles that make up a volume, produces numpy array volumes for arbitrary roi requests.

    NOTE: This is Python 2 code (StringIO import, list-returning zip()).
    Tiles may live on the local filesystem or behind an HTTP server; see
    the 'tile_url_format' schema field below.
    """
    #: These fields describe the schema of the description file.
    #: See the source code comments for a description of each field.
    DescriptionFields = \
    {
        "_schema_name" : "tiled-volume-description",
        "_schema_version" : 1.0,
        "name" : str,
        "format" : str,
        "dtype" : AutoEval(),
        "bounds_zyx" : AutoEval(numpy.array), # Maximum coordinates (+1)
        "view_origin_zyx" : AutoEval(numpy.array), # Optional offset for output 'view'
        "view_shape_zyx" : AutoEval(numpy.array), # Shape of the output 'view'. If not provided, defaults to bounds - origin
        "resolution_zyx" : AutoEval(numpy.array),
        "tile_shape_2d_yx" : AutoEval(numpy.array),
        "is_rgb" : bool, # Indicates that we must convert to grayscale
        "username" : str,
        "password" : str,
        # This doesn't change how the data is read from the server,
        # but instead specifies the indexing order of the numpy volumes produced.
        "output_axes" : str,
        "cache_tiles" : bool,
        # Offset not supported for now...
        #"origin_offset" : AutoEval(numpy.array),
        # For now we support 3D-only, sliced across Z (TODO: Support 5D?)
        # We allow multiple url schemes: tiles might be addressed via pixel coordinates or row/column indexing
        # (z_index and z_start are synonyms here -- either is allowed)
        # Example: pixel-wise tile names:
        #   "tile_url_format" : "http://my.tiles.org/my_tiles/{z_start}-{z_stop}/{y_start}-{y_stop}/{x_start}-{x_stop}.jpg"
        # Example: row/column-wise tile names
        #   "tile_url_format" : "http://my.tiles.org/my_tiles/{z_index}/{y_index}/{x_index}.jpg"
        # Also, local tile sources (filesystem, not http) are okay:
        #   "tile_url_format" : "/my_hard_disk/my_tiles/{z_index}/{y_index}/{x_index}.jpg"
        "tile_url_format" : FormattedField( requiredFields=[],
                                            optionalFields=["x_start", "y_start", "z_start",
                                                            "x_stop",  "y_stop",  "z_stop",
                                                            "x_index", "y_index", "z_index",
                                                            "raveler_z_base"] ), # Special keyword for Raveler session directories. See notes below.
        "invert_y_axis" : bool, # For raveler volumes, the y-axis coordinate is inverted.
        # A list of lists, mapping src slices to destination slices (for "filling in" missing slices)
        # Example If slices 101,102,103 are missing data, you might want to simply repeat the data from slice 100:
        # "extend_slices" : [ [100, [101, 102, 103]] ]
        "extend_slices" : list,
        # Some tiled volumes have complicated mappings from "real" or "global" coordinates to url/filepath coordinates.
        # This field will be eval()'d before the tile is retrieved
        # For example, if the slices were named according to their position in nanometers instead of pixels, this might do the trick:
        # "z_translation_function" : "lambda z: 40*z"
        "z_translation_function" : str,
        # Optional data transform. For example:
        # "data_transform_function" : "lambda a: a == 0",
        "data_transform_function" : str
    }
    DescriptionSchema = JsonConfigParser( DescriptionFields )
    @classmethod
    def readDescription(cls, descriptionFilePath):
        """Parse a description JSON file and fill in defaulted fields."""
        # Read file
        description = TiledVolume.DescriptionSchema.parseConfigFile( descriptionFilePath )
        cls.updateDescription(description)
        return description
    @classmethod
    def updateDescription(cls, description):
        """
        Some description fields are optional.
        If they aren't provided in the description JSON file, then this function provides
        them with default values, based on the other description fields.
        """
        # Augment with default parameters.
        logger.debug(str(description))
        if description.view_origin_zyx is None:
            description.view_origin_zyx = numpy.array( [0]*len(description.bounds_zyx) )
        if description.view_shape_zyx is None:
            description.view_shape_zyx = description.bounds_zyx - description.view_origin_zyx
        if not description.output_axes:
            description.output_axes = "zyx"
        assert description.output_axes is None or set(description.output_axes) == set("zyx"), \
            "Axis order must include x,y,z (and nothing else)"
        if not description.extend_slices:
            description.extend_slices = []
        if description.cache_tiles is None:
            description.cache_tiles = False
    def __init__( self, descriptionFilePath ):
        """Load and validate the volume description; defer all I/O until read()."""
        self.description = TiledVolume.readDescription( descriptionFilePath )
        # requests.Session, created lazily on first remote read.
        self._session = None
        assert self.description.format in vigra.impex.listExtensions().split(), \
            "Unknown tile format: {}".format( self.description.format )
        assert self.description.tile_shape_2d_yx.shape == (2,)
        assert self.description.bounds_zyx.shape == (3,)
        assert self.description.view_shape_zyx.shape == (3,)
        shape_dict = dict( zip('zyx', self.description.view_shape_zyx) )
        self.output_shape = tuple( shape_dict[k] for k in self.description.output_axes )
        # Invert extend_slices into a dest-slice -> source-slice lookup.
        self._slice_remapping = {}
        for source, destinations in self.description.extend_slices:
            for dest in destinations:
                self._slice_remapping[dest] = source
    def close(self):
        # Release the HTTP connection pool, if one was ever created.
        if self._session:
            self._session.close()
    def read(self, view_roi, result_out):
        """
        roi: (start, stop) tuples, ordered according to description.output_axes
        roi should be relative to the view

        Fills result_out in place with the requested sub-volume, fetching
        each intersecting tile (in parallel, via a RequestPool).
        """
        output_axes = self.description.output_axes
        # Python 2: zip() returns a list here, so view_roi can be indexed below.
        roi_transposed = zip(*view_roi)
        roi_dict = dict( zip(output_axes, roi_transposed) )
        view_roi = zip( *(roi_dict['z'], roi_dict['y'], roi_dict['x']) )
        # First, normalize roi and result to zyx order
        result_out = vigra.taggedView(result_out, output_axes)
        result_out = result_out.withAxes(*'zyx')
        assert numpy.array(view_roi).shape == (2,3), "Invalid roi for 3D volume: {}".format( view_roi )
        view_roi = numpy.array(view_roi)
        assert (result_out.shape == (view_roi[1] - view_roi[0])).all()
        # User gave roi according to the view output.
        # Now offset it find global roi.
        roi = view_roi + self.description.view_origin_zyx
        # Tiles are one z-slice thick.
        tile_blockshape = (1,) + tuple(self.description.tile_shape_2d_yx)
        tile_starts = getIntersectingBlocks( tile_blockshape, roi )
        pool = RequestPool()
        for tile_start in tile_starts:
            tile_roi_in = getBlockBounds( self.description.bounds_zyx, tile_blockshape, tile_start )
            tile_roi_in = numpy.array(tile_roi_in)
            # This tile's portion of the roi
            intersecting_roi = getIntersection( roi, tile_roi_in )
            intersecting_roi = numpy.array( intersecting_roi )
            # Compute slicing within destination array and slicing within this tile
            destination_relative_intersection = numpy.subtract(intersecting_roi, roi[0])
            tile_relative_intersection = intersecting_roi - tile_roi_in[0]
            # Get a view to the output slice
            result_region = result_out[roiToSlice(*destination_relative_intersection)]
            rest_args = self._get_rest_args(tile_blockshape, tile_roi_in)
            if self.description.tile_url_format.startswith('http'):
                retrieval_fn = partial( self._retrieve_remote_tile, rest_args, tile_relative_intersection, result_region )
            else:
                retrieval_fn = partial( self._retrieve_local_tile, rest_args, tile_relative_intersection, result_region )
            # Debugging switch: flip to False to fetch tiles serially.
            PARALLEL_REQ = True
            if PARALLEL_REQ:
                pool.add( Request( retrieval_fn ) )
            else:
                # execute serially (leave the pool empty)
                retrieval_fn()
        if PARALLEL_REQ:
            with Timer() as timer:
                pool.wait()
            logger.info("Loading {} tiles took a total of {}".format( len(tile_starts), timer.seconds() ))
    def _get_rest_args(self, tile_blockshape, tile_roi_in):
        """
        For a single tile, return a dict of all possible parameters that can be substituted
        into the tile_url_format string from the volume json description file.
        tile_blockshape: The 3D blockshape of the tile
        (since tiles are only 1 slice thick, the blockshape always begins with 1).
        tile_roi_in: The ROI within the total volume for a particular tile.
        (Note that the size of the ROI is usually, but not always, the same as tile_blockshape.
        Near the volume borders, the tile_roi_in may be smaller.)
        """
        # Special feature:
        # Some slices are missing, in which case we provide fake data from a different slice.
        # Overwrite the rest args to pull data from an alternate source tile.
        # NOTE(review): this mutates the caller's tile_roi_in array in place.
        z_start = tile_roi_in[0][0]
        if z_start in self._slice_remapping:
            new_source_slice = self._slice_remapping[z_start]
            tile_roi_in[0][0] = new_source_slice
            tile_roi_in[1][0] = new_source_slice+1
        # NOTE(review): relies on Python 2 floor division of integer numpy
        # arrays; under py3 true division this would yield float indices.
        tile_index = numpy.array(tile_roi_in[0]) / tile_blockshape
        rest_args = { 'z_start' : tile_roi_in[0][0],
                      'z_stop'  : tile_roi_in[1][0],
                      'y_start' : tile_roi_in[0][1],
                      'y_stop'  : tile_roi_in[1][1],
                      'x_start' : tile_roi_in[0][2],
                      'x_stop'  : tile_roi_in[1][2],
                      'z_index' : tile_index[0],
                      'y_index' : tile_index[1],
                      'x_index' : tile_index[2] }
        # Apply special z_translation_function
        # NOTE(review): eval() of config-supplied code -- safe only for
        # trusted description files.
        if self.description.z_translation_function is not None:
            z_update_func = eval(self.description.z_translation_function)
            rest_args['z_index'] = rest_args['z_start'] = z_update_func(rest_args['z_index'])
            rest_args['z_stop'] = 1 + rest_args['z_start']
            # Quick sanity check
            assert rest_args['z_index'] == rest_args['z_start']
        # Special arg for Raveler session directories:
        # For files with Z < 1000, no extra directory level
        # For files with Z >= 1000, there is an extra directory level,
        # in which case the extra '/' is INCLUDED here in the rest arg.
        raveler_z_base = (rest_args['z_index'] // 1000) * 1000
        if raveler_z_base == 0:
            rest_args['raveler_z_base'] = ""
        else:
            rest_args['raveler_z_base'] = str(raveler_z_base) + '/'
        return rest_args
    def _retrieve_local_tile(self, rest_args, tile_relative_intersection, data_out):
        """Read one tile from the local filesystem and copy the requested region into data_out."""
        tile_path = self.description.tile_url_format.format( **rest_args )
        logger.debug("Opening {}".format( tile_path ))
        if not os.path.exists(tile_path):
            # Missing tiles are tolerated: the region is zero-filled.
            logger.error("Tile does not exist: {}".format( tile_path ))
            data_out[...] = 0
            return
        # Read the image from the disk with vigra
        img = vigra.impex.readImage(tile_path, dtype='NATIVE')
        assert img.ndim == 3
        if self.description.is_rgb:
            # "Convert" to grayscale -- just take first channel.
            img = img[...,0:1]
        assert img.shape[-1] == 1, "Image has more channels than expected. "\
            "If it is RGB, be sure to set the is_rgb flag in your description json."
        # img has axes xyc, but we want zyx
        img = img.transpose()[None,0,:,:]
        if self.description.invert_y_axis:
            # More special Raveler support:
            # Raveler's conventions for the Y-axis are the reverse for everyone else's.
            img = img[:, ::-1, :]
        # Copy just the part we need into the destination array
        assert img[roiToSlice(*tile_relative_intersection)].shape == data_out.shape
        data_out[:] = img[roiToSlice(*tile_relative_intersection)]
        # If there's a special transform, apply it now.
        if self.description.data_transform_function is not None:
            transform = eval(self.description.data_transform_function)
            data_out[:] = transform(data_out)
    # For late imports (stored as class attributes so the import happens once)
    requests = None
    PIL = None
    TEST_MODE = False # For testing purposes only. See below.
    def _retrieve_remote_tile(self, rest_args, tile_relative_intersection, data_out):
        """Fetch one tile over HTTP(S) and copy the requested region into data_out."""
        # Late import
        if not TiledVolume.requests:
            import requests
            TiledVolume.requests = requests
        requests = TiledVolume.requests
        tile_url = self.description.tile_url_format.format( **rest_args )
        logger.debug("Retrieving {}".format( tile_url ))
        try:
            if self._session is None:
                self._session = self._create_session()
            # Provide authentication if we have the details.
            if self.description.username and self.description.password:
                self._session.auth = (self.description.username, self.description.password)
            success = False
            tries = 0
            while not success:
                try:
                    # Note: We give timeout as a tuple, which requires a recent version of requests.
                    # If you get an exception about that, upgrade your requests module.
                    r = self._session.get(tile_url, timeout=(3.0, 20.0))
                    success = True
                except requests.ConnectionError:
                    # This special 'pass' is here because we keep running into exceptions like this:
                    # ConnectionError: HTTPConnectionPool(host='neurocean.int.janelia.org', port=6081):
                    # Max retries exceeded with url: /ssd-3-tiles/abd1.5/43/24_25_0.jpg
                    # (Caused by <class 'httplib.BadStatusLine'>: '')
                    # So now we loop a few times and only give up if something is really wrong.
                    if tries == 5:
                        raise # give up
                    tries += 1
        except:
            # During testing, the server we're pulling from might be in our own process.
            # Apparently that means that it is not very responsive, leading to exceptions.
            # As a cheap workaround, just try one more time.
            if self.TEST_MODE:
                import time
                time.sleep(0.01)
                r = self._session.get(tile_url, timeout=(3.0, 20.0))
            else:
                raise
        if r.status_code == requests.codes.not_found:
            # Missing tiles are tolerated: the region is zero-filled.
            # NOTE(review): logger.warn is a deprecated alias of logger.warning.
            logger.warn("NOTFOUND: {}".format( tile_url ))
            data_out[:] = 0
        else:
            # late import
            if not TiledVolume.PIL:
                import PIL
                import PIL.Image
                TiledVolume.PIL = PIL
            PIL = TiledVolume.PIL
            # Use PIL so the image can be decoded directly from memory.
            img = numpy.asarray( PIL.Image.open(StringIO(r.content)) )
            if self.description.is_rgb:
                # "Convert" to grayscale -- just take first channel.
                assert img.ndim == 3
                img = img[...,0]
            assert img.ndim == 2, "Image seems to be of the wrong dimension. "\
                "If it is RGB, be sure to set the is_rgb flag in your description json."
            # img has axes xy, but we want zyx
            img = img[None]
            if self.description.invert_y_axis:
                # More special Raveler support:
                # Raveler's conventions for the Y-axis are the reverse for everyone else's.
                img = img[:, ::-1, :]
            # Copy just the part we need into the destination array
            assert img[roiToSlice(*tile_relative_intersection)].shape == data_out.shape
            data_out[:] = img[roiToSlice(*tile_relative_intersection)]
            # If there's a special transform, apply it now.
            if self.description.data_transform_function is not None:
                transform = eval(self.description.data_transform_function)
                data_out[:] = transform(data_out)
    @classmethod
    def _create_session(cls):
        """
        Generate a requests.Session object to use for this TiledVolume.
        Using a session allows us to benefit from a connection pool
        instead of establishing a new connection for every request.
        """
        # Late import
        if not TiledVolume.requests:
            import requests
            TiledVolume.requests = requests
        requests = TiledVolume.requests
        session = requests.Session()
        # Replace the session http adapters with ones that use larger connection pools
        # (sized to the request thread pool, so every worker can hold a connection).
        n_threads = max(1, Request.global_thread_pool.num_workers)
        adapter = requests.adapters.HTTPAdapter(pool_connections=n_threads, pool_maxsize=n_threads)
        adapter2 = requests.adapters.HTTPAdapter(pool_connections=n_threads, pool_maxsize=n_threads)
        session.mount('http://', adapter)
        session.mount('https://', adapter2)
        return session
|
lgpl-3.0
|
zsiciarz/django
|
tests/utils_tests/test_numberformat.py
|
25
|
3750
|
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
    """Exercise django.utils.numberformat.format (aliased as nformat)."""
    def test_format_number(self):
        # (args, kwargs, expected) triples for plain int/float inputs.
        cases = (
            ((1234, '.'), {}, '1234'),
            ((1234.2, '.'), {}, '1234.2'),
            ((1234, '.'), {'decimal_pos': 2}, '1234.00'),
            ((1234, '.'), {'grouping': 2, 'thousand_sep': ','}, '1234'),
            ((1234, '.'), {'grouping': 2, 'thousand_sep': ',', 'force_grouping': True}, '12,34'),
            ((-1234.33, '.'), {'decimal_pos': 1}, '-1234.3'),
        )
        for args, kwargs, expected in cases:
            self.assertEqual(nformat(*args, **kwargs), expected)
    def test_format_string(self):
        # Same contract as test_format_number, but with string inputs.
        cases = (
            (('1234', '.'), {}, '1234'),
            (('1234.2', '.'), {}, '1234.2'),
            (('1234', '.'), {'decimal_pos': 2}, '1234.00'),
            (('1234', '.'), {'grouping': 2, 'thousand_sep': ','}, '1234'),
            (('1234', '.'), {'grouping': 2, 'thousand_sep': ',', 'force_grouping': True}, '12,34'),
            (('-1234.33', '.'), {'decimal_pos': 1}, '-1234.3'),
            (('10000', '.'), {'grouping': 3, 'thousand_sep': 'comma', 'force_grouping': True}, '10comma000'),
        )
        for args, kwargs, expected in cases:
            self.assertEqual(nformat(*args, **kwargs), expected)
    def test_large_number(self):
        # Templates for sys.float_info.max and neighbors, with slots for
        # the sign prefix and (for most_max) the final digit.
        most_max = (
            '{}179769313486231570814527423731704356798070567525844996'
            '598917476803157260780028538760589558632766878171540458953'
            '514382464234321326889464182768467546703537516986049910576'
            '551282076245490090389328944075868508455133942304583236903'
            '222948165808559332123348274797826204144723168738177180919'
            '29988125040402618412485836{}'
        )
        most_max2 = (
            '{}35953862697246314162905484746340871359614113505168999'
            '31978349536063145215600570775211791172655337563430809179'
            '07028764928468642653778928365536935093407075033972099821'
            '15310256415249098018077865788815173701691026788460916647'
            '38064458963316171186642466965495956524082894463374763543'
            '61838599762500808052368249716736'
        )
        int_max = int(float_info.max)
        checks = (
            (int_max, most_max.format('', '8')),
            (int_max + 1, most_max.format('', '9')),
            (int_max * 2, most_max2.format('')),
            (0 - int_max, most_max.format('-', '8')),
            (-1 - int_max, most_max.format('-', '9')),
            (-2 * int_max, most_max2.format('-')),
        )
        for value, expected in checks:
            self.assertEqual(nformat(value, '.'), expected)
    def test_decimal_numbers(self):
        # Same contract again, but with Decimal inputs.
        cases = (
            (('1234', '.'), {}, '1234'),
            (('1234.2', '.'), {}, '1234.2'),
            (('1234', '.'), {'decimal_pos': 2}, '1234.00'),
            (('1234', '.'), {'grouping': 2, 'thousand_sep': ','}, '1234'),
            (('1234', '.'), {'grouping': 2, 'thousand_sep': ',', 'force_grouping': True}, '12,34'),
            (('-1234.33', '.'), {'decimal_pos': 1}, '-1234.3'),
            (('0.00000001', '.'), {'decimal_pos': 8}, '0.00000001'),
        )
        for (value, sep), kwargs, expected in cases:
            self.assertEqual(nformat(Decimal(value), sep, **kwargs), expected)
    def test_decimal_subclass(self):
        class EuroDecimal(Decimal):
            """
            Decimal subclass whose formatted output carries a € prefix.
            """
            def __format__(self, specifier, **kwargs):
                return '€ {}'.format(super().__format__(specifier, **kwargs))
        price = EuroDecimal('1.23')
        self.assertEqual(nformat(price, ','), '€ 1,23')
|
bsd-3-clause
|
ThiagoGarciaAlves/intellij-community
|
python/lib/Lib/site-packages/django/conf/global_settings.py
|
73
|
20942
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
    ('ar', gettext_noop('Arabic')),
    ('bg', gettext_noop('Bulgarian')),
    ('bn', gettext_noop('Bengali')),
    ('bs', gettext_noop('Bosnian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('en-gb', gettext_noop('British English')),
    ('es', gettext_noop('Spanish')),
    ('es-ar', gettext_noop('Argentinian Spanish')),
    ('et', gettext_noop('Estonian')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('fy-nl', gettext_noop('Frisian')),
    ('ga', gettext_noop('Irish')),
    ('gl', gettext_noop('Galician')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('hu', gettext_noop('Hungarian')),
    ('id', gettext_noop('Indonesian')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('ko', gettext_noop('Korean')),
    ('lt', gettext_noop('Lithuanian')),
    ('lv', gettext_noop('Latvian')),
    ('mk', gettext_noop('Macedonian')),
    ('ml', gettext_noop('Malayalam')),
    ('mn', gettext_noop('Mongolian')),
    ('nl', gettext_noop('Dutch')),
    ('no', gettext_noop('Norwegian')),
    ('nb', gettext_noop('Norwegian Bokmal')),
    ('nn', gettext_noop('Norwegian Nynorsk')),
    ('pa', gettext_noop('Punjabi')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sq', gettext_noop('Albanian')),
    ('sr', gettext_noop('Serbian')),
    ('sr-latn', gettext_noop('Serbian Latin')),
    ('sv', gettext_noop('Swedish')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('uk', gettext_noop('Ukrainian')),
    ('vi', gettext_noop('Vietnamese')),
    ('zh-cn', gettext_noop('Simplified Chinese')),
    ('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Filesystem paths searched for translation catalogs, in addition to app dirs.
LOCALE_PATHS = ()
# Name of the cookie used to persist the user's chosen language.
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format (single-database settings; superseded by DATABASES below)
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
#    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = (
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search')
#     )
DISALLOWED_USER_AGENTS = ()
# Mapping of model path -> callable overriding that model's get_absolute_url().
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
# (Imported here, mid-file, because django.get_version has no settings dependency.)
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = False # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ()
# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None
# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None
# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None
# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0
# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Name and domain for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
############
# MESSAGES #
############
# Class to use as messges backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.user_messages.LegacyFallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request':{
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None
# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
|
apache-2.0
|
openmv/openmv
|
scripts/examples/OpenMV/09-Feature-Detection/keypoints_save.py
|
4
|
1209
|
# Keypoints descriptor example.
# This example shows how to save a keypoints descriptor to file. Show the camera an object
# and then run the script. The script will extract and save a keypoints descriptor and the image.
# You can use the keypoints_editor.py util to remove unwanted keypoints.
#
# NOTE: Please reset the camera after running this script to see the new file.
import sensor, time, image

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.skip_frames(time = 2000)
# Fix the gain so the extracted descriptor is reproducible between runs.
sensor.set_auto_gain(False, value=100)

FILE_NAME = "desc"

img = sensor.snapshot()
# NOTE: See the docs for other arguments
# NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid.
kpts = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2)

# Fixed: compare to None with `is`, not `==` (PEP 8 E711), and use the
# idiomatic `raise Exception(...)` statement instead of `raise(Exception(...))`.
if kpts is None:
    raise Exception("Couldn't find any keypoints!")

# Persist the descriptor plus a reference frame, then preview the keypoints.
image.save_descriptor(kpts, "/%s.orb"%(FILE_NAME))
img.save("/%s.pgm"%(FILE_NAME))
img.draw_keypoints(kpts)
sensor.snapshot()
time.sleep_ms(1000)
raise Exception("Done! Please reset the camera")
|
mit
|
111pontes/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_infra_correlator_oper.py
|
1
|
63673
|
""" Cisco_IOS_XR_infra_correlator_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-correlator package operational data.
This module contains definitions
for the following management objects\:
suppression\: Suppression operational data
correlator\: correlator
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class AcRuleStateEnum(Enum):
    """
    Applied state of a correlation/suppression rule.

    .. data:: rule_unapplied = 0

    	Rule is in Unapplied state

    .. data:: rule_applied = 1

    	Rule is Applied to specified RacksSlots,
    	Contexts and Sources

    .. data:: rule_applied_all = 2

    	Rule is Applied to all of router

    """

    rule_unapplied = 0
    rule_applied = 1
    rule_applied_all = 2

    @staticmethod
    def _meta_info():
        # Imported lazily: the generated _meta tables import this module back,
        # so a top-level import would be circular.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['AcRuleStateEnum']
class AlAlarmBistateEnum(Enum):
    """
    State of a bistate alarm.

    .. data:: not_available = 0

    	not available

    .. data:: active = 1

    	active

    .. data:: clear = 2

    	clear

    """

    not_available = 0
    active = 1
    clear = 2

    @staticmethod
    def _meta_info():
        # Lazy import avoids a circular dependency with the _meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['AlAlarmBistateEnum']
class AlAlarmSeverityEnum(Enum):
    """
    Alarm severity, mirroring syslog levels (plus ``unknown = -1``).

    .. data:: unknown = -1
    .. data:: emergency = 0
    .. data:: alert = 1
    .. data:: critical = 2
    .. data:: error = 3
    .. data:: warning = 4
    .. data:: notice = 5
    .. data:: informational = 6
    .. data:: debugging = 7

    """

    unknown = -1
    emergency = 0
    alert = 1
    critical = 2
    error = 3
    warning = 4
    notice = 5
    informational = 6
    debugging = 7

    @staticmethod
    def _meta_info():
        # Lazy import avoids a circular dependency with the _meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['AlAlarmSeverityEnum']
class Suppression(object):
    """
    Suppression operational data (read-only container from the
    Cisco-IOS-XR-infra-correlator-oper YANG model).

    .. attribute:: rule_details

    	Table that contains the database of suppression rule details

    .. attribute:: rule_summaries

    	Table that contains the database of suppression rule summary

    """

    _prefix = 'infra-correlator-oper'
    _revision = '2015-11-09'

    def __init__(self):
        # Child containers carry a back-pointer so _common_path can be
        # derived from the parent chain.
        self.rule_details = Suppression.RuleDetails()
        self.rule_details.parent = self
        self.rule_summaries = Suppression.RuleSummaries()
        self.rule_summaries.parent = self

    class RuleSummaries(object):
        """
        Table that contains the database of suppression rule summary.
        List child: ``rule_summary``.
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_summary = YList()
            self.rule_summary.parent = self
            self.rule_summary.name = 'rule_summary'

        class RuleSummary(object):
            """
            One of the suppression rules.

            ``rule_name`` is the YANG list key (str, length 1..32).
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.rule_name = None                 # key: suppression rule name
                self.rule_name_xr = None              # operational rule name
                self.rule_state = None                # AcRuleStateEnum
                self.suppressed_alarms_count = None   # uint32 (0..4294967295)

            @property
            def _common_path(self):
                # The XPath embeds the list key, so it must be set.
                if self.rule_name is None:
                    raise YPYModelError('Key property rule_name is None')

                return '/Cisco-IOS-XR-infra-correlator-oper:suppression/Cisco-IOS-XR-infra-correlator-oper:rule-summaries/Cisco-IOS-XR-infra-correlator-oper:rule-summary[Cisco-IOS-XR-infra-correlator-oper:rule-name = ' + str(self.rule_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True when any leaf has been populated.
                if self.rule_name is not None:
                    return True
                if self.rule_name_xr is not None:
                    return True
                if self.rule_state is not None:
                    return True
                if self.suppressed_alarms_count is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Suppression.RuleSummaries.RuleSummary']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-infra-correlator-oper:suppression/Cisco-IOS-XR-infra-correlator-oper:rule-summaries'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when any list entry carries data.
            if self.rule_summary is not None:
                for child_ref in self.rule_summary:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Suppression.RuleSummaries']['meta_info']

    class RuleDetails(object):
        """
        Table that contains the database of suppression rule details.
        List child: ``rule_detail``.
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_detail = YList()
            self.rule_detail.parent = self
            self.rule_detail.name = 'rule_detail'

        class RuleDetail(object):
            """
            Details of one of the suppression rules.

            ``rule_name`` is the YANG list key (str, length 1..32).
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.rule_name = None        # key: suppression rule name
                self.alarm_severity = None   # AlAlarmSeverityEnum to suppress
                self.all_alarms = None       # bool: match any alarm
                self.apply_source = YLeafList()
                self.apply_source.parent = self
                self.apply_source.name = 'apply_source'
                self.codes = YList()
                self.codes.parent = self
                self.codes.name = 'codes'
                self.rule_summary = Suppression.RuleDetails.RuleDetail.RuleSummary()
                self.rule_summary.parent = self

            class RuleSummary(object):
                """
                Rule summary (name, applied state, suppressed-alarm count)
                embedded in the rule detail.
                """

                _prefix = 'infra-correlator-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.rule_name_xr = None              # operational rule name
                    self.rule_state = None                # AcRuleStateEnum
                    self.suppressed_alarms_count = None   # uint32

                @property
                def _common_path(self):
                    # Path is relative to the enclosing rule-detail entry.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-infra-correlator-oper:rule-summary'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if self.rule_name_xr is not None:
                        return True
                    if self.rule_state is not None:
                        return True
                    if self.suppressed_alarms_count is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                    return meta._meta_table['Suppression.RuleDetails.RuleDetail.RuleSummary']['meta_info']

            class Codes(object):
                """
                One message code (category/group/code triple) defining the rule.
                """

                _prefix = 'infra-correlator-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.category = None   # message category
                    self.code = None       # alarm code within the group
                    self.group = None      # message group

                @property
                def _common_path(self):
                    # Path is relative to the enclosing rule-detail entry.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-infra-correlator-oper:codes'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if self.category is not None:
                        return True
                    if self.code is not None:
                        return True
                    if self.group is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                    return meta._meta_table['Suppression.RuleDetails.RuleDetail.Codes']['meta_info']

            @property
            def _common_path(self):
                # The XPath embeds the list key, so it must be set.
                if self.rule_name is None:
                    raise YPYModelError('Key property rule_name is None')

                return '/Cisco-IOS-XR-infra-correlator-oper:suppression/Cisco-IOS-XR-infra-correlator-oper:rule-details/Cisco-IOS-XR-infra-correlator-oper:rule-detail[Cisco-IOS-XR-infra-correlator-oper:rule-name = ' + str(self.rule_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.rule_name is not None:
                    return True
                if self.alarm_severity is not None:
                    return True
                if self.all_alarms is not None:
                    return True
                if self.apply_source is not None:
                    # Leaf-list: any non-None element counts as data.
                    for child in self.apply_source:
                        if child is not None:
                            return True
                if self.codes is not None:
                    for child_ref in self.codes:
                        if child_ref._has_data():
                            return True
                if self.rule_summary is not None and self.rule_summary._has_data():
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Suppression.RuleDetails.RuleDetail']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-infra-correlator-oper:suppression/Cisco-IOS-XR-infra-correlator-oper:rule-details'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.rule_detail is not None:
                for child_ref in self.rule_detail:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Suppression.RuleDetails']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-infra-correlator-oper:suppression'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.rule_details is not None and self.rule_details._has_data():
            return True
        if self.rule_summaries is not None and self.rule_summaries._has_data():
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['Suppression']['meta_info']
class Correlator(object):
"""
correlator
.. attribute:: alarms
Correlated alarms Table
**type**\: :py:class:`Alarms <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.Alarms>`
.. attribute:: buffer_status
Describes buffer utilization and parameters configured
**type**\: :py:class:`BufferStatus <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.BufferStatus>`
.. attribute:: rule_details
Table that contains the database of correlation rule details
**type**\: :py:class:`RuleDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleDetails>`
.. attribute:: rule_set_details
Table that contains the ruleset detail info
**type**\: :py:class:`RuleSetDetails <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSetDetails>`
.. attribute:: rule_set_summaries
Table that contains the ruleset summary info
**type**\: :py:class:`RuleSetSummaries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSetSummaries>`
.. attribute:: rule_summaries
Table that contains the database of correlation rule summary
**type**\: :py:class:`RuleSummaries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSummaries>`
.. attribute:: rules
Table that contains the database of correlation rules
**type**\: :py:class:`Rules <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.Rules>`
"""
_prefix = 'infra-correlator-oper'
_revision = '2015-11-09'
    def __init__(self):
        # Instantiate every child container and give each a back-pointer so
        # parent-relative XPaths can be derived.
        self.alarms = Correlator.Alarms()
        self.alarms.parent = self
        self.buffer_status = Correlator.BufferStatus()
        self.buffer_status.parent = self
        self.rule_details = Correlator.RuleDetails()
        self.rule_details.parent = self
        self.rule_set_details = Correlator.RuleSetDetails()
        self.rule_set_details.parent = self
        self.rule_set_summaries = Correlator.RuleSetSummaries()
        self.rule_set_summaries.parent = self
        self.rule_summaries = Correlator.RuleSummaries()
        self.rule_summaries.parent = self
        self.rules = Correlator.Rules()
        self.rules.parent = self
    class Rules(object):
        """
        Table that contains the database of correlation rules.
        List child: ``rule``.
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule = YList()
            self.rule.parent = self
            self.rule.name = 'rule'

        class Rule(object):
            """
            One of the correlation rules.

            ``rule_name`` is the YANG list key (str, length 1..32).
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.rule_name = None       # key: correlation rule name
                self.apply_context = YLeafList()
                self.apply_context.parent = self
                self.apply_context.name = 'apply_context'
                self.apply_location = YLeafList()
                self.apply_location.parent = self
                self.apply_location.name = 'apply_location'
                self.codes = YList()
                self.codes.parent = self
                self.codes.name = 'codes'
                self.rule_name_xr = None    # operational rule name
                self.rule_state = None      # AcRuleStateEnum
                self.timeout = None         # ms window messages stay in correlator

            class Codes(object):
                """
                One message code (category/group/code triple) defining the rule.
                """

                _prefix = 'infra-correlator-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.category = None   # message category
                    self.code = None       # alarm code within the group
                    self.group = None      # message group

                @property
                def _common_path(self):
                    # Path is relative to the enclosing rule entry.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-infra-correlator-oper:codes'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    if self.category is not None:
                        return True
                    if self.code is not None:
                        return True
                    if self.group is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                    return meta._meta_table['Correlator.Rules.Rule.Codes']['meta_info']

            @property
            def _common_path(self):
                # The XPath embeds the list key, so it must be set.
                if self.rule_name is None:
                    raise YPYModelError('Key property rule_name is None')

                return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rules/Cisco-IOS-XR-infra-correlator-oper:rule[Cisco-IOS-XR-infra-correlator-oper:rule-name = ' + str(self.rule_name) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if self.rule_name is not None:
                    return True
                if self.apply_context is not None:
                    # Leaf-list: any non-None element counts as data.
                    for child in self.apply_context:
                        if child is not None:
                            return True
                if self.apply_location is not None:
                    for child in self.apply_location:
                        if child is not None:
                            return True
                if self.codes is not None:
                    for child_ref in self.codes:
                        if child_ref._has_data():
                            return True
                if self.rule_name_xr is not None:
                    return True
                if self.rule_state is not None:
                    return True
                if self.timeout is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Correlator.Rules.Rule']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rules'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.rule is not None:
                for child_ref in self.rule:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.Rules']['meta_info']
    class BufferStatus(object):
        """
        Describes buffer utilization and parameters configured.

        Both counters are uint32 (0..4294967295).
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.configured_size = None   # configured buffer size
            self.current_size = None      # current buffer usage

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:buffer-status'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.configured_size is not None:
                return True
            if self.current_size is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.BufferStatus']['meta_info']
class Alarms(object):
"""
Correlated alarms Table
.. attribute:: alarm
One of the correlated alarms
**type**\: list of :py:class:`Alarm <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.Alarms.Alarm>`
"""
_prefix = 'infra-correlator-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.alarm = YList()
self.alarm.parent = self
self.alarm.name = 'alarm'
class Alarm(object):
"""
One of the correlated alarms
.. attribute:: alarm_id <key>
Alarm ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: alarm_info
Correlated alarm information
**type**\: :py:class:`AlarmInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.Alarms.Alarm.AlarmInfo>`
.. attribute:: context
Context string for the alarm
**type**\: str
.. attribute:: rule_name
Correlation rule name
**type**\: str
"""
_prefix = 'infra-correlator-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.alarm_id = None
self.alarm_info = Correlator.Alarms.Alarm.AlarmInfo()
self.alarm_info.parent = self
self.context = None
self.rule_name = None
class AlarmInfo(object):
"""
Correlated alarm information
.. attribute:: additional_text
Full text of the Alarm
**type**\: str
.. attribute:: category
Category of the alarm
**type**\: str
.. attribute:: code
Alarm code which further qualifies the alarm within a message group
**type**\: str
.. attribute:: correlation_id
Correlation Identifier
**type**\: int
**range:** 0..4294967295
.. attribute:: group
Group of messages to which this alarm belongs to
**type**\: str
.. attribute:: is_admin
Indicates the event id admin\-level
**type**\: bool
.. attribute:: severity
Severity of the alarm
**type**\: :py:class:`AlAlarmSeverityEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.AlAlarmSeverityEnum>`
.. attribute:: source_id
Source Identifier(Location).Indicates the node in which the alarm was generated
**type**\: str
.. attribute:: state
State of the alarm (bistate alarms only)
**type**\: :py:class:`AlAlarmBistateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.AlAlarmBistateEnum>`
.. attribute:: timestamp
Time when the alarm was generated. It is expressed in number of milliseconds since 00\:00 \:00 UTC, January 1, 1970
**type**\: int
**range:** 0..18446744073709551615
**units**\: millisecond
"""
_prefix = 'infra-correlator-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.additional_text = None
self.category = None
self.code = None
self.correlation_id = None
self.group = None
self.is_admin = None
self.severity = None
self.source_id = None
self.state = None
self.timestamp = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-correlator-oper:alarm-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.additional_text is not None:
return True
if self.category is not None:
return True
if self.code is not None:
return True
if self.correlation_id is not None:
return True
if self.group is not None:
return True
if self.is_admin is not None:
return True
if self.severity is not None:
return True
if self.source_id is not None:
return True
if self.state is not None:
return True
if self.timestamp is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
return meta._meta_table['Correlator.Alarms.Alarm.AlarmInfo']['meta_info']
@property
def _common_path(self):
if self.alarm_id is None:
raise YPYModelError('Key property alarm_id is None')
return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:alarms/Cisco-IOS-XR-infra-correlator-oper:alarm[Cisco-IOS-XR-infra-correlator-oper:alarm-id = ' + str(self.alarm_id) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.alarm_id is not None:
return True
if self.alarm_info is not None and self.alarm_info._has_data():
return True
if self.context is not None:
return True
if self.rule_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
return meta._meta_table['Correlator.Alarms.Alarm']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:alarms'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
    # True when at least one child alarm entry carries data.
    if self.alarm is not None:
        for child_ref in self.alarm:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the generated
    # meta module at package import time.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
    return meta._meta_table['Correlator.Alarms']['meta_info']
class RuleSetSummaries(object):
    """
    Table that contains the ruleset summary info

    .. attribute:: rule_set_summary

        Summary of one of the correlation rulesets
        **type**\: list of :py:class:`RuleSetSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSetSummaries.RuleSetSummary>`
    """

    _prefix = 'infra-correlator-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList of child RuleSetSummary entries, keyed by rule_set_name.
        self.rule_set_summary = YList()
        self.rule_set_summary.parent = self
        self.rule_set_summary.name = 'rule_set_summary'

    class RuleSetSummary(object):
        """
        Summary of one of the correlation rulesets

        .. attribute:: rule_set_name  <key>

            Ruleset Name
            **type**\: str
            **length:** 1..32

        .. attribute:: rule_set_name_xr

            Ruleset Name
            **type**\: str
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_set_name = None  # YANG list key
            self.rule_set_name_xr = None

        @property
        def _common_path(self):
            # XPath of this entry; the list key must be set first.
            if self.rule_set_name is None:
                raise YPYModelError('Key property rule_set_name is None')
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-set-summaries/Cisco-IOS-XR-infra-correlator-oper:rule-set-summary[Cisco-IOS-XR-infra-correlator-oper:rule-set-name = ' + str(self.rule_set_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when any leaf of this entry has been populated.
            if self.rule_set_name is not None:
                return True
            if self.rule_set_name_xr is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Deferred import avoids a circular import with the meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.RuleSetSummaries.RuleSetSummary']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-set-summaries'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when at least one child summary entry carries data.
        if self.rule_set_summary is not None:
            for child_ref in self.rule_set_summary:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular import with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['Correlator.RuleSetSummaries']['meta_info']
class RuleSetDetails(object):
    """
    Table that contains the ruleset detail info

    .. attribute:: rule_set_detail

        Detail of one of the correlation rulesets
        **type**\: list of :py:class:`RuleSetDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSetDetails.RuleSetDetail>`
    """

    _prefix = 'infra-correlator-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList of child RuleSetDetail entries, keyed by rule_set_name.
        self.rule_set_detail = YList()
        self.rule_set_detail.parent = self
        self.rule_set_detail.name = 'rule_set_detail'

    class RuleSetDetail(object):
        """
        Detail of one of the correlation rulesets

        .. attribute:: rule_set_name  <key>

            Ruleset Name
            **type**\: str
            **length:** 1..32

        .. attribute:: rule_set_name_xr

            Ruleset Name
            **type**\: str

        .. attribute:: rules

            Rules contained in a ruleset
            **type**\: list of :py:class:`Rules <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSetDetails.RuleSetDetail.Rules>`
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_set_name = None  # YANG list key
            self.rule_set_name_xr = None
            # Keyless YANG list of member rules.
            self.rules = YList()
            self.rules.parent = self
            self.rules.name = 'rules'

        class Rules(object):
            """
            Rules contained in a ruleset

            .. attribute:: buffered_alarms_count

                Number of buffered alarms correlated to this rule
                **type**\: int
                **range:** 0..4294967295

            .. attribute:: rule_name_xr

                Correlation Rule Name
                **type**\: str

            .. attribute:: rule_state

                Applied state of the rule. It could be not applied, applied or applied to all
                **type**\: :py:class:`AcRuleStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.AcRuleStateEnum>`

            .. attribute:: stateful

                Whether the rule is stateful
                **type**\: bool
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.buffered_alarms_count = None
                self.rule_name_xr = None
                self.rule_state = None
                self.stateful = None

            @property
            def _common_path(self):
                # Keyless list entry: path is derived from the parent
                # RuleSetDetail, which must therefore be linked first.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-infra-correlator-oper:rules'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True when any leaf of this rule entry has been populated.
                if self.buffered_alarms_count is not None:
                    return True
                if self.rule_name_xr is not None:
                    return True
                if self.rule_state is not None:
                    return True
                if self.stateful is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                # Deferred import avoids a circular import with the meta module.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Correlator.RuleSetDetails.RuleSetDetail.Rules']['meta_info']

        @property
        def _common_path(self):
            # XPath of this entry; the list key must be set first.
            if self.rule_set_name is None:
                raise YPYModelError('Key property rule_set_name is None')
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-set-details/Cisco-IOS-XR-infra-correlator-oper:rule-set-detail[Cisco-IOS-XR-infra-correlator-oper:rule-set-name = ' + str(self.rule_set_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when the key, any leaf, or any child rule carries data.
            if self.rule_set_name is not None:
                return True
            if self.rule_set_name_xr is not None:
                return True
            if self.rules is not None:
                for child_ref in self.rules:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            # Deferred import avoids a circular import with the meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.RuleSetDetails.RuleSetDetail']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-set-details'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when at least one child detail entry carries data.
        if self.rule_set_detail is not None:
            for child_ref in self.rule_set_detail:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular import with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['Correlator.RuleSetDetails']['meta_info']
class RuleDetails(object):
    """
    Table that contains the database of correlation
    rule details

    .. attribute:: rule_detail

        Details of one of the correlation rules
        **type**\: list of :py:class:`RuleDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleDetails.RuleDetail>`
    """

    _prefix = 'infra-correlator-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList of child RuleDetail entries, keyed by rule_name.
        self.rule_detail = YList()
        self.rule_detail.parent = self
        self.rule_detail.name = 'rule_detail'

    class RuleDetail(object):
        """
        Details of one of the correlation rules

        .. attribute:: rule_name  <key>

            Correlation Rule Name
            **type**\: str
            **length:** 1..32

        .. attribute:: apply_context

            Contexts (Interfaces) to which the rule is applied
            **type**\: list of str
            **length:** 0..33

        .. attribute:: apply_location

            Locations (R/S/M) to which the rule is applied
            **type**\: list of str
            **pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)

        .. attribute:: codes

            Message codes defining the rule
            **type**\: list of :py:class:`Codes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleDetails.RuleDetail.Codes>`

        .. attribute:: context_correlation

            Whether context correlation is enabled
            **type**\: bool

        .. attribute:: internal

            True if the rule is internal
            **type**\: bool

        .. attribute:: reissue_non_bistate

            Whether to reissue non\-bistate alarms
            **type**\: bool

        .. attribute:: reparent

            Reparent
            **type**\: bool

        .. attribute:: root_cause_timeout

            Timeout before root cause alarm
            **type**\: int
            **range:** 0..4294967295

        .. attribute:: rule_summary

            Rule summary, name, etc
            **type**\: :py:class:`RuleSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleDetails.RuleDetail.RuleSummary>`

        .. attribute:: timeout

            Time window (in ms) for which root/all messages are kept in correlater before sending them to the logger
            **type**\: int
            **range:** 0..4294967295
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_name = None  # YANG list key
            # YANG leaf-lists: hold plain string values.
            self.apply_context = YLeafList()
            self.apply_context.parent = self
            self.apply_context.name = 'apply_context'
            self.apply_location = YLeafList()
            self.apply_location.parent = self
            self.apply_location.name = 'apply_location'
            # Keyless YANG list of message-code entries.
            self.codes = YList()
            self.codes.parent = self
            self.codes.name = 'codes'
            self.context_correlation = None
            self.internal = None
            self.reissue_non_bistate = None
            self.reparent = None
            self.root_cause_timeout = None
            # Presence container; always instantiated by the generator.
            self.rule_summary = Correlator.RuleDetails.RuleDetail.RuleSummary()
            self.rule_summary.parent = self
            self.timeout = None

        class RuleSummary(object):
            """
            Rule summary, name, etc

            .. attribute:: buffered_alarms_count

                Number of buffered alarms correlated to this rule
                **type**\: int
                **range:** 0..4294967295

            .. attribute:: rule_name_xr

                Correlation Rule Name
                **type**\: str

            .. attribute:: rule_state

                Applied state of the rule. It could be not applied, applied or applied to all
                **type**\: :py:class:`AcRuleStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.AcRuleStateEnum>`

            .. attribute:: stateful

                Whether the rule is stateful
                **type**\: bool
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.buffered_alarms_count = None
                self.rule_name_xr = None
                self.rule_state = None
                self.stateful = None

            @property
            def _common_path(self):
                # Container under a list entry: path derives from the
                # parent RuleDetail, which must be linked first.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-infra-correlator-oper:rule-summary'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True when any leaf of this summary has been populated.
                if self.buffered_alarms_count is not None:
                    return True
                if self.rule_name_xr is not None:
                    return True
                if self.rule_state is not None:
                    return True
                if self.stateful is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                # Deferred import avoids a circular import with the meta module.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Correlator.RuleDetails.RuleDetail.RuleSummary']['meta_info']

        class Codes(object):
            """
            Message codes defining the rule.

            .. attribute:: category

                Category of messages to which this alarm belongs
                **type**\: str

            .. attribute:: code

                Alarm code which further qualifies the alarm within a message group
                **type**\: str

            .. attribute:: group

                Group of messages to which this alarm belongs
                **type**\: str
            """

            _prefix = 'infra-correlator-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.category = None
                self.code = None
                self.group = None

            @property
            def _common_path(self):
                # Keyless list entry: path derives from the parent
                # RuleDetail, which must be linked first.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-infra-correlator-oper:codes'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # True when any leaf of this code entry has been populated.
                if self.category is not None:
                    return True
                if self.code is not None:
                    return True
                if self.group is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                # Deferred import avoids a circular import with the meta module.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
                return meta._meta_table['Correlator.RuleDetails.RuleDetail.Codes']['meta_info']

        @property
        def _common_path(self):
            # XPath of this entry; the list key must be set first.
            if self.rule_name is None:
                raise YPYModelError('Key property rule_name is None')
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-details/Cisco-IOS-XR-infra-correlator-oper:rule-detail[Cisco-IOS-XR-infra-correlator-oper:rule-name = ' + str(self.rule_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when the key, any leaf, any leaf-list member, or any
            # child container/list entry carries data.
            if self.rule_name is not None:
                return True
            if self.apply_context is not None:
                for child in self.apply_context:
                    if child is not None:
                        return True
            if self.apply_location is not None:
                for child in self.apply_location:
                    if child is not None:
                        return True
            if self.codes is not None:
                for child_ref in self.codes:
                    if child_ref._has_data():
                        return True
            if self.context_correlation is not None:
                return True
            if self.internal is not None:
                return True
            if self.reissue_non_bistate is not None:
                return True
            if self.reparent is not None:
                return True
            if self.root_cause_timeout is not None:
                return True
            if self.rule_summary is not None and self.rule_summary._has_data():
                return True
            if self.timeout is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Deferred import avoids a circular import with the meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.RuleDetails.RuleDetail']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-details'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when at least one child detail entry carries data.
        if self.rule_detail is not None:
            for child_ref in self.rule_detail:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular import with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['Correlator.RuleDetails']['meta_info']
class RuleSummaries(object):
    """
    Table that contains the database of correlation
    rule summary

    .. attribute:: rule_summary

        One of the correlation rules
        **type**\: list of :py:class:`RuleSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.Correlator.RuleSummaries.RuleSummary>`
    """

    _prefix = 'infra-correlator-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList of child RuleSummary entries, keyed by rule_name.
        self.rule_summary = YList()
        self.rule_summary.parent = self
        self.rule_summary.name = 'rule_summary'

    class RuleSummary(object):
        """
        One of the correlation rules

        .. attribute:: rule_name  <key>

            Correlation Rule Name
            **type**\: str
            **length:** 1..32

        .. attribute:: buffered_alarms_count

            Number of buffered alarms correlated to this rule
            **type**\: int
            **range:** 0..4294967295

        .. attribute:: rule_name_xr

            Correlation Rule Name
            **type**\: str

        .. attribute:: rule_state

            Applied state of the rule. It could be not applied, applied or applied to all
            **type**\: :py:class:`AcRuleStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_correlator_oper.AcRuleStateEnum>`

        .. attribute:: stateful

            Whether the rule is stateful
            **type**\: bool
        """

        _prefix = 'infra-correlator-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.rule_name = None  # YANG list key
            self.buffered_alarms_count = None
            self.rule_name_xr = None
            self.rule_state = None
            self.stateful = None

        @property
        def _common_path(self):
            # XPath of this entry; the list key must be set first.
            if self.rule_name is None:
                raise YPYModelError('Key property rule_name is None')
            return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-summaries/Cisco-IOS-XR-infra-correlator-oper:rule-summary[Cisco-IOS-XR-infra-correlator-oper:rule-name = ' + str(self.rule_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when the key or any leaf of this entry is populated.
            if self.rule_name is not None:
                return True
            if self.buffered_alarms_count is not None:
                return True
            if self.rule_name_xr is not None:
                return True
            if self.rule_state is not None:
                return True
            if self.stateful is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            # Deferred import avoids a circular import with the meta module.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
            return meta._meta_table['Correlator.RuleSummaries.RuleSummary']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-infra-correlator-oper:correlator/Cisco-IOS-XR-infra-correlator-oper:rule-summaries'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when at least one child summary entry carries data.
        if self.rule_summary is not None:
            for child_ref in self.rule_summary:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular import with the meta module.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
        return meta._meta_table['Correlator.RuleSummaries']['meta_info']
@property
def _common_path(self):
    # Top-level container path for the whole correlator oper tree.
    return '/Cisco-IOS-XR-infra-correlator-oper:correlator'
def is_config(self):
    """Return False: this node models operational (read-only) state, never configuration."""
    return False
def _has_data(self):
    # True when any child container of the correlator tree carries data.
    if self.alarms is not None and self.alarms._has_data():
        return True
    if self.buffer_status is not None and self.buffer_status._has_data():
        return True
    if self.rule_details is not None and self.rule_details._has_data():
        return True
    if self.rule_set_details is not None and self.rule_set_details._has_data():
        return True
    if self.rule_set_summaries is not None and self.rule_set_summaries._has_data():
        return True
    if self.rule_summaries is not None and self.rule_summaries._has_data():
        return True
    if self.rules is not None and self.rules._has_data():
        return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the generated
    # meta module at package import time.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_correlator_oper as meta
    return meta._meta_table['Correlator']['meta_info']
|
apache-2.0
|
JimCircadian/ansible
|
lib/ansible/modules/storage/netapp/na_ontap_interface.py
|
8
|
13359
|
#!/usr/bin/python
""" this is interface module
(c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: na_ontap_interface
short_description: ONTAP LIF configuration
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: chhaya gunawat (chhayag@netapp.com)
description:
- Creating / deleting and modifying the LIF.
options:
state:
description:
- Whether the specified interface should exist or not.
choices: ['present', 'absent']
default: present
interface_name:
description:
- Specifies the logical interface (LIF) name.
required: true
home_node:
description:
- Specifies the LIF's home node.
- Required when C(state=present).
home_port:
description:
- Specifies the LIF's home port.
- Required when C(state=present)
role:
description:
- Specifies the role of the LIF.
- Required when C(state=present).
address:
description:
- Specifies the LIF's IP address.
- Required when C(state=present)
netmask:
description:
- Specifies the LIF's netmask.
- Required when C(state=present).
vserver:
description:
- The name of the vserver to use.
required: true
firewall_policy:
description:
- Specifies the firewall policy for the LIF.
failover_policy:
description:
- Specifies the failover policy for the LIF.
admin_status:
choices: ['up', 'down']
description:
- Specifies the administrative status of the LIF.
is_auto_revert:
description:
If true, data LIF will revert to its home node under certain circumstances such as startup, and load balancing
migration capability is disabled automatically
protocols:
description:
Specifies the list of data protocols configured on the LIF. By default, the values in this element are nfs, cifs and fcache.
Other supported protocols are iscsi and fcp. A LIF can be configured to not support any data protocols by specifying 'none'.
Protocol values of none, iscsi or fcp can't be combined with any other data protocol(s).
'''
EXAMPLES = '''
- name: Create interface
na_ontap_interface:
state: present
interface_name: data2
home_port: e0d
home_node: laurentn-vsim1
role: data
protocols: nfs
admin_status: up
failover_policy: local-only
firewall_policy: mgmt
is_auto_revert: true
address: 10.10.10.10
netmask: 255.255.255.0
vserver: svm1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete interface
na_ontap_interface:
state: absent
interface_name: data2
vserver: svm1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
'''
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapInterface(object):
    ''' object to describe interface info '''

    def __init__(self):
        # Build the argument spec on top of the shared ONTAP host options
        # (hostname/username/password), parse the play parameters, and
        # open the ZAPI session.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=[
                'present', 'absent'], default='present'),
            interface_name=dict(required=True, type='str'),
            home_node=dict(required=False, type='str', default=None),
            home_port=dict(required=False, type='str'),
            role=dict(required=False, type='str'),
            address=dict(required=False, type='str'),
            netmask=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
            firewall_policy=dict(required=False, type='str', default=None),
            failover_policy=dict(required=False, type='str', default=None),
            admin_status=dict(required=False, choices=['up', 'down']),
            # NOTE(review): declared type='str', not 'bool' -- presumably
            # because ZAPI wants the literal strings 'true'/'false'; confirm.
            is_auto_revert=dict(required=False, type='str', default=None),
            protocols=dict(required=False, type='list')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        params = self.module.params
        # set up state variables
        self.state = params['state']
        self.interface_name = params['interface_name']
        self.home_node = params['home_node']
        self.home_port = params['home_port']
        self.role = params['role']
        self.vserver = params['vserver']
        self.address = params['address']
        self.netmask = params['netmask']
        self.admin_status = params['admin_status']
        self.failover_policy = params['failover_policy']
        self.firewall_policy = params['firewall_policy']
        self.is_auto_revert = params['is_auto_revert']
        self.protocols = params['protocols']
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def get_interface(self):
        """
        Return details about the interface
        :param:
            name : Name of the name of the interface

        :return: Details about the interface. None if not found.
        :rtype: dict
        """
        interface_info = netapp_utils.zapi.NaElement('net-interface-get-iter')
        interface_attributes = netapp_utils.zapi.NaElement(
            'net-interface-info')
        interface_attributes.add_new_child(
            'interface-name', self.interface_name)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(interface_attributes)
        interface_info.add_child_elem(query)
        result = self.server.invoke_successfully(interface_info, True)
        return_value = None
        # If the iterator matched anything, only the first record of the
        # attributes-list is read (interface-name is expected to be unique
        # within the query).
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) >= 1:
            interface_attributes = result.get_child_by_name('attributes-list').\
                get_child_by_name('net-interface-info')
            return_value = {
                'interface_name': self.interface_name,
                'admin_status': interface_attributes.get_child_content('administrative-status'),
                'home_port': interface_attributes.get_child_content('home-port'),
                'home_node': interface_attributes.get_child_content('home-node'),
                'address': interface_attributes.get_child_content('address'),
                'netmask': interface_attributes.get_child_content('netmask'),
                'failover_policy': interface_attributes.get_child_content('failover-policy'),
                'firewall_policy': interface_attributes.get_child_content('firewall-policy'),
                'is_auto_revert': interface_attributes.get_child_content('is-auto-revert'),
            }
        return return_value

    def create_interface(self):
        ''' calling zapi to create interface '''
        # Only explicitly supplied options are sent; ONTAP applies its own
        # defaults for the rest.
        options = {'interface-name': self.interface_name,
                   'vserver': self.vserver}
        if self.home_port is not None:
            options['home-port'] = self.home_port
        if self.home_node is not None:
            options['home-node'] = self.home_node
        if self.address is not None:
            options['address'] = self.address
        if self.netmask is not None:
            options['netmask'] = self.netmask
        if self.role is not None:
            options['role'] = self.role
        if self.failover_policy is not None:
            options['failover-policy'] = self.failover_policy
        if self.firewall_policy is not None:
            options['firewall-policy'] = self.firewall_policy
        if self.is_auto_revert is not None:
            options['is-auto-revert'] = self.is_auto_revert
        if self.admin_status is not None:
            options['administrative-status'] = self.admin_status
        interface_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'net-interface-create', **options)
        if self.protocols is not None:
            # data-protocols is a child element list, not a flat option.
            data_protocols_obj = netapp_utils.zapi.NaElement('data-protocols')
            interface_create.add_child_elem(data_protocols_obj)
            for protocol in self.protocols:
                data_protocols_obj.add_new_child('data-protocol', protocol)
        try:
            self.server.invoke_successfully(interface_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error Creating interface %s: %s' %
                                  (self.interface_name, to_native(exc)), exception=traceback.format_exc())

    def delete_interface(self, current_status):
        ''' calling zapi to delete interface '''
        # ONTAP refuses to delete a LIF that is administratively up, so it
        # is brought down first via a modify call.
        if current_status == 'up':
            self.admin_status = 'down'
            self.modify_interface()
        interface_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'net-interface-delete', **{'interface-name': self.interface_name,
                                       'vserver': self.vserver})
        try:
            self.server.invoke_successfully(interface_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as exc:
            self.module.fail_json(msg='Error deleting interface %s: %s' % (self.interface_name, to_native(exc)),
                                  exception=traceback.format_exc())

    def modify_interface(self):
        """
        Modify the interface.
        """
        # Sends only the options the user supplied; unset options are left
        # untouched on the controller.
        options = {'interface-name': self.interface_name,
                   'vserver': self.vserver
                   }
        if self.admin_status is not None:
            options['administrative-status'] = self.admin_status
        if self.failover_policy is not None:
            options['failover-policy'] = self.failover_policy
        if self.firewall_policy is not None:
            options['firewall-policy'] = self.firewall_policy
        if self.is_auto_revert is not None:
            options['is-auto-revert'] = self.is_auto_revert
        if self.netmask is not None:
            options['netmask'] = self.netmask
        if self.address is not None:
            options['address'] = self.address
        interface_modify = netapp_utils.zapi.NaElement.create_node_with_children(
            'net-interface-modify', **options)
        try:
            self.server.invoke_successfully(interface_modify,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error modifying interface %s: %s' % (self.interface_name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        ''' calling all interface features '''
        changed = False
        interface_exists = False
        # EMS audit log entry is sent via the cluster vserver connection.
        results = netapp_utils.get_cserver(self.server)
        cserver = netapp_utils.setup_na_ontap_zapi(
            module=self.module, vserver=results)
        netapp_utils.ems_log_event("na_ontap_interface", cserver)
        interface_detail = self.get_interface()
        if interface_detail:
            interface_exists = True
            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # NOTE(review): home_node/home_port/role/protocols drift is
                # not compared here, so changes to those alone will not
                # trigger a modify -- confirm whether that is intentional.
                if (self.admin_status is not None and self.admin_status != interface_detail['admin_status']) or \
                        (self.address is not None and self.address != interface_detail['address']) or \
                        (self.netmask is not None and self.netmask != interface_detail['netmask']) or \
                        (self.failover_policy is not None and self.failover_policy != interface_detail['failover_policy']) or \
                        (self.firewall_policy is not None and self.firewall_policy != interface_detail['firewall_policy']) or \
                        (self.is_auto_revert is not None and self.is_auto_revert != interface_detail['is_auto_revert']):
                    changed = True
        else:
            if self.state == 'present':
                changed = True
        if changed:
            if self.module.check_mode:
                # Check mode: report 'changed' without touching the device.
                pass
            else:
                if self.state == 'present':
                    if interface_exists is False:
                        self.create_interface()
                    else:
                        self.modify_interface()
                elif self.state == 'absent':
                    self.delete_interface(interface_detail['admin_status'])
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the interface handler and enforce state."""
    module_obj = NetAppOntapInterface()
    module_obj.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
|
benthomasson/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_facts.py
|
78
|
16084
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_facts
short_description: Collect facts from F5 BIG-IP GTM devices.
description:
- Collect facts from F5 BIG-IP GTM devices.
version_added: "2.3"
options:
include:
description:
- Fact category to collect
required: true
choices:
- pool
- wide_ip
- virtual_server
filter:
description:
- Perform regex filter of response. Filtering is done on the name of
the resource. Valid filters are anything that can be provided to
Python's C(re) module.
required: false
default: None
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Get pool facts
bigip_gtm_facts:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
include: "pool"
filter: "my_pool"
delegate_to: localhost
'''
RETURN = '''
wide_ip:
description:
Contains the lb method for the wide ip and the pools
that are within the wide ip.
returned: changed
type: dict
sample:
wide_ip:
- enabled: "True"
failure_rcode: "noerror"
failure_rcode_response: "disabled"
failure_rcode_ttl: "0"
full_path: "/Common/foo.ok.com"
last_resort_pool: ""
minimal_response: "enabled"
name: "foo.ok.com"
partition: "Common"
persist_cidr_ipv4: "32"
persist_cidr_ipv6: "128"
persistence: "disabled"
pool_lb_mode: "round-robin"
pools:
- name: "d3qw"
order: "0"
partition: "Common"
ratio: "1"
ttl_persistence: "3600"
type: "naptr"
pool:
description: Contains the pool object status and enabled status.
returned: changed
type: dict
sample:
pool:
- alternate_mode: "round-robin"
dynamic_ratio: "disabled"
enabled: "True"
fallback_mode: "return-to-dns"
full_path: "/Common/d3qw"
load_balancing_mode: "round-robin"
manual_resume: "disabled"
max_answers_returned: "1"
members:
- disabled: "True"
flags: "a"
full_path: "ok3.com"
member_order: "0"
name: "ok3.com"
order: "10"
preference: "10"
ratio: "1"
service: "80"
name: "d3qw"
partition: "Common"
qos_hit_ratio: "5"
qos_hops: "0"
qos_kilobytes_second: "3"
qos_lcs: "30"
qos_packet_rate: "1"
qos_rtt: "50"
qos_topology: "0"
qos_vs_capacity: "0"
qos_vs_score: "0"
ttl: "30"
type: "naptr"
verify_member_availability: "disabled"
virtual_server:
description:
Contains the virtual server enabled and availability
status, and address
returned: changed
type: dict
sample:
virtual_server:
- addresses:
- device_name: "/Common/qweqwe"
name: "10.10.10.10"
translation: "none"
datacenter: "/Common/xfxgh"
enabled: "True"
expose_route_domains: "no"
full_path: "/Common/qweqwe"
iq_allow_path: "yes"
iq_allow_service_check: "yes"
iq_allow_snmp: "yes"
limit_cpu_usage: "0"
limit_cpu_usage_status: "disabled"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
limit_mem_avail: "0"
limit_mem_avail_status: "disabled"
link_discovery: "disabled"
monitor: "/Common/bigip "
name: "qweqwe"
partition: "Common"
product: "single-bigip"
virtual_server_discovery: "disabled"
virtual_servers:
- destination: "10.10.10.10:0"
enabled: "True"
full_path: "jsdfhsd"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
name: "jsdfhsd"
translation_address: "none"
translation_port: "0"
'''
try:
from distutils.version import LooseVersion
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import re
class BigIpGtmFactsCommon(object):
    """Shared helpers for collecting GTM facts from a BIG-IP device."""

    def __init__(self):
        self.api = None
        # Bookkeeping keys stripped from every fact dictionary before output.
        self.attributes_to_remove = [
            'kind', 'generation', 'selfLink', '_meta_data',
            'membersReference', 'datacenterReference',
            'virtualServersReference', 'nameReference'
        ]
        # Maps SDK sub-collection attribute names to GTM record type names.
        self.gtm_types = dict(
            a_s='a',
            aaaas='aaaa',
            cnames='cname',
            mxs='mx',
            naptrs='naptr',
            srvs='srv'
        )
        self.request_params = dict(
            params='expandSubcollections=true'
        )

    def is_version_less_than_12(self):
        """Return True when the connected TMOS version predates 12.0.0."""
        return LooseVersion(self.api.tmos_version) < LooseVersion('12.0.0')

    def format_string_facts(self, parameters):
        """Drop bookkeeping attributes and stringify the remaining values.

        Note: *parameters* is mutated in place (unwanted keys are popped).
        """
        for unwanted in self.attributes_to_remove:
            parameters.pop(unwanted, None)
        return {key: str(value) for key, value in parameters.items()}

    def filter_matches_name(self, name):
        """Check *name* against the user-supplied 'filter' regex, if any."""
        pattern = self.params['filter']
        if not pattern:
            return True
        return bool(re.match(pattern, str(name)))

    def get_facts_from_collection(self, collection, collection_type=None):
        """Format every collection item whose name passes the filter."""
        facts = []
        for resource in collection:
            if self.filter_matches_name(resource.name):
                facts.append(self.format_facts(resource, collection_type))
        return facts

    def connect_to_bigip(self, **kwargs):
        """Open a REST session to the BIG-IP management interface."""
        return ManagementRoot(
            kwargs['server'], kwargs['user'], kwargs['password'],
            port=kwargs['server_port'])
class BigIpGtmFactsPools(BigIpGtmFactsCommon):
    """Collects facts about GTM pools.

    On TMOS < 12 pools are one flat collection; on newer versions they are
    split into per-record-type sub-collections (A, AAAA, CNAME, ...).
    """

    def __init__(self, *args, **kwargs):
        super(BigIpGtmFactsPools, self).__init__()
        self.params = kwargs

    def get_facts(self):
        """Connect to the device and return the pool facts."""
        self.api = self.connect_to_bigip(**self.params)
        return self.get_facts_from_device()

    def get_facts_from_device(self):
        """Dispatch to the typed or untyped collection API as appropriate."""
        try:
            if self.is_version_less_than_12():
                return self.get_facts_without_types()
            else:
                return self.get_facts_with_types()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

    def get_facts_with_types(self):
        """Gather facts from each typed pool sub-collection (TMOS >= 12)."""
        result = []
        # ``gtm_type`` instead of ``type`` so the builtin is not shadowed.
        for key, gtm_type in self.gtm_types.items():
            facts = self.get_all_facts_by_type(key, gtm_type)
            if facts:
                result.append(facts)
        return result

    def get_facts_without_types(self):
        """Gather facts from the flat pool collection (TMOS < 12)."""
        pools = self.api.tm.gtm.pools.get_collection(**self.request_params)
        return self.get_facts_from_collection(pools)

    def get_all_facts_by_type(self, key, type):
        """Gather facts for the pool sub-collection attribute named *key*."""
        collection = getattr(self.api.tm.gtm.pools, key)
        pools = collection.get_collection(**self.request_params)
        return self.get_facts_from_collection(pools, type)

    def format_facts(self, pool, collection_type):
        """Flatten a pool resource into a snake_case fact dictionary."""
        result = dict()
        pool_dict = pool.to_dict()
        result.update(self.format_string_facts(pool_dict))
        result.update(self.format_member_facts(pool))
        if collection_type:
            result['type'] = collection_type
        return camel_dict_to_snake_dict(result)

    def format_member_facts(self, pool):
        """Return the pool's member facts, or an empty list when absent."""
        if 'items' not in pool.membersReference:
            return dict(members=[])
        result = []
        for member in pool.membersReference['items']:
            result.append(self.format_string_facts(member))
        return dict(members=result)
class BigIpGtmFactsWideIps(BigIpGtmFactsCommon):
    """Collects facts about GTM wide IPs.

    On TMOS < 12 wide IPs are one flat collection; on newer versions they
    are split into per-record-type sub-collections (A, AAAA, CNAME, ...).
    """

    def __init__(self, *args, **kwargs):
        super(BigIpGtmFactsWideIps, self).__init__()
        self.params = kwargs

    def get_facts(self):
        """Connect to the device and return the wide IP facts."""
        self.api = self.connect_to_bigip(**self.params)
        return self.get_facts_from_device()

    def get_facts_from_device(self):
        """Dispatch to the typed or untyped collection API as appropriate."""
        try:
            if self.is_version_less_than_12():
                return self.get_facts_without_types()
            else:
                return self.get_facts_with_types()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

    def get_facts_with_types(self):
        """Gather facts from each typed wide IP sub-collection (TMOS >= 12)."""
        result = []
        # ``gtm_type`` instead of ``type`` so the builtin is not shadowed.
        for key, gtm_type in self.gtm_types.items():
            facts = self.get_all_facts_by_type(key, gtm_type)
            if facts:
                result.append(facts)
        return result

    def get_facts_without_types(self):
        """Gather facts from the flat wide IP collection (TMOS < 12)."""
        wideips = self.api.tm.gtm.wideips.get_collection(
            **self.request_params
        )
        return self.get_facts_from_collection(wideips)

    def get_all_facts_by_type(self, key, type):
        """Gather facts for the wide IP sub-collection attribute *key*."""
        collection = getattr(self.api.tm.gtm.wideips, key)
        wideips = collection.get_collection(**self.request_params)
        return self.get_facts_from_collection(wideips, type)

    def format_facts(self, wideip, collection_type):
        """Flatten a wide IP resource into a snake_case fact dictionary."""
        result = dict()
        wideip_dict = wideip.to_dict()
        result.update(self.format_string_facts(wideip_dict))
        result.update(self.format_pool_facts(wideip))
        if collection_type:
            result['type'] = collection_type
        return camel_dict_to_snake_dict(result)

    def format_pool_facts(self, wideip):
        """Return the wide IP's pool facts, or an empty list when absent."""
        if not hasattr(wideip, 'pools'):
            return dict(pools=[])
        result = []
        for pool in wideip.pools:
            result.append(self.format_string_facts(pool))
        return dict(pools=result)
class BigIpGtmFactsVirtualServers(BigIpGtmFactsCommon):
    """Collects facts about GTM servers and their virtual servers."""

    def __init__(self, *args, **kwargs):
        super(BigIpGtmFactsVirtualServers, self).__init__()
        self.params = kwargs

    def get_facts(self):
        """Connect to the device and return the server facts."""
        try:
            self.api = self.connect_to_bigip(**self.params)
            return self.get_facts_from_device()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

    def get_facts_from_device(self):
        """Gather facts from the GTM server collection."""
        servers = self.api.tm.gtm.servers.get_collection(
            **self.request_params
        )
        return self.get_facts_from_collection(servers)

    def format_facts(self, server, collection_type=None):
        """Flatten a server resource into a snake_case fact dictionary."""
        result = dict()
        server_dict = server.to_dict()
        result.update(self.format_string_facts(server_dict))
        result.update(self.format_address_facts(server))
        result.update(self.format_virtual_server_facts(server))
        return camel_dict_to_snake_dict(result)

    def format_address_facts(self, server):
        """Return the server's address facts, or an empty list when absent."""
        if not hasattr(server, 'addresses'):
            return dict(addresses=[])
        result = []
        for address in server.addresses:
            result.append(self.format_string_facts(address))
        return dict(addresses=result)

    def format_virtual_server_facts(self, server):
        """Return the server's virtual server facts.

        The loop variable is named ``vs`` so it no longer clobbers the
        ``server`` parameter while iterating its own sub-collection.
        """
        if 'items' not in server.virtualServersReference:
            return dict(virtual_servers=[])
        result = []
        for vs in server.virtualServersReference['items']:
            result.append(self.format_string_facts(vs))
        return dict(virtual_servers=result)
class BigIpGtmFactsManager(object):
    """Orchestrates fact collection across the requested GTM object kinds."""

    def __init__(self, *args, **kwargs):
        self.params = kwargs
        self.api = None

    def get_facts(self):
        """Collect facts for every kind listed in the 'include' parameter."""
        collectors = dict(
            pool=self.get_pool_facts,
            wide_ip=self.get_wide_ip_facts,
            virtual_server=self.get_virtual_server_facts
        )
        result = dict()
        for kind in ('pool', 'wide_ip', 'virtual_server'):
            if kind in self.params['include']:
                result[kind] = collectors[kind]()
        # A facts run always reports changed=True (legacy module behavior).
        result['changed'] = True
        return result

    def get_pool_facts(self):
        """Gather GTM pool facts."""
        return BigIpGtmFactsPools(**self.params).get_facts()

    def get_wide_ip_facts(self):
        """Gather GTM wide IP facts."""
        return BigIpGtmFactsWideIps(**self.params).get_facts()

    def get_virtual_server_facts(self):
        """Gather GTM server / virtual server facts."""
        return BigIpGtmFactsVirtualServers(**self.params).get_facts()
class BigIpGtmFactsModuleConfig(object):
    """Builds the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.argument_spec = dict()
        self.meta_args = dict()
        self.supports_check_mode = False
        self.valid_includes = ['pool', 'wide_ip', 'virtual_server']
        self.initialize_meta_args()
        self.initialize_argument_spec()

    def initialize_meta_args(self):
        """Define the arguments specific to this module."""
        self.meta_args = dict(
            include=dict(type='list', required=True),
            filter=dict(type='str', required=False)
        )

    def initialize_argument_spec(self):
        """Merge the common F5 arguments with the module-specific ones."""
        self.argument_spec = f5_argument_spec()
        self.argument_spec.update(self.meta_args)

    def create(self):
        """Instantiate the AnsibleModule described by this configuration."""
        return AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=self.supports_check_mode
        )
def main():
    """Module entry point: validate prerequisites, gather facts, exit."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    module = BigIpGtmFactsModuleConfig().create()
    try:
        manager = BigIpGtmFactsManager(
            check_mode=module.check_mode, **module.params
        )
        module.exit_json(**manager.get_facts())
    except F5ModuleError as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
nguyentu1602/numpy
|
numpy/distutils/command/install.py
|
240
|
3127
|
from __future__ import division, absolute_import, print_function
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
have_setuptools = True
else:
import distutils.command.install as old_install_mod
have_setuptools = False
from distutils.file_util import write_file
old_install = old_install_mod.install
class install(old_install):
    """numpy's install command: also installs C libraries (install_clib)."""

    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]

    def finalize_options(self):
        old_install.finalize_options(self)
        # Install into install_libbase so package data ends up alongside
        # the package itself.
        self.install_lib = self.install_libbase

    def setuptools_run(self):
        """ The setuptools version of the .run() method.

        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        from distutils.command.install import install as distutils_install

        # Explicit request for old-style install? Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return distutils_install.run(self)

        # Attempt to detect whether we were called from setup() or by another
        # command. If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method. If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'. This is slightly kludgy, but seems to
        # work.
        #
        # NOTE: the depth of 3 (not 2) accounts for the extra frame added by
        # run() calling this method; do not change it without changing run().
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name

        if caller_module != 'distutils.dist' or caller_name != 'run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            distutils_install.run(self)
        else:
            self.do_egg_install()

    def run(self):
        """Run the install, then quote any recorded paths containing spaces.

        bdist_rpm fails when INSTALLED_FILES contains paths with spaces, so
        such paths in the record file are re-written enclosed in
        double-quotes.
        """
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # Use a context manager so the record file is closed even when
            # reading fails part-way through (the original leaked the handle
            # on error).
            lines = []
            need_rewrite = False
            with open(self.record) as f:
                for l in f:
                    l = l.rstrip()
                    if ' ' in l:
                        need_rewrite = True
                        l = '"%s"' % (l)
                    lines.append(l)
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
|
bsd-3-clause
|
FreicoinAlliance/p2pool
|
SOAPpy/Parser.py
|
158
|
36216
|
# SOAPpy modules
from Config import Config
from Types import *
from NS import NS
from Utilities import *
import string
import fpconst
import xml.sax
from wstools.XMLname import fromXMLname
try: from M2Crypto import SSL
except: pass
ident = '$Id: Parser.py 1497 2010-03-08 06:06:52Z pooryorick $'
from version import __version__
################################################################################
# SOAP Parser
################################################################################
class RefHolder:
    """Placeholder recorded where an href points at a not-yet-seen id.

    Remembers the element name plus the frame (and the position within it)
    so the real value can be patched in once the referenced id is parsed.
    """

    def __init__(self, name, frame):
        self.name = name
        self.parent = frame
        self.pos = len(frame)
        self.subpos = frame.namecounts.get(name, 0)

    def __repr__(self):
        return "<%s %s at %d>" % (self.__class__, self.name, id(self))

    # The string form is identical to the repr form.
    __str__ = __repr__
class SOAPParser(xml.sax.handler.ContentHandler):
    """SAX content handler that turns a SOAP envelope into SOAPpy types.

    Keeps a stack of Frame objects (one per open element), resolves
    multi-reference values (href/id pairs) via RefHolder placeholders, and
    converts typed leaf data through convertType/convertToBasicTypes.
    """

    class Frame:
        """Per-element parse state: accumulated children, names and attrs."""

        def __init__(self, name, kind = None, attrs = {}, rules = {}):
            self.name = name
            self.kind = kind
            self.attrs = attrs
            self.rules = rules

            self.contents = []
            self.names = []
            self.namecounts = {}
            self.subattrs = []

        def append(self, name, data, attrs):
            """Record a finished child element under this frame."""
            self.names.append(name)
            self.contents.append(data)
            self.subattrs.append(attrs)

            if self.namecounts.has_key(name):
                self.namecounts[name] += 1
            else:
                self.namecounts[name] = 1

        def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
            """Replace the placeholder at *pos* with the resolved *value*."""
            self.contents[pos] = value

            if attrs:
                self.attrs.update(attrs)

        def __len__(self):
            return len(self.contents)

        def __repr__(self):
            return "<%s %s at %d>" % (self.__class__, self.name, id(self))

    def __init__(self, rules = None):
        xml.sax.handler.ContentHandler.__init__(self)
        self.body = None
        self.header = None
        self.attrs = {}
        self._data = None
        self._next = "E" # Keeping state for message validity
        self._stack = [self.Frame('SOAP')]

        # Make two dictionaries to store the prefix <-> URI mappings, and
        # initialize them with the default
        self._prem = {NS.XML_T: NS.XML}
        self._prem_r = {NS.XML: NS.XML_T}
        self._ids = {}
        self._refs = {}
        self._rules = rules

    def startElementNS(self, name, qname, attrs):
        """Validate envelope structure and push a Frame for the element."""
        def toStr( name ):
            # Render an (ns-uri, localname) pair as prefix:localname for
            # error messages.
            prefix = name[0]
            tag = name[1]
            if self._prem_r.has_key(prefix):
                tag = self._prem_r[name[0]] + ':' + name[1]
            elif prefix:
                tag = prefix + ":" + tag
            return tag

        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            name = (None, name[1][1:])
        else:
            name = tuple(name)

        # First some checking of the layout of the message

        if self._next == "E":
            if name[1] != 'Envelope':
                raise Error, "expected `SOAP-ENV:Envelope', " \
                    "got `%s'" % toStr( name )
            if name[0] != NS.ENV:
                raise faultType, ("%s:VersionMismatch" % NS.ENV_T,
                    "Don't understand version `%s' Envelope" % name[0])
            else:
                self._next = "HorB"
        elif self._next == "HorB":
            if name[0] == NS.ENV and name[1] in ("Header", "Body"):
                self._next = None
            else:
                raise Error, \
                    "expected `SOAP-ENV:Header' or `SOAP-ENV:Body', " \
                    "got `%s'" % toStr( name )
        elif self._next == "B":
            if name == (NS.ENV, "Body"):
                self._next = None
            else:
                raise Error, "expected `SOAP-ENV:Body', " \
                    "got `%s'" % toStr( name )
        elif self._next == "":
            raise Error, "expected nothing, " \
                "got `%s'" % toStr( name )

        # Look up a conversion rule for this element, if one was supplied.
        if len(self._stack) == 2:
            rules = self._rules
        else:
            try:
                rules = self._stack[-1].rules[name[1]]
            except:
                rules = None

        if type(rules) not in (NoneType, DictType):
            kind = rules
        else:
            kind = attrs.get((NS.ENC, 'arrayType'))

            if kind != None:
                del attrs._attrs[(NS.ENC, 'arrayType')]

                i = kind.find(':')
                if i >= 0:
                    try:
                        kind = (self._prem[kind[:i]], kind[i + 1:])
                    except:
                        kind = None
                else:
                    kind = None

        self.pushFrame(self.Frame(name[1], kind, attrs._attrs, rules))

        self._data = [] # Start accumulating

    def pushFrame(self, frame):
        """Push a Frame onto the element stack."""
        self._stack.append(frame)

    def popFrame(self):
        """Pop and return the top Frame from the element stack."""
        return self._stack.pop()

    def endElementNS(self, name, qname):
        """Convert the finished element to a value and attach it.

        The while-1 loop is a structured goto: each branch computes *data*
        for the closed element and breaks out; the tail then links the
        value into the parent frame and resolves pending href references.
        """
        # Workaround two sax bugs
        if name[0] == None and name[1][0] == ' ':
            ns, name = None, name[1][1:]
        else:
            ns, name = tuple(name)

        name = fromXMLname(name) # convert to SOAP 1.2 XML name encoding

        if self._next == "E":
            raise Error, "didn't get SOAP-ENV:Envelope"
        if self._next in ("HorB", "B"):
            raise Error, "didn't get SOAP-ENV:Body"

        cur = self.popFrame()
        attrs = cur.attrs

        idval = None

        if attrs.has_key((None, 'id')):
            idval = attrs[(None, 'id')]

            if self._ids.has_key(idval):
                raise Error, "duplicate id `%s'" % idval

            del attrs[(None, 'id')]

        root = 1

        if len(self._stack) == 3:
            if attrs.has_key((NS.ENC, 'root')):
                root = int(attrs[(NS.ENC, 'root')])

                # Do some preliminary checks. First, if root="0" is present,
                # the element must have an id. Next, if root="n" is present,
                # n something other than 0 or 1, raise an exception.

                if root == 0:
                    if idval == None:
                        raise Error, "non-root element must have an id"
                elif root != 1:
                    raise Error, "SOAP-ENC:root must be `0' or `1'"

                del attrs[(NS.ENC, 'root')]

        while 1:
            # href elements become RefHolder placeholders (or the already
            # parsed value, when the id was seen first).
            href = attrs.get((None, 'href'))
            if href:
                if href[0] != '#':
                    raise Error, "Non-local hrefs are not yet suppported."
                if self._data != None and \
                   string.join(self._data, "").strip() != '':
                    raise Error, "hrefs can't have data"

                href = href[1:]

                if self._ids.has_key(href):
                    data = self._ids[href]
                else:
                    data = RefHolder(name, self._stack[-1])

                    if self._refs.has_key(href):
                        self._refs[href].append(data)
                    else:
                        self._refs[href] = [data]

                del attrs[(None, 'href')]

                break

            kind = None

            # Pull the xsi:type attribute (any known XSI namespace).
            if attrs:
                for i in NS.XSI_L:
                    if attrs.has_key((i, 'type')):
                        kind = attrs[(i, 'type')]
                        del attrs[(i, 'type')]

                if kind != None:
                    i = kind.find(':')
                    if i >= 0:
                        try:
                            kind = (self._prem[kind[:i]], kind[i + 1:])
                        except:
                            kind = (None, kind)
                    else:
                        # XXX What to do here? (None, kind) is just going to fail in convertType
                        #print "Kind with no NS:", kind
                        kind = (None, kind)

            null = 0

            # Detect xsi:null / xsi:nil markers, accepting 'true' or 1.
            if attrs:
                for i in (NS.XSI, NS.XSI2):
                    if attrs.has_key((i, 'null')):
                        null = attrs[(i, 'null')]
                        del attrs[(i, 'null')]

                if attrs.has_key((NS.XSI3, 'nil')):
                    null = attrs[(NS.XSI3, 'nil')]
                    del attrs[(NS.XSI3, 'nil')]

                ## Check for nil
                # check for nil='true'
                if type(null) in (StringType, UnicodeType):
                    if null.lower() == 'true':
                        null = 1

                # check for nil=1, but watch out for string values
                try:
                    null = int(null)
                except ValueError, e:
                    if not e[0].startswith("invalid literal for int()"):
                        raise e
                    null = 0

                if null:
                    if len(cur) or \
                       (self._data != None and string.join(self._data, "").strip() != ''):
                        raise Error, "nils can't have data"

                    data = None

                    break

            # Top-level Header/Body and Fault elements get special types.
            if len(self._stack) == 2:
                if (ns, name) == (NS.ENV, "Header"):
                    self.header = data = headerType(attrs = attrs)
                    self._next = "B"
                    break
                elif (ns, name) == (NS.ENV, "Body"):
                    self.body = data = bodyType(attrs = attrs)
                    self._next = ""
                    break
            elif len(self._stack) == 3 and self._next == None:
                if (ns, name) == (NS.ENV, "Fault"):
                    data = faultType()
                    self._next = None # allow followons
                    break

            #print "\n"
            #print "data=", self._data
            #print "kind=", kind
            #print "cur.kind=", cur.kind
            #print "cur.rules=", cur.rules
            #print "\n"

            # A user-supplied rule takes precedence over xsi:type info.
            if cur.rules != None:
                rule = cur.rules

                if type(rule) in (StringType, UnicodeType):
                    rule = (None, rule) # none flags special handling
                elif type(rule) == ListType:
                    rule = tuple(rule)

                #print "kind=",kind
                #print "rule=",rule

                # XXX What if rule != kind?
                if callable(rule):
                    data = rule(string.join(self._data, ""))
                elif type(rule) == DictType:
                    data = structType(name = (ns, name), attrs = attrs)
                elif rule[1][:9] == 'arrayType':
                    data = self.convertType(cur.contents,
                        rule, attrs)
                else:
                    data = self.convertType(string.join(self._data, ""),
                        rule, attrs)

                break

            #print "No rules, using kind or cur.kind..."

            # SOAP-ENC arrays: the frame carries the arrayType from
            # startElementNS.
            if (kind == None and cur.kind != None) or \
               (kind == (NS.ENC, 'Array')):
                kind = cur.kind

                if kind == None:
                    kind = 'ur-type[%d]' % len(cur)
                else:
                    kind = kind[1]

                if len(cur.namecounts) == 1:
                    elemsname = cur.names[0]
                else:
                    elemsname = None

                data = self.startArray((ns, name), kind, attrs, elemsname)

                break

            if len(self._stack) == 3 and kind == None and \
               len(cur) == 0 and \
               (self._data == None or string.join(self._data, "").strip() == ''):
                data = structType(name = (ns, name), attrs = attrs)
                break

            if len(cur) == 0 and ns != NS.URN:
                # Nothing's been added to the current frame so it must be a
                # simple type.

                if kind == None:
                    # If the current item's container is an array, it will
                    # have a kind. If so, get the bit before the first [,
                    # which is the type of the array, therefore the type of
                    # the current item.

                    kind = self._stack[-1].kind

                    if kind != None:
                        i = kind[1].find('[')
                        if i >= 0:
                            kind = (kind[0], kind[1][:i])
                    elif ns != None:
                        kind = (ns, name)

                if kind != None:
                    try:
                        data = self.convertType(string.join(self._data, ""),
                            kind, attrs)
                    except UnknownTypeError:
                        data = None
                else:
                    data = None

                if data == None:
                    if self._data == None:
                        data = ''
                    else:
                        data = string.join(self._data, "")

                    if len(attrs) == 0:
                        try: data = str(data)
                        except: pass

                break

            data = structType(name = (ns, name), attrs = attrs)

            break

        # Move accumulated children into the new compound value, fixing up
        # any RefHolder placeholders to point at their new parent.
        if isinstance(data, compoundType):
            for i in range(len(cur)):
                v = cur.contents[i]
                data._addItem(cur.names[i], v, cur.subattrs[i])

                if isinstance(v, RefHolder):
                    v.parent = data

        if root:
            self._stack[-1].append(name, data, attrs)

        # If this element had an id, resolve every href that was waiting
        # for it.
        if idval != None:
            self._ids[idval] = data

            if self._refs.has_key(idval):
                for i in self._refs[idval]:
                    i.parent._placeItem(i.name, data, i.pos, i.subpos, attrs)

                del self._refs[idval]

        self.attrs[id(data)] = attrs
        if isinstance(data, anyType):
            data._setAttrs(attrs)

        self._data = None # Stop accumulating

    def endDocument(self):
        """Raise if any href references were never resolved."""
        if len(self._refs) == 1:
            raise Error, \
                "unresolved reference " + self._refs.keys()[0]
        elif len(self._refs) > 1:
            raise Error, \
                "unresolved references " + ', '.join(self._refs.keys())

    def startPrefixMapping(self, prefix, uri):
        """Record a namespace prefix <-> URI mapping in both directions."""
        self._prem[prefix] = uri
        self._prem_r[uri] = prefix

    def endPrefixMapping(self, prefix):
        """Forget a namespace prefix mapping when its scope closes."""
        try:
            del self._prem_r[self._prem[prefix]]
            del self._prem[prefix]
        except:
            pass

    def characters(self, c):
        """Accumulate character data while inside an element."""
        if self._data != None:
            self._data.append(c)

    # Matches SOAP-ENC arrayType values such as "ns:type[rank][size]";
    # compiled lazily on first use by startArray.
    arrayre = '^(?:(?P<ns>[^:]*):)?' \
        '(?P<type>[^[]+)' \
        '(?:\[(?P<rank>,*)\])?' \
        '(?:\[(?P<asize>\d+(?:,\d+)*)?\])$'

    def startArray(self, name, kind, attrs, elemsname):
        """Create an (typed)arrayType value from an arrayType declaration."""
        if type(self.arrayre) == StringType:
            self.arrayre = re.compile (self.arrayre)

        offset = attrs.get((NS.ENC, "offset"))

        if offset != None:
            del attrs[(NS.ENC, "offset")]

            try:
                if offset[0] == '[' and offset[-1] == ']':
                    offset = int(offset[1:-1])
                    if offset < 0:
                        raise Exception
                else:
                    raise Exception
            except:
                raise AttributeError, "invalid Array offset"
        else:
            offset = 0

        try:
            m = self.arrayre.search(kind)

            if m == None:
                raise Exception

            t = m.group('type')

            if t == 'ur-type':
                return arrayType(None, name, attrs, offset, m.group('rank'),
                    m.group('asize'), elemsname)
            elif m.group('ns') != None:
                return typedArrayType(None, name,
                    (self._prem[m.group('ns')], t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
            else:
                return typedArrayType(None, name, (None, t), attrs, offset,
                    m.group('rank'), m.group('asize'), elemsname)
        except:
            raise AttributeError, "invalid Array type `%s'" % kind

    # Conversion

    class DATETIMECONSTS:
        """Regex fragments and compiled patterns for XSD date/time types."""

        SIGNre = '(?P<sign>-?)'
        CENTURYre = '(?P<century>\d{2,})'
        YEARre = '(?P<year>\d{2})'
        MONTHre = '(?P<month>\d{2})'
        DAYre = '(?P<day>\d{2})'
        HOURre = '(?P<hour>\d{2})'
        MINUTEre = '(?P<minute>\d{2})'
        SECONDre = '(?P<second>\d{2}(?:\.\d*)?)'
        TIMEZONEre = '(?P<zulu>Z)|(?P<tzsign>[-+])(?P<tzhour>\d{2}):' \
            '(?P<tzminute>\d{2})'
        BOSre = '^\s*'
        EOSre = '\s*$'

        __allres = {'sign': SIGNre, 'century': CENTURYre, 'year': YEARre,
            'month': MONTHre, 'day': DAYre, 'hour': HOURre,
            'minute': MINUTEre, 'second': SECONDre, 'timezone': TIMEZONEre,
            'b': BOSre, 'e': EOSre}

        dateTime = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)sT' \
            '%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % __allres
        timeInstant = dateTime
        timePeriod = dateTime
        time = '%(b)s%(hour)s:%(minute)s:%(second)s(%(timezone)s)?%(e)s' % \
            __allres
        date = '%(b)s%(sign)s%(century)s%(year)s-%(month)s-%(day)s' \
            '(%(timezone)s)?%(e)s' % __allres
        century = '%(b)s%(sign)s%(century)s(%(timezone)s)?%(e)s' % __allres
        gYearMonth = '%(b)s%(sign)s%(century)s%(year)s-%(month)s' \
            '(%(timezone)s)?%(e)s' % __allres
        gYear = '%(b)s%(sign)s%(century)s%(year)s(%(timezone)s)?%(e)s' % \
            __allres
        year = gYear
        gMonthDay = '%(b)s--%(month)s-%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDate = gMonthDay
        gDay = '%(b)s---%(day)s(%(timezone)s)?%(e)s' % __allres
        recurringDay = gDay
        gMonth = '%(b)s--%(month)s--(%(timezone)s)?%(e)s' % __allres
        month = gMonth
        recurringInstant = '%(b)s%(sign)s(%(century)s|-)(%(year)s|-)-' \
            '(%(month)s|-)-(%(day)s|-)T' \
            '(%(hour)s|-):(%(minute)s|-):(%(second)s|-)' \
            '(%(timezone)s)?%(e)s' % __allres
        duration = '%(b)s%(sign)sP' \
            '((?P<year>\d+)Y)?' \
            '((?P<month>\d+)M)?' \
            '((?P<day>\d+)D)?' \
            '((?P<sep>T)' \
            '((?P<hour>\d+)H)?' \
            '((?P<minute>\d+)M)?' \
            '((?P<second>\d*(?:\.\d*)?)S)?)?%(e)s' % \
            __allres
        timeDuration = duration

        # The extra 31 on the front is:
        # - so the tuple is 1-based
        # - so months[month-1] is December's days if month is 1
        months = (31, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)

    def convertDateTime(self, value, kind):
        """Parse *value* against the DATETIMECONSTS pattern named *kind*.

        Returns a tuple (or scalar, when only one field is present) of the
        date/time components, normalized to UTC via the zone offset.
        """
        def getZoneOffset(d):
            # Minutes east of UTC taken from the tz* match groups; 0 for
            # Zulu or when no timezone matched.
            zoffs = 0

            try:
                if d['zulu'] == None:
                    zoffs = 60 * int(d['tzhour']) + int(d['tzminute'])
                    if d['tzsign'] != '-':
                        zoffs = -zoffs
            except TypeError:
                pass

            return zoffs

        def applyZoneOffset(months, zoffs, date, minfield, posday = 1):
            # Fold the zone offset into the mutable [y, m, d, H, M, S]
            # list, cascading borrows up through the larger fields.
            if zoffs == 0 and (minfield > 4 or 0 <= date[5] < 60):
                return date

            if minfield > 5: date[5] = 0
            if minfield > 4: date[4] = 0

            if date[5] < 0:
                date[4] += int(date[5]) / 60
                date[5] %= 60

            date[4] += zoffs

            if minfield > 3 or 0 <= date[4] < 60: return date
            date[3] += date[4] / 60
            date[4] %= 60

            if minfield > 2 or 0 <= date[3] < 24: return date
            date[2] += date[3] / 24
            date[3] %= 24

            if minfield > 1:
                if posday and date[2] <= 0:
                    date[2] += 31 # zoffs is at most 99:59, so the
                                  # day will never be less than -3
                return date

            while 1:
                # The date[1] == 3 (instead of == 2) is because we're
                # going back a month, so we need to know if the previous
                # month is February, so we test if this month is March.

                leap = minfield == 0 and date[1] == 3 and \
                    date[0] % 4 == 0 and \
                    (date[0] % 100 != 0 or date[0] % 400 == 0)

                if 0 < date[2] <= months[date[1]] + leap: break

                date[2] += months[date[1] - 1] + leap
                date[1] -= 1

                if date[1] > 0: break

                date[1] = 12

                if minfield > 0: break

                date[0] -= 1

            return date

        try:
            exp = getattr(self.DATETIMECONSTS, kind)
        except AttributeError:
            return None

        # Patterns are stored as strings and compiled lazily, caching the
        # compiled form back onto DATETIMECONSTS.
        if type(exp) == StringType:
            exp = re.compile(exp)
            setattr (self.DATETIMECONSTS, kind, exp)

        m = exp.search(value)

        try:
            if m == None:
                raise Exception

            d = m.groupdict()
            f = ('century', 'year', 'month', 'day',
                'hour', 'minute', 'second')
            fn = len(f) # Index of first non-None value
            r = []

            if kind in ('duration', 'timeDuration'):
                if d['sep'] != None and d['hour'] == None and \
                   d['minute'] == None and d['second'] == None:
                    raise Exception

                f = f[1:]

                for i in range(len(f)):
                    s = d[f[i]]

                    if s != None:
                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try: s = int(s)
                            except ValueError: s = long(s)

                        if i < fn: fn = i

                    r.append(s)

                if fn > len(r): # Any non-Nones?
                    raise Exception

                if d['sign'] == '-':
                    r[fn] = -r[fn]

                return tuple(r)

            if kind == 'recurringInstant':
                for i in range(len(f)):
                    s = d[f[i]]

                    if s == None or s == '-':
                        if i > fn:
                            raise Exception
                        s = None
                    else:
                        if i < fn:
                            fn = i

                        if f[i] == 'second':
                            s = float(s)
                        else:
                            try:
                                s = int(s)
                            except ValueError:
                                s = long(s)

                    r.append(s)

                s = r.pop(0)

                if fn == 0:
                    r[0] += s * 100
                else:
                    fn -= 1

                if fn < len(r) and d['sign'] == '-':
                    r[fn] = -r[fn]

                cleanDate(r, fn)

                return tuple(applyZoneOffset(self.DATETIMECONSTS.months,
                    getZoneOffset(d), r, fn, 0))

            r = [0, 0, 1, 1, 0, 0, 0]

            for i in range(len(f)):
                field = f[i]

                s = d.get(field)
                if s != None:
                    if field == 'second':
                        s = float(s)
                    else:
                        try:
                            s = int(s)
                        except ValueError:
                            s = long(s)

                    if i < fn:
                        fn = i

                    r[i] = s

            if fn > len(r): # Any non-Nones?
                raise Exception

            # Merge century and year into a single year value.
            s = r.pop(0)

            if fn == 0:
                r[0] += s * 100
            else:
                fn -= 1

            if d.get('sign') == '-':
                r[fn] = -r[fn]

            cleanDate(r, fn)

            zoffs = getZoneOffset(d)

            if zoffs:
                r = applyZoneOffset(self.DATETIMECONSTS.months, zoffs, r, fn)

            if kind == 'century':
                return r[0] / 100

            s = []

            for i in range(1, len(f)):
                if d.has_key(f[i]):
                    s.append(r[i - 1])

            if len(s) == 1:
                return s[0]
            return tuple(s)
        except Exception, e:
            raise Error, "invalid %s value `%s' - %s" % (kind, value, e)

    # (must-be-long flag, minimum, maximum) for each bounded integer type;
    # None means the bound is open.
    intlimits = \
    {
        'nonPositiveInteger': (0, None, 0),
        'non-positive-integer': (0, None, 0),
        'negativeInteger': (0, None, -1),
        'negative-integer': (0, None, -1),
        'long': (1, -9223372036854775808L,
            9223372036854775807L),
        'int': (0, -2147483648L, 2147483647L),
        'short': (0, -32768, 32767),
        'byte': (0, -128, 127),
        'nonNegativeInteger': (0, 0, None),
        'non-negative-integer': (0, 0, None),
        'positiveInteger': (0, 1, None),
        'positive-integer': (0, 1, None),
        'unsignedLong': (1, 0, 18446744073709551615L),
        'unsignedInt': (0, 0, 4294967295L),
        'unsignedShort': (0, 0, 65535),
        'unsignedByte': (0, 0, 255),
    }
    # (smallest positive value, minimum, maximum) used for strict range
    # checking of float/double values.
    floatlimits = \
    {
        'float': (7.0064923216240861E-46, -3.4028234663852886E+38,
            3.4028234663852886E+38),
        'double': (2.4703282292062327E-324, -1.7976931348623158E+308,
            1.7976931348623157E+308),
    }
    # Any non-zero digit in the source text means a parsed value of 0.0
    # actually underflowed; compiled lazily in convertToBasicTypes.
    zerofloatre = '[1-9]'

    def convertType(self, d, t, attrs, config=Config):
        """Convert data *d* of type tuple *t*, expanding array rules."""
        if t[0] is None and t[1] is not None:
            type = t[1].strip()
            if type[:9] == 'arrayType':
                index_eq = type.find('=')
                index_obr = type.find('[')
                index_cbr = type.find(']')
                elemtype = type[index_eq+1:index_obr]
                elemnum = type[index_obr+1:index_cbr]
                if elemtype=="ur-type":
                    return(d)
                else:
                    # Convert each array element individually as an XSD
                    # value of the declared element type.
                    newarr = map( lambda(di):
                        self.convertToBasicTypes(d=di,
                            t = ( NS.XSD, elemtype),
                            attrs=attrs,
                            config=config),
                        d)
                    return newarr
            else:
                t = (NS.XSD, t[1])

        return self.convertToBasicTypes(d, t, attrs, config)

    def convertToSOAPpyTypes(self, d, t, attrs, config=Config):
        # Placeholder for conversion to rich SOAPpy types; not implemented.
        pass

    def convertToBasicTypes(self, d, t, attrs, config=Config):
        """Convert string data *d* to the Python value for XSD type *t*.

        Raises UnknownTypeError when the (namespace, name) pair is not a
        recognized basic type.
        """
        dnn = d or ''

        #if Config.debug:
        #print "convertToBasicTypes:"
        #print " requested_type=", t
        #print " data=", d

        # print "convertToBasicTypes:"
        # print " requested_type=", t
        # print " data=", d
        # print " attrs=", attrs
        # print " t[0]=", t[0]
        # print " t[1]=", t[1]

        # print " in?", t[0] in NS.EXSD_L

        if t[0] in NS.EXSD_L:
            if t[1]=="integer": # unbounded integer type
                try:
                    d = int(d)
                    if len(attrs):
                        d = long(d)
                except:
                    d = long(d)
                return d
            if self.intlimits.has_key (t[1]): # range-bounded integer types
                l = self.intlimits[t[1]]
                try: d = int(d)
                except: d = long(d)
                if l[1] != None and d < l[1]:
                    raise UnderflowError, "%s too small" % d
                if l[2] != None and d > l[2]:
                    raise OverflowError, "%s too large" % d
                if l[0] or len(attrs):
                    return long(d)
                return d
            if t[1] == "string":
                if len(attrs):
                    return unicode(dnn)
                try:
                    return str(dnn)
                except:
                    return dnn
            if t[1] in ("bool", "boolean"):
                d = d.strip().lower()
                if d in ('0', 'false'):
                    return False
                if d in ('1', 'true'):
                    return True
                raise AttributeError, "invalid boolean value"
            if t[1] in ('double','float'):
                l = self.floatlimits[t[1]]
                s = d.strip().lower()

                # Explicitly check for NaN and Infinities
                if s == "nan":
                    d = fpconst.NaN
                elif s[0:2]=="inf" or s[0:3]=="+inf":
                    d = fpconst.PosInf
                elif s[0:3] == "-inf":
                    d = fpconst.NegInf
                else :
                    d = float(s)

                if config.strict_range:
                    if fpconst.isNaN(d):
                        if s[0:2] != 'nan':
                            raise ValueError, "invalid %s: %s" % (t[1], s)
                    elif fpconst.isNegInf(d):
                        if s[0:3] != '-inf':
                            raise UnderflowError, "%s too small: %s" % (t[1], s)
                    elif fpconst.isPosInf(d):
                        if s[0:2] != 'inf' and s[0:3] != '+inf':
                            raise OverflowError, "%s too large: %s" % (t[1], s)
                    elif d < 0 and d < l[1]:
                        raise UnderflowError, "%s too small: %s" % (t[1], s)
                    elif d > 0 and ( d < l[0] or d > l[2] ):
                        raise OverflowError, "%s too large: %s" % (t[1], s)
                    elif d == 0:
                        if type(self.zerofloatre) == StringType:
                            self.zerofloatre = re.compile(self.zerofloatre)

                        if self.zerofloatre.search(s):
                            raise UnderflowError, "invalid %s: %s" % (t[1], s)

                return d
            if t[1] in ("dateTime", "date", "timeInstant", "time"):
                return self.convertDateTime(d, t[1])
            if t[1] == "decimal":
                return float(d)
            if t[1] in ("language", "QName", "NOTATION", "NMTOKEN", "Name",
                "NCName", "ID", "IDREF", "ENTITY"):
                return collapseWhiteSpace(d)
            if t[1] in ("IDREFS", "ENTITIES", "NMTOKENS"):
                d = collapseWhiteSpace(d)
                return d.split()
        if t[0] in NS.XSD_L:
            if t[1] in ("base64", "base64Binary"):
                if d:
                    return base64.decodestring(d)
                else:
                    return ''
            if t[1] == "hexBinary":
                if d:
                    return decodeHexString(d)
                else:
                    return
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("normalizedString", "token"):
                return collapseWhiteSpace(d)
        if t[0] == NS.ENC:
            if t[1] == "base64":
                if d:
                    return base64.decodestring(d)
                else:
                    return ''
        if t[0] == NS.XSD:
            if t[1] == "binary":
                try:
                    e = attrs[(None, 'encoding')]

                    if d:
                        if e == 'hex':
                            return decodeHexString(d)
                        elif e == 'base64':
                            return base64.decodestring(d)
                    else:
                        return ''
                except:
                    pass

                raise Error, "unknown or missing binary encoding"
            if t[1] == "uri":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "recurringInstant":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.ENC):
            if t[1] == "uriReference":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] == "timePeriod":
                return self.convertDateTime(d, t[1])
            if t[1] in ("century", "year"):
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD, NS.XSD2, NS.ENC):
            if t[1] == "timeDuration":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD3:
            if t[1] == "anyURI":
                return urllib.unquote(collapseWhiteSpace(d))
            if t[1] in ("gYearMonth", "gMonthDay"):
                return self.convertDateTime(d, t[1])
            if t[1] == "gYear":
                return self.convertDateTime(d, t[1])
            if t[1] == "gMonth":
                return self.convertDateTime(d, t[1])
            if t[1] == "gDay":
                return self.convertDateTime(d, t[1])
            if t[1] == "duration":
                return self.convertDateTime(d, t[1])
        if t[0] in (NS.XSD2, NS.XSD3):
            if t[1] == "token":
                return collapseWhiteSpace(d)
            if t[1] == "recurringDate":
                return self.convertDateTime(d, t[1])
            if t[1] == "month":
                return self.convertDateTime(d, t[1])
            if t[1] == "recurringDay":
                return self.convertDateTime(d, t[1])
        if t[0] == NS.XSD2:
            if t[1] == "CDATA":
                return collapseWhiteSpace(d)

        raise UnknownTypeError, "unknown type `%s'" % (str(t[0]) + ':' + t[1])
################################################################################
# call to SOAPParser that keeps all of the info
################################################################################
def _parseSOAP(xml_str, rules = None):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
parser = xml.sax.make_parser()
t = SOAPParser(rules = rules)
parser.setContentHandler(t)
e = xml.sax.handler.ErrorHandler()
parser.setErrorHandler(e)
inpsrc = xml.sax.xmlreader.InputSource()
inpsrc.setByteStream(StringIO(xml_str))
# turn on namespace mangeling
parser.setFeature(xml.sax.handler.feature_namespaces,1)
parser.setFeature(xml.sax.handler.feature_external_ges, 0)
try:
parser.parse(inpsrc)
except xml.sax.SAXParseException, e:
parser._parser = None
raise e
return t
################################################################################
# SOAPParser's more public interface
################################################################################
def parseSOAP(xml_str, attrs = 0):
    """Parse a SOAP message and return its body.

    When `attrs` is truthy, return a (body, attrs) pair instead.
    """
    parsed = _parseSOAP(xml_str)
    return (parsed.body, parsed.attrs) if attrs else parsed.body
def parseSOAPRPC(xml_str, header = 0, body = 0, attrs = 0, rules = None):
    """Parse a SOAP-RPC message and return the method-call payload.

    When any of the `header`, `body` or `attrs` flags are set, a tuple is
    returned whose first element is the payload followed by the requested
    envelope pieces (in that order).
    """
    parsed = _parseSOAP(xml_str, rules = rules)
    payload = parsed.body[0]

    # Empty string, for RPC this translates into a void: synthesize a
    # structType named after the (last) non-private body element.
    if type(payload) in (type(''), type(u'')) and payload in ('', u''):
        name = "Response"
        for key in parsed.body.__dict__.keys():
            if key[0] != "_":
                name = key
        payload = structType(name)

    if not (header or body or attrs):
        return payload

    ret = (payload,)
    if header:
        ret = ret + (parsed.header,)
    if body:
        ret = ret + (parsed.body,)
    if attrs:
        ret = ret + (parsed.attrs,)
    return ret
|
gpl-3.0
|
magic0704/neutron
|
neutron/agent/linux/dhcp.py
|
2
|
38663
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import six
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.common import utils as commonutils
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
# Protocol names used when declaring the firewall PORTS below.
UDP = 'udp'
TCP = 'tcp'
# Well-known server ports for DNS and DHCP (v4 and v6) traffic.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local address/prefix at which the Nova/EC2 metadata service is
# reachable from inside instances.
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code 249: Microsoft "classless static routes", understood by
# pre-RFC3442 Windows clients.
WIN2k3_STATIC_DNS = 249
# Prefix for the per-network namespaces created by the DHCP agent.
NS_PREFIX = 'qdhcp-'
DNSMASQ_SERVICE_NAME = 'dnsmasq'
class DictModel(dict):
    """Convert dict into an object that provides attribute access to values."""

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values.

        Nested dicts -- including dicts found inside list/tuple values --
        are recursively wrapped so attribute access works at any depth.
        """
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        # NOTE: items() (not the Python 2-only iteritems()) works on both
        # Python 2 and 3. Only existing keys are reassigned below, so
        # iterating while updating values is safe on a py3 dict view too.
        for key, value in self.items():
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        """Expose dict keys as attributes; unknown keys raise AttributeError
        (not KeyError) so attribute protocols like hasattr() work.
        """
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        # Attribute assignment stores into the underlying dict.
        self[name] = value

    def __delattr__(self, name):
        # Attribute deletion removes the underlying dict key.
        del self[name]
class NetModel(DictModel):
    """DictModel for a network that also knows its DHCP namespace name."""

    def __init__(self, use_namespaces, d):
        super(NetModel, self).__init__(d)
        # Namespace name is 'qdhcp-<network id>' when namespaces are in
        # use, otherwise None.
        if use_namespaces:
            self._ns_name = "%s%s" % (NS_PREFIX, self.id)
        else:
            self._ns_name = None

    @property
    def namespace(self):
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Abstract interface for drivers that manage DHCP for one network.

    Concrete drivers (e.g. Dnsmasq below) implement the lifecycle hooks
    (enable/disable/reload_allocations) for a single network.
    """

    def __init__(self, conf, network, process_monitor,
                 version=None, plugin=None):
        # conf: agent configuration; network: the NetModel to serve;
        # process_monitor: monitor used to watch the spawned DHCP server;
        # plugin: RPC proxy handed to the DeviceManager for port operations.
        self.conf = conf
        self.network = network
        self.process_monitor = process_monitor
        self.device_manager = DeviceManager(self.conf, plugin)
        self.version = version

    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""

    def restart(self):
        """Restart the dhcp service for the network."""
        self.disable(retain_port=True)
        self.enable()

    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        raise NotImplementedError()

    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""
        raise NotImplementedError()

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated"""
        raise NotImplementedError()

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """True if the metadata-proxy should be enabled for the network."""
        raise NotImplementedError()
class DhcpLocalProcess(DhcpBase):
    """DhcpBase driver that runs the DHCP server as a local process and
    keeps its state (pid, interface name, config) in per-network files
    under the agent's dhcp_confs directory.
    """

    PORTS = []

    def __init__(self, conf, network, process_monitor, version=None,
                 plugin=None):
        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
                                               version, plugin)
        self.confs_dir = self.get_confs_dir(conf)
        # Per-network state directory, named after the network UUID.
        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
        utils.ensure_dir(self.network_conf_dir)

    @staticmethod
    def get_confs_dir(conf):
        # Normalized absolute path of the configured dhcp_confs directory.
        return os.path.abspath(os.path.normpath(conf.dhcp_confs))

    def get_conf_file_name(self, kind):
        """Returns the file name for a given kind of config file."""
        return os.path.join(self.network_conf_dir, kind)

    def _remove_config_files(self):
        # Remove the whole per-network state directory; missing files are
        # not an error.
        shutil.rmtree(self.network_conf_dir, ignore_errors=True)

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            utils.ensure_dir(self.network_conf_dir)
            interface_name = self.device_manager.setup(self.network)
            # Persist the interface name to a state file via the property
            # setter so it survives agent restarts.
            self.interface_name = interface_name
            self.spawn_process()

    def _get_process_manager(self, cmd_callback=None):
        # Build a ProcessManager keyed on the network id; the pid is kept
        # in the per-network 'pid' state file.
        return external_process.ProcessManager(
            conf=self.conf,
            uuid=self.network.id,
            namespace=self.network.namespace,
            default_cmd_callback=cmd_callback,
            pid_file=self.get_conf_file_name('pid'))

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
        self._get_process_manager().disable()
        if not retain_port:
            self._destroy_namespace_and_port()
        self._remove_config_files()

    def _destroy_namespace_and_port(self):
        # Best-effort teardown: failures to delete the interface or the
        # namespace are logged but do not abort the disable.
        try:
            self.device_manager.destroy(self.network, self.interface_name)
        except RuntimeError:
            LOG.warning(_LW('Failed trying to delete interface: %s'),
                        self.interface_name)
        if self.conf.dhcp_delete_namespaces and self.network.namespace:
            ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
            try:
                ns_ip.netns.delete(self.network.namespace)
            except RuntimeError:
                LOG.warning(_LW('Failed trying to delete namespace: %s'),
                            self.network.namespace)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    return converter(f.read()) if converter else f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')
        # Reached only on failure; returns None after logging why.
        LOG.debug(msg, file_name)
        return None

    @property
    def interface_name(self):
        # Read back from the 'interface' state file (None on failure).
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface')
        utils.replace_file(interface_file_path, value)

    @property
    def active(self):
        return self._get_process_manager().active

    @abc.abstractmethod
    def spawn_process(self):
        pass
class Dnsmasq(DhcpLocalProcess):
    """DHCP driver that manages one dnsmasq process per network."""

    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {constants.IP_VERSION_4:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             constants.IP_VERSION_6:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    # Template for per-subnet dnsmasq tag names ('tag0', 'tag1', ...).
    _TAG_PREFIX = 'tag%d'

    @classmethod
    def check_version(cls):
        """No version check is required for dnsmasq; intentionally a no-op."""
        pass

    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = cls.get_confs_dir(conf)
        try:
            # Per-network config dirs are named after the network UUID.
            return [
                c for c in os.listdir(confs_dir)
                if uuidutils.is_uuid_like(c)
            ]
        except OSError:
            return []

    def _build_cmdline_callback(self, pid_file):
        """Build the dnsmasq command line for this network.

        Used as the ProcessManager command callback; returns the argv list.
        """
        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % pid_file,
            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
            '--leasefile-ro',
            '--dhcp-authoritative',
        ]
        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [constants.DHCPV6_STATEFUL,
                                  constants.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'
            cidr = netaddr.IPNetwork(subnet.cidr)
            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration
            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    # IPv6 ranges additionally carry the prefix length.
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size
        if cfg.CONF.advertise_mtu:
            mtu = self.network.mtu
            # Do not advertise unknown mtu
            if mtu > 0:
                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))
        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)
        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)
        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')
        return cmd

    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        self._spawn_or_reload_process(reload_with_HUP=False)

    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.
        When reload_with_HUP is True, dnsmasq receives a HUP signal,
        or it's reloaded if the process is not running.
        """
        self._output_config_files()
        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)
        pm.enable(reload_cfg=reload_with_HUP)
        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)

    def _release_lease(self, mac_address, ip):
        """Release a DHCP lease."""
        # Runs the dhcp_release helper inside the network's namespace.
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
        ip_wrapper.netns.execute(cmd)

    def _output_config_files(self):
        # Regenerate the three dnsmasq input files (hosts, addn_hosts, opts).
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return

        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _iter_hosts(self):
        """Iterate over hosts.
        For each host on the network we yield a tuple containing:
        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
                    # if alloc is None, it means there is no need to allocate
                    # an IPv6 address because of stateless DHCPv6 network.
            host_name,  # Host name.
            name,  # Canonical hostname in the format 'hostname[.domain]'.
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                # Note(scollins) Only create entries that are
                # associated with the subnet being managed by this
                # dhcp agent
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    if addr_mode == constants.IPV6_SLAAC:
                        # SLAAC addresses are self-assigned; skip entirely.
                        continue
                    elif addr_mode == constants.DHCPV6_STATELESS:
                        alloc = hostname = fqdn = None
                        yield (port, alloc, hostname, fqdn)
                        continue
                # Hostname is derived from the IP with separators dashed.
                hostname = 'host-%s' % alloc.ip_address.replace(
                    '.', '-').replace(':', '-')
                fqdn = hostname
                if self.conf.dhcp_domain:
                    fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn)

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.
        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::
            'mac_address,FQDN,ip_address'
        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')

        LOG.debug('Building host file: %s', filename)
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        for (port, alloc, hostname, name) in self._iter_hosts():
            if not alloc:
                # Stateless DHCPv6: only emit a tag entry when the port has
                # extra dhcp options to deliver.
                if getattr(port, 'extra_dhcp_opts', False):
                    buf.write('%s,%s%s\n' %
                              (port.mac_address, 'set:', port.id))
                continue

            # don't write ip address which belongs to a dhcp disabled subnet.
            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue

            # (dzyu) Check if it is legal ipv6 address, if so, need wrap
            # it with '[]' to let dnsmasq to distinguish MAC address from
            # IPv6 address.
            ip_address = alloc.ip_address
            if netaddr.valid_ipv6(ip_address):
                ip_address = '[%s]' % ip_address

            if getattr(port, 'extra_dhcp_opts', False):
                buf.write('%s,%s,%s,%s%s\n' %
                          (port.mac_address, name, ip_address,
                           'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))

        utils.replace_file(filename, buf.getvalue())
        LOG.debug('Done building host file %s with contents:\n%s', filename,
                  buf.getvalue())
        return filename

    def _read_hosts_file_leases(self, filename):
        """Parse the existing hosts file into a set of (ip, mac) pairs."""
        leases = set()
        if os.path.exists(filename):
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    # host[0] is the MAC, host[2] the (possibly bracketed) IP.
                    leases.add((host[2].strip('[]'), host[0]))
        return leases

    def _release_unused_leases(self):
        """Release leases present in the old hosts file but no longer
        backed by a fixed IP on any port of the network.
        """
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)

        new_leases = set()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address))

        for ip, mac in old_leases - new_leases:
            self._release_lease(mac, ip)

    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.
        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmaq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for (port, alloc, hostname, fqdn) in self._iter_hosts():
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            if alloc:
                buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        options, subnet_index_map = self._generate_opts_per_subnet()
        options += self._generate_opts_per_port(subnet_index_map)

        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _generate_opts_per_subnet(self):
        """Build per-subnet dnsmasq options (dns-server, routes, router).

        Returns (options, subnet_index_map) where subnet_index_map maps
        subnet ids (that need the dnsmasq IP as nameserver) to their tag
        index.
        """
        options = []
        subnet_index_map = {}
        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        isolated_subnets = self.get_isolated_subnets(self.network)
        for i, subnet in enumerate(self.network.subnets):
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 getattr(subnet, 'ipv6_address_mode', None)
                 in [None, constants.IPV6_SLAAC])):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_index_map[subnet.id] = i

            if self.conf.dhcp_domain and subnet.ip_version == 6:
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))

            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == constants.IPv4_ANY:
                    # A default route in host_routes overrides a missing
                    # gateway_ip.
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))

            # Add host routes for isolated network segments
            if (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )

            if subnet.ip_version == 4:
                # Route traffic for the network's other IPv4 subnets via
                # the on-link interface (0.0.0.0 next hop).
                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
                                    self.network.subnets
                                    if (s.ip_version == 4 and
                                        s.cidr != subnet.cidr)])

                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
                                                      gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))

                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    # Explicitly advertise "no router".
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        return options, subnet_index_map

    def _generate_opts_per_port(self, subnet_index_map):
        """Build per-port dnsmasq options (extra_dhcp_opts and the
        dnsmasq-port dns-server fallback for subnets in subnet_index_map).
        """
        options = []
        dhcp_ips = collections.defaultdict(list)
        for port in self.network.ports:
            if getattr(port, 'extra_dhcp_opts', False):
                port_ip_versions = set(
                    [netaddr.IPAddress(ip.ip_address).version
                     for ip in port.fixed_ips])
                for opt in port.extra_dhcp_opts:
                    opt_ip_version = opt.ip_version
                    if opt_ip_version in port_ip_versions:
                        options.append(
                            self._format_option(opt_ip_version, port.id,
                                                opt.opt_name, opt.opt_value))
                    else:
                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
                                     "because it's ip_version %(version)d "
                                     "is not in port's address IP versions"),
                                 {'opt': opt.opt_name,
                                  'version': opt_ip_version})

            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_index_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)

        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if vx_ips:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        return options

    def _make_subnet_interface_ip_map(self):
        """Map each subnet id to the DHCP interface IP living on it."""
        ip_dev = ip_lib.IPDevice(self.interface_name,
                                 namespace=self.network.namespace)

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}

        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])

            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]

        return retval

    def _format_option(self, ip_version, tag, option, *args):
        """Format DHCP option by option name or code."""
        option = str(option)
        # Split an already-embedded 'tag:<x>,' prefix off the option string.
        pattern = "(tag:(.*),)?(.*)$"
        matches = re.match(pattern, option)
        extra_tag = matches.groups()[0]
        option = matches.groups()[2]

        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag

        if not option.isdigit():
            if ip_version == 4:
                option = 'option:%s' % option
            else:
                option = 'option6:%s' % option
        if extra_tag:
            tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
        else:
            tags = ('tag:' + tag, '%s' % option)
        return ','.join(tags + args)

    @staticmethod
    def _convert_to_literal_addrs(ip_version, ips):
        # dnsmasq requires IPv6 literals to be bracketed.
        if ip_version == 4:
            return ips
        return ['[' + ip + ']' for ip in ips]

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated
        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)

        for port in network.ports:
            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False

        return isolated_subnets

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network
        This method returns True for truly isolated networks (ie: not attached
        to a router), when the enable_isolated_metadata flag is True.
        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
        """
        if conf.enable_metadata_network and conf.enable_isolated_metadata:
            # check if the network has a metadata subnet
            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
                   for s in network.subnets):
                return True

        if not conf.use_namespaces or not conf.enable_isolated_metadata:
            return False

        isolated_subnets = cls.get_isolated_subnets(network)
        return any(isolated_subnets[subnet.id] for subnet in network.subnets)
class DeviceManager(object):
    """Creates, configures and tears down the local interface (and Neutron
    port) that a DHCP driver uses to serve a network on this host.
    """

    def __init__(self, conf, plugin):
        # conf: agent configuration; plugin: RPC proxy used for DHCP port
        # CRUD operations. Exits the process if no interface driver is
        # configured or it fails to import.
        self.conf = conf
        self.plugin = plugin
        if not conf.interface_driver:
            LOG.error(_LE('An interface driver must be specified'))
            raise SystemExit(1)
        try:
            self.driver = importutils.import_object(
                conf.interface_driver, conf)
        except Exception as e:
            LOG.error(_LE("Error importing interface driver '%(driver)s': "
                          "%(inner)s"),
                      {'driver': conf.interface_driver,
                       'inner': e})
            raise SystemExit(1)

    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)

    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)

    def _set_default_route(self, network, device_name):
        """Sets the default gateway for this dhcp namespace.
        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is.  This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = ip_lib.IPDevice(device_name, namespace=network.namespace)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway['gateway']

        for subnet in network.subnets:
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)

            if skip_subnet:
                continue

            # First eligible subnet wins: set its gateway (if different)
            # and stop looking.
            if gateway != subnet.gateway_ip:
                LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
                          '%(ip)s',
                          {'n': network.id, 'ip': subnet.gateway_ip})
                device.route.add_gateway(subnet.gateway_ip)

            return

        # No subnets on the network have a valid gateway.  Clean it up to
        # avoid confusion from seeing an invalid gateway here.
        if gateway is not None:
            LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
            device.route.delete_gateway(gateway)

    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port."""
        device_id = self.get_device_id(network)
        subnets = {}
        dhcp_enabled_subnet_ids = []
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                dhcp_enabled_subnet_ids.append(subnet.id)
                subnets[subnet.id] = subnet

        dhcp_port = None
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                # Existing DHCP port for this host: make sure it has a
                # fixed IP on every dhcp-enabled subnet.
                port_fixed_ips = []
                for fixed_ip in port.fixed_ips:
                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
                                           'ip_address': fixed_ip.ip_address})
                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)

                # If there are dhcp_enabled_subnet_ids here that means that
                # we need to add those to the port and call update.
                if dhcp_enabled_subnet_ids:
                    port_fixed_ips.extend(
                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'fixed_ips': port_fixed_ips}})
                    if not dhcp_port:
                        raise exceptions.Conflict()
                else:
                    dhcp_port = port
                # break since we found port that matches device_id
                break

        # check for a reserved DHCP port
        if dhcp_port is None:
            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                      ' does not yet exist. Checking for a reserved port.',
                      {'device_id': device_id, 'network_id': network.id})
            for port in network.ports:
                port_device_id = getattr(port, 'device_id', None)
                if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
                    # Claim the reserved port for this host.
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'network_id': network.id,
                                           'device_id': device_id}})
                    if dhcp_port:
                        break

        # DHCP port has not yet been created.
        if dhcp_port is None:
            LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
                      ' does not yet exist.', {'device_id': device_id,
                                               'network_id': network.id})
            port_dict = dict(
                name='',
                admin_state_up=True,
                device_id=device_id,
                network_id=network.id,
                tenant_id=network.tenant_id,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})

        if not dhcp_port:
            raise exceptions.Conflict()

        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]

        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips

        return dhcp_port

    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host."""
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)

        if ip_lib.ensure_device_is_ready(interface_name,
                                         namespace=network.namespace):
            LOG.debug('Reusing existing device: %s.', interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=network.namespace)
            self.fill_dhcp_udp_checksums(namespace=network.namespace)
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            # SLAAC subnets get their address automatically; do not
            # configure one here.
            if not ipv6_utils.is_auto_address_subnet(subnet):
                net = netaddr.IPNetwork(subnet.cidr)
                ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
                ip_cidrs.append(ip_cidr)

        if (self.conf.enable_isolated_metadata and
            self.conf.use_namespaces):
            ip_cidrs.append(METADATA_DEFAULT_CIDR)

        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)

        # ensure that the dhcp interface is first in the list
        if network.namespace is None:
            device = ip_lib.IPDevice(interface_name)
            device.route.pullup_route(interface_name)

        if self.conf.use_namespaces:
            self._set_default_route(network, interface_name)

        return interface_name

    def update(self, network, device_name):
        """Update device settings for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            self._set_default_route(network, device_name)

    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        self.driver.unplug(device_name, namespace=network.namespace)

        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))

    def fill_dhcp_udp_checksums(self, namespace):
        """Ensure DHCP reply packets always have correct UDP checksums."""
        iptables_mgr = iptables_manager.IptablesManager(use_ipv6=False,
                                                        namespace=namespace)
        ipv4_rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
                     % constants.DHCP_RESPONSE_PORT)
        iptables_mgr.ipv4['mangle'].add_rule('POSTROUTING', ipv4_rule)
        iptables_mgr.apply()
|
apache-2.0
|
repotvsupertuga/tvsupertuga.repository
|
plugin.video.youtube/resources/lib/kodion/impl/abstract_context_ui.py
|
31
|
1364
|
__author__ = 'bromix'
class AbstractContextUI(object):
    """Abstract UI facade for a kodion context.

    Defines the dialog/input/notification interface; every method raises
    NotImplementedError and must be provided by a concrete subclass (or a
    mock when testing).
    """

    def __init__(self):
        pass

    def create_progress_dialog(self, heading, text=None, background=False):
        """Create and return a progress dialog (background if requested)."""
        raise NotImplementedError()

    def set_view_mode(self, view_mode):
        raise NotImplementedError()

    def get_view_mode(self):
        raise NotImplementedError()

    def get_skin_id(self):
        raise NotImplementedError()

    def on_keyboard_input(self, title, default='', hidden=False):
        """Prompt for (optionally hidden) text input."""
        raise NotImplementedError()

    def on_numeric_input(self, title, default=''):
        raise NotImplementedError()

    def on_yes_no_input(self, title, text):
        raise NotImplementedError()

    def on_ok(self, title, text):
        raise NotImplementedError()

    def on_remove_content(self, content_name):
        raise NotImplementedError()

    def on_select(self, title, items=None):
        # NOTE: the default was changed from a mutable [] to None to avoid
        # the shared-mutable-default pitfall; implementations should treat
        # None as "no items".
        raise NotImplementedError()

    def open_settings(self):
        raise NotImplementedError()

    def show_notification(self, message, header='', image_uri='', time_milliseconds=5000):
        raise NotImplementedError()

    def refresh_container(self):
        """
        Needs to be implemented by a mock for testing or the real deal.
        This will refresh the current container or list.
        :return:
        """
        raise NotImplementedError()
|
gpl-2.0
|
Parsely/Petrel
|
petrel/petrel/emitter.py
|
3
|
1030
|
import os
import sys
import storm
class EmitterBase(object):
    """Common base for Petrel spouts and bolts.

    Records which Python interpreter should launch the component and which
    script file must be embedded in the generated Storm jar.
    """

    DEFAULT_PYTHON = 'python%d.%d' % (sys.version_info.major, sys.version_info.minor)

    def __init__(self, script):
        # We assume 'script' is in the current directory. We simply get the
        # base part and turn it into a .py name for inclusion in the Storm
        # jar we create.
        directory, filename = os.path.split(os.path.relpath(script))
        assert len(directory) == 0
        stem = os.path.splitext(filename)[0]
        self.execution_command = self.DEFAULT_PYTHON
        self.script = '%s.py' % stem
        self._json = {}
        super(EmitterBase, self).__init__()

    def declareOutputFields(declarer):
        # NOTE(review): no explicit 'self' parameter -- when invoked as a
        # bound method the instance binds to 'declarer'. Subclasses are
        # expected to override this.
        raise NotImplementedError()

    def getComponentConfiguration(self):
        # Per-component Storm configuration, or None when nothing was set.
        return self._json if len(self._json) else None
class Spout(EmitterBase, storm.Spout):
    """Petrel spout: EmitterBase packaging plus storm.Spout behavior."""
    pass
class BasicBolt(EmitterBase, storm.BasicBolt):
    """Petrel basic bolt: EmitterBase packaging plus storm.BasicBolt behavior."""
    pass
class Bolt(EmitterBase, storm.Bolt):
    """Petrel bolt: EmitterBase packaging plus storm.Bolt behavior."""
    pass
|
bsd-3-clause
|
barentsen/dave
|
pipeline/task.py
|
1
|
6229
|
# -*- coding: utf-8 -*-
from multiprocessing import pool
import multiprocessing
import contextlib
import clipboard
import traceback
import parmap
import signal
import time
import pdb
import sys
__version__ = "$Id: task.py 2130 2015-09-11 16:56:55Z fmullall $"
__URL__ = "$URL: svn+ssh://flux/home/fmullall/svn/kepler/k2phot/task.py $"
"""
A task is a serial element of a pipeline intended to be run in parallel.
It contains a specific step of the process and helps make your pipeline
more modular. It also makes debugging errors easier.
Here is an example
@task
def exampleTask(clip):
#clip is a Clipboard object
cache = clip['config.cachePath']
outPath = clip['config.outputPath']
clip['example'] = runSillyOperation(cache, out)
#Check required key is produced
clip['example.dummyValue']
return clip
def runSillyOperation(cache, out):
out = dict()
out['dummyValue'] = 42
return out
"""
def task(func):
    """A decorator for a task function

    The k2phot model is that you write wrapper code around your pipeline functions
    that extract values from a clipboard to pass as input arguments,
    and store the return values back in the clipboard. This wrapper code
    is called a "task".

    This decorator watches for any exceptions thrown by the task (or
    underlying code) and decides whether to fire up the debugger to
    figure out what's going on, or to fail gracefully by merely storing
    the raised exception for later debugging. This decorator considerably reduces
    the code duplication between different tasks.

    To use this code in your function, use the following import statement

    from task import task

    then simply write the text "@task" above your task. For example

    from task import task

    @task
    def exampleTask(clip):
        pass

    See an example down below.
    """
    def wrapperForTask(*args, **kwargs):
        """Decorator for a k2phot task

        Catches exceptions and either stores them or passes them to the debugger
        """
        # Tasks take exactly one positional argument: the clipboard.
        assert(len(args) == 1)
        assert(len(kwargs) == 0)
        clip = args[0]
        # Once one task has failed, every later task is skipped so the
        # original failure is preserved on the clipboard.
        if 'exception' in clip.keys():
            print "INFO: %s not run because exception previously raised" \
                %(func.func_name)
            return clip
        if "__meta__" not in clip.keys():
            clip["__meta__"] = dict()
        debug = clip.get('config.debug', False)
        timeout_sec = clip.get('config.timeout_sec', defaultValue=0)
        #If specified, let the function timeout if it takes too long.
        #If the function takes too long, this will timeout
        if timeout_sec > 0:
            signal.signal(signal.SIGALRM, handleTimeout)
            signal.alarm(timeout_sec)
        dtype = type(clip)
        t0 = time.time()
        try:
            clip = func(*args, **kwargs)
        except SyntaxError, e:
            # Syntax errors are always fatal, even when debug is off.
            raise(e)
        except Exception, e:
            if debug:
                #Cancel timeout, if any
                signal.alarm(0)
                print e
                pdb.post_mortem(sys.exc_info()[2])
                raise e
            else:
                # Fail gracefully: record the failure for later inspection.
                clip['exception'] = e
                clip['backtrace'] = traceback.format_exc()
        #Cancel any timeouts, if necessary
        signal.alarm(0)
        #Check that function returns a clipboard.
        if not isinstance(clip, dtype):
            if not isinstance(clip, dict):
                throwable = ValueError("FAIL: %s did not return a clipboard" %(func.func_name))
                if debug:
                    raise throwable
                else:
                    print throwable
                clip = {'exception': throwable}
        clip['functionName'] = func.__name__
        # Record per-task wall-clock time under "__meta__".
        key = "%s-elapsedTime" %(func.__name__)
        clip["__meta__"][key] = time.time() - t0
        return clip
    wrapperForTask.__name__ = func.__name__
    return wrapperForTask
class TimeoutError(Exception):
    """Raised by handleTimeout() when a task exceeds config.timeout_sec."""
# NOTE(review): unreachable code that followed the `raise` in the original
# (a try/except referencing undefined names `func` and `default`, apparently
# pasted from a timeout-decorator recipe) has been removed; it could never
# execute and would have raised NameError if it did.
def handleTimeout(signum, frame):
    """signal.SIGALRM handler installed by task(): abort the running task
    by raising TimeoutError. `signum`/`frame` are the standard handler
    arguments and are unused."""
    raise TimeoutError()
def runAll(func, iterable, config):
    """Run func over every element on iterable in parallel.

    Not yet run or tested.

    Inputs:
    ----------
    func
        (A function) The top level function, e.g runOne(), below
    iterable
        (list, array, etc.) A list of values to operate on.
    config
        (Clipboard) A configuration clipboard.
    """
    # Leave one core free for the rest of the system, but never request a
    # zero-worker pool on a single-core machine.
    count = max(1, multiprocessing.cpu_count() - 1)
    # NOTE(review): the original created an extra, unused pool.Pool(count)
    # here before the `with` block below; its worker processes were never
    # used or closed (a resource leak), so it has been removed.
    # NOTE(review): `parallel` is driven by the 'debug' flag -- presumably
    # debug mode should force *serial* execution; confirm against parmap's
    # semantics before changing.
    parallel = config.get('debug', False)
    with contextlib.closing(pool.Pool(count)) as p:
        out = parmap.map(runOne, iterable, config, pool=p, parallel=parallel)
    return out
def runOne(value, config):
    """A sample top level function to run a single instance of a pipeline

    Not yet run or tested.

    Inputs:
    ----------
    value
        (undefined) The input value that is unique to this run, eg the name of
        the file to process, or the value to perform some computation on.
    config
        (Clipboard) A configuration clipboard. Must contain the key 'taskList'. See below

    Returns:
    -----------
    A clipboard, containing the results of the processing.

    Notes:
    ----------
    The config clipboard must contain the key taskList. taskList is
    a list of strings, each listing the function name of a task to
    run in the pipeline, in the order they should be run. The
    first task should look for the unique value in clip['value']

    The tasks are passed as strings because functions can't be
    passed to parallel processes. This means that your tasks must
    be defined in scope of this file. The easiest way
    to do this looks like

    import task
    import stuff
    task.func1 = stuff.func1
    task.func2 = stuff.func2
    config['taskList'] = ["func1", "func2"]
    """
    taskList = config['taskList']
    clip = clipboard.Clipboard()
    clip['config'] = config
    clip['value'] = value
    for t in taskList:
        # SECURITY NOTE(review): eval() executes arbitrary expressions, not
        # just function names; taskList must only come from trusted config.
        f = eval(t)
        clip = f(clip)
    return clip
|
mit
|
krishna11888/ai
|
third_party/pattern/test/test_metrics.py
|
21
|
17844
|
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
import math
from pattern import metrics
# Directory containing this test file; falls back to "" when __file__ is
# unavailable (e.g. frozen or interactive environments).
try:
    PATH = os.path.dirname(os.path.realpath(__file__))
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    PATH = ""
#---------------------------------------------------------------------------------------------------
class TestProfiling(unittest.TestCase):
def setUp(self):
# Test set for accuracy, precision and recall:
self.documents = (
(None, True),
(None, True),
(None, False)
)
def test_duration(self):
# Assert 0.1 or slightly higher.
v = metrics.duration(time.sleep, 0.1)
self.assertTrue(v > 0.1)
print("pattern.metrics.duration()")
def test_confustion_matrix(self):
# Assert 2 true positives (TP) and 1 false positive (FP).
v = metrics.confusion_matrix(lambda document: True, self.documents)
self.assertEqual(v, (2,0,1,0))
# Assert 1 true negative (TN) and 2 false negatives (FN).
v = metrics.confusion_matrix(lambda document: False, self.documents)
self.assertEqual(v, (0,1,0,2))
print("pattern.metrics.confusion_matrix()" )
def test_accuracy(self):
# Assert 2.0/3.0 (two out of three correct predictions).
v = metrics.accuracy(lambda document: True, self.documents)
self.assertEqual(v, 2.0/3.0)
print("pattern.metrics.accuracy()")
def test_precision(self):
# Assert 2.0/3.0 (2 TP, 1 FP).
v = metrics.precision(lambda document: True, self.documents)
self.assertEqual(v, 2.0/3.0)
# Assert 0.0 (no TP).
v = metrics.precision(lambda document: False, self.documents)
self.assertEqual(v, 0.0)
print("pattern.metrics.precision()")
def test_recall(self):
# Assert 1.0 (no FN).
v = metrics.recall(lambda document: True, self.documents)
self.assertEqual(v, 1.0)
# Assert 0.0 (no TP).
v = metrics.recall(lambda document: False, self.documents)
self.assertEqual(v, 0.0)
print("pattern.metrics.recall()")
def test_F1(self):
# Assert 0.8 (F1 for precision=2/3 and recall=1).
v = metrics.F1(lambda document: True, self.documents)
self.assertEqual(v, 0.8)
self.assertEqual(v, metrics.F(lambda document: True, self.documents, beta=1))
print("pattern.metrics.F1()")
def test_agreement(self):
# Assert 0.210 (example from http://en.wikipedia.org/wiki/Fleiss'_kappa).
m = [[0, 0, 0, 0, 14],
[0, 2, 6, 4, 2 ],
[0, 0, 3, 5, 6 ],
[0, 3, 9, 2, 0 ],
[2, 2, 8, 1, 1 ],
[7, 7, 0, 0, 0 ],
[3, 2, 6, 3, 0 ],
[2, 5, 3, 2, 2 ],
[6, 5, 2, 1, 0 ],
[0, 2, 2, 3, 7 ]]
v = metrics.agreement(m)
self.assertAlmostEqual(v, 0.210, places=3)
print("pattern.metrics.agreement()")
class TestTextMetrics(unittest.TestCase):
    """String similarity, readability, type-token and co-occurrence tests.

    Expected values are pinned to the pattern library's documented examples.
    """
    def setUp(self):
        pass
    def test_levenshtein(self):
        # Assert 0 (identical strings).
        v = metrics.levenshtein("gallahad", "gallahad")
        self.assertEqual(v, 0)
        # Assert 3 (1 insert, 1 delete, 1 replace).
        v = metrics.levenshtein("gallahad", "_g_llaha")
        self.assertEqual(v, 3)
        print("pattern.metrics.levenshtein()")
    def test_levenshtein_similarity(self):
        # Assert 1.0 (identical strings).
        v = metrics.levenshtein_similarity("gallahad", "gallahad")
        self.assertEqual(v, 1.0)
        # Assert 0.75 (2 out of 8 characters differ).
        v = metrics.levenshtein_similarity("gallahad", "g_ll_had")
        self.assertEqual(v, 0.75)
        print("pattern.metrics.levenshtein_similarity()")
    def test_dice_coefficient(self):
        # Assert 1.0 (identical strings).
        v = metrics.dice_coefficient("gallahad", "gallahad")
        self.assertEqual(v, 1.0)
        # Assert 0.25 (example from http://en.wikipedia.org/wiki/Dice_coefficient).
        v = metrics.dice_coefficient("night", "nacht")
        self.assertEqual(v, 0.25)
        print("pattern.metrics.dice_coefficient()")
    def test_similarity(self):
        # similarity() with an algorithm constant must match the direct calls.
        self.assertEqual(
            metrics.levenshtein_similarity("night", "nacht"),
            metrics.similarity("night", "nacht", metrics.LEVENSHTEIN))
        self.assertEqual(
            metrics.dice_coefficient("night", "nacht"),
            metrics.similarity("night", "nacht", metrics.DICE))
        print("pattern.metrics.similarity()")
    def test_readability(self):
        # Assert that technical jargon is in the "difficult" range (< 0.30).
        s = "The Australian platypus is seemingly a hybrid of a mammal and reptilian creature."
        v = metrics.readability(s)
        self.assertTrue(v < 0.30)
        # Assert that Dr. Seuss is in the "easy" range (> 0.70).
        s = "'I know some good games we could play,' said the cat. " + \
            "'I know some new tricks,' said the cat in the hat. " + \
            "'A lot of good tricks. I will show them to you.' " + \
            "'Your mother will not mind at all if I do.'"
        v = metrics.readability(s)
        self.assertTrue(v > 0.70)
        print("pattern.metrics.readability()")
    def test_intertextuality(self):
        # Evaluate accuracy for plagiarism detection.
        from pattern.db import Datasheet
        data = Datasheet.load(os.path.join(PATH, "corpora", "plagiarism-clough&stevenson.csv"))
        data = [((txt, src), int(plagiarism) > 0) for txt, src, plagiarism in data]
        def plagiarism(txt, src):
            # Flag pairs whose trigram intertextuality exceeds 0.05.
            return metrics.intertextuality([txt, src], n=3)[0,1] > 0.05
        A, P, R, F = metrics.test(lambda x: plagiarism(*x), data)
        self.assertTrue(P > 0.96)
        self.assertTrue(R > 0.94)
        print("pattern.metrics.intertextuality()")
    def test_ttr(self):
        # Assert type-token ratio: words = 7, unique words = 6.
        s = "The black cat \n sat on the mat."
        v = metrics.ttr(s)
        self.assertAlmostEqual(v, 0.86, places=2)
        print("pattern.metrics.ttr()")
    def test_suffixes(self):
        # Assert base => inflected and reversed inflected => base suffixes.
        s = [("beau", "beaux"), ("jeune", "jeunes"), ("hautain", "hautaines")]
        v = metrics.suffixes(s, n=3)
        self.assertEqual(v, [
            (2, "nes", [("ne", 0.5), ("n", 0.5)]),
            (1, "aux", [("au", 1.0)])])
        v = metrics.suffixes(s, n=2, reverse=False)
        self.assertEqual(v, [
            (1, "ne", [("nes", 1.0)]),
            (1, "in", [("ines", 1.0)]),
            (1, "au", [("aux", 1.0)])])
        print("pattern.metrics.suffixes()")
    def test_isplit(self):
        # Assert string.split() iterator.
        v = metrics.isplit("test\nisplit")
        # NOTE(review): checks the Python 2 iterator protocol ("next");
        # under Python 3 the attribute is "__next__".
        self.assertTrue(hasattr(v, "next"))
        self.assertEqual(list(v), ["test", "isplit"])
        print("pattern.metrics.isplit()")
    def test_cooccurrence(self):
        s = "The black cat sat on the mat."
        v = metrics.cooccurrence(s, window=(-1, 1),
            term1 = lambda w: w in ("cat",),
            normalize = lambda w: w.lower().strip(".:;,!?()[]'\""))
        self.assertEqual(sorted(v.keys()), ["cat"])
        self.assertEqual(sorted(v["cat"].keys()), ["black", "cat", "sat"])
        self.assertEqual(sorted(v["cat"].values()), [1, 1, 1])
        s = [("The","DT"), ("black","JJ"), ("cat","NN"), ("sat","VB"), ("on","IN"), ("the","DT"), ("mat","NN")]
        # NOTE(review): both metrics.cooccurrence and metrics.co_occurrence
        # are exercised -- presumably aliases in the library; confirm.
        v = metrics.co_occurrence(s, window=(-2, -1),
            term1 = lambda token: token[1].startswith("NN"),
            term2 = lambda token: token[1].startswith("JJ"))
        self.assertEqual(v, {("cat", "NN"): {("black", "JJ"): 1}})
        print("pattern.metrics.cooccurrence()")
class TestInterpolation(unittest.TestCase):
def setUp(self):
pass
def test_lerp(self):
# Assert linear interpolation.
v = metrics.lerp(100, 200, 0.5)
self.assertEqual(v, 150.0)
print("pattern.metrics.lerp()")
def test_smoothstep(self):
# Assert cubic interpolation.
v1 = metrics.smoothstep(0.0, 1.0, 0.5)
v2 = metrics.smoothstep(0.0, 1.0, 0.9)
v3 = metrics.smoothstep(0.0, 1.0, 0.1)
self.assertEqual(v1, 0.5)
self.assertTrue(v2 > 0.9)
self.assertTrue(v3 < 0.1)
print("pattern.metrics.smoothstep()")
def test_smoothrange(self):
# Assert nice ranges for line charts.
v = list(metrics.smoothrange(0.0, 1.0))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])]
v = list(metrics.smoothrange(-2, 2))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0])]
v = list(metrics.smoothrange(1, 13))
[self.assertAlmostEqual(x, y, places=1) for x, y in zip(v,
[0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0])]
print("pattern.metrics.smoothrange()")
class TestStatistics(unittest.TestCase):
def setUp(self):
pass
def test_mean(self):
# Assert (1+2+3+4) / 4 = 2.5.
v = metrics.mean([1,2,3,4])
self.assertEqual(v, 2.5)
print("pattern.metrics.mean()")
def test_median(self):
# Assert 2.5 (between 2 and 3).
v = metrics.median([1,2,3,4])
self.assertEqual(v, 2.5)
# Assert 3 (middle of list).
v = metrics.median([1,2,3,4,5])
self.assertEqual(v, 3)
# Assert that empty list raises ValueError.
self.assertRaises(ValueError, metrics.median, [])
print("pattern.metrics.median()")
def test_variance(self):
# Assert 2.5.
v = metrics.variance([1,2,3,4,5], sample=True)
self.assertEqual(v, 2.5)
# Assert 2.0 (population variance).
v = metrics.variance([1,2,3,4,5], sample=False)
self.assertEqual(v, 2.0)
print("pattern.metrics.variance()")
def test_standard_deviation(self):
# Assert 2.429 (sample).
v = metrics.standard_deviation([1,5,6,7,6,8], sample=True)
self.assertAlmostEqual(v, 2.429, places=3)
# Assert 2.217 (population).
v = metrics.standard_deviation([1,5,6,7,6,8], sample=False)
self.assertAlmostEqual(v, 2.217, places=3)
print("pattern.metrics.standard_deviation()")
def test_histogram(self):
# Assert 1 bin.
v = metrics.histogram([1,2,3,4], k=0)
self.assertTrue(len(v) == 1)
# Assert 4 bins, each with one value, each with midpoint == value.
v = metrics.histogram([1,2,3,4], k=4, range=(0.5,4.5))
for i, ((start, stop), v) in enumerate(sorted(v.items())):
self.assertTrue(i+1 == v[0])
self.assertAlmostEqual(start + (stop-start)/2, i+1, places=3)
# Assert 2 bins, one with all the low numbers, one with the high number.
v = metrics.histogram([1,2,3,4,100], k=2)
v = sorted(v.values(), key=lambda item: len(item))
self.assertTrue(v[0] == [100])
self.assertTrue(v[1] == [1,2,3,4])
print("pattern.metrics.histogram()")
def test_moment(self):
# Assert 0.0 (1st central moment = 0.0).
v = metrics.moment([1,2,3,4,5], n=1)
self.assertEqual(v, 0.0)
# Assert 2.0 (2nd central moment = population variance).
v = metrics.moment([1,2,3,4,5], n=2)
self.assertEqual(v, 2.0)
print("pattern.metrics.moment()")
def test_skewness(self):
# Assert < 0.0 (few low values).
v = metrics.skewness([1,100,101,102,103])
self.assertTrue(v < 0.0)
# Assert > 0.0 (few high values).
v = metrics.skewness([1,2,3,4,100])
self.assertTrue(v > 0.0)
# Assert 0.0 (evenly distributed).
v = metrics.skewness([1,2,3,4])
self.assertTrue(v == 0.0)
print("pattern.metrics.skewness()")
def test_kurtosis(self):
# Assert -1.2 for the uniform distribution.
a = 1
b = 1000
v = metrics.kurtosis([float(i-a)/(b-a) for i in range(a,b)])
self.assertAlmostEqual(v, -1.2, places=3)
print("pattern.metrics.kurtosis()")
def test_quantile(self):
# Assert 2.5 (quantile with p=0.5 == median).
v = metrics.quantile([1,2,3,4], p=0.5, a=1, b=-1, c=0, d=1)
self.assertEqual(v, 2.5)
# Assert 3.0 (discontinuous sample).
v = metrics.quantile([1,2,3,4], p=0.5, a=0.5, b=0, c=1, d=0)
self.assertEqual(v, 3.0)
return "pattern.metrics.quantile()"
def test_boxplot(self):
# Different a,b,c,d quantile parameters produce different results.
# By approximation, assert (53, 79.5, 84.5, 92, 98).
a = [79,53,82,91,87,98,80,93]
v = metrics.boxplot(a)
self.assertEqual(v[0], min(a))
self.assertTrue(abs(v[1] - 79.5) <= 0.5)
self.assertTrue(abs(v[2] - metrics.median(a)) <= 0.5)
self.assertTrue(abs(v[3] - 92.0) <= 0.5)
self.assertEqual(v[4], max(a))
print("pattern.metrics.boxplot()")
class TestStatisticalTests(unittest.TestCase):
    """Significance tests: Fisher exact, chi-squared and Kolmogorov-Smirnov."""
    def setUp(self):
        pass
    def test_fisher_test(self):
        # Assert Fisher exact test significance.
        v = metrics.fisher_exact_test(a=1, b=9, c=11, d=3)
        self.assertAlmostEqual(v, 0.0028, places=4)
        v = metrics.fisher_exact_test(a=45, b=15, c=75, d=45)
        self.assertAlmostEqual(v, 0.1307, places=4)
        print("pattern.metrics.fisher_test()")
    def test_chi_squared(self):
        # Assert chi-squared test (upper tail).
        # Observed/expected pairs; an empty expected list presumably lets
        # chi2() derive the expected frequencies itself -- confirm.
        o1, e1 = [[44, 56]], [[50, 50]]
        o2, e2 = [[22, 21, 22, 27, 22, 36]], []
        o3, e3 = [[48, 35, 15, 3]], [[58, 34.5, 7, 0.5]]
        o4, e4 = [[36, 14], [30, 25]], []
        o5, e5 = [[46, 71], [37, 83]], [[40.97, 76.02], [42.03, 77.97]]
        v1 = metrics.chi2(o1, e1)
        v2 = metrics.chi2(o2, e2)
        v3 = metrics.chi2(o3, e3)
        v4 = metrics.chi2(o4, e4)
        v5 = metrics.chi2(o5, e5)
        self.assertAlmostEqual(v1[0], 1.4400, places=4)
        self.assertAlmostEqual(v1[1], 0.2301, places=4)
        self.assertAlmostEqual(v2[0], 6.7200, places=4)
        self.assertAlmostEqual(v2[1], 0.2423, places=4)
        self.assertAlmostEqual(v3[0], 23.3742, places=4)
        self.assertAlmostEqual(v4[0], 3.4177, places=4)
        self.assertAlmostEqual(v5[0], 1.8755, places=4)
        print("pattern.metrics.chi2()")
    def test_chi_squared_p(self):
        # Assert chi-squared P-value (upper tail) against critical values
        # for significance levels 0.05, 0.025, 0.01, 0.005 at df=1..5.
        for df, X2 in [
          (1, ( 3.85,  5.05,  6.65,  7.90)),
          (2, ( 6.00,  7.40,  9.25, 10.65)),
          (3, ( 7.85,  9.40, 11.35, 12.85)),
          (4, ( 9.50, 11.15, 13.30, 14.90)),
          (5, (11.10, 12.85, 15.10, 16.80))]:
            for i, x2 in enumerate(X2):
                v = metrics.chi2p(x2, df, tail=metrics.UPPER)
                self.assertTrue(v < (0.05, 0.025, 0.01, 0.005)[i])
        print("pattern.metrics.chi2p()")
    def test_kolmogorov_smirnov(self):
        # Two-sample KS test: (statistic, p-value).
        v = metrics.ks2([1, 2, 3], [1, 2, 4])
        self.assertAlmostEqual(v[0], 0.3333, places=4)
        self.assertAlmostEqual(v[1], 0.9762, places=4)
        print("pattern.metrics.ks2()")
class TestSpecialFunctions(unittest.TestCase):
    """Gamma, complementary error function and Kolmogorov distribution."""
    def setUp(self):
        pass
    def test_gamma(self):
        # Assert complete gamma function: gamma(0.5) == sqrt(pi).
        v = metrics.gamma(0.5)
        self.assertAlmostEqual(v, math.sqrt(math.pi), places=4)
        print("pattern.metrics.gamma()")
    def test_gammai(self):
        # Assert incomplete gamma function.
        v = metrics.gammai(a=1, x=2)
        self.assertAlmostEqual(v, 0.1353, places=4)
        print("pattern.metrics.gammai()")
    def test_erfc(self):
        # Assert complementary error function over tabulated (x, erfc(x)) pairs.
        for x, y in [
          (-3.00, 2.000),
          (-2.00, 1.995),
          (-1.00, 1.843),
          (-0.50, 1.520),
          (-0.25, 1.276),
          ( 0.00, 1.000),
          ( 0.25, 0.724),
          ( 0.50, 0.480),
          ( 1.00, 0.157),
          ( 2.00, 0.005),
          ( 3.00, 0.000)]:
            self.assertAlmostEqual(metrics.erfc(x), y, places=3)
        print("pattern.metrics.erfc()")
    def test_kolmogorov(self):
        # Assert Kolmogorov limit distribution.
        self.assertAlmostEqual(metrics.kolmogorov(0.0), 1.0000, places=4)
        self.assertAlmostEqual(metrics.kolmogorov(0.5), 0.9639, places=4)
        self.assertAlmostEqual(metrics.kolmogorov(1.0), 0.2700, places=4)
        self.assertAlmostEqual(metrics.kolmogorov(2.0), 0.0007, places=4)
        self.assertAlmostEqual(metrics.kolmogorov(4.0), 0.0000, places=4)
        print("pattern.metrics.kolmogorov()")
#---------------------------------------------------------------------------------------------------
def suite():
    """Assemble every TestCase in this module into one unittest suite."""
    loader = unittest.TestLoader()
    all_cases = (
        TestProfiling,
        TestTextMetrics,
        TestInterpolation,
        TestStatistics,
        TestStatisticalTests,
        TestSpecialFunctions,
    )
    result = unittest.TestSuite()
    for case in all_cases:
        result.addTest(loader.loadTestsFromTestCase(case))
    return result
# Run the full suite when executed directly (verbosity=1: one dot per test).
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=1).run(suite())
|
gpl-2.0
|
kressi/erpnext
|
erpnext/accounts/report/payment_period_based_on_invoice_date/payment_period_based_on_invoice_date.py
|
39
|
3748
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.accounts.report.accounts_receivable.accounts_receivable import get_ageing_data
from frappe.utils import getdate, flt
def execute(filters=None):
    """Report entry point: return (columns, data) for the payment-period report.

    Each row pairs a GL payment entry with its referenced invoice, then
    appends ageing buckets (via get_ageing_data) and the delay in payment.
    """
    if not filters: filters = {}
    validate_filters(filters)
    columns = get_columns(filters)
    entries = get_entries(filters)
    invoice_details = get_invoice_posting_date_map(filters)
    data = []
    for d in entries:
        # Fall back to an empty dict so attribute access yields None.
        invoice = invoice_details.get(d.against_voucher) or frappe._dict()
        # Payments sit on opposite ledger sides for purchase vs. sales refs.
        # NOTE(review): get_entries() does not select `reference_type`, so
        # d.reference_type looks like it is always None here and this branch
        # appears unreachable -- confirm against the GL Entry schema.
        if d.reference_type=="Purchase Invoice":
            payment_amount = flt(d.debit) or -1 * flt(d.credit)
        else:
            payment_amount = flt(d.credit) or -1 * flt(d.debit)
        row = [d.voucher_type, d.voucher_no, d.party_type, d.party, d.posting_date, d.against_voucher,
            invoice.posting_date, invoice.due_date, d.debit, d.credit, d.remarks]
        if d.against_voucher:
            row += get_ageing_data(30, 60, 90, d.posting_date, invoice.posting_date, payment_amount)
        else:
            # No invoice reference: pad the five ageing columns.
            row += ["", "", "", "", ""]
        if invoice.due_date:
            # Delay in payment (days); `or 0` normalizes a same-day payment.
            row.append((getdate(d.posting_date) - getdate(invoice.due_date)).days or 0)
        data.append(row)
    return columns, data
def validate_filters(filters):
    """Reject payment_type/party_type combinations that make no sense
    (incoming payments from suppliers, outgoing payments to customers)."""
    payment_type = filters.get("payment_type")
    party_type = filters.get("party_type")
    incompatible = (
        (payment_type == "Incoming" and party_type == "Supplier")
        or (payment_type == "Outgoing" and party_type == "Customer"))
    if incompatible:
        frappe.throw(_("{0} payment entries can not be filtered by {1}")\
            .format(filters.payment_type, filters.party_type))
def get_columns(filters):
    """Report column definitions as "Label:Fieldtype/Options:Width" strings."""
    return [
        _("Payment Document") + ":: 100",
        _("Payment Entry") + ":Dynamic Link/"+_("Payment Document")+":140",
        _("Party Type") + "::100",
        _("Party") + ":Dynamic Link/Party Type:140",
        _("Posting Date") + ":Date:100",
        # The invoice link target depends on the payment direction.
        _("Invoice") + (":Link/Purchase Invoice:130" if filters.get("payment_type") == "Outgoing" else ":Link/Sales Invoice:130"),
        _("Invoice Posting Date") + ":Date:130",
        _("Payment Due Date") + ":Date:130",
        _("Debit") + ":Currency:120",
        _("Credit") + ":Currency:120",
        _("Remarks") + "::150",
        _("Age") +":Int:40",
        # Ageing buckets produced by get_ageing_data() in execute().
        "0-30:Currency:100",
        "30-60:Currency:100",
        "60-90:Currency:100",
        _("90-Above") + ":Currency:100",
        _("Delay in payment (Days)") + "::150"
    ]
def get_conditions(filters):
    """Build the trailing "and ..." clause for the GL Entry query.

    Side effects: fills in a default party_type derived from payment_type,
    and stores filters["reference_type"] used as a query parameter.
    """
    clauses = []
    # Default the party type from the payment direction when not given.
    if not filters.party_type:
        filters.party_type = "Supplier" if filters.payment_type == "Outgoing" else "Customer"
    if filters.party_type:
        clauses.append("party_type=%(party_type)s")
    if filters.party:
        clauses.append("party=%(party)s")
    if filters.party_type:
        clauses.append("against_voucher_type=%(reference_type)s")
        filters["reference_type"] = "Sales Invoice" if filters.party_type=="Customer" else "Purchase Invoice"
    if filters.get("from_date"):
        clauses.append("posting_date >= %(from_date)s")
    if filters.get("to_date"):
        clauses.append("posting_date <= %(to_date)s")
    if not clauses:
        return ""
    return "and " + " and ".join(clauses)
def get_entries(filters):
    """Fetch GL entries for payment vouchers (Journal Entry / Payment Entry)
    of the company, further restricted by get_conditions().

    Note: get_conditions() is interpolated as SQL text but only ever emits
    fixed clauses with %(...)s placeholders; values are parameterized.
    """
    return frappe.db.sql("""select
            voucher_type, voucher_no, party_type, party, posting_date, debit, credit, remarks, against_voucher
        from `tabGL Entry`
        where company=%(company)s and voucher_type in ('Journal Entry', 'Payment Entry') {0}
    """.format(get_conditions(filters)), filters, as_dict=1)
def get_invoice_posting_date_map(filters):
    """Map invoice name -> row (name, posting_date, due_date) for the
    invoice doctype matching the payment direction."""
    doctype = "Sales Invoice" if filters.get("payment_type") == "Incoming" else "Purchase Invoice"
    rows = frappe.db.sql("select name, posting_date, due_date from `tab{0}`".format(doctype), as_dict=1)
    return {row.name: row for row in rows}
|
gpl-3.0
|
Hndrx616/Group-Clustering
|
python_Lib/bs4/bs4/element.py
|
4
|
49509
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"  # encoding used when serializing trees
PY3K = (sys.version_info[0] > 2)  # True when running under Python 3
# Matches runs of whitespace when normalizing text nodes.
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """An attribute name qualified by an XML namespace prefix.

    NOTE(review): subclasses the Python 2 `unicode` builtin; this module
    predates a Python 3 port.
    """
    def __new__(cls, prefix, name, namespace=None):
        # A bare prefix (name is None) is stored as-is; otherwise the
        # string value is "prefix:name".
        if name is None:
            obj = unicode.__new__(cls, prefix)
        else:
            obj = unicode.__new__(cls, prefix + ":" + name)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML."""
    # Marker base class: subclasses override encode() so the document's
    # real encoding can be substituted at serialization time.
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.
    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """
    def __new__(cls, original_value):
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        # When the tree is re-encoded, the attribute's value becomes the
        # new encoding name rather than the originally-parsed text.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.
    When Beautiful Soup parses the markup:
    <meta http-equiv="content-type" content="text/html; charset=utf8">
    The value of the 'content' attribute will be one of these objects.
    """
    # Captures the "charset=..." clause inside a content attribute value.
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary.
            # (Deliberately builds a plain unicode object, not this class,
            # since encode() would have nothing to rewrite.)
            return unicode.__new__(unicode, original_value)
        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj
    def encode(self, encoding):
        # Rewrite only the charset clause to name the new encoding.
        def rewrite(match):
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    # There are five possible values for the "formatter" argument passed in
    # to methods like encode() and prettify():
    #
    # "html" - All Unicode characters with corresponding HTML entities
    #   are converted to those entities on output.
    # "minimal" - Bare ampersands and angle brackets are converted to
    #   XML entities: &amp; &lt; &gt;
    # None - The null formatter. Unicode characters are never
    #   converted to entities. This is not recommended, but it's
    #   faster than "minimal".
    # A function - This function will be called on every string that
    #   needs to undergo entity substition
    FORMATTERS = {
        "html" : EntitySubstitution.substitute_html,
        "minimal" : EntitySubstitution.substitute_xml,
        None : None
        }
@classmethod
def format_string(cls, s, formatter='minimal'):
    """Format the given string using the given formatter."""
    # String formatter names resolve through FORMATTERS; unrecognized
    # names fall back to minimal XML substitution.
    if not callable(formatter):
        formatter = cls.FORMATTERS.get(
            formatter, EntitySubstitution.substitute_xml)
    if formatter is None:
        return s
    return formatter(s)
def setup(self, parent=None, previous_element=None):
    """Sets up the initial relations between this element and
    other elements."""
    self.parent = parent
    # Document-order chain: link ourselves in after the element that was
    # parsed immediately before us.
    self.previous_element = previous_element
    if previous_element is not None:
        self.previous_element.next_element = self
    self.next_element = None
    self.previous_sibling = None
    self.next_sibling = None
    # A new element is always appended last among its parent's children.
    if self.parent is not None and self.parent.contents:
        self.previous_sibling = self.parent.contents[-1]
        self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling")  # BS3
previousSibling = _alias("previous_sibling")  # BS3
def replace_with(self, replace_with):
    """Swap this element out of the tree and put `replace_with` in its
    place. Returns self (now detached), or None for a self-replacement."""
    if replace_with is self:
        return
    if replace_with is self.parent:
        raise ValueError("Cannot replace a Tag with its parent.")
    # Remember where we were before extract() severs the parent link.
    original_parent = self.parent
    original_index = original_parent.index(self)
    self.extract()
    original_parent.insert(original_index, replace_with)
    return self
replaceWith = replace_with  # BS3
def unwrap(self):
    """Remove this element from the tree, promoting its children into
    its old position. Returns self."""
    enclosing_parent = self.parent
    original_index = enclosing_parent.index(self)
    self.extract()
    # Insert over a reversed copy: each child lands at original_index,
    # which preserves their document order.
    for child in reversed(self.contents[:]):
        enclosing_parent.insert(original_index, child)
    return self
replace_with_children = unwrap
replaceWithChildren = unwrap  # BS3
def wrap(self, wrap_inside):
    """Put this element inside `wrap_inside` (which takes its place in
    the tree) and return the wrapper."""
    detached = self.replace_with(wrap_inside)
    wrap_inside.append(detached)
    return wrap_inside
def extract(self):
    """Destructively rips this element out of the tree."""
    if self.parent is not None:
        del self.parent.contents[self.parent.index(self)]
    #Find the two elements that would be next to each other if
    #this element (and any children) hadn't been parsed. Connect
    #the two.
    last_child = self._last_descendant()
    next_element = last_child.next_element
    if self.previous_element is not None:
        self.previous_element.next_element = next_element
    if next_element is not None:
        next_element.previous_element = self.previous_element
    self.previous_element = None
    last_child.next_element = None
    # Unhook the sibling chain around us, then clear our own links so the
    # extracted subtree carries no stale references to the old tree.
    self.parent = None
    if self.previous_sibling is not None:
        self.previous_sibling.next_sibling = self.next_sibling
    if self.next_sibling is not None:
        self.next_sibling.previous_sibling = self.previous_sibling
    self.previous_sibling = self.next_sibling = None
    return self
def _last_descendant(self):
    "Finds the last element beneath this object to be parsed."
    node = self
    # Keep descending into the final child until we reach a node with no
    # `contents` (e.g. a NavigableString) or an empty one.
    while hasattr(node, 'contents') and node.contents:
        node = node.contents[-1]
    return node
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
    """Insert `new_child` into this element's contents at `position`,
    rewiring parent, sibling and document-order (next/previous_element)
    links. Plain strings are promoted to NavigableString."""
    if new_child is self:
        raise ValueError("Cannot insert a tag into itself.")
    # NOTE: `basestring` is the Python 2 builtin; this module predates
    # a Python 3 port.
    if (isinstance(new_child, basestring)
        and not isinstance(new_child, NavigableString)):
        new_child = NavigableString(new_child)
    position = min(position, len(self.contents))
    if hasattr(new_child, 'parent') and new_child.parent is not None:
        # We're 'inserting' an element that's already one
        # of this object's children.
        if new_child.parent is self:
            current_index = self.index(new_child)
            if current_index < position:
                # We're moving this element further down the list
                # of this object's children. That means that when
                # we extract this element, our target index will
                # jump down one.
                position -= 1
        new_child.extract()
    new_child.parent = self
    previous_child = None
    if position == 0:
        # First child: our document-order predecessor is the parent itself.
        new_child.previous_sibling = None
        new_child.previous_element = self
    else:
        previous_child = self.contents[position - 1]
        new_child.previous_sibling = previous_child
        new_child.previous_sibling.next_sibling = new_child
        # Document order continues from the previous sibling's deepest
        # descendant, not the sibling itself.
        new_child.previous_element = previous_child._last_descendant()
    if new_child.previous_element is not None:
        new_child.previous_element.next_element = new_child
    new_childs_last_element = new_child._last_descendant()
    if position >= len(self.contents):
        # Appending at the end: the next element in document order is the
        # next sibling of the closest ancestor that has one.
        new_child.next_sibling = None
        parent = self
        parents_next_sibling = None
        while parents_next_sibling is None and parent is not None:
            parents_next_sibling = parent.next_sibling
            parent = parent.parent
            if parents_next_sibling is not None:
                # We found the element that comes next in the document.
                break
        if parents_next_sibling is not None:
            new_childs_last_element.next_element = parents_next_sibling
        else:
            # The last element of this tag is the last element in
            # the document.
            new_childs_last_element.next_element = None
    else:
        next_child = self.contents[position]
        new_child.next_sibling = next_child
        if new_child.next_sibling is not None:
            new_child.next_sibling.previous_sibling = new_child
        new_childs_last_element.next_element = next_child
    if new_childs_last_element.next_element is not None:
        new_childs_last_element.next_element.previous_element = new_childs_last_element
    self.contents.insert(position, new_child)
def append(self, tag):
    """Appends the given tag to the contents of this tag."""
    # Delegates to insert() so all sibling/document-order links are rewired.
    self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
    """Makes the given element the immediate predecessor of this one.
    The two elements will have the same parent, and the given element
    will be immediately before this one.
    """
    if self is predecessor:
        raise ValueError("Can't insert an element before itself.")
    if self.parent is None:
        raise ValueError(
            "Element has no parent, so 'before' has no meaning.")
    parent = self.parent
    # Extract first so that the index won't be screwed up if they
    # are siblings.
    if isinstance(predecessor, PageElement):
        predecessor.extract()
    parent.insert(parent.index(self), predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        # The empty-dict default is only forwarded, never mutated here.
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next # BS3
    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                      **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        # Walks the flat next_elements chain (document order), not siblings.
        return self._find_all(name, attrs, text, limit, self.next_elements,
                              **kwargs)
    findAllNext = find_all_next # BS3
    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                              **kwargs)
    findNextSibling = find_next_sibling # BS3
    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        # Restricted to siblings (same parent), unlike find_all_next.
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings # BS3
    fetchNextSiblings = find_next_siblings # BS2
    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous # BS3
    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                          **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        # Walks the previous_elements chain (reverse document order).
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                              **kwargs)
    findAllPrevious = find_all_previous # BS3
    fetchPrevious = find_all_previous # BS2
    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                              **kwargs)
    findPreviousSibling = find_previous_sibling # BS3
    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings # BS3
    fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1)
if l:
r = l[0]
return r
findParent = find_parent # BS3
    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        # text is always None: parents are Tags, never strings.
        return self._find_all(name, attrs, None, limit, self.parents,
                              **kwargs)
    findParents = find_parents # BS3
    fetchParents = find_parents # BS2
    @property
    def next(self):
        # Convenience alias for the element that follows in document order.
        return self.next_element
    @property
    def previous(self):
        # Convenience alias for the element that precedes in document order.
        return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, basestring):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
    #These generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    @property
    def next_elements(self):
        # Every element after this one, in document order.
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element
    @property
    def next_siblings(self):
        # Every later sibling (same parent), in document order.
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling
    @property
    def previous_elements(self):
        # Every element before this one, in reverse document order.
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element
    @property
    def previous_siblings(self):
        # Every earlier sibling (same parent), in reverse document order.
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling
    @property
    def parents(self):
        # Every ancestor, from the direct parent up to the document root.
        i = self.parent
        while i is not None:
            yield i
            i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated stirng.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
    def _attribute_checker(self, operator, attribute, value=''):
        """Create a function that performs a CSS selector operation.

        Takes an operator, attribute and optional value. Returns a
        function that will return True for elements that match that
        combination.
        """
        if operator == '=':
            # string representation of `attribute` is equal to `value`
            return lambda el: el._attr_value_as_string(attribute) == value
        elif operator == '~':
            # space-separated list representation of `attribute`
            # contains `value`
            def _includes_value(element):
                attribute_value = element.get(attribute, [])
                if not isinstance(attribute_value, list):
                    attribute_value = attribute_value.split()
                return value in attribute_value
            return _includes_value
        elif operator == '^':
            # string representation of `attribute` starts with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').startswith(value)
        elif operator == '$':
            # string representation of `attribute` ends with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').endswith(value)
        elif operator == '*':
            # string representation of `attribute` contains `value`
            return lambda el: value in el._attr_value_as_string(attribute, '')
        elif operator == '|':
            # string representation of `attribute` is either exactly
            # `value` or starts with `value` and then a dash.
            def _is_or_starts_with_dash(element):
                attribute_value = element._attr_value_as_string(attribute, '')
                return (attribute_value == value or attribute_value.startswith(
                        value + '-'))
            return _is_or_starts_with_dash
        else:
            # No operator: the mere presence of the attribute matches.
            return lambda el: el.has_attr(attribute)
    def select(self, selector):
        """Perform a CSS selection operation on the current element.

        Supports a subset of CSS: tag names, attribute selectors
        ([attr=val] etc.), #id, .class, the universal selector *, and the
        child combinator >. Tokens are whitespace-separated.
        """
        tokens = selector.split()
        current_context = [self]
        for index, token in enumerate(tokens):
            # NOTE(review): at index 0 this inspects tokens[-1] (the last
            # token); a selector ending in '>' would wrongly skip the first
            # token — confirm intended.
            if tokens[index - 1] == '>':
                # already found direct descendants in last step. skip this
                # step.
                continue
            m = self.attribselect_re.match(token)
            if m is not None:
                # Attribute selector
                tag, attribute, operator, value = m.groups()
                if not tag:
                    tag = True
                checker = self._attribute_checker(operator, attribute, value)
                found = []
                for context in current_context:
                    found.extend(
                        [el for el in context.find_all(tag) if checker(el)])
                current_context = found
                continue
            if '#' in token:
                # ID selector
                tag, id = token.split('#', 1)
                if tag == "":
                    tag = True
                # NOTE(review): only searches under the first element of the
                # current context (IDs are assumed unique in the document).
                el = current_context[0].find(tag, {'id': id})
                if el is None:
                    return [] # No match
                current_context = [el]
                continue
            if '.' in token:
                # Class selector
                tag_name, klass = token.split('.', 1)
                if not tag_name:
                    tag_name = True
                # A token like div.a.b requires all listed classes.
                classes = set(klass.split('.'))
                found = []
                def classes_match(tag):
                    if tag_name is not True and tag.name != tag_name:
                        return False
                    if not tag.has_attr('class'):
                        return False
                    return classes.issubset(tag['class'])
                for context in current_context:
                    found.extend(context.find_all(classes_match))
                current_context = found
                continue
            if token == '*':
                # Star selector
                found = []
                for context in current_context:
                    found.extend(context.findAll(True))
                current_context = found
                continue
            if token == '>':
                # Child selector: match the *next* token, non-recursively.
                tag = tokens[index + 1]
                if not tag:
                    tag = True
                found = []
                for context in current_context:
                    found.extend(context.find_all(tag, recursive=False))
                current_context = found
                continue
            # Here we should just have a regular tag
            if not self.tag_name_re.match(token):
                return []
            found = []
            for context in current_context:
                found.extend(context.findAll(token))
            current_context = found
        return current_context
    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements
    def nextSiblingGenerator(self):
        return self.next_siblings
    def previousGenerator(self):
        return self.previous_elements
    def previousSiblingGenerator(self):
        return self.previous_siblings
    def parentGenerator(self):
        return self.parents
class NavigableString(unicode, PageElement):
    # A text node: a Unicode string that also participates in the parse
    # tree. (Python 2 code: `unicode`/`basestring` are presumably aliased
    # elsewhere in this file for Python 3 — TODO confirm.)

    # Markup emitted before/after the string by output_ready(); subclasses
    # override these to wrap the text (CDATA, comments, etc.).
    PREFIX = ''
    SUFFIX = ''
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
    def __getnewargs__(self):
        # Support copying/pickling: rebuild from the plain string value.
        return (unicode(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))
    def output_ready(self, formatter="minimal"):
        # Apply the formatter, then wrap in the class's markup delimiters.
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """
    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        # Call for side effects only; emit the raw string unformatted.
        self.format_string(self, formatter)
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    # A CDATA section: <![CDATA[ ... ]]>
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
    # A processing instruction: <? ... ?>
    PREFIX = u'<?'
    SUFFIX = u'?>'
class Comment(PreformattedString):
    # An HTML/XML comment: <!-- ... -->
    PREFIX = u'<!--'
    SUFFIX = u'-->'
class Declaration(PreformattedString):
    # An SGML declaration: <! ... !>
    # NOTE(review): the '!>' suffix looks unusual (declarations normally end
    # with '>'); confirm this matches the parser's expectations.
    PREFIX = u'<!'
    SUFFIX = u'!>'
class Doctype(PreformattedString):
    # A <!DOCTYPE ...> declaration.
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a document-type name plus optional public
        and system identifiers.

        A system id without a public id gets the SYSTEM keyword; with a
        public id it is appended bare, per DOCTYPE syntax.
        """
        value = name
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)
    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""
    def __init__(self, parser=None, builder=None, name=None, namespace=None,
                 prefix=None, attrs=None, parent=None, previous=None):
        "Basic constructor."
        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        self.namespace = namespace
        self.prefix = prefix
        if attrs is None:
            attrs = {}
        # NOTE(review): if attrs is given but builder is None, the next line
        # raises AttributeError — callers apparently always pass a builder
        # together with attrs; confirm.
        elif builder.cdata_list_attributes:
            # Split whitespace-separated attributes (e.g. class) into lists.
            attrs = builder._replace_cdata_list_attribute_values(
                self.name, attrs)
        else:
            attrs = dict(attrs)
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        # Set up any substitutions, such as the charset in a META tag.
        if builder is not None:
            builder.set_up_substitutions(self)
            self.can_be_empty_element = builder.can_be_empty_element(name)
        else:
            self.can_be_empty_element = False
    parserClass = _alias("parser_class") # BS3
    @property
    def is_empty_element(self):
        """Is this tag an empty-element tag? (aka a self-closing tag)

        A tag that has contents is never an empty-element tag.

        A tag that has no contents may or may not be an empty-element
        tag. It depends on the builder used to create the tag. If the
        builder has a designated list of empty-element tags, then only
        a tag whose name shows up in that list is considered an
        empty-element tag.

        If the builder has no designated list of empty-element tags,
        then any tag with no contents is an empty-element tag.
        """
        return len(self.contents) == 0 and self.can_be_empty_element
    isSelfClosing = is_empty_element # BS3
    @property
    def string(self):
        """Convenience property to get the single string within this tag.

        :Return: If this tag has a single string child, return value
         is that string. If this tag has no children, or more than one
         child, return value is None. If this tag has one child tag,
         return value is the 'string' attribute of the child tag,
         recursively.
        """
        if len(self.contents) != 1:
            return None
        child = self.contents[0]
        if isinstance(child, NavigableString):
            return child
        # Single child tag: recurse into it.
        return child.string
    @string.setter
    def string(self, string):
        # Replace all current children with one string of the same class.
        self.clear()
        self.append(string.__class__(string))
def _all_strings(self, strip=False):
"""Yield all child strings, possibly stripping them."""
for descendant in self.descendants:
if not isinstance(descendant, NavigableString):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self.attrs.get(key, default)
    def has_attr(self, key):
        # True if this tag has an attribute named `key`.
        return key in self.attrs
    def __hash__(self):
        # Hash by rendered markup. NOTE(review): mutable and inconsistent
        # with __eq__'s structural comparison — confirm intended.
        return str(self).__hash__()
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self.attrs[key]
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        return x in self.contents
    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        # Python 2 truthiness hook; prevents falling back to __len__.
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # pop() with a default: deleting a missing attribute is a no-op.
        self.attrs.pop(key, None)
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        find_all() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return self.find_all(*args, **kwargs)
    def __getattr__(self, tag):
        """Unknown attribute access falls back to find(): soup.title is
        soup.find('title'). Also supports the deprecated BS3 .fooTag form."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag=="contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        # Python 2: repr is an encoded bytestring.
        return self.encode(encoding)
    def __unicode__(self):
        return self.decode()
    def __str__(self):
        return self.encode()
    if PY3K:
        # On Python 3 both str() and repr() must return text, not bytes.
        __str__ = __repr__ = __unicode__
    def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
               indent_level=None, formatter="minimal",
               errors="xmlcharrefreplace"):
        """Render this tag as a bytestring in the given encoding.

        Characters the encoding cannot represent become XML character
        references (per the default `errors` handler).
        """
        # Turn the data structure into Unicode, then encode the
        # Unicode.
        u = self.decode(indent_level, encoding, formatter)
        return u.encode(encoding, errors)
    def decode(self, indent_level=None,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a Unicode representation of this tag and its contents.

        :param indent_level: pretty-print at this indent depth; None
         disables pretty-printing.
        :param eventual_encoding: The tag is destined to be
           encoded into this encoding. This method is _not_
           responsible for performing that encoding. This information
           is passed in so that it can be substituted in if the
           document contains a <META> tag that mentions the document's
           encoding.
        """
        # Render each attribute as key="value", sorted by key for stable
        # output.
        attrs = []
        if self.attrs:
            for key, val in sorted(self.attrs.items()):
                if val is None:
                    # Valueless attribute (e.g. <option selected>).
                    decoded = key
                else:
                    if isinstance(val, list) or isinstance(val, tuple):
                        val = ' '.join(val)
                    elif not isinstance(val, basestring):
                        val = str(val)
                    elif (
                        isinstance(val, AttributeValueWithCharsetSubstitution)
                        and eventual_encoding is not None):
                        # e.g. a META charset value rewritten to match the
                        # encoding the document will be serialized into.
                        val = val.encode(eventual_encoding)
                    text = self.format_string(val, formatter)
                    decoded = (
                        str(key) + '='
                        + EntitySubstitution.quoted_attribute_value(text))
                attrs.append(decoded)
        close = ''
        closeTag = ''
        prefix = ''
        if self.prefix:
            prefix = self.prefix + ":"
        if self.is_empty_element:
            close = '/'
        else:
            closeTag = '</%s%s>' % (prefix, self.name)
        pretty_print = (indent_level is not None)
        if pretty_print:
            space = (' ' * (indent_level - 1))
            indent_contents = indent_level + 1
        else:
            space = ''
            indent_contents = None
        contents = self.decode_contents(
            indent_contents, eventual_encoding, formatter)
        if self.hidden:
            # This is the 'document root' object.
            s = contents
        else:
            s = []
            attribute_string = ''
            if attrs:
                attribute_string = ' ' + ' '.join(attrs)
            if pretty_print:
                s.append(space)
            s.append('<%s%s%s%s>' % (
                    prefix, self.name, attribute_string, close))
            if pretty_print:
                s.append("\n")
            s.append(contents)
            if pretty_print and contents and contents[-1] != "\n":
                s.append("\n")
            if pretty_print and closeTag:
                s.append(space)
            s.append(closeTag)
            if pretty_print and closeTag and self.next_sibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def prettify(self, encoding=None, formatter="minimal"):
        """Pretty-print this tag: Unicode if no encoding is given,
        otherwise a bytestring in that encoding."""
        if encoding is None:
            # True is passed as indent_level; arithmetic treats it as 1.
            return self.decode(True, formatter=formatter)
        else:
            return self.encode(encoding, True, formatter=formatter)
    def decode_contents(self, indent_level=None,
                        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                        formatter="minimal"):
        """Renders the contents of this tag as a Unicode string.

        :param eventual_encoding: The tag is destined to be
           encoded into this encoding. This method is _not_
           responsible for performing that encoding. This information
           is passed in so that it can be substituted in if the
           document contains a <META> tag that mentions the document's
           encoding.
        """
        pretty_print = (indent_level is not None)
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.output_ready(formatter)
            elif isinstance(c, Tag):
                # Child tags render themselves recursively.
                s.append(c.decode(indent_level, eventual_encoding,
                                  formatter))
            if text and indent_level:
                # When pretty-printing, strings are stripped and re-indented.
                text = text.strip()
            if text:
                if pretty_print:
                    s.append(" " * (indent_level - 1))
                s.append(text)
                if pretty_print:
                    s.append("\n")
        return ''.join(s)
    def encode_contents(
        self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
        formatter="minimal"):
        """Renders the contents of this tag as a bytestring."""
        contents = self.decode_contents(indent_level, encoding, formatter)
        return contents.encode(encoding)

    # Old method for BS3 compatibility
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        # Translate the BS3 prettyPrint flag into the indent_level protocol.
        if not prettyPrint:
            indentLevel = None
        return self.encode_contents(
            indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
    #Generator methods
    @property
    def children(self):
        # Direct children only, in document order.
        # return iter() to make the purpose of the method clear
        return iter(self.contents) # XXX This seems to be untested.
    @property
    def descendants(self):
        # All children, grandchildren, etc., in document order, by walking
        # the next_element chain until we leave this tag's subtree.
        if not len(self.contents):
            return
        stopNode = self._last_descendant().next_element
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next_element

    # Old names for backwards compatibility
    def childGenerator(self):
        return self.children
    def recursiveChildGenerator(self):
        return self.descendants

    # This was kind of misleading because has_key() (attributes) was
    # different from __in__ (contents). has_key() is gone in Python 3,
    # anyway.
    has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        """:param name: tag name to match (string, regex, callable, bool...).
        :param attrs: dict of attribute criteria; a non-dict value is
         treated as a shortcut for the 'class' attribute.
        :param text: criterion for matching string content.
        :param kwargs: additional attribute criteria, merged into attrs.
        """
        self.name = self._normalize_search_value(name)
        if not isinstance(attrs, dict):
            # Treat a non-dict value for attrs as a search for the 'class'
            # attribute.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                # Copy before updating so the caller's dict isn't mutated.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        normalized_attrs = {}
        for key, value in attrs.items():
            normalized_attrs[key] = self._normalize_search_value(value)

        self.attrs = normalized_attrs
        self.text = self._normalize_search_value(text)
    def _normalize_search_value(self, value):
        """Coerce a search criterion into a canonical form (Unicode string,
        list of criteria, callable, regex, bool, or None)."""
        # Leave it alone if it's a Unicode string, a callable, a
        # regular expression, a boolean, or None.
        if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
            or isinstance(value, bool) or value is None):
            return value

        # If it's a bytestring, convert it to Unicode, treating it as UTF-8.
        if isinstance(value, bytes):
            return value.decode("utf8")

        # If it's listlike, convert it into a list of strings.
        if hasattr(value, '__iter__'):
            new_value = []
            for v in value:
                if (hasattr(v, '__iter__') and not isinstance(v, bytes)
                    and not isinstance(v, unicode)):
                    # This is almost certainly the user's mistake. In the
                    # interests of avoiding infinite loops, we'll let
                    # it through as-is rather than doing a recursive call.
                    new_value.append(v)
                else:
                    new_value.append(self._normalize_search_value(v))
            return new_value

        # Otherwise, convert it into a Unicode string.
        # The unicode(str()) thing is so this will do the same thing on Python 2
        # and Python 3.
        return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
    def search_tag(self, markup_name=None, markup_attrs={}):
        """Match this strainer against a tag, given either a Tag object or
        a (name, attrs) pair. Returns the matched object or None."""
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            # A Tag serves as its own attribute map (it supports .get()).
            markup_attrs = markup
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))

        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                # A callable name criterion decides the match itself.
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        # Lazily build a dict view of the tag's attributes.
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        # Every attribute criterion must match.
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        # A text criterion must also match the tag's string content.
        if found and self.text and not self._matches(found.string, self.text):
            found = None
        return found
    searchTag = search_tag
    def search(self, markup):
        """Match this strainer against any kind of markup: a list of
        elements, a Tag, or a string. Returns the match or None."""
        # print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text or self.name or self.attrs:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            # Text can only match when no tag-name/attribute criteria exist.
            if not self.name and not self.attrs and self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found
    def _matches(self, markup, match_against):
        """Core matching logic: compare one piece of markup against one
        criterion (string, regex, callable, bool, list, or None)."""
        # print u"Matching %s against %s" % (markup, match_against)
        result = False
        if isinstance(markup, list) or isinstance(markup, tuple):
            # This should only happen when searching a multi-valued attribute
            # like 'class'.
            if (isinstance(match_against, unicode)
                and ' ' in match_against):
                # A bit of a special case. If they try to match "foo
                # bar" on a multivalue attribute's value, only accept
                # the literal value "foo bar"
                #
                # XXX This is going to be pretty slow because we keep
                # splitting match_against. But it shouldn't come up
                # too often.
                return (whitespace_re.split(match_against) == markup)
            else:
                # Any single value matching the criterion is enough.
                for item in markup:
                    if self._matches(item, match_against):
                        return True
                return False

        if match_against is True:
            # True matches any non-None value.
            return markup is not None

        if isinstance(match_against, collections.Callable):
            return match_against(markup)

        # Custom callables take the tag as an argument, but all
        # other ways of matching match the tag name as a string.
        if isinstance(markup, Tag):
            markup = markup.name

        # Ensure that `markup` is either a Unicode string, or None.
        markup = self._normalize_search_value(markup)

        if markup is None:
            # None matches None, False, an empty string, an empty list, and so on.
            return not match_against

        if isinstance(match_against, unicode):
            # Exact string match
            return markup == match_against

        if hasattr(match_against, 'match'):
            # Regexp match
            return match_against.search(markup)

        if hasattr(match_against, '__iter__'):
            # The markup must be an exact match against something
            # in the iterable.
            return markup in match_against
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """
        :param source: the SoupStrainer that produced these results.
        """
        # Bug fix: the original called list.__init__([]), which initialized
        # a throwaway list literal instead of this instance.
        super(ResultSet, self).__init__()
        self.source = source
|
gpl-3.0
|
paulbnjl/PySerSpec
|
PySerSpec/ASC_nozzle.py
|
1
|
1507
|
#-*- coding: UTF-8 -*
############################################################################
############################################################################
#### Person responsible for this pure evil spaghetti code : ####
#### Paul Bonijol -> paul [.] bnjl ['AT'] gmail [.] com ####
#### Works with python3.4 ####
#### ####
#### PySerSpec.py is free software: you can redistribute it and/or ####
#### modify it under the terms of the GNU General Public License as ####
#### published by the Free Software Foundation, either version 3 of the ####
#### License, or (at your option) any later version. ####
#### See http://www.gnu.org/licenses for more information. ####
#### ####
#### I hope this program will be useful to somebody else ! ####
#### But please keep in mind that it comes WITHOUT ANY WARRANTY ! ####
#### If something bad happens, well, sorry ! :( ####
########################### ASC NOZZLE STATE CLASS #########################
############################### UNTESTED ! #################################
class AscPosition:
    """Reports the position of the ASC nozzle from instrument status strings."""

    def __init__(self):
        pass

    def get_ASC_POS(self, DATA_output):
        """Print the nozzle position for each state string in DATA_output.

        Bug fix: the original used character-set intersection
        (set('r1') & set(asc_state)), so any state containing the
        character 'r' — including 'r2' — matched the "lowered" branch
        first. Substring tests distinguish 'r1' from 'r2' correctly.
        """
        for asc_state in DATA_output:
            if 'r1' in asc_state:
                print("ASC nozzle lowered.")
            elif 'r2' in asc_state:
                print('ASC nozzle raised.')
            else:
                print("ERROR. Look if the ASC nozzle is connected and check the position again.")
|
gpl-3.0
|
rishilification/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal_unittest.py
|
128
|
2653
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.applywatchlistlocal import ApplyWatchListLocal
class ApplyWatchListLocalTest(CommandsTest):
    """Unit tests for the apply-watchlist-local tool command."""

    def test_args_parsing(self):
        # No bug id given: the watchlist result is only logged, no bug is
        # updated.
        expected_logs = """MockWatchList: determine_cc_and_messages
No bug was updated because no id was given.
Result of watchlist: cc "abarth@webkit.org, eric@webkit.org, levin@chromium.org" messages "Message1.
Message2."
"""
        self.assert_execute_outputs(ApplyWatchListLocal(), [''], expected_logs=expected_logs)

    def test_args_parsing_with_bug(self):
        # A single bug id: the watchlist cc list and messages are posted as
        # a comment on that bug.
        expected_logs = """MockWatchList: determine_cc_and_messages
MOCK bug comment: bug_id=50002, cc=set(['eric@webkit.org', 'levin@chromium.org', 'abarth@webkit.org'])
--- Begin comment ---
Message1.
Message2.
--- End comment ---\n\n"""
        self.assert_execute_outputs(ApplyWatchListLocal(), ['50002'], expected_logs=expected_logs)

    def test_args_parsing_with_two_bugs(self):
        # More than one bug id is rejected with an error.
        self._assertRaisesRegexp(Exception, 'Too many arguments given: 1234 5678', self.assert_execute_outputs, ApplyWatchListLocal(), ['1234', '5678'])
|
bsd-3-clause
|
miing/mci_migo
|
identityprovider/migrations/0007_auto__add_field_invalidatedemailaddress_date_invalidated__add_field_in.py
|
1
|
19118
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from identityprovider.models import InvalidatedEmailAddress
class Migration(SchemaMigration):
    """South schema migration for the ``invalidated_emailaddress`` table.

    Adds two columns:

    * ``date_invalidated`` -- nullable timestamp, defaults to ``utcnow``.
    * ``account_notified`` -- boolean flag, defaults to ``False``; existing
      rows are marked ``True`` so they are never re-notified.
    """

    def forwards(self, orm):
        """Apply the migration: add both columns, then backfill."""
        # Adding field 'InvalidatedEmailAddress.date_invalidated'
        db.add_column(u'invalidated_emailaddress', 'date_invalidated',
                      self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.utcnow, null=True, blank=True),
                      keep_default=False)

        # Adding field 'InvalidatedEmailAddress.account_notified'
        db.add_column(u'invalidated_emailaddress', 'account_notified',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # existing InvalidatedEmailAddress should not be notified
        InvalidatedEmailAddress.objects.all().update(account_notified=True)

    def backwards(self, orm):
        """Revert the migration: drop both columns."""
        # Deleting field 'InvalidatedEmailAddress.date_invalidated'
        db.delete_column(u'invalidated_emailaddress', 'date_invalidated')

        # Deleting field 'InvalidatedEmailAddress.account_notified'
        db.delete_column(u'invalidated_emailaddress', 'account_notified')

    # Frozen ORM snapshot used by South to construct the ``orm`` argument of
    # forwards()/backwards(). Auto-generated; do not edit by hand.
    models = {
        'identityprovider.account': {
            'Meta': {'object_name': 'Account', 'db_table': "u'account'"},
            'creation_rationale': ('django.db.models.fields.IntegerField', [], {}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
            'date_status_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
            'displayname': ('identityprovider.models.account.DisplaynameField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_openid_identifier': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'openid_identifier': ('django.db.models.fields.TextField', [], {'default': "u'EeE8MYB'", 'unique': 'True'}),
            'preferredlanguage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'status_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'twofactor_attempts': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True'}),
            'twofactor_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'warn_about_backup_device': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'identityprovider.accountpassword': {
            'Meta': {'object_name': 'AccountPassword', 'db_table': "u'accountpassword'"},
            'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['identityprovider.Account']", 'unique': 'True', 'db_column': "'account'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('identityprovider.models.account.PasswordField', [], {})
        },
        'identityprovider.apiuser': {
            'Meta': {'object_name': 'APIUser', 'db_table': "'api_user'"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'identityprovider.authenticationdevice': {
            'Meta': {'ordering': "('id',)", 'object_name': 'AuthenticationDevice'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'devices'", 'to': "orm['identityprovider.Account']"}),
            'counter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'device_type': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.TextField', [], {})
        },
        'identityprovider.authtoken': {
            'Meta': {'object_name': 'AuthToken', 'db_table': "u'authtoken'"},
            'date_consumed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'db_index': 'True', 'blank': 'True'}),
            'displayname': ('identityprovider.models.account.DisplaynameField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('identityprovider.models.account.PasswordField', [], {'null': 'True', 'blank': 'True'}),
            'redirection_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'requester'", 'blank': 'True'}),
            'requester_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'token': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
            'token_type': ('django.db.models.fields.IntegerField', [], {})
        },
        'identityprovider.emailaddress': {
            'Meta': {'object_name': 'EmailAddress', 'db_table': "u'emailaddress'"},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'account'", 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
            'email': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lp_person': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'person'", 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {})
        },
        'identityprovider.invalidatedemailaddress': {
            'Meta': {'object_name': 'InvalidatedEmailAddress', 'db_table': "u'invalidated_emailaddress'"},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'account'", 'blank': 'True'}),
            'account_notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            'date_invalidated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'identityprovider.lpopenididentifier': {
            'Meta': {'object_name': 'LPOpenIdIdentifier', 'db_table': "u'lp_openididentifier'"},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date.today'}),
            'identifier': ('django.db.models.fields.TextField', [], {'unique': 'True', 'primary_key': 'True'}),
            'lp_account': ('django.db.models.fields.IntegerField', [], {'db_column': "'account'", 'db_index': 'True'})
        },
        'identityprovider.openidassociation': {
            'Meta': {'unique_together': "(('server_url', 'handle'),)", 'object_name': 'OpenIDAssociation', 'db_table': "u'openidassociation'"},
            'assoc_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
            'issued': ('django.db.models.fields.IntegerField', [], {}),
            'lifetime': ('django.db.models.fields.IntegerField', [], {}),
            'secret': ('django.db.models.fields.TextField', [], {}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '2047'})
        },
        'identityprovider.openidauthorization': {
            'Meta': {'object_name': 'OpenIDAuthorization', 'db_table': "u'openidauthorization'"},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'db_column': "'account'"}),
            'client_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
            'date_expires': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'trust_root': ('django.db.models.fields.TextField', [], {})
        },
        'identityprovider.openidnonce': {
            'Meta': {'unique_together': "(('server_url', 'timestamp', 'salt'),)", 'object_name': 'OpenIDNonce', 'db_table': "'openidnonce'"},
            'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '2047', 'primary_key': 'True'}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {})
        },
        'identityprovider.openidrpconfig': {
            'Meta': {'object_name': 'OpenIDRPConfig', 'db_table': "'ssoopenidrpconfig'"},
            'allow_unverified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'allowed_sreg': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'auto_authorize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_query_any_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'creation_rationale': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'displayname': ('django.db.models.fields.TextField', [], {}),
            'flag_twofactor': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'ga_snippet': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'prefer_canonical_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'require_two_factor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'trust_root': ('django.db.models.fields.TextField', [], {'unique': 'True'})
        },
        'identityprovider.openidrpsummary': {
            'Meta': {'unique_together': "(('account', 'trust_root', 'openid_identifier'),)", 'object_name': 'OpenIDRPSummary', 'db_table': "u'openidrpsummary'"},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'db_column': "'account'"}),
            'approved_data': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
            'date_last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'openid_identifier': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
            'total_logins': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'trust_root': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
        },
        'identityprovider.person': {
            'Meta': {'object_name': 'Person', 'db_table': "u'lp_person'"},
            'addressline1': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'addressline2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'country'", 'blank': 'True'}),
            'creation_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'creation_rationale': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'defaultmembershipperiod': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'defaultrenewalperiod': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'displayname': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fti': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'hide_email_addresses': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'homepage_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'icon': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'icon'", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'language'", 'blank': 'True'}),
            'logo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'logo'", 'blank': 'True'}),
            'lp_account': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_column': "'account'"}),
            'mail_resumption_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'mailing_list_auto_subscribe_policy': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
            'mailing_list_receive_duplicates': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
            'merged': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'merged'", 'blank': 'True'}),
            'mugshot': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'mugshot'", 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {'unique': 'True', 'null': 'True'}),
            'organization': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'personal_standing': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'personal_standing_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'phone': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'province': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'registrant': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'registrant'", 'blank': 'True'}),
            'renewal_policy': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True'}),
            'subscriptionpolicy': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
            'teamdescription': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'teamowner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'teamowner'", 'blank': 'True'}),
            'verbose_bugnotifications': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
        },
        'identityprovider.personlocation': {
            'Meta': {'object_name': 'PersonLocation', 'db_table': "u'lp_personlocation'"},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified_by': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'last_modified_by'"}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['identityprovider.Person']", 'unique': 'True', 'null': 'True', 'db_column': "'person'"}),
            'time_zone': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
        },
        'identityprovider.teamparticipation': {
            'Meta': {'unique_together': "(('team', 'person'),)", 'object_name': 'TeamParticipation', 'db_table': "u'lp_teamparticipation'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Person']", 'null': 'True', 'db_column': "'person'"}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_participations'", 'null': 'True', 'db_column': "'team'", 'to': "orm['identityprovider.Person']"})
        }
    }

    complete_apps = ['identityprovider']
|
agpl-3.0
|
jamespcole/home-assistant
|
homeassistant/components/websocket_api/http.py
|
1
|
6675
|
"""View to accept incoming websocket connection."""
import asyncio
from contextlib import suppress
from functools import partial
import json
import logging
from aiohttp import web, WSMsgType
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.helpers.json import JSONEncoder
from .const import (
MAX_PENDING_MSG, CANCELLATION_ERRORS, URL, ERR_UNKNOWN_ERROR,
SIGNAL_WEBSOCKET_CONNECTED, SIGNAL_WEBSOCKET_DISCONNECTED)
from .auth import AuthPhase, auth_required_message
from .error import Disconnect
from .messages import error_message
# Serializer used for every outgoing websocket frame: Home Assistant's
# JSONEncoder handles HA objects, and allow_nan=False raises ValueError on
# NaN/Infinity instead of emitting invalid JSON (caught in _writer below).
JSON_DUMP = partial(json.dumps, cls=JSONEncoder, allow_nan=False)
class WebsocketAPIView(HomeAssistantView):
    """View to serve a websockets endpoint."""

    name = "websocketapi"
    url = URL
    # Authentication happens inside the websocket handshake (see AuthPhase
    # in WebSocketHandler.async_handle), not via the HTTP auth middleware.
    requires_auth = False

    async def get(self, request):
        """Handle an incoming websocket connection."""
        return await WebSocketHandler(
            request.app['hass'], request).async_handle()
class WebSocketHandler:
    """Handle an active websocket client connection."""

    def __init__(self, hass, request):
        """Initialize an active connection."""
        self.hass = hass
        self.request = request
        self.wsock = None
        # Outgoing messages are queued here and drained by _writer(); a full
        # queue means the client is not reading and the connection is cut.
        self._to_write = asyncio.Queue(maxsize=MAX_PENDING_MSG, loop=hass.loop)
        self._handle_task = None
        self._writer_task = None
        self._logger = logging.getLogger(
            "{}.connection.{}".format(__name__, id(self)))

    async def _writer(self):
        """Write outgoing messages.

        Runs as a background task; drains the queue until the socket closes
        or a ``None`` sentinel is received from async_handle().
        """
        # Exceptions if Socket disconnected or cancelled by connection handler
        with suppress(RuntimeError, *CANCELLATION_ERRORS):
            while not self.wsock.closed:
                message = await self._to_write.get()
                if message is None:
                    # Shutdown sentinel pushed in async_handle()'s finally.
                    break
                self._logger.debug("Sending %s", message)
                try:
                    await self.wsock.send_json(message, dumps=JSON_DUMP)
                except (ValueError, TypeError) as err:
                    # JSON_DUMP uses allow_nan=False, so unserializable
                    # payloads raise here; report an error to the client
                    # instead of dropping the frame silently.
                    self._logger.error('Unable to serialize to JSON: %s\n%s',
                                       err, message)
                    await self.wsock.send_json(error_message(
                        message['id'], ERR_UNKNOWN_ERROR,
                        'Invalid JSON in response'))

    @callback
    def _send_message(self, message):
        """Send a message to the client.

        Closes connection if the client is not reading the messages.

        Async friendly.
        """
        try:
            self._to_write.put_nowait(message)
        except asyncio.QueueFull:
            self._logger.error("Client exceeded max pending messages [2]: %s",
                               MAX_PENDING_MSG)
            self._cancel()

    @callback
    def _cancel(self):
        """Cancel the connection (both the handler and writer tasks)."""
        self._handle_task.cancel()
        self._writer_task.cancel()

    async def async_handle(self):
        """Handle a websocket response.

        Runs the full connection lifecycle: handshake, a 10-second auth
        phase, then the command loop. Cleanup in ``finally`` always closes
        the connection, flushes the writer and fires the disconnect signal.
        Returns the WebSocketResponse.
        """
        request = self.request
        wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
        await wsock.prepare(request)
        self._logger.debug("Connected")

        # Py3.7+
        if hasattr(asyncio, 'current_task'):
            # pylint: disable=no-member
            self._handle_task = asyncio.current_task()
        else:
            self._handle_task = asyncio.Task.current_task(loop=self.hass.loop)

        @callback
        def handle_hass_stop(event):
            """Cancel this connection."""
            self._cancel()

        unsub_stop = self.hass.bus.async_listen(
            EVENT_HOMEASSISTANT_STOP, handle_hass_stop)
        self._writer_task = self.hass.async_create_task(self._writer())
        auth = AuthPhase(self._logger, self.hass, self._send_message, request)
        connection = None
        disconnect_warn = None

        try:
            self._send_message(auth_required_message())

            # Auth Phase: the client must authenticate within 10 seconds.
            try:
                with async_timeout.timeout(10):
                    msg = await wsock.receive()
            except asyncio.TimeoutError:
                disconnect_warn = \
                    'Did not receive auth message within 10 seconds'
                raise Disconnect

            if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
                raise Disconnect

            if msg.type != WSMsgType.TEXT:
                disconnect_warn = 'Received non-Text message.'
                raise Disconnect

            try:
                msg = msg.json()
            except ValueError:
                disconnect_warn = 'Received invalid JSON.'
                raise Disconnect

            self._logger.debug("Received %s", msg)
            connection = await auth.async_handle(msg)
            self.hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_WEBSOCKET_CONNECTED)

            # Command phase: dispatch each TEXT frame to the authenticated
            # connection until the socket closes or a bad frame arrives.
            while not wsock.closed:
                msg = await wsock.receive()

                if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
                    break
                elif msg.type != WSMsgType.TEXT:
                    disconnect_warn = 'Received non-Text message.'
                    break

                try:
                    msg = msg.json()
                except ValueError:
                    disconnect_warn = 'Received invalid JSON.'
                    break

                self._logger.debug("Received %s", msg)
                connection.async_handle(msg)

        except asyncio.CancelledError:
            self._logger.info("Connection closed by client")
        except Disconnect:
            pass
        except Exception:  # pylint: disable=broad-except
            self._logger.exception("Unexpected error inside websocket API")
        finally:
            unsub_stop()

            if connection is not None:
                connection.async_close()

            try:
                # Sentinel tells _writer() to stop after the queue drains.
                self._to_write.put_nowait(None)
                # Make sure all error messages are written before closing
                await self._writer_task
            except asyncio.QueueFull:
                self._writer_task.cancel()

            await wsock.close()

            if disconnect_warn is None:
                self._logger.debug("Disconnected")
            else:
                self._logger.warning("Disconnected: %s", disconnect_warn)
            self.hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_WEBSOCKET_DISCONNECTED)

        return wsock
|
apache-2.0
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/pygments/styles/fruity.py
|
31
|
1298
|
# -*- coding: utf-8 -*-
"""
pygments.styles.fruity
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "fruity" vim theme.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, \
Generic, Number, String, Whitespace
class FruityStyle(Style):
    """
    Pygments version of the "native" vim theme.
    """
    # NOTE(review): the docstring says "native" but the module docstring says
    # this is the "fruity" theme -- likely a copy/paste slip; confirm before
    # relying on the docstring.

    # Dark theme: near-black page background, slightly lighter highlight.
    background_color = '#111111'
    highlight_color = '#333333'

    # Token -> style-string map consumed by the Pygments Style machinery.
    styles = {
        Whitespace: '#888888',
        Token: '#ffffff',
        Generic.Output: '#444444 bg:#222222',
        Keyword: '#fb660a bold',
        Keyword.Pseudo: 'nobold',
        Number: '#0086f7 bold',
        Name.Tag: '#fb660a bold',
        Name.Variable: '#fb660a',
        Comment: '#008800 bg:#0f140f italic',
        Name.Attribute: '#ff0086 bold',
        String: '#0086d2',
        Name.Function: '#ff0086 bold',
        Generic.Heading: '#ffffff bold',
        Keyword.Type: '#cdcaa9 bold',
        Generic.Subheading: '#ffffff bold',
        Name.Constant: '#0086d2',
        Comment.Preproc: '#ff0007 bold'
    }
|
gpl-3.0
|
dsweet04/rekall
|
rekall-core/rekall/plugins/overlays/darwin/darwin.py
|
1
|
54906
|
# Rekall Memory Forensics
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@gmail.com>"
from rekall import obj
from rekall.plugins.addrspaces import amd64
from rekall.plugins.overlays import basic
from rekall_lib import utils
darwin_overlay = {
"proc": [None, {
# Some standard fields for Darwin processes.
"name": lambda x: x.p_comm,
"pid": lambda x: x.p_pid.v(),
"dtb": lambda x: x.task.map.pmap.pm_cr3.v(),
"p_list": [None, ["LIST_ENTRY"]],
"p_sibling": [None, ["LIST_ENTRY"]],
"p_comm": [None, ["UnicodeString", dict(length=17)]],
"task": [None, ["Pointer", dict(
target="task"
)]],
}],
"task": [None, {
"bsd_info": [None, ["Pointer", dict(target="proc")]],
}],
"rtentry": [None, {
"base_calendartime": [None, ["UnixTimeStamp"]],
"source_ip": lambda x: x.rt_nodes[0].rn_u.rn_leaf.m(
"rn_Key").dereference_as("sockaddr"),
"dest_ip": lambda x: x.rt_gateway.deref(),
"name": lambda x: x.rt_ifp.name,
"sent": lambda x: x.m("rt_stats").nstat_txpackets,
"rx": lambda x: x.rt_expire if x.m("rt_stats") else None,
"delta": lambda x: (x.rt_expire - x.base_uptime
if x.rt_expire else 0),
}],
"kmod_info": [None, {
"name": lambda x: utils.SmartUnicode(x.m("name").cast("UnicodeString")),
"base": lambda x: x.address.v(),
"end": lambda x: x.base + x.size,
"version": [None, ["String"]],
# Starting address of the kernel module.
"address": [None, ["Pointer"]],
}],
"sockaddr": [None, {
# github.com/opensource-apple/xnu/blob/10.9/bsd/sys/socket.h#L370
"sa_family": [None, ["Enumeration", dict(
enum_name="sa_family_t",
target="unsigned char",
)]],
}],
"sysctl_oid": [None, {
# xnu-2422.1.72/bsd/sys/sysctl.h: 148
# This field is reused for two purposes, the first is type of node,
# while the second is the permissions.
"oid_kind_type": lambda x: x.m("oid_kind").cast(
"Enumeration", choices={
1: "CTLTYPE_NODE",
2: "CTLTYPE_INT",
3: "CTLTYPE_STRING",
4: "CTLTYPE_QUAD",
5: "CTLTYPE_OPAQUE",
},
target="BitField",
target_args=dict(start_bit=0, end_bit=8),
),
"oid_perms": lambda x: x.m("oid_kind").cast(
"Flags", maskmap={
"CTLFLAG_RD": 0x80000000, # Allow reads of variable */
"CTLFLAG_WR": 0x40000000, # Allow writes to the variable
# A node will handle locking by itself.
"CTLFLAG_LOCKED": 0x00800000,
},
),
"oid_name": [None, ["Pointer", dict(target="String")]],
}],
"zone": [None, {
"zone_name": [None, ["Pointer", dict(target="String")]],
"free_elements": [None, ["Pointer", dict(
target="zone_free_element"
)]],
}],
"vm_map_entry": [None, {
# xnu-2422.1.72/osfmk/mach/vm_prot.h:81
"protection": [None, ["Flags", dict(
maskmap={
"VM_PROT_READ": 1,
"VM_PROT_WRITE": 2,
"VM_PROT_EXECUTE": 4,
},
target="BitField",
target_args=dict(
start_bit=7,
end_bit=10
)
)]],
"max_protection": [None, ["Flags", dict(
maskmap={
"VM_PROT_READ": 1,
"VM_PROT_WRITE": 2,
"VM_PROT_EXECUTE": 4,
},
target="BitField",
target_args=dict(
start_bit=10,
end_bit=13
)
)]],
}],
"vnode": [None, {
"v_name": [None, ["Pointer", dict(target="String")]],
"path": lambda self: "/".join(reversed(
[unicode(y.v_name.deref()) for y in self.walk_list("v_parent")])),
# xnu-2422.1.72/bsd/sys/vnode_internal.h:230
"v_flag": [None, ["Flags", dict(
maskmap={
"VROOT": 0x000001,
"VTEXT": 0x000002,
"VSYSTEM": 0x000004,
"VISTTY": 0x000008,
"VRAGE": 0x000010,
}
)]],
}],
"cat_attr": [None, {
"ca_atime": [None, ["UnixTimeStamp"]],
"ca_atimeondisk": [None, ["UnixTimeStamp"]],
"ca_mtime": [None, ["UnixTimeStamp"]],
"ca_ctime": [None, ["UnixTimeStamp"]],
"ca_itime": [None, ["UnixTimeStamp"]],
"ca_btime": [None, ["UnixTimeStamp"]],
}],
"ifnet": [None, {
"if_name": [None, ["Pointer", dict(target="String")]],
}],
"session": [None, {
"s_login": [None, ["String"]],
}],
"filedesc": [None, {
# Defined here:
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/filedesc.h#L113
"fd_ofileflags": [None, ["Pointer", dict(
target="Array",
target_args=dict(
target="Flags",
target_args=dict(
target="unsigned char",
maskmap=utils.MaskMapFromDefines("""
/*
* Per-process open flags.
*/
#define UF_EXCLOSE 0x01 /* auto-close on exec */
#define UF_FORKCLOSE 0x02 /* auto-close on fork */
#define UF_RESERVED 0x04 /* open pending / in progress */
#define UF_CLOSING 0x08 /* close in progress */
#define UF_RESVWAIT 0x10 /* close in progress */
#define UF_INHERIT 0x20 /* "inherit-on-exec" */
"""))))]],
"fd_ofiles": [None, ["Pointer", dict(
target="Array",
target_args=dict(
target="Pointer",
count=lambda x: x.fd_lastfile,
target_args=dict(
target="fileproc"
)
)
)]],
}],
"mount": [None, {
# xnu-2422.1.72/bsd/sys/mount.h
"mnt_flag": [None, ["Flags", dict(
maskmap={
"MNT_LOCAL": 0x00001000,
"MNT_QUOTA": 0x00002000,
"MNT_ROOTFS": 0x00004000,
}
)]],
}],
"vfsstatfs": [None, {
"f_mntonname": [None, ["String"]],
"f_mntfromname": [None, ["String"]],
"f_fstypename": [None, ["String"]],
}],
"sockaddr_un": [None, {
# sun_len is the number of bytes from start of the struct to the NULL
# terminator in the sun_path member. (Yes, really. [1]) The
# original/Volatility code used it as length of the string itself,
# leading to a subtle bug, when the sun_path wasn't NULL-terminated
# (which it doesn't have to be.)
# [1]
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/un.h#L77
"sun_path": [None, ["String", dict(
length=lambda x: (
x.sun_len
- x.obj_profile.get_obj_offset("sockaddr_un", "sun_path")
),
)]],
}],
"domain": [None, {
# xnu-2422.1.72/bsd/sys/domain.h: 99
"dom_family": [None, ["Enumeration", dict(
enum_name="sa_family_t",
target="unsigned char",
)]],
}],
"tcpcb": [None, {
"t_state": [None, ["Enumeration", dict(
# xnu-2422.1.72/bsd/netinet/tcp_fsm.h: 75
choices={
0: "TCPS_CLOSED",
1: "TCPS_LISTEN",
2: "TCPS_SYN_SENT",
3: "TCPS_SYN_RECEIVED",
4: "TCPS_ESTABLISHED",
5: "TCPS_CLOSE_WAIT",
6: "TCPS_FIN_WAIT_1",
7: "TCPS_CLOSING",
8: "TCPS_LAST_ACK",
9: "TCPS_FIN_WAIT_2",
10: "TCPS_TIME_WAIT",
},
target="int",
)]],
}],
"protosw": [None, {
"pr_protocol": [None, ["Enumeration", dict(
# xnu-2422.1.72/bsd/netinet/in.h: 99
choices={
0: "IPPROTO_IP",
1: "IPPROTO_ICMP",
2: "IPPROTO_IGMP",
4: "IPPROTO_IPV4",
6: "IPPROTO_TCP",
17: "IPPROTO_UDP",
41: "IPPROTO_IPV6",
50: "IPPROTO_ESP",
51: "IPPROTO_AH",
58: "IPPROTO_ICMPV6",
255: "IPPROTO_RAW",
},
target="short",
)]],
"pr_type": [None, ["Enumeration", dict(
enum_name="pr_type",
target="short",
)]],
}],
"OSDictionary": [None, {
"dictionary": [None, ["Pointer", dict(
target="Array",
target_args=dict(
target="dictEntry",
count=lambda x: x.count,
)
)]],
}],
"OSString": [None, {
"value": lambda x: x.obj_profile.UnicodeString(
offset=x.string,
length=x.length
),
}],
"OSOrderedSet": [None, {
"array": [None, ["Pointer", dict(
target="Array",
target_args=dict(
target="_Element",
count=lambda x: x.count
)
)]],
}],
"_Element": [8, {
"obj": [0, ["pointer", ["OSMetaClassBase"]]]
}],
"PE_state": [None, {
"bootArgs": [None, ["Pointer", dict(target="boot_args")]],
}],
"boot_args": [None, {
"CommandLine": [None, ["String"]]
}],
"EfiMemoryRange": [None, {
# xnu-1699.26.8/pexpert/pexpert/i386/boot.h: 46
"Type": [None, ["Enumeration", dict(
choices={
0: "kEfiReservedMemoryType",
1: "kEfiLoaderCode",
2: "kEfiLoaderData",
3: "kEfiBootServicesCode",
4: "kEfiBootServicesData",
5: "kEfiRuntimeServicesCode",
6: "kEfiRuntimeServicesData",
7: "kEfiConventionalMemory",
8: "kEfiUnusableMemory",
9: "kEfiACPIReclaimMemory",
10: "kEfiACPIMemoryNVS",
11: "kEfiMemoryMappedIO",
12: "kEfiMemoryMappedIOPortSpace",
13: "kEfiPalCode",
14: "kEfiMaxMemoryType",
},
target="unsigned int"
)]],
}]
}
darwin_enums = {
# Socket address families, defined here because we use them in a couple of
# places. This is a "fake enum" in the sense that the values are declared
# as #defines, but only ever used in conjunction with a specific datatype
# (which is sa_family_t).
#
# From the horse's mouth:
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/socket.h#L370
#
# The data type is defined here:
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/_types/_sa_family_t.h?source=cc#L30
#
# Basically the exact same fake enumeration is used in the same file for
# protocol families here:
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/socket.h#L493
#
# Because the numbers are the same (in fact, PF_ defines are mapped 1:1
# with addressing families) we just use AF_ for everything.
"sa_family_t": utils.EnumerationFromDefines("""
/*
* Address families.
*/
#define AF_UNSPEC 0 /* unspecified */
#define AF_UNIX 1 /* local to host (pipes) */
#define AF_INET 2 /* internetwork: UDP, TCP, etc. */
#define AF_IMPLINK 3 /* arpanet imp addresses */
#define AF_PUP 4 /* pup protocols: e.g. BSP */
#define AF_CHAOS 5 /* mit CHAOS protocols */
#define AF_NS 6 /* XEROX NS protocols */
#define AF_ISO 7 /* ISO protocols */
#define AF_ECMA 8 /* European computer manufacturers */
#define AF_DATAKIT 9 /* datakit protocols */
#define AF_CCITT 10 /* CCITT protocols, X.25 etc */
#define AF_SNA 11 /* IBM SNA */
#define AF_DECnet 12 /* DECnet */
#define AF_DLI 13 /* DEC Direct data link interface */
#define AF_LAT 14 /* LAT */
#define AF_HYLINK 15 /* NSC Hyperchannel */
#define AF_APPLETALK 16 /* Apple Talk */
#define AF_ROUTE 17 /* Internal Routing Protocol */
#define AF_LINK 18 /* Link layer interface */
#define pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */
#define AF_COIP 20 /* connection-oriented IP, aka ST II */
#define AF_CNT 21 /* Computer Network Technology */
#define pseudo_AF_RTIP 22 /* Help Identify RTIP packets */
#define AF_IPX 23 /* Novell Internet Protocol */
#define AF_SIP 24 /* Simple Internet Protocol */
#define pseudo_AF_PIP 25 /* Help Identify PIP packets */
#define AF_NDRV 27 /* Network Driver 'raw' access */
#define AF_ISDN 28 /* Integrated Services Digital Network*/
#define pseudo_AF_KEY 29 /* Internal key-management function */
#define AF_INET6 30 /* IPv6 */
#define AF_NATM 31 /* native ATM access */
#define AF_SYSTEM 32 /* Kernel event messages */
#define AF_NETBIOS 33 /* NetBIOS */
#define AF_PPP 34 /* PPP communication protocol */
#define pseudo_AF_HDRCMPLT 35 /* Used by BPF to not rewrite headers
* in interface output routine */
#define AF_AFP 36 /* Used by AFP */
#define AF_IEEE80211 37 /* IEEE 802.11 protocol */
#define AF_UTUN 38
#define AF_MULTIPATH 39
#define AF_MAX 40
"""),
# Socket types, defined here:
# https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/socket.h#L133
"pr_type": utils.EnumerationFromDefines("""
/*
* Types
*/
#define SOCK_STREAM 1 /* stream socket */
#define SOCK_DGRAM 2 /* datagram socket */
#define SOCK_RAW 3 /* raw-protocol interface */
#define SOCK_RDM 4 /* reliably-delivered message */
#define SOCK_SEQPACKET 5 /* sequenced packet stream */
"""),
}
# Extra vtypes installed only on 64-bit profiles (see Darwin64.Initialize):
# a LIST_ENTRY of two 8-byte pointers, 16 bytes total.
darwin64_types = {
    "LIST_ENTRY": [16, {
        # Pointer to the next element in the list.
        "le_next": [0, ["Pointer"]],
        # Address of the previous element's le_next (pointer to pointer).
        "le_prev": [8, ["Pointer", dict(target="Pointer")]],
    }],
}
class LIST_ENTRY(obj.Struct):
    """XNU defines lists inline using an anonymous struct. This makes it hard
    for us to automatically support lists because the debugging symbols don't
    indicate this inner struct is of any particular type (since it's anonymous).
    We therefore depend on the overlays to redefine each list member as a
    LIST_ENTRY member. For example we see code like:
    struct proc {
        LIST_ENTRY(proc) p_list;
        ...
    Where:
    #define LIST_ENTRY(type) \
        struct { \
            struct type *le_next; /* next element */ \
            struct type **le_prev; /* address of previous next element */ \
        }
    """
    # Names of the struct members holding the forward/backward links.
    _forward = "le_next"
    _backward = "le_prev"
    def is_valid(self):
        """Must have both valid next and prev pointers."""
        return (self.m(self._forward).is_valid() and
                self.m(self._backward).is_valid())
    def _GetNextEntry(self, type, member):
        # Follow the forward link, recast it as `type` and return its
        # list member (itself a LIST_ENTRY) for further traversal.
        return self.m(self._forward).dereference_as(type).m(member)
    def _GetPreviousEntry(self):
        # The backward link points at the previous entry's le_next, i.e.
        # directly at another LIST_ENTRY of our own type.
        return self.m(self._backward).dereference_as(self.obj_type)
    def dereference_as(self, type, member, vm=None):
        """Recasts the list entry as a member in a type, and return the type.
        Args:
            type: The name of this Struct type.
            member: The name of the member of this Struct.
            address_space: An optional address space to switch during
                dereferencing.
        """
        # The containing struct starts `offset` bytes before this entry.
        offset = self.obj_profile.get_obj_offset(type, member)
        item = self.obj_profile.Object(
            type_name=type, offset=self.obj_offset - offset,
            vm=vm or self.obj_vm, parent=self.obj_parent,
            name=type, context=self.obj_context)
        return item
    def find_all_lists(self, type, member, seen=None):
        """Follows all the list entries starting from lst.
        We basically convert the list to a tree and recursively search it for
        new nodes. From each node we follow the Flink and then the Blink. When
        we see a node we already have, we backtrack.
        Returns:
            The set of all LIST_ENTRY nodes reachable from self.
        """
        if seen is None:
            seen = set()
        # Stop on invalid entries or on cycles (already-visited nodes).
        if not self.is_valid():
            return seen
        elif self in seen:
            return seen
        seen.add(self)
        flink = self._GetNextEntry(type, member)
        flink.find_all_lists(type, member, seen=seen)
        blink = self._GetPreviousEntry()
        blink.find_all_lists(type, member, seen=seen)
        return seen
    def list_of_type(self, type, member=None, include_current=True):
        """Yield every containing struct of `type` reachable from this entry."""
        # We sort here to ensure we have stable ordering as the output of this
        # call.
        result = sorted(self.find_all_lists(type, member),
                        key=lambda x: x.obj_offset)
        if member is None:
            member = self.obj_name
        # Return ourselves as the first item.
        if include_current:
            yield self.dereference_as(type, member)
        # We traverse all the _LIST_ENTRYs we can find, and cast them all back
        # to the required member.
        for lst in result:
            # Skip ourselves in this (list_of_type is usually invoked on a list
            # head).
            if lst.obj_offset == self.obj_offset:
                continue
            task = lst.dereference_as(type, member)
            if task:
                # Only yield valid objects (In case of dangling links).
                yield task
    def reflect(self, vm=None):
        """Reflect this list element by following its Flink and Blink.
        This is basically the same as Flink.Blink except that it also checks
        Blink.Flink. It also ensures that Flink and Blink are dereferences to
        the correct type in case the vtypes do not specify them as pointers.
        Returns:
            the result of Flink.Blink.
        """
        result1 = self.m(self._forward).dereference_as(
            self.obj_type, vm=vm).m(self._backward).deref().cast(
                self.obj_type)
        if not result1:
            return obj.NoneObject("Flink not valid.")
        # NOTE(review): `self.Blink` is a Windows-style name; this presumably
        # resolves via an overlay alias rather than the le_prev member above —
        # confirm against the profile before relying on reflect().
        result2 = self.Blink.deref().dereference_as(
            self.obj_type, vm=vm).m(
                self._forward).dereference_as(self.obj_type)
        if result1 != result2:
            return obj.NoneObject("Flink and Blink not consistent.")
        return result1
    def __nonzero__(self):
        # List entries are valid when both Flinks and Blink are valid
        return bool(self.m(self._forward)) or bool(self.m(self._backward))
    def __iter__(self):
        # Iterating a LIST_ENTRY yields the containing structs of the parent's
        # type, using our own member name for the recast.
        return self.list_of_type(self.obj_parent.obj_type, self.obj_name)
class llinfo_arp(obj.Struct):
    """An ARP cache entry (struct llinfo_arp)."""
    @utils.safe_property
    def isvalid(self):
        """True when the entry's route points back at this very struct.

        A consistent llinfo_arp has la_rt.rt_llinfo referring to its own
        offset; anything else (or a missing member) marks it invalid.
        """
        try:
            backref = self.la_rt.rt_llinfo.v()
        except AttributeError:
            return False
        return backref == self.obj_offset
class queue_entry(basic.ListMixIn, obj.Struct):
    """A queue_entry is an externalized linked list.
    Although the queue_entry is defined as:
    struct queue_entry {
        struct queue_entry *next; /* next element */
        struct queue_entry *prev; /* previous element */
    };
    This is in fact not correct since the next, and prev pointers
    point to the start of the next struct. A queue_entry has a queue
    head which is also a queue entry and this can be iterated over
    using the list_of_type method.
    NOTE: list_of_type should only be called on the head queue_entry.
    """
    # Member names used by basic.ListMixIn for traversal.
    _forward = "next"
    _backward = "prev"
    def list_of_type(self, type, member):
        """Yield each element of the queue recast as `type`."""
        # Record the tail (our prev pointer) so a full wrap-around stops
        # the walk even if the head is revisited via another offset.
        seen = set()
        seen.add(self.prev.v())
        item = self.next.dereference_as(type)
        # NOTE: "!= None" (rather than "is not None") is deliberate here:
        # a failed dereference presumably yields a NoneObject, which
        # compares equal to None but is not the None singleton.
        while item != None:
            yield item
            # Stop once we revisit an offset (cycle detected).
            if item.obj_offset in seen:
                return
            seen.add(item.obj_offset)
            item = item.m(member).next.dereference_as(type)
class sockaddr_dl(obj.Struct):
    """Link-layer socket address; renders as a colon-separated MAC string."""
    def __unicode__(self):
        # sdl_data holds the interface name (sdl_nlen bytes) followed by
        # the link-layer address (sdl_alen bytes). Hex-format each address
        # byte and join them with colons.
        octets = ("%.02X" % ord(self.sdl_data[self.sdl_nlen + idx].v())
                  for idx in xrange(self.sdl_alen))
        return ":".join(octets)
class fileproc(obj.Struct):
    """Represents an open file, owned by a process."""
    # Maps fileglob type enum names to short human-readable labels; "-"
    # catches unparseable values.
    DTYPE_TO_HUMAN = {
        "DTYPE_SOCKET": "socket",
        "DTYPE_VNODE": "vnode",
        "DTYPE_PSXSEM": "POSIX Semaphore",
        "DTYPE_PSXSHM": "POSIX Shared Mem.",
        "DTYPE_KQUEUE": "kernel queue",
        "DTYPE_PIPE": "pipe",
        "DTYPE_FSEVENTS": "FS Events",  # needs more research
        # I (Adam) /believe/ this is a remnant of the AppleTalk support,
        # however nowadays it's unfortunately often used to mean basically
        # DTYPE_OTHER (which XNU doesn't have). Example of how this might be
        # used in current code:
        # opensource.apple.com/source/xnu/xnu-1456.1.26/bsd/netat/sys_dep.c
        "DTYPE_ATALK": "<unknown>",
        "-": "INVALID",
    }
    @utils.safe_property
    def fg_type(self):
        """Returns type of the fileglob (e.g. vnode, socket, etc.)"""
        # The member moved between OS X releases; try both locations.
        return self.multi_m(
            # OS X 10.8 and earlier
            "f_fglob.fg_type",
            # OS X 10.9 and later
            "f_fglob.fg_ops.fo_type")
    @property
    def socket(self):
        """Return the associated socket if the dtype is for socket."""
        if self.fg_type == "DTYPE_SOCKET":
            return self.f_fglob.fg_data.dereference_as("socket")
    @property
    def vnode(self):
        """Return the associated vnode if the dtype is for vnode."""
        if self.fg_type == "DTYPE_VNODE":
            return self.f_fglob.fg_data.dereference_as("vnode")
    def autocast_fg_data(self):
        """Returns the correct struct with fg_type-specific information.
        This can be one of vnode, socket, shared memory or semaphore [1].
        Of those four, we currently only get extra information for vnode and
        socket. For everything else, we return a NoneObject.
        [1]:
        https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/file_internal.h#L184
        """
        dtype = self.fg_type
        # Semaphore and shared memory are known structs, but we currently don't
        # know of anything interesting that should be extracted from them.
        if dtype == "DTYPE_SOCKET":
            return self.f_fglob.fg_data.dereference_as("socket")
        elif dtype == "DTYPE_VNODE":
            return self.f_fglob.fg_data.dereference_as("vnode")
        # elif dtype == "DTYPE_PSXSEM":
        #     return self.f_fglob.fg_data.dereference_as("semaphore")
        # elif dtype == "DTYPE_PSXSHM":
        #     return self.f_fglob.fg_data.dereference_as("vm_shared_region")
        # That would be an unknown DTYPE.
        return self.f_fglob.fg_data
    @utils.safe_property
    def human_name(self):
        # Delegate naming entirely to the type-specific struct, if any.
        return getattr(self.autocast_fg_data(), "human_name", None)
    @utils.safe_property
    def human_type(self):
        # Delegate to fg_data if it thinks it knows what it is.
        return getattr(
            self.autocast_fg_data(),
            "human_type",
            self.DTYPE_TO_HUMAN[str(self.fg_type)]
        )
class socket(obj.Struct):
    """Provides human-readable accessors for sockets of the more common AFs.
    This class has two basic ways of getting information. Most attributes are
    computed using the method fill_socketinfo, which is directly adapted from
    the kernel function of the same name. For the few things that
    fill_socketinfo doesn't care about, the properties themselves get the
    data and provide references to the kernel source for anyone wondering
    why and how all this works.
    """
    # Memoized result of fill_socketinfo (populated lazily by
    # get_socketinfo_attr).
    cached_socketinfo = None
    def fill_socketinfo(self):
        """Computes information about sockets of some addressing families.
        This function is directly adapted from the kernel function
        fill_socketinfo [1]. The original function is used to fill a struct
        with addressing and other useful information about sockets of a few
        key addressing families. All families are supported, but only the
        following will return useful information:
        - AF_INET (IPv4)
        - AF_INET6 (IPv6)
        - AF_UNIX (Unix socket)
        - AF_NDRV (Network driver raw access)
        - AF_SYSTEM (Darwin-specific; see documentation [3])
        Differences between the kernel function and this adaptation:
        - The kernel uses Protocol Families (prefixed with PF_). Rekall
          relies on Addressing Families (AF_) which are exactly the same.
        - The kernel fills a struct; this function returns a dict with the
          same members.
        - The kernel returns the data raw. This function converts endianness
          and unions to human-readable representations, as appropriate.
        - Only a subset of members are filled in.
        - Other differences as documented in code.
        Returns:
            A dict with the same members as struct socket_info and related.
            Only member that's always filled is "soi_kind". That's not Spanish,
            but one of the values in this anonymous enum [2], which determines
            what other members are present. (Read the code.)
        [1]
        https://github.com/opensource-apple/xnu/blob/10.9/bsd/kern/socket_info.c#L98
        [2]
        https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/proc_info.h#L503
        [3] "KEXT Controls and Notifications"
        https://developer.apple.com/library/mac/documentation/Darwin/Conceptual/NKEConceptual/control/control.html
        """
        domain = self.so_proto.pr_domain.dom_family
        type_name = self.so_proto.pr_type
        protocol = self.so_proto.pr_protocol
        si = {"soi_kind": "SOCKINFO_GENERIC"}
        # The kind of socket is determined by the triplet
        # {domain, type, protocol}
        if domain in ["AF_INET", "AF_INET6"]:
            si["soi_kind"] = "SOCKINFO_IN"
            inp = self.so_pcb.dereference_as("inpcb")
            # Ports are stored in network byte order; convert to host order.
            si["insi_fport"] = utils.ntoh(inp.inp_fport)
            si["insi_lport"] = utils.ntoh(inp.inp_lport)
            si["insi_ip_ttl"] = inp.inp_ip_ttl.v()
            # Different from kernel: insi_[df]addr is a union, and by setting
            # the IPv6 address, you set the IPv4 address too. We instead return
            # a string with the appropriately formatted address.
            if domain == "AF_INET":
                si["insi_faddr"] = utils.FormatIPAddress(
                    "AF_INET",
                    inp.inp_dependfaddr.inp46_foreign.ia46_addr4.s_addr
                )
                si["insi_laddr"] = utils.FormatIPAddress(
                    "AF_INET",
                    inp.inp_dependladdr.inp46_local.ia46_addr4.s_addr
                )
            else:
                si["insi_faddr"] = utils.FormatIPAddress(
                    "AF_INET6",
                    inp.inp_dependfaddr.inp6_foreign.m("__u6_addr")
                )
                si["insi_laddr"] = utils.FormatIPAddress(
                    "AF_INET6",
                    inp.inp_dependladdr.inp6_local.m("__u6_addr")
                )
            # TCP sockets additionally expose state and flags from the tcpcb.
            # NOTE: "!= None" (not "is not None") is presumably deliberate —
            # invalid pointers yield NoneObject, which compares equal to None.
            if (type_name == "SOCK_STREAM"
                    and (protocol == 0 or protocol == "IPPROTO_TCP")
                    and inp.inp_ppcb != None):
                tp = inp.inp_ppcb.dereference_as("tcpcb")
                si["soi_kind"] = "SOCKINFO_TCP"
                si["tcpsi_state"] = tp.t_state
                si["tcpsi_flags"] = tp.t_flags
        elif domain == "AF_UNIX":
            unp = self.so_pcb.dereference_as("unpcb")
            si["soi_kind"] = "SOCKINFO_UN"
            if unp.unp_addr:
                # Difference from kernel: instead of copying the whole unp_addr
                # struct, we just get delegate getting the actual string to the
                # unp_addr struct. (Because it's trickier than it looks.)
                si["unsi_addr"] = unp.unp_addr.sun_path
        elif domain == "AF_NDRV":
            # This is how we get the pcb if we need to:
            # ndrv_cb = self.so_pcb.dereference_as("ndrv_cb")
            si["soi_kind"] = "SOCKINFO_NDRV"
        elif domain == "AF_SYSTEM":
            # AF_SYSTEM domain needs more research. It looks like it's used to
            # communicate between user space and kernel extensions, and allows
            # the former to control the latter. Naively, this looks ripe for
            # rootkits to me.
            if protocol == "SYSPROTO_EVENT":
                # This is how we get the pcb if we need to:
                # ev_pcb = self.so_pcb.dereference_as("kern_event_pcb")
                si["soi_kind"] = "SOCKINFO_KERN_EVENT"
            elif protocol == "SYSPROTO_CONTROL":
                kcb = self.so_pcb.dereference_as("ctl_cb")
                kctl = kcb.kctl
                si["soi_kind"] = "SOCKINFO_KERN_CTL"
                if kctl:
                    si["kcsi_id"] = kctl.id
                    si["kcsi_name"] = kctl.name
        return si
    def get_socketinfo_attr(self, attr):
        """Run fill_socketinfo if needed, cache result, return value of attr."""
        if not self.cached_socketinfo:
            self.cached_socketinfo = self.fill_socketinfo()
        if attr not in self.cached_socketinfo:
            return obj.NoneObject(
                "socket of family {}/{} has no member {}".format(
                    self.addressing_family,
                    self.cached_socketinfo["soi_kind"],
                    attr))
        return self.cached_socketinfo[attr]
    @utils.safe_property
    def src_addr(self):
        """For IPv[46] sockets, return source IP as string."""
        return self.get_socketinfo_attr("insi_laddr")
    @utils.safe_property
    def dst_addr(self):
        """For IPv[46] sockets, return destination IP as string."""
        return self.get_socketinfo_attr("insi_faddr")
    @utils.safe_property
    def addressing_family(self):
        """The Addressing Family corresponds roughly to OSI layer 3."""
        return self.so_proto.pr_domain.dom_family
    @utils.safe_property
    def tcp_state(self):
        """For TCP sockets, the connection state (t_state)."""
        return self.get_socketinfo_attr("tcpsi_state")
    @utils.safe_property
    def vnode(self):
        """For Unix sockets, pointer to vnode, if any.
        This is the same way that OS gathers this information in response to
        syscall [1] (this is the API used by netstat, among others).
        1:
        https://github.com/opensource-apple/xnu/blob/10.9/bsd/kern/uipc_usrreq.c#L1683
        """
        if self.addressing_family == "AF_UNIX":
            return self.so_pcb.dereference_as("unpcb").unp_vnode
    @utils.safe_property
    def unp_conn(self):
        """For Unix sockets, the pcb of the paired socket. [1]
        You most likely want to do sock.conn_pcb.unp_socket to get at the
        other socket in the pair. However, because the sockets are paired
        through the protocol control block, it's actually useful to have
        a direct pointer at it in order to be able to spot paired sockets.
        1:
        https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/unpcb.h#L128
        """
        if self.addressing_family == "AF_UNIX":
            return self.so_pcb.dereference_as("unpcb").unp_conn
    @utils.safe_property
    def src_port(self):
        """For IPv[46] sockets, the local port (host byte order)."""
        return self.get_socketinfo_attr("insi_lport")
    @utils.safe_property
    def dst_port(self):
        """For IPv[46] sockets, the foreign port (host byte order)."""
        return self.get_socketinfo_attr("insi_fport")
    @utils.safe_property
    def l4_protocol(self):
        """For IPv[46] sockets, the transport protocol name without prefix."""
        if self.addressing_family in ["AF_INET", "AF_INET6"]:
            # All the values start with IPPROTO_.
            return str(self.so_proto.pr_protocol).replace("IPPROTO_", "")
    @utils.safe_property
    def unix_type(self):
        """For Unix sockets, the socket type (e.g. STREAM) without prefix."""
        if self.addressing_family == "AF_UNIX":
            pr_type = str(self.so_proto.pr_type)
            if pr_type:
                # All values begin with SOCK_.
                return pr_type.replace("SOCK_", "")
            else:
                # I am about 80% sure that this should never happen. Before
                # deciding how this should be handled (possibly by logging an
                # error), I'll need to do more research.
                return "Unix Socket"
    @utils.safe_property
    def human_name(self):
        """A short, human-readable description of the socket's endpoints."""
        if self.addressing_family in ["AF_INET", "AF_INET6"]:
            if self.l4_protocol in ["TCP", "UDP"]:
                return "{} ({}) -> {} ({})".format(
                    self.src_addr, self.src_port,
                    self.dst_addr, self.dst_port)
            return "{} -> {}".format(self.src_addr, self.dst_addr)
        if self.addressing_family == "AF_UNIX":
            return self.get_socketinfo_attr("unsi_addr")
        return None
    @utils.safe_property
    def human_type(self):
        """A short label describing the socket's protocol family/type."""
        if self.addressing_family == "AF_INET":
            return "{}v4".format(self.l4_protocol)
        if self.addressing_family == "AF_INET6":
            proto = self.l4_protocol
            # Some v6 protocols are already named with v6 in the name.
            if proto.endswith("6"):
                return proto
            return "{}v6".format(self.l4_protocol)
        if self.addressing_family == "AF_UNIX":
            return self.unix_type
        return "Sock: {}".format(self.addressing_family)
class sockaddr(obj.Struct):
    """Generic socket address; decodes the per-family representation."""
    def _get_address_obj(self):
        """Return the family-specific address object.

        Unknown families yield a NoneObject (falsy), which callers use to
        skip formatting.
        """
        family = self.sa_family
        if family == "AF_INET":
            return self.cast("sockaddr_in").sin_addr.s_addr
        if family == "AF_INET6":
            return self.cast("sockaddr_in6").sin6_addr.m("__u6_addr")
        if family == "AF_LINK":
            return self.cast("sockaddr_dl")
        return obj.NoneObject("Unknown socket family")
    @utils.safe_property
    def address(self):
        """The address rendered as a string ("" for unknown families)."""
        addr = self._get_address_obj()
        if not addr:
            return ""
        if self.sa_family in ("AF_INET6", "AF_INET"):
            # IP addresses get the standard dotted/colon notation.
            return str(utils.FormatIPAddress(self.sa_family, addr))
        if self.sa_family == "AF_LINK":
            # sockaddr_dl knows how to render itself as a MAC string.
            return str(addr)
        return ""
    def __unicode__(self):
        return self.address
class vm_map_entry(obj.Struct):
    def find_vnode_object(self):
        """Find the underlying vnode object for the given vm_map_entry.
        xnu-2422.1.72/osfmk/vm/bsd_vm.c: 1339.
        Returns:
            The vnode handle, or a NoneObject if this entry has no
            underlying vnode (e.g. anonymous memory or a submap).
        """
        if not self.is_sub_map.v():
            #/*
            #* The last object in the shadow chain has the
            #* relevant pager information.
            #*/
            shadow = self.last_shadow
            if not shadow:
                return shadow
            if (shadow and not shadow.internal.v() and
                    shadow.pager_ready.v() and
                    not shadow.terminating.v() and
                    shadow.alive.v()):
                memory_object = shadow.pager
                pager_ops = memory_object.mo_pager_ops
                # If this object points to the vnode_pager_ops, then we
                # found what we're looking for. Otherwise, this
                # vm_map_entry doesn't have an underlying vnode and so we
                # fall through to the bottom and return NULL.
                if pager_ops == self.obj_profile.get_constant(
                        "_vnode_pager_ops", is_address=True):
                    return shadow.pager.dereference_as(
                        "vnode_pager").vnode_handle
        return obj.NoneObject("vnode not found")
    @utils.safe_property
    def sharing_mode(self):
        """Returns the sharing mode of the backing vm_object.
        This is losely adapted from vm_map.c, void vm_map_region_top_walk(),
        except we're not filling page counts for resident/reusable, etc.
        """
        if not self.vmo_object or self.is_sub_map:
            return "SM_EMPTY"  # Nada.
        vmobj = self.vmo_object
        ref_count = vmobj.ref_count
        # An in-flight paging operation holds an extra reference.
        if vmobj.paging_in_progress:
            ref_count -= 1
        if vmobj.shadow:
            return "SM_COW"  # Copy on write.
        if self.superpage_size:
            return "SM_LARGE_PAGE"  # Shared large (huge) page.
        if self.needs_copy:
            return "SM_COW"
        if ref_count == 1 or (not vmobj.pager_trusted and not
                              vmobj.internal):
            return "SM_PRIVATE"
        return "SM_SHARED"
    @utils.safe_property
    def code_signed(self):
        # Code-signing state lives on the terminal object of the shadow chain.
        return self.last_shadow.code_signed
    @utils.safe_property
    def last_shadow(self):
        """Walk the shadow chain to its terminal vm_object."""
        shadow = self.vmo_object
        if not shadow:
            return obj.NoneObject("no vm_object found")
        while shadow.shadow:
            shadow = shadow.shadow
        return shadow
    @utils.safe_property
    def start(self):
        """Start address of the mapped region."""
        return self.links.start.v()
    @utils.safe_property
    def end(self):
        """End address of the mapped region."""
        return self.links.end.v()
    @utils.safe_property
    def vmo_object(self):
        """Return the vm_object instance for this entry.
        There's an intermediate link called struct vm_map_entry.
        The members will be called either 'object' and 'vm_object' or
        'vme_object' and 'vmo_object'.
        There is no easy heuristic for which it will be in a particular kernel
        version* so we just try both, since they mean the same thing.
        * The kernel version numbers could be identical for kernels built from
        a feature branch and a kernel build from trunk, and the two could be
        months apart. Furthermore, the profiles are generated not from the
        kernel itself but from a debug kit and can end up using out of date
        naming conventions.
        """
        vme_object = self.multi_m("vme_object", "object")
        return vme_object.multi_m("vmo_object", "vm_object")
class clist(obj.Struct):
    """A character-list buffer, as used by the TTY layer."""
    @utils.safe_property
    def recovered_contents(self):
        """Gets the full contents of the ring buffer, which may be freed.
        This is different from getting the legal contents as with b_to_q [1]
        because clists are only used by TTYs and they seem to always be all
        marked as consumed, so b_to_q wouldn't let us see any content.
        1: github.com/opensource-apple/xnu/blob/10.9/bsd/kern/tty_subr.c#L358
        """
        raw = self.obj_vm.read(self.c_cs, self.c_cn)
        return utils.HexDumpedString(raw)
    @utils.safe_property
    def size(self):
        """Capacity of the buffer (c_cn) as an int."""
        return int(self.c_cn)
class tty(obj.Struct):
    """A terminal device; exposes its vnode and I/O queues."""
    @utils.safe_property
    def vnode(self):
        """The controlling terminal's vnode, reached via the session."""
        return self.t_session.s_ttyvp
    @utils.safe_property
    def input_buffer(self):
        """The raw input queue (t_rawq)."""
        return self.t_rawq
    @utils.safe_property
    def output_buffer(self):
        """The output queue (t_outq)."""
        return self.t_outq
class proc(obj.Struct):
    """Represents a Darwin process."""
    @utils.safe_property
    def vads(self):
        """Yield this process's vm_map entries (excluding the list head)."""
        return self.task.map.hdr.walk_list("links.next", include_current=False)
    def get_open_files(self):
        """Gets all open files (sockets, pipes...) owned by this proc.
        Yields:
            tuple of (fd, fileproc, flags)
        """
        # lastfile is a high water mark of valid fds [1]. That doesn't mean
        # there are no invalid fds at lower indexes! fd_freefile is a free
        # descriptor that tends to gravitate towards the lowest index as
        # as seen here [2]. When the kernel frees an fd it sets the pointer
        # to NULL and also clears the corresponding index in fd_ofilesflags
        # [3]. This creates a sparse array, so the search has to skip over
        # invalid fds along the way, just as the kernel does [4]. We skip
        # NULL pointers (and invalid pointers) but don't check for cleared
        # flags, since they're usually zero anyway.
        #
        # [1]:
        # https://github.com/opensource-apple/xnu/blob/10.9/bsd/sys/filedesc.h#L96
        # [2]:
        # https://github.com/opensource-apple/xnu/blob/10.9/bsd/kern/kern_descrip.c#L412
        # [3]:
        # https://github.com/opensource-apple/xnu/blob/10.9/bsd/kern/kern_descrip.c#L384
        # [4]:
        # https://github.com/opensource-apple/xnu/blob/10.9/bsd/kern/kern_descrip.c#L2960
        last_fd = self.p_fd.fd_lastfile
        ofiles = self.p_fd.fd_ofiles.deref()
        ofileflags = self.p_fd.fd_ofileflags
        for fd in xrange(last_fd + 1):  # xrange stops at N-1.
            file_obj = ofiles[fd].deref()
            # file_obj will be None if the pointer is NULL (see ref [4]), and
            # also when the pointer is simply invalid, which can happen
            # sometimes. Currently, I chalk it up to inconsistencies in the
            # volatile RAM image (since it's rare) but it might have another
            # explanation.
            if file_obj:
                yield (fd, file_obj, ofileflags[fd])
    def get_process_address_space(self):
        """Build an address space for this process from its pmap's CR3."""
        cr3 = self.task.map.pmap.pm_cr3
        as_class = self.obj_vm.__class__
        # Shared 64-bit maps need the full AMD64 paging implementation.
        if self.task.map.pmap.pm_task_map == "TASK_MAP_64BIT_SHARED":
            as_class = amd64.AMD64PagedMemory
        return as_class(base=self.obj_vm.base, session=self.obj_vm.session,
                        dtb=cr3, name="Pid %s" % self.p_pid)
    @utils.safe_property
    def command(self):
        """The process's short command name (p_comm) as unicode."""
        return utils.SmartUnicode(self.p_comm)
    @utils.safe_property
    def cr3(self):
        """The page-table base (CR3) of this process's pmap."""
        return self.task.map.pmap.pm_cr3
    @utils.safe_property
    def is_64bit(self):
        """Whether the task's address map is 64-bit.
        Fixed: the original read `proc.task...`, which referenced the class
        object itself (it has no `task` attribute) instead of this instance;
        the lookup must go through `self`.
        """
        return self.task.map.pmap.pm_task_map == "TASK_MAP_64BIT"
    @utils.safe_property
    def argv(self):
        """The process's argument vector read from its user stack."""
        result = []
        # Arguments live just below user_stack, occupying p_argslen bytes.
        array = self.obj_profile.ListArray(
            target="String",
            offset=self.user_stack - self.p_argslen,
            vm=self.get_process_address_space(),
            maximum_size=self.p_argslen,
        )
        for item in array:
            # Total size of the argv array is specified in argc (not counting
            # padding).
            if len(result) >= self.p_argc:
                break
            item = unicode(item)
            # The argv array may have null padding for alignment. Discard these
            # empty strings.
            if not len(item):
                continue
            result.append(item)
        # argv[0] is often repeated as the executable name, to avoid confusion,
        # we just discard it.
        if len(result) > 1 and result[0] == result[1]:
            result.pop(0)
        return result
    def validate(self):
        """Use heuristics to guess whether this proc is valid."""
        # NOTE(review): `self.pid` (rather than p_pid) presumably resolves
        # via an overlay alias — confirm against the profile.
        return (self.p_argc > 0
                and len(self.p_comm) > 0
                and self.p_start.v() > 0
                and 99999 > self.pid > 0)
class vnode(obj.Struct):
    @utils.safe_property
    def full_path(self):
        """Reconstruct the absolute path by walking v_name up to the root."""
        # TODO: Speed this up by caching the paths in the session.
        result = []
        _vnode = self
        # Iterate here until we hit the root of the filesystem.
        while not (_vnode.v_flag.VROOT and
                   _vnode.v_mount.mnt_flag.MNT_ROOTFS):
            result.append(_vnode.v_name.deref())
            # If there is no parent skip to the mount point.
            _vnode = _vnode.v_parent or _vnode.v_mount.mnt_vnodecovered
            # This is rare, but it does happen. I currently don't understand
            # why, so we just log a warning and report the node as an orphan.
            if not _vnode:
                # NOTE(review): _vnode has already been overwritten with the
                # null value at this point, so the logged address is the null
                # pointer's value rather than the orphan's own offset —
                # consider capturing the previous node first. TODO confirm.
                self.obj_session.logging.warning("vnode at 0x%x is orphaned.",
                                                 int(_vnode))
                return "<Orphan>"
        path = "/" + "/".join((str(x) for x in reversed(result) if x))
        return unicode(path.encode("string-escape"))
        # return "/" + "/".join((unicode(x) for x in reversed(result) if x))
    @utils.safe_property
    def human_type(self):
        # All vnodes render as regular files for display purposes.
        return "Reg. File"
    @utils.safe_property
    def human_name(self):
        return self.full_path
    @utils.safe_property
    def cnode(self):
        """If this is an HFS vnode, then v_data is a cnode."""
        node = self.v_data.dereference_as("cnode")
        # A real cnode's c_rwlock points back at the cnode itself; use that
        # as a sanity check before trusting the cast.
        if node.c_rwlock != node:
            return obj.NoneObject("This vnode has no valid cnode.")
        return node
    @utils.safe_property
    def uid(self):
        """The real uid from the vnode's POSIX credentials, if available."""
        uid = self.v_cred.cr_posix.cr_ruid
        if uid:
            return uid
        return obj.NoneObject("Could not retrieve POSIX creds.")
class cnode(obj.Struct):
    """An HFS cnode; renders its catalog-attribute timestamps as datetimes."""
    def _cattr_time(self, attr_name):
        # All four timestamps live in c_cattr and convert identically.
        return getattr(self.c_cattr, attr_name).as_datetime()
    @utils.safe_property
    def created_at(self):
        """ca_ctime as a datetime."""
        return self._cattr_time("ca_ctime")
    @utils.safe_property
    def modified_at(self):
        """ca_mtime as a datetime."""
        return self._cattr_time("ca_mtime")
    @utils.safe_property
    def accessed_at(self):
        """ca_atime as a datetime."""
        return self._cattr_time("ca_atime")
    @utils.safe_property
    def backedup_at(self):
        """ca_btime as a datetime."""
        return self._cattr_time("ca_btime")
class zone(obj.Struct):
    @utils.safe_property
    def name(self):
        """The zone's name string."""
        return utils.SmartUnicode(self.zone_name.deref())
    @utils.safe_property
    def count_active(self):
        """Number of elements currently allocated from this zone."""
        return int(self.count)
    @utils.safe_property
    def count_free(self):
        """Number of free elements (total minus active)."""
        return int(self.m("sum_count") - self.count)
    @utils.safe_property
    def tracks_pages(self):
        # Whether this zone keeps per-page lists (use_page_list flag).
        return bool(self.m("use_page_list"))
    @utils.safe_property
    def known_offsets(self):
        """Find valid offsets in the zone as tuples of (state, offset).
        Allocation zones keep track of potential places where an element of
        fixed size may be stored. The most basic zones only keep track of free
        pointers, so as to speed up allocation. Some zones also track already
        allocated data, using a separate mechanism. We support both.
        Returns a set of tuples of:
        - State, which can be "freed", "allocated" or "unknown".
        - Object offset, at which a struct may be located.
          (You will want to validate the struct itself for sanity.)
        """
        # Tracks what offsets we've looked at.
        seen_offsets = set()
        # Tracks pages we've tried to iterate through for possible offsets.
        seen_pages = set()
        # Let's walk the freed elements first. It's just a linked list:
        for element in self.free_elements.walk_list("next"):
            seen_offsets.add(element.obj_offset)
        # If we found just one known offset in a given page we actually know
        # that the whole page is dedicated to the zone allocator and other
        # offsets in it are also likely to be valid elements. Here we try to
        # discover such elements.
        for offset in seen_offsets.copy():
            # We assume pages are 4K. The zone allocator presently doesn't use
            # 2MB pages, as far as I know.
            page_start = offset & ~0xfff
            if page_start in seen_pages:
                continue
            seen_pages.add(page_start)
            seen_offsets.update(set(self._generate_page_offsets(page_start)))
        # Lastly, if we happen to track pages after they've been filled up
        # then we can go look at those pages. The relevant flag is
        # use_page_list.
        page_lists = {"all_free", "all_used", "intermediate"}
        # Field not present on OSX 10.7
        if self.m("use_page_list"):
            for page_list in page_lists:
                for page_start in self.m(page_list).walk_list("next"):
                    if page_start in seen_pages:
                        continue
                    seen_pages.add(page_start)
                    seen_offsets.update(self._generate_page_offsets(page_start))
        return seen_offsets
    def _generate_page_offsets(self, page_start):
        """Yield candidate element offsets within one 4K zone page."""
        limit = page_start + 0x1000 - self.elem_size
        # Page metadata is always inlined at the end of the page. So that's
        # space that contain valid elements.
        limit -= self.obj_profile.get_obj_size("zone_page_metadata")
        return xrange(page_start, limit, self.elem_size)
class ifnet(obj.Struct):
    """A network interface (struct ifnet)."""
    @utils.safe_property
    def name(self):
        """Interface name with unit appended, e.g. 'en0'."""
        return "%s%d" % (self.if_name.deref(), self.if_unit)
    @utils.safe_property
    def addresses(self):
        """Yield (protocol, address) string pairs configured on the NIC."""
        head = self.if_addrhead.tqh_first
        for entry in head.walk_list("ifa_link.tqe_next"):
            family = entry.ifa_addr.sa_family
            if family == "AF_LINK":
                # The (single) link-layer address renders as a MAC string.
                yield ("MAC", utils.SmartUnicode(entry.ifa_addr.deref()))
                continue
            if family == "AF_INET":
                layer3_proto = "IPv4"
            elif family == "AF_INET6":
                layer3_proto = "IPv6"
            else:
                layer3_proto = utils.SmartUnicode(family).replace("AF_", "")
            yield (layer3_proto, utils.SmartUnicode(entry.ifa_addr.deref()))
    @utils.safe_property
    def l2_addr(self):
        """The hardware (MAC) address, if one is configured."""
        for proto, addr in self.addresses:
            if proto == "MAC":
                return addr
    @utils.safe_property
    def l3_addrs(self):
        """All non-MAC (protocol, address) pairs."""
        return [(proto, addr) for proto, addr in self.addresses
                if proto != "MAC"]
    @utils.safe_property
    def ipv4_addr(self):
        """All IPv4 addresses, comma-separated."""
        return ", ".join(addr for proto, addr in self.addresses
                         if proto == "IPv4")
    @utils.safe_property
    def ipv6_addr(self):
        """All IPv6 addresses, comma-separated."""
        return ", ".join(addr for proto, addr in self.addresses
                         if proto == "IPv6")
class session(obj.Struct):
    @utils.safe_property
    def tty(self):
        # NOTE(review): `self.session` on a struct that is itself a session
        # looks odd; it presumably resolves through an overlay alias —
        # confirm against the profile.
        return self.session.s_ttyp
    @utils.safe_property
    def name(self):
        """Human-readable label: session id plus its leader's command."""
        return "Session %d (%s)" % (self.s_sid, self.s_leader.command)
    @utils.safe_property
    def username(self):
        """The login name recorded for this session (s_login)."""
        return utils.SmartUnicode(self.s_login)
    @utils.safe_property
    def uid(self):
        """The uid of the controlling terminal's vnode owner."""
        return self.tty.vnode.uid
class OSDictionary(obj.Struct):
    """The OSDictionary is a general purpose associative array described:
    xnu-1699.26.8/libkern/libkern/c++/OSDictionary.h
    """
    def items(self, value_class=None):
        """Iterate over the associative array and yield key, value pairs.

        Keys are rendered via OSString; values are optionally recast to
        value_class when one is given.
        """
        for pair in self.dictionary:
            name = pair.key.dereference_as("OSString").value
            value = pair.value
            if value_class:
                value = pair.value.dereference_as(value_class)
            yield name, value
class OSOrderedSet(obj.Struct):
    """An OSOrderedSet is a list of OSObject instances.
    xnu-1699.26.8/libkern/libkern/c++/OSOrderedSet.h
    """
    def list_of_type(self, type_name):
        """Yield each element's object pointer recast as type_name."""
        for element in self.array:
            yield element.obj.dereference_as(type_name)
class Darwin32(basic.Profile32Bits, basic.BasicClasses):
    """A Darwin profile."""
    METADATA = dict(
        os="darwin",
        arch="I386",
        type="Kernel")
    @classmethod
    def Initialize(cls, profile):
        """Install Darwin-specific classes, enums, overlays and constants."""
        super(Darwin32, cls).Initialize(profile)
        # Some Darwin profiles add a suffix to IOKIT objects. So OSDictionary
        # becomes OSDictionary_class. We automatically generate the overlays and
        # classes to account for this.
        # NOTE: the dict is mutated inside the loop; .keys() is relied on to
        # snapshot the keys first (Python 2 semantics, where it returns a list).
        for k in profile.vtypes.keys():
            if k.endswith("_class"):
                stripped_k = k[:-len("_class")]
                if stripped_k not in profile.vtypes:
                    profile.vtypes[stripped_k] = profile.vtypes[k]
                if stripped_k in darwin_overlay:
                    darwin_overlay[k] = darwin_overlay[stripped_k]
        profile.add_classes(
            LIST_ENTRY=LIST_ENTRY, queue_entry=queue_entry,
            sockaddr=sockaddr, sockaddr_dl=sockaddr_dl,
            vm_map_entry=vm_map_entry, proc=proc, vnode=vnode,
            socket=socket, clist=clist, zone=zone, ifnet=ifnet, tty=tty,
            # Support both forms with and without _class suffix.
            OSDictionary=OSDictionary, OSDictionary_class=OSDictionary,
            OSOrderedSet=OSOrderedSet, OSOrderedSet_class=OSOrderedSet,
            fileproc=fileproc, session=session, cnode=cnode,
            llinfo_arp=llinfo_arp)
        profile.add_enums(**darwin_enums)
        profile.add_overlay(darwin_overlay)
        profile.add_constants(dict(default_text_encoding="utf8"))
    def get_constant_cpp_object(self, constant, **kwargs):
        """A variant of get_constant_object which accounts for name mangling.
        Returns the object for the first constant whose (possibly mangled)
        name contains `constant`, or None if no constant matches.
        """
        for key in self.constants:
            if constant in key:
                return self.get_constant_object(key, **kwargs)
class Darwin64(basic.RelativeOffsetMixin, basic.ProfileLP64, Darwin32):
    """Support for 64 bit darwin systems."""
    METADATA = dict(
        os="darwin",
        arch="AMD64",
        type="Kernel")

    # Cached kernel slide; 0 means "not resolved yet".
    image_base = 0

    @classmethod
    def Initialize(cls, profile):
        super(Darwin64, cls).Initialize(profile)
        profile.add_types(darwin64_types)

    def GetImageBase(self):
        """Return the kernel image base, resolving it lazily from the session."""
        if not self.image_base:
            slide = self.session.GetParameter("vm_kernel_slide", 0)
            self.image_base = slide
        return self.image_base
|
gpl-2.0
|
hongliang5623/sentry
|
src/sentry/api/endpoints/organization_access_request_details.py
|
22
|
2318
|
from __future__ import absolute_import
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import (
AuditLogEntryEvent, OrganizationAccessRequest, OrganizationMemberTeam
)
class AccessRequestSerializer(serializers.Serializer):
    """Validates the PUT payload for resolving a team access request."""
    # True approves the request, False denies it.
    isApproved = serializers.BooleanField()
class OrganizationAccessRequestDetailsEndpoint(OrganizationEndpoint):
    def put(self, request, organization, request_id):
        """
        Approve or deny a request

        Approve or deny a request.

            {method} {path}

        """
        # Scope the lookup to this organization so a caller cannot act on
        # another organization's access requests by guessing ids.
        try:
            access_request = OrganizationAccessRequest.objects.get(
                id=request_id,
                team__organization=organization,
            )
        except OrganizationAccessRequest.DoesNotExist:
            raise ResourceDoesNotExist

        serializer = AccessRequestSerializer(data=request.DATA, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)

        # Old-style (pre-3.0) DRF API: validated data lives on
        # serializer.object. With partial=True the field may be missing.
        is_approved = serializer.object.get('isApproved')
        if is_approved is None:
            return Response(status=400)

        # Members with global access implicitly belong to every team, so no
        # membership row needs to be touched for them.
        if not access_request.member.has_global_access:
            affected, _ = OrganizationMemberTeam.objects.create_or_update(
                organizationmember=access_request.member,
                team=access_request.team,
                values={
                    'is_active': is_approved,
                }
            )
            # Only audit-log and email when approval actually changed the
            # membership row (avoids duplicates on repeated approvals).
            if affected and is_approved:
                omt = OrganizationMemberTeam.objects.get(
                    organizationmember=access_request.member,
                    team=access_request.team,
                )
                self.create_audit_entry(
                    request=request,
                    organization=organization,
                    target_object=omt.id,
                    target_user=access_request.member.user,
                    event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
                    data=omt.get_audit_log_data(),
                )
                access_request.send_approved_email()

        # The request record is consumed whether approved or denied.
        access_request.delete()
        return Response(status=204)
|
bsd-3-clause
|
mzweilin/pdfrw
|
pdfrw/objects/pdfstring.py
|
1
|
2811
|
# A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2012 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
import re
class PdfString(str):
    ''' A PdfString is an encoded string. It has a decode
        method to get the actual string data out, and there
        is an encode class method to create such a string.
        Like any PDF object, it could be indirect, but it
        defaults to being a direct object.
    '''
    indirect = False

    # Maps PDF backslash escapes to their characters. Escaped line endings
    # ('\' followed by CR, LF, or CRLF) are line continuations and map to
    # the empty string; a lone backslash is also dropped.
    unescape_dict = {'\\b': '\b', '\\f': '\f', '\\n': '\n',
                     '\\r': '\r', '\\t': '\t',
                     '\\\r\n': '', '\\\r': '', '\\\n': '',
                     '\\\\': '\\', '\\': '',
                     }
    # Because the pattern is a single capturing group, re.split keeps the
    # escape sequences in the result, alternating literal/escape chunks.
    unescape_pattern = (r'(\\\\|\\b|\\f|\\n|\\r|\\t'
                        r'|\\\r\n|\\\r|\\\n|\\[0-9]+|\\)')
    unescape_func = re.compile(unescape_pattern).split

    # One-byte hex pairs (a trailing odd digit is allowed by PDF and
    # treated as if padded with 0 by int(x, 16) of a single digit).
    hex_pattern = '([a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])'
    hex_func = re.compile(hex_pattern).split
    # Two-byte (four hex digit) groups, with shorter trailing remnants.
    hex_pattern2 = ('([a-fA-F0-9][a-fA-F0-9][a-fA-F0-9][a-fA-F0-9]|'
                    '[a-fA-F0-9][a-fA-F0-9]|[a-fA-F0-9])')
    hex_func2 = re.compile(hex_pattern2).split
    # Indexed by the boolean twobytes flag in decode_hex().
    hex_funcs = hex_func, hex_func2

    def decode_regular(self, remap=chr):
        """Decode a literal '(...)' PDF string, resolving backslash escapes."""
        assert self[0] == '(' and self[-1] == ')'
        mylist = self.unescape_func(self[1:-1])
        result = []
        unescape = self.unescape_dict.get
        for chunk in mylist:
            chunk = unescape(chunk, chunk)
            # Anything still starting with '\' is an octal escape (\ddd).
            if chunk.startswith('\\') and len(chunk) > 1:
                value = int(chunk[1:], 8)
                # FIXME: TODO: Handle unicode here
                if value > 127:
                    value = 127
                chunk = remap(value)
            if chunk:
                result.append(chunk)
        return ''.join(result)

    def decode_hex(self, remap=chr, twobytes=False):
        """Decode a hex '<...>' PDF string, one or two bytes per character."""
        data = ''.join(self.split())
        data = self.hex_funcs[twobytes](data)
        # Splitting on the capturing group alternates delimiter/digit chunks:
        # even indices must reduce to just the surrounding '<' and '>'.
        chars = data[1::2]
        other = data[0::2]
        assert (other[0] == '<' and
                other[-1] == '>' and
                ''.join(other) == '<>'), self
        return ''.join([remap(int(x, 16)) for x in chars])

    def decode(self, remap=chr, twobytes=False):
        """Decode either form of PDF string based on its leading delimiter."""
        if self.startswith('('):
            return self.decode_regular(remap)
        else:
            return self.decode_hex(remap, twobytes)

    def encode(cls, source, usehex=False):
        """Build a literal PDF string from source, escaping delimiters.

        NOTE(review): Python 2 only - references the `unicode` builtin.
        """
        assert not usehex, "Not supported yet"
        if isinstance(source, unicode):
            source = source.encode('utf-8')
        else:
            source = str(source)
        source = source.replace('\\', '\\\\')
        source = source.replace('(', '\\(')
        source = source.replace(')', '\\)')
        return cls('(' + source + ')')
    encode = classmethod(encode)
|
mit
|
jacquesd/indico
|
doc/dev/source/conf.py
|
2
|
6719
|
# -*- coding: utf-8 -*-
#
# cds-indico documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 29 13:19:24 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path = [os.path.abspath('../../../'), os.path.abspath('../../../indico'), os.path.abspath('.')] + sys.path
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'repoze.sphinx.autointerface',
'exec_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Indico'
copyright = u'2015, Indico Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9.6'
# The full version, including alpha/beta/rc tags.
release = '1.9.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to the source directory, that shouldn't be
# searched for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# include __init__() as well
autoclass_content = "both"
autodoc_default_flags = ['members', 'show-inheritance']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'indicodoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'indico.tex', u'Indico Documentation',
u'Indico Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
gpl-3.0
|
henri-hulski/cartridge_braintree
|
cartridge_braintree/countries.py
|
2
|
7200
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
# This module defines the dictionary of countries (ISO-3166) supported by
# Braintree, with Alpha2 codes as keys and translatable country names as values
# Data originally copied from
# https://developers.braintreepayments.com/reference/general/countries/python
# on 25 November 2015
COUNTRIES = {
"AF": _("Afghanistan"),
"AX": _("Åland"),
"AL": _("Albania"),
"DZ": _("Algeria"),
"AS": _("American Samoa"),
"AD": _("Andorra"),
"AO": _("Angola"),
"AI": _("Anguilla"),
"AQ": _("Antarctica"),
"AG": _("Antigua and Barbuda"),
"AR": _("Argentina"),
"AM": _("Armenia"),
"AW": _("Aruba"),
"AU": _("Australia"),
"AT": _("Austria"),
"AZ": _("Azerbaijan"),
"BS": _("Bahamas"),
"BH": _("Bahrain"),
"BD": _("Bangladesh"),
"BB": _("Barbados"),
"BY": _("Belarus"),
"BE": _("Belgium"),
"BZ": _("Belize"),
"BJ": _("Benin"),
"BM": _("Bermuda"),
"BT": _("Bhutan"),
"BO": _("Bolivia"),
"BQ": _("Bonaire, Sint Eustatius and Saba"),
"BA": _("Bosnia and Herzegovina"),
"BW": _("Botswana"),
"BV": _("Bouvet Island"),
"BR": _("Brazil"),
"IO": _("British Indian Ocean Territory"),
"BN": _("Brunei Darussalam"),
"BG": _("Bulgaria"),
"BF": _("Burkina Faso"),
"BI": _("Burundi"),
"KH": _("Cambodia"),
"CM": _("Cameroon"),
"CA": _("Canada"),
"CV": _("Cape Verde"),
"KY": _("Cayman Islands"),
"CF": _("Central African Republic"),
"TD": _("Chad"),
"CL": _("Chile"),
"CN": _("China"),
"CX": _("Christmas Island"),
"CC": _("Cocos (Keeling) Islands"),
"CO": _("Colombia"),
"KM": _("Comoros"),
"CG": _("Congo (Brazzaville)"),
"CD": _("Congo (Kinshasa)"),
"CK": _("Cook Islands"),
"CR": _("Costa Rica"),
"CI": _("Côte d'Ivoire"),
"HR": _("Croatia"),
"CU": _("Cuba"),
"CW": _("Curaçao"),
"CY": _("Cyprus"),
"CZ": _("Czech Republic"),
"DK": _("Denmark"),
"DJ": _("Djibouti"),
"DM": _("Dominica"),
"DO": _("Dominican Republic"),
"EC": _("Ecuador"),
"EG": _("Egypt"),
"SV": _("El Salvador"),
"GQ": _("Equatorial Guinea"),
"ER": _("Eritrea"),
"EE": _("Estonia"),
"ET": _("Ethiopia"),
"FK": _("Falkland Islands"),
"FO": _("Faroe Islands"),
"FJ": _("Fiji"),
"FI": _("Finland"),
"FR": _("France"),
"GF": _("French Guiana"),
"PF": _("French Polynesia"),
"TF": _("French Southern Lands"),
"GA": _("Gabon"),
"GM": _("Gambia"),
"GE": _("Georgia"),
"DE": _("Germany"),
"GH": _("Ghana"),
"GI": _("Gibraltar"),
"GR": _("Greece"),
"GL": _("Greenland"),
"GD": _("Grenada"),
"GP": _("Guadeloupe"),
"GU": _("Guam"),
"GT": _("Guatemala"),
"GG": _("Guernsey"),
"GN": _("Guinea"),
"GW": _("Guinea-Bissau"),
"GY": _("Guyana"),
"HT": _("Haiti"),
"HM": _("Heard and McDonald Islands"),
"HN": _("Honduras"),
"HK": _("Hong Kong"),
"HU": _("Hungary"),
"IS": _("Iceland"),
"IN": _("India"),
"ID": _("Indonesia"),
"IR": _("Iran"),
"IQ": _("Iraq"),
"IE": _("Ireland"),
"IM": _("Isle of Man"),
"IL": _("Israel"),
"IT": _("Italy"),
"JM": _("Jamaica"),
"JP": _("Japan"),
"JE": _("Jersey"),
"JO": _("Jordan"),
"KZ": _("Kazakhstan"),
"KE": _("Kenya"),
"KI": _("Kiribati"),
"KP": _("Korea, North"),
"KR": _("Korea, South"),
"KW": _("Kuwait"),
"KG": _("Kyrgyzstan"),
"LA": _("Laos"),
"LV": _("Latvia"),
"LB": _("Lebanon"),
"LS": _("Lesotho"),
"LR": _("Liberia"),
"LY": _("Libya"),
"LI": _("Liechtenstein"),
"LT": _("Lithuania"),
"LU": _("Luxembourg"),
"MO": _("Macau"),
"MK": _("Macedonia"),
"MG": _("Madagascar"),
"MW": _("Malawi"),
"MY": _("Malaysia"),
"MV": _("Maldives"),
"ML": _("Mali"),
"MT": _("Malta"),
"MH": _("Marshall Islands"),
"MQ": _("Martinique"),
"MR": _("Mauritania"),
"MU": _("Mauritius"),
"YT": _("Mayotte"),
"MX": _("Mexico"),
"FM": _("Micronesia"),
"MD": _("Moldova"),
"MC": _("Monaco"),
"MN": _("Mongolia"),
"ME": _("Montenegro"),
"MS": _("Montserrat"),
"MA": _("Morocco"),
"MZ": _("Mozambique"),
"MM": _("Myanmar"),
"NA": _("Namibia"),
"NR": _("Nauru"),
"NP": _("Nepal"),
"NL": _("Netherlands"),
"AN": _("Netherlands Antilles"),
"NC": _("New Caledonia"),
"NZ": _("New Zealand"),
"NI": _("Nicaragua"),
"NE": _("Niger"),
"NG": _("Nigeria"),
"NU": _("Niue"),
"NF": _("Norfolk Island"),
"MP": _("Northern Mariana Islands"),
"NO": _("Norway"),
"OM": _("Oman"),
"PK": _("Pakistan"),
"PW": _("Palau"),
"PS": _("Palestine"),
"PA": _("Panama"),
"PG": _("Papua New Guinea"),
"PY": _("Paraguay"),
"PE": _("Peru"),
"PH": _("Philippines"),
"PN": _("Pitcairn"),
"PL": _("Poland"),
"PT": _("Portugal"),
"PR": _("Puerto Rico"),
"QA": _("Qatar"),
"RE": _("Reunion"),
"RO": _("Romania"),
"RU": _("Russian Federation"),
"RW": _("Rwanda"),
"BL": _("Saint Barthélemy"),
"SH": _("Saint Helena"),
"KN": _("Saint Kitts and Nevis"),
"LC": _("Saint Lucia"),
"MF": _("Saint Martin (French part)"),
"PM": _("Saint Pierre and Miquelon"),
"VC": _("Saint Vincent and the Grenadines"),
"WS": _("Samoa"),
"SM": _("San Marino"),
"ST": _("Sao Tome and Principe"),
"SA": _("Saudi Arabia"),
"SN": _("Senegal"),
"RS": _("Serbia"),
"SC": _("Seychelles"),
"SL": _("Sierra Leone"),
"SG": _("Singapore"),
"SX": _("Sint Maarten (Dutch part)"),
"SK": _("Slovakia"),
"SI": _("Slovenia"),
"SB": _("Solomon Islands"),
"SO": _("Somalia"),
"ZA": _("South Africa"),
"GS": _("South Georgia and South Sandwich Islands"),
"SS": _("South Sudan"),
"ES": _("Spain"),
"LK": _("Sri Lanka"),
"SD": _("Sudan"),
"SR": _("Suriname"),
"SJ": _("Svalbard and Jan Mayen Islands"),
"SZ": _("Swaziland"),
"SE": _("Sweden"),
"CH": _("Switzerland"),
"SY": _("Syria"),
"TW": _("Taiwan"),
"TJ": _("Tajikistan"),
"TZ": _("Tanzania"),
"TH": _("Thailand"),
"TL": _("Timor-Leste"),
"TG": _("Togo"),
"TK": _("Tokelau"),
"TO": _("Tonga"),
"TT": _("Trinidad and Tobago"),
"TN": _("Tunisia"),
"TR": _("Turkey"),
"TM": _("Turkmenistan"),
"TC": _("Turks and Caicos Islands"),
"TV": _("Tuvalu"),
"UG": _("Uganda"),
"UA": _("Ukraine"),
"AE": _("United Arab Emirates"),
"GB": _("United Kingdom"),
"UM": _("United States Minor Outlying Islands"),
"US": _("United States of America"),
"UY": _("Uruguay"),
"UZ": _("Uzbekistan"),
"VU": _("Vanuatu"),
"VA": _("Vatican City"),
"VE": _("Venezuela"),
"VN": _("Vietnam"),
"VG": _("Virgin Islands, British"),
"VI": _("Virgin Islands, U.S."),
"WF": _("Wallis and Futuna Islands"),
"EH": _("Western Sahara"),
"YE": _("Yemen"),
"ZM": _("Zambia"),
"ZW": _("Zimbabwe"),
}
|
bsd-2-clause
|
ryano144/intellij-community
|
plugins/hg4idea/testData/bin/hgext/convert/darcs.py
|
94
|
7726
|
# darcs.py - darcs support for the convert extension
#
# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, checktool, commandline, commit, converter_source
from mercurial.i18n import _
from mercurial import util
import os, shutil, tempfile, re
# The naming drift of ElementTree is fun!
try:
from xml.etree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from xml.etree.ElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.cElementTree import ElementTree, XMLParser
except ImportError:
try:
from elementtree.ElementTree import ElementTree, XMLParser
except ImportError:
pass
class darcs_source(converter_source, commandline):
    """Convert source that reads history from a darcs repository.

    NOTE(review): Python 2 code (`unicode` builtin, octal literal 0111).
    """

    def __init__(self, ui, path, rev=None):
        converter_source.__init__(self, ui, path, rev=rev)
        commandline.__init__(self, ui, 'darcs')

        # check for _darcs, ElementTree so that we can easily skip
        # test-convert-darcs if ElementTree is not around
        if not os.path.exists(os.path.join(path, '_darcs')):
            raise NoRepo(_("%s does not look like a darcs repository") % path)

        checktool('darcs')
        version = self.run0('--version').splitlines()[0].strip()
        if version < '2.1':
            raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
                             version)

        if "ElementTree" not in globals():
            raise util.Abort(_("Python ElementTree module is not available"))

        self.path = os.path.realpath(path)

        # Last revision pulled into the temporary repo (see pull/getfile).
        self.lastrev = None
        # hash -> <patch> XML element.
        self.changes = {}
        # hash -> list of parent hashes; the key None maps to the tip.
        self.parents = {}
        # tag name -> hash of the patch the tag points at.
        self.tags = {}

        # Check darcs repository format
        format = self.format()
        if format:
            if format in ('darcs-1.0', 'hashed'):
                raise NoRepo(_("%s repository format is unsupported, "
                               "please upgrade") % format)
        else:
            self.ui.warn(_('failed to detect repository format!'))

    def before(self):
        """Create a scratch darcs repo and index the source changelog."""
        self.tmppath = tempfile.mkdtemp(
            prefix='convert-' + os.path.basename(self.path) + '-')
        output, status = self.run('init', repodir=self.tmppath)
        self.checkexit(status)

        tree = self.xml('changes', xml_output=True, summary=True,
                        repodir=self.path)
        tagname = None
        child = None
        # Patches arrive newest-first; thread parent links accordingly.
        for elt in tree.findall('patch'):
            node = elt.get('hash')
            name = elt.findtext('name', '')
            if name.startswith('TAG '):
                tagname = name[4:].strip()
            elif tagname is not None:
                self.tags[tagname] = node
                tagname = None
            self.changes[node] = elt
            self.parents[child] = [node]
            child = node
        # The oldest patch has no parent.
        self.parents[child] = []

    def after(self):
        """Remove the scratch repository created in before()."""
        self.ui.debug('cleaning up %s\n' % self.tmppath)
        shutil.rmtree(self.tmppath, ignore_errors=True)

    def recode(self, s, encoding=None):
        if isinstance(s, unicode):
            # XMLParser returns unicode objects for anything it can't
            # encode into ASCII. We convert them back to str to get
            # recode's normal conversion behavior.
            s = s.encode('latin-1')
        return super(darcs_source, self).recode(s, encoding)

    def xml(self, cmd, **kwargs):
        """Run a darcs command and parse its XML output into an Element."""
        # NOTE: darcs is currently encoding agnostic and will print
        # patch metadata byte-for-byte, even in the XML changelog.
        etree = ElementTree()
        # While we are decoding the XML as latin-1 to be as liberal as
        # possible, etree will still raise an exception if any
        # non-printable characters are in the XML changelog.
        parser = XMLParser(encoding='latin-1')
        p = self._run(cmd, **kwargs)
        etree.parse(p.stdout, parser=parser)
        p.wait()
        self.checkexit(p.returncode)
        return etree.getroot()

    def format(self):
        """Return the repository format string, or None if undetectable."""
        output, status = self.run('show', 'repo', no_files=True,
                                  repodir=self.path)
        self.checkexit(status)
        m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
        if not m:
            return None
        return ','.join(sorted(f.strip() for f in m.group(1).split(',')))

    def manifest(self):
        """List the files currently present in the scratch repository."""
        man = []
        output, status = self.run('show', 'files', no_directories=True,
                                  repodir=self.tmppath)
        self.checkexit(status)
        for line in output.split('\n'):
            # Strip the leading './' that darcs prints before each path.
            path = line[2:]
            if path:
                man.append(path)
        return man

    def getheads(self):
        # parents[None] holds the newest patch (see before()).
        return self.parents[None]

    def getcommit(self, rev):
        """Build a commit object from the cached <patch> element for rev."""
        elt = self.changes[rev]
        date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
        desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
        # etree can return unicode objects for name, comment, and author,
        # so recode() is used to ensure str objects are emitted.
        return commit(author=self.recode(elt.get('author')),
                      date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
                      desc=self.recode(desc).strip(),
                      parents=self.parents[rev])

    def pull(self, rev):
        """Pull a single patch into the scratch repo, reverting conflicts."""
        output, status = self.run('pull', self.path, all=True,
                                  match='hash %s' % rev,
                                  no_test=True, no_posthook=True,
                                  external_merge='/bin/false',
                                  repodir=self.tmppath)
        if status:
            if output.find('We have conflicts in') == -1:
                self.checkexit(status, output)
            output, status = self.run('revert', all=True, repodir=self.tmppath)
            self.checkexit(status, output)

    def getchanges(self, rev):
        """Return (sorted changed files, copies dict) for revision rev."""
        copies = {}
        changes = []
        man = None
        for elt in self.changes[rev].find('summary').getchildren():
            if elt.tag in ('add_directory', 'remove_directory'):
                continue
            if elt.tag == 'move':
                # Lazily fetch the manifest only when a move is seen.
                if man is None:
                    man = self.manifest()
                source, dest = elt.get('from'), elt.get('to')
                if source in man:
                    # File move
                    changes.append((source, rev))
                    changes.append((dest, rev))
                    copies[dest] = source
                else:
                    # Directory move, deduce file moves from manifest
                    source = source + '/'
                    for f in man:
                        if not f.startswith(source):
                            continue
                        fdest = dest + '/' + f[len(source):]
                        changes.append((f, rev))
                        changes.append((fdest, rev))
                        copies[fdest] = f
            else:
                changes.append((elt.text.strip(), rev))
        # Materialize this revision so getfile() can read its contents.
        self.pull(rev)
        self.lastrev = rev
        return sorted(changes), copies

    def getfile(self, name, rev):
        """Read file contents/mode from the scratch repo for revision rev."""
        if rev != self.lastrev:
            raise util.Abort(_('internal calling inconsistency'))
        path = os.path.join(self.tmppath, name)
        data = util.readfile(path)
        mode = os.lstat(path).st_mode
        # Report 'x' when any execute bit is set (0111 == py2 octal).
        mode = (mode & 0111) and 'x' or ''
        return data, mode

    def gettags(self):
        return self.tags
|
apache-2.0
|
docker-infra/ansible-modules-core
|
network/basics/slurp.py
|
134
|
2115
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: slurp
version_added: historical
short_description: Slurps a file from remote nodes
description:
- This module works like M(fetch). It is used for fetching a base64-
encoded blob containing the data in a remote file.
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory.
required: true
default: null
aliases: []
notes:
- "See also: M(fetch)"
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
ansible host -m slurp -a 'src=/tmp/xx'
host | success >> {
"content": "aGVsbG8gQW5zaWJsZSB3b3JsZAo=",
"encoding": "base64"
}
'''
import base64
def main():
    """Read the requested remote file and return it base64-encoded.

    Fails the module run when the file is missing or unreadable.
    """
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True, aliases=['path']),
        ),
        supports_check_mode=True
    )
    source = os.path.expanduser(module.params['src'])

    if not os.path.exists(source):
        module.fail_json(msg="file not found: %s" % source)
    if not os.access(source, os.R_OK):
        module.fail_json(msg="file is not readable: %s" % source)

    # BUGFIX: the original used the Python 2-only file() builtin and never
    # closed the handle. open() in binary mode works on both Python 2 and 3,
    # and the context manager releases the descriptor deterministically.
    with open(source, 'rb') as source_fh:
        source_content = source_fh.read()
    data = base64.b64encode(source_content)

    module.exit_json(content=data, source=source, encoding='base64')

# import module snippets
from ansible.module_utils.basic import *

main()
|
gpl-3.0
|
gaddman/ansible
|
lib/ansible/modules/cloud/misc/virt_pool.py
|
43
|
21722
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: virt_pool
author: "Maciej Delmanowski (@drybjed)"
version_added: "2.0"
short_description: Manage libvirt storage pools
description:
- Manage I(libvirt) storage pools.
options:
name:
required: false
aliases: [ "pool" ]
description:
- name of the storage pool being managed. Note that pool must be previously
defined with xml.
state:
required: false
choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
description:
- specify which state you want a storage pool to be in.
If 'active', pool will be started.
If 'present', ensure that pool is present but do not change its
state; if it's missing, you need to specify xml argument.
If 'inactive', pool will be stopped.
If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.
If 'deleted', pool contents will be deleted and then pool undefined.
command:
required: false
choices: [ "define", "build", "create", "start", "stop", "destroy",
"delete", "undefine", "get_xml", "list_pools", "facts",
"info", "status" ]
description:
- in addition to state management, various non-idempotent commands are available.
See examples.
autostart:
required: false
type: bool
description:
- Specify if a given storage pool should be started automatically on system boot.
uri:
required: false
default: "qemu:///system"
description:
- I(libvirt) connection uri.
xml:
required: false
description:
- XML document used with the define command.
mode:
required: false
choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
description:
- Pass additional parameters to 'build' or 'delete' commands.
requirements:
- "python >= 2.6"
- "python-libvirt"
- "python-lxml"
'''
EXAMPLES = '''
# Define a new storage pool
- virt_pool:
command: define
name: vms
xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
# Build a storage pool if it does not exist
- virt_pool:
command: build
name: vms
# Start a storage pool
- virt_pool:
command: create
name: vms
# List available pools
- virt_pool:
command: list_pools
# Get XML data of a specified pool
- virt_pool:
command: get_xml
name: vms
# Stop a storage pool
- virt_pool:
command: destroy
name: vms
# Delete a storage pool (destroys contents)
- virt_pool:
command: delete
name: vms
# Undefine a storage pool
- virt_pool:
command: undefine
name: vms
# Gather facts about storage pools
# Facts will be available as 'ansible_libvirt_pools'
- virt_pool:
command: facts
# Gather information about pools managed by 'libvirt' remotely using uri
- virt_pool:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: storage_pools
# Ensure that a pool is active (needs to be defined and built first)
- virt_pool:
state: active
name: vms
# Ensure that a pool is inactive
- virt_pool:
state: inactive
name: vms
# Ensure that a given pool will be started at boot
- virt_pool:
autostart: yes
name: vms
# Disable autostart for a given pool
- virt_pool:
autostart: no
name: vms
'''
try:
import libvirt
except ImportError:
HAS_VIRT = False
else:
HAS_VIRT = True
try:
from lxml import etree
except ImportError:
HAS_XML = False
else:
HAS_XML = True
from ansible.module_utils.basic import AnsibleModule
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
'undefine', 'destroy', 'get_xml', 'define', 'refresh']
HOST_COMMANDS = ['list_pools', 'facts', 'info']
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0: "inactive",
1: "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0: "no",
1: "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0: "no",
1: "yes"
}
ENTRY_STATE_INFO_MAP = {
0: "inactive",
1: "building",
2: "running",
3: "degraded",
4: "inaccessible"
}
ENTRY_BUILD_FLAGS_MAP = {
"new": 0,
"repair": 1,
"resize": 2,
"no_overwrite": 4,
"overwrite": 8
}
ENTRY_DELETE_FLAGS_MAP = {
"normal": 0,
"zeroed": 1
}
ALL_MODES = []
ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
class EntryNotFound(Exception):
    """Raised when a storage pool with the requested name does not exist."""
    pass
class LibvirtConnection(object):
    """Thin wrapper around a libvirt connection for storage-pool operations.

    In Ansible check mode the mutating methods do not touch the hypervisor;
    they predict whether a change would occur and, if so, terminate the
    module by calling module.exit_json(changed=True).
    """
    def __init__(self, uri, module):
        self.module = module
        conn = libvirt.open(uri)
        if not conn:
            raise Exception("hypervisor connection failure")
        self.conn = conn
    def find_entry(self, entryid):
        # entryid = -1 returns a list of everything
        results = []
        # Get active entries
        for name in self.conn.listStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)
        # Get inactive entries
        for name in self.conn.listDefinedStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)
        if entryid == -1:
            return results
        for entry in results:
            if entry.name() == entryid:
                return entry
        raise EntryNotFound("storage pool %s not found" % entryid)
    def create(self, entryid):
        # Activate (start) the pool.
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except:
                # NOTE(review): bare except — presumably meant for
                # EntryNotFound, but it also hides libvirt errors.
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)
    def destroy(self, entryid):
        # Stop the pool (libvirt "destroy" deactivates; it does not delete data).
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)
    def undefine(self, entryid):
        if not self.module.check_mode:
            return self.find_entry(entryid).undefine()
        else:
            if not self.find_entry(entryid):
                return self.module.exit_json(changed=True)
    def get_status2(self, entry):
        # Variant of get_status() taking an already-resolved pool object.
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
    def get_status(self, entryid):
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except:
                # NOTE(review): "inactive" is a *value*, not a key, of
                # ENTRY_STATE_ACTIVE_MAP, so this lookup always yields
                # "unknown" rather than "inactive".
                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")
    def get_uuid(self, entryid):
        return self.find_entry(entryid).UUIDString()
    def get_xml(self, entryid):
        return self.find_entry(entryid).XMLDesc(0)
    def get_info(self, entryid):
        return self.find_entry(entryid).info()
    def get_volume_count(self, entryid):
        return self.find_entry(entryid).numOfVolumes()
    def get_volume_names(self, entryid):
        return self.find_entry(entryid).listVolumes()
    def get_devices(self, entryid):
        # Device paths from the pool XML; ValueError if none are defined.
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        if xml.xpath('/pool/source/device'):
            result = []
            for device in xml.xpath('/pool/source/device'):
                result.append(device.get('path'))
        try:
            return result
        except:
            # 'result' is unbound (NameError) when no devices matched.
            raise ValueError('No devices specified')
    def get_format(self, entryid):
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/format')[0].get('type')
        except:
            raise ValueError('Format not specified')
        return result
    def get_host(self, entryid):
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/host')[0].get('name')
        except:
            raise ValueError('Host not specified')
        return result
    def get_source_path(self, entryid):
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/dir')[0].get('path')
        except:
            raise ValueError('Source path not specified')
        return result
    def get_path(self, entryid):
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.xpath('/pool/target/path')[0].text
    def get_type(self, entryid):
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.get('type')
    def build(self, entryid, flags):
        if not self.module.check_mode:
            return self.find_entry(entryid).build(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except:
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)
    def delete(self, entryid, flags):
        if not self.module.check_mode:
            return self.find_entry(entryid).delete(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except:
                return self.module.exit_json(changed=True)
            if state:
                return self.module.exit_json(changed=True)
    def get_autostart(self, entryid):
        # Human-readable "yes"/"no"; see get_autostart2() for the raw value.
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")
    def get_autostart2(self, entryid):
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except:
                return self.module.exit_json(changed=True)
    def set_autostart(self, entryid, val):
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)
    def refresh(self, entryid):
        return self.find_entry(entryid).refresh()
    def get_persistent(self, entryid):
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")
    def define_from_xml(self, entryid, xml):
        if not self.module.check_mode:
            return self.conn.storagePoolDefineXML(xml)
        else:
            try:
                self.find_entry(entryid)
            except:
                return self.module.exit_json(changed=True)
class VirtStoragePool(object):
    """High-level storage pool operations, mapping the module's commands
    onto LibvirtConnection calls."""
    def __init__(self, uri, module):
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)
    def get_pool(self, entryid):
        return self.conn.find_entry(entryid)
    def list_pools(self, state=None):
        # Pool names, optionally filtered by status ("active"/"inactive").
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results
    def state(self):
        # "name status" strings, one per pool.
        results = []
        for entry in self.list_pools():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry, state_blurb))
        return results
    def autostart(self, entryid):
        return self.conn.set_autostart(entryid, True)
    def get_autostart(self, entryid):
        return self.conn.get_autostart2(entryid)
    def set_autostart(self, entryid, state):
        return self.conn.set_autostart(entryid, state)
    def create(self, entryid):
        return self.conn.create(entryid)
    def start(self, entryid):
        # 'start' is an alias for libvirt's create().
        return self.conn.create(entryid)
    def stop(self, entryid):
        return self.conn.destroy(entryid)
    def destroy(self, entryid):
        return self.conn.destroy(entryid)
    def undefine(self, entryid):
        return self.conn.undefine(entryid)
    def status(self, entryid):
        return self.conn.get_status(entryid)
    def get_xml(self, entryid):
        return self.conn.get_xml(entryid)
    def define(self, entryid, xml):
        return self.conn.define_from_xml(entryid, xml)
    def build(self, entryid, flags):
        # Translate the symbolic mode into libvirt's numeric flag (default 0).
        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))
    def delete(self, entryid, flags):
        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))
    def refresh(self, entryid):
        return self.conn.refresh(entryid)
    def info(self):
        return self.facts(facts_mode='info')
    def facts(self, facts_mode='facts'):
        """Collect per-pool details; wrapped as Ansible facts ('facts' mode)
        or under a top-level 'pools' key ('info' mode)."""
        results = dict()
        for entry in self.list_pools():
            results[entry] = dict()
            if self.conn.find_entry(entry):
                data = self.conn.get_info(entry)
                # libvirt returns maxMem, memory, and cpuTime as long()'s, which
                # xmlrpclib tries to convert to regular int's during serialization.
                # This throws exceptions, so convert them to strings here and
                # assume the other end of the xmlrpc connection can figure things
                # out or doesn't care.
                results[entry] = {
                    "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
                    "size_total": str(data[1]),
                    "size_used": str(data[2]),
                    "size_available": str(data[3]),
                }
                results[entry]["autostart"] = self.conn.get_autostart(entry)
                results[entry]["persistent"] = self.conn.get_persistent(entry)
                results[entry]["state"] = self.conn.get_status(entry)
                results[entry]["path"] = self.conn.get_path(entry)
                results[entry]["type"] = self.conn.get_type(entry)
                results[entry]["uuid"] = self.conn.get_uuid(entry)
                if self.conn.find_entry(entry).isActive():
                    results[entry]["volume_count"] = self.conn.get_volume_count(entry)
                    results[entry]["volumes"] = list()
                    for volume in self.conn.get_volume_names(entry):
                        results[entry]["volumes"].append(volume)
                else:
                    results[entry]["volume_count"] = -1
                # The following properties are pool-type dependent and may be
                # absent, in which case the getters raise ValueError.
                try:
                    results[entry]["host"] = self.conn.get_host(entry)
                except ValueError:
                    pass
                try:
                    results[entry]["source_path"] = self.conn.get_source_path(entry)
                except ValueError:
                    pass
                try:
                    results[entry]["format"] = self.conn.get_format(entry)
                except ValueError:
                    pass
                try:
                    devices = self.conn.get_devices(entry)
                    results[entry]["devices"] = devices
                except ValueError:
                    pass
            else:
                results[entry]["state"] = self.conn.get_status(entry)
        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_pools"] = results
        elif facts_mode == 'info':
            facts['pools'] = results
        return facts
def core(module):
    """Dispatch the requested operation and return (rc, result_dict).

    Exactly one of 'state', 'command' or 'autostart' drives the behaviour;
    if none is supplied the module fails.  May terminate early via
    module.fail_json()/exit_json().
    """
    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)
    mode = module.params.get('mode', None)
    v = VirtStoragePool(uri, module)
    res = {}
    if state and command == 'list_pools':
        res = v.list_pools(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res
    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")
        res['changed'] = False
        if state in ['active']:
            # BUGFIX: compare strings with !=, not 'is not' (identity test,
            # which only worked by accident of CPython string interning).
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in ['present']:
            try:
                v.get_pool(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in ['inactive']:
            entries = v.list_pools()
            if name in entries:
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in ['undefined', 'absent']:
            entries = v.list_pools()
            if name in entries:
                # A pool must be stopped before it can be undefined.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        elif state in ['deleted']:
            entries = v.list_pools()
            if name in entries:
                # Stop, delete the underlying storage, then undefine.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                v.delete(name, mode)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")
        return VIRT_SUCCESS, res
    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                try:
                    v.get_pool(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                return VIRT_SUCCESS, res
            elif command == 'build':
                res = v.build(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            elif command == 'delete':
                res = v.delete(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            # Remaining entry commands map directly to VirtStoragePool methods.
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res
        elif hasattr(v, command):
            # Host-level commands take no name argument.
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res
        else:
            module.fail_json(msg="Command %s not recognized" % command)
    if autostart is not None:
        if not name:
            module.fail_json(msg="state change requires a specified name")
        res['changed'] = False
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)
        return VIRT_SUCCESS, res
    module.fail_json(msg="expected state or command parameter to be specified")
def main():
    """Ansible entry point: declare the argument spec, verify optional
    dependencies, then run core() and report its result."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['pool']),
            state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
            command=dict(choices=ALL_COMMANDS),
            uri=dict(default='qemu:///system'),
            xml=dict(),
            autostart=dict(type='bool'),
            mode=dict(choices=ALL_MODES),
        ),
        supports_check_mode=True
    )
    # libvirt and lxml are probed at import time (HAS_VIRT/HAS_XML); fail
    # with a clear message rather than a raw ImportError.
    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )
    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )
    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=str(e))
    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
if __name__ == '__main__':
    main()
|
gpl-3.0
|
sradl1981/LIGGGHTS-PUBLIC-ParScale
|
tools/moltemplate/common/amber/amberparm_dihedral_to_lt.py
|
8
|
5261
|
#!/usr/bin/env python
# SOME UGLY CODE HERE
import sys
# Read an AMBER/GAFF dihedral parameter section from stdin.  Per the format
# notes at the bottom of this file, columns 0-10 hold the four '-'-separated
# atom types and the remainder holds IDIVF, PK, PHASE, PN.
lines_gaff = sys.stdin.readlines()
dihedral_style_name = 'fourier'
# Each entry: [type_string, K, n, d, comments]; additional Fourier terms of
# the same series are folded into the entry by the merge loop below.
in_dihedral_coeffs = []
for i in range(0, len(lines_gaff)):
    line = lines_gaff[i]
    atypes = line[:11].split('-')
    atype1 = atypes[0].strip()
    atype2 = atypes[1].strip()
    atype3 = atypes[2].strip()
    atype4 = atypes[3].strip()
    # 'X' is AMBER's wildcard type; moltemplate uses '*'.  (These at1..at4
    # values are unused here; they are recomputed in the output loop below.)
    at1 = atype1.replace('X','*')
    at2 = atype2.replace('X','*')
    at3 = atype3.replace('X','*')
    at4 = atype4.replace('X','*')
    dihedraltype = '@dihedral:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
    tokens= line[11:].split()
    npth = float(tokens[0])  # IDIVF
    Kn = float(tokens[1])    # PK
    Kn /= npth # The coeff for each fourier term is Kn/npth
    # ...I THINK (?). (Very confusing. See documentation below...)
    dn = float(tokens[2])    # PHASE
    n = int(float(tokens[3]))  # PN; negative PN marks a continued series
    comments=' # '+(' '.join(tokens[4:]))
    in_dihedral_coeffs.append([dihedraltype, Kn, n, dn, comments])
    #print(Kn, n, dn)
#for entry in in_dihedral_coeffs:
#    print(entry)
#exit()
# ---- processing dihedral fourier series ----
# ---- (negative "n" values means the
# ---- Fourier series is not yet complete.
# If entry i-1 has a negative periodicity, entry i continues the same
# Fourier series: append this term's (K, n, d) to entry i-1 (keeping the
# comment string last) and remove entry i from the list.
i = 0
while i < len(in_dihedral_coeffs):
    type_str = in_dihedral_coeffs[i][0]
    Kn = in_dihedral_coeffs[i][1]
    n = in_dihedral_coeffs[i][2]
    dn = in_dihedral_coeffs[i][3]
    #if (i>0):
    #    sys.stderr.write('prev_n='+str(in_dihedral_coeffs[i-1][-3])+'\n')
    #sys.stderr.write('n='+str(n)+'\n')
    if ((i>0) and (in_dihedral_coeffs[i-1][-3] < 0)):
        #sys.stdout.write('interation_before_append: '+str(in_dihedral_coeffs[i-1])+'\n')
        assert(in_dihedral_coeffs[i-1][0] == in_dihedral_coeffs[i][0])
        # Flip the previous term's periodicity back to positive, then insert
        # this term's K, n, d ahead of the trailing comment string.
        in_dihedral_coeffs[i-1][-3] = -in_dihedral_coeffs[i-1][-3]
        comments = in_dihedral_coeffs[i-1][-1]
        in_dihedral_coeffs[i-1][-1] = Kn
        in_dihedral_coeffs[i-1].append(n)
        in_dihedral_coeffs[i-1].append(dn)
        in_dihedral_coeffs[i-1].append(comments)
        #sys.stdout.write('interation_after_append: '+str(in_dihedral_coeffs[i-1])+'\n')
        del in_dihedral_coeffs[i]
    #elif len(in_dihedral_coeffs) < 3:
    #    del in_dihedral_coeffs[i]
    else:
        i += 1
# Render each merged entry as a LAMMPS "dihedral_coeff ... fourier ..." line,
# replacing the entry in-place with the finished string.
for i in range(0, len(in_dihedral_coeffs)):
    type_str = in_dihedral_coeffs[i][0]
    params = in_dihedral_coeffs[i][1:]
    # BUGFIX: list() is required under Python 3, where map() returns an
    # iterator that has no len(); '//' keeps the term count an integer
    # (true division would print e.g. "1.0" instead of "1").
    params = list(map(str, params))
    num_fourier_terms = (len(params)-1)//3
    dihedral_coeff_str = 'dihedral_coeff '+type_str+' '+\
                         dihedral_style_name+' '+\
                         str(num_fourier_terms)+' '+ \
                         ' '.join(params)
    in_dihedral_coeffs[i] = dihedral_coeff_str
# ---- finished processing dihedral fourier series ----
# Emit the moltemplate output: the coeff lines inside "In Settings", then
# one atom-pattern line per dihedral inside "Data Dihedrals By Type".
sys.stdout.write(' write_once(\"In Settings\") {\n ')
sys.stdout.write('\n '.join(in_dihedral_coeffs)+'\n')
sys.stdout.write(' } # (end of dihedral_coeffs)\n')
sys.stdout.write('\n')
sys.stdout.write(' write_once("Data Dihedrals By Type") {\n')
for i in range(0, len(lines_gaff)):
    line = lines_gaff[i]
    atypes = line[:11].split('-')
    atype1 = atypes[0].strip()
    atype2 = atypes[1].strip()
    atype3 = atypes[2].strip()
    atype4 = atypes[3].strip()
    # 'X' is AMBER's wildcard atom type; moltemplate uses '*'.
    at1 = atype1.replace('X','*')
    at2 = atype2.replace('X','*')
    at3 = atype3.replace('X','*')
    at4 = atype4.replace('X','*')
    dihedraltype = '@dihedral:'+atype1+'-'+atype2+'-'+atype3+'-'+atype4
    sys.stdout.write(' '+dihedraltype+' @atom:'+at1+' @atom:'+at2+' @atom:'+at3+' @atom:'+at4+'\n')
sys.stdout.write(' } # (end of Dihedrals By Type)\n')
sys.stdout.write('\n')
"""
- 6 - ***** INPUT FOR DIHEDRAL PARAMETERS *****
IPT , JPT , KPT , LPT , IDIVF , PK , PHASE , PN
FORMAT(A2,1X,A2,1X,A2,1X,A2,I4,3F15.2)
IPT, ... The atom symbols for the atoms forming a dihedral
angle. If IPT .eq. 'X ' .and. LPT .eq. 'X ' then
any dihedrals in the system involving the atoms "JPT" and
and "KPT" are assigned the same parameters. This is
called the general dihedral type and is of the form
"X "-"JPT"-"KPT"-"X ".
IDIVF The factor by which the torsional barrier is divided.
Consult Weiner, et al., JACS 106:765 (1984) p. 769 for
details. Basically, the actual torsional potential is
(PK/IDIVF) * (1 + cos(PN*phi - PHASE))
PK The barrier height divided by a factor of 2.
PHASE The phase shift angle in the torsional function.
The unit is degrees.
PN The periodicity of the torsional barrier.
NOTE: If PN .lt. 0.0 then the torsional potential
is assumed to have more than one term, and the
values of the rest of the terms are read from the
next cards until a positive PN is encountered. The
negative value of pn is used only for identifying
the existence of the next term and only the
absolute value of PN is kept.
The input is terminated by a blank card.
"""
|
gpl-2.0
|
SoftwareIntrospectionLab/MininGit
|
pycvsanaly2/AsyncQueue.py
|
2
|
4434
|
# Copyright (C) 2008 Libresoft
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors: Carlos Garcia Campos <carlosgc@gsyc.escet.urjc.es>
import threading
from time import time as _time
from collections import deque
class TimeOut(Exception):
    """Raised by AsyncQueue.put()/get() when the given timeout expires."""
    pass
class AsyncQueue(object):
    """A thread-safe FIFO queue with optional blocking timeouts and
    task-completion tracking.

    Consumers call done() once per item retrieved; a producer can then wait
    for all items to be processed with join().  The *_unlocked variants skip
    all locking/bookkeeping and must only be used while the caller already
    holds the mutex.
    """
    def __init__(self, maxsize=0):
        # maxsize <= 0 means the queue is unbounded.
        self._init(maxsize)
        self.mutex = threading.Lock()
        # Consumers wait on empty_cond, producers on full_cond, join() on
        # finish; all three share the same mutex so state reads stay
        # consistent.
        self.empty_cond = threading.Condition(self.mutex)
        self.full_cond = threading.Condition(self.mutex)
        self.finish = threading.Condition(self.mutex)
        self.pending_items = 0
    def done(self):
        """Mark one item as processed; wakes join() when none remain.

        Raises ValueError if called more times than items were put().
        """
        with self.finish:
            pending = self.pending_items - 1
            if pending < 0:
                raise ValueError('done() called too many times')
            elif pending == 0:
                # notify_all (not the deprecated notifyAll alias).
                self.finish.notify_all()
            self.pending_items = pending
    def join(self):
        """Block until every item put() into the queue has been done()."""
        with self.finish:
            while self.pending_items:
                self.finish.wait()
    def empty(self):
        """Return True if the queue currently holds no items."""
        # 'with' guarantees the lock is released even if _empty() raises
        # (the original acquire/release pair had no try/finally).
        with self.mutex:
            return self._empty()
    def empty_unlocked(self):
        return self._empty()
    def put(self, item, timeout=None):
        """Add an item, blocking while the queue is full.

        Raises TimeOut if the queue is still full after `timeout` seconds,
        and ValueError for a negative timeout.
        """
        with self.full_cond:
            if timeout is None:
                while self._full():
                    self.full_cond.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._full():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise TimeOut
                    self.full_cond.wait(remaining)
            self._put(item)
            self.pending_items += 1
            self.empty_cond.notify()
    def put_unlocked(self, item):
        # NOTE: intentionally does not touch pending_items or notify waiters.
        self._put(item)
    def get(self, timeout=None):
        """Remove and return the oldest item, blocking while empty.

        Raises TimeOut if nothing arrives within `timeout` seconds, and
        ValueError for a negative timeout.
        """
        with self.empty_cond:
            if timeout is None:
                while self._empty():
                    self.empty_cond.wait()
            else:
                if timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                endtime = _time() + timeout
                while self._empty():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise TimeOut
                    self.empty_cond.wait(remaining)
            item = self._get()
            self.full_cond.notify()
            return item
    def get_unlocked(self):
        return self._get()
    # Queue implementation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = deque()
    def _empty(self):
        return not self.queue
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize
    def _put(self, item):
        self.queue.append(item)
    def _get(self):
        return self.queue.popleft()
if __name__ == '__main__':
    # Demo/smoke test: five daemon workers drain the queue, then a final
    # get() with a timeout demonstrates TimeOut being raised.
    def worker(q):
        while True:
            item = q.get()
            # BUGFIX: print() calls instead of Python-2-only print
            # statements, which made this file a SyntaxError on Python 3.
            print("Got item ", item)
        q.done()
    q = AsyncQueue()
    for i in range(5):
        t = threading.Thread(target=worker, args=(q,))
        t.daemon = True  # setDaemon() is deprecated in favour of .daemon
        t.start()
    for item in ['foo', 'bar', 1, 2, {'a': 'b'}, [5, 6, 7]]:
        q.put(item)
    q.join()
    try:
        q.get(5)
    except TimeOut:
        print("Queue empty! bye bye!")
|
gpl-2.0
|
yuyu2172/chainercv
|
tests/links_tests/model_tests/test_pickable_sequential_chain.py
|
3
|
8325
|
import numpy as np
import unittest
import chainer
from chainer.backends.cuda import to_cpu
from chainer.function import Function
from chainer import testing
from chainer.testing import attr
from chainercv.links import PickableSequentialChain
from chainercv.utils.testing import ConstantStubLink
class DummyFunc(Function):
    """Stub chainer Function whose forward pass doubles its first input."""
    def forward(self, inputs):
        doubled = inputs[0] * 2
        return (doubled,)
class PickableSequentialChainTestBase(object):
    """Shared test logic for PickableSequentialChain.

    Subclasses supply ``self.pick`` (via testing.parameterize) and call
    setUpBase() from setUp().  The chain under test is l1 -> f1 -> f2 -> l2.
    """
    def setUpBase(self):
        self.l1 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
        self.f1 = DummyFunc()
        self.f2 = DummyFunc()
        self.l2 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
        self.link = PickableSequentialChain()
        with self.link.init_scope():
            self.link.l1 = self.l1
            self.link.f1 = self.f1
            self.link.f2 = self.f2
            self.link.l2 = self.l2
        if self.pick:
            self.link.pick = self.pick
        self.x = np.random.uniform(size=(1, 3, 24, 24))
        # Python 2 compatibility: assertRaisesRegex was assertRaisesRegexp.
        if not hasattr(self, 'assertRaisesRegex'):
            self.assertRaisesRegex = self.assertRaisesRegexp
    def test_pick(self):
        self.assertEqual(self.link.pick, self.pick)
    def test_pick_setter(self):
        # Assigning an unknown layer name — single or inside a tuple — must
        # raise ValueError naming the offending layer.
        invalid_name = 'nonexistent'
        self.assertNotIn(invalid_name, self.link.layer_names)
        expected_message_pattern = str.format(
            '^Invalid layer name .{:s}.$', invalid_name)
        with self.assertRaisesRegex(ValueError, expected_message_pattern):
            self.link.pick = invalid_name
        invalid_names = 'nonexistent', 'nonexistent2'
        for n in invalid_names:
            self.assertNotIn(n, self.link.layer_names)
        expected_message_pattern = str.format(
            '^Invalid layer name .{:s}.$', invalid_names[0])
        with self.assertRaisesRegex(ValueError, expected_message_pattern):
            self.link.pick = invalid_names
    def test_layer_names(self):
        self.assertEqual(self.link.layer_names, ['l1', 'f1', 'f2', 'l2'])
    def check_call(self, x, expects):
        # Normalize self.pick to a tuple of layer names; a non-tuple pick
        # means the chain returns a single Variable rather than a tuple.
        outs = self.link(x)
        if isinstance(self.pick, tuple):
            pick = self.pick
        else:
            if self.pick is None:
                pick = ('l2',)
            else:
                pick = (self.pick,)
            outs = (outs,)
        self.assertEqual(len(outs), len(pick))
        for out, layer_name in zip(outs, pick):
            self.assertIsInstance(out, chainer.Variable)
            self.assertIsInstance(out.array, self.link.xp.ndarray)
            out = to_cpu(out.array)
            np.testing.assert_equal(out, to_cpu(expects[layer_name].array))
    def check_basic(self):
        # Compute each layer's expected output by running the pieces by hand.
        x = self.link.xp.asarray(self.x)
        expects = {}
        expects['l1'] = self.l1(x)
        expects['f1'] = self.f1(expects['l1'])
        expects['f2'] = self.f2(expects['f1'])
        expects['l2'] = self.l2(expects['f2'])
        self.check_call(x, expects)
    def test_basic_cpu(self):
        self.check_basic()
    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_basic()
    def check_deletion(self):
        x = self.link.xp.asarray(self.x)
        # Deleting a layer that pick refers to must fail; otherwise the
        # chain should behave as if it started at f1.
        if self.pick == 'l1' or \
           (isinstance(self.pick, tuple)
                and 'l1' in self.pick):
            with self.assertRaises(AttributeError):
                del self.link.l1
            return
        else:
            del self.link.l1
        expects = {}
        expects['f1'] = self.f1(x)
        expects['f2'] = self.f2(expects['f1'])
        expects['l2'] = self.l2(expects['f2'])
        self.check_call(x, expects)
    def test_deletion_cpu(self):
        self.check_deletion()
    @attr.gpu
    def test_deletion_gpu(self):
        self.link.to_gpu()
        self.check_deletion()
# Exercise the chain directly with various pick configurations: default
# (None), a single name, tuples, and a duplicated layer name.
@testing.parameterize(
    {'pick': None},
    {'pick': 'f2'},
    {'pick': ('f2',)},
    {'pick': ('l2', 'l1', 'f2')},
    {'pick': ('l2', 'l2')},
)
class TestPickableSequentialChain(
        unittest.TestCase, PickableSequentialChainTestBase):
    def setUp(self):
        self.setUpBase()
# Run the shared suite on a copy of the chain and additionally verify that
# mutating the copy (pick, layer addition/deletion) never affects the
# original, for every copy mode.
@testing.parameterize(
    *testing.product({
        'mode': ['init', 'share', 'copy'],
        'pick': [None, 'f1', ('f1', 'f2'), ('l2', 'l2'), ('l2', 'l1', 'f2')]
    })
)
class TestCopiedPickableSequentialChain(
        unittest.TestCase, PickableSequentialChainTestBase):
    def setUp(self):
        self.setUpBase()
        self.f100 = DummyFunc()
        self.l100 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
        # Replace self.link with its copy; keep the original for comparison.
        self.link, self.original_link = \
            self.link.copy(mode=self.mode), self.link
    def check_unchanged(self, link, x):
        # Returns a context manager that records link(x) on entry and
        # asserts on exit that the output is unchanged — i.e. the with-block
        # body had no effect on 'link'.
        class Checker(object):
            def __init__(self, tester, link, x):
                self.tester = tester
                self.link = link
                self.x = x
            def __enter__(self):
                self.expected = self.link(self.x)
            def __exit__(self, exc_type, exc_value, traceback):
                # Don't mask an exception raised inside the with-block.
                if exc_type is not None:
                    return None
                self.actual = self.link(self.x)
                if isinstance(self.expected, tuple):
                    self.tester.assertEqual(
                        len(self.expected), len(self.actual))
                    for e, a in zip(self.expected, self.actual):
                        self.tester.assertEqual(type(e.array), type(a.array))
                        np.testing.assert_equal(
                            to_cpu(e.array), to_cpu(a.array))
                else:
                    self.tester.assertEqual(type(self.expected.array),
                                            type(self.actual.array))
                    np.testing.assert_equal(
                        to_cpu(self.expected.array),
                        to_cpu(self.actual.array))
        return Checker(self, link, x)
    def test_original_unaffected_by_setting_pick(self):
        with self.check_unchanged(self.original_link, self.x):
            self.link.pick = 'f2'
    def test_original_unaffected_by_function_addition(self):
        with self.check_unchanged(self.original_link, self.x):
            with self.link.init_scope():
                self.link.f100 = self.f100
    def test_original_unaffected_by_link_addition(self):
        with self.check_unchanged(self.original_link, self.x):
            with self.link.init_scope():
                self.link.l100 = self.l100
    def test_original_unaffected_by_function_deletion(self):
        with self.check_unchanged(self.original_link, self.x):
            with self.link.init_scope():
                # pick must be cleared first so the deletion is allowed.
                self.link.pick = None
                del self.link.f1
    def test_original_unaffected_by_link_deletion(self):
        with self.check_unchanged(self.original_link, self.x):
            with self.link.init_scope():
                self.link.pick = None
                del self.link.l1
# Check that remove_unused() keeps exactly the layers up to the last picked
# one and drops the rest (per the expected 'layer_names' in each case).
@testing.parameterize(
    {'pick': 'l1', 'layer_names': ['l1']},
    {'pick': 'f1', 'layer_names': ['l1', 'f1']},
    {'pick': ['f1', 'f2'], 'layer_names': ['l1', 'f1', 'f2']},
    {'pick': None, 'layer_names': ['l1', 'f1', 'f2', 'l2']}
)
class TestPickableSequentialChainRemoveUnused(unittest.TestCase):
    def setUp(self):
        self.l1 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
        self.f1 = DummyFunc()
        self.f2 = DummyFunc()
        self.l2 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
        self.link = PickableSequentialChain()
        with self.link.init_scope():
            self.link.l1 = self.l1
            self.link.f1 = self.f1
            self.link.f2 = self.f2
            self.link.l2 = self.l2
        self.link.pick = self.pick
    def check_remove_unused(self):
        self.link.remove_unused()
        self.assertEqual(self.link.layer_names, self.layer_names)
        # Removed layers must no longer be attributes of the chain.
        for name in ['l1', 'f1', 'f2', 'l2']:
            if name in self.layer_names:
                self.assertTrue(hasattr(self.link, name))
            else:
                self.assertFalse(hasattr(self.link, name))
    def test_remove_unused_cpu(self):
        self.check_remove_unused()
    @attr.gpu
    def test_remove_unused_gpu(self):
        self.link.to_gpu()
        self.check_remove_unused()
testing.run_module(__name__, __file__)
|
mit
|
blablack/beatslash-lv2
|
waflib/extras/javatest.py
|
53
|
4133
|
#! /usr/bin/env python
# encoding: utf-8
# Federico Pellegrin, 2017 (fedepell)
"""
Provides Java Unit test support using :py:class:`waflib.Tools.waf_unit_test.utest`
task via the **javatest** feature.
This gives the possibility to run unit test and have them integrated into the
standard waf unit test environment. It has been tested with TestNG and JUnit
but should be easily expandable to other frameworks given the flexibility of
ut_str provided by the standard waf unit test environment.
Example usage:
def options(opt):
opt.load('java waf_unit_test javatest')
def configure(conf):
conf.load('java javatest')
def build(bld):
[ ... mainprog is built here ... ]
bld(features = 'javac javatest',
srcdir = 'test/',
outdir = 'test',
sourcepath = ['test'],
classpath = [ 'src' ],
basedir = 'test',
use = ['JAVATEST', 'mainprog'], # mainprog is the program being tested in src/
ut_str = 'java -cp ${CLASSPATH} ${JTRUNNER} ${SRC}',
jtest_source = bld.path.ant_glob('test/*.xml'),
)
At command line the CLASSPATH where to find the testing environment and the
test runner (default TestNG) that will then be seen in the environment as
CLASSPATH_JAVATEST (then used for use) and JTRUNNER and can be used for
dependencies and ut_str generation.
Example configure for TestNG:
waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar --jtrunner=org.testng.TestNG
or as default runner is TestNG:
waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar
Example configure for JUnit:
waf configure --jtpath=/tmp/junit.jar --jtrunner=org.junit.runner.JUnitCore
The runner class presence on the system is checked for at configuration stage.
"""
import os
from waflib import Task, TaskGen, Options
@TaskGen.feature('javatest')
@TaskGen.after_method('apply_java', 'use_javac_files', 'set_classpath')
def make_javatest(self):
    """
    Creates a ``utest`` task with a populated environment for Java Unit test execution
    """
    tsk = self.create_task('utest')
    tsk.set_run_after(self.javac_task)
    # Put test input files as waf_unit_test relies on that for some prints and log generation
    # If jtest_source is there, this is specially useful for passing XML for TestNG
    # that contain test specification, use that as inputs, otherwise test sources
    if getattr(self, 'jtest_source', None):
        tsk.inputs = self.to_nodes(self.jtest_source)
    else:
        if self.javac_task.srcdir[0].exists():
            tsk.inputs = self.javac_task.srcdir[0].ant_glob('**/*.java', remove=False)
    # Compile the ut_str command template into the callable the utest task
    # will execute, and track its variables for dependency purposes.
    if getattr(self, 'ut_str', None):
        self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
        tsk.vars = lst + tsk.vars
    if getattr(self, 'ut_cwd', None):
        if isinstance(self.ut_cwd, str):
            # we want a Node instance
            if os.path.isabs(self.ut_cwd):
                self.ut_cwd = self.bld.root.make_node(self.ut_cwd)
            else:
                self.ut_cwd = self.path.make_node(self.ut_cwd)
    else:
        self.ut_cwd = self.bld.bldnode
    # Get parent CLASSPATH and add output dir of test, we run from wscript dir
    # We have to change it from list to the standard java -cp format (: separated)
    tsk.env.CLASSPATH = ':'.join(self.env.CLASSPATH) + ':' + self.outdir.abspath()
    if not self.ut_cwd.exists():
        self.ut_cwd.mkdir()
    if not hasattr(self, 'ut_env'):
        self.ut_env = dict(os.environ)
def configure(ctx):
    """Configuration step: record the javatest CLASSPATH and runner class
    from --jtpath/--jtrunner and verify the runner class can be loaded."""
    cp = ctx.env.CLASSPATH or '.'
    if getattr(Options.options, 'jtpath', None):
        ctx.env.CLASSPATH_JAVATEST = getattr(Options.options, 'jtpath').split(':')
        cp += ':' + getattr(Options.options, 'jtpath')
    if getattr(Options.options, 'jtrunner', None):
        ctx.env.JTRUNNER = getattr(Options.options, 'jtrunner')
    # NOTE(review): assumes check_java_class returns a falsy value on
    # success and a truthy status on failure — confirm against waf's
    # javaw tool.
    if ctx.check_java_class(ctx.env.JTRUNNER, with_classpath=cp):
        ctx.fatal('Could not run test class %r' % ctx.env.JTRUNNER)
def options(opt):
    """Register the javatest command-line options (--jtpath, --jtrunner)."""
    option_table = (
        ('--jtpath', '', 'jtpath',
         'Path to jar(s) needed for javatest execution, colon separated, if not in the system CLASSPATH'),
        ('--jtrunner', 'org.testng.TestNG', 'jtrunner',
         'Class to run javatest test [default: org.testng.TestNG]'),
    )
    for flag, default_value, dest_name, help_text in option_table:
        opt.add_option(flag, action='store', default=default_value,
                       dest=dest_name, help=help_text)
|
gpl-3.0
|
cpe/VAMDC-VALD
|
nodes/IDEADB/node/models.py
|
3
|
12054
|
"""
This module defines the database schema of the node.
Each model class defines a table in the database. The fields define the columns of this table.
"""
# library import
from django.db.models import *
from django.core.exceptions import ValidationError
from vamdctap import bibtextools
import re
import datetime
from node.inchivalidation import inchi2inchikey, inchikey2inchi, inchi2chemicalformula
from node.chemlib import chemicalformula2nominalmass, checkatoms, atommasses, add_charge_layer
#we define the regex object for chemical formulas here, as it is used in two different functions
# NOTE(review): this rebinds the name 're' from the imported module to a
# compiled pattern, shadowing the 're' module; every later 're.match(s)' in
# this file is therefore Pattern.match, not the module-level re.match.
re = re.compile('^([A-Z]{1}[a-z]{0,2}[0-9]{0,3})+$')
#define validations for CAS and chemical formulas
def validate_CAS(cas):
    """Validate a CAS registry number (e.g. '7732-18-5') via its check digit.

    Each digit of the first two hyphen-separated groups is weighted by its
    position counted from the right (excluding the check digit); the
    weighted sum modulo 10 must equal the final group.  Raises
    ValidationError for malformed or mismatching numbers.
    """
    cas_arr = cas.split('-')
    if len(cas_arr) < 3:
        raise ValidationError(u'%s is not a valid CAS-number!' % cas)
    # 'total' replaces the original accumulator name 'sum', which shadowed
    # the builtin; the arithmetic is unchanged.
    total = 0
    weight = len(cas_arr[0]) + 2
    for digit in cas_arr[0]:
        total += weight * int(digit)
        weight -= 1
    # The middle group always contributes its two digits with weights 2, 1.
    total += 2 * int(cas_arr[1][0]) + int(cas_arr[1][1])
    if total % 10 != int(cas_arr[2]):
        raise ValidationError(u'%s is not a valid CAS-number!' % cas)
def validate_chemical_formula(chemical_formula):
    """ checks chemical formalae for plausibility """
    # first we check if the formula seems like a chemical formula
    # (here 're' is the module-level compiled pattern, not the re module)
    m = re.match(chemical_formula)
    if m is None:
        raise ValidationError(u'%s does not seem to be a chemical formula' % chemical_formula)
    # we outsource the checking of individual atoms to the chemlib
    # (checkatoms appears to return 0 on success, else the offending symbol
    #  — confirm against node.chemlib)
    result = checkatoms(chemical_formula)
    if result != 0:
        raise ValidationError(u'%s is not an atom' % result)
def validate_name(name):
    # Reject common names that themselves look like chemical formulas
    # ('re' here is the module-level compiled formula pattern).
    m = re.match(name)
    if m is not None:
        raise ValidationError(u'%s seems to be a chemical formula. Please use a normal name or leave it blank.' % name)
#start defining the classes
class Author(Model):
    """Author record; rendered as 'Lastname, Firstname [Middlename]'."""
    firstname = CharField(max_length=30)
    lastname = CharField(max_length=30)
    middlename = CharField(max_length=30, blank=True)
    email = EmailField(max_length=254, blank=True)
    def __str__(self):
        # Include the middle name only when one is present.
        if self.middlename != '':
            return u'%s, %s %s'%(self.lastname, self.firstname, self.middlename)
        else:
            return u'%s, %s'%(self.lastname, self.firstname)
class Experiment(Model):
    """An experimental setup, identified by a short name."""
    name = CharField(max_length=10)

    def __str__(self):
        return u'%s' % (self.name)
class Species(Model):
    """A chemical species (atom or molecule).

    Identified by its stoichiometric formula, nominal mass and — for isomer
    disambiguation — its standard InChI. clean() cross-validates these
    fields and auto-completes the charged InChI/InChI-Key variants.
    """
    name = CharField(max_length=100, db_index=True, verbose_name='Common Name (e.g. Water for H2O)', blank=True, validators=[validate_name])
    chemical_formula = CharField(max_length=40, db_index=True, verbose_name='Chemical Formula', default='', validators=[validate_chemical_formula])
    mass = PositiveIntegerField(db_index=True, verbose_name='Nominal Mass')
    isotope = BooleanField(verbose_name='Tick, if this is the most abundant isotope', default=True)
    nuclear_charge = PositiveSmallIntegerField(verbose_name='Number of Protons', blank=True, null=True)
    inchi_neutral = CharField(max_length=300, db_index=True, verbose_name='InChI neutral', blank=True)
    inchi_negative = CharField(max_length=300, verbose_name='InChI anion', blank=True)
    inchi_positive = CharField(max_length=300, verbose_name='InChI cation', blank=True)
    inchikey_neutral = CharField(max_length=27, db_index=True, verbose_name='InChI-Key neutral', blank=True)
    inchikey_negative = CharField(max_length=27, verbose_name='InChI-Key anion', blank=True)
    inchikey_positive = CharField(max_length=27, verbose_name='InChI-Key cation', blank=True)
    cas = CharField(max_length=12, verbose_name='CAS-Number', blank=True, validators=[validate_CAS])
    molecule = BooleanField(verbose_name='Tick, if this is a molecule')

    def __str__(self):
        if self.name != '':
            return u'%s (%s)' % (self.name, self.chemical_formula)
        else:
            return u'%s' % (self.chemical_formula)

    def clean(self):
        """Cross-field validation and completion of InChI/InChI-Key data."""
        # we have to run this validator here again, because
        # chemicalformula2nominalmass expects proper formulae;
        # django runs the field validators _after_ this clean function
        validate_chemical_formula(self.chemical_formula)
        if self.isotope is True:
            if self.mass != chemicalformula2nominalmass(self.chemical_formula):
                raise ValidationError(u'Nominal mass and chemical formula are not compatible.')
        # check whether the chemical formula and the molecule flag fit each other
        if self.molecule is False:
            if self.chemical_formula not in atommasses:
                raise ValidationError(u'%s seems not to be an atom. Please tick whether this is a molecule.' % self.chemical_formula)
        else:
            if self.chemical_formula in atommasses:
                raise ValidationError(u'%s seems to be an atom. Please tick whether this is a molecule or an atom.' % self.chemical_formula)
        # check if either inchi or inchikey are there and either complete the
        # other one or verify their consistency
        if self.inchi_neutral == '':
            if self.inchikey_neutral != '':
                tmpinchi = inchikey2inchi(self.inchikey_neutral)
                if tmpinchi:
                    # BUGFIX: previously assigned to the non-existent
                    # attribute ``self.inchi``; the resolved InChI belongs
                    # in inchi_neutral.
                    self.inchi_neutral = tmpinchi
                else:
                    raise ValidationError(u'No chemical compound found for this InChI-Key.')
        else:
            # check if the given InChI has 'InChI=' in the beginning;
            # additionally check for Standard-InChI
            if not self.inchi_neutral.startswith('InChI='):
                self.inchi_neutral = 'InChI=' + self.inchi_neutral
            if not self.inchi_neutral.startswith('InChI=1S'):
                # BUGFIX: the message formatted ``self.inchi`` which does not
                # exist on this model (AttributeError at raise time).
                raise ValidationError(u'InChI %s is not a Standard-InChI (starts with 1S)' % self.inchi_neutral)
            # derive or verify the neutral InChI-Key
            if self.inchikey_neutral != '':
                inchikeycheck = inchi2inchikey(self.inchi_neutral)
                if inchikeycheck != self.inchikey_neutral:
                    raise ValidationError(u'The given InChI and neutral InChI-Key are not compatible.')
            else:
                tmpinchikey = inchi2inchikey(self.inchi_neutral)
                if tmpinchikey:
                    self.inchikey_neutral = tmpinchikey
                else:
                    raise ValidationError(u'Not a valid InChI-Key.')
            if self.chemical_formula != inchi2chemicalformula(self.inchi_neutral):
                raise ValidationError(u'InChI %s is not compatible with the stochiometric formula %s.' % (self.inchi_neutral, self.chemical_formula))
        # we check if we already have a species with same chem formula, mass
        # and isotope-status
        sp_search = Species.objects.filter(chemical_formula__exact=self.chemical_formula).filter(mass__exact=self.mass).filter(isotope__exact=self.isotope)
        # exclude this very instance
        sp_search = sp_search.exclude(id__exact=self.id)
        if len(sp_search) > 0:
            # duplicates must be isomeres and therefore carry a different InChI.
            # BUGFIX: without flat=True, values_list yields 1-tuples, so this
            # membership test could never match a plain string.
            if self.inchi_neutral in sp_search.values_list('inchi_neutral', flat=True):
                raise ValidationError(u'A species with this chemical formula and InChI already exists in the database')
            if self.inchi_neutral == '':
                raise ValidationError(u'Isomeres need to be distinguished via their InChI')
        # finally fill out the charged InChIs and their keys.
        # BUGFIX: ``is not ''`` compared identity, not equality.
        if self.inchi_neutral != '':
            self.inchi_negative = add_charge_layer(self.inchi_neutral, -1)
            self.inchi_positive = add_charge_layer(self.inchi_neutral, 1)
            self.inchikey_negative = inchi2inchikey(self.inchi_negative)
            self.inchikey_positive = inchi2inchikey(self.inchi_positive)

    class Meta:
        db_table = u'species'
        verbose_name_plural = u'Species'
        ordering = ['chemical_formula', 'name']
class Source(Model):
    """A bibliographic reference for measured data."""
    SOURCETYPE_CHOICES = (
        ('book', 'Book'),
        ('database', 'Database'),
        ('journal', 'Journal'),
        ('preprint', 'Preprint'),
        ('private communication', 'Private Communication'),
        ('proceeding', 'Proceeding'),
        ('report', 'Report'),
        ('thesis', 'Thesis'),
        ('vamdc node', 'VAMDC Node'),
    )
    authors = ManyToManyField(Author)
    journal = CharField(max_length=200)
    year = CharField(max_length=4)
    number = CharField(max_length=6, blank=True)
    volume = CharField(max_length=6)
    doi = CharField(max_length=100, verbose_name='DOI', blank=True)
    pagestart = CharField(max_length=7, verbose_name='Starting Page')
    pageend = CharField(max_length=7, verbose_name='Ending Page')
    url = URLField(max_length=200, blank=True)
    title = CharField(max_length=500)
    type = CharField(max_length=17, default='journal', choices=SOURCETYPE_CHOICES)

    def __str__(self):
        # "<title>, <year>"
        return u'%s, %s' % (self.title, self.year)
class Energyscan(Model):
    """A measured ion-yield-vs-electron-energy scan for a species."""
    Y_UNITS_CHOICES = (
        ('1/s', '1/s'),
        ('cm2', 'cm2'),
        ('m2', 'm2'),
        ('unitless', 'unitless'),
    )
    NEUTRAL = 0
    ANIONIC = -1
    CATIONIC = 1
    CHARGE_CHOICES = (
        (NEUTRAL, 'Neutral'),
        (ANIONIC, 'Anionic'),
        (CATIONIC, 'Cationic'),
    )
    ELAT = 'elat'
    DISS = 'diss'
    IONI = 'ioni'
    PC_CHOICES = (
        (ELAT, 'Electron Attachment'),
        (DISS, 'Dissociation'),
        (IONI, 'Ionization'),
    )
    species = ForeignKey(Species, related_name='energyscan_species')
    origin_species = ForeignKey(Species, related_name='energyscan_origin_species')
    product_charge = IntegerField(default=ANIONIC, choices=CHARGE_CHOICES)
    process_code = CharField(default=ELAT, choices=PC_CHOICES, max_length=4)
    process_code_2 = CharField(choices=PC_CHOICES, blank=True, max_length=4)
    source = ForeignKey(Source)
    experiment = ForeignKey(Experiment)
    energyscan_data = TextField(verbose_name='Paste data from Origin in this field')
    y_units = CharField(max_length=3, verbose_name='Please choose units for y-axis', default='1/s', choices=Y_UNITS_CHOICES)
    productiondate = DateField(verbose_name='Production Date')
    comment = TextField(blank=True, max_length=2000, verbose_name='Comment (max. 2000 chars.)')
    energyresolution = DecimalField(max_digits=4, decimal_places=3, verbose_name='Energy Resolution of the Experiment in eV')
    lastmodified = DateTimeField(auto_now=True)
    numberofpeaks = IntegerField(blank=True, verbose_name='Number of peaks visible (no shoulder structures)')

    def __str__(self):
        # CONSISTENCY FIX: sibling models define __str__; __unicode__ is
        # never invoked on Python 3.
        return u'ID %s: %s from %s' % (self.id, self.species, self.origin_species)

    # Python 2 compatibility alias for any existing callers of __unicode__.
    __unicode__ = __str__

    def clean(self):
        """Reject pasted Origin data containing '--' placeholder values."""
        # BUGFIX: original used ``find('--') is not -1`` — an identity test
        # against an int literal, which only works by accident of CPython's
        # small-int caching. Use substring containment instead.
        if '--' in self.energyscan_data:
            raise ValidationError(u'Energyscan data contains -- most likely due to empty lines in the Origin table.')
class Resonance(Model):
    """A resonance position extracted from an energy scan."""
    energyscan = ForeignKey(Energyscan)
    species = ForeignKey(Species, related_name='resonance_species')
    origin_species = ForeignKey(Species, related_name='resonance_origin_species')
    source = ForeignKey(Source)
    energy = DecimalField(max_digits=5, decimal_places=2)
    width = DecimalField(max_digits=3, decimal_places=2)

    def __str__(self):
        # "ID:<id>: <energy> eV for <species> from <origin>"
        return u'ID:%s: %s eV for %s from %s' % (self.id, self.energy, self.species, self.origin_species)
class Massspec(Model):
    """A mass spectrum recorded at a fixed electron energy."""
    species = ForeignKey(Species, related_name='massspec_species')
    source = ForeignKey(Source)
    energy = DecimalField(max_digits=5, decimal_places=2)
    massspec_data = TextField()

    def __str__(self):
        # "ID:<id> <species> at <energy>"
        return u'ID:%s %s at %s' % (self.id, self.species, self.energy)
|
gpl-3.0
|
SimonSapin/servo
|
tests/wpt/webgl/tests/closure-library/closure/bin/build/depswriter.py
|
135
|
6286
|
#!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates out a Closure deps.js file given a list of JavaScript sources.
Paths can be specified as arguments or (more commonly) specifying trees
with the flags (call with --help for descriptions).
Usage: depswriter.py [path/to/js1.js [path/to/js2.js] ...]
"""
import logging
import optparse
import os
import posixpath
import shlex
import sys
import source
import treescan
__author__ = 'nnaze@google.com (Nathan Naze)'
def MakeDepsFile(source_map):
  """Make a generated deps file.

  Args:
    source_map: A dict map of the source path to source.Source object.

  Returns:
    str, A generated deps file source.
  """
  # Emit entries in path-alphabetical order; sources that provide nothing
  # can never be depended upon, so they are skipped.
  return ''.join(
      _GetDepsLine(path, source_map[path])
      for path in sorted(source_map)
      if source_map[path].provides)
def _GetDepsLine(path, js_source):
"""Get a deps.js file string for a source."""
provides = sorted(js_source.provides)
requires = sorted(js_source.requires)
module = 'true' if js_source.is_goog_module else 'false'
return 'goog.addDependency(\'%s\', %s, %s, %s);\n' % (
path, provides, requires, module)
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
parser.add_option('--root',
dest='roots',
default=[],
action='append',
help='A root directory to scan for JS source files. '
'Paths of JS files in generated deps file will be '
'relative to this path. This flag may be specified '
'multiple times.')
parser.add_option('--root_with_prefix',
dest='roots_with_prefix',
default=[],
action='append',
help='A root directory to scan for JS source files, plus '
'a prefix (if either contains a space, surround with '
'quotes). Paths in generated deps file will be relative '
'to the root, but preceded by the prefix. This flag '
'may be specified multiple times.')
parser.add_option('--path_with_depspath',
dest='paths_with_depspath',
default=[],
action='append',
help='A path to a source file and an alternate path to '
'the file in the generated deps file (if either contains '
'a space, surround with whitespace). This flag may be '
'specified multiple times.')
return parser
def _NormalizePathSeparators(path):
"""Replaces OS-specific path separators with POSIX-style slashes.
Args:
path: str, A file path.
Returns:
str, The path with any OS-specific path separators (such as backslash on
Windows) replaced with URL-compatible forward slashes. A no-op on systems
that use POSIX paths.
"""
return path.replace(os.sep, posixpath.sep)
def _GetRelativePathToSourceDict(root, prefix=''):
  """Scans a top root directory for .js sources.

  Args:
    root: str, Root directory.
    prefix: str, Prefix for returned paths.

  Returns:
    dict, A map of relative paths (with prefix, if given), to source.Source
    objects.
  """
  # Remember and restore the cwd when we're done. We work from the root so
  # that paths are relative from the root.
  start_wd = os.getcwd()
  os.chdir(root)
  try:
    path_to_source = {}
    for path in treescan.ScanTreeForJsFiles('.'):
      prefixed_path = _NormalizePathSeparators(os.path.join(prefix, path))
      path_to_source[prefixed_path] = source.Source(source.GetFileContents(path))
  finally:
    # BUGFIX: restore the original working directory even when scanning or
    # reading a file raises; previously an exception left the process in
    # ``root``.
    os.chdir(start_wd)
  return path_to_source
def _GetPair(s):
"""Return a string as a shell-parsed tuple. Two values expected."""
try:
# shlex uses '\' as an escape character, so they must be escaped.
s = s.replace('\\', '\\\\')
first, second = shlex.split(s)
return (first, second)
except:
raise Exception('Unable to parse input line as a pair: %s' % s)
def main():
  """CLI frontend to MakeDepsFile."""
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()

  path_to_source = {}

  # Roots without prefixes
  for root in options.roots:
    path_to_source.update(_GetRelativePathToSourceDict(root))

  # Roots with prefixes
  for root_and_prefix in options.roots_with_prefix:
    root, prefix = _GetPair(root_and_prefix)
    path_to_source.update(_GetRelativePathToSourceDict(root, prefix=prefix))

  # Source paths
  for path in args:
    path_to_source[path] = source.Source(source.GetFileContents(path))

  # Source paths with alternate deps paths
  for path_with_depspath in options.paths_with_depspath:
    srcpath, depspath = _GetPair(path_with_depspath)
    path_to_source[depspath] = source.Source(source.GetFileContents(srcpath))

  # Make our output pipe.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout

  try:
    out.write('// This file was autogenerated by %s.\n' % sys.argv[0])
    out.write('// Please do not edit.\n')
    out.write(MakeDepsFile(path_to_source))
  finally:
    # BUGFIX: an explicitly opened output file was never closed; ensure it
    # is flushed and closed even when deps generation fails. stdout is
    # deliberately left open.
    if out is not sys.stdout:
      out.close()
# Script entry point: generate the deps file when run directly.
if __name__ == '__main__':
  main()
|
mpl-2.0
|
jabesq/home-assistant
|
homeassistant/components/netdata/sensor.py
|
7
|
4799
|
"""Support gathering system information of hosts which are running netdata."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_ICON, CONF_NAME, CONF_PORT, CONF_RESOURCES)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Polls of the Netdata REST API are throttled to once per minute
# (see NetdataData.async_update).
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
# Per-resource configuration keys.
CONF_DATA_GROUP = 'data_group'
CONF_ELEMENT = 'element'
CONF_INVERT = 'invert'
# Platform configuration defaults.
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Netdata'
DEFAULT_PORT = 19999
DEFAULT_ICON = 'mdi:desktop-classic'
# Schema of one entry under CONF_RESOURCES: which Netdata chart
# (data_group) and dimension (element) to expose, plus presentation flags.
RESOURCE_SCHEMA = vol.Any({
    vol.Required(CONF_DATA_GROUP): cv.string,
    vol.Required(CONF_ELEMENT): cv.string,
    vol.Optional(CONF_ICON, default=DEFAULT_ICON): cv.icon,
    vol.Optional(CONF_INVERT, default=False): cv.boolean,
})
# Platform schema: host/port of the Netdata instance and a mapping of
# sensor names to RESOURCE_SCHEMA entries.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Required(CONF_RESOURCES): vol.Schema({cv.string: RESOURCE_SCHEMA}),
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the Netdata sensor platform from YAML configuration.

    Creates one NetdataSensor per configured resource entry; raises
    PlatformNotReady when the Netdata host cannot be reached so Home
    Assistant retries the setup later.
    """
    # Imported lazily so the dependency is only loaded when the platform
    # is actually configured.
    from netdata import Netdata

    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    resources = config.get(CONF_RESOURCES)

    session = async_get_clientsession(hass)
    netdata = NetdataData(Netdata(host, hass.loop, session, port=port))
    # Initial poll; metrics stays None when the host is unreachable.
    await netdata.async_update()
    if netdata.api.metrics is None:
        raise PlatformNotReady

    dev = []
    for entry, data in resources.items():
        icon = data[CONF_ICON]
        sensor = data[CONF_DATA_GROUP]
        element = data[CONF_ELEMENT]
        invert = data[CONF_INVERT]
        # The config key itself is used as the sensor's display name suffix.
        sensor_name = entry
        try:
            resource_data = netdata.api.metrics[sensor]
            # Netdata reports the unit name 'percentage'; Home Assistant
            # convention is the '%' symbol.
            unit = '%' if resource_data['units'] == 'percentage' else \
                resource_data['units']
        except KeyError:
            # The configured chart does not exist on this Netdata instance.
            _LOGGER.error("Sensor is not available: %s", sensor)
            continue

        dev.append(NetdataSensor(
            netdata, name, sensor, sensor_name, element, icon, unit, invert))

    async_add_entities(dev, True)
class NetdataSensor(Entity):
    """Implementation of a Netdata sensor."""

    def __init__(
            self, netdata, name, sensor, sensor_name, element, icon, unit,
            invert):
        """Initialize the Netdata sensor."""
        self.netdata = netdata
        self._state = None
        self._sensor = sensor
        self._element = element
        # Fall back to the chart name when no explicit sensor name is given.
        self._sensor_name = self._sensor if sensor_name is None else \
            sensor_name
        self._name = name
        self._icon = icon
        self._unit_of_measurement = unit
        self._invert = invert

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self._name, self._sensor_name)

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon

    @property
    def state(self):
        """Return the state of the resources."""
        return self._state

    @property
    def available(self):
        """Could the resource be accessed during the last update call."""
        return self.netdata.available

    async def async_update(self):
        """Get the latest data from Netdata REST API."""
        await self.netdata.async_update()
        resource_data = self.netdata.api.metrics.get(self._sensor)
        if resource_data is None:
            # BUGFIX: if the chart disappeared from the Netdata instance,
            # metrics.get() returns None and the subscripts below raised
            # TypeError, aborting the entity update. Keep the last state;
            # ``available`` still reflects the poll result.
            return
        self._state = round(
            resource_data['dimensions'][self._element]['value'], 2) \
            * (-1 if self._invert else 1)
class NetdataData:
    """The class for handling the data retrieval."""

    def __init__(self, api):
        """Initialize the data object."""
        # Netdata client instance shared by all sensors of this platform.
        self.api = api
        # False while the most recent poll failed; mirrored by the sensors'
        # ``available`` property.
        self.available = True

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Get the latest data from the Netdata REST API."""
        # Imported lazily, matching async_setup_platform.
        from netdata.exceptions import NetdataError
        try:
            await self.api.get_allmetrics()
            self.available = True
        except NetdataError:
            # Best-effort: log and flag unavailability instead of raising,
            # so a transient outage does not break the update loop.
            _LOGGER.error("Unable to retrieve data from Netdata")
            self.available = False
|
apache-2.0
|
skerit/romcollectionbrowser
|
resources/lib/dialogbase.py
|
11
|
3363
|
import xbmc, xbmcgui
import os
import util, config
from util import *
# Kodi/xbmc action ids treated as "cancel/close dialog" (back, escape, etc.)
# — presumably matching the skin's close actions; TODO confirm against xbmcgui docs.
ACTION_CANCEL_DIALOG = (9,10,51,92,110)
# Control id of the dialog's exit button in the skin XML.
CONTROL_BUTTON_EXIT = 5101
class DialogBaseEdit(xbmcgui.WindowXMLDialog):
    """Shared helpers for edit dialogs: safe control lookup, list population,
    scraper discovery and keyboard/file-browser based property editing."""

    def getControlById(self, controlId):
        """Return the control with the given id, or None if the current
        window does not contain it."""
        try:
            control = self.getControl(controlId)
        except Exception:
            # BUGFIX: narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. xbmcgui raises when the id is
            # not part of the current window.
            return None
        return control

    def addItemsToList(self, controlId, options):
        """Replace the contents of the list control with the given options."""
        Logutil.log('addItemsToList', util.LOG_LEVEL_INFO)
        control = self.getControlById(controlId)
        control.setVisible(1)
        control.reset()
        items = []
        for option in options:
            items.append(xbmcgui.ListItem(option, '', '', ''))
        control.addItems(items)

    def getAvailableScrapers(self, localOnly):
        """Return the list of scraper site names.

        When localOnly is True, the local-nfo scraper and any site whose
        scrapers fetch from http sources are filtered out; a localized
        placeholder entry is used when nothing remains.
        """
        Logutil.log('get available scrapers', util.LOG_LEVEL_INFO)
        #Scrapers
        sitesInList = []
        if(not localOnly):
            sitesInList.append(util.localize(32854))
        #get all scrapers
        for siteName in self.scraperSites:
            site = self.scraperSites[siteName]
            #only add scrapers without http
            if(localOnly):
                #don't use local nfo scraper
                if(site.name == util.localize(32154)):
                    continue
                skipScraper = False
                for scraper in site.scrapers:
                    source = scraper.source
                    if(source.startswith('http')):
                        skipScraper = True
                        break
                if(skipScraper):
                    continue
            Logutil.log('add scraper name: ' +str(site.name), util.LOG_LEVEL_INFO)
            sitesInList.append(site.name)
        if(len(sitesInList) == 0):
            sitesInList.append(util.localize(32854))
        return sitesInList

    def editTextProperty(self, controlId, name):
        """Edit a text label via the on-screen keyboard; return the value."""
        control = self.getControlById(controlId)
        textValue = util.getLabel(control)
        keyboard = xbmc.Keyboard()
        keyboard.setHeading(util.localize(32132) %name)
        keyboard.setDefault(textValue)
        keyboard.doModal()
        if (keyboard.isConfirmed()):
            textValue = keyboard.getText()
            util.setLabel(textValue, control)
        return textValue

    def editPathWithFileMask(self, controlId, enterString, controlIdFilemask):
        """Browse for a path and combine it with the current file mask."""
        dialog = xbmcgui.Dialog()
        #get new value
        # NOTE(review): browse() may return an empty string on cancel; the
        # caller receives just the file mask in that case — TODO confirm.
        pathValue = dialog.browse(0, enterString, 'files')
        control = self.getControlById(controlId)
        util.setLabel(pathValue, control)
        control = self.getControlById(controlIdFilemask)
        filemask = util.getLabel(control)
        pathComplete = os.path.join(pathValue, filemask.strip())
        return pathComplete

    def editFilemask(self, controlId, enterString, pathComplete):
        """Edit the file-mask part of pathComplete via the keyboard."""
        control = self.getControlById(controlId)
        filemask = util.getLabel(control)
        keyboard = xbmc.Keyboard()
        keyboard.setHeading(util.localize(32132) %enterString)
        keyboard.setDefault(filemask)
        keyboard.doModal()
        if (keyboard.isConfirmed()):
            filemask = keyboard.getText()
            util.setLabel(filemask, control)
            pathParts = os.path.split(pathComplete)
            path = pathParts[0]
            pathComplete = os.path.join(path, filemask.strip())
        return pathComplete

    def selectItemInList(self, itemName, controlId):
        """Select the first list item whose label equals itemName."""
        Logutil.log('selectItemInList', util.LOG_LEVEL_INFO)
        control = self.getControlById(controlId)
        for i in range(0, control.size()):
            item = control.getListItem(i)
            if(item.getLabel() == itemName):
                control.selectItem(i)
                break
|
gpl-2.0
|
phenoxim/nova
|
nova/tests/functional/api/openstack/placement/db/test_resource_class_cache.py
|
1
|
6056
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_utils import timeutils
from nova.api.openstack.placement import exception
from nova.db.sqlalchemy import resource_class_cache as rc_cache
from nova import rc_fields as fields
from nova import test
from nova.tests import fixtures
class TestResourceClassCache(test.TestCase):
    """Tests for the placement resource-class cache (standard and custom)."""

    def setUp(self):
        super(TestResourceClassCache, self).setUp()
        self.db = self.useFixture(fixtures.Database(database='api'))
        self.context = mock.Mock()
        sess_mock = mock.Mock()
        sess_mock.connection.side_effect = self.db.get_engine().connect
        self.context.session = sess_mock

    @mock.patch('sqlalchemy.select')
    def test_rc_cache_std_no_db(self, sel_mock):
        """Test that looking up either an ID or a string in the resource class
        cache for a standardized resource class does not result in a DB
        call.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        self.assertEqual('VCPU', cache.string_from_id(0))
        self.assertEqual('MEMORY_MB', cache.string_from_id(1))
        self.assertEqual(0, cache.id_from_string('VCPU'))
        self.assertEqual(1, cache.id_from_string('MEMORY_MB'))
        self.assertFalse(sel_mock.called)

    def test_standards(self):
        cache = rc_cache.ResourceClassCache(self.context)
        standards = cache.STANDARDS
        self.assertEqual(len(standards), len(fields.ResourceClass.STANDARD))
        # BUGFIX: this was a generator expression; assertIn() consumes it,
        # so each membership test after the first checked only the
        # remainder of the iterator. A list supports repeated lookups.
        names = [rc['name'] for rc in standards]
        for name in fields.ResourceClass.STANDARD:
            self.assertIn(name, names)
        # STANDARDS is cached at the class level: a second cache instance
        # must return the very same object.
        cache = rc_cache.ResourceClassCache(self.context)
        standards2 = cache.STANDARDS
        self.assertEqual(id(standards), id(standards2))

    def test_standards_have_time_fields(self):
        cache = rc_cache.ResourceClassCache(self.context)
        standards = cache.STANDARDS
        first_standard = standards[0]
        self.assertIn('updated_at', first_standard)
        self.assertIn('created_at', first_standard)
        self.assertIsNone(first_standard['updated_at'])
        self.assertIsNone(first_standard['created_at'])

    def test_standard_has_time_fields(self):
        cache = rc_cache.ResourceClassCache(self.context)
        vcpu_class = cache.all_from_string('VCPU')
        expected = {'id': 0, 'name': 'VCPU', 'updated_at': None,
                    'created_at': None}
        self.assertEqual(expected, vcpu_class)

    def test_rc_cache_custom(self):
        """Test that non-standard, custom resource classes hit the database and
        return appropriate results, caching the results after a single
        query.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        # Haven't added anything to the DB yet, so should raise
        # ResourceClassNotFound
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.string_from_id, 1001)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.id_from_string, "IRON_NFV")
        # Now add to the database and verify appropriate results...
        with self.context.session.connection() as conn:
            ins_stmt = rc_cache._RC_TBL.insert().values(
                id=1001,
                name='IRON_NFV'
            )
            conn.execute(ins_stmt)
        self.assertEqual('IRON_NFV', cache.string_from_id(1001))
        self.assertEqual(1001, cache.id_from_string('IRON_NFV'))
        # Try same again and verify we don't hit the DB.
        with mock.patch('sqlalchemy.select') as sel_mock:
            self.assertEqual('IRON_NFV', cache.string_from_id(1001))
            self.assertEqual(1001, cache.id_from_string('IRON_NFV'))
            self.assertFalse(sel_mock.called)
        # Verify all fields available from all_from_string
        iron_nfv_class = cache.all_from_string('IRON_NFV')
        self.assertEqual(1001, iron_nfv_class['id'])
        self.assertEqual('IRON_NFV', iron_nfv_class['name'])
        # updated_at not set on insert
        self.assertIsNone(iron_nfv_class['updated_at'])
        self.assertIsInstance(iron_nfv_class['created_at'], datetime.datetime)
        # Update IRON_NFV (this is a no-op but will set updated_at)
        with self.context.session.connection() as conn:
            # NOTE(cdent): When using explict SQL that names columns,
            # the automatic timestamp handling provided by the oslo_db
            # TimestampMixin is not provided. created_at is a default
            # but updated_at is an onupdate.
            upd_stmt = rc_cache._RC_TBL.update().where(
                rc_cache._RC_TBL.c.id == 1001).values(
                    name='IRON_NFV', updated_at=timeutils.utcnow())
            conn.execute(upd_stmt)
        # reset cache
        cache = rc_cache.ResourceClassCache(self.context)
        iron_nfv_class = cache.all_from_string('IRON_NFV')
        # updated_at set on update
        self.assertIsInstance(iron_nfv_class['updated_at'], datetime.datetime)

    def test_rc_cache_miss(self):
        """Test that we raise ResourceClassNotFound if an unknown resource
        class ID or string is searched for.
        """
        cache = rc_cache.ResourceClassCache(self.context)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.string_from_id, 99999999)
        self.assertRaises(exception.ResourceClassNotFound,
                          cache.id_from_string, 'UNKNOWN')
|
apache-2.0
|
huanpc/IoT-1
|
gui/controller/.venv/lib/python3.5/site-packages/django/contrib/gis/utils/wkt.py
|
589
|
1923
|
"""
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
    """Return WKT text for *geom* with coordinates at the given precision.

    ``prec`` may be an integer (number of decimal places for each
    coordinate) or a printf-style format string such as '%20.7f' -- in the
    latter case you should know what you're doing.

    >>> precision_wkt(Point(5, 23), 1)
    'POINT(5.0 23.0)'
    """
    if isinstance(prec, int):
        num_fmt = '%%.%df' % prec
    elif isinstance(prec, six.string_types):
        num_fmt = prec
    else:
        raise TypeError
    # TODO: Support 3D geometries.
    coord_fmt = num_fmt + ' ' + num_fmt

    def _coords(coords):
        # "x y" pairs joined by commas; only the first two ordinates are used.
        return ','.join(coord_fmt % c[:2] for c in coords)

    def _poly(poly):
        # Each ring is parenthesized.
        return ','.join('(%s)' % _coords(ring) for ring in poly)

    def _geom(g):
        gtype = str(g.geom_type).upper()
        if gtype == 'POINT':
            body = _coords((g.coords,))
        elif gtype in ('LINESTRING', 'LINEARRING'):
            body = _coords(g.coords)
        elif gtype in ('POLYGON', 'MULTILINESTRING'):
            body = _poly(g)
        elif gtype == 'MULTIPOINT':
            body = _coords(g.coords)
        elif gtype == 'MULTIPOLYGON':
            body = ','.join('(%s)' % _poly(p) for p in g)
        elif gtype == 'GEOMETRYCOLLECTION':
            body = ','.join(_geom(child) for child in g)
        else:
            raise TypeError
        return '%s(%s)' % (gtype, body)

    return _geom(geom)
|
mit
|
zerc/django
|
django/template/response.py
|
84
|
5682
|
from django.http import HttpResponse
from django.utils import six
from .loader import get_template, select_template
class ContentNotRenderedError(Exception):
    """Raised when response content is accessed, iterated or pickled before
    render() has been called."""
    pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status, charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return select_template(template, using=self.using)
elif isinstance(template, six.string_types):
return get_template(template, using=self.using)
else:
return template
def resolve_context(self, context):
return context
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
    @property
    def content(self):
        # Same guard as __iter__: accessing the body before render() is
        # almost certainly a caller bug, so fail loudly.
        if not self._is_rendered:
            raise ContentNotRenderedError('The response content must be '
                'rendered before it can be accessed.')
        return super(SimpleTemplateResponse, self).content
    @content.setter
    def content(self, value):
        """Set the response content and mark the response as rendered."""
        # Delegate to the base HttpResponse property's setter explicitly.
        HttpResponse.content.fset(self, value)
        self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
    """SimpleTemplateResponse variant that renders with an HttpRequest.

    ``_request`` is added to ``rendering_attrs`` so it is stripped before
    pickling, like the other render-only attributes.
    """
    rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request']
    def __init__(self, request, template, context=None, content_type=None,
                 status=None, charset=None, using=None):
        super(TemplateResponse, self).__init__(
            template, context, content_type, status, charset, using)
        # Kept for rendered_content; never serialized (see rendering_attrs).
        self._request = request
|
bsd-3-clause
|
xionzz/earthquake
|
venv/lib/python2.7/site-packages/numpy/core/numerictypes.py
|
37
|
29192
|
"""
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
import types as _types
import sys
from numpy.compat import bytes, long
import numbers
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
#import string
# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
# LOWER_TABLE)
# assert (string.maketrans(string.ascii_lowercase, string.ascii_uppercase) == \
# UPPER_TABLE)
#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    Locale-independent replacement for str.lower(): in e.g. a Turkish
    locale "I".lower() != "i", which would break type-name lookups.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    Locale-independent replacement for str.upper(): in e.g. a Turkish
    locale "i".upper() != "I", which would break type-name lookups.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
def english_capitalize(s):
    """ Apply English case rules to upper-case the first character of an
    ASCII string.

    Locale-independent replacement for str.capitalize().

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
sctypeDict = {} # Contains all leaf-node scalar types with aliases
sctypeNA = {}      # Contains all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
    """Return a bit-width name for a given type object.

    Returns (base, bits, char) where *base* is the generic name ('int',
    'bool', ...), *bits* the width in bits (0 when sizeless), and *char*
    the typechar optionally suffixed with the byte count.
    """
    name = obj.__name__
    base = ''
    char = ''
    try:
        # Strip a trailing '_' ('bool_' -> 'bool') before the typeinfo lookup.
        if name[-1] == '_':
            newname = name[:-1]
        else:
            newname = name
        info = typeinfo[english_upper(newname)]
        assert(info[-1] == obj) # sanity check
        bits = info[2]
    except KeyError: # bit-width name
        base, bits = _evalname(name)
        char = base[0]
    # Fix up the handful of names the generic path gets wrong.
    if name == 'bool_':
        char = 'b'
        base = 'bool'
    elif name=='void':
        char = 'V'
        base = 'void'
    elif name=='object_':
        char = 'O'
        base = 'object'
        bits = 0
    elif name=='datetime64':
        char = 'M'
    elif name=='timedelta64':
        char = 'm'
    if sys.version_info[0] >= 3:
        if name=='bytes_':
            char = 'S'
            base = 'bytes'
        elif name=='str_':
            char = 'U'
            base = 'str'
    else:
        if name=='string_':
            char = 'S'
            base = 'string'
        elif name=='unicode_':
            char = 'U'
            base = 'unicode'
    # NOTE: 'bytes' here shadows the numpy.compat 'bytes' import for the
    # remainder of this function.
    bytes = bits // 8
    if char != '' and bytes != 0:
        char = "%s%d" % (char, bytes)
    return base, bits, char
def _add_types():
    """Populate allTypes/sctypeDict from the C-level typeinfo table."""
    for a in typeinfo.keys():
        name = english_lower(a)
        # Concrete types are tuples (char, num, bits, align, ..., typeobj);
        # abstract base classes are stored as the class itself.
        if isinstance(typeinfo[a], tuple):
            typeobj = typeinfo[a][-1]
            # define C-name and insert typenum and typechar references also
            allTypes[name] = typeobj
            sctypeDict[name] = typeobj
            sctypeDict[typeinfo[a][0]] = typeobj
            sctypeDict[typeinfo[a][1]] = typeobj
        else: # generic class
            allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
    """Register bit-width aliases ('float64', 'Complex32', typechars)
    for every concrete scalar type in typeinfo."""
    for a in typeinfo.keys():
        name = english_lower(a)
        if not isinstance(typeinfo[a], tuple):
            continue
        typeobj = typeinfo[a][-1]
        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(typeobj)
        # Signed/unsigned integers get their aliases from
        # _add_integer_aliases() instead.
        if base[-3:] == 'int' or char[0] in 'ui': continue
        if base != '':
            myname = "%s%d" % (base, bit)
            # Don't let longdouble/clongdouble overwrite an existing
            # same-width alias (e.g. when longdouble == double).
            if (name != 'longdouble' and name != 'clongdouble') or \
               myname not in allTypes.keys():
                allTypes[myname] = typeobj
                sctypeDict[myname] = typeobj
            # numarray-style capitalized name; complex uses the width of
            # one component, bool carries no width at all.
            if base == 'complex':
                na_name = '%s%d' % (english_capitalize(base), bit//2)
            elif base == 'bool':
                na_name = english_capitalize(base)
                sctypeDict[na_name] = typeobj
            else:
                na_name = "%s%d" % (english_capitalize(base), bit)
                sctypeDict[na_name] = typeobj
            sctypeNA[na_name] = typeobj
            sctypeDict[na_name] = typeobj
            sctypeNA[typeobj] = na_name
            sctypeNA[typeinfo[a][0]] = na_name
        # NOTE(review): if base == '' but char != '', na_name below would
        # be unbound; presumably typeinfo never produces that combination.
        if char != '':
            sctypeDict[char] = typeobj
            sctypeNA[char] = na_name
_add_aliases()
# Integers handled so that
# The int32, int64 types should agree exactly with
# PyArray_INT32, PyArray_INT64 in C
# We need to enforce the same checking as is done
# in arrayobject.h where the order of getting a
# bit-width match is:
# long, longlong, int, short, char
# for int8, int16, int32, int64, int128
def _add_integer_aliases():
    """Register sized integer aliases (int8..., i1..., Int8..., uint...).

    C types are visited in the fixed order long, longlong, int, short,
    char so the first type of a given width claims the sized names,
    mirroring the matching order in arrayobject.h.
    """
    _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
    for ctype in _ctypes:
        val = typeinfo[ctype]
        bits = val[2]
        charname = 'i%d' % (bits//8,)
        ucharname = 'u%d' % (bits//8,)
        intname = 'int%d' % bits
        UIntname = 'UInt%d' % bits
        Intname = 'Int%d' % bits
        uval = typeinfo['U'+ctype]
        typeobj = val[-1]
        utypeobj = uval[-1]
        # Only the first C type of this width registers the names.
        if intname not in allTypes.keys():
            uintname = 'uint%d' % bits
            allTypes[intname] = typeobj
            allTypes[uintname] = utypeobj
            sctypeDict[intname] = typeobj
            sctypeDict[uintname] = utypeobj
            sctypeDict[Intname] = typeobj
            sctypeDict[UIntname] = utypeobj
            sctypeDict[charname] = typeobj
            sctypeDict[ucharname] = utypeobj
            sctypeNA[Intname] = typeobj
            sctypeNA[UIntname] = utypeobj
            sctypeNA[charname] = typeobj
            sctypeNA[ucharname] = utypeobj
        # Reverse/NA mappings are (re)assigned for every C type.
        sctypeNA[typeobj] = Intname
        sctypeNA[utypeobj] = UIntname
        sctypeNA[val[0]] = Intname
        sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
    """Rework names so float/complex/int usage matches Python conventions.

    Adds Python-flavored aliases for the C-derived names, then removes
    entries that would shadow Python builtins or stdlib modules when this
    module is star-imported.
    """
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    if sys.version_info[0] >= 3:
        type_pairs.extend([('bytes_', 'string'),
                           ('str_', 'unicode'),
                           ('string_', 'string')])
    else:
        type_pairs.extend([('str_', 'string'),
                           ('string_', 'string'),
                           ('bytes_', 'string')])
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Remove aliases overriding python types and modules
    to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta']
    if sys.version_info[0] >= 3:
        # Py3K
        to_remove.append('bytes')
        to_remove.append('str')
        to_remove.remove('unicode')
        to_remove.remove('long')
    for t in to_remove:
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
    """Populate _sctype2char_dict: scalar type object -> typechar."""
    for entry in typeinfo.values():
        if not isinstance(entry, tuple):
            continue
        # Skip the 'p'/'P' (pointer-sized) entries so they do not
        # override the canonical character for the same type object.
        if entry[0] not in ['p', 'P']:
            _sctype2char_dict[entry[-1]] = entry[0]
_construct_char_code_lookup()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool, object, str, unicode, void]}
def _add_array_type(typename, bits):
    """Append allTypes['<typename><bits>'] to sctypes[typename], silently
    skipping widths this platform does not provide."""
    key = '%s%d' % (typename, bits)
    if key in allTypes:
        sctypes[typename].append(allTypes[key])
def _set_array_types():
    """Fill the sctypes lists in increasing size and make sure the
    pointer-sized integer types are included."""
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        # A complex is two floats wide.
        _add_array_type('complex', 2*bits)
    # Insert intp/uintp (dtype 'p'/'P') at their itemsize-sorted position
    # when they are not already aliases of a listed type.
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
    """
    Return the scalar type of highest precision of the same kind as the input.

    Parameters
    ----------
    t : dtype or dtype specifier
        The input data type. This can be a `dtype` object or an object that
        is convertible to a `dtype`.

    Returns
    -------
    out : dtype
        The highest precision data type of the same kind (`dtype.kind`) as `t`.

    See Also
    --------
    obj2sctype, mintypecode, sctype2char
    dtype

    Examples
    --------
    >>> np.maximum_sctype(np.int)
    <type 'numpy.int64'>
    >>> np.maximum_sctype(np.uint8)
    <type 'numpy.uint64'>
    >>> np.maximum_sctype(np.complex)
    <type 'numpy.complex192'>
    >>> np.maximum_sctype(str)
    <type 'numpy.string_'>
    >>> np.maximum_sctype('i2')
    <type 'numpy.int64'>
    >>> np.maximum_sctype('f4')
    <type 'numpy.float96'>
    """
    g = obj2sctype(t)
    if g is None:
        # Not recognizable as a scalar type: hand the input back.
        return t
    t = g
    name = t.__name__
    base, bits = _evalname(name)
    if bits == 0:
        # Sizeless kinds (bool, object, flexible): already "maximal".
        return t
    else:
        # sctypes[base] is built in increasing precision; take the last.
        return sctypes[base][-1]
try:
    buffer_type = _types.BufferType
except AttributeError:
    # Py3K: the buffer type is gone; memoryview is its closest analogue.
    buffer_type = memoryview
# Maps Python builtin types to the name of their numpy scalar equivalent.
_python_types = {int: 'int_',
                 float: 'float_',
                 complex: 'complex_',
                 bool: 'bool_',
                 bytes: 'bytes_',
                 unicode: 'unicode_',
                 buffer_type: 'void',
                 }
# Two variants only because Python 2 spells "the type of types" as
# types.TypeType; both fall back to object_ for unknown types.
if sys.version_info[0] >= 3:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        if not isinstance(t, type):
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
else:
    def _python_type(t):
        """returns the type corresponding to a certain Python type"""
        if not isinstance(t, _types.TypeType):
            t = type(t)
        return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
    """
    Determines whether the given object represents a scalar data-type.

    Parameters
    ----------
    rep : any
        If `rep` is an instance of a scalar dtype, True is returned. If not,
        False is returned.

    Returns
    -------
    out : bool
        Boolean result of check whether `rep` is a scalar dtype.

    See Also
    --------
    issubsctype, issubdtype, obj2sctype, sctype2char

    Examples
    --------
    >>> np.issctype(np.int32)
    True
    >>> np.issctype(list)
    False
    >>> np.issctype(1.1)
    False

    Strings are also a scalar type:

    >>> np.issctype(np.dtype('str'))
    True
    """
    if not isinstance(rep, (type, dtype)):
        return False
    try:
        res = obj2sctype(rep)
        if res and res != object_:
            return True
        return False
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only genuine errors mean "not a scalar type".
    except Exception:
        return False
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        If given, this is returned for objects whose types can not be
        determined. If not given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype, maximum_sctype

    Examples
    --------
    >>> np.obj2sctype(np.int32)
    <type 'numpy.int32'>
    >>> np.obj2sctype(np.array([1., 2.]))
    <type 'numpy.float64'>
    >>> np.obj2sctype(np.array([1.j]))
    <type 'numpy.complex128'>

    >>> np.obj2sctype(dict)
    <type 'numpy.object_'>
    >>> np.obj2sctype('string')
    <type 'numpy.string_'>

    >>> np.obj2sctype(1, default=list)
    <type 'list'>
    """
    # Already a scalar type class?  (issubclass raises TypeError for
    # non-class inputs, which we deliberately ignore.)
    try:
        if issubclass(rep, generic):
            return rep
    except TypeError:
        pass
    if isinstance(rep, dtype):
        return rep.type
    if isinstance(rep, type):
        return _python_type(rep)
    if isinstance(rep, ndarray):
        return rep.dtype.type
    # Last resort: ask the dtype constructor.
    try:
        res = dtype(rep)
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; any real failure means "undeterminable".
    except Exception:
        return default
    return res.type
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    Works exactly like the builtin ``issubclass`` except that a
    non-class argument yields False instead of raising TypeError.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype

    Examples
    --------
    >>> np.issubclass_(np.int32, np.int)
    True
    >>> np.issubclass_(np.int32, np.float)
    False
    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        result = False
    return result
def issubsctype(arg1, arg2):
    """
    Determine if the first argument is a subclass of the second argument.

    Both arguments are first normalized to scalar types via obj2sctype,
    so dtype specifiers, arrays and Python types are all accepted.

    Parameters
    ----------
    arg1, arg2 : dtype or dtype specifier
        Data-types.

    Returns
    -------
    out : bool
        The result.

    See Also
    --------
    issctype, issubdtype, obj2sctype

    Examples
    --------
    >>> np.issubsctype('S8', str)
    True
    >>> np.issubsctype(np.array([1]), np.int)
    True
    >>> np.issubsctype(np.array([1]), np.float)
    False
    """
    return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
    """
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        dtype or string representing a typecode.

    Returns
    -------
    out : bool

    See Also
    --------
    issubsctype, issubclass_
    numpy.core.numerictypes : Overview of numpy type hierarchy.

    Examples
    --------
    >>> np.issubdtype('S1', str)
    True
    >>> np.issubdtype(np.float64, np.float32)
    False
    """
    if issubclass_(arg2, generic):
        return issubclass(dtype(arg1).type, arg2)
    # arg2 is not a scalar class: compare against the immediate parent
    # (mro()[1]) of its scalar type, i.e. its abstract kind.
    mro = dtype(arg2).type.mro()
    if len(mro) > 1:
        val = mro[1]
    else:
        val = mro[0]
    return issubclass(dtype(arg1).type, val)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
    """
    Base object for a dictionary for look-up with any alias for an array dtype.

    Instances of `_typedict` can not be used as dictionaries directly,
    first they have to be populated.
    """
    def __getitem__(self, obj):
        # Normalize any alias (char code, Python type, dtype, ...) to its
        # scalar type before the underlying dict lookup.
        return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
    """Fill nbytes/_alignment/_maxvals/_minvals from the typeinfo tuples."""
    for name, val in typeinfo.items():
        if not isinstance(val, tuple):
            continue
        obj = val[-1]
        nbytes[obj] = val[2] // 8   # val[2] is the width in bits
        _alignment[obj] = val[3]
        if (len(val) > 5):
            _maxvals[obj] = val[4]
            _minvals[obj] = val[5]
        else:
            # The typeinfo tuple carries no range information here.
            _maxvals[obj] = None
            _minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
    """
    Return the string representation of a scalar dtype.

    Parameters
    ----------
    sctype : scalar dtype or object
        If a scalar dtype, the corresponding string character is
        returned. If an object, `sctype2char` tries to infer its scalar type
        and then return the corresponding string character.

    Returns
    -------
    typechar : str
        The string character corresponding to the scalar type.

    Raises
    ------
    ValueError
        If `sctype` is an object for which the type can not be inferred.

    See Also
    --------
    obj2sctype, issctype, issubsctype, mintypecode

    Examples
    --------
    >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
    ...     print np.sctype2char(sctype)
    l
    d
    D
    S
    O

    >>> x = np.array([1., 2-1.j])
    >>> np.sctype2char(x)
    'D'
    >>> np.sctype2char(list)
    'O'
    """
    sctype = obj2sctype(sctype)
    if sctype is None:
        raise ValueError("unrecognized type")
    # A KeyError here means the scalar type has no registered char code.
    return _sctype2char_dict[sctype]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
try:
ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
_types.LongType, _types.BooleanType,
_types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
# Py3K
ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
if issubclass(key, allTypes['flexible']):
_typestr[key] = _sctype2char_dict[key]
else:
_typestr[key] = empty((1,), key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
if val not in sctypeDict:
sctypeDict[val] = key
# Add additional strings to the sctypeDict
if sys.version_info[0] >= 3:
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
('str', allTypes['string_']),
'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = name[1]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'efdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'efdgFDG',
'Datetime': 'Mm',
'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
    """Return a dtype both *a* and *b* can coerce to, or None.

    Keeps incrementing through the probe sequence (starting at *a*'s own
    char) until a common type is found.
    """
    if a > b:
        return a
    try:
        thisind = __test_types.index(a.char)
    except ValueError:
        # a.char is outside the probe sequence (e.g. flexible types).
        return None
    return _can_coerce_all([a, b], start=thisind)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
    """Return the first dtype in __test_types (from index *start*) that
    every dtype in *dtypelist* can be coerced to, or None."""
    N = len(dtypelist)
    if N == 0:
        return None
    if N == 1:
        return dtypelist[0]
    thisind = start
    while thisind < __len_test_types:
        newdtype = dtype(__test_types[thisind])
        # A candidate works when it compares >= every input dtype.
        numcoerce = len([x for x in dtypelist if newdtype >= x])
        if numcoerce == N:
            return newdtype
        thisind += 1
    return None
def _register_types():
    # Register the numpy abstract scalar classes with the stdlib
    # `numbers` tower so isinstance checks against numbers.Integral etc.
    # accept numpy scalars.
    numbers.Integral.register(integer)
    numbers.Complex.register(inexact)
    numbers.Real.register(floating)
_register_types()
def find_common_type(array_types, scalar_types):
    """
    Determine common type following standard coercion rules.

    Parameters
    ----------
    array_types : sequence
        A list of dtypes or dtype convertible objects representing arrays.
    scalar_types : sequence
        A list of dtypes or dtype convertible objects representing scalars.

    Returns
    -------
    datatype : dtype
        The common data type, which is the maximum of `array_types` ignoring
        `scalar_types`, unless the maximum of `scalar_types` is of a
        different kind (`dtype.kind`). If the kind is not understood, then
        None is returned.

    See Also
    --------
    dtype, common_type, can_cast, mintypecode

    Examples
    --------
    >>> np.find_common_type([], [np.int64, np.float32, np.complex])
    dtype('complex128')
    >>> np.find_common_type([np.int64, np.float32], [])
    dtype('float64')

    The standard casting rules ensure that a scalar cannot up-cast an
    array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) than
    the array:

    >>> np.find_common_type([np.float32], [np.int64, np.float64])
    dtype('float32')

    Complex is of a different type, so it up-casts the float in the
    `array_types` argument:

    >>> np.find_common_type([np.float32], [np.complex])
    dtype('complex128')

    Type specifier strings are convertible to dtypes and can therefore
    be used instead of dtypes:

    >>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
    dtype('complex128')
    """
    array_types = [dtype(x) for x in array_types]
    scalar_types = [dtype(x) for x in scalar_types]
    maxa = _can_coerce_all(array_types)
    maxsc = _can_coerce_all(scalar_types)
    if maxa is None:
        return maxsc
    if maxsc is None:
        return maxa
    try:
        index_a = _kind_list.index(maxa.kind)
        index_sc = _kind_list.index(maxsc.kind)
    except ValueError:
        # Unknown kind: signal "not understood" to the caller.
        return None
    if index_sc > index_a:
        # The scalar kind is higher in the hierarchy, so it may up-cast
        # the array type.
        return _find_common_coerce(maxsc, maxa)
    else:
        return maxa
|
mit
|
javiplx/cobbler-old_jobs
|
cobbler/yaml/stream.py
|
12
|
5417
|
"""
pyyaml legacy
Copyright (c) 2001 Steve Howell and Friends; All Rights Reserved
(see open source license information in docs/ directory)
"""
import re
import string
def indentLevel(line):
    """Return the number of leading space characters on *line*."""
    count = 0
    for ch in line:
        if ch != ' ':
            break
        count += 1
    return count
class LineNumberStream:
    """Base line source that tracks the current line number.

    Subclasses supply getLine(); get() advances the counter and strips
    trailing newline characters from the returned line.
    """
    def __init__(self, filename=None):
        self.curLine = 0
        self.filename = filename
    def get(self):
        raw = self.getLine()
        self.curLine += 1  # subclasses index their buffers with curLine
        if raw:
            raw = noLineFeed(raw)
        return raw
    def lastLineRead(self):
        return self.curLine
class FileStream(LineNumberStream):
    """LineNumberStream that reads lines from a file on disk."""
    def __init__(self, filename):
        # NOTE(review): the handle is never closed explicitly; it is only
        # released when this stream is garbage-collected.
        self.fp = open(filename)
        LineNumberStream.__init__(self, filename)
    def getLine(self):
        # readline() returns '' at EOF; the protocol here uses None.
        line = self.fp.readline()
        if line == '': line = None
        return line
class StringStream(LineNumberStream):
    """LineNumberStream that reads lines from an in-memory string."""
    def __init__(self, text):
        self.lines = split(text)
        self.numLines = len(self.lines)
        LineNumberStream.__init__(self)
    def getLine(self):
        # Implicitly returns None once curLine runs past the buffer.
        if self.curLine < self.numLines:
            return self.lines[self.curLine]
def split(text):
    """Split *text* into lines, dropping one trailing empty line.

    Uses the str.split method instead of string.split(), which was
    removed in Python 3; behavior is unchanged on Python 2.
    """
    lines = text.split('\n')
    if lines and lines[-1] == '':
        lines.pop()
    return lines
def eatNewLines(stream):
    """Pop lines from *stream* until a non-blank one (or None at EOF).

    Uses the str.strip method instead of string.strip(), which was
    removed in Python 3; behavior is unchanged on Python 2.
    """
    while 1:
        line = stream.get()
        if line is None or line.strip():
            return line
COMMENT_LINE_REGEX = re.compile(R"\s*#")
def isComment(line):
    """Return a truthy match when *line* is a comment line (optional
    leading whitespace then '#'); False/None otherwise."""
    if line is None:
        return False
    return COMMENT_LINE_REGEX.match(line)
class CommentEater:
    """Wraps a line stream, skipping leading blank lines and comments.

    Provides one line of lookahead: ``self.line`` holds the current
    line and ``self.peeked`` flags whether it is still unconsumed.
    """
    def __init__(self, stream):
        self.stream = stream
        self.peeked = 1
        self.line = eatNewLines(stream)
        self.eatComments()
    def eatComments(self):
        # Advance past any run of comment lines.
        while isComment(self.line):
            self.line = self.stream.get()
    def peek(self):
        # Return the lookahead line, fetching a fresh one if the previous
        # lookahead was consumed by pop().
        if self.peeked:
            return self.line
        self.peeked = 1
        self.line = self.stream.get()
        self.eatComments()
        return self.line
    def lastLineRead(self):
        return self.stream.lastLineRead()
    def pop(self):
        # Consume and return the current lookahead line.
        data = self.peek()
        self.peeked = 0
        return data
class NestedText:
    """View of a CommentEater that strips the current indentation level.

    ``oldIndents`` is a stack of enclosing indent levels; popping past a
    dedent restores the previous level.
    """
    def __init__(self, stream):
        self.commentEater = CommentEater(stream)
        self.reset()
    def lastLineRead(self):
        return self.commentEater.lastLineRead()
    def reset(self):
        self.indentLevel = 0
        self.oldIndents = [0]
    def peek(self):
        # Next line with the current indent stripped; '' for a blank
        # line; None (implicitly) when the line dedents below this level.
        nextLine = self.commentEater.peek()
        if nextLine is not None:
            if indentLevel(nextLine) >= self.indentLevel:
                return nextLine[self.indentLevel:]
            elif nextLine == '':
                return ''
    def pop(self):
        line = self.peek()
        if line is None:
            # End of this nesting level: restore the enclosing indent.
            self.indentLevel = self.oldIndents.pop()
            return
        self.commentEater.pop()
        return line
    def popNestedLines(self):
        # Collect every line of the more-deeply-indented block starting
        # next, or [] when the next line is not indented further.
        nextLine = self.peek()
        if nextLine is None or nextLine == '' or nextLine[0] != ' ':
            return []
        self.nestToNextLine()
        lines = []
        while 1:
            line = self.pop()
            if line is None:
                break
            lines.append(line)
        return lines
    def nestToNextLine(self):
        line = self.commentEater.peek()
        indentation = indentLevel(line)
        # NOTE: error() is defined on the NestedDocs subclass.
        if len(self.oldIndents) > 1 and indentation <= self.indentLevel:
            self.error("Inadequate indentation", line)
        self.setNewIndent(indentation)
    def nestBySpecificAmount(self, adjust):
        self.setNewIndent(self.indentLevel + adjust)
    def setNewIndent(self, indentLevel):
        # The parameter shadows the module-level indentLevel() function
        # within this method.
        self.oldIndents.append(self.indentLevel)
        self.indentLevel = indentLevel
class YamlLoaderException(Exception):
    """Parse error carrying message, line number, offending line, filename."""
    def __init__(self, *args):
        (self.msg, self.lineNum, self.line, self.filename) = args
    def __str__(self):
        # The template lines start at column 0 on purpose: their content
        # is part of the rendered message.
        msg = """\
%(msg)s:
near line %(lineNum)d:
%(line)s
""" % self.__dict__
        if self.filename:
            msg += "file: " + self.filename
        return msg
class NestedDocs(NestedText):
    """NestedText specialized for multi-document streams separated by '---'.

    pop() returns None at a document separator; the caller must then call
    popDocSep() to consume it before popping further lines.
    """
    def __init__(self, stream):
        self.filename = stream.filename
        NestedText.__init__(self,stream)
        line = NestedText.peek(self)
        self.sep = '---'
        if self.startsWithSep(line):
            self.eatenDocSep = NestedText.pop(self)
        else:
            # Pretend a separator preceded the first document.
            self.eatenDocSep = self.sep
    def startsWithSep(self,line):
        if line and self.sep == line[:3]: return 1
        return 0
    def popDocSep(self):
        # Hand back the pending separator and reset indentation state for
        # the next document.
        line = self.eatenDocSep
        self.eatenDocSep = None
        self.reset()
        return line
    def pop(self):
        if self.eatenDocSep is not None:
            # Originally `raise "error"`: string exceptions are a
            # TypeError since Python 2.6, so raise a real exception.
            raise RuntimeError(
                "popDocSep() must be called to consume the pending "
                "document separator before pop()")
        line = self.commentEater.peek()
        if line and self.startsWithSep(line):
            self.eatenDocSep = NestedText.pop(self)
            return None
        return NestedText.pop(self)
    def error(self, msg, line):
        raise YamlLoaderException(msg, self.lastLineRead(), line, self.filename)
def noLineFeed(s):
    """Return *s* with all trailing '\\n' and '\\r' characters removed."""
    end = len(s)
    while end and s[end - 1] in ('\n', '\r'):
        end -= 1
    return s[:end]
|
gpl-2.0
|
finoradin/automation-tools
|
transfers/examples/tms.py
|
1
|
4164
|
#!/usr/bin/env python
from __future__ import print_function
import json
import os
import sys
import urllib2
import ast
import csv
def main(transfer_path):
    """Fetch TMS object/component metadata for *transfer_path* and write
    it to <transfer_path>/metadata/metadata.csv.

    The transfer directory name must be
    '<componentNumber>---<componentID>---<objectID>'.  Returns 1 when the
    name does not match, 0 on success.
    """
    basename = os.path.basename(transfer_path)
    try:
        comp_num, comp_id, obj_id = basename.split('---')
    except ValueError:
        # Not a TMS-style transfer name; nothing to do.
        return 1
    # get the object ("tombstone") metadata
    object_url = "http://vmsqlsvcs.museum.moma.org/TMSAPI/TmsObjectSvc/TmsObjects.svc/GetTombstoneDataRest/ObjectID/"+obj_id
    object_request = json.load(urllib2.urlopen(object_url))
    # get the component metadata
    component_url = "http://vmsqlsvcs.museum.moma.org/TMSAPI/TmsObjectSvc/TmsObjects.svc/GetComponentDetails/Component/"+comp_id
    component_request = json.load(urllib2.urlopen(component_url))
    # put object metadata in its place
    tombstone = object_request["GetTombstoneDataRestIdResult"]
    dc_ident1 = tombstone["ObjectID"]
    dc_ident2 = tombstone["ObjectNumber"]
    dc_title = tombstone["Title"]
    dc_creator = tombstone["DisplayName"]
    dc_date = tombstone["Dated"]
    dc_format1 = tombstone["Classification"]
    dc_format2 = tombstone["Medium"]
    # put component metadata in its place
    details = component_request["GetComponentDetailsResult"]
    componentName = details["ComponentName"]
    componentNumber = details["ComponentNumber"]
    componentID = details["ComponentID"]
    Attributes = details["Attributes"]
    # initialize component variables
    componentStatus = ""
    componentFormat = ""
    # Attributes arrives as a Python-literal string; parse it safely.
    try:
        Attributes = ast.literal_eval(Attributes)
    except SyntaxError:
        print ("Caught a SyntaxError")
    except ValueError:
        print ("Caught a ValueError")
    if not isinstance(Attributes, list):
        # literal_eval failed (or returned something unexpected);
        # iterating the raw string below would crash with a TypeError.
        Attributes = []
    componentDate = ''
    componentChannels = ''
    componentCopyinSet = ''
    for item in Attributes:
        try:
            if item['Media Label'] == 'Created Date':
                componentDate = item['Remarks']
            if item['Media Label'] == 'Channels':
                componentChannels = item['Remarks']
            if item['Media Label'] == 'Copy in set':
                componentCopyinSet = item['Remarks']
            componentStatus = item['Status']
            # componentFormat is kept (though unused below) so a missing
            # 'Media Format' key still routes through the KeyError path.
            componentFormat = item['Media Format']
        except KeyError:
            print ("nada")
    # put everything in a dictionary
    metadataDict = {
        'parts': 'objects',
        'dc.identifier': comp_num,
        'dc.title': dc_title,
        'dc.creator': dc_creator,
        'dc.date': dc_date,
        'dc.format': dc_format2,
        'MoMAobjectID': dc_ident1,
        'MoMAobjectNumber': dc_ident2,
        'MoMAclassification': dc_format1,
        'MoMAcomponentName': componentName,
        'MoMAcomponentNumber': componentNumber,
        'MoMAcomponentID': componentID,
        'MoMAcomponentCreatedDate': componentDate,
        'MoMAchannels': componentChannels,
        'MoMAcopyInSet': componentCopyinSet,
        'MoMAstatus': componentStatus,
    }
    # Drop empty values before writing.
    cleanMetadataDict = dict((k, v) for k, v in metadataDict.iteritems() if v)
    metadata_path = os.path.join(transfer_path, 'metadata')
    if not os.path.exists(metadata_path):
        os.makedirs(metadata_path)
    metadata_path = os.path.join(metadata_path, 'metadata.csv')
    # keys() and values() of the same dict correspond positionally.
    keyList = list(cleanMetadataDict.keys())
    valueList = list(cleanMetadataDict.values())
    # Close the CSV file deterministically (the original leaked the
    # handle returned by open()).
    with open(metadata_path, "wb") as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(keyList)
        writer.writerow(valueList)
    return 0
if __name__ == '__main__':
transfer_path = sys.argv[1]
sys.exit(main(transfer_path))
|
agpl-3.0
|
bcheung92/Paperproject
|
gem5/src/mem/slicc/generate/dot.py
|
92
|
2077
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util.code_formatter import code_formatter
def printDotty(sm, code):
    """Emit a Graphviz dot digraph describing the state machine *sm*.

    Each non-ignored transition becomes an edge from the current state to the
    next state, labelled "event/actions".  Output is written through *code*,
    a code_formatter-style callable providing indent()/dedent().
    """
    code('digraph ${{sm.getIdent()}} {')
    code.indent()
    for t in sm.transitions:
        # Don't print ignored transitions
        if t.getActionShorthands() in ("--", "z"):
            continue
        # BUGFIX: the edge line was missing the closing '}' of its ${{...}}
        # template and the label line was missing its closing ']', which
        # produced syntactically invalid dot output.
        code('${{t.getStateShorthand()}} -> ${{t.getNextStateShorthand()}}')
        code(' [label="${{t.getEventShorthand()}}/${{t.getActionShorthands()}}"]')
    code.dedent()
    code('}')
|
mit
|
jalexvig/tensorflow
|
tensorflow/contrib/factorization/python/ops/factorization_ops_test_utils.py
|
116
|
6182
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for factorization_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# 5x7 float32 test matrix used by the utilities in this module; zero entries
# are treated as unobserved (they are dropped by np.nonzero-based slicing).
INPUT_MATRIX = np.array(
    [[0.1, 0.0, 0.2, 0.0, 0.4, 0.5, 0.0],
     [0.0, 1.1, 0.0, 1.3, 1.4, 0.0, 1.6],
     [2.0, 0.0, 0.0, 2.3, 0.0, 2.5, 0.0],
     [3.0, 0.0, 3.2, 3.3, 0.0, 3.5, 0.0],
     [0.0, 4.1, 0.0, 0.0, 4.4, 0.0, 4.6]]).astype(np.float32)
def remove_empty_rows_columns(np_matrix):
  """Strip rows and columns of a matrix whose entries sum to zero.

  Args:
    np_matrix: A numpy array.

  Returns:
    A tuple (mat, nz_row_ids, nz_col_ids) where:
      mat: np_matrix with its zero-sum rows and columns removed.
      nz_row_ids: numpy array mapping each new row index back to the
        original row index.
      nz_col_ids: numpy array mapping each new column index back to the
        original column index.
  """
  keep_rows = np.sum(np_matrix, axis=1) != 0
  keep_cols = np.sum(np_matrix, axis=0) != 0
  nz_row_ids = np.where(keep_rows)[0]
  nz_col_ids = np.where(keep_cols)[0]
  mat = np_matrix[np.ix_(nz_row_ids, nz_col_ids)]
  return mat, nz_row_ids, nz_col_ids
def np_matrix_to_tf_sparse(np_matrix,
                           row_slices=None,
                           col_slices=None,
                           transpose=False,
                           shuffle=False):
  """Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
  rows, cols = np.nonzero(np_matrix)
  # Only allow slices of whole rows or whole columns.
  assert not (row_slices is not None and col_slices is not None)
  if row_slices is not None:
    keep = np.concatenate([np.where(rows == r)[0] for r in row_slices], 0)
    rows, cols = rows[keep], cols[keep]
  if col_slices is not None:
    keep = np.concatenate([np.where(cols == c)[0] for c in col_slices], 0)
    rows, cols = rows[keep], cols[keep]
  if shuffle:
    order = list(range(len(rows)))
    random.shuffle(order)
    rows, cols = rows[order], cols[order]
  # When transposing, simply swap which axis supplies the leading index.
  if transpose:
    first, second = cols, rows
  else:
    first, second = rows, cols
  ind = np.concatenate(
      (np.expand_dims(first, 1), np.expand_dims(second, 1)), 1).astype(np.int64)
  val = np_matrix[(rows, cols)].astype(np.float32)
  shape = np.array([max(first) + 1, max(second) + 1]).astype(np.int64)
  return sparse_tensor.SparseTensor(ind, val, shape)
def calculate_loss(input_mat, row_factors, col_factors, regularization=None,
                   w0=1., row_weights=None, col_weights=None):
  """Calculates the loss of a given factorization.

  Using a non distributed method, different than the one implemented in the
  WALS model. The weight of an observed entry (i, j) (i.e. such that
  input_mat[i, j] is non zero) is (w0 + row_weights[i] * col_weights[j]).

  Args:
    input_mat: The input matrix, a SparseTensor of rank 2.
    row_factors: The row factors, a dense Tensor of rank 2.
    col_factors: The col factors, a dense Tensor of rank 2.
    regularization: the regularization coefficient, a scalar.
    w0: the weight of unobserved entries. A scalar.
    row_weights: A dense tensor of rank 1.
    col_weights: A dense tensor of rank 1.

  Returns:
    The total loss.
  """
  # Row/col weights as broadcastable tensors; scalar 1.0 when not supplied,
  # so the weighted term below reduces to the unweighted residual.
  wr = (array_ops.expand_dims(row_weights, 1) if row_weights is not None
        else constant_op.constant(1.))
  wc = (array_ops.expand_dims(col_weights, 0) if col_weights is not None
        else constant_op.constant(1.))
  reg = (regularization if regularization is not None
         else constant_op.constant(0.))
  # Split the (nnz, 2) index matrix into separate row and column index tensors.
  row_indices, col_indices = array_ops.split(input_mat.indices,
                                             axis=1,
                                             num_or_size_splits=2)
  gathered_row_factors = array_ops.gather(row_factors, row_indices)
  gathered_col_factors = array_ops.gather(col_factors, col_indices)
  # Low-rank approximation evaluated only at the observed (nonzero) entries.
  sp_approx_vals = array_ops.squeeze(math_ops.matmul(
      gathered_row_factors, gathered_col_factors, adjoint_b=True))
  sp_approx = sparse_tensor.SparseTensor(
      indices=input_mat.indices,
      values=sp_approx_vals,
      dense_shape=input_mat.dense_shape)
  sp_approx_sq = math_ops.square(sp_approx)
  row_norm = math_ops.reduce_sum(math_ops.square(row_factors))
  col_norm = math_ops.reduce_sum(math_ops.square(col_factors))
  # Frobenius norm of the full approximation, used for the unobserved term.
  row_col_norm = math_ops.reduce_sum(math_ops.square(math_ops.matmul(
      row_factors, col_factors, transpose_b=True)))
  # Residual on the observed entries: input - approximation.
  resid = sparse_ops.sparse_add(input_mat, sp_approx * (-1))
  resid_sq = math_ops.square(resid)
  loss = w0 * (
      sparse_ops.sparse_reduce_sum(resid_sq) -
      sparse_ops.sparse_reduce_sum(sp_approx_sq)
  )
  loss += (sparse_ops.sparse_reduce_sum(wr * (resid_sq * wc)) +
           w0 * row_col_norm + reg * (row_norm + col_norm))
  # NOTE(review): .eval() requires an active default session (TF1-style).
  return loss.eval()
|
apache-2.0
|
scripteed/mtasa-blue
|
vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-large-pdb.py
|
218
|
2332
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
pdb_file = open(pdb_path, 'rb')
pdb_file.seek(32, 0)
page_size = struct.unpack('<I', pdb_file.read(4))[0]
if page_size != expected_page_size:
print "Expected page size of %d, got %d for PDB file `%s'." % (
expected_page_size, page_size, pdb_path)
# Driver: only meaningful on Windows, where the MSVC toolchain is available.
# Each *_pdb target is built and then its image and PDB are checked: targets
# built with msvs_large_pdb are expected to produce 4096-byte PDB pages,
# the small variants 1024-byte pages.
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
  test.run_gyp('large-pdb.gyp', chdir=CHDIR)
  test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
  CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
  test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
  CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
  test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
  CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
  test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
  CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
  test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
  CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
  # This target has a different PDB name because it uses an
  # 'msvs_large_pdb_path' variable.
  test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
  CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
                   pdb_basename='foo.pdb')
  # This target has a different output name because it uses 'product_name'.
  test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
  CheckImageAndPdb(test, 'bar.exe', 4096)
  test.pass_test()
|
gpl-3.0
|
sxjscience/tvm
|
python/tvm/micro/compiler.py
|
1
|
11811
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines interfaces and default implementations for compiling and flashing code."""
import abc
import glob
import os
import re
from tvm.contrib import binutil
import tvm.target
from . import build
from . import class_factory
from . import debugger
from . import transport
class DetectTargetError(Exception):
    """Signals that no target comment could be found in the supplied sources."""
class NoDefaultToolchainMatchedError(Exception):
    """Signals that the target string matched none of the default toolchains."""
class Compiler(metaclass=abc.ABCMeta):
    """The compiler abstraction used with micro TVM."""

    # Matches the "// tvm target: <target>" comment emitted into generated C
    # sources; used to recover the compilation target from the sources alone.
    TVM_TARGET_RE = re.compile(r"^// tvm target: (.*)$")

    @classmethod
    def _target_from_sources(cls, sources):
        """Determine the target used to generate the given source files.

        Parameters
        ----------
        sources : List[str]
            The paths to source files to analyze.

        Returns
        -------
        tvm.target.Target :
            A Target instance reconstructed from the target string listed in the source files.

        Raises
        ------
        DetectTargetError :
            If zero or multiple distinct target strings are found in `sources`.
        """
        target_strs = set()
        for obj in sources:
            with open(obj) as obj_f:
                for line in obj_f:
                    m = cls.TVM_TARGET_RE.match(line)
                    if m:
                        target_strs.add(m.group(1))
        if len(target_strs) != 1:
            raise DetectTargetError(
                "autodetecting cross-compiler: could not extract TVM target from C source; regex "
                f"{cls.TVM_TARGET_RE.pattern} does not match any line in sources: "
                f'{", ".join(sources)}'
            )
        target_str = next(iter(target_strs))
        return tvm.target.create(target_str)

    # Maps regexes identifying CPUs to the default toolchain prefix for that CPU.
    TOOLCHAIN_PREFIX_BY_CPU_REGEX = {
        r"cortex-[am].*": "arm-none-eabi-",
        "x86[_-]64": "",
        "native": "",
    }

    def _autodetect_toolchain_prefix(self, target):
        """Return the toolchain prefix (e.g. 'arm-none-eabi-') for target's mcpu.

        Raises
        ------
        NoDefaultToolchainMatchedError :
            If target's mcpu matches zero, or more than one, default prefix.
        """
        matches = []
        for regex, prefix in self.TOOLCHAIN_PREFIX_BY_CPU_REGEX.items():
            if re.match(regex, target.attrs["mcpu"]):
                matches.append(prefix)
        if matches:
            if len(matches) != 1:
                # BUGFIX: this message previously interpolated an undefined
                # name `opt`, so this branch raised NameError instead of
                # NoDefaultToolchainMatchedError.
                raise NoDefaultToolchainMatchedError(
                    f'cpu "{target.attrs["mcpu"]}" matched more than 1 default toolchain '
                    f'prefix: {", ".join(matches)}. '
                    "Specify cc.cross_compiler to create_micro_library()"
                )
            return matches[0]
        raise NoDefaultToolchainMatchedError(
            f"target {str(target)} did not match any default toolchains"
        )

    def _defaults_from_target(self, target):
        """Determine the default compiler options from the target specified.

        Parameters
        ----------
        target : tvm.target.Target

        Returns
        -------
        List[str] :
            Default options used the configure the compiler for that target.
        """
        opts = []
        # TODO use march for arm(https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html)?
        if target.attrs.get("mcpu"):
            opts.append(f'-march={target.attrs["mcpu"]}')
        if target.attrs.get("mfpu"):
            opts.append(f'-mfpu={target.attrs["mfpu"]}')
        return opts

    @abc.abstractmethod
    def library(self, output, sources, options=None):
        """Build a library from the given source files.

        Parameters
        ----------
        output : str
            The path to the library that should be created. The containing directory
            is guaranteed to be empty and should be the base_dir for the returned
            Artifact.
        sources : List[str]
            A list of paths to source files that should be compiled.
        options : Optional[List[str]]
            If given, additional command-line flags to pass to the compiler.

        Returns
        -------
        MicroLibrary :
            The compiled library, as a MicroLibrary instance.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def binary(self, output, objects, options=None, link_main=True, main_options=None):
        """Link a binary from the given object and/or source files.

        Parameters
        ----------
        output : str
            The path to the binary that should be created. The containing directory
            is guaranteed to be empty and should be the base_dir for the returned
            Artifact.
        objects : List[MicroLibrary]
            A list of paths to source files or libraries that should be compiled. The final binary
            should be statically-linked.
        options: Optional[List[str]]
            If given, additional command-line flags to pass to the compiler.
        link_main: Optional[bool]
            True if the standard main entry point for this Compiler should be included in the
            binary. False if a main entry point is provided in one of `objects`.
        main_options: Optional[List[str]]
            If given, additional command-line flags to pass to the compiler when compiling the
            main() library. In some cases, the main() may be compiled directly into the final binary
            along with `objects` for logistical reasons. In those cases, specifying main_options is
            an error and ValueError will be raised.

        Returns
        -------
        MicroBinary :
            The compiled binary, as a MicroBinary instance.
        """
        raise NotImplementedError()

    @property
    def flasher_factory(self):
        """Produce a FlasherFactory for a Flasher instance suitable for this Compiler."""
        raise NotImplementedError("The Compiler base class doesn't define a flasher.")

    def flasher(self, **kw):
        """Return a Flasher that can be used to program a produced MicroBinary onto the target."""
        return self.flasher_factory.override_kw(**kw).instantiate()
class IncompatibleTargetError(Exception):
    """Signals a mismatch between the sources' target and the compiler's target."""
class DefaultCompiler(Compiler):
    """A Compiler implementation that attempts to use the system-installed GCC."""

    def __init__(self, target=None):
        # `target` may be a string or a tvm.target.Target; normalize strings
        # to Target instances.
        super(DefaultCompiler, self).__init__()
        self.target = target
        if isinstance(target, str):
            self.target = tvm.target.create(target)

    def library(self, output, sources, options=None):
        """Compile `sources` to objects and archive them into a static library.

        `options` is used as a dict here; recognized keys are
        "<ext>flags" (e.g. "cflags" for .c, "ccflags" for .cc) and
        "include_dirs".  Returns a tvm.micro.MicroLibrary rooted at `output`.
        """
        options = options if options is not None else {}
        try:
            target = self._target_from_sources(sources)
        except DetectTargetError:
            assert self.target is not None, (
                "Must specify target= to constructor when compiling sources which don't specify a "
                "target"
            )
            target = self.target
        # Refuse to mix a configured target with a different auto-detected one.
        if self.target is not None and str(self.target) != str(target):
            raise IncompatibleTargetError(
                f"auto-detected target {target} differs from configured {self.target}"
            )
        prefix = self._autodetect_toolchain_prefix(target)
        outputs = []
        for src in sources:
            src_base, src_ext = os.path.splitext(os.path.basename(src))
            # gcc for C, g++ for C++; any other extension raises KeyError.
            compiler_name = {".c": "gcc", ".cc": "g++", ".cpp": "g++"}[src_ext]
            args = [prefix + compiler_name, "-g"]
            args.extend(self._defaults_from_target(target))
            # Extension-specific flags, e.g. options["cflags"] for ".c".
            args.extend(options.get(f"{src_ext[1:]}flags", []))
            for include_dir in options.get("include_dirs", []):
                args.extend(["-I", include_dir])
            output_filename = f"{src_base}.o"
            output_abspath = os.path.join(output, output_filename)
            binutil.run_cmd(args + ["-c", "-o", output_abspath, src])
            outputs.append(output_abspath)
        # Archive all objects into <basename(output)>.a and index it.
        output_filename = f"{os.path.basename(output)}.a"
        output_abspath = os.path.join(output, output_filename)
        binutil.run_cmd([prefix + "ar", "-r", output_abspath] + outputs)
        binutil.run_cmd([prefix + "ranlib", output_abspath])
        return tvm.micro.MicroLibrary(output, [output_filename])

    def binary(self, output, objects, options=None, link_main=True, main_options=None):
        """Link `objects` into an executable named after basename(output).

        Optionally compiles and links the standard host main() sources from
        the CRT tree.  Returns a tvm.micro.MicroBinary.
        """
        assert self.target is not None, (
            "must specify target= to constructor, or compile sources which specify the target "
            "first"
        )
        args = [self._autodetect_toolchain_prefix(self.target) + "g++"]
        args.extend(self._defaults_from_target(self.target))
        if options is not None:
            args.extend(options.get("ldflags", []))
            for include_dir in options.get("include_dirs", []):
                args.extend(["-I", include_dir])
        output_filename = os.path.basename(output)
        output_abspath = os.path.join(output, output_filename)
        args.extend(["-g", "-o", output_abspath])
        if link_main:
            host_main_srcs = glob.glob(os.path.join(build.CRT_ROOT_DIR, "host", "*.cc"))
            if main_options:
                # main() needs its own flags, so build it as a separate library.
                main_lib = self.library(os.path.join(output, "host"), host_main_srcs, main_options)
                for lib_name in main_lib.library_files:
                    args.append(main_lib.abspath(lib_name))
            else:
                args.extend(host_main_srcs)
        for obj in objects:
            for lib_name in obj.library_files:
                args.append(obj.abspath(lib_name))
        binutil.run_cmd(args)
        return tvm.micro.MicroBinary(output, output_filename, [])

    @property
    def flasher_factory(self):
        # Host execution: "flashing" just spawns the binary locally.
        return FlasherFactory(HostFlasher, [], {})
class Flasher(metaclass=abc.ABCMeta):
    """Interface for programming binaries onto a device and opening a transport."""

    @abc.abstractmethod
    def flash(self, micro_binary):
        """Program micro_binary onto the device.

        Parameters
        ----------
        micro_binary : MicroBinary
            The binary artifact to flash.

        Returns
        -------
        transport.TransportContextManager :
            A ContextManager that can be used to create and tear down an RPC
            transport layer between this TVM instance and the newly-flashed
            binary.
        """
        raise NotImplementedError()
class FlasherFactory(class_factory.ClassFactory):
    """A ClassFactory for Flasher instances."""

    # Instantiated classes must derive from Flasher.
    SUPERCLASS = Flasher
class HostFlasher(Flasher):
    """Flasher that simply launches the binary as a subprocess on the host."""

    def __init__(self, debug=False):
        # When True, run the binary under GDB and wrap its transport.
        self.debug = debug

    def flash(self, micro_binary):
        binary_path = micro_binary.abspath(micro_binary.binary_file)
        if not self.debug:
            return transport.SubprocessTransport([binary_path])
        gdb_wrapper = debugger.GdbTransportDebugger([binary_path])
        return transport.DebugWrapperTransport(
            debugger=gdb_wrapper, transport=gdb_wrapper.transport()
        )
|
apache-2.0
|
ThiagoGarciaAlves/intellij-community
|
python/lib/Lib/rfc822.py
|
89
|
33167
|
"""RFC 2822 message manipulation.
Note: This is only a very rough sketch of a full RFC-822 parser; in particular
the tokenizing of addresses does not adhere to all the quoting rules.
Note: RFC 2822 is a long awaited update to RFC 822. This module should
conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some
effort at RFC 2822 updates have been made, but a thorough audit has not been
performed. Consider any RFC 2822 non-conformance to be a bug.
RFC 2822: http://www.faqs.org/rfcs/rfc2822.html
RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete)
Directions for use:
To create a Message object: first open a file, e.g.:
fp = open(file, 'r')
You can use any other legal way of getting an open file object, e.g. use
sys.stdin or call os.popen(). Then pass the open file object to the Message()
constructor:
m = Message(fp)
This class can work with any input object that supports a readline method. If
the input object has seek and tell capability, the rewindbody method will
work; also illegal lines will be pushed back onto the input stream. If the
input object lacks seek but has an `unread' method that can push back a line
of input, Message will use that to push back illegal lines. Thus this class
can be used to parse messages coming from a buffered stream.
The optional `seekable' argument is provided as a workaround for certain stdio
libraries in which tell() discards buffered data before discovering that the
lseek() system call doesn't work. For maximum portability, you should set the
seekable argument to zero to prevent that initial tell() when passing in
an unseekable object such as a file object created from a socket object. If
it is 1 on entry -- which it is by default -- the tell() method of the open
file object is called once; if this raises an exception, seekable is reset to
0. For other nonzero values of seekable, this test is not made.
To get the text of a particular header there are several methods:
str = m.getheader(name)
str = m.getrawheader(name)
where name is the name of the header, e.g. 'Subject'. The difference is that
getheader() strips the leading and trailing whitespace, while getrawheader()
doesn't. Both functions retain embedded whitespace (including newlines)
exactly as they are specified in the header, and leave the case of the text
unchanged.
For addresses and address lists there are functions
realname, mailaddress = m.getaddr(name)
list = m.getaddrlist(name)
where the latter returns a list of (realname, mailaddr) tuples.
There is also a method
time = m.getdate(name)
which parses a Date-like field and returns a time-compatible tuple,
i.e. a tuple such as returned by time.localtime() or accepted by
time.mktime().
See the class definition for lower level access methods.
There are also some utility functions here.
"""
# Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com>
import time
__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]
_blanklines = ('\r\n', '\n') # Optimization for islast()
class Message:
    """Represents a single RFC 2822-compliant message."""

    def __init__(self, fp, seekable = 1):
        """Initialize the class instance and read the headers."""
        if seekable == 1:
            # Exercise tell() to make sure it works
            # (and then assume seek() works, too)
            try:
                fp.tell()
            except (AttributeError, IOError):
                seekable = 0
        # fp: any object with a readline() method; seekability enables
        # rewindbody() and pushback of non-header lines.
        self.fp = fp
        self.seekable = seekable
        self.startofheaders = None
        self.startofbody = None
        #
        if self.seekable:
            try:
                self.startofheaders = self.fp.tell()
            except IOError:
                self.seekable = 0
        #
        self.readheaders()
        #
        if self.seekable:
            try:
                self.startofbody = self.fp.tell()
            except IOError:
                self.seekable = 0

    def rewindbody(self):
        """Rewind the file to the start of the body (if seekable)."""
        if not self.seekable:
            raise IOError, "unseekable file"
        self.fp.seek(self.startofbody)

    def readheaders(self):
        """Read header lines.

        Read header lines up to the entirely blank line that terminates them.
        The (normally blank) line that ends the headers is skipped, but not
        included in the returned list.  If a non-header line ends the headers,
        (which is an error), an attempt is made to backspace over it; it is
        never included in the returned list.

        The variable self.status is set to the empty string if all went well,
        otherwise it is an error message.  The variable self.headers is a
        completely uninterpreted list of lines contained in the header (so
        printing them will reproduce the header exactly as it appears in the
        file).
        """
        self.dict = {}
        self.unixfrom = ''
        self.headers = lst = []
        self.status = ''
        headerseen = ""
        firstline = 1
        # Prefer the input's unread() pushback if present; otherwise fall
        # back to tell()/seek() for undoing a read.
        startofline = unread = tell = None
        if hasattr(self.fp, 'unread'):
            unread = self.fp.unread
        elif self.seekable:
            tell = self.fp.tell
        while 1:
            if tell:
                try:
                    startofline = tell()
                except IOError:
                    startofline = tell = None
                    self.seekable = 0
            line = self.fp.readline()
            if not line:
                self.status = 'EOF in headers'
                break
            # Skip unix From name time lines
            if firstline and line.startswith('From '):
                self.unixfrom = self.unixfrom + line
                continue
            firstline = 0
            if headerseen and line[0] in ' \t':
                # It's a continuation line.
                lst.append(line)
                x = (self.dict[headerseen] + "\n " + line.strip())
                self.dict[headerseen] = x.strip()
                continue
            elif self.iscomment(line):
                # It's a comment.  Ignore it.
                continue
            elif self.islast(line):
                # Note!  No pushback here!  The delimiter line gets eaten.
                break
            headerseen = self.isheader(line)
            if headerseen:
                # It's a legal header line, save it.
                lst.append(line)
                self.dict[headerseen] = line[len(headerseen)+1:].strip()
                continue
            else:
                # It's not a header line; throw it back and stop here.
                if not self.dict:
                    self.status = 'No headers'
                else:
                    self.status = 'Non-header line where header expected'
                # Try to undo the read.
                if unread:
                    unread(line)
                elif tell:
                    self.fp.seek(startofline)
                else:
                    self.status = self.status + '; bad seek'
                break

    def isheader(self, line):
        """Determine whether a given line is a legal header.

        This method should return the header name, suitably canonicalized.
        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats with special header formats.
        """
        i = line.find(':')
        if i > 0:
            return line[:i].lower()
        return None

    def islast(self, line):
        """Determine whether a line is a legal end of RFC 2822 headers.

        You may override this method if your application wants to bend the
        rules, e.g. to strip trailing whitespace, or to recognize MH template
        separators ('--------').  For convenience (e.g. for code reading from
        sockets) a line consisting of \r\n also matches.
        """
        return line in _blanklines

    def iscomment(self, line):
        """Determine whether a line should be skipped entirely.

        You may override this method in order to use Message parsing on tagged
        data in RFC 2822-like formats that support embedded comments or
        free-text data.
        """
        return False

    def getallmatchingheaders(self, name):
        """Find all header lines matching a given header name.

        Look through the list of headers and find all lines matching a given
        header name (and their continuation lines).  A list of the lines is
        returned, without interpretation.  If the header does not occur, an
        empty list is returned.  If the header occurs multiple times, all
        occurrences are returned.  Case is not important in the header name.
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                # A new non-continuation line ends the previous match.
                hit = 0
            if hit:
                lst.append(line)
        return lst

    def getfirstmatchingheader(self, name):
        """Get the first header line matching name.

        This is similar to getallmatchingheaders, but it returns only the
        first matching header (and its continuation lines).
        """
        name = name.lower() + ':'
        n = len(name)
        lst = []
        hit = 0
        for line in self.headers:
            if hit:
                if not line[:1].isspace():
                    break
            elif line[:n].lower() == name:
                hit = 1
            if hit:
                lst.append(line)
        return lst

    def getrawheader(self, name):
        """A higher-level interface to getfirstmatchingheader().

        Return a string containing the literal text of the header but with the
        keyword stripped.  All leading, trailing and embedded whitespace is
        kept in the string, however.  Return None if the header does not
        occur.
        """
        lst = self.getfirstmatchingheader(name)
        if not lst:
            return None
        lst[0] = lst[0][len(name) + 1:]
        return ''.join(lst)

    def getheader(self, name, default=None):
        """Get the header value for a name.

        This is the normal interface: it returns a stripped version of the
        header value for a given header name, or None if it doesn't exist.
        This uses the dictionary version which finds the *last* such header.
        """
        return self.dict.get(name.lower(), default)
    get = getheader

    def getheaders(self, name):
        """Get all values for a header.

        This returns a list of values for headers given more than once; each
        value in the result list is stripped in the same way as the result of
        getheader().  If the header is not given, return an empty list.
        """
        result = []
        current = ''
        have_header = 0
        for s in self.getallmatchingheaders(name):
            if s[0].isspace():
                # Continuation line: fold into the current value.
                if current:
                    current = "%s\n %s" % (current, s.strip())
                else:
                    current = s.strip()
            else:
                if have_header:
                    result.append(current)
                current = s[s.find(":") + 1:].strip()
                have_header = 1
        if have_header:
            result.append(current)
        return result

    def getaddr(self, name):
        """Get a single address from a header, as a tuple.

        An example return value:
        ('Guido van Rossum', 'guido@cwi.nl')
        """
        # New, by Ben Escoto
        alist = self.getaddrlist(name)
        if alist:
            return alist[0]
        else:
            return (None, None)

    def getaddrlist(self, name):
        """Get a list of addresses from a header.

        Retrieves a list of addresses from a header, where each address is a
        tuple as returned by getaddr().  Scans all named headers, so it works
        properly with multiple To: or Cc: headers for example.
        """
        raw = []
        for h in self.getallmatchingheaders(name):
            if h[0] in ' \t':
                raw.append(h)
            else:
                if raw:
                    raw.append(', ')
                i = h.find(':')
                if i > 0:
                    addr = h[i+1:]
                raw.append(addr)
        alladdrs = ''.join(raw)
        a = AddressList(alladdrs)
        return a.addresslist

    def getdate(self, name):
        """Retrieve a date field from a header.

        Retrieves a date field from the named header, returning a tuple
        compatible with time.mktime().
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate(data)

    def getdate_tz(self, name):
        """Retrieve a date field from a header as a 10-tuple.

        The first 9 elements make up a tuple compatible with time.mktime(),
        and the 10th is the offset of the poster's time zone from GMT/UTC.
        """
        try:
            data = self[name]
        except KeyError:
            return None
        return parsedate_tz(data)

    # Access as a dictionary (only finds *last* header of each type):

    def __len__(self):
        """Get the number of headers in a message."""
        return len(self.dict)

    def __getitem__(self, name):
        """Get a specific header, as from a dictionary."""
        return self.dict[name.lower()]

    def __setitem__(self, name, value):
        """Set the value of a header.

        Note: This is not a perfect inversion of __getitem__, because any
        changed headers get stuck at the end of the raw-headers list rather
        than where the altered header was.
        """
        del self[name] # Won't fail if it doesn't exist
        self.dict[name.lower()] = value
        text = name + ": " + value
        for line in text.split("\n"):
            self.headers.append(line + "\n")

    def __delitem__(self, name):
        """Delete all occurrences of a specific header, if it is present."""
        name = name.lower()
        if not name in self.dict:
            return
        del self.dict[name]
        name = name + ':'
        n = len(name)
        lst = []
        hit = 0
        # Collect the indices of matching header lines (plus continuations),
        # then delete them in reverse so earlier indices stay valid.
        for i in range(len(self.headers)):
            line = self.headers[i]
            if line[:n].lower() == name:
                hit = 1
            elif not line[:1].isspace():
                hit = 0
            if hit:
                lst.append(i)
        for i in reversed(lst):
            del self.headers[i]

    def setdefault(self, name, default=""):
        # Dictionary-style setdefault: return the existing value, or install
        # `default` as both a raw header line and a dict entry.
        lowername = name.lower()
        if lowername in self.dict:
            return self.dict[lowername]
        else:
            text = name + ": " + default
            for line in text.split("\n"):
                self.headers.append(line + "\n")
            self.dict[lowername] = default
            return default

    def has_key(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __contains__(self, name):
        """Determine whether a message contains the named header."""
        return name.lower() in self.dict

    def __iter__(self):
        return iter(self.dict)

    def keys(self):
        """Get all of a message's header field names."""
        return self.dict.keys()

    def values(self):
        """Get all of a message's header field values."""
        return self.dict.values()

    def items(self):
        """Get all of a message's headers.

        Returns a list of name, value tuples.
        """
        return self.dict.items()

    def __str__(self):
        return ''.join(self.headers)
# Utility functions
# -----------------
# XXX Should fix unquote() and quote() to be really conformant.
# XXX The inverses of the parse functions may also be useful.
def unquote(s):
    """Strip surrounding double quotes or angle brackets from a string.

    For a double-quoted string, backslash escapes for backslash and double
    quote are also undone.  Anything else is returned unchanged.
    """
    if len(s) > 1:
        double_quoted = s.startswith('"') and s.endswith('"')
        if double_quoted:
            inner = s[1:-1]
            return inner.replace('\\\\', '\\').replace('\\"', '"')
        if s.startswith('<') and s.endswith('>'):
            return s[1:-1]
    return s
def quote(s):
    """Escape backslashes and double quotes so *s* can sit inside a quoted string."""
    escaped = s.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
def parseaddr(address):
    """Parse an address into a (realname, mailaddr) tuple.

    Returns (None, None) when no address can be parsed.
    """
    parsed = AddressList(address).addresslist
    if parsed:
        return parsed[0]
    return (None, None)
class AddrlistClass:
    """Address parser class by Ben Escoto.
    To understand what this class does, it helps to have a copy of
    RFC 2822 in front of you.
    http://www.faqs.org/rfcs/rfc2822.html
    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """
    def __init__(self, field):
        """Initialize a new instance.
        `field' is an unparsed address header field, containing one or more
        addresses.
        """
        # Characters that terminate an atom (RFC 2822 specials).
        self.specials = '()<>@,:;.\"[]'
        # Current parse position within self.field.
        self.pos = 0
        # Linear whitespace characters.
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        # Comments collected while scanning; reset per-address in getaddress().
        self.commentlist = []
    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skip whitespace/newlines and accumulate any parenthesized comments.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else: break
    def getaddrlist(self):
        """Parse all addresses.
        Returns a list containing all of the addresses.
        """
        result = []
        ad = self.getaddress()
        # getaddress() returns [] once the field is exhausted.
        while ad:
            result += ad
            ad = self.getaddress()
        return result
    def getaddress(self):
        """Parse the next address.

        Returns a (possibly empty) list of (comment-string, address) pairs.
        """
        self.commentlist = []
        self.gotonext()
        # Remember state so we can re-parse from here if the phrase we read
        # turns out to be the start of a bare addr-spec.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()
        self.gotonext()
        returnlist = []
        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(' '.join(self.commentlist), addrspec)]
        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []
            fieldlen = len(self.field)
            self.pos += 1
            # Recursively parse the group's member addresses until ';'.
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()
        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()
            if self.commentlist:
                returnlist = [(' '.join(plist) + ' (' + \
                         ' '.join(self.commentlist) + ')', routeaddr)]
            else: returnlist = [(' '.join(plist), routeaddr)]
        else:
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Unexpected special: skip it so parsing can make progress.
                self.pos += 1
        self.gotonext()
        # Consume a trailing comma separating this address from the next.
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist
    def getrouteaddr(self):
        """Parse a route address (Return-path value).
        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return
        expectroute = 0
        self.pos += 1
        self.gotonext()
        adlist = ""
        while self.pos < len(self.field):
            if expectroute:
                # After '@' in a route, a domain follows; discard it.
                self.getdomain()
                expectroute = 0
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = 1
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break
        self.gotonext()
        return adlist
    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []
        self.gotonext()
        # Accumulate the local-part: dots, quoted strings and atoms.
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else: aslist.append(self.getatom())
        self.gotonext()
        # Without an '@' there is no domain; return the bare local-part.
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            return ''.join(aslist)
        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return ''.join(aslist) + self.getdomain()
    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else: sdlist.append(self.getatom())
        return ''.join(sdlist)
    def getdelimited(self, beginchar, endchars, allowcomments = 1):
        """Parse a header fragment delimited by special characters.
        `beginchar' is the start character for the fragment. If self is not
        looking at an instance of `beginchar' then getdelimited returns the
        empty string.
        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.
        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''
        slist = ['']
        # `quote' is 1 when the previous char was a backslash escape.
        quote = 0
        self.pos += 1
        while self.pos < len(self.field):
            if quote == 1:
                slist.append(self.field[self.pos])
                quote = 0
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = 1
            else:
                slist.append(self.field[self.pos])
            self.pos += 1
        return ''.join(slist)
    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', 0)
    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', 1)
    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', 0)
    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.
        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends). This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends
        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else: atomlist.append(self.field[self.pos])
            self.pos += 1
        return ''.join(atomlist)
    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.
        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings. Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))
        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses.

    Supports len(), str(), indexing/slicing, and set-like arithmetic
    (union via +/+=, difference via -/-=) on the parsed address pairs.
    """
    def __init__(self, field):
        """Parse `field' (an address header string, possibly empty/None)."""
        AddrlistClass.__init__(self, field)
        if field:
            self.addresslist = self.getaddrlist()
        else:
            self.addresslist = []
    def __len__(self):
        return len(self.addresslist)
    def __str__(self):
        return ", ".join(map(dump_address_pair, self.addresslist))
    def __add__(self, other):
        # Set union
        newaddr = AddressList(None)
        newaddr.addresslist = self.addresslist[:]
        for x in other.addresslist:
            # Idiomatic membership test (was `not x in ...`).
            if x not in self.addresslist:
                newaddr.addresslist.append(x)
        return newaddr
    def __iadd__(self, other):
        # Set union, in-place
        for x in other.addresslist:
            if x not in self.addresslist:
                self.addresslist.append(x)
        return self
    def __sub__(self, other):
        # Set difference
        newaddr = AddressList(None)
        for x in self.addresslist:
            if x not in other.addresslist:
                newaddr.addresslist.append(x)
        return newaddr
    def __isub__(self, other):
        # Set difference, in-place
        for x in other.addresslist:
            if x in self.addresslist:
                self.addresslist.remove(x)
        return self
    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form.

    With a non-empty real name the result is '"name" <addr>'; otherwise
    the bare address is returned.
    """
    realname, mailaddr = pair[0], pair[1]
    if realname:
        return '"%s" <%s>' % (realname, mailaddr)
    return mailaddr
# Parse a date field
# Month names (short and long forms, lower-cased) used by parsedate_tz();
# index position determines the month number (mod 12).
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
# Values are signed HHMM offsets (e.g. -400 means -04:00); parsedate_tz()
# converts them to seconds via (offset//100)*3600 + (offset%100)*60.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }
def parsedate_tz(data):
    """Convert a date string to a time tuple.
    Accounts for military timezones.

    Returns a 10-tuple (year, month, day, hour, minute, second, 0, 1, 0,
    tz-offset-in-seconds-or-None), or None if the string cannot be parsed.
    """
    if not data:
        return None
    data = data.split()
    # Strip an optional leading day-of-week token ("Mon," etc.).
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        # "DD-Mon-YY" form: split it into separate tokens.
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # Timezone glued onto the time field ("HH:MM:SS+ZZZZ").
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    # Day and month may be swapped ("Mon DD" vs "DD Mon").
    if not mm in _monthnames:
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    # Long month names index past 12; fold back to 1-12.
    if mm > 12: mm = mm - 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped; put them back.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and timezone fields were swapped; put them back.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
def parsedate(data):
    """Convert a time string to a time tuple (no timezone info)."""
    parsed = parsedate_tz(data)
    if parsed is None:
        return None
    # Drop the trailing timezone offset, keeping the 9 struct_time fields.
    return parsed[:9]
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
    tz_offset = data[9]
    if tz_offset is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Interpret the tuple as local time without DST, then correct for both
    # the local zone and the tuple's own offset to get UTC.
    local_stamp = time.mktime(data[:8] + (0,))
    return local_stamp - tz_offset - time.timezone
def formatdate(timeval=None):
    """Returns time format preferred for Internet standards.
    Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
    According to RFC 1123, day and month names must always be in
    English. If not for that, this code could use strftime(). It
    can't because strftime() honors the locale and could generated
    non-English names.
    """
    if timeval is None:
        timeval = time.time()
    tt = time.gmtime(timeval)
    day_abbrs = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
    month_abbrs = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                   "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        day_abbrs[tt[6]], tt[2], month_abbrs[tt[1] - 1],
        tt[0], tt[3], tt[4], tt[5])
# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.
# NOTE(review): this demo block uses Python 2 print statements and is
# Python 2 only.
if __name__ == '__main__':
    import sys, os
    # Default to the first message of the user's inbox unless a path is given.
    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
    if sys.argv[1:]: file = sys.argv[1]
    f = open(file, 'r')
    m = Message(f)
    print 'From:', m.getaddr('from')
    print 'To:', m.getaddrlist('to')
    print 'Subject:', m.getheader('subject')
    print 'Date:', m.getheader('date')
    date = m.getdate_tz('date')
    tz = date[-1]
    # Round-trip the parsed date through mktime_tz to show the local form.
    date = time.localtime(mktime_tz(date))
    if date:
        print 'ParsedDate:', time.asctime(date),
        # Re-print the timezone offset as +HHMM(.SS).
        hhmmss = tz
        hhmm, ss = divmod(hhmmss, 60)
        hh, mm = divmod(hhmm, 60)
        print "%+03d%02d" % (hh, mm),
        if ss: print ".%02d" % ss,
        print
    else:
        print 'ParsedDate:', None
    m.rewindbody()
    # Count the body lines remaining after the headers.
    n = 0
    while f.readline():
        n += 1
    print 'Lines:', n
    print '-'*70
    print 'len =', len(m)
    if 'Date' in m: print 'Date =', m['Date']
    if 'X-Nonsense' in m: pass
    print 'keys =', m.keys()
    print 'values =', m.values()
    print 'items =', m.items()
|
apache-2.0
|
dsm054/pandas
|
pandas/tests/plotting/test_boxplot_method.py
|
3
|
16170
|
# coding: utf-8
import pytest
import itertools
import string
from pandas import Series, DataFrame, MultiIndex
from pandas.compat import range, lzip
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy import random
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    """Tests for DataFrame.boxplot: call styles, return types, axis
    limits, figure size and font size handling."""
    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        df = DataFrame(np.random.randn(6, 4),
                       index=list(string.ascii_letters[:6]),
                       columns=['one', 'two', 'three', 'four'])
        df['indic'] = ['foo', 'bar'] * 3
        df['indic2'] = ['foo', 'bar', 'foo'] * 2
        _check_plot_works(df.boxplot, return_type='dict')
        _check_plot_works(df.boxplot, column=[
                          'one', 'two'], return_type='dict')
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, column=['one', 'two'],
                              by='indic')
        _check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by='indic')
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by=['indic', 'indic2'])
        _check_plot_works(plotting._core.boxplot, data=df['one'],
                          return_type='dict')
        _check_plot_works(df.boxplot, notch=1, return_type='dict')
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by='indic', notch=1)
    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
        df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
        df['Y'] = Series(['A'] * 10)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by='X')
        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.boxplot('Col1', by='X', ax=ax)
        ax_axes = ax.axes
        assert ax_axes is axes
        fig, ax = self.plt.subplots()
        axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')
        ax_axes = ax.axes
        assert ax_axes is axes['A']
        # Multiple columns with an ax argument should use same figure
        fig, ax = self.plt.subplots()
        with tm.assert_produces_warning(UserWarning):
            axes = df.boxplot(column=['Col1', 'Col2'],
                              by='X', ax=ax, return_type='axes')
        assert axes['Col1'].get_figure() is fig
        # When by is None, check that all relevant lines are present in the
        # dict
        fig, ax = self.plt.subplots()
        d = df.boxplot(ax=ax, return_type='dict')
        lines = list(itertools.chain.from_iterable(d.values()))
        assert len(ax.get_lines()) == len(lines)
    @pytest.mark.slow
    def test_boxplot_return_type_none(self):
        # GH 12216; return_type=None & by=None -> axes
        result = self.hist_df.boxplot()
        assert isinstance(result, self.plt.Axes)
    @pytest.mark.slow
    def test_boxplot_return_type_legacy(self):
        # API change in https://github.com/pandas-dev/pandas/pull/7096
        import matplotlib as mpl  # noqa
        df = DataFrame(np.random.randn(6, 4),
                       index=list(string.ascii_letters[:6]),
                       columns=['one', 'two', 'three', 'four'])
        with pytest.raises(ValueError):
            df.boxplot(return_type='NOTATYPE')
        result = df.boxplot()
        self._check_box_return_type(result, 'axes')
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type='dict')
        self._check_box_return_type(result, 'dict')
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type='axes')
        self._check_box_return_type(result, 'axes')
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type='both')
        self._check_box_return_type(result, 'both')
    @pytest.mark.slow
    def test_boxplot_axis_limits(self):
        # Helper: each subplot's y-limits must cover its column's range.
        def _check_ax_limits(col, ax):
            y_min, y_max = ax.get_ylim()
            assert y_min <= col.min()
            assert y_max >= col.max()
        df = self.hist_df.copy()
        df['age'] = np.random.randint(1, 20, df.shape[0])
        # One full row
        height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')
        _check_ax_limits(df['height'], height_ax)
        _check_ax_limits(df['weight'], weight_ax)
        assert weight_ax._sharey == height_ax
        # Two rows, one partial
        p = df.boxplot(['height', 'weight', 'age'], by='category')
        height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
        dummy_ax = p[1, 1]
        _check_ax_limits(df['height'], height_ax)
        _check_ax_limits(df['weight'], weight_ax)
        _check_ax_limits(df['age'], age_ax)
        assert weight_ax._sharey == height_ax
        assert age_ax._sharey == height_ax
        assert dummy_ax._sharey is None
    @pytest.mark.slow
    def test_boxplot_empty_column(self):
        df = DataFrame(np.random.randn(20, 4))
        df.loc[:, 0] = np.nan
        _check_plot_works(df.boxplot, return_type='axes')
    @pytest.mark.slow
    def test_figsize(self):
        df = DataFrame(np.random.rand(10, 5),
                       columns=['A', 'B', 'C', 'D', 'E'])
        result = df.boxplot(return_type='axes', figsize=(12, 8))
        assert result.figure.bbox_inches.width == 12
        assert result.figure.bbox_inches.height == 8
    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
        self._check_ticks_props(df.boxplot("a", fontsize=16),
                                xlabelsize=16, ylabelsize=16)
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Tests for GroupBy.boxplot and DataFrame.boxplot(by=...): return
    types, subplot layouts and supplying pre-created axes."""
    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        grouped = self.hist_df.groupby(by='gender')
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type='axes')
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False,
                                 return_type='axes')
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        tuples = lzip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3),
                       index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type='axes')
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
        axes = _check_plot_works(grouped.boxplot, subplots=False,
                                 return_type='axes')
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @pytest.mark.slow
    def test_boxplot_legacy3(self):
        tuples = lzip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3),
                       index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type='axes')
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False,
                                 return_type='axes')
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    @pytest.mark.slow
    def test_grouped_plot_fignums(self):
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(['male', 'female'], size=n)
        df = DataFrame({'height': height, 'weight': weight, 'gender': gender})
        gb = df.groupby('gender')
        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()
        res = gb.boxplot(return_type='axes')
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()
        # now works with GH 5610 as gender is excluded
        res = df.groupby('gender').hist()
        tm.close()
    @pytest.mark.slow
    def test_grouped_box_return_type(self):
        df = self.hist_df
        # old style: return_type=None
        result = df.boxplot(by='gender')
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None,
            expected_keys=['height', 'weight', 'category'])
        # now for groupby
        result = df.groupby('gender').boxplot(return_type='dict')
        self._check_box_return_type(
            result, 'dict', expected_keys=['Male', 'Female'])
        columns2 = 'X B C D A G Y N Q O'.split()
        df2 = DataFrame(random.randn(50, 10), columns=columns2)
        categories2 = 'A B C D E F G H I J'.split()
        df2['category'] = categories2 * 5
        # Exercise every supported return_type against both call styles.
        for t in ['dict', 'axes', 'both']:
            returned = df.groupby('classroom').boxplot(return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=['A', 'B', 'C'])
            returned = df.boxplot(by='classroom', return_type=t)
            self._check_box_return_type(
                returned, t,
                expected_keys=['height', 'weight', 'category'])
            returned = df2.groupby('category').boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)
            returned = df2.boxplot(by='category', return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)
    @pytest.mark.slow
    def test_grouped_box_layout(self):
        df = self.hist_df
        # Invalid layouts must raise.
        pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
                      by=df.gender, layout=(1, 1))
        pytest.raises(ValueError, df.boxplot,
                      column=['height', 'weight', 'category'],
                      layout=(2, 1), return_type='dict')
        pytest.raises(ValueError, df.boxplot, column=['weight', 'height'],
                      by=df.gender, layout=(-1, -1))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(df.groupby('gender').boxplot,
                                    column='height', return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(df.groupby('category').boxplot,
                                    column='height',
                                    return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))
        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(df.groupby('classroom').boxplot,
                                    column='height', return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        # GH 5897
        axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',
                          return_type='axes')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        for ax in [axes['height']]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes['weight'], axes['category']]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
        box = df.groupby('classroom').boxplot(
            column=['height', 'weight', 'category'], return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(df.groupby('category').boxplot,
                                    column='height',
                                    layout=(3, 2), return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(df.groupby('category').boxplot,
                                    column='height',
                                    layout=(3, -1), return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
                         layout=(4, 1))
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))
        box = df.boxplot(column=['height', 'weight', 'category'], by='gender',
                         layout=(-1, 1))
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))
        box = df.groupby('classroom').boxplot(
            column=['height', 'weight', 'category'], layout=(1, 4),
            return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))
        box = df.groupby('classroom').boxplot(  # noqa
            column=['height', 'weight', 'category'], layout=(1, -1),
            return_type='dict')
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))
    @pytest.mark.slow
    def test_grouped_box_multiple_axes(self):
        # GH 6970, GH 7069
        df = self.hist_df
        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby('category').boxplot(
                column='height', return_type='axes', ax=axes)
            self._check_axes_shape(self.plt.gcf().axes,
                                   axes_num=4, layout=(2, 2))
        fig, axes = self.plt.subplots(2, 3)
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(column=['height', 'weight', 'category'],
                                  by='gender', return_type='axes', ax=axes[0])
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig
        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby('classroom').boxplot(
                column=['height', 'weight', 'category'],
                return_type='axes', ax=axes[1])
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig
        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby('classroom').boxplot(ax=axes)
    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(df.boxplot("a", by="b", fontsize=16),
                                xlabelsize=16, ylabelsize=16)
|
bsd-3-clause
|
shuggiefisher/crowdstock
|
django/conf/locale/fr/formats.py
|
232
|
1530
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display and input formats for the French (fr) locale, including the
# Swiss-French (fr_CH) dotted date variants.
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',     # '14:30:59'
    '%H:%M',        # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',     # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',        # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',              # Swiss (fr_CH), '25.10.2006'
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
# French convention: a space groups thousands (e.g. 1 000 000).
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
bsd-3-clause
|
BeyondTheClouds/nova
|
nova/block_device.py
|
6
|
20984
|
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
from oslo_utils import strutils
import six
import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt import driver
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
                     'ephemeral0': 'sda2',
                     'root': DEFAULT_ROOT_DEV_NAME,
                     'swap': 'sda3'}
# Fields of the legacy (EC2-style) block device mapping format.
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
                         'virtual_name', 'snapshot_id',
                         'volume_id', 'volume_size', 'no_device',
                         'connection_info'])
# Fields of the new-style block device mapping format used by
# BlockDeviceDict below.
bdm_new_fields = set(['source_type', 'destination_type',
                     'guest_format', 'device_type', 'disk_bus', 'boot_index',
                     'device_name', 'delete_on_termination', 'snapshot_id',
                     'volume_id', 'volume_size', 'image_id', 'no_device',
                     'connection_info'])
# Fields that exist only in the database representation.
bdm_db_only_fields = set(['id', 'instance_uuid'])
bdm_db_inherited_fields = set(['created_at', 'updated_at',
                               'deleted_at', 'deleted'])
# Fields never exposed through the API.
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
                              'image_id', 'connection_info'])
bdm_new_api_only_fields = set(['uuid'])
# Fields accepted from the API: new-style fields minus internal ones,
# plus the API-only 'uuid'.
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
                      bdm_new_api_only_fields)
class BlockDeviceDict(dict):
"""Represents a Block Device Mapping in Nova."""
_fields = bdm_new_fields
_db_only_fields = (bdm_db_only_fields |
bdm_db_inherited_fields)
_required_fields = set(['source_type'])
def __init__(self, bdm_dict=None, do_not_default=None, **kwargs):
super(BlockDeviceDict, self).__init__()
bdm_dict = bdm_dict or {}
bdm_dict.update(kwargs)
do_not_default = do_not_default or set()
self._validate(bdm_dict)
if bdm_dict.get('device_name'):
bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
bdm_dict['delete_on_termination'] = bool(
bdm_dict.get('delete_on_termination'))
# NOTE (ndipanov): Never default db fields
self.update({field: None for field in self._fields - do_not_default})
self.update(list(six.iteritems(bdm_dict)))
def _validate(self, bdm_dict):
"""Basic data format validations."""
dict_fields = set(key for key, _ in six.iteritems(bdm_dict))
# Check that there are no bogus fields
if not (dict_fields <=
(self._fields | self._db_only_fields)):
raise exception.InvalidBDMFormat(
details=_("Some fields are invalid."))
if bdm_dict.get('no_device'):
return
# Check that all required fields are there
if (self._required_fields and
not ((dict_fields & self._required_fields) ==
self._required_fields)):
raise exception.InvalidBDMFormat(
details=_("Some required fields are missing"))
if 'delete_on_termination' in bdm_dict:
bdm_dict['delete_on_termination'] = strutils.bool_from_string(
bdm_dict['delete_on_termination'])
if bdm_dict.get('device_name') is not None:
validate_device_name(bdm_dict['device_name'])
validate_and_default_volume_size(bdm_dict)
if bdm_dict.get('boot_index'):
try:
bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
@classmethod
def from_legacy(cls, legacy_bdm):
copy_over_fields = bdm_legacy_fields & bdm_new_fields
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
# NOTE (ndipanov): These fields cannot be computed
# from legacy bdm, so do not default them
# to avoid overwriting meaningful values in the db
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = {fld: val for fld, val in six.iteritems(legacy_bdm)
if fld in copy_over_fields}
virt_name = legacy_bdm.get('virtual_name')
if is_swap_or_ephemeral(virt_name):
new_bdm['source_type'] = 'blank'
new_bdm['delete_on_termination'] = True
new_bdm['destination_type'] = 'local'
if virt_name == 'swap':
new_bdm['guest_format'] = 'swap'
else:
new_bdm['guest_format'] = CONF.default_ephemeral_format
elif legacy_bdm.get('snapshot_id'):
new_bdm['source_type'] = 'snapshot'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('volume_id'):
new_bdm['source_type'] = 'volume'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('no_device'):
# NOTE (ndipanov): Just keep the BDM for now,
pass
else:
raise exception.InvalidBDMFormat(
details=_("Unrecognized legacy format."))
return cls(new_bdm, non_computable_fields)
@classmethod
def from_api(cls, api_dict, image_uuid_specified):
"""Transform the API format of data to the internally used one.
Only validate if the source_type field makes sense.
"""
if not api_dict.get('no_device'):
source_type = api_dict.get('source_type')
device_uuid = api_dict.get('uuid')
destination_type = api_dict.get('destination_type')
if source_type not in ('volume', 'image', 'snapshot', 'blank'):
raise exception.InvalidBDMFormat(
details=_("Invalid source_type field."))
elif source_type == 'blank' and device_uuid:
raise exception.InvalidBDMFormat(
details=_("Invalid device UUID."))
elif source_type != 'blank':
if not device_uuid:
raise exception.InvalidBDMFormat(
details=_("Missing device UUID."))
api_dict[source_type + '_id'] = device_uuid
if source_type == 'image' and destination_type == 'local':
try:
boot_index = int(api_dict.get('boot_index', -1))
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
# if this bdm is generated from --image ,then
# source_type = image and destination_type = local is allowed
if not (image_uuid_specified and boot_index == 0):
raise exception.InvalidBDMFormat(
details=_("Mapping image to local is not supported."))
api_dict.pop('uuid', None)
return cls(api_dict)
def legacy(self):
    """Return a copy of this mapping in the legacy dict format.

    :raises exception.InvalidBDMForLegacy: for image->local mappings,
        which have no legacy representation.
    """
    fields = ((bdm_legacy_fields - set(['virtual_name'])) |
              bdm_db_only_fields | bdm_db_inherited_fields)
    legacy_bdm = {}
    for fld in fields:
        if fld in self:
            legacy_bdm[fld] = self.get(fld)

    src = self.get('source_type')
    dst = self.get('destination_type')

    if src == 'blank':
        # NOTE: ephemerals are always labeled 0 here; the caller is
        # expected to re-enumerate them afterwards.
        legacy_bdm['virtual_name'] = (
            'swap' if self['guest_format'] == 'swap' else 'ephemeral0')
    elif src in ('volume', 'snapshot') or self.get('no_device'):
        legacy_bdm['virtual_name'] = None
    elif src == 'image':
        if dst != 'volume':
            # Image bdms with local destination have no meaning in the
            # legacy format - raise.
            raise exception.InvalidBDMForLegacy()
        legacy_bdm['virtual_name'] = None

    return legacy_bdm
def get_image_mapping(self):
    """Return a plain dict of this mapping without connection_info and
    the database-only fields.
    """
    excluded = set(['connection_info']) | self._db_only_fields
    return {key: value for key, value in self.items()
            if key not in excluded}
def is_safe_for_update(block_device_dict):
    """Determine if passed dict is a safe subset for update.

    Safe subset in this case means a safe subset of both legacy
    and new versions of data, that can be passed to an UPDATE query
    without any transformation.
    """
    allowed = (bdm_new_fields |
               bdm_db_inherited_fields |
               bdm_db_only_fields)
    return set(block_device_dict).issubset(allowed)
def create_image_bdm(image_ref, boot_index=0):
    """Create a block device dict based on the image_ref.

    This is useful in the API layer to keep the compatibility
    with having an image_ref as a field in the instance requests.
    """
    bdm = {
        'source_type': 'image',
        'destination_type': 'local',
        'image_id': image_ref,
        'device_type': 'disk',
        'boot_index': boot_index,
        'delete_on_termination': True,
    }
    return BlockDeviceDict(bdm)
def create_blank_bdm(size, guest_format=None):
    """Create a non-bootable blank local disk mapping of the given size.

    :param size: volume_size for the new device
    :param guest_format: optional filesystem/format hint for the guest
    """
    bdm = {
        'source_type': 'blank',
        'destination_type': 'local',
        'device_type': 'disk',
        'guest_format': guest_format,
        'volume_size': size,
        'boot_index': -1,
        'delete_on_termination': True,
    }
    return BlockDeviceDict(bdm)
def snapshot_from_bdm(snapshot_id, template):
    """Create a basic volume snapshot BDM from a given template bdm."""
    inherited = ('disk_bus', 'device_type', 'boot_index',
                 'delete_on_termination', 'volume_size',
                 'device_name')
    # Carry over the listed fields from the template (missing -> None),
    # then stamp on the snapshot-specific values.
    snapshot_bdm = {field: template.get(field) for field in inherited}
    snapshot_bdm.update(source_type='snapshot',
                        destination_type='volume',
                        snapshot_id=snapshot_id)
    return BlockDeviceDict(snapshot_bdm)
def legacy_mapping(block_device_mapping):
    """Transform a list of block devices of an instance back to the
    legacy data format.

    Image->local mappings (which raise InvalidBDMForLegacy) are silently
    dropped, since they have no legacy representation.
    """
    legacy_block_device_mapping = []

    for bdm in block_device_mapping:
        try:
            legacy_block_device = BlockDeviceDict(bdm).legacy()
        except exception.InvalidBDMForLegacy:
            continue

        legacy_block_device_mapping.append(legacy_block_device)

    # Re-enumerate the ephemeral devices.  legacy() labels every
    # ephemeral 'ephemeral0', so stripping the trailing character and
    # appending the enumeration index yields 'ephemeral<i>'.
    for i, dev in enumerate(dev for dev in legacy_block_device_mapping
                            if dev['virtual_name'] and
                            is_ephemeral(dev['virtual_name'])):
        dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)

    return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
                        root_device_name=None, no_root=False):
    """Transform a legacy list of block devices to the new data format.

    :param legacy_block_device_mapping: list of legacy-format bdm dicts
    :param image_uuid: image ref the instance boots from, if any
    :param root_device_name: device name of the root device, if known
    :param no_root: if True, mark every mapping non-bootable and skip
        root-device selection entirely
    :returns: list of BlockDeviceDict, possibly prepended with a
        generated image bdm when the boot device is image-backed
    """
    new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
                for legacy_bdm in legacy_block_device_mapping]
    # NOTE (ndipanov): We will not decide which device is root here - we assume
    # that it will be supplied later. This is useful for having the root device
    # as part of the image defined mappings that are already in the v2 format.
    if no_root:
        for bdm in new_bdms:
            bdm['boot_index'] = -1
        return new_bdms

    image_bdm = None
    volume_backed = False

    # Try to assign boot_device
    if not root_device_name and not image_uuid:
        # NOTE (ndipanov): If there is no root_device, pick the first non
        # blank one.
        non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
        if non_blank:
            non_blank[0]['boot_index'] = 0
    else:
        for bdm in new_bdms:
            # The bdm whose device name matches root_device_name becomes
            # the boot device; everything else is marked non-bootable
            # (-1), except no_device entries which get None.
            if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
                    root_device_name is not None and
                    (strip_dev(bdm.get('device_name')) ==
                     strip_dev(root_device_name))):
                bdm['boot_index'] = 0
                volume_backed = True
            elif not bdm['no_device']:
                bdm['boot_index'] = -1
            else:
                bdm['boot_index'] = None

    # No volume was chosen as root but an image ref was supplied - boot
    # from the image via a generated image->local mapping.
    if not volume_backed and image_uuid:
        image_bdm = create_image_bdm(image_uuid, boot_index=0)

    return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
    """Get root device name from image meta data.

    If it isn't specified, return None.
    """
    # NOTE(yamahata): see image_service.s3.s3create() - the last
    # 'root' entry in the mappings wins.
    candidates = [bdm['device'] for bdm in properties.get('mappings', [])
                  if bdm['virtual'] == 'root']
    root_device_name = candidates[-1] if candidates else None

    # NOTE(yamahata): register_image's command line can override
    # <machine>.manifest.xml
    if 'root_device_name' in properties:
        root_device_name = properties['root_device_name']

    return root_device_name
def validate_device_name(value):
    """Validate a block device name, raising InvalidBDMFormat if bad.

    Rejects empty/overlong names and names containing spaces.
    """
    try:
        # NOTE (ndipanov): Do not allow empty device names
        # until assigning default values
        # is supported by nova.compute
        utils.check_string_length(
            value, 'Device name', min_length=1, max_length=255)
    except exception.InvalidInput:
        raise exception.InvalidBDMFormat(
            details=_("Device name empty or too long."))

    if ' ' in value:
        raise exception.InvalidBDMFormat(
            details=_("Device name contains spaces."))
def validate_and_default_volume_size(bdm):
    """Coerce bdm['volume_size'] to a non-negative integer in place.

    Falsy/absent sizes are left untouched; invalid values raise
    InvalidBDMFormat.
    """
    size = bdm.get('volume_size')
    if not size:
        return
    try:
        bdm['volume_size'] = utils.validate_integer(
            size, 'volume_size', min_value=0)
    except exception.InvalidInput:
        # NOTE: We can remove this validation code after removing
        # Nova v2.0 API code because v2.1 API validates this case
        # already at its REST API layer.
        raise exception.InvalidBDMFormat(
            details=_("Invalid volume_size."))
# Raw string: '\d' in a plain string literal is an invalid escape sequence
# (DeprecationWarning, and a SyntaxWarning on recent Pythons).
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')


def is_ephemeral(device_name):
    """Return True if device_name is an ephemeral name ('ephemeral0',
    'ephemeral17', ... - no leading zeros).
    """
    return _ephemeral.match(device_name) is not None


def ephemeral_num(ephemeral_name):
    """Return the numeric suffix N of an 'ephemeralN' name."""
    assert is_ephemeral(ephemeral_name)
    # group(1) is the captured number - clearer than re-substituting.
    return int(_ephemeral.match(ephemeral_name).group(1))


def is_swap_or_ephemeral(device_name):
    """Return truthy if device_name is 'swap' or an ephemeral name.

    None/'' input short-circuits and is returned falsy.
    """
    return (device_name and
            (device_name == 'swap' or is_ephemeral(device_name)))
def new_format_is_swap(bdm):
    """Return True if a new-format bdm describes a local swap device."""
    return (bdm.get('source_type') == 'blank' and
            bdm.get('destination_type') == 'local' and
            bdm.get('guest_format') == 'swap')
def new_format_is_ephemeral(bdm):
    """Return True if a new-format bdm describes a local ephemeral disk.

    Any blank/local device that is not swap counts as ephemeral
    (guest_format may be None or a filesystem name).
    """
    return (bdm.get('source_type') == 'blank' and
            bdm.get('destination_type') == 'local' and
            bdm.get('guest_format') != 'swap')
def get_root_bdm(bdms):
    """Return the first mapping flagged as root (boot_index 0), or None."""
    for bdm in bdms:
        if bdm.get('boot_index', -1) == 0:
            return bdm
    return None
def get_bdms_to_connect(bdms, exclude_root_mapping=False):
    """Will return non-root mappings, when exclude_root_mapping is true.

    Otherwise all mappings will be returned.
    """
    for bdm in bdms:
        if not exclude_root_mapping or bdm.get('boot_index', -1) != 0:
            yield bdm
def mappings_prepend_dev(mappings):
    """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type.

    Mutates the entries in place and returns the same list.
    """
    for mapping in mappings:
        device = mapping['device']
        if (is_swap_or_ephemeral(mapping['virtual']) and
                not device.startswith('/')):
            mapping['device'] = '/dev/' + device
    return mappings
_dev = re.compile(r'^/dev/')


def strip_dev(device_name):
    """Remove a leading '/dev/'; None/'' is passed through unchanged."""
    return _dev.sub('', device_name) if device_name else device_name


def prepend_dev(device_name):
    """Make sure there is a leading '/dev/'."""
    return device_name and '/dev/' + strip_dev(device_name)


_pref = re.compile(r'^((x?v|s|h)d)')


def strip_prefix(device_name):
    """Remove both leading /dev/ and xvd or sd or vd or hd."""
    device_name = strip_dev(device_name)
    return _pref.sub('', device_name) if device_name else device_name


# Raw string: '\d+' in a plain literal is an invalid escape sequence
# (SyntaxWarning on recent Pythons).
_nums = re.compile(r'\d+')


def get_device_letter(device_name):
    """Return the drive letter(s) of a device name, e.g. '/dev/sda1' -> 'a'."""
    letter = strip_prefix(device_name)
    # NOTE(vish): delete numbers in case we have something like
    #             /dev/sda1
    return _nums.sub('', letter) if device_name else device_name
def instance_block_mapping(instance, bdms):
    """Build the EC2-metadata style block-device-mapping dict for an instance.

    :param instance: instance dict-like with 'root_device_name' and the
        default ephemeral/swap device fields
    :param bdms: block device mapping objects accessed by attribute
        (destination_type, source_type, guest_format, device_name)
    :returns: dict mapping EC2 labels ('ami', 'root', 'ebsN',
        'ephemeralN', 'swap') to device names
    """
    root_device_name = instance['root_device_name']
    # NOTE(clayg): remove this when xenapi is setting default_root_device
    if root_device_name is None:
        if driver.is_xenapi():
            root_device_name = '/dev/xvda'
        else:
            return _DEFAULT_MAPPINGS

    mappings = {}
    mappings['ami'] = strip_dev(root_device_name)
    mappings['root'] = root_device_name
    default_ephemeral_device = instance.get('default_ephemeral_device')
    if default_ephemeral_device:
        mappings['ephemeral0'] = default_ephemeral_device
    default_swap_device = instance.get('default_swap_device')
    if default_swap_device:
        mappings['swap'] = default_swap_device
    ebs_devices = []
    blanks = []

    # Partition the bdms into ebs volumes and blank (swap/ephemeral) disks.
    for bdm in bdms:
        # ebs volume case
        if bdm.destination_type == 'volume':
            ebs_devices.append(bdm.device_name)
            continue

        if bdm.source_type == 'blank':
            blanks.append(bdm)

    # NOTE(yamahata): I'm not sure how ebs device should be numbered.
    #                 Right now sort by device name for deterministic
    #                 result.
    if ebs_devices:
        ebs_devices.sort()
        for nebs, ebs in enumerate(ebs_devices):
            mappings['ebs%d' % nebs] = ebs

    # A blank/swap bdm overrides the instance's default swap device.
    swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
    if swap:
        mappings['swap'] = swap.pop().device_name

    ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
    if ephemerals:
        for num, eph in enumerate(ephemerals):
            mappings['ephemeral%d' % num] = eph.device_name

    return mappings
def match_device(device):
    """Matches device name and returns prefix, suffix.

    e.g. '/dev/sda1' -> ('/dev/sd', 'a'); returns None on no match.
    """
    m = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
    return m.groups() if m else None
def volume_in_mapping(mount_device, block_device_info):
    """Return True if mount_device is used by any device in block_device_info.

    Checks mapped volumes, a usable swap device and ephemeral disks;
    comparison is done with the leading '/dev/' stripped on both sides.
    """
    block_device_list = [strip_dev(vol['mount_device'])
                         for vol in
                         driver.block_device_info_get_mapping(
                             block_device_info)]

    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        block_device_list.append(strip_dev(swap['device_name']))

    block_device_list += [strip_dev(ephemeral['device_name'])
                          for ephemeral in
                          driver.block_device_info_get_ephemerals(
                              block_device_info)]

    LOG.debug("block_device_list %s", sorted(filter(None, block_device_list)))
    return strip_dev(mount_device) in block_device_list
def get_bdm_ephemeral_disk_size(block_device_mappings):
    """Sum the volume_size of all new-format ephemeral mappings."""
    total = 0
    for bdm in block_device_mappings:
        if new_format_is_ephemeral(bdm):
            total += bdm.get('volume_size', 0)
    return total
def get_bdm_swap_list(block_device_mappings):
    """Return the mappings that describe a new-format swap device."""
    return list(filter(new_format_is_swap, block_device_mappings))
def get_bdm_local_disk_num(block_device_mappings):
    """Count the mappings whose destination_type is 'local'."""
    return sum(1 for bdm in block_device_mappings
               if bdm.get('destination_type') == 'local')
|
apache-2.0
|
csminpp/docker-1.9.1
|
vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
|
1232
|
3478
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return test values: all primitive types, the same list nested once,
    and a few combo dict types.
    """
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        "someday",
        "",
        "bytestring",
        1328176922000002000,
        -2206187877999998000,
        0,
        -6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0,
                  {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0},
    ]
    data = list(primitives)
    data.append(primitives)   # the primitives list itself, nested
    data.extend(composites)
    return data
def build_test_data(destdir):
    """Serialize each test value with msgpack into destdir/<i>.golden."""
    for i, value in enumerate(get_test_data_list()):
        packer = msgpack.Packer()
        serialized = packer.pack(value)
        # with-statement: the original leaked the file handle if pack()
        # or write() raised; this guarantees close() in all cases.
        with open(os.path.join(destdir, str(i) + '.golden'), 'wb') as f:
            f.write(serialized)
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:port.

    If stopTimeSec > 0, a timer thread stops the server after that many
    seconds; otherwise it serves until killed.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))

        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    # Calls the Python echo service started by doRpcServer and prints the
    # replies.  NOTE: Python 2 print statements - this script predates
    # Python 3 support.
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"})
def doRpcClientToGoSvc(port):
    # Calls the Go-side RPC service (method names are Go-style
    # Type.Method, args passed as a list).  Python 2 print statements.
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"})
def doMain(args):
    """Dispatch on the first CLI argument to one of the test helpers."""
    cmd = args[0] if args else None
    rest = args[1:]
    if cmd == "testdata" and len(rest) == 1:
        build_test_data(rest[0])
    elif cmd == "rpc-server" and len(rest) == 2:
        doRpcServer(int(rest[0]), int(rest[1]))
    elif cmd == "rpc-client-python-service" and len(rest) == 1:
        doRpcClientToPythonSvc(int(rest[0]))
    elif cmd == "rpc-client-go-service" and len(rest) == 1:
        doRpcClientToGoSvc(int(rest[0]))
    else:
        print("Usage: msgpack_test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")


if __name__ == "__main__":
    doMain(sys.argv[1:])
|
apache-2.0
|
studio1247/gertrude
|
generation/export_tablette.py
|
1
|
5449
|
# -*- coding: utf-8 -*-
# This file is part of Gertrude.
#
# Gertrude is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gertrude is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from constants import *
from functions import *
from facture import *
from ooffice import *
class ExportTabletteModifications(object):
    """Export the tablet check-in/check-out journal of a month to an ODS
    spreadsheet (one sheet per child and per employee).
    """
    title = "Export tablette"
    template = "Export tablette.ods"

    def __init__(self, site, date):
        self.multi = False
        self.default_output = "Export tablette.ods"
        self.site, self.date = site, date
        self.gauge = None
        self.email = None
        # NOTE(review): this overwrites the site assigned two lines above -
        # looks like a bug (site filtering disabled?); confirm intent.
        self.site = None
        self.array_enfants = {}
        self.array_salaries = {}

    def FetchJournal(self):
        """Load the tablet journal and bucket its entries per person/date.

        Fills self.array_enfants / self.array_salaries as
        {person_idx: {date: [PeriodePresence, ...]}} for the month of
        self.date; malformed lines are printed and skipped.
        """
        journal = config.connection.LoadJournal()
        self.array_enfants = {}
        self.array_salaries = {}
        if not journal:
            return
        lines = journal.split("\n")
        for line in lines:
            try:
                salarie, label, idx, date, heure = SplitLineTablette(line)
                # Keep only entries of the selected month.
                if date.year != self.date.year or date.month != self.date.month:
                    continue
                if salarie:
                    array = self.array_salaries
                else:
                    array = self.array_enfants
                if idx not in array:
                    array[idx] = { }
                if date not in array[idx]:
                    array[idx][date] = []
                if label == "arrivee":
                    array[idx][date].append(PeriodePresence(date, heure))
                elif label == "depart":
                    if len(array[idx][date]):
                        last = array[idx][date][-1]
                        # Pair the departure with an open arrival if any;
                        # otherwise record a departure-only period.
                        if last.date == date and last.arrivee:
                            last.depart = heure
                        else:
                            array[idx][date].append(PeriodePresence(date, None, heure))
                    else:
                        array[idx][date].append(PeriodePresence(date, None, heure))
                elif label == "absent":
                    array[idx][date].append(PeriodePresence(date, absent=True))
                elif label == "malade":
                    array[idx][date].append(PeriodePresence(date, malade=True))
                else:
                    print("Ligne %s inconnue" % label)
            except Exception as e:
                print(e)
                pass

    def GetHeureString(self, value):
        """Format a time given in minutes as 'HhMM'; '' for None.

        NOTE(review): uses '/' and '%' on the minute count; the file
        imports unicode_literals/print_function but not division, so
        under Python 3 value/60 is a float - '%d' still truncates it, so
        output is unchanged for non-negative values.
        """
        if value is None:
            return ""
        heures = value / 60
        minutes = value % 60
        return "%dh%02d" % (heures, minutes)

    def AddSheet(self, who, array, fields):
        """Clone the template sheet for one person and fill in one line
        per recorded presence period.
        """
        table = self.template.cloneNode(1)
        table.setAttribute("table:name", GetPrenomNom(who))
        self.spreadsheet.insertBefore(table, self.template)
        lignes = table.getElementsByTagName("table:table-row")
        ReplaceFields(lignes, fields)
        lineTemplate = lignes.item(3)
        dates = list(array.keys())
        dates.sort()
        for date in dates:
            for jour in array[date]:
                ligne = lineTemplate.cloneNode(1)
                lineFields = fields + [('date', date),
                                       ('heure-arrivee', self.GetHeureString(jour.arrivee)),
                                       ('heure-depart', self.GetHeureString(jour.depart))]
                ReplaceFields(ligne, lineFields)
                table.insertBefore(ligne, lineTemplate)
        table.removeChild(lineTemplate)

    def SortedKeys(self, array, function):
        """Return array's keys sorted by the display name resolved by
        function (e.g. database.creche.GetInscrit).
        """
        keys = list(array.keys())
        keys.sort(key=lambda key: GetPrenomNom(function(key)))
        return keys

    def execute(self, filename, dom):
        """ooffice callback: fill content.xml of the ODS document.

        Returns None for other parts, an (empty) error dict otherwise.
        """
        if filename != 'content.xml':
            return None
        self.FetchJournal()
        errors = {}
        self.spreadsheet = dom.getElementsByTagName('office:spreadsheet').item(0)
        self.template = self.spreadsheet.getElementsByTagName("table:table").item(0)
        for key in self.SortedKeys(self.array_enfants, database.creche.GetInscrit):
            inscrit = database.creche.GetInscrit(key)
            if inscrit:
                self.AddSheet(inscrit, self.array_enfants[key], GetInscritFields(inscrit))
            else:
                print("Inscrit inconnu")
        for key in self.SortedKeys(self.array_salaries, database.creche.GetSalarie):
            salarie = database.creche.GetSalarie(key)
            if salarie:
                self.AddSheet(salarie, self.array_salaries[key], GetSalarieFields(salarie))
            else:
                print("Salarié inconnu")
        self.spreadsheet.removeChild(self.template)
        if self.gauge:
            self.gauge.SetValue(90)
        return errors
|
gpl-3.0
|
pwendell/mesos
|
third_party/libprocess/third_party/gmock-1.6.0/gtest/test/gtest_test_utils.py
|
397
|
10437
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
# Alias so the rest of the module is agnostic of the concrete test framework.
_test_module = unittest

# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
    import subprocess
    _SUBPROCESS_MODULE_AVAILABLE = True
except:
    # Pre-2.4 Pythons have no subprocess module; fall back to popen2.
    import popen2
    _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204

GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'

IS_WINDOWS = os.name == 'nt'
# os.uname() only exists on POSIX, hence the short-circuit guard.
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]

# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase  # pylint: disable-msg=C6409

# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
    """Parses and strips Google Test flags from argv.  This is idempotent.

    Resolution order per flag: default, then environment variable
    (FLAG.upper()), then command line '--flag=value'.  Matched command
    line arguments are removed from argv in place.
    """
    # Suppresses the lint complaint about a global variable since we need it
    # here to maintain module-wide state.
    global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
    if _gtest_flags_are_parsed:
        return

    _gtest_flags_are_parsed = True
    for flag in _flag_map:
        # The environment variable overrides the default value.
        if flag.upper() in os.environ:
            _flag_map[flag] = os.environ[flag.upper()]

        # The command line flag overrides the environment variable.
        i = 1  # Skips the program name.
        while i < len(argv):
            prefix = '--' + flag + '='
            if argv[i].startswith(prefix):
                _flag_map[flag] = argv[i][len(prefix):]
                del argv[i]
                break
            else:
                # We don't increment i in case we just found a --gtest_* flag
                # and removed it from argv.
                i += 1
def GetFlag(flag):
    """Returns the value of the given flag."""
    # In case GetFlag() is called before Main(), we always call
    # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
    # are parsed.  The call is idempotent, so this is cheap.
    _ParseAndStripGTestFlags(sys.argv)

    return _flag_map[flag]


def GetSourceDir():
    """Returns the absolute path of the directory where the .py files are."""
    return os.path.abspath(GetFlag('source_dir'))


def GetBuildDir():
    """Returns the absolute path of the directory where the test binaries are."""
    return os.path.abspath(GetFlag('build_dir'))
# Lazily-created, process-wide temporary directory shared by all tests.
_temp_dir = None


def _RemoveTempDir():
    # atexit hook: best-effort cleanup of the shared temp directory.
    if _temp_dir:
        shutil.rmtree(_temp_dir, ignore_errors=True)

atexit.register(_RemoveTempDir)


def GetTempDir():
    """Returns a directory for temporary files (created on first use)."""
    global _temp_dir
    if not _temp_dir:
        _temp_dir = tempfile.mkdtemp()
    return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
    """Returns the absolute path of the test binary given its name.

    The function will print a message and abort the program if the resulting
    file doesn't exist.

    Args:
      executable_name: name of the test binary that the test script runs.
      build_dir: directory where to look for executables, by default
                 the result of GetBuildDir().

    Returns:
      The absolute path of the test binary.
    """
    path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
                                        executable_name))
    # Windows/Cygwin binaries carry an .exe suffix.
    if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
        path += '.exe'

    if not os.path.exists(path):
        message = (
            'Unable to find the test binary. Please make sure to provide path\n'
            'to the binary via the --build_dir flag or the BUILD_DIR\n'
            'environment variable.')
        # Python 2 print-to-stream statement; this module predates Python 3.
        print >> sys.stderr, message
        sys.exit(1)

    return path
def GetExitStatus(exit_code):
    """Returns the argument to exit(), or -1 if exit() wasn't called.

    Args:
      exit_code: the result value of os.system(command).
    """
    if os.name == 'nt':
        # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
        # the argument to exit() directly.
        return exit_code

    # On Unix, the wait-status word must be decoded: WEXITSTATUS is only
    # meaningful when the process exited normally.
    if not os.WIFEXITED(exit_code):
        return -1
    return os.WEXITSTATUS(exit_code)
class Subprocess:
    def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
        """Changes into a specified directory, if provided, and executes a
        command.

        Restores the old directory afterwards.

        Args:
          command:        The command to run, in the form of sys.argv.
          working_dir:    The directory to change into.
          capture_stderr: Determines whether to capture stderr in the output
                          member or to discard it.
          env:            Dictionary with environment to pass to the subprocess.

        Returns:
          An object that represents outcome of the executed process. It has the
          following attributes:
            terminated_by_signal   True iff the child process has been
                                   terminated by a signal.
            signal                 Signal that terminated the child process.
            exited                 True iff the child process exited normally.
            exit_code              The code with which the child process exited.
            output                 Child process's stdout and stderr output
                                   combined in a string.
        """
        # The subprocess module is the preferrable way of running programs
        # since it is available and behaves consistently on all platforms,
        # including Windows. But it is only available starting in python 2.4.
        # In earlier python versions, we revert to the popen2 module, which is
        # available in python 2.0 and later but doesn't provide required
        # functionality (Popen4) under Windows. This allows us to support Mac
        # OS X 10.4 Tiger, which has python 2.3 installed.
        if _SUBPROCESS_MODULE_AVAILABLE:
            if capture_stderr:
                stderr = subprocess.STDOUT
            else:
                stderr = subprocess.PIPE

            p = subprocess.Popen(command,
                                 stdout=subprocess.PIPE, stderr=stderr,
                                 cwd=working_dir, universal_newlines=True,
                                 env=env)
            # communicate returns a tuple with the file object for the child's
            # output.
            self.output = p.communicate()[0]
            self._return_code = p.returncode
        else:
            old_dir = os.getcwd()

            def _ReplaceEnvDict(dest, src):
                # Changes made by os.environ.clear are not inheritable by child
                # processes until Python 2.6. To produce inheritable changes we
                # have to delete environment items with the del statement.
                # NOTE(review): deleting keys while iterating the dict raises
                # RuntimeError; upstream iterates dest.keys() (a list on
                # Python 2) - confirm against the original gtest source.
                for key in dest:
                    del dest[key]
                dest.update(src)

            # When 'env' is not None, backup the environment variables and
            # replace them with the passed 'env'. When 'env' is None, we simply
            # use the current 'os.environ' for compatibility with the
            # subprocess.Popen semantics used above.
            if env is not None:
                old_environ = os.environ.copy()
                _ReplaceEnvDict(os.environ, env)

            try:
                if working_dir is not None:
                    os.chdir(working_dir)
                if capture_stderr:
                    p = popen2.Popen4(command)
                else:
                    p = popen2.Popen3(command)
                p.tochild.close()
                self.output = p.fromchild.read()
                ret_code = p.wait()
            finally:
                os.chdir(old_dir)

                # Restore the old environment variables
                # if they were replaced.
                if env is not None:
                    _ReplaceEnvDict(os.environ, old_environ)

            # Converts ret_code to match the semantics of
            # subprocess.Popen.returncode.
            if os.WIFSIGNALED(ret_code):
                self._return_code = -os.WTERMSIG(ret_code)
            else:  # os.WIFEXITED(ret_code) should return True here.
                self._return_code = os.WEXITSTATUS(ret_code)

        # Negative return code == killed by a signal (subprocess convention).
        if self._return_code < 0:
            self.terminated_by_signal = True
            self.exited = False
            self.signal = -self._return_code
        else:
            self.terminated_by_signal = False
            self.exited = True
            self.exit_code = self._return_code
def Main():
    """Runs the unit test."""
    # We must call _ParseAndStripGTestFlags() before calling
    # unittest.main().  Otherwise the latter will be confused by the
    # --gtest_* flags.
    _ParseAndStripGTestFlags(sys.argv)
    # The tested binaries should not be writing XML output files unless the
    # script explicitly instructs them to.
    # TODO(vladl@google.com): Move this into Subprocess when we implement
    # passing environment into it as a parameter.
    os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
    _test_module.main()
|
apache-2.0
|
mamaddeveloper/telegrambot
|
modules/mod_xkcd.py
|
2
|
1742
|
from modules.module_base import ModuleBase
import requests
from lxml import html
from urllib.parse import urljoin
from urllib.request import urlretrieve
class ModuleXKCD(ModuleBase):
    """Bot module serving xkcd comics via the /xkcd command."""

    def __init__(self, bot):
        ModuleBase.__init__(self, bot)
        self.name = "xkcd"

    def getXKCDImage(self, chat, path="", prev=""):
        """Fetch a comic page, scrape the image and send it to the chat.

        :param chat: chat id to send to
        :param path: URL path, e.g. '/random/comic' or a comic number
        :param prev: hostname prefix, e.g. 'c.' for c.xkcd.com
        """
        try:
            response = requests.get("http://%sxkcd.com/%s/" % (prev, path))
            parsed_body = html.fromstring(response.text)
            image = parsed_body.xpath('//*[@id="comic"]//img/@src')
            imageAlt = parsed_body.xpath('//*[@id="comic"]//img/@alt')
            imageTitle = parsed_body.xpath('//*[@id="comic"]//img/@title')[0]
            # Convert any relative urls to absolute urls
            image = urljoin(response.url, image[0])
            self.bot.sendPhotoUrl(chat, image, imageAlt)
            self.bot.sendMessage(imageTitle, chat)
        except Exception as e:
            # Best-effort: report the failure to the chat instead of raising.
            self.logger.exception("xkcd error", exc_info=True)
            self.bot.sendMessage("xkcd error", chat)

    def notify_command(self, message_id, from_attr, date, chat, commandName, commandStr):
        # Dispatch /xkcd subcommands: last | random (default) | <number>.
        if commandName == "xkcd":
            if commandStr == "last":
                self.getXKCDImage(chat["id"])
            elif commandStr == "random" or commandStr == "":
                self.getXKCDImage(chat["id"], "/random/comic", "c.")
            elif commandStr.isnumeric():
                self.getXKCDImage(chat["id"], int(commandStr))
            else:
                self.bot.sendMessage("Bad arguments for xkcd", chat["id"])

    def get_commands(self):
        """Return (command, help text) pairs exposed by this module."""
        return [
            ("xkcd", "Get xkcd comics. Use with keywords: last, random or <number> "),
        ]
|
mit
|
rdo-management/tuskar
|
tuskar/tests/test_utils.py
|
1
|
3658
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tuskar.common import utils
from tuskar.storage import models
from tuskar.tests import base
class CommonUtilsTestCase(base.TestCase):
    """Tests for the role-extra name mangling helpers in tuskar.common.utils."""

    def test_resolve_role_extra_name_from_path(self):
        # Each single-entry dict maps an input path to its expected name.
        expected = [{"/path/to/FOO": "extra_FOO_"},
                    {"/hieradata/config.yaml": "extra_config_yaml"},
                    {"./name.has.dots": "extra_name.has_dots"},
                    {"/path/name.": "extra_name_"},
                    {"/path/cdefile.c": "extra_cdefile_c"},
                    {"./name_underscore_no_extension":
                     "extra_name_underscore_no_extension_"},
                    {"/path/name_underscore.ext":
                     "extra_name_underscore_ext"}, ]
        for params in expected:
            # Bug fix: params.keys()[0] raises TypeError on Python 3,
            # where dict.keys() returns a non-indexable view.
            path = next(iter(params))
            res = utils.resolve_role_extra_name_from_path(path)
            self.assertEqual(params[path], res)

    def test_resolve_template_file_name_from_role_extra_name(self):
        expected = [{"extra_FOO_": "FOO"},
                    {"extra_config_yaml": "config.yaml"},
                    {"extra_name.has_dots": "name.has.dots"},
                    {"extra_name_": "name"},
                    {"extra_cdefile_c": "cdefile.c"},
                    {"extra_name_underscore_no_extension_":
                     "name_underscore_no_extension"},
                    {"extra_name_underscore_ext": "name_underscore.ext"}, ]
        for params in expected:
            # Same Python 3 fix as above (keys()[0] -> next(iter(...))).
            name = next(iter(params))
            res = utils.resolve_template_file_name_from_role_extra_name(name)
            self.assertEqual(params[name], res)

    def test_resolve_template_extra_data(self):
        template_contents = """ Foo Bar Baz
        get_file: foo/bar.baz
        """
        template_extra = models.StoredFile(
            uuid="1234", contents="boo!", store=None, name="extra_bar_baz")
        template = models.StoredFile(
            uuid="1234", contents=template_contents, store=None)
        res = utils.resolve_template_extra_data(template, [template_extra])
        self.assertEqual(res, [{"extra_bar_baz": "foo/bar.baz"}])
class IntLikeTestCase(base.TestCase):
    """Tests for utils.is_int_like()."""

    def test_is_int_like(self):
        # Values that must be recognised as int-like.
        for value in (1, "1", "514", "0"):
            self.assertTrue(utils.is_int_like(value))
        # Values that must be rejected.
        for value in (1.1, "1.1", "1.1.1", None, "0.", "aaaaaa", "....",
                      "1g", "0cc3346e-9fef-4445-abe6-5d2b2690ec64", "a1"):
            self.assertFalse(utils.is_int_like(value))
|
apache-2.0
|
nrwahl2/ansible
|
lib/ansible/plugins/test/files.py
|
62
|
1339
|
# (c) 2015, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os.path import isdir, isfile, isabs, exists, lexists, islink, samefile, ismount
from ansible import errors
class TestModule(object):
    ''' Ansible file jinja2 tests '''

    def tests(self):
        """Map jinja2 test names onto the os.path predicates they wrap."""
        tests = {}
        # file testing
        tests['is_dir'] = isdir
        tests['is_file'] = isfile
        tests['is_link'] = islink
        tests['exists'] = exists
        tests['link_exists'] = lexists
        # path testing
        tests['is_abs'] = isabs
        tests['is_same_file'] = samefile
        tests['is_mount'] = ismount
        return tests
|
gpl-3.0
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/j/java.py
|
12
|
1652
|
##
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBlock for installing Java, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
"""
from easybuild.easyblocks.generic.packedbinary import PackedBinary
class EB_Java(PackedBinary):
    """Support for installing Java as a packed binary file (.tar.gz)
    Use the PackedBinary easyblock and set some extra paths.
    """

    def make_module_extra(self):
        """Extend the generic module text with $JAVA_HOME set to the
        installation directory."""
        extra = PackedBinary.make_module_extra(self)
        extra += self.module_generator.set_environment('JAVA_HOME', self.installdir)
        return extra
|
mit
|
rockyzhang/zhangyanhit-python-for-android-mips
|
python-build/python-libs/gdata/src/gdata/tlslite/FileObject.py
|
359
|
6807
|
"""Class returned by TLSConnection.makefile()."""
class FileObject:
"""This class provides a file object interface to a
L{tlslite.TLSConnection.TLSConnection}.
Call makefile() on a TLSConnection to create a FileObject instance.
This class was copied, with minor modifications, from the
_fileobject class in socket.py. Note that fileno() is not
implemented."""
default_bufsize = 16384 #TREV: changed from 8192
def __init__(self, sock, mode='rb', bufsize=-1):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
def _getclosed(self):
return self._sock is not None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
for result in self._sock._decrefAsync(): #TREV
pass
finally:
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
#def fileno(self):
# raise NotImplementedError() #TREV
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
|
apache-2.0
|
Tehsmash/nova
|
nova/api/openstack/compute/contrib/certificates.py
|
79
|
2729
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
import nova.cert.rpcapi
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'certificates')
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(object):
    """The x509 Certificates API controller for the OpenStack API."""

    def __init__(self):
        super(CertificatesController, self).__init__()
        # RPC client used to talk to the cert service.
        self.cert_rpcapi = nova.cert.rpcapi.CertAPI()

    def show(self, req, id):
        """Return certificate information."""
        context = req.environ['nova.context']
        authorize(context)
        if id != 'root':
            raise webob.exc.HTTPNotImplemented(
                explanation=_("Only root certificate can be retrieved."))
        try:
            cert = self.cert_rpcapi.fetch_ca(context,
                                             project_id=context.project_id)
        except exception.CryptoCAFileNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        return {'certificate': _translate_certificate_view(cert)}

    def create(self, req, body=None):
        """Create a certificate."""
        context = req.environ['nova.context']
        authorize(context)
        private_key, cert = self.cert_rpcapi.generate_x509_cert(
            context, user_id=context.user_id, project_id=context.project_id)
        return {'certificate': _translate_certificate_view(cert, private_key)}
class Certificates(extensions.ExtensionDescriptor):
    """Certificates support."""

    name = "Certificates"
    alias = "os-certificates"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "certificates/api/v1.1")
    updated = "2012-01-19T00:00:00Z"

    def get_resources(self):
        """Expose the os-certificates resource backed by the controller."""
        return [extensions.ResourceExtension('os-certificates',
                                             CertificatesController(),
                                             member_actions={})]
|
apache-2.0
|
DirtyUnicorns/android_external_chromium-org
|
mojo/public/bindings/generators/mojom_pack.py
|
23
|
4051
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojom
# mojom_pack provides a mechanism for determining the packed order and offsets
# of a mojom.Struct.
#
# ps = mojom_pack.PackedStruct(struct)
# ps.packed_fields will access a list of PackedField objects, each of which
# will have an offset, a size and a bit (for mojom.BOOLs).
class PackedField(object):
    """One struct field plus its computed packing metadata."""

    # Byte widths of the primitive mojom kinds.
    kind_to_size = {
        mojom.BOOL: 1,
        mojom.INT8: 1,
        mojom.UINT8: 1,
        mojom.INT16: 2,
        mojom.UINT16: 2,
        mojom.INT32: 4,
        mojom.UINT32: 4,
        mojom.FLOAT: 4,
        mojom.HANDLE: 4,
        mojom.MSGPIPE: 4,
        mojom.INT64: 8,
        mojom.UINT64: 8,
        mojom.DOUBLE: 8,
        mojom.STRING: 8
    }

    @classmethod
    def GetSizeForKind(cls, kind):
        # Arrays and structs are referenced via 8-byte pointers; everything
        # else is a primitive looked up in the table above.
        if isinstance(kind, (mojom.Array, mojom.Struct)):
            return 8
        return cls.kind_to_size[kind]

    def __init__(self, field, ordinal):
        """Record |field| and |ordinal|; offset/bit are assigned later by
        PackedStruct."""
        self.field = field
        self.ordinal = ordinal
        self.size = self.GetSizeForKind(field.kind)
        self.offset = None
        self.bit = None
# Returns the pad necessary to reserve space for alignment of |size|.
# Returns the pad necessary to reserve space for alignment of |size|.
def GetPad(offset, size):
    remainder = offset % size
    if remainder:
        return size - remainder
    return 0
# Returns a 2-tuple of the field offset and bit (for BOOLs)
# Returns a 2-tuple of the field offset and bit (for BOOLs)
def GetFieldOffset(field, last_field):
    # Consecutive bools share a byte, advancing one bit at a time until
    # all 8 bits are used.
    if (field.field.kind == mojom.BOOL and
            last_field.field.kind == mojom.BOOL and
            last_field.bit < 7):
        return (last_field.offset, last_field.bit + 1)
    next_offset = last_field.offset + last_field.size
    # Otherwise start right after |last_field|, aligned to the new size.
    return (next_offset + GetPad(next_offset, field.size), 0)
class PackedStruct(object):
    # Computes the packed layout of |struct|: every field gets a byte offset
    # (and a bit index for bools), filling earlier alignment holes when a
    # later field fits into one.
    def __init__(self, struct):
        self.struct = struct
        # PackedFields in increasing offset order; filled in below.
        self.packed_fields = []
        # No fields.
        if (len(struct.fields) == 0):
            return
        # Start by sorting by ordinal.
        src_fields = []
        ordinal = 1
        for field in struct.fields:
            # An explicit ordinal restarts the implicit numbering from there.
            if field.ordinal is not None:
                ordinal = field.ordinal
            src_fields.append(PackedField(field, ordinal))
            ordinal += 1
        src_fields.sort(key=lambda field: field.ordinal)
        # The lowest-ordinal field anchors the layout at offset 0.
        src_field = src_fields[0]
        src_field.offset = 0
        src_field.bit = 0
        # dst_fields will contain each of the fields, in increasing offset order.
        dst_fields = self.packed_fields
        dst_fields.append(src_field)
        # Then find first slot that each field will fit.
        for src_field in src_fields[1:]:
            last_field = dst_fields[0]
            for i in xrange(1, len(dst_fields)):
                next_field = dst_fields[i]
                # Candidate position directly after |last_field|.
                offset, bit = GetFieldOffset(src_field, last_field)
                if offset + src_field.size <= next_field.offset:
                    # Found hole.
                    src_field.offset = offset
                    src_field.bit = bit
                    dst_fields.insert(i, src_field)
                    break
                last_field = next_field
            if src_field.offset is None:
                # Add to end
                src_field.offset, src_field.bit = GetFieldOffset(src_field, last_field)
                dst_fields.append(src_field)
    def GetTotalSize(self):
        # Size of the struct: end of the last field, rounded up to 8 bytes.
        if not self.packed_fields:
            return 0;
        last_field = self.packed_fields[-1]
        offset = last_field.offset + last_field.size
        pad = GetPad(offset, 8)
        return offset + pad;
class ByteInfo(object):
    """Describes one byte of a packed struct's layout."""

    def __init__(self):
        # True when no field occupies this byte.
        self.is_padding = False
        # PackedFields that begin at this byte (several bools may share it).
        self.packed_fields = []
def GetByteLayout(packed_struct):
    """Return one ByteInfo per byte of |packed_struct|, marking which bytes
    are padding and which fields start where."""
    bytes = [ByteInfo() for i in xrange(packed_struct.GetTotalSize())]
    limit_of_previous_field = 0
    for packed_field in packed_struct.packed_fields:
        # Any gap between the previous field's end and this one is padding.
        for i in xrange(limit_of_previous_field, packed_field.offset):
            bytes[i].is_padding = True
        bytes[packed_field.offset].packed_fields.append(packed_field)
        limit_of_previous_field = packed_field.offset + packed_field.size
    # Trailing alignment bytes after the last field are padding too.
    for i in xrange(limit_of_previous_field, len(bytes)):
        bytes[i].is_padding = True
    for byte in bytes:
        # A given byte cannot both be padding and have a fields packed into it.
        assert not (byte.is_padding and byte.packed_fields)
    return bytes
|
bsd-3-clause
|
fivejjs/inasafe
|
safe_qgis/test_map.py
|
1
|
9228
|
"""**Tests for map creation in QGIS plugin.**
"""
__author__ = 'Tim Sutton <tim@linfiniti.com>'
__revision__ = '$Format:%H$'
__date__ = '01/11/2010'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import unittest
from unittest import expectedFailure
import os
import logging
# Add PARENT directory to path to make test aware of other modules
#pardir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
#sys.path.append(pardir)
from PyQt4 import QtGui
from qgis.core import (QgsMapLayerRegistry,
QgsRectangle,
QgsComposerPicture)
from qgis.gui import QgsMapCanvasLayer
from safe_qgis.safe_interface import temp_dir, unique_filename
from safe_qgis.utilities_test import (getQgisTestApp,
loadLayer,
setJakartaGeoExtent,
checkImages)
from safe_qgis.utilities import setupPrinter, dpiToMeters, qgisVersion
from safe_qgis.map import Map
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
LOGGER = logging.getLogger('InaSAFE')
class MapTest(unittest.TestCase):
    """Test the InaSAFE Map generator"""
    def setUp(self):
        """Setup fixture run before each tests"""
        # Each test starts from an empty QGIS layer registry.
        myRegistry = QgsMapLayerRegistry.instance()
        myRegistry.removeAllMapLayers()
    def test_printToPdf(self):
        """Test making a pdf of the map - this is the most typical use of map.
        """
        LOGGER.info('Testing printToPdf')
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        # Zoom the canvas to a small Jakarta extent before composing.
        myRect = QgsRectangle(106.7894, -6.2308, 106.8004, -6.2264)
        CANVAS.setExtent(myRect)
        CANVAS.refresh()
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myMap.composeMap()
        myPath = unique_filename(prefix='mapPdfTest',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        myMap.printToPdf(myPath)
        LOGGER.debug(myPath)
        myMessage = 'Rendered output does not exist: %s' % myPath
        assert os.path.exists(myPath), myMessage
        # pdf rendering is non deterministic so we can't do a hash check
        # test_renderComposition renders just the image instead of pdf
        # so we hash check there and here we just do a basic minimum file
        # size check.
        mySize = os.stat(myPath).st_size
        myExpectedSize = 352798 # as rendered on linux ub 12.04 64
        myMessage = 'Expected rendered map pdf to be at least %s, got %s' % (
            myExpectedSize, mySize)
        assert mySize >= myExpectedSize, myMessage
    def test_renderComposition(self):
        """Test making an image of the map only."""
        LOGGER.info('Testing renderComposition')
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        myRect = QgsRectangle(106.7894, -6.2308, 106.8004, -6.2264)
        CANVAS.setExtent(myRect)
        CANVAS.refresh()
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myMap.composeMap()
        myImagePath, myControlImage, myTargetArea = myMap.renderComposition()
        LOGGER.debug(myImagePath)
        assert myControlImage is not None
        # The render target should cover the full page (A3-ish at 300 dpi).
        myDimensions = [myTargetArea.left(),
                        myTargetArea.top(),
                        myTargetArea.bottom(),
                        myTargetArea.right()]
        myExpectedDimensions = [0.0, 0.0, 3507.0, 2480.0]
        myMessage = 'Expected target area to be %s, got %s' % (
            str(myExpectedDimensions), str(myDimensions))
        assert myExpectedDimensions == myDimensions, myMessage
        myMessage = 'Rendered output does not exist'
        assert os.path.exists(myImagePath), myMessage
        # Platform-specific control renders that are all acceptable.
        myAcceptableImages = ['renderComposition.png',
                              'renderComposition-variantUB12.04.png',
                              'renderComposition-variantWindosVistaSP2-32.png',
                              'renderComposition-variantJenkins.png',
                              'renderComposition-variantUB11.10-64.png',
                              'renderComposition-variantUB11.04-64.png']
        # Beta version and version changes can introduce a few extra chars
        # into the metadata section so we set a reasonable tolerance to cope
        # with this.
        myTolerance = 8000
        myFlag, myMessage = checkImages(myAcceptableImages,
                                        myImagePath,
                                        myTolerance)
        assert myFlag, myMessage
    def test_getMapTitle(self):
        """Getting the map title from the keywords"""
        myLayer, _ = loadLayer('test_floodimpact.tif')
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myTitle = myMap.getMapTitle()
        # Title comes from the layer's keywords file (Indonesian fixture).
        myExpectedTitle = 'Penduduk yang Mungkin dievakuasi'
        myMessage = 'Expected: %s\nGot:\n %s' % (myExpectedTitle, myTitle)
        assert myTitle == myExpectedTitle, myMessage
    def test_handleMissingMapTitle(self):
        """Missing map title from the keywords fails gracefully"""
        # TODO running OSM Buildngs with Pendudk Jakarta
        # wasthrowing an error when requesting map title
        # that this test wasnt replicating well
        myLayer, _ = loadLayer('population_padang_1.asc')
        myMap = Map(IFACE)
        myMap.setImpactLayer(myLayer)
        myTitle = myMap.getMapTitle()
        # A layer without a map title keyword should yield None, not raise.
        myExpectedTitle = None
        myMessage = 'Expected: %s\nGot:\n %s' % (myExpectedTitle, myTitle)
        assert myTitle == myExpectedTitle, myMessage
    @expectedFailure
    def Xtest_renderTemplate(self):
        """Test that load template works"""
        #Use the template from our resources bundle
        myInPath = ':/plugins/inasafe/basic.qpt'
        myLayer, _ = loadLayer('test_shakeimpact.shp')
        myCanvasLayer = QgsMapCanvasLayer(myLayer)
        CANVAS.setLayerSet([myCanvasLayer])
        myMap = Map(IFACE)
        setJakartaGeoExtent()
        myMap.setImpactLayer(myLayer)
        myPath = unique_filename(prefix='outTemplate',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        LOGGER.debug(myPath)
        myMap.renderTemplate(myInPath, myPath)
        assert os.path.exists(myPath)
        #os.remove(myPath)
    def test_windowsDrawingArtifacts(self):
        """Test that windows rendering does not make artifacts"""
        # sometimes spurious lines are drawn on the layout
        LOGGER.info('Testing windowsDrawingArtifacts')
        myPath = unique_filename(prefix='artifacts',
                                 suffix='.pdf',
                                 dir=temp_dir('test'))
        myMap = Map(IFACE)
        setupPrinter(myPath)
        myMap.setupComposition()
        # A small grey test image rendered three different ways below.
        myImage = QtGui.QImage(10, 10, QtGui.QImage.Format_RGB32)
        myImage.setDotsPerMeterX(dpiToMeters(300))
        myImage.setDotsPerMeterY(dpiToMeters(300))
        #myImage.fill(QtGui.QColor(250, 250, 250))
        # Look at the output, you will see antialiasing issues around some
        # of the boxes drawn...
        # myImage.fill(QtGui.QColor(200, 200, 200))
        myImage.fill(200 + 200 * 256 + 200 * 256 * 256)
        myFilename = os.path.join(temp_dir(), 'greyBox')
        myImage.save(myFilename, 'PNG')
        for i in range(10, 190, 10):
            myPicture = QgsComposerPicture(myMap.composition)
            myPicture.setPictureFile(myFilename)
            # QGIS renamed setFrame to setFrameEnabled in 1.8.
            if qgisVersion() >= 10800: # 1.8 or newer
                myPicture.setFrameEnabled(False)
            else:
                myPicture.setFrame(False)
            myPicture.setItemPosition(i, # x
                                      i, # y
                                      10, # width
                                      10) # height
            myMap.composition.addItem(myPicture)
            # Same drawing drawn directly as a pixmap
            myPixmapItem = myMap.composition.addPixmap(
                QtGui.QPixmap.fromImage(myImage))
            myPixmapItem.setOffset(i, i + 20)
            # Same drawing using our drawImage Helper
            myWidthMM = 1
            myMap.drawImage(myImage, myWidthMM, i, i + 40)
        myImagePath, _, _ = myMap.renderComposition()
        # when this test no longer matches our broken render hash
        # we know the issue is fixed
        myControlImages = ['windowsArtifacts.png']
        myTolerance = 0
        myFlag, myMessage = checkImages(myControlImages,
                                        myImagePath,
                                        myTolerance)
        myMessage += ('\nWe want these images to match, if they do not '
                      'there may be rendering artifacts in windows.\n')
        assert myFlag, myMessage
if __name__ == '__main__':
    # Run all methods named test* from MapTest with verbose output.
    suite = unittest.makeSuite(MapTest, 'test')
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
|
gpl-3.0
|
jasonthomas/zamboni
|
mkt/api/exceptions.py
|
6
|
2724
|
from django.conf import settings
from django.core.signals import got_request_exception
from rest_framework import status
from rest_framework.exceptions import APIException, ParseError as DRFParseError
from rest_framework.response import Response
from rest_framework.views import exception_handler
# Raised when a user attempts to purchase something they already own.
class AlreadyPurchased(Exception):
    pass
class Conflict(APIException):
    """409 response for requests that clash with existing state."""

    status_code = status.HTTP_409_CONFLICT
    default_detail = 'Conflict detected.'

    def __init__(self, detail=None):
        # Any falsy detail (None, '') falls back to the default message.
        self.detail = self.default_detail if not detail else detail
class NotImplemented(APIException):
    """501 response for API endpoints that are not implemented.

    NOTE: the class name shadows the builtin ``NotImplemented`` singleton;
    kept for backward compatibility with existing callers.
    """

    status_code = status.HTTP_501_NOT_IMPLEMENTED
    default_detail = 'API not implemented.'

    def __init__(self, detail=None):
        # Any falsy detail (None, '') falls back to the default message.
        self.detail = self.default_detail if not detail else detail
class ServiceUnavailable(APIException):
    """503 response for temporarily unavailable services."""

    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = 'Service unavailable at this time.'

    def __init__(self, detail=None):
        # Any falsy detail (None, '') falls back to the default message.
        self.detail = self.default_detail if not detail else detail
def custom_exception_handler(exc, context=None):
    """
    Custom exception handler for DRF, which doesn't provide one for HTTP
    responses like tastypie does.

    Returns a DRF Response; exceptions DRF does not know become a 500
    response, optionally carrying the traceback.
    """
    # If propagate is true, bail early.
    # NOTE(review): the bare `raise` re-raises the exception currently
    # being handled, so this function must be invoked from within an
    # active except block (as DRF does).
    if settings.DEBUG_PROPAGATE_EXCEPTIONS:
        raise
    # Call REST framework's default exception handler first,
    # to get the standard error response.
    response = exception_handler(exc, context)
    # If the response is None, then DRF didn't handle the exception and we
    # should do it ourselves.
    if response is None:
        # Start with a generic default error message.
        data = {"detail": "Internal Server Error"}
        # Include traceback if API_SHOW_TRACEBACKS is active.
        if getattr(settings, 'API_SHOW_TRACEBACKS', settings.DEBUG):
            import traceback
            import sys
            data['error_message'] = unicode(exc)
            data['traceback'] = '\n'.join(
                traceback.format_exception(*(sys.exc_info())))
        # _request/_klass may have been stashed on the exception by the
        # raising code; both default to None if absent.
        request = getattr(exc, '_request', None)
        klass = getattr(exc, '_klass', None)
        # Send the signal so other apps are aware of the exception.
        got_request_exception.send(klass, request=request)
        # Send the 500 response back.
        response = Response(data, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return response
class HttpLegallyUnavailable(APIException):
    """HTTP 451: content withheld for legal reasons."""

    status_code = 451
    default_detail = 'Legally unavailable.'

    def __init__(self, detail=None):
        # Unlike the other exceptions here, detail is wrapped in a dict.
        self.detail = {'detail': self.default_detail if not detail else detail}
class ParseError(DRFParseError):
    """DRF ParseError whose detail is wrapped in a {'detail': ...} dict."""

    def __init__(self, detail):
        self.detail = {u'detail': self.default_detail if not detail else detail}
|
bsd-3-clause
|
oliciv/youtube-dl
|
youtube_dl/extractor/wimp.py
|
37
|
1647
|
from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
class WimpIE(InfoExtractor):
    """Extractor for wimp.com video pages."""

    _VALID_URL = r'http://(?:www\.)?wimp\.com/(?P<id>[^/]+)/'
    _TESTS = [{
        'url': 'http://www.wimp.com/maruexhausted/',
        'md5': 'ee21217ffd66d058e8b16be340b74883',
        'info_dict': {
            'id': 'maruexhausted',
            'ext': 'mp4',
            'title': 'Maru is exhausted.',
            'description': 'md5:57e099e857c0a4ea312542b684a869b8',
        }
    }, {
        'url': 'http://www.wimp.com/clowncar/',
        'md5': '4e2986c793694b55b37cf92521d12bb4',
        'info_dict': {
            'id': 'clowncar',
            'ext': 'mp4',
            'title': 'It\'s like a clown car.',
            'description': 'md5:0e56db1370a6e49c5c1d19124c0d2fb2',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(
            [r"[\"']file[\"']\s*[:,]\s*[\"'](.+?)[\"']", r"videoId\s*:\s*[\"']([^\"']+)[\"']"],
            webpage, 'video URL')
        # Some wimp pages merely embed a YouTube video; delegate to the
        # YouTube extractor in that case.
        if YoutubeIE.suitable(video_url):
            self.to_screen('Found YouTube video')
            return {
                '_type': 'url',
                'url': video_url,
                'ie_key': YoutubeIE.ie_key(),
            }
        info = {'id': video_id, 'url': video_url}
        info['title'] = self._og_search_title(webpage)
        info['thumbnail'] = self._og_search_thumbnail(webpage)
        info['description'] = self._og_search_description(webpage)
        return info
|
unlicense
|
dbckz/ansible
|
contrib/inventory/spacewalk.py
|
19
|
8810
|
#!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <jonEbird@gmail.com>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import os
import time
from optparse import OptionParser
import subprocess
import ConfigParser
from six import iteritems
try:
import json
except:
import simplejson as json
# Paths and cache tuning; the ini file location can be overridden with the
# SPACEWALK_INI_PATH environment variable.
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
# Sanity check
if not os.path.exists(SW_REPORT):
    print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
    sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
    os.mkdir(CACHE_DIR)
    # Group-writable + setgid so multiple users can share the cache.
    os.chmod(CACHE_DIR, 0o2775)
# Helper functions
#------------------------------
def spacewalk_report(name):
    """Yield a dictionary form of each CSV output produced by the specified
    spacewalk-report.

    Results are cached under CACHE_DIR for CACHE_AGE seconds so repeated
    invocations do not re-run the (slow) report command.
    """
    cache_filename = os.path.join(CACHE_DIR, name)
    if not os.path.exists(cache_filename) or \
            (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
        # Update the cache. Use a context manager so the handle is closed
        # (and flushed) before we read the file back below; the original
        # code leaked the read handle entirely.
        with open(cache_filename, 'w') as fh:
            p = subprocess.Popen([SW_REPORT, name], stdout=fh)
            p.wait()

    with open(cache_filename, 'r') as fh:
        lines = fh.readlines()
    # First line is the CSV header; add 'spacewalk_' prefix to the keys.
    keys = lines[0].strip().split(',')
    keys = ['spacewalk_' + key for key in keys]
    for line in lines[1:]:
        values = line.strip().split(',')
        # Skip malformed rows whose column count disagrees with the header.
        if len(keys) == len(values):
            yield dict(zip(keys, values))
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
                  help="Produce a JSON consumable grouping of servers for Ansible")
parser.add_option('--host', default=None, dest="host",
                  help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
                  default=False, action="store_true",
                  help="Produce a friendlier version of either server list or host detail")
parser.add_option('-o', '--org', default=None, dest="org_number",
                  help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
                  help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()

# read spacewalk.ini if present
#------------------------------
# Ini values only act as fallbacks: explicit command-line flags win.
if os.path.exists(INI_FILE):
    config = ConfigParser.SafeConfigParser()
    config.read(INI_FILE)
    if config.has_option('spacewalk' , 'cache_age'):
        CACHE_AGE = config.get('spacewalk' , 'cache_age')
    if not options.org_number and config.has_option('spacewalk' , 'org_number'):
        options.org_number = config.get('spacewalk' , 'org_number')
    if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'):
        options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name')

# Generate dictionary for mapping group_id to org_id
#------------------------------
org_groups = {}
try:
    for group in spacewalk_report('system-groups'):
        org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
    print('Problem executing the command "%s system-groups": %s' %
          (SW_REPORT, str(e)), file=sys.stderr)
    sys.exit(2)
# List out the known server from Spacewalk
#------------------------------
if options.list:
    # to build the "_meta"-Group with hostvars first create dictionary for later use
    host_vars = {}
    try:
        for item in spacewalk_report('inventory'):
            # Semicolon-separated report values become lists in hostvars.
            host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
    except (OSError) as e:
        print('Problem executing the command "%s inventory": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)

    groups = {}
    meta = { "hostvars" : {} }
    try:
        for system in spacewalk_report('system-groups-systems'):
            # first get org_id of system
            org_id = org_groups[ system['spacewalk_group_id'] ]
            # shall we add the org_id as prefix to the group name:
            if options.prefix_org_name:
                prefix = org_id + "-"
                group_name = prefix + system['spacewalk_group_name']
            else:
                group_name = system['spacewalk_group_name']
            # if we are limited to one organization:
            if options.org_number:
                if org_id == options.org_number:
                    if group_name not in groups:
                        groups[group_name] = set()
                    groups[group_name].add(system['spacewalk_server_name'])
                    if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
                        meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
            # or we list all groups and systems:
            else:
                if group_name not in groups:
                    groups[group_name] = set()
                groups[group_name].add(system['spacewalk_server_name'])
                if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
                    meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
    except (OSError) as e:
        print('Problem executing the command "%s system-groups-systems": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)

    if options.human:
        # Friendly ini-style listing instead of JSON.
        for group, systems in iteritems(groups):
            print('[%s]\n%s\n' % (group, '\n'.join(systems)))
    else:
        # Sets are not JSON serializable; convert each group to a list.
        final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] )
        final["_meta"] = meta
        print(json.dumps( final ))
        #print(json.dumps(groups))
    sys.exit(0)

# Return a details information concerning the spacewalk server
#------------------------------
elif options.host:
    host_details = {}
    try:
        for system in spacewalk_report('inventory'):
            if system['spacewalk_hostname'] == options.host:
                host_details = system
                break
    except (OSError) as e:
        print('Problem executing the command "%s inventory": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)

    if options.human:
        print('Host: %s' % options.host)
        for k, v in iteritems(host_details):
            print('  %s: %s' % (k, '\n   '.join(v.split(';'))))
    else:
        # Semicolon-separated values become lists, mirroring --list above.
        print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) )
    sys.exit(0)

else:
    parser.print_help()
    sys.exit(1)
|
gpl-3.0
|
Alkxzv/zoonas
|
django/apps/comments/views.py
|
1
|
4479
|
from django.contrib import messages as msg
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseBadRequest, HttpResponseRedirect
from django.utils.translation import ugettext as _
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.edit import FormView, ProcessFormView

from notes.models import Note
from reports.views import ReportModelView
from users.decorators import login_required_view
from votes.views import VoteView
from zones.decorators import moderator_required_view

from .forms import CommentCreateForm, CommentReplyForm
from .models import Comment
# User views
@login_required_view
class CommentCreateView(FormView):
form_class = CommentCreateForm
template_name = 'comments/submit_page.html'
def get_commented(self, *args, **kwargs):
raise NotImplementedError
def get_context_data(self, **kwargs):
context = super(CommentCreateView, self).get_context_data(**kwargs)
context['commented_description'] = unicode(self.get_commented())
return context
def form_valid(self, form):
author = self.request.user
text = form.cleaned_data.get('note')
item = self.get_commented()
comment = form.save(commit=False)
comment.note = Note.objects.create(text=text, author=author)
comment.item = item
comment.save()
comment.cast_vote(user=author, way='up')
comment.item.update_comment_count()
comment.item.save()
msg.add_message(self.request, msg.SUCCESS, _('Comment saved.'))
return HttpResponseRedirect(comment.get_absolute_url())
def form_invalid(self, form):
msg.add_message(self.request, msg.ERROR, _('Not saved.'))
return super(CommentCreateView, self).form_invalid(form)
@login_required_view
class CommentReplyView(SingleObjectMixin, FormView):
    """Post a reply to an existing comment (the single object is the parent)."""
    model = Comment
    form_class = CommentReplyForm
    template_name = 'comments/submit_page.html'

    def get_context_data(self, **kwargs):
        data = super(CommentReplyView, self).get_context_data(**kwargs)
        # Describe the item being discussed and expose the parent comment.
        data['commented_description'] = unicode(self.object.item)
        data['reply_to'] = self.object
        return data

    def get(self, request, *args, **kwargs):
        # SingleObjectMixin needs self.object populated before rendering.
        self.object = self.get_object()
        return super(CommentReplyView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(CommentReplyView, self).post(request, *args, **kwargs)

    def form_valid(self, form):
        """Save the reply under its parent, self-upvote, refresh counters."""
        user = self.request.user
        parent = self.object
        reply = form.save(commit=False)
        reply.parent = parent
        # Reply text is stored in a dedicated Note record.
        reply.note = Note.objects.create(
            text=form.cleaned_data.get('note'), author=user)
        reply.item = parent.item
        reply.save()
        # The author automatically upvotes their own reply.
        reply.cast_vote(user=user, way='up')
        reply.item.update_comment_count()
        reply.item.save()
        msg.add_message(self.request, msg.SUCCESS, _('Comment saved.'))
        return HttpResponseRedirect(reply.get_absolute_url())

    def form_invalid(self, form):
        msg.add_message(self.request, msg.ERROR, _('Not saved.'))
        return super(CommentReplyView, self).form_invalid(form)
@login_required_view
class CommentVoteView(VoteView):
model = Comment
@login_required_view
class CommentReportView(ReportModelView):
model = Comment
# Moderator views*
@moderator_required_view
class CommentEvaluateView(SingleObjectMixin, ProcessFormView):
model = Comment
def get(self, request, *args, **kwargs):
raise Http404
def post(self, request, *args, **kwargs):
value = request.POST.get('evaluation')
self.object = self.get_object()
if value == 'allow':
self.object.allow()
msg.add_message(self.request, msg.SUCCESS, _("Allowed."))
elif value == 'reject':
self.object.reject()
msg.add_message(self.request, msg.SUCCESS, _("Rejected."))
elif value == 'public':
self.object.item.show()
msg.add_message(self.request, msg.SUCCESS, _("Made public."))
elif value == 'hidden':
self.object.item.hide()
msg.add_message(self.request, msg.SUCCESS, _("Made private."))
else:
return HttpResponseBadRequest(_("Invalid data."))
return HttpResponseRedirect(self.object.get_absolute_url())
|
gpl-3.0
|
skirsdeda/django
|
tests/decorators/tests.py
|
47
|
9902
|
from functools import wraps, update_wrapper
from unittest import TestCase
import warnings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, permission_required, user_passes_test
from django.http import HttpResponse, HttpRequest, HttpResponseNotAllowed
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.utils.decorators import method_decorator
from django.utils.functional import allow_lazy, lazy, memoize
from django.views.decorators.cache import cache_page, never_cache, cache_control
from django.views.decorators.clickjacking import xframe_options_deny, xframe_options_sameorigin, xframe_options_exempt
from django.views.decorators.http import require_http_methods, require_GET, require_POST, require_safe, condition
from django.views.decorators.vary import vary_on_headers, vary_on_cookie
def fully_decorated(request):
"""Expected __doc__"""
return HttpResponse('<html><body>dummy</body></html>')
fully_decorated.anything = "Expected __dict__"
def compose(*functions):
    """Compose callables right to left.

    ``compose(f, g)(*args, **kwargs) == f(g(*args, **kwargs))``: the
    rightmost callable receives the original arguments and every other
    one receives the previous result.
    """
    def _composed(*args, **kwargs):
        # Seed with the innermost (rightmost) call, then fold leftwards.
        value = functions[-1](*args, **kwargs)
        for func in reversed(functions[:-1]):
            value = func(value)
        return value
    return _composed
full_decorator = compose(
# django.views.decorators.http
require_http_methods(["GET"]),
require_GET,
require_POST,
require_safe,
condition(lambda r: None, lambda r: None),
# django.views.decorators.vary
vary_on_headers('Accept-language'),
vary_on_cookie,
# django.views.decorators.cache
cache_page(60 * 15),
cache_control(private=True),
never_cache,
# django.contrib.auth.decorators
# Apply user_passes_test twice to check #9474
user_passes_test(lambda u: True),
login_required,
permission_required('change_world'),
# django.contrib.admin.views.decorators
staff_member_required,
# django.utils.functional
allow_lazy,
lazy,
)
# suppress the deprecation warning of memoize
with warnings.catch_warnings(record=True):
fully_decorated = memoize(fully_decorated, {}, 1)
fully_decorated = full_decorator(fully_decorated)
class DecoratorsTest(TestCase):
def test_attributes(self):
"""
Tests that django decorators set certain attributes of the wrapped
function.
"""
self.assertEqual(fully_decorated.__name__, 'fully_decorated')
self.assertEqual(fully_decorated.__doc__, 'Expected __doc__')
self.assertEqual(fully_decorated.__dict__['anything'], 'Expected __dict__')
def test_user_passes_test_composition(self):
"""
Test that the user_passes_test decorator can be applied multiple times
(#9474).
"""
def test1(user):
user.decorators_applied.append('test1')
return True
def test2(user):
user.decorators_applied.append('test2')
return True
def callback(request):
return request.user.decorators_applied
callback = user_passes_test(test1)(callback)
callback = user_passes_test(test2)(callback)
class DummyUser(object):
pass
class DummyRequest(object):
pass
request = DummyRequest()
request.user = DummyUser()
request.user.decorators_applied = []
response = callback(request)
self.assertEqual(response, ['test2', 'test1'])
def test_cache_page_new_style(self):
"""
Test that we can call cache_page the new way
"""
def my_view(request):
return "response"
my_view_cached = cache_page(123)(my_view)
self.assertEqual(my_view_cached(HttpRequest()), "response")
my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
self.assertEqual(my_view_cached2(HttpRequest()), "response")
def test_require_safe_accepts_only_safe_methods(self):
"""
Test for the require_safe decorator.
A view returns either a response or an exception.
Refs #15637.
"""
def my_view(request):
return HttpResponse("OK")
my_safe_view = require_safe(my_view)
request = HttpRequest()
request.method = 'GET'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'HEAD'
self.assertIsInstance(my_safe_view(request), HttpResponse)
request.method = 'POST'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'PUT'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
request.method = 'DELETE'
self.assertIsInstance(my_safe_view(request), HttpResponseNotAllowed)
# For testing method_decorator, a decorator that assumes a single argument.
# We will get type arguments if there is a mismatch in the number of arguments.
def simple_dec(func):
def wrapper(arg):
return func("test:" + arg)
return wraps(func)(wrapper)
simple_dec_m = method_decorator(simple_dec)
# For testing method_decorator, two decorators that add an attribute to the function
def myattr_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr = True
return wraps(func)(wrapper)
myattr_dec_m = method_decorator(myattr_dec)
def myattr2_dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.myattr2 = True
return wraps(func)(wrapper)
myattr2_dec_m = method_decorator(myattr2_dec)
class ClsDec(object):
def __init__(self, myattr):
self.myattr = myattr
def __call__(self, f):
def wrapped():
return f() and self.myattr
return update_wrapper(wrapped, f)
class MethodDecoratorTests(TestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test(object):
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
@myattr2_dec
def func():
pass
self.assertEqual(getattr(func, 'myattr', False), True)
self.assertEqual(getattr(func, 'myattr2', False), True)
# Now check method_decorator
class Test(object):
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
self.assertEqual(getattr(Test().method, 'myattr', False), True)
self.assertEqual(getattr(Test().method, 'myattr2', False), True)
self.assertEqual(getattr(Test.method, 'myattr', False), True)
self.assertEqual(getattr(Test.method, 'myattr2', False), True)
self.assertEqual(Test.method.__doc__, 'A method')
self.assertEqual(Test.method.__name__, 'method')
# Test for argumented decorator
def test_argumented(self):
class Test(object):
@method_decorator(ClsDec(False))
def method(self):
return True
self.assertEqual(Test().method(), False)
def test_descriptors(self):
def original_dec(wrapped):
def _wrapped(arg):
return wrapped(arg)
return _wrapped
method_dec = method_decorator(original_dec)
class bound_wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __call__(self, arg):
return self.wrapped(arg)
def __get__(self, instance, owner):
return self
class descriptor_wrapper(object):
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __get__(self, instance, owner):
return bound_wrapper(self.wrapped.__get__(instance, owner))
class Test(object):
@method_dec
@descriptor_wrapper
def method(self, arg):
return arg
self.assertEqual(Test().method(1), 1)
class XFrameOptionsDecoratorsTests(TestCase):
"""
Tests for the X-Frame-Options decorators.
"""
def test_deny_decorator(self):
"""
Ensures @xframe_options_deny properly sets the X-Frame-Options header.
"""
@xframe_options_deny
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_sameorigin_decorator(self):
"""
Ensures @xframe_options_sameorigin properly sets the X-Frame-Options
header.
"""
@xframe_options_sameorigin
def a_view(request):
return HttpResponse()
r = a_view(HttpRequest())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_exempt_decorator(self):
"""
Ensures @xframe_options_exempt properly instructs the
XFrameOptionsMiddleware to NOT set the header.
"""
@xframe_options_exempt
def a_view(request):
return HttpResponse()
req = HttpRequest()
resp = a_view(req)
self.assertEqual(resp.get('X-Frame-Options', None), None)
self.assertTrue(resp.xframe_options_exempt)
# Since the real purpose of the exempt decorator is to suppress
# the middleware's functionality, let's make sure it actually works...
r = XFrameOptionsMiddleware().process_response(req, resp)
self.assertEqual(r.get('X-Frame-Options', None), None)
|
bsd-3-clause
|
swiftstack/swift
|
test/unit/common/middleware/s3api/test_cfg.py
|
3
|
1196
|
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware.s3api.utils import Config
class TestS3ApiCfg(unittest.TestCase):
def test_config(self):
conf = Config(
{
'a': 'str',
'b': 10,
'c': True,
}
)
conf.update(
{
'a': 'str2',
'b': '100',
'c': 'false',
}
)
self.assertEqual(conf['a'], 'str2')
self.assertEqual(conf['b'], 100)
self.assertEqual(conf['c'], False)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
utkbansal/kuma
|
kuma/wiki/feeds.py
|
5
|
15307
|
"""Feeds for documents"""
import datetime
import json
from django.conf import settings
from django.db.models import F
from django.contrib.syndication.views import Feed
from django.utils.html import escape
from django.utils.feedgenerator import (SyndicationFeed, Rss201rev2Feed,
Atom1Feed)
from django.utils.translation import ugettext as _
from kuma.core.urlresolvers import reverse
from kuma.core.validators import valid_jsonp_callback_value
from kuma.users.helpers import gravatar_url
from .helpers import diff_table, tag_diff_table, get_compare_url, colorize_diff
from .models import Document, Revision
MAX_FEED_ITEMS = getattr(settings, 'MAX_FEED_ITEMS', 500)
DEFAULT_FEED_ITEMS = 50
class DocumentsFeed(Feed):
title = _('MDN documents')
subtitle = _('Documents authored by MDN users')
link = _('/')
def __call__(self, request, *args, **kwargs):
self.request = request
if 'all_locales' in request.GET:
self.locale = None
else:
self.locale = request.locale
return super(DocumentsFeed, self).__call__(request, *args, **kwargs)
def feed_extra_kwargs(self, obj):
return {'request': self.request}
def item_extra_kwargs(self, obj):
return {'obj': obj}
def get_object(self, request, format):
if format == 'json':
self.feed_type = DocumentJSONFeedGenerator
elif format == 'rss':
self.feed_type = Rss201rev2Feed
else:
self.feed_type = Atom1Feed
def item_pubdate(self, document):
return document.current_revision.created
def item_title(self, document):
return document.title
def item_description(self, document):
return document.current_revision.summary
def item_author_name(self, document):
return document.current_revision.creator.username
def item_author_link(self, document):
return self.request.build_absolute_uri(
document.current_revision.creator.get_absolute_url())
def item_link(self, document):
return self.request.build_absolute_uri(document.get_absolute_url())
def item_categories(self, document):
return document.tags.all()
class DocumentJSONFeedGenerator(SyndicationFeed):
"""JSON feed generator for Documents
TODO: Someday maybe make this into a JSON Activity Stream?"""
mime_type = 'application/json'
def _encode_complex(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
def write(self, outfile, encoding):
request = self.feed['request']
# Check for a callback param, validate it before use
callback = request.GET.get('callback', None)
if callback is not None:
if not valid_jsonp_callback_value(callback):
callback = None
items_out = []
for item in self.items:
document = item['obj']
# Include some of the simple elements from the preprocessed item
item_out = dict((x, item[x]) for x in (
'link', 'title', 'pubdate', 'author_name', 'author_link',
))
# HACK: DocumentFeed is the superclass of RevisionFeed. In this
# case, current_revision is the revision itself.
# TODO: Refactor this out into separate DocumentFeed and
# RevisionFeed subclasses of Feed.
if hasattr(document, 'current_revision'):
revision = document.current_revision
else:
revision = document
if revision.creator.email:
item_out['author_avatar'] = gravatar_url(revision.creator.email)
summary = revision.summary
if summary:
item_out['summary'] = summary
# Linkify the tags used in the feed item
categories = dict(
(x, request.build_absolute_uri(
reverse('wiki.tag', kwargs={'tag': x})))
for x in item['categories']
)
if categories:
item_out['categories'] = categories
items_out.append(item_out)
data = items_out
if callback:
outfile.write('%s(' % callback)
outfile.write(json.dumps(data, default=self._encode_complex))
if callback:
outfile.write(')')
class DocumentsRecentFeed(DocumentsFeed):
    """
    Feed of recently revised documents
    """
    title = _('MDN recent document changes')
    subtitle = _('Recent changes to MDN documents')

    def get_object(self, request, format, tag=None, category=None):
        super(DocumentsRecentFeed, self).get_object(request, format)
        self.category = category
        self.tag = tag
        if tag:
            # i18n fix: translate the template first, *then* interpolate.
            # Interpolating before gettext produced a msgid that could
            # never match the translation catalog.
            self.title = _('MDN recent changes to documents tagged %s') % tag
            self.link = self.request.build_absolute_uri(
                reverse('wiki.tag', args=(tag,)))
        else:
            self.link = self.request.build_absolute_uri(
                reverse('wiki.all_documents'))

    def items(self):
        """Return the most recently revised documents (newest first)."""
        # Temporarily storing the selected documents PKs in a list
        # to speed up retrieval (max MAX_FEED_ITEMS size)
        item_pks = (Document.objects
                    .filter_for_list(tag_name=self.tag,
                                     category=self.category,
                                     locale=self.locale)
                    .filter(current_revision__isnull=False)
                    .order_by('-current_revision__created')
                    .values_list('pk', flat=True)[:MAX_FEED_ITEMS])
        return (Document.objects.filter(pk__in=list(item_pks))
                .defer('html')
                .prefetch_related('current_revision',
                                  'current_revision__creator',
                                  'tags'))
class DocumentsReviewFeed(DocumentsRecentFeed):
    """
    Feed of documents in need of review
    """
    title = _('MDN documents in need of review')
    subtitle = _('Recent changes to MDN documents that need to be reviewed')

    def get_object(self, request, format, tag=None):
        super(DocumentsReviewFeed, self).get_object(request, format)
        self.subtitle = None
        if tag:
            # i18n fix: translate first, then interpolate the tag name;
            # the old `_('... %s' % tag)` built an untranslatable msgid.
            self.title = _('MDN documents for %s review') % tag
            self.link = self.request.build_absolute_uri(
                reverse('wiki.list_review_tag', args=(tag,)))
        else:
            self.title = _('MDN documents for review')
            self.link = self.request.build_absolute_uri(
                reverse('wiki.list_review'))
        return tag

    def items(self, tag=None):
        """Return documents flagged with the given review tag, newest first."""
        # Temporarily storing the selected documents PKs in a list
        # to speed up retrieval (max MAX_FEED_ITEMS size)
        item_pks = (Document.objects
                    .filter_for_review(tag_name=tag, locale=self.locale)
                    .filter(current_revision__isnull=False)
                    .order_by('-current_revision__created')
                    .values_list('pk', flat=True)[:MAX_FEED_ITEMS])
        return (Document.objects.filter(pk__in=list(item_pks))
                .defer('html')
                .prefetch_related('current_revision',
                                  'current_revision__creator',
                                  'tags'))
class DocumentsUpdatedTranslationParentFeed(DocumentsFeed):
"""Feed of translated documents whose parent has been modified since the
translation was last updated."""
def get_object(self, request, format, tag=None):
super(DocumentsUpdatedTranslationParentFeed,
self).get_object(request, format)
self.subtitle = None
self.title = _("MDN '%s' translations in need of update" %
self.locale)
# TODO: Need an HTML / dashboard version of this feed
self.link = self.request.build_absolute_uri(
reverse('wiki.all_documents'))
def items(self):
return (Document.objects
.prefetch_related('parent')
.filter(locale=self.locale, parent__isnull=False)
.filter(modified__lt=F('parent__modified'))
.order_by('-parent__current_revision__created')
[:MAX_FEED_ITEMS])
def item_description(self, item):
# TODO: Needs to be a jinja template?
template = _(u"""
<p>
<a href="%(parent_url)s" title="%(parent_title)s">
View '%(parent_locale)s' parent
</a>
(<a href="%(mod_url)s">last modified at %(parent_modified)s</a>)
</p>
<p>
<a href="%(doc_edit_url)s" title="%(doc_title)s">
Edit '%(doc_locale)s' translation
</a>
(last modified at %(doc_modified)s)
</p>
""")
doc, parent = item, item.parent
trans_based_on_pk = (Revision.objects.filter(document=parent)
.filter(created__lte=doc.modified)
.order_by('created')
.values_list('pk', flat=True)
.first())
mod_url = get_compare_url(parent,
trans_based_on_pk,
parent.current_revision.id)
context = {
'doc_url': self.request.build_absolute_uri(doc.get_absolute_url()),
'doc_edit_url': self.request.build_absolute_uri(
doc.get_edit_url()),
'doc_title': doc.title,
'doc_locale': doc.locale,
'doc_modified': doc.modified,
'parent_url': self.request.build_absolute_uri(
parent.get_absolute_url()),
'parent_title': parent.title,
'parent_locale': parent.locale,
'parent_modified': parent.modified,
'mod_url': mod_url,
}
return template % context
class RevisionsFeed(DocumentsFeed):
    """
    Feed of recent revisions
    """
    title = _('MDN recent revisions')
    subtitle = _('Recent revisions to MDN documents')

    def items(self):
        """Return a page of recent revisions.

        Pagination is driven by the ?limit= and ?page= query params;
        limit is clamped to MAX_FEED_ITEMS.
        """
        items = Revision.objects
        limit = int(self.request.GET.get('limit', DEFAULT_FEED_ITEMS))
        page = int(self.request.GET.get('page', 1))
        # BUG FIX: clamp the limit *before* computing the slice bounds.
        # Previously the clamp ran after start/finish were derived, so an
        # oversized ?limit= bypassed the MAX_FEED_ITEMS cap entirely.
        if not limit or limit > MAX_FEED_ITEMS:
            limit = MAX_FEED_ITEMS
        start = (page - 1) * limit
        finish = start + limit
        if self.locale:
            items = items.filter(document__locale=self.locale)
        # Temporarily storing the selected revision PKs in a list
        # to speed up retrieval (max MAX_FEED_ITEMS size)
        item_pks = (items.order_by('-created')
                         .values_list('pk', flat=True)[start:finish])
        return (Revision.objects.filter(pk__in=list(item_pks))
                                .prefetch_related('creator',
                                                  'document'))

    def item_title(self, item):
        return '%s (%s)' % (item.document.slug, item.document.locale)

    def item_description(self, item):
        """Build an HTML summary: author, comment, tag/review/content diffs
        and a table of action links."""
        # TODO: put this in a jinja template if django syndication will let us
        previous = item.previous
        if previous is None:
            action = u'Created'
        else:
            action = u'Edited'
        by = u'<h3>%s by:</h3><p>%s</p>' % (action, item.creator.username)

        if item.comment:
            comment = u'<h3>Comment:</h3><p>%s</p>' % item.comment
        else:
            comment = u''

        review_diff = u''
        tag_diff = u''
        content_diff = u''

        if previous:
            # Diff review tags when the sets differ.
            prev_review_tags = previous.review_tags.values_list('name',
                                                                flat=True)
            curr_review_tags = item.review_tags.values_list('name', flat=True)
            if set(prev_review_tags) != set(curr_review_tags):
                table = tag_diff_table(u','.join(prev_review_tags),
                                       u','.join(curr_review_tags),
                                       previous.id, item.id)
                review_diff = u'<h3>Review changes:</h3>%s' % table
                review_diff = colorize_diff(review_diff)

            if previous.tags != item.tags:
                table = tag_diff_table(previous.tags, item.tags,
                                       previous.id, item.id)
                tag_diff = u'<h3>Tag changes:</h3>%s' % table
                tag_diff = colorize_diff(tag_diff)

        previous_content = ''
        previous_id = u'N/A'
        content_diff = u'<h3>Content changes:</h3>'
        if previous:
            previous_content = previous.get_tidied_content()
            current_content = item.get_tidied_content()
            previous_id = previous.id
            if previous_content != current_content:
                content_diff = content_diff + diff_table(
                    previous_content, current_content,
                    previous_id, item.id)
                content_diff = colorize_diff(content_diff)
        else:
            # First revision: show the full (escaped) content instead.
            content_diff = content_diff + escape(item.content)

        link_cell = u'<td><a href="%s">%s</a></td>'
        view_cell = link_cell % (item.document.get_absolute_url(),
                                 _('View Page'))
        edit_cell = link_cell % (item.document.get_edit_url(),
                                 _('Edit Page'))
        if previous:
            compare_cell = link_cell % (get_compare_url(item.document,
                                                        previous.id,
                                                        item.id),
                                        _('Show comparison'))
        else:
            compare_cell = ''
        history_cell = link_cell % (reverse('wiki.document_revisions',
                                            args=[item.document.slug]),
                                    _('History'))
        links_table = u'<table border="0" width="80%">'
        links_table = links_table + u'<tr>%s%s%s%s</tr>' % (view_cell,
                                                            edit_cell,
                                                            compare_cell,
                                                            history_cell)
        links_table = links_table + u'</table>'
        return u''.join([by, comment,
                         tag_diff, review_diff, content_diff, links_table])

    def item_link(self, item):
        return self.request.build_absolute_uri(item.document.get_absolute_url())

    def item_pubdate(self, item):
        return item.created

    def item_author_name(self, item):
        return item.creator.username

    def item_author_link(self, item):
        return self.request.build_absolute_uri(item.creator.get_absolute_url())

    def item_categories(self, item):
        return []
|
mpl-2.0
|
PaulKinlan/cli-caniuse
|
site/app/scripts/bower_components/jsrepl-build/extern/python/closured/lib/python2.7/encodings/zlib_codec.py
|
533
|
3015
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input, errors='strict'):
    """Compress *input* with zlib.

    Returns the codec-convention pair ``(output, length_consumed)``.
    Only the 'strict' error handling mode is supported.
    """
    assert errors == 'strict'
    compressed = zlib.compress(input)
    return (compressed, len(input))
def zlib_decode(input, errors='strict'):
    """Decompress zlib-compressed *input*.

    *input* must support the buffer protocol (bytes, buffer objects,
    memory-mapped files).  Returns the codec-convention pair
    ``(output, length_consumed)``.  Only 'strict' error handling is
    supported.
    """
    assert errors == 'strict'
    decompressed = zlib.decompress(input)
    return (decompressed, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Stateful zlib compressor implementing the incremental-codec API."""

    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()

    def encode(self, input, final=False):
        data = self.compressobj.compress(input)
        if not final:
            return data
        # Last chunk: flush whatever the compressor still buffers.
        return data + self.compressobj.flush()

    def reset(self):
        # Discard all compressor state; start a fresh stream.
        self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Stateful zlib decompressor implementing the incremental-codec API."""

    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()

    def decode(self, input, final=False):
        data = self.decompressobj.decompress(input)
        if not final:
            return data
        # Last chunk: flush remaining buffered output.
        return data + self.decompressobj.flush()

    def reset(self):
        # Discard all decompressor state; start a fresh stream.
        self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
apache-2.0
|
detiber/ansible
|
lib/ansible/utils/helpers.py
|
34
|
1270
|
# (c) 2016, Ansible by Red Hat <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import string_types
def pct_to_int(value, num_items, min_value=1):
    '''
    Converts a given value to a percentage if specified as "x%",
    otherwise converts the given value to an integer.
    '''
    if not (isinstance(value, string_types) and value.endswith('%')):
        return int(value)
    # "x%" form: take that share of num_items, flooring at min_value so
    # a small percentage never rounds down to zero.
    percentage = int(value.replace("%", ""))
    return int(num_items * (percentage / 100.0)) or min_value
|
gpl-3.0
|
erikr/django
|
django/utils/functional.py
|
25
|
14721
|
import copy
import operator
import warnings
from functools import total_ordering, wraps
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
    """Pre-bind leading positional args and default keyword args.

    Call-time keyword arguments override the pre-bound ones.  (Kept
    instead of functools.partial because this binds to classes and
    returns bound instances.)
    """
    def _curried(*moreargs, **morekwargs):
        merged_kwargs = dict(kwargs, **morekwargs)
        return _curried_func(*(args + moreargs), **merged_kwargs)
    return _curried
class cached_property(object):
"""
Decorator that converts a method with a single self argument into a
property cached on the instance.
Optional ``name`` argument allows you to make cached properties of other
methods. (e.g. url = cached_property(get_absolute_url, name='url') )
"""
def __init__(self, func, name=None):
self.func = func
self.__doc__ = getattr(func, '__doc__')
self.name = name or func.__name__
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.__dict__[self.name] = self.func(instance)
return res
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
@total_ordering
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__prepared = False
def __init__(self, args, kw):
self.__args = args
self.__kw = kw
if not self.__prepared:
self.__prepare_class__()
self.__prepared = True
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(func, self.__args, self.__kw) + resultclasses
)
def __repr__(self):
return repr(self.__cast())
@classmethod
def __prepare_class__(cls):
for resultclass in resultclasses:
for type_ in resultclass.mro():
for method_name in type_.__dict__.keys():
# All __promise__ return the same wrapper method, they
# look up the correct implementation when called.
if hasattr(cls, method_name):
continue
meth = cls.__promise__(method_name)
setattr(cls, method_name, meth)
cls._delegate_bytes = bytes in resultclasses
cls._delegate_text = six.text_type in resultclasses
assert not (cls._delegate_bytes and cls._delegate_text), (
"Cannot call lazy() with both bytes and text return types.")
if cls._delegate_text:
if six.PY3:
cls.__str__ = cls.__text_cast
else:
cls.__unicode__ = cls.__text_cast
cls.__str__ = cls.__bytes_cast_encoded
elif cls._delegate_bytes:
if six.PY3:
cls.__bytes__ = cls.__bytes_cast
else:
cls.__str__ = cls.__bytes_cast
@classmethod
def __promise__(cls, method_name):
# Builds a wrapper around some magic method
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = func(*self.__args, **self.__kw)
return getattr(res, method_name)(*args, **kw)
return __wrapper__
def __text_cast(self):
return func(*self.__args, **self.__kw)
def __bytes_cast(self):
return bytes(func(*self.__args, **self.__kw))
def __bytes_cast_encoded(self):
return func(*self.__args, **self.__kw).encode('utf-8')
def __cast(self):
if self._delegate_bytes:
return self.__bytes_cast()
elif self._delegate_text:
return self.__text_cast()
else:
return func(*self.__args, **self.__kw)
def __str__(self):
# object defines __str__(), so __prepare_class__() won't overload
# a __str__() method from the proxied class.
return str(self.__cast())
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() == other
def __lt__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() < other
def __hash__(self):
    # Hash the evaluated result so a promise hashes like its value.
    return hash(self.__cast())
def __mod__(self, rhs):
    # %-interpolation: force evaluation to the declared bytes/text type
    # first so formatting sees a concrete string, not the proxy.
    if self._delegate_bytes and six.PY2:
        return bytes(self) % rhs
    elif self._delegate_text:
        return six.text_type(self) % rhs
    return self.__cast() % rhs
def __deepcopy__(self, memo):
    # Instances of this class are effectively immutable. It's just a
    # collection of functions. So we don't need to do anything
    # complicated for copying.
    # Register in memo so cyclic structures deep-copy correctly.
    memo[id(self)] = self
    return self
@wraps(func)
def __wrapper__(*args, **kw):
    # Creates the proxy object, instead of the actual value.
    return __proxy__(args, kw)
# lazy() hands this wrapper back in place of func itself.
return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    # Unpickling helper: rebuild the lazy proxy by re-wrapping func with the
    # original result classes and call arguments.
    return lazy(func, *resultclasses)(*args, **kwargs)
def lazystr(text):
    """
    Shortcut for the common case of a lazy callable that returns str.
    """
    from django.utils.encoding import force_text  # Avoid circular import
    return lazy(force_text, six.text_type)(text)
def allow_lazy(func, *resultclasses):
    """
    Deprecated alias for keep_lazy() (note the swapped call style: this one
    takes the function first, keep_lazy() is a decorator factory).
    """
    warnings.warn(
        "django.utils.functional.allow_lazy() is deprecated in favor of "
        "django.utils.functional.keep_lazy()",
        RemovedInDjango20Warning, 2)
    return keep_lazy(*resultclasses)(func)
def keep_lazy(*resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    if not resultclasses:
        raise TypeError("You must pass at least one argument to keep_lazy().")

    def decorator(func):
        lazy_func = lazy(func, *resultclasses)

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Stay lazy if any positional or keyword argument is lazy.
            has_lazy_arg = any(
                isinstance(arg, Promise)
                for arg in list(args) + list(six.itervalues(kwargs))
            )
            if has_lazy_arg:
                return lazy_func(*args, **kwargs)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def keep_lazy_text(func):
    """
    A decorator for functions that accept lazy arguments and return text.
    """
    # Text-specialized shortcut around keep_lazy().
    return keep_lazy(six.text_type)(func)
# Sentinel marking a LazyObject whose wrapped value is not yet initialized.
empty = object()


def new_method_proxy(func):
    """
    Return a method that forces initialization of the lazily wrapped object
    (via ``_setup()``) on first access and then applies ``func`` to it.
    """
    def proxied(self, *args):
        wrapped = self._wrapped
        if wrapped is empty:
            self._setup()
            wrapped = self._wrapped
        return func(wrapped, *args)
    return proxied
class LazyObject(object):
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """

    # Avoid infinite recursion when tracing __init__ (#19456).
    _wrapped = None

    def __init__(self):
        # Note: if a subclass overrides __init__(), it will likely need to
        # override __copy__() and __deepcopy__() as well.
        self._wrapped = empty

    # Attribute reads are forwarded to the wrapped object, initializing it
    # on first access.
    __getattr__ = new_method_proxy(getattr)

    def __setattr__(self, name, value):
        # "_wrapped" lives on the wrapper itself; everything else is set on
        # the (possibly just-initialized) wrapped object.
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
        else:
            if self._wrapped is empty:
                self._setup()
            setattr(self._wrapped, name, value)

    def __delattr__(self, name):
        # "_wrapped" is the wrapper's own bookkeeping slot and must survive.
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is empty:
            self._setup()
        delattr(self._wrapped, name)

    def _setup(self):
        """
        Must be implemented by subclasses to initialize the wrapped object.
        """
        raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')

    # Because we have messed with __class__ below, we confuse pickle as to what
    # class we are pickling. We're going to have to initialize the wrapped
    # object to successfully pickle it, so we might as well just pickle the
    # wrapped object since they're supposed to act the same way.
    #
    # Unfortunately, if we try to simply act like the wrapped object, the ruse
    # will break down when pickle gets our id(). Thus we end up with pickle
    # thinking, in effect, that we are a distinct object from the wrapped
    # object, but with the same __dict__. This can cause problems (see #25389).
    #
    # So instead, we define our own __reduce__ method and custom unpickler. We
    # pickle the wrapped object as the unpickler's argument, so that pickle
    # will pickle it normally, and then the unpickler simply returns its
    # argument.
    def __reduce__(self):
        if self._wrapped is empty:
            self._setup()
        return (unpickle_lazyobject, (self._wrapped,))

    def __getstate__(self):
        """
        Prevent older versions of pickle from trying to pickle the __dict__
        (which in the case of a SimpleLazyObject may contain a lambda). The
        value will be ignored by __reduce__() and the custom unpickler.
        """
        return {}

    def __copy__(self):
        if self._wrapped is empty:
            # If uninitialized, copy the wrapper. Use type(self), not
            # self.__class__, because the latter is proxied.
            return type(self)()
        else:
            # If initialized, return a copy of the wrapped object.
            return copy.copy(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use type(self), not self.__class__, because the
            # latter is proxied.
            result = type(self)()
            memo[id(self)] = result
            return result
        return copy.deepcopy(self._wrapped, memo)

    # Text/bytes/truthiness conversions differ between Python 2 and 3.
    if six.PY3:
        __bytes__ = new_method_proxy(bytes)
        __str__ = new_method_proxy(str)
        __bool__ = new_method_proxy(bool)
    else:
        __str__ = new_method_proxy(str)
        __unicode__ = new_method_proxy(unicode)  # NOQA: unicode undefined on PY3
        __nonzero__ = new_method_proxy(bool)

    # Introspection support
    __dir__ = new_method_proxy(dir)

    # Need to pretend to be the wrapped class, for the sake of objects that
    # care about this (especially in equality tests)
    __class__ = property(new_method_proxy(operator.attrgetter("__class__")))
    __eq__ = new_method_proxy(operator.eq)
    __ne__ = new_method_proxy(operator.ne)
    __hash__ = new_method_proxy(hash)

    # List/Tuple/Dictionary methods support
    __getitem__ = new_method_proxy(operator.getitem)
    __setitem__ = new_method_proxy(operator.setitem)
    __delitem__ = new_method_proxy(operator.delitem)
    __iter__ = new_method_proxy(iter)
    __len__ = new_method_proxy(len)
    __contains__ = new_method_proxy(operator.contains)
def unpickle_lazyobject(wrapped):
    """
    Unpickling helper for LazyObject: the pickled payload is already the
    fully initialized wrapped object, so simply hand it back.
    """
    return wrapped
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialized from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """

    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Assign via __dict__ so LazyObject.__setattr__ doesn't forward the
        # write to the (not yet created) wrapped object.
        self.__dict__['_setupfunc'] = func
        super(SimpleLazyObject, self).__init__()

    def _setup(self):
        # Materialize the wrapped object on first use.
        self._wrapped = self._setupfunc()

    # Return a meaningful representation of the lazy object for debugging
    # without evaluating the wrapped object.
    def __repr__(self):
        if self._wrapped is empty:
            repr_attr = self._setupfunc
        else:
            repr_attr = self._wrapped
        return '<%s: %r>' % (type(self).__name__, repr_attr)

    def __copy__(self):
        if self._wrapped is empty:
            # If uninitialized, copy the wrapper. Use SimpleLazyObject, not
            # self.__class__, because the latter is proxied.
            return SimpleLazyObject(self._setupfunc)
        else:
            # If initialized, return a copy of the wrapped object.
            return copy.copy(self._wrapped)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        return copy.deepcopy(self._wrapped, memo)
def partition(predicate, values):
    """
    Split ``values`` into two lists based on the truthiness of
    ``predicate(item)``, preserving input order. e.g.:

        >>> partition(lambda x: x > 3, range(5))
        ([0, 1, 2, 3], [4])

    Returns a ``(falsy_items, truthy_items)`` tuple.
    """
    results = ([], [])
    for item in values:
        # bool() guards against predicates that return arbitrary truthy
        # values: the old code indexed with the raw return value and raised
        # IndexError for anything other than 0/1/True/False.
        results[bool(predicate(item))].append(item)
    return results
|
bsd-3-clause
|
ChinaQuants/blaze
|
blaze/compute/tests/test_pyfunc.py
|
16
|
2071
|
import datetime
from blaze.compute.pyfunc import symbol, lambdify, cos, math, broadcast
from blaze.compute.pyfunc import _print_python
from blaze.expr.broadcast import broadcast_collect
# Shared record symbol used by most tests below; tuples passed to the
# lambdified functions supply (x, y, z, when) positionally.
t = symbol('t', '{x: int, y: int, z: int, when: datetime}')
def test_simple():
    # lambdify over the record symbol: a positional tuple supplies the fields.
    f = lambdify([t], t.x + t.y)
    assert f((1, 2, 3, 4)) == 1 + 2
    # lambdify over individual field symbols takes them as separate arguments.
    f = lambdify([t.x, t.y], t.x + t.y)
    assert f(1, 2) == 1 + 2
def test_datetime():
    # Datetime fields expose attribute accessors such as .year.
    f = lambdify([t], t.x + t.when.year)
    assert f((1, 2, 3, datetime.datetime(2000, 1, 1))) == 1 + 2000
def inc(x):
    """Return ``x`` incremented by one (helper for the map test below)."""
    result = x + 1
    return result
def test_map():
    # .map() applies a plain Python callable with a declared result dshape.
    f = lambdify([t], t.x + t.y.map(inc, 'int'))
    assert f((1, 2, 3, 4)) == 1 + inc(2)
def test_math():
    # abs() and blaze's cos() evaluate like their Python/math equivalents.
    f = lambdify([t], abs(t.x) + cos(t.y))
    assert f((-1, 0, 3, 4)) == 1 + math.cos(0.0)
def test_datetime_literals_and__print_python():
    # BUG FIX: the original statement computed a comparison and then threw it
    # away (no `assert`), so this test could never fail.
    # NOTE(review): the expected (source, namespace) tuple is reconstructed
    # from the original expression's layout — confirm against _print_python().
    assert _print_python(datetime.date(2000, 1, 1)) == \
        ('datetime.date(2000, 1, 1)', {'datetime': datetime})
def test_datetime_literals():
    # ISO date strings compare against datetime fields as date literals.
    f = lambdify([t], t.when > '2000-01-01')
    assert f((1, 0, 3, datetime.datetime(2000, 1, 2)))
    assert not f((1, 0, 3, datetime.datetime(1999, 1, 2)))
def test_broadcast_collect():
    # Local `t` (a var * records symbol) deliberately shadows the
    # module-level scalar-record symbol.
    t = symbol('t', 'var * {x: int, y: int, z: int, when: datetime}')
    expr = t.distinct()
    expr = expr.x + 2 * expr.y
    expr = expr.distinct()

    result = broadcast_collect(expr)

    # broadcast_collect() should fuse the arithmetic between the two
    # distinct() calls into a single broadcast node.
    expected = t.distinct()
    expected = broadcast(expected.x + 2 * expected.y, [expected])
    expected = expected.distinct()

    assert result.isidentical(expected)
def test_pyfunc_works_with_invalid_python_names():
    # Symbol names that are not valid Python identifiers still lambdify.
    x = symbol('x-y.z', 'int')
    f = lambdify([x], x + 1)
    assert f(1) == 2

    # Field names are sanitized to attribute-safe forms ("x.y" -> x_y,
    # "y z" -> y_z).
    t = symbol('t', '{"x.y": int, "y z": int}')
    f = lambdify([t], t.x_y + t.y_z)
    assert f((1, 2)) == 3
def test_usub():
    # Unary minus on a float symbol.
    x = symbol('x', 'float64')
    f = lambdify([x], -x)
    assert f(1.0) == -1.0
def test_not():
    # ~ on a boolean symbol lowers to logical (not bitwise) negation,
    # so the result must be a real bool, not an int.
    x = symbol('x', 'bool')
    f = lambdify([x], ~x)
    r = f(True)
    assert isinstance(r, bool) and not r
    r = f(False)
    assert isinstance(r, bool) and r
|
bsd-3-clause
|
mathspace/django
|
tests/flatpages_tests/test_sitemaps.py
|
380
|
1326
|
from __future__ import unicode_literals
from django.apps import apps
from django.contrib.sites.models import Site
from django.test import TestCase
from django.test.utils import modify_settings, override_settings
@override_settings(
    ROOT_URLCONF='flatpages_tests.urls',
    SITE_ID=1,
)
@modify_settings(
    INSTALLED_APPS={
        'append': ['django.contrib.sitemaps', 'django.contrib.flatpages'],
    },
)
class FlatpagesSitemapTests(TestCase):
    """Sitemap output lists public flatpages and hides private ones."""

    @classmethod
    def setUpClass(cls):
        super(FlatpagesSitemapTests, cls).setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    @classmethod
    def setUpTestData(cls):
        # One public and one registration-required flatpage on the current
        # site. Site is re-fetched via the app registry (shadows the import
        # locally).
        Site = apps.get_model('sites.Site')
        current_site = Site.objects.get_current()
        current_site.flatpage_set.create(url="/foo/", title="foo")
        current_site.flatpage_set.create(url="/private-foo/", title="private foo", registration_required=True)

    def test_flatpage_sitemap(self):
        # Only the public page appears in the rendered sitemap XML.
        response = self.client.get('/flatpages/sitemap.xml')
        self.assertIn(b'<url><loc>http://example.com/foo/</loc></url>', response.getvalue())
        self.assertNotIn(b'<url><loc>http://example.com/private-foo/</loc></url>', response.getvalue())
|
bsd-3-clause
|
40223246/0622cdb-Final-TEST2-1
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/header.py
|
409
|
2444
|
from browser import document as doc
from browser.html import *
# Banner menu labels keyed by 'menu_<key>', then by language code.
trans_menu = {
    'menu_console':{'en':'Console','es':'Consola','fr':'Console', 'pt':'Console'},
    'menu_editor':{'en':'Editor','es':'Editor','fr':'Editeur', 'pt':'Editor'},
    'menu_gallery':{'en':'Gallery','es':'Galería','fr':'Galerie', 'pt':'Galeria'},
    'menu_doc':{'en':'Documentation','es':'Documentación','fr':'Documentation', 'pt':'Documentação'},
    'menu_download':{'en':'Download','es':'Descargas','fr':'Téléchargement', 'pt':'Download'},
    'menu_dev':{'en':'Development','es':'Desarrollo','fr':'Développement', 'pt':'Desenvolvimento'},
    'menu_groups':{'en':'Groups','es':'Grupos','fr':'Groupes', 'pt':'Grupos'}
}

# Target URLs per menu key; 'gallery' and 'doc' contain a %s slot for the
# language code, 'download' and 'dev' are absolute external links.
links = {'home':'index.html',
    'console':'tests/console.html',
    'editor':'tests/editor.html',
    'gallery':'gallery/gallery_%s.html',
    'doc':'static_doc/%s/intro.html',
    'download':'https://github.com/brython-dev/brython/releases',
    'dev':'https://github.com/brython-dev/brython',
    'groups':'groups.html'
}
def show(prefix='', language=None):
    """
    Build the site banner inside the 'banner_row' element.

    Returns a ``(qs_lang, language)`` pair: the language read from the query
    string (or None when `language` was supplied / absent) and the language
    actually used.
    """
    # detect language
    has_req = False  # kept for compatibility, currently unused
    # BUG FIX: qs_lang was only bound inside the `language is None` branch,
    # so calling show(language='en') made the final return raise NameError.
    qs_lang = None
    if language is None:
        qs_lang = doc.query.getfirst("lang")
        if qs_lang and qs_lang in ["en","fr","es","pt"]:
            has_req = True
            language = qs_lang
        else:
            import locale
            try:
                lang, enc = locale.getdefaultlocale()
                lang = lang[:2]
                if lang in ["en","fr","es","pt"]:
                    language = lang
            except Exception:
                # Locale detection is best-effort; fall back to the default.
                pass
    language = language or 'en'

    _banner = doc['banner_row']
    for key in ['console','editor','gallery','doc','download','dev','groups']:
        # External links are used as-is; internal links get the page prefix.
        if key in ['download','dev']:
            href = links[key]
        else:
            href = prefix + links[key]
        if key in ['doc','gallery']:
            href = href % language
        if key not in ['download','dev']:
            # add lang to href
            href += '?lang=%s' % language
        if key == 'home':
            # NOTE(review): dead branch — 'home' is not in the loop's key
            # list; kept unchanged to preserve behavior.
            img = IMG(src="/brython.svg", Class="logo")
            link = A(img, href=href)
            cell = TD(link, Class="logo")
        else:
            link = A(trans_menu['menu_%s' % key][language], href=href, Class="banner")
            cell = TD(link)
        if key in ['download','dev']:
            link.target = "_blank"
        _banner <= cell
    return qs_lang, language
|
gpl-3.0
|
michaelray/Iristyle-ChocolateyPackages
|
EthanBrown.SublimeText2.UtilPackages/tools/PackageCache/Package Control/package_control/clients/readme_client.py
|
9
|
2434
|
import re
import os
import base64
try:
# Python 3
from urllib.parse import urlencode
except (ImportError):
# Python 2
from urllib import urlencode
from .json_api_client import JSONApiClient
from ..downloaders.downloader_exception import DownloaderException
# Used to map file extensions to formats
_readme_formats = {
'.md': 'markdown',
'.mkd': 'markdown',
'.mdown': 'markdown',
'.markdown': 'markdown',
'.textile': 'textile',
'.creole': 'creole',
'.rst': 'rst'
}
class ReadmeClient(JSONApiClient):

    def readme_info(self, url):
        """
        Retrieve the readme and info about it

        :param url:
            The URL of the readme file

        :raises:
            DownloaderException: if there is an error downloading the readme
            ClientException: if there is an error parsing the response

        :return:
            A dict with the following keys:
              `filename`
              `format` - `markdown`, `textile`, `creole`, `rst` or `txt`
              `contents` - contents of the readme as str/unicode
        """

        contents = None

        # Try to grab the contents of a GitHub-based readme by grabbing the cached
        # content of the readme API call.
        # FIX: raw string — the old non-raw literal relied on "\." passing
        # through unrecognized, which is a DeprecationWarning on Python 3.6+.
        # The pattern bytes are unchanged.
        github_match = re.match(
            r'https://raw.github.com/([^/]+/[^/]+)/([^/]+)/readme(\.(md|mkd|mdown|markdown|textile|creole|rst|txt))?$',
            url, re.I)
        if github_match:
            user_repo = github_match.group(1)
            branch = github_match.group(2)
            query_string = urlencode({'ref': branch})
            readme_json_url = 'https://api.github.com/repos/%s/readme?%s' % (user_repo, query_string)
            try:
                info = self.fetch_json(readme_json_url, prefer_cached=True)
                contents = base64.b64decode(info['content'])
            except ValueError:
                # API response unusable — fall through to fetching the raw URL.
                pass

        if not contents:
            contents = self.fetch(url)

        # Map the file extension to a readme format, defaulting to plain text.
        basename, ext = os.path.splitext(url)
        format = 'txt'
        ext = ext.lower()
        if ext in _readme_formats:
            format = _readme_formats[ext]

        try:
            contents = contents.decode('utf-8')
        except UnicodeDecodeError:
            # Not valid UTF-8 — fall back to Windows-1252 with replacement.
            contents = contents.decode('cp1252', errors='replace')

        return {
            'filename': os.path.basename(url),
            'format': format,
            'contents': contents
        }
|
mit
|
avneesh91/django
|
django/core/management/commands/sqlmigrate.py
|
48
|
2742
|
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
class Command(BaseCommand):
    help = "Prints the SQL statements for the named migration."

    # Wrap output in BEGIN/COMMIT by default; toggled per-migration in
    # handle() based on the migration's atomic flag.
    output_transaction = True

    def add_arguments(self, parser):
        """Register the positional app/migration arguments and options."""
        parser.add_argument('app_label', help='App label of the application containing the migration.')
        parser.add_argument('migration_name', help='Migration name to print the SQL for.')
        parser.add_argument(
            '--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to create SQL for. Defaults to the "default" database.',
        )
        parser.add_argument(
            '--backwards', action='store_true', dest='backwards',
            help='Creates SQL to unapply the migration, rather than to apply it',
        )

    def execute(self, *args, **options):
        # sqlmigrate doesn't support coloring its output but we need to force
        # no_color=True so that the BEGIN/COMMIT statements added by
        # output_transaction don't get colored either.
        options['no_color'] = True
        return super().execute(*args, **options)

    def handle(self, *args, **options):
        """Resolve the named migration and return its SQL as one string."""
        # Get the database we're operating from
        connection = connections[options['database']]

        # Load up an executor to get all the migration data
        executor = MigrationExecutor(connection)

        # Resolve command-line arguments into a migration
        app_label, migration_name = options['app_label'], options['migration_name']
        if app_label not in executor.loader.migrated_apps:
            raise CommandError("App '%s' does not have migrations" % app_label)
        try:
            migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
        except AmbiguityError:
            raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (
                migration_name, app_label))
        except KeyError:
            raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % (
                migration_name, app_label))
        targets = [(app_label, migration.name)]

        # Show begin/end around output only for atomic migrations
        self.output_transaction = migration.atomic

        # Make a plan that represents just the requested migrations and show SQL
        # for it
        plan = [(executor.loader.graph.nodes[targets[0]], options['backwards'])]
        sql_statements = executor.collect_sql(plan)
        return '\n'.join(sql_statements)
|
bsd-3-clause
|
JerryLead/spark
|
examples/src/main/python/ml/dataframe_example.py
|
72
|
2670
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example of how to use DataFrame for ML. Run with::
bin/spark-submit examples/src/main/python/ml/dataframe_example.py <input>
"""
from __future__ import print_function
import os
import sys
import tempfile
import shutil
from pyspark.sql import SparkSession
from pyspark.mllib.stat import Statistics
from pyspark.mllib.util import MLUtils
if __name__ == "__main__":
    # Parse the optional single argument: path to a LIBSVM data file.
    if len(sys.argv) > 2:
        print("Usage: dataframe_example.py <libsvm file>", file=sys.stderr)
        # FIX: use sys.exit — the `exit` builtin is meant for interactive use
        # and may be absent when run under some launchers.
        sys.exit(-1)
    elif len(sys.argv) == 2:
        # FIX: renamed from `input`, which shadowed the builtin of that name.
        input_path = sys.argv[1]
    else:
        input_path = "data/mllib/sample_libsvm_data.txt"

    spark = SparkSession \
        .builder \
        .appName("DataFrameExample") \
        .getOrCreate()

    # Load input data
    print("Loading LIBSVM file with UDT from " + input_path + ".")
    df = spark.read.format("libsvm").load(input_path).cache()
    print("Schema from LIBSVM:")
    df.printSchema()
    print("Loaded training data as a DataFrame with " +
          str(df.count()) + " records.")

    # Show statistical summary of labels.
    labelSummary = df.describe("label")
    labelSummary.show()

    # Convert features column to an RDD of vectors.
    features = MLUtils.convertVectorColumnsFromML(df, "features") \
        .select("features").rdd.map(lambda r: r.features)
    summary = Statistics.colStats(features)
    print("Selected features column with average values:\n" +
          str(summary.mean()))

    # Save the records in a parquet file.
    tempdir = tempfile.NamedTemporaryFile(delete=False).name
    os.unlink(tempdir)
    print("Saving to " + tempdir + " as Parquet file.")
    df.write.parquet(tempdir)

    # Load the records back.
    print("Loading Parquet file with UDT from " + tempdir)
    newDF = spark.read.parquet(tempdir)
    print("Schema from Parquet:")
    newDF.printSchema()

    # Clean up the temporary parquet directory and shut down the session.
    shutil.rmtree(tempdir)
    spark.stop()
|
apache-2.0
|
harlequin/sickbeard
|
lib/hachoir_core/error.py
|
90
|
1350
|
"""
Functions to display an error (error, warning or information) message.
"""
from lib.hachoir_core.log import log
from lib.hachoir_core.tools import makePrintable
import sys, traceback
def getBacktrace(empty="Empty backtrace."):
    """
    Try to get backtrace as string.
    Returns "Error while trying to get backtrace" on failure.
    """
    try:
        info = sys.exc_info()
        trace = traceback.format_exception(*info)
        sys.exc_clear()  # NOTE: Python 2-only API; this library targets Py2
        # format_exception() yields ["None\n"] when no exception is active.
        if trace[0] != "None\n":
            return "".join(trace)
    except:
        # No i18n here (imagine if i18n function calls error...)
        return "Error while trying to get backtrace"
    return empty
class HachoirError(Exception):
    """
    Parent of all errors in Hachoir library
    """

    def __init__(self, message):
        # The exception payload is an ASCII-printable rendering; the original
        # (possibly non-ASCII) text is kept on self.text for unicode display.
        message_bytes = makePrintable(message, "ASCII")
        Exception.__init__(self, message_bytes)
        self.text = message

    def __unicode__(self):
        return self.text
# Error classes which may be raised by Hachoir core
# FIXME: Add EnvironmentError (IOError or OSError) and AssertionError?
# FIXME: Remove ArithmeticError and RuntimeError?
HACHOIR_ERRORS = (HachoirError, LookupError, NameError, AttributeError,
    TypeError, ValueError, ArithmeticError, RuntimeError)

# Module-level shortcuts to the shared logger's methods.
info = log.info
warning = log.warning
error = log.error
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.