repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
lgarren/spack | var/spack/repos/builtin/packages/r-yaqcaffy/package.py | 1 | 1795 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RYaqcaffy(RPackage):
    """Quality control of Affymetrix GeneChip expression data and
    reproducibility analysis of human whole genome chips with the MAQC
    reference datasets."""

    homepage = "http://bioconductor.org/packages/yaqcaffy/"
    url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/yaqcaffy_1.36.0.tar.gz"
    # Versions are listed on the package homepage.
    list_url = homepage

    # Checksum is the MD5 of the 1.36.0 source tarball.
    version('1.36.0', '73bea4305aa6b1fc9d49881b95bcda42')

    depends_on('r-simpleaffy', type=('build', 'run'))
    # Pins R to 3.4.x for this release (matches the Bioconductor 3.5 URL
    # above — presumably the supported R series for that release; confirm
    # against Bioconductor release notes).
    depends_on('r@3.4.0:3.4.9', when='@1.36.0')
| lgpl-2.1 |
cohortfsllc/cohort-cocl2-sandbox | src/trusted/validator_arm/dgen_input.py | 10 | 43651 | #!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
A simple recursive-descent parser for the table file format.
The grammar implemented here is roughly (taking some liberties with whitespace
and comment parsing):
table_file ::= decoder_actions? table+ eof ;
action ::= decoder_action | decoder_method | '"'
action_arch ::= 'arch' ':=' id | '(' id (',' id)* ')'
action_option ::= (action_rule |
action_pattern |
action_safety |
action_arch |
action_violations |
action_other) ';'
action_other ::= word ':=' bit_expr
action_pattern ::= 'pattern' ':=' bitpattern
action_safety ::= 'safety' ':=' ('super.safety' | safety_check)
('&' safety_check)*
action_rule ::= 'rule' ':=' id
action_violations ::= 'violations' ':=' violation ('&' violation)*
arch ::= '(' word+ ')'
bit_expr ::= bit_expr1 ('if' bit_expr 'else' bit_expr)? # conditional
bit_expr1 ::= bit_expr2 (('&' bit_expr2)* | ('|' bit_expr2)*)?
bit_expr2 ::= bit_expr3 | 'not' bit_expr2
bit_expr3 ::= bit_expr4 ('in' bit_set | 'in' 'bitset' pat_bit_set)?
bit_expr4 ::= bit_expr5 (('<' | '<=' | '==' | '!=' | '>=' | '>')
bit_expr5)? # comparisons
bit_expr5 ::= bit_expr6 |
bit_expr5 ('<<' | '>>') bit_expr6 # shift
bit_expr6 ::= bit_expr7 | # add ops
bit_expr6 ('+' | '-") bit_expr7
bit_expr7 ::= bit_expr8 | # mul ops
bit_expr7 ('*'| '/' | 'mod') bit_expr8
bit_expr8 ::= bit_expr9 ('=' bitpattern)? # bit check
bit_expr9 ::= bit_expr10 (':' bit_expr10)* # concat
bit_expr10 ::= bit_expr11 | bit_expr10 '(' int (':' int)? ')' # bit range
bit_expr11 ::= int | nondecimal_int | id | bit_set | '(' bit_expr ')' | call
bit_set ::= '{' (bit_expr (',' bit_expr)*)? '}'
bitpattern ::= word | negated_word
call ::= word '(' (bit_expr (',' bit_expr)*)? ')'
citation ::= '(' word+ ')'
column ::= id '(' int (':' int)? ')'
decoder_action ::= '=' decoder_defn
decoder_actions::= ('*' (int | id) decoder_defn)+
decoder_defn ::= fields? action_option*
| '*' (int | id) ('-' field_names)? fields? action_option*
decoder_method ::= '->' id
default_row ::= 'else' ':' action
field_names ::= '{' (id (',' id)*)? '}'
fields ::= '{' column (',' column)* '}'
footer ::= '+' '-' '-'
header ::= "|" column+
int ::= word (where word is a sequence of digits)
id ::= word (where word is sequence of letters, digits and _)
negated_word ::= '~' word
nondecimal_int ::= word (where word is a hexadecimal or bitstring pattern)
parenthesized_exp ::= '(' (word | punctuation)+ ')'
pat_bit_set ::= '{' (bitpattern (',' bitpattern)*)? '}'
pat_row ::= pattern+ action
pattern ::= bitpattern | '-' | '"'
quoted_string := word (where word is text enclosed in quotes ('))
row ::= '|' (pat_row | default_row)
safety_check ::= bit_expr '=>' id
table ::= table_desc table_actions header row+ footer
table_actions ::= (decoder_actions footer)?
table_desc ::= '+' '-' '-' id citation?
violation ::= bit_expr '=>' 'error' '(' quoted_string (',' bit_expr)* ')'
"""
import re
import dgen_core
# The following import adds type information for decoder actions corresponding
# to method definitions.
import dgen_decoder
# Set the following to True if you want to see each read/pushback of a token.
_TRACE_TOKENS = False
def parse_tables(input):
  """Entry point for the parser. Input should be a file or file-like."""
  return Parser().parse(input)
class Token(object):
  """A single lexical unit: a `kind` tag plus the text `value` it holds.

  For punctuation and keywords, the value defaults to the kind itself.
  """

  def __init__(self, kind, value=None):
    self.kind = kind
    # Punctuation/keyword tokens carry their kind as their value.
    self.value = value or kind

  def __repr__(self):
    return 'Token({}, "{}")'.format(self.kind, self.value)
# Predefined names corresponding to predefined bit expressions.
# Looked up by _bit_expr11 when an identifier is not found in the
# local context; matches are lifted into the context on first use.
_PREDEFINED_CONSTS = {
    # Register Names.
    # TODO(karl): These constants are arm32 specific. We will need to
    # fix this when implementing arm64.
    'NZCV': dgen_core.Literal(16),  # defines conditions registers.
    'None': dgen_core.Literal(32),
    'Pc': dgen_core.Literal(15),
    'Lr': dgen_core.Literal(14),
    'Sp': dgen_core.Literal(13),
    'Tp': dgen_core.Literal(9),

    # Boolean values.
    'true': dgen_core.BoolValue(True),
    'false': dgen_core.BoolValue(False),

    # The instruction being decoded.
    'inst': dgen_core.Instruction(),

    # Instruction value selectors used in ARM tables. Numbered sequentially
    # to guarantee selectors are unique.
    'VFPNegMul_VNMLA': dgen_core.Literal(1),
    'VFPNegMul_VNMLS': dgen_core.Literal(2),
    'VFPNegMul_VNMUL': dgen_core.Literal(3),

    # conditions for conditional instructions (see enum Condition in model.h).
    # Possible values for the condition field, from the ARM ARM section A8.3.
    'cond_EQ': dgen_core.Literal(0x0),
    'cond_NE': dgen_core.Literal(0x1),
    'cond_CS': dgen_core.Literal(0x2),
    'cond_CC': dgen_core.Literal(0x3),
    'cond_MI': dgen_core.Literal(0x4),
    'cond_PL': dgen_core.Literal(0x5),
    'cond_VS': dgen_core.Literal(0x6),
    'cond_VC': dgen_core.Literal(0x7),
    'cond_HI': dgen_core.Literal(0x8),
    'cond_LS': dgen_core.Literal(0x9),
    'cond_GE': dgen_core.Literal(0xA),
    'cond_LT': dgen_core.Literal(0xB),
    'cond_GT': dgen_core.Literal(0xC),
    'cond_LE': dgen_core.Literal(0xD),
    'cond_AL': dgen_core.Literal(0xE),
    'cond_unconditional': dgen_core.Literal(0xF),
    # Aliases: HS (unsigned higher-or-same) == CS, LO (unsigned lower) == CC.
    'cond_HS': dgen_core.Literal(0x2),
    'cond_LO': dgen_core.Literal(0x3),
}
# Predefined regular expressions.
_DECIMAL_PATTERN = re.compile(r'^([0-9]+)$')
_HEXIDECIMAL_PATTERN = re.compile(r'^0x([0-9a-fA-F]+)$')
_BITSTRING_PATTERN = re.compile(r'^\'([01]+)\'$')
_ID_PATTERN = re.compile(r'^[a-zA-z][a-zA-z0-9_]*$')
_STRING_PATTERN = re.compile(r'^\'(.*)\'$')
# When true, catch all bugs when parsing and report line.
_CATCH_EXCEPTIONS = True
# List of file level decoder actions that must be specified in every
# specification file, because they are used somewhere else than in table rows.
_REQUIRED_FILE_DECODER_ACTIONS = [
# Defiles the decoder action that handles instructions that are
# not defined by rows in the instruction tables.
'NotImplemented',
# Defines the decoder action that handles the fictitious instruction
# inserted before the code segment, acting as the previous instruction
# for the first instruction in the bundle.
'FictitiousFirst'
]
class Parser(object):
"""Parses a set of tables from the input file."""
def parse(self, input):
  """Parses the given file (or file-like) input and returns the
  generated dgen_core.Decoder holding the parse tables.

  When _CATCH_EXCEPTIONS is set, any failure is funneled through
  self._unexpected so the error is reported with the current line number.
  """
  self.input = input  # The remaining input to parse
  decoder = dgen_core.Decoder()  # The generated decoder of parse tables.
  if _CATCH_EXCEPTIONS:
    try:
      return self._parse(decoder)
    except Exception as e:
      self._unexpected(e)
    except:
      # Catch-all for raises that are not Exception subclasses, so a
      # parse failure always gets reported with a line number.
      self._unexpected("Unknown problem.")  # Fixed typo: was "Unknow".
  else:
    return self._parse(decoder)
def _parse(self, decoder):
  """Implements table_file ::= decoder_actions? table+ eof.

  Populates `decoder` and returns it; reports an error if input remains,
  or if no primary table / no tables were defined.
  """
  # Read global decoder actions.
  self._global_decoder_actions(decoder)
  # Read tables while there are tables to read.
  while self._next_token().kind == '+':
    self._table(decoder)
  # Check that we read everything.
  if not self._next_token().kind == 'eof':
    self._unexpected('unrecognized input found')
  if not decoder.primary:
    self._unexpected('No primary table defined')
  if not decoder.tables():
    self._unexpected('No tables defined')
  return decoder
def __init__(self):
  """Initializes tokenizer state; the input itself is bound in parse()."""
  self._words = []  # Words left on current line, not yet parsed.
  self._line_no = 0  # The current line being parsed
  self._token = None  # The next token from the input.
  self._reached_eof = False  # True when end of file reached
  self._pushed_tokens = []  # Tokens pushed back onto the input stream.
  # Reserved words allowed. Must be ordered such that if p1 != p2 are in
  # the list, and p1.startswith(p2), then p1 must appear before p2.
  self._reserved = ['else', 'other', 'mod', 'if', 'not', 'in']
  # Punctuation allowed. Must be ordered such that if p1 != p2 are in
  # the list, and p1.startswith(p2), then p1 must appear before p2.
  # (e.g. '==' before '=', '>>' and '>=' before '>'.)
  self._punctuation = ['=>', '->', '-', '+', '(', ')', '==', ':=', '"',
                       '|', '~', '&', '{', '}', ',', ';', '!=', ':',
                       '>>', '<<', '>=', '>', '<=', '<', '=', '*', '/']
  # Holds global decoder actions, that can be used in tables if not defined
  # locally.
  self._file_actions = {}
#-------- Recursive descent parse functions corresponding to grammar above.
def _action(self, starred_actions, last_action):
  """ action ::= decoder_action | decoder_method | '"'

  A '"' (ditto mark) repeats `last_action`, the action of the previous
  row. Returns the parsed (or repeated) action.
  """
  if self._next_token().kind == '"':
    self._read_token('"')
    return last_action
  if self._next_token().kind == '=':
    return self._decoder_action(starred_actions)
  elif self._next_token().kind == '->':
    return self._decoder_method()
  else:
    self._unexpected("Row doesn't define an action")
def _action_arch(self, context):
  """action_arch ::= 'arch' ':=' id | '(' id (',' id)* ')'

  Adds architecture to context, either as a single id or a list of ids.
  """
  self._read_keyword('arch')
  self._read_token(':=')
  if self._next_token().kind == '(':
    # Parenthesized form: a list of architecture names.
    self._read_token('(')
    names = [self._id()]
    while self._next_token().kind == ',':
      self._read_token(',')
      names.append(self._id())
    self._read_token(')')
    self._define('arch', names, context)
  else:
    self._define('arch', self._id(), context)
def _action_option(self, context):
  """action_option ::= (action_rule | action_pattern |
                        action_safety | action_arch |
                        action_violation | action_other) ';'

  Dispatches on the leading keyword; anything else starting with a word
  is an 'other' (name := bit_expr) option. All options end with ';'.
  """
  if self._is_keyword('rule'):
    self._action_rule(context)
  elif self._is_keyword('pattern'):
    self._action_pattern(context)
  elif self._is_keyword('safety'):
    self._action_safety(context)
  elif self._is_keyword('arch'):
    self._action_arch(context)
  elif self._is_keyword('violations'):
    self._action_violations(context)
  elif self._next_token().kind == 'word':
    self._action_other(context)
  else:
    self._unexpected("Expected action option but not found")
  self._read_token(';')
def _action_other(self, context):
  """action_other ::= 'word' ':=' bit_expr

  Defines a named bit expression on the context (catch-all option form).
  """
  name = self._read_token('word').value
  self._read_token(':=')
  self._define(name, self._bit_expr(context), context)
def _action_pattern(self, context):
  """action_pattern ::= 'pattern' ':=' bitpattern

  Adds pattern/parse constraints to the context; the pattern must be
  exactly 32 bits wide (see _bitpattern32).
  """
  self._read_keyword('pattern')
  self._read_token(':=')
  self._define('pattern', self._bitpattern32(), context)
def _action_safety(self, context):
  """action_safety ::=
         'safety' ':=' ('super.safety' | safety_check) ('&' safety_check)*

  Adds safety constraints to the context. 'super.safety' starts from the
  inherited checks and extends them.
  """
  self._read_keyword('safety')
  self._read_token(':=')
  if self._is_keyword('super.safety'):
    # Treat as extending case of inherited safety.
    self._read_keyword('super.safety')
    checks = context.find('safety', install_inheriting=False)
    if isinstance(checks, list):
      # Copy so appended checks don't mutate the inherited list.
      checks = list(checks)
    else:
      self._unexpected('safety extensions not allowed, nothing to extend')
  else:
    checks = [self._safety_check(context)]
  while self._next_token().kind == '&':
    self._read_token('&')
    checks.append(self._safety_check(context))
  self._define('safety', checks, context)
def _action_rule(self, context):
  """action_rule ::= 'rule' ':=' id

  Adds rule name to the context.
  """
  self._read_keyword('rule')
  self._read_token(':=')
  self._define('rule', self._id(), context)
def _action_violations(self, context):
  """action_violations ::= 'violations' ':=' violation ('&' violation)*

  Defines the (one or more) conditional violations on the context.
  """
  self._read_keyword('violations')
  self._read_token(':=')
  violations = [self._violation(context)]
  while self._next_token().kind == '&':
    self._read_token('&')
    violations.append(self._violation(context))
  self._define('violations', violations, context)
def _arch(self):
  """ arch ::= '(' word+ ')'

  Returns the words joined into a single space-separated string.
  """
  return ' '.join(self._parenthesized_exp())
def _bit_check(self, context):
  """ bit_check ::= column '=' bitpattern

  The column may be written inline (id(hi:lo)) or be the name of a
  previously defined column found via the context.
  """
  column = None
  if self._is_column():
    column = self._column()
    self._read_token('=')
  elif self._is_name_equals():
    name = self._id()
    column = context.find(name)
    if not column:
      self._unexpected("Can't find column definition for %s" % name)
    self._read_token('=')
  else:
    self._unexpected("didn't find bit pattern check")
  pattern = dgen_core.BitPattern.parse(self._bitpattern(), column)
  if not pattern:
    self._unexpected("bit pattern check malformed")
  return pattern
def _bit_expr(self, context):
  """bit_expr ::= bit_expr1 ('if' bit_expr 'else' bit_expr)?

  Top of the expression precedence tower: a Python-style conditional
  expression (then-value first, then the test, then the else-value).
  """
  then_value = self._bit_expr1(context)
  if self._next_token().kind != 'if': return then_value
  self._read_token('if')
  test = self._bit_expr(context)
  self._read_token('else')
  else_value = self._bit_expr(context)
  return dgen_core.IfThenElse(test, then_value, else_value)
def _bit_expr1(self, context):
  """bit_expr1 ::= bit_expr2 (('&' bit_expr2)* | ('|' bit_expr2)*)

  '&' and '|' may each be chained, but not mixed at the same level
  (a mixed chain would require parentheses).
  """
  value = self._bit_expr2(context)
  if self._next_token().kind == '&':
    args = [value]
    while self._next_token().kind == '&':
      self._read_token('&')
      args.append(self._bit_expr2(context))
    value = dgen_core.AndExp(args)
  elif self._next_token().kind == '|':
    args = [value]
    while self._next_token().kind == '|':
      self._read_token('|')
      args.append(self._bit_expr2(context))
    value = dgen_core.OrExp(args)
  return value
def _bit_expr2(self, context):
  """bit_expr2 ::= bit_expr3 | 'not' bit_expr2

  'not' is right-associative (it recurses into this level).
  """
  if self._next_token().kind == 'not':
    self._read_token('not')
    return dgen_core.NegatedTest(self._bit_expr2(context))
  return self._bit_expr3(context)
def _bit_expr3(self, context):
  """bit_expr3 ::= bit_expr4 ('in' bit_set | 'in' 'bitset' pat_bit_set)?

  'in bitset {...}' tests against bit patterns; plain 'in {...}' tests
  against unsigned integer expressions.
  """
  value = self._bit_expr4(context)
  if not self._next_token().kind == 'in': return value
  self._read_token('in')
  if self._is_keyword('bitset'):
    self._read_keyword('bitset')
    return dgen_core.InBitSet(value, self._pat_bit_set())
  else:
    return dgen_core.InUintSet(value, self._bit_set(context))
def _bit_expr4(self, context):
  """bit_expr4 ::= bit_expr5 (('<' | '<=' | '==' | '!=' | '>=' | '>')
                              bit_expr5)?

  Comparisons are non-associative: at most one per level.
  """
  value = self._bit_expr5(context)
  for op in ['<', '<=', '==', '!=', '>=', '>']:
    if self._next_token().kind == op:
      self._read_token(op)
      return dgen_core.CompareExp(op, value, self._bit_expr5(context))
  return value
def _bit_expr5(self, context):
  """bit_expr5 ::= bit_expr6 | bit_expr5 ('<<' | '>>') bit_expr6

  Left-associative shifts, implemented iteratively.
  """
  value = self._bit_expr6(context)
  while self._next_token().kind in ['<<', '>>']:
    op = self._read_token().value
    value = dgen_core.ShiftOp(op, value, self._bit_expr6(context))
  return value
def _bit_expr6(self, context):
  """bit_expr6 ::= bit_expr7 | bit_expr6 ('+' | '-') bit_expr7

  Left-associative additive operators, implemented iteratively.
  """
  value = self._bit_expr7(context)
  while self._next_token().kind in ['+', '-']:
    op = self._read_token().value
    value = dgen_core.AddOp(op, value, self._bit_expr7(context))
  return value
def _bit_expr7(self, context):
  """bit_expr7 ::= bit_expr8 |
                   bit_expr7 ('*' | '/' | 'mod') bit_expr8

  Left-associative multiplicative operators, implemented iteratively.
  """
  value = self._bit_expr8(context)
  while self._next_token().kind in ['*', '/', 'mod']:
    op = self._read_token().value
    value = dgen_core.MulOp(op, value, self._bit_expr8(context))
  return value
def _bit_expr8(self, context):
  """bit_expr8 ::= bit_expr9 ('=' bitpattern)?

  '=' here is a bit-pattern match against the value, not assignment.
  """
  bits = self._bit_expr9(context)
  if self._next_token().kind != '=': return bits
  self._read_token('=')
  bitpat = self._bitpattern()
  pattern = dgen_core.BitPattern.parse_catch(bitpat, bits)
  if not pattern:
    self._unexpected('Pattern mismatch in %s = %s' % (bits, bitpat))
  else:
    return pattern
def _bit_expr9(self, context):
  """bit_expr9 ::= bit_expr10 (':' bit_expr10)*

  ':' concatenates bit values; a lone value is returned unwrapped.
  """
  value = self._bit_expr10(context)
  if self._next_token().kind != ':': return value
  values = [value]
  while self._next_token().kind == ':':
    self._read_token(':')
    values.append(self._bit_expr10(context))
  return dgen_core.Concat(values)
def _bit_expr10(self, context):
  """bit_expr10 ::= bit_expr11 |
                    bit_expr10 '(' int (':' int)? ')'

  A parenthesized range extracts bits hi:lo; a single int extracts one
  bit (lo defaults to hi). Ranges may be stacked left-associatively.
  """
  value = self._bit_expr11(context)
  while self._next_token().kind == '(':
    self._read_token('(')
    hi_bit = self._int()
    lo_bit = hi_bit
    if self._next_token().kind == ':':
      self._read_token(':')
      lo_bit = self._int()
    self._read_token(')')
    value = dgen_core.BitField(value, hi_bit, lo_bit)
  return value
def _bit_expr11(self, context):
  """bit_expr11 ::= int | nondecimal_int | id | bit_set |
                    '(' bit_expr ')' | call

  Primary expressions. Identifier lookup first consults the context,
  then falls back to _PREDEFINED_CONSTS.
  """
  if self._is_int():
    return dgen_core.Literal(self._int())
  elif self._is_nondecimal_int():
    return self._nondecimal_int()
  elif self._next_token().kind == '{':
    return self._bit_set(context)
  elif self._next_token().kind == '(':
    self._read_token('(')
    value = self._bit_expr(context)
    self._read_token(')')
    return dgen_core.ParenthesizedExp(value)
  elif (self._is_name_paren() and not self._is_column()):
    # Note: we defer input like "foo(2)" to being a (bit field) column.
    # If you want to recognize "foo(2)" as a function call, write 'foo((2))'
    return self._call(context)
  elif self._is_id():
    name = self._id()
    value = context.find(name)
    if not value:
      value = _PREDEFINED_CONSTS.get(name)
      if not value:
        self._unexpected("Can't find definition for %s" % name)
      # Lift predefined symbol into context, so that the high-level
      # definition will be available when the context is printed.
      context.define(name, value)
    return dgen_core.IdRef(name, value)
  else:
    self._unexpected("Don't understand value: %s" % self._next_token().value)
def _bit_set(self, context):
  """bit_set ::= '{' (bit_expr (',' bit_expr)*)? '}'

  Parses a possibly-empty, comma-separated set of bit expressions.
  """
  values = []
  self._read_token('{')
  if not self._next_token().kind == '}':
    values.append(self._bit_expr(context))
    while self._next_token().kind == ',':
      self._read_token(',')
      values.append(self._bit_expr(context))
  self._read_token('}')
  return dgen_core.BitSet(values)
def _bitpattern(self):
  """ bitpattern ::= 'word' | negated_word

  Returns the pattern text; a leading '~' marks a negated pattern.
  """
  return (self._negated_word() if self._next_token().kind == '~'
          else self._read_token('word').value)
def _bitpattern32(self):
  """Returns a bit pattern with 32 bits.

  A negated pattern ('~' + 32 chars) is 33 characters long in total.
  """
  pattern = self._bitpattern()
  if pattern[0] == '~':
    if len(pattern) != 33:
      self._unexpected("Bit pattern %s length != 32" % pattern)
  elif len(pattern) != 32:
    self._unexpected("Bit pattern %s length != 32" % pattern)
  return pattern
def _call(self, context):
  """call ::= word '(' (bit_expr (',' bit_expr)*)? ')'

  A single-argument call whose name is a known dgen type is treated as
  a type cast rather than a function call.
  """
  name = self._read_token('word').value
  args = []
  self._read_token('(')
  while self._next_token().kind != ')':
    if args:
      self._read_token(',')
    args.append(self._bit_expr(context))
  self._read_token(')')
  if len(args) == 1 and name in dgen_core.DGEN_TYPE_TO_CPP_TYPE.keys():
    return dgen_core.TypeCast(name, args[0])
  else:
    return dgen_core.FunctionCall(name, args)
def _citation(self):
  """ citation ::= '(' word+ ')'

  Returns the citation words joined into one space-separated string.
  """
  return ' '.join(self._parenthesized_exp())
def _column(self):
  """column ::= id '(' int (':' int)? ')'

  Reads a column and returns the corresponding BitField representation;
  a single int means a one-bit column (lo defaults to hi).
  """
  name = self._read_token('word').value
  self._read_token('(')
  hi_bit = self._int()
  lo_bit = hi_bit
  if self._next_token().kind == ':':
    self._read_token(':')
    lo_bit = self._int()
  self._read_token(')')
  return dgen_core.BitField(name, hi_bit, lo_bit)
def _decoder_action_options(self, context):
  """Parses 'action_options*'.

  Consumes options for as long as the lookahead starts one.
  """
  while self._is_action_option():
    self._action_option(context)
def _decoder_action(self, starred_actions):
  """decoder_action ::= '=' decoder_defn

  Parses, validates, and finalizes (ends type checking for) the action.
  """
  self._read_token('=')
  action = self._decoder_defn(starred_actions)
  self._check_action_is_well_defined(action)
  return self._decoder_defn_end(action)
def _decoder_actions(self):
  """ decoder_actions ::= ('*' (int | id) decoder_defn)+

  Returns a dict mapping each starred index (an int or an identifier)
  to its decoder definition. Duplicate indices are an error.
  """
  starred_actions = {}
  while self._next_token().kind == '*':
    self._read_token('*')
    if self._is_int():
      index = self._int()
    else:
      index = self._id()
    if starred_actions.get(index):
      # Fixed typo in error message: was "Multple".
      self._unexpected("Multiple *actions defined for %s" % index)
    starred_actions[index] = self._decoder_defn(starred_actions)
  return starred_actions
def _decoder_defn_end(self, action):
  """Called when at end of a decoder definition. Used to
  turn off type checking. Returns the action for chaining."""
  action.force_type_checking(False)
  return action
def _decoder_defn(self, starred_actions):
  """decoder_defn ::= fields? action_option*
                    | '*' (int | id) ('-' field_names)?
                      fields? action_option*

  A leading '*' extends a previously starred action; otherwise a fresh
  DecoderAction is built from optional fields and options.
  """
  if self._next_token().kind == '*':
    return self._decoder_defn_end(
        self._decoder_action_extend(starred_actions))
  action = dgen_core.DecoderAction()
  # Type checking stays on while definitions are being accumulated.
  action.force_type_checking(True)
  if self._next_token().kind == '{':
    fields = self._fields(action)
    self._define('fields', fields, action)
    self._decoder_action_options(action)
  elif self._is_action_option():
    self._decoder_action_options(action)
  return self._decoder_defn_end(action)
def _decoder_action_extend(self, starred_actions):
  """'*' (int | id) ('-' field_names)? fields? action_option*

  Helper function to _decoder_action. Looks up the starred action in
  the table-local actions first, then the file-level actions.
  """
  self._read_token('*')
  if self._is_int():
    index = self._int()
  else:
    index = self._id()
  indexed_action = starred_actions.get(index)
  if not indexed_action:
    indexed_action = self._file_actions.get(index)
  if not indexed_action:
    self._unexpected("Can't find decoder action *%s" % index)
  # Create an initial copy, and define starred action as
  # inheriting definition.
  action = dgen_core.DecoderAction()
  action.force_type_checking(True)
  # Get the set of field names ('-' removes them from the inherited set).
  fields = []
  if self._next_token().kind == '-':
    self._read_token('-')
    fields = self._field_names()
  action.inherits(indexed_action, fields)
  # Recognize fields if applicable.
  if self._next_token().kind == '{':
    self._define('fields', self._fields(action), action)
  # Recognize overriding options.
  self._decoder_action_options(action)
  action.disinherit()
  return action
def _decoder_method(self):
  """ decoder_method ::= '->' id

  Returns a DecoderMethod wrapping the named method.
  """
  self._read_token('->')
  name = self._id()
  return dgen_core.DecoderMethod(name)
def _default_row(self, table, starred_actions, last_action):
  """ default_row ::= 'else' ':' action

  Installs the table's default row. Returns (None, action) so callers
  do not propagate patterns from a default row.
  """
  self._read_token('else')
  self._read_token(':')
  action = self._action(starred_actions, last_action)
  self._check_action_is_well_defined(action)
  if not table.add_default_row(action):
    self._unexpected('Unable to install row default')
  return (None, self._decoder_defn_end(action))
def _field_names(self):
  """field_names ::= '{' (id (',' id)*)? '}'

  Note: To capture predefined actions, we allow special
  action keywords to also apply.
  Returns the list of parsed field names; duplicates raise an error.
  """
  names = []
  self._read_token('{')
  if self._is_field_name():
    names.append(self._read_token().value)
  while self._next_token().kind == ',':
    self._read_token(',')
    if self._is_field_name():
      # Bug fix: was 'name == self._read_token().value' — a comparison
      # against an unbound name (NameError at runtime) instead of an
      # assignment, so the token value was never captured.
      name = self._read_token().value
      if name in names:
        raise Exception("Repeated field name: %s" % name)
      names.append(name)
    else:
      raise Exception("field name expected, found %s" %
                      self._next_token().value)
  self._read_token('}')
  return names
def _field_name_next(self):
  # True if the lookahead can start a field name.
  # NOTE(review): duplicates _is_field_name below — candidates to merge.
  return self._is_id() or self._is_action_option()
def _fields(self, context):
  """fields ::= '{' column (',' column)* '}'

  Parses at least one column; each column is also defined on the
  context under its own name so later options can reference it.
  """
  fields = []
  self._read_token('{')
  field = self._column()
  fields.append(field)
  self._define(field.name().name(), field, context)
  while self._next_token().kind == ',':
    self._read_token(',')
    field = self._column()
    self._define(field.name().name(), field, context)
    fields.append(field)
  self._read_token('}')
  return fields
def _footer(self):
  """ footer ::= '+' '-' '-'

  Consumes the three footer tokens; returns nothing.
  """
  self._read_token('+')
  self._read_token('-')
  self._read_token('-')
def _global_decoder_actions(self, decoder):
  """Read in file level decoder actions, and install required predefined
  file decoder actions.

  Errors out if any name in _REQUIRED_FILE_DECODER_ACTIONS is missing.
  """
  self._file_actions = self._decoder_actions()
  for required_action in _REQUIRED_FILE_DECODER_ACTIONS:
    action = self._file_actions.get(required_action)
    if not action:
      self._unexpected("File level action '%s' not defined" % required_action)
    decoder.define_value(required_action, action)
def _header(self, table):
  """ header ::= "|" column+

  Adds at least one column to the table.
  """
  self._read_token('|')
  self._add_column(table)
  while self._is_column():
    self._add_column(table)
def _nondecimal_int(self):
  """nondecimal_int ::= word
  where word is a hexadecimal or bitstring pattern.

  Returns a Literal for a hex word, or a BitField (value plus width)
  for a bitstring word; errors out otherwise.
  """
  word = self._read_token('word').value
  match = _HEXIDECIMAL_PATTERN.match(word)
  if match:
    # Bug fix: 'Literal' was unqualified here (a NameError at runtime);
    # qualify with dgen_core, matching the bitstring branch below.
    return dgen_core.Literal(int(match.group(1), 16), name=word)
  match = _BITSTRING_PATTERN.match(word)
  if match:
    text = match.group(1)
    l = dgen_core.Literal(int(text, 2), name=word)
    # A bitstring carries a width as well as a value, hence the BitField.
    return dgen_core.BitField(l, len(text) - 1, 0)
  self._unexpected('Nondecimal integer expected but not found: %s' % word)
def _int(self):
  """ int ::= word

  Int is a sequence of digits. Returns the corresponding integer;
  errors out if the word is not all digits.
  """
  word = self._read_token('word').value
  match = _DECIMAL_PATTERN.match(word)
  if match:
    return int(match.group(1))
  self._unexpected(
      'integer expected but found "%s"' % word)
def _id(self):
  """ id ::= word

  Word starts with a letter, and followed by letters, digits,
  and underscores. Returns the corresponding identifier.
  """
  if self._is_id():
    return self._read_token('word').value
  # Bug fix: previously fell through and silently returned None when the
  # next token was not an identifier; report the error instead, matching
  # the behavior of _int() and _quoted_string().
  self._unexpected('identifier expected but found "%s"'
                   % self._next_token().value)
def _named_value(self, context):
  """named_value ::= id ':=' bit_expr.

  Defines the value on the context and also returns it.
  """
  name = self._id()
  self._read_token(':=')
  value = self._bit_expr(context)
  self._define(name, value, context)
  return value
def _negated_word(self):
  """ negated_word ::= '~' 'word'

  Returns the word with the '~' prefix preserved in the string.
  """
  self._read_token('~')
  return '~' + self._read_token('word').value
def _parenthesized_exp(self, minlength=1):
  """ parenthesized_exp ::= '(' (word | punctuation)+ ')'

  The punctuation doesn't include ')'.
  Returns the sequence of token values parsed; errors out if fewer
  than `minlength` tokens appear before the closing ')'.
  """
  self._read_token('(')
  words = []
  while not self._at_eof() and self._next_token().kind != ')':
    words.append(self._read_token().value)
  if len(words) < minlength:
    self._unexpected("len(parenthesized expresssion) < %s" % minlength)
  self._read_token(')')
  return words
def _pat_bit_set(self):
  """pat_bit_set ::= '{' (bitpattern (',' bitpattern)*)? '}'

  Like _bit_set, but elements are bit patterns rather than expressions.
  """
  values = []
  self._read_token('{')
  if not self._next_token().kind == '}':
    values.append(self._bitpattern())
    while self._next_token().kind == ',':
      self._read_token(',')
      values.append(self._bitpattern())
  self._read_token('}')
  return dgen_core.BitSet(values)
def _pat_row(self, table, starred_actions, last_patterns, last_action):
  """ pat_row ::= pattern+ action

  Passed in sequence of patterns and action from last row,
  and returns list of patterns and action from this row.
  The previous row's patterns bound how many patterns this row may have
  (so a '"' ditto can be resolved column by column).
  """
  patterns = []           # Patterns as found on input.
  expanded_patterns = []  # Patterns after being expanded.
  num_patterns = 0
  num_patterns_last = len(last_patterns) if last_patterns else None
  while self._next_token().kind not in ['=', '->', '|', '+']:
    if not last_patterns or num_patterns < num_patterns_last:
      last_pattern = last_patterns[num_patterns] if last_patterns else None
      pattern = self._pattern(last_pattern)
      patterns.append(pattern)
      expanded_patterns.append(table.define_pattern(pattern, num_patterns))
      num_patterns += 1
    else:
      # Processed patterns in this row, since width is now the
      # same as last row.
      break
  action = self._action(starred_actions, last_action)
  table.add_row(expanded_patterns, action)
  return (patterns, action)
def _pattern(self, last_pattern):
  """ pattern ::= bitpattern | '-' | '"'

  Arguments are:
    last_pattern: The pattern for this column from the previous line;
        a '"' (ditto) repeats it.
  """
  if self._next_token().kind == '"':
    self._read_token('"')
    return last_pattern
  if self._next_token().kind in ['-', 'word']:
    return self._read_token().value
  # Remaining case: a negated ('~'-prefixed) bit pattern.
  return self._bitpattern()
def _quoted_string(self):
  """quoted_string := word
  where word is text enclosed in quotes (').

  Returns a QuotedString wrapping the text between the quotes.
  """
  word = self._read_token('word').value
  match = _STRING_PATTERN.match(word)
  if match:
    text = match.group(1)
    return dgen_core.QuotedString(text, name=word)
  self._unexpected('Quoted string expected but not found: "%s"' % word)
def _row(self, table, starred_actions, last_patterns=None, last_action=None):
  """ row ::= '|' (pat_row | default_row)

  Passed in sequence of patterns and action from last row,
  and returns list of patterns and action from this row.
  """
  self._read_token('|')
  if self._next_token().kind == 'else':
    return self._default_row(table, starred_actions, last_action)
  else:
    return self._pat_row(table, starred_actions, last_patterns, last_action)
def _read_keyword(self, keyword):
  """Consumes the next token and reports an error unless it matches the
  (identifier) keyword. (Keywords may arrive either as their own token
  kind or as a plain 'word' token.)"""
  token = self._read_token()
  if not ((token.kind == keyword) or
          (token.kind == 'word' and token.value == keyword)):
    self._unexpected("Expected '%s' but found '%s'" % (keyword, token.value))
def _safety_check(self, context):
  """safety_check ::= bit_expr '=>' id

  Parses safety check and returns it as a SafetyAction.
  """
  check = self._bit_expr(context)
  self._read_token('=>')
  name = self._id()
  return dgen_core.SafetyAction(check, name)
def _table(self, decoder):
  """table ::= table_desc table_actions header row+ footer

  Parses one table and adds it to the decoder; each row inherits the
  previous row's patterns/action so '"' dittos can resolve.
  """
  table = self._table_desc()
  starred_actions = self._table_actions()
  self._header(table)
  (pattern, action) = self._row(table, starred_actions)
  while not self._next_token().kind == '+':
    (pattern, action) = self._row(table, starred_actions, pattern, action)
  if not decoder.add(table):
    self._unexpected('Multiple tables with name %s' % table.name)
  self._footer()
def _table_actions(self):
  """table_actions ::= ( ('*' (int | id) decoder_defn)+ footer)?

  Optional; returns {} if the table has no local starred actions.
  """
  starred_actions = {}
  if self._next_token().kind != '*': return starred_actions
  starred_actions = self._decoder_actions()
  self._footer()
  return starred_actions
def _table_desc(self):
  """ table_desc ::= '+' '-' '-' id citation?

  Returns a new Table with the parsed name and optional citation.
  """
  self._read_token('+')
  self._read_token('-')
  self._read_token('-')
  name = self._id()
  citation = None
  if self._next_token().kind == '(':
    citation = self._citation()
  return dgen_core.Table(name, citation)
def _violation(self, context):
  """violation ::= bit_expr '=>' 'error' '(' quoted_string (',' bit_expr)* ')'

  Parses a (conditional) violation and returns it. The first error
  argument is always the quoted message string.
  """
  check = self._bit_expr(context)
  self._read_token('=>')
  self._read_keyword('error')
  self._read_token('(')
  args = [self._quoted_string()]
  while self._next_token().kind == ',':
    self._read_token(',')
    args.append(self._bit_expr(context))
  self._read_token(')')
  return dgen_core.Violation(check, args)
#------ Syntax checks ------
def _at_eof(self):
  """Returns true if next token is the eof token."""
  kind = self._next_token().kind
  return kind == 'eof'
def _is_action_option(self):
  """Returns true if the input matches an action_option.

  Note: We assume that checking for a word, followed by an assignment
  is sufficient. Uses one token of pushback so the stream is unchanged.
  """
  matches = False
  if self._next_token().kind == 'word':
    token = self._read_token()
    if self._next_token().kind == ':=':
      matches = True
    # Restore the consumed word so callers see the original stream.
    self._pushback_token(token)
  return matches
def _is_bit_check(self):
  """Returns true if a bit check appears next on the input stream.

  Assumes that if a column if found, it must be a bit check.
  """
  return self._is_column_equals() or self._is_name_equals()
def _is_column(self):
    """Returns true if input defines a column (pattern name).
    column ::= id '(' int (':' int)? ')'
    """
    # Tentatively match the token sequence, then restore the stream.
    (tokens, matches) = self._is_column_tokens()
    self._pushback_tokens(tokens)
    return matches
def _is_column_equals(self):
    """Returns true if the input begins with a column followed by '='."""
    (tokens, matches) = self._is_column_tokens()
    # NOTE(review): _is_column_tokens checks for the closing ')' without
    # consuming it, so for a well-formed column the next token here is ')'
    # rather than '=' and matches is cleared. Confirm this is intended.
    if self._next_token().kind != '=':
        matches = False
    self._pushback_tokens(tokens)
    return matches
def _is_column_tokens(self):
    """Collects the sequence of tokens defining:
    column ::= id '(' int (':' int)? ')'

    Returns (tokens, matches) where tokens is the list of tokens consumed
    (the caller is responsible for pushing them back) and matches is true
    when a full column prefix was seen. The closing ')' is checked but
    NOT consumed.
    """
    # Try to match sequence of tokens, saving tokens as processed.
    # Integers arrive from the tokenizer as 'word' tokens.
    tokens = []
    matches = False
    if self._next_token().kind == 'word':
        tokens.append(self._read_token('word'))
        if self._next_token().kind == '(':
            tokens.append(self._read_token('('))
            if self._is_int():
                tokens.append(self._read_token('word'))
                # Optional ':' int suffix for a bit range.
                if self._next_token().kind == ':':
                    tokens.append(self._read_token(':'))
                    if self._is_int():
                        tokens.append(self._read_token('word'))
                if self._next_token().kind == ')':
                    matches = True
    return (tokens, matches)
def _is_id(self):
    """Returns a truthy match object if the next token is a word matching
    _ID_PATTERN; returns False otherwise. Does not consume the token."""
    word = self._next_token()
    if word.kind != 'word': return False
    return _ID_PATTERN.match(word.value)
def _is_field_name(self):
    """Returns true if the next input can act as a field name, i.e. an
    identifier or an action option ('word :=')."""
    return self._is_id() or self._is_action_option()
def _is_int(self):
    """Tests if an integer occurs next.

    Returns None (falsy) for non-word tokens, otherwise the (possibly
    None) result of matching the word against _DECIMAL_PATTERN.
    """
    if self._next_token().kind != 'word': return None
    return _DECIMAL_PATTERN.match(self._next_token().value)
def _is_keyword(self, keyword):
    """Returns true if the next token is the given keyword."""
    token = self._next_token()
    # Reserved keywords are tokenized with kind == keyword (see
    # _next_token); otherwise a plain 'word' with matching value counts.
    return (token.kind == keyword or
            (token.kind == 'word' and token.value == keyword))
def _is_nondecimal_int(self):
    """Tests if a non-decimal integer (hexadecimal or bit-string form)
    occurs next; returns None (falsy) when the next token is not a word."""
    if self._next_token().kind != 'word': return None
    word = self._next_token().value
    return (_HEXIDECIMAL_PATTERN.match(word) or
            _BITSTRING_PATTERN.match(word))
def _is_name_equals(self):
    """Returns true if input begins with 'name='."""
    matches = False
    if self._next_token().kind == 'word':
        # Read the word tentatively, peek at the following token, then
        # push the word back so the stream is unchanged.
        token = self._read_token('word')
        if self._next_token().kind == '=':
            matches = True
        self._pushback_token(token)
    return matches
def _is_name_paren(self):
    """Returns true if input begins with 'name('."""
    matches = False
    if self._next_token().kind == 'word':
        # Tentative read + pushback; the stream is left unchanged.
        token = self._read_token('word')
        if self._next_token().kind == '(':
            matches = True
        self._pushback_token(token)
    return matches
def _is_name_semicolon(self):
    """Returns true if input begins with 'name;'.

    (The original docstring said 'name:', but the code checks for ';'.)
    """
    matches = False
    if self._next_token().kind == 'word':
        # Tentative read + pushback; the stream is left unchanged.
        token = self._read_token('word')
        if self._next_token().kind == ';':
            matches = True
        self._pushback_token(token)
    return matches
def _check_action_is_well_defined(self, action):
    """Reports a parse error unless the given action defines a decoder
    with at least one virtual field (per dgen_decoder)."""
    if not dgen_decoder.ActionDefinesDecoder(action):
        self._unexpected("No virtual fields defined for decoder")
#------ Helper functions.
def _add_column(self, table):
    """Adds a column to a table, and verifies that it isn't repeated."""
    column = self._column()
    # table.add_column returns false when the column already exists.
    if not table.add_column(column):
        self._unexpected('In table %s, column %s is repeated' %
                         (table.name, column.name()))
def _define(self, name, value, context):
    """Install value under name in the given context. Report error if
    name is already defined.
    """
    # fail_if_defined=False: presumably context.define signals an existing
    # definition via its return value instead of raising — confirm against
    # dgen_core. The parse error is then reported here.
    if not context.define(name, value, fail_if_defined=False):
        self._unexpected('%s: multiple definitions' % name)
def _read_id_or_none(self, read_id):
    """Reads an identifier (or a plain word when read_id is false) and
    returns it. Returns None without consuming anything when the next
    token is a separator ('|', '+', '('), and maps the literal word
    'None' to None."""
    if self._next_token().kind in ['|', '+', '(']:
        return None
    name = self._id() if read_id else self._read_token('word').value
    return None if name and name == 'None' else name
#------ Tokenizing the input stream ------
def _read_token(self, kind=None):
    """Reads and returns the next token from input.

    If kind is given, reports a parse error when the consumed token's
    kind does not match.
    """
    token = self._next_token()
    # Clear the one-token cache so the next _next_token call advances.
    self._token = None
    if kind and kind != token.kind:
        self._unexpected('Expected "%s" but found "%s"'
                         % (kind, token.kind))
    if _TRACE_TOKENS:
        print "Read %s" % token
    return token
def _is_next_tokens(self, tokens):
    """Returns true if the input contains the given sequence of token kinds.

    Tokens are read while they match and then pushed back, so the input
    stream is left unchanged regardless of the outcome.
    """
    read_tokens = []
    match = True
    # Iterate the expected kinds directly instead of copying the list and
    # popping from the front; also avoid shadowing the builtin 'next'.
    for expected_kind in tokens:
        upcoming = self._next_token()
        if upcoming.kind == expected_kind:
            read_tokens.append(self._read_token(expected_kind))
        else:
            match = False
            break
    self._pushback_tokens(read_tokens)
    return match
def _next_token(self):
    """Returns the next token from the input.

    The token is cached in self._token (and not consumed) until
    _read_token clears the cache.
    """
    # First see if cached.
    if self._token: return self._token
    # Tokens pushed back via _pushback_token take priority.
    if self._pushed_tokens:
        self._token = self._pushed_tokens.pop()
        return self._token
    # If no more tokens left on the current line. read
    # input till more tokens are found
    while not self._reached_eof and not self._words:
        self._words = self._extract_words(self._read_line())
    if self._words:
        # More tokens found. Convert the first word to a token.
        word = self._words.pop(0)
        # Quoted strings must not be split!
        if word[0] == "'":
            self._token = Token('word', word)
            return self._token
        # First remove any applicable punctuation.
        for p in self._punctuation:
            index = word.find(p)
            if index == 0:
                # Found punctuation, return it. The remainder of the word
                # is pushed back onto the word list for later.
                self._pushback(word[len(p):])
                self._token = Token(p)
                return self._token
            elif index > 0:
                # Punctuation in the middle: split the word there.
                self._pushback(word[index:])
                word = word[:index]
        # See if reserved keyword.
        for key in self._reserved:
            index = word.find(key)
            if index == 0:
                # Found reserved keyword. Verify at end, or followed
                # by punctuation.
                rest = word[len(key):]
                if not rest or 0 in [rest.find(p) for p in self._punctuation]:
                    self._token = Token(key)
                    return self._token
        # if reached, word doesn't contain any reserved words
        # punctuation, so return it.
        self._token = Token('word', word)
    else:
        # No more tokens found, assume eof.
        self._token = Token('eof')
    return self._token
def _extract_words(self, line):
    """Returns the list of words in the given line.

    Words, in this context, include any non-commented text that is
    separated by whitespace, and do not appear in quotes. Quoted strings
    are returned as a single word including both quote characters.
    Note: This code assumes that comments have already been removed.
    """
    words = []
    start_index = 0  # fix: dropped stray trailing semicolon
    line_length = len(line)
    for i in range(0, line_length):
        # Skip over text already consumed (e.g. a quoted string).
        if i < start_index: continue
        # See if word separator, and process accordingly.
        ch = line[i]
        if ch in [' ', '\t', '\n', "'"]:
            # Word separator. Add word and skip over separator.
            if start_index < i:
                # non-empty text found. add as word.
                words.append(line[start_index:i])
            if ch == "'":
                # A quote starts a quoted string; keep it whole.
                text = self._extract_quoted_string(ch, i, line)
                words.append(text)
                start_index = i + len(text)
            else:
                start_index = i + 1
    # All text processed. Add word if non-empty text at end of line.
    if start_index < line_length:
        words.append(line[start_index:line_length])
    return words
def _extract_quoted_string(self, quote, start_index, line):
    """Returns the quoted string that starts at start_index in line,
    including both quote characters. Reports a parse error when no
    matching closing quote is found."""
    closing = line.find(quote, start_index + 1)
    if closing >= 0:
        return line[start_index:closing + 1]
    # No closing quote on this line: this is a parse error.
    self._unexpected(
        "Can't find matching quote for string starting with quote %s" %
        quote)
def _pushback(self, word):
    """Puts word back onto the list of words (ignored when empty)."""
    if word:
        self._words.insert(0, word)
def _pushback_token(self, token):
"""Puts token back on to the input stream."""
if _TRACE_TOKENS:
print "pushback %s" % token
if self._token:
self._pushed_tokens.append(self._token)
self._token = token
else:
self._token = token
def _pushback_tokens(self, tokens):
    """Puts back the reversed list of tokens on to the input stream.

    Pushing from the end of the list restores the original read order.
    """
    while tokens:
        self._pushback_token(tokens.pop())
def _read_line(self):
    """Reads the next line of input with comments stripped and returns it.

    Returns the empty string and sets self._reached_eof at end of input
    (the original docstring claimed None was returned).
    """
    self._line_no += 1
    line = self.input.readline()
    if line:
        # Remove '#' comments and surrounding whitespace.
        return re.sub(r'#.*', '', line).strip()
    else:
        self._reached_eof = True
        return ''
#-------- Error reporting ------
def _unexpected(self, context='Unexpected line in input'):
    """Reports that we didn't find the expected context, tagged with the
    current input line number. (Fixed stray quote in docstring.)"""
    raise Exception('Line %d: %s' % (self._line_no, context))
| bsd-3-clause |
s0enke/boto | boto/cloudhsm/__init__.py | 111 | 1654 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
    """
    Get all available regions for the AWS CloudHSM service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    # NOTE(review): imported here rather than at module top, presumably
    # to avoid a circular import with boto.cloudhsm.layer1 — confirm.
    from boto.cloudhsm.layer1 import CloudHSMConnection
    return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
def connect_to_region(region_name, **kw_params):
    """Connect to the CloudHSM endpoint of the named region.

    Returns a connection object built via RegionInfo.connect, or None
    when no region with the given name exists.
    """
    candidates = (r for r in regions() if r.name == region_name)
    for match in candidates:
        return match.connect(**kw_params)
    return None
| mit |
ProjectSWGCore/NGECore2 | scripts/mobiles/dathomir/mutant_rancor.py | 2 | 1401 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Builds the 'mutant_rancor' MobileTemplate (level-81 elite rancor
    for Dathomir) and registers it with the core spawn service.

    Runs under Jython: MobileTemplate and Vector are Java classes.
    """
    mobileTemplate = MobileTemplate()

    # Basic creature stats.
    mobileTemplate.setCreatureName('mutant_rancor')
    mobileTemplate.setLevel(81)
    mobileTemplate.setDifficulty(Difficulty.ELITE)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(True)
    mobileTemplate.setScale(1)

    # Harvestable resources.
    mobileTemplate.setMeatType("Carnivore Meat")
    mobileTemplate.setMeatAmount(1020)
    mobileTemplate.setHideType("Leathery Hide")
    mobileTemplate.setHideAmount(901)
    mobileTemplate.setBoneType("Animal Bones")
    mobileTemplate.setBoneAmount(851)

    # Behavior.
    mobileTemplate.setSocialGroup("rancor")
    mobileTemplate.setAssistRange(24)
    mobileTemplate.setStalker(True)
    mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Client object template(s).
    templates = Vector()
    templates.add('object/mobile/shared_mutant_rancor.iff')
    mobileTemplate.setTemplates(templates)

    # Special attacks (beast-master abilities).
    attacks = Vector()
    attacks.add('bm_dampen_pain_5')
    attacks.add('bm_shaken_3')
    attacks.add('bm_stomp_5')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('mutant_rancor', mobileTemplate)
struempelix/mbed | workspace_tools/compliance/ioper_base.py | 106 | 1992 | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import sys

# colorama is optional: when it is missing, colored output is simply
# disabled (see COLORAMA below). Catch only ImportError instead of the
# original bare 'except:', which would also hide unrelated failures.
try:
    from colorama import Fore
except ImportError:
    pass

# True when colorama was successfully imported.
COLORAMA = 'colorama' in sys.modules
class IOperTestCaseBase(object):
    """ Interoperability test case base class

    @return list of tuple (severity, Description)
    Example: (result.append((IOperTestSeverity.INFO, ""))

    Note: now derives from object (the original 'class X():' created an
    old-style class under Python 2).
    """

    def __init__(self, scope=None):
        # Severity labels used when reporting results.
        self.PASS = 'PASS'
        self.INFO = 'INFO'
        self.ERROR = 'ERROR'
        self.WARN = 'WARN'
        self.scope = scope  # Default test scope (basic, pedantic, mbed-enabled etc...)

    def test(self, param=None):
        """Runs the test; subclasses override and return a list of
        (severity, description) tuples."""
        result = []
        return result

    def RED(self, text):
        return self.color_text(text, color=Fore.RED, delim=Fore.RESET) if COLORAMA else text

    def GREEN(self, text):
        return self.color_text(text, color=Fore.GREEN, delim=Fore.RESET) if COLORAMA else text

    def YELLOW(self, text):
        return self.color_text(text, color=Fore.YELLOW, delim=Fore.RESET) if COLORAMA else text

    def color_text(self, text, color='', delim=''):
        # Bug fix: the color code was emitted twice (color + text + color
        # + delim), leaving a redundant color escape before the reset.
        return color + text + delim

    def COLOR(self, severity, text):
        """Colors text according to its severity; unknown severities are
        returned unchanged."""
        colors = {
            self.PASS: self.GREEN,
            self.ERROR: self.RED,
            self.WARN: self.YELLOW
        }
        if severity in colors:
            return colors[severity](text)
        return text
| apache-2.0 |
indictranstech/tele-erpnext | erpnext/patches/v4_2/fix_recurring_orders.py | 113 | 1943 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: reset delivery/billing progress on recurring orders.

    For every submitted recurring Sales Order that shows partial
    delivery or billing but has no submitted Delivery Note / Sales
    Invoice item against it, the progress fields are zeroed. The same
    is done for Purchase Orders against Purchase Receipts / Purchase
    Invoices.
    """
    sales_orders = frappe.db.sql("""select name from `tabSales Order`
        where docstatus = 1 and ifnull(is_recurring, 0) = 1
        and (per_delivered > 0 or per_billed > 0)""", as_dict=1)

    for so in sales_orders:
        # No submitted Delivery Note item references this order: the
        # recorded delivery progress is stale.
        if not frappe.db.exists("Delivery Note Item", {"against_sales_order": so.name, "docstatus": 1}):
            frappe.db.sql("""update `tabSales Order` set per_delivered = 0,
                delivery_status = 'Not Delivered' where name = %s""", so.name)
            frappe.db.sql("""update `tabSales Order Item` set delivered_qty = 0
                where parent = %s""", so.name)

        # Same check for billing via Sales Invoice items.
        if not frappe.db.exists("Sales Invoice Item", {"sales_order": so.name, "docstatus": 1}):
            frappe.db.sql("""update `tabSales Order` set per_billed = 0,
                billing_status = 'Not Billed' where name = %s""", so.name)
            frappe.db.sql("""update `tabSales Order Item` set billed_amt = 0
                where parent = %s""", so.name)

    purchase_orders = frappe.db.sql("""select name from `tabPurchase Order`
        where docstatus = 1 and ifnull(is_recurring, 0) = 1
        and (per_received > 0 or per_billed > 0)""", as_dict=1)

    for po in purchase_orders:
        # No submitted Purchase Receipt item references this order.
        if not frappe.db.exists("Purchase Receipt Item", {"prevdoc_doctype": "Purchase Order",
            "prevdoc_docname": po.name, "docstatus": 1}):
                frappe.db.sql("""update `tabPurchase Order` set per_received = 0
                    where name = %s""", po.name)
                frappe.db.sql("""update `tabPurchase Order Item` set received_qty = 0
                    where parent = %s""", po.name)

        # Same check for billing via Purchase Invoice items.
        if not frappe.db.exists("Purchase Invoice Item", {"purchase_order": po.name, "docstatus": 1}):
            frappe.db.sql("""update `tabPurchase Order` set per_billed = 0
                where name = %s""", po.name)
            frappe.db.sql("""update `tabPurchase Order Item` set billed_amt = 0
                where parent = %s""", po.name)
alvaroaleman/ansible | lib/ansible/modules/notification/campfire.py | 48 | 5329 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's documentation/validation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- campfire:
subscription: foo
token: 12345
room: 123
msg: Task completed.
- campfire:
subscription: foo
token: 12345
room: 123
notify: loggins
msg: Task completed ... with feeling.
'''
import cgi
def main():
    """Entry point of the campfire Ansible module.

    Reads the module parameters, posts an optional notification sound
    followed by the message to the Campfire room API, and exits with
    either exit_json or fail_json.
    """

    module = AnsibleModule(
        argument_spec=dict(
            subscription=dict(required=True),
            token=dict(required=True, no_log=True),
            room=dict(required=True),
            msg=dict(required=True),
            notify=dict(required=False,
                        choices=["56k", "bell", "bezos", "bueller",
                                 "clowntown", "cottoneyejoe",
                                 "crickets", "dadgummit", "dangerzone",
                                 "danielsan", "deeper", "drama",
                                 "greatjob", "greyjoy", "guarantee",
                                 "heygirl", "horn", "horror",
                                 "inconceivable", "live", "loggins",
                                 "makeitso", "noooo", "nyan", "ohmy",
                                 "ohyeah", "pushit", "rimshot",
                                 "rollout", "rumble", "sax", "secret",
                                 "sexyback", "story", "tada", "tmyk",
                                 "trololo", "trombone", "unix",
                                 "vuvuzela", "what", "whoomp", "yeah",
                                 "yodel"]),
        ),
        supports_check_mode=False
    )

    subscription = module.params["subscription"]
    token = module.params["token"]
    room = module.params["room"]
    msg = module.params["msg"]
    notify = module.params["notify"]

    URI = "https://%s.campfirenow.com" % subscription
    # XML payload templates: sound notification vs. plain message.
    NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
    MSTR = "<message><body>%s</body></message>"
    AGENT = "Ansible/1.2"

    # Hack to add basic auth username and password the way fetch_url expects
    # (token as the user name, literal 'X' as the password).
    module.params['url_username'] = token
    module.params['url_password'] = 'X'

    target_url = '%s/room/%s/speak.xml' % (URI, room)
    headers = {'Content-Type': 'application/xml',
               'User-agent': AGENT}

    # Send some audible notification if requested
    if notify:
        response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
        if info['status'] not in [200, 201]:
            module.fail_json(msg="unable to send msg: '%s', campfire api"
                                 " returned error code: '%s'" %
                                 (notify, info['status']))

    # Send the message
    response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers)
    if info['status'] not in [200, 201]:
        module.fail_json(msg="unable to send msg: '%s', campfire api"
                             " returned error code: '%s'" %
                             (msg, info['status']))

    module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
junbochen/pylearn2 | pylearn2/utils/logger.py | 44 | 11385 | """Local facilities to configure the logger to our needs."""
__author__ = "David Warde-Farley"
__copyright__ = "Copyright 2012, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__email__ = "wardefar@iro"
__maintainer__ = "David Warde-Farley"
# Portions cribbed from the standard library logging module,
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging
import sys
from logging import Handler, Formatter
from theano.compat import six
from theano.compat.six.moves import xrange
class CustomFormatter(Formatter):
    """
    Conditionally displays log level names and source loggers, only if
    the log level is WARNING or greater.

    Parameters
    ----------
    prefix : WRITEME
    only_from : WRITEME
    """
    def __init__(self, prefix='', only_from=None):
        Formatter.__init__(self)
        # Bare format used for INFO records from matching loggers; the
        # fuller format includes the level name and logger name.
        self._info_fmt = prefix + "%(message)s"
        self._fmt = prefix + "%(levelname)s (%(name)s): %(message)s"
        self._only_from = only_from

    def format(self, record):
        """
        Format the specified record as text.

        Parameters
        ----------
        record : object
            A LogRecord object with the appropriate attributes.

        Returns
        -------
        s : str
            A string containing the formatted log message.

        Notes
        -----
        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory
        steps are carried out. The message attribute of the record is
        computed using LogRecord.getMessage(). If the formatting
        string uses the time (as determined by a call to usesTime(),
        formatTime() is called to format the event time. If there is
        exception information, it is formatted using formatException()
        and appended to the message.
        """
        record.message = record.getMessage()
        # Python 2.6 doesn't have usesTime().
        # So we skip that information for them.
        if hasattr(self, 'usesTime') and self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        # Use the bare format only for INFO records whose logger name
        # matches the only_from prefix (or when no prefix was given).
        emit_special = (self._only_from is None or
                        record.name.startswith(self._only_from))
        if record.levelno == logging.INFO and emit_special:
            s = self._info_fmt % record.__dict__
        else:
            s = self._fmt % record.__dict__
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            try:
                s = s + record.exc_text
            except UnicodeError:
                # Sometimes filenames have non-ASCII chars, which can lead
                # to errors when s is Unicode and record.exc_text is str
                # See issue 8924
                s = s + record.exc_text.decode(sys.getfilesystemencoding())
        return s
class CustomStreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately
    formatted, to one of two streams. DEBUG and INFO messages
    get written to the provided `stdout`, all other messages to
    `stderr`.

    If stream is not specified, sys.stderr is used.

    Parameters
    ----------
    stdout : file-like object, optional
        Stream to which DEBUG and INFO messages should be written.
        If `None`, `sys.stdout` will be used.
    stderr : file-like object, optional
        Stream to which WARNING, ERROR, CRITICAL messages will be
        written. If `None`, `sys.stderr` will be used.
    formatter : `logging.Formatter` object, optional
        Assigned to `self.formatter`, used to format outgoing log messages.

    Notes
    -----
    N.B. it is **not** recommended to pass `sys.stdout` or `sys.stderr` as
    constructor arguments explicitly, as certain things (like nosetests) can
    reassign these during code execution! Instead, simply pass `None`.
    """
    def __init__(self, stdout=None, stderr=None, formatter=None):
        Handler.__init__(self)
        self._stdout = stdout
        self._stderr = stderr
        self.formatter = formatter

    @property
    def stdout(self):
        """The DEBUG/INFO stream; falls back to the *current*
        `sys.stdout` when none was supplied."""
        return sys.stdout if self._stdout is None else self._stdout

    @property
    def stderr(self):
        """The WARNING-and-above stream; falls back to the *current*
        `sys.stderr` when none was supplied."""
        return sys.stderr if self._stderr is None else self._stderr

    def flush(self):
        """Flushes the stream."""
        for stream in (self.stdout, self.stderr):
            stream.flush()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.

        Parameters
        ----------
        record : WRITEME
        """
        try:
            msg = self.format(record)
            # Levels above INFO go to stderr, everything else to stdout.
            if record.levelno > logging.INFO:
                stream = self.stderr
            else:
                stream = self.stdout
            fs = u"%s\n"
            #if no unicode support...
            #Python 2.6 doesn't have logging._unicode, so use the no-unicode
            # path as stream.encoding also doesn't exist.
            if not getattr(logging, '_unicode', True):
                stream.write(fs % msg)
            else:
                try:
                    if (isinstance(msg, six.text_type) and
                            getattr(stream, 'encoding', None)):
                        try:
                            stream.write(fs % msg)
                        except UnicodeEncodeError:
                            # Printing to terminals sometimes fails. For
                            # example, with an encoding of 'cp1251', the above
                            # write will work if written to a stream opened or
                            # wrapped by the codecs module, but fail when
                            # writing to a terminal even when the codepage is
                            # set to cp1251. An extra encoding step seems to
                            # be needed.
                            stream.write((fs % msg).encode(stream.encoding))
                    else:
                        stream.write(fs % msg)
                except (UnicodeError, TypeError):
                    # Last resort: force a UTF-8 byte string.
                    stream.write((fs % msg).encode("UTF-8"))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Delegate to logging's standard error handling.
            self.handleError(record)
def configure_custom(debug=False, stdout=None, stderr=None):
    """
    Route this library's log messages to the console.

    Parameters
    ----------
    debug : bool
        When `True`, DEBUG messages are displayed on `stdout` along
        with INFO-level messages.
    stdout : file-like object, optional
        Destination for DEBUG and INFO messages; the current
        `sys.stdout` is used when `None`.
    stderr : file-like object, optional
        Destination for WARNING, ERROR and CRITICAL messages; the
        current `sys.stderr` is used when `None`.

    Notes
    -----
    Installs a single `CustomStreamHandler` with a `CustomFormatter`
    on the top-level package logger, replacing whatever handlers were
    there before, so repeated calls are idempotent. Messages no longer
    propagate to the root logger. Prefer passing `None` over
    `sys.stdout`/`sys.stderr` explicitly, since tools such as nosetests
    may rebind those streams during execution.
    """
    package_logger = logging.getLogger(__name__.split('.')[0])
    # Keep our messages from bubbling up to the root logger.
    package_logger.propagate = False
    package_logger.setLevel(logging.DEBUG if debug else logging.INFO)
    # Drop any handlers installed by earlier calls (idempotency).
    del package_logger.handlers[:]
    console_handler = CustomStreamHandler(stdout=stdout, stderr=stderr,
                                          formatter=CustomFormatter())
    package_logger.addHandler(console_handler)
def restore_defaults():
    """
    Undo the effects of `configure_custom()`.

    Re-enables propagation to the root logger, resets the package
    logger's level to `logging.NOTSET`, and removes every handler
    installed on it. Use this when embedding the library in a larger
    application that handles logging at the root-logger level; by
    default the console will then only show WARNING, ERROR and
    CRITICAL messages (see the standard `logging` documentation).
    """
    package_logger = logging.getLogger(__name__.split('.')[0])
    package_logger.propagate = True
    package_logger.setLevel(logging.NOTSET)
    del package_logger.handlers[:]
def newline(logger, nb_blank_lines=1):
    """
    A simple method to write a real new line to logging.
    Only works with the INFO level at the moment.

    Parameters
    ----------
    logger : Logger object
        The logger where the blank line will be added.
    nb_blank_lines : int, optional
        Number of blank lines in a row.
    """
    # Temporarily install a handler with an empty format string so the
    # emitted records render as bare blank lines, then remove it.
    formatter = logging.Formatter(fmt='')
    handler = CustomStreamHandler(formatter=formatter)
    logger.addHandler(handler)
    for i in xrange(nb_blank_lines):
        logger.info('')
    logger.removeHandler(handler)
| bsd-3-clause |
mplpl/m7z | lib7z/googletest/scripts/fuse_gtest_files.py | 346 | 8884 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files (relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """
  full_path = os.path.join(directory, relative_path)
  if os.path.isfile(full_path):
    return
  # File is missing: report and abort so later steps don't fail obscurely.
  print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                    directory))
  print('Please either specify a valid project root directory '
        'or omit it on the command line.')
  sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # Both seed files must be present for the fusing to make sense.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.
  """
  target = os.path.join(output_dir, relative_path)

  # An existing file needs the user's explicit permission to be clobbered.
  # TODO(wan@google.com): The following user-interaction doesn't
  # work with automated processes.  We should provide a way for the
  # Makefile to force overwriting the files.
  if os.path.exists(target):
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    if sys.stdin.readline().strip() not in ('y', 'Y'):
      print('ABORTED.')
      sys.exit(1)

  # Create the target's parent directory chain if it isn't there yet.
  parent = os.path.dirname(target)
  if not os.path.isdir(parent):
    os.makedirs(parent)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Check (and possibly create parents for) both generated files.
  for out in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, out)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

  Args:
    gtest_root: root of the Google Test source tree to read headers from.
    output_dir: directory the fused gtest/gtest.h is written into.
  """
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.  `with` closes the handle
    # deterministically instead of leaking it until garbage collection.
    with open(os.path.join(gtest_root, gtest_header_path), 'r') as header_file:
      for line in header_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          # It's '#include "gtest/..."' - let's process it recursively.
          ProcessFile('include/' + m.group(1))
        else:
          # Otherwise we copy the line unchanged to the output file.
          output_file.write(line)

  try:
    ProcessFile(GTEST_H_SEED)
  finally:
    # Close the output even if processing raised, so no partial file is
    # left open.
    output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  Args:
    gtest_root: root of the Google Test source tree to read sources from.
    output_file: an open, writable file object receiving the fused source.
  """
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.  `with` ensures the
    # handle is closed instead of leaking one per processed file.
    with open(os.path.join(gtest_root, gtest_source_file), 'r') as source_file:
      for line in source_file:
        m = INCLUDE_GTEST_FILE_REGEX.match(line)
        if m:
          if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
            # It's '#include "gtest/gtest-spi.h"'.  This file is not
            # #included by "gtest/gtest.h", so we need to process it.
            ProcessFile(GTEST_SPI_H_SEED)
          else:
            # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
            # We treat it as '#include "gtest/gtest.h"', as all other
            # gtest headers are being fused into gtest.h and cannot be
            # #included directly.
            # There is no need to #include "gtest/gtest.h" more than once.
            if GTEST_H_SEED not in processed_files:
              processed_files.add(GTEST_H_SEED)
              output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
        else:
          m = INCLUDE_SRC_FILE_REGEX.match(line)
          if m:
            # It's '#include "src/foo"' - let's process it recursively.
            ProcessFile(m.group(1))
          else:
            output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # `with` guarantees the output file is closed even if fusing raises.
  with open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') as output_file:
    FuseGTestAllCcToFile(gtest_root, output_file)
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc."""
  # Abort early (with a clear message) if either directory is unusable.
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  # Generate the two fused files.
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Command-line entry point: dispatches on the number of arguments."""
  args = sys.argv[1:]
  if len(args) == 1:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, args[0])
  elif len(args) == 2:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(args[0], args[1])
  else:
    # Wrong usage: show the module docstring (which is the help text).
    print(__doc__)
    sys.exit(1)
if __name__ == '__main__':
main()
| lgpl-2.1 |
hastexo/edx-platform | lms/djangoapps/instructor/tests/test_enrollment_store_provider.py | 10 | 2740 | """
Exercises tests on the base_store_provider file
"""
from django.test import TestCase
from lms.djangoapps.instructor.enrollment_report import AbstractEnrollmentReportProvider
from lms.djangoapps.instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
class BadImplementationAbstractEnrollmentReportProvider(AbstractEnrollmentReportProvider):
    """
    Deliberately incomplete EnrollmentProvider implementation: every method
    simply delegates to the abstract base class, so calling any of them is
    expected to raise NotImplementedError.
    """

    def get_enrollment_info(self, user, course_id):
        """
        Delegate to the base class, which should raise NotImplementedError.
        """
        super(BadImplementationAbstractEnrollmentReportProvider, self).get_enrollment_info(user, course_id)

    def get_payment_info(self, user, course_id):
        """
        Delegate to the base class, which should raise NotImplementedError.
        """
        super(BadImplementationAbstractEnrollmentReportProvider, self).get_payment_info(user, course_id)

    def get_user_profile(self, user_id):
        """
        Delegate to the base class, which should raise NotImplementedError.
        """
        super(BadImplementationAbstractEnrollmentReportProvider, self).get_user_profile(user_id)
class TestBaseNotificationDataProvider(TestCase):
    """
    Cover the EnrollmentReportProvider class
    """

    def test_cannot_create_instance(self):
        """
        EnrollmentReportProvider is an abstract class and we should not be able
        to create an instance of it
        """
        # The original code called `super(..., self)` with an unrelated `self`,
        # which raises TypeError for a reason unrelated to abstractness.
        # Actually attempting to instantiate the abstract base class is the
        # behavior this test is meant to pin down.
        with self.assertRaises(TypeError):
            AbstractEnrollmentReportProvider()  # pylint: disable=abstract-class-instantiated

    def test_get_provider(self):
        """
        Makes sure we get an instance of the registered enrollment provider
        """
        provider = PaidCourseEnrollmentReportProvider()
        self.assertIsNotNone(provider)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(provider, PaidCourseEnrollmentReportProvider)

    def test_base_methods_exceptions(self):
        """
        Asserts that all base-methods on the EnrollmentProvider interface will throw
        an NotImplementedError
        """
        bad_provider = BadImplementationAbstractEnrollmentReportProvider()
        with self.assertRaises(NotImplementedError):
            bad_provider.get_enrollment_info(None, None)
        with self.assertRaises(NotImplementedError):
            bad_provider.get_payment_info(None, None)
        with self.assertRaises(NotImplementedError):
            bad_provider.get_user_profile(None)
| agpl-3.0 |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/UiSection.py | 1 | 2838 | ## @file
# process UI section generation
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Section
from Ffs import Ffs
import subprocess
import Common.LongFilePathOs as os
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import UiSectionClassObject
from Common.LongFilePathSupport import OpenLongFilePath as open
## generate UI section
#
#
class UiSection (UiSectionClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        UiSectionClassObject.__init__(self)

    ## GenSection() method
    #
    #   Generate UI section
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  ModuleName  Which module this section belongs to
    #   @param  SecNum      Index of section
    #   @param  KeyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #   @retval tuple       (Generated file name, section alignment)
    #
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None):
        # Dict used to default to the mutable literal {}, which is shared
        # across calls; use None as the sentinel and build a fresh dict here.
        if Dict is None:
            Dict = {}
        #
        # Prepare the parameter of GenSection
        #
        if FfsInf is not None:
            self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
            self.StringData = FfsInf.__ExtendMacro__(self.StringData)
            self.FileName = FfsInf.__ExtendMacro__(self.FileName)

        OutputFile = os.path.join(OutputPath, ModuleName + 'SEC' + SecNum + Ffs.SectionSuffix.get('UI'))

        if self.StringData is not None:
            NameString = self.StringData
        elif self.FileName is not None:
            FileNameStr = GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FileName)
            FileNameStr = GenFdsGlobalVariable.MacroExtend(FileNameStr, Dict)
            FileObj = open(FileNameStr, 'r')
            try:
                # The file content becomes the UI string, wrapped in quotes.
                NameString = FileObj.read()
            finally:
                # Close even if read() raises, so the handle is not leaked.
                FileObj.close()
            NameString = '\"' + NameString + "\""
        else:
            NameString = ''
        GenFdsGlobalVariable.GenerateSection(OutputFile, None, 'EFI_SECTION_USER_INTERFACE', Ui=NameString)
        OutputFileList = [OutputFile]
        return OutputFileList, self.Alignment
| gpl-2.0 |
lpirl/ansible | lib/ansible/utils/module_docs_fragments/vmware.py | 19 | 1474 | # (c) 2016, Charles Paul <cpaul@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Reusable documentation fragment shared by VMware-based Ansible modules.

    Modules pull this in via `extends_documentation_fragment: vmware`; the
    DOCUMENTATION string below is parsed as YAML by Ansible's doc tooling,
    so its content must not be edited casually.
    """

    # Parameters common to all VMware modules (connection/auth options).
    DOCUMENTATION = '''
options:
    hostname:
        description:
            - The hostname or IP address of the vSphere vCenter
        required: True
    username:
        description:
            - The username of the vSphere vCenter
        required: True
        aliases: ['user', 'admin']
    password:
        description:
            - The password of the vSphere vCenter
        required: True
        aliases: ['pass', 'pwd']
    validate_certs:
        description:
            - Allows connection when SSL certificates are not valid. Set to
              false when certificates are not trusted
        required: False
        default: 'True'
        choices: ['True', 'False']
'''
| gpl-3.0 |
WANdisco/hive | testutils/ptest/hivetest.py | 18 | 24138 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import time
from threading import Thread
import os.path
import collections
import re
import os
import Report
import config
# WARNING
#
# If you are editing this code, please be aware that commands passed to `run`
# should not use single quotes, this will break and end badly as the final
# command looks like `ssh 'host' 'some command - single quote will break it'`.
# Also please be aware that `run` uses `.format` to change `{host}` in commands
# into actual host name it is running on, running `.format` on strings using
# `{host}`, for example including `host_code_path` will not work.
#
# Also this code assumes master_base_path is available to all testing machines
# and is mounted in the same place on all of them.
#
# Getting rid of this restrictions without making the code much more complicated
# is very welcome.
# This is configured in user configuration file.
local = None
qfile_set = None
other_set = None
remote_set = None
all_set = None
master_base_path = None
host_base_path = None
runtest_dir = os.getcwd()
# End of user configurated things.
ant_path = None
arc_path = None
phutil_path = None
code_path = None
report_path = None
host_code_path = None
ivy_path = None
def read_conf(config_file):
    """Load the user configuration and derive all module-level paths/host sets.

    Mutates the module globals (host sets, base paths, tool paths) and seeds
    environment variables (proxy, ANT_HOME, JAVA_HOME, HIVE_HOME, PATH
    entries) on the relevant host sets.  Must be called before any other
    command in this script.

    config_file: path to a configuration file, or None to use the default.
    """
    global local, qfile_set, other_set, remote_set, all_set
    global master_base_path, host_base_path
    global ant_path, arc_path, phutil_path, code_path, report_path, host_code_path, ivy_path
    if config_file is not None:
        config.load(config_file)
    else:
        config.load()
    local = config.local
    qfile_set = config.qfile_set
    other_set = config.other_set
    remote_set = config.remote_set
    all_set = config.all_set
    master_base_path = config.master_base_path
    host_base_path = config.host_base_path
    # Optional suffix lets several independent test farms share machines.
    if 'HIVE_PTEST_SUFFIX' in os.environ:
        suffix = os.environ['HIVE_PTEST_SUFFIX']
        master_base_path += '-' + suffix
        host_base_path += '-' + suffix
    ant_path = master_base_path + '/apache-ant-1.8.4'
    arc_path = master_base_path + '/arcanist'
    phutil_path = master_base_path + '/libphutil'
    code_path = master_base_path + '/trunk'
    # Each run gets a timestamped report directory.
    report_path = master_base_path + '/report/' + time.strftime('%m.%d.%Y_%H:%M:%S')
    # '{host}' is substituted per-host by the connection layer.
    host_code_path = host_base_path + '/trunk-{host}'
    ivy_path = master_base_path + '/.ivy2'
    # Setup of needed environmental variables and paths
    # Proxy
    if args.http_proxy is not None:
        all_set.export('http_proxy', args.http_proxy + ':' + args.http_proxy_port)
        all_set.export('https_proxy', args.http_proxy + ':' + args.http_proxy_port)
        all_set.export('ANT_OPTS', get_ant_opts_proxy())
    # Ant
    all_set.export('ANT_HOME', ant_path)
    all_set.add_path(ant_path + '/bin')
    # Arcanist
    all_set.add_path(arc_path + '/bin')
    # Java
    all_set.export('JAVA_HOME', config.java_home)
    all_set.add_path(config.java_home + '/bin')
    # Hive
    remote_set.export('HIVE_HOME', host_code_path + '/build/dist')
    remote_set.add_path(host_code_path + '/build/dist/bin')
def get_ant_opts_proxy():
    """Return ANT_OPTS JVM flags routing HTTP and HTTPS through the proxy."""
    flags = []
    for scheme in ('http', 'https'):
        flags.append(' -D{0}.proxyHost={1}'.format(scheme, args.http_proxy))
        flags.append(' -D{0}.proxyPort={1}'.format(scheme, args.http_proxy_port))
    return ''.join(flags)
def get_ant():
    # Gets Ant 1.8.4 from one of Apache mirrors.
    #
    # Idempotent: skips the download when the install directory already
    # exists.  `local.run(...) is None` with warn_only appears to mean the
    # probe command failed, i.e. the directory is missing — TODO confirm
    # against the `config` connection implementation.
    print('\n-- Installing Ant 1.8.4\n')
    if local.run('test -d "{0}"'.format(ant_path), warn_only = True,
            abandon_output = False) is None:
        local.run('mkdir -p "{0}"'.format(master_base_path))
        local.cd(master_base_path)
        local.run('curl "http://apache.osuosl.org//ant/binaries/apache-ant-1.8.4-bin.tar.gz" | tar xz')
    else:
        print('\n Ant 1.8.4 already installed\n')
def get_arc():
    # Gets latest Arcanist and libphutil from their Git repositories.
    #
    # Clones each repo if its directory is missing, then pulls the latest
    # changes in both regardless.
    print('\n-- Updating Arcanist installation\n')
    if local.run('test -d "{0}"'.format(arc_path), warn_only = True,
            abandon_output = False) is None:
        local.run('mkdir -p "{0}"'.format(os.path.dirname(arc_path)))
        local.run('git clone https://github.com/facebook/arcanist.git "{0}"'
            .format(arc_path))
    if local.run('test -d "{0}"'.format(phutil_path), warn_only = True,
            abandon_output = False) is None:
        local.run('mkdir -p "{0}"'.format(os.path.dirname(phutil_path)))
        local.run('git clone https://github.com/facebook/libphutil.git "{0}"'
            .format(phutil_path))
    local.cd(arc_path)
    local.run('git pull https://github.com/facebook/arcanist.git')
    local.cd(phutil_path)
    local.run('git pull https://github.com/facebook/libphutil.git')
def get_clean_hive():
    # Gets latest Hive from Apache Git repository and cleans the repository
    # (undo of any changes and removal of all generated files). Also runs
    # `arc-setup` so the repo is ready to be used.
    print('\n-- Updating Hive repo\n')
    # NOTE(review): `cd` is issued before the existence test below —
    # presumably `local.cd` only records the path rather than executing a
    # shell `cd`, otherwise this would fail on a fresh machine; verify
    # against the connection implementation in `config`.
    local.cd(code_path)
    if local.run('test -d "{0}"'.format(code_path), warn_only = True,
            abandon_output = False) is None:
        local.run('mkdir -p "{0}"'.format(os.path.dirname(code_path)))
        local.run('git clone http://git.apache.org/hive.git "{0}"'.format(code_path))
    else:
        # Clean repo and checkout to the last revision
        local.run('git reset --hard HEAD')
        local.run('git clean -dffx')
        local.run('git pull')
    local.run('ant arc-setup')
def copy_local_hive():
    # Copy local repo to the destination path instead of using git clone
    #
    # Recreates `code_path` from scratch, copies the working directory
    # (the directory this script was started from) into it, then runs
    # `arc-setup` so the copy is usable.
    if local.run('test -d "{0}"'.format(code_path), warn_only = True,
            abandon_output = False) is None:
        local.run('mkdir -p "{0}"'.format(os.path.dirname(code_path)))
    local.run('rm -rf "{0}"'.format(code_path), warn_only = True)
    local.run('mkdir -p "{0}"'.format(code_path))
    local.run('echo "{0}"'.format(runtest_dir))
    local.cd(runtest_dir)
    local.run('cp -rf * "{0}"'.format(code_path))
    local.cd(code_path)
    local.run('ant arc-setup')
def prepare_for_reports():
    """Create the shared report directory tree all test nodes copy results to."""
    print('\n-- Creating a directory for JUnit reports\n')
    # Remove previous reports that might be there.
    local.run('rm -rf "{0}"'.format(report_path), warn_only = True)
    # Logs plus one output directory per .q suite.
    for subdir in ('logs', 'out/clientpositive', 'out/clientnegative'):
        local.run('mkdir -p "{0}/{1}"'.format(report_path, subdir))
def patch_hive(patches = None, revision = None):
    """Apply a Differential revision and/or local patch files to the Hive repo.

    revision: a Differential revision id ('D123' or '123'); applied first.
    patches: list of local patch file paths, applied in order.

    Allowing multiple revisions and patches would complicate things a little
    (order of applied patches should be preserved, but argparse will split
    them into two lists) so only multiple local patches are allowed.
    Shouldn't be a big problem as you can use `arc export` to get the patches
    locally.
    """
    # Default was the mutable literal [] — use None as the sentinel instead.
    patches = patches or []
    local.cd(code_path)
    if revision is not None:
        print('\n-- Patching Hive repo using a Differential revision\n')
        revision = revision.upper()
        # Accept both 'D123' and bare '123'.
        if not revision.startswith('D'):
            revision = 'D' + revision
        local.run('arc patch "{0}"'.format(revision))
    if patches:
        print('\n-- Patching Hive repo using a patch from local file system\n')
        for patch in patches:
            local.run('patch -rf -p0 < "{0}"'.format(patch))
def build_hive():
    """Run the Ant build (clean or very-clean, then package) in the master repo."""
    print('\n-- Building Hive\n')
    local.cd(code_path)
    # `--very-clean` requests a deeper cleanup target before packaging.
    clean_target = 'very-clean ' if args.very_clean else 'clean '
    local.run('ant -Divy.default.ivy.user.dir={0} '.format(ivy_path)
              + clean_target + 'package')
def propagate_hive():
    # Expects master_base_path to be available on all test nodes in the same
    # place (for example using NFS).
    print('\n-- Propagating Hive repo to all hosts\n')
    print(host_code_path)
    print(code_path)
    remote_set.run('rm -rf "{0}"'.format(host_code_path))
    remote_set.run('mkdir -p "{0}"'.format(host_code_path))
    # NOTE(review): the glob in "{0}/*" sits inside double quotes, so a
    # strictly POSIX shell would not expand it — presumably the remote runner
    # tolerates this; verify against the connection implementation.
    remote_set.run('cp -r "{0}/*" "{1}"'.format(
        code_path, host_code_path))
    # It should avoid issues with 'ivy publish' exceptions during testing phase.
    remote_set.run('cp -r "{0}" "{1}"'.format(ivy_path, host_code_path))
def segment_tests(path):
    # Removes `.q` files that should not be run on this host. The huge shell
    # command is slow (not really suprising considering amount of forking it has
    # to do), you are welcome to make it better=).
    #
    # path: repo-relative directory of .q files, e.g.
    # '/ql/src/test/queries/clientpositive'.  Tests are dealt out
    # round-robin across qfile hosts; each host then deletes every test
    # that was assigned to a different host.
    local.cd(code_path + path)
    tests = local.run('ls -1', quiet = True, abandon_output = False).strip().split('\n')
    qfile_set.cd(host_code_path + path)
    # One bucket of test names per host connection.
    test_splits = [[] for i in range(len(qfile_set))]
    i = 0
    for test in tests:
        test_splits[i].append(test)
        i = (i + 1) % len(qfile_set)
    # Each clause removes another host's bucket; '{host}' is expanded by the
    # connection layer, so only foreign buckets get deleted on each machine.
    cmd = []
    for i in range(len(qfile_set)):
        host = qfile_set.conn[i].hostname
        cmd.append('if [[ "{host}" != "' + host + '" ]]; then rm -f ' + ' '.join(test_splits[i]) + '; fi')
    cmd = ' && '.join(cmd)
    # The command is huge and printing it out is not very useful, using wabbit
    # hunting mode.
    qfile_set.run(cmd, vewy_quiet = True)
def prepare_tests():
    """Split positive and negative .q test files across the qfile hosts."""
    print('\n-- Preparing test sets on all hosts\n')
    for suite_dir in ('/ql/src/test/queries/clientpositive',
                      '/ql/src/test/queries/clientnegative'):
        segment_tests(suite_dir)
def collect_log(name):
    # Moves JUnit log to the global logs directory.
    #
    # This has the same restriction on master_base_path as propagate_hive.
    #
    # name: JUnit report file name, e.g.
    # 'TEST-....TestCliDriver.xml'.  The host name is spliced in before the
    # final extension so reports from different hosts don't collide.
    new_name = name.split('.')
    new_name[-2] += '-{host}'
    new_name = '.'.join(new_name)
    qfile_set.cd(host_code_path + '/build/ql/test')
    # If tests failed there may be no file, so warn only if `cp` is having
    # problems.
    qfile_set.run(
        'cp "' + name + '" "' + report_path + '/logs/' + new_name + '" || ' +
        'touch "' + report_path + '/logs/{host}-' + name + '.fail"'
    )
    # Get the hive.log too.
    qfile_set.cd(host_code_path + '/build/ql/tmp')
    qfile_set.run('cp "hive.log" "' + report_path + '/logs/hive-{host}-' + name + '.log"',
            warn_only = True)
def collect_out(name, desc_name):
    # Moves `.out` file (test output) to the global logs directory.
    #
    # This has the same restriction on master_base_path as propagate_hive.
    #
    # name: repo-relative directory holding the generated .out files.
    # desc_name: report subdirectory ('TestCliDriver'/'TestNegativeCliDriver').
    qfile_set.cd(host_code_path + '/' + name)
    # Warn only if no files are found.
    qfile_set.run('mkdir -p "' + report_path + '/' + desc_name + '/out/' + '"', warn_only = True)
    qfile_set.run('cp * "' + report_path + '/' + desc_name + '/out/' + '"', warn_only = True)
def run_tests():
    # Runs TestCliDriver and TestNegativeCliDriver testcases.
    #
    # Each qfile host only has its own segment of .q files left (see
    # prepare_tests), so this runs the full suites in parallel across hosts,
    # then gathers logs and .out files into the shared report directory.
    print('\n-- Running .q file tests on all hosts\n')
    # Using `quiet` because output of `ant test` is not very useful when we are
    # running on many hosts and it all gets mixed up. In case of an error
    # you'll get last lines generated by `ant test` anyway (might be totally
    # irrelevant if one of the first tests fails and Ant reports a failure after
    # running all the other test, fortunately JUnit report saves the Ant output
    # if you need it for some reason).
    remote_ivy_path = '$(pwd)/.ivy2'
    qfile_set.cd(host_code_path)
    qfile_set.run('ant -Divy.default.ivy.user.dir={0} -Dtestcase=TestCliDriver test'.format(remote_ivy_path), quiet = True, warn_only = True)
    collect_log('TEST-org.apache.hadoop.hive.cli.TestCliDriver.xml')
    collect_out('build/ql/test/logs/clientpositive', 'TestCliDriver')
    qfile_set.cd(host_code_path)
    qfile_set.run('ant -Divy.default.ivy.user.dir={0} -Dtestcase=TestNegativeCliDriver test'.format(remote_ivy_path), quiet = True, warn_only = True)
    collect_log('TEST-org.apache.hadoop.hive.cli.TestNegativeCliDriver.xml')
    collect_out('build/ql/test/logs/clientnegative', 'TestNegativeCliDriver')
def run_other_tests():
    """Run every JUnit test class not covered by run_tests, spread over hosts."""

    def get_other_list():
        """Return the names of all Test* classes to run, minus exclusions."""
        # Classes run elsewhere (the CliDriver suites, run_tests) or known
        # not to work in this harness.
        excludes = [
            'grep -v TestSerDe.class',
            'grep -v TestHiveMetaStore.class',
            'grep -v TestBeeLineDriver.class',
            'grep -v TestHiveServer2Concurrency.class',
            'grep -v TestCliDriver.class',
            'grep -v TestNegativeCliDriver.class',
            'grep -v ".*\$.*\.class"',
        ]
        if not args.singlehost:
            # These tests are handled separately on the master (see cmd_test)
            # when running distributed.
            excludes += [
                'grep -v TestSetUGIOnBothClientServer.class',
                'grep -v TestSetUGIOnOnlyClient.class',
                'grep -v TestSetUGIOnOnlyServer.class',
                'grep -v TestRemoteHiveMetaStore',
                'grep -v TestEmbeddedHiveMetaStore',
            ]
        local.cd(code_path)
        # Generate test classes in build.
        local.run('ant -Dtestcase=nothing test')
        tests = local.run(' | '.join(
            ['find build/*/test/classes -name "Test*.class"',
             'sed -e "s:[^/]*/::g"'] +
            excludes +
            ['sed -e "s:\.class::"']), abandon_output = False)
        return tests.split()

    def segment_other():
        """Build one shell command that assigns each test class to one host."""
        other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver', warn_only = True)
        other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver/positive', warn_only = True)
        other_set.run('mkdir -p ' + report_path + '/TestContribCliDriver/negative', warn_only = True)
        other_set.run('mkdir -p ' + report_path + '/TestHBaseCliDriver', warn_only = True)

        def get_command(test):
            # The globbed sources and the `2>/dev/null` redirection must stay
            # OUTSIDE the double quotes: the previous version quoted them,
            # which made the glob literal and appended ' 2>/dev/null' to the
            # destination path, so nothing was ever copied.
            return '; '.join([
                'ant -Divy.default.ivy.user.dir=$(pwd)/.ivy2 -Dtestcase=' + test + ' test',
                'cp "`find . -name "TEST-*.xml"`" "' + report_path + '/logs/" || ' +
                'touch "' + report_path + '/logs/{host}-' + test + '.fail"',
                'cp "build/ql/tmp/hive.log" "' + report_path + '/logs/hive-{host}-' + test + '.log"',
                'cp build/contrib/test/logs/contribclientnegative/* "' + report_path + '/TestContribCliDriver/negative" 2>/dev/null',
                'cp build/contrib/test/logs/contribclientpositive/* "' + report_path + '/TestContribCliDriver/positive" 2>/dev/null',
                'cp build/hbase-handler/test/logs/hbase-handler/* "' + report_path + '/TestHBaseCliDriver/" 2>/dev/null'
            ])

        cmd = []
        i = 0
        for test in get_other_list():
            # Special case, don't run minimr tests in parallel.  They will run
            # on the first host, and no other tests will run there (unless we
            # have a single host).
            #
            # TODO: Real fix would be to allow parallel runs of minimr tests.
            if len(other_set) > 1:
                if 'minimr' in test.lower():
                    host = other_set.conn[0].hostname
                else:
                    # Round-robin over hosts 1..n-1, skipping the minimr host.
                    i = (i + 1) % len(other_set)
                    if i == 0:
                        i = 1
                    host = other_set.conn[i].hostname
            else:
                # We are running on single host.
                host = other_set.conn[0].hostname
            cmd.append(
                'if [[ "{host}" == "' + host + '" ]]; then ' +
                get_command(test) +
                '; fi'
            )
        return ' ; '.join(cmd)

    command = segment_other()
    other_set.cd(host_code_path)
    # See comment about quiet option in run_tests.
    other_set.run(command, quiet = True, warn_only = True)
def generate_report(one_file_report = False):
    """Use `Report.py` to create an HTML report from the collected logs.

    one_file_report: when True, generate a single (huge) HTML file instead
    of multiple smaller ones.
    """
    print('\n-- Generating a test report\n')
    # Pull in the logs of the master-side-only tests (see cmd_test).  The
    # glob must be outside the quotes to expand; the previous version quoted
    # '…/templogs/* ' (with a trailing space), so the copy never matched.
    local.run('cp ' + master_base_path + '/templogs/* "' + report_path + '/logs/"',
              warn_only = True)
    # Call format to remove '{{' and '}}'.
    path = os.path.expandvars(report_path.format())
    CmdArgs = collections.namedtuple('CmdArgs', ['one_file', 'log_dir', 'report_dir'])
    # Named `report_args` so it does not shadow the module-level `args`.
    report_args = CmdArgs(
        one_file = one_file_report,
        log_dir = '{0}/logs'.format(path),
        report_dir = path
    )
    Report.make_report(report_args)
    print('\n-- Test report has been generated and is available here:')
    print('-- "{0}/report.html"'.format(path))
    print()
def stop_tests():
    """Kill every Java process (and hence every running test) on all hosts.

    Brutal by design: it also kills tests started by any other run of this
    script on the same machines, something more subtle would be nice and
    would allow the same user to run this script multiple times
    simultaneously.
    """
    print('\n-- Stopping tests on all hosts\n')
    remote_set.run('killall -9 java', warn_only = True)
def remove_code():
    # Running this only on one connection per host so there are no conflicts
    # between several `rm` calls. This removes all repositories, it would have
    # to be changed if we were to allow multiple simultaneous runs of this
    # script.
    print('\n-- Removing Hive code from all hosts\n')
    # We could remove only `host_code_path`, but then we would have abandoned
    # directories after lowering number of processes running on one host.
    cmd = 'rm -rf "' + host_base_path + '"'
    # Only the connection whose '{host}' name ends in '-0' (the first
    # connection on each physical machine) performs the removal.
    cmd = 'if [[ `echo "{host}" | grep -q -- "-0$"; echo "$?"` -eq "0" ]]; then ' + \
        cmd + '; fi'
    remote_set.run(cmd)
def overwrite_results():
    # Copy generated `.q.out` files to master repo.
    #
    # Used with --overwrite: the outputs produced by this run become the new
    # expected results in the master checkout.
    local.cd(code_path)
    expanded_path = local.run('pwd', abandon_output = False)
    print('\n-- Copying generated `.q.out` files to master repository: ' +
        expanded_path)
    for name in ['clientpositive', 'clientnegative']:
        local.cd(report_path + '/out/' + name)
        # Don't panic if no files are found.
        local.run('cp * "' + code_path + '/ql/src/test/results/' + name + '"',
            warn_only = True)
def save_svn_info():
    """If requested, record the checked-out revision into ${report_path}/svn-info.

    The file name is historical; the information actually comes from git.
    """
    if not args.svn_info:
        return
    local.cd(master_base_path + '/trunk')
    local.run('git show --summary > "{0}"'.format(report_path + '/svn-info'))
def save_patch():
    """If requested, save the applied diff (vs. HEAD) into ${report_path}/patch."""
    if not args.save_patch:
        return
    local.cd(code_path)
    # Stage everything first so untracked files show up in the diff.
    local.run('git add --all')
    local.run('git diff --no-prefix HEAD > "{0}"'.format(report_path + '/patch'))
# -- Tasks that can be called from command line start here.
def cmd_prepare(patches = None, revision = None):
    """Fetch tools, check out/patch/build Hive, and propagate it to all hosts.

    patches: optional list of local patch file paths.
    revision: optional Differential revision to apply.
    """
    # Default was the mutable literal [] — use None as the sentinel instead.
    patches = patches or []
    get_ant()
    get_arc()
    if args.copylocal:
        copy_local_hive()
    else:
        get_clean_hive()
    patch_hive(patches, revision)
    build_hive()
    propagate_hive()
    prepare_tests()
def cmd_run_tests(one_file_report = False):
    # Runs the whole test cycle: .q suites and all other JUnit classes run
    # concurrently (the latter on a background thread), then results are
    # optionally copied back into the repo and an HTML report is generated.
    prepare_for_reports()
    save_svn_info()
    save_patch()
    # Non-.q tests run in parallel with the .q suites below.
    t = Thread(target = run_other_tests)
    t.start()
    run_tests()
    t.join()
    if args.overwrite:
        overwrite_results()
    generate_report(one_file_report)
def cmd_test(patches = None, revision = None, one_file_report = False):
    """Full cycle: prepare, run master-only tests (distributed mode), run all tests."""
    cmd_prepare(patches, revision)
    if not args.singlehost:
        # These metastore/UGI tests are not safe to run in parallel on the
        # worker hosts, so run them serially on the master first and stash
        # their JUnit reports for generate_report to pick up.
        local.cd(master_base_path + '/trunk')
        local.run('chmod -R 777 *')
        local.run('rm -rf "' + master_base_path + '/templogs/"')
        local.run('mkdir -p "' + master_base_path + '/templogs/"')
        tests = ['TestRemoteHiveMetaStore', 'TestEmbeddedHiveMetaStore', 'TestSetUGIOnBothClientServer', 'TestSetUGIOnOnlyClient', 'TestSetUGIOnOnlyServer']
        for test in tests:
            local.run('sudo -u root ant -Divy.default.ivy.user.dir={0} '.format(ivy_path) + ' -Dtestcase=' + test + ' test')
            local.run('cp "`find . -name "TEST-*.xml"`" "' + master_base_path + '/templogs/"')
    cmd_run_tests(one_file_report)
def cmd_stop():
    """Command-line entry point: kill running tests on every host."""
    stop_tests()
def cmd_remove():
    """Command-line entry point: remove the Hive checkouts from every host."""
    remove_code()
# Command-line interface: define all options, parse them, load the
# configuration, then dispatch to exactly one of the cmd_* entry points.
parser = argparse.ArgumentParser(description =
        'Hive test farm controller.')
parser.add_argument('--config', dest = 'config',
        help = 'Path to configuration file')
parser.add_argument('--prepare', action = 'store_true', dest = 'prepare',
        help = 'Builds Hive and propagates it to all test machines')
parser.add_argument('--run-tests', action = 'store_true', dest = 'run_tests',
        help = 'Runs tests on all test machines')
parser.add_argument('--test', action = 'store_true', dest = 'test',
        help = 'Same as running `prepare` and then `run-tests`')
parser.add_argument('--report-name', dest = 'report_name',
        help = 'Store report and logs directory called `REPORT_NAME`')
parser.add_argument('--stop', action = 'store_true', dest = 'stop',
        help = 'Kill misbehaving tests on all machines')
parser.add_argument('--remove', action = 'store_true', dest = 'remove',
        help = 'Remove Hive trunk copies from test machines')
parser.add_argument('--revision', dest = 'revision',
        help = 'Differential revision to test')
parser.add_argument('--patch', dest = 'patch', nargs = '*',
        help = 'Patches from local file system to test')
parser.add_argument('--one-file-report', dest = 'one_file_report',
        action = 'store_true',
        help = 'Generate one (huge) report file instead of multiple small ones')
parser.add_argument('--overwrite', dest = 'overwrite', action = 'store_true',
        help = 'Overwrite result files in master repo')
parser.add_argument('--copylocal', dest = 'copylocal', action = 'store_true',
        help = 'Copy local repo instead of using git clone and git hub')
parser.add_argument('--singlehost', dest = 'singlehost', action = 'store_true',
        help = 'Only run the test on single host, It is the users '
        'responsibility to make sure that the conf. file does not '
        'contain multiple hosts. '
        'The script is not doing any validation. When --singlehost is set '
        'the script should not be run using sudo.')
parser.add_argument('--very-clean', action = 'store_true', dest = 'very_clean',
        help = 'Build hive with `very-clean` option')
parser.add_argument('--svn-info', dest = 'svn_info', action = 'store_true',
        help = 'Save result of `svn info` into ${report_path}/svn-info')
parser.add_argument('--save-patch', dest = 'save_patch', action = 'store_true',
        help = 'Save applied patch into ${report_path}/patch')
parser.add_argument('--http-proxy', dest = 'http_proxy',
        help = 'Proxy host')
parser.add_argument('--http-proxy-port', dest = 'http_proxy_port',
        help = 'Proxy port')
args = parser.parse_args()
# Configuration must be loaded before any command runs (sets up globals).
read_conf(args.config)
# A custom report name replaces the timestamped directory leaf.
if args.report_name:
    report_path = '/'.join(report_path.split('/')[:-1] + [args.report_name])
# Dispatch: exactly one action flag is honored, in this priority order.
if args.prepare:
    cmd_prepare(args.patch, args.revision)
elif args.run_tests:
    cmd_run_tests(args.one_file_report)
elif args.test:
    cmd_test(args.patch, args.revision, args.one_file_report)
elif args.stop:
    cmd_stop()
elif args.remove:
    cmd_remove()
else:
    parser.print_help()
| apache-2.0 |
jostep/tensorflow | tensorflow/contrib/linalg/python/ops/linear_operator_full_matrix.py | 31 | 6339 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = ["LinearOperatorFullMatrix"]
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
  """`LinearOperator` backed by a dense [batch] matrix `Tensor`.

  Wraps a `Tensor` `A` of shape `[B1,...,Bb, M, N]` with `b >= 0`.  Each
  batch index `(i1,...,ib)` selects one `M x N` matrix `A[i1,...,ib, :, :]`.

  ```python
  # Create a 2 x 2 linear operator.
  matrix = [[1., 2.], [3., 4.]]
  operator = LinearOperatorFullMatrix(matrix)

  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  matrix = tf.random_normal(shape=[2, 3, 4, 4])
  operator = LinearOperatorFullMatrix(matrix)
  ```

  #### Shape compatibility

  `matmul` and `solve` accept a [batch] matrix `x` satisfying

  ```
  operator.shape = [B1,...,Bb] + [M, N],  with b >= 0
  x.shape = [B1,...,Bb] + [N, R],  with R >= 0.
  ```

  #### Performance

  Performance is exactly that of the corresponding raw `TensorFlow` matrix
  ops.  One intelligent choice is made from the initialization hints: for a
  real dtype with both `is_self_adjoint` and `is_positive_definite` set, a
  Cholesky factorization backs the determinant and solve.

  For an operator of shape `[M, N]` and `x.shape = [N, R]`:

  * `operator.matmul(x)` is `O(M * N * R)`.
  * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
  * If `M=N`, `operator.determinant()` is `O(N^3)`.

  With batch shapes `[B1,...,Bb, M, N]` and `[B1,...,Bb, N, R]`, every cost
  above is multiplied by `B1*...*Bb`.

  #### Matrix property hints

  The boolean `is_X` constructor flags, for
  `X = non_singular, self_adjoint, positive_definite, square`, are caller
  promises rather than runtime assertions:

  * `is_X == True`: callers may expect property `X`.  The promise should be
    fulfilled, but finite floating point precision may violate it.
  * `is_X == False`: callers may expect the operator to lack property `X`.
  * `is_X == None` (default): no expectation either way.
  """

  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorFullMatrix"):
    r"""Initialize a `LinearOperatorFullMatrix`.

    Args:
      matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
        Allowed dtypes: `float32`, `float64`, `complex64`, `complex128`.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  We do not require the operator to be self-adjoint to
        be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
    """
    with ops.name_scope(name, values=[matrix]):
      # Convert once, validate, and hand the tensor to the base class.
      self._matrix = ops.convert_to_tensor(matrix, name="matrix")
      self._check_matrix(self._matrix)

      super(LinearOperatorFullMatrix, self).__init__(
          dtype=self._matrix.dtype,
          graph_parents=[self._matrix],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)

  def _check_matrix(self, matrix):
    """Static (graph-construction time) check of the `matrix` argument."""
    allowed_dtypes = [
        dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]

    matrix = ops.convert_to_tensor(matrix, name="matrix")
    if matrix.dtype not in allowed_dtypes:
      raise TypeError(
          "Argument matrix must have dtype in %s. Found: %s"
          % (allowed_dtypes, matrix.dtype))

    ndims = matrix.get_shape().ndims
    if ndims is not None and ndims < 2:
      raise ValueError(
          "Argument matrix must have at least 2 dimensions. Found: %s"
          % matrix)

  def _shape(self):
    # Static shape comes straight from the wrapped Tensor.
    return self._matrix.get_shape()

  def _shape_tensor(self):
    return array_ops.shape(self._matrix)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)

  def _to_dense(self):
    return self._matrix
| apache-2.0 |
sjohannes/exaile | xlgui/panel/playlists.py | 1 | 38162 | # Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
from gi.repository import GObject
from xl import common, event, playlist as xl_playlist, radio, settings, trax
from xl.nls import gettext as _
from xlgui import icons, panel
from xlgui.panel import menus
from xlgui.widgets import dialogs
from xlgui.widgets.common import DragTreeView
from xlgui.widgets.smart_playlist_editor import SmartPlaylistEditor
import logging
logger = logging.getLogger(__name__)
class TrackWrapper(object):
    """Pairs a track with the playlist it belongs to, for panel tree rows."""

    def __init__(self, track, playlist):
        self.track = track
        self.playlist = playlist

    def __unicode__(self):
        """Return a display label: "title[ - artists]", or the track URI
        when no usable title tag exists."""
        titles = self.track.get_tag_raw('title')
        label = None
        if titles is not None:
            label = u' / '.join(titles)
        if not label:
            # No (or empty) title tag; fall back to the track location.
            return self.track.get_loc_for_io()
        artists = self.track.get_tag_raw('artist')
        if artists:
            label += u' - ' + u' / '.join(artists)
        return label
class BasePlaylistPanelMixin(GObject.GObject):
    """
    Base playlist tree object.

    Used by the radio and playlists panels to display playlists
    """

    # HACK: Notice that this is not __gsignals__; descendants need to manually
    # merge this in. This is because new PyGObject doesn't like __gsignals__
    # coming from mixin. See:
    # * https://bugs.launchpad.net/bugs/714484
    # * http://www.daa.com.au/pipermail/pygtk/2011-February/019394.html
    _gsignals_ = {
        'playlist-selected': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'tracks-selected': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'append-items': (GObject.SignalFlags.RUN_LAST, None, (object, bool)),
        'replace-items': (GObject.SignalFlags.RUN_LAST, None, (object,)),
        'queue-items': (GObject.SignalFlags.RUN_LAST, None, (object,)),
    }

    def __init__(self):
        """
        Initializes the mixin
        """
        GObject.GObject.__init__(self)
        # {playlist: iter} cache for custom playlists shown in the tree
        self.playlist_nodes = {}
        self.track_image = icons.MANAGER.pixbuf_from_icon_name(
            'audio-x-generic', Gtk.IconSize.SMALL_TOOLBAR
        )
        # {Playlist: Gtk.Dialog} mapping to keep track of open "are you sure
        # you want to delete" dialogs
        self.deletion_dialogs = {}

    def remove_playlist(self, ignored=None):
        """
        Removes the selected playlist from the UI
        and from the underlying manager
        """
        selected_playlist = self.tree.get_selected_page(raw=True)
        if selected_playlist is None:
            return

        dialog = self.deletion_dialogs.get(selected_playlist)
        if dialog:
            # A confirmation dialog is already open for this playlist;
            # raise it instead of stacking a second one.
            dialog.present()
            return

        def on_response(dialog, response):
            # Only delete on an explicit Yes; any other response just
            # dismisses the dialog.
            if response == Gtk.ResponseType.YES:
                if isinstance(selected_playlist, xl_playlist.SmartPlaylist):
                    self.smart_manager.remove_playlist(selected_playlist.name)
                else:
                    self.playlist_manager.remove_playlist(selected_playlist.name)
                # Remove from {playlist: iter} cache. Smart playlists are
                # never stored in playlist_nodes, so use pop() instead of
                # del to avoid a KeyError that would leave the tree row and
                # this dialog behind.
                self.playlist_nodes.pop(selected_playlist, None)
                # Remove from UI.
                selection = self.tree.get_selection()
                (model, iter) = selection.get_selected()
                self.model.remove(iter)
            del self.deletion_dialogs[selected_playlist]
            dialog.destroy()

        dialog = Gtk.MessageDialog(
            self.parent,
            Gtk.DialogFlags.DESTROY_WITH_PARENT,
            Gtk.MessageType.QUESTION,
            Gtk.ButtonsType.YES_NO,
            _('Delete the playlist "%s"?') % selected_playlist.name,
        )
        dialog.connect('response', on_response)
        self.deletion_dialogs[selected_playlist] = dialog
        dialog.present()

    def rename_playlist(self, playlist):
        """
        Renames the playlist
        """
        if playlist is None:
            return

        # Ask for new name
        dialog = dialogs.TextEntryDialog(
            _("Enter the new name you want for your playlist"),
            _("Rename Playlist"),
            playlist.name,
            parent=self.parent,
        )
        result = dialog.run()
        name = dialog.get_value()
        dialog.destroy()
        if result != Gtk.ResponseType.OK or name == '':
            return

        if name in self.playlist_manager.playlists:
            # name is already in use
            dialogs.error(
                self.parent, _("The " "playlist name you entered is already in use.")
            )
            return

        selection = self.tree.get_selection()
        (model, iter) = selection.get_selected()
        model.set_value(iter, 1, name)

        # Update the manager aswell
        self.playlist_manager.rename_playlist(playlist, name)

    def open_selected_playlist(self):
        """Opens the playlist currently selected in the tree."""
        selection = self.tree.get_selection()
        (model, iter) = selection.get_selected()
        self.open_item(self.tree, model.get_path(iter), None)

    def on_rating_changed(self, widget, rating):
        """
        Updates the rating of the selected tracks
        """
        tracks = self.get_selected_tracks()

        for track in tracks:
            track.set_rating(rating)

        maximum = settings.get_option('rating/maximum', 5)
        # Use float math: `rating / maximum * 100` truncates to 0 for any
        # rating below the maximum under Python 2 integer division.
        event.log_event('rating_changed', self, rating * 100.0 / maximum)

    def open_item(self, tree, path, col):
        """
        Called when the user double clicks on a playlist,
        also called when the user double clicks on a track beneath
        a playlist. When they active a track it opens the playlist
        and starts playing that track
        """
        iter = self.model.get_iter(path)
        item = self.model.get_value(iter, 2)
        if item is not None:
            if isinstance(item, (xl_playlist.Playlist, xl_playlist.SmartPlaylist)):
                # for smart playlists
                if hasattr(item, 'get_playlist'):
                    try:
                        item = item.get_playlist(self.collection)
                    except Exception as e:
                        logger.exception("Error loading smart playlist")
                        dialogs.error(
                            self.parent, _("Error loading smart playlist: %s") % str(e)
                        )
                        return
                else:
                    # Get an up to date copy
                    item = self.playlist_manager.get_playlist(item.name)

                self.emit('playlist-selected', item)
            else:
                # A single track row was activated: append it and play.
                self.emit('append-items', [item.track], True)

    def add_new_playlist(self, tracks=None, name=None):
        """
        Adds a new playlist to the list of playlists. If name is
        None or the name conflicts with an existing playlist, the
        user will be queried for a new name.

        Returns the name of the new playlist, or None if it was
        not added.
        """
        # Avoid a mutable default argument; None means "no tracks".
        if tracks is None:
            tracks = []

        if name:
            if name in self.playlist_manager.playlists:
                name = dialogs.ask_for_playlist_name(
                    self.get_panel().get_toplevel(), self.playlist_manager, name
                )
        else:
            if tracks:
                # Build a name suggestion from the tags of the dropped
                # tracks, preferring artists, then composers, then albums.
                artists = []
                composers = []
                albums = []

                for track in tracks:
                    artist = track.get_tag_display('artist', artist_compilations=False)

                    if artist is not None:
                        artists += [artist]

                    composer = track.get_tag_display(
                        'composer', artist_compilations=False
                    )

                    if composer is not None:
                        # Wrap in a list: get_tag_display returns a string,
                        # and `composers += composer` would append each
                        # character separately.
                        composers += [composer]

                    album = track.get_tag_display('album')

                    if album is not None:
                        # Same as above: append the whole album string.
                        albums += [album]

                artists = list(set(artists))[:3]
                composers = list(set(composers))[:3]
                albums = list(set(albums))[:3]

                if len(artists) > 0:
                    name = artists[0]

                    if len(artists) > 2:
                        # TRANSLATORS: Playlist title suggestion with more
                        # than two values
                        name = _('%(first)s, %(second)s and others') % {
                            'first': artists[0],
                            'second': artists[1],
                        }
                    elif len(artists) > 1:
                        # TRANSLATORS: Playlist title suggestion with two values
                        name = _('%(first)s and %(second)s') % {
                            'first': artists[0],
                            'second': artists[1],
                        }
                elif len(composers) > 0:
                    name = composers[0]

                    if len(composers) > 2:
                        # TRANSLATORS: Playlist title suggestion with more
                        # than two values
                        name = _('%(first)s, %(second)s and others') % {
                            'first': composers[0],
                            'second': composers[1],
                        }
                    elif len(composers) > 1:
                        # TRANSLATORS: Playlist title suggestion with two values
                        name = _('%(first)s and %(second)s') % {
                            'first': composers[0],
                            'second': composers[1],
                        }
                elif len(albums) > 0:
                    name = albums[0]

                    if len(albums) > 2:
                        # TRANSLATORS: Playlist title suggestion with more
                        # than two values
                        name = _('%(first)s, %(second)s and others') % {
                            'first': albums[0],
                            'second': albums[1],
                        }
                    elif len(albums) > 1:
                        # TRANSLATORS: Playlist title suggestion with two values
                        name = _('%(first)s and %(second)s') % {
                            'first': albums[0],
                            'second': albums[1],
                        }
                else:
                    name = ''

            name = dialogs.ask_for_playlist_name(
                self.get_panel().get_toplevel(), self.playlist_manager, name
            )

        if name is not None:
            # Create the playlist from all of the tracks
            new_playlist = xl_playlist.Playlist(name)
            new_playlist.extend(tracks)
            # We are adding a completely new playlist with tracks so we save it
            self.playlist_manager.save_playlist(new_playlist)

        return name

    def _load_playlist_nodes(self, playlist):
        """
        Loads the playlist tracks into the node for the specified playlist
        """
        if playlist not in self.playlist_nodes:
            return

        # Remember expansion state so the refresh is not visually jarring.
        expanded = self.tree.row_expanded(
            self.model.get_path(self.playlist_nodes[playlist])
        )
        self._clear_node(self.playlist_nodes[playlist])

        parent = self.playlist_nodes[playlist]
        for track in playlist:
            if not track:
                continue
            wrapper = TrackWrapper(track, playlist)
            row = (self.track_image, unicode(wrapper), wrapper)
            self.model.append(parent, row)

        if expanded:
            self.tree.expand_row(
                self.model.get_path(self.playlist_nodes[playlist]), False
            )

    def remove_selected_track(self):
        """
        Removes the selected track from its playlist
        and saves the playlist
        """
        selection = self.tree.get_selection()
        (model, iter) = selection.get_selected()
        track = model.get_value(iter, 2)
        if isinstance(track, TrackWrapper):
            del track.playlist[track.playlist.index(track.track)]
            # Update the list
            self.model.remove(iter)
            # TODO do we save the playlist after this??
            self.playlist_manager.save_playlist(track.playlist, overwrite=True)
class PlaylistsPanel(panel.Panel, BasePlaylistPanelMixin):
    """
    The playlists panel
    """

    # Signals come from the mixin; merged manually per the HACK note there.
    __gsignals__ = BasePlaylistPanelMixin._gsignals_

    ui_info = ('playlists.ui', 'PlaylistsPanel')

    def __init__(self, parent, playlist_manager, smart_manager, collection, name):
        """
        Initializes the playlists panel

        @param playlist_manager:  The playlist manager
        """
        panel.Panel.__init__(self, parent, name, _('Playlists'))
        BasePlaylistPanelMixin.__init__(self)

        self.playlist_manager = playlist_manager
        self.smart_manager = smart_manager
        self.collection = collection
        self.box = self.builder.get_object('PlaylistsPanel')

        # Drag-and-drop targets: track URIs, plus a same-widget target used
        # for reordering playlists inside this panel (id 500).
        self.playlist_name_info = 500
        self.track_target = Gtk.TargetEntry.new("text/uri-list", 0, 0)
        self.playlist_target = Gtk.TargetEntry.new(
            "playlist_name", Gtk.TargetFlags.SAME_WIDGET, self.playlist_name_info
        )
        # Empty target list used to refuse drops (see drag_motion).
        self.deny_targets = [Gtk.TargetEntry.new('', 0, 0)]

        self.tree = PlaylistDragTreeView(self)
        self.tree.connect('row-activated', self.open_item)
        self.tree.set_headers_visible(False)
        self.tree.connect('drag-motion', self.drag_motion)
        self.tree.drag_source_set(
            Gdk.ModifierType.BUTTON1_MASK,
            [self.track_target, self.playlist_target],
            Gdk.DragAction.COPY | Gdk.DragAction.MOVE,
        )

        self.scroll = Gtk.ScrolledWindow()
        self.scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.scroll.add(self.tree)
        self.scroll.set_shadow_type(Gtk.ShadowType.IN)
        self.box.pack_start(self.scroll, True, True, 0)
        self.box.show_all()

        # Single column holding an icon and the display text.
        pb = Gtk.CellRendererPixbuf()
        cell = Gtk.CellRendererText()
        if settings.get_option('gui/ellipsize_text_in_panels', False):
            from gi.repository import Pango

            cell.set_property('ellipsize-set', True)
            cell.set_property('ellipsize', Pango.EllipsizeMode.END)
        col = Gtk.TreeViewColumn('Text')
        col.pack_start(pb, False)
        col.pack_start(cell, True)
        col.set_attributes(pb, pixbuf=0)
        col.set_attributes(cell, text=1)
        self.tree.append_column(col)
        # Columns: 0 = icon, 1 = label, 2 = payload (playlist / TrackWrapper)
        self.model = Gtk.TreeStore(GdkPixbuf.Pixbuf, str, object)
        self.tree.set_model(self.model)

        # icons
        self.folder = icons.MANAGER.pixbuf_from_icon_name(
            'folder', Gtk.IconSize.SMALL_TOOLBAR
        )
        self.playlist_image = icons.MANAGER.pixbuf_from_icon_name(
            'music-library', Gtk.IconSize.SMALL_TOOLBAR
        )

        # menus
        self.playlist_menu = menus.PlaylistsPanelPlaylistMenu(self)
        self.smart_menu = menus.PlaylistsPanelPlaylistMenu(self)
        self.default_menu = menus.PlaylistPanelMenu(self)
        self.track_menu = menus.TrackPanelMenu(self)

        self._connect_events()
        self._load_playlists()

    @property
    def menu(self):
        """
        Gets a menu for the selected item

        :return: xlgui.widgets.menu.Menu or None if do not have it
        """
        model, it = self.tree.get_selection().get_selected()
        pl = model[it][2]
        return (
            self.playlist_menu
            if isinstance(pl, xl_playlist.Playlist)
            else self.smart_menu
            if isinstance(pl, xl_playlist.SmartPlaylist)
            else self.track_menu
            if isinstance(pl, TrackWrapper)
            else self.default_menu
        )

    def _connect_events(self):
        """
        Wires up event callbacks and tree key handling.
        """
        event.add_ui_callback(self.refresh_playlists, 'track_tags_changed')
        event.add_ui_callback(
            self._on_playlist_added, 'playlist_added', self.playlist_manager
        )

        self.tree.connect('key-release-event', self.on_key_released)

    def _playlist_properties(self):
        """
        Opens the editor for the selected playlist, when it is a
        smart playlist (custom playlists have no properties editor).
        """
        pl = self.tree.get_selected_page(raw=True)
        if isinstance(pl, xl_playlist.SmartPlaylist):
            self.edit_selected_smart_playlist()

    def refresh_playlists(self, type, track, tags):
        """
        wrapper so that multiple events dont cause multiple
        reloads in quick succession
        """
        # Only title/artist affect the rendered labels.
        if settings.get_option('gui/sync_on_tag_change', True) and tags & {
            'title',
            'artist',
        }:
            self._refresh_playlists()

    # glib_wait(500) debounces bursts of tag-change events into one refresh.
    @common.glib_wait(500)
    def _refresh_playlists(self):
        """
        Callback for when tags have changed and the playlists
        need refreshing.
        """
        if settings.get_option('gui/sync_on_tag_change', True):
            for playlist in self.playlist_nodes:
                self.update_playlist_node(playlist)

    def _on_playlist_added(self, type, object, playlist_name):
        """
        Event callback: a playlist was added to (or resaved in) the
        manager; update an existing node or append a new one.
        """
        new_playlist = self.playlist_manager.get_playlist(playlist_name)
        for plx in self.playlist_nodes:
            if plx.name == playlist_name:
                # Already displayed: refresh the node in place.
                self.update_playlist_node(new_playlist)
                return

        self.playlist_nodes[new_playlist] = self.model.append(
            self.custom, [self.playlist_image, playlist_name, new_playlist]
        )
        self.tree.expand_row(self.model.get_path(self.custom), False)
        self._load_playlist_nodes(new_playlist)

    def _load_playlists(self):
        """
        Loads the currently saved playlists
        """
        # Two top-level folders: smart playlists and custom playlists.
        self.smart = self.model.append(None, [self.folder, _("Smart Playlists"), None])

        self.custom = self.model.append(
            None, [self.folder, _("Custom Playlists"), None]
        )

        names = sorted(self.smart_manager.playlists[:])
        for name in names:
            self.model.append(
                self.smart,
                [self.playlist_image, name, self.smart_manager.get_playlist(name)],
            )

        names = sorted(self.playlist_manager.playlists[:])
        for name in names:
            playlist = self.playlist_manager.get_playlist(name)
            self.playlist_nodes[playlist] = self.model.append(
                self.custom, [self.playlist_image, name, playlist]
            )
            self._load_playlist_nodes(playlist)

        self.tree.expand_row(self.model.get_path(self.smart), False)
        self.tree.expand_row(self.model.get_path(self.custom), False)

    def update_playlist_node(self, pl):
        """
        Updates the playlist node of the playlist
        to reflect any changes in it (i.e. tracks
        being added to the playlist)

        @param pl: the playlist to be updated
        """
        # Match by name: pl may be a fresh object for the same playlist.
        playlists = self.playlist_nodes.keys()
        for playlist in playlists:
            if playlist.name == pl.name:
                node = self.playlist_nodes[playlist]
                # Replace the playlist object in {playlist: iter} cache.
                del self.playlist_nodes[playlist]
                self.playlist_nodes[pl] = node
                # Replace the playlist object in tree model.
                self.model[node][2] = pl
                # Refresh the playlist subnodes.
                self._load_playlist_nodes(pl)

    def import_playlist(self):
        """
        Shows a dialog to ask the user to import a new playlist
        """

        def _on_playlists_selected(dialog, playlists):
            for playlist in playlists:
                self.add_new_playlist(playlist, playlist.name)

        dialog = dialogs.PlaylistImportDialog()
        dialog.connect('playlists-selected', _on_playlists_selected)
        dialog.show()

    def add_smart_playlist(self):
        """
        Shows a dialog for adding a new smart playlist
        """
        pl = SmartPlaylistEditor.create(
            self.collection, self.smart_manager, self.parent
        )
        if pl:
            self.model.append(self.smart, [self.playlist_image, pl.name, pl])

    def edit_selected_smart_playlist(self):
        """
        Shows a dialog for editing the currently selected smart playlist
        """
        pl = self.tree.get_selected_page(raw=True)
        self.edit_smart_playlist(pl)

    def edit_smart_playlist(self, pl):
        """
        Shows a dialog for editing a smart playlist
        """
        pl = SmartPlaylistEditor.edit(
            pl, self.collection, self.smart_manager, self.parent
        )
        if pl:
            # Reflect any rename / edit in the selected row.
            selection = self.tree.get_selection()
            model, it = selection.get_selected()
            model.set_value(it, 1, pl.name)
            model.set_value(it, 2, pl)

    def drag_data_received(self, tv, context, x, y, selection, info, etime):
        """
        Called when someone drags some thing onto the playlist panel
        """
        if info == self.playlist_name_info:
            # We are being dragged a playlist so
            # we have to reorder them
            playlist_name = selection.get_text()
            drag_source = self.tree.get_selected_page()
            # verify names
            if drag_source is not None:
                if drag_source.name == playlist_name:
                    drop_info = tv.get_dest_row_at_pos(x, y)
                    drag_source_iter = self.playlist_nodes[drag_source]
                    if drop_info:
                        path, position = drop_info
                        drop_target_iter = self.model.get_iter(path)
                        drop_target = self.model.get_value(drop_target_iter, 2)
                        if position == Gtk.TreeViewDropPosition.BEFORE:
                            # Put the playlist before drop_target
                            self.model.move_before(drag_source_iter, drop_target_iter)
                            self.playlist_manager.move(
                                playlist_name, drop_target.name, after=False
                            )
                        else:
                            # put the playlist after drop_target
                            self.model.move_after(drag_source_iter, drop_target_iter)
                            self.playlist_manager.move(
                                playlist_name, drop_target.name, after=True
                            )
            # Even though we are doing a move we still don't
            # call the delete method because we take care
            # of it above by moving instead of inserting/deleting
            context.finish(True, False, etime)
        else:
            # Not a playlist reorder: treat the payload as URIs.
            self._drag_data_received_uris(tv, context, x, y, selection, info, etime)

    def _drag_data_received_uris(self, tv, context, x, y, selection, info, etime):
        """
        Called by drag_data_received when the user drags URIs onto us
        """
        locs = list(selection.get_uris())
        drop_info = tv.get_dest_row_at_pos(x, y)
        if drop_info:
            path, position = drop_info
            iter = self.model.get_iter(path)
            drop_target = self.model.get_value(iter, 2)

            # if the current item is a track, use the parent playlist
            insert_index = None
            if isinstance(drop_target, TrackWrapper):
                current_playlist = drop_target.playlist
                drop_target_index = current_playlist.index(drop_target.track)
                # Adjust insert position based on drop position
                if (
                    position == Gtk.TreeViewDropPosition.BEFORE
                    or position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE
                ):
                    # By default adding tracks inserts it before so we do not
                    # have to modify the insert index
                    insert_index = drop_target_index
                else:
                    # If we want to go after we have to append 1
                    insert_index = drop_target_index + 1
            else:
                current_playlist = drop_target

            # Since the playlist do not have very good support for
            # duplicate tracks we have to perform some trickery
            # to make this work properly in all cases
            try:
                remove_track_index = current_playlist.index(
                    self.tree.get_selected_track()
                )
            except ValueError:
                remove_track_index = None
            if insert_index is not None and remove_track_index is not None:
                # Since remove_track_index will be removed before
                # the new track is inserted we have to offset the
                # insert index
                if insert_index > remove_track_index:
                    insert_index = insert_index - 1

            # Delete the track before adding the other one
            # so we do not get duplicates
            # right now the playlist does not support
            # duplicate tracks very well
            if context.action == Gdk.DragAction.MOVE:
                # On a move action the second True makes the
                # drag_data_delete function called
                context.finish(True, True, etime)
            else:
                context.finish(True, False, etime)

            # Add the tracks we found to the internal playlist
            # TODO: have it pass in existing tracks?
            (tracks, playlists) = self.tree.get_drag_data(locs)
            if insert_index is not None:
                current_playlist[insert_index:insert_index] = tracks
            else:
                current_playlist.extend(tracks)

            self._load_playlist_nodes(current_playlist)

            # Do we save in the case when a user drags a file onto a playlist
            # in the playlist panel? note that the playlist does not have to
            # be open for this to happen
            self.playlist_manager.save_playlist(current_playlist, overwrite=True)
        else:
            # If the user dragged files prompt for a new playlist name
            # else if they dragged a playlist add the playlist

            # We don't want the tracks in the playlists to be added to the
            # master tracks list so we pass in False
            (tracks, playlists) = self.tree.get_drag_data(locs, False)
            # First see if they dragged any playlist files
            for new_playlist in playlists:
                self.playlist_nodes[new_playlist] = self.model.append(
                    self.custom, [self.playlist_image, new_playlist.name, new_playlist]
                )
                self._load_playlist_nodes(new_playlist)

                # We are adding a completely new playlist with tracks so
                # we save it
                self.playlist_manager.save_playlist(new_playlist, overwrite=True)

            # After processing playlist proceed to ask the user for the
            # name of the new playlist to add and add the tracks to it
            if len(tracks) > 0:
                self.add_new_playlist(tracks)

    def drag_data_delete(self, tv, context):
        """
        Called after a drag data operation is complete
        and we want to delete the source data
        """
        if context.drag_drop_succeeded():
            self.remove_selected_track()

    def drag_get_data(self, tv, context, selection_data, info, time):
        """
        Called when someone drags something from the playlist
        """
        # TODO based on info determine what we set in selection_data
        if info == self.playlist_name_info:
            # In-panel playlist reorder: ship the playlist name as text.
            pl = self.tree.get_selected_page()
            if pl is not None:
                selection_data.set(Gdk.SELECTION_TYPE_STRING, 8, pl.name)
        else:
            pl = self.tree.get_selected_page()
            if pl is not None:
                tracks = pl[:]
            else:
                tracks = self.tree.get_selected_tracks()

            if not tracks:
                return

            # Cache track objects so the drop side can resolve them by URI.
            for track in tracks:
                DragTreeView.dragged_data[track.get_loc_for_io()] = track

            uris = trax.util.get_uris_from_tracks(tracks)
            selection_data.set_uris(uris)

    def drag_motion(self, tv, context, x, y, time):
        """
        Sets the appropriate drag action based on what we are hovering over

        hovering over playlists causes the copy action to occur
        hovering over tracks within the same playlist causes the move
        action to occur
        hovering over tracks within different playlist causes the move
        action to occur

        Called on the destination widget
        """
        # Reset any target to be default to moving tracks
        self.tree.enable_model_drag_dest([self.track_target], Gdk.DragAction.DEFAULT)
        # Determine where the drag is coming from
        dragging_playlist = False
        if tv == self.tree:
            selected_playlist = self.tree.get_selected_page()
            if selected_playlist is not None:
                dragging_playlist = True

        # Find out where they are dropping onto
        drop_info = tv.get_dest_row_at_pos(x, y)

        if drop_info:
            path, position = drop_info
            iter = self.model.get_iter(path)
            drop_target = self.model.get_value(iter, 2)

            if isinstance(drop_target, xl_playlist.Playlist):
                if dragging_playlist:
                    # If we drag onto we copy, if we drag between we move
                    if (
                        position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE
                        or position == Gtk.TreeViewDropPosition.INTO_OR_AFTER
                    ):
                        Gdk.drag_status(context, Gdk.DragAction.COPY, time)
                    else:
                        Gdk.drag_status(context, Gdk.DragAction.MOVE, time)
                        # Change target as well
                        self.tree.enable_model_drag_dest(
                            [self.playlist_target], Gdk.DragAction.DEFAULT
                        )
                else:
                    Gdk.drag_status(context, Gdk.DragAction.COPY, time)
            elif isinstance(drop_target, TrackWrapper):
                # We are dragging onto another track
                # make it a move operation if we are only dragging
                # tracks within our widget
                # We do a copy if we are draggin from another playlist
                if Gtk.drag_get_source_widget(context) == tv and not dragging_playlist:
                    Gdk.drag_status(context, Gdk.DragAction.MOVE, time)
                else:
                    Gdk.drag_status(context, Gdk.DragAction.COPY, time)
            else:
                # Prevent drop operation by changing the targets
                self.tree.enable_model_drag_dest(
                    self.deny_targets, Gdk.DragAction.DEFAULT
                )
                return False
            return True
        else:  # No drop info
            if dragging_playlist:
                # NOTE(review): context.drag_status(...) — elsewhere this file
                # calls Gdk.drag_status(context, ...); confirm the GI binding
                # exposes this instance method, or this branch may raise.
                context.drag_status(Gdk.DragAction.MOVE, time)
                # Change target as well
                self.tree.enable_model_drag_dest(
                    [self.playlist_target], Gdk.DragAction.DEFAULT
                )

    def on_key_released(self, widget, event):
        """
        Called when a key is released in the tree
        """
        if event.keyval == Gdk.KEY_Menu:
            # Show the context menu matching the selected row type.
            (mods, paths) = self.tree.get_selection().get_selected_rows()
            if paths and paths[0]:
                iter = self.model.get_iter(paths[0])
                pl = self.model.get_value(iter, 2)
                # Based on what is selected determines what
                # menu we will show
                if isinstance(pl, xl_playlist.Playlist):
                    Gtk.Menu.popup(
                        self.playlist_menu, None, None, None, None, 0, event.time
                    )
                elif isinstance(pl, xl_playlist.SmartPlaylist):
                    Gtk.Menu.popup(
                        self.smart_menu, None, None, None, None, 0, event.time
                    )
                elif isinstance(pl, TrackWrapper):
                    Gtk.Menu.popup(
                        self.track_menu, None, None, None, None, 0, event.time
                    )
                else:
                    Gtk.Menu.popup(
                        self.default_menu, None, None, None, None, 0, event.time
                    )
            return True

        if event.keyval == Gdk.KEY_Left:
            (mods, paths) = self.tree.get_selection().get_selected_rows()
            if paths and paths[0]:
                self.tree.collapse_row(paths[0])
            return True

        if event.keyval == Gdk.KEY_Right:
            (mods, paths) = self.tree.get_selection().get_selected_rows()
            if paths and paths[0]:
                self.tree.expand_row(paths[0], False)
            return True

        if event.keyval == Gdk.KEY_Delete:
            # Delete either the selected playlist or the selected track.
            (mods, paths) = self.tree.get_selection().get_selected_rows()
            if paths and paths[0]:
                iter = self.model.get_iter(paths[0])
                pl = self.model.get_value(iter, 2)
                # Based on what is selected determines what
                # menu we will show
                if isinstance(pl, xl_playlist.Playlist) or isinstance(
                    pl, xl_playlist.SmartPlaylist
                ):
                    self.remove_playlist(pl)
                elif isinstance(pl, TrackWrapper):
                    self.remove_selected_track()
            return True
        return False

    def _clear_node(self, node):
        """
        Clears a node of all children
        """
        iter = self.model.iter_children(node)
        while True:
            if not iter:
                break
            self.model.remove(iter)
            # remove() invalidates iter; fetch the (new) first child again.
            iter = self.model.iter_children(node)
class PlaylistDragTreeView(DragTreeView):
    """
    Custom DragTreeView to retrieve data from playlists
    """

    def __init__(self, container, receive=True, source=True):
        DragTreeView.__init__(self, container, receive, source)
        self.show_cover_drag_icon = False

    def get_selection_empty(self):
        '''Returns True if there are no selected items'''
        return self.get_selection().count_selected_rows() == 0

    def get_selection_is_computed(self):
        """
        Returns True if selection is a Smart Playlist
        """
        item = self.get_selected_item(raw=True)
        return isinstance(item, xl_playlist.SmartPlaylist)

    def get_selected_tracks(self):
        """
        Used by the menu, just basically gets the selected
        playlist and returns the tracks in it.

        Returns a (possibly empty) list of tracks; falls back to the
        single selected track when no playlist is selected.
        """
        playlist = self.get_selected_page()
        if playlist is not None:
            return list(playlist)
        # No playlist selected; use the selected track, if any. Returning
        # an empty list (rather than [None]) keeps callers that iterate
        # the result from crashing when nothing is selected.
        track = self.get_selected_track()
        return [track] if track is not None else []

    def get_selected_page(self, raw=False):
        """
        Retrieves the currently selected playlist in
        the playlists panel.  If a non-playlist is
        selected it returns None

        @return: the playlist
        """
        item = self.get_selected_item(raw=raw)

        if isinstance(item, (xl_playlist.Playlist, xl_playlist.SmartPlaylist)):
            return item
        else:
            return None

    def get_selected_track(self):
        """
        Returns the track of the selected row, or None when the row is
        not a track.
        """
        item = self.get_selected_item()

        if not item:
            return None

        if isinstance(item, TrackWrapper):
            return item.track
        else:
            return None

    def get_selected_item(self, raw=False):
        """
        Returns the payload of the selected row.  With raw=False, smart
        playlists and radio items are resolved to concrete playlists;
        with raw=True they are returned as-is.
        """
        (model, iter) = self.get_selection().get_selected()

        if not iter:
            return None

        item = model.get_value(iter, 2)

        # for smart playlists
        if isinstance(item, xl_playlist.SmartPlaylist):
            if raw:
                return item
            try:
                return item.get_playlist(self.container.collection)
            except Exception:
                # Smart playlist evaluation failed; treat as no selection.
                return None
        if isinstance(item, radio.RadioItem):
            if raw:
                return item
            return item.get_playlist()
        elif isinstance(item, xl_playlist.Playlist):
            return item
        elif isinstance(item, TrackWrapper):
            return item
        else:
            return None
| gpl-2.0 |
celiafish/VisTrails | vistrails/gui/mashups/mashups_widgets.py | 2 | 15700 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin, \
StandardConstantWidget
from vistrails.core.modules.module_registry import get_module_registry
class QAliasSliderWidget(QtGui.QWidget):
    """A labeled slider bound to a mashup alias.

    Wraps a QSliderWidget configured from the alias component (range, step
    size and initial value) and re-emits its change/focus notifications so
    the mashup editor can track edits.
    """

    def __init__(self, alias, vtparam, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)

        caption = QtGui.QLabel(alias.name)
        caption.font().setBold(True)

        component = alias.component
        self.value = QSliderWidget(param=vtparam, parent=self)
        self.value.setRange(component.minVal, component.maxVal)
        self.value.setSingleStep(component.stepSize)
        self.value.setContents(component.val)
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)

        layout = QtGui.QHBoxLayout()
        layout.setMargin(8)
        layout.addWidget(caption)
        layout.addWidget(self.value)
        self.setLayout(layout)

    def contents_changed(self, info):
        """Re-emit a slider change as coming from this compound widget."""
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))

    def focusInEvent(self, event):
        self.emit(QtCore.SIGNAL("receivedfocus"), self)

    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
###############################################################################
class QSliderWidget(ConstantWidgetMixin, QtGui.QSlider):
    """Horizontal slider bound to a numeric vistrails parameter.

    QSlider only handles integers, so this widget maps the float range
    [floatMinVal, floatMaxVal] onto integer step indices 0..numSteps; the
    value represented by index i is floatMinVal + i * floatStepSize.
    """
    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        QtGui.QSlider.__init__(self, QtCore.Qt.Horizontal, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        # Only numeric basic-package parameters make sense on a slider.
        assert param.type in ['Integer', 'Float']
        self.sliderType = int if param.type == 'Integer' else float
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(int)'), self.change_val)
        QtGui.QSlider.setSingleStep(self, 1)
        QtGui.QSlider.setPageStep(self, 5)
        # Defaults for the float<->index mapping; callers normally override
        # them via setRange()/setSingleStep() right after construction.
        self.floatMinVal = 0.0
        self.floatMaxVal = 1.0
        self.floatStepSize = 1
        self.numSteps = 1
        self.setContents(param.strValue)
        self.setTickPosition(QtGui.QSlider.TicksAbove)

    def contents(self):
        """Return the current value decoded to the parameter's type."""
        floatVal = float(self.value()) * self.floatStepSize + self.floatMinVal
        return self.sliderType(floatVal)

    def setContents(self, strValue, silent=True):
        """ encodes a number to a scaled integer """
        if strValue:
            value = strValue
        else:
            value = "0.0"
        floatVal = float(value)
        # Map the float onto its step index relative to the range minimum.
        value = int((floatVal - self.floatMinVal) / self.floatStepSize)
        self.setValue(int(value))
        self.setToolTip("%g" % floatVal)
        if not silent:
            self.update_parent()

    def change_val(self, newval):
        """ decodes a scaled integer to the correct number """
        floatVal = float(newval) * self.floatStepSize + self.floatMinVal
        self.setToolTip("%g" % floatVal)
        self.update_parent()

    def setRange(self, minVal, maxVal):
        # Store the float bounds, then let setSingleStep() recompute the
        # integer step count for the underlying QSlider.
        self.floatMinVal = float(minVal)
        self.floatMaxVal = float(maxVal)
        QtGui.QSlider.setRange(self, 0, 1)
        self.setSingleStep(self.floatStepSize)

    def setSingleStep(self, stepSize):
        """ stepSize tells the step between values. We need to calculate the
        number of steps """
        self.floatStepSize = float(stepSize)
        self.numSteps = int((self.floatMaxVal - self.floatMinVal) / self.floatStepSize)
        QtGui.QSlider.setRange(self, 0, self.numSteps)
###############################################################################
class QAliasNumericStepperWidget(QtGui.QWidget):
    """A labeled spin box (integer or float) bound to a mashup alias.

    The concrete editor widget is chosen from the alias component type and
    configured with the component's range, step size and initial value.
    """

    def __init__(self, alias, vtparam, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)

        caption = QtGui.QLabel(alias.name)
        caption.font().setBold(True)

        component = self.alias.component
        if component.type == "Integer":
            self.value = QNumericStepperIntegerWidget(param=vtparam,
                                                      parent=self)
            self.value.setRange(int(component.minVal),
                                int(component.maxVal))
            self.value.setSingleStep(int(component.stepSize))
            self.value.setContents(component.val)
        elif component.type == "Float":
            self.value = QNumericStepperFloatWidget(param=vtparam,
                                                    parent=self)
            self.value.setRange(float(component.minVal),
                                float(component.maxVal))
            self.value.setSingleStep(float(component.stepSize))
            self.value.setContents(component.val)
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)

        layout = QtGui.QHBoxLayout()
        layout.setMargin(8)
        layout.addWidget(caption)
        layout.addWidget(self.value)
        self.setLayout(layout)

    def contents_changed(self, info):
        """Re-emit a spin box change as coming from this compound widget."""
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))

    def focusInEvent(self, event):
        self.emit(QtCore.SIGNAL("receivedfocus"), self)

    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
###############################################################################
class QNumericStepperIntegerWidget(ConstantWidgetMixin, QtGui.QSpinBox):
    """Spin box editor for an Integer basic-package parameter."""
    contentsChanged = QtCore.pyqtSignal(object, object)

    def __init__(self, param, parent=None):
        QtGui.QSpinBox.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        assert param.type == 'Integer'
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(int)'),
                     self.change_val)
        self.setContents(param.strValue)

    def contents(self):
        """Return the current spin box value."""
        return self.value()

    def setContents(self, strValue, silent=True):
        """Load the widget from a string; an empty string means zero."""
        self.setValue(int(strValue if strValue else "0"))
        if not silent:
            self.update_parent()

    def change_val(self, newval):
        self.update_parent()
###############################################################################
class QNumericStepperFloatWidget(ConstantWidgetMixin, QtGui.QDoubleSpinBox):
    """Spin box editor for a Float basic-package parameter."""
    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        QtGui.QDoubleSpinBox.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        assert param.type == 'Float'
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(double)'),
                     self.change_val)
        self.setContents(param.strValue)

    def contents(self):
        """Return the current spin box value."""
        return self.value()

    def setContents(self, strValue, silent=True):
        """Load the widget from a string; an empty string means zero."""
        self.setValue(float(strValue if strValue else "0"))
        if not silent:
            self.update_parent()

    def change_val(self, newval):
        self.update_parent()
###############################################################################
class QDropDownWidget(QtGui.QWidget):
    """Alias editor composed of a value widget plus a drop-down menu listing
    the alias' predefined values.

    Picking an entry in the menu copies that entry's contents into the main
    value widget and re-emits 'contentsChanged'.
    """
    def __init__(self, alias, vtparam, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)
        label = QtGui.QLabel(alias.name)
        label.font().setBold(True)
        # Main editable widget showing the alias' current value.
        self.value = self.createAliasWidget(val=self.alias.component.val,
                                            parent=self)
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)
        self.dropdownbtn = QtGui.QToolButton(self)
        self.dropdownbtn.setArrowType(QtCore.Qt.DownArrow)
        self.dropdownbtn.setAutoRaise(True)
        #menu button
        self.createMenu()
        self.dropdownbtn.setPopupMode(QtGui.QToolButton.InstantPopup)
        hbox = QtGui.QHBoxLayout()
        hbox.setMargin(8)
        hbox.addWidget(label)
        hbox.addWidget(self.value)
        hbox.addWidget(self.dropdownbtn)
        self.setLayout(hbox)

    def createMenu(self):
        """Build the drop-down menu: one (radio button, read-only value
        widget) row per predefined value of the alias component."""
        self.menu = QMenuValue(self)
        self.menu.setSizePolicy(QtGui.QSizePolicy.Preferred,
                                QtGui.QSizePolicy.Maximum)
        mbox = QtGui.QVBoxLayout()
        mbox.setSpacing(1)
        mbox.setMargin(2)
        # Maps each radio button to the widget displaying its value.
        self.menu_widgets = {}
        valuelist = self.alias.component.valueList
        for v in valuelist:
            hbox = QtGui.QHBoxLayout()
            rb = QMenuRadioButton()
            rb.setChecked(False)
            vw = self.createMenuAliasWidget(val=v, parent=self)
            vw.setSizePolicy(QtGui.QSizePolicy.Preferred,
                             QtGui.QSizePolicy.Maximum)
            vw.setReadOnly(True)
            self.menu_widgets[rb] = vw
            hbox.addWidget(rb)
            hbox.addWidget(vw)
            mbox.addLayout(hbox)
            # Clicking the radio button dismisses the menu; clicking the
            # value widget checks its radio button.
            self.connect(rb,
                         QtCore.SIGNAL("clicked(bool)"),
                         self.menu.hide)
            self.connect(vw,
                         QtCore.SIGNAL("clicked(bool)"),
                         rb.setChecked)
        self.menu.setLayout(mbox)
        self.dropdownbtn.setMenu(self.menu)
        #there's a bug on a mac that causes the menu to be always displayed
        #where it was shown for the first time... We need to ensure
        #the right position.
        self.connect(self.menu,
                     QtCore.SIGNAL("aboutToShow()"),
                     self.ensure_menu_position)
        # The chosen value is applied when the menu closes.
        self.connect(self.menu,
                     QtCore.SIGNAL("aboutToHide()"),
                     self.value_selected)

    def contents_changed(self, info):
        """Re-emit a value-widget change as coming from this widget."""
        #print "drop down emitting"
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))

    def ensure_menu_position(self):
        # Reposition the popup just below the drop-down button (works around
        # the mac bug described in createMenu).
        #print self.dropdownbtn.pos(),
        newpos = QtCore.QPoint(self.dropdownbtn.pos().x(),
                               self.dropdownbtn.pos().y() + self.dropdownbtn.frameSize().height())
        self.menu.move(self.mapToGlobal(newpos))
        #print self.menu.pos()

    def createAliasWidget(self, val=None, parent=None):
        """Instantiate the registry-provided constant widget for the alias'
        parameter type, optionally pre-loaded with *val*."""
        if self.vtparam.identifier == '':
            idn = get_vistrails_basic_pkg_id()
        else:
            idn = self.vtparam.identifier
        reg = get_module_registry()
        p_descriptor = reg.get_descriptor_by_name(idn, self.vtparam.type,
                                                  self.vtparam.namespace)
        widget_type = get_widget_class(p_descriptor)
        if val:
            # NOTE(review): falsy values such as '' or '0' are not applied
            # here -- confirm whether that is intentional.
            self.vtparam.strValue = val
        return widget_type(self.vtparam, parent)

    def createMenuAliasWidget(self, val=None, parent=None):
        """Wrap a value widget so it can be embedded in a menu row."""
        widget = self.createAliasWidget(val)
        return QMenuValueItem(widget, parent)

    def value_selected(self):
        """Copy the checked menu entry's contents into the main widget."""
        #print "value_selected", self.menu.pos()
        # dict.iteritems: this module targets Python 2.
        for rb, vw in self.menu_widgets.iteritems():
            if rb.isChecked():
                self.value.setContents(vw.contents(), silent=False)
                vw.setFocus()
                rb.setChecked(False)
                self.menu.hide()
                break

    def focusInEvent(self, event):
        self.emit(QtCore.SIGNAL("receivedfocus"), self)

    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
class QMenuRadioButton(QtGui.QRadioButton):
    """Radio button that becomes checked as soon as it receives focus."""

    def focusInEvent(self, event):
        # Tabbing onto the button selects it, mirroring a mouse click.
        self.setChecked(True)
        QtGui.QRadioButton.focusInEvent(self, event)
class QMenuValue(QtGui.QMenu):
    """Menu that forwards a click anywhere on a row to its QMenuValueItem."""

    def mousePressEvent(self, e):
        # Walk up from the widget under the cursor until the enclosing
        # QMenuValueItem is found (if any) and notify it of the click.
        widget = self.childAt(e.pos())
        while not (widget is None or isinstance(widget, QMenuValueItem)):
            widget = widget.parent()
        if widget is not None:
            widget.emit(QtCore.SIGNAL("clicked(bool)"), True)
        QtGui.QMenu.mousePressEvent(self, e)
class QMenuValueItem(QtGui.QWidget):
    """Wraps a value widget so it can live inside a QMenuValue row."""

    def __init__(self, widget, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.widget = widget
        box = QtGui.QVBoxLayout()
        box.setMargin(0)
        box.setSpacing(0)
        box.addWidget(self.widget)
        self.setLayout(box)

    def setReadOnly(self, on):
        # "Read only" is implemented by disabling the wrapped widget.
        self.setEnabled(not on)

    def contents(self):
        return self.widget.contents()

    def mousePressEvent(self, e):
        self.emit(QtCore.SIGNAL("clicked(bool)"), True)
| bsd-3-clause |
yfried/ansible | lib/ansible/modules/source_control/git_config.py | 18 | 6941 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Marius Gedminas <marius@pov.lt>
# (c) 2016, Matthew Gamble <git@matthewgamble.net>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: git_config
author:
- Matthew Gamble (@djmattyg007)
- Marius Gedminas
version_added: 2.1
requirements: ['git']
short_description: Read and write git configuration
description:
- The C(git_config) module changes git configuration by invoking 'git config'.
This is needed if you don't want to use M(template) for the entire git
config file (e.g. because you need to change just C(user.email) in
/etc/.git/config). Solutions involving M(command) are cumbersome or
don't work correctly in check mode.
options:
list_all:
description:
- List all settings (optionally limited to a given I(scope))
type: bool
default: 'no'
name:
description:
- The name of the setting. If no value is supplied, the value will
be read from the config if it has been set.
repo:
description:
- Path to a git repository for reading and writing values from a
specific repo.
scope:
description:
- Specify which scope to read/set values from. This is required
when setting config values. If this is set to local, you must
also specify the repo parameter. It defaults to system only when
not using I(list_all)=yes.
choices: [ "local", "global", "system" ]
value:
description:
- When specifying the name of a single setting, supply a value to
set that setting to the given value.
'''
EXAMPLES = '''
# Set some settings in ~/.gitconfig
- git_config:
name: alias.ci
scope: global
value: commit
- git_config:
name: alias.st
scope: global
value: status
# Or system-wide:
- git_config:
name: alias.remotev
scope: system
value: remote -v
- git_config:
name: core.editor
scope: global
value: vim
# scope=system is the default
- git_config:
name: alias.diffc
value: diff --cached
- git_config:
name: color.ui
value: auto
# Make etckeeper not complain when invoked by cron
- git_config:
name: user.email
repo: /etc
scope: local
value: 'root@{{ ansible_fqdn }}'
# Read individual values from git config
- git_config:
name: alias.ci
scope: global
# scope: system is also assumed when reading values, unless list_all=yes
- git_config:
name: alias.diffc
# Read all values from git config
- git_config:
list_all: yes
scope: global
# When list_all=yes and no scope is specified, you get configuration from all scopes
- git_config:
list_all: yes
# Specify a repository to include local settings
- git_config:
list_all: yes
repo: /path/to/repo.git
'''
RETURN = '''
---
config_value:
description: When list_all=no and value is not set, a string containing the value of the setting in name
returned: success
type: string
sample: "vim"
config_values:
description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
returned: success
type: dictionary
sample:
core.editor: "vim"
color.ui: "auto"
alias.diffc: "diff --cached"
alias.remotev: "remote -v"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def main():
    """Entry point: read or write a git configuration value.

    Reads the setting when no value is supplied, lists all settings with
    list_all=yes, and otherwise writes the given value (honoring check
    mode and reporting a diff).
    """
    module = AnsibleModule(
        argument_spec=dict(
            list_all=dict(required=False, type='bool', default=False),
            name=dict(type='str'),
            repo=dict(type='path'),
            scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
            value=dict(required=False)
        ),
        mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
        required_if=[('scope', 'local', ['repo'])],
        required_one_of=[['list_all', 'name']],
        supports_check_mode=True,
    )
    git_path = module.get_bin_path('git', True)

    params = module.params
    # We check error message for a pattern, so we need to make sure the
    # messages appear in the form we're expecting. Set the locale to C to
    # ensure consistent messages.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    name = params['name'] or None

    if params['scope']:
        scope = params['scope']
    elif params['list_all']:
        scope = None
    else:
        scope = 'system'

    # Compare against None explicitly: an empty string or "0" is still a
    # legitimate value to write, whereas a plain truthiness test would
    # silently fall back to read mode.
    new_value = params['value'] if params['value'] is not None else None

    args = [git_path, "config", "--includes"]
    if params['list_all']:
        args.append('-l')
    if scope:
        args.append("--" + scope)
    if name:
        args.append(name)

    if scope == 'local':
        cwd = params['repo']
    elif params['list_all'] and params['repo']:
        # Include local settings from a specific repo when listing all available settings
        cwd = params['repo']
    else:
        # Run from root directory to avoid accidentally picking up any local config settings
        cwd = "/"

    # Pass the argument list directly (no shell join/re-split) so values
    # containing whitespace or shell metacharacters are handled safely.
    (rc, out, err) = module.run_command(args, cwd=cwd)
    if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
        # This just means nothing has been set at the given scope
        module.exit_json(changed=False, msg='', config_values={})
    elif rc >= 2:
        # If the return code is 1, it just means the option hasn't been set yet, which is fine.
        module.fail_json(rc=rc, msg=err, cmd=' '.join(args))

    if params['list_all']:
        values = out.rstrip().splitlines()
        config_values = {}
        for value in values:
            k, v = value.split('=', 1)
            config_values[k] = v
        module.exit_json(changed=False, msg='', config_values=config_values)
    elif new_value is None:
        # Read mode: report the current value of the setting.
        module.exit_json(changed=False, msg='', config_value=out.rstrip())
    else:
        old_value = out.rstrip()
        if old_value == new_value:
            module.exit_json(changed=False, msg="")

    if not module.check_mode:
        set_args = args + [new_value]
        (rc, out, err) = module.run_command(set_args, cwd=cwd)
        if err:
            module.fail_json(rc=rc, msg=err, cmd=' '.join(set_args))

    module.exit_json(
        msg='setting changed',
        diff=dict(
            before_header=' '.join(args),
            before=old_value + "\n",
            after_header=' '.join(args),
            after=new_value + "\n"
        ),
        changed=True
    )


if __name__ == '__main__':
    main()
| gpl-3.0 |
yarikoptic/seaborn | doc/sphinxext/ipython_console_highlighting.py | 78 | 4181 | """reST directive for syntax-highlighting ipython interactive sessions.
XXX - See what improvements can be made based on the new (as of Sept 2009)
'pycon' lexer for the python console. At the very least it will give better
highlighted tracebacks.
"""
#-----------------------------------------------------------------------------
# Needed modules
# Standard library
import re
# Third party
from pygments.lexer import Lexer, do_insertions
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
PythonTracebackLexer)
from pygments.token import Comment, Generic
from sphinx import highlighting
#-----------------------------------------------------------------------------
# Global constants
line_re = re.compile('.*?\n')
#-----------------------------------------------------------------------------
# Code begins - classes and functions
class IPythonConsoleLexer(Lexer):
    """
    For IPython console output or doctests, such as:

    .. sourcecode:: ipython

      In [1]: a = 'foo'

      In [2]: a
      Out[2]: 'foo'

      In [3]: print(a)
      foo

      In [4]: 1 / 0

    Notes:

      - Tracebacks are not currently supported.

      - It assumes the default IPython prompts, not customized ones.
    """
    name = 'IPython console session'
    aliases = ['ipython']
    mimetypes = ['text/x-ipython-console']
    # Prompt patterns.  Continuation lines ("   ...:") may follow either an
    # input or an output prompt, so both alternations include them.
    input_prompt = re.compile("(In \[[0-9]+\]: )|(   \.\.\.+:)")
    output_prompt = re.compile("(Out\[[0-9]+\]: )|(   \.\.\.+:)")
    continue_prompt = re.compile("   \.\.\.+:")
    tb_start = re.compile("\-+")

    def get_tokens_unprocessed(self, text):
        """Tokenize *text*, delegating code fragments to the Python lexer.

        Input lines are accumulated in ``curcode`` while their prompt tokens
        are recorded in ``insertions``; whenever a non-code line is reached
        the accumulated code is lexed and the prompts are re-inserted at the
        recorded offsets via ``do_insertions``.
        """
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output. We should probably make
                # our own token, but error is typicaly in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                # Plain output line: flush any pending code first so token
                # offsets stay consistent.
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        # Flush whatever code is still pending at end of input.
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows that the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
pass
#-----------------------------------------------------------------------------
# Register the extension as a valid pygments lexer
highlighting.lexers['ipython'] = IPythonConsoleLexer()
| bsd-3-clause |
statik/grr | gui/api_plugins/artifact_test.py | 2 | 4077 | #!/usr/bin/env python
"""This modules contains tests for artifact API renderer."""
import os
from grr.gui import api_test_lib
from grr.gui.api_plugins import artifact as artifact_plugin
from grr.lib import artifact
from grr.lib import artifact_registry
from grr.lib import artifact_test
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
class ApiArtifactsRendererTest(artifact_test.ArtifactBaseTest):
    """Test for ApiArtifactsRenderer."""

    def setUp(self):
        super(ApiArtifactsRendererTest, self).setUp()
        self.renderer = artifact_plugin.ApiArtifactsRenderer()
        self.LoadTestArtifacts()

    def _renderEmptyArtifacts(self):
        """Render after clearing all artifact sources."""
        artifact_registry.REGISTRY.ClearSources()
        return self.renderer.Render(self.renderer.args_type(), token=self.token)

    def testNoArtifacts(self):
        rendering = self._renderEmptyArtifacts()
        self.assertEqual(rendering,
                         {"count": 0, "items": [], "offset": 0, "total_count": 0})

    def _renderTestArtifacts(self):
        """Render with the test artifacts loaded in setUp."""
        return self.renderer.Render(self.renderer.args_type(), token=self.token)

    def testPrepackagedArtifacts(self):
        rendering = self._renderTestArtifacts()
        # we know there are some prepackaged artifacts
        self.assertTrue(rendering)
        # Look for a prepackaged artifact we know to exist.  Initializing to
        # None (and breaking on the first match) makes a missing artifact
        # fail the assertion below instead of raising a NameError.
        fake_artifact = None
        for item in rendering["items"]:
            if item["value"]["artifact"]["value"]["name"]["value"] == "FakeArtifact":
                fake_artifact = item["value"]
                break

        self.assertTrue(fake_artifact)
        self.assertIn("is_custom", fake_artifact)
        self.assertFalse(fake_artifact["is_custom"]["value"])

        for required_key in ("doc",
                             "labels",
                             "supported_os"):
            self.assertIn(required_key, fake_artifact["artifact"]["value"])
class ArtifactRendererRegressionTest(
    api_test_lib.ApiCallRendererRegressionTest):
    """Regression test exercising the ApiArtifactRenderer endpoint."""

    renderer = "ApiArtifactRenderer"

    def Run(self):
        # Start from a clean registry so only the test artifact is visible.
        artifact_registry.REGISTRY.ClearSources()
        test_artifacts_file = os.path.join(
            config_lib.CONFIG["Test.data_dir"], "artifacts", "test_artifact.json")
        artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)

        self.Check("GET", "/api/artifacts")
class ApiArtifactsDeleteRendererTest(test_lib.GRRBaseTest):
    """Tests for ApiArtifactsDeleteRenderer."""

    def setUp(self):
        super(ApiArtifactsDeleteRendererTest, self).setUp()
        self.renderer = artifact_plugin.ApiArtifactsDeleteRenderer()

    def UploadTestArtifacts(self):
        """Load the test artifact definitions into a clean registry."""
        artifact_registry.REGISTRY.ClearRegistry()
        test_artifacts_file = os.path.join(
            config_lib.CONFIG["Test.data_dir"], "artifacts", "test_artifacts.json")
        with open(test_artifacts_file) as fd:
            artifact.UploadArtifactYamlFile(fd.read(), token=self.token)

    def testDeletesArtifactsWithSpecifiedNames(self):
        self.UploadTestArtifacts()
        count = len(artifact_registry.REGISTRY.GetArtifacts(
            reload_datastore_artifacts=True))

        args = self.renderer.args_type(names=["TestFilesArtifact",
                                              "WMIActiveScriptEventConsumer"])
        response = self.renderer.Render(args, token=self.token)
        self.assertEqual(response, dict(status="OK"))

        artifact_registry.REGISTRY.ClearRegistry()
        new_count = len(artifact_registry.REGISTRY.GetArtifacts(
            reload_datastore_artifacts=True))
        # Check that we deleted exactly 2 artifacts.
        self.assertEqual(new_count, count - 2)

    def testDeleteDependency(self):
        self.UploadTestArtifacts()
        args = self.renderer.args_type(names=["TestAggregationArtifact"])
        with self.assertRaises(ValueError):
            self.renderer.Render(args, token=self.token)

    def testDeleteNonExistentArtifact(self):
        self.UploadTestArtifacts()
        args = self.renderer.args_type(names=["NonExistentArtifact"])
        # Use assertRaises as a context manager directly rather than binding
        # it to a local variable first.
        with self.assertRaises(ValueError) as e:
            self.renderer.Render(args, token=self.token)
        self.assertEqual(str(e.exception),
                         "Artifact(s) to delete (NonExistentArtifact) not found.")
def main(argv):
    # Delegate to the shared GRR test runner.
    test_lib.main(argv)


if __name__ == "__main__":
    flags.StartMain(main)
| apache-2.0 |
scode/pants | src/python/pants/backend/python/tasks/checkstyle/future_compatibility.py | 3 | 2640 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
from pants.backend.python.tasks.checkstyle.common import CheckstylePlugin
from pants.subsystem.subsystem import Subsystem
# Warn on non 2.x/3.x compatible symbols:
# - basestring
# - xrange
#
# Methods:
# - .iteritems
# - .iterkeys
#
# Comprehension builtins
# - filter
# - map
# - range
#
# => Make sure that these are not assigned.
# Warn if they are assigned or returned directly from functions
#
# Class internals:
# __metaclass__
class FutureCompatibilitySubsystem(Subsystem):
    """Options for the future-compatibility style checker."""
    options_scope = 'pycheck-future-compat'

    @classmethod
    def register_options(cls, register):
        super(FutureCompatibilitySubsystem, cls).register_options(register)
        # A single opt-out flag; the checker itself takes no other options.
        register('--skip', action='store_true', default=False,
                 help='If enabled, skip this style checker.')
class FutureCompatibility(CheckstylePlugin):
    """Warns about behavior that will likely break when moving to Python 3.x"""
    # dict.iter* methods that are removed in Python 3.
    BAD_ITERS = frozenset(('iteritems', 'iterkeys', 'itervalues'))
    # Builtins that are removed in Python 3.
    BAD_FUNCTIONS = frozenset(('xrange',))
    # Type names that no longer exist in Python 3.
    BAD_NAMES = frozenset(('basestring', 'unicode'))
    subsystem = FutureCompatibilitySubsystem

    def nits(self):
        """Yield an error/warning nit for every 2.x-only construct found."""
        # Calls: obj.iteritems()/... and removed builtins like xrange().
        for call in self.iter_ast_types(ast.Call):
            if isinstance(call.func, ast.Attribute):
                if call.func.attr in self.BAD_ITERS:
                    yield self.error(
                        'T602',
                        '{attr} disappears in Python 3.x. Use non-iter instead.'.format(attr=call.func.attr),
                        call)
            elif isinstance(call.func, ast.Name):
                if call.func.id in self.BAD_FUNCTIONS:
                    yield self.error(
                        'T603',
                        'Please avoid {func_id} as it disappears in Python 3.x.'.format(func_id=call.func.id),
                        call)
        # Bare name reads of removed types (basestring, unicode).
        for name in self.iter_ast_types(ast.Name):
            if name.id in self.BAD_NAMES:
                yield self.error(
                    'T604', 'Please avoid {id} as it disappears in Python 3.x.'.format(id=name.id), name)
        # Class-level ``__metaclass__ = ...`` assignments.
        for class_def in self.iter_ast_types(ast.ClassDef):
            for node in class_def.body:
                if not isinstance(node, ast.Assign):
                    continue
                for name in node.targets:
                    if not isinstance(name, ast.Name):
                        continue
                    if name.id == '__metaclass__':
                        yield self.warning('T605',
                            'This metaclass style is deprecated and gone entirely in Python 3.x.', name)
| apache-2.0 |
jespino/GalaxduS | libs/screens/newgame.py | 1 | 2534 | from galaxdustk.buttons import Button, CircularSelectButton, SelectButton
from galaxdustk.label import Label
from galaxdustk.screen import BaseScreen
import handlers
from gettext import gettext as _
class NewGameScreen(BaseScreen):
    """Game-setup screen: choose a specie, a galaxy size and a player color,
    then start the game or go back to the menu."""

    background_path = 'data/images/backgrounds/menu.png'

    def __init__(self, context):
        super(NewGameScreen, self).__init__(context)
        screenrect = self.context.screen.get_rect()
        # Build the three option rows and the navigation buttons.
        self._add_specie_row()
        self._add_size_row()
        self._add_color_row()
        self._add_nav_buttons(screenrect)

    def _make_row_label(self, text, centery):
        """Create and register the left-aligned caption of an option row."""
        label = Label(text)
        label.rect.left = 10
        label.rect.centery = centery
        self.sprites.add(label)
        return label

    def _add_specie_row(self):
        """Row of circular buttons, one per playable specie (group 1)."""
        label = self._make_row_label(_('Select your specie:'), 100)
        for index in range(1, 4):
            specie = CircularSelectButton(self, 'data/images/species/specie%d.png' % index)
            specie.rect.left = (label.rect.right - 100) + (index * 125)
            specie.rect.centery = 100
            specie.group_id = 1
            specie.value = index
            self.sprites.add(specie)

    def _add_size_row(self):
        """Row of galaxy-size buttons (group 2); value is the size id."""
        label = self._make_row_label(_('Select the galaxy size:'), 200)
        for value, caption in [(1, _('Small')), (2, _('Medium')), (3, _('Big'))]:
            button = SelectButton(self, caption, width=100)
            button.rect.left = (label.rect.right - 100) + (value * 125)
            button.rect.centery = 200
            button.group_id = 2
            button.value = value
            self.sprites.add(button)

    def _add_color_row(self):
        """Row of player-color buttons (group 3); value is the RGB tuple."""
        label = self._make_row_label(_('Select your color:'), 300)
        for position, caption, rgb in [(1, _('Red'), (255, 0, 0)),
                                       (2, _('Green'), (0, 255, 0)),
                                       (3, _('Blue'), (0, 0, 255))]:
            button = SelectButton(self, caption, width=100)
            button.rect.left = (label.rect.right - 100) + (position * 125)
            button.rect.centery = 300
            button.group_id = 3
            button.value = rgb
            self.sprites.add(button)

    def _add_nav_buttons(self, screenrect):
        """Bottom-corner navigation: start the game or return to the menu."""
        begin_game = Button(self, _("Begin the game"))
        begin_game.rect.right = screenrect.right - 10
        begin_game.rect.bottom = screenrect.bottom - 10
        begin_game.connect("clicked", handlers.startgame)
        self.sprites.add(begin_game)

        back = Button(self, _("Back"))
        back.rect.left = screenrect.left + 10
        back.rect.bottom = screenrect.bottom - 10
        back.connect("clicked", handlers.go_to_menu)
        self.sprites.add(back)
melphi/algobox | python/algobox/src/algobox/client/generated/datacollector/models/__init__.py | 1 | 1283 | # coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# import models into model package
from .connection_info import ConnectionInfo
from .connection_registration_dto import ConnectionRegistrationDto
from .connection_registration_request_dto import ConnectionRegistrationRequestDto
from .health_status import HealthStatus
from .instrument_info import InstrumentInfo
from .instrument_subscription_dto import InstrumentSubscriptionDto
from .price_tick_stage import PriceTickStage
from .string_value_dto import StringValueDto
| apache-2.0 |
jairideout/scikit-bio | skbio/sequence/tests/test_base.py | 10 | 1423 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
from skbio.sequence._base import ElasticLines
class TestElasticLines(unittest.TestCase):
    """Unit tests for the ``ElasticLines`` string-building helper."""

    def setUp(self):
        # A fresh builder for every test method.
        self.el = ElasticLines()

    def test_empty(self):
        """A builder with no content renders as the empty string."""
        self.assertEqual(self.el.to_str(), '')

    def test_add_line(self):
        """A single added line is rendered verbatim."""
        self.el.add_line('foo')
        self.assertEqual(self.el.to_str(), 'foo')

    def test_add_lines(self):
        """Multiple lines added at once are joined with newlines."""
        # Fix: the original re-created self.el here, which was redundant --
        # setUp() already provides a fresh instance for each test.
        self.el.add_lines(['alice', 'bob', 'carol'])
        self.assertEqual(self.el.to_str(), 'alice\nbob\ncarol')

    def test_add_separator(self):
        """Separators stretch to the width of the widest line seen."""
        self.el.add_separator()
        # A separator with no surrounding content renders as nothing.
        self.assertEqual(self.el.to_str(), '')

        self.el.add_line('foo')
        # Separator width tracks the longest line so far ('foo' -> 3 dashes).
        self.assertEqual(self.el.to_str(), '---\nfoo')

        self.el.add_separator()
        self.el.add_lines(['bar', 'bazzzz'])
        self.el.add_separator()
        # All separators expand to the final maximum width (6 characters).
        self.assertEqual(self.el.to_str(),
                         '------\nfoo\n------\nbar\nbazzzz\n------')
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
goyalankit/ride-agg | requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Language-model descriptor for Greek text in the ISO-8859-7 encoding,
# consumed by chardet's single-byte charset prober.
Latin7GreekModel = {
    'charToOrderMap': Latin7_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    # Fraction of observed character sequences covered by the first 512
    # frequency orders (see the statistics comment above the model table).
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-7"
}
# Same Greek language model paired with the windows-1253 byte-to-order map.
Win1253GreekModel = {
    'charToOrderMap': win1253_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    # Shared with Latin7GreekModel: the precedence matrix (and therefore the
    # typical positive ratio) is identical; only the byte mapping differs.
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "windows-1253"
}
# flake8: noqa
| apache-2.0 |
maggnus/eloipool-crypto | jsonrpc_getwork.py | 14 | 3506 | # Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import b2a_hex
from jsonrpcserver import JSONRPCHandler
import logging
# The optional 'midstate' extension precomputes SHA-256 midstates for
# getwork responses.  Sanity-check it against a known vector at import time;
# if it is missing or broken, serve work without midstates (the bare
# except: deliberately catches both ImportError and AssertionError).
try:
    import midstate
    # Known-answer self-test: first 8 midstate words for a >64-byte input.
    assert midstate.SHA256(b'This is just a test, ignore it. I am making it over 64-bytes long.')[:8] == (0x755f1a94, 0x999b270c, 0xf358c014, 0xfd39caeb, 0x0dcc9ebc, 0x4694cd1a, 0x8e95678e, 0x75fac450)
except:
    logging.getLogger('jsonrpc_getwork').warning('Error importing \'midstate\' module; work will not provide midstates')
    midstate = None
from struct import pack
from util import RejectedShare, swap32
# Headers already issued (keyed on the header with the nonce field removed),
# used to detect accidentally-duplicated work units.
_CheckForDupesHACK = {}
# Genuine duplicates encountered: uhdr -> (first work unit, second work unit).
_RealDupes = {}
class _getwork:
    """Mixin implementing the JSON-RPC ``getwork`` mining protocol.

    Registered onto ``JSONRPCHandler`` via ``JSONRPCHandler._register``
    below, so ``self`` in the ``doJSON_*`` methods is a JSONRPCHandler
    instance at runtime (hence ``final_init`` taking ``server`` directly).
    """

    def final_init(server):
        # Precompute the little-endian hex share target once at server
        # startup and bake it into the shared getwork response template.
        ShareTargetHex = '%064x' % (server.ShareTarget,)
        ShareTargetHexLE = b2a_hex(bytes.fromhex(ShareTargetHex)[::-1]).decode('ascii')
        JSONRPCHandler.getwork_rv_template['target'] = ShareTargetHexLE

    # Static parts of a getwork reply.  'data' here is the padding appended
    # after the 80-byte block header; 'hash1' is a legacy constant kept for
    # protocol compatibility.
    getwork_rv_template = {
        'data': '000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
        'target': 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000',
        'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
        'submitold': True,
    }

    def doJSON_getwork(self, data=None):
        """Handle a ``getwork`` call: with data, submit a share; without,
        issue a new work unit.  Returns the getwork response dict."""
        # Fix: was 'if not data is None:' -- same meaning, idiomatic form.
        if data is not None:
            return self.doJSON_submitwork(data)
        rv = dict(self.getwork_rv_template)
        (hdr, x, target) = self.server.getBlockHeader(self.Username)

        # FIXME: this assumption breaks with internal rollntime
        # NOTE: noncerange needs to set nonce to start value at least
        global _CheckForDupesHACK
        # Header with the nonce field (bytes 68..72) cut out: two work units
        # differing only in nonce are still duplicates.
        uhdr = hdr[:68] + hdr[72:]
        if uhdr in _CheckForDupesHACK:
            _RealDupes[uhdr] = (_CheckForDupesHACK[uhdr], (hdr, x))
            raise self.server.RaiseRedFlags(RuntimeError('issuing duplicate work'))
        _CheckForDupesHACK[uhdr] = (hdr, x)

        data = b2a_hex(swap32(hdr)).decode('utf8') + rv['data']
        # TODO: endian shuffle etc
        rv['data'] = data

        # Provide a midstate unless the module is unavailable or the client
        # negotiated (via extensions/quirks) that it does not want one.
        if midstate and 'midstate' not in self.extensions and 'midstate' not in self.quirks:
            h = midstate.SHA256(hdr)[:8]
            rv['midstate'] = b2a_hex(pack('<LLLLLLLL', *h)).decode('ascii')

        # Per-work share target, little-endian hex as getwork expects.
        ShareTargetHex = '%064x' % (target,)
        ShareTargetHexLE = b2a_hex(bytes.fromhex(ShareTargetHex)[::-1]).decode('ascii')
        rv['target'] = ShareTargetHexLE

        self._JSONHeaders['X-Roll-NTime'] = 'expire=120'

        return rv

    def doJSON_submitwork(self, datax):
        """Validate and record a submitted share.

        Returns True when accepted; on rejection, surfaces the reason in
        the X-Reject-Reason response header and returns False.
        """
        data = swap32(bytes.fromhex(datax))[:80]
        share = {
            'data': data,
            '_origdata' : datax,
            'username': self.Username,
            'remoteHost': self.remoteHost,
            'userAgent': self.UA,
            'submitProtocol': 'getwork',
        }
        try:
            self.server.receiveShare(share)
        except RejectedShare as rej:
            self._JSONHeaders['X-Reject-Reason'] = str(rej)
            return False
        return True
JSONRPCHandler._register(_getwork)
| agpl-3.0 |
lolz0r/pytorch-es | envs.py | 2 | 2404 | # Taken from https://github.com/ikostrikov/pytorch-a3c
from __future__ import absolute_import, division, print_function
import numpy as np
import gym
from gym.spaces.box import Box
from universe import vectorized
from universe.wrappers import Unvectorize, Vectorize
import cv2
# Taken from https://github.com/openai/universe-starter-agent
def create_atari_env(env_id):
    """Build a gym environment, wrapping image-based ones with the
    42x42 rescale + normalization preprocessing pipeline."""
    env = gym.make(env_id)
    if len(env.observation_space.shape) <= 1:
        # Low-dimensional observation vector: nothing to preprocess.
        print('No preprocessing because env is too small')
        return env
    print('Preprocessing env')
    # Vectorize first, apply the frame transforms, then unvectorize so the
    # caller sees an ordinary single-instance environment.
    for wrapper in (Vectorize, AtariRescale42x42, NormalizedEnv, Unvectorize):
        env = wrapper(env)
    return env
def _process_frame42(frame):
    """Crop, downsample to 42x42, grayscale and rescale a raw Atari frame
    into a float32 array of shape (1, 42, 42) with values in [0, 1]."""
    # Crop the 160x160 playfield (drops the score bar and side padding).
    cropped = frame[34:34 + 160, :160]
    # Resize by half, then down to 42x42 (essentially mipmapping). If
    # we resize directly we lose pixels that, when mapped to 42x42,
    # aren't close enough to the pixel boundary.
    small = cv2.resize(cv2.resize(cropped, (80, 80)), (42, 42))
    # Average the color channels, then rescale byte values into [0, 1].
    gray = small.mean(2).astype(np.float32) * (1.0 / 255.0)
    return np.reshape(gray, [1, 42, 42])
class AtariRescale42x42(vectorized.ObservationWrapper):
    """Vectorized wrapper converting raw frames to (1, 42, 42) floats."""

    def __init__(self, env=None):
        super(AtariRescale42x42, self).__init__(env)
        # Preprocessed observations are single-channel values in [0, 1].
        self.observation_space = Box(0.0, 1.0, [1, 42, 42])

    def _observation(self, observation_n):
        # Apply the frame preprocessing to every observation in the batch.
        return [_process_frame42(frame) for frame in observation_n]
class NormalizedEnv(vectorized.ObservationWrapper):
    # Online observation normalization: maintains exponential moving averages
    # of the per-frame mean and std and standardizes each observation with
    # the bias-corrected estimates.

    def __init__(self, env=None):
        super(NormalizedEnv, self).__init__(env)
        # Running (biased) EMA accumulators for the observation statistics.
        self.state_mean = 0
        self.state_std = 0
        # EMA decay factor; close to 1 so the statistics adapt slowly.
        self.alpha = 0.9999
        # NOTE(review): despite the name, this counts *total observations
        # seen*, not an episode-length cap -- it is only used for the EMA
        # bias correction below.  Consider renaming (e.g. num_steps).
        self.max_episode_length = 0

    def _observation(self, observation_n):
        for observation in observation_n:
            self.max_episode_length += 1
            # EMA update of the scalar mean/std of this frame's pixels.
            self.state_mean = self.state_mean * self.alpha + \
                observation.mean() * (1 - self.alpha)
            self.state_std = self.state_std * self.alpha + \
                observation.std() * (1 - self.alpha)

        # Bias correction (as in Adam): divide by 1 - alpha**t so early
        # estimates are not pulled toward the zero initialization.
        denom = (1 - pow(self.alpha, self.max_episode_length))
        unbiased_mean = self.state_mean / denom
        unbiased_std = self.state_std / denom

        # Standardize; the epsilon guards against division by a ~zero std.
        return [(observation - unbiased_mean) / (unbiased_std + 1e-8)
                for observation in observation_n]
| mit |
lichong012245/django-lfs-0.7.8 | lfs/integrationtests/windmilltests/managetests/test_manage.py | 5 | 1684 | from windmill.authoring import WindmillTestClient
def setup_module(module):
    # No module-level fixtures are needed for these Windmill tests.
    pass
def test_manage_setting_of_price_calculator():
    # End-to-end Windmill browser test: toggle a product's price calculator
    # in the LFS management interface and verify the storefront disclaimer
    # flips from "inc. VAT" to "exc. VAT".
    client = WindmillTestClient(__name__)

    client.open(url="/product/chocolate")
    client.waits.forPageLoad(timeout=u'20000')

    # check that product includes vat
    client.asserts.assertText(xpath=u"//form[@id='product-form']/div[5][@class='prices']/div[2][@class='price-disclaimer']", validator=u'*inc. VAT')

    # open the manage interface (log in as admin/admin)
    client.open(url="/manage/")
    client.waits.forPageLoad(timeout=u'20000')
    client.type(text=u'admin', id=u'id_username')
    client.type(text=u'admin', id=u'id_password')
    client.click(xpath=u"//div[@id='content']/div/div[1]/form/button")
    client.waits.forPageLoad(timeout=u'20000')

    # navigate to the Chocolate product's management page
    client.waits.forElement(link=u'Products', timeout=u'8000')
    client.click(link=u'Products')
    client.waits.forPageLoad(timeout=u'20000')
    client.click(link="Chocolate")
    client.waits.forPageLoad(timeout=u'20000')

    # switch the price calculator to "Price excludes tax" and save
    client.waits.forElement(timeout=u'8000', id=u'id_price_calculator')
    client.click(id=u'id_price_calculator')
    client.select(option=u'Price excludes tax', id=u'id_price_calculator')
    client.click(xpath=u"//form[@id='product-data-form']/fieldset[4]/div[4]/div[2]")
    client.click(value=u'Save Data')

    # Check that price excludes vat now
    client.open(url="/product/chocolate")
    client.waits.forPageLoad(timeout=u'20000')
    # check that the product now excludes vat
    client.asserts.assertText(xpath=u"//form[@id='product-form']/div[5][@class='prices']/div[2][@class='price-disclaimer']", validator=u'*exc. VAT')
def teardown_module(module):
    # Nothing to clean up.
    pass
| bsd-3-clause |
MSeifert04/astropy | astropy/table/tests/test_operations.py | 3 | 67327 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import pytest
import numpy as np
from astropy.tests.helper import catch_warnings
from astropy.table import Table, QTable, TableMergeError, Column, MaskedColumn
from astropy.table.operations import _get_out_class
from astropy import units as u
from astropy.utils import metadata
from astropy.utils.metadata import MergeConflictError
from astropy import table
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy.io.misc.asdf.tags.helpers import skycoord_equal
def sort_eq(list1, list2):
    """Return True if the two sequences hold the same elements, ignoring order."""
    first, second = sorted(list1), sorted(list2)
    return first == second
class TestJoin():
    def _setup(self, t_cls=Table):
        # Build two small tables sharing key columns 'a' and 'b' (t1 carries
        # payload column 'c', t2 carries 'd'), plus t3 as a copy of t2 whose
        # metadata deliberately conflicts with t1's.
        lines1 = [' a b c ',
                  ' 0 foo L1',
                  ' 1 foo L2',
                  ' 1 bar L3',
                  ' 2 bar L4']
        lines2 = [' a b d ',
                  ' 1 foo R1',
                  ' 1 foo R2',
                  ' 2 bar R3',
                  ' 4 bar R4']
        self.t1 = t_cls.read(lines1, format='ascii')
        self.t2 = t_cls.read(lines2, format='ascii')
        self.t3 = t_cls(self.t2, copy=True)
        # Table-level meta: t1 and t2 merge cleanly; t3 conflicts with t1
        # on 'b', 'c' and 'd'.
        self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        self.t3.meta.update(OrderedDict([('b', 3), ('c', [1, 2]), ('d', 2), ('a', 1)]))
        # Expected merged metadata for a t1 + t2 join.
        self.meta_merge = OrderedDict([('b', [1, 2, 3, 4]),
                                       ('c', {'a': 1, 'b': 1}),
                                       ('d', 1),
                                       ('a', 1)])
    def test_table_meta_merge(self, operation_table_type):
        # Joining t1 and t2 merges their table-level meta dicts.
        self._setup(operation_table_type)
        out = table.join(self.t1, self.t2, join_type='inner')
        assert out.meta == self.meta_merge
    def test_table_meta_merge_conflict(self, operation_table_type):
        # Conflicting meta between t1 and t3: the default and 'warn'
        # policies emit warnings and keep the right-hand table's values,
        # 'silent' keeps them quietly, 'error' raises, and an unknown
        # policy name is rejected with ValueError.
        self._setup(operation_table_type)

        with catch_warnings() as w:
            out = table.join(self.t1, self.t3, join_type='inner')
        # One warning per conflicting meta key ('b', 'c', 'd').
        assert len(w) == 3

        assert out.meta == self.t3.meta

        with catch_warnings() as w:
            out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='warn')
        assert len(w) == 3

        assert out.meta == self.t3.meta

        with catch_warnings() as w:
            out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='silent')
        assert len(w) == 0

        assert out.meta == self.t3.meta

        with pytest.raises(MergeConflictError):
            out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='error')

        with pytest.raises(ValueError):
            out = table.join(self.t1, self.t3, join_type='inner', metadata_conflicts='nonsense')
def test_both_unmasked_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Basic join with default parameters (inner join on common keys)
t12 = table.join(t1, t2)
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Table meta merged properly
assert t12.meta == self.meta_merge
def test_both_unmasked_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left')
assert t12.has_masked_columns is True
assert t12.masked is False
for name in ('a', 'b', 'c'):
assert type(t12[name]) is Column
assert type(t12['d']) is MaskedColumn
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3'])
# Right join
t12 = table.join(t1, t2, join_type='right')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer')
assert t12.has_masked_columns is True
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b c d ',
'--- --- --- ---',
' 0 foo L1 --',
' 1 bar L3 --',
' 1 foo L2 R1',
' 1 foo L2 R2',
' 2 bar L4 R3',
' 4 bar -- R4'])
# Check that the common keys are 'a', 'b'
t12a = table.join(t1, t2, join_type='outer')
t12b = table.join(t1, t2, join_type='outer', keys=['a', 'b'])
assert np.all(t12a.as_array() == t12b.as_array())
def test_both_unmasked_single_key_inner(self, operation_table_type):
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Inner join on 'a' column
t12 = table.join(t1, t2, keys='a')
assert type(t12) is operation_table_type
assert type(t12['a']) is type(t1['a'])
assert type(t12['b_1']) is type(t1['b'])
assert type(t12['c']) is type(t1['c'])
assert type(t12['b_2']) is type(t2['b'])
assert type(t12['d']) is type(t2['d'])
assert t12.masked is False
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
def test_both_unmasked_single_key_left_right_outer(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t2 = self.t2
# Left join
t12 = table.join(t1, t2, join_type='left', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3'])
# Right join
t12 = table.join(t1, t2, join_type='right', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
# Outer join
t12 = table.join(t1, t2, join_type='outer', keys='a')
assert t12.has_masked_columns is True
assert sort_eq(t12.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 0 foo L1 -- --',
' 1 foo L2 foo R1',
' 1 foo L2 foo R2',
' 1 bar L3 foo R1',
' 1 bar L3 foo R2',
' 2 bar L4 bar R3',
' 4 -- -- bar R4'])
def test_masked_unmasked(self, operation_table_type):
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
self._setup(operation_table_type)
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
# Result table is never masked
t1m2 = table.join(t1m, t2, join_type='inner')
assert t1m2.masked is False
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2))
# Mask out some values in left table and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t1m2 = table.join(t1m, t2, join_type='inner', keys='a')
assert sort_eq(t1m2.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar R3'])
t21m = table.join(t2, t1m, join_type='inner', keys='a')
assert sort_eq(t21m.pformat(), [' a b_1 d b_2 c ',
'--- --- --- --- ---',
' 1 foo R2 -- L2',
' 1 foo R2 bar --',
' 1 foo R1 -- L2',
' 1 foo R1 bar --',
' 2 bar R3 bar L4'])
def test_masked_masked(self, operation_table_type):
self._setup(operation_table_type)
"""Two masked tables"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t1m = operation_table_type(self.t1, masked=True)
t2 = self.t2
t2m = operation_table_type(self.t2, masked=True)
# Result table is never masked but original column types are preserved
t1m2m = table.join(t1m, t2m, join_type='inner')
assert t1m2m.masked is False
for col in t1m2m.itercols():
assert type(col) is MaskedColumn
# Result should match non-masked result
t12 = table.join(t1, t2)
assert np.all(t12.as_array() == np.array(t1m2m))
# Mask out some values in both tables and make sure they propagate
t1m['b'].mask[1] = True
t1m['c'].mask[2] = True
t2m['d'].mask[2] = True
t1m2m = table.join(t1m, t2m, join_type='inner', keys='a')
assert sort_eq(t1m2m.pformat(), [' a b_1 c b_2 d ',
'--- --- --- --- ---',
' 1 -- L2 foo R1',
' 1 -- L2 foo R2',
' 1 bar -- foo R1',
' 1 bar -- foo R2',
' 2 bar L4 bar --'])
def test_classes(self):
"""Ensure that classes and subclasses get through as expected"""
class MyCol(Column):
pass
class MyMaskedCol(MaskedColumn):
pass
t1 = Table()
t1['a'] = MyCol([1])
t1['b'] = MyCol([2])
t1['c'] = MyMaskedCol([3])
t2 = Table()
t2['a'] = Column([1, 2])
t2['d'] = MyCol([3, 4])
t2['e'] = MyMaskedCol([5, 6])
t12 = table.join(t1, t2, join_type='inner')
for name, exp_type in (('a', MyCol), ('b', MyCol), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
assert type(t12[name] is exp_type)
t21 = table.join(t2, t1, join_type='left')
# Note col 'b' gets upgraded from MyCol to MaskedColumn since it needs to be
# masked, but col 'c' stays since MyMaskedCol supports masking.
for name, exp_type in (('a', MyCol), ('b', MaskedColumn), ('c', MyMaskedCol),
('d', MyCol), ('e', MyMaskedCol)):
assert type(t12[name] is exp_type)
def test_col_rename(self, operation_table_type):
self._setup(operation_table_type)
"""
Test auto col renaming when there is a conflict. Use
non-default values of uniq_col_name and table_names.
"""
t1 = self.t1
t2 = self.t2
t12 = table.join(t1, t2, uniq_col_name='x_{table_name}_{col_name}_y',
table_names=['L', 'R'], keys='a')
assert t12.colnames == ['a', 'x_L_b_y', 'c', 'x_R_b_y', 'd']
def test_rename_conflict(self, operation_table_type):
self._setup(operation_table_type)
"""
Test that auto-column rename fails because of a conflict
with an existing column
"""
t1 = self.t1
t2 = self.t2
t1['b_1'] = 1 # Add a new column b_1 that will conflict with auto-rename
with pytest.raises(TableMergeError):
table.join(t1, t2, keys='a')
def test_missing_keys(self, operation_table_type):
self._setup(operation_table_type)
"""Merge on a key column that doesn't exist"""
t1 = self.t1
t2 = self.t2
with pytest.raises(TableMergeError):
table.join(t1, t2, keys=['a', 'not there'])
def test_bad_join_type(self, operation_table_type):
self._setup(operation_table_type)
"""Bad join_type input"""
t1 = self.t1
t2 = self.t2
with pytest.raises(ValueError):
table.join(t1, t2, join_type='illegal value')
def test_no_common_keys(self, operation_table_type):
self._setup(operation_table_type)
"""Merge tables with no common keys"""
t1 = self.t1
t2 = self.t2
del t1['a']
del t1['b']
del t2['a']
del t2['b']
with pytest.raises(TableMergeError):
table.join(t1, t2)
def test_masked_key_column(self, operation_table_type):
self._setup(operation_table_type)
"""Merge on a key column that has a masked element"""
if operation_table_type is QTable:
pytest.xfail('Quantity columns do not support masking.')
t1 = self.t1
t2 = operation_table_type(self.t2, masked=True)
table.join(t1, t2) # OK
t2['a'].mask[0] = True
with pytest.raises(TableMergeError):
table.join(t1, t2)
    def test_col_meta_merge(self, operation_table_type):
        # Check merging of per-column info attributes (unit, format,
        # description, meta) for key columns, and pass-through of the
        # attributes of renamed non-key columns.
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t2.rename_column('d', 'c')  # force col conflict and renaming
        meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
        meta2 = OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        # Key col 'a': conflicting units (cm vs m); per the asserts below the
        # merged column ends up with 'm', with a MergeConflictWarning for Table
        t1['a'].unit = 'cm'
        t2['a'].unit = 'm'
        # Key col 'b', take first value 't1_b'
        t1['b'].info.description = 't1_b'
        # Key col 'b', take first non-empty value 't1_b'
        t2['b'].info.format = '%6s'
        # Key col 'a', should be merged meta
        t1['a'].info.meta = meta1
        t2['a'].info.meta = meta2
        # Key col 'b', should be meta2
        t2['b'].info.meta = meta2
        # All these should pass through
        t1['c'].info.format = '%3s'
        t1['c'].info.description = 't1_c'
        t2['c'].info.format = '%6s'
        t2['c'].info.description = 't2_c'
        with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
            t12 = table.join(t1, t2, keys=['a', 'b'])
        if operation_table_type is Table:
            assert warning_lines[0].category == metadata.MergeConflictWarning
            assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
                    in str(warning_lines[0].message))
        else:
            # QTable converts on unit rather than warning
            assert len(warning_lines) == 0
        assert t12['a'].unit == 'm'
        assert t12['b'].info.description == 't1_b'
        assert t12['b'].info.format == '%6s'
        assert t12['a'].info.meta == self.meta_merge
        assert t12['b'].info.meta == meta2
        # Non-key col 'c' got renamed to 'c_1'/'c_2'; attributes pass through.
        assert t12['c_1'].info.format == '%3s'
        assert t12['c_1'].info.description == 't1_c'
        assert t12['c_2'].info.format == '%6s'
        assert t12['c_2'].info.description == 't2_c'
    def test_join_multidimensional(self, operation_table_type):
        self._setup(operation_table_type)
        # Regression test for #2984, which was an issue where join did not work
        # on multi-dimensional columns.
        t1 = operation_table_type()
        t1['a'] = [1, 2, 3]
        t1['b'] = np.ones((3, 4))  # 2-d column that must survive the join
        t2 = operation_table_type()
        t2['a'] = [1, 2, 3]
        t2['c'] = [4, 5, 6]
        t3 = table.join(t1, t2)
        # All columns, including the multi-dimensional 'b', come through intact.
        np.testing.assert_allclose(t3['a'], t1['a'])
        np.testing.assert_allclose(t3['b'], t1['b'])
        np.testing.assert_allclose(t3['c'], t2['c'])
    def test_join_multidimensional_masked(self, operation_table_type):
        self._setup(operation_table_type)
        """
        Test for outer join with multidimensional columns where masking is required.
        (Issue #4059).
        """
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        # Key columns share values 1 and 3 only.
        a = table.MaskedColumn([1, 2, 3], name='a')
        a2 = table.Column([1, 3, 4], name='a')
        b = table.MaskedColumn([[1, 2],
                                [3, 4],
                                [5, 6]],
                               name='b',
                               mask=[[1, 0],
                                     [0, 1],
                                     [0, 0]])
        c = table.Column([[1, 1],
                          [2, 2],
                          [3, 3]],
                         name='c')
        t1 = operation_table_type([a, b])
        t2 = operation_table_type([a2, c])
        t12 = table.join(t1, t2, join_type='inner')
        # Inner join keeps the original mask of 'b' for the matched rows;
        # 'c' is never missing so it stays unmasked.
        assert np.all(t12['b'].mask == [[True, False],
                                        [False, False]])
        assert not hasattr(t12['c'], 'mask')
        t12 = table.join(t1, t2, join_type='outer')
        # Outer join: rows missing from one side are fully masked in that
        # side's multidimensional column.
        assert np.all(t12['b'].mask == [[True, False],
                                        [False, True],
                                        [False, False],
                                        [True, True]])
        assert np.all(t12['c'].mask == [[False, False],
                                        [True, True],
                                        [False, False],
                                        [False, False]])
def test_mixin_functionality(self, mixin_cols):
col = mixin_cols['m']
cls_name = type(col).__name__
len_col = len(col)
idx = np.arange(len_col)
t1 = table.QTable([idx, col], names=['idx', 'm1'])
t2 = table.QTable([idx, col], names=['idx', 'm2'])
# Set up join mismatches for different join_type cases
t1 = t1[[0, 1, 3]]
t2 = t2[[0, 2, 3]]
# Test inner join, which works for all mixin_cols
out = table.join(t1, t2, join_type='inner')
assert len(out) == 2
assert out['m2'].__class__ is col.__class__
assert np.all(out['idx'] == [0, 3])
if cls_name == 'SkyCoord':
# SkyCoord doesn't support __eq__ so use our own
assert skycoord_equal(out['m1'], col[[0, 3]])
assert skycoord_equal(out['m2'], col[[0, 3]])
else:
assert np.all(out['m1'] == col[[0, 3]])
assert np.all(out['m2'] == col[[0, 3]])
# Check for left, right, outer join which requires masking. Only Time
# supports this currently.
if cls_name == 'Time':
out = table.join(t1, t2, join_type='left')
assert len(out) == 3
assert np.all(out['idx'] == [0, 1, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, False, False])
assert np.all(out['m2'].mask == [False, True, False])
out = table.join(t1, t2, join_type='right')
assert len(out) == 3
assert np.all(out['idx'] == [0, 2, 3])
assert np.all(out['m1'] == t1['m1'])
assert np.all(out['m2'] == t2['m2'])
assert np.all(out['m1'].mask == [False, True, False])
assert np.all(out['m2'].mask == [False, False, False])
out = table.join(t1, t2, join_type='outer')
assert len(out) == 4
assert np.all(out['idx'] == [0, 1, 2, 3])
assert np.all(out['m1'] == col)
assert np.all(out['m2'] == col)
assert np.all(out['m1'].mask == [False, False, True, False])
assert np.all(out['m2'].mask == [False, True, False, False])
else:
# Otherwise make sure it fails with the right exception message
for join_type in ('outer', 'left', 'right'):
with pytest.raises(NotImplementedError) as err:
table.join(t1, t2, join_type='outer')
assert ('join requires masking' in str(err.value) or
'join unavailable' in str(err.value))
def test_cartesian_join(self, operation_table_type):
t1 = Table(rows=[(1, 'a'),
(2, 'b')], names=['a', 'b'])
t2 = Table(rows=[(3, 'c'),
(4, 'd')], names=['a', 'c'])
t12 = table.join(t1, t2, join_type='cartesian')
assert t1.colnames == ['a', 'b']
assert t2.colnames == ['a', 'c']
assert len(t12) == len(t1) * len(t2)
assert str(t12).splitlines() == [
'a_1 b a_2 c ',
'--- --- --- ---',
' 1 a 3 c',
' 1 a 4 d',
' 2 b 3 c',
' 2 b 4 d']
with pytest.raises(ValueError, match='cannot supply keys for a cartesian join'):
t12 = table.join(t1, t2, join_type='cartesian', keys='a')
class TestSetdiff():
    """Tests for `table.setdiff`: rows of the first table not in the second."""
    def _setup(self, t_cls=Table):
        # t1 and t2 share columns 'a' and 'b'; t3 adds an extra column 'd'.
        lines1 = [' a b ',
                  ' 0 foo ',
                  ' 1 foo ',
                  ' 1 bar ',
                  ' 2 bar ']
        lines2 = [' a b ',
                  ' 0 foo ',
                  ' 3 foo ',
                  ' 4 bar ',
                  ' 2 bar ']
        lines3 = [' a b d ',
                  ' 0 foo R1',
                  ' 8 foo R2',
                  ' 1 bar R3',
                  ' 4 bar R4']
        self.t1 = t_cls.read(lines1, format='ascii')
        self.t2 = t_cls.read(lines2, format='ascii')
        self.t3 = t_cls.read(lines3, format='ascii')
    def test_default_same_columns(self, operation_table_type):
        # Default keys: all columns of the first table.
        self._setup(operation_table_type)
        out = table.setdiff(self.t1, self.t2)
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert out.pformat() == [' a b ',
                                 '--- ---',
                                 ' 1 bar',
                                 ' 1 foo']
    def test_default_same_tables(self, operation_table_type):
        # setdiff of a table with itself is empty.
        self._setup(operation_table_type)
        out = table.setdiff(self.t1, self.t1)
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert out.pformat() == [' a b ',
                                 '--- ---']
    def test_extra_col_left_table(self, operation_table_type):
        # Left table has a column the right table lacks -> error.
        self._setup(operation_table_type)
        with pytest.raises(ValueError):
            out = table.setdiff(self.t3, self.t1)
    def test_extra_col_right_table(self, operation_table_type):
        # Extra columns in the right table are simply ignored.
        self._setup(operation_table_type)
        out = table.setdiff(self.t1, self.t3)
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert out.pformat() == [' a b ',
                                 '--- ---',
                                 ' 1 foo',
                                 ' 2 bar']
    def test_keys(self, operation_table_type):
        # Explicit keys restrict the comparison to those columns.
        self._setup(operation_table_type)
        out = table.setdiff(self.t3, self.t1, keys=['a', 'b'])
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert out.pformat() == [' a b d ',
                                 '--- --- ---',
                                 ' 4 bar R4',
                                 ' 8 foo R2']
    def test_missing_key(self, operation_table_type):
        # Key column missing from the second table -> error.
        self._setup(operation_table_type)
        with pytest.raises(ValueError):
            out = table.setdiff(self.t3, self.t1, keys=['a', 'd'])
class TestVStack():
    """Tests for `table.vstack`, which stacks tables vertically (row-wise)."""

    def _setup(self, t_cls=Table):
        # t1/t2 share cols 'a', 'b'; t2 adds 'c'.  t3 has an incompatible
        # dtype for 'b'.  t4 is a masked copy of t1; t5 is a copy of t1
        # whose table meta conflicts with t1's.
        self.t1 = t_cls.read([' a b',
                              ' 0. foo',
                              ' 1. bar'], format='ascii')
        self.t2 = t_cls.read([' a b c',
                              ' 2. pez 4',
                              ' 3. sez 5'], format='ascii')
        self.t3 = t_cls.read([' a b',
                              ' 4. 7',
                              ' 5. 8',
                              ' 6. 9'], format='ascii')
        self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)

        # The following table has meta-data that conflicts with t1
        self.t5 = t_cls(self.t1, copy=True)

        self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
        self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
        self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
                                       ('c', {'a': 1, 'b': 1, 'c': 1}),
                                       ('d', 1),
                                       ('a', 1),
                                       ('e', 1)])

    def test_stack_rows(self, operation_table_type):
        """Stacking a table with a single Row works."""
        self._setup(operation_table_type)
        t2 = self.t1.copy()
        t2.meta.clear()
        out = table.vstack([self.t1, t2[1]])
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert out.pformat() == [' a b ',
                                 '--- ---',
                                 '0.0 foo',
                                 '1.0 bar',
                                 '1.0 bar']

    def test_stack_table_column(self, operation_table_type):
        """Stacking a table with a bare Column masks the missing 'b' values."""
        self._setup(operation_table_type)
        t2 = self.t1.copy()
        t2.meta.clear()
        out = table.vstack([self.t1, t2['a']])
        assert out.masked is False
        assert out.pformat() == [' a b ',
                                 '--- ---',
                                 '0.0 foo',
                                 '1.0 bar',
                                 '0.0 --',
                                 '1.0 --']

    def test_table_meta_merge(self, operation_table_type):
        """Table-level meta from all inputs is merged into the output."""
        self._setup(operation_table_type)
        out = table.vstack([self.t1, self.t2, self.t4], join_type='inner')
        assert out.meta == self.meta_merge

    def test_table_meta_merge_conflict(self, operation_table_type):
        """Conflicting table meta: warn (default), silent, error, or invalid."""
        self._setup(operation_table_type)

        with catch_warnings() as w:
            out = table.vstack([self.t1, self.t5], join_type='inner')
        assert len(w) == 2

        assert out.meta == self.t5.meta

        with catch_warnings() as w:
            out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
        assert len(w) == 2

        assert out.meta == self.t5.meta

        with catch_warnings() as w:
            out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
        assert len(w) == 0

        assert out.meta == self.t5.meta

        with pytest.raises(MergeConflictError):
            out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')

        with pytest.raises(ValueError):
            out = table.vstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')

    def test_bad_input_type(self, operation_table_type):
        """Non-table inputs and bad join_type values raise."""
        self._setup(operation_table_type)
        with pytest.raises(ValueError):
            table.vstack([])
        with pytest.raises(TypeError):
            table.vstack(1)
        with pytest.raises(TypeError):
            table.vstack([self.t2, 1])
        with pytest.raises(ValueError):
            table.vstack([self.t1, self.t2], join_type='invalid join type')

    def test_stack_basic_inner(self, operation_table_type):
        """Inner vstack keeps only columns common to all inputs."""
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        t12 = table.vstack([t1, t2], join_type='inner')
        assert t12.masked is False
        assert type(t12) is operation_table_type
        assert type(t12['a']) is type(t1['a'])
        assert type(t12['b']) is type(t1['b'])
        assert t12.pformat() == [' a b ',
                                 '--- ---',
                                 '0.0 foo',
                                 '1.0 bar',
                                 '2.0 pez',
                                 '3.0 sez']

        t124 = table.vstack([t1, t2, t4], join_type='inner')
        assert type(t124) is operation_table_type
        # Bug fix: these two asserts previously re-checked t12 instead of t124.
        assert type(t124['a']) is type(t1['a'])
        assert type(t124['b']) is type(t1['b'])
        assert t124.pformat() == [' a b ',
                                  '--- ---',
                                  '0.0 foo',
                                  '1.0 bar',
                                  '2.0 pez',
                                  '3.0 sez',
                                  '0.0 foo',
                                  '1.0 bar']

    def test_stack_basic_outer(self, operation_table_type):
        """Outer vstack keeps all columns, masking missing values."""
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4
        t12 = table.vstack([t1, t2], join_type='outer')
        assert t12.masked is False
        assert t12.pformat() == [' a b c ',
                                 '--- --- ---',
                                 '0.0 foo --',
                                 '1.0 bar --',
                                 '2.0 pez 4',
                                 '3.0 sez 5']

        t124 = table.vstack([t1, t2, t4], join_type='outer')
        assert t124.masked is False
        assert t124.pformat() == [' a b c ',
                                  '--- --- ---',
                                  '0.0 foo --',
                                  '1.0 bar --',
                                  '2.0 pez 4',
                                  '3.0 sez 5',
                                  '0.0 foo --',
                                  '1.0 bar --']

    def test_stack_incompatible(self, operation_table_type):
        """Incompatible column dtypes or shapes raise TableMergeError."""
        self._setup(operation_table_type)
        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, self.t3], join_type='inner')
        assert ("The 'b' columns have incompatible types: {}"
                .format([self.t1['b'].dtype.name, self.t3['b'].dtype.name])
                in str(excinfo.value))

        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, self.t3], join_type='outer')
        assert "The 'b' columns have incompatible types:" in str(excinfo.value)

        with pytest.raises(TableMergeError):
            table.vstack([self.t1, self.t2], join_type='exact')

        t1_reshape = self.t1.copy()
        t1_reshape['b'].shape = [2, 1]
        with pytest.raises(TableMergeError) as excinfo:
            table.vstack([self.t1, t1_reshape])
        assert "have different shape" in str(excinfo.value)

    def test_vstack_one_masked(self, operation_table_type):
        """Stacking an unmasked table with a masked one preserves the mask."""
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t4 = self.t4
        t4['b'].mask[1] = True
        t14 = table.vstack([t1, t4])
        assert t14.masked is False
        assert t14.pformat() == [' a b ',
                                 '--- ---',
                                 '0.0 foo',
                                 '1.0 bar',
                                 '0.0 foo',
                                 '1.0 --']

    def test_col_meta_merge_inner(self, operation_table_type):
        """Per-column info attributes are merged for inner vstack."""
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        # Key col 'a' unit: expect the last value ('km')
        t1['a'].info.unit = 'cm'
        t2['a'].info.unit = 'm'
        t4['a'].info.unit = 'km'

        # Key col 'a' format should take last when all match
        t1['a'].info.format = '%f'
        t2['a'].info.format = '%f'
        t4['a'].info.format = '%f'

        # Key col 'b', take first value 't1_b'
        t1['b'].info.description = 't1_b'

        # Key col 'b', take first non-empty value '%6s'
        t4['b'].info.format = '%6s'

        # Key col 'a', should be merged meta
        t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))

        # Key col 'b', should be meta2
        t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))

        with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
            out = table.vstack([t1, t2, t4], join_type='inner')

        if operation_table_type is Table:
            assert warning_lines[0].category == metadata.MergeConflictWarning
            assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
                    in str(warning_lines[0].message))
            assert warning_lines[1].category == metadata.MergeConflictWarning
            assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
                    in str(warning_lines[1].message))
            # Check units are suitably ignored for a regular Table
            assert out.pformat() == [' a b ',
                                     ' km ',
                                     '-------- ------',
                                     '0.000000 foo',
                                     '1.000000 bar',
                                     '2.000000 pez',
                                     '3.000000 sez',
                                     '0.000000 foo',
                                     '1.000000 bar']
        else:
            assert len(warning_lines) == 0
            # Check QTable correctly dealt with units.
            assert out.pformat() == [' a b ',
                                     ' km ',
                                     '-------- ------',
                                     '0.000000 foo',
                                     '0.000010 bar',
                                     '0.002000 pez',
                                     '0.003000 sez',
                                     '0.000000 foo',
                                     '1.000000 bar']
        assert out['a'].info.unit == 'km'
        assert out['a'].info.format == '%f'
        assert out['b'].info.description == 't1_b'
        assert out['b'].info.format == '%6s'
        assert out['a'].info.meta == self.meta_merge
        assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])

    def test_col_meta_merge_outer(self, operation_table_type):
        """Per-column info attributes are merged for outer vstack."""
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4

        # Key col 'a' unit: expect the last value ('km')
        t1['a'].unit = 'cm'
        t2['a'].unit = 'm'
        t4['a'].unit = 'km'

        # Key col 'a' format should take last when all match
        t1['a'].info.format = '%0d'
        t2['a'].info.format = '%0d'
        t4['a'].info.format = '%0d'

        # Key col 'b', take first value 't1_b'
        t1['b'].info.description = 't1_b'

        # Key col 'b', take first non-empty value '%6s'
        t4['b'].info.format = '%6s'

        # Key col 'a', should be merged meta
        t1['a'].info.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        t2['a'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['a'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))

        # Key col 'b', should be meta2
        t2['b'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))

        # All these should pass through
        t2['c'].unit = 'm'
        t2['c'].info.format = '%6s'
        t2['c'].info.description = 't2_c'

        with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
            out = table.vstack([t1, t2, t4], join_type='outer')

        assert warning_lines[0].category == metadata.MergeConflictWarning
        assert ("In merged column 'a' the 'unit' attribute does not match (cm != m)"
                in str(warning_lines[0].message))
        assert warning_lines[1].category == metadata.MergeConflictWarning
        assert ("In merged column 'a' the 'unit' attribute does not match (m != km)"
                in str(warning_lines[1].message))
        assert out['a'].unit == 'km'
        assert out['a'].info.format == '%0d'
        assert out['b'].info.description == 't1_b'
        assert out['b'].info.format == '%6s'
        assert out['a'].info.meta == self.meta_merge
        assert out['b'].info.meta == OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)])
        assert out['c'].info.unit == 'm'
        assert out['c'].info.format == '%6s'
        assert out['c'].info.description == 't2_c'

    def test_vstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        # Fix: docstring moved to the first statement (was a no-op string).
        self._setup(operation_table_type)
        assert (self.t1 == table.vstack(self.t1)).all()
        assert (self.t1 == table.vstack([self.t1])).all()

    def test_mixin_functionality(self, mixin_cols):
        """vstack with mixin columns: works for the implemented classes;
        outer stack (masking) is supported only for Time."""
        col = mixin_cols['m']
        len_col = len(col)
        t = table.QTable([col], names=['a'])
        cls_name = type(col).__name__

        # Vstack works for these classes:
        implemented_mixin_classes = ['Quantity', 'Angle', 'Time',
                                     'Latitude', 'Longitude',
                                     'EarthLocation']
        if cls_name in implemented_mixin_classes:
            out = table.vstack([t, t])
            assert len(out) == len_col * 2
            assert np.all(out['a'][:len_col] == col)
            assert np.all(out['a'][len_col:] == col)
        else:
            with pytest.raises(NotImplementedError) as err:
                table.vstack([t, t])
            assert ('vstack unavailable for mixin column type(s): {}'
                    .format(cls_name) in str(err.value))

        # Check for outer stack which requires masking.  Only Time supports
        # this currently.
        t2 = table.QTable([col], names=['b'])  # different from col name for t
        if cls_name == 'Time':
            out = table.vstack([t, t2], join_type='outer')
            assert len(out) == len_col * 2
            assert np.all(out['a'][:len_col] == col)
            assert np.all(out['b'][len_col:] == col)
            assert np.all(out['a'].mask == [False] * len_col + [True] * len_col)
            assert np.all(out['b'].mask == [True] * len_col + [False] * len_col)
            # check directly stacking mixin columns:
            out2 = table.vstack([t, t2['b']])
            assert np.all(out['a'] == out2['a'])
            assert np.all(out['b'] == out2['b'])
        else:
            with pytest.raises(NotImplementedError) as err:
                table.vstack([t, t2], join_type='outer')
            assert ('vstack requires masking' in str(err.value) or
                    'vstack unavailable' in str(err.value))
class TestDStack():
    """Tests for `table.dstack`, which stacks tables along a new depth axis."""
    def _setup(self, t_cls=Table):
        # t1/t2: plain 1-d tables (t2 adds col 'c' and a Time mixin 'd');
        # t3/t5: tables with 2-d columns; t4: masked copy of t1;
        # t6: a table with a different number of rows.
        self.t1 = t_cls.read([' a b',
                              ' 0. foo',
                              ' 1. bar'], format='ascii')
        self.t2 = t_cls.read([' a b c',
                              ' 2. pez 4',
                              ' 3. sez 5'], format='ascii')
        self.t2['d'] = Time([1, 2], format='cxcsec')
        self.t3 = t_cls({'a': [[5., 6.], [4., 3.]],
                         'b': [['foo', 'bar'], ['pez', 'sez']]},
                        names=('a', 'b'))
        self.t4 = t_cls(self.t1, copy=True, masked=t_cls is Table)
        self.t5 = t_cls({'a': [[4., 2.], [1., 6.]],
                         'b': [['foo', 'pez'], ['bar', 'sez']]},
                        names=('a', 'b'))
        self.t6 = t_cls.read([' a b c',
                              ' 7. pez 2',
                              ' 4. sez 6',
                              ' 6. foo 3'], format='ascii')
    @staticmethod
    def compare_dstack(tables, out):
        # Verify that slice ii along the new axis of ``out`` reproduces
        # ``tables[ii]``, including (absence of) masks.
        for ii, tbl in enumerate(tables):
            for name, out_col in out.columns.items():
                if name in tbl.colnames:
                    # Columns always compare equal
                    assert np.all(tbl[name] == out[name][:, ii])
                    # If input has a mask then output must have same mask
                    if hasattr(tbl[name], 'mask'):
                        assert np.all(tbl[name].mask == out[name].mask[:, ii])
                    # If input has no mask then output might have a mask (if other table
                    # is missing that column). If so then all mask values should be False.
                    elif hasattr(out[name], 'mask'):
                        assert not np.any(out[name].mask[:, ii])
                else:
                    # Column missing for this table, out must have a mask with all True.
                    assert np.all(out[name].mask[:, ii])
    def test_dstack_table_column(self, operation_table_type):
        """Stack a table with 3 cols and one column (gets auto-converted to Table).
        """
        self._setup(operation_table_type)
        t2 = self.t1.copy()
        out = table.dstack([self.t1, t2['a']])
        self.compare_dstack([self.t1, t2[('a',)]], out)
    def test_dstack_basic_outer(self, operation_table_type):
        # Outer dstack keeps all columns, masking missing entries.
        if operation_table_type is QTable:
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4
        t4['a'].mask[0] = True
        # Test for non-masked table
        t12 = table.dstack([t1, t2], join_type='outer')
        assert type(t12) is operation_table_type
        assert type(t12['a']) is type(t1['a'])
        assert type(t12['b']) is type(t1['b'])
        self.compare_dstack([t1, t2], t12)
        # Test for masked table
        t124 = table.dstack([t1, t2, t4], join_type='outer')
        assert type(t124) is operation_table_type
        assert type(t124['a']) is type(t4['a'])
        assert type(t124['b']) is type(t4['b'])
        self.compare_dstack([t1, t2, t4], t124)
    def test_dstack_basic_inner(self, operation_table_type):
        # Inner dstack keeps only the common columns.
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t4 = self.t4
        # Test for masked table
        t124 = table.dstack([t1, t2, t4], join_type='inner')
        assert type(t124) is operation_table_type
        assert type(t124['a']) is type(t4['a'])
        assert type(t124['b']) is type(t4['b'])
        self.compare_dstack([t1, t2, t4], t124)
    def test_dstack_multi_dimension_column(self, operation_table_type):
        # 2-d input columns stack cleanly; mixing 1-d and 2-d fails.
        self._setup(operation_table_type)
        t3 = self.t3
        t5 = self.t5
        t2 = self.t2
        t35 = table.dstack([t3, t5])
        assert type(t35) is operation_table_type
        assert type(t35['a']) is type(t3['a'])
        assert type(t35['b']) is type(t3['b'])
        self.compare_dstack([t3, t5], t35)
        with pytest.raises(TableMergeError):
            table.dstack([t2, t3])
    def test_dstack_different_length_table(self, operation_table_type):
        # Tables with differing row counts cannot be depth-stacked.
        self._setup(operation_table_type)
        t2 = self.t2
        t6 = self.t6
        with pytest.raises(ValueError):
            table.dstack([t2, t6])
    def test_dstack_single_table(self):
        # A single (bare) table input is accepted and returned unchanged.
        self._setup(Table)
        out = table.dstack(self.t1)
        assert np.all(out == self.t1)
class TestHStack():
    """Tests for `table.hstack`, which stacks tables horizontally (column-wise)."""

    def _setup(self, t_cls=Table):
        # t1/t2 share col names (forcing auto-rename); t3 has distinct
        # names and a different row count; t4 is a masked copy of t1 with
        # renamed columns; t5 is a copy whose meta conflicts with t1's.
        self.t1 = t_cls.read([' a b',
                              ' 0. foo',
                              ' 1. bar'], format='ascii')
        self.t2 = t_cls.read([' a b c',
                              ' 2. pez 4',
                              ' 3. sez 5'], format='ascii')
        self.t3 = t_cls.read([' d e',
                              ' 4. 7',
                              ' 5. 8',
                              ' 6. 9'], format='ascii')
        self.t4 = t_cls(self.t1, copy=True, masked=True)
        self.t4['a'].name = 'f'
        self.t4['b'].name = 'g'

        # The following table has meta-data that conflicts with t1
        self.t5 = t_cls(self.t1, copy=True)

        self.t1.meta.update(OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)]))
        self.t2.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        self.t4.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
        self.t5.meta.update(OrderedDict([('b', 3), ('c', 'k'), ('d', 1)]))
        self.meta_merge = OrderedDict([('b', [1, 2, 3, 4, 5, 6]),
                                       ('c', {'a': 1, 'b': 1, 'c': 1}),
                                       ('d', 1),
                                       ('a', 1),
                                       ('e', 1)])

    def test_stack_same_table(self, operation_table_type):
        """
        From #2995, test that hstack'ing references to the same table has the
        expected output.
        """
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t1])
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2',
                                 '--- --- --- ---',
                                 '0.0 foo 0.0 foo',
                                 '1.0 bar 1.0 bar']

    def test_stack_rows(self, operation_table_type):
        """hstack accepts Row objects as inputs."""
        self._setup(operation_table_type)
        out = table.hstack([self.t1[0], self.t2[1]])
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
                                 '--- --- --- --- ---',
                                 '0.0 foo 3.0 sez 5']

    def test_stack_columns(self, operation_table_type):
        """hstack accepts bare Column objects as inputs."""
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2['c']])
        assert type(out['a']) is type(self.t1['a'])
        assert type(out['b']) is type(self.t1['b'])
        assert type(out['c']) is type(self.t2['c'])
        assert out.pformat() == [' a b c ',
                                 '--- --- ---',
                                 '0.0 foo 4',
                                 '1.0 bar 5']

    def test_table_meta_merge(self, operation_table_type):
        """Table-level meta from all inputs is merged into the output."""
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2, self.t4], join_type='inner')
        assert out.meta == self.meta_merge

    def test_table_meta_merge_conflict(self, operation_table_type):
        """Conflicting table meta: warn (default), silent, error, or invalid."""
        self._setup(operation_table_type)

        with catch_warnings() as w:
            out = table.hstack([self.t1, self.t5], join_type='inner')
        assert len(w) == 2

        assert out.meta == self.t5.meta

        with catch_warnings() as w:
            out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='warn')
        assert len(w) == 2

        assert out.meta == self.t5.meta

        with catch_warnings() as w:
            out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='silent')
        assert len(w) == 0

        assert out.meta == self.t5.meta

        with pytest.raises(MergeConflictError):
            out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='error')

        with pytest.raises(ValueError):
            out = table.hstack([self.t1, self.t5], join_type='inner', metadata_conflicts='nonsense')

    def test_bad_input_type(self, operation_table_type):
        """Non-table inputs and bad join_type values raise."""
        self._setup(operation_table_type)
        with pytest.raises(ValueError):
            table.hstack([])
        with pytest.raises(TypeError):
            table.hstack(1)
        with pytest.raises(TypeError):
            table.hstack([self.t2, 1])
        with pytest.raises(ValueError):
            table.hstack([self.t1, self.t2], join_type='invalid join type')

    def test_stack_basic(self, operation_table_type):
        """Inner/outer hstack with conflicting names and unequal row counts."""
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = self.t2
        t3 = self.t3
        t4 = self.t4

        out = table.hstack([t1, t2], join_type='inner')
        assert out.masked is False
        assert type(out) is operation_table_type
        assert type(out['a_1']) is type(t1['a'])
        assert type(out['b_1']) is type(t1['b'])
        assert type(out['a_2']) is type(t2['a'])
        assert type(out['b_2']) is type(t2['b'])
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c ',
                                 '--- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4',
                                 '1.0 bar 3.0 sez 5']

        # stacking as a list gives same result
        out_list = table.hstack([t1, t2], join_type='inner')
        assert out.pformat() == out_list.pformat()

        out = table.hstack([t1, t2], join_type='outer')
        assert out.pformat() == out_list.pformat()

        out = table.hstack([t1, t2, t3, t4], join_type='outer')
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
                                 '--- --- --- --- --- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
                                 '1.0 bar 3.0 sez 5 5.0 8 1.0 bar',
                                 ' -- -- -- -- -- 6.0 9 -- --']

        out = table.hstack([t1, t2, t3, t4], join_type='inner')
        assert out.masked is False
        assert out.pformat() == ['a_1 b_1 a_2 b_2 c d e f g ',
                                 '--- --- --- --- --- --- --- --- ---',
                                 '0.0 foo 2.0 pez 4 4.0 7 0.0 foo',
                                 '1.0 bar 3.0 sez 5 5.0 8 1.0 bar']

    def test_stack_incompatible(self, operation_table_type):
        """join_type='exact' fails when row counts differ."""
        self._setup(operation_table_type)
        # For join_type exact, which will fail here because n_rows
        # does not match
        with pytest.raises(TableMergeError):
            table.hstack([self.t1, self.t3], join_type='exact')

    def test_hstack_one_masked(self, operation_table_type):
        """Stacking an unmasked table with a masked one preserves the mask."""
        if operation_table_type is QTable:
            # Fix: give xfail a reason, consistent with the sibling tests.
            pytest.xfail('Quantity columns do not support masking.')
        self._setup(operation_table_type)
        t1 = self.t1
        t2 = operation_table_type(t1, copy=True, masked=True)
        t2.meta.clear()
        t2['b'].mask[1] = True
        out = table.hstack([t1, t2])
        assert out.pformat() == ['a_1 b_1 a_2 b_2',
                                 '--- --- --- ---',
                                 '0.0 foo 0.0 foo',
                                 '1.0 bar 1.0 --']

    def test_table_col_rename(self, operation_table_type):
        """Conflicting column names use uniq_col_name/table_names templates."""
        self._setup(operation_table_type)
        out = table.hstack([self.t1, self.t2], join_type='inner',
                           uniq_col_name='{table_name}_{col_name}',
                           table_names=('left', 'right'))
        assert out.masked is False
        assert out.pformat() == ['left_a left_b right_a right_b c ',
                                 '------ ------ ------- ------- ---',
                                 ' 0.0 foo 2.0 pez 4',
                                 ' 1.0 bar 3.0 sez 5']

    def test_col_meta_merge(self, operation_table_type):
        """Per-column info attributes pass through unchanged (as copies)."""
        self._setup(operation_table_type)
        t1 = self.t1
        t3 = self.t3[:2]
        t4 = self.t4

        # Just set a bunch of meta and make sure it is the same in output
        meta1 = OrderedDict([('b', [1, 2]), ('c', {'a': 1}), ('d', 1)])
        t1['a'].unit = 'cm'
        t1['b'].info.description = 't1_b'
        t4['f'].info.format = '%6s'
        t1['b'].info.meta.update(meta1)
        t3['d'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t4['g'].info.meta.update(OrderedDict([('b', [5, 6]), ('c', {'c': 1}), ('e', 1)]))
        t3['e'].info.meta.update(OrderedDict([('b', [3, 4]), ('c', {'b': 1}), ('a', 1)]))
        t3['d'].unit = 'm'
        t3['d'].info.format = '%6s'
        t3['d'].info.description = 't3_c'

        with catch_warnings(metadata.MergeConflictWarning) as warning_lines:
            out = table.hstack([t1, t3, t4], join_type='exact')

        assert len(warning_lines) == 0

        for t in [t1, t3, t4]:
            for name in t.colnames:
                for attr in ('meta', 'unit', 'format', 'description'):
                    assert getattr(out[name].info, attr) == getattr(t[name].info, attr)

        # Make sure we got a copy of meta, not ref
        t1['b'].info.meta['b'] = None
        assert out['b'].info.meta['b'] == [1, 2]

    def test_hstack_one_table(self, operation_table_type):
        """Regression test for issue #3313"""
        # Fix: docstring moved to the first statement (was a no-op string).
        self._setup(operation_table_type)
        assert (self.t1 == table.hstack(self.t1)).all()
        assert (self.t1 == table.hstack([self.t1])).all()

    def test_mixin_functionality(self, mixin_cols):
        """hstack with mixin columns: inner works for all mixins; outer
        (which requires masking) only for Time."""
        col1 = mixin_cols['m']
        col2 = col1[2:4]  # Shorter version of col1
        t1 = table.QTable([col1])
        t2 = table.QTable([col2])

        cls_name = type(col1).__name__

        out = table.hstack([t1, t2], join_type='inner')
        assert type(out['col0_1']) is type(out['col0_2'])
        assert len(out) == len(col2)

        # Check that columns are as expected.
        if cls_name == 'SkyCoord':
            assert skycoord_equal(out['col0_1'], col1[:len(col2)])
            assert skycoord_equal(out['col0_2'], col2)
        else:
            assert np.all(out['col0_1'] == col1[:len(col2)])
            assert np.all(out['col0_2'] == col2)

        # Time class supports masking, all other mixins do not
        if cls_name == 'Time':
            out = table.hstack([t1, t2], join_type='outer')
            assert len(out) == len(t1)
            assert np.all(out['col0_1'] == col1)
            assert np.all(out['col0_2'][:len(col2)] == col2)
            assert np.all(out['col0_2'].mask == [False, False, True, True])

            # check directly stacking mixin columns:
            out2 = table.hstack([t1, t2['col0']], join_type='outer')
            assert np.all(out['col0_1'] == out2['col0_1'])
            assert np.all(out['col0_2'] == out2['col0_2'])
        else:
            with pytest.raises(NotImplementedError) as err:
                table.hstack([t1, t2], join_type='outer')
            assert 'hstack requires masking' in str(err.value)
def test_unique(operation_table_type):
    """Exercise table.unique: default keys, key subsets, keep modes,
    and the handling of masked key columns."""
    t = operation_table_type.read(
        [' a b c d',
         ' 2 b 7.0 0',
         ' 1 c 3.0 5',
         ' 2 b 6.0 2',
         ' 2 a 4.0 3',
         ' 1 a 1.0 7',
         ' 2 b 5.0 1',
         ' 0 a 0.0 4',
         ' 1 a 2.0 6',
         ' 1 c 3.0 5',
         ], format='ascii')
    # Default: all columns are keys; only the fully duplicated last row drops.
    tu = operation_table_type(np.sort(t[:-1]))
    t_all = table.unique(t)
    assert sort_eq(t_all.pformat(), tu.pformat())
    t_s = t.copy()
    del t_s['b', 'c', 'd']
    t_all = table.unique(t_s)
    assert sort_eq(t_all.pformat(), [' a ',
                                     '---',
                                     ' 0',
                                     ' 1',
                                     ' 2'])
    # Single key column with keep='first' (default), 'last' and 'none'.
    key1 = 'a'
    t1a = table.unique(t, key1)
    assert sort_eq(t1a.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 1 c 3.0 5',
                                   ' 2 b 7.0 0'])
    t1b = table.unique(t, key1, keep='last')
    assert sort_eq(t1b.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 1 c 3.0 5',
                                   ' 2 b 5.0 1'])
    t1c = table.unique(t, key1, keep='none')
    assert sort_eq(t1c.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4'])
    # Multiple key columns.
    key2 = ['a', 'b']
    t2a = table.unique(t, key2)
    assert sort_eq(t2a.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 1 a 1.0 7',
                                   ' 1 c 3.0 5',
                                   ' 2 a 4.0 3',
                                   ' 2 b 7.0 0'])
    t2b = table.unique(t, key2, keep='last')
    assert sort_eq(t2b.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 1 a 2.0 6',
                                   ' 1 c 3.0 5',
                                   ' 2 a 4.0 3',
                                   ' 2 b 5.0 1'])
    t2c = table.unique(t, key2, keep='none')
    assert sort_eq(t2c.pformat(), [' a b c d ',
                                   '--- --- --- ---',
                                   ' 0 a 0.0 4',
                                   ' 2 a 4.0 3'])
    # Error cases: duplicate key names and an invalid 'keep' value.
    key2 = ['a', 'a']
    with pytest.raises(ValueError) as exc:
        t2a = table.unique(t, key2)
    assert exc.value.args[0] == "duplicate key names"
    with pytest.raises(ValueError) as exc:
        table.unique(t, key2, keep=True)
    assert exc.value.args[0] == (
        "'keep' should be one of 'first', 'last', 'none'")
    # Masked key columns raise unless silent=True, which drops them as keys.
    t1_m = operation_table_type(t1a, masked=True)
    t1_m['a'].mask[1] = True
    with pytest.raises(ValueError) as exc:
        t1_mu = table.unique(t1_m)
    assert exc.value.args[0] == (
        "cannot use columns with masked values as keys; "
        "remove column 'a' from keys and rerun unique()")
    t1_mu = table.unique(t1_m, silent=True)
    assert t1_mu.masked is False
    assert t1_mu.pformat() == [' a b c d ',
                               '--- --- --- ---',
                               ' 0 a 0.0 4',
                               ' 2 b 7.0 0',
                               ' -- c 3.0 5']
    # silent=True is not allowed together with explicit masked keys.
    with pytest.raises(ValueError) as e:
        t1_mu = table.unique(t1_m, silent=True, keys='a')
    t1_m = operation_table_type(t, masked=True)
    t1_m['a'].mask[1] = True
    t1_m['d'].mask[3] = True
    # Test that multiple masked key columns get removed in the correct
    # order
    t1_mu = table.unique(t1_m, keys=['d', 'a', 'b'], silent=True)
    assert t1_mu.masked is False
    assert t1_mu.pformat() == [' a b c d ',
                               '--- --- --- ---',
                               ' 2 a 4.0 --',
                               ' 2 b 7.0 0',
                               ' -- c 3.0 5']
def test_vstack_bytes(operation_table_type):
    """Regression test for issue #5617: vstack of *bytes* columns on Py3.

    The underlying cause is the upstream numpy issue numpy/numpy#8403.
    """
    single = operation_table_type([[b'a']], names=['a'])
    assert single['a'].itemsize == 1
    stacked = table.vstack([single, single])
    assert len(stacked) == 2
    assert stacked['a'].itemsize == 1
def test_vstack_unicode():
    """Regression test related to issue #5617 for *unicode* columns.

    For U-dtype columns the character size was being multiplied by 4.
    """
    src = table.Table([['a']], names=['a'])
    assert src['a'].itemsize == 4  # U dtype stores 4 bytes per character
    stacked = table.vstack([src, src])
    assert len(stacked) == 2
    assert stacked['a'].itemsize == 4
def test_join_mixins_time_quantity():
    """Outer join keyed on non-ndarray (mixin) columns: Time and Quantity."""
    times_left = Time([2, 1, 2], format='cxcsec')
    lengths_left = [2, 1, 1] * u.m
    rows_left = [1, 2, 3]
    times_right = Time([2, 3], format='cxcsec')
    lengths_right = [2, 3] * u.m
    rows_right = [10, 20]
    left = Table([times_left, lengths_left, rows_left],
                 names=['tm', 'q', 'idx'])
    right = Table([times_right, lengths_right, rows_right],
                  names=['tm', 'q', 'idx'])

    # Expected output:
    #
    # <Table length=4>
    #          tm             q    idx_1 idx_2
    #                         m
    #        object        float64 int64 int64
    # ------------------ ------- ----- -----
    # 0.9999999999969589     1.0     2    --
    #   2.00000000000351     1.0     3    --
    #   2.00000000000351     2.0     1    10
    #  3.000000000000469     3.0    --    20
    joined = table.join(left, right, join_type='outer', keys=['tm', 'q'])

    # Key cols are lexically sorted
    assert np.all(joined['tm'] == Time([1, 2, 2, 3], format='cxcsec'))
    assert np.all(joined['q'] == [1, 1, 2, 3] * u.m)
    assert np.all(joined['idx_1'] == np.ma.array([2, 3, 1, 0],
                                                 mask=[0, 0, 0, 1]))
    assert np.all(joined['idx_2'] == np.ma.array([0, 0, 10, 20],
                                                 mask=[1, 1, 0, 0]))
def test_join_mixins_not_sortable():
    """A join keyed on a mixin column that cannot be sorted raises TypeError."""
    coords = SkyCoord([1, 2], [3, 4], unit='deg,deg')
    left = Table([coords, [1, 2]], names=['sc', 'idx1'])
    right = Table([coords, [10, 20]], names=['sc', 'idx2'])
    with pytest.raises(TypeError, match='one or more key columns are not sortable'):
        table.join(left, right, keys='sc')
def test_join_non_1d_key_column():
    """A multi-dimensional key column is rejected with a clear error."""
    nd_col = [[1, 2], [3, 4]]
    flat_col = [1, 2]
    left = Table([nd_col, flat_col], names=['a', 'b'])
    right = left.copy()
    with pytest.raises(ValueError, match="key column 'a' must be 1-d"):
        table.join(left, right, keys='a')
def test_get_out_class():
    """_get_out_class picks the most specific common column class and
    rejects incompatible mixes (Column vs Quantity)."""
    plain = table.Column([1, 2])
    masked = table.MaskedColumn([1, 2])
    quantity = [1, 2] * u.m

    assert _get_out_class([plain, masked]) is masked.__class__
    assert _get_out_class([masked, plain]) is masked.__class__
    assert _get_out_class([plain, plain]) is plain.__class__
    assert _get_out_class([plain]) is plain.__class__
    with pytest.raises(ValueError):
        _get_out_class([plain, quantity])
    with pytest.raises(ValueError):
        _get_out_class([quantity, plain])
def test_masking_required_exception():
    """
    Outer join, hstack and vstack must fail for a mixin column which
    does not support masking.
    """
    mixin_col = [1, 2, 3, 4] * u.m
    long_tbl = table.QTable([[1, 2, 3, 4], mixin_col], names=['a', 'b'])
    short_tbl = table.QTable([[1, 2], mixin_col[:2]], names=['a', 'c'])

    with pytest.raises(NotImplementedError) as exc:
        table.vstack([long_tbl, short_tbl], join_type='outer')
    assert 'vstack requires masking' in str(exc.value)

    with pytest.raises(NotImplementedError) as exc:
        table.hstack([long_tbl, short_tbl], join_type='outer')
    assert 'hstack requires masking' in str(exc.value)

    with pytest.raises(NotImplementedError) as exc:
        table.join(long_tbl, short_tbl, join_type='outer')
    assert 'join requires masking' in str(exc.value)
def test_stack_columns():
    """Stacking bare columns selects the proper output class (Table/QTable)."""
    plain = table.Column([1, 2])
    masked = table.MaskedColumn([1, 2])
    quantity = [1, 2] * u.m
    times = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
    coords = SkyCoord([1, 2], [3, 4], unit='deg')
    col_with_unit = table.Column([11, 22], unit=u.m)

    # Any Quantity participant promotes the result to QTable.
    out = table.hstack([plain, quantity])
    assert out.__class__ is table.QTable
    assert out.masked is False
    out = table.hstack([quantity, plain])
    assert out.__class__ is table.QTable
    assert out.masked is False
    out = table.hstack([masked, quantity])
    assert out.__class__ is table.QTable
    assert out.masked is False
    out = table.hstack([plain, masked])
    assert out.__class__ is table.Table
    assert out.masked is False
    out = table.vstack([quantity, quantity])
    assert out.__class__ is table.QTable
    out = table.vstack([plain, plain])
    assert out.__class__ is table.Table
    out = table.hstack([plain, times])
    assert out.__class__ is table.Table
    out = table.hstack([plain, coords])
    assert out.__class__ is table.Table
    out = table.hstack([quantity, times, coords])
    assert out.__class__ is table.QTable

    # vstack-ing a plain Column with a Quantity is ambiguous and must raise,
    # including a Column that merely carries a unit.
    with pytest.raises(ValueError):
        table.vstack([plain, quantity])
    with pytest.raises(ValueError):
        table.vstack([quantity, col_with_unit])
def test_mixin_join_regression():
    """Regression test: an outer join on Quantity key columns used to raise
    "ValueError: NumPy boolean array indexing assignment cannot assign
    6 input values to the 4 output values where the mask is true".
    """
    left = QTable()
    left['index'] = [1, 2, 3, 4, 5]
    left['flux1'] = [2, 3, 2, 1, 1] * u.Jy
    left['flux2'] = [2, 3, 2, 1, 1] * u.Jy

    right = QTable()
    right['index'] = [3, 4, 5, 6]
    right['flux1'] = [2, 1, 1, 3] * u.Jy
    right['flux2'] = [2, 1, 1, 3] * u.Jy

    joined = table.join(left, right, keys=('index', 'flux1', 'flux2'),
                        join_type='outer')
    assert len(joined) == 6
| bsd-3-clause |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest (Odoo/OpenERP module descriptor) for the Gengo website
# translator.  Keys follow the standard __openerp__.py schema.
{
    'name': 'Website Gengo Translator',
    'category': 'Website',
    'version': '1.0',
    'description': """
Website Gengo Translator
========================
Translate you website in one click
""",
    'author': 'OpenERP SA',
    # Requires both the website builder and the Gengo translation backend.
    'depends': [
        'website',
        'base_gengo'
    ],
    'data': [
        'views/website_gengo.xml',
    ],
    'qweb': [],  # no client-side QWeb templates shipped
    'installable': True,
}
| agpl-3.0 |
# Copyright 2012 OpenStack Foundation  # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from nova import filters
from nova import loadables
from nova import test
class Filter1(filters.BaseFilter):
    """Test Filter class #1."""
class Filter2(filters.BaseFilter):
    """Test Filter class #2."""
class FiltersTestCase(test.NoDBTestCase):
    """Tests for filters.BaseFilter and filters.BaseFilterHandler."""

    def test_filter_all(self):
        """filter_all() lazily yields only objects accepted by _filter_one()."""
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()

        self.mox.StubOutWithMock(base_filter, '_filter_one')
        base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()

        result = base_filter.filter_all(filter_obj_list, filter_properties)
        # filter_all() must be lazy: it returns a generator, not a list.
        self.assertTrue(inspect.isgenerator(result))
        self.assertEqual(['obj1', 'obj3'], list(result))

    def test_filter_all_recursive_yields(self):
        """filter_all() accepts generators returned by previous filter_all()s."""
        # filter_all() yields results.  We want to make sure that we can
        # call filter_all() with generators returned from previous calls
        # to filter_all().
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()

        self.mox.StubOutWithMock(base_filter, '_filter_one')
        total_iterations = 200

        # The order that _filter_one is going to get called gets
        # confusing because we will be recursively yielding things..
        # We are going to simulate the first call to filter_all()
        # returning False for 'obj2'.  So, 'obj1' will get yielded
        # 'total_iterations' number of times before the first filter_all()
        # call gets to processing 'obj2'.  We then return 'False' for it.
        # After that, 'obj3' gets yielded 'total_iterations' number of
        # times.
        for x in xrange(total_iterations):
            base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        for x in xrange(total_iterations):
            base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()

        objs = iter(filter_obj_list)
        for x in xrange(total_iterations):
            # Pass in generators returned from previous calls.
            objs = base_filter.filter_all(objs, filter_properties)
        self.assertTrue(inspect.isgenerator(objs))
        self.assertEqual(['obj1', 'obj3'], list(objs))

    def test_get_filtered_objects(self):
        """All filters run in order; each consumes the previous filter's output."""
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_objs_last = ['last', 'filter3', 'objects3']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')

        Filter1().AndReturn(filt1_mock)
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        Filter2().AndReturn(filt2_mock)
        filt2_mock.run_filter_for_index(0).AndReturn(True)
        filt2_mock.filter_all(filter_objs_second,
                              filter_properties).AndReturn(filter_objs_last)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_classes = [Filter1, Filter2]
        result = filter_handler.get_filtered_objects(filter_classes,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertEqual(filter_objs_last, result)

    def test_get_filtered_objects_for_index(self):
        """Test that we don't call a filter when its
        run_filter_for_index() method returns false
        """
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')

        Filter1().AndReturn(filt1_mock)
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        Filter2().AndReturn(filt2_mock)
        # return false so filter_all will not be called
        filt2_mock.run_filter_for_index(0).AndReturn(False)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_classes = [Filter1, Filter2]
        result = filter_handler.get_filtered_objects(filter_classes,
                                                     filter_objs_initial,
                                                     filter_properties)
        # Bug fix: 'result' was previously computed but never checked, so the
        # test passed vacuously.  Since Filter2 is skipped, the objects
        # produced by Filter1 must come through unchanged.
        self.assertEqual(filter_objs_second, result)

    def test_get_filtered_objects_none_response(self):
        """A filter returning None short-circuits and yields None overall."""
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        # Shouldn't be called.
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')

        Filter1().AndReturn(filt1_mock)
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(None)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_classes = [Filter1, Filter2]
        result = filter_handler.get_filtered_objects(filter_classes,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertIsNone(result)
| apache-2.0 |
from django.forms.widgets import ClearableFileInput
from django.utils.safestring import mark_safe
from easy_thumbnails.files import get_thumbnailer
from easy_thumbnails.conf import settings
class ImageClearableFileInput(ClearableFileInput):
    """
    Use this widget to show a thumbnail of the image next to the image file.
    If using the admin and :class:`~easy_thumbnails.fields.ThumbnailerField`,
    you can use this widget automatically with the following code::
        class MyModelAdmin(admin.ModelAdmin):
            formfield_overrides = {
                ThumbnailerField: {'widget': ImageClearableFileInput},
            }
    """
    # Override of ClearableFileInput's template: clear checkbox first, then
    # the file input on its own line.
    template_with_initial = (
        u'%(clear_template)s<br />'
        u'%(input_text)s: %(input)s'
    )
    # Wrapper that appends a thumbnail linking to the full-size source image.
    template_with_thumbnail = (
        u'%(template)s<br />'
        u'<a href="%(source_url)s" target="_blank">%(thumb)s</a>'
    )

    def __init__(self, thumbnail_options=None, attrs=None):
        """
        Set up the thumbnail options for this widget.
        :param thumbnail_options: options used to generate the thumbnail. If no
            ``size`` is given, it'll be ``(80, 80)``. If not provided at all,
            default options will be used from the
            :attr:`~easy_thumbnails.conf.Settings.THUMBNAIL_WIDGET_OPTIONS`
            setting.
        """
        thumbnail_options = (
            thumbnail_options or settings.THUMBNAIL_WIDGET_OPTIONS)
        # Copy before mutating so the caller's (or settings') dict is untouched.
        thumbnail_options = thumbnail_options.copy()
        if 'size' not in thumbnail_options:
            thumbnail_options['size'] = (80, 80)
        self.thumbnail_options = thumbnail_options
        super(ImageClearableFileInput, self).__init__(attrs)

    def thumbnail_id(self, name):
        """Return the HTML id attribute used for the thumbnail ``<img>`` tag."""
        return '%s_thumb_id' % name

    def get_thumbnail(self, value):
        """Generate (or fetch the cached) thumbnail for the given file *value*."""
        thumbnailer = get_thumbnailer(value, value.name)
        # Read from / write to the same storages as the field's file.
        thumbnailer.source_storage = value.storage
        if hasattr(value, 'thumbnail_storage'):
            thumbnailer.thumbnail_storage = value.thumbnail_storage
        return thumbnailer.get_thumbnail(self.thumbnail_options)

    def render(self, name, value, attrs=None):
        """Render the standard widget, appending a linked thumbnail when the
        bound value is a real stored file."""
        output = super(ImageClearableFileInput, self).render(
            name, value, attrs)
        # Unsaved/in-memory uploads have no storage; skip the thumbnail then.
        if not value or not hasattr(value, 'storage'):
            return output
        thumb = self.get_thumbnail(value)
        substitution = {
            'template': output,
            'thumb': thumb.tag(id=self.thumbnail_id(name)),
            'source_url': value.storage.url(value.name),
        }
        return mark_safe(self.template_with_thumbnail % substitution)
| mit |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import sys
reload(sys)  # Python 2 only: re-import sys to re-expose setdefaultencoding()
sys.setdefaultencoding('utf-8')  # HACK: force utf-8 so dump output never raises UnicodeEncodeError
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'  # must be set before any Django import below
from optparse import make_option
from django.core.management.base import CommandError
from pootle_app.management.commands import PootleCommand
from pootle_app.models import Directory
from pootle_project.models import Project
# Model name -> tuple of attribute names serialized for that model by
# Command.dumped().  Callable attributes are invoked; values are printed
# tab-separated with newlines escaped.
DUMPED = {
    'TranslationProject': ('pootle_path', 'real_path', 'disabled'),
    'Store': ('file', 'translation_project', 'pootle_path', 'name', 'state'),
    'Directory': ('name', 'parent', 'pootle_path'),
    'Unit': ('source', 'target', 'source_wordcount', 'target_wordcount',
             'developer_comment', 'translator_comment', 'locations',
             'isobsolete', 'isfuzzy', 'istranslated'),
    'Suggestion': ('target_f', 'user_id'),
    'Language': ('code', 'fullname', 'pootle_path'),
    'Project': ('code', 'fullname', 'checkstyle', 'localfiletype',
                'treestyle', 'source_language', 'ignoredfiles',
                'screenshot_search_prefix', 'disabled')
}
class Command(PootleCommand):
    """Management command that dumps raw records or cached statistics.

    ``--stats`` prints one CSV-ish line of cached stats per tree node;
    ``--data`` prints every Directory/Store/Unit/Suggestion record reachable
    from the root (or from the selected translation projects).
    ``--stop-level`` bounds the recursion depth (-1 means unlimited).
    """
    help = "Dump data."

    shared_option_list = (
        make_option('--stats', action='store_true', dest='stats',
                    help='Dump stats'),
        make_option('--data', action='store_true', dest='data',
                    help='Data all data'),
        make_option('--stop-level', action='store', dest='stop_level',
                    default=-1),
    )
    option_list = PootleCommand.option_list + shared_option_list

    def handle_all(self, **options):
        """Dump the whole tree, or defer to per-TP handling when the command
        was restricted with --project/--language."""
        if not self.projects and not self.languages:
            stats = options.get('stats', False)
            data = options.get('data', False)
            stop_level = int(options.get('stop_level', -1))
            if stats:
                self.dump_stats(stop_level=stop_level)
                return
            if data:
                self.dump_all(stop_level=stop_level)
                return
            raise CommandError("Set --data or --stats option.")
        else:
            super(Command, self).handle_all(**options)

    def handle_translation_project(self, tp, **options):
        """Dump a single translation project (called by PootleCommand for
        each TP matching the --project/--language filters)."""
        stats = options.get('stats', False)
        data = options.get('data', False)
        stop_level = int(options.get('stop_level', -1))
        if stats:
            res = {}
            self._dump_stats(tp.directory, res, stop_level=stop_level)
            return
        if data:
            self._dump_item(tp.directory, 0, stop_level=stop_level)
            return
        raise CommandError("Set --data or --stats option.")

    def dump_stats(self, stop_level):
        """Dump cached stats for every project tree."""
        res = {}
        for prj in Project.objects.all():
            self._dump_stats(prj, res, stop_level=stop_level)

    def _dump_stats(self, item, res, stop_level):
        """Recursively collect stats for *item*'s children, then print one
        line for *item* itself (post-order, so children appear first)."""
        key = item.get_cachekey()
        item.initialize_children()
        if stop_level != 0 and item.children:
            if stop_level > 0:
                stop_level = stop_level - 1
            for child in item.children:
                self._dump_stats(child, res,
                                 stop_level=stop_level)
        # Per-node stats only; child contributions were dumped above.
        res[key] = (item.get_stats(include_children=False))
        if res[key]['lastaction']:
            last_action_id = res[key]['lastaction']['id']
        else:
            last_action_id = None
        if res[key]['lastupdated']:
            last_updated_id = res[key]['lastupdated']['id']
        else:
            last_updated_id = None
        out = u"%s %s,%s,%s,%s,%s,%s,%s,%s" % \
            (key, res[key]['total'], res[key]['translated'],
             res[key]['fuzzy'], res[key]['suggestions'],
             res[key]['critical'], res[key]['is_dirty'],
             last_action_id, last_updated_id)
        self.stdout.write(out)

    def dump_all(self, stop_level):
        """Dump every record reachable from the root directory."""
        root = Directory.objects.root
        self._dump_item(root, 0, stop_level=stop_level)

    def _dump_item(self, item, level, stop_level):
        """Recursively print *item*, its related objects and its children."""
        self.stdout.write(self.dumped(item))
        if item.is_dir:
            # item is a Directory
            if item.is_project():
                self.stdout.write(self.dumped(item.project))
            elif item.is_language():
                self.stdout.write(self.dumped(item.language))
            elif item.is_translationproject():
                try:
                    self.stdout.write(self.dumped(item.translationproject))
                except Exception:
                    # Bug fix: this was a bare ``except:`` which also swallowed
                    # SystemExit/KeyboardInterrupt.  Only unexpected model
                    # lookup failures (e.g. a dangling translationproject
                    # reference) should be skipped, best-effort.
                    pass
        else:
            # item should be a Store
            for unit in item.units:
                self.stdout.write(self.dumped(unit))
                for sg in unit.get_suggestions():
                    self.stdout.write(self.dumped(sg))
        if stop_level != level:
            item.initialize_children()
            if item.children:
                for child in item.children:
                    self._dump_item(child, level + 1, stop_level=stop_level)

    def dumped(self, item):
        """Return ``"<id>:<Model>\\tattr=value\\t..."`` for *item*.

        Attributes come from the DUMPED table; callables are invoked and
        newlines in values are escaped so each record stays on one line.
        """
        def get_param(param):
            p = getattr(item, param)
            res = p() if callable(p) else p
            res = u"%s" % res
            res = res.replace('\n', '\\n')
            return (param, res)
        return u"%d:%s\t%s" % \
            (
                item.id,
                item._meta.object_name,
                "\t".join(
                    u"%s=%s" % (k, v)
                    for k, v in map(get_param, DUMPED[item._meta.object_name])
                )
            )
| gpl-3.0 |
from django.contrib.auth import BACKEND_SESSION_KEY
from functools import wraps

from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.core.cache import cache
from django.conf import settings
from social_auth.models import UserSocialAuth
from social_auth.views import complete as social_complete
from social_auth.utils import setting
from social_auth.backends.contrib.vkontakte import VKontakteOAuth2Backend, vkontakte_api
def is_complete_authentication(request):
    """Return True when the current user is authenticated *via the VKontakte
    OAuth2 backend* (not just authenticated by any backend)."""
    return request.user.is_authenticated() and VKontakteOAuth2Backend.__name__ in request.session.get(BACKEND_SESSION_KEY, '')
def get_access_token(user):
    """Return the VKontakte OAuth2 access token for *user*, or None.

    The token is looked up in the Django cache first; on a miss it is read
    from the user's UserSocialAuth row and cached until the token expires.
    """
    key = str(user.id)
    access_token = cache.get(key)
    # If cache is empty read the database
    if access_token is None:
        try:
            social_user = user.social_user if hasattr(user, 'social_user') \
                else UserSocialAuth.objects.get(user=user.id, provider=VKontakteOAuth2Backend.name)
        except UserSocialAuth.DoesNotExist:
            return None
        if social_user.extra_data:
            access_token = social_user.extra_data.get('access_token')
            expires = social_user.extra_data.get('expires')
            # NOTE(review): with no 'expires' the timeout is 0, which older
            # Django treats as "cache forever" -- confirm that is intended.
            cache.set(key, access_token, int(expires) if expires is not None else 0)
    return access_token
# VK decorator to setup environment
def vkontakte_decorator(func):
    """View decorator that completes VKontakte social auth when needed.

    Ensures the view either receives ``access_token`` in kwargs (user fully
    authenticated via the VKontakte backend), receives ``auth_response``
    (the social-auth pipeline produced an HttpResponse), or sees an
    AnonymousUser.
    """
    # Bug fix: without functools.wraps the wrapper hid the wrapped view's
    # __name__/__doc__, breaking introspection, debugging and URL resolvers
    # that rely on the view name.
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        user = request.user
        # User must be logged via VKontakte backend in order to ensure we
        # talk about the same person
        if not is_complete_authentication(request):
            try:
                user = social_complete(request, VKontakteOAuth2Backend.name)
            except (ValueError, AttributeError):
                pass  # no matter if failed
        # Not recommended way for VK, but still something we need to be
        # aware of
        if isinstance(user, HttpResponse):
            kwargs.update({'auth_response': user})
        # Need to re-check the completion
        else:
            if is_complete_authentication(request):
                kwargs.update({'access_token': get_access_token(request.user)})
            else:
                request.user = AnonymousUser()
        return func(request, *args, **kwargs)
    return wrapper
@vkontakte_decorator
def vkontakte_view(request, *args, **kwargs):
    """Render the VKontakte application page.

    If the decorator produced a ready auth redirect containing a form, pass
    it straight through; otherwise render the app template.
    """
    # If there is a ready response just return it.  Not recommended because
    # pipeline redirects fail the normal workflow here.
    auth_response = kwargs.get('auth_response')
    if auth_response:
        for item in auth_response.items():
            if item[0] == 'Location' and 'form' in item[1]:
                return auth_response
    # Bug fix: the final line of this function had dataset/license junk fused
    # onto it ("| bsd-3-clause |"), which made the module unparseable.
    return render_to_response('vkontakte_app.html',
        {'vk_app_id': settings.VKONTAKTE_APP_AUTH['id'] if hasattr(settings, 'VKONTAKTE_APP_AUTH') else None,
         'app_scope': ','.join(settings.VKONTAKTE_OAUTH2_EXTRA_SCOPE),
         'warning': not request.GET.get('user_id')},
        RequestContext(request))
# Copyright (c) 2009 The Chromium Embedded Framework Authors. All rights
# reserved.
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from optparse import OptionParser
import os
import sys
from patch_util import from_file
# cannot be loaded as a module
if __name__ != "__main__":
    sys.stderr.write('This file cannot be loaded as a module!')
    sys.exit()
# currently only apply patch for android port
if not os.environ.get('XWALK_OS_ANDROID'):
    sys.exit()
# parse command-line options
disc = """
This utility applies patch files.
"""
parser = OptionParser(description=disc)
parser.add_option('--patch-config', dest='patchconfig', metavar='DIR',
                  help='patch configuration file')
(options, args) = parser.parse_args()
# the patchconfig option is required
if options.patchconfig is None:
    parser.print_help(sys.stdout)
    sys.exit()
# normalize the patch directory value: absolute path, forward slashes,
# guaranteed trailing '/'
patchdir = os.path.dirname(
    os.path.abspath(options.patchconfig)).replace('\\', '/')
if patchdir[-1] != '/':
    patchdir += '/'
# check if the patching should be skipped
# NOTE(review): `nopatch` is assigned but never consulted below, so a NOPATCH
# marker only prints the message while patches are still applied -- confirm
# whether it should gate the apply loop.
if os.path.isfile(patchdir + 'NOPATCH'):
    nopatch = True
    sys.stdout.write('NOPATCH exists -- files have not been patched.\n')
else:
    nopatch = False
# locate the patch configuration file
if not os.path.isfile(options.patchconfig):
    sys.stderr.write('File '+options.patchconfig+' does not exist.\n')
    sys.exit()
# The config file is a Python source defining a `patches` list; it is
# executed with Python 2's execfile into a fresh namespace.
scope = {}
execfile(options.patchconfig, scope)
patches = scope["patches"]
for patch in patches:
    pfile = patchdir + 'patches/' + patch['name'] + '.patch'
    dopatch = True
    if 'condition' in patch:
        # Check that the environment variable is set.
        if patch['condition'] not in os.environ:
            sys.stderr.write('Skipping patch file ' + pfile + '\n')
            dopatch = False
    if dopatch:
        if not os.path.isfile(pfile):
            sys.stderr.write('Patch file ' + pfile + ' does not exist.\n')
        else:
            sys.stderr.write('Reading patch file ' + pfile + '\n')
            pdir = patch['path']
            patchObj = from_file(pfile)
            patchObj.apply(pdir)
            # Patches can carry an informational note shown after applying.
            if 'note' in patch:
                separator = '-' * 79 + '\n'
                sys.stderr.write(separator)
                sys.stderr.write('NOTE: '+patch['note']+'\n')
                sys.stderr.write(separator)
| bsd-3-clause |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file that is called by Juju.
"""
import json
import httplib
import os
import time
import socket
import subprocess
import sys
import urlparse
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
from lib.registrator import Registrator
hooks = hookenv.Hooks()
@hooks.hook('api-relation-changed')
def api_relation_changed():
    """
    On the relation to the api server, this function determines the appropriate
    architecture and the configured version to copy the kubernetes binary files
    from the kubernetes-master charm and installs it locally on this machine.
    """
    hookenv.log('Starting api-relation-changed')
    charm_dir = path(hookenv.charm_dir())
    # Get the package architecture, rather than from the kernel (uname -m).
    arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
    kubernetes_bin_dir = path('/opt/kubernetes/bin')
    # Get the version of kubernetes to install, published on the relation.
    version = subprocess.check_output(['relation-get', 'version']).strip()
    print('Relation version: ', version)
    if not version:
        print('No version present in the relation.')
        exit(0)
    # Skip the download when this exact version is already installed.
    version_file = charm_dir / '.version'
    if version_file.exists():
        previous_version = version_file.text()
        print('Previous version: ', previous_version)
        if version == previous_version:
            exit(0)
    # Can not download binaries while the service is running, so stop it.
    # TODO: Figure out a better way to handle upgraded kubernetes binaries.
    for service in ('kubelet', 'proxy'):
        if host.service_running(service):
            host.service_stop(service)
    command = ['relation-get', 'private-address']
    # Get the kubernetes-master address.
    server = subprocess.check_output(command).strip()
    print('Kubernetes master private address: ', server)
    installer = KubernetesInstaller(arch, version, server, kubernetes_bin_dir)
    installer.download()
    installer.install()
    # Write the most recently installed version number to the file.
    version_file.write_text(version)
    # Re-render configs and restart services with the new binaries.
    relation_changed()
@hooks.hook('etcd-relation-changed',
            'network-relation-changed')
def relation_changed():
    """Render service configs, (re)start kubelet/proxy and register the node.

    Runs on etcd/network relation changes; bails out early while required
    relation data (etcd servers, api server) is still missing.
    """
    template_data = get_template_data()
    # Check required keys
    for k in ('etcd_servers', 'kubeapi_server'):
        if not template_data.get(k):
            print('Missing data for %s %s' % (k, template_data))
            return
    print('Running with\n%s' % template_data)
    # Setup kubernetes supplemental group
    setup_kubernetes_group()
    # Register upstart managed services
    for n in ('kubelet', 'proxy'):
        # Restart when the rendered upstart config changed or the service
        # is not running.
        if render_upstart(n, template_data) or not host.service_running(n):
            print('Starting %s' % n)
            host.service_restart(n)
    # Register machine via api
    print('Registering machine')
    register_machine(template_data['kubeapi_server'])
    # Save the marker (for restarts to detect prev install)
    template_data.save()
def get_template_data():
    """Collect relation data into a persistable hookenv.Config mapping.

    Gathers the overlay type, etcd server URLs, the API server URL and the
    local bind addresses used to render the kubelet/proxy upstart templates.
    """
    rels = hookenv.relations()
    template_data = hookenv.Config()
    template_data.CONFIG_FILE_NAME = '.unit-state'
    overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
    etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
    api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
    # kubernetes master isn't ha yet.
    if api_servers:
        api_info = api_servers.pop()
        api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
    template_data['overlay_type'] = overlay_type
    template_data['kubelet_bind_addr'] = _bind_addr(
        hookenv.unit_private_ip())
    template_data['proxy_bind_addr'] = _bind_addr(
        hookenv.unit_get('public-address'))
    # NOTE(review): with no api relation this stores the empty list, which
    # callers treat as "missing data" -- confirm intended.
    template_data['kubeapi_server'] = api_servers
    template_data['etcd_servers'] = ','.join([
        'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
    # Juju unit name with '/' replaced so it is usable in file/host names.
    template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
        '/', '-')
    return _encode(template_data)
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError('Could not resolve private address')
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
return d
def get_scoped_rel_attr(rel_name, rels, attr):
    """Return *attr* from the unit on relation *rel_name* that shares this
    unit's private address, or None when no such unit publishes it."""
    for rel_id, rel_data in rels.get(rel_name, {}).items():
        for unit_id, unit_data in rel_data.items():
            # Only consider the unit co-located with us (same private IP).
            if unit_data.get('private-address') != hookenv.unit_private_ip():
                continue
            value = unit_data.get(attr)
            if value:
                return value
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
    """Collect the requested *keys* from every remote unit on *rel_name*.

    Units missing any key are skipped; a single key yields scalars, multiple
    keys yield lists.
    """
    hosts = []
    for rel_id, rel_data in rels.get(rel_name, {}).items():
        for unit_id, unit_data in rel_data.items():
            if unit_id == hookenv.local_unit():
                continue  # only remote units count
            values = [unit_data.get(k) for k in keys]
            if not all(values):
                continue  # incomplete data; unit not ready yet
            hosts.append(values[0] if len(values) == 1 else values)
    return hosts
def render_upstart(name, data):
    """Render the upstart template for service *name* into /etc/init.

    Returns True when the target file was (re)written, False when the
    existing file already matches the rendered content.
    """
    template_file = os.path.join(
        os.environ.get('CHARM_DIR'), 'files', '%s.upstart.tmpl' % name)
    with open(template_file) as src:
        rendered = src.read() % data
    target = '/etc/init/%s.conf' % name
    if os.path.exists(target):
        with open(target) as existing:
            if existing.read() == rendered:
                return False
    with open(target, 'w') as dst:
        dst.write(rendered)
    return True
def register_machine(apiserver, retry=False):
    """Register this machine as a Minion/Node with the Kubernetes API server.

    :param apiserver: URL of the kube-apiserver, e.g. ``http://host:8080``.
    :param retry: NOTE(review): accepted but never used in the body; kept
        only for interface compatibility with callers.
    """
    parsed = urlparse.urlparse(apiserver)
    # identity = hookenv.local_unit().replace('/', '-')
    private_address = hookenv.unit_private_ip()
    # First line of /proc/meminfo is 'MemTotal: <value> kB'; take the number.
    with open('/proc/meminfo') as fh:
        info = fh.readline()
        mem = info.strip().split(':')[1].strip().split()[0]
    cpus = os.sysconf('SC_NPROCESSORS_ONLN')
    registration_request = Registrator()
    registration_request.data['Kind'] = 'Minion'
    registration_request.data['id'] = private_address
    registration_request.data['name'] = private_address
    registration_request.data['metadata']['name'] = private_address
    registration_request.data['spec']['capacity']['mem'] = mem + ' K'
    registration_request.data['spec']['capacity']['cpu'] = cpus
    registration_request.data['spec']['externalID'] = private_address
    registration_request.data['status']['hostIP'] = private_address
    response, result = registration_request.register(parsed.hostname,
                                                     parsed.port,
                                                     '/api/v1/nodes')
    print(response)
    try:
        registration_request.command_succeeded(response, result)
    except ValueError:
        # This happens when we have already registered
        # for now this is OK
        pass
def setup_kubernetes_group():
    """Ensure the ``kubernetes`` user is a member of the ``docker`` group.

    Runs ``groups kubernetes`` and, if ``docker`` is absent from the
    output, appends the user to the group with ``usermod``.
    """
    # check_output returns bytes on Python 3; decode so the substring
    # test below works on both Python 2 and 3.
    output = subprocess.check_output(['groups', 'kubernetes']).decode()
    # TODO: check group exists
    if 'docker' not in output:
        subprocess.check_output(
            ['usermod', '-a', '-G', 'docker', 'kubernetes'])
# Entry point: dispatch to the handler registered for the invoked hook
# (the `hooks` registry is defined earlier in this file).
if __name__ == '__main__':
    hooks.execute(sys.argv)
| apache-2.0 |
Stibbons/python-project-bootstrap | src/ppb/cli/main.py | 1 | 1483 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ppb.cli import sub_cmd
from ppb.cli import version
import argparse
import sys
class Main(object):
    '''
    This command line tool ...
    '''

    def main(self):
        """Parse command-line arguments and dispatch to the chosen subcommand."""
        # Global command-line arguments
        parser = argparse.ArgumentParser(description=self.__doc__)
        parser.add_argument('--debug', action='store_true', default=False,
                            help='Enable debugging output + automatic pdb attach on exception')
        parser.add_argument('--quiet', action='store_true', default=False,
                            help='Suppress output')
        parser.add_argument('-v', '--verbose', action='store_true', default=False,
                            help='Enable additional output')
        parser.add_argument('-V', '--version', action='version', version=version.__version__)
        # `dest` + `required` make argparse emit a proper usage error when no
        # subcommand is given; on Python 3 subparsers are optional by default,
        # so `opts.subcommand` below would otherwise raise AttributeError.
        subparser = parser.add_subparsers(title='Subcommands', description='Subcommands',
                                          dest='command')
        subparser.required = True
        # Register every declared subcommand with its own argument parser.
        for command_name in sub_cmd.__all__:
            sub_command = getattr(sub_cmd, command_name)()
            p = subparser.add_parser(sub_command.name, help=sub_command.help)
            p.set_defaults(subcommand=sub_command,
                           subsubcommand=sub_command.execute)
            sub_command.addParser(p)
        opts = parser.parse_args(sys.argv[1:])
        cmd = opts.subcommand
        cmd.options = opts
        cmd.execute()
| bsd-3-clause |
TimBuckley/effective_django | django/contrib/sessions/backends/db.py | 113 | 2930 | import logging
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, transaction, router
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
    """
    Implements database session store.
    """
    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key)

    def load(self):
        # Fetch the row for this key, rejecting already-expired sessions;
        # on any failure fall back to a brand-new empty session.
        try:
            s = Session.objects.get(
                session_key=self.session_key,
                expire_date__gt=timezone.now()
            )
            return self.decode(s.session_data)
        except (Session.DoesNotExist, SuspiciousOperation) as e:
            if isinstance(e, SuspiciousOperation):
                # Decode failures may indicate tampering; route them to the
                # per-exception django.security.* logger.
                logger = logging.getLogger('django.security.%s' %
                        e.__class__.__name__)
                logger.warning(force_text(e))
            self.create()
            return {}

    def exists(self, session_key):
        # True if any row with this key exists, expired or not.
        return Session.objects.filter(session_key=session_key).exists()

    def create(self):
        # Keep generating keys until one inserts without collision.
        while True:
            self._session_key = self._get_new_session_key()
            try:
                # Save immediately to ensure we have a unique entry in the
                # database.
                self.save(must_create=True)
            except CreateError:
                # Key wasn't unique. Try again.
                continue
            self.modified = True
            self._session_cache = {}
            return

    def save(self, must_create=False):
        """
        Saves the current session data to the database. If 'must_create' is
        True, a database error will be raised if the saving operation doesn't
        create a *new* entry (as opposed to possibly updating an existing
        entry).
        """
        obj = Session(
            session_key=self._get_or_create_session_key(),
            session_data=self.encode(self._get_session(no_load=must_create)),
            expire_date=self.get_expiry_date()
        )
        using = router.db_for_write(Session, instance=obj)
        try:
            # force_insert turns a duplicate key into an IntegrityError
            # instead of silently updating the existing row.
            with transaction.atomic(using=using):
                obj.save(force_insert=must_create, using=using)
        except IntegrityError:
            if must_create:
                raise CreateError
            raise

    def delete(self, session_key=None):
        # Default to deleting the current session, if one exists.
        if session_key is None:
            if self.session_key is None:
                return
            session_key = self.session_key
        try:
            Session.objects.get(session_key=session_key).delete()
        except Session.DoesNotExist:
            pass

    @classmethod
    def clear_expired(cls):
        # Bulk-remove every session past its expiry date.
        Session.objects.filter(expire_date__lt=timezone.now()).delete()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
| bsd-3-clause |
plast-lab/cclyzer | src/main/cclyzer/cli/load_module.py | 2 | 1818 | import logging
from os import path
from .commands import CliCommand
from .. import Analysis
from ..project import ProjectManager
from ..runtime import Environment as env
# Initialize logger and project manager for this module
_logger = logging.getLogger(__name__)
_projects = ProjectManager()
class LoadModuleCommand(CliCommand):
    """CLI subcommand that attaches a logic module to an existing analysis."""

    description = 'Load logic module to analysis'

    @classmethod
    def init_parser_args(cls, parser):
        """Register this subcommand's positional arguments and flags."""
        # Restrict the module argument to projects known to the manager.
        known_modules = [project.name for project in _projects]
        parser.add_argument('module', metavar='MODULE', choices=known_modules,
                            help='Logic module to be loaded')
        parser.add_argument('analysis', metavar='ANALYSIS_DIR',
                            help='Analysis directory')
        parser.add_argument('--load-deps', action='store_true')

    @property
    def defaultconfig(self):
        """Path of the user-level configuration file."""
        return path.join(env().user_config_dir, "config")

    def __init__(self, args):
        CliCommand.__init__(self, args)
        self._module = args.module
        self._analysis = Analysis.load(args.analysis)
        _logger.info('Loaded analysis from disk: %s', self.analysis)

    @property
    def module(self):
        """Name of the logic module to be loaded."""
        return self._module

    @property
    def analysis(self):
        """The Analysis instance restored from disk."""
        return self._analysis

    def run(self):
        """Load the selected logic module into the analysis."""
        _logger.info('Loading module %s', self.module)
        self.analysis.load_project(self.module)
        _logger.info('Module %s is loaded', self.module)
| mit |
lukauskas/means | src/means/inference/hypercube.py | 2 | 2158 | import random
import numpy as np
def hypercube(number_of_samples, variables):
    """
    Latin Hypercube Sampling.
    See https://mathieu.fenniak.net/latin-hypercube-sampling/ for an
    intuitive explanation.

    :param number_of_samples: number of segments/samples
    :param variables: ranges for the parameters/initial conditions,
        e.g. [(70, 110), (0.1, 0.5), ...]
    :return: list of samples, one per segment
    """
    dimensions = len(variables)

    # Partition [0, 1) into `number_of_samples` equal-width segments.
    step = 1.0 / number_of_samples
    segments = [(step * k, step * (k + 1)) for k in range(number_of_samples)]

    # One column per dimension: one uniform draw inside each segment,
    # then a random permutation of those draws.
    columns = []
    for _ in range(dimensions):
        picks = []
        for low, high in segments:
            draw = random.random()
            picks.append(draw * (high - low) + low)
        # TODO: replace the below line with random.shuffle(picks)
        # (kept as random.sample so regression tests see the same ordering)
        columns.append(random.sample(picks, len(picks)))

    # Transpose: one sample (row) per segment, one variable per column.
    samples = [[column[row] for column in columns]
               for row in range(number_of_samples)]

    # Rescale each unit-interval value into its variable's range.
    for sample in samples:
        for idx, variable in enumerate(variables):
            if variable[1] == variable[0]:
                # Degenerate range: the variable is a fixed constant.
                sample[idx] = variable[1]
            else:
                sample[idx] = sample[idx] * (variable[1] - variable[0]) + variable[0]
    return samples
| mit |
trappn/crosshair | include/patterns.py | 1 | 5162 | import cv2
import numpy as np
# number of available patterns:
maxpat = 10
# defining functions for all possible patterns follow,
# activated by patternswitch function
# pattern1: Bruker style crosshair with circles and ticks
def pattern1(arr, width, height, x, y, rad, col):
    """Bruker-style crosshair: full-size cross, seven concentric circles
    and graduated tick marks on both axes.

    arr: image to draw on (modified in place).
    width, height: image dimensions in pixels.
    x, y: crosshair centre.
    rad: radius of the innermost circle; ticks are spaced rad/10 apart.
    col: drawing colour.
    """
    cv2.line(arr, (0, y), (width, y), col, 1)
    cv2.line(arr, (x, 0), (x, height), col, 1)
    # Concentric circles, rad apart (dead manual counter removed).
    for i in range(1, 8):
        cv2.circle(arr, (x, y), i * rad, col, 1)
    # ticks on the horizontal axis:
    intervalh = np.arange(0, width, float(rad) / 10)
    for j, i in enumerate(intervalh):
        # make every 5th tick longer, omit every 10th tick (at distance
        # rad it coincides with a circle crossing the axis):
        diff = int(round(i))
        if j % 5 == 0:
            if not j % 10 == 0:
                cv2.line(arr, (x + diff, y - 4), (x + diff, y + 4), col, 1)
                cv2.line(arr, (x - diff, y - 4), (x - diff, y + 4), col, 1)
        else:
            cv2.line(arr, (x + diff, y - 2), (x + diff, y + 3), col, 1)
            cv2.line(arr, (x - diff, y - 2), (x - diff, y + 3), col, 1)
    # ticks on the vertical axis:
    intervalv = np.arange(0, height, float(rad) / 10)
    for l, k in enumerate(intervalv):
        # make every 5th and 10th tick longer:
        diff = int(round(k))
        if l % 5 == 0:
            if l % 10 == 0:
                cv2.line(arr, (x - 6, y + diff), (x + 6, y + diff), col, 1)
                cv2.line(arr, (x - 6, y - diff), (x + 6, y - diff), col, 1)
            else:
                cv2.line(arr, (x - 4, y + diff), (x + 4, y + diff), col, 1)
                cv2.line(arr, (x - 4, y - diff), (x + 4, y - diff), col, 1)
        else:
            cv2.line(arr, (x - 2, y + diff), (x + 2, y + diff), col, 1)
            cv2.line(arr, (x - 2, y - diff), (x + 2, y - diff), col, 1)
# pattern2: simple crosshair with ticks
def pattern2(arr, width, height, x, y, rad, col):
    """Full-size crosshair with graduated tick marks on both axes.

    arr: image to draw on (modified in place).
    width, height: image dimensions in pixels.
    x, y: crosshair centre.
    rad: ticks are spaced rad/10 apart; every 5th tick is longer and
        every 10th longest.
    col: drawing colour.
    """
    cv2.line(arr, (0, y), (width, y), col, 1)
    cv2.line(arr, (x, 0), (x, height), col, 1)
    # ticks on the horizontal axis (enumerate replaces the manual counter):
    intervalh = np.arange(0, width, float(rad) / 10)
    for j, i in enumerate(intervalh):
        # make every 5th and 10th tick longer:
        diff = int(round(i))
        if j % 5 == 0:
            if j % 10 == 0:
                cv2.line(arr, (x + diff, y - 6), (x + diff, y + 6), col, 1)
                cv2.line(arr, (x - diff, y - 6), (x - diff, y + 6), col, 1)
            else:
                cv2.line(arr, (x + diff, y - 4), (x + diff, y + 4), col, 1)
                cv2.line(arr, (x - diff, y - 4), (x - diff, y + 4), col, 1)
        else:
            cv2.line(arr, (x + diff, y - 2), (x + diff, y + 3), col, 1)
            cv2.line(arr, (x - diff, y - 2), (x - diff, y + 3), col, 1)
    # ticks on the vertical axis:
    intervalv = np.arange(0, height, float(rad) / 10)
    for l, k in enumerate(intervalv):
        # make every 5th and 10th tick longer:
        diff = int(round(k))
        if l % 5 == 0:
            if l % 10 == 0:
                cv2.line(arr, (x - 6, y + diff), (x + 6, y + diff), col, 1)
                cv2.line(arr, (x - 6, y - diff), (x + 6, y - diff), col, 1)
            else:
                cv2.line(arr, (x - 4, y + diff), (x + 4, y + diff), col, 1)
                cv2.line(arr, (x - 4, y - diff), (x + 4, y - diff), col, 1)
        else:
            cv2.line(arr, (x - 2, y + diff), (x + 2, y + diff), col, 1)
            cv2.line(arr, (x - 2, y - diff), (x + 2, y - diff), col, 1)
# pattern3: simple crosshair without ticks
def pattern3(arr, width, height, x, y, rad, col):
    """Plain full-size crosshair: no ticks, no circles."""
    # Horizontal axis first, then the vertical axis, through (x, y).
    for start, end in (((0, y), (width, y)), ((x, 0), (x, height))):
        cv2.line(arr, start, end, col, 1)
# pattern4: simple crosshair with circles (no ticks)
def pattern4(arr, width, height, x, y, rad, col):
    """Full-size crosshair with seven concentric circles (no ticks).

    Circles are spaced rad apart around the centre (x, y).
    """
    cv2.line(arr, (0, y), (width, y), col, 1)
    cv2.line(arr, (x, 0), (x, height), col, 1)
    # Dead manual counter (i = 0 / i += 1 alongside the for-loop) removed.
    for i in range(1, 8):
        cv2.circle(arr, (x, y), i * rad, col, 1)
# pattern5: simple crosshair with one circle (no ticks)
def pattern5(arr, width, height, x, y, rad, col):
    """Full-size crosshair plus a single circle of radius ``rad``."""
    axes = (((0, y), (width, y)), ((x, 0), (x, height)))
    for p1, p2 in axes:
        cv2.line(arr, p1, p2, col, 1)
    cv2.circle(arr, (x, y), rad, col, 1)
# pattern6: simple circle
def pattern6( arr, width, height, x, y, rad, col ):
    """Draw a single unfilled circle of radius ``rad`` centred on (x, y)."""
    cv2.circle(arr,(x,y),rad,col,1)
    return
# pattern7: small center crosshair
def pattern7(arr, width, height, x, y, rad, col):
    """Small crosshair centred on (x, y), 10 pixels per arm."""
    arm = 10  # half-length of each crosshair arm in pixels
    cv2.line(arr, (x - arm, y), (x + arm, y), col, 1)
    cv2.line(arr, (x, y - arm), (x, y + arm), col, 1)
# pattern8: small center crosshair without center
def pattern8(arr, width, height, x, y, rad, col):
    """Small crosshair with an open (undrawn) centre."""
    # Four short segments, leaving a 3-pixel gap around the centre point.
    segments = (
        ((x - 10, y), (x - 3, y)),
        ((x, y - 10), (x, y - 3)),
        ((x + 3, y), (x + 10, y)),
        ((x, y + 3), (x, y + 10)),
    )
    for start, end in segments:
        cv2.line(arr, start, end, col, 1)
# pattern9: only a dot
def pattern9( arr, width, height, x, y, rad, col ):
    """Draw a small filled dot (radius 2, thickness -1 = filled) at (x, y)."""
    cv2.circle(arr,(x,y),2,col,-1)
    return
# pattern10: grid
def pattern10(arr, width, height, x, y, rad, col):
    """Regular grid with ``rad`` spacing, centred on (x, y)."""
    # Centre lines first.
    cv2.line(arr, (0, y), (width, y), col, 1)
    cv2.line(arr, (x, 0), (x, height), col, 1)
    # Horizontal lines mirrored above and below the centre line.
    offset = rad
    while offset < height:
        cv2.line(arr, (0, y + offset), (width, y + offset), col, 1)
        cv2.line(arr, (0, y - offset), (width, y - offset), col, 1)
        offset += rad
    # Vertical lines mirrored left and right of the centre line.
    offset = rad
    while offset < width:
        cv2.line(arr, (x + offset, 0), (x + offset, height), col, 1)
        cv2.line(arr, (x - offset, 0), (x - offset, height), col, 1)
        offset += rad
| mit |
javierTerry/odoo | addons/pos_discount/__openerp__.py | 312 | 1615 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: purely declarative metadata read by the module loader.
{
    'name': 'Point of Sale Discounts',
    'version': '1.0',
    'category': 'Point of Sale',
    'sequence': 6,
    'summary': 'Simple Discounts in the Point of Sale ',
    'description': """
=======================
This module allows the cashier to quickly give a percentage
sale discount to a customer.
""",
    'author': 'OpenERP SA',
    'depends': ['point_of_sale'],
    # XML files loaded on installation (backend views and QWeb templates).
    'data': [
        'views/views.xml',
        'views/templates.xml'
    ],
    'installable': True,
    'website': 'https://www.odoo.com/page/point-of-sale',
    'auto_install': False,
}
| agpl-3.0 |
thnee/ansible | lib/ansible/modules/network/fortios/fortios_report_chart.py | 12 | 32190 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_report_chart
short_description: Report chart widget configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify report feature and chart category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
report_chart:
description:
- Report chart widget configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
background:
description:
- Chart background.
type: str
category:
description:
- Category.
type: str
choices:
- misc
- traffic
- event
- virus
- webfilter
- attack
- spam
- dlp
- app-ctrl
- vulnerability
category_series:
description:
- Category series of pie chart.
type: dict
suboptions:
databind:
description:
- Category series value expression.
type: str
font_size:
description:
- Font size of category-series title.
type: int
color_palette:
description:
- Color palette (system will pick color automatically by default).
type: str
column:
description:
- Table column definition.
type: list
suboptions:
detail_unit:
description:
- Detail unit of column.
type: str
detail_value:
description:
- Detail value of column.
type: str
footer_unit:
description:
- Footer unit of column.
type: str
footer_value:
description:
- Footer value of column.
type: str
header_value:
description:
- Display name of table header.
type: str
id:
description:
- ID.
required: true
type: int
mapping:
description:
- Show detail in certain display value for certain condition.
type: list
suboptions:
displayname:
description:
- Display name.
type: str
id:
description:
- id
required: true
type: int
op:
description:
- Comparison operator.
type: str
choices:
- none
- greater
- greater-equal
- less
- less-equal
- equal
- between
value_type:
description:
- Value type.
type: str
choices:
- integer
- string
value1:
description:
- Value 1.
type: str
value2:
description:
- Value 2.
type: str
comments:
description:
- Comment.
type: str
dataset:
description:
- Bind dataset to chart.
type: str
dimension:
description:
- Dimension.
type: str
choices:
- 2D
- 3D
drill_down_charts:
description:
- Drill down charts.
type: list
suboptions:
chart_name:
description:
- Drill down chart name.
type: str
id:
description:
- Drill down chart ID.
required: true
type: int
status:
description:
- Enable/disable this drill down chart.
type: str
choices:
- enable
- disable
favorite:
description:
- Favorite.
type: str
choices:
- no
- yes
graph_type:
description:
- Graph type.
type: str
choices:
- none
- bar
- pie
- line
- flow
legend:
description:
- Enable/Disable Legend area.
type: str
choices:
- enable
- disable
legend_font_size:
description:
- Font size of legend area.
type: int
name:
description:
- Chart Widget Name
required: true
type: str
period:
description:
- Time period.
type: str
choices:
- last24h
- last7d
policy:
description:
- Used by monitor policy.
type: int
style:
description:
- Style.
type: str
choices:
- auto
- manual
title:
description:
- Chart title.
type: str
title_font_size:
description:
- Font size of chart title.
type: int
type:
description:
- Chart type.
type: str
choices:
- graph
- table
value_series:
description:
- Value series of pie chart.
type: dict
suboptions:
databind:
description:
- Value series value expression.
type: str
x_series:
description:
- X-series of chart.
type: dict
suboptions:
caption:
description:
- X-series caption.
type: str
caption_font_size:
description:
- X-series caption font size.
type: int
databind:
description:
- X-series value expression.
type: str
font_size:
description:
- X-series label font size.
type: int
is_category:
description:
- X-series represent category or not.
type: str
choices:
- yes
- no
label_angle:
description:
- X-series label angle.
type: str
choices:
- 45-degree
- vertical
- horizontal
scale_direction:
description:
- Scale increase or decrease.
type: str
choices:
- decrease
- increase
scale_format:
description:
- Date/time format.
type: str
choices:
- YYYY-MM-DD-HH-MM
- YYYY-MM-DD HH
- YYYY-MM-DD
- YYYY-MM
- YYYY
- HH-MM
- MM-DD
scale_step:
description:
- Scale step.
type: int
scale_unit:
description:
- Scale unit.
type: str
choices:
- minute
- hour
- day
- month
- year
unit:
description:
- X-series unit.
type: str
y_series:
description:
- Y-series of chart.
type: dict
suboptions:
caption:
description:
- Y-series caption.
type: str
caption_font_size:
description:
- Y-series caption font size.
type: int
databind:
description:
- Y-series value expression.
type: str
extra_databind:
description:
- Extra Y-series value.
type: str
extra_y:
description:
- Allow another Y-series value
type: str
choices:
- enable
- disable
extra_y_legend:
description:
- Extra Y-series legend type/name.
type: str
font_size:
description:
- Y-series label font size.
type: int
group:
description:
- Y-series group option.
type: str
label_angle:
description:
- Y-series label angle.
type: str
choices:
- 45-degree
- vertical
- horizontal
unit:
description:
- Y-series unit.
type: str
y_legend:
description:
- First Y-series legend type/name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Report chart widget configuration.
fortios_report_chart:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
report_chart:
background: "<your_own_value>"
category: "misc"
category_series:
databind: "<your_own_value>"
font_size: "7"
color_palette: "<your_own_value>"
column:
-
detail_unit: "<your_own_value>"
detail_value: "<your_own_value>"
footer_unit: "<your_own_value>"
footer_value: "<your_own_value>"
header_value: "<your_own_value>"
id: "15"
mapping:
-
displayname: "<your_own_value>"
id: "18"
op: "none"
value_type: "integer"
value1: "<your_own_value>"
value2: "<your_own_value>"
comments: "<your_own_value>"
dataset: "<your_own_value>"
dimension: "2D"
drill_down_charts:
-
chart_name: "<your_own_value>"
id: "28"
status: "enable"
favorite: "no"
graph_type: "none"
legend: "enable"
legend_font_size: "33"
name: "default_name_34"
period: "last24h"
policy: "36"
style: "auto"
title: "<your_own_value>"
title_font_size: "39"
type: "graph"
value_series:
databind: "<your_own_value>"
x_series:
caption: "<your_own_value>"
caption_font_size: "45"
databind: "<your_own_value>"
font_size: "47"
is_category: "yes"
label_angle: "45-degree"
scale_direction: "decrease"
scale_format: "YYYY-MM-DD-HH-MM"
scale_step: "52"
scale_unit: "minute"
unit: "<your_own_value>"
y_series:
caption: "<your_own_value>"
caption_font_size: "57"
databind: "<your_own_value>"
extra_databind: "<your_own_value>"
extra_y: "enable"
extra_y_legend: "<your_own_value>"
font_size: "62"
group: "<your_own_value>"
label_angle: "45-degree"
unit: "<your_own_value>"
y_legend: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Log the *fos* API handler in to the FortiGate described by *data*."""
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS stays enabled unless the caller explicitly passed https=False.
    fos.https('off' if not data.get('https', True) else 'on')

    fos.login(host, username, password, verify=ssl_verify)
def filter_report_chart_data(json):
    """Return a copy of *json* restricted to the report-chart option keys,
    dropping entries whose value is None."""
    option_list = ['background', 'category', 'category_series',
                   'color_palette', 'column', 'comments',
                   'dataset', 'dimension', 'drill_down_charts',
                   'favorite', 'graph_type', 'legend',
                   'legend_font_size', 'name', 'period',
                   'policy', 'style', 'title',
                   'title_font_size', 'type', 'value_series',
                   'x_series', 'y_series']

    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in every dict key of *data*.

    FortiOS API attribute names use hyphens while the Ansible argument spec
    uses underscores; this walks nested dicts/lists and rewrites the keys.
    Dicts are rebuilt; lists are updated in place; scalars pass through.
    """
    if isinstance(data, list):
        # Assign back by index: the dict branch below returns a *new*
        # mapping, so rebinding the loop variable alone would silently
        # leave dicts nested inside lists unconverted.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def report_chart(data, fos):
    """Create/update or delete a report chart widget on the FortiGate.

    data: full module parameter dict (vdom, state, report_chart payload).
    fos: FortiOS API handler used to issue the request.
    Returns the raw FortiOS response dict (or None for an unknown state).
    """
    vdom = data['vdom']
    # State may be given at the top level (preferred) or, for backwards
    # compatibility, inside the report_chart payload. Check the payload is
    # truthy *before* the membership test so a None payload cannot raise
    # TypeError here.
    if 'state' in data and data['state']:
        state = data['state']
    elif data['report_chart'] and 'state' in data['report_chart']:
        state = data['report_chart']['state']
    else:
        state = True
    report_chart_data = data['report_chart']
    filtered_data = underscore_to_hyphen(filter_report_chart_data(report_chart_data))

    if state == "present":
        return fos.set('report',
                       'chart',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('report',
                          'chart',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """A result counts as successful when FortiOS reports 'success', or when
    a DELETE hit an already-missing object (HTTP 404)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_report(data, fos):
    """Dispatch the requested report configuration change.

    Returns the tuple (is_error, has_changed, response).

    NOTE(review): when data['report_chart'] is falsy, ``resp`` is never
    assigned and the return statement raises NameError; the caller appears
    to always supply the payload -- confirm before relying on that path.
    """
    if data['report_chart']:
        resp = report_chart(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point.

    Declares the full argument spec for the report_chart configuration,
    connects to the FortiGate either via Ansible's HTTPAPI connection
    plugin or (legacy mode) directly through the fortiosapi library,
    applies the requested change and exits with the result.
    """
    # Argument spec mirroring the FortiOS report/chart CLI schema.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "report_chart": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "background": {"required": False, "type": "str"},
                "category": {"required": False, "type": "str",
                             "choices": ["misc", "traffic", "event",
                                         "virus", "webfilter", "attack",
                                         "spam", "dlp", "app-ctrl",
                                         "vulnerability"]},
                "category_series": {"required": False, "type": "dict",
                                    "options": {
                                        "databind": {"required": False, "type": "str"},
                                        "font_size": {"required": False, "type": "int"}
                                    }},
                "color_palette": {"required": False, "type": "str"},
                "column": {"required": False, "type": "list",
                           "options": {
                               "detail_unit": {"required": False, "type": "str"},
                               "detail_value": {"required": False, "type": "str"},
                               "footer_unit": {"required": False, "type": "str"},
                               "footer_value": {"required": False, "type": "str"},
                               "header_value": {"required": False, "type": "str"},
                               "id": {"required": True, "type": "int"},
                               "mapping": {"required": False, "type": "list",
                                           "options": {
                                               "displayname": {"required": False, "type": "str"},
                                               "id": {"required": True, "type": "int"},
                                               "op": {"required": False, "type": "str",
                                                      "choices": ["none", "greater", "greater-equal",
                                                                  "less", "less-equal", "equal",
                                                                  "between"]},
                                               "value_type": {"required": False, "type": "str",
                                                              "choices": ["integer", "string"]},
                                               "value1": {"required": False, "type": "str"},
                                               "value2": {"required": False, "type": "str"}
                                           }}
                           }},
                "comments": {"required": False, "type": "str"},
                "dataset": {"required": False, "type": "str"},
                "dimension": {"required": False, "type": "str",
                              "choices": ["2D", "3D"]},
                "drill_down_charts": {"required": False, "type": "list",
                                      "options": {
                                          "chart_name": {"required": False, "type": "str"},
                                          "id": {"required": True, "type": "int"},
                                          "status": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]}
                                      }},
                "favorite": {"required": False, "type": "str",
                             "choices": ["no", "yes"]},
                "graph_type": {"required": False, "type": "str",
                               "choices": ["none", "bar", "pie",
                                           "line", "flow"]},
                "legend": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "legend_font_size": {"required": False, "type": "int"},
                "name": {"required": True, "type": "str"},
                "period": {"required": False, "type": "str",
                           "choices": ["last24h", "last7d"]},
                "policy": {"required": False, "type": "int"},
                "style": {"required": False, "type": "str",
                          "choices": ["auto", "manual"]},
                "title": {"required": False, "type": "str"},
                "title_font_size": {"required": False, "type": "int"},
                "type": {"required": False, "type": "str",
                         "choices": ["graph", "table"]},
                "value_series": {"required": False, "type": "dict",
                                 "options": {
                                     "databind": {"required": False, "type": "str"}
                                 }},
                "x_series": {"required": False, "type": "dict",
                             "options": {
                                 "caption": {"required": False, "type": "str"},
                                 "caption_font_size": {"required": False, "type": "int"},
                                 "databind": {"required": False, "type": "str"},
                                 "font_size": {"required": False, "type": "int"},
                                 "is_category": {"required": False, "type": "str",
                                                 "choices": ["yes", "no"]},
                                 "label_angle": {"required": False, "type": "str",
                                                 "choices": ["45-degree", "vertical", "horizontal"]},
                                 "scale_direction": {"required": False, "type": "str",
                                                     "choices": ["decrease", "increase"]},
                                 "scale_format": {"required": False, "type": "str",
                                                  "choices": ["YYYY-MM-DD-HH-MM", "YYYY-MM-DD HH", "YYYY-MM-DD",
                                                              "YYYY-MM", "YYYY", "HH-MM",
                                                              "MM-DD"]},
                                 "scale_step": {"required": False, "type": "int"},
                                 "scale_unit": {"required": False, "type": "str",
                                                "choices": ["minute", "hour", "day",
                                                            "month", "year"]},
                                 "unit": {"required": False, "type": "str"}
                             }},
                "y_series": {"required": False, "type": "dict",
                             "options": {
                                 "caption": {"required": False, "type": "str"},
                                 "caption_font_size": {"required": False, "type": "int"},
                                 "databind": {"required": False, "type": "str"},
                                 "extra_databind": {"required": False, "type": "str"},
                                 "extra_y": {"required": False, "type": "str",
                                             "choices": ["enable", "disable"]},
                                 "extra_y_legend": {"required": False, "type": "str"},
                                 "font_size": {"required": False, "type": "int"},
                                 "group": {"required": False, "type": "str"},
                                 "label_angle": {"required": False, "type": "str",
                                                 "choices": ["45-degree", "vertical", "horizontal"]},
                                 "unit": {"required": False, "type": "str"},
                                 "y_legend": {"required": False, "type": "str"}
                             }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        # HTTPAPI path: reuse Ansible's persistent connection socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_report(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: connect directly with the fortiosapi library.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_report(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" looks like a typo for "Error in
        # request" -- confirm before changing, callers may match on it.
        module.fail_json(msg="Error in repo", meta=result)
# Standard Ansible module entry point: run main() when executed as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
anryko/ansible | lib/ansible/module_utils/network/common/config.py | 9 | 14281 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import hashlib
from ansible.module_utils.six.moves import zip
from ansible.module_utils._text import to_bytes, to_native
# Prefixes that mark a config line as a comment/banner to be skipped.
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo']
# Whole-line patterns (IOS status banners) that are never part of the config
# proper.  NOTE: this set is module-global and is extended in place by
# NetworkConfig(ignore_lines=...), so additions persist across instances.
DEFAULT_IGNORE_LINES_RE = set([
    re.compile(r"Using \d+ out of \d+ bytes"),
    re.compile(r"Building configuration"),
    re.compile(r"Current configuration : \d+ bytes")
])
# Compiled-regex type compatibility shim: re._pattern_type was removed in
# Python 3.7/3.8 in favor of the public re.Pattern.
try:
    Pattern = re._pattern_type
except AttributeError:
    Pattern = re.Pattern
class ConfigLine(object):
    """One line of device configuration, linked into a parent/child tree.

    Two instances compare equal when their fully qualified ``line`` (all
    ancestor text plus their own text) matches, so the same command under
    different parents is considered a different line.
    """
    def __init__(self, raw):
        # ``raw`` keeps the original indentation; ``text`` is the
        # whitespace-normalized form used for lookups and comparisons.
        self.raw = raw
        self.text = str(raw).strip()
        self._children = list()
        self._parents = list()
    def __str__(self):
        return self.raw
    def __eq__(self, other):
        return self.line == other.line
    def __ne__(self, other):
        return not self.__eq__(other)
    def __getitem__(self, key):
        # Dict-style lookup of a direct child by its normalized text.
        for candidate in self._children:
            if candidate.text == key:
                return candidate
        raise KeyError(key)
    @property
    def line(self):
        """Full command path: every ancestor's text followed by our own."""
        parts = self.parents
        parts.append(self.text)
        return ' '.join(parts)
    @property
    def children(self):
        """Normalized text of each direct child."""
        return [child.text for child in self._children]
    @property
    def child_objs(self):
        """The child ConfigLine objects themselves."""
        return self._children
    @property
    def parents(self):
        """Normalized text of each ancestor, outermost first."""
        return [parent.text for parent in self._parents]
    @property
    def path(self):
        """Raw config block from the outermost ancestor down to this line."""
        lines = [parent.raw for parent in self._parents]
        lines.append(self.raw)
        return '\n'.join(lines)
    @property
    def has_children(self):
        return bool(self._children)
    @property
    def has_parents(self):
        return bool(self._parents)
    def add_child(self, obj):
        """Attach *obj* as a child; only ConfigLine instances are accepted."""
        if not isinstance(obj, ConfigLine):
            raise AssertionError('child must be of type `ConfigLine`')
        self._children.append(obj)
def ignore_line(text, tokens=None):
    """Return True if *text* is a comment/banner line that should be skipped.

    A line is ignored when it starts with one of *tokens* (defaulting to
    DEFAULT_COMMENT_TOKENS) or matches one of the DEFAULT_IGNORE_LINES_RE
    patterns.

    Fix: the original fell through and implicitly returned None for lines
    that should be kept; this version returns an explicit False, which is
    equivalent under the boolean checks used by all callers.
    """
    # str.startswith accepts a tuple of prefixes, avoiding the manual loop.
    if text.startswith(tuple(tokens or DEFAULT_COMMENT_TOKENS)):
        return True
    return any(regex.match(text) for regex in DEFAULT_IGNORE_LINES_RE)
def _obj_to_text(x):
return [o.text for o in x]
def _obj_to_raw(x):
return [o.raw for o in x]
def _obj_to_block(objects, visited=None):
    """Flatten *objects* plus their direct children into raw config lines.

    Each object is emitted once (equality-deduplicated), immediately
    followed by any of its direct children not already emitted.
    ``visited`` is accepted for backward compatibility but is not used.
    """
    ordered = list()
    for obj in objects:
        if obj in ordered:
            continue
        ordered.append(obj)
        for child in obj._children:
            if child not in ordered:
                ordered.append(child)
    return [entry.raw for entry in ordered]
def dumps(objects, output='block', comments=False):
    """Serialize a list of ConfigLine objects to a single string.

    :param objects: ConfigLine instances to serialize
    :param output: 'block' (raw lines plus one level of children),
        'commands' (normalized text only) or 'raw' (raw lines only)
    :param comments: when *output* is 'block', insert IOS-style '!'
        separators before top-level sections and terminate the config
        with '!' and 'end'
    :returns: the joined configuration string
    :raises TypeError: if *output* is not one of the recognized modes
    """
    if output == 'block':
        items = _obj_to_block(objects)
    elif output == 'commands':
        items = _obj_to_text(objects)
    elif output == 'raw':
        items = _obj_to_raw(objects)
    else:
        raise TypeError('unknown value supplied for keyword output')
    if output == 'block':
        if comments:
            # Prefix '!' to every top-level line that opens an indented
            # section, then close the whole config with '!' and 'end'.
            for index, item in enumerate(items):
                nextitem = index + 1
                if nextitem < len(items) and not item.startswith(' ') and items[nextitem].startswith(' '):
                    item = '!\n%s' % item
                items[index] = item
            items.append('!')
            items.append('end')
    return '\n'.join(items)
class NetworkConfig(object):
    """Indentation-aware model of a network device configuration.

    The configuration text is parsed into a list of ConfigLine objects
    linked into a parent/child tree, which can then be queried by path,
    diffed against another configuration, and extended with new lines.
    """
    def __init__(self, indent=1, contents=None, ignore_lines=None):
        # ``indent`` is the number of spaces one nesting level adds when
        # lines are generated by ``add``.
        self._indent = indent
        self._items = list()
        self._config_text = None
        if ignore_lines:
            for item in ignore_lines:
                if not isinstance(item, Pattern):
                    item = re.compile(item)
                # NOTE(review): mutates the module-level set, so the extra
                # ignore patterns also affect every later instance.
                DEFAULT_IGNORE_LINES_RE.add(item)
        if contents:
            self.load(contents)
    @property
    def items(self):
        """Parsed ConfigLine objects, in source order."""
        return self._items
    @property
    def config_text(self):
        """The raw configuration text passed to ``load``, if any."""
        return self._config_text
    @property
    def sha1(self):
        """Binary SHA-1 digest of the rendered configuration."""
        sha1 = hashlib.sha1()
        sha1.update(to_bytes(str(self), errors='surrogate_or_strict'))
        return sha1.digest()
    def __getitem__(self, key):
        # Lookup by normalized line text; raises KeyError when absent.
        for line in self:
            if line.text == key:
                return line
        raise KeyError(key)
    def __iter__(self):
        return iter(self._items)
    def __str__(self):
        # Render the configuration with original indentation preserved.
        return '\n'.join([c.raw for c in self.items])
    def __len__(self):
        return len(self._items)
    def load(self, s):
        """Parse configuration text *s*, replacing any previous contents."""
        self._config_text = s
        self._items = self.parse(s)
    def loadfp(self, fp):
        """Load configuration from the file at path *fp*."""
        with open(fp) as f:
            return self.load(f.read())
    def parse(self, lines, comment_tokens=None):
        """Parse configuration text into a flat list of ConfigLine objects.

        Nesting is derived from leading whitespace: an indent increase
        opens a child level, a decrease pops back to the matching
        ancestor.  Comment lines and '{', '}', ';' characters are ignored.
        """
        toplevel = re.compile(r'\S')
        childline = re.compile(r'^\s*(.+)$')
        entry_reg = re.compile(r'([{};])')
        ancestors = list()
        config = list()
        indents = [0]
        for linenum, line in enumerate(to_native(lines, errors='surrogate_or_strict').split('\n')):
            text = entry_reg.sub('', line).strip()
            cfg = ConfigLine(line)
            if not text or ignore_line(text, comment_tokens):
                continue
            # handle top level commands
            if toplevel.match(line):
                ancestors = [cfg]
                indents = [0]
            # handle sub level commands
            else:
                match = childline.match(line)
                line_indent = match.start(1)
                # Pop indent levels until we find this line's level.
                if line_indent < indents[-1]:
                    while indents[-1] > line_indent:
                        indents.pop()
                if line_indent > indents[-1]:
                    indents.append(line_indent)
                curlevel = len(indents) - 1
                parent_level = curlevel - 1
                cfg._parents = ancestors[:curlevel]
                # Indent jumped deeper than the known ancestry: keep the
                # line but do not attach it to a parent.
                if curlevel > len(ancestors):
                    config.append(cfg)
                    continue
                for i in range(curlevel, len(ancestors)):
                    ancestors.pop()
                ancestors.append(cfg)
                ancestors[parent_level].add_child(cfg)
            config.append(cfg)
        return config
    def get_object(self, path):
        """Return the ConfigLine matching *path* (list of parent texts plus
        the line's own text), or None when not found."""
        for item in self.items:
            if item.text == path[-1]:
                if item.parents == path[:-1]:
                    return item
    def get_block(self, path):
        """Return the line at *path* expanded with all of its descendants."""
        if not isinstance(path, list):
            raise AssertionError('path argument must be a list object')
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self._expand_block(obj)
    def get_block_config(self, path):
        """Return the block at *path* rendered as a config string."""
        block = self.get_block(path)
        return dumps(block, 'block')
    def _expand_block(self, configobj, S=None):
        # Depth-first collect *configobj* and every descendant into S.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj._children:
            if child in S:
                continue
            self._expand_block(child, S)
        return S
    def _diff_line(self, other):
        """Lines of self that are not present anywhere in *other*."""
        updates = list()
        for item in self.items:
            if item not in other:
                updates.append(item)
        return updates
    def _diff_strict(self, other):
        """Position-sensitive diff: line N of self must equal line N of
        *other* (ancestry-completed) or it counts as an update."""
        updates = list()
        # block extracted from other does not have all parents
        # but the last one. In case of multiple parents we need
        # to add additional parents.
        if other and isinstance(other, list) and len(other) > 0:
            start_other = other[0]
            if start_other.parents:
                for parent in start_other.parents:
                    other.insert(0, ConfigLine(parent))
        for index, line in enumerate(self.items):
            try:
                if str(line).strip() != str(other[index]).strip():
                    updates.append(line)
            except (AttributeError, IndexError):
                updates.append(line)
        return updates
    def _diff_exact(self, other):
        """All-or-nothing diff: any mismatch returns every line of self."""
        updates = list()
        if len(other) != len(self.items):
            updates.extend(self.items)
        else:
            for ours, theirs in zip(self.items, other):
                if ours != theirs:
                    updates.extend(self.items)
                    break
        return updates
    def difference(self, other, match='line', path=None, replace=None):
        """Perform a config diff against another network config
        :param other: instance of NetworkConfig to diff against
        :param match: type of diff to perform. valid values are 'line',
        'strict', 'exact'
        :param path: context in the network config to filter the diff
        :param replace: the method used to generate the replacement lines.
        valid values are 'block', 'line'
        :returns: a list of ConfigLine objects that differ, with each
        line preceded by its (deduplicated) parent lines
        """
        if path and match != 'line':
            try:
                other = other.get_block(path)
            except ValueError:
                other = list()
        else:
            other = other.items
        # generate a list of ConfigLines that aren't in other
        meth = getattr(self, '_diff_%s' % match)
        updates = meth(other)
        if replace == 'block':
            # Replace mode 'block': expand every touched top-level section
            # in full rather than emitting individual changed lines.
            parents = list()
            for item in updates:
                if not item.has_parents:
                    parents.append(item)
                else:
                    for p in item._parents:
                        if p not in parents:
                            parents.append(p)
            updates = list()
            for item in parents:
                updates.extend(self._expand_block(item))
        # Emit each update preceded by any parents not yet emitted.
        visited = set()
        expanded = list()
        for item in updates:
            for p in item._parents:
                if p.line not in visited:
                    visited.add(p.line)
                    expanded.append(p)
            expanded.append(item)
            visited.add(item.line)
        return expanded
    def add(self, lines, parents=None):
        """Add *lines* to the configuration, optionally nested under the
        *parents* path; missing parent lines are created as needed."""
        ancestors = list()
        offset = 0
        obj = None
        # global config command
        if not parents:
            for line in lines:
                # handle ignore lines
                if ignore_line(line):
                    continue
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_block(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self._indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj._parents = list(ancestors)
                        ancestors[-1]._children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)
            # add child objects
            for line in lines:
                # handle ignore lines
                if ignore_line(line):
                    continue
                # check if child already exists
                for child in ancestors[-1]._children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self._indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item._parents = ancestors
                    ancestors[-1]._children.append(item)
                    self.items.append(item)
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig variant exposing section-oriented helpers."""
    def items_text(self):
        """Return the normalized text of every parsed line."""
        return [entry.text for entry in self.items]
    def expand_section(self, configobj, S=None):
        """Depth-first collect *configobj* and all of its descendants."""
        collected = list() if S is None else S
        collected.append(configobj)
        for child in configobj.child_objs:
            if child not in collected:
                self.expand_section(child, collected)
        return collected
    def to_block(self, section):
        """Render a list of ConfigLine objects as raw config text."""
        return '\n'.join(entry.raw for entry in section)
    def get_section(self, path):
        """Return the section at *path* as text, or [] when not found."""
        try:
            return self.to_block(self.get_section_objects(path))
        except ValueError:
            return list()
    def get_section_objects(self, path):
        """Return the ConfigLine objects of the section at *path*."""
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/Django/tests/regressiontests/views/tests/defaults.py | 50 | 4469 | from __future__ import absolute_import, unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import setup_test_template_loader, restore_template_loaders
from ..models import Author, Article, UrlArticle
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    fixtures = ['testdata.json']
    non_existing_urls = ['/views/non_existing_url/', # this is in urls.py
                         '/views/other_non_existing_url/'] # this NOT in urls.py
    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
                                 status_code=302, target_status_code=404)
    def test_shortcut_no_absolute_url(self):
        "Shortcuts for an object that has no get_absolute_url method raises 404"
        for obj in Article.objects.all():
            short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
            response = self.client.get(short_url)
            self.assertEqual(response.status_code, 404)
    def test_wrong_type_pk(self):
        "A non-integer pk in a shortcut URL returns a 404, not a server error"
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_shortcut_bad_pk(self):
        "A pk that matches no object returns a 404"
        short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_nonint_content_type(self):
        "A non-integer content-type id in a shortcut URL returns a 404"
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_bad_content_type(self):
        "A content-type id that matches no ContentType returns a 404"
        an_author = Author.objects.all()[0]
        short_url = '/views/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.non_existing_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        for url in self.non_existing_urls:
            response = self.client.get(url)
            csrf_token = response.context['csrf_token']
            self.assertNotEqual(str(csrf_token), 'NOTPROVIDED')
            self.assertNotEqual(str(csrf_token), '')
    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/views/server_error/')
        self.assertEqual(response.status_code, 500)
    def test_custom_templates(self):
        """
        Test that 404.html and 500.html templates are picked by their respective
        handler.
        """
        setup_test_template_loader(
            {'404.html': 'This is a test template for a 404 error.',
             '500.html': 'This is a test template for a 500 error.'}
        )
        try:
            for code, url in ((404, '/views/non_existing_url/'), (500, '/views/server_error/')):
                response = self.client.get(url)
                self.assertContains(response, "test template for a %d error" % code,
                    status_code=code)
        finally:
            # Always undo the template-loader override, even on failure.
            restore_template_loaders()
    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
| agpl-3.0 |
iAmMrinal0/plexpy | lib/unidecode/x089.py | 252 | 4628 | data = (
# Transliteration table: entry N is the ASCII replacement for the Unicode
# code point whose low byte is N (table x089 -- presumably U+8900..U+89FF,
# CJK ideographs; confirm against the unidecode table naming scheme).
# Auto-generated data -- do not hand-edit individual entries.
'Ji ', # 0x00
'Zhi ', # 0x01
'Gua ', # 0x02
'Ken ', # 0x03
'Che ', # 0x04
'Ti ', # 0x05
'Ti ', # 0x06
'Fu ', # 0x07
'Chong ', # 0x08
'Xie ', # 0x09
'Bian ', # 0x0a
'Die ', # 0x0b
'Kun ', # 0x0c
'Duan ', # 0x0d
'Xiu ', # 0x0e
'Xiu ', # 0x0f
'He ', # 0x10
'Yuan ', # 0x11
'Bao ', # 0x12
'Bao ', # 0x13
'Fu ', # 0x14
'Yu ', # 0x15
'Tuan ', # 0x16
'Yan ', # 0x17
'Hui ', # 0x18
'Bei ', # 0x19
'Chu ', # 0x1a
'Lu ', # 0x1b
'Ena ', # 0x1c
'Hitoe ', # 0x1d
'Yun ', # 0x1e
'Da ', # 0x1f
'Gou ', # 0x20
'Da ', # 0x21
'Huai ', # 0x22
'Rong ', # 0x23
'Yuan ', # 0x24
'Ru ', # 0x25
'Nai ', # 0x26
'Jiong ', # 0x27
'Suo ', # 0x28
'Ban ', # 0x29
'Tun ', # 0x2a
'Chi ', # 0x2b
'Sang ', # 0x2c
'Niao ', # 0x2d
'Ying ', # 0x2e
'Jie ', # 0x2f
'Qian ', # 0x30
'Huai ', # 0x31
'Ku ', # 0x32
'Lian ', # 0x33
'Bao ', # 0x34
'Li ', # 0x35
'Zhe ', # 0x36
'Shi ', # 0x37
'Lu ', # 0x38
'Yi ', # 0x39
'Die ', # 0x3a
'Xie ', # 0x3b
'Xian ', # 0x3c
'Wei ', # 0x3d
'Biao ', # 0x3e
'Cao ', # 0x3f
'Ji ', # 0x40
'Jiang ', # 0x41
'Sen ', # 0x42
'Bao ', # 0x43
'Xiang ', # 0x44
'Chihaya ', # 0x45
'Pu ', # 0x46
'Jian ', # 0x47
'Zhuan ', # 0x48
'Jian ', # 0x49
'Zui ', # 0x4a
'Ji ', # 0x4b
'Dan ', # 0x4c
'Za ', # 0x4d
'Fan ', # 0x4e
'Bo ', # 0x4f
'Xiang ', # 0x50
'Xin ', # 0x51
'Bie ', # 0x52
'Rao ', # 0x53
'Man ', # 0x54
'Lan ', # 0x55
'Ao ', # 0x56
'Duo ', # 0x57
'Gui ', # 0x58
'Cao ', # 0x59
'Sui ', # 0x5a
'Nong ', # 0x5b
'Chan ', # 0x5c
'Lian ', # 0x5d
'Bi ', # 0x5e
'Jin ', # 0x5f
'Dang ', # 0x60
'Shu ', # 0x61
'Tan ', # 0x62
'Bi ', # 0x63
'Lan ', # 0x64
'Pu ', # 0x65
'Ru ', # 0x66
'Zhi ', # 0x67
'[?] ', # 0x68
'Shu ', # 0x69
'Wa ', # 0x6a
'Shi ', # 0x6b
'Bai ', # 0x6c
'Xie ', # 0x6d
'Bo ', # 0x6e
'Chen ', # 0x6f
'Lai ', # 0x70
'Long ', # 0x71
'Xi ', # 0x72
'Xian ', # 0x73
'Lan ', # 0x74
'Zhe ', # 0x75
'Dai ', # 0x76
'Tasuki ', # 0x77
'Zan ', # 0x78
'Shi ', # 0x79
'Jian ', # 0x7a
'Pan ', # 0x7b
'Yi ', # 0x7c
'Ran ', # 0x7d
'Ya ', # 0x7e
'Xi ', # 0x7f
'Xi ', # 0x80
'Yao ', # 0x81
'Feng ', # 0x82
'Tan ', # 0x83
'[?] ', # 0x84
'Biao ', # 0x85
'Fu ', # 0x86
'Ba ', # 0x87
'He ', # 0x88
'Ji ', # 0x89
'Ji ', # 0x8a
'Jian ', # 0x8b
'Guan ', # 0x8c
'Bian ', # 0x8d
'Yan ', # 0x8e
'Gui ', # 0x8f
'Jue ', # 0x90
'Pian ', # 0x91
'Mao ', # 0x92
'Mi ', # 0x93
'Mi ', # 0x94
'Mie ', # 0x95
'Shi ', # 0x96
'Si ', # 0x97
'Zhan ', # 0x98
'Luo ', # 0x99
'Jue ', # 0x9a
'Mi ', # 0x9b
'Tiao ', # 0x9c
'Lian ', # 0x9d
'Yao ', # 0x9e
'Zhi ', # 0x9f
'Jun ', # 0xa0
'Xi ', # 0xa1
'Shan ', # 0xa2
'Wei ', # 0xa3
'Xi ', # 0xa4
'Tian ', # 0xa5
'Yu ', # 0xa6
'Lan ', # 0xa7
'E ', # 0xa8
'Du ', # 0xa9
'Qin ', # 0xaa
'Pang ', # 0xab
'Ji ', # 0xac
'Ming ', # 0xad
'Ying ', # 0xae
'Gou ', # 0xaf
'Qu ', # 0xb0
'Zhan ', # 0xb1
'Jin ', # 0xb2
'Guan ', # 0xb3
'Deng ', # 0xb4
'Jian ', # 0xb5
'Luo ', # 0xb6
'Qu ', # 0xb7
'Jian ', # 0xb8
'Wei ', # 0xb9
'Jue ', # 0xba
'Qu ', # 0xbb
'Luo ', # 0xbc
'Lan ', # 0xbd
'Shen ', # 0xbe
'Di ', # 0xbf
'Guan ', # 0xc0
'Jian ', # 0xc1
'Guan ', # 0xc2
'Yan ', # 0xc3
'Gui ', # 0xc4
'Mi ', # 0xc5
'Shi ', # 0xc6
'Zhan ', # 0xc7
'Lan ', # 0xc8
'Jue ', # 0xc9
'Ji ', # 0xca
'Xi ', # 0xcb
'Di ', # 0xcc
'Tian ', # 0xcd
'Yu ', # 0xce
'Gou ', # 0xcf
'Jin ', # 0xd0
'Qu ', # 0xd1
'Jiao ', # 0xd2
'Jiu ', # 0xd3
'Jin ', # 0xd4
'Cu ', # 0xd5
'Jue ', # 0xd6
'Zhi ', # 0xd7
'Chao ', # 0xd8
'Ji ', # 0xd9
'Gu ', # 0xda
'Dan ', # 0xdb
'Zui ', # 0xdc
'Di ', # 0xdd
'Shang ', # 0xde
'Hua ', # 0xdf
'Quan ', # 0xe0
'Ge ', # 0xe1
'Chi ', # 0xe2
'Jie ', # 0xe3
'Gui ', # 0xe4
'Gong ', # 0xe5
'Hong ', # 0xe6
'Jie ', # 0xe7
'Hun ', # 0xe8
'Qiu ', # 0xe9
'Xing ', # 0xea
'Su ', # 0xeb
'Ni ', # 0xec
'Ji ', # 0xed
'Lu ', # 0xee
'Zhi ', # 0xef
'Zha ', # 0xf0
'Bi ', # 0xf1
'Xing ', # 0xf2
'Hu ', # 0xf3
'Shang ', # 0xf4
'Gong ', # 0xf5
'Zhi ', # 0xf6
'Xue ', # 0xf7
'Chu ', # 0xf8
'Xi ', # 0xf9
'Yi ', # 0xfa
'Lu ', # 0xfb
'Jue ', # 0xfc
'Xi ', # 0xfd
'Yan ', # 0xfe
'Xi ', # 0xff
)
| gpl-3.0 |
npinto/pytest | testing/test_helpconfig.py | 1 | 2191 | import py, pytest,os
from _pytest.helpconfig import collectattr
def test_version(testdir, pytestconfig):
    """--version exits 0 and reports the py.test version on stderr."""
    result = testdir.runpytest("--version")
    assert result.ret == 0
    #p = py.path.local(py.__file__).dirpath()
    result.stderr.fnmatch_lines([
        '*py.test*%s*imported from*' % (pytest.__version__, )
    ])
    if pytestconfig.pluginmanager._plugin_distinfo:
        result.stderr.fnmatch_lines([
            "*setuptools registered plugins:",
            "*at*",
        ])
def test_help(testdir):
    """--help exits 0 and lists core options plus ini settings."""
    result = testdir.runpytest("--help")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*-v*verbose*",
        "*setup.cfg*",
        "*minversion*",
    ])
def test_collectattr():
    """collectattr finds pytest_* methods on classes and instances alike,
    including those inherited from a base class."""
    class Base:
        def pytest_hello(self):
            pass
    class Child(Base):
        def pytest_world(self):
            pass
    expected = ['pytest_hello', 'pytest_world']
    for target in (Child, Child()):
        found = py.builtin.sorted(collectattr(target))
        assert list(found) == expected
def test_hookvalidation_unknown(testdir):
    """An unrecognized pytest_* hook in conftest.py aborts the run."""
    testdir.makeconftest("""
        def pytest_hello(xyz):
            pass
    """)
    result = testdir.runpytest()
    assert result.ret != 0
    result.stderr.fnmatch_lines([
        '*unknown hook*pytest_hello*'
    ])
def test_hookvalidation_optional(testdir):
    """A pytest_* hook marked optionalhook is accepted without error."""
    testdir.makeconftest("""
        import pytest
        @pytest.mark.optionalhook
        def pytest_hello(xyz):
            pass
    """)
    result = testdir.runpytest()
    assert result.ret == 0
def test_traceconfig(testdir):
    """--traceconfig reports the pytest/py versions and active plugins."""
    result = testdir.runpytest("--traceconfig")
    result.stdout.fnmatch_lines([
        "*using*pytest*py*",
        "*active plugins*",
    ])
def test_debug(testdir, monkeypatch):
    """--debug exits 0 and writes session tracing to pytestdebug.log."""
    # NOTE(review): the monkeypatch fixture is requested but unused here.
    result = testdir.runpytest("--debug")
    assert result.ret == 0
    p = testdir.tmpdir.join("pytestdebug.log")
    assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
    """PYTEST_DEBUG=1 in the environment enables plugin-manager tracing."""
    monkeypatch.setenv("PYTEST_DEBUG", "1")
    result = testdir.runpytest()
    assert result.ret == 0
    result.stderr.fnmatch_lines([
        "*registered*PluginManager*"
    ])
| mit |
xguse/ete | test/test_phylotree.py | 1 | 14583 | import unittest
from ete_dev import *
from datasets import *
class Test_phylo_module(unittest.TestCase):
    """Exercises PhyloTree: alignment linking, species-overlap orthology
    prediction, species-tree reconciliation and misc utilities.

    NOTE(review): this is a Python 2 era suite (print statements and the
    deprecated ``self.assert_`` alias are used below).
    """
    # ALL TESTS USE THIS EXAMPLE TREE
    #
    #                     /-Dme_001
    #           /--------|
    #          |          \-Dme_002
    #          |
    #          |                              /-Cfa_001
    #          |                    /--------|
    #          |                   |          \-Mms_001
    #          |                   |
    #---------|                   |                              /-Hsa_001
    #          |                   |                    /--------|
    #          |          /--------|          /--------|          \-Hsa_003
    #          |         |         |         |         |
    #          |         |         |         |          \-Ptr_001
    #          |         |         |         |
    #          |         |         |          \-Mmu_001
    #          |         |          \--------|
    #           \--------|                   |          /-Hsa_004
    #                    |                   |/--------|
    #                    |                    \          \-Ptr_004
    #                    |                   |
    #                    |                    \-Mmu_004
    #                    |
    #                    |          /-Ptr_002
    #                     \--------|
    #                              |          /-Hsa_002
    #                               \--------|
    #                                         \-Mmu_002
    def test_link_alignmets(self):
        """ Phylotree can be linked to SeqGroup objects"""
        fasta = """
        >seqA
        MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAH
        >seqB
        MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSYYAYQTDDQKDRREEAH
        >seqC
        MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
        >seqD
        MAEAPDETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAH
        """
        # Caution with iphylip string. blank spaces in the beginning are important
        iphylip = """
 4 76
seqA   MAEIPDETIQ QFMALT---H NIAVQYLSEF GDLNEALNSY YASQTDDIKD RREEAHQFMA
seqB   MAEIPDATIQ QFMALTNVSH NIAVQY--EF GDLNEALNSY YAYQTDDQKD RREEAHQFMA
seqC   MAEIPDATIQ ---ALTNVSH NIAVQYLSEF GDLNEALNSY YASQTDDQPD RREEAHQFMA
seqD   MAEAPDETIQ QFMALTNVSH NIAVQYLSEF GDLNEAL--- ---------- -REEAHQ---
       LTNVSHQFMA LTNVSH
       LTNVSH---- ------
       LTNVSH---- ------
       -------FMA LTNVSH
"""
        # Loads a tree and link it to an alignment. As usual, 'alignment' can be
        # the path to a file or the data themselves in text string format
        alg1 = SeqGroup(fasta)
        alg2 = SeqGroup(iphylip, format="iphylip")
        t = PhyloTree("(((seqA,seqB),seqC),seqD);", alignment=fasta, alg_format="fasta")
        for l in t.get_leaves():
            self.assertEqual(l.sequence, alg1.get_seq(l.name))
        # The associated alignment can be changed at any time
        t.link_to_alignment(alignment=alg2, alg_format="iphylip")
        for l in t.get_leaves():
            self.assertEqual(l.sequence, alg2.get_seq(l.name))
    def test_get_sp_overlap_on_all_descendants(self):
        """ Tests ortholgy prediction using the sp overlap"""
        # Creates a gene phylogeny with several duplication events at
        # different levels.
        t = PhyloTree('((Dme_001,Dme_002),(((Cfa_001,Mms_001),((((Hsa_001,Hsa_003),Ptr_001),Mmu_001),((Hsa_004,Ptr_004),Mmu_004))),(Ptr_002,(Hsa_002,Mmu_002))));')
        # Scans the tree using the species overlap algorithm and detect all
        # speciation and duplication events
        events = t.get_descendant_evol_events()
        # Check that all duplications are detected
        dup1 = t.get_common_ancestor("Hsa_001", "Hsa_004")
        self.assertEqual(dup1.evoltype, "D")
        dup2 = t.get_common_ancestor("Dme_001", "Dme_002")
        self.assertEqual(dup2.evoltype, "D")
        dup3 = t.get_common_ancestor("Hsa_001", "Hsa_002")
        self.assertEqual(dup3.evoltype, "D")
        dup4 = t.get_common_ancestor("Hsa_001", "Hsa_003")
        self.assertEqual(dup4.evoltype, "D")
        # All other nodes should be speciation
        for node in t.traverse():
            if not node.is_leaf() and \
                   node not in set([dup1, dup2, dup3, dup4]):
                self.assertEqual(node.evoltype, "S")
        # Check events
        for e in events:
            self.assertEqual(e.node.evoltype, e.etype)
        # Check orthology/paralogy prediction
        orthologs = set()
        for e in events:
            if e.node == dup1:
                self.assertEqual(e.inparalogs, set(['Ptr_001', 'Hsa_001', 'Mmu_001', 'Hsa_003']))
                self.assertEqual(e.outparalogs, set(['Mmu_004', 'Ptr_004', 'Hsa_004']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.outparalogs, e.out_seqs)
                self.assertEqual(e.inparalogs, e.in_seqs)
            elif e.node == dup2:
                self.assertEqual(e.inparalogs, set(['Dme_001']))
                self.assertEqual(e.outparalogs, set(['Dme_002']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.outparalogs, e.out_seqs)
                self.assertEqual(e.inparalogs, e.in_seqs)
            elif e.node == dup3:
                self.assertEqual(e.inparalogs, set(['Hsa_003', 'Cfa_001', 'Ptr_001', 'Hsa_001', 'Ptr_004', 'Hsa_004', 'Mmu_004', 'Mmu_001', 'Mms_001']))
                self.assertEqual(e.outparalogs, set(['Hsa_002', 'Ptr_002', 'Mmu_002']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.outparalogs, e.out_seqs)
                self.assertEqual(e.inparalogs, e.in_seqs)
            elif e.node == dup4:
                self.assertEqual(e.inparalogs, set(['Hsa_001']))
                self.assertEqual(e.outparalogs, set(['Hsa_003']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.outparalogs, e.out_seqs)
                self.assertEqual(e.inparalogs, e.in_seqs)
            else:
                key1 = list(e.inparalogs)
                key2 = list(e.orthologs)
                key1.sort()
                key2.sort()
                orthologs.add(tuple(sorted([tuple(key1), tuple(key2)])))
        orthologies = [
            [set(['Dme_001', 'Dme_002']), set(['Ptr_001', 'Cfa_001', 'Hsa_002', 'Hsa_003', 'Ptr_002', 'Hsa_001', 'Ptr_004', 'Hsa_004', 'Mmu_004', 'Mmu_001', 'Mms_001', 'Mmu_002'])],
            [set(['Mms_001', 'Cfa_001']), set(['Hsa_003', 'Ptr_001', 'Hsa_001', 'Ptr_004', 'Hsa_004', 'Mmu_004', 'Mmu_001'])],
            [set(['Ptr_002']), set(['Hsa_002', 'Mmu_002'])],
            [set(['Cfa_001']), set(['Mms_001'])],
            [set(['Hsa_002']), set(['Mmu_002'])],
            [set(['Hsa_003', 'Hsa_001', 'Ptr_001']), set(['Mmu_001'])],
            [set(['Ptr_004', 'Hsa_004']), set(['Mmu_004'])],
            [set(['Hsa_003', 'Hsa_001']), set(['Ptr_001'])],
            [set(['Hsa_004']), set(['Ptr_004'])]
            ]
        expected_orthologs = set()
        for l1,l2 in orthologies:
            key1 = list(l1)
            key2 = list(l2)
            key1.sort()
            key2.sort()
            expected_orthologs.add(tuple(sorted([tuple(key1), tuple(key2)])))
        # Are all orthologies as expected
        self.assertEqual(expected_orthologs, orthologs)
    def test_get_sp_overlap_on_a_seed(self):
        """ Tests ortholgy prediction using sp overlap"""
        # Creates a gene phylogeny with several duplication events at
        # different levels.
        t = PhyloTree('((Dme_001,Dme_002),(((Cfa_001,Mms_001),((((Hsa_001,Hsa_003),Ptr_001),Mmu_001),((Hsa_004,Ptr_004),Mmu_004))),(Ptr_002,(Hsa_002,Mmu_002))));')
        # Scans the tree using the species overlap algorithm
        seed = t.search_nodes(name="Hsa_001")[0]
        events = seed.get_my_evol_events()
        # Check that duplications are detected
        dup1 = t.get_common_ancestor("Hsa_001", "Hsa_004")
        self.assertEqual(dup1.evoltype, "D")
        # This duplication is not in the seed path
        dup2 = t.get_common_ancestor("Dme_001", "Dme_002")
        self.assert_(not hasattr(dup2, "evoltype"))
        dup3 = t.get_common_ancestor("Hsa_001", "Hsa_002")
        self.assertEqual(dup3.evoltype, "D")
        dup4 = t.get_common_ancestor("Hsa_001", "Hsa_003")
        self.assertEqual(dup4.evoltype, "D")
        # All other nodes should be speciation
        node = seed
        while node:
            if not node.is_leaf() and \
                   node not in set([dup1, dup2, dup3, dup4]):
                self.assertEqual(node.evoltype, "S")
            node = node.up
        # Check events
        for e in events:
            self.assertEqual(e.node.evoltype, e.etype)
        # Check orthology/paralogy prediction
        orthologs = set()
        for e in events:
            if e.node == dup1:
                self.assertEqual(e.inparalogs, set(['Hsa_001', 'Hsa_003']))
                self.assertEqual(e.outparalogs, set(['Hsa_004']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.in_seqs, set(['Ptr_001', 'Hsa_001', 'Mmu_001', 'Hsa_003']))
                self.assertEqual(e.out_seqs, set(['Mmu_004', 'Ptr_004', 'Hsa_004']))
            elif e.node == dup3:
                self.assertEqual(e.inparalogs, set(['Hsa_003', 'Hsa_001', 'Hsa_004' ]))
                self.assertEqual(e.outparalogs, set(['Hsa_002']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.in_seqs, set(['Hsa_003', 'Cfa_001', 'Ptr_001', 'Hsa_001', 'Ptr_004', 'Hsa_004', 'Mmu_004', 'Mmu_001', 'Mms_001']))
                self.assertEqual(e.out_seqs, set(['Hsa_002', 'Ptr_002', 'Mmu_002']))
            elif e.node == dup4:
                self.assertEqual(e.inparalogs, set(['Hsa_001']))
                self.assertEqual(e.outparalogs, set(['Hsa_003']))
                self.assertEqual(e.orthologs, set())
                self.assertEqual(e.in_seqs, set(['Hsa_001']))
                self.assertEqual(e.out_seqs, set(['Hsa_003']))
            else:
                key1 = list(e.inparalogs)
                key2 = list(e.orthologs)
                key1.sort()
                key2.sort()
                orthologs.add(tuple(sorted([tuple(key1), tuple(key2)])))
        orthologies = [
            [set(['Dme_001', 'Dme_002']), set([ 'Hsa_002', 'Hsa_003', 'Hsa_001', 'Hsa_004' ])],
            [set(['Mms_001', 'Cfa_001']), set(['Hsa_003', 'Hsa_001', 'Hsa_004'])],
            [set(['Hsa_003', 'Hsa_001']), set(['Mmu_001'])],
            [set(['Hsa_003', 'Hsa_001']), set(['Ptr_001'])],
            ]
        expected_orthologs = set()
        for l1,l2 in orthologies:
            key1 = list(l1)
            key2 = list(l2)
            key1.sort()
            key2.sort()
            expected_orthologs.add(tuple(sorted([tuple(key1), tuple(key2)])))
        # Are all orthologies as expected
        self.assertEqual(expected_orthologs, orthologs)
    def test_reconciliation(self):
        """ Tests ortholgy prediction based on the species reconciliation method"""
        gene_tree_nw = '((Dme_001,Dme_002),(((Cfa_001,Mms_001),((Hsa_001,Ptr_001),Mmu_001)),(Ptr_002,(Hsa_002,Mmu_002))));'
        species_tree_nw = "((((Hsa, Ptr), Mmu), (Mms, Cfa)), Dme);"
        genetree = PhyloTree(gene_tree_nw)
        sptree = PhyloTree(species_tree_nw)
        recon_tree, events = genetree.reconcile(sptree)
        # Check that reconcilied tree nodes have the correct lables:
        # gene loss, duplication, etc.
        expected_recon = "((Dme_001:1,Dme_002:1)1:1[&&NHX:evoltype=D],(((Cfa_001:1,Mms_001:1)1:1[&&NHX:evoltype=S],((Hsa_001:1,Ptr_001:1)1:1[&&NHX:evoltype=S],Mmu_001:1)1:1[&&NHX:evoltype=S])1:1[&&NHX:evoltype=S],((Mms:1[&&NHX:evoltype=L],Cfa:1[&&NHX:evoltype=L])1:1[&&NHX:evoltype=L],(((Hsa:1[&&NHX:evoltype=L],Ptr_002:1)1:1[&&NHX:evoltype=L],Mmu:1[&&NHX:evoltype=L])1:1[&&NHX:evoltype=L],((Ptr:1[&&NHX:evoltype=L],Hsa_002:1)1:1[&&NHX:evoltype=L],Mmu_002:1)1:1[&&NHX:evoltype=S])1:1[&&NHX:evoltype=D])1:1[&&NHX:evoltype=L])1:1[&&NHX:evoltype=D])[&&NHX:evoltype=S];"
        self.assertEqual(recon_tree.write(["evoltype"]), expected_recon)
    def test_miscelaneus(self):
        """ Test several things """
        # Creates a gene phylogeny with several duplication events at
        # different levels.
        t = PhyloTree('((Dme_001,Dme_002),(((Cfa_001,Mms_001),((((Hsa_001,Hsa_003),Ptr_001),Mmu_001),((Hsa_004,Ptr_004),Mmu_004))),(Ptr_002,(Hsa_002,Mmu_002))));')
        # Create a dictionary with relative ages for the species present in
        # the phylogenetic tree.  Note that ages are only relative numbers to
        # define which species are older, and that different species can
        # belong to the same age.
        sp2age = {
          'Hsa': 1, # Homo sapiens (Hominids)
          'Ptr': 2, # P. troglodytes (primates)
          'Mmu': 2, # Macaca mulata (primates)
          'Mms': 3, # Mus musculus (mammals)
          'Cfa': 3, # Canis familiaris (mammals)
          'Dme': 4  # Drosophila melanogaster (metazoa)
        }
        # Check that dup ages are correct
        dup1 = t.get_common_ancestor("Hsa_001", "Hsa_004")
        self.assertEqual(dup1.get_age(sp2age), 2)
        dup2 = t.get_common_ancestor("Dme_001", "Dme_002")
        self.assertEqual(dup2.get_age(sp2age), 4)
        dup3 = t.get_common_ancestor("Hsa_001", "Hsa_002")
        self.assertEqual(dup3.get_age(sp2age), 3)
        dup4 = t.get_common_ancestor("Hsa_001", "Hsa_003")
        self.assertEqual(dup4.get_age(sp2age), 1)
        # Check is_monophyletic tests
        self.assert_(dup1.is_monophyletic(["Hsa", "Ptr", "Mmu"]))
        self.assert_(not dup1.is_monophyletic(["Hsa", "Ptr"]))
        self.assert_(not dup1.is_monophyletic(["Hsa", "Ptr", "Mms"]))
        # Check rooting options
        expected_root = t.search_nodes(name="Dme_002")[0]
        expected_root.dist += 2.3
        self.assertEqual(t.get_farthest_oldest_leaf(sp2age), expected_root)
        # Python 2 print statements; left as-is (suite predates py3).
        print t
        print t.get_farthest_oldest_node(sp2age)
        # Check get species functions
        self.assertEqual(t.get_species(), set(sp2age.keys()))
        self.assertEqual(set([sp for sp in t.iter_species()]), set(sp2age.keys()))
# Run the suite directly: `python test_phylotree.py`.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
Jorge-Rodriguez/ansible | lib/ansible/modules/cloud/openstack/os_keypair.py | 42 | 4603 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove key pair from OpenStack
options:
name:
description:
- Name that has to be given to the key pair
required: true
public_key:
description:
- The public key that would be uploaded to nova and injected into VMs
upon creation.
public_key_file:
description:
- Path to local file containing ssh public key. Mutually exclusive
with public_key.
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
'''
EXAMPLES = '''
# Creates a key pair with the running users public key
- os_keypair:
cloud: mordred
state: present
name: ansible_key
public_key_file: /home/me/.ssh/id_rsa.pub
# Creates a new key pair; the private key is returned after the run.
- os_keypair:
cloud: rax-dfw
state: present
name: ansible_key
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: str
name:
description: Name given to the keypair.
returned: success
type: str
public_key:
description: The public key value for the keypair.
returned: success
type: str
private_key:
description: The private key value for the keypair.
returned: Only when a keypair is generated for the user (e.g., when creating one
and a public key is not specified).
type: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(module, keypair):
state = module.params['state']
if state == 'present' and not keypair:
return True
if state == 'absent' and keypair:
return True
return False
def main():
    """Ansible entry point: ensure the named nova keypair is present/absent."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        public_key=dict(default=None),
        public_key_file=dict(default=None),
        state=dict(default='present',
                   choices=['absent', 'present']),
    )
    # public_key and public_key_file are two mutually exclusive ways of
    # supplying the same key material.
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[['public_key', 'public_key_file']])
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    state = module.params['state']
    name = module.params['name']
    public_key = module.params['public_key']

    if module.params['public_key_file']:
        # Load the key from disk; trailing newline stripped so the
        # comparison against the cloud-side key below is exact.
        with open(module.params['public_key_file']) as public_key_fh:
            public_key = public_key_fh.read().rstrip()

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        keypair = cloud.get_keypair(name)

        if module.check_mode:
            # exit_json terminates the process, so nothing below runs in
            # check mode.
            module.exit_json(changed=_system_state_change(module, keypair))

        if state == 'present':
            if keypair and keypair['name'] == name:
                # An existing keypair is only acceptable when the offered
                # public key (if any) matches what the cloud already holds.
                if public_key and (public_key != keypair['public_key']):
                    module.fail_json(
                        msg="Key name %s present but key hash not the same"
                            " as offered. Delete key first." % name
                    )
                else:
                    changed = False
            else:
                # With public_key=None nova generates a fresh pair; the
                # private key then comes back in the returned keypair.
                keypair = cloud.create_keypair(name, public_key)
                changed = True

            module.exit_json(changed=changed,
                             key=keypair,
                             id=keypair['id'])

        elif state == 'absent':
            if keypair:
                cloud.delete_keypair(name)
                module.exit_json(changed=True)
            module.exit_json(changed=False)

    except sdk.exceptions.OpenStackCloudException as e:
        # Surface any cloud-side failure as a module failure.
        module.fail_json(msg=str(e))
# Invoke the module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
saradbowman/osf.io | osf/management/commands/create_fake_preprint_actions.py | 5 | 1885 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import random
import logging
from faker import Faker
from django.core.management.base import BaseCommand
from osf.models import ReviewAction, Preprint, OSFUser
from osf.utils.workflows import DefaultStates, DefaultTriggers
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Add fake Actions to every preprint that doesn't already have one"""

    def add_arguments(self, parser):
        """Register the optional positional user guid and --num-actions."""
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            'user',
            type=str,
            nargs='?',
            default=None,
            help='Guid for user to list as creator for all fake actions (default to arbitrary user)'
        )
        parser.add_argument(
            '--num-actions',
            action='store',
            type=int,
            default=10,
            help='Number of actions to create for each preprint which does not have one'
        )

    def handle(self, *args, **options):
        """Create fake ReviewActions for every preprint with none.

        :param options: parsed options; ``user`` is an optional OSFUser guid,
            ``num_actions`` the number of actions per preprint (default 10).
        """
        user_guid = options.get('user')
        # BUG FIX: argparse stores '--num-actions' under the dest
        # 'num_actions' (leading dashes stripped, '-' -> '_'). The old
        # lookup of '--num-actions' always returned None, which made
        # range(None) raise TypeError.
        num_actions = options.get('num_actions', 10)
        if user_guid is None:
            # No guid given: attribute the fake actions to an arbitrary user.
            user = OSFUser.objects.first()
        else:
            user = OSFUser.objects.get(guids___id=user_guid)
        fake = Faker()
        triggers = [a.value for a in DefaultTriggers]
        states = [s.value for s in DefaultStates]
        # Only preprints that currently have no actions are touched.
        for preprint in Preprint.objects.filter(actions__isnull=True):
            for i in range(num_actions):
                action = ReviewAction(
                    target=preprint,
                    creator=user,
                    trigger=random.choice(triggers),
                    from_state=random.choice(states),
                    to_state=random.choice(states),
                    comment=fake.text(),
                )
                action.save()
| apache-2.0 |
kasbah/slim_looper | src/gui/carla_widgets/ui_inputdialog_value.py | 1 | 2493 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resources/ui/inputdialog_value.ui'
#
# Created: Thu Jan 2 17:50:40 2014
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Dialog(object):
    # Generated by pyuic4 from resources/ui/inputdialog_value.ui -- see the
    # file-header warning: regenerate from the .ui file instead of editing.

    def setupUi(self, Dialog):
        # Builds the dialog: a label, a double spin box, a "Scale Points"
        # group containing a text browser, and an OK/Cancel button box,
        # stacked in a single vertical layout.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(269, 183)
        self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout_2.addWidget(self.label)
        self.doubleSpinBox = QtGui.QDoubleSpinBox(Dialog)
        self.doubleSpinBox.setObjectName(_fromUtf8("doubleSpinBox"))
        self.verticalLayout_2.addWidget(self.doubleSpinBox)
        self.groupBox = QtGui.QGroupBox(Dialog)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.textBrowser = QtGui.QTextBrowser(self.groupBox)
        self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
        self.verticalLayout.addWidget(self.textBrowser)
        self.verticalLayout_2.addWidget(self.groupBox)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout_2.addWidget(self.buttonBox)

        self.retranslateUi(Dialog)
        # Wire the button box straight to the dialog's accept/reject slots.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Installs all translatable display texts; called once from setupUi().
        Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Set value", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Dialog", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox.setTitle(QtGui.QApplication.translate("Dialog", "Scale Points", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 |
fmarier/letsencrypt | letsencrypt/interfaces.py | 13 | 14967 | """Let's Encrypt client interfaces."""
import abc
import zope.interface
# pylint: disable=no-self-argument,no-method-argument,no-init,inherit-non-class
# pylint: disable=too-few-public-methods
class AccountStorage(object):
    """Accounts storage interface."""
    # NOTE(review): Python 2-style ABC declaration; under Python 3 a bare
    # __metaclass__ attribute has no effect -- confirm the supported
    # interpreter versions before relying on abstractness being enforced.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def find_all(self): # pragma: no cover
        """Find all accounts.

        :returns: All found accounts.
        :rtype: list

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def load(self, account_id): # pragma: no cover
        """Load an account by its id.

        :raises .AccountNotFound: if account could not be found
        :raises .AccountStorageError: if account could not be loaded

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def save(self, account): # pragma: no cover
        """Save account.

        :raises .AccountStorageError: if account could not be saved

        """
        raise NotImplementedError()
# zope.interface declarations below: methods intentionally have no 'self'
# parameter and no bodies -- they only describe the contract providers
# must implement.
class IPluginFactory(zope.interface.Interface):
    """IPlugin factory.

    Objects providing this interface will be called without satisfying
    any entry point "extras" (extra dependencies) you might have defined
    for your plugin, e.g. (excerpt from ``setup.py`` script)::

      setup(
          ...
          entry_points={
              'letsencrypt.plugins': [
                  'name=example_project.plugin[plugin_deps]',
              ],
          },
          extras_require={
              'plugin_deps': ['dep1', 'dep2'],
          }
      )

    Therefore, make sure such objects are importable and usable without
    extras. This is necessary, because CLI does the following operations
    (in order):

      - loads an entry point,
      - calls `inject_parser_options`,
      - requires an entry point,
      - creates plugin instance (`__call__`).

    """

    description = zope.interface.Attribute("Short plugin description")

    def __call__(config, name):
        """Create new `IPlugin`.

        :param IConfig config: Configuration.
        :param str name: Unique plugin name.

        """

    def inject_parser_options(parser, name):
        """Inject argument parser options (flags).

        1. Be nice and prepend all options and destinations with
        `~.common.option_namespace` and `~common.dest_namespace`.

        2. Inject options (flags) only. Positional arguments are not
        allowed, as this would break the CLI.

        :param ArgumentParser parser: (Almost) top-level CLI parser.
        :param str name: Unique plugin name.

        """
# Base contract shared by IAuthenticator and IInstaller below.
class IPlugin(zope.interface.Interface):
    """Let's Encrypt plugin."""

    def prepare():
        """Prepare the plugin.

        Finish up any additional initialization.

        :raises .PluginError:
            when full initialization cannot be completed.
        :raises .MisconfigurationError:
            when full initialization cannot be completed. Plugin will
            be displayed on a list of available plugins.
        :raises .NoInstallationError:
            when the necessary programs/files cannot be located. Plugin
            will NOT be displayed on a list of available plugins.
        :raises .NotSupportedError:
            when the installation is recognized, but the version is not
            currently supported.

        """

    def more_info():
        """Human-readable string to help the user.

        Should describe the steps taken and any relevant info to help the user
        decide which plugin to use.

        :rtype str:

        """
class IAuthenticator(IPlugin):
    """Generic Let's Encrypt Authenticator.

    Class represents all possible tools processes that have the
    ability to perform challenges and attain a certificate.

    """

    def get_chall_pref(domain):
        """Return list of challenge preferences.

        :param str domain: Domain for which challenge preferences are sought.

        :returns: List of challenge types (subclasses of
            :class:`acme.challenges.Challenge`) with the most
            preferred challenges first. If a type is not specified, it means the
            Authenticator cannot perform the challenge.
        :rtype: list

        """

    def perform(achalls):
        """Perform the given challenge.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~letsencrypt.achallenges.AnnotatedChallenge`
            instances, such that it contains types found within
            :func:`get_chall_pref` only.

        :returns: List of ACME
            :class:`~acme.challenges.ChallengeResponse` instances
            or if the :class:`~acme.challenges.Challenge` cannot
            be fulfilled then:

            ``None``
              Authenticator can perform challenge, but not at this time.
            ``False``
              Authenticator will never be able to perform (error).

        :rtype: :class:`list` of
            :class:`acme.challenges.ChallengeResponse`

        :raises .PluginError: If challenges cannot be performed

        """

    def cleanup(achalls):
        """Revert changes and shutdown after challenges complete.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~letsencrypt.achallenges.AnnotatedChallenge`
            instances, a subset of those previously passed to :func:`perform`.

        :raises PluginError: if original configuration cannot be restored

        """
class IConfig(zope.interface.Interface):
    """Let's Encrypt user-supplied configuration.

    .. warning:: The values stored in the configuration have not been
        filtered, stripped or sanitized.

    """
    # Attribute declarations only: each names one configuration value a
    # provider must expose, with its meaning in the Attribute docstring.
    server = zope.interface.Attribute("ACME Directory Resource URI.")
    email = zope.interface.Attribute(
        "Email used for registration and recovery contact.")
    rsa_key_size = zope.interface.Attribute("Size of the RSA key.")

    config_dir = zope.interface.Attribute("Configuration directory.")
    work_dir = zope.interface.Attribute("Working directory.")

    accounts_dir = zope.interface.Attribute(
        "Directory where all account information is stored.")
    backup_dir = zope.interface.Attribute("Configuration backups directory.")
    cert_dir = zope.interface.Attribute(
        "Directory where newly generated Certificate Signing Requests "
        "(CSRs) and certificates not enrolled in the renewer are saved.")
    cert_key_backup = zope.interface.Attribute(
        "Directory where all certificates and keys are stored. "
        "Used for easy revocation.")
    in_progress_dir = zope.interface.Attribute(
        "Directory used before a permanent checkpoint is finalized.")
    key_dir = zope.interface.Attribute("Keys storage.")
    temp_checkpoint_dir = zope.interface.Attribute(
        "Temporary checkpoint directory.")

    renewer_config_file = zope.interface.Attribute(
        "Location of renewal configuration file.")

    no_verify_ssl = zope.interface.Attribute(
        "Disable SSL certificate verification.")
    dvsni_port = zope.interface.Attribute(
        "Port number to perform DVSNI challenge. "
        "Boulder in testing mode defaults to 5001.")

    no_simple_http_tls = zope.interface.Attribute(
        "Do not use TLS when solving SimpleHTTP challenges.")
    simple_http_port = zope.interface.Attribute(
        "Port used in the SimpleHttp challenge.")
class IInstaller(IPlugin):
    """Generic Let's Encrypt Installer Interface.

    Represents any server that an X509 certificate can be placed.

    """

    def get_all_names():
        """Returns all names that may be authenticated.

        :rtype: `list` of `str`

        """

    def deploy_cert(domain, cert_path, key_path, chain_path=None):
        """Deploy certificate.

        :param str domain: domain to deploy certificate file
        :param str cert_path: absolute path to the certificate file
        :param str key_path: absolute path to the private key file
        :param str chain_path: absolute path to the certificate chain file

        :raises .PluginError: when cert cannot be deployed

        """

    def enhance(domain, enhancement, options=None):
        """Perform a configuration enhancement.

        :param str domain: domain for which to provide enhancement
        :param str enhancement: An enhancement as defined in
            :const:`~letsencrypt.constants.ENHANCEMENTS`
        :param options: Flexible options parameter for enhancement.
            Check documentation of
            :const:`~letsencrypt.constants.ENHANCEMENTS`
            for expected options for each enhancement.

        :raises .PluginError: If Enhancement is not supported, or if
            an error occurs during the enhancement.

        """

    def supported_enhancements():
        """Returns a list of supported enhancements.

        :returns: supported enhancements which should be a subset of
            :const:`~letsencrypt.constants.ENHANCEMENTS`
        :rtype: :class:`list` of :class:`str`

        """

    def get_all_certs_keys():
        """Retrieve all certs and keys set in configuration.

        :returns: tuples with form `[(cert, key, path)]`, where:

            - `cert` - str path to certificate file
            - `key` - str path to associated key file
            - `path` - file path to configuration file

        :rtype: list

        """

    def save(title=None, temporary=False):
        """Saves all changes to the configuration files.

        Both title and temporary are needed because a save may be
        intended to be permanent, but the save is not ready to be a full
        checkpoint

        :param str title: The title of the save. If a title is given, the
            configuration will be saved as a new checkpoint and put in a
            timestamped directory. `title` has no effect if temporary is true.

        :param bool temporary: Indicates whether the changes made will
            be quickly reversed in the future (challenges)

        :raises .PluginError: when save is unsuccessful

        """

    def rollback_checkpoints(rollback=1):
        """Revert `rollback` number of configuration checkpoints.

        :raises .PluginError: when configuration cannot be fully reverted

        """

    def view_config_changes():
        """Display all of the LE config changes.

        :raises .PluginError: when config changes cannot be parsed

        """

    def config_test():
        """Make sure the configuration is valid.

        :raises .MisconfigurationError: when the config is not in a usable state

        """

    def restart():
        """Restart or refresh the server content.

        :raises .PluginError: when server cannot be restarted

        """
class IDisplay(zope.interface.Interface):
    """Generic display."""

    def notification(message, height, pause):
        """Displays a string message

        :param str message: Message to display
        :param int height: Height of dialog box if applicable
        :param bool pause: Whether or not the application should pause for
            confirmation (if available)

        """

    def menu(message, choices,
             ok_label="OK", cancel_label="Cancel", help_label=""):
        """Displays a generic menu.

        :param str message: message to display

        :param choices: choices
        :type choices: :class:`list` of :func:`tuple` or :class:`str`

        :param str ok_label: label for OK button
        :param str cancel_label: label for Cancel button
        :param str help_label: label for Help button

        :returns: tuple of (`code`, `index`) where
            `code` - str display exit code
            `index` - int index of the user's selection

        """

    # NOTE: the method name 'input' mirrors the dialog action; as an
    # interface declaration it never shadows the builtin at call sites.
    def input(message):
        """Accept input from the user.

        :param str message: message to display to the user

        :returns: tuple of (`code`, `input`) where
            `code` - str display exit code
            `input` - str of the user's input
        :rtype: tuple

        """

    def yesno(message, yes_label="Yes", no_label="No"):
        """Query the user with a yes/no question.

        Yes and No label must begin with different letters.

        :param str message: question for the user

        :returns: True for "Yes", False for "No"
        :rtype: bool

        """

    def checklist(message, tags, default_state):
        """Allow for multiple selections from a menu.

        :param str message: message to display to the user
        :param list tags: where each is of type :class:`str` len(tags) > 0
        :param bool default_state: If True, items are in a selected state by
            default.

        """
class IValidator(zope.interface.Interface):
    """Configuration validator."""

    def certificate(cert, name, alt_host=None, port=443):
        """Verifies the certificate presented at name is cert

        :param OpenSSL.crypto.X509 cert: Expected certificate
        :param str name: Server's domain name
        :param bytes alt_host: Host to connect to instead of the IP
            address of host
        :param int port: Port to connect to

        :returns: True if the certificate was verified successfully
        :rtype: bool

        """

    def redirect(name, port=80, headers=None):
        """Verify redirect to HTTPS

        :param str name: Server's domain name
        :param int port: Port to connect to
        :param dict headers: HTTP headers to include in request

        :returns: True if redirect is successfully enabled
        :rtype: bool

        """

    def hsts(name):
        """Verify HSTS header is enabled

        :param str name: Server's domain name

        :returns: True if HSTS header is successfully enabled
        :rtype: bool

        """

    def ocsp_stapling(name):
        """Verify ocsp stapling for domain

        :param str name: Server's domain name

        :returns: True if ocsp stapling is successfully enabled
        :rtype: bool

        """
class IReporter(zope.interface.Interface):
    """Interface to collect and display information to the user."""

    HIGH_PRIORITY = zope.interface.Attribute(
        "Used to denote high priority messages")
    MEDIUM_PRIORITY = zope.interface.Attribute(
        "Used to denote medium priority messages")
    LOW_PRIORITY = zope.interface.Attribute(
        "Used to denote low priority messages")

    # NOTE(review): unlike every other interface in this module, these
    # method declarations include 'self'; zope.interface treats it as a
    # regular parameter of the declared signature -- confirm intended.
    def add_message(self, msg, priority, on_crash=False):
        """Adds msg to the list of messages to be printed.

        :param str msg: Message to be displayed to the user.

        :param int priority: One of HIGH_PRIORITY, MEDIUM_PRIORITY, or
            LOW_PRIORITY.

        :param bool on_crash: Whether or not the message should be printed if
            the program exits abnormally.

        """

    def print_messages(self):
        """Prints messages to the user and clears the message queue."""
| apache-2.0 |
halfflat/rdmini | gtest-1.7.0/test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  child = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if child.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    # Build a human-readable description of the environment for the
    # failure message below.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the environment before asserting, so a failure does not
    # leak the variable into later tests.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    # All four env-var/flag combinations: the flag value alone decides.
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # This case only exists on Windows, where exception catching interacts
  # with break-on-failure.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Use the gtest test runner (handles flags shared by all gtest Python tests).
if __name__ == '__main__':
  gtest_test_utils.Main()
| gpl-2.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/openid/server/trustroot.py | 12 | 14391 | # -*- test-case-name: openid.test.test_rpverify -*-
"""
This module contains the C{L{TrustRoot}} class, which helps handle
trust root checking. This module is used by the
C{L{openid.server.server}} module, but it is also available to server
implementers who wish to use it for additional trust root checking.
It also implements relying party return_to URL verification, based on
the realm.
"""
__all__ = [
'TrustRoot',
'RP_RETURN_TO_URL_TYPE',
'extractReturnToURLs',
'returnToMatches',
'verifyReturnTo',
]
from openid import urinorm
from openid.yadis import services
from urllib.parse import urlparse, urlunparse
import re
import logging
############################################
_protocols = ['http', 'https']
_top_level_domains = [
'ac', 'ad', 'ae', 'aero', 'af', 'ag', 'ai', 'al', 'am', 'an',
'ao', 'aq', 'ar', 'arpa', 'as', 'asia', 'at', 'au', 'aw',
'ax', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg', 'bh', 'bi',
'biz', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw',
'by', 'bz', 'ca', 'cat', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci',
'ck', 'cl', 'cm', 'cn', 'co', 'com', 'coop', 'cr', 'cu', 'cv',
'cx', 'cy', 'cz', 'de', 'dj', 'dk', 'dm', 'do', 'dz', 'ec',
'edu', 'ee', 'eg', 'er', 'es', 'et', 'eu', 'fi', 'fj', 'fk',
'fm', 'fo', 'fr', 'ga', 'gb', 'gd', 'ge', 'gf', 'gg', 'gh',
'gi', 'gl', 'gm', 'gn', 'gov', 'gp', 'gq', 'gr', 'gs', 'gt',
'gu', 'gw', 'gy', 'hk', 'hm', 'hn', 'hr', 'ht', 'hu', 'id',
'ie', 'il', 'im', 'in', 'info', 'int', 'io', 'iq', 'ir', 'is',
'it', 'je', 'jm', 'jo', 'jobs', 'jp', 'ke', 'kg', 'kh', 'ki',
'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb', 'lc',
'li', 'lk', 'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc',
'md', 'me', 'mg', 'mh', 'mil', 'mk', 'ml', 'mm', 'mn', 'mo',
'mobi', 'mp', 'mq', 'mr', 'ms', 'mt', 'mu', 'museum', 'mv',
'mw', 'mx', 'my', 'mz', 'na', 'name', 'nc', 'ne', 'net', 'nf',
'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'org',
'pa', 'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr',
'pro', 'ps', 'pt', 'pw', 'py', 'qa', 're', 'ro', 'rs', 'ru',
'rw', 'sa', 'sb', 'sc', 'sd', 'se', 'sg', 'sh', 'si', 'sj',
'sk', 'sl', 'sm', 'sn', 'so', 'sr', 'st', 'su', 'sv', 'sy',
'sz', 'tc', 'td', 'tel', 'tf', 'tg', 'th', 'tj', 'tk', 'tl',
'tm', 'tn', 'to', 'tp', 'tr', 'travel', 'tt', 'tv', 'tw',
'tz', 'ua', 'ug', 'uk', 'us', 'uy', 'uz', 'va', 'vc', 've',
'vg', 'vi', 'vn', 'vu', 'wf', 'ws', 'xn--0zwm56d',
'xn--11b5bs3a9aj6g', 'xn--80akhbyknj4f', 'xn--9t4b11yi5a',
'xn--deba0ad', 'xn--g6w251d', 'xn--hgbk6aj7f53bba',
'xn--hlcj6aya9esc7a', 'xn--jxalpdlp', 'xn--kgbechtv',
'xn--zckzah', 'ye', 'yt', 'yu', 'za', 'zm', 'zw']
# Build from RFC3986, section 3.2.2. Used to reject hosts with invalid
# characters.
host_segment_re = re.compile(
r"(?:[-a-zA-Z0-9!$&'\(\)\*+,;=._~]|%[a-zA-Z0-9]{2})+$")
class RealmVerificationRedirected(Exception):
    """Attempting to verify this realm resulted in a redirect.

    @since: 2.1.0
    """

    def __init__(self, relying_party_url, rp_url_after_redirects):
        # Remember both where verification started and where the
        # redirect chain ended, for the message below.
        self.relying_party_url = relying_party_url
        self.rp_url_after_redirects = rp_url_after_redirects

    def __str__(self):
        template = ("Attempting to verify %r resulted in "
                    "redirect to %r")
        return template % (self.relying_party_url,
                           self.rp_url_after_redirects)
def _parseURL(url):
    """Normalize *url* and split it into ``(proto, host, port, path)``.

    ``port`` is the literal port string (``''`` when absent); ``path``
    re-packs path, params, query and fragment into one string. Returns
    ``None`` when the URL cannot be normalized, the netloc/port is
    malformed, or the host fails the RFC 3986 reg-name check.
    """
    try:
        url = urinorm.urinorm(url)
    except ValueError:
        return None
    proto, netloc, path, params, query, frag = urlparse(url)
    if not path:
        # Python <2.4 does not parse URLs with no path properly
        if not query and '?' in netloc:
            netloc, query = netloc.split('?', 1)
        path = '/'
    # Collapse everything after the authority back into a single string.
    path = urlunparse(('', '', path, params, query, frag))
    if ':' in netloc:
        try:
            host, port = netloc.split(':')
        except ValueError:
            # More than one ':' in the netloc cannot be unpacked.
            return None
        if not re.match(r'\d+$', port):
            return None
    else:
        host = netloc
        port = ''
    host = host.lower()
    if not host_segment_re.match(host):
        return None
    return proto, host, port, path
class TrustRoot(object):
"""
This class represents an OpenID trust root. The C{L{parse}}
classmethod accepts a trust root string, producing a
C{L{TrustRoot}} object. The method OpenID server implementers
would be most likely to use is the C{L{isSane}} method, which
checks the trust root for given patterns that indicate that the
trust root is too broad or points to a local network resource.
@sort: parse, isSane
"""
def __init__(self, unparsed, proto, wildcard, host, port, path):
self.unparsed = unparsed
self.proto = proto
self.wildcard = wildcard
self.host = host
self.port = port
self.path = path
    def isSane(self):
        """
        This method checks the to see if a trust root represents a
        reasonable (sane) set of URLs. 'http://*.com/', for example
        is not a reasonable pattern, as it cannot meaningfully specify
        the site claiming it. This function attempts to find many
        related examples, but it can only work via heuristics.
        Negative responses from this method should be treated as
        advisory, used only to alert the user to examine the trust
        root carefully.

        @return: Whether the trust root is sane

        @rtype: C{bool}
        """

        if self.host == 'localhost':
            return True

        host_parts = self.host.split('.')
        if self.wildcard:
            # Wildcard hosts are stored with a leading '.', so the first
            # split element must be empty.
            assert host_parts[0] == '', host_parts
            del host_parts[0]

        # If it's an absolute domain name, remove the empty string
        # from the end.
        if host_parts and not host_parts[-1]:
            del host_parts[-1]

        if not host_parts:
            return False

        # Do not allow adjacent dots
        if '' in host_parts:
            return False

        tld = host_parts[-1]
        # Last label must be a known TLD, and a bare TLD is never sane.
        if tld not in _top_level_domains:
            return False

        if len(host_parts) == 1:
            return False

        if self.wildcard:
            if len(tld) == 2 and len(host_parts[-2]) <= 3:
                # It's a 2-letter tld with a short second to last segment
                # so there needs to be more than two segments specified
                # (e.g. *.co.uk is insane)
                return len(host_parts) > 2

        # Passed all tests for insanity.
        return True
def validateURL(self, url):
"""
Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool}
"""
url_parts = _parseURL(url)
if url_parts is None:
return False
proto, host, port, path = url_parts
if proto != self.proto:
return False
if port != self.port:
return False
if '*' in host:
return False
if not self.wildcard:
if host != self.host:
return False
elif ((not host.endswith(self.host)) and
('.' + host) != self.host):
return False
if path != self.path:
path_len = len(self.path)
trust_prefix = self.path[:path_len]
url_prefix = path[:path_len]
# must be equal up to the length of the path, at least
if trust_prefix != url_prefix:
return False
# These characters must be on the boundary between the end
# of the trust root's path and the start of the URL's
# path.
if '?' in self.path:
allowed = '&'
else:
allowed = '?/'
return (self.path[-1] in allowed or
path[path_len] in allowed)
return True
def parse(cls, trust_root):
"""
This method creates a C{L{TrustRoot}} instance from the given
input, if possible.
@param trust_root: This is the trust root to parse into a
C{L{TrustRoot}} object.
@type trust_root: C{str}
@return: A C{L{TrustRoot}} instance if trust_root parses as a
trust root, C{None} otherwise.
@rtype: C{NoneType} or C{L{TrustRoot}}
"""
url_parts = _parseURL(trust_root)
if url_parts is None:
return None
proto, host, port, path = url_parts
# check for valid prototype
if proto not in _protocols:
return None
# check for URI fragment
if path.find('#') != -1:
return None
# extract wildcard if it is there
if host.find('*', 1) != -1:
# wildcard must be at start of domain: *.foo.com, not foo.*.com
return None
if host.startswith('*'):
# Starts with star, so must have a dot after it (if a
# domain is specified)
if len(host) > 1 and host[1] != '.':
return None
host = host[1:]
wilcard = True
else:
wilcard = False
# we have a valid trust root
tr = cls(trust_root, proto, wilcard, host, port, path)
return tr
parse = classmethod(parse)
def checkSanity(cls, trust_root_string):
"""str -> bool
is this a sane trust root?
"""
trust_root = cls.parse(trust_root_string)
if trust_root is None:
return False
else:
return trust_root.isSane()
checkSanity = classmethod(checkSanity)
def checkURL(cls, trust_root, url):
"""quick func for validating a url against a trust root. See the
TrustRoot class if you need more control."""
tr = cls.parse(trust_root)
return tr is not None and tr.validateURL(url)
checkURL = classmethod(checkURL)
def buildDiscoveryURL(self):
"""Return a discovery URL for this realm.
This function does not check to make sure that the realm is
valid. Its behaviour on invalid inputs is undefined.
@rtype: str
@returns: The URL upon which relying party discovery should be run
in order to verify the return_to URL
@since: 2.1.0
"""
if self.wildcard:
# Use "www." in place of the star
assert self.host.startswith('.'), self.host
www_domain = 'www' + self.host
return '%s://%s%s' % (self.proto, www_domain, self.path)
else:
return self.unparsed
def __repr__(self):
return "TrustRoot(%r, %r, %r, %r, %r, %r)" % (
self.unparsed, self.proto, self.wildcard, self.host, self.port,
self.path)
def __str__(self):
return repr(self)
# The URI for relying party discovery, used in realm verification.
#
# XXX: This should probably live somewhere else (like in
# openid.consumer or openid.yadis somewhere)
RP_RETURN_TO_URL_TYPE = 'http://specs.openid.net/auth/2.0/return_to'
def _extractReturnURL(endpoint):
"""If the endpoint is a relying party OpenID return_to endpoint,
return the endpoint URL. Otherwise, return None.
This function is intended to be used as a filter for the Yadis
filtering interface.
@see: C{L{openid.yadis.services}}
@see: C{L{openid.yadis.filters}}
@param endpoint: An XRDS BasicServiceEndpoint, as returned by
performing Yadis dicovery.
@returns: The endpoint URL or None if the endpoint is not a
relying party endpoint.
@rtype: str or NoneType
"""
if endpoint.matchTypes([RP_RETURN_TO_URL_TYPE]):
return endpoint.uri
else:
return None
def returnToMatches(allowed_return_to_urls, return_to):
    """Is the return_to URL under one of the supplied allowed
    return_to URLs?

    @since: 2.1.0
    """
    for candidate in allowed_return_to_urls:
        # A return_to pattern works the same as a realm, except that
        # it's not allowed to use a wildcard.  We model this by parsing
        # the pattern as a realm and skipping it when it fails to parse
        # or contains a wildcard.
        return_realm = TrustRoot.parse(candidate)
        if return_realm is None or return_realm.wildcard:
            continue
        # Does the pattern match the return_to we were given?
        if return_realm.validateURL(return_to):
            return True

    # No URL in the list matched
    return False
def getAllowedReturnURLs(relying_party_url):
    """Given a relying party discovery URL return a list of return_to URLs.

    @since: 2.1.0
    """
    final_url, return_to_urls = services.getServiceEndpoints(
        relying_party_url, _extractReturnURL)
    if final_url != relying_party_url:
        # Verification caused a redirect
        raise RealmVerificationRedirected(relying_party_url, final_url)
    return return_to_urls
# _vrfy parameter is there to make testing easier
def verifyReturnTo(realm_str, return_to, _vrfy=getAllowedReturnURLs):
    """Verify that a return_to URL is valid for the given realm.

    This function builds a discovery URL, performs Yadis discovery on
    it, makes sure that the URL does not redirect, parses out the
    return_to URLs, and finally checks to see if the current return_to
    URL matches the return_to.

    @raises DiscoveryFailure: When Yadis discovery fails
    @returns: True if the return_to URL is valid for the realm

    @since: 2.1.0
    """
    realm = TrustRoot.parse(realm_str)
    if realm is None:
        # The realm does not parse as a URL pattern
        return False

    try:
        allowable_urls = _vrfy(realm.buildDiscoveryURL())
    except RealmVerificationRedirected as err:
        logging.exception(str(err))
        return False

    if not returnToMatches(allowable_urls, return_to):
        logging.error("Failed to validate return_to %r for realm %r, was not "
                      "in %s" % (return_to, realm_str, allowable_urls))
        return False
    return True
| mit |
cmouse/buildbot | master/buildbot/test/unit/process/test_remotecommand.py | 5 | 7462 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.trial import unittest
from buildbot.process import remotecommand
from buildbot.test.fake import logfile
from buildbot.test.fake import remotecommand as fakeremotecommand
from buildbot.test.util import interfaces
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.warnings import DeprecatedApiWarning
class TestRemoteShellCommand(unittest.TestCase):
    """Checks argument obfuscation in RemoteShellCommand.

    Only 3-tuples of the exact form ("obfuscated", real, fake) are
    replaced by their fake value in ``fake_command``; everything else
    passes through untouched.
    """

    def test_obfuscated_arguments(self):
        mixed_args = [
            "echo",
            ("obfuscated", "real", "fake"),
            "test",
            ("obfuscated", "real2", "fake2"),
            ("not obfuscated", "a", "b"),
            ("obfuscated"),  # plain string, not a 3-tuple
            ("obfuscated", "test"),  # 2-tuple, not obfuscated
            ("obfuscated", "1", "2", "3"),  # 4-tuple, not obfuscated
        ]
        cmd = remotecommand.RemoteShellCommand("build", mixed_args)
        self.assertEqual(cmd.command, mixed_args)
        expected_fake = [
            "echo",
            "fake",
            "test",
            "fake2",
            ("not obfuscated", "a", "b"),
            ("obfuscated"),  # plain string, not a 3-tuple
            ("obfuscated", "test"),  # 2-tuple, not obfuscated
            ("obfuscated", "1", "2", "3"),  # 4-tuple, not obfuscated
        ]
        self.assertEqual(cmd.fake_command, expected_fake)

    def test_not_obfuscated_arguments(self):
        plain = "echo test"
        cmd = remotecommand.RemoteShellCommand("build", plain)
        self.assertEqual(cmd.command, plain)
        self.assertEqual(cmd.fake_command, plain)
# NOTE:
#
# This interface is considered private to Buildbot and may change without
# warning in future versions.
class Tests(interfaces.InterfaceTests):
    """Interface conformance tests shared by the real and fake
    RemoteCommand implementations.

    Subclasses provide ``remoteCommandClass`` / ``remoteShellCommandClass``;
    ``assertArgSpecMatches`` compares each implementation's signature
    against the reference ``def`` spelled out inside each test, so those
    nested signatures must not be reformatted or edited casually.
    """

    remoteCommandClass = None

    def makeRemoteCommand(self, stdioLogName='stdio'):
        # Build a minimal command instance for the signature checks below.
        return self.remoteCommandClass('ping', {'arg': 'val'},
                                       stdioLogName=stdioLogName)

    def test_signature_RemoteCommand_constructor(self):
        @self.assertArgSpecMatches(self.remoteCommandClass.__init__)
        def __init__(self, remote_command, args, ignore_updates=False,
                     collectStdout=False, collectStderr=False,
                     decodeRC=None,
                     stdioLogName='stdio'):
            pass

    def test_signature_RemoteShellCommand_constructor(self):
        @self.assertArgSpecMatches(self.remoteShellCommandClass.__init__)
        def __init__(self, workdir, command, env=None, want_stdout=1,
                     want_stderr=1, timeout=20 * 60, maxTime=None, sigtermTime=None, logfiles=None,
                     usePTY=None, logEnviron=True, collectStdout=False,
                     collectStderr=False, interruptSignal=None, initialStdin=None,
                     decodeRC=None,
                     stdioLogName='stdio'):
            pass

    def test_signature_run(self):
        cmd = self.makeRemoteCommand()
        @self.assertArgSpecMatches(cmd.run)
        def run(self, step, conn, builder_name):
            pass

    def test_signature_useLog(self):
        cmd = self.makeRemoteCommand()
        @self.assertArgSpecMatches(cmd.useLog)
        def useLog(self, log_, closeWhenFinished=False, logfileName=None):
            pass

    def test_signature_useLogDelayed(self):
        cmd = self.makeRemoteCommand()
        @self.assertArgSpecMatches(cmd.useLogDelayed)
        def useLogDelayed(self, logfileName, activateCallBack,
                          closeWhenFinished=False):
            pass

    def test_signature_interrupt(self):
        cmd = self.makeRemoteCommand()
        # NOTE(review): the reference function is (mis)named useLogDelayed
        # but checks cmd.interrupt; only the argspec is compared, so the
        # name mismatch is harmless, though confusing.
        @self.assertArgSpecMatches(cmd.interrupt)
        def useLogDelayed(self, why):
            pass

    def test_signature_didFail(self):
        cmd = self.makeRemoteCommand()
        # NOTE(review): same naming quirk as test_signature_interrupt.
        @self.assertArgSpecMatches(cmd.didFail)
        def useLogDelayed(self):
            pass

    def test_signature_logs(self):
        cmd = self.makeRemoteCommand()
        self.assertIsInstance(cmd.logs, dict)

    def test_signature_active(self):
        cmd = self.makeRemoteCommand()
        self.assertIsInstance(cmd.active, bool)

    def test_RemoteShellCommand_constructor(self):
        # Constructing with just workdir and command must succeed.
        self.remoteShellCommandClass('wkdir', 'some-command')
class TestRunCommand(unittest.TestCase, Tests):
    """Run the shared interface tests against the real implementations."""

    remoteCommandClass = remotecommand.RemoteCommand
    remoteShellCommandClass = remotecommand.RemoteShellCommand

    def test_notStdioLog(self):
        log_name = 'notstdio'
        cmd = self.makeRemoteCommand(stdioLogName=log_name)
        fake_log = logfile.FakeLogFile(log_name)
        cmd.useLog(fake_log)
        cmd.addStdout('some stdout')
        self.assertEqual(fake_log.stdout, 'some stdout')
        cmd.addStderr('some stderr')
        self.assertEqual(fake_log.stderr, 'some stderr')
        cmd.addHeader('some header')
        self.assertEqual(fake_log.header, 'some header')

    def test_RemoteShellCommand_usePTY_on_worker_2_16(self):
        cmd = remotecommand.RemoteShellCommand('workdir', 'shell')

        # Fake a worker that reports protocol version 2.16.
        def fake_workerVersion(command, oldversion=None):
            return '2.16'

        def fake_workerVersionIsOlderThan(command, minversion):
            return ['2', '16'] < minversion.split('.')

        step = mock.Mock()
        step.workerVersionIsOlderThan = fake_workerVersionIsOlderThan
        step.workerVersion = fake_workerVersion
        conn = mock.Mock()
        conn.remoteStartCommand = mock.Mock(return_value=None)
        cmd.run(step, conn, 'builder')
        self.assertEqual(cmd.args['usePTY'], 'slave-config')
class TestFakeRunCommand(unittest.TestCase, Tests):
    # Run the shared interface tests against the fake implementations so
    # that the fakes stay in sync with the real classes.
    remoteCommandClass = fakeremotecommand.FakeRemoteCommand
    remoteShellCommandClass = fakeremotecommand.FakeRemoteShellCommand
class TestWorkerTransition(unittest.TestCase):
    """usePTY handling must not emit DeprecatedApiWarning."""

    def _make_without_warnings(self, **kwargs):
        # Constructing the command must not trigger a deprecation warning.
        with assertNotProducesWarnings(DeprecatedApiWarning):
            return remotecommand.RemoteShellCommand(
                'workdir', 'command', **kwargs)

    def test_RemoteShellCommand_usePTY(self):
        cmd = self._make_without_warnings()
        self.assertTrue(cmd.args['usePTY'] is None)

        cmd = self._make_without_warnings(usePTY=True)
        self.assertTrue(cmd.args['usePTY'])

        cmd = self._make_without_warnings(usePTY=False)
        self.assertFalse(cmd.args['usePTY'])
| gpl-2.0 |
cgstudiomap/cgstudiomap | main/eggs/xlwt-1.0.0-py2.7.egg/xlwt/Cell.py | 8 | 8563 | # -*- coding: windows-1252 -*-
from struct import unpack, pack
from . import BIFFRecords
from .compat import xrange
class StrCell(object):
    """A cell holding an index into the shared string table
    (LABELSST record)."""

    __slots__ = ["rowx", "colx", "xf_idx", "sst_idx"]

    def __init__(self, rowx, colx, xf_idx, sst_idx):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.sst_idx = sst_idx

    def get_biff_data(self):
        """Return the packed LABELSST record (id 0x00FD, data length 10)."""
        # Equivalent to BIFFRecords.LabelSSTRecord(...).get(), inlined
        # here for speed.
        record_id, data_len = 0x00FD, 10
        return pack('<5HL', record_id, data_len,
                    self.rowx, self.colx, self.xf_idx, self.sst_idx)
class BlankCell(object):
    """An empty cell that still carries formatting (BLANK record)."""

    __slots__ = ["rowx", "colx", "xf_idx"]

    def __init__(self, rowx, colx, xf_idx):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx

    def get_biff_data(self):
        """Return the packed BLANK record (id 0x0201, data length 6)."""
        # Equivalent to BIFFRecords.BlankRecord(...).get(), inlined for speed.
        record_id, data_len = 0x0201, 6
        return pack('<5H', record_id, data_len,
                    self.rowx, self.colx, self.xf_idx)
class MulBlankCell(object):
    """A run of adjacent blank cells in one row sharing one format
    (MULBLANK record)."""

    __slots__ = ["rowx", "colx1", "colx2", "xf_idx"]

    def __init__(self, rowx, colx1, colx2, xf_idx):
        self.rowx = rowx
        self.colx1 = colx1
        self.colx2 = colx2
        self.xf_idx = xf_idx

    def get_biff_data(self):
        """Return the packed MULBLANK record covering colx1..colx2."""
        record = BIFFRecords.MulBlankRecord(
            self.rowx, self.colx1, self.colx2, self.xf_idx)
        return record.get()
class NumberCell(object):
    # A numeric cell.  The value is emitted either as a compact RK
    # encoding (30-bit integer, optionally scaled by 100) or, when it
    # cannot be losslessly RK-encoded, as a full 8-byte NUMBER record.
    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, number):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        # Coerce to float so ints and other numeric types are handled
        # uniformly by the encoder below.
        self.number = float(number)

    def get_encoded_data(self):
        # Returns (1, rk_int) when the value fits an RK encoding, else
        # (0, packed NUMBER record bytes).
        rk_encoded = 0
        num = self.number
        # The four possible kinds of RK encoding are *not* mutually exclusive.
        # The 30-bit integer variety picks up the most.
        # In the code below, the four varieties are checked in descending order
        # of bangs per buck, or not at all.
        # SJM 2007-10-01
        if -0x20000000 <= num < 0x20000000: # fits in 30-bit *signed* int
            inum = int(num)
            if inum == num: # survives round-trip
                # print "30-bit integer RK", inum, hex(inum)
                # Low 2 bits 0b10: integer variety, not scaled.
                rk_encoded = 2 | (inum << 2)
                return 1, rk_encoded
        temp = num * 100
        if -0x20000000 <= temp < 0x20000000:
            # That was step 1: the coded value will fit in
            # a 30-bit signed integer.
            itemp = int(round(temp, 0))
            # That was step 2: "itemp" is the best candidate coded value.
            # Now for step 3: simulate the decoding,
            # to check for round-trip correctness.
            if itemp / 100.0 == num:
                # print "30-bit integer RK*100", itemp, hex(itemp)
                # Low 2 bits 0b11: integer variety, divided by 100 on read.
                rk_encoded = 3 | (itemp << 2)
                return 1, rk_encoded
        if 0: # Cost of extra pack+unpack not justified by tiny yield.
            # Dead code kept deliberately: the two float RK varieties
            # (truncated IEEE double, optionally scaled by 100).
            packed = pack('<d', num)
            w01, w23 = unpack('<2i', packed)
            if not w01 and not(w23 & 3):
                # 34 lsb are 0
                # print "float RK", w23, hex(w23)
                return 1, w23
            packed100 = pack('<d', temp)
            w01, w23 = unpack('<2i', packed100)
            if not w01 and not(w23 & 3):
                # 34 lsb are 0
                # print "float RK*100", w23, hex(w23)
                return 1, w23 | 1
        #print "Number"
        #print
        # Fall back to a full NUMBER record (id 0x0203, data length 14).
        return 0, pack('<5Hd', 0x0203, 14, self.rowx, self.colx, self.xf_idx, num)

    def get_biff_data(self):
        isRK, value = self.get_encoded_data()
        if isRK:
            # Wrap the encoded int in an RK record (id 0x027E, length 10).
            return pack('<5Hi', 0x27E, 10, self.rowx, self.colx, self.xf_idx, value)
        return value # NUMBER record already packed
class BooleanCell(object):
    """A cell holding a boolean value (BOOLERR record with error flag 0)."""

    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, number):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.number = number

    def get_biff_data(self):
        """Return the packed BOOLERR record for this boolean value."""
        # The trailing 0 marks the record as a boolean (1 would mean error).
        record = BIFFRecords.BoolErrRecord(
            self.rowx, self.colx, self.xf_idx, self.number, 0)
        return record.get()
# Mapping from a BIFF error code (int) or its Excel display string to the
# numeric error value stored in a BOOLERR record.  Meanings per the BIFF8
# specification.
error_code_map = {
    0x00: 0,   # Intersection of two cell ranges is empty
    0x07: 7,   # Division by zero
    0x0F: 15,  # Wrong type of operand
    0x17: 23,  # Illegal or deleted cell reference
    0x1D: 29,  # Wrong function or range name
    0x24: 36,  # Value range overflow
    0x2A: 42,  # Argument or function not available
    '#NULL!' : 0,   # Intersection of two cell ranges is empty
    '#DIV/0!': 7,   # Division by zero
    # Bug fix: '#VALUE!' previously mapped to 36 (the #NUM! code); the
    # correct BIFF value for "wrong type of operand" is 0x0F == 15, as the
    # integer-keyed entry above already documented.
    '#VALUE!': 15,  # Wrong type of operand
    '#REF!'  : 23,  # Illegal or deleted cell reference
    '#NAME?' : 29,  # Wrong function or range name
    '#NUM!'  : 36,  # Value range overflow
    '#N/A!'  : 42,  # Argument or function not available (legacy spelling)
    '#N/A'   : 42,  # Argument or function not available (Excel's spelling)
}

class ErrorCell(object):
    """A cell containing an Excel error value (BOOLERR record with
    error flag 1)."""

    __slots__ = ["rowx", "colx", "xf_idx", "number"]

    def __init__(self, rowx, colx, xf_idx, error_string_or_code):
        """Accept either a BIFF error code or an Excel error string.

        Raises Exception if the value is not a recognised error.
        """
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        try:
            self.number = error_code_map[error_string_or_code]
        except KeyError:
            raise Exception('Illegal error value (%r)' % error_string_or_code)

    def get_biff_data(self):
        # The trailing 1 marks the record as an error (0 would mean boolean).
        return BIFFRecords.BoolErrRecord(self.rowx,
            self.colx, self.xf_idx, self.number, 1).get()
class FormulaCell(object):
    """A cell containing a formula (FORMULA record)."""

    __slots__ = ["rowx", "colx", "xf_idx", "frmla", "calc_flags"]

    def __init__(self, rowx, colx, xf_idx, frmla, calc_flags=0):
        self.rowx = rowx
        self.colx = colx
        self.xf_idx = xf_idx
        self.frmla = frmla
        self.calc_flags = calc_flags

    def get_biff_data(self):
        """Return the packed FORMULA record with the formula's RPN."""
        record = BIFFRecords.FormulaRecord(
            self.rowx, self.colx, self.xf_idx,
            self.frmla.rpn(), self.calc_flags)
        return record.get()
# module-level function for *internal* use by the Row module
def _get_cells_biff_data_mul(rowx, cell_items):
    # Return the BIFF data for all cell records in the row.
    # Adjacent BLANK|RK records are combined into MUL(BLANK|RK) records.
    # `cell_items` is a column-ordered list of (colx, cell) pairs for
    # row `rowx`.
    pieces = []
    nitems = len(cell_items)
    i = 0
    while i < nitems:
        icolx, icell = cell_items[i]
        if isinstance(icell, NumberCell):
            isRK, value = icell.get_encoded_data()
            if not isRK:
                pieces.append(value) # pre-packed NUMBER record
                i += 1
                continue
            # Start a potential MULRK run of (rk_value, xf_idx) pairs.
            muldata = [(value, icell.xf_idx)]
            target = NumberCell
        elif isinstance(icell, BlankCell):
            # Start a potential MULBLANK run of xf indexes.
            muldata = [icell.xf_idx]
            target = BlankCell
        else:
            # Any other cell type is emitted individually, as-is.
            pieces.append(icell.get_biff_data())
            i += 1
            continue
        lastcolx = icolx
        j = i
        # NOTE(review): initialised as a str but later assigned bytes;
        # harmless because the empty string is falsy in the final test.
        packed_record = ''
        # Extend the run while the next cell is in the adjacent column,
        # is the same kind, and (for numbers) is still RK-encodable.
        for j in xrange(i+1, nitems):
            jcolx, jcell = cell_items[j]
            if jcolx != lastcolx + 1:
                nexti = j
                break
            if not isinstance(jcell, target):
                nexti = j
                break
            if target == NumberCell:
                isRK, value = jcell.get_encoded_data()
                if not isRK:
                    # A non-RK number ends the run; its pre-packed NUMBER
                    # record is appended after the run, below.
                    packed_record = value
                    nexti = j + 1
                    break
                muldata.append((value, jcell.xf_idx))
            else:
                muldata.append(jcell.xf_idx)
            lastcolx = jcolx
        else:
            # The loop exhausted the row without a break (also reached
            # when the range is empty and j is still i).
            nexti = j + 1
        if target == NumberCell:
            if lastcolx == icolx:
                # RK record
                value, xf_idx = muldata[0]
                pieces.append(pack('<5Hi', 0x027E, 10, rowx, icolx, xf_idx, value))
            else:
                # MULRK record
                nc = lastcolx - icolx + 1
                pieces.append(pack('<4H', 0x00BD, 6 * nc + 6, rowx, icolx))
                pieces.append(b''.join([pack('<Hi', xf_idx, value) for value, xf_idx in muldata]))
                pieces.append(pack('<H', lastcolx))
        else:
            if lastcolx == icolx:
                # BLANK record
                xf_idx = muldata[0]
                pieces.append(pack('<5H', 0x0201, 6, rowx, icolx, xf_idx))
            else:
                # MULBLANK record
                nc = lastcolx - icolx + 1
                pieces.append(pack('<4H', 0x00BE, 2 * nc + 6, rowx, icolx))
                pieces.append(b''.join([pack('<H', xf_idx) for xf_idx in muldata]))
                pieces.append(pack('<H', lastcolx))
        if packed_record:
            pieces.append(packed_record)
        i = nexti
    return b''.join(pieces)
| agpl-3.0 |
srio/oasys-comsyl | orangecontrib/comsyl/widgets/applications/comsyl_propagate_beamline.py | 1 | 10084 | import os, sys
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QApplication, QFileDialog
from PyQt5.QtGui import QIntValidator, QDoubleValidator
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from comsyl.autocorrelation.CompactAFReader import CompactAFReader
from orangecontrib.comsyl.widgets.gui.ow_comsyl_widget import OWComsylWidget
from orangecontrib.comsyl.util.preprocessor import ComsylPreprocessorData
from orangecontrib.comsyl.util.python_script import PythonConsole
from orangecontrib.comsyl.util.messages import showConfirmMessage
class OWComsylPropagateBeamline(OWComsylWidget):
    """OASYS widget that generates (and can run or save) a Python script
    propagating COMSYL coherent modes through a pickled beamline."""

    name = "Propagate Beamline Script"
    description = "COMSYL Propagate Beamline"
    icon = "icons/propagator.png"
    maintainer = "Manuel Sanchez del Rio"
    maintainer_email = "srio(@at@)esrf.eu"
    priority = 46
    category = ""
    keywords = ["COMSYL", "coherent modes"]

    inputs = [("COMSYL modes" , CompactAFReader, "setCompactAFReader" ),
              ("COMSYL preprocessor beamline" , ComsylPreprocessorData, "setPreprocessor" ),]

    outputs = [{"name":"COMSYL modes",
                "type":CompactAFReader,
                "doc":"COMSYL modes",
                "id":"COMSYL modes"} ]

    COMSYL_AF_FILE = ""
    BL_PICKLE_FILE = ""

    MODE_INDEX = Setting(2)  # maximum mode index
    REFERENCE_SOURCE = Setting(0)

    DIRECTORY_NAME = "tmp_comsyl_propagation"
    PYTHON_INTERPRETER = sys.executable

    IMAGE_WIDTH = 890
    IMAGE_HEIGHT = 680

    def __init__(self, show_automatic_box=True):
        super().__init__(show_automatic_box=show_automatic_box)

        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")

        button = gui.button(button_box, self, "Refresh Script", callback=self.refresh_script)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)

        button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
        font = QFont(button.font())
        font.setItalic(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)
        button.setFixedWidth(150)

        gui.separator(self.controlArea)

        gen_box = oasysgui.widgetBox(self.controlArea, "COMSYL Beamline Propagation", addSpace=False, orientation="vertical", height=530, width=self.CONTROL_AREA_WIDTH-5)

        figure_box0 = oasysgui.widgetBox(gen_box, "", addSpace=True, orientation="horizontal")
        self.id_comsyl_af_file = oasysgui.lineEdit(figure_box0, self, "COMSYL_AF_FILE", "Comsyl File with Modes:",
                                                   labelWidth=90, valueType=str, orientation="horizontal")
        gui.button(figure_box0, self, "...", callback=self.select_comsyl_af_file)

        figure_box = oasysgui.widgetBox(gen_box, "", addSpace=True, orientation="horizontal")
        self.id_bl_pickle_file = oasysgui.lineEdit(figure_box, self, "BL_PICKLE_FILE", "BL Pickle File:",
                                                   labelWidth=90, valueType=str, orientation="horizontal")
        gui.button(figure_box, self, "...", callback=self.select_bl_pickle_file)

        oasysgui.lineEdit(gen_box, self, "MODE_INDEX",
                          label="Maximum Mode index", addSpace=False,
                          valueType=int, validator=QIntValidator(), orientation="horizontal", labelWidth=150)

        oasysgui.lineEdit(gen_box, self, "DIRECTORY_NAME", "Temporal Directory", labelWidth=160, valueType=str, orientation="horizontal")
        oasysgui.lineEdit(gen_box, self, "PYTHON_INTERPRETER", "Python interpreter", labelWidth=160, valueType=str, orientation="horizontal")

        tabs_setting = oasysgui.tabWidget(self.mainArea)
        tabs_setting.setFixedHeight(self.IMAGE_HEIGHT)
        tabs_setting.setFixedWidth(self.IMAGE_WIDTH)

        tab_scr = oasysgui.createTabPage(tabs_setting, "Python Script")
        tab_out = oasysgui.createTabPage(tabs_setting, "System Output")

        self.pythonScript = oasysgui.textArea(readOnly=False)
        self.pythonScript.setStyleSheet("background-color: white; font-family: Courier, monospace;")
        self.pythonScript.setMaximumHeight(self.IMAGE_HEIGHT - 250)

        script_box = oasysgui.widgetBox(tab_scr, "", addSpace=False, orientation="vertical", height=self.IMAGE_HEIGHT - 10, width=self.IMAGE_WIDTH - 10)
        script_box.layout().addWidget(self.pythonScript)

        console_box = oasysgui.widgetBox(script_box, "", addSpace=True, orientation="vertical",
                                         height=150, width=self.IMAGE_WIDTH - 10)
        self.console = PythonConsole(self.__dict__, self)
        console_box.layout().addWidget(self.console)

        self.shadow_output = oasysgui.textArea()

        out_box = oasysgui.widgetBox(tab_out, "System Output", addSpace=True, orientation="horizontal", height=self.IMAGE_WIDTH - 45)
        out_box.layout().addWidget(self.shadow_output)

        button_box = oasysgui.widgetBox(tab_scr, "", addSpace=True, orientation="horizontal")
        gui.button(button_box, self, "Run Script", callback=self.execute_script, height=40)
        gui.button(button_box, self, "Save Script to File", callback=self.save_script, height=40)

    def select_comsyl_af_file(self):
        """Open a file dialog to pick the COMSYL modes (.npz) file."""
        self.id_comsyl_af_file.setText(oasysgui.selectFileFromDialog(self,
                    self.COMSYL_AF_FILE, "Select Input File",
                    file_extension_filter="COMSYL Files (*.npz)"))

    def select_bl_pickle_file(self):
        """Open a file dialog to pick the pickled beamline (.p) file."""
        self.id_bl_pickle_file.setText(oasysgui.selectFileFromDialog(self,
                    self.BL_PICKLE_FILE, "Select Input File",
                    file_extension_filter="COMSYL Beamline Pickle Files (*.p)"))

    def setCompactAFReader(self, af):
        """Input handler for the "COMSYL modes" channel."""
        if af is not None:
            self.COMSYL_AF_FILE = af._af._io.fromFile()
            self.refresh_script()

    def setPreprocessor(self, data):
        """Input handler for the "COMSYL preprocessor beamline" channel."""
        try:
            self.BL_PICKLE_FILE = data.get_beamline_pickle_file()
            self.refresh_script()
        except Exception:
            # Best-effort: ignore inputs (e.g. None) that do not carry a
            # beamline pickle file.  Narrowed from a bare "except:" so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            pass

    def execute_script(self):
        """Run the generated script in the embedded console, after a
        confirmation (the propagation is long and resource-hungry)."""
        if showConfirmMessage(message = "Do you confirm launching a COMSYL propagation?",
                              informative_text="This is a long and resource-consuming process: launching it within the OASYS environment is highly discouraged." + \
                                               "The suggested solution is to save the script into a file and to launch it in a different environment."):
            self._script = str(self.pythonScript.toPlainText())
            self.console.write("\nRunning script:\n")
            self.console.push("exec(_script)")
            self.console.new_prompt(sys.ps1)

    def save_script(self):
        """Save the generated script to a user-chosen file."""
        file_name = QFileDialog.getSaveFileName(self, "Save File to Disk", os.getcwd(), filter='*.py')[0]

        if file_name is not None:
            if file_name.strip() != "":
                # "with" guarantees the handle is closed even if the write
                # fails (the original left the file open on error).
                with open(file_name, "w") as script_file:
                    script_file.write(str(self.pythonScript.toPlainText()))

                QtWidgets.QMessageBox.information(self, "QMessageBox.information()",
                                                  "File " + file_name + " written to disk",
                                                  QtWidgets.QMessageBox.Ok)

    def refresh_script(self):
        """Regenerate the script text from the current widget settings."""
        dd = {"COMSYL_AF_FILE": self.COMSYL_AF_FILE,
              "BL_PICKLE_FILE": self.BL_PICKLE_FILE,
              "DIRECTORY_NAME": self.DIRECTORY_NAME,
              "PYTHON_INTERPRETER": self.PYTHON_INTERPRETER,
              "MODE_INDEX": self.MODE_INDEX,
              }
        self.pythonScript.setText(self.script_template().format_map(dd))

    def script_template(self):
        """Return the propagation script text, with {placeholders} to be
        filled by format_map in refresh_script."""
        return """import pickle
from comsyl.waveoptics.ComsylWofryBeamline import ComsylWofryBeamline
from comsyl.waveoptics.SRWAdapter import ComsylSRWBeamline
from comsyl.autocorrelation.CompactAFReader import CompactAFReader
comsyl_beamline = pickle.load(open("{BL_PICKLE_FILE}","rb"))
filename = "{COMSYL_AF_FILE}"
af_oasys = CompactAFReader.initialize_from_file(filename)
af_comsyl = af_oasys.get_af()
# **source position correction**
source_position=af_comsyl.info().sourcePosition()
if source_position == "entrance":
    source_offset = af_comsyl._undulator.length() * 0.5
elif source_position == "center":
    source_offset = 0.0
else:
    raise Exception("Unhandled source position")
print("Using source position entrance z=%f" % source_offset)
comsyl_beamline.add_undulator_offset(source_offset)
af_propagated = comsyl_beamline.propagate_af(af_comsyl,
    directory_name="{DIRECTORY_NAME}",
    af_output_file_root="{DIRECTORY_NAME}/propagated_beamline",
    maximum_mode={MODE_INDEX},
    python_to_be_used="{PYTHON_INTERPRETER}")
#rediagonalization **uncomment to proceed**
#af_propagated.diagonalizeModes({MODE_INDEX})
#af_propagated.save("{DIRECTORY_NAME}/rediagonalized")
"""
if __name__ == '__main__':
    # Manual smoke test: launch the widget standalone with sample input
    # files (paths are site-specific and only useful on the ESRF hosts).
    from PyQt5.QtWidgets import QApplication
    app = QApplication([])
    ow = OWComsylPropagateBeamline()
    ow.COMSYL_AF_FILE = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz"
    ow.BL_PICKLE_FILE = "/scisoft/xop2.4/extensions/shadowvui/shadow3-scripts/HIGHLIGHTS/bl.p"
    ow.refresh_script()
    ow.show()
    app.exec_()
ow.saveSettings() | mit |
skevy/django | django/contrib/sitemaps/__init__.py | 291 | 4010 | from django.contrib.sites.models import Site, get_current_site
from django.core import urlresolvers, paginator
from django.core.exceptions import ImproperlyConfigured
import urllib
PING_URL = "http://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
    """Raised by ping_google when no sitemap URL was supplied and none
    could be auto-detected via URL reversing."""
    pass
def ping_google(sitemap_url=None, ping_url=PING_URL):
    """
    Alerts Google that the sitemap for the current site has been updated.
    If sitemap_url is provided, it should be an absolute path to the sitemap
    for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
    function will attempt to deduce it by using urlresolvers.reverse().

    Raises SitemapNotFound if no sitemap URL is given and none can be
    auto-detected.
    """
    if sitemap_url is None:
        try:
            # First, try to get the "index" sitemap URL.
            sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index')
        except urlresolvers.NoReverseMatch:
            try:
                # Next, try for the "global" sitemap URL.
                sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap')
            except urlresolvers.NoReverseMatch:
                pass

    if sitemap_url is None:
        raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.")

    # Site is already imported at module level; the redundant local
    # re-import that used to be here has been removed.
    current_site = Site.objects.get_current()
    url = "http://%s%s" % (current_site.domain, sitemap_url)
    params = urllib.urlencode({'sitemap': url})
    urllib.urlopen("%s?%s" % (ping_url, params))
class Sitemap(object):
    """Base class for sitemap sections.

    Subclasses override items() and, optionally, location(), lastmod(),
    changefreq() and priority() -- each of which may also be a plain
    attribute instead of a method.
    """

    # This limit is defined by Google. See the index documentation at
    # http://sitemaps.org/protocol.php#index.
    limit = 50000

    def __get(self, name, obj, default=None):
        # Resolve attribute `name`, calling it with `obj` if callable,
        # so subclasses can supply either attributes or methods.
        try:
            attr = getattr(self, name)
        except AttributeError:
            return default
        if callable(attr):
            return attr(obj)
        return attr

    def items(self):
        """Return the iterable of objects in this sitemap section."""
        return []

    def location(self, obj):
        """Return the URL path for `obj` (default: its absolute URL)."""
        return obj.get_absolute_url()

    def _get_paginator(self):
        # Lazily build and cache the paginator over items().
        if not hasattr(self, "_paginator"):
            self._paginator = paginator.Paginator(self.items(), self.limit)
        return self._paginator
    paginator = property(_get_paginator)

    def get_urls(self, page=1, site=None):
        """Return the list of URL-info dicts for one page of this section."""
        if site is None:
            if Site._meta.installed:
                try:
                    site = Site.objects.get_current()
                except Site.DoesNotExist:
                    pass
            if site is None:
                raise ImproperlyConfigured("In order to use Sitemaps you must either use the sites framework or pass in a Site or RequestSite object in your view code.")
        urls = []
        for item in self.paginator.page(page).object_list:
            loc = "http://%s%s" % (site.domain, self.__get('location', item))
            priority = self.__get('priority', item, None)
            url_info = {
                'location': loc,
                'lastmod': self.__get('lastmod', item, None),
                'changefreq': self.__get('changefreq', item, None),
                # Bug fix: the old "priority is not None and priority or ''"
                # idiom collapsed a valid priority of 0 (falsy) to ''.
                'priority': str(priority if priority is not None else '')
            }
            urls.append(url_info)
        return urls
class FlatPageSitemap(Sitemap):
    """Sitemap section listing the current site's public flatpages."""

    def items(self):
        # Only flatpages that do not require registration are public.
        site = Site.objects.get_current()
        return site.flatpage_set.filter(registration_required=False)
class GenericSitemap(Sitemap):
    """Sitemap section driven by an arbitrary queryset.

    `info_dict` must contain 'queryset' and may contain 'date_field',
    the name of the attribute used for lastmod.
    """

    priority = None
    changefreq = None

    def __init__(self, info_dict, priority=None, changefreq=None):
        self.queryset = info_dict['queryset']
        self.date_field = info_dict.get('date_field', None)
        self.priority = priority
        self.changefreq = changefreq

    def items(self):
        # Make sure to return a clone; we don't want premature evaluation.
        return self.queryset.filter()

    def lastmod(self, item):
        if self.date_field is None:
            return None
        return getattr(item, self.date_field)
| bsd-3-clause |
scality/cinder | cinder/wsgi/eventlet_server.py | 3 | 10380 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Methods for working with eventlet WSGI servers."""
from __future__ import print_function
import errno
import os
import socket
import ssl
import time
import eventlet
import eventlet.wsgi
import greenlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from oslo_utils import excutils
from oslo_utils import netutils
from cinder import exception
from cinder.i18n import _, _LE, _LI
# TCP keepalive tuning and TLS file locations applied to each server socket.
socket_opts = [
    cfg.BoolOpt('tcp_keepalive',
                default=True,
                help="Sets the value of TCP_KEEPALIVE (True/False) for each "
                     "server socket."),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.IntOpt('tcp_keepalive_interval',
               help="Sets the value of TCP_KEEPINTVL in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.IntOpt('tcp_keepalive_count',
               help="Sets the value of TCP_KEEPCNT for each "
                    "server socket. Not supported on OS X."),
    cfg.StrOpt('ssl_ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               help="Certificate file to use when starting "
                    "the server securely"),
    cfg.StrOpt('ssl_key_file',
               help="Private key file to use when starting "
                    "the server securely"),
]
# Options controlling eventlet's WSGI request handling behaviour.
eventlet_opts = [
    cfg.IntOpt('max_header_line',
               default=16384,
               help="Maximum line size of message headers to be accepted. "
                    "max_header_line may need to be increased when using "
                    "large tokens (typically those generated by the "
                    "Keystone v3 API with big service catalogs)."),
    cfg.IntOpt('client_socket_timeout', default=900,
               help="Timeout for client connections\' socket operations. "
                    "If an incoming connection is idle for this number of "
                    "seconds it will be closed. A value of \'0\' means "
                    "wait forever."),
    cfg.BoolOpt('wsgi_keep_alive',
                default=True,
                help='If False, closes the client socket connection '
                     'explicitly. Setting it to True to maintain backward '
                     'compatibility. Recommended setting is set it to False.'),
]
# Register both option groups on the global config object at import time.
CONF = cfg.CONF
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
LOG = logging.getLogger(__name__)
class Server(service.ServiceBase):
    """Server class to manage a WSGI server, serving a WSGI application."""
    # Maximum number of concurrent greenthreads when the caller does not
    # supply an explicit pool_size.
    default_pool_size = 1000
    def __init__(self, name, app, host=None, port=None, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128):
        """Initialize, but do not start, a WSGI server.

        Binds the listening socket immediately (retrying for up to 30
        seconds if the port is in use) but does not begin serving until
        :meth:`start` is called.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to serve the application; 0 picks a free port.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param protocol: HTTP protocol handler class for eventlet.wsgi.
        :param backlog: Listen backlog; must be at least 1.
        :raises RuntimeError: when SSL files are missing/inconsistent or the
            socket cannot be bound.
        :raises cinder.exception.InvalidInput: when backlog < 1.
        :returns: None
        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        # A timeout of 0 means "wait forever"; eventlet expects None for that.
        self.client_socket_timeout = CONF.client_socket_timeout or None
        self.name = name
        self.app = app
        self._host = host or "0.0.0.0"
        self._port = port or 0
        self._server = None
        self._socket = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("eventlet.wsgi.server")
        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')
        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Resolution failed; fall back to IPv4 and let bind() surface
            # the real error if there is one.
            family = socket.AF_INET
        cert_file = CONF.ssl_cert_file
        key_file = CONF.ssl_key_file
        ca_file = CONF.ssl_ca_file
        # Truthy when either SSL file is configured; validated just below.
        self._use_ssl = cert_file or key_file
        if cert_file and not os.path.exists(cert_file):
            raise RuntimeError(_("Unable to find cert_file : %s")
                               % cert_file)
        if ca_file and not os.path.exists(ca_file):
            raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
        if key_file and not os.path.exists(key_file):
            raise RuntimeError(_("Unable to find key_file : %s")
                               % key_file)
        if self._use_ssl and (not cert_file or not key_file):
            raise RuntimeError(_("When running server in SSL mode, you "
                                 "must specify both a cert_file and "
                                 "key_file option value in your "
                                 "configuration file."))
        # Retry binding for up to 30 seconds so a restarting service can
        # win the race against a previous instance still holding the port.
        retry_until = time.time() + 30
        while not self._socket and time.time() < retry_until:
            try:
                self._socket = eventlet.listen(bind_addr, backlog=backlog,
                                               family=family)
            except socket.error as err:
                if err.args[0] != errno.EADDRINUSE:
                    raise
                eventlet.sleep(0.1)
        if not self._socket:
            raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
                                 "after trying for 30 seconds") %
                               {'host': host, 'port': port})
        # Record the address actually bound (relevant when port=0 was given).
        (self._host, self._port) = self._socket.getsockname()[0:2]
        LOG.info(_LI("%(name)s listening on %(_host)s:%(_port)s"),
                 {'name': self.name, '_host': self._host, '_port': self._port})
    def start(self):
        """Start serving the WSGI application in a background greenthread.

        Wraps the listening socket with SSL when configured, then spawns
        eventlet's WSGI server loop; returns immediately.

        :returns: None
        """
        # The server socket object will be closed after server exits,
        # but the underlying file descriptor will remain open, and will
        # give bad file descriptor error. So duplicating the socket object,
        # to keep file descriptor usable.
        dup_socket = self._socket.dup()
        dup_socket.setsockopt(socket.SOL_SOCKET,
                              socket.SO_REUSEADDR, 1)
        # NOTE(praneshp): Call set_tcp_keepalive in oslo to set
        # tcp keepalive parameters. Sockets can hang around forever
        # without keepalive
        netutils.set_tcp_keepalive(dup_socket,
                                   CONF.tcp_keepalive,
                                   CONF.tcp_keepidle,
                                   CONF.tcp_keepalive_count,
                                   CONF.tcp_keepalive_interval)
        if self._use_ssl:
            try:
                ssl_kwargs = {
                    'server_side': True,
                    'certfile': CONF.ssl_cert_file,
                    'keyfile': CONF.ssl_key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }
                if CONF.ssl_ca_file:
                    # A CA bundle implies clients must present certificates.
                    ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
                dup_socket = ssl.wrap_socket(dup_socket,
                                             **ssl_kwargs)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed to start %(name)s on %(_host)s: "
                                  "%(_port)s with SSL "
                                  "support."), self.__dict__)
        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': dup_socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._logger,
            'socket_timeout': self.client_socket_timeout,
            'keepalive': CONF.wsgi_keep_alive
        }
        # Serve in a separate greenthread so start() does not block.
        self._server = eventlet.spawn(**wsgi_kwargs)
    @property
    def host(self):
        """Address the server socket is actually bound to."""
        return self._host
    @property
    def port(self):
        """Port the server socket is actually bound to (useful when 0 was requested)."""
        return self._port
def stop(self):
"""Stop this server.
This is not a very nice action, as currently the method by which a
server is stopped is by killing its eventlet.
:returns: None
"""
LOG.info(_LI("Stopping WSGI server."))
if self._server is not None:
# Resize pool to stop new requests from being processed
self._pool.resize(0)
self._server.kill()
    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            if self._server is not None:
                # Drain in-flight requests before joining the server thread.
                self._pool.waitall()
                self._server.wait()
        except greenlet.GreenletExit:
            # Raised when stop() kills the server greenthread; expected on
            # a normal shutdown.
            LOG.info(_LI("WSGI server has stopped."))
    def reset(self):
        """Reset server greenpool size to default.

        Undoes the resize(0) performed by stop(), e.g. on service reset.

        :returns: None
        """
        self._pool.resize(self.pool_size)
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | instal/script.module.universalscrapers/lib/universalscrapers/modules/js2py/pyjs.py | 38 | 1836 | from .base import *
from .constructors.jsmath import Math
from .constructors.jsdate import Date
from .constructors.jsobject import Object
from .constructors.jsfunction import Function
from .constructors.jsstring import String
from .constructors.jsnumber import Number
from .constructors.jsboolean import Boolean
from .constructors.jsregexp import RegExp
from .constructors.jsarray import Array
from .prototypes.jsjson import JSON
from .host.console import console
from .host.jseval import Eval
from .host.jsfunctions import parseFloat, parseInt, isFinite, isNaN
# Now we have all the necessary items to create global environment for script
__all__ = ['Js', 'PyJsComma', 'PyJsStrictEq', 'PyJsStrictNeq',
           'PyJsException', 'PyJsBshift', 'Scope', 'PyExceptionToJs',
           'JsToPyException', 'JS_BUILTINS', 'appengine', 'set_global_object',
           'JsRegExp', 'PyJsException', 'PyExceptionToJs', 'JsToPyException', 'PyJsSwitchException']
# JS names that map 1:1 onto objects imported above (these were defined in
# base.py / the constructors modules).
# NOTE: this tuple shadows the stdlib ``builtins`` module name locally.
builtins = ('true','false','null','undefined','Infinity',
            'NaN', 'console', 'String', 'Number', 'Boolean', 'RegExp',
            'Math', 'Date', 'Object', 'Function', 'Array',
            'parseFloat', 'parseInt', 'isFinite', 'isNaN')
#Array, Function, JSON, Error is done later :)
# also some built in functions like eval...
def set_global_object(obj):
    """Install *obj* as the JS global object and expose it as ``this``.

    Marks the scope as top-level, builds a ``This`` wrapper sharing the
    scope's own properties/prototype, records it globally on PyJs, and
    registers it under the name ``this`` inside the scope.
    """
    obj.IS_CHILD_SCOPE = False
    this = This({})
    this.own = obj.own
    this.prototype = obj.prototype
    PyJs.GlobalObject = this
    # make this available
    obj.register('this')
    obj.put('this', this)
# Snapshot of every JS builtin value/constructor, keyed by its JS name.
scope = dict(zip(builtins, [globals()[e] for e in builtins]))
# Now add errors:
for name, error in ERRORS.items():
    scope[name] = error
#add eval
scope['eval'] = Eval
scope['JSON'] = JSON
# Independent copy used to seed new global scopes.
JS_BUILTINS = {k:v for k,v in scope.items()}
| gpl-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django_extensions/management/commands/sqldiff.py | 24 | 48057 | """
sqldiff.py - Prints the (approximated) difference between models and database
TODO:
- better support for relations
- better support for constraints (mainly postgresql?)
- support for table spaces with postgresql
- when a table is not managed (meta.managed==False) then only do a one-way
sqldiff ? show differences from db->table but not the other way around since
it's not managed.
KNOWN ISSUES:
 - MySQL has by far the most problems with introspection. Please be
   careful when using MySQL with sqldiff.
- Booleans are reported back as Integers, so there's no way to know if
there was a real change.
- Varchar sizes are reported back without unicode support so their size
may change in comparison to the real length of the varchar.
- Some of the 'fixes' to counter these problems might create false
positives or false negatives.
"""
import sys
from optparse import make_option
import django
import six
from django.core.management import CommandError, sql as _sql
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connection, transaction
from django.db.models.fields import AutoField, IntegerField
from django_extensions.compat import get_app_models
from django_extensions.management.utils import signalcommand
try:
from django.core.management.base import OutputWrapper
HAS_OUTPUTWRAPPER = True
except ImportError:
HAS_OUTPUTWRAPPER = False
ORDERING_FIELD = IntegerField('_order', null=True)
def flatten(l, ltypes=(list, tuple)):
    """Recursively flatten nested sequences of *ltypes* into a single flat
    sequence, preserving the type of the outermost container."""
    def _walk(seq):
        # Depth-first traversal: splice nested containers, yield leaves.
        for element in seq:
            if isinstance(element, ltypes):
                for leaf in _walk(element):
                    yield leaf
            else:
                yield element
    return type(l)(_walk(l))
def all_local_fields(meta):
all_fields = []
if meta.proxy:
for parent in meta.parents:
all_fields.extend(all_local_fields(parent._meta))
else:
for f in meta.local_fields:
col_type = f.db_type(connection=connection)
if col_type is None:
continue
all_fields.append(f)
return all_fields
class SQLDiff(object):
    """Compute and render differences between Django model definitions and
    the live database schema.  Database-specific subclasses override the
    introspection hooks and capability flags below."""
    # Backend-specific overrides mapping introspection type_code -> field type.
    DATA_TYPES_REVERSE_OVERRIDE = {}
    # Tables that may legitimately exist in the database without a model.
    IGNORE_MISSING_TABLES = [
        "django_migrations",
        "south_migrationhistory",
    ]
    # Every difference category this tool can record (see add_difference).
    DIFF_TYPES = [
        'error',
        'comment',
        'table-missing-in-db',
        'table-missing-in-model',
        'field-missing-in-db',
        'field-missing-in-model',
        'fkey-missing-in-db',
        'fkey-missing-in-model',
        'index-missing-in-db',
        'index-missing-in-model',
        'unique-missing-in-db',
        'unique-missing-in-model',
        'field-type-differ',
        'field-parameter-differ',
        'notnull-differ',
    ]
    # Human-readable templates per diff type; %(N)s is the N-th positional
    # argument recorded with the difference.
    DIFF_TEXTS = {
        'error': 'error: %(0)s',
        'comment': 'comment: %(0)s',
        'table-missing-in-db': "table '%(0)s' missing in database",
        'table-missing-in-model': "table '%(0)s' missing in models",
        'field-missing-in-db': "field '%(1)s' defined in model but missing in database",
        'field-missing-in-model': "field '%(1)s' defined in database but missing in model",
        'fkey-missing-in-db': "field '%(1)s' FOREIGN KEY defined in model but missing in database",
        'fkey-missing-in-model': "field '%(1)s' FOREIGN KEY defined in database but missing in model",
        'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database",
        'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model",
        'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database",
        'unique-missing-in-model': "field '%(1)s' UNIQUE defined in database schema but missing in model",
        'field-type-differ': "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'",
        'field-parameter-differ': "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'",
        'notnull-differ': "field '%(1)s' null constraint should be '%(2)s' in the database",
    }
    # SQL renderers per diff type; each takes (style, quote_name, args) and
    # returns a colorized SQL statement string.
    SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[2:])))
    SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1])))
    SQL_FKEY_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s %s (%s)%s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[4:])), style.SQL_KEYWORD('REFERENCES'), style.SQL_TABLE(qn(args[2])), style.SQL_FIELD(qn(args[3])), connection.ops.deferrable_sql())
    SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[3]))
    # FIXME: need to lookup index name instead of just appending _idx to table + fieldname
    SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))))
    SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1])))
    # FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname
    SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2])))))
    SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
    SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2]))
    SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('MODIFY'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))
    SQL_ERROR = lambda self, style, qn, args: style.NOTICE('-- Error: %s' % style.ERROR(args[0]))
    SQL_COMMENT = lambda self, style, qn, args: style.NOTICE('-- Comment: %s' % style.SQL_TABLE(args[0]))
    SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE('-- Table missing: %s' % args[0])
    SQL_TABLE_MISSING_IN_MODEL = lambda self, style, qn, args: style.NOTICE('-- Model missing for table: %s' % args[0])
    # Capability flags; subclasses flip these and implement load_null /
    # load_unsigned accordingly.
    can_detect_notnull_differ = False
    can_detect_unsigned_differ = False
    unsigned_suffix = None
    def __init__(self, app_models, options):
        """Prepare introspection state for *app_models* with command *options*.

        :param app_models: model classes to diff against the database.
        :param options: command options dict (dense_output, only_existing, ...).
        """
        self.has_differences = None
        self.app_models = app_models
        self.options = options
        self.dense = options.get('dense_output', False)
        try:
            self.introspection = connection.introspection
        except AttributeError:
            # Very old Django exposed introspection through a module function.
            from django.db import get_introspection_module
            self.introspection = get_introspection_module()
        self.cursor = connection.cursor()
        self.django_tables = self.get_django_tables(options.get('only_existing', True))
        self.db_tables = self.introspection.get_table_list(self.cursor)
        if django.VERSION[:2] >= (1, 8):
            # TODO: We are losing information about tables which are views here
            self.db_tables = [table_info.name for table_info in self.db_tables]
        # List of (app_label, model_name, [(diff_type, args), ...]) buckets.
        self.differences = []
        self.unknown_db_fields = {}
        self.new_db_fields = set()
        # Populated by load_null()/load_unsigned() for capable backends.
        self.null = {}
        self.unsigned = set()
        # Dispatch table: diff type -> SQL renderer.
        self.DIFF_SQL = {
            'error': self.SQL_ERROR,
            'comment': self.SQL_COMMENT,
            'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB,
            'table-missing-in-model': self.SQL_TABLE_MISSING_IN_MODEL,
            'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB,
            'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
            'fkey-missing-in-db': self.SQL_FKEY_MISSING_IN_DB,
            'fkey-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL,
            'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB,
            'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL,
            'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB,
            'unique-missing-in-model': self.SQL_UNIQUE_MISSING_IN_MODEL,
            'field-type-differ': self.SQL_FIELD_TYPE_DIFFER,
            'field-parameter-differ': self.SQL_FIELD_PARAMETER_DIFFER,
            'notnull-differ': self.SQL_NOTNULL_DIFFER,
        }
        if self.can_detect_notnull_differ:
            self.load_null()
        if self.can_detect_unsigned_differ:
            self.load_unsigned()
    def load_null(self):
        """Populate self.null; required when can_detect_notnull_differ is True."""
        raise NotImplementedError("load_null functions must be implemented if diff backend has 'can_detect_notnull_differ' set to True")
    def load_unsigned(self):
        """Populate self.unsigned; required when can_detect_unsigned_differ is True."""
        raise NotImplementedError("load_unsigned function must be implemented if diff backend has 'can_detect_unsigned_differ' set to True")
    def add_app_model_marker(self, app_label, model_name):
        """Open a new (app, model, diffs) bucket; later diffs land in it."""
        self.differences.append((app_label, model_name, []))
    def add_difference(self, diff_type, *args):
        """Record one difference with its positional args in the current bucket."""
        assert diff_type in self.DIFF_TYPES, 'Unknown difference type'
        self.differences[-1][-1].append((diff_type, args))
    def get_django_tables(self, only_existing):
        """Return the table names Django expects, tolerating very old Django
        APIs that exposed the helper under different names."""
        try:
            django_tables = self.introspection.django_table_names(only_existing=only_existing)
        except AttributeError:
            # backwards compatibility for before introspection refactoring (r8296)
            try:
                django_tables = _sql.django_table_names(only_existing=only_existing)
            except AttributeError:
                # backwards compatibility for before svn r7568
                django_tables = _sql.django_table_list(only_existing=only_existing)
        return django_tables
def sql_to_dict(self, query, param):
""" sql_to_dict(query, param) -> list of dicts
code from snippet at http://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
    def get_field_model_type(self, field):
        """Return the db column type Django would generate for *field*."""
        return field.db_type(connection=connection)
    def get_field_db_type(self, description, field=None, table_name=None):
        """Map a DB-API cursor description entry back to the db type string
        Django would generate for the equivalent model field.

        Returns None (recording a 'comment' diff once per unknown type code)
        when the type cannot be resolved.
        """
        from django.db import models
        # DB-API cursor.description
        #(name, type_code, display_size, internal_size, precision, scale, null_ok) = description
        type_code = description[1]
        if type_code in self.DATA_TYPES_REVERSE_OVERRIDE:
            reverse_type = self.DATA_TYPES_REVERSE_OVERRIDE[type_code]
        else:
            try:
                try:
                    reverse_type = self.introspection.data_types_reverse[type_code]
                except AttributeError:
                    # backwards compatibility for before introspection refactoring (r8296)
                    reverse_type = self.introspection.DATA_TYPES_REVERSE.get(type_code)
            except KeyError:
                reverse_type = self.get_field_db_type_lookup(type_code)
                if not reverse_type:
                    # type_code not found in data_types_reverse map
                    key = (self.differences[-1][:2], description[:2])
                    if key not in self.unknown_db_fields:
                        self.unknown_db_fields[key] = 1
                        self.add_difference('comment', "Unknown database type for field '%s' (%s)" % (description[0], type_code))
                    return None
        kwargs = {}
        # NOTE(review): 16946 appears to be a PostGIS geometry type code —
        # confirm against the backend before relying on it.
        if type_code == 16946 and field and getattr(field, 'geom_type', None) == 'POINT':
            reverse_type = 'django.contrib.gis.db.models.fields.PointField'
        if isinstance(reverse_type, tuple):
            kwargs.update(reverse_type[1])
            reverse_type = reverse_type[0]
        if reverse_type == "CharField" and description[3]:
            kwargs['max_length'] = description[3]
        if reverse_type == "DecimalField":
            kwargs['max_digits'] = description[4]
            kwargs['decimal_places'] = description[5] and abs(description[5]) or description[5]
        if description[6]:
            kwargs['blank'] = True
            if reverse_type not in ('TextField', 'CharField'):
                kwargs['null'] = True
        if field and getattr(field, 'geography', False):
            kwargs['geography'] = True
        if '.' in reverse_type:
            # Dotted path: instantiate the field class from its module.
            from django_extensions.compat import importlib
            module_path, package_name = reverse_type.rsplit('.', 1)
            module = importlib.import_module(module_path)
            field_db_type = getattr(module, package_name)(**kwargs).db_type(connection=connection)
        else:
            field_db_type = getattr(models, reverse_type)(**kwargs).db_type(connection=connection)
        tablespace = field.db_tablespace
        if not tablespace:
            tablespace = "public"
        # Append the backend's unsigned marker when the column is unsigned.
        if (tablespace, table_name, field.column) in self.unsigned:
            field_db_type = '%s %s' % (field_db_type, self.unsigned_suffix)
        return field_db_type
    def get_field_db_type_lookup(self, type_code):
        """Hook for backends to resolve unknown type codes; base returns None."""
        return None
def get_field_db_nullable(self, field, table_name):
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
attname = field.db_column or field.attname
return self.null.get((tablespace, table_name, attname), 'fixme')
def strip_parameters(self, field_type):
if field_type and field_type != 'double precision':
return field_type.split(" ")[0].split("(")[0].lower()
return field_type
    def find_unique_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
        """Report model fields declared unique that lack a unique index or
        constraint in the database."""
        for field in all_local_fields(meta):
            if field.unique and meta.managed:
                attname = field.db_column or field.attname
                db_field_unique = table_indexes.get(attname, {}).get('unique')
                if not db_field_unique and table_constraints:
                    # Fall back to single-column unique constraints.
                    db_field_unique = any(constraint['unique'] for contraint_name, constraint in six.iteritems(table_constraints) if [attname] == constraint['columns'])
                if attname in table_indexes and db_field_unique:
                    continue
                self.add_difference('unique-missing-in-db', table_name, attname)
    def find_unique_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
        """Report database unique indexes whose model field is not unique."""
        # TODO: Postgresql does not list unique_togethers in table_indexes
        #       MySQL does
        fields = dict([(field.db_column or field.name, field.unique) for field in all_local_fields(meta)])
        for att_name, att_opts in six.iteritems(table_indexes):
            db_field_unique = att_opts['unique']
            if not db_field_unique and table_constraints:
                db_field_unique = any(constraint['unique'] for contraint_name, constraint in six.iteritems(table_constraints) if att_name in constraint['columns'])
            if db_field_unique and att_name in fields and not fields[att_name]:
                # Columns covered by unique_together are expected to differ.
                if att_name in flatten(meta.unique_together):
                    continue
                self.add_difference('unique-missing-in-model', table_name, att_name)
    def find_index_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
        """Report model fields with db_index=True that have no index in the db."""
        for field in all_local_fields(meta):
            if field.db_index:
                attname = field.db_column or field.attname
                if attname not in table_indexes:
                    self.add_difference('index-missing-in-db', table_name, attname, '', '')
                    db_type = field.db_type(connection=connection)
                    # char/text columns also get a *_pattern_ops LIKE index.
                    if db_type.startswith('varchar'):
                        self.add_difference('index-missing-in-db', table_name, attname, 'like', ' varchar_pattern_ops')
                    if db_type.startswith('text'):
                        self.add_difference('index-missing-in-db', table_name, attname, 'like', ' text_pattern_ops')
    def find_index_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
        """Report database indexes that the model does not declare."""
        fields = dict([(field.name, field) for field in all_local_fields(meta)])
        for att_name, att_opts in six.iteritems(table_indexes):
            if att_name in fields:
                field = fields[att_name]
                db_field_unique = att_opts['unique']
                if not db_field_unique and table_constraints:
                    db_field_unique = any(constraint['unique'] for contraint_name, constraint in six.iteritems(table_constraints) if att_name in constraint['columns'])
                # Skip indexes that are implied by the model definition.
                if field.db_index:
                    continue
                if getattr(field, 'spatial_index', False):
                    continue
                if att_opts['primary_key'] and field.primary_key:
                    continue
                if db_field_unique and field.unique:
                    continue
                if db_field_unique and att_name in flatten(meta.unique_together):
                    continue
                self.add_difference('index-missing-in-model', table_name, att_name)
                db_type = field.db_type(connection=connection)
                if db_type.startswith('varchar') or db_type.startswith('text'):
                    self.add_difference('index-missing-in-model', table_name, att_name, 'like')
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference('field-missing-in-model', table_name, row[0])
    def find_field_missing_in_db(self, fieldmap, table_description, table_name):
        """Report model fields that have no column in the database, recording
        enough detail to render an ADD COLUMN statement."""
        db_fields = [row[0] for row in table_description]
        for field_name, field in six.iteritems(fieldmap):
            if field_name not in db_fields:
                field_output = []
                if field.rel:
                    # Foreign key: also record the referenced table/column.
                    field_output.extend([field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column])
                    op = 'fkey-missing-in-db'
                else:
                    op = 'field-missing-in-db'
                field_output.append(field.db_type(connection=connection))
                if not field.null:
                    field_output.append('NOT NULL')
                self.add_difference(op, table_name, field_name, *field_output)
                # Remember new columns so the notnull pass skips them.
                self.new_db_fields.add((table_name, field_name))
    def find_field_type_differ(self, meta, table_description, table_name, func=None):
        """Report columns whose base type differs between model and database.

        *func*, when given, may adjust (model_type, db_type) before comparison
        to counter backend introspection quirks.
        """
        db_fields = dict([(row[0], row) for row in table_description])
        for field in all_local_fields(meta):
            if field.name not in db_fields:
                continue
            description = db_fields[field.name]
            model_type = self.get_field_model_type(field)
            db_type = self.get_field_db_type(description, field, table_name)
            # use callback function if defined
            if func:
                model_type, db_type = func(field, description, model_type, db_type)
            if not self.strip_parameters(db_type) == self.strip_parameters(model_type):
                self.add_difference('field-type-differ', table_name, field.name, model_type, db_type)
    def find_field_parameter_differ(self, meta, table_description, table_name, func=None):
        """Report columns whose base type matches but whose parameters
        (length, precision, CHECK constraint, ...) differ."""
        db_fields = dict([(row[0], row) for row in table_description])
        for field in all_local_fields(meta):
            if field.name not in db_fields:
                continue
            description = db_fields[field.name]
            model_type = self.get_field_model_type(field)
            db_type = self.get_field_db_type(description, field, table_name)
            # Only compare parameters when the base types already agree.
            if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
                continue
            # use callback function if defined
            if func:
                model_type, db_type = func(field, description, model_type, db_type)
            if django.VERSION[:2] >= (1, 7):
                # Django >=1.7: compare the CHECK clause separately.
                model_check = field.db_parameters(connection=connection)['check']
                if ' CHECK' in db_type:
                    db_type, db_check = db_type.split(" CHECK", 1)
                    db_check = db_check.strip().lstrip("(").rstrip(")")
                else:
                    db_check = None
                if not model_type == db_type and not model_check == db_check:
                    self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
            else:
                # Django <1.7
                if not model_type == db_type:
                    self.add_difference('field-parameter-differ', table_name, field.name, model_type, db_type)
    def find_field_notnull_differ(self, meta, table_description, table_name):
        """Report columns whose NULL constraint disagrees with the model.

        No-op for backends that cannot introspect nullability.
        """
        if not self.can_detect_notnull_differ:
            return
        for field in all_local_fields(meta):
            attname = field.db_column or field.attname
            # Columns we just reported as missing cannot differ in NULLness.
            if (table_name, attname) in self.new_db_fields:
                continue
            null = self.get_field_db_nullable(field, table_name)
            if field.null != null:
                action = field.null and 'DROP' or 'SET'
                self.add_difference('notnull-differ', table_name, attname, action)
    def get_constraints(self, cursor, table_name, introspection):
        """Fallback for Django versions lacking introspection.get_constraints()."""
        return {}
    def find_differences(self):
        """Run every diff pass for every model and fill self.differences.

        Sets self.has_differences to a truthy value when anything was found.
        """
        if self.options['all_applications']:
            self.add_app_model_marker(None, None)
            for table in self.db_tables:
                if table not in self.django_tables and table not in self.IGNORE_MISSING_TABLES:
                    self.add_difference('table-missing-in-model', table)
        cur_app_label = None
        for app_model in self.app_models:
            meta = app_model._meta
            table_name = meta.db_table
            app_label = meta.app_label
            if cur_app_label != app_label:
                # Marker indicating start of difference scan for this table_name
                self.add_app_model_marker(app_label, app_model.__name__)
            if table_name not in self.db_tables:
                # Table is missing from database
                self.add_difference('table-missing-in-db', table_name)
                continue
            table_indexes = self.introspection.get_indexes(self.cursor, table_name)
            if hasattr(self.introspection, 'get_constraints'):
                table_constraints = self.introspection.get_constraints(self.cursor, table_name)
            else:
                table_constraints = self.get_constraints(self.cursor, table_name, self.introspection)
            fieldmap = dict([(field.db_column or field.get_attname(), field) for field in all_local_fields(meta)])
            # add ordering field if model uses order_with_respect_to
            if meta.order_with_respect_to:
                fieldmap['_order'] = ORDERING_FIELD
            try:
                table_description = self.introspection.get_table_description(self.cursor, table_name)
            except Exception as e:
                self.add_difference('error', 'unable to introspect table: %s' % str(e).strip())
                transaction.rollback()  # reset transaction
                continue
            # Fields which are defined in database but not in model
            # 1) find: 'unique-missing-in-model'
            self.find_unique_missing_in_model(meta, table_indexes, table_constraints, table_name)
            # 2) find: 'index-missing-in-model'
            self.find_index_missing_in_model(meta, table_indexes, table_constraints, table_name)
            # 3) find: 'field-missing-in-model'
            self.find_field_missing_in_model(fieldmap, table_description, table_name)
            # Fields which are defined in models but not in database
            # 4) find: 'field-missing-in-db'
            self.find_field_missing_in_db(fieldmap, table_description, table_name)
            # 5) find: 'unique-missing-in-db'
            self.find_unique_missing_in_db(meta, table_indexes, table_constraints, table_name)
            # 6) find: 'index-missing-in-db'
            self.find_index_missing_in_db(meta, table_indexes, table_constraints, table_name)
            # Fields which have a different type or parameters
            # 7) find: 'type-differs'
            self.find_field_type_differ(meta, table_description, table_name)
            # 8) find: 'type-parameter-differs'
            self.find_field_parameter_differ(meta, table_description, table_name)
            # 9) find: 'field-notnull'
            self.find_field_notnull_differ(meta, table_description, table_name)
        self.has_differences = max([len(diffs) for _app_label, _model_name, diffs in self.differences])
def print_diff(self, style=no_style()):
""" print differences to stdout """
if self.options.get('sql', True):
self.print_diff_sql(style)
else:
self.print_diff_text(style)
    def print_diff_text(self, style):
        """Print differences as indented human-readable text (or one line
        per diff when dense output is requested)."""
        if not self.can_detect_notnull_differ:
            print(style.NOTICE("# Detecting notnull changes not implemented for this database backend"))
            print("")
        if not self.can_detect_unsigned_differ:
            print(style.NOTICE("# Detecting unsigned changes not implemented for this database backend"))
            print("")
        cur_app_label = None
        for app_label, model_name, diffs in self.differences:
            if not diffs:
                continue
            if not self.dense and app_label and cur_app_label != app_label:
                print("%s %s" % (style.NOTICE("+ Application:"), style.SQL_TABLE(app_label)))
                cur_app_label = app_label
            if not self.dense and model_name:
                print("%s %s" % (style.NOTICE("|-+ Differences for model:"), style.SQL_TABLE(model_name)))
            for diff in diffs:
                diff_type, diff_args = diff
                # Fill the text template, then colorize the quoted segments.
                text = self.DIFF_TEXTS[diff_type] % dict((str(i), style.SQL_TABLE(e)) for i, e in enumerate(diff_args))
                text = "'".join(i % 2 == 0 and style.ERROR(e) or e for i, e in enumerate(text.split("'")))
                if not self.dense:
                    print("%s %s" % (style.NOTICE("|--+"), text))
                else:
                    if app_label:
                        print("%s %s %s %s %s" % (style.NOTICE("App"), style.SQL_TABLE(app_label), style.NOTICE('Model'), style.SQL_TABLE(model_name), text))
                    else:
                        print(text)
    def print_diff_sql(self, style):
        """Print the collected differences as SQL, wrapped in BEGIN/COMMIT.

        Each diff type maps to a callable in ``self.DIFF_SQL`` that renders
        the corresponding ALTER statement. In dense mode statements are
        flattened onto single lines.
        """
        if not self.can_detect_notnull_differ:
            print(style.NOTICE("-- Detecting notnull changes not implemented for this database backend"))
            print("")

        cur_app_label = None
        qn = connection.ops.quote_name
        if not self.has_differences:
            if not self.dense:
                print(style.SQL_KEYWORD("-- No differences"))
        else:
            print(style.SQL_KEYWORD("BEGIN;"))
            for app_label, model_name, diffs in self.differences:
                if not diffs:
                    continue
                if not self.dense and cur_app_label != app_label:
                    # comment header once per application
                    print(style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label)))
                    cur_app_label = app_label
                if not self.dense and model_name:
                    print(style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name)))
                for diff in diffs:
                    diff_type, diff_args = diff
                    text = self.DIFF_SQL[diff_type](style, qn, diff_args)
                    if self.dense:
                        # collapse the indented continuation onto one line
                        text = text.replace("\n\t", " ")
                    print(text)
            print(style.SQL_KEYWORD("COMMIT;"))
class GenericSQLDiff(SQLDiff):
    """Fallback SQLDiff for backends without a specific implementation
    (used for e.g. 'oracle' in DATABASE_SQLDIFF_CLASSES)."""
    # no backend-specific introspection available, so notnull detection is off
    can_detect_notnull_differ = False
class MySQLDiff(SQLDiff):
    """MySQL flavour of SQLDiff.

    Adds introspection of NULL-ability, UNSIGNED columns and
    AUTO_INCREMENT columns via information_schema.
    """
    can_detect_notnull_differ = True
    can_detect_unsigned_differ = True
    unsigned_suffix = 'UNSIGNED'

    def __init__(self, app_models, options):
        super(MySQLDiff, self).__init__(app_models, options)
        # set of (table_name, column_name) pairs that are AUTO_INCREMENT
        self.auto_increment = set()
        self.load_auto_increment()
        if not getattr(connection.features, 'can_introspect_small_integer_field', False):
            from MySQLdb.constants import FIELD_TYPE
            # Django version < 1.8 does not support MySQL small integer introspection, adding override.
            self.DATA_TYPES_REVERSE_OVERRIDE[FIELD_TYPE.SHORT] = 'SmallIntegerField'

    def load_null(self):
        """Populate self.null from information_schema.columns."""
        # MySQL has no tablespaces; 'public' matches the (tablespace, table,
        # column) key scheme used by the other backends
        tablespace = 'public'
        for table_name in self.db_tables:
            result = self.sql_to_dict("""
                SELECT column_name, is_nullable
                FROM information_schema.columns
                WHERE table_schema = DATABASE()
                    AND table_name = %s""", [table_name])
            for table_info in result:
                key = (tablespace, table_name, table_info['column_name'])
                self.null[key] = table_info['is_nullable'] == 'YES'

    def load_unsigned(self):
        """Record every column declared with an unsigned type."""
        tablespace = 'public'
        for table_name in self.db_tables:
            result = self.sql_to_dict("""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_schema = DATABASE()
                    AND table_name = %s
                    AND column_type LIKE '%%unsigned'""", [table_name])
            for table_info in result:
                key = (tablespace, table_name, table_info['column_name'])
                self.unsigned.add(key)

    def load_auto_increment(self):
        """Record every column whose 'extra' attribute is auto_increment."""
        for table_name in self.db_tables:
            result = self.sql_to_dict("""
                SELECT column_name
                FROM information_schema.columns
                WHERE table_schema = DATABASE()
                    AND table_name = %s
                    AND extra = 'auto_increment'""", [table_name])
            for table_info in result:
                key = (table_name, table_info['column_name'])
                self.auto_increment.add(key)

    # All the MySQL hacks together create something of a problem
    # Fixing one bug in MySQL creates another issue. So just keep in mind
    # that this is way unreliable for MySQL atm.
    def get_field_db_type(self, description, field=None, table_name=None):
        """Return the db type for a column, patched for MySQL quirks
        (char/varchar, tinyint(1)-as-bool, AUTO_INCREMENT suffix)."""
        from MySQLdb.constants import FIELD_TYPE
        db_type = super(MySQLDiff, self).get_field_db_type(description, field, table_name)
        if not db_type:
            return
        if field:
            # MySQL isn't really sure about char's and varchar's like sqlite
            field_type = self.get_field_model_type(field)
            # Fix char/varchar inconsistencies
            # NOTE(review): lstrip("var") strips the *characters* v/a/r, not the
            # prefix "var"; it happens to turn 'varchar' into 'char' but is fragile
            if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
                db_type = db_type.lstrip("var")
            # They like to call 'bool's 'tinyint(1)' and introspection makes that a integer
            # just convert it back to it's proper type, a bool is a bool and nothing else.
            if db_type == 'integer' and description[1] == FIELD_TYPE.TINY and description[4] == 1:
                db_type = 'bool'
            if (table_name, field.column) in self.auto_increment:
                db_type += ' AUTO_INCREMENT'
        return db_type
class SqliteSQLDiff(SQLDiff):
    """SQLite flavour of SQLDiff.

    Uses PRAGMA table_info for NULL-ability and disables index checks,
    which do not introspect reliably on sqlite.
    """
    can_detect_notnull_differ = True

    def load_null(self):
        """Populate self.null from PRAGMA table_info for every table."""
        for table_name in self.db_tables:
            # sqlite does not support tablespaces
            tablespace = "public"
            # index, column_name, column_type, nullable, default_value
            # see: http://www.sqlite.org/pragma.html#pragma_table_info
            for table_info in self.sql_to_dict("PRAGMA table_info(%s);" % table_name, []):
                key = (tablespace, table_name, table_info['name'])
                self.null[key] = not table_info['notnull']

    # Unique does not seem to be implied on Sqlite for Primary_key's
    # if this is more generic among databases this might be usefull
    # to add to the superclass's find_unique_missing_in_db method
    def find_unique_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
        """Report unique model fields that have no unique index in the db.

        Fields covered by a unique or primary-key index are skipped.
        """
        for field in all_local_fields(meta):
            if field.unique:
                attname = field.db_column or field.attname
                if attname in table_indexes and table_indexes[attname]['unique']:
                    continue
                if attname in table_indexes and table_indexes[attname]['primary_key']:
                    continue
                self.add_difference('unique-missing-in-db', table_name, attname)

    # Finding Indexes by using the get_indexes dictionary doesn't seem to work
    # for sqlite.
    def find_index_missing_in_db(self, meta, table_indexes, table_constraints, table_name):
        # intentionally disabled for sqlite (see note above)
        pass

    def find_index_missing_in_model(self, meta, table_indexes, table_constraints, table_name):
        # intentionally disabled for sqlite (see note above)
        pass

    def get_field_db_type(self, description, field=None, table_name=None):
        """Return the db type for a column, patched for sqlite's
        char/varchar ambiguity."""
        db_type = super(SqliteSQLDiff, self).get_field_db_type(description, field, table_name)
        if not db_type:
            return
        if field:
            field_type = self.get_field_model_type(field)
            # Fix char/varchar inconsistencies
            # NOTE(review): lstrip("var") strips characters, not the prefix (see MySQLDiff)
            if self.strip_parameters(field_type) == 'char' and self.strip_parameters(db_type) == 'varchar':
                db_type = db_type.lstrip("var")
        return db_type
class PostgresqlSQLDiff(SQLDiff):
    """PostgreSQL flavour of SQLDiff.

    Introspects NULL-ability and CHECK constraints from the system
    catalogs (pg_attribute, pg_constraint) and maps postgres-specific
    type OIDs to Django field classes.
    """
    can_detect_notnull_differ = True
    can_detect_unsigned_differ = True

    # type OID -> Django field class (overrides the default reverse mapping)
    DATA_TYPES_REVERSE_OVERRIDE = {
        1042: 'CharField',

        # postgis types (TODO: support is very incomplete)
        17506: 'django.contrib.gis.db.models.fields.PointField',
        16392: 'django.contrib.gis.db.models.fields.PointField',
        55902: 'django.contrib.gis.db.models.fields.MultiPolygonField',
        16946: 'django.contrib.gis.db.models.fields.MultiPolygonField'
    }

    # pg_type name -> Django field class, for types looked up by name
    DATA_TYPES_REVERSE_NAME = {
        'hstore': 'django_hstore.hstore.DictionaryField',
    }

    # Hopefully in the future we can add constraint checking and other more
    # advanced checks based on this database.
    SQL_LOAD_CONSTRAINTS = """
    SELECT nspname, relname, conname, attname, pg_get_constraintdef(pg_constraint.oid)
    FROM pg_constraint
    INNER JOIN pg_attribute ON pg_constraint.conrelid = pg_attribute.attrelid AND pg_attribute.attnum = any(pg_constraint.conkey)
    INNER JOIN pg_class ON conrelid=pg_class.oid
    INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace
    ORDER BY CASE WHEN contype='f' THEN 0 ELSE 1 END,contype,nspname,relname,conname;
    """
    SQL_LOAD_NULL = """
    SELECT nspname, relname, attname, attnotnull
    FROM pg_attribute
    INNER JOIN pg_class ON attrelid=pg_class.oid
    INNER JOIN pg_namespace ON pg_namespace.oid=pg_class.relnamespace;
    """

    # ALTER TABLE renderers used by print_diff_sql for each diff type
    SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
    SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD("TYPE"), style.SQL_COLTYPE(args[2]))
    SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ALTER COLUMN'), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[2]), style.SQL_KEYWORD('NOT NULL'))

    def __init__(self, app_models, options):
        super(PostgresqlSQLDiff, self).__init__(app_models, options)
        # (nspname, relname, attname) -> constraint row, for CHECK constraints
        self.check_constraints = {}
        self.load_constraints()

    def load_null(self):
        """Populate self.null from pg_attribute.attnotnull."""
        for dct in self.sql_to_dict(self.SQL_LOAD_NULL, []):
            key = (dct['nspname'], dct['relname'], dct['attname'])
            self.null[key] = not dct['attnotnull']

    def load_unsigned(self):
        # PostgreSQL does not support unsigned, so no columns are
        # unsigned. Nothing to do.
        pass

    def load_constraints(self):
        """Cache CHECK constraint definitions keyed by (schema, table, column)."""
        for dct in self.sql_to_dict(self.SQL_LOAD_CONSTRAINTS, []):
            key = (dct['nspname'], dct['relname'], dct['attname'])
            if 'CHECK' in dct['pg_get_constraintdef']:
                self.check_constraints[key] = dct

    def get_constraints(self, cursor, table_name, introspection):
        """ backport of django's introspection.get_constraints(...) """
        constraints = {}
        # Loop over the key table, collecting things as constraints
        # This will get PKs, FKs, and uniques, but not CHECK
        cursor.execute("""
            SELECT
                kc.constraint_name,
                kc.column_name,
                c.constraint_type,
                array(SELECT table_name::text || '.' || column_name::text FROM information_schema.constraint_column_usage WHERE constraint_name = kc.constraint_name)
            FROM information_schema.key_column_usage AS kc
            JOIN information_schema.table_constraints AS c ON
                kc.table_schema = c.table_schema AND
                kc.table_name = c.table_name AND
                kc.constraint_name = c.constraint_name
            WHERE
                kc.table_schema = %s AND
                kc.table_name = %s
        """, ["public", table_name])
        for constraint, column, kind, used_cols in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": kind.lower() == "primary key",
                    "unique": kind.lower() in ["primary key", "unique"],
                    "foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
                    "check": False,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Now get CHECK constraint columns
        cursor.execute("""
            SELECT kc.constraint_name, kc.column_name
            FROM information_schema.constraint_column_usage AS kc
            JOIN information_schema.table_constraints AS c ON
                kc.table_schema = c.table_schema AND
                kc.table_name = c.table_name AND
                kc.constraint_name = c.constraint_name
            WHERE
                c.constraint_type = 'CHECK' AND
                kc.table_schema = %s AND
                kc.table_name = %s
        """, ["public", table_name])
        for constraint, column in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": None,
                    "check": True,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Now get indexes
        cursor.execute("""
            SELECT
                c2.relname,
                ARRAY(
                    SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
                    FROM unnest(idx.indkey) i
                ),
                idx.indisunique,
                idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND c.relname = %s
        """, [table_name])
        for index, columns, unique, primary in cursor.fetchall():
            if index not in constraints:
                constraints[index] = {
                    "columns": list(columns),
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                }
        return constraints

    def get_field_db_type(self, description, field=None, table_name=None):
        """Return the db type for a column, mapping serial/bigserial for
        auto PKs and appending any CHECK constraint definition."""
        db_type = super(PostgresqlSQLDiff, self).get_field_db_type(description, field, table_name)
        if not db_type:
            return
        if field:
            if field.primary_key and isinstance(field, AutoField):
                # postgres represents auto PKs as serial/bigserial
                if db_type == 'integer':
                    db_type = 'serial'
                elif db_type == 'bigint':
                    db_type = 'bigserial'
            if table_name:
                tablespace = field.db_tablespace
                if tablespace == "":
                    tablespace = "public"
                attname = field.db_column or field.attname
                check_constraint = self.check_constraints.get((tablespace, table_name, attname), {}).get('pg_get_constraintdef', None)
                if check_constraint:
                    # normalize postgres' doubled parentheses and quoting so the
                    # constraint text matches what Django would generate
                    check_constraint = check_constraint.replace("((", "(")
                    check_constraint = check_constraint.replace("))", ")")
                    check_constraint = '("'.join([')' in e and '" '.join(p.strip('"') for p in e.split(" ", 1)) or e for e in check_constraint.split("(")])
                    # TODO: might be more then one constraint in definition ?
                    db_type += ' ' + check_constraint
        return db_type

    def get_field_db_type_lookup(self, type_code):
        """Resolve a type OID by name via pg_type (e.g. hstore); returns
        None when the type is unknown."""
        try:
            name = self.sql_to_dict("SELECT typname FROM pg_type WHERE typelem=%s;", [type_code])[0]['typname']
            return self.DATA_TYPES_REVERSE_NAME.get(name.strip('_'))
        except (IndexError, KeyError):
            pass

    # NOTE(review): disabled code kept below as a bare string literal -- a
    # postgres-specific find_field_type_differ override that is not active
    """
    def find_field_type_differ(self, meta, table_description, table_name):
        def callback(field, description, model_type, db_type):
            if field.primary_key and db_type=='integer':
                db_type = 'serial'
            return model_type, db_type
        super(PostgresqlSQLDiff, self).find_field_type_differ(meta, table_description, table_name, callback)
    """
# Maps the database engine name (last dotted component of settings'
# ENGINE) to the SQLDiff implementation to use; see Command.handle.
DATABASE_SQLDIFF_CLASSES = {
    'postgis': PostgresqlSQLDiff,
    'postgresql_psycopg2': PostgresqlSQLDiff,
    'postgresql': PostgresqlSQLDiff,
    'mysql': MySQLDiff,
    'sqlite3': SqliteSQLDiff,
    'oracle': GenericSQLDiff
}
class Command(BaseCommand):
    """Management command that prints model-vs-database differences.

    Exits with code 0 when no differences were found, 1 when there were
    differences, and 2 on a CommandError.
    """
    # NOTE(review): "Automaticly"/"spreaded" typos in the user-visible help
    # strings below are left as-is (runtime strings)
    option_list = BaseCommand.option_list + (
        make_option('--all-applications', '-a', action='store_true', dest='all_applications',
                    help="Automaticly include all application from INSTALLED_APPS."),
        make_option('--not-only-existing', '-e', action='store_false', dest='only_existing',
                    help="Check all tables that exist in the database, not only tables that should exist based on models."),
        make_option('--dense-output', '-d', action='store_true', dest='dense_output',
                    help="Shows the output in dense format, normally output is spreaded over multiple lines."),
        make_option('--output_text', '-t', action='store_false', dest='sql', default=True,
                    help="Outputs the differences as descriptive text instead of SQL"),
    )

    help = """Prints the (approximated) difference between models and fields in the database for the given app name(s).

It indicates how columns in the database are different from the sql that would
be generated by Django. This command is not a database migration tool. (Though
it can certainly help) It's purpose is to show the current differences as a way
to check/debug ur models compared to the real database tables and columns."""

    output_transaction = False
    args = '<appname appname ...>'

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # 1 = differences found (or not yet run); cleared to 0 on a clean diff
        self.exit_code = 1

    @signalcommand
    def handle(self, *app_labels, **options):
        """Run the diff for the requested apps and print the result."""
        from django.conf import settings

        engine = None
        if hasattr(settings, 'DATABASES'):
            engine = settings.DATABASES['default']['ENGINE']
        else:
            # legacy single-database settings style
            engine = settings.DATABASE_ENGINE

        if engine == 'dummy':
            # This must be the "dummy" database backend, which means the user
            # hasn't set DATABASE_ENGINE.
            raise CommandError("""Django doesn't know which syntax to use for your SQL statements,
because you haven't specified the DATABASE_ENGINE setting.
Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.""")

        if options.get('all_applications', False):
            app_models = get_app_models()
        else:
            if not app_labels:
                raise CommandError('Enter at least one appname.')
            app_models = get_app_models(app_labels)

        # remove all models that are not managed by Django
        #app_models = [model for model in app_models if getattr(model._meta, 'managed', True)]

        if not app_models:
            raise CommandError('Unable to execute sqldiff no models founds.')

        if not engine:
            # fall back to guessing the backend from the connection module path
            engine = connection.__module__.split('.')[-2]

        if '.' in engine:
            # ENGINE is a dotted path; keep only the backend name
            engine = engine.split('.')[-1]

        cls = DATABASE_SQLDIFF_CLASSES.get(engine, GenericSQLDiff)
        sqldiff_instance = cls(app_models, options)
        sqldiff_instance.find_differences()
        if not sqldiff_instance.has_differences:
            self.exit_code = 0
        sqldiff_instance.print_diff(self.style)

    def execute(self, *args, **options):
        """Wrap BaseCommand.execute to print CommandErrors nicely and
        exit with code 2 instead of raising."""
        try:
            super(Command, self).execute(*args, **options)
        except CommandError as e:
            if options.get('traceback', False):
                raise

            # self.stderr is not guaranteed to be set here
            stderr = getattr(self, 'stderr', None)
            if not stderr:
                if HAS_OUTPUTWRAPPER:
                    stderr = OutputWrapper(sys.stderr, self.style.ERROR)
                else:
                    stderr = sys.stderr
            stderr.write('%s: %s' % (e.__class__.__name__, e))
            sys.exit(2)

    def run_from_argv(self, argv):
        super(Command, self).run_from_argv(argv)
        # propagate diff status to the shell: 0 = no differences found
        sys.exit(self.exit_code)
| agpl-3.0 |
PriyaShitole/MedViger-lib | webnotes/model/doctype.py | 34 | 13356 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
Get metadata (main doctype with fields and permissions with all table doctypes)
- if exists in cache, get it from cache
- add custom fields
- override properties from PropertySetter
- sort based on prev_field
- optionally, post process (add js, css, select fields), or without
"""
from __future__ import unicode_literals
# imports
import webnotes
import webnotes.model
import webnotes.model.doc
import webnotes.model.doclist
from webnotes.utils import cint, get_base_path
# Request-local caches: in-memory doclist cache and the DocField
# fieldname -> fieldtype map (populated by load_docfield_types).
doctype_cache = webnotes.local('doctype_doctype_cache')
docfield_types = webnotes.local('doctype_docfield_types')

# doctype_cache = {}
# docfield_types = None
def get(doctype, processed=False, cached=True):
    """Return the metadata doclist for `doctype`, including the doctypes
    of all its Table fields.

    When `processed` is True, also attach client code, expanded selects,
    print formats, search fields, workflows and linked-with info.
    Cached results are used when `cached` is True.
    """
    if cached:
        doclist = from_cache(doctype, processed)
        if doclist:
            if processed:
                # translations depend on the current user's language,
                # so they are applied after the cache lookup
                update_language(doclist)
            return DocTypeDocList(doclist)

    load_docfield_types()

    # main doctype doclist
    doclist = get_doctype_doclist(doctype)

    # add doctypes of table fields
    table_types = [d.options for d in doclist \
        if d.doctype=='DocField' and d.fieldtype=='Table']

    for table_doctype in table_types:
        doclist += get_doctype_doclist(table_doctype)

    if processed:
        add_code(doctype, doclist)
        expand_selects(doclist)
        add_print_formats(doclist)
        add_search_fields(doclist)
        add_workflows(doclist)
        add_linked_with(doclist)

    to_cache(doctype, processed, doclist)

    if processed:
        update_language(doclist)

    return DocTypeDocList(doclist)
def load_docfield_types():
    """Cache a fieldname -> fieldtype map of DocField's own fields
    (used by apply_property_setters to coerce property values)."""
    webnotes.local.doctype_docfield_types = dict(webnotes.conn.sql("""select fieldname, fieldtype from tabDocField
        where parent='DocField'"""))
def add_workflows(doclist):
    """Append the doctype's active Workflow (and its Workflow State docs)
    to the doclist, if one exists."""
    from webnotes.model.workflow import get_workflow_name
    doctype = doclist[0].name

    # get active workflow
    workflow_name = get_workflow_name(doctype)

    if workflow_name and webnotes.conn.exists("Workflow", workflow_name):
        doclist += webnotes.get_doclist("Workflow", workflow_name)

        # add workflow states (for icons and style)
        for state in map(lambda d: d.state, doclist.get({"doctype":"Workflow Document State"})):
            doclist += webnotes.get_doclist("Workflow State", state)
def get_doctype_doclist(doctype):
    """Assemble the doclist for a single doctype: the stored definition
    plus custom fields, with property-setter overrides applied and the
    fields sorted into display order."""
    result = webnotes.get_doclist('DocType', doctype)

    # layer user customisations on top of the stored definition
    add_custom_fields(doctype, result)
    apply_property_setters(doctype, result)

    # order fields by their previous_field chain
    sort_fields(result)

    return result
def sort_fields(doclist):
    """Sort DocField docs on the basis of their `previous_field` chain and
    renumber their `idx` values."""
    from webnotes.model.doclist import DocList
    newlist = DocList([])
    pending = filter(lambda d: d.doctype=='DocField', doclist)

    # bounded number of passes guards against broken/cyclic previous_field chains
    maxloops = 20
    while (pending and maxloops>0):
        maxloops -= 1
        for d in pending[:]:
            if d.previous_field:
                # field already added
                for n in newlist:
                    if n.fieldname==d.previous_field:
                        newlist.insert(newlist.index(n)+1, d)
                        pending.remove(d)
                        break
            else:
                newlist.append(d)
                pending.remove(d)

    # recurring at end
    if pending:
        newlist += pending

    # renum
    idx = 1
    for d in newlist:
        d.idx = idx
        idx += 1

    # NOTE(review): this extends the list returned by doclist.get(), not
    # `doclist` itself -- verify DocList.get() returns a shared/live list,
    # otherwise the sorted result would be discarded
    doclist.get({"doctype":["!=", "DocField"]}).extend(newlist)
def apply_property_setters(doctype, doclist):
for ps in webnotes.conn.sql("""select * from `tabProperty Setter` where
doc_type=%s""", doctype, as_dict=1):
if ps['doctype_or_field']=='DocType':
if ps.get('property_type', None) in ('Int', 'Check'):
ps['value'] = cint(ps['value'])
doclist[0].fields[ps['property']] = ps['value']
else:
docfield = filter(lambda d: d.doctype=="DocField" and d.fieldname==ps['field_name'],
doclist)
if not docfield: continue
if docfield_types.get(ps['property'], None) in ('Int', 'Check'):
ps['value'] = cint(ps['value'])
docfield[0].fields[ps['property']] = ps['value']
def add_custom_fields(doctype, doclist):
    """Append user-created Custom Field records to the doclist, converted
    into DocField docs. Returns the doclist."""
    try:
        res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
            WHERE dt = %s AND docstatus < 2""", doctype, as_dict=1)
    except Exception, e:
        if e.args[0]==1146:
            # 1146: table does not exist (MySQL) -- presumably a fresh
            # install where the Custom Field table isn't created yet
            return doclist
        else:
            raise

    for r in res:
        custom_field = webnotes.model.doc.Document(fielddata=r)

        # convert to DocField
        custom_field.fields.update({
            'doctype': 'DocField',
            'parent': doctype,
            'parentfield': 'fields',
            'parenttype': 'DocType',
            '__custom_field': 1
        })
        doclist.append(custom_field)

    return doclist
def add_linked_with(doclist):
    """add list of doctypes this doctype is 'linked' with

    Stores the result in doclist[0].fields["__linked_with"] as a dict of
    {linking_doctype: {"fieldname": ..., optionally "child_doctype": ...}}.
    """
    doctype = doclist[0].name

    # doctypes linking here via Link fields or 'link:' Select fields
    links = webnotes.conn.sql("""select parent, fieldname from tabDocField
        where (fieldtype="Link" and options=%s)
        or (fieldtype="Select" and options=%s)""", (doctype, "link:"+ doctype))
    links += webnotes.conn.sql("""select dt as parent, fieldname from `tabCustom Field`
        where (fieldtype="Link" and options=%s)
        or (fieldtype="Select" and options=%s)""", (doctype, "link:"+ doctype))

    links = dict(links)

    if not links:
        return {}

    ret = {}

    for dt in links:
        ret[dt] = { "fieldname": links[dt] }

    # when the link lives in a child table, report the child table's
    # parent doctype instead and drop the child-table entry
    for grand_parent, options in webnotes.conn.sql("""select parent, options from tabDocField
        where fieldtype="Table"
            and options in (select name from tabDocType
                where istable=1 and name in (%s))""" % ", ".join(["%s"] * len(links)) ,tuple(links)):

        ret[grand_parent] = {"child_doctype": options, "fieldname": links[options] }
        if options in ret:
            del ret[options]

    doclist[0].fields["__linked_with"] = ret
def from_cache(doctype, processed):
    """ load doclist from cache.
    sets flag __from_cache in first doc of doclist if loaded from cache"""

    # from memory
    if doctype_cache and not processed and doctype in doctype_cache:
        return doctype_cache[doctype]

    # from the shared cache backend (stored as plain dicts)
    doclist = webnotes.cache().get_value(cache_name(doctype, processed))
    if doclist:
        from webnotes.model.doclist import DocList
        doclist = DocList([webnotes.model.doc.Document(fielddata=d)
                for d in doclist])
        doclist[0].fields["__from_cache"] = 1
        return doclist
def to_cache(doctype, processed, doclist):
    """Store the doclist in the shared cache (as plain dicts) and, for
    unprocessed doclists, also in the in-memory cache."""
    if not doctype_cache:
        webnotes.local.doctype_doctype_cache = {}

    webnotes.cache().set_value(cache_name(doctype, processed),
        [d.fields for d in doclist])

    if not processed:
        doctype_cache[doctype] = doclist
def cache_name(doctype, processed):
    """Return the cache key for a doctype's doclist; processed doclists
    get a ':Raw' suffix."""
    return "doctype:%s%s" % (doctype, ":Raw" if processed else "")
def clear_cache(doctype=None):
    """Invalidate cached doclists.

    When `doctype` is given, clear its raw and processed cache entries,
    plugin cache and in-memory cache entry, plus the caches of all parent
    doctypes that embed it as a Table field, and its notification counts.
    When `doctype` is None, clear the caches of every doctype.
    """
    import webnotes.plugins

    def clear_single(dt):
        # clear both the unprocessed and processed cache entries
        webnotes.cache().delete_value(cache_name(dt, False))
        webnotes.cache().delete_value(cache_name(dt, True))
        webnotes.plugins.clear_cache("DocType", dt)
        # FIX: the original checked `doctype in doctype_cache` but deleted
        # `doctype_cache[dt]`, which broke clearing of parent doctypes
        if doctype_cache and dt in doctype_cache:
            del doctype_cache[dt]

    if doctype:
        clear_single(doctype)

        # clear all parent doctypes that include this one as a child table
        for dt in webnotes.conn.sql("""select parent from tabDocField
            where fieldtype="Table" and options=%s""", doctype):
            clear_single(dt[0])

        # clear all notifications
        from core.doctype.notification_count.notification_count import delete_notification_count_for
        delete_notification_count_for(doctype)

    else:
        # clear all
        for dt in webnotes.conn.sql("""select name from tabDocType"""):
            clear_single(dt[0])
def add_code(doctype, doclist):
    """Attach the doctype's client-side code (js/css plus list, calendar
    and map scripts) from its module folder onto the main doc."""
    import os
    from webnotes.modules import scrub, get_module_path

    doc = doclist[0]

    path = os.path.join(get_module_path(doc.module), 'doctype', scrub(doc.name))

    def _add_code(fname, fieldname):
        # read the file into doc.fields[fieldname] if it exists
        fpath = os.path.join(path, fname)
        if os.path.exists(fpath):
            with open(fpath, 'r') as f:
                doc.fields[fieldname] = f.read()

    _add_code(scrub(doc.name) + '.js', '__js')
    _add_code(scrub(doc.name) + '.css', '__css')
    _add_code('%s_list.js' % scrub(doc.name), '__list_js')
    _add_code('%s_calendar.js' % scrub(doc.name), '__calendar_js')
    _add_code('%s_map.js' % scrub(doc.name), '__map_js')

    add_embedded_js(doc)
def add_embedded_js(doc):
    """embed all require files

    Appends any client Custom Script to doc's __js, then replaces each
    wn.require(...) call with the contents of the referenced file (when
    it exists locally).
    """
    import re, os
    from webnotes import conf

    # custom script
    custom = webnotes.conn.get_value("Custom Script", {"dt": doc.name,
        "script_type": "Client"}, "script") or ""
    doc.fields['__js'] = ((doc.fields.get('__js') or '') + '\n' + custom).encode("utf-8")

    def _sub(match):
        # extract the quoted path from the wn.require(...) call
        require_path = re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1]
        fpath = os.path.join(get_base_path(), require_path)
        if os.path.exists(fpath):
            with open(fpath, 'r') as f:
                # inline the file contents in place of the require call
                return '\n' + unicode(f.read(), "utf-8") + '\n'
        else:
            # leave a normalized require call for files not found locally
            return 'wn.require("%s")' % require_path

    if doc.fields.get('__js'):
        doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
def expand_selects(doclist):
    """Replace 'link:<DocType>' Select options with the actual list of
    document names of that doctype."""
    for d in filter(lambda d: d.fieldtype=='Select' \
        and (d.options or '').startswith('link:'), doclist):
        doctype = d.options.split("\n")[0][5:]
        d.link_doctype = doctype
        # leading '' yields an empty first choice in the select control
        d.options = '\n'.join([''] + [o.name for o in webnotes.conn.sql("""select
            name from `tab%s` where docstatus<2 order by name asc""" % doctype, as_dict=1)])
def add_print_formats(doclist):
    """Append the doctype's Print Format docs to the doclist."""
    print_formats = webnotes.conn.sql("""select * FROM `tabPrint Format`
        WHERE doc_type=%s AND docstatus<2""", doclist[0].name, as_dict=1)
    for pf in print_formats:
        doclist.append(webnotes.model.doc.Document('Print Format', fielddata=pf))
def get_property(dt, prop, fieldname=None):
    """get a doctype property

    Returns the DocType-level property, or the property of the named
    field when `fieldname` is given (None if the field does not exist).
    """
    doctypelist = get(dt)
    if not fieldname:
        return doctypelist[0].fields.get(prop)

    field = doctypelist.get_field(fieldname)
    if not field:
        return None
    return field.fields.get(prop) or None
def get_link_fields(doctype):
    """get docfields of links and selects with "link:" """
    doctypelist = get(doctype)
    # Link fields plus Select fields whose options start with 'link:'
    return doctypelist.get({"fieldtype":"Link"}).extend(doctypelist.get({"fieldtype":"Select",
        "options": "^link:"}))
def add_validators(doctype, doclist):
    """Append all DocType Validator docs registered for this doctype."""
    for validator in webnotes.conn.sql("""select name from `tabDocType Validator` where
        for_doctype=%s""", doctype, as_dict=1):
        doclist.extend(webnotes.get_doclist('DocType Validator', validator.name))
def add_search_fields(doclist):
    """add search fields found in the doctypes indicated by link fields' options"""
    for lf in doclist.get({"fieldtype": "Link", "options":["!=", "[Select]"]}):
        if lf.options:
            # the linked doctype's search_fields is a comma-separated string
            search_fields = get(lf.options)[0].search_fields
            if search_fields:
                lf.search_fields = map(lambda sf: sf.strip(), search_fields.split(","))
def update_language(doclist):
    """Attach translated messages for the current language to the main
    doc (as __messages) and cache them in webnotes.local.translations."""
    if webnotes.lang != 'en':
        from webnotes.modules import get_doc_path

        if not hasattr(webnotes.local, 'translations'):
            webnotes.local.translations = {}
        translations = webnotes.local.translations

        # load languages for each doctype
        from webnotes.translate import get_lang_data
        _messages = {}
        for d in doclist:
            if d.doctype=='DocType':
                # merge both the 'doc' and 'js' translation files
                _messages.update(get_lang_data(get_doc_path(d.module, d.doctype, d.name),
                    webnotes.lang, 'doc'))
                _messages.update(get_lang_data(get_doc_path(d.module, d.doctype, d.name),
                    webnotes.lang, 'js'))

        doc = doclist[0]

        # attach translations to client
        doc.fields["__messages"] = _messages

        if not webnotes.lang in translations:
            translations[webnotes.lang] = webnotes._dict({})

        translations[webnotes.lang].update(_messages)
class DocTypeDocList(webnotes.model.doclist.DocList):
    """DocList subclass with convenience accessors for DocType metadata."""

    def get_field(self, fieldname, parent=None, parentfield=None):
        """Return the DocField doc for `fieldname` (or a filter dict).

        When `parentfield` is given, look in the doctype of that child
        table instead of the main doctype. Returns None when not found.
        """
        filters = {"doctype":"DocField"}
        if isinstance(fieldname, dict):
            filters.update(fieldname)
        else:
            filters["fieldname"] = fieldname

        # if parentfield, get the name of the parent table
        if parentfield:
            parent = self.get_options(parentfield)

        if parent:
            filters["parent"] = parent
        else:
            filters["parent"] = self[0].name

        fields = self.get(filters)
        if fields:
            return fields[0]

    def get_fieldnames(self, filters=None):
        """Return the fieldnames of the main doctype's DocFields,
        optionally narrowed by extra filters."""
        if not filters: filters = {}
        filters.update({"doctype": "DocField", "parent": self[0].name})

        return map(lambda df: df.fieldname, self.get(filters))

    def get_options(self, fieldname, parent=None, parentfield=None):
        # options of a field (e.g. the linked or child doctype name)
        return self.get_field(fieldname, parent, parentfield).options

    def get_label(self, fieldname, parent=None, parentfield=None):
        return self.get_field(fieldname, parent, parentfield).label

    def get_table_fields(self):
        """Return all Table (child-table) DocFields."""
        return self.get({"doctype": "DocField", "fieldtype": "Table"})

    def get_parent_doclist(self):
        """Return a doclist of the main doc plus its direct children."""
        return webnotes.doclist([self[0]] + self.get({"parent": self[0].name}))
def rename_field(doctype, old_fieldname, new_fieldname, lookup_field=None):
"""this function assumes that sync is NOT performed"""
import webnotes.model
doctype_list = get(doctype)
old_field = doctype_list.get_field(lookup_field or old_fieldname)
if not old_field:
print "rename_field: " + (lookup_field or old_fieldname) + " not found."
if old_field.fieldtype == "Table":
# change parentfield of table mentioned in options
webnotes.conn.sql("""update `tab%s` set parentfield=%s
where parentfield=%s""" % (old_field.options.split("\n")[0], "%s", "%s"),
(new_fieldname, old_fieldname))
elif old_field.fieldtype not in webnotes.model.no_value_fields:
# copy
if doctype_list[0].issingle:
webnotes.conn.sql("""update `tabSingles` set field=%s
where doctype=%s and field=%s""",
(new_fieldname, doctype, old_fieldname))
else:
webnotes.conn.sql("""update `tab%s` set `%s`=`%s`""" % \
(doctype, new_fieldname, old_fieldname))
| mit |
jnovinger/django | tests/indexes/tests.py | 321 | 3037 | from unittest import skipUnless
from django.db import connection
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
    """
    Test index handling by the db.backends.schema infrastructure.
    """

    def test_index_name_hash(self):
        """
        Index names should be deterministic.
        """
        with connection.schema_editor() as editor:
            index_name = editor._create_index_name(
                model=Article,
                column_names=("c1", "c2", "c3"),
                suffix="123",
            )
        self.assertEqual(index_name, "indexes_article_c1_7ce4cc86123")

    def test_index_together(self):
        editor = connection.schema_editor()
        index_sql = editor._model_indexes_sql(Article)
        self.assertEqual(len(index_sql), 1)
        # Ensure the index name is properly quoted
        self.assertIn(
            connection.ops.quote_name(
                editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
            ),
            index_sql[0]
        )

    def test_index_together_single_list(self):
        # Test for using index_together with a single list (#22172)
        index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
        self.assertEqual(len(index_sql), 1)

    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_text_indexes(self):
        """Test creation of PostgreSQL-specific text indexes (#12234)"""
        from .models import IndexedArticle
        index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
        self.assertEqual(len(index_sql), 5)
        self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
        self.assertIn('("body" text_pattern_ops)', index_sql[3])
        # unique=True and db_index=True should only create the varchar-specific
        # index (#19441).
        self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])

    @skipUnless(connection.vendor == 'postgresql',
                "This is a postgresql-specific issue")
    def test_postgresql_virtual_relation_indexes(self):
        """Test indexes are not created for related objects"""
        index_sql = connection.schema_editor()._model_indexes_sql(Article)
        self.assertEqual(len(index_sql), 1)

    @skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
    def test_no_index_for_foreignkey(self):
        """
        MySQL on InnoDB already creates indexes automatically for foreign keys.
        (#14180).
        """
        storage = connection.introspection.get_storage_engine(
            connection.cursor(), ArticleTranslation._meta.db_table
        )
        if storage != "InnoDB":
            # FIX: TestCase has no `skip` method -- the correct API to skip
            # from inside a test is `skipTest` (the original raised
            # AttributeError on non-InnoDB storage engines)
            self.skipTest("This test only applies to the InnoDB storage engine")
        index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
        self.assertEqual(index_sql, [])
liosha2007/temporary-groupdocs-python-sdk | examples/api-samples/inc_samples/sample07.py | 1 | 2600 | ####<i>This sample will show how to use <b>MoveFile</b> method from Storage Api to copy/move a file in GroupDocs Storage </i>
#Import of classes from libraries
import base64
import os
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.StorageApi import StorageApi
from groupdocs.MgmtApi import MgmtApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
# Checking value on null
# Checking value on null: True only for a non-None value with non-zero length.
def IsNotNull(value):
    if value is None:
        return False
    return len(value) > 0
####Set variables and get POST data
def sample07(request):
    """Render sample07: list a GroupDocs account's files and show their
    thumbnails (written to the local templates folder as jpg files)."""
    clientId = request.POST.get("client_id")
    privateKey = request.POST.get("private_key")

    #Checking parameters
    if IsNotNull(clientId) == False or IsNotNull(privateKey) == False:
        return render_to_response('__main__:templates/sample07.pt',
            { 'error' : 'You do not enter you User id or Private key' })

    ####Create Signer, ApiClient and Storage Api objects

    #Create signer object
    signer = GroupDocsRequestSigner(privateKey)
    #Create apiClient object
    apiClient = ApiClient(signer)
    #Create Storage Api object
    storageApi = StorageApi(apiClient)

    ####Make a request to Storage API using clientId

    try:
        #Obtaining all Entities from current user
        files = storageApi.ListEntities(clientId, "", extended = True);
    except Exception, e:
        # surface the API error on the template instead of a 500
        return render_to_response('__main__:templates/sample07.pt',
            { 'error' : str(e) })

    #Obtaining all thumbnails
    thumbnail = '';
    name = '';
    currentDir = os.path.dirname(os.path.realpath(__file__))
    for i in range(len(files.result.files)):
        #Check is file have thumbnail
        if files.result.files[i].thumbnail != None:
            #Placing thumbnails to local folder
            # thumbnails arrive base64-encoded; decode and write as jpg
            fp = open(currentDir + '/../templates/thumbnail' + str(i) + '.jpg', 'wb')
            fp.write(base64.b64decode(files.result.files[i].thumbnail))
            fp.close()
            #Geting file names for thumbnails
            name = files.result.files[i].name
            #Create HTML representation for thumbnails
            thumbnail += '<img src="thumbnail' + str(i) + '.jpg", width="40px", height="40px">' + files.result.files[i].name + '</img> <br />'

    #If request was successfull - set variables for template
    return render_to_response('__main__:templates/sample07.pt',
        { 'thumbnailList' : thumbnail, 'userId' : clientId, 'privateKey' : privateKey },
        request=request)
| apache-2.0 |
cherylyli/stress-aid | env/lib/python3.5/site-packages/pymongo/periodic_executor.py | 15 | 5444 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
    """Run a target function periodically on a daemon background thread."""

    def __init__(self, interval, min_interval, target, name=None):
        """Run a target function periodically on a background thread.

        If the target's return value is false, the executor stops.

        :Parameters:
          - `interval`: Seconds between calls to `target`.
          - `min_interval`: Minimum seconds between calls if `wake` is
            called very often.
          - `target`: A function.
          - `name`: A name to give the underlying thread.
        """
        # threading.Event and its internal condition variable are expensive
        # in Python 2, see PYTHON-983. Use a boolean to know when to wake.
        # The executor's design is constrained by several Python issues, see
        # "periodic_executor.rst" in this repository.
        self._event = False
        self._interval = interval
        self._min_interval = min_interval
        self._target = target
        self._stopped = False
        self._thread = None
        self._name = name
        self._thread_will_exit = False
        self._lock = threading.Lock()

    def open(self):
        """Start. Multiple calls have no effect.

        Not safe to call from multiple threads at once.
        """
        with self._lock:
            if self._thread_will_exit:
                # If the background thread has read self._stopped as True
                # there is a chance that it has not yet exited. The call to
                # join should not block indefinitely because there is no
                # other work done outside the while loop in self._run.
                try:
                    self._thread.join()
                except ReferenceError:
                    # Thread terminated.
                    pass
            self._thread_will_exit = False
            self._stopped = False
        started = False
        try:
            started = self._thread and self._thread.is_alive()
        except ReferenceError:
            # Thread terminated.
            pass
        if not started:
            thread = threading.Thread(target=self._run, name=self._name)
            thread.daemon = True
            # Keep only a weak proxy so this executor does not keep the
            # thread object alive; the module-level _EXECUTORS registry
            # (weakrefs) handles shutdown instead.
            self._thread = weakref.proxy(thread)
            _register_executor(self)
            thread.start()

    def close(self, dummy=None):
        """Stop. To restart, call open().

        The dummy parameter allows an executor's close method to be a weakref
        callback; see monitor.py.
        """
        self._stopped = True

    def join(self, timeout=None):
        # Wait for the background thread to finish (bounded by timeout).
        if self._thread is not None:
            try:
                self._thread.join(timeout)
            except (ReferenceError, RuntimeError):
                # Thread already terminated, or not yet started.
                pass

    def wake(self):
        """Execute the target function soon."""
        self._event = True

    def __should_stop(self):
        with self._lock:
            if self._stopped:
                # Tell open() it may need to join the old thread first.
                self._thread_will_exit = True
                return True
            return False

    def _run(self):
        while not self.__should_stop():
            try:
                if not self._target():
                    # A falsy return value from the target stops the loop.
                    self._stopped = True
                    break
            except:
                # Intentionally broad: mark stopped, then propagate
                # whatever the target raised.
                self._stopped = True
                raise
            # Sleep in min_interval slices so wake() is honored quickly.
            deadline = _time() + self._interval
            while not self._stopped and _time() < deadline:
                time.sleep(self._min_interval)
                if self._event:
                    break  # Early wake.
            self._event = False
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
    """Track *executor* with a weak reference so it can be halted at exit."""
    # The callback prunes the registry entry once the executor is freed.
    _EXECUTORS.add(weakref.ref(executor, _on_executor_deleted))
def _on_executor_deleted(ref):
    # weakref callback: drop the dead reference from the registry so
    # _shutdown_executors does not iterate over freed executors.
    _EXECUTORS.remove(ref)
def _shutdown_executors():
    # At interpreter shutdown module globals may already be cleared;
    # nothing to do in that case.
    if _EXECUTORS is None:
        return
    # Copy the set. Stopping threads has the side effect of removing executors.
    executors = list(_EXECUTORS)
    # First signal all executors to close...
    for ref in executors:
        executor = ref()
        if executor:
            executor.close()
    # ...then try to join them.
    for ref in executors:
        executor = ref()
        if executor:
            # Bounded join so a stuck thread cannot hang interpreter exit.
            executor.join(1)
    executor = None
atexit.register(_shutdown_executors)
| mit |
llooker/python_sdk | test/test_credentials_google.py | 1 | 2394 | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.credentials_google import CredentialsGoogle
class TestCredentialsGoogle(unittest.TestCase):
    """Unit test stubs for the generated CredentialsGoogle model."""
    def setUp(self):
        # Generated stub: no fixtures required.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testCredentialsGoogle(self):
        """
        Test CredentialsGoogle
        """
        # Smoke test: constructing the generated model must not raise.
        model = swagger_client.models.credentials_google.CredentialsGoogle()
if __name__ == '__main__':
    unittest.main()
| mit |
paulscherrerinstitute/pcaspy | example/pysh.py | 1 | 2155 | #!/usr/bin/env python
import time
import sys
import threading
import subprocess
import shlex
from pcaspy import Driver, SimpleServer
# All PVs are published under this prefix.
prefix = 'MTEST:'
# PV database: COMMAND receives the shell command text (asynchronous
# completion), OUTPUT/ERROR expose the child's stdout/stderr, and STATUS
# flags whether a command is currently running.
pvdb = {
    'COMMAND' : {
        'type' : 'char',
        'count': 128,
        'asyn' : True
    },
    'OUTPUT' : {
        'type' : 'char',
        'count': 500,
    },
    'STATUS' : {
        'type' : 'enum',
        'enums': ['DONE', 'BUSY']
    },
    'ERROR' : {
        'type' : 'string',
    },
}
import math
class myDriver(Driver):
    """PCASpy driver that executes shell commands written to COMMAND.

    Each command runs on its own worker thread; stdout/stderr are
    published through the OUTPUT/ERROR PVs and STATUS toggles
    BUSY/DONE around the run.
    """
    def __init__(self):
        Driver.__init__(self)
        # Worker thread of the command in flight, or None when idle.
        self.tid = None
    def write(self, reason, value):
        """Accept a PV write; spawn the shell worker for COMMAND writes.

        Returns True when the value was accepted, False when rejected
        (empty command, or a previous command still running).
        """
        status = True
        # take proper actions
        if reason == 'COMMAND':
            if not self.tid:
                command = value
                if command:
                    self.tid = threading.Thread(target=self.runShell,args=(command,))
                    self.tid.start()
                else:
                    # Reject empty command strings.
                    status = False
            else:
                # Reject writes while a previous command is running.
                status = False
        # store the values
        if status:
            self.setParam(reason, value)
        return status
    def runShell(self, command):
        """Worker: run *command* and publish its output through the PVs."""
        print("DEBUG: Run ", command)
        # set status BUSY
        self.setParam('STATUS', 1)
        self.updatePVs()
        # run shell
        try:
            time.sleep(0.01)
            proc = subprocess.Popen(shlex.split(command),
                    stdout = subprocess.PIPE,
                    stderr = subprocess.PIPE)
            # NOTE(review): wait() before reading the pipes can deadlock if
            # the child fills an OS pipe buffer -- assumes commands produce
            # small output; confirm, or switch to proc.communicate().
            proc.wait()
        except OSError:
            # The command could not be started (e.g. executable not found).
            self.setParam('ERROR', str(sys.exc_info()[1]))
            self.setParam('OUTPUT', '')
        else:
            self.setParam('ERROR', proc.stderr.read().rstrip())
            self.setParam('OUTPUT', proc.stdout.read().rstrip())
        # Complete the asynchronous COMMAND write.
        self.callbackPV('COMMAND')
        # set status DONE
        self.setParam('STATUS', 0)
        self.updatePVs()
        # Mark the driver idle so new commands are accepted again.
        self.tid = None
        print("DEBUG: Finish ", command)
if __name__ == '__main__':
    # Stand up the CA server, publish the PV database and attach the driver.
    server = SimpleServer()
    server.createPV(prefix, pvdb)
    driver = myDriver()
    while True:
        # process CA transactions
        server.process(0.1)
| bsd-3-clause |
nataddrho/DigiCue-USB | Python3/src/venv/Lib/site-packages/serial/urlhandler/protocol_alt.py | 2 | 2033 | #! python
#
# This module implements a special URL handler that allows selecting an
# alternate implementation provided by some backends.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
#
# URL format: alt://port[?option[=value][&option[=value]]]
# options:
# - class=X used class named X instead of Serial
#
# example:
# use poll based implementation on Posix (Linux):
# python -m serial.tools.miniterm alt:///dev/ttyUSB0?class=PosixPollSerial
from __future__ import absolute_import
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import serial
def serial_class_for_url(url):
    """Parse an ``alt://`` URL and return ``(port_name, serial_class)``.

    The ``class=X`` query option selects an alternate ``serial.X``
    implementation; the default is ``Serial``. (The previous docstring
    claimed "extract host and port", which did not match the code.)
    """
    parts = urlparse.urlsplit(url)
    if parts.scheme != 'alt':
        raise serial.SerialException(
            'expected a string in the form "alt://port[?option[=value][&option[=value]]]": '
            'not starting with alt:// ({!r})'.format(parts.scheme))
    class_name = 'Serial'
    try:
        for option, values in urlparse.parse_qs(parts.query, True).items():
            if option == 'class':
                class_name = values[0]
            else:
                # Deliberate: caught just below and re-wrapped so all
                # option-parsing failures surface as SerialException.
                raise ValueError('unknown option: {!r}'.format(option))
    except ValueError as e:
        raise serial.SerialException(
            'expected a string in the form '
            '"alt://port[?option[=value][&option[=value]]]": {!r}'.format(e))
    if not hasattr(serial, class_name):
        raise ValueError('unknown class: {!r}'.format(class_name))
    cls = getattr(serial, class_name)
    if not issubclass(cls, serial.Serial):
        raise ValueError('class {!r} is not an instance of Serial'.format(class_name))
    # netloc + path reassembles the device name split apart by urlsplit.
    return (''.join([parts.netloc, parts.path]), cls)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
    # Manual smoke test: resolve an alt:// URL and print the port object.
    s = serial.serial_for_url('alt:///dev/ttyS0?class=PosixPollSerial')
    print(s)
| mit |
mr-niels-christensen/environment-scotland-dot-rural | src/main/rdflib/namespace.py | 7 | 15465 | from rdflib.py3compat import format_doctest_out
__doc__ = format_doctest_out("""
===================
Namespace Utilities
===================
RDFLib provides mechanisms for managing Namespaces.
In particular, there is a :class:`~rdflib.namespace.Namespace` class
that takes as its argument the base URI of the namespace.
.. code-block:: pycon
>>> from rdflib.namespace import Namespace
>>> owl = Namespace('http://www.w3.org/2002/07/owl#')
Fully qualified URIs in the namespace can be constructed either by attribute
or by dictionary access on Namespace instances:
.. code-block:: pycon
>>> owl.seeAlso
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
>>> owl['seeAlso']
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
Automatic handling of unknown predicates
-----------------------------------------
As a programming convenience, a namespace binding is automatically
created when :class:`rdflib.term.URIRef` predicates are added to the graph.
Importable namespaces
-----------------------
The following namespaces are available by directly importing from rdflib:
* RDF
* RDFS
* OWL
* XSD
* FOAF
* SKOS
* DOAP
* DC
* DCTERMS
* VOID
.. code-block:: pycon
>>> from rdflib import OWL
>>> OWL.seeAlso
rdflib.term.URIRef(%(u)s'http://www.w3.org/2002/07/owl#seeAlso')
""")
import logging
_logger = logging.getLogger(__name__)
import os
from urlparse import urljoin, urldefrag
from urllib import pathname2url
from rdflib.term import URIRef, Variable, _XSD_PFX, _is_valid_uri
__all__ = [
'is_ncname', 'split_uri', 'Namespace',
'ClosedNamespace', 'NamespaceManager',
'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID']
class Namespace(unicode):
    __doc__ = format_doctest_out("""
    Utility class for quickly generating URIRefs with a common prefix

    >>> from rdflib import Namespace
    >>> n = Namespace("http://example.org/")
    >>> n.Person # as attribute
    rdflib.term.URIRef(%(u)s'http://example.org/Person')
    >>> n['first-name'] # as item - for things that are not valid python identifiers
    rdflib.term.URIRef(%(u)s'http://example.org/first-name')
    """)
    def __new__(cls, value):
        # Python 2: accept both unicode and utf-8 encoded byte strings.
        try:
            rt = unicode.__new__(cls, value)
        except UnicodeDecodeError:
            rt = unicode.__new__(cls, value, 'utf-8')
        return rt
    @property
    def title(self):
        # NOTE: deliberately shadows unicode.title() so that ns.title
        # yields the 'title' term (e.g. dc:title) instead of the string
        # method.
        return URIRef(self + 'title')
    def term(self, name):
        # need to handle slices explicitly because of __getitem__ override
        return URIRef(self + (name if isinstance(name, basestring) else ''))
    def __getitem__(self, key, default=None):
        return self.term(key)
    def __getattr__(self, name):
        if name.startswith("__"):  # ignore any special Python names!
            raise AttributeError
        else:
            return self.term(name)
    def __repr__(self):
        return "Namespace(%s)"%unicode.__repr__(self)
class URIPattern(unicode):
    __doc__ = format_doctest_out("""
    Utility class for creating URIs according to some pattern

    This supports either new style formatting with .format
    or old-style with %% operator

    >>> u=URIPattern("http://example.org/%%s/%%d/resource")
    >>> u%%('books', 12345)
    rdflib.term.URIRef(%(u)s'http://example.org/books/12345/resource')
    """)
    def __new__(cls, value):
        # Python 2: accept both unicode and utf-8 encoded byte strings.
        try:
            rt = unicode.__new__(cls, value)
        except UnicodeDecodeError:
            rt = unicode.__new__(cls, value, 'utf-8')
        return rt
    def __mod__(self, *args, **kwargs):
        # Old-style interpolation returns a URIRef, not a plain string.
        return URIRef(unicode(self).__mod__(*args, **kwargs))
    def format(self, *args, **kwargs):
        # New-style formatting likewise produces a URIRef.
        return URIRef(unicode.format(self, *args, **kwargs))
    def __repr__(self):
        return "URIPattern(%r)"%unicode.__repr__(self)
class ClosedNamespace(object):
    """
    A namespace with a closed list of members

    Trying to create terms not listed is an error
    """
    def __init__(self, uri, terms):
        self.uri = uri
        # Precompute the full URIRef for every allowed term.
        self.__uris = {}
        for t in terms:
            self.__uris[t] = URIRef(self.uri + t)
    def term(self, name):
        uri = self.__uris.get(name)
        if uri is None:
            # Closed namespace: unknown terms are rejected outright.
            raise Exception(
                "term '%s' not in namespace '%s'" % (name, self.uri))
        else:
            return uri
    def __getitem__(self, key, default=None):
        return self.term(key)
    def __getattr__(self, name):
        if name.startswith("__"):  # ignore any special Python names!
            raise AttributeError
        else:
            return self.term(name)
    def __str__(self):
        return str(self.uri)
    def __repr__(self):
        return """rdf.namespace.ClosedNamespace('%s')""" % str(self.uri)
class _RDFNamespace(ClosedNamespace):
    """
    Closed namespace for RDF terms
    """
    def __init__(self):
        super(_RDFNamespace, self).__init__(
            URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
            terms=[
                # Syntax Names
                "RDF", "Description", "ID", "about", "parseType",
                "resource", "li", "nodeID", "datatype",
                # RDF Classes
                "Seq", "Bag", "Alt", "Statement", "Property",
                "List", "PlainLiteral",
                # RDF Properties
                "subject", "predicate", "object", "type",
                "value", "first", "rest",
                # and _n where n is a non-negative integer
                # RDF Resources
                "nil",
                # Added in RDF 1.1
                "XMLLiteral", "HTML", "langString"]
        )
    def term(self, name):
        # Accept rdf:_1, rdf:_2, ... container membership properties in
        # addition to the closed term list above.
        try:
            i = int(name)
            return URIRef("%s_%s" % (self.uri, i))
        except ValueError:
            return super(_RDFNamespace, self).term(name)
# Instances of the well-known vocabularies, exported at module level.
RDF = _RDFNamespace()
RDFS = ClosedNamespace(
    uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
    terms=[
        "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
        "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
        "ContainerMembershipProperty", "member", "Datatype"]
)
OWL = Namespace('http://www.w3.org/2002/07/owl#')
XSD = Namespace(_XSD_PFX)
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
DOAP = Namespace('http://usefulinc.com/ns/doap#')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
DC = Namespace('http://purl.org/dc/elements/1.1/')
DCTERMS = Namespace('http://purl.org/dc/terms/')
VOID = Namespace('http://rdfs.org/ns/void#')
class NamespaceManager(object):
    """
    Class for managing prefix => namespace mappings

    Sample usage from FuXi ...

    .. code-block:: python

        ruleStore = N3RuleStore(additionalBuiltins=additionalBuiltins)
        nsMgr = NamespaceManager(Graph(ruleStore))
        ruleGraph = Graph(ruleStore,namespace_manager=nsMgr)

    and ...

    .. code-block:: pycon

        >>> import rdflib
        >>> from rdflib import Graph
        >>> from rdflib.namespace import Namespace, NamespaceManager
        >>> exNs = Namespace('http://example.com/')
        >>> namespace_manager = NamespaceManager(Graph())
        >>> namespace_manager.bind('ex', exNs, override=False)
        >>> g = Graph()
        >>> g.namespace_manager = namespace_manager
        >>> all_ns = [n for n in g.namespace_manager.namespaces()]
        >>> assert ('ex', rdflib.term.URIRef('http://example.com/')) in all_ns
        >>>
    """
    def __init__(self, graph):
        self.graph = graph
        self.__cache = {}  # uri -> (prefix, namespace, name)
        self.__log = None
        # Well-known prefixes every manager resolves out of the box.
        self.bind("xml", u"http://www.w3.org/XML/1998/namespace")
        self.bind("rdf", RDF)
        self.bind("rdfs", RDFS)
        self.bind("xsd", XSD)

    def reset(self):
        """Clear the memoized qname computations (e.g. after rebinding)."""
        self.__cache = {}

    def __get_store(self):
        return self.graph.store
    store = property(__get_store)

    def qname(self, uri):
        """Return the qname for *uri*: 'prefix:name', or bare 'name' when
        the namespace is bound to the empty prefix."""
        prefix, namespace, name = self.compute_qname(uri)
        if prefix == "":
            return name
        else:
            return ":".join((prefix, name))

    def normalizeUri(self, rdfTerm):
        """
        Takes an RDF Term and 'normalizes' it into a QName (using the
        registered prefix) or (unlike compute_qname) the Notation 3
        form for URIs: <...URI...>
        """
        try:
            namespace, name = split_uri(rdfTerm)
            namespace = URIRef(unicode(namespace))
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; split_uri raises
            # plain Exception on unsplittable URIs.
            if isinstance(rdfTerm, Variable):
                return "?%s" % rdfTerm
            else:
                return "<%s>" % rdfTerm
        prefix = self.store.prefix(namespace)
        if prefix is None and isinstance(rdfTerm, Variable):
            return "?%s" % rdfTerm
        elif prefix is None:
            return "<%s>" % rdfTerm
        else:
            qNameParts = self.compute_qname(rdfTerm)
            return ':'.join([qNameParts[0], qNameParts[-1]])

    def compute_qname(self, uri, generate=True):
        """Split *uri* into (prefix, namespace, name); when no prefix is
        bound and *generate* is true, bind a fresh 'nsN' prefix."""
        if not _is_valid_uri(uri):
            raise Exception('"%s" does not look like a valid URI, I cannot serialize this. Perhaps you wanted to urlencode it?'%uri)
        if not uri in self.__cache:
            namespace, name = split_uri(uri)
            namespace = URIRef(namespace)
            prefix = self.store.prefix(namespace)
            if prefix is None:
                if not generate:
                    # Fix: the message previously contained a bare '%s'
                    # placeholder that was never interpolated, so the
                    # offending URI was not reported.
                    raise Exception(
                        "No known prefix for %s and generate=False" % uri)
                num = 1
                while 1:
                    prefix = "ns%s" % num
                    if not self.store.namespace(prefix):
                        break
                    num += 1
                self.bind(prefix, namespace)
            self.__cache[uri] = (prefix, namespace, name)
        return self.__cache[uri]

    def bind(self, prefix, namespace, override=True, replace=False):
        """bind a given namespace to the prefix

        if override, rebind, even if the given namespace is already
        bound to another prefix.

        if replace, replace any existing prefix with the new namespace
        """
        namespace = URIRef(unicode(namespace))
        # When documenting explain that override only applies in what cases
        if prefix is None:
            prefix = ''
        bound_namespace = self.store.namespace(prefix)
        # Check if the bound_namespace contains a URI
        # and if so convert it into a URIRef for comparison
        # This is to prevent duplicate namespaces with the
        # same URI
        if bound_namespace:
            bound_namespace = URIRef(bound_namespace)
        if bound_namespace and bound_namespace != namespace:
            if replace:
                self.store.bind(prefix, namespace)
                return
            # prefix already in use for different namespace
            #
            # append number to end of prefix until we find one
            # that's not in use.
            if not prefix:
                prefix = "default"
            num = 1
            while 1:
                new_prefix = "%s%s" % (prefix, num)
                tnamespace = self.store.namespace(new_prefix)
                if tnamespace and namespace == URIRef(tnamespace):
                    # the prefix is already bound to the correct
                    # namespace
                    return
                if not self.store.namespace(new_prefix):
                    break
                num += 1
            self.store.bind(new_prefix, namespace)
        else:
            bound_prefix = self.store.prefix(namespace)
            if bound_prefix is None:
                self.store.bind(prefix, namespace)
            elif bound_prefix == prefix:
                pass  # already bound
            else:
                if override or bound_prefix.startswith("_"):  # or a generated
                    # prefix
                    self.store.bind(prefix, namespace)

    def namespaces(self):
        """Yield (prefix, URIRef(namespace)) for every binding in the store."""
        for prefix, namespace in self.store.namespaces():
            namespace = URIRef(namespace)
            yield prefix, namespace

    def absolutize(self, uri, defrag=1):
        """Resolve *uri* against file://<cwd>/, optionally keeping a
        trailing '#' when *defrag* is false."""
        base = urljoin("file:", pathname2url(os.getcwd()))
        result = urljoin("%s/" % base, uri, allow_fragments=not defrag)
        if defrag:
            result = urldefrag(result)[0]
        if not defrag:
            if uri and uri[-1] == "#" and result[-1] != "#":
                result = "%s#" % result
        return URIRef(result)
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
# Lt, Nl.
#
# * Name characters other than Name-start characters must have one of
# the categories Mc, Me, Mn, Lm, or Nd.
#
# * Characters in the compatibility area (i.e. with character code
# greater than #xF900 and less than #xFFFE) are not allowed in XML
# names.
#
# * Characters which have a font or compatibility decomposition
# (i.e. those with a "compatibility formatting tag" in field 5 of the
# database -- marked by field 5 beginning with a "<") are not allowed.
#
# * The following characters are treated as name-start characters rather
# than name characters, because the property file classifies them as
# Alphabetic: [#x02BB-#x02C1], #x0559, #x06E5, #x06E6.
#
# * Characters #x20DD-#x20E0 are excluded (in accordance with Unicode
# 2.0, section 5.14).
#
# * Character #x00B7 is classified as an extender, because the property
# list so identifies it.
#
# * Character #x0387 is added as a name character, because #x00B7 is its
# canonical equivalent.
#
# * Characters ':' and '_' are allowed as name-start characters.
#
# * Characters '-' and '.' are allowed as name characters.
from unicodedata import category
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = [u"\u00B7", u"\u0387", u"-", u".", u"_"]
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# [4] NCName ::= (Letter | '_') (NCNameChar)* /* An XML Name, minus
# the ":" */
# [5] NCNameChar ::= Letter | Digit | '.' | '-' | '_' | CombiningChar
# | Extender
def is_ncname(name):
    """Return 1 if *name* is a valid XML NCName, else 0 (integer flags
    kept for backwards compatibility with callers)."""
    first = name[0]
    if first == "_" or category(first) in NAME_START_CATEGORIES:
        for i in xrange(1, len(name)):
            c = name[i]
            if not category(c) in NAME_CATEGORIES:
                if c in ALLOWED_NAME_CHARS:
                    continue
                return 0
            # if in compatibility area
            # if decomposition(c)!='':
            #    return 0
        return 1
    else:
        return 0
XMLNS = "http://www.w3.org/XML/1998/namespace"
def split_uri(uri):
    """Split *uri* into (namespace, localname) so that the localname is
    a valid XML name; raises Exception when no split point exists."""
    if uri.startswith(XMLNS):
        return (XMLNS, uri.split(XMLNS)[1])
    length = len(uri)
    # Scan backwards for the first character that cannot be part of a name.
    for i in xrange(0, length):
        c = uri[-i - 1]
        if not category(c) in NAME_CATEGORIES:
            if c in ALLOWED_NAME_CHARS:
                continue
            # Then scan forward from that point for a legal name-start char.
            for j in xrange(-1 - i, length):
                if category(uri[j]) in NAME_START_CATEGORIES or uri[j] == "_":
                    ns = uri[:j]
                    if not ns:
                        break
                    ln = uri[j:]
                    return (ns, ln)
            break
    raise Exception("Can't split '%s'" % uri)
| apache-2.0 |
Dhivyap/ansible | lib/ansible/modules/network/aci/mso_schema_template_vrf.py | 26 | 5576 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_vrf
short_description: Manage VRFs in schema templates
description:
- Manage VRFs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
vrf:
description:
- The name of the VRF to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
layer3_multicast:
description:
- Whether to enable L3 multicast.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new VRF
mso_schema_template_vrf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
vrf: VRF 1
state: present
delegate_to: localhost
- name: Remove an VRF
mso_schema_template_vrf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
vrf: VRF1
state: absent
delegate_to: localhost
- name: Query a specific VRFs
mso_schema_template_vrf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
vrf: VRF1
state: query
delegate_to: localhost
register: query_result
- name: Query all VRFs
mso_schema_template_vrf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, issubset
def main():
    """Ansible module entry point: create/delete/query a VRF in an MSO
    schema template via JSON-patch operations against the schema."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        template=dict(type='str', required=True),
        vrf=dict(type='str', aliases=['name']),  # This parameter is not required for querying all objects
        display_name=dict(type='str'),
        layer3_multicast=dict(type='bool'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['vrf']],
            ['state', 'present', ['vrf']],
        ],
    )
    schema = module.params['schema']
    template = module.params['template']
    vrf = module.params['vrf']
    display_name = module.params['display_name']
    layer3_multicast = module.params['layer3_multicast']
    state = module.params['state']
    mso = MSOModule(module)
    # Get schema_id
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if not schema_obj:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
    schema_path = 'schemas/{id}'.format(**schema_obj)
    # Get template
    templates = [t['name'] for t in schema_obj['templates']]
    if template not in templates:
        mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
    template_idx = templates.index(template)
    # Get VRF (previous comment said "ANP" -- copy-paste from sibling module)
    vrfs = [v['name'] for v in schema_obj['templates'][template_idx]['vrfs']]
    if vrf is not None and vrf in vrfs:
        vrf_idx = vrfs.index(vrf)
        mso.existing = schema_obj['templates'][template_idx]['vrfs'][vrf_idx]
    if state == 'query':
        # Without a vrf name, return every VRF in the template.
        if vrf is None:
            mso.existing = schema_obj['templates'][template_idx]['vrfs']
        elif not mso.existing:
            mso.fail_json(msg="VRF '{vrf}' not found".format(vrf=vrf))
        mso.exit_json()
    vrfs_path = '/templates/{0}/vrfs'.format(template)
    vrf_path = '/templates/{0}/vrfs/{1}'.format(template, vrf)
    ops = []
    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=vrf_path))
    elif state == 'present':
        # Default the display name to the VRF name on first creation.
        if display_name is None and not mso.existing:
            display_name = vrf
        payload = dict(
            name=vrf,
            displayName=display_name,
            l3MCast=layer3_multicast,
            # FIXME
            regions=[],
        )
        mso.sanitize(payload, collate=True)
        if mso.existing:
            ops.append(dict(op='replace', path=vrf_path, value=mso.sent))
        else:
            ops.append(dict(op='add', path=vrfs_path + '/-', value=mso.sent))
        mso.existing = mso.proposed
    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)
    mso.exit_json()
if __name__ == "__main__":
    main()
| gpl-3.0 |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/billiard/einfo.py | 5 | 3671 | from __future__ import absolute_import
import sys
import traceback
__all__ = ['ExceptionInfo', 'Traceback']
DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8
class _Code(object):
def __init__(self, code):
self.co_filename = code.co_filename
self.co_name = code.co_name
self.co_argcount = code.co_argcount
self.co_cellvars = ()
self.co_firstlineno = code.co_firstlineno
self.co_flags = code.co_flags
self.co_freevars = ()
self.co_code = b''
self.co_lnotab = b''
self.co_names = code.co_names
self.co_nlocals = code.co_nlocals
self.co_stacksize = code.co_stacksize
self.co_varnames = ()
class _Frame(object):
    # Wrapper class used for the frame's code object; override point.
    Code = _Code
    def __init__(self, frame):
        """Build a picklable snapshot of *frame*, dropping references
        that would keep the original frame graph alive."""
        self.f_builtins = {}
        # Keep just enough globals for traceback formatting.
        self.f_globals = {
            "__file__": frame.f_globals.get("__file__", "__main__"),
            "__name__": frame.f_globals.get("__name__"),
            "__loader__": None,
        }
        self.f_locals = fl = {}
        try:
            # Preserved so traceback-hiding conventions keep working.
            fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"]
        except KeyError:
            pass
        self.f_back = None
        self.f_trace = None
        self.f_exc_traceback = None
        self.f_exc_type = None
        self.f_exc_value = None
        self.f_code = self.Code(frame.f_code)
        self.f_lineno = frame.f_lineno
        self.f_lasti = frame.f_lasti
        # don't want to hit https://bugs.python.org/issue21967
        self.f_restricted = False
class _Object(object):
def __init__(self, **kw):
[setattr(self, k, v) for k, v in kw.items()]
class _Truncated(object):
    """Sentinel traceback entry marking where a too-deep chain was cut."""
    def __init__(self):
        self.tb_lineno = -1
        # Fake frame whose code object renders a visible truncation marker.
        self.tb_frame = _Object(
            f_globals={"__file__": "",
                       "__name__": "",
                       "__loader__": None},
            f_fileno=None,
            f_code=_Object(co_filename="...",
                           co_name="[rest of traceback truncated]"),
        )
        self.tb_next = None
        self.tb_lasti = 0
class Traceback(object):
    # Frame wrapper class; override point for subclasses.
    Frame = _Frame
    def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0):
        """Recursively copy traceback *tb* into picklable wrappers,
        truncating the chain after *max_frames* linked entries."""
        self.tb_frame = self.Frame(tb.tb_frame)
        self.tb_lineno = tb.tb_lineno
        self.tb_lasti = tb.tb_lasti
        self.tb_next = None
        if tb.tb_next is not None:
            if depth <= max_frames:
                self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1)
            else:
                # Depth limit reached: cap the chain with a marker entry.
                self.tb_next = _Truncated()
class ExceptionInfo(object):
    """Exception wrapping an exception and its traceback.

    :param exc_info: The exception info tuple as returned by
        :func:`sys.exc_info`.
    """
    #: Exception type.
    type = None
    #: Exception instance.
    exception = None
    #: Pickleable traceback instance for use with :mod:`traceback`
    tb = None
    #: String representation of the traceback.
    traceback = None
    #: Set to true if this is an internal error.
    internal = False
    def __init__(self, exc_info=None, internal=False):
        self.type, self.exception, tb = exc_info or sys.exc_info()
        try:
            self.tb = Traceback(tb)
            self.traceback = ''.join(
                traceback.format_exception(self.type, self.exception, tb),
            )
            self.internal = internal
        finally:
            # Drop the local reference to the real traceback to avoid
            # keeping the frame reference cycle alive.
            del(tb)
    def __str__(self):
        return self.traceback
    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.exception, )
    @property
    def exc_info(self):
        # Re-assemble a (type, value, traceback)-shaped tuple using the
        # picklable traceback wrapper.
        return self.type, self.exception, self.tb
| apache-2.0 |
Linaro/squad | squad/core/management/commands/migrate_test_runs.py | 2 | 4214 | import sys
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.db import transaction
from squad.core.models import Project, Build, Environment, Status, Test, Metric
from squad.core.tasks import UpdateProjectStatus
class Command(BaseCommand):
    # Shown by ``manage.py help migrate_test_runs``.
    help = """Move test runs identified by environment slug
    from one project to another. This action preserves
    datetime of the objects and statuses."""
    def add_arguments(self, parser):
        """Register the three required command-line options."""
        parser.add_argument(
            '--old-project-slug',
            dest="old_project_slug",
            help="Slug of the project from which to migrate test runs"
        )
        parser.add_argument(
            '--new-project-slug',
            dest="new_project_slug",
            help="Slug of the project to which to migrate test runs"
        )
        parser.add_argument(
            '--env-slug',
            dest="env_slug",
            help="Slug of the environment to migrate to new project"
        )
    def handle(self, *args, **options):
        """Validate options, resolve both projects and the environment,
        then delegate the actual migration to :meth:`__handle__`.
        Exits with status 1 on a missing option and (as before) with
        status 0 when a lookup fails.
        """
        self.options = options
        if not self.options['old_project_slug']:
            print("ERROR: old_project_slug missing")
            sys.exit(1)
        if not self.options['new_project_slug']:
            print("ERROR: new_project_slug missing")
            sys.exit(1)
        if not self.options['env_slug']:
            print("ERROR: env_slug missing")
            sys.exit(1)
        old_project = None
        new_project = None
        env = None
        try:
            old_project = Project.objects.get(slug=self.options['old_project_slug'])
        except ObjectDoesNotExist:
            print("Project: %s not found. Exiting" % self.options['old_project_slug'])
            sys.exit(0)
        try:
            new_project = Project.objects.get(slug=self.options['new_project_slug'])
        except ObjectDoesNotExist:
            print("Project: %s not found. Exiting" % self.options['new_project_slug'])
            sys.exit(0)
        try:
            env = Environment.objects.get(project=old_project, slug=self.options['env_slug'])
        except ObjectDoesNotExist:
            print("Environment: %s not found. Exiting" % self.options['env_slug'])
            sys.exit(0)
        print("Migrating testruns from project %s to %s" % (old_project.slug, new_project.slug))
        print("All test runs with environment name: %s will be migrated" % env.slug)
        self.__handle__(old_project, new_project, env)
    @transaction.atomic
    def __handle__(self, old_project, new_project, env):
        """Perform the migration inside one database transaction."""
        for build in old_project.builds.all():
            if build.test_runs.filter(environment=env):
                print("moving build: %s" % build)
                # Recreate (or reuse) the build under the new project,
                # preserving its original timestamps.
                new_build, _ = Build.objects.get_or_create(
                    version=build.version,
                    datetime=build.datetime,
                    project=new_project,
                    created_at=build.created_at)
                for testrun in build.test_runs.filter(environment=env):
                    testrun.build = new_build
                    testrun.save()
                    testrun.environment.project = new_project
                    testrun.environment.save()
                    for testjob in testrun.test_jobs.all():
                        testjob.target = new_project
                        testjob.save()
                    UpdateProjectStatus()(testrun)
                # Keep the original build-status timestamps on the new build.
                new_build.status.created_at = build.status.created_at
                new_build.status.last_updated = build.status.last_updated
                new_build.status.save()
            else:
                print("No matching test runs found in build: %s" % build)
        env.project = new_project
        env.save()
        # Re-home suites, and the Status/Test/Metric rows that now belong
        # to builds under the new project.
        for suite in old_project.suites.all():
            new_suite, _ = new_project.suites.get_or_create(
                slug=suite.slug,
                defaults={'name': suite.name}
            )
            for model in [Status, Test, Metric]:
                model.objects.filter(
                    suite=suite,
                    test_run__build__project_id=new_project.id,
                ).update(suite=new_suite)
| agpl-3.0 |
odoo-turkiye/odoo | addons/purchase_double_validation/__init__.py | 441 | 1090 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_double_validation_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ncoghlan/pip | pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """Charset prober for GB2312 (simplified Chinese).

    Wires the GB2312 coding state machine and character-distribution
    analyser into the generic multi-byte prober machinery.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # The two components are independent; reset() (re)initialises both.
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        self.reset()

    def get_charset_name(self):
        return "GB2312"
| mit |
spmjc/plugin.video.freplay | resources/lib/channels/pluzz2.py | 1 | 6003 | # -*- coding: utf-8 -*-
import json
import resources.lib.utils as utils
from resources.lib import globalvar
title = ['La 1ère', 'France 2', 'France 3', 'France 4', 'France 5', 'France Ô']
img = ['la_1ere', 'france2', 'france3', 'france4', 'france5', 'franceo']
readyForUse = True
channelCatalog = 'http://pluzz.webservices.francetelevisions.fr/' \
'pluzz/liste/type/replay/nb/10000/chaine/%s'
showInfo = 'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/' \
'?idDiffusion=%s&catalogue=Pluzz'
imgURL = 'http://refonte.webservices.francetelevisions.fr%s'
categories = {"france2": "France 2",
"france3": "France 3",
"france4": "France 4",
"france5": "France 5",
"franceo": "France Ô",
"guadeloupe": "Guadeloupe 1ère",
"guyane": "Guyane 1ère",
"martinique": "Martinique 1ère",
"mayotte": "Mayotte 1ère",
"nouvellecaledonie": "Nouvelle Calédonie 1ère",
"polynesie": "Polynésie 1ère",
"reunion": "Réunion 1ère",
"saintpierreetmiquelon": "St-Pierre et Miquelon 1ère",
"wallisetfutuna": "Wallis et Futuna 1ère",
"sport": "Sport",
"info": "Info",
"documentaire": "Documentaire",
"seriefiction": "Série & fiction",
"magazine": "Magazine",
"jeunesse": "Jeunesse",
"divertissement": "Divertissement",
"jeu": "Jeu",
"culture": "Culture"}
def list_shows(channel, folder):
    """List folders or programmes for *channel* from the cached catalogue.

    With folder == 'none', return the distinct category folders; otherwise
    return the unique programmes belonging to that category.  Each entry is
    a [channel, id, label, image, kind] list as expected by the addon.
    """
    shows = []
    uniqueItem = dict()  # de-duplication by rubrique (folders) or id (shows)
    realChannel = channel
    if channel == 'la_1ere':
        # 'la_1ere' is a meta-channel: query every overseas station at once
        # (%2C is a URL-encoded comma).
        realChannel = 'la_1ere_reunion%2C' \
                      'la_1ere_guyane%2C' \
                      'la_1ere_polynesie%2C' \
                      'la_1ere_martinique%2C' \
                      'la_1ere_mayotte%2C' \
                      'la_1ere_nouvellecaledonie%2C' \
                      'la_1ere_guadeloupe%2C' \
                      'la_1ere_wallisetfutuna%2C' \
                      'la_1ere_saintpierreetmiquelon'
    url_json = channelCatalog % (realChannel)
    filePath = utils.downloadCatalog(url_json,
                                     '%s.json' % (channel),
                                     False,
                                     {})
    filPrgm = open(filePath).read()
    jsonParser = json.loads(filPrgm)
    emissions = jsonParser['reponse']['emissions']
    if folder == 'none':
        # Top level: one entry per distinct category (rubrique).
        for emission in emissions:
            rubrique = emission['rubrique'].encode('utf-8')
            if rubrique not in uniqueItem:
                uniqueItem[rubrique] = rubrique
                shows.append([
                    channel,
                    rubrique,
                    change_to_nicer_name(rubrique),
                    '',
                    'folder'])
    else:
        # Inside a category: one entry per distinct programme id.
        for emission in emissions:
            rubrique = emission['rubrique'].encode('utf-8')
            if rubrique == folder:
                titre = emission['titre_programme'].encode('utf-8')
                if titre != '':
                    # Fall back to the emission id when the programme id is empty.
                    id = emission['id_programme'].encode('utf-8')
                    if id == '':
                        id = emission['id_emission'].encode('utf-8')
                    if id not in uniqueItem:
                        uniqueItem[id] = id
                        shows.append([
                            channel,
                            id,
                            titre,
                            imgURL % (emission['image_large']),
                            'shows'])
    return shows
def change_to_nicer_name(original_name):
    """Map an internal category slug to its display name; unknown slugs
    are returned unchanged."""
    return categories.get(original_name, original_name)
def list_videos(channel, folder):
    """Build the playable-video entries for programme *folder* on *channel*.

    Reads the cached Pluzz catalogue and returns a list of
    [channel, diffusion_id, title, image, infoLabels, 'play'] entries.
    """
    videos = []
    filePath = utils.downloadCatalog(
        channelCatalog % (channel),
        '%s.json' % (channel),
        False,
        {})
    filPrgm = open(filePath).read()
    jsonParser = json.loads(filPrgm)
    emissions = jsonParser['reponse']['emissions']
    for emission in emissions:
        # Fall back to the emission id when the programme id is empty.
        id = emission['id_programme'].encode('utf-8')
        if id == '':
            id = emission['id_emission'].encode('utf-8')
        if id == folder:
            # Bug fix: 'year' and 'image' were only assigned inside their
            # key checks, raising UnboundLocalError (or leaking the value
            # from a previous iteration) when 'date_diffusion' or
            # 'image_medium' was missing.  Initialise all fields up front.
            titre = ''
            plot = ''
            duration = 0
            year = ''
            image = ''
            id_diffusion = emission['id_diffusion']
            if 'accroche' in emission:
                plot = emission['accroche'].encode('utf-8')
            if 'real_duration' in emission:
                duration = int(emission['real_duration'])
            if 'titre' in emission:
                titre = emission['titre'].encode('utf-8')
            if 'soustitre' in emission:
                titre += ' - ' + emission['soustitre'].encode('utf-8')
            if 'date_diffusion' in emission:
                year = emission['date_diffusion'][:4]
                titre += ' - ' + emission['date_diffusion'][:10].encode('utf-8')
            if 'image_medium' in emission:
                image = imgURL % emission['image_medium']
            infoLabels = {
                "Title": titre,
                "Plot": plot,
                "Duration": duration,
                "Year": year}
            videos.append([
                channel,
                id_diffusion,
                titre,
                image,
                infoLabels,
                'play'])
    return videos
def getVideoURL(channel, id):
    """Return the stream URL for diffusion *id* in the quality configured
    for *channel* in the addon settings.

    Falls through (returning None implicitly) when no video entry matches
    the configured quality.
    """
    print showInfo % (id)  # Python 2 debug trace
    filPrgm = utils.get_webcontent(showInfo % (id))
    jsonParser = json.loads(filPrgm)
    for video in jsonParser['videos']:
        if video['format'] == globalvar.ADDON.getSetting(
                '%sQuality' % (channel)):
            url = video['url']
            return url
| gpl-2.0 |
jmesteve/saas3 | openerp/addons_extra/ibeacon/pexpect-2.3/build/lib/fdpexpect.py | 264 | 2488 | """This is like pexpect, but will work on any file descriptor that you pass it.
So you are reponsible for opening and close the file descriptor.
$Id: fdpexpect.py 505 2007-12-26 21:33:50Z noah $
"""
from pexpect import *
import os
__all__ = ['fdspawn']
class fdspawn (spawn):
    """This is like pexpect.spawn but allows you to supply your own open file
    descriptor. For example, you could use it to read through a file looking
    for patterns, or to control a modem or serial device. """

    def __init__ (self, fd, args=[], timeout=30, maxread=2000, searchwindowsize=None, logfile=None):
        """This takes a file descriptor (an int) or an object that support the
        fileno() method (returning an int). All Python file-like objects
        support fileno(). """

        ### TODO: Add better handling of trying to use fdspawn in place of spawn
        ### TODO: (overload to allow fdspawn to also handle commands as spawn does.

        if type(fd) != type(0) and hasattr(fd, 'fileno'):
            fd = fd.fileno()

        if type(fd) != type(0):
            # raise-as-call form: valid in both Python 2 and 3.
            raise ExceptionPexpect('The fd argument is not an int. If this is a command string then maybe you want to use pexpect.spawn.')

        try: # make sure fd is a valid file descriptor
            os.fstat(fd)
        except OSError:
            raise ExceptionPexpect('The fd argument is not a valid file descriptor.')

        self.args = None
        self.command = None
        spawn.__init__(self, None, args, timeout, maxread, searchwindowsize, logfile)
        self.child_fd = fd
        self.own_fd = False
        self.closed = False
        self.name = '<file descriptor %d>' % fd

    def __del__ (self):
        return

    def close (self):
        """Flush and close the wrapped descriptor; safe to call twice.

        Bug fix: the original called ``self.close(self)`` when own_fd was
        true, which raised TypeError (and would have recursed); both
        paths now flush and close the descriptor.
        """
        if self.child_fd == -1:
            return
        self.flush()
        os.close(self.child_fd)
        self.child_fd = -1
        self.closed = True

    def isalive (self):
        """This checks if the file descriptor is still valid. If os.fstat()
        does not raise an exception then we assume it is alive. """

        if self.child_fd == -1:
            return False
        try:
            os.fstat(self.child_fd)
            return True
        except OSError:
            # Narrowed from a bare ``except``: os.fstat raises OSError on a
            # bad descriptor; anything else is a real bug and should surface.
            return False

    def terminate (self, force=False):
        raise ExceptionPexpect('This method is not valid for file descriptors.')

    def kill (self, sig):
        """No-op: there is no child process to signal."""
        return
| agpl-3.0 |
mjfarmer/scada_py | env/lib/python2.7/site-packages/IPython/utils/_tokenize_py2.py | 29 | 17153 | """Patched version of standard library tokenize, to deal with various bugs.
Patches
- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
manually applied.
- Newlines in comments and blank lines should be either NL or NEWLINE, depending
on whether they are in a multi-line statement. Filed as Python issue #17061.
-------------------------------------------------------------------------------
Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
from __future__ import print_function
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger')
import string, re
from token import *
import token
__all__ = [x for x in dir(token) if not x.startswith("_")]
__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
del x
del token
__all__ += ["TokenError"]
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
def group(*choices):
    """Return a regex group matching any one of *choices*."""
    return '(%s)' % '|'.join(choices)

def any(*choices):
    """Return a regex matching zero or more of *choices* (shadows builtins.any)."""
    return group(*choices) + '*'

def maybe(*choices):
    """Return a regex optionally matching one of *choices*."""
    return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None, 'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""'):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception):
    """Raised when EOF is reached inside an unterminated string or statement."""
    pass

class StopTokenizing(Exception):
    """Raised by a tokeneater callback to abort tokenize() early."""
    pass
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
    """Print one token as 'srow,scol-erow,ecol:  TYPE  repr' (debug aid)."""
    (srow, scol), (erow, ecol) = srow_scol, erow_ecol
    print("%d,%d-%d,%d:\t%s\t%s"
          % (srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().
    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.
    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        # A tokeneater may raise StopTokenizing to end tokenization early.
        pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Feed every 5-tuple from generate_tokens(readline) to *tokeneater*."""
    for tok in generate_tokens(readline):
        tokeneater(*tok)
class Untokenizer:
    """Rebuild source text from a token stream (inverse of generate_tokens).

    Tracks the (row, col) end position of the previously emitted token so
    the original inter-token whitespace can be recreated.
    """

    def __init__(self):
        # Accumulated output fragments, joined by untokenize().
        self.tokens = []
        # End position of the previously emitted token.
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start, tok_type=None):
        """Emit the whitespace needed to move from the previous token's end
        to *start*.

        *tok_type* is the type of the token about to be emitted; it defaults
        to None for backward compatibility.  Bug fix: this method previously
        referenced an undefined name ``tok_type`` and raised NameError
        whenever a token started on a later row than the previous one ended
        (i.e. any backslash-continued line).
        """
        row, col = start
        assert row >= self.prev_row
        col_offset = col - self.prev_col
        if col_offset > 0:
            self.tokens.append(" " * col_offset)
        elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
            # Line was backslash-continued
            self.tokens.append(" ")

    def untokenize(self, tokens):
        """Join *tokens* (4+-tuples with positions, or bare 2-tuples) back
        into source text."""
        iterable = iter(tokens)
        for t in iterable:
            if len(t) == 2:
                # No position info: fall back to compat mode for the rest.
                self.compat(t, iterable)
                break
            tok_type, token, start, end = t[:4]
            self.add_whitespace(start, tok_type)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Best-effort reconstruction from (type, string) pairs only."""
        # This import is here to avoid problems when the itertools
        # module is not built yet and tokenize is imported.
        from itertools import chain
        startline = False
        prevstring = False
        indents = []
        toks_append = self.tokens.append
        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value. If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize the back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    # Thin wrapper: all the work happens in the Untokenizer class.
    ut = Untokenizer()
    return ut.untokenize(iterable)
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string. Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    # contstr/contline hold the partial text of a multi-line string.
    contstr, needcont = '', 0
    contline = None
    # Stack of indentation column widths; [0] is top level.
    indents = [0]
    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum += 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string not continued with a backslash.
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NEWLINE, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield (NEWLINE, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]
                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Patch: NL inside brackets, NEWLINE otherwise.
                    yield (NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos += 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__':                     # testing
    import sys
    # Tokenize the file named on the command line, or stdin otherwise.
    source = open(sys.argv[1]) if len(sys.argv) > 1 else sys.stdin
    tokenize(source.readline)
memkeytm/p2pool | wstools/c14n.py | 290 | 16401 | #! /usr/bin/env python
'''XML Canonicalization
Patches Applied to xml.dom.ext.c14n:
http://sourceforge.net/projects/pyxml/
[ 1444526 ] c14n.py: http://www.w3.org/TR/xml-exc-c14n/ fix
-- includes [ 829905 ] c14n.py fix for bug #825115,
Date Submitted: 2003-10-24 23:43
-- include dependent namespace declarations declared in ancestor nodes
(checking attributes and tags),
-- handle InclusiveNamespaces PrefixList parameter
This module generates canonical XML of a document or element.
http://www.w3.org/TR/2001/REC-xml-c14n-20010315
and includes a prototype of exclusive canonicalization
http://www.w3.org/Signature/Drafts/xml-exc-c14n
Requires PyXML 0.7.0 or later.
Known issues if using Ft.Lib.pDomlette:
1. Unicode
2. does not white space normalize attributes of type NMTOKEN and ID?
3. seems to be include "\n" after importing external entities?
Note, this version processes a DOM tree, and consequently it processes
namespace nodes as attributes, not from a node's namespace axis. This
permits simple document and element canonicalization without
XPath. When XPath is used, the XPath result node list is passed and used to
determine if the node is in the XPath result list, but little else.
Authors:
"Joseph M. Reagle Jr." <reagle@w3.org>
"Rich Salz" <rsalz@zolera.com>
$Date$ by $Author$
'''
_copyright = '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
import string
from xml.dom import Node
try:
from xml.ns import XMLNS
except:
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_IN_XML_NS = lambda n: n.name.startswith("xmlns")
_inclusive = lambda n: n.unsuppressedPrefixes == None
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1,n2):
    '''_sorter(n1,n2) -> int
    Sorting predicate for non-NS attributes.'''
    # Order by namespace URI first, then by local name, as required by the
    # C14N attribute ordering rules.
    # NOTE(review): ``cmp`` is Python 2 only; this module predates Python 3.
    i = cmp(n1.namespaceURI, n2.namespaceURI)
    if i: return i
    return cmp(n1.localName, n2.localName)
def _sorter_ns(n1,n2):
    '''_sorter_ns((n,v),(n,v)) -> int
    "(an empty namespace URI is lexicographically least)."'''
    # The default 'xmlns' declaration always sorts before prefixed ones.
    if n1[0] == 'xmlns': return -1
    if n2[0] == 'xmlns': return 1
    return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
    '''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
    Return true if that nodespace is utilized within the node'''
    # Reduce the attribute-name form ('xmlns:foo' / 'xmlns') to the bare prefix.
    if n.startswith('xmlns:'):
        n = n[6:]
    elif n.startswith('xmlns'):
        n = n[5:]
    # Prefix used by the element itself, or explicitly requested?
    if (n=="" and node.prefix in ["#default", None]) or \
        n == node.prefix or n in unsuppressedPrefixes:
        return 1
    # Prefix used by one of the element's other attributes?
    for attr in other_attrs:
        if n == attr.prefix: return 1
    # For exclusive need to look at attributes
    if unsuppressedPrefixes is not None:
        for attr in _attrs(node):
            if n == attr.prefix: return 1
    return 0
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes):
    '''http://www.w3.org/TR/xml-exc-c14n/
    InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that
    are handled in the manner described by the Canonical XML Recommendation

    Returns (inclusive, unused_namespace_dict): the context attributes to
    render inclusively, and the xmlns declarations not used by this node.'''
    inclusive = []
    # Prefixes actually used by this element and its (non-xmlns) attributes.
    if node.prefix:
        usedPrefixes = ['xmlns:%s' %node.prefix]
    else:
        usedPrefixes = ['xmlns']
    for a in _attrs(node):
        if a.nodeName.startswith('xmlns') or not a.prefix: continue
        usedPrefixes.append('xmlns:%s' %a.prefix)
    unused_namespace_dict = {}
    for attr in context:
        n = attr.nodeName
        # Declarations named in the PrefixList (by raw name or by prefix)
        # are always included; declarations in use are included; the rest
        # are collected as unused.
        if n in unsuppressedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes:
            inclusive.append(attr)
        elif attr.nodeName in usedPrefixes:
            inclusive.append(attr)
        elif n.startswith('xmlns:'):
            unused_namespace_dict[n] = attr.value
    return inclusive, unused_namespace_dict
#_in_subset = lambda subset, node: not subset or node in subset
_in_subset = lambda subset, node: subset is None or node in subset # rich's tweak
class _implementation:
'''Implementation class for C14N. This accompanies a node during it's
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
    def __init__(self, node, write, **kw):
        '''Create and run the implementation.

        node  -- document or element DOM node to canonicalize
        write -- callable receiving the serialized output chunks
        kw    -- subset, comments, unsuppressedPrefixes, nsdict options
        '''
        self.write = write
        self.subset = kw.get('subset')
        self.comments = kw.get('comments', 0)
        self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
        nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
        # Processing state.
        self.state = (nsdict, {'xml':''}, {}, {}) #0422
        if node.nodeType == Node.DOCUMENT_NODE:
            self._do_document(node)
        elif node.nodeType == Node.ELEMENT_NODE:
            self.documentOrder = _Element        # At document element
            if not _inclusive(self):
                # Exclusive C14N: honour the InclusiveNamespaces PrefixList.
                inherited,unused = _inclusiveNamespacePrefixes(node, self._inherit_context(node),
                                self.unsuppressedPrefixes)
                self._do_element(node, inherited, unused=unused)
            else:
                inherited = self._inherit_context(node)
                self._do_element(node, inherited)
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            pass
        else:
            raise TypeError, str(node)
    def _inherit_context(self, node):
        '''_inherit_context(self, node) -> list
        Scan ancestors of attribute and namespace context.  Used only
        for single element node canonicalization, not for subset
        canonicalization.'''
        # Collect the initial list of xml:foo attributes.
        xmlattrs = filter(_IN_XML_NS, _attrs(node))
        # Walk up and get all xml:XXX attributes we inherit.
        # Nearer ancestors win: only the first occurrence of each local
        # name is kept.
        inherited, parent = [], node.parentNode
        while parent and parent.nodeType == Node.ELEMENT_NODE:
            for a in filter(_IN_XML_NS, _attrs(parent)):
                n = a.localName
                if n not in xmlattrs:
                    xmlattrs.append(n)
                    inherited.append(a)
            parent = parent.parentNode
        return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node): return
s = string.replace(node.data, "&", "&")
s = string.replace(s, "<", "<")
s = string.replace(s, ">", ">")
s = string.replace(s, "\015", "
")
if s: self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node): return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement: W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement: W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
''''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
s = string.replace(value, "&", "&")
s = string.replace(s, "<", "<")
s = string.replace(s, '"', '"')
s = string.replace(s, '\011', '	')
s = string.replace(s, '\012', '
')
s = string.replace(s, '\015', '
')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = [], unused = None):
'''_do_element(self, node, initial_other_attrs = [], unused = {}) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
# ns_unused_inherited -- not rendered namespaces, used for exclusive
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_unused_inherited = unused
if unused is None:
ns_unused_inherited = self.state[3].copy()
ns_local = ns_parent.copy()
inclusive = _inclusive(self)
xml_attrs_local = {}
# Divide attributes into NS, XML, and others.
other_attrs = []
in_subset = _in_subset(self.subset, node)
for a in initial_other_attrs + _attrs(node):
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:": n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if inclusive or (in_subset and _in_subset(self.subset, a)): #020925 Test to see if attribute node in subset
xml_attrs_local[a.nodeName] = a #0426
else:
if _in_subset(self.subset, a): #020925 Test to see if attribute node in subset
other_attrs.append(a)
# # TODO: exclusive, might need to define xmlns:prefix here
# if not inclusive and a.prefix is not None and not ns_rendered.has_key('xmlns:%s' %a.prefix):
# ns_local['xmlns:%s' %a.prefix] = ??
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
if not inclusive:
if node.prefix is not None:
prefix = 'xmlns:%s' %node.prefix
else:
prefix = 'xmlns'
if not ns_rendered.has_key(prefix) and not ns_local.has_key(prefix):
if not ns_unused_inherited.has_key(prefix):
raise RuntimeError,\
'For exclusive c14n, unable to map prefix "%s" in %s' %(
prefix, node)
ns_local[prefix] = ns_unused_inherited[prefix]
del ns_unused_inherited[prefix]
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n,v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n,v) not in ns_rendered.items():
if inclusive or _utilized(n, node, other_attrs, self.unsuppressedPrefixes):
ns_to_render.append((n, v))
elif not inclusive:
ns_unused_inherited[n] = v
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n,v in ns_to_render:
self._do_attr(n, v)
ns_rendered[n]=v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not inclusive or _in_subset(self.subset,node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs, ns_unused_inherited)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name: W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
    '''Canonicalize(node, output=None, **kw) -> UTF-8

    Canonicalize a DOM document/element node and all descendents.
    Return the text; if output is specified then output.write will
    be called to output the text and None will be returned
    Keyword parameters:
        nsdict: a dictionary of prefix:uri namespace entries
                assumed to exist in the surrounding context
        comments: keep comments if non-zero (default is 0)
        subset: Canonical XML subsetting resulting from XPath
                (default is [])
        unsuppressedPrefixes: do exclusive C14N, and this specifies the
                prefixes that should be inherited.
    '''
    # The deprecated apply() builtin (removed in Python 3) added nothing
    # here; calling the implementation class directly with **kw is the
    # exact equivalent of apply(_implementation, (node, write), kw).
    if output:
        # Caller supplied a sink: stream directly, return None implicitly.
        _implementation(node, output.write, **kw)
    else:
        # No sink: buffer everything and hand back the accumulated text.
        s = StringIO.StringIO()
        _implementation(node, s.write, **kw)
        return s.getvalue()
| gpl-3.0 |
thusoy/grunt-pylint | postinstall.py | 1 | 1377 | #!/usr/bin/env python
import os
import subprocess
import sys
# Versions here must match what is bundled with the package (see package.json)
# Source tarballs installed by main() via "pip install --no-index" from
# tasks/lib, so no network access is needed at npm-install time.
packages = [
    'astroid-2.2.5.tar.gz',
    'isort-4.3.17.tar.gz',
    'lazy-object-proxy-1.3.1.tar.gz',
    'mccabe-0.6.1.tar.gz',
    'pylint-2.3.1.tar.gz',
    'six-1.12.0.tar.gz',
    'typed-ast-1.3.1.tar.gz',
    'wrapt-1.11.1.tar.gz',
]

# Installed in addition to the above only when the interpreter is older
# than Python 3.5 (the ``typing`` backport).
py34_packages = [
    'typing-3.6.6.tar.gz',
]
def main():
    """Install the bundled pylint dependencies into tasks/lib.

    Honors the GRUNT_PYLINT_SKIP_POSTINSTALL environment variable (any
    value starting with 'y'/'Y' skips the install entirely).
    """
    if os.environ.get('GRUNT_PYLINT_SKIP_POSTINSTALL', 'no').lower().startswith('y'):
        return

    # Work on a copy: the original code extended the module-level
    # ``packages`` list in place, so a second call to main() would keep
    # appending duplicates.
    install_packages = list(packages)
    if sys.version_info < (3, 5, 0):
        install_packages.extend(py34_packages)

    install_cmd = [
        'pip',
        'install',
        '--no-index',
        '--no-deps',
        '--ignore-installed',
        '--target', '.',
    ]
    for package in install_packages:
        install_cmd.append('./' + package)

    try:
        subprocess.check_call(install_cmd, cwd='tasks/lib')
    except subprocess.CalledProcessError as e:
        if e.returncode != 2:
            # Bare raise preserves the original traceback ("raise e" reset it).
            raise
        # Try to work around a faulty patch applied by debian to pip
        # https://github.com/pypa/pip/issues/3826
        sys.stderr.write('Installing pylint dependencies failed, retrying with --system\n')
        install_cmd.append('--system')
        subprocess.check_call(install_cmd, cwd='tasks/lib')


if __name__ == '__main__':
    main()
| mit |
os-cloud-storage/openstack-workload-disaster-recovery | dragon/engine/service.py | 1 | 12170 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
import functools
from oslo.config import cfg
from oslo import messaging
from dragon.common import context
from dragon.common import messaging as rpc_messaging
from dragon.db import api as db_api
from dragon.engine.clients import Clients
from dragon.openstack.common import log as logging
from dragon.openstack.common.gettextutils import _
from dragon.openstack.common import service
from dragon.workload_policy import workload_policy as wp
from swiftclient.exceptions import ClientException
LOG = logging.getLogger(__name__)
def request_context(func):
    """Decorator for RPC-facing methods taking ``(self, ctx, ...)``.

    Normalises the incoming context: anything that is not already a
    RequestContext (e.g. a deserialized dict-like context from the RPC
    layer) is round-tripped through its dict form before the wrapped
    method runs.
    """
    @functools.wraps(func)
    def wrapper(self, ctx, *args, **kwargs):
        needs_conversion = (ctx is not None
                            and not isinstance(ctx, context.RequestContext))
        if needs_conversion:
            ctx = context.RequestContext.from_dict(ctx.to_dict())
        return func(self, ctx, *args, **kwargs)
    return wrapper
class EngineService(service.Service):
    """
    Manages the running instances from creation to destruction.
    All the methods in here are called from the RPC backend. This is
    all done dynamically so if a call is made via RPC that does not
    have a corresponding method here, an exception will be thrown when
    it attempts to call into this class. Arguments to these methods
    are also dynamically added and will be named as keyword arguments
    by the RPC caller.
    """

    # Version of this service's RPC API, advertised in the messaging target.
    RPC_API_VERSION = '1.1'

    def __init__(self, host, topic, manager=None):
        # 'manager' is accepted but unused -- kept for interface
        # compatibility with the service launcher.
        super(EngineService, self).__init__()
        self.stg = {}
        self.clients = None
        # self.engine_id = None
        self.target = None
        self.host = host
        self.topic = topic

    def _service_task(self):
        """
        This is a dummy task which gets queued on the service.Service
        threadgroup. Without this service.Service sees nothing running
        i.e has nothing to wait() on, so the process exits..
        This could also be used to trigger periodic non-stack-specific
        housekeeping tasks
        """
        pass

    def start(self):
        # Build the messaging target and start serving RPC requests.
        target = messaging.Target(
            version=self.RPC_API_VERSION, server=cfg.CONF.host,
            topic=self.topic)
        self.target = target
        server = rpc_messaging.get_rpc_server(target, self)
        server.start()

        super(EngineService, self).start()

        # Create dummy service task, because when there is nothing queued
        # on self.tg the process exits
        self.tg.add_timer(cfg.CONF.periodic_interval,
                          self._service_task)

    def stop(self):
        # Stop rpc connection at first for preventing new requests
        LOG.info(_("Attempting to stop engine service..."))
        try:
            # NOTE(review): self.conn is never assigned anywhere in this
            # class (start() keeps the server in a local); this close() will
            # raise AttributeError, silently swallowed below -- confirm
            # intent.
            self.conn.close()
        except Exception:
            pass

        super(EngineService, self).stop()

    @request_context
    def protect(self, cnxt, workload_policy_id, consistent):
        """Run the given workload policy's protect (backup) flow."""
        LOG.debug("In service.protect , consistent- %s" % consistent)
        workload_policy = wp.WorkloadPolicy(cnxt, workload_policy_id,
                                            consistent)
        return workload_policy.protect(cnxt)

    @request_context
    def failover(self, cnxt, container_name):
        """Trigger failover (restore) from the named backup container."""
        # NOTE(review): container_fields is computed but never used.
        container_fields = container_name.split("/")
        return wp.WorkloadPolicy.failover(cnxt, container_name)

    # --- Thin DB-API pass-throughs for actions / resources / policies ---

    @request_context
    def list_actions(self, cnxt, resource_type):
        return db_api.action_get_by_resource_type(cnxt, resource_type)

    @request_context
    def get_default_action_for_resource_type(self, cnxt, resource_type_id):
        LOG.debug("dragon get_default_action_for_resource_type %s"
                  % resource_type_id)
        return db_api.action_get_default_by_resource_type(
            cnxt, resource_type_id)

    @request_context
    def get_resource(self, cnxt, resource_id):
        return db_api.resource_get(cnxt, resource_id)

    @request_context
    def list_resources(self, cnxt):
        return db_api.resource_get_all(cnxt, cnxt.tenant_id)

    @request_context
    def create_resource(self, cnxt, values):
        return db_api.resource_create(cnxt, values)

    @request_context
    def create_workload_policy(self, cnxt, values):
        return db_api.workload_policy_create(cnxt, values)

    @request_context
    def list_workload_policies(self, cnxt):
        return db_api.workload_policy_get_all(cnxt, cnxt.tenant_id)

    @request_context
    def get_workload_policy(self, cnxt, workload_policy_id):
        return db_api.workload_policy_get(cnxt, workload_policy_id)

    @request_context
    def delete_workload_policy(self, cnxt, workload_policy_id):
        return db_api.workload_policy_delete(cnxt, workload_policy_id)

    @request_context
    def set_resource_action(self, cnxt, resource_id, action_id,
                            workload_policy_id):
        # Bind an action to a resource within a workload policy.
        values = {"resource_id": resource_id, "action_id": action_id,
                  "workload_policy_id": workload_policy_id}
        return db_api.resource_actions_create(cnxt, values)

    @request_context
    def update_resource_action(self, cnxt, workload_policy_id, resource_id,
                               tuple_id, action_id):
        values = {"resource_id": resource_id, "action_id": action_id,
                  "workload_policy_id": workload_policy_id}
        return db_api.resource_actions_update(cnxt, tuple_id, values)

    @request_context
    def delete_resource_action(self, cnxt, tuple_id):
        return db_api.resource_actions_delete(cnxt, tuple_id)

    @request_context
    def delete_resource_actions(self, cntx, workload_policy_id):
        return db_api.resource_actions_delete_all_by_policy_id(
            cntx,
            workload_policy_id)

    @request_context
    def get_policy_resource_actions(self, cnxt, workload_policy_id):
        return db_api.resource_actions_get_by_workload(cnxt,
                                                       workload_policy_id)

    @request_context
    def get_policy_resource_action(self, cnxt, workload_policy_id,
                                   resource_id):
        LOG.debug("dragon get_policy_resource_action %s %s"
                  % (workload_policy_id, resource_id))
        return db_api.resource_actions_get(cnxt, workload_policy_id,
                                           resource_id)

    @request_context
    def list_policy_executions(self, cnxt, workload_policy_id):
        return db_api.workload_policy_excution_get_by_workload(
            cnxt, workload_policy_id)

    @request_context
    def get_policy_executions(self, cnxt, policy_execution_id):
        return db_api.workload_policy_excution_get(cnxt, policy_execution_id)

    @request_context
    def get_policy_execution_actions(self, cnxt, policy_execution_id):
        return db_api.workload_policy_excution_actions_get(cnxt,
                                                           policy_execution_id)

    @request_context
    def recovery_list_policies(self, cntx):
        """List policies recoverable from Swift, derived from container names."""
        tenant_name = cntx.tenant
        try:
            headers, containers = Clients(cntx).swift().get_account(
                prefix=tenant_name, full_listing=True)
            # TOF: since cinder backup restore wants containers
            # (and not pseudo-containers)
            # we have to create a container for each policy execution.
            # Here we're doing a sort of hack to derive policy names from
            # container names in this form:
            # tenant_policyName_executionTimeStamp (e.g.,
            # admin_instance_only_20140825140210)

            # get policy names
            policies = {}
            for container in containers:
                try:
                    [policy_name, timestamp] =\
                        wp.get_policy_name_and_timestamp_from_container(
                            tenant_name, container['name'])
                    policies[policy_name] = {'id': policy_name,
                                             'name': policy_name,
                                             "timestamp": timestamp}
                except Exception, e:
                    # keep going
                    LOG.warn(e)

            # convert dict to list
            policies = policies.values()
            return policies
        except ClientException, c:
            if c.http_status == 404:
                # TOF: I wonder why they send an exception for a 404.
                # Anyhow, we'll send back an empty list
                return []
            else:
                raise c

    @request_context
    def recovery_list_policy_executions(self, cntx, policy_name):
        """List past executions of *policy_name*, newest first."""
        LOG.debug("In recovery_list_policy_executions with name: %s"
                  % policy_name)
        tenant_name = cntx.tenant
        headers, containers = Clients(cntx).swift().\
            get_account(prefix=tenant_name + "_" + policy_name,
                        full_listing=True)

        # sort in reverse name order (newer containers first)
        containers.sort(reverse=True, key=lambda k: k['name'])

        policies = []
        # split name and timestamp and put in list of dict
        for container in containers:
            try:
                [policy_name, timestamp] =\
                    wp.get_policy_name_and_timestamp_from_container(
                        tenant_name, container['name'])
                policies.append({'id': container['name'],
                                 'name': policy_name,
                                 "timestamp": timestamp})
            except Exception, e:
                # keep going
                LOG.warn(e)

        LOG.info("policies: %s" % policies)
        return policies

    @request_context
    def create_resource_type(self, cntx, values):
        return db_api.resource_type_create(cntx, values)

    @request_context
    def resource_type_get_by_name(self, cntx, name):
        return db_api.resource_type_get_by_name(cntx, name)

    @request_context
    def create_action(self, cntx, values):
        return db_api.action_create(cntx, values)

    @request_context
    def delete_action(self, cntx, action_id):
        return db_api.action_delete(cntx, action_id)

    @request_context
    def delete_resource(self, cntx, resource_id):
        return db_api.resource_delete(cntx, resource_id)

    @request_context
    def list_resource_types(self, cntx):
        return db_api.resource_type_get_all(cntx)

    @request_context
    def workload_policy_excution_create(self, cntx, values):
        # NOTE(review): "excution" spelling follows the db_api layer.
        return db_api.workload_policy_excution_create(cntx, values)

    @request_context
    def workload_policy_excution_get_by_workload(self, cntx,
                                                 workload_policy_id):
        return db_api.workload_policy_excution_get_by_workload(
            cntx,
            workload_policy_id)

    @request_context
    def workload_policy_execution_delete(self, cntx, workload_policy_exec_id):
        return db_api.workload_policy_execution_delete(cntx,
                                                       workload_policy_exec_id)

    @request_context
    def action_excution_delete_all_by_policy_exec(self,
                                                  cntx,
                                                  workload_policy_exe_id):
        return db_api.action_excution_delete_all_by_policy_exec(
            cntx,
            workload_policy_exe_id)
| apache-2.0 |
openstack/senlin | senlin/common/policy.py | 1 | 2008 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy Engine For Senlin
"""
# from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_policy import opts
from oslo_policy import policy
from senlin.common import exception
from senlin.common import policies
# Process-wide singleton enforcer; created lazily by _get_enforcer().
POLICY_ENFORCER = None
CONF = cfg.CONF

# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
# @lockutils.synchronized('policy_enforcer', 'senlin-')
def _get_enforcer(policy_file=None, rules=None, default_rule=None):
    """Return the process-wide policy enforcer, creating it on first use.

    The enforcer is built once from the given policy file/rules and has
    the in-code policy defaults registered; subsequent calls return the
    cached instance regardless of arguments.
    """
    global POLICY_ENFORCER

    if POLICY_ENFORCER is not None:
        return POLICY_ENFORCER

    enforcer = policy.Enforcer(CONF,
                               policy_file=policy_file,
                               rules=rules,
                               default_rule=default_rule)
    enforcer.register_defaults(policies.list_rules())
    POLICY_ENFORCER = enforcer
    return POLICY_ENFORCER
def enforce(context, rule, target, do_raise=True, *args, **kwargs):
    """Check *rule* against *target* using the request *context*.

    When do_raise is true a failed check raises exception.Forbidden;
    otherwise the enforcer's boolean result is returned.
    """
    if do_raise:
        kwargs['exc'] = exception.Forbidden
    credentials = context.to_dict()
    return _get_enforcer().enforce(rule, target or {}, credentials,
                                   do_raise, *args, **kwargs)
| apache-2.0 |
TomMinor/MayaPerforce | Perforce/DepotClientTreeView.py | 1 | 16413 | import sys
import os
from PySide import QtCore, QtGui
from P4 import P4, P4Exception
# http://stackoverflow.com/questions/32229314/pyqt-how-can-i-set-row-heights-of-qtreeview
class TreeItem(object):
    """One node of the lazily built depot tree.

    Attributes (accessed directly by the model code):
      parentItem -- owning TreeItem, or None for the root
      data       -- payload for this row (list of column values, or None)
      childItems -- ordered child TreeItems
    """

    def __init__(self, data, parent=None):
        self.parentItem = parent
        self.data = data
        self.childItems = []

    def appendChild(self, item):
        """Attach *item* as the last child of this node."""
        self.childItems.append(item)

    def popChild(self):
        """Remove the last child if one exists (used for placeholder rows)."""
        if self.childItems:
            self.childItems.pop()

    def row(self):
        """Return this node's index among its parent's children (root -> 0)."""
        if self.parentItem is None:
            return 0
        return self.parentItem.childItems.index(self)
def reconnect():
    # Re-establish the module-global Perforce session: drop the (possibly
    # stale) connection, reconnect, and log in again.  Call order matters:
    # the password must be set on the live connection before run_login().
    # NOTE(review): the password is hard-coded here (and in the bootstrap
    # below) -- it should come from secure configuration, not source.
    p4.disconnect()
    p4.connect()
    p4.password = "contact_dev"
    p4.run_login()
def epochToTimeStr(time):
    """Format a Unix epoch value (int or numeric string) as a
    'DD/MM/YYYY HH:MM:SS' UTC timestamp string."""
    from datetime import datetime as _dt
    stamp = _dt.utcfromtimestamp(int(time))
    return stamp.strftime("%d/%m/%Y %H:%M:%S")
def perforceListDir(p4path):
    """List the immediate directories and files under *p4path*.

    Works for both depot paths (//depot/...) and client/workspace paths,
    returning a list of dicts with keys name/path/time/type/change,
    sorted by name.  Relies on the module-global ``p4`` connection.
    """
    result = []

    # Normalise away a trailing slash/backslash before appending the glob.
    if p4path[-1] == '/' or p4path[-1] == '\\':
        p4path = p4path[:-1]

    path = "{0}/{1}".format(p4path, '*')

    isDepotPath = p4path.startswith("//depot")

    dirs = []
    files = []

    # Dir silently does nothing if there are no dirs
    try:
        dirs = p4.run_dirs(path)
    except P4Exception:
        pass

    # Files will return an exception if there are no files in the dir
    # Stupid inconsistency imo
    try:
        if isDepotPath:
            files = p4.run_files(path)
        else:
            # Workspace path: list files we "have" and fstat each one for
            # its metadata.
            tmp = p4.run_have(path)
            for fileItem in tmp:
                files += p4.run_fstat(fileItem['clientFile'])
    except P4Exception:
        pass

    result = []

    for dir in dirs:
        if isDepotPath:
            # Strip the leading "//depot/" (8 chars) for the display name.
            dirName = dir['dir'][8:]
        else:
            dirName = dir['dir']

        tmp = {'name': os.path.basename(dirName),
               'path': dir['dir'],
               'time': '',
               'type': 'Folder',
               'change': ''
               }
        result.append(tmp)

    for fileItem in files:
        if isDepotPath:
            # One filelog call per file to detect deleted-at-head files.
            deleteTest = p4.run("filelog", "-t", fileItem['depotFile'])[0]
            isDeleted = deleteTest['action'][0] == "delete"
            fileType = fileItem['type']
            if isDeleted:
                fileType = "{0} [Deleted]".format(fileType)
            # Remove //depot/ from the path for the 'pretty' name
            tmp = {'name': os.path.basename(fileItem['depotFile'][8:]),
                   'path': fileItem['depotFile'],
                   'time': epochToTimeStr(fileItem['time']),
                   'type': fileType,
                   'change': fileItem['change']
                   }
            result.append(tmp)
        else:
            deleteTest = p4.run("filelog", "-t", fileItem['clientFile'])[0]
            isDeleted = deleteTest['action'][0] == "delete"
            fileType = fileItem['headType']
            if isDeleted:
                fileType = "{0} [Deleted]".format(fileType)
            tmp = {'name': os.path.basename(fileItem['clientFile']),
                   'path': fileItem['clientFile'],
                   'time': epochToTimeStr(fileItem['headModTime']),
                   'type': fileType,
                   'change': fileItem['headChange']
                   }
            result.append(tmp)

    return sorted(result, key=lambda k: k['name'])
def perforceIsDir(p4path):
    """Return True if *p4path* is a Perforce directory (has dir entries).

    Any P4 error is treated as "not a directory".  (Python 2 file: note
    the print statement in the handler.)
    """
    try:
        if p4path[-1] == '/' or p4path[-1] == '\\':
            p4path = p4path[:-1]
        result = p4.run_dirs(p4path)
        return len(result) > 0
    except P4Exception as e:
        print e
        return False
def p4Filelist(dir, findDeleted=False):
    """Return metadata dicts for the files directly under depot dir *dir*.

    Combines the head revision of each submitted file (skipping files
    deleted at head unless *findDeleted*) with files opened in the current
    pending change.  Returns [] on any filelog error.
    """
    p4path = '/'.join([dir, '*'])
    try:
        files = p4.run_filelog("-t", p4path)
    except P4Exception as e:
        print e
        return []

    results = []

    for x in files:
        # revisions[0] is the head (most recent) revision.
        latestRevision = x.revisions[0]
        print latestRevision.action, latestRevision.depotFile

        if not findDeleted and latestRevision.action == 'delete':
            continue
        else:
            results.append({'name': latestRevision.depotFile,
                            'action': latestRevision.action,
                            'change': latestRevision.change,
                            'time': latestRevision.time,
                            'type': latestRevision.type
                            }
                           )

    # Files opened in the current change have no submitted time yet.
    filesInCurrentChange = p4.run_opened(p4path)
    for x in filesInCurrentChange:
        print x
        results.append({'name': x['clientFile'],
                        'action': x['action'],
                        'change': x['change'],
                        'time': "",
                        'type': x['type']
                        }
                       )

    return results
class TreeModel(QtCore.QAbstractItemModel):
    """Qt item model exposing a Perforce depot/workspace as a 5-column tree
    (Filename, Type, Modification Time, Action, Change).  Rows are built
    from TreeItem nodes; directories get a single ``None`` placeholder
    child so they can be expanded lazily later."""

    def __init__(self, parent=None):
        super(TreeModel, self).__init__(parent)
        self.rootItem = TreeItem(None)
        # Whether populate() was asked to include files deleted at head.
        self.showDeleted = False

    def populate(self, rootdir="//depot", findDeleted=False):
        """Rebuild the first level of the tree from *rootdir*.

        Depot paths use a plain ``p4 dirs``; other (workspace) paths use
        ``p4 dirs -H`` (dirs with files on the client).  Each directory row
        is filled with its files via p4Filelist() plus a lazy placeholder.
        """
        self.rootItem = TreeItem(None)
        self.showDeleted = findDeleted

        depotPath = False
        if "depot" in rootdir:
            depotPath = True

        p4path = '/'.join([rootdir, '*'])

        if depotPath:
            dirs = p4.run_dirs(p4path)
        else:
            dirs = p4.run_dirs('-H', p4path)

        for dir in dirs:
            dirName = os.path.basename(dir['dir'])
            # subDir = '/'.join( [rootdir, dirName )] )
            data = [dirName, "Folder", "", "", ""]
            treeItem = TreeItem(data, self.rootItem)
            self.rootItem.appendChild(treeItem)
            # Placeholder child so the view shows an expansion arrow.
            treeItem.appendChild(None)

            files = p4Filelist(dir['dir'], findDeleted)

            for f in files:
                fileName = os.path.basename(f['name'])
                data = [fileName, f['type'], f[
                    'time'], f['action'], f['change']]
                fileItem = TreeItem(data, treeItem)
                treeItem.appendChild(fileItem)

    # (Removed: a large commented-out legacy implementation of populate()
    # with recursive scanDirectory/scanDirectoryPerforce helpers.)

    def columnCount(self, parent):
        # Fixed column set; see headerData() for the labels.
        return 5

    def data(self, index, role):
        """Return display text, row size hint, or a type icon for *index*."""
        column = index.column()
        if not index.isValid():
            return None

        if role == QtCore.Qt.DisplayRole:
            item = index.internalPointer()
            return item.data[column]
        elif role == QtCore.Qt.SizeHintRole:
            return QtCore.QSize(20, 20)
        elif role == QtCore.Qt.DecorationRole:
            if column == 1:
                # Pick an icon from the file type string (column 1) and the
                # action (column 3, 'delete' marks deleted files).
                itemType = index.internalPointer().data[column]
                isDeleted = index.internalPointer().data[3] == 'delete'

                if isDeleted:
                    return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0104.png")

                if itemType == "Folder":
                    return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0059.png")
                elif "binary" in itemType:
                    return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0315.png")
                elif "text" in itemType:
                    return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0027.png")
                else:
                    return QtGui.QIcon(r"/home/i7245143/src/MayaPerforce/Perforce/images/File0106.png")

                # NOTE(review): unreachable -- every branch above returns.
                icon = QtGui.QFileIconProvider(QtGui.QFileIconProvider.Folder)
                return icon
            else:
                return None

        return None

    def flags(self, index):
        if not index.isValid():
            return QtCore.Qt.NoItemFlags

        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

    def headerData(self, section, orientation, role):
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return ["Filename", "Type", "Modification Time", "Action", "Change"][section]

        return None

    def index(self, row, column, parent):
        """Create the model index for (row, column) under *parent*."""
        if not self.hasIndex(row, column, parent):
            return QtCore.QModelIndex()

        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()

        childItem = parentItem.childItems[row]
        if childItem:
            return self.createIndex(row, column, childItem)
        else:
            # Placeholder (None) children get an invalid index.
            return QtCore.QModelIndex()

    def parent(self, index):
        if not index.isValid():
            return QtCore.QModelIndex()

        parentItem = index.internalPointer().parentItem

        if parentItem == self.rootItem:
            return QtCore.QModelIndex()

        return self.createIndex(parentItem.row(), 0, parentItem)

    def rootrowcount(self):
        # Convenience accessor (not part of the Qt model API).
        return len(self.rootItem.childItems)

    def rowCount(self, parent):
        if parent.column() > 0:
            return 0

        if not parent.isValid():
            parentItem = self.rootItem
        else:
            parentItem = parent.internalPointer()

        return len(parentItem.childItems)
# allFiles = p4.run_files("//depot/...")
# hiddenFiles = p4.run_files("//depot/.../.*")
# testData = [['assets', '.place-holder'], ['assets', 'heroTV', 'lookDev', 'heroTV_lookDev.ma'], ['assets', 'heroTV', 'lookDev', 'heroTv_lookdev.ma'], ['assets', 'heroTV', 'modelling', '.place-holder'], ['assets', 'heroTV', 'modelling', 'Old_TV.obj'], ['assets', 'heroTV', 'modelling', 'heroTv_wip.ma'], ['assets', 'heroTV', 'rigging', '.place-holder'], ['assets', 'heroTV', 'texturing', '.place-holder'], ['assets', 'heroTV', 'workspace.mel'], ['assets', 'lookDevSourceimages', 'Garage.EXR'], ['assets', 'lookDevSourceimages', 'UVtile.jpg'], ['assets', 'lookDevSourceimages', 'macbeth_background.jpg'], ['assets', 'lookDevTemplate.ma'], ['assets', 'previs_WIP.ma'], ['assets', 'previs_slapcomp_WIP.ma'], ['audio', '.place-holder'], ['finalEdit', 'delivery', '.place-holder'], ['finalEdit', 'projects', '.place-holder'], ['finalEdit', 'test'], ['finalEdit', 'test.ma'], ['shots', '.place-holder'], ['shots', 'space', 'space_sh_010', 'cg', 'maya', 'scenes', 'spc_sh_010_animBuild_WIP.ma']]
# result = {}
# files = [ item['depotFile'][8:].split('/') for item in allFiles ]
# for item in files:
# print item
# from collections import defaultdict
# deepestIndex, deepestPath = max(enumerate(files), key = lambda tup: len(tup[1]))
# Module-level bootstrap of the global Perforce session: if ``p4`` is not
# already defined (printing it raises NameError, caught by the bare
# except), create and log in a new connection, then refresh it.
# NOTE(review): bare except hides real errors, and user/password/port are
# hard-coded credentials in source -- both should be cleaned up.
try:
    print p4
except:
    p4 = P4()
    p4.user = "tminor"
    p4.password = "contact_dev"
    p4.port = "ssl:52.17.163.3:1666"
    p4.connect()
    p4.run_login()

reconnect()
# Iterate upwards until we have the full path to the node
def fullPath(idx):
    """Return the chain of model indices from the root ancestor down to
    *idx* (inclusive), by walking parent() links until an invalid index
    is reached."""
    chain = [idx]
    ancestor = idx.parent()
    while ancestor.isValid():
        chain.append(ancestor)
        ancestor = ancestor.parent()
    chain.reverse()
    return chain
def populateSubDir(idx, root="//depot", findDeleted=False):
    """Lazily fill in the children of directory index *idx*.

    Rebuilds the Perforce path from the index chain, lists its
    subdirectories (``p4 dirs``/``p4 dirs -H``), and -- if the node still
    holds only its ``None`` placeholder -- replaces the placeholder with
    one folder row per subdirectory plus that folder's files.
    """
    idxPathModel = fullPath(idx)
    idxPathSubDirs = [idxPath.data() for idxPath in idxPathModel]
    idxFullPath = os.path.join(*idxPathSubDirs)

    if not idxFullPath:
        idxFullPath = "."

    # children = []
    p4path = '/'.join([root, idxFullPath, '*'])

    depotPath = False
    if "depot" in root:
        depotPath = True

    if depotPath:
        p4subdirs = p4.run_dirs(p4path)
    else:
        p4subdirs = p4.run_dirs('-H', p4path)

    p4subdir_names = [child['dir'] for child in p4subdirs]

    treeItem = idx.internalPointer()

    # print idx.child(0,0).data(), p4subidrs
    # Only expand once: a data()-less first child is the lazy placeholder.
    if not idx.child(0, 0).data() and p4subdirs:
        # Pop empty "None" child
        treeItem.popChild()

        for p4child in p4subdir_names:
            print p4child
            data = [os.path.basename(p4child), "Folder", "", "", ""]
            childData = TreeItem(data, treeItem)
            treeItem.appendChild(childData)
            # New folder rows get their own placeholder for further expansion.
            childData.appendChild(None)

            files = p4Filelist(p4child, findDeleted)

            for f in files:
                fileName = os.path.basename(f['name'])
                data = [fileName, f['type'], f[
                    'time'], f['action'], f['change']]
                fileData = TreeItem(data, childData)
                childData.appendChild(fileData)
def tmp(*args):
    """Slot connected to the view's ``expanded`` signal: walks the expanded
    index's children and populates each via populateSubDir().

    NOTE(review): iteration starts at i = 1, so child row 0 is never
    populated here -- confirm whether that is intentional.
    """
    idx = args[0]
    children = []
    i = 1
    while True:
        child = idx.child(i, 0)
        print i, child.data()
        if not child.isValid():
            break
        children.append(child)
        i += 1
        populateSubDir(child, findDeleted=False)
    return
    # NOTE(review): everything below is dead code (after the return above).
    # It also calls fullPath(idx, model.showDeleted) with two arguments,
    # which would raise a TypeError -- fullPath() takes one argument.
    treeItem = idx.internalPointer()
    idxPathModel = fullPath(idx, model.showDeleted)
    idxPathSubDirs = [idxPath.data() for idxPath in idxPathModel]
    idxFullPath = os.path.join(*idxPathSubDirs)
    pathDepth = len(idxPathSubDirs)
    children = []
    p4path = "//{0}/{1}/*".format(p4.client, idxFullPath)
    print p4path
    p4children = p4.run_dirs("-H", p4path)
    p4children_names = [child['dir'] for child in p4children]
    if idx.child(0, 0).data() == "TMP":
        for p4child in p4children_names:
            data = [p4child, "", "", ""]
            childData = TreeItem(data, idx)
            treeItem.appendChild(childData)
    i = 0
    while True:
        child = idx.child(i, 0)
        if not child.isValid():
            break
        children.append(child)
        i += 1
    for child in children:
        childIdx = child.internalPointer()
        data = ["TEST", "TEST", "TEST", "TEST"]
        childDir = TreeItem(data, childIdx)
        childIdx.appendChild(childDir)
        tmpDir = TreeItem(["TMP", "", "", "", ""], childDir)
        childDir.appendChild(tmpDir)
# view.setModel(model)
# Module-level GUI construction (this file is run as a script/snippet):
# build the tree view, populate the model from the depot, eagerly expand
# the first level, then size the columns and show the window.
view = QtGui.QTreeView()
view.expandAll()
view.setWindowTitle("Perforce Depot Files")
view.resize(512, 512)
# Lazily populate directories as the user expands them.
view.expanded.connect(tmp)

model = TreeModel()
# model.populate("//{0}".format(p4.client), findDeleted=True)
model.populate("//depot", findDeleted=True)
view.setModel(model)

# populateSubDir( view.rootIndex() )
# Pre-populate each top-level directory row.
for i in range(model.rootrowcount()):
    idx = model.index(i, 0, model.parent(QtCore.QModelIndex()))
    treeItem = idx.internalPointer()
    populateSubDir(idx)

# test = TreeItem( ["TEST", "", "", ""], treeItem )
# treeItem.appendChild( test )

view.setColumnWidth(0, 220)
view.setColumnWidth(1, 100)
view.setColumnWidth(2, 120)
view.setColumnWidth(3, 60)

view.show()
| mit |
bestvibes/neo4j-social-network | env/lib/python2.7/site-packages/jinja2/runtime.py | 606 | 19558 | # -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import next, imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
           'TemplateRuntimeError', 'missing', 'concat', 'escape',
           'markup_join', 'unicode_join', 'to_string', 'identity',
           'TemplateNotFound']

#: the name of the function that is used to convert something into
#: a string.  We can just use the text type here.
to_string = text_type

#: the identity function.  Useful for certain things in the environment
identity = lambda x: x

#: unique sentinel used by LoopContext to signal that the iterator is
#: exhausted (cannot collide with any value produced by user iterables).
_last_iteration = object()
def markup_join(seq):
    """Concatenate *seq* into one string, escaping if necessary.

    Each item is first coerced with :func:`soft_unicode`.  As soon as one
    item exposes ``__html__`` the whole result is joined as :class:`Markup`
    (which escapes the plain pieces); otherwise a plain concat is returned.
    """
    pieces = []
    converted = imap(soft_unicode, seq)
    for piece in converted:
        pieces.append(piece)
        if hasattr(piece, '__html__'):
            # Markup-aware join escapes the already-collected plain pieces
            # and consumes the rest of the (partially used) iterator.
            return Markup(u'').join(chain(pieces, converted))
    return concat(pieces)
def unicode_join(seq):
    """Coerce every item of *seq* to unicode and concatenate them."""
    return concat([text_type(item) for item in seq])
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper for context creation.

    When ``shared`` is true the caller's ``vars`` dict is used directly as
    the parent namespace; otherwise a fresh dict merging ``globals`` and
    ``vars`` is built.  ``locals`` entries named ``l_<name>`` that are not
    ``missing`` are copied in as ``<name>``.
    """
    if vars is None:
        vars = {}
    parent = vars if shared else dict(globals or (), **vars)
    if locals:
        # a shared parent must be copied before mutation so the dict the
        # caller passed in stays untouched
        if shared:
            parent = dict(parent)
        for key, value in iteritems(locals):
            if key[:2] == 'l_' and value is not missing:
                parent[key[2:]] = value
    return Context(environment, parent, template_name, blocks)
class TemplateReference(object):
    """The `self` in templates: gives name-based access to blocks."""

    def __init__(self, context):
        self.__context = context

    def __getitem__(self, name):
        # missing block names raise KeyError from the blocks mapping
        block_stack = self.__context.blocks[name]
        return BlockReference(name, self.__context, block_stack, 0)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.__context.name)
class Context(object):
    """The template context holds the variables of a template.  It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should not
    be created by hand.

    The context is immutable.  Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only.  Template filters and global functions marked as
    :func:`contextfunction`\s get the active context passed as first argument
    and are allowed to access the context read-only.

    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`).  Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """
    __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
                 'name', 'blocks', '__weakref__')

    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks.  Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            # probe the parent entry; an out-of-range index raises
            # IndexError which (like KeyError above) is a LookupError
            blocks[index]
        except LookupError:
            return self.environment.undefined('there is no parent block '
                                              'called %r.' % name,
                                              name='super')
        return BlockReference(name, self, blocks, index)

    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default

    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the name looked up.
        """
        # template-local vars shadow the parent namespace
        if key in self.vars:
            return self.vars[key]
        if key in self.parent:
            return self.parent[key]
        return self.environment.undefined(name=key)

    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)

    def get_all(self):
        """Return a copy of the complete context as dict including the
        exported variables.
        """
        return dict(self.parent, **self.vars)

    @internalcode
    def call(__self, __obj, *args, **kwargs):
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.

        The double-underscore parameter names keep ``self``/``obj`` free
        for use as keyword arguments by the called template code.
        """
        if __debug__:
            __traceback_hide__ = True  # hide this frame in tracebacks

        # Allow callable classes to take a context
        fn = __obj.__call__
        for fn_type in ('contextfunction',
                        'evalcontextfunction',
                        'environmentfunction'):
            if hasattr(fn, fn_type):
                __obj = fn
                break

        if isinstance(__obj, _context_function_types):
            if getattr(__obj, 'contextfunction', 0):
                args = (__self,) + args
            elif getattr(__obj, 'evalcontextfunction', 0):
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, 'environmentfunction', 0):
                args = (__self.environment,) + args
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            return __self.environment.undefined('value was undefined because '
                                                'a callable raised a '
                                                'StopIteration exception')

    def derived(self, locals=None):
        """Internal helper function to create a derived context."""
        context = new_context(self.environment, self.name, {},
                              self.parent, True, None, locals)
        context.vars.update(self.vars)
        context.eval_ctx = self.eval_ctx
        # block stacks are copied so the derived context can push new
        # blocks without affecting this context
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context

    # helper that proxies a dict method through get_all(); used below to
    # generate the read-only mapping API
    def _all(meth):
        proxy = lambda self: getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy

    keys = _all('keys')
    values = _all('values')
    items = _all('items')

    # not available on python 3
    if PY2:
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all

    def __contains__(self, name):
        return name in self.vars or name in self.parent

    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve(key)
        if isinstance(item, Undefined):
            raise KeyError(key)
        return item

    def __repr__(self):
        return '<%s %s of %r>' % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name
        )
# register the context as mapping if possible.  ``Mapping`` moved to
# :mod:`collections.abc` in Python 3.3 and the old ``collections`` alias
# was removed in Python 3.10, so try the new location first; the outer
# except keeps the original best-effort behavior on exotic platforms.
try:
    try:
        from collections.abc import Mapping
    except ImportError:
        from collections import Mapping
    Mapping.register(Context)
except ImportError:
    pass
class BlockReference(object):
    """One block on a template reference."""

    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth

    @property
    def super(self):
        """Super the block: a reference one level up the block stack."""
        parent_depth = self._depth + 1
        if parent_depth >= len(self._stack):
            return self._context.environment.undefined(
                'there is no parent block called %r.' % self.name,
                name='super')
        return BlockReference(self.name, self._context, self._stack,
                              parent_depth)

    @internalcode
    def __call__(self):
        rendered = concat(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            return Markup(rendered)
        return rendered
class LoopContext(object):
    """A loop context for dynamic iteration (the template ``loop`` object)."""

    def __init__(self, iterable, recurse=None, depth0=0):
        self._iterator = iter(iterable)
        self._recurse = recurse
        # one element of lookahead so `last` can be answered eagerly
        self._after = self._safe_next()
        self.index0 = -1
        self.depth0 = depth0

        # try to get the length of the iterable early.  This must be done
        # here because there are some broken iterators around where there
        # __len__ is the number of iterations left (i'm looking at your
        # listreverseiterator!).
        try:
            self._length = len(iterable)
        except (TypeError, AttributeError):
            self._length = None

    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]

    first = property(lambda x: x.index0 == 0)
    last = property(lambda x: x._after is _last_iteration)
    index = property(lambda x: x.index0 + 1)     # 1-based index
    revindex = property(lambda x: x.length - x.index0)
    revindex0 = property(lambda x: x.length - x.index)
    depth = property(lambda x: x.depth0 + 1)     # 1-based recursion depth

    def __len__(self):
        return self.length

    def __iter__(self):
        return LoopContextIterator(self)

    def _safe_next(self):
        # returns the sentinel instead of raising StopIteration
        try:
            return next(self._iterator)
        except StopIteration:
            return _last_iteration

    @internalcode
    def loop(self, iterable):
        if self._recurse is None:
            raise TypeError('Tried to call non recursive loop.  Maybe you '
                            "forgot the 'recursive' modifier.")
        return self._recurse(iterable, self._recurse, self.depth0 + 1)

    # a nifty trick to enhance the error message if someone tried to call
    # the the loop without or with too many arguments.
    __call__ = loop
    del loop

    @property
    def length(self):
        if self._length is None:
            # if was not possible to get the length of the iterator when
            # the loop context was created (ie: iterating over a generator)
            # we have to convert the iterable into a sequence and use the
            # length of that.
            iterable = tuple(self._iterator)
            self._iterator = iter(iterable)
            self._length = len(iterable) + self.index0 + 1
        return self._length

    def __repr__(self):
        return '<%s %r/%r>' % (
            self.__class__.__name__,
            self.index,
            self.length
        )
@implements_iterator
class LoopContextIterator(object):
    """The iterator for a loop context."""
    __slots__ = ('context',)

    def __init__(self, context):
        self.context = context

    def __iter__(self):
        return self

    def __next__(self):
        loop = self.context
        loop.index0 += 1
        # the lookahead slot holds the sentinel once the source iterator
        # is exhausted
        if loop._after is _last_iteration:
            raise StopIteration()
        current, loop._after = loop._after, loop._safe_next()
        return current, loop
class Macro(object):
    """Wraps a macro function and maps template call arguments onto it."""

    def __init__(self, environment, func, name, arguments, defaults,
                 catch_kwargs, catch_varargs, caller):
        self._environment = environment
        self._func = func
        self._argument_count = len(arguments)
        self.name = name
        self.arguments = arguments
        self.defaults = defaults          # defaults for the *last* arguments
        self.catch_kwargs = catch_kwargs  # macro accepts **kwargs
        self.catch_varargs = catch_varargs  # macro accepts *varargs
        self.caller = caller              # macro was invoked with {% call %}

    @internalcode
    def __call__(self, *args, **kwargs):
        # try to consume the positional arguments
        arguments = list(args[:self._argument_count])
        off = len(arguments)

        # if the number of arguments consumed is not the number of
        # arguments expected we start filling in keyword arguments
        # and defaults.
        if off != self._argument_count:
            for idx, name in enumerate(self.arguments[len(arguments):]):
                try:
                    value = kwargs.pop(name)
                except KeyError:
                    try:
                        # defaults align with the tail of the argument
                        # list, hence the negative-offset index math
                        value = self.defaults[idx - self._argument_count + off]
                    except IndexError:
                        value = self._environment.undefined(
                            'parameter %r was not provided' % name, name=name)
                arguments.append(value)

        # it's important that the order of these arguments does not change
        # if not also changed in the compiler's `function_scoping` method.
        # the order is caller, keyword arguments, positional arguments!
        if self.caller:
            caller = kwargs.pop('caller', None)
            if caller is None:
                caller = self._environment.undefined('No caller defined',
                                                     name='caller')
            arguments.append(caller)
        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            raise TypeError('macro %r takes no keyword argument %r' %
                            (self.name, next(iter(kwargs))))
        if self.catch_varargs:
            arguments.append(args[self._argument_count:])
        elif len(args) > self._argument_count:
            raise TypeError('macro %r takes not more than %d argument(s)' %
                            (self.name, len(self.arguments)))
        return self._func(*arguments)

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name is None and 'anonymous' or repr(self.name)
        )
@implements_to_string
class Undefined(object):
    """The default undefined type.  This undefined type can be printed and
    iterated over, but every other access will raise an :exc:`UndefinedError`:

    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
                 '_undefined_exception')

    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint      # explicit message, if any
        self._undefined_obj = obj        # object the lookup happened on
        self._undefined_name = name      # attribute/variable that was missing
        self._undefined_exception = exc  # exception class raised on misuse

    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Regular callback function for undefined objects that raises an
        `UndefinedError` on call.
        """
        if self._undefined_hint is None:
            # build a message from the stored lookup information
            if self._undefined_obj is missing:
                hint = '%r is undefined' % self._undefined_name
            elif not isinstance(self._undefined_name, string_types):
                hint = '%s has no element %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
            else:
                hint = '%r has no attribute %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
        else:
            hint = self._undefined_hint
        raise self._undefined_exception(hint)

    @internalcode
    def __getattr__(self, name):
        # dunder lookups must raise AttributeError so protocols
        # (copy, pickle, ...) keep working
        if name[:2] == '__':
            raise AttributeError(name)
        return self._fail_with_undefined_error()

    # every arithmetic/comparison/conversion operation fails loudly
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = \
        _fail_with_undefined_error

    def __eq__(self, other):
        # all undefineds of the same class compare equal
        return type(self) is type(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return id(type(self))

    def __str__(self):
        return u''

    def __len__(self):
        return 0

    def __iter__(self):
        # iterating an undefined yields nothing
        if 0:
            yield None

    def __nonzero__(self):
        return False

    def __repr__(self):
        return 'Undefined'
@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that returns the debug info when printed.

    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ()

    def __str__(self):
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                return u'{{ %s }}' % self._undefined_name
            # NOTE(review): this literal lacks the ``u`` prefix the sibling
            # returns use; harmless on Python 3, worth confirming on 2.
            return '{{ no such element: %s[%r] }}' % (
                object_type_repr(self._undefined_obj),
                self._undefined_name
            )
        return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons.  In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.

    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
      ...
    UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    # even printing, length, iteration, truth testing, equality and hashing
    # raise, so a strict undefined only survives the `defined` test
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
        __ne__ = __bool__ = __hash__ = \
        Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
# (deleting the class attribute does not remove the slot descriptors that
# were already created -- instances stay dict-less)
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
| mit |
vizual54/MissionPlanner | Lib/encodings/hp_roman8.py | 96 | 7543 | """ Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
33471-90901, Hewlet-Packard, June 1989.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-byte charmap codec for the HP Roman-8 character set;
    # both directions delegate to the C-implemented charmap helpers with
    # the module-level translation tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is stateless, so each chunk is encoded independently
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # single-byte charset: no multi-byte state to carry between chunks
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
# stream classes just combine the stateless Codec with the generic
# codecs stream machinery; no extra behavior is needed
class StreamWriter(Codec,codecs.StreamWriter):
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used by the codecs registry for
    the ``hp-roman8`` encoding."""
    return codecs.CodecInfo(
        name='hp-roman8',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x00a1: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00a2: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00a3: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00a4: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00a5: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00a6: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00a7: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00a8: 0x00b4, # ACUTE ACCENT
0x00a9: 0x02cb, # MODIFIER LETTER GRAVE ACCENT (Mandarin Chinese fourth tone)
0x00aa: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x00ab: 0x00a8, # DIAERESIS
0x00ac: 0x02dc, # SMALL TILDE
0x00ad: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ae: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00af: 0x20a4, # LIRA SIGN
0x00b0: 0x00af, # MACRON
0x00b1: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00b2: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00b3: 0x00b0, # DEGREE SIGN
0x00b4: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00b5: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x00b6: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00b7: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00b8: 0x00a1, # INVERTED EXCLAMATION MARK
0x00b9: 0x00bf, # INVERTED QUESTION MARK
0x00ba: 0x00a4, # CURRENCY SIGN
0x00bb: 0x00a3, # POUND SIGN
0x00bc: 0x00a5, # YEN SIGN
0x00bd: 0x00a7, # SECTION SIGN
0x00be: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00bf: 0x00a2, # CENT SIGN
0x00c0: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00c1: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00c2: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00c3: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00c4: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00c5: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x00c6: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00c7: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00c8: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x00c9: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x00ca: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x00cb: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x00cc: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x00cd: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ce: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x00cf: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x00d0: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00d1: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00d2: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x00d3: 0x00c6, # LATIN CAPITAL LETTER AE
0x00d4: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x00d5: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00d6: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x00d7: 0x00e6, # LATIN SMALL LETTER AE
0x00d8: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00d9: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00da: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00db: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dc: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x00dd: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x00de: 0x00df, # LATIN SMALL LETTER SHARP S (German)
0x00df: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e0: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00e1: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00e2: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00e3: 0x00d0, # LATIN CAPITAL LETTER ETH (Icelandic)
0x00e4: 0x00f0, # LATIN SMALL LETTER ETH (Icelandic)
0x00e5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00e6: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00e7: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e8: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e9: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00ea: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00eb: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00ec: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00ed: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ee: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00ef: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00f0: 0x00de, # LATIN CAPITAL LETTER THORN (Icelandic)
0x00f1: 0x00fe, # LATIN SMALL LETTER THORN (Icelandic)
0x00f2: 0x00b7, # MIDDLE DOT
0x00f3: 0x00b5, # MICRO SIGN
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f6: 0x2014, # EM DASH
0x00f7: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00f8: 0x00bd, # VULGAR FRACTION ONE HALF
0x00f9: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00fa: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00fb: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fc: 0x25a0, # BLACK SQUARE
0x00fd: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00fe: 0x00b1, # PLUS-MINUS SIGN
0x00ff: None,
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| gpl-3.0 |
xdevelsistemas/taiga-back-community | taiga/export_import/management/commands/dump_project.py | 2 | 3190 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from taiga.projects.models import Project
from taiga.export_import.services import render_project
import os
import gzip
class Command(BaseCommand):
    """Django management command that dumps one or more Taiga projects
    (looked up by slug) to JSON files, optionally gzip-compressed."""
    help = "Export projects to a json file"

    def add_arguments(self, parser):
        parser.add_argument("project_slugs",
                            nargs="+",
                            help="<project_slug project_slug ...>")

        parser.add_argument("-d", "--dst_dir",
                            action="store",
                            dest="dst_dir",
                            default="./",
                            metavar="DIR",
                            help="Directory to save the json files. ('./' by default)")

        parser.add_argument("-f", "--format",
                            action="store",
                            dest="format",
                            default="plain",
                            metavar="[plain|gzip]",
                            help="Format to the output file plain json or gzipped json. ('plain' by default)")

    def handle(self, *args, **options):
        # validate the destination before touching any project
        dst_dir = options["dst_dir"]

        if not os.path.exists(dst_dir):
            raise CommandError("Directory {} does not exist.".format(dst_dir))

        if not os.path.isdir(dst_dir):
            raise CommandError("'{}' must be a directory, not a file.".format(dst_dir))

        project_slugs = options["project_slugs"]

        for project_slug in project_slugs:
            try:
                project = Project.objects.get(slug=project_slug)
            except Project.DoesNotExist:
                raise CommandError("Project '{}' does not exist".format(project_slug))

            # any value other than "gzip" falls back to plain JSON;
            # files are opened binary because render_project streams bytes
            if options["format"] == "gzip":
                dst_file = os.path.join(dst_dir, "{}.json.gz".format(project_slug))
                with gzip.GzipFile(dst_file, "wb") as f:
                    render_project(project, f)
            else:
                dst_file = os.path.join(dst_dir, "{}.json".format(project_slug))
                with open(dst_file, "wb") as f:
                    render_project(project, f)

            # NOTE(review): django convention would be self.stdout.write()
            print("-> Generate dump of project '{}' in '{}'".format(project.name, dst_file))
| agpl-3.0 |
krytarowski/coreclr | src/scripts/Utilities.py | 68 | 1635 | from filecmp import dircmp
import shutil
import os
def walk_recursively_and_update(dcmp):
    """Recursively synchronize ``dcmp.left`` with ``dcmp.right``.

    Files that differ and entries that exist only on the right are copied
    left; entries that exist only on the left are deleted.  ``dcmp`` is a
    :class:`filecmp.dircmp` comparison of ``(dest, src)``.

    Paths are built with :func:`os.path.join` instead of the original
    ``"/"`` string concatenation so the walk also works on Windows.
    """
    # files present on both sides with different content: copy right -> left
    for name in dcmp.diff_files:
        srcpath = os.path.join(dcmp.right, name)
        destpath = os.path.join(dcmp.left, name)
        print("Updating %s" % (destpath))
        if os.path.isfile(srcpath):
            shutil.copyfile(srcpath, destpath)
        else :
            raise Exception("path: " + srcpath + "is neither a file or folder")

    # entries only on the right: copy files, copy whole directory trees
    for name in dcmp.right_only:
        srcpath = os.path.join(dcmp.right, name)
        destpath = os.path.join(dcmp.left, name)
        print("Updating %s" % (destpath))
        if os.path.isfile(srcpath):
            shutil.copyfile(srcpath, destpath)
        elif os.path.isdir(srcpath):
            shutil.copytree(srcpath, destpath)
        else :
            raise Exception("path: " + srcpath + "is neither a file or folder")

    # entries only on the left are stale: delete them
    for name in dcmp.left_only:
        path = os.path.join(dcmp.left, name)
        print("Deleting %s" % (path))
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        else :
            raise Exception("path: " + path + "is neither a file or folder")

    # recurse into directories that exist on both sides
    for sub_dcmp in dcmp.subdirs.values():
        walk_recursively_and_update(sub_dcmp)
def UpdateDirectory(destpath, srcpath):
    """Mirror srcpath into destpath, creating destpath if it is missing."""
    print("Updating %s with %s" % (destpath, srcpath))
    if not os.path.exists(destpath):
        os.makedirs(destpath)
    walk_recursively_and_update(dircmp(destpath, srcpath))
| mit |
ewandor/home-assistant | homeassistant/components/twilio.py | 24 | 1574 | """
Support for Twilio.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/twilio/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
# pip requirement installed on demand by Home Assistant
REQUIREMENTS = ['twilio==5.7.0']

DOMAIN = 'twilio'

# webhook endpoint Twilio posts inbound messages/calls to
API_PATH = '/api/{}'.format(DOMAIN)

CONF_ACCOUNT_SID = 'account_sid'
CONF_AUTH_TOKEN = 'auth_token'

# key under which the REST client is stored in hass.data
DATA_TWILIO = DOMAIN
DEPENDENCIES = ['http']
# event fired for every inbound Twilio request
RECEIVED_DATA = '{}_data_received'.format(DOMAIN)

# configuration.yaml schema: both credentials are required
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_ACCOUNT_SID): cv.string,
        vol.Required(CONF_AUTH_TOKEN): cv.string
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Twilio component.

    Builds the REST client from the configured credentials, stores it in
    ``hass.data`` for other platforms, and registers the inbound webhook
    view.  Always returns True (setup cannot fail locally).
    """
    # imported lazily so the dependency is only needed once configured
    from twilio.rest import TwilioRestClient
    conf = config[DOMAIN]
    hass.data[DATA_TWILIO] = TwilioRestClient(
        conf.get(CONF_ACCOUNT_SID), conf.get(CONF_AUTH_TOKEN))
    hass.http.register_view(TwilioReceiveDataView())
    return True
class TwilioReceiveDataView(HomeAssistantView):
    """Handle data from Twilio inbound messages and calls."""

    url = API_PATH
    name = 'api:{}'.format(DOMAIN)

    @callback
    def post(self, request):  # pylint: disable=no-self-use
        """Handle Twilio data post.

        Fires the RECEIVED_DATA event with the form payload and replies
        with an empty TwiML document.

        NOTE(review): ``yield from`` inside a ``@callback``-decorated def
        makes this a generator, not a plain callback -- presumably the HTTP
        layer of this HA version drives it as a coroutine; confirm before
        touching.
        """
        from twilio.twiml import Response
        hass = request.app['hass']
        data = yield from request.post()
        hass.bus.async_fire(RECEIVED_DATA, dict(data))
        return Response().toxml()
| apache-2.0 |
ellio167/lammps | tools/fep/bar.py | 7 | 2275 | #!/usr/bin/env python
# bar.py - Bennet's acceptance ratio method for free energy calculation
import sys
import math
# command-line handling: temperature plus two data files are mandatory,
# the bracketing guesses for the free-energy root are optional
if len(sys.argv) < 4:
    print("Bennet acceptance ratio method")
    print("usage: bar.py temperature datafile01 datafile10 [delf_lo delf_hi]")
    print(" datafile01 contains (U_1 - U_0)_0 in 2nd column")
    print(" datafile10 contains (U_0 - U_1)_1 in 2nd column")
    print(" (first column is index, time step, etc. and is ignored)")
    print(" delf_lo and delf_hi are optional guesses bracketing the solution")
    sys.exit()

if len(sys.argv) == 6:
    delf_lo = float(sys.argv[4])
    delf_hi = float(sys.argv[5])
else:
    # wide default bracket used when no guesses are supplied
    delf_lo = -50.0
    delf_hi = 50.0
def fermi(x):
    """Fermi function 1 / (1 + exp(x)).

    For large positive x the result is clamped to 0.0, which also avoids
    overflow in math.exp.
    """
    return 0.0 if x > 100 else 1.0 / (1.0 + math.exp(x))
def avefermi(eng, delf):
    """Mean Fermi-function value over the energy differences in eng.

    Uses the module-level temperature factor ``rt`` to reduce each shifted
    energy to the dimensionless Fermi argument.
    """
    terms = [fermi((du + delf) / rt) for du in eng]
    return sum(terms) / len(terms)
def bareq(delf):
    """Residual of the BAR self-consistency condition at trial delf.

    The root of this function (where the forward and reverse averages
    balance) is the free-energy difference.
    """
    forward = avefermi(eng01, -delf)
    reverse = avefermi(eng10, delf)
    return reverse - forward
def bisect(func, xlo, xhi, xtol = 1.0e-4, maxit = 20):
if xlo > xhi:
aux = xhi
xhi = xlo
xlo = aux
if func(xlo) * func(xhi) > 0.0:
print("error: root not bracketed by interval")
sys.exit(2)
for i in range(maxit):
sys.stdout.write('.')
sys.stdout.flush()
xmid = (xlo + xhi) / 2.0
if func(xlo) * func(xmid) < 0.0:
xhi = xmid
else:
xlo = xmid
if xhi - xlo < xtol:
return xmid
return xmid
print("Bennet acceptance ratio method")
print(sys.argv[1], " K")

# thermal energy k_B*T: 0.008314 kJ/(mol K) divided by 4.184 converts the
# gas constant to kcal/(mol K) before scaling by the temperature
rt = 0.008314 / 4.184 * float(sys.argv[1])

eng01 = []                             # read datafiles
with open(sys.argv[2], 'r') as f:
    for line in f:
        if line.startswith('#'):       # skip comment lines
            continue
        eng01.append(float(line.split()[1]))

eng10 = []
with open(sys.argv[3], 'r') as f:
    for line in f:
        if line.startswith('#'):
            continue
        eng10.append(float(line.split()[1]))

# solve the BAR self-consistency equation for the free-energy difference
sys.stdout.write("solving")
delf = bisect(bareq, delf_lo, delf_hi)
print(".")

# report the converged averages (should be nearly equal) and the result
ave0 = avefermi(eng01, -delf)
ave1 = avefermi(eng10, delf)
print("<...>0 = ", ave0)
print("<...>1 = ", ave1)
print("deltaA = ", delf)
| gpl-2.0 |
r39132/airflow | tests/utils/test_cli_util.py | 4 | 3137 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
from argparse import Namespace
from contextlib import contextmanager
from datetime import datetime
from airflow.utils import cli, cli_action_loggers
class CliUtilTest(unittest.TestCase):
    """Tests for airflow.utils.cli metrics building and action logging."""

    def test_metrics_build(self):
        # _build_metrics should echo the namespace fields plus user,
        # timing information and a Log DAO mirroring those fields
        func_name = 'test'
        exec_date = datetime.utcnow()
        ns = Namespace(dag_id='foo', task_id='bar',
                       subcommand='test', execution_date=exec_date)
        metrics = cli._build_metrics(func_name, ns)

        expected = {'user': os.environ.get('USER'),
                    'sub_command': 'test',
                    'dag_id': 'foo',
                    'task_id': 'bar',
                    'execution_date': exec_date}
        for k, v in expected.items():
            self.assertEqual(v, metrics.get(k))

        self.assertTrue(metrics.get('start_datetime') <= datetime.utcnow())
        self.assertTrue(metrics.get('full_command'))

        log_dao = metrics.get('log')
        self.assertTrue(log_dao)
        self.assertEqual(log_dao.dag_id, metrics.get('dag_id'))
        self.assertEqual(log_dao.task_id, metrics.get('task_id'))
        self.assertEqual(log_dao.execution_date, metrics.get('execution_date'))
        self.assertEqual(log_dao.owner, metrics.get('user'))

    def test_fail_function(self):
        """
        Actual function is failing and fail needs to be propagated.
        :return:
        """
        with self.assertRaises(NotImplementedError):
            fail_func(Namespace())

    def test_success_function(self):
        """
        Test success function but with failing callback.
        In this case, failure should not propagate.
        :return:
        """
        with fail_action_logger_callback():
            success_func(Namespace())
@contextmanager
def fail_action_logger_callback():
    """
    Temporarily register a failing pre-exec callback and restore the
    original callback list when the context exits.

    The restore now happens in a ``finally`` block so the failing
    callback cannot leak into other tests if the body raises.
    :return:
    """
    tmp = cli_action_loggers.__pre_exec_callbacks[:]

    def fail_callback(**_):
        raise NotImplementedError

    cli_action_loggers.register_pre_exec_callback(fail_callback)
    try:
        yield
    finally:
        cli_action_loggers.__pre_exec_callbacks = tmp
# minimal action-logged functions used as fixtures by the tests above
@cli.action_logging
def fail_func(_):
    # always raises so failure propagation through the decorator is testable
    raise NotImplementedError


@cli.action_logging
def success_func(_):
    # no-op success path
    pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
goodwinnk/intellij-community | python/lib/Lib/csv.py | 87 | 15210 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "excel", "excel_tab", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
    """Describe an Excel dialect.

    This must be subclassed (see csv.excel).  Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.
    """
    _name = ""
    _valid = False
    # placeholders
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only subclasses are considered valid dialects; instantiating
        # Dialect directly leaves _valid False.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate attribute validation to the C-level _csv.Dialect
        # constructor, which raises TypeError on malformed values.
        try:
            _Dialect(self)
        except TypeError, e:
            # We do this for compatibility with py2.3
            raise Error(str(e))
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL

# Make the dialect available to reader()/writer() under its registry name.
register_dialect("excel", excel)
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'

# Same as "excel" but tab-separated.
register_dialect("excel-tab", excel_tab)
class DictReader:
    """Read CSV rows as dictionaries keyed by field names (Python 2 API)."""

    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    def next(self):
        row = self.reader.next()
        if self.fieldnames is None:
            # First row supplies the field names when none were given.
            self.fieldnames = row
            row = self.reader.next()
        self.line_num = self.reader.line_num

        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = self.reader.next()
        d = dict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            # Long row: overflow fields are collected under restkey.
            d[self.restkey] = row[lf:]
        elif lf > lr:
            # Short row: missing fields are filled with restval.
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d
class DictWriter:
    """Write dictionaries out as CSV rows ordered by field names."""

    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError, \
                ("extrasaction (%s) must be 'raise' or 'ignore'" %
                 extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def _dict_to_list(self, rowdict):
        # Optionally reject keys that were not declared in fieldnames.
        if self.extrasaction == "raise":
            for k in rowdict.keys():
                if k not in self.fieldnames:
                    raise ValueError, "dict contains fields not in fieldnames"
        # Missing keys fall back to restval.
        return [rowdict.get(key, self.restval) for key in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        rows = []
        for rowdict in rowdicts:
            rows.append(self._dict_to_list(rowdict))
        return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    # Fall back to float so has_header()'s type probing still works.
    complex = float
class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    # NOTE: this is Python 2 code -- it relies on the builtin reduce(),
    # long, and list-returning dict.keys()/.items().

    def __init__(self):
        # in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']

    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample
        """
        # First try the quote-based heuristic; fall back to character
        # frequency analysis when no quoted fields are found.
        quotechar, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error, "Could not determine delimiter"

        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''
            doublequote = False

        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect

    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.
        """
        # Patterns are tried most-specific first: delimited on both sides,
        # at line start, at line end, then bare quoted field.
        # NOTE(review): the doubled '>' in '(?P<delim>>' of the third
        # pattern looks like a typo inherited from CPython 2's csv.py --
        # kept byte-identical here; confirm before changing.
        matches = []
        for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            return ('', None, 0) # (quotechar, delimiter, skipinitialspace)

        # Tally how often each quote char / delimiter candidate appears.
        quotes = {}
        delims = {}
        spaces = 0
        for m in matches:
            n = regexp.groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = regexp.groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = regexp.groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        # The most frequent candidate wins.
        quotechar = reduce(lambda a, b, quotes = quotes:
                           (quotes[a] > quotes[b]) and a or b, quotes.keys())

        if delims:
            delim = reduce(lambda a, b, delims = delims:
                           (delims[a] > delims[b]) and a or b, delims.keys())
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        return (quotechar, delim, skipinitialspace)

    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of freqencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        """
        data = filter(None, data.split('\n'))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = charFrequency[char].items()
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
                                         items)
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - reduce(lambda a, b: (0, a[1] + b[1]),
                                            items)[1])
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = delims.keys()[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)

    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.
        rdr = reader(StringIO(sample), self.sniff(sample))

        header = rdr.next() # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in columnTypes.keys():

                for thisType in [int, long, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])

                # treat longs as ints
                if thisType == long:
                    thisType = int

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
| apache-2.0 |
mjudsp/Tsallis | sklearn/manifold/tests/test_locally_linear.py | 27 | 5247 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    """Barycenter weights should reconstruct each sample from its neighbors."""
    samples = np.array([[0, 1], [1.01, 1.], [2, 0]])

    graph = barycenter_kneighbors_graph(samples, 1)
    expected = [[0., 1., 0.],
                [1., 0., 0.],
                [0., 1., 0.]]
    assert_array_almost_equal(graph.toarray(), expected)

    graph = barycenter_kneighbors_graph(samples, 2)
    # each row of barycenter weights must sum to one
    assert_array_almost_equal(np.sum(graph.toarray(), 1), np.ones(3))
    reconstruction = np.dot(graph.toarray(), samples)
    assert_less(linalg.norm(reconstruction - samples) / samples.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    # note: ARPACK is numerically unstable, so this test will fail for
    # some random seeds. We choose 2 because the tests pass.
    rng = np.random.RandomState(2)

    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    # tiny jitter so the neighborhood matrices are not exactly degenerate
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
                                          n_components=n_components,
                                          random_state=rng)
    tol = 0.1

    N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
    reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
    assert_less(reconstruction_error, tol)

    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert_true(clf.embedding_.shape[1] == n_components)
        reconstruction_error = linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        assert_less(reconstruction_error, tol)
        assert_almost_equal(clf.reconstruction_error_,
                            reconstruction_error, decimal=1)

    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    rng = np.random.RandomState(0)
    # similar test on a slightly more complex manifold:
    # a 2D grid lifted onto a parabolic sheet in 3D
    X = np.array(list(product(np.arange(18), repeat=2)))
    X = np.c_[X, X[:, 0] ** 2 / 18]
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2

    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
                                              n_components=n_components,
                                              method=method, random_state=0)
        # "standard" is expected to reconstruct more tightly
        tol = 1.5 if method == "standard" else 3

        N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
        reconstruction_error = linalg.norm(np.dot(N, X) - X)
        assert_less(reconstruction_error, tol)

        for solver in eigen_solvers:
            clf.set_params(eigen_solver=solver)
            clf.fit(X)
            assert_true(clf.embedding_.shape[1] == n_components)
            reconstruction_error = linalg.norm(
                np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
            details = ("solver: %s, method: %s" % (solver, method))
            assert_less(reconstruction_error, tol, msg=details)
            assert_less(np.abs(clf.reconstruction_error_ -
                               reconstruction_error),
                        tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
    """Invalid ``eigen_solver``/``method`` arguments must raise ValueError."""
    X = np.random.rand(5, 3)

    bad_solver = manifold.LocallyLinearEmbedding(eigen_solver="error")
    assert_raise_message(ValueError, "unrecognized eigen_solver 'error'",
                         bad_solver.fit, X)

    bad_method = manifold.LocallyLinearEmbedding(method="error")
    assert_raise_message(ValueError, "unrecognized method 'error'",
                         bad_method.fit, X)
def test_pipeline():
    # check that LocallyLinearEmbedding works fine as a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    steps = [('filter', manifold.LocallyLinearEmbedding(random_state=0)),
             ('clf', neighbors.KNeighborsClassifier())]
    model = pipeline.Pipeline(steps)
    model.fit(X, y)
    assert_less(.9, model.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    from nose.tools import assert_raises
    # All-identical rows produce a singular weight matrix.
    M = np.ones((10, 3))
    f = ignore_warnings
    # the warning-suppressed call must still surface the ValueError
    assert_raises(ValueError, f(manifold.locally_linear_embedding),
                  M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
rdeheele/odoo | addons/hr_attendance/res_config.py | 434 | 1406 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_attendance_config_settings(osv.osv_memory):
    """HR settings wizard extension: toggle attendance tracking for users."""
    _inherit = 'hr.config.settings'

    _columns = {
        # Checking this box adds every user to the attendance group.
        'group_hr_attendance': fields.boolean('Track attendances for all employees',
            implied_group='base.group_hr_attendance',
            help="Allocates attendance group to all users."),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fnouama/intellij-community | python/helpers/pydev/third_party/pep8/pep8.py | 43 | 75838 | #!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement
__version__ = '1.6.0a0'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
# Default configuration values and config-file locations.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
DEFAULT_IGNORE = 'E123,E226,E24,E704'
if sys.platform == 'win32':
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}

# Token classification sets used by the logical-line checks.
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
# ERRORTOKEN is triggered by backticks in Python 3
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']

# Pre-compiled regular expressions shared by the check functions.
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')

# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""Never mix tabs and spaces.

    The most popular way of indenting Python is with spaces only.  The
    second-most popular way is with tabs only.  Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively.  When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces.  When using -tt
    these warnings become errors.  These options are highly recommended!

    Okay: if a == 0:\n    a = 1\n    b = 1
    E101: if a == 0:\n        a = 1\n\tb = 1
    """
    # Walk the leading whitespace run; the first character that differs
    # from the file's dominant indent character is the offence.
    offset = 0
    for char in physical_line:
        if char not in ' \t':
            break
        if char != indent_char:
            return offset, "E101 indentation contains mixed spaces and tabs"
        offset += 1
def tabs_obsolete(physical_line):
    r"""For new projects, spaces-only are strongly recommended over tabs.

    Okay: if True:\n    return
    W191: if True:\n\treturn
    """
    # Isolate the leading whitespace run and report the first tab in it.
    rest = physical_line.lstrip(' \t')
    indent = physical_line[:len(physical_line) - len(rest)]
    if '\t' in indent:
        return indent.index('\t'), "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""Trailing whitespace is superfluous.

    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.

    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n    \n    bang = 12
    """
    # Peel off the line terminator pieces first so they do not count
    # as trailing whitespace themselves.
    line = physical_line.rstrip('\n')   # chr(10), newline
    line = line.rstrip('\r')            # chr(13), carriage return
    line = line.rstrip('\x0c')          # chr(12), form feed, ^L
    stripped = line.rstrip(' \t\v')
    if stripped == line:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
    r"""Trailing blank lines are superfluous.

    Okay: spam(1)
    W391: spam(1)\n

    However the last line should end with a new line (warning W292).
    """
    if line_number != total_lines:
        return  # only the file's final physical line is examined
    stripped = physical_line.rstrip()
    if not stripped:
        return 0, "W391 blank line at end of file"
    if stripped == physical_line:
        # nothing was stripped, so there is no trailing newline at all
        return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
    r"""Limit all lines to a maximum of 79 characters.

    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side.  The default wrapping on such devices looks
    ugly.  Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.

    Reports error E501.
    """
    line = physical_line.rstrip()
    length = len(line)
    # noqa() suppresses the check for lines carrying a "# noqa" marker.
    if length > max_line_length and not noqa(line):
        # Special case for long URLs in multi-line docstrings or comments,
        # but still report the error when the 72 first chars are whitespaces.
        chunks = line.split()
        if ((len(chunks) == 1 and multiline) or
            (len(chunks) == 2 and chunks[0] == '#')) and \
                len(line) - len(chunks[-1]) < max_line_length - 7:
            return
        if hasattr(line, 'decode'):   # Python 2
            # The line could contain multi-byte characters
            try:
                length = len(line.decode('utf-8'))
            except UnicodeError:
                pass
        if length > max_line_length:
            return (max_line_length, "E501 line too long "
                    "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                blank_before, previous_logical, previous_indent_level):
    r"""Separate top-level function and class definitions with two blank lines.

    Method definitions inside a class are separated by a single blank line.

    Extra blank lines may be used (sparingly) to separate groups of related
    functions.  Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).

    Use blank lines in functions, sparingly, to indicate logical sections.

    Okay: def a():\n    pass\n\n\ndef b():\n    pass
    Okay: def a():\n    pass\n\n\n# Foo\n# Bar\n\ndef b():\n    pass

    E301: class Foo:\n    b = 0\n    def bar():\n        pass
    E302: def a():\n    pass\n\ndef b(n):\n    pass
    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
    E303: def a():\n\n\n\n    pass
    E304: @decorator\n\ndef a():\n    pass
    """
    if line_number < 3 and not previous_logical:
        return  # Don't expect blank lines before the first line

    if previous_logical.startswith('@'):
        # a decorator must sit directly above what it decorates
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
        return

    if blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
        return

    if logical_line.startswith(('def ', 'class ', '@')):
        if indent_level:
            # nested definition: one blank line (or a docstring / deeper
            # nesting) must separate it from the previous statement
            if not (blank_before or previous_indent_level < indent_level or
                    re.match(r'u?r?["\']', previous_logical)):
                yield 0, "E301 expected 1 blank line, found 0"
        elif blank_before != 2:
            yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
    r"""Avoid extraneous whitespace.

    Avoid extraneous whitespace in these situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.

    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E203: if x == 4: print x, y; x, y = y , x
    """
    # matches either "open-bracket space" or "space punctuation/closer"
    pattern = re.compile(r'[[({] | []}),;:]')
    for match in pattern.finditer(logical_line):
        text = match.group()
        char = text.strip()
        pos = match.start()
        if text == char + ' ':
            # whitespace right after an opening bracket
            yield pos + 1, "E201 whitespace after '%s'" % char
        elif logical_line[pos - 1] != ',':
            # whitespace before a closer or punctuation char,
            # unless it follows a comma
            code = 'E202' if char in '}])' else 'E203'
            yield pos, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""Avoid extraneous whitespace around keywords.

    Okay: True and False
    E271: True and False
    E272: True and False
    E273: True and\tFalse
    E274: True\tand False
    """
    # KEYWORD_REGEX captures the whitespace runs on each side of a keyword.
    for match in KEYWORD_REGEX.finditer(logical_line):
        before, after = match.groups()

        if '\t' in before:
            yield match.start(1), "E274 tab before keyword"
        elif len(before) > 1:
            yield match.start(1), "E272 multiple spaces before keyword"

        if '\t' in after:
            yield match.start(2), "E273 tab after keyword"
        elif len(after) > 1:
            yield match.start(2), "E271 multiple spaces after keyword"
def missing_whitespace(logical_line):
    r"""Each comma, semicolon or colon should be followed by whitespace.

    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    for index, char in enumerate(logical_line[:-1]):
        if char not in ',;:':
            continue
        following = logical_line[index + 1]
        if following in ' \t':
            continue
        prefix = logical_line[:index]
        if (char == ':' and prefix.count('[') > prefix.count(']') and
                prefix.rfind('{') < prefix.rfind('[')):
            continue  # Slice syntax, no space required
        if char == ',' and following == ')':
            continue  # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""Use 4 spaces per indentation level.

    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.

    Okay: a = 1
    Okay: if a == 0:\n    a = 1
    E111: a = 1
    E114: # a = 1

    Okay: for item in items:\n    pass
    E112: for item in items:\npass
    E115: for item in items:\n# Hi\n    pass

    Okay: a = 1\nb = 2
    E113: a = 1\n    b = 2
    E116: a = 1\n    # b = 2
    """
    if logical_line:
        shift, suffix = 0, ""
    else:
        # comment-only lines report the E114-E116 variants instead
        shift, suffix = 3, " (comment)"

    if indent_level % 4:
        yield 0, "E11%d %s%s" % (1 + shift,
                                 "indentation is not a multiple of four",
                                 suffix)

    expects_block = previous_logical.endswith(':')
    if expects_block and indent_level <= previous_indent_level:
        yield 0, "E11%d %s%s" % (2 + shift, "expected an indented block",
                                 suffix)
    elif not expects_block and indent_level > previous_indent_level:
        yield 0, "E11%d %s%s" % (3 + shift, "unexpected indentation", suffix)
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""Continuation lines indentation.
    Continuation lines should align wrapped elements either vertically
    using Python's implicit line joining inside parentheses, brackets
    and braces, or using a hanging indent.
    When using a hanging indent these considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
    continuation line.
    Okay: a = (\n)
    E123: a = (\n )
    Okay: a = (\n 42)
    E121: a = (\n 42)
    E122: a = (\n42)
    E123: a = (\n 42\n )
    E124: a = (24,\n 42\n)
    E125: if (\n b):\n pass
    E126: a = (\n 42)
    E127: a = (24,\n 42)
    E128: a = (24,\n 42)
    E129: if (a or\n b):\n pass
    E131: a = (\n 42\n 24)
    """
    # The check walks the token stream once, tracking bracket depth and,
    # per depth, both the "visual" indent column and the hanging indent.
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return
    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')
    row = depth = 0
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())
    for token_type, text, start, end, line in tokens:
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = not last_token_multiline and token_type not in NEWLINE
        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())
            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level
            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')
            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            # once a hanging indent is chosen for this depth, stick to it
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))
            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error
        # look for visual indenting
        if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
                and not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)
        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                # forget the rows collected for the depth just closed
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text
        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]
    if indent_next and expand_indent(line) == indent_level + 4:
        pos = (start[0], indent[0] + 4)
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
    r"""Avoid extraneous whitespace.
    Avoid extraneous whitespace in the following situations:
    - before the open parenthesis that starts the argument list of a
    function call.
    - before the open parenthesis that starts an indexing or slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    # Track the token *before* the previous one so that the tolerated
    # "class A (B):" spelling can be recognized without index arithmetic.
    before_prev_text = None
    for token_type, text, start, end, __ in tokens[1:]:
        if (token_type == tokenize.OP and
                text in '([' and
                start != prev_end and
                (prev_type == tokenize.NAME or prev_text in '}])') and
                # Syntax "class A (B):" is allowed, but avoid it
                before_prev_text != 'class' and
                # Allow "return (a.foo for a in range(5))"
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211 whitespace before '%s'" % text
        before_prev_text = prev_text
        prev_type, prev_text, prev_end = token_type, text, end
def whitespace_around_operator(logical_line):
    r"""Avoid extraneous whitespace around an operator.
    Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    for match in OPERATOR_REGEX.finditer(logical_line):
        before, after = match.groups()
        # Check the whitespace on each side of the operator with the same
        # rule: a tab is always wrong, more than one space is wrong too.
        sides = ((1, before, "E223 tab before operator",
                  "E221 multiple spaces before operator"),
                 (2, after, "E224 tab after operator",
                  "E222 multiple spaces after operator"))
        for group, whitespace, tab_message, spaces_message in sides:
            if '\t' in whitespace:
                yield match.start(group), tab_message
            elif len(whitespace) > 1:
                yield match.start(group), spaces_message
def missing_whitespace_around_operator(logical_line, tokens):
    r"""Surround operators with a single space on either side.
    - Always surround these binary operators with a single space on
    either side: assignment (=), augmented assignment (+=, -= etc.),
    comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
    Booleans (and, or, not).
    - If operators with different priorities are used, consider adding
    whitespace around the operators with the lowest priorities.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]
    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    # need_space is a small state machine:
    #   False            -- no pending operator
    #   True             -- space strictly required after prev operator
    #   None             -- operator just seen, spacing optional (set below)
    #   (pos, strict)    -- operator seen; strict is False when surrounding
    #                       space is optional but must be symmetric
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in SKIP_COMMENTS:
            continue
        # 'lambda' opens an implicit parameter list, so '=' inside it is a
        # default value, not an assignment
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                else:
                    # Optional whitespace was omitted on both sides: report
                    # the milder arithmetic/modulo/bitwise code instead.
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if (prev_text in '}])' if prev_type == tokenize.OP
                        else prev_text not in KEYWORDS):
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    for match in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        # Report at the first whitespace character after the separator.
        offset = match.start() + 1
        separator = match.group()[0]
        code = "E242 tab" if '\t' in match.group() else "E241 multiple spaces"
        yield offset, "%s after '%s'" % (code, separator)
def whitespace_around_named_parameter_equals(logical_line, tokens):
    r"""Don't use spaces around the '=' sign in function arguments.
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    message = "E251 unexpected spaces around keyword / parameter equals"
    depth = 0
    check_next = False  # the previous token was a keyword '='
    prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.NL:
            continue
        if check_next:
            # Any gap between '=' and the following token is a violation.
            check_next = False
            if start != prev_end:
                yield (prev_end, message)
        elif token_type == tokenize.OP:
            if text == '(':
                depth += 1
            elif text == ')':
                depth -= 1
            elif text == '=' and depth:
                # '=' inside parentheses is a keyword/default '='; flag a
                # gap before it now, and check the gap after it next turn.
                check_next = True
                if start != prev_end:
                    yield (prev_end, message)
        prev_end = end
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).
    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type != tokenize.COMMENT:
            # NL tokens don't advance prev_end, so an inline comment is
            # measured against the last real code token.
            if token_type != tokenize.NL:
                prev_end = end
            continue
        inline_comment = line[:start[1]].strip()
        if inline_comment:
            if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                yield (prev_end,
                       "E261 at least two spaces before inline comment")
        head, __, body = text.partition(' ')
        # head is everything before the first space; a prefix other than
        # '#'/'#:' (e.g. '#!', '##') is suspect for inline comments.
        if head in '#:':
            bad_prefix = False
        else:
            bad_prefix = head.lstrip('#')[:1] or '#'
        if inline_comment:
            if bad_prefix or body[:1] in WHITESPACE:
                yield start, "E262 inline comment should start with '# '"
        elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
            if bad_prefix != '#':
                yield start, "E265 block comment should start with '# '"
            elif body:
                yield start, "E266 too many leading '#' for block comment"
def imports_on_separate_lines(logical_line):
    r"""Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclas import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    # A ';' before the comma means the comma belongs to a later statement.
    if comma > -1 and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def compound_statements(logical_line):
    r"""Compound statements (on the same line) are generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements.
    Also avoid folding such long lines!
    Always use a def statement instead of an assignment statement that
    binds a lambda expression directly to a name.
    Okay: if foo == 'blah':\n    do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four();  # useless semicolon
    E704: def f(x): return 2*x
    E731: f = lambda x: 2*x
    """
    line = logical_line
    last_char = len(line) - 1
    # Pass 1: every ':' that is not inside an open {, [ or ( and is not
    # the last character introduces an inline suite.
    found = line.find(':')
    while -1 < found < last_char:
        before = line[:found]
        counts = dict((char, before.count(char)) for char in '{}[]()')
        if (counts['{'] <= counts['}'] and   # {'a': 1} (dict)
                counts['['] <= counts[']'] and   # [1:2] (slice)
                counts['('] <= counts[')']):     # (annotation)
            if LAMBDA_REGEX.search(before):
                yield 0, "E731 do not assign a lambda expression, use a def"
                break
            if before.startswith('def '):
                yield 0, "E704 multiple statements on one line (def)"
            else:
                yield found, "E701 multiple statements on one line (colon)"
        found = line.find(':', found + 1)
    # Pass 2: semicolons separate statements (E702) or trail uselessly (E703).
    found = line.find(';')
    while -1 < found:
        if found < last_char:
            yield found, "E702 multiple statements on one line (semicolon)"
        else:
            yield found, "E703 statement ends with a semicolon"
        found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
    r"""Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n       123]
    E502: aaa = ("bbb " \\n       "ccc")
    Okay: aaa = [123,\n       123]
    Okay: aaa = ("bbb "\n       "ccc")
    Okay: aaa = "bbb " \\n    "ccc"
    """
    backslash = None  # position of the trailing backslash, if any
    prev_start = prev_end = depth = 0
    for token_type, text, start, end, line in tokens:
        # A token starting on a new physical line while brackets are open
        # means the previous line's backslash was redundant.
        if start[0] != prev_start and depth and backslash:
            yield backslash, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            # Token ends on a new physical line: remember whether that
            # line is explicitly joined with a backslash.
            if line.rstrip('\r\n').endswith('\\'):
                backslash = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
            elif text in ')]}':
                depth -= 1
def comparison_to_singleton(logical_line, noqa):
    r"""Comparison to singletons should use "is" or "is not".
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    same = (match.group(1) == '==')
    singleton = match.group(2)
    # Suggest the identity test, and additionally the bare truth test
    # when the comparison amounts to one.
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    code = 'E711' if singleton == 'None' else 'E712'
    nonzero = ((singleton == 'True' and same) or
               (singleton == 'False' and not same))
    msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(1), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_negative(logical_line):
    r"""Negative comparison should be done using "not in" and "is not".
    Okay: if x not in y:\n    pass
    Okay: assert (X in Y or X is Z)
    Okay: if not (X in Y):\n    pass
    Okay: zz = x is not y
    E713: Z = not X in Y
    E713: if not X.B in Y:\n    pass
    E714: if not X is Y:\n    pass
    E714: Z = not X.B is Y
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if not match:
        return
    offset = match.start(1)
    if match.group(2) == 'in':
        yield offset, "E713 test for membership should be 'not in'"
    else:
        yield offset, "E714 test for object identity should be 'is not'"
def comparison_type(logical_line):
    r"""Object type comparisons should always use isinstance().
    Do not compare types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        # Comparing two non-obvious types (e.g. type(a1) is type(b1))
        # is explicitly tolerated.
        return
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
    r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
    Okay: if "alph" in d:\n    print d["alph"]
    W601: assert d.has_key('alph')
    """
    if noqa:
        return
    offset = logical_line.find('.has_key(')
    if offset != -1:
        yield offset, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    r"""When raising an exception, use "raise ValueError('message')".
    The older form is removed in Python 3.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    # "raise E, V, tb" re-raising forms are handled by a different check.
    if RERAISE_COMMA_REGEX.match(logical_line):
        return
    yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    r"""New code should always use != instead of <>.
    The older syntax is removed in Python 3.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset != -1:
        yield offset, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    r"""Backticks are removed in Python 3: use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset != -1:
        yield offset, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# Dispatch on the running interpreter: under Python 2, '' == b'' is true,
# so the first branch defines the 2.x flavours of these helpers.
if '' == ''.encode():
    # Python 2: implicit encoding.
    def readlines(filename):
        """Read the source code."""
        with open(filename, 'rU') as f:
            return f.readlines()
    # Python 2 str has no isidentifier(); approximate it with a regex match.
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        """Read the source code."""
        try:
            with open(filename, 'rb') as f:
                # Honour the PEP 263 coding cookie detected by tokenize;
                # `lines` holds the raw lines already consumed to find it.
                (coding, lines) = tokenize.detect_encoding(f.readline)
                f = TextIOWrapper(f, coding, line_buffering=True)
                return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            # Fall back if file encoding is improperly declared
            with open(filename, encoding='latin-1') as f:
                return f.readlines()
    isidentifier = str.isidentifier
    def stdin_get_value():
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
# Matches "# noqa" / "# nopep8" markers (case-insensitive) anywhere in a line.
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
    r"""Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('   \t')
    8
    >>> expand_indent('        \t')
    16
    """
    if '\t' not in line:
        # Fast path: without tabs the indent is just the leading whitespace.
        return len(line) - len(line.lstrip())
    width = 0
    for char in line:
        if char == ' ':
            width += 1
        elif char == '\t':
            # Advance to the next tab stop (multiples of 8).
            width += 8 - width % 8
        else:
            break
    return width
def mute_string(text):
    """Replace contents with 'xxx' to prevent syntax matching.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    quote = text[-1]
    # The body starts after any string modifier (e.g. u or r) and the
    # opening quote; the first occurrence of the quote char marks it.
    body_start = text.index(quote) + 1
    body_end = len(text) - 1
    # Widen the quoted delimiters for triple-quoted strings.
    if text[-3:] in ('"""', "'''"):
        body_start += 2
        body_end -= 2
    return text[:body_start] + 'x' * (body_end - body_start) + text[body_end:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Consume the body of the current hunk; removed lines ('-')
            # don't exist in the new file and don't count.
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            row, nrows = [int(group or '1')
                          for group in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            path = line[4:].split('\t', 1)[0]
            # Drop the conventional "b/" prefix of git-style diffs.
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    return dict((os.path.join(parent, filepath), rows)
                for (filepath, rows) in rv.items()
                if rows and filename_match(filepath, patterns))
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.
    Return a list of absolute paths.
    """
    # Pass through anything already parsed (a list) or empty.
    if not value or isinstance(value, list):
        return value
    result = []
    for item in value.split(','):
        if '/' in item:
            # Only entries that look like paths are anchored to `parent`.
            item = os.path.abspath(os.path.join(parent, item))
        result.append(item.rstrip('/'))
    return result
def filename_match(filename, patterns, default=True):
    """Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    for pattern in patterns:
        if fnmatch(filename, pattern):
            return True
    return False
# On interpreters where the tokenizer does not emit an NL after a
# comment-only line (COMMENT_WITH_NL), a comment token that spans the
# whole physical line also counts as an end-of-line token.
if COMMENT_WITH_NL:
    def _is_eol_token(token):
        return (token[0] in NEWLINE or
                (token[0] == tokenize.COMMENT and token[1] == token[4]))
else:
    def _is_eol_token(token):
        return token[0] in NEWLINE
##############################################################################
# Framework to run all checks
##############################################################################
# Registry of check functions/classes, keyed by kind; each value maps a
# check to a (codes, argument_names) tuple (see register_check below).
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
    """Register a new check object."""
    def _add(kind, codes, args):
        # Extend an existing registration, or create a new one.
        known = _checks[kind].get(check)
        if known:
            known[0].extend(codes or [])
        else:
            _checks[kind][check] = (codes or [''], args)
    if inspect.isfunction(check):
        arg_names = inspect.getargspec(check)[0]
        # The first argument name decides the kind of check.
        if arg_names and arg_names[0] in ('physical_line', 'logical_line'):
            if codes is None:
                # Error codes default to those mentioned in the docstring.
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _add(arg_names[0], codes, arg_names)
    elif inspect.isclass(check):
        if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
            _add('tree', codes, None)
def init_checks_registry():
    """Register all globally visible functions.
    The first argument name is either 'physical_line' or 'logical_line'.
    """
    # register_check filters out any function whose first parameter is not
    # one of the two recognized names, so scanning the whole module is safe.
    mod = inspect.getmodule(register_check)
    for (name, function) in inspect.getmembers(mod, inspect.isfunction):
        register_check(function)
# Populate the registry at import time.
init_checks_registry()
class Checker(object):
    """Load a Python source file, tokenize it, check coding style."""
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        # Without explicit options, build a default StyleGuide from kwargs;
        # passing both options and kwargs is a usage error.
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.multiline = False  # in a multiline string?
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        # Input may come from explicit lines, stdin ('-' or no filename),
        # or the named file.
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                # Remember the error; it is reported from generate_tokens().
                (exc_type, exc) = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
    def report_invalid_syntax(self):
        """Check if the syntax is valid."""
        # Called from an exception handler: inspect the active exception
        # and turn it into an E901 report at the best available position.
        (exc_type, exc) = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                # (lineno, col, ...): keep only the position pair.
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)
    def readline(self):
        """Get the next line from the input buffer."""
        if self.line_number >= self.total_lines:
            return ''
        line = self.lines[self.line_number]
        self.line_number += 1
        # Side effect: remember the first indent character seen, so the
        # indent_char argument is available to indentation checks.
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        return line
    def run_check(self, check, argument_names):
        """Run a check plugin."""
        # Checks declare what they need by parameter name; gather the
        # matching attributes from this checker instance.
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)
    def check_physical(self, line):
        """Run all physical checks on a raw input line."""
        self.physical_line = line
        for name, check, argument_names in self._physical_checks:
            result = self.run_check(check, argument_names)
            if result is not None:
                (offset, text) = result
                self.report_error(self.line_number, offset, text, check)
                if text[:4] == 'E101':
                    # E101 reported a mixed-indentation line: switch the
                    # expected indent character to the one actually used.
                    self.indent_char = line[0]
    def build_tokens_line(self):
        """Build a logical line from tokens."""
        # Returns `mapping`: a list of (logical_offset, physical_position)
        # pairs used to translate check offsets back to file positions.
        logical = []
        comments = []
        length = 0
        prev_row = prev_col = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                # Hide string contents so checks can't match inside them.
                text = mute_string(text)
            if prev_row:
                (start_row, start_col) = start
                if prev_row != start_row:    # different row
                    prev_text = self.lines[prev_row - 1][prev_col - 1]
                    if prev_text == ',' or (prev_text not in '{[('
                                            and text not in '}])'):
                        text = ' ' + text
                elif prev_col != start_col:  # different column
                    text = line[prev_col:start_col] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (prev_row, prev_col) = end
        self.logical_line = ''.join(logical)
        self.noqa = comments and noqa(''.join(comments))
        return mapping
    def check_logical(self):
        """Build a line from tokens and run all logical checks on it."""
        self.report.increment_logical_line()
        mapping = self.build_tokens_line()
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print('   ' + name)
            for offset, text in self.run_check(check, argument_names) or ():
                if not isinstance(offset, tuple):
                    # The check yielded an offset into the logical line;
                    # map it back to a (row, col) physical position.
                    for token_offset, pos in mapping:
                        if offset <= token_offset:
                            break
                    offset = (pos[0], pos[1] + offset - token_offset)
                self.report_error(offset[0], offset[1], text, check)
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
        self.blank_lines = 0
        self.tokens = []
    def check_ast(self):
        """Build the file's AST and run all AST checks."""
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, __ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                # Honour "# noqa" on the flagged source line.
                if not self.lines or not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)
    def generate_tokens(self):
        """Tokenize the file, run physical line checks and yield tokens."""
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline)
        try:
            for token in tokengen:
                if token[2][0] > self.total_lines:
                    return
                self.maybe_check_physical(token)
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()
    def maybe_check_physical(self, token):
        """If appropriate (based on token), check current physical line(s)."""
        # Called after every token, but act only on end of line.
        if _is_eol_token(token):
            # Obviously, a newline token ends a single physical line.
            self.check_physical(token[4])
        elif token[0] == tokenize.STRING and '\n' in token[1]:
            # Less obviously, a string that contains newlines is a
            # multiline string, either triple-quoted or with internal
            # newlines backslash-escaped. Check every physical line in the
            # string *except* for the last one: its newline is outside of
            # the multiline string, so we consider it a regular physical
            # line, and will check it like any other physical line.
            #
            # Subtleties:
            # - we don't *completely* ignore the last line; if it contains
            #   the magical "# noqa" comment, we disable all physical
            #   checks for the entire multiline string
            # - have to wind self.line_number back because initially it
            #   points to the last line of the string, and we want
            #   check_physical() to give accurate feedback
            if noqa(token[4]):
                return
            self.multiline = True
            self.line_number = token[2][0]
            for line in token[1].split('\n')[:-1]:
                self.check_physical(line + '\n')
                self.line_number += 1
            self.multiline = False
    def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                # Only outside brackets can a newline end a logical line.
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line.  Trim the
                        # trailing newline off the token so the logical
                        # line is built consistently across interpreters.
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            # Flush any trailing tokens (file without final newline).
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""
    # Subclasses set this to True to echo each offending filename once.
    print_filename = False
    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Results
        self.elapsed = 0
        self.total_errors = 0
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}
    def start(self):
        """Start the timer."""
        self._start_time = time.time()
    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)
    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1
    def error(self, line_number, offset, text, check):
        """Report an error, according to options.

        Returns the error code when the error is counted, None when it is
        ignored or merely expected.
        """
        # The error code is the first four characters of the message text.
        code = text[:4]
        if self._ignore_code(code):
            return
        if code in self.counters:
            self.counters[code] += 1
        else:
            # First occurrence of this code: remember its message text.
            self.counters[code] = 1
            self.messages[code] = text[5:]
        # Don't care about expected errors or warnings
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        return code
    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors
    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        return sum([self.counters[key]
                    for key in self.messages if key.startswith(prefix)])
    def get_statistics(self, prefix=''):
        """Get statistics for message codes that start with the prefix.
        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]
    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)
    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if self.elapsed:
            for key in self._benchmark_keys:
                print('%-7d %s per second (%d total)' %
                      (self.counters[key] / self.elapsed, key,
                       self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print only the filenames."""
    # Makes BaseReport.error() print the filename on its first error.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""
    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        # Resolve a named format ('default', 'pylint', ...) or use a
        # user-supplied format string as-is.
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8
    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        # Errors are buffered and printed sorted in get_file_results().
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)
    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        # Defer printing; without --repeat only the first occurrence of
        # each code is shown.
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code
    def get_file_results(self):
        """Print the result and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                # Echo the offending line with a caret under the offset.
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                print(re.sub(r'\S', ' ', line[:offset]) + '^')
            if self._show_pep8 and doc:
                print('    ' + doc.strip())
        return self.file_errors
class DiffReport(StandardReport):
    """Report errors only on lines that appear in the supplied diff."""

    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Maps filename -> collection of line numbers touched by the diff.
        self._selected = options.selected_lines

    def error(self, line_number, offset, text, check):
        """Forward the error only when the line is part of the diff."""
        if line_number in self._selected[self.filename]:
            return super(DiffReport, self).error(
                line_number, offset, text, check)
        return None
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""
    def __init__(self, *args, **kwargs):
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', None)
        parser = kwargs.pop('parser', None)
        # build options from dict
        options_dict = dict(*args, **kwargs)
        arglist = None if parse_argv else options_dict.get('paths', None)
        options, self.paths = process_options(
            arglist, parse_argv, config_file, parser)
        if options_dict:
            # Explicit keyword options override parsed/config values.
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']
        self.runner = self.input_file
        self.options = options
        if not options.reporter:
            options.reporter = BaseReport if options.quiet else StandardReport
        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()
    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report
    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop cleanly with partial results.
            print('... stopped')
        report.stop()
        return report
    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)
    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            # Prune excluded subdirs in-place so os.walk skips them.
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))
    def excluded(self, filename, parent=None):
        """Check if the file should be excluded.
        Check if 'options.exclude' contains a pattern that matches filename.
        """
        if not self.options.exclude:
            return False
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            filename = os.path.join(parent, filename)
        filename = os.path.abspath(filename)
        return filename_match(filename, self.options.exclude)
    def ignore_code(self, code):
        """Check if the error code should be ignored.
        If 'options.select' contains a prefix of the error code,
        return False. Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        # Short prefixes explicitly selected (e.g. 'E1') are never ignored.
        if len(code) < 4 and any(s.startswith(code)
                                 for s in self.options.select):
            return False
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))
    def get_checks(self, argument_name):
        """Get all the checks for this category.
        Find all globally visible functions where the first argument name
        starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            # Keep the check if at least one of its codes is not ignored.
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pep8', version=__version__):
    """Build the OptionParser with all pep8 command-line options."""
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    # Options that may also be supplied through a configuration file.
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
        'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W)")
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report only lines changed according to the "
                           "unified diff received on STDIN")
    group = parser.add_option_group("Testing Options")
    # Regression-test options are only offered when running from a
    # checkout that contains the test suite.
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read both user configuration and local configuration.

    Precedence (lowest to highest): parser defaults, user config file,
    project config found by walking up from the common prefix of *args*,
    then the original command-line options from *arglist*.
    """
    config = RawConfigParser()
    user_conf = options.config
    if user_conf and os.path.isfile(user_conf):
        if options.verbose:
            print('user configuration: %s' % user_conf)
        config.read(user_conf)
    local_dir = os.curdir
    # Walk up from the common prefix of the input paths looking for a
    # project-level config file (tox.ini / setup.cfg).
    parent = tail = args and os.path.abspath(os.path.commonprefix(args))
    while tail:
        if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
            local_dir = parent
            if options.verbose:
                print('local configuration: in %s' % parent)
            break
        (parent, tail) = os.path.split(parent)
    pep8_section = parser.prog
    if config.has_section(pep8_section):
        # Map option dest -> type (or action for flags) for coercion below.
        option_list = dict([(o.dest, o.type or o.action)
                            for o in parser.option_list])
        # First, read the default values
        (new_options, __) = parser.parse_args([])
        # Second, parse the configuration
        for opt in config.options(pep8_section):
            if opt.replace('_', '-') not in parser.config_options:
                print(" unknown option '%s' ignored" % opt)
                continue
            if options.verbose > 1:
                print(" %s = %s" % (opt, config.get(pep8_section, opt)))
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = config.getint(pep8_section, opt)
            elif opt_type == 'string':
                value = config.get(pep8_section, opt)
                if normalized_opt == 'exclude':
                    # Exclude patterns are resolved relative to the config.
                    value = normalize_paths(value, local_dir)
            else:
                assert opt_type in ('store_true', 'store_false')
                value = config.getboolean(pep8_section, opt)
            setattr(new_options, normalized_opt, value)
        # Third, overwrite with the command-line options
        (options, __) = parser.parse_args(arglist, values=new_options)
    options.doctest = options.testsuite = False
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args.

    Returns an ``(options, args)`` tuple where *args* is the list of
    paths (or diff line selections) to check.
    """
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        if config_file is True:
            config_file = DEFAULT_CONFIG
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location (default: %default)")
    # Don't read the command line if the module is used as a library.
    if not arglist and not parse_argv:
        arglist = []
    # If parse_argv is True and arglist is None, arguments are
    # parsed from the command line (sys.argv)
    (options, args) = parser.parse_args(arglist)
    options.reporter = None
    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    # With -q on the command line, report filenames only.
    options.reporter = parse_argv and options.quiet == 1 and FileReport
    # Normalize comma-separated option strings into lists.
    options.filename = options.filename and options.filename.split(',')
    options.exclude = normalize_paths(options.exclude)
    options.select = options.select and options.select.split(',')
    options.ignore = options.ignore and options.ignore.split(',')
    if options.diff:
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)
    return options, args
def _main():
    """Parse options and run checks on Python source."""
    import signal
    # Handle "Broken pipe" gracefully
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass  # not supported on Windows
    pep8style = StyleGuide(parse_argv=True, config_file=True)
    options = pep8style.options
    if options.doctest or options.testsuite:
        # Developer modes: run the project's own regression tests.
        from testsuite.support import run_tests
        report = run_tests(pep8style)
    else:
        report = pep8style.check_files()
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        # Non-zero exit status signals that style errors were found.
        sys.exit(1)
if __name__ == '__main__':
_main() | apache-2.0 |
biswajitsahu/kuma | vendor/packages/translate/convert/poreplace.py | 29 | 2227 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Simple script to do replacements on translated strings inside po files.
"""
# this is used as the basis for other scripts, it currently replaces nothing
from translate.storage import po
class poreplace:
    """Base converter that rewrites translated strings inside PO files.

    The default implementation is an identity transform; subclasses
    override convertstring() to perform actual replacements.
    """

    def convertstring(self, postr):
        """Transform a single translated string (identity by default)."""
        return postr

    def convertfile(self, thepofile):
        """Apply convertstring() to every translation in the PO store."""
        for unit in thepofile.units:
            unit.msgstr = [self.convertstring(text) for text in unit.msgstr]
        return thepofile

    def convertpo(self, inputfile, outputfile, templatefile):
        """Parse inputfile as PO, convert it and write it to outputfile.

        templatefile is unused but required by the converter framework.
        Returns 1 on success, 0 when the store is (or becomes) empty.
        """
        inputstore = po.pofile(inputfile)
        if inputstore.isempty():
            return 0
        converted = self.convertfile(inputstore)
        if converted.isempty():
            return 0
        outputfile.write(str(converted))
        return 1
def main(converterclass, argv=None):
    """Command-line entry point wiring *converterclass* into the framework."""
    # handle command line options
    from translate.convert import convert
    instance = converterclass()
    formats = {
        "po": ("po", instance.convertpo),
        "pot": ("pot", instance.convertpo),
    }
    option_parser = convert.ConvertOptionParser(formats, usepots=True)
    option_parser.run(argv)
if __name__ == '__main__':
    # Running the base class directly performs an identity conversion
    # (replaces nothing); it mainly serves as a template for subclasses.
    main(poreplace)
| mpl-2.0 |
felixma/nova | nova/objects/block_device.py | 11 | 13367 | # Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import block_device
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
_BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance']
BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
def _expected_cols(expected_attrs):
    """Return only the attrs that correspond to joinable DB columns."""
    joined_fields = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
    return [name for name in expected_attrs if name in joined_fields]
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
                         base.NovaObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Add instance_uuid to get_by_volume_id method
    # Version 1.2: Instance version 1.14
    # Version 1.3: Instance version 1.15
    # Version 1.4: Instance version 1.16
    # Version 1.5: Instance version 1.17
    # Version 1.6: Instance version 1.18
    # Version 1.7: Add update_or_create method
    # Version 1.8: Instance version 1.19
    # Version 1.9: Instance version 1.20
    # Version 1.10: Changed source_type field to BlockDeviceSourceTypeField.
    # Version 1.11: Changed destination_type field to
    #               BlockDeviceDestinationTypeField.
    # Version 1.12: Changed device_type field to BlockDeviceTypeField.
    # Version 1.13: Instance version 1.21
    # Version 1.14: Instance version 1.22
    # Version 1.15: Instance version 1.23
    VERSION = '1.15'
    fields = {
        'id': fields.IntegerField(),
        'instance_uuid': fields.UUIDField(),
        'instance': fields.ObjectField('Instance', nullable=True),
        'source_type': fields.BlockDeviceSourceTypeField(nullable=True),
        'destination_type': fields.BlockDeviceDestinationTypeField(
                                nullable=True),
        'guest_format': fields.StringField(nullable=True),
        'device_type': fields.BlockDeviceTypeField(nullable=True),
        'disk_bus': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'device_name': fields.StringField(nullable=True),
        'delete_on_termination': fields.BooleanField(default=False),
        'snapshot_id': fields.StringField(nullable=True),
        'volume_id': fields.StringField(nullable=True),
        'volume_size': fields.IntegerField(nullable=True),
        'image_id': fields.StringField(nullable=True),
        'no_device': fields.BooleanField(default=False),
        'connection_info': fields.StringField(nullable=True),
    }
    obj_relationships = {
        'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'),
                     ('1.4', '1.16'), ('1.5', '1.17'), ('1.6', '1.18'),
                     ('1.8', '1.19'), ('1.9', '1.20'), ('1.13', '1.21'),
                     ('1.14', '1.22'), ('1.15', '1.23')],
    }
    @staticmethod
    def _from_db_object(context, block_device_obj,
                        db_block_device, expected_attrs=None):
        # Populate a BlockDeviceMapping object from a DB row; optional
        # joined attributes are only set when requested via expected_attrs.
        if expected_attrs is None:
            expected_attrs = []
        for key in block_device_obj.fields:
            if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
                continue
            block_device_obj[key] = db_block_device[key]
        if 'instance' in expected_attrs:
            my_inst = objects.Instance(context)
            my_inst._from_db_object(context, my_inst,
                                    db_block_device['instance'])
            block_device_obj.instance = my_inst
        block_device_obj._context = context
        block_device_obj.obj_reset_changes()
        return block_device_obj
    def _create(self, context, update_or_create=False):
        """Create the block device record in the database.
        In case the id field is set on the object, and if the instance is set
        raise an ObjectActionError. Resets all the changes on the object.
        Returns None
        :param context: security context used for database calls
        :param update_or_create: consider existing block devices for the
            instance based on the device name and swap, and only update
            the ones that match. Normally only used when creating the
            instance for the first time.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api':
            raise exception.ObjectActionError(
                action='create',
                reason='BlockDeviceMapping cannot be '
                       'created in the API cell.')
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'instance' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='instance assigned')
        cells_create = update_or_create or None
        if update_or_create:
            db_bdm = db.block_device_mapping_update_or_create(
                context, updates, legacy=False)
        else:
            db_bdm = db.block_device_mapping_create(
                context, updates, legacy=False)
        self._from_db_object(context, self, db_bdm)
        # NOTE(alaski): bdms are looked up by instance uuid and device_name
        # so if we sync up with no device_name an entry will be created that
        # will not be found on a later update_or_create call and a second bdm
        # create will occur.
        if cell_type == 'compute' and db_bdm.get('device_name') is not None:
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(
                context, self, create=cells_create)
    @base.remotable
    def create(self):
        """Create the DB record; fails if the object already has an id."""
        self._create(self._context)
    @base.remotable
    def update_or_create(self):
        """Create the DB record, updating a matching existing one if any."""
        self._create(self._context, update_or_create=True)
    @base.remotable
    def destroy(self):
        """Delete the DB record and propagate the deletion to the cells."""
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        db.block_device_mapping_destroy(self._context, self.id)
        delattr(self, base.get_attrname('id'))
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_destroy_at_top(self._context, self.instance_uuid,
                                         device_name=self.device_name,
                                         volume_id=self.volume_id)
    @base.remotable
    def save(self):
        """Persist pending changes; raises BDMNotFound if the row is gone."""
        updates = self.obj_get_changes()
        if 'instance' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='instance changed')
        updates.pop('id', None)
        updated = db.block_device_mapping_update(self._context, self.id,
                                                 updates, legacy=False)
        if not updated:
            raise exception.BDMNotFound(id=self.id)
        self._from_db_object(self._context, self, updated)
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'compute':
            create = False
            # NOTE(alaski): If the device name has just been set this bdm
            # likely does not exist in the parent cell and we should create it.
            # If this is a modification of the device name we should update
            # rather than create which is why None is used here instead of True
            if 'device_name' in updates:
                create = None
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(self._context, self,
                                                  create=create)
    @base.remotable_classmethod
    def get_by_volume_id(cls, context, volume_id,
                         instance_uuid=None, expected_attrs=None):
        """Look up a BDM by volume id, optionally pinning the instance.

        :raises: VolumeBDMNotFound if no mapping exists for the volume.
        :raises: InvalidVolume if instance_uuid is given and does not match.
        """
        if expected_attrs is None:
            expected_attrs = []
        db_bdm = db.block_device_mapping_get_by_volume_id(
            context, volume_id, _expected_cols(expected_attrs))
        if not db_bdm:
            raise exception.VolumeBDMNotFound(volume_id=volume_id)
        # NOTE (ndipanov): Move this to the db layer into a
        # get_by_instance_and_volume_id method
        if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
            raise exception.InvalidVolume(
                reason=_("Volume does not belong to the "
                         "requested instance."))
        return cls._from_db_object(context, cls(), db_bdm,
                                   expected_attrs=expected_attrs)
    @property
    def is_root(self):
        # The boot device has boot_index 0 by convention.
        return self.boot_index == 0
    @property
    def is_volume(self):
        # True when the device is backed by a (Cinder) volume.
        return (self.destination_type ==
                fields.BlockDeviceDestinationType.VOLUME)
    @property
    def is_image(self):
        # True when the device is sourced from a Glance image.
        return self.source_type == fields.BlockDeviceSourceType.IMAGE
    def get_image_mapping(self):
        """Return this BDM in legacy image-mapping dict form."""
        return block_device.BlockDeviceDict(self).get_image_mapping()
    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute (only 'instance' is supported)."""
        if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })
        self.instance = objects.Instance.get_by_uuid(self._context,
                                                     self.instance_uuid)
        self.obj_reset_changes(fields=['instance'])
@base.NovaObjectRegistry.register
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: BlockDeviceMapping <= version 1.1
    # Version 1.2: Added use_slave to get_by_instance_uuid
    # Version 1.3: BlockDeviceMapping <= version 1.2
    # Version 1.4: BlockDeviceMapping <= version 1.3
    # Version 1.5: BlockDeviceMapping <= version 1.4
    # Version 1.6: BlockDeviceMapping <= version 1.5
    # Version 1.7: BlockDeviceMapping <= version 1.6
    # Version 1.8: BlockDeviceMapping <= version 1.7
    # Version 1.9: BlockDeviceMapping <= version 1.8
    # Version 1.10: BlockDeviceMapping <= version 1.9
    # Version 1.11: BlockDeviceMapping <= version 1.10
    # Version 1.12: BlockDeviceMapping <= version 1.11
    # Version 1.13: BlockDeviceMapping <= version 1.12
    # Version 1.14: BlockDeviceMapping <= version 1.13
    # Version 1.15: BlockDeviceMapping <= version 1.14
    # Version 1.16: BlockDeviceMapping <= version 1.15
    VERSION = '1.16'
    fields = {
        'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
    }
    obj_relationships = {
        'objects': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.1'),
                    ('1.3', '1.2'), ('1.4', '1.3'), ('1.5', '1.4'),
                    ('1.6', '1.5'), ('1.7', '1.6'), ('1.8', '1.7'),
                    ('1.9', '1.8'), ('1.10', '1.9'), ('1.11', '1.10'),
                    ('1.12', '1.11'), ('1.13', '1.12'), ('1.14', '1.13'),
                    ('1.15', '1.14'), ('1.16', '1.15')],
    }
    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
        """Return all BDMs for the given instance uuid."""
        db_bdms = db.block_device_mapping_get_all_by_instance(
                context, instance_uuid, use_slave=use_slave)
        return base.obj_make_list(
                context, cls(), objects.BlockDeviceMapping, db_bdms or [])
    def root_bdm(self):
        """Return the boot device of the list, or None if there is none."""
        try:
            return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
        except StopIteration:
            return
def block_device_make_list(context, db_list, **extra_args):
    """Build a BlockDeviceMappingList object from a list of DB rows."""
    bdm_list = objects.BlockDeviceMappingList(context)
    return base.obj_make_list(context, bdm_list,
                              objects.BlockDeviceMapping, db_list,
                              **extra_args)
def block_device_make_list_from_dicts(context, bdm_dicts_list):
    """Build a BlockDeviceMappingList from a list of plain BDM dicts."""
    mappings = [objects.BlockDeviceMapping(context=context, **bdm_dict)
                for bdm_dict in bdm_dicts_list]
    return BlockDeviceMappingList(objects=mappings)
| apache-2.0 |
remodoy/reauth-python | reauth/reauth.py | 1 | 2556 | import json
import ssl
import time
import urllib.request
from datetime import timedelta
import logging
import Crypto.PublicKey.RSA
import python_jwt as jwt
_accepted_sign_algs = ["PS512"]
_pubkey_cache_living_time = 60*10 # 10min
_pubkey_cache_exp_time = 0
_pubkey_cache = ""
_iat_skew = timedelta(minutes=5)
logger = logging.getLogger("reauth")
def get_public_key(reauth_url, verify=True):
    """
    Get ReAuth server public key from server.

    The key is cached in-process for ``_pubkey_cache_living_time`` seconds
    to avoid refetching it on every verification.

    It's recommended in production setup to store public key locally for example in configuration.

    :param reauth_url: ReAuth server base url. E.g. https://reauth.example.com
    :param verify: Verify TLS certificate chain and hostname, default True
    :return: Public key in text format
    """
    logger.debug("get_public_key(%s, verify=%s)" % (reauth_url, verify))
    global _pubkey_cache_exp_time, _pubkey_cache
    if time.time() < _pubkey_cache_exp_time:
        # Cache still fresh: skip the network round-trip.
        return _pubkey_cache
    ctx = ssl.create_default_context()
    if not verify:
        # Bug fix: assigning ``ctx.check_hostname = verify`` only skipped
        # the hostname match while the certificate chain was still
        # validated, so verify=False did not actually disable TLS
        # verification. check_hostname must be turned off *before*
        # verify_mode can be set to CERT_NONE.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    with urllib.request.urlopen(reauth_url.rstrip("/") + "/key.pub",
                                timeout=15, context=ctx) as f:
        public_key = f.read()
    _pubkey_cache = public_key
    _pubkey_cache_exp_time = time.time() + _pubkey_cache_living_time
    return public_key
def fetch_reauth_token(code, reauth_url, verify=True):
    """
    Fetch ReAuth token from ReAuth server using code passed in redirect.

    :param code: Code
    :param reauth_url: ReAuth server base url. E.g. https://reauth.example.com
    :param verify: Verify TLS certificate chain and hostname, default True
    :return: Token in text format, or None when the response has no token
    """
    logger.debug("fetch_reauth_token(%s, %s, verify=%s)" % (code, reauth_url, verify))
    ctx = ssl.create_default_context()
    if not verify:
        # Bug fix: ``ctx.check_hostname = verify`` alone only skipped the
        # hostname match; the certificate chain was still validated.
        # check_hostname must be disabled before verify_mode is relaxed.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    url = reauth_url.rstrip("/") + "/api/v1/token/" + code + "/"
    with urllib.request.urlopen(url, timeout=15, context=ctx) as f:
        data = json.loads(f.read().decode("utf-8"))
    if 'jwtToken' in data:
        return data['jwtToken']
    return None
def decode_reauth_token(token, public_key):
    """
    Decode and verify a ReAuth token signature.

    :param token: Token in text format
    :param public_key: Server public key.
    :return: Dictionary containing Claims from token
    """
    logger.debug("decode_reauth_token(%s, %s)" % (token, public_key))
    rsa_key = Crypto.PublicKey.RSA.importKey(public_key)
    _header, claims = jwt.verify_jwt(
        token, pub_key=rsa_key, allowed_algs=_accepted_sign_algs,
        iat_skew=_iat_skew)
    return claims
| mit |
dpwrussell/openmicroscopy | components/tools/OmeroPy/src/omero/plugins/chgrp.py | 1 | 4086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
"""
chgrp plugin
Plugin read by omero.cli.Cli during initialization. The method(s)
defined here will be added to the Cli class for later use.
"""
from omero.cli import CLI, GraphControl, ExperimenterGroupArg
import sys
HELP = """Move data between groups
Move entire graphs of data based on the ID of the top-node.
Examples:
# In each case move an image to group 101
omero chgrp 101 Image:1
omero chgrp Group:101 Image:2
omero chgrp ExperimenterGroup:101 Image:3
# In both cases move five images to the group named "My Lab"
omero chgrp "My Lab" Image:51,52,53,54,56
omero chgrp "My Lab" Image:51-54,56
# Move a plate but leave all images in the original group
omero chgrp 201 Plate:1 --exclude Image
# Move all images contained under a project
omero chgrp 101 Project/Dataset/Image:53
# Move all images contained under two projects
omero chgrp 101 Project/Image:201,202
# Do a dry run of a move reporting the outcome if the move had been run
omero chgrp 101 Dataset:53 --dry-run
# Do a dry run of a move, reporting all the objects
# that would have been moved
omero chgrp 101 Dataset:53 --dry-run --report
"""
class ChgrpControl(GraphControl):
    """CLI control implementing ``omero chgrp`` on top of GraphControl."""
    def cmd_type(self):
        # Request class the generic GraphControl machinery instantiates
        # for this command.
        import omero
        import omero.all
        return omero.cmd.Chgrp2
    def _pre_objects(self, parser):
        # Positional target group, parsed before the object arguments.
        parser.add_argument(
            "grp", nargs="?", type=ExperimenterGroupArg,
            help="""Group to move objects to""")
    def _process_request(self, req, args, client):
        # Resolve the target group, validate that the current user is a
        # member of it, then stamp the group id onto every request before
        # delegating submission to GraphControl.
        # Retrieve group id
        gid = args.grp.lookup(client)
        if gid is None:
            self.ctx.die(196, "Failed to find group: %s" % args.grp.orig)
        # Retrieve group
        import omero
        try:
            group = client.sf.getAdminService().getGroup(gid)
        except omero.ApiUsageException:
            self.ctx.die(196, "Failed to find group: %s" % args.grp.orig)
        # Check session owner is member of the target group
        uid = client.sf.getAdminService().getEventContext().userId
        ids = [x.child.id.val for x in group.copyGroupExperimenterMap()]
        if uid not in ids:
            self.ctx.die(197, "Current user is not member of group: %s" %
                         group.id.val)
        # Set requests group; a SkipHead wrapper carries the real request
        # in its ``request`` attribute, so the group id goes there.
        if isinstance(req, omero.cmd.DoAll):
            for request in req.requests:
                if isinstance(request, omero.cmd.SkipHead):
                    request.request.groupId = gid
                else:
                    request.groupId = gid
        else:
            if isinstance(req, omero.cmd.SkipHead):
                req.request.groupId = gid
            else:
                req.groupId = gid
        super(ChgrpControl, self)._process_request(req, args, client)
    def print_detailed_report(self, req, rsp, status):
        # Unwrap DoAllRsp containers and report each Chgrp2Response found.
        import omero
        if isinstance(rsp, omero.cmd.DoAllRsp):
            for response in rsp.responses:
                if isinstance(response, omero.cmd.Chgrp2Response):
                    self.print_chgrp_response(response)
        elif isinstance(rsp, omero.cmd.Chgrp2Response):
            self.print_chgrp_response(rsp)
    def print_chgrp_response(self, rsp):
        # Print moved ("included") and deleted object ids grouped by type.
        if rsp.includedObjects:
            self.ctx.out("Included objects")
            objIds = self._get_object_ids(rsp.includedObjects)
            for k in objIds:
                self.ctx.out(" %s:%s" % (k, objIds[k]))
        if rsp.deletedObjects:
            self.ctx.out("Deleted objects")
            objIds = self._get_object_ids(rsp.deletedObjects)
            for k in objIds:
                self.ctx.out(" %s:%s" % (k, objIds[k]))
# When loaded as a plugin by omero.cli, the injected ``register`` helper
# is defined and registers the control; when run directly as a script,
# build a standalone CLI and invoke it with the command-line arguments.
try:
    register("chgrp", ChgrpControl, HELP)
except NameError:
    if __name__ == "__main__":
        cli = CLI()
        cli.register("chgrp", ChgrpControl, HELP)
        cli.invoke(sys.argv[1:])
| gpl-2.0 |
Jaccorot/python | renzongxian/0014/0014.py | 40 | 1050 | # Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-22
# Python 3.4
"""
第 0014 题: 纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:
{
"1":["张三",150,120,100],
"2":["李四",90,99,95],
"3":["王五",60,66,68]
}
请将上述内容写到 student.xls 文件中。
"""
import xlwt3
import json
# This part is adapted from agmcs's code:
# https://github.com/Show-Me-the-Code/python/blob/master/agmcs/0014/0014.py
def write_txt_to_xls(txt_file):
    """Convert the JSON student data in ``txt_file`` to ``student.xls``.

    The input must be a JSON object whose keys are consecutive row
    numbers starting at "1" and whose values are lists of cell values,
    e.g. ``{"1": ["Tom", 150, 120, 100], ...}``.  Column 0 of each xls
    row is the row number, the remaining columns are the list values.
    """
    # Read from the txt file; ``with`` guarantees the handle is closed
    # (the original opened it and never closed it).
    with open(txt_file, 'r') as txt_object:
        file_content = json.load(txt_object)
    # Write to the xls file
    xls_object = xlwt3.Workbook()
    sheet = xls_object.add_sheet('student')
    for i in range(len(file_content)):
        sheet.write(i, 0, i + 1)
        data = file_content[str(i + 1)]
        for j, value in enumerate(data):
            sheet.write(i, j + 1, value)
    xls_object.save('student.xls')
# Script entry point: convert the sample ``student.txt`` next to this
# script into ``student.xls``.
if __name__ == '__main__':
    write_txt_to_xls('student.txt')
| mit |
wan-qy/electron | script/create-node-headers.py | 4 | 3364 | #!/usr/bin/env python
import argparse
import os
import shutil
import sys
import tarfile
from lib.util import safe_mkdir, scoped_cwd
# Repository layout anchors, resolved relative to this script's location.
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
NODE_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'node')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
# Only files with these extensions are copied into the header tarballs.
HEADERS_SUFFIX = [
  '.h',
  '.gypi',
]
# Directories (relative to NODE_DIR) scanned recursively for headers.
HEADERS_DIRS = [
  'src',
  'deps/http_parser',
  'deps/zlib',
  'deps/uv',
  'deps/npm',
  'deps/mdb_v8',
]
# Individual top-level files that are always included.
HEADERS_FILES = [
  'common.gypi',
  'config.gypi',
]
def main():
  """Copy the node headers and package one tarball per published name."""
  args = parse_args()
  safe_mkdir(args.directory)
  # The identical header set is published under three historical names;
  # copy and archive it once per name, in the same order as before.
  dir_templates = [
      'node-{0}',
      'iojs-{0}',
      'iojs-{0}-headers',
  ]
  for template in dir_templates:
    headers_dir = os.path.join(args.directory, template.format(args.version))
    copy_headers(headers_dir)
    create_header_tarball(args.directory, headers_dir)
def parse_args():
  """Return parsed CLI options: --version (required), --directory (optional)."""
  p = argparse.ArgumentParser(description='create node header tarballs')
  p.add_argument('-v', '--version', required=True,
                 help='Specify the version')
  p.add_argument('-d', '--directory', required=False, default=DIST_DIR,
                 help='Specify the output directory')
  return p.parse_args()
def copy_headers(dist_headers_dir):
  """Copy node and V8 headers into ``dist_headers_dir``.

  Only files whose extension appears in HEADERS_SUFFIX are copied; the
  directory structure relative to each source root is preserved.
  """
  safe_mkdir(dist_headers_dir)
  # Copy standard node headers from the node repository.
  for include_path in HEADERS_DIRS:
    abs_path = os.path.join(NODE_DIR, include_path)
    for dirpath, _, filenames in os.walk(abs_path):
      for filename in filenames:
        extension = os.path.splitext(filename)[1]
        if extension not in HEADERS_SUFFIX:
          continue
        copy_source_file(os.path.join(dirpath, filename), NODE_DIR,
                         dist_headers_dir)
  for other_file in HEADERS_FILES:
    copy_source_file(os.path.join(NODE_DIR, other_file), NODE_DIR,
                     dist_headers_dir)
  # Copy V8 headers from chromium's repository.
  src = os.path.join(SOURCE_ROOT, 'vendor', 'download', 'libchromiumcontent',
                     'src')
  for dirpath, _, filenames in os.walk(os.path.join(src, 'v8')):
    for filename in filenames:
      extension = os.path.splitext(filename)[1]
      if extension not in HEADERS_SUFFIX:
        continue
      # V8 headers land under deps/ so the layout matches node's tarball.
      copy_source_file(os.path.join(dirpath, filename), src,
                       os.path.join(dist_headers_dir, 'deps'))
def create_header_tarball(directory, dist_headers_dir):
  """Create ``<dist_headers_dir>.tar.gz`` from inside ``directory``.

  The archive stores the headers directory under its path relative to
  ``directory``, so it unpacks to e.g. ``node-<version>/...``.
  """
  target = dist_headers_dir + '.tar.gz'
  with scoped_cwd(directory):
    # ``with`` closes the tarball even if ``add`` raises, so a failed run
    # cannot leak an open, half-written archive handle (the original only
    # closed it on the success path).
    with tarfile.open(name=target, mode='w:gz') as tarball:
      tarball.add(os.path.relpath(dist_headers_dir))
def copy_source_file(source, start, destination):
  """Copy ``source`` under ``destination``, keeping its path relative to ``start``."""
  target = os.path.join(destination, os.path.relpath(source, start=start))
  safe_mkdir(os.path.dirname(target))
  shutil.copy2(source, target)
if __name__ == '__main__':
  # main() returns None, so sys.exit(None) reports success (exit code 0).
  sys.exit(main())
| mit |
has2k1/numpy | numpy/lib/_datasource.py | 148 | 21266 | """A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seemlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import shutil
# Keep a reference to the builtin open(); the module-level open() defined
# later in this file shadows the name, but _cache() still needs the real one.
_open = open
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
# Module-level singleton used by DataSource to look up openers by extension.
# Instantiated before the module-level open() below is defined, so its
# None entry refers to the builtin open.
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    If ``path`` is an URL it is downloaded first, stored in the
    `DataSource` directory `destpath`, and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path` with: 'r' for reading, 'w' for writing, 'a'
        to append.  Available modes depend on the type of object behind
        `path`.  Default is 'r'.
    destpath : str, optional
        Directory where a downloaded source file is stored.  If None, a
        temporary directory is created.  Defaults to the current
        directory.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    Convenience wrapper equivalent to ``DataSource(destpath).open(path,
    mode)``.

    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')
    A generic data source file (file, http, ftp, ...).
    DataSources can be local files or remote files/URLs. The files may
    also be compressed or uncompressed. DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.
    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use. If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.
    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::
        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True
    Temporary directories are deleted when the DataSource is deleted.
    Examples
    --------
    ::
        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html') # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'
        >>> ds = DataSource(None) # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'
    """
    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True
    def __del__(self):
        # Remove temp directories
        try:
            if self._istmpdest:
                shutil.rmtree(self._destpath)
        except AttributeError:
            # __init__ raised before the attributes were set (e.g. mkdtemp
            # failed); there is nothing to clean up, and raising from
            # __del__ would only produce noise at interpreter shutdown.
            pass
    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.
        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()
    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False
    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.
        *Returns*:
            base, zip_ext : {tuple}
        """
        # NOTE: the returned extension keeps its leading dot ('.gz'),
        # matching the keys used by _file_openers.
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None
    def _possible_names(self, filename):
        """Return a tuple containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names
    def _isurl(self, path):
        """Test if path is a net location. Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse
        # BUG : URLs require a scheme string ('http://') to be used.
        # www.google.com will fail.
        # Should we prepend the scheme for those that don't have it and
        # test that also? Similar to the way we append .gz and test for
        # for compressed versions of files.
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)
    def _cache(self, path):
        """Cache the file specified by path.
        Creates a copy of the file in the datasource cache.
        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError
        upath = self.abspath(path)
        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))
        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            shutil.copyfile(path, upath)
        return upath
    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.
        If path is an URL, _findfile will cache a local copy and return the
        path to the cached file. If path is a local file, _findfile will
        return a path to that local file.
        The search will include possible compressed versions of the file
        and return the first occurence found.
        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)
        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None
    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.
        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.
        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.
        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.
        Notes
        -----
        The functionality is based on `os.path.abspath`.
        """
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse
        # TODO: This should be more robust. Handles case where path includes
        # the destpath, but not other sub-paths. Failing case:
        # path = /home/guido/datafile.txt
        # destpath = /home/alex/
        # upath = self.abspath(path)
        # upath == '/home/alex/home/guido/datafile.txt'
        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)
    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path) # for Windows
        return path
    def exists(self, path):
        """
        Test if path exists.
        Test if `path` exists as (and in this order):
        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.
        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.
        Returns
        -------
        out : bool
            True if `path` exists.
        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL. `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.
        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError
        # Test local path
        if os.path.exists(path):
            return True
        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True
        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                del netfile
                return True
            except URLError:
                return False
        return False
    def open(self, path, mode='r'):
        """
        Open and return file-like object.
        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.
        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
            'a' to append. Available modes depend on the type of object
            specified by `path`. Default is 'r'.
        Returns
        -------
        out : file object
            File object.
        """
        # TODO: There is no support for opening a file for writing which
        # doesn't exist yet (creating a file). Should there be?
        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        # used to store URLs in self._destpath.
        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")
        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # BZ2File does not support the '+' (update) modes, so strip
                # it.  BUGFIX: the original compared against 'bz2' (but
                # _splitzipext returns the extension *with* its leading
                # dot) and discarded the str.replace() result, so the mode
                # was never actually stripped.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A collection of DataSources sharing one base URL or directory.

    Every path handled by this class is first prefixed with `baseurl`
    (unless the caller already included it), after which the normal
    `DataSource` machinery takes over.  Use it when working with many
    files under one local or remote root: initialize once with the root,
    then refer to each file by its name only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Directory where downloaded source files are stored.  If None, a
        temporary directory is created.  Defaults to the current
        directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """
    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return ``path`` with `baseurl` prepended unless already present."""
        if self._baseurl in path:
            return path
        return os.path.join(self._baseurl, path)

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        `path` may, but does not have to, include the `baseurl` with
        which the `Repository` was initialized.  For an URL, this is the
        location the file has (or would have) once downloaded.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists, prepending the Repository base URL to path.

        True if `path` (with `baseurl` prepended as needed) exists as a
        local file, as an already-downloaded copy in the destination
        directory, or as a valid, accessible remote URL.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL, with or without `baseurl`.

        Returns
        -------
        out : bool
            True if `path` exists.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return a file-like object, prepending the base URL.

        If `path` (with `baseurl` prepended as needed) is an URL, it is
        downloaded into the destination directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL, with or without `baseurl`.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path` with.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
| bsd-3-clause |
Daniex/horizon | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tests.py | 29 | 15039 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security \
.floating_ips import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from horizon.workflows import views
# URL of the Access & Security panel index and the URL namespace under
# which the floating-IP views are registered.
INDEX_URL = reverse('horizon:project:access_and_security:index')
NAMESPACE = "horizon:project:access_and_security:floating_ips"
class FloatingIpViewTests(test.TestCase):
    """View tests for the floating IP panel.

    Each test records the expected API calls on mox stubs (the record
    order must match the order the view issues them), replays, then
    drives the view through the Django test client.
    """
    @test.create_stubs({api.network: ('floating_ip_target_list',
                                      'tenant_floating_ip_list',)})
    def test_associate(self):
        # GET of the associate workflow: already-associated IPs must not
        # be offered as choices.
        api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        self.mox.ReplayAll()
        url = reverse('%s:associate' % NAMESPACE)
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        workflow = res.context['workflow']
        choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
        # Verify that our "associated" floating IP isn't in the choices list.
        self.assertTrue(self.floating_ips.first() not in choices)
    @test.create_stubs({api.network: ('floating_ip_target_list',
                                      'floating_ip_target_get_by_instance',
                                      'tenant_floating_ip_list',)})
    def test_associate_with_instance_id(self):
        # Same as test_associate but preselecting a target instance via
        # the ?instance_id= query parameter.
        api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.network.floating_ip_target_get_by_instance(
            IsA(http.HttpRequest), 'TEST-ID', self.servers.list()) \
            .AndReturn('TEST-ID')
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        self.mox.ReplayAll()
        base_url = reverse('%s:associate' % NAMESPACE)
        params = urlencode({'instance_id': 'TEST-ID'})
        url = '?'.join([base_url, params])
        res = self.client.get(url)
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        workflow = res.context['workflow']
        choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
        # Verify that our "associated" floating IP isn't in the choices list.
        self.assertTrue(self.floating_ips.first() not in choices)
    @test.create_stubs({api.network: ('floating_ip_associate',
                                      'floating_ip_target_list',
                                      'tenant_floating_ip_list',)})
    def test_associate_post(self):
        # Successful POST associates the IP and redirects to the index.
        floating_ip = self.floating_ips.list()[1]
        server = self.servers.first()
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.network.floating_ip_associate(IsA(http.HttpRequest),
                                          floating_ip.id,
                                          server.id)
        self.mox.ReplayAll()
        form_data = {'instance_id': server.id,
                     'ip_id': floating_ip.id}
        url = reverse('%s:associate' % NAMESPACE)
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.network: ('floating_ip_associate',
                                      'floating_ip_target_list',
                                      'tenant_floating_ip_list',)})
    def test_associate_post_with_redirect(self):
        # A ?next= parameter overrides the default post-submit redirect.
        floating_ip = self.floating_ips.list()[1]
        server = self.servers.first()
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.network.floating_ip_associate(IsA(http.HttpRequest),
                                          floating_ip.id,
                                          server.id)
        self.mox.ReplayAll()
        form_data = {'instance_id': server.id,
                     'ip_id': floating_ip.id}
        url = reverse('%s:associate' % NAMESPACE)
        next = reverse("horizon:project:instances:index")
        res = self.client.post("%s?next=%s" % (url, next), form_data)
        self.assertRedirectsNoFollow(res, next)
    @test.create_stubs({api.network: ('floating_ip_associate',
                                      'floating_ip_target_list',
                                      'tenant_floating_ip_list',)})
    def test_associate_post_with_exception(self):
        # An API failure during association still redirects to the index
        # (the error is reported via the messages framework).
        floating_ip = self.floating_ips.list()[1]
        server = self.servers.first()
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
            .AndReturn(self.servers.list())
        api.network.floating_ip_associate(IsA(http.HttpRequest),
                                          floating_ip.id,
                                          server.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        form_data = {'instance_id': server.id,
                     'ip_id': floating_ip.id}
        url = reverse('%s:associate' % NAMESPACE)
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_list',),
                        api.network: ('floating_ip_disassociate',
                                      'floating_ip_supported',
                                      'tenant_floating_ip_get',
                                      'tenant_floating_ip_list',)})
    def test_disassociate_post(self):
        # Row action "disassociate" succeeds and shows a success message.
        floating_ip = self.floating_ips.first()
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn([self.servers.list(), False])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.floating_ip_disassociate(IsA(http.HttpRequest),
                                             floating_ip.id)
        self.mox.ReplayAll()
        action = "floating_ips__disassociate__%s" % floating_ip.id
        res = self.client.post(INDEX_URL, {"action": action})
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.nova: ('server_list',),
                        api.network: ('floating_ip_disassociate',
                                      'floating_ip_supported',
                                      'tenant_floating_ip_get',
                                      'tenant_floating_ip_list',)})
    def test_disassociate_post_with_exception(self):
        # API failure during disassociation still redirects to the index.
        floating_ip = self.floating_ips.first()
        api.nova.server_list(IsA(http.HttpRequest)) \
            .AndReturn([self.servers.list(), False])
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .AndReturn(self.floating_ips.list())
        api.network.floating_ip_disassociate(IsA(http.HttpRequest),
                                             floating_ip.id) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        action = "floating_ips__disassociate__%s" % floating_ip.id
        res = self.client.post(INDEX_URL, {"action": action})
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.network: ('floating_ip_supported',
                                      'tenant_floating_ip_list',
                                      'security_group_list',
                                      'floating_ip_pools_list',),
                        api.nova: ('keypair_list',
                                   'server_list',),
                        quotas: ('tenant_quota_usages',),
                        api.base: ('is_service_enabled',)})
    def test_allocate_button_disabled_when_quota_exceeded(self):
        # With zero floating IPs available, the Allocate button must be
        # rendered disabled and annotated with "(Quota exceeded)".
        keypairs = self.keypairs.list()
        floating_ips = self.floating_ips.list()
        floating_pools = self.pools.list()
        quota_data = self.quota_usages.first()
        quota_data['floating_ips']['available'] = 0
        sec_groups = self.security_groups.list()
        api.network.floating_ip_supported(
            IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(
            IsA(http.HttpRequest)) \
            .AndReturn(floating_ips)
        api.network.security_group_list(
            IsA(http.HttpRequest)).MultipleTimes()\
            .AndReturn(sec_groups)
        api.network.floating_ip_pools_list(
            IsA(http.HttpRequest)) \
            .AndReturn(floating_pools)
        api.nova.keypair_list(
            IsA(http.HttpRequest)) \
            .AndReturn(keypairs)
        api.nova.server_list(
            IsA(http.HttpRequest)) \
            .AndReturn([self.servers.list(), False])
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)).MultipleTimes() \
            .AndReturn(quota_data)
        api.base.is_service_enabled(
            IsA(http.HttpRequest),
            'network').MultipleTimes() \
            .AndReturn(True)
        api.base.is_service_enabled(
            IsA(http.HttpRequest),
            'ec2').MultipleTimes() \
            .AndReturn(False)
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL +
                              "?tab=access_security_tabs__floating_ips_tab")
        allocate_link = tables.AllocateIP()
        url = allocate_link.get_link_url()
        classes = (list(allocate_link.get_default_classes())
                   + list(allocate_link.classes))
        link_name = "%s (%s)" % (unicode(allocate_link.verbose_name),
                                 "Quota exceeded")
        expected_string = ("<a href='%s' title='%s' class='%s disabled' "
                           "id='floating_ips__action_allocate'>"
                           "<span class='fa fa-link'>"
                           "</span>%s</a>"
                           % (url, link_name, " ".join(classes), link_name))
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")
class FloatingIpNeutronViewTests(FloatingIpViewTests):
    """Re-run the floating IP view tests with Neutron-style (UUID-keyed)
    floating IP fixtures, plus a Neutron-specific quota test.

    Inherits every test from FloatingIpViewTests; setUp swaps the
    floating IP fixture set so the inherited tests exercise the UUID
    variants.
    """

    def setUp(self):
        # NOTE(review): super() is invoked with FloatingIpViewTests (the
        # parent class) rather than this class, so any setUp defined on
        # FloatingIpViewTests itself is skipped and the next class in the
        # MRO runs instead. Presumably deliberate — confirm before
        # "fixing" it to super(FloatingIpNeutronViewTests, ...).
        super(FloatingIpViewTests, self).setUp()
        # Swap in UUID-based floating IP fixtures; keep the originals so
        # tearDown can restore them.
        self._floating_ips_orig = self.floating_ips
        self.floating_ips = self.floating_ips_uuid

    def tearDown(self):
        # Restore the fixture set replaced in setUp.
        self.floating_ips = self._floating_ips_orig
        # NOTE(review): same parent-class super() call as in setUp — see
        # the note there.
        super(FloatingIpViewTests, self).tearDown()

    @test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
                                   'server_list'),
                        api.network: ('floating_ip_pools_list',
                                      'floating_ip_supported',
                                      'security_group_list',
                                      'tenant_floating_ip_list'),
                        api.neutron: ('is_extension_supported',
                                      'tenant_quota_get',
                                      'network_list',
                                      'router_list',
                                      'subnet_list'),
                        api.base: ('is_service_enabled',)})
    @test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
    def test_correct_quotas_displayed(self):
        """With Neutron quotas enabled, the allocate view must show the
        floating IP quota limit coming from Neutron's 'floatingip' quota,
        not Nova's.
        """
        # Only this tenant's servers count toward the usage figures.
        servers = [s for s in self.servers.list()
                   if s.tenant_id == self.request.user.tenant_id]
        # Record the API expectations; mox replays them in this order
        # (calls marked MultipleTimes() may repeat).
        api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
            .AndReturn(False)
        api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
            .MultipleTimes().AndReturn(True)
        api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
            .AndReturn(self.quotas.first())
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())
        search_opts = {'tenant_id': self.request.user.tenant_id}
        api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts,
                             all_tenants=True) \
            .AndReturn([servers, False])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'security-group').AndReturn(True)
        # 'quotas' extension enabled => Neutron's tenant quota is fetched
        # and should take precedence for floating IPs.
        api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
            .AndReturn(True)
        api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(self.neutron_quotas.first())
        api.neutron.router_list(IsA(http.HttpRequest)) \
            .AndReturn(self.routers.list())
        api.neutron.subnet_list(IsA(http.HttpRequest)) \
            .AndReturn(self.subnets.list())
        api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
            .AndReturn(self.networks.list())
        api.network.floating_ip_supported(IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(self.floating_ips.list())
        api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
            .AndReturn(self.pools.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        self.mox.ReplayAll()
        url = reverse('%s:allocate' % NAMESPACE)
        res = self.client.get(url)
        # The quota shown in the template context must be the Neutron
        # 'floatingip' limit from the fixture.
        self.assertEqual(res.context['usages']['floating_ips']['quota'],
                         self.neutron_quotas.first().get('floatingip').limit)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.